| code (string, length 5-1M) | repo_name (string, length 5-109) | path (string, length 6-208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/****************************************************************************
* Copyright Fabien Sartor
* Contributors: Fabien Sartor (fabien.sartor@gmail.com)
* http://fasar.fr
*
* This software is a computer program whose purpose is to compute differences
* between two files.
*
****************************************************************************
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL:
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
****************************************************************************
*/
package fsart.diffTools.csvDsl
/**
*
* User: fabien
* Date: 22/05/12
* Time: 15:40
*
*/
import org.apache.commons.logging.{LogFactory, Log}
import org.junit.Test
import org.junit.Assert._
import name.fraser.neil.plaintext._
import name.fraser.neil.plaintext.diff_match_patch.Operation
import fsart.diffTools.csvBuilder.CsvBuilder
import fsart.diffTools.csvModel.CsvData
class CsvRulesDslTest {
private val log: Log = LogFactory.getLog(this.getClass)
@Test
def duplicatedLinesTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;2;3;4")
cvsBuilder.appendLine("5;6;7;8")
cvsBuilder.appendLine("1;2;3;4")
cvsBuilder.appendLine("1;2;3;500")
cvsBuilder.appendLine("5;6;7;900")
var csv1: CsvData[String] = cvsBuilder.getCvsData
import CsvRulesDsl._
var res = duplicatedLines of csv1
assertTrue(res.array.size == 2)
cvsBuilder.appendLine("5;6;7;900")
csv1 = cvsBuilder.getCvsData
res = duplicatedLines of csv1
assertTrue(res.array.size == 4)
}
@Test
def duplicatedKeysTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;2;3;4")
cvsBuilder.appendLine("5;6;7;8")
cvsBuilder.appendLine("1;2;3;4") //duplicated line, it is not in res
cvsBuilder.appendLine("1;2;3;500")
var csv1: CsvData[String] = cvsBuilder.getCvsData
import CsvRulesDsl._
var res = duplicatedKeys of csv1
System.out.println("coucou " + res)
assertTrue(res.array.size == 2)
cvsBuilder.appendLine("5;6;7;900")
csv1 = cvsBuilder.getCvsData
res = duplicatedKeys of csv1
assertTrue(res.array.size == 4)
}
@Test
def addedTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;2;3;4")
cvsBuilder.appendLine("5;6;7;8")
cvsBuilder.appendLine("7;8;9;10")
var csv1: CsvData[String] = cvsBuilder.getCvsData
val cvsBuilder2 = new CsvBuilder()
cvsBuilder2.appendLine("1;2;3;4")
cvsBuilder2.appendLine("5;6;7;8")
var csv2: CsvData[String] = cvsBuilder2.getCvsData
import CsvRulesDsl._
var res = additionsMade by csv1 withRef csv1
println("Mon tab est " + res.array)
assertTrue(res.array.size == 0)
res = additionsMade by csv2 withRef csv1
assertTrue(res.array.size == 0)
// csv1 has 2 more lines and one more key
cvsBuilder.appendLine("7;8;9;10")
csv1 = cvsBuilder.getCvsData
res = additionsMade by csv2 withRef csv1
assertTrue(res.array.size == 0)
// csv1 has 1 more line but the same number of keys
cvsBuilder2.appendLine("7;8;9;10")
csv2 = cvsBuilder2.getCvsData
res = additionsMade by csv2 withRef csv1
assertTrue(res.array.size == 0)
// csv2 has 1 more key
cvsBuilder2.appendLine("10;8;9;10")
csv2 = cvsBuilder2.getCvsData
res = additionsMade by csv2 withRef csv1
println("res : " + res.array)
assertTrue(res.array.size == 1)
}
@Test
def suppressedTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;2;3;4")
cvsBuilder.appendLine("5;6;7;8")
var csv1: CsvData[String] = cvsBuilder.getCvsData
val cvsBuilder2 = new CsvBuilder()
var csv2: CsvData[String] = cvsBuilder2.getCvsData
import CsvRulesDsl._
var res = suppressionsMade by csv2 withRef csv1
assertTrue(res.array.size == 2)
// Check for duplicated lines
cvsBuilder2.appendLine("1;2;3;4")
csv2 = cvsBuilder2.getCvsData
res = suppressionsMade by csv2 withRef csv1
System.out.println("Salut coucou " + res.array.size)
assertTrue(res.array.size == 1)
// Check for 3 added lines with 2 duplicated
cvsBuilder2.appendLine("5;6;7;8")
csv2 = cvsBuilder2.getCvsData
res = suppressionsMade by csv2 withRef csv1
assertTrue(res.array.size == 0)
}
@Test
def modifiedTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;3;3;4")
cvsBuilder.appendLine("5;6;7;8")
cvsBuilder.appendLine("10;10;10;10")
var csv1: CsvData[String] = cvsBuilder.getCvsData
val cvsBuilder2 = new CsvBuilder()
cvsBuilder2.appendLine("1;2;3;2")
cvsBuilder2.appendLine("5;6;7;8")
cvsBuilder2.appendLine("11;11;11;11")
var csv2: CsvData[String] = cvsBuilder2.getCvsData
import CsvRulesDsl._
var res = modificationsMade by csv2 withRef csv1
System.out.println("Salut coucou " + res.array.size)
assertTrue(res.array.size == 2)
assertTrue(res.array(0)(1)(0).operation == Operation.DELETE)
assertTrue(res.array(0)(1)(0).text == "3")
assertTrue(res.array(0)(1)(1).operation == Operation.INSERT)
assertTrue(res.array(0)(1)(1).text == "2")
}
@Test
def modifiedAndFiltredTest {
val cvsBuilder = new CsvBuilder()
cvsBuilder.appendLine("1;3;3;4")
cvsBuilder.appendLine("2;3;4;5")
cvsBuilder.appendLine("3;10;9;10")
var csv1: CsvData[String] = cvsBuilder.getCvsData
val cvsBuilder2 = new CsvBuilder()
cvsBuilder2.appendLine("1;2;3;2")
cvsBuilder2.appendLine("2;6;7;8")
cvsBuilder2.appendLine("3;11;11;11")
var csv2: CsvData[String] = cvsBuilder2.getCvsData
import CsvRulesDsl._
var res = modificationsMade by csv2 withRef csv1 mapValuesDuringComparison ( List(
("1", "2"),
("10", "11")
))
System.out.println("Salut coucou " + res.array.size)
println("Tb " + res.array(2))
assertTrue(res.array.size == 3)
assertTrue(res.array(2)(1)(0).operation == Operation.EQUAL)
assertTrue(res.array(2)(1)(0).text == "11")
assertTrue(res.array(2)(2)(0).operation == Operation.DELETE)
assertTrue(res.array(2)(2)(0).text == "9")
assertTrue(res.array(2)(2)(1).operation == Operation.INSERT)
assertTrue(res.array(2)(2)(1).text == "11")
}
}
| fasar/diffTools | model/diffTools/src/test/scala/fsart/diffTools/csvDsl/CsvRulesDslTest.scala | Scala | apache-2.0 | 7,675 |
package scavlink.task
import scala.reflect.runtime.universe._
package object schema {
type SchemaDef = Either[Pointer, Schema]
def definitionNameOf(typ: Type): String = typ.typeSymbol.name.decodedName.toString
def propertyNameOf(typ: Type): String = lowercaseName(definitionNameOf(typ))
def propertyNameOf[T: TypeTag]: String = propertyNameOf(typeOf[T])
def lowercaseName(s: String): String = if (s.isEmpty) s else s.head.toLower + s.tail
}
| nickolasrossi/scavlink | src/main/scala/scavlink/task/schema/package.scala | Scala | mit | 454 |
// AORTA is copyright (C) 2012 Dustin Carlino, Mike Depinet, and Piyush
// Khandelwal of UT Austin
// License: GNU GPL v2
package utexas.aorta.map.make
import scala.collection.mutable
import utexas.aorta.map.{Coordinate, Road}
import utexas.aorta.common.Util
class PreGraph2(old_graph: PreGraph1) {
// Maps (v1, v2, road name) to the edge. Used for detecting cul-de-sacs easily.
private val edge_lookup = new mutable.HashMap[(Coordinate, Coordinate, String), PreEdge2]
// find true edges between adjacent vertices
Util.log("Splitting " + old_graph.edges.length + " roads into edges between intersections")
var edges = old_graph.edges.flatMap(split_road)
def split_road(road: PreEdge1): List[PreEdge2] = {
// Walk the list of points in this edge, discovering chains between
// vertices
// TODO this is essentially a 'split at vertices'
val split_edges = new mutable.ListBuffer[Option[PreEdge2]]
//Util.assert_eq(old_graph.is_vert(road.points.head), true)
if (!old_graph.is_vert(road.points.head)) {
return Nil
}
var start = 0
// List.range is [lower, upper)
for (i <- List.range(0, road.points.length)) {
if (start != i && old_graph.is_vert(road.points(i))) {
// so we have an edge from start to i
// slice is [from, till), hence the +1
split_edges += find_or_make_edge(road.points.slice(start, i + 1), road)
start = i
}
}
//Util.assert_eq(start, road.points.length - 1);
// TODO seeing this break on new maps from OSM, no time to investigate right
// now
if (start != road.points.length - 1) {
return Nil
}
// maybe Nil, if so, flatMap doesn't care
// honey badger doesn't give a fuck
return split_edges.toList.flatten
}
// None if it's already there
def find_or_make_edge(points: List[Coordinate], edge_dat: PreEdge1): Option[PreEdge2] =
{
val v1 = points.head
val v2 = points.last
// do we already have an edge from v1->v2 or v2->v1?
// this handling mainly needs to deal with cul-de-sacs
return if (edge_lookup.contains((v1, v2, edge_dat.name)) ||
edge_lookup.contains((v2, v1, edge_dat.name)))
{
//Util.log("well, finally! already exists " + edge_dat.name)
// TODO make a new edge if the points dont match?
None
} else {
val e = new PreEdge2(v1, v2, points, edge_dat)
//edge_lookup((v1, v2, edge_dat.name)) = e
Some(e)
}
}
}
class PreEdge2(var from: Coordinate, var to: Coordinate,
val points: List[Coordinate], val dat: PreEdge1)
extends Ordered[PreEdge2]
{
val id = PreEdge2.next_id
PreEdge2.next_id += 1
override def compare(other: PreEdge2) = id.compare(other.id)
override def toString = s"Road ${dat.name} btwn $from and $to"
def length = Road.road_len(points)
def is_culdesac = from == to
}
object PreEdge2 {
var next_id = 0
}
| dabreegster/aorta | utexas/aorta/map/make/Pass2_Part1.scala | Scala | gpl-2.0 | 2,917 |
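`split_road` above cuts a road's point list into chains that run between intersection vertices, keeping the shared vertex on both sides of each cut. A standalone sketch of that "split at vertices" idea using plain collections instead of the AORTA types; the object name, the predicate, and the example values are illustrative only:

// Illustrative only: split a polyline into chains between marked vertices,
// keeping the shared vertex at the end of one chain and the start of the next.
object SplitAtVerticesSketch {
  def splitAtVertices[A](points: Vector[A], isVertex: A => Boolean): List[Vector[A]] = {
    val cuts = points.indices.filter(i => isVertex(points(i)))
    cuts.zip(cuts.drop(1)).map { case (from, to) => points.slice(from, to + 1) }.toList
  }

  // splitAtVertices(Vector(1, 2, 3, 4, 5), Set(1, 3, 5)) == List(Vector(1, 2, 3), Vector(3, 4, 5))
}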
package com.getjenny.starchat.resources
/**
* Created by Angelo Leto <angelo@getjenny.com> on 27/06/16.
*/
import akka.http.scaladsl.server.Route
import com.getjenny.starchat.routing._
import com.getjenny.starchat.services.{ConversationLogsService, QuestionAnswerService}
trait ConversationLogsResource extends StarChatResource {
private[this] val questionAnswerService: QuestionAnswerService = ConversationLogsService
private[this] val routeName: String = "conversation_logs"
private[this] val qaResource = new QAResource(questionAnswerService, routeName)
def clTermsCountRoutes: Route = qaResource.termsCountRoutes
def clDictSizeRoutes: Route = qaResource.dictSizeRoutes
def clTotalTermsRoutes: Route = qaResource.totalTermsRoutes
def clQuestionAnswerStreamRoutes: Route = qaResource.questionAnswerStreamRoutes
def clQuestionAnswerRoutes: Route = qaResource.questionAnswerRoutes
def clQuestionAnswerSearchRoutes: Route = qaResource.questionAnswerSearchRoutes
def clUpdateTermsRoutes: Route = qaResource.updateTermsRoutes
def clCountersCacheSizeRoutes: Route = qaResource.countersCacheSizeRoutes
def clQuestionAnswerConversationsRoutes: Route = qaResource.questionAnswerConversationsRoutes
def clQuestionAnswerAnalyticsRoutes: Route = qaResource.questionAnswerAnalyticsRoutes
def clAnnotationsRoutes: Route = qaResource.annotationsRoutes
def clUpdateRoutes: Route = qaResource.updateRoutes
def clAnonymConfigRoutes: Route = qaResource.anonymConfigRoutes
}
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/resources/ConversationLogsResource.scala | Scala | gpl-2.0 | 1,508 |
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package dagr.tasks.picard
import java.text.SimpleDateFormat
import dagr.core.execsystem._
import dagr.core.tasksystem.{JvmRanOutOfMemory, VariableResources}
import dagr.tasks.DagrDef.{DirPath, FilePath}
import htsjdk.samtools.util.Iso8601Date
import picard.util.IlluminaUtil.IlluminaAdapterPair
import scala.collection.mutable.ListBuffer
class IlluminaBasecallsToSam(basecallsDir: DirPath,
lane: Int,
runBarcode: String,
readStructure: String,
libraryParamsFile: FilePath,
runDate: Option[Iso8601Date] = None,
sequencingCenter: Option[String] = None,
includeNonPfReads: Boolean = false,
ignoreUnexpectedBarcodes: Boolean = false,
minThreads: Int = 4,
maxThreads: Int = 16,
adapterPairs: Seq[IlluminaAdapterPair] = Seq(
IlluminaAdapterPair.INDEXED,
IlluminaAdapterPair.DUAL_INDEXED,
IlluminaAdapterPair.NEXTERA_V2,
IlluminaAdapterPair.FLUIDIGM
),
barcodesDir: Option[DirPath] = None,
maxReadsInRamPerTile: Option[Int] = Some(500000),
firstTile: Option[Int] = None,
tileLimit: Option[Int] = None,
tmpDir: Option[DirPath] = None,
sort: Option[Boolean] = None
) extends PicardTask with VariableResources with JvmRanOutOfMemory {
protected val byMemoryPerThread: Memory = Memory("1GB")
protected var memoryPerThread: Memory = Memory("2GB")
/** Increases the memory per core/thread and returns the total memory required to run with the fewest number of threads. */
override protected def nextMemory(currentMemory: Memory): Option[Memory] = {
// Increase the amount of memory required per-core
this.memoryPerThread = this.memoryPerThread + this.byMemoryPerThread
Some(Memory(this.memoryPerThread.value * minThreads))
}
/** Chooses the maximum # of cores given a memory per core requirement. */
override def pickResources(resources: ResourceSet): Option[ResourceSet] = {
Range.inclusive(start=maxThreads, end=minThreads, step= -1)
.flatMap { cores =>
resources.subset(Cores(cores), Memory(cores * memoryPerThread.value))
}.headOption
}
override protected def addPicardArgs(buffer: ListBuffer[Any]): Unit = {
buffer += "BASECALLS_DIR=" + basecallsDir
buffer += "LANE=" + lane
buffer += "RUN_BARCODE=" + runBarcode
barcodesDir.foreach(dir => buffer += "BARCODES_DIR=" + dir)
runDate.foreach(date => buffer += "RUN_START_DATE=" + new SimpleDateFormat("yyyy/MM/dd").format(date))
buffer += "SEQUENCING_CENTER=" + sequencingCenter.getOrElse("null")
buffer += "NUM_PROCESSORS=" + resources.cores.toInt
buffer += "READ_STRUCTURE=" + readStructure.toString
buffer += "LIBRARY_PARAMS=" + libraryParamsFile
buffer += "INCLUDE_NON_PF_READS=" + includeNonPfReads
sort.foreach(buffer += "SORT=" + _)
if (ignoreUnexpectedBarcodes) buffer += "IGNORE_UNEXPECTED_BARCODES=true"
if (adapterPairs.isEmpty) buffer += "ADAPTERS_TO_CHECK=null"
else adapterPairs.foreach(buffer += "ADAPTERS_TO_CHECK=" + _)
maxReadsInRamPerTile.foreach(n => buffer += "MAX_READS_IN_RAM_PER_TILE=" + n)
firstTile.foreach(buffer += "FIRST_TILE=" + _) // If set, this is the first tile to be processed (used for debugging).
tileLimit.foreach(buffer += "TILE_LIMIT=" + _) // If set, process no more than this many tiles (used for debugging).
tmpDir.foreach(tmp => buffer += "TMP_DIR=" + tmp)
}
}
| fulcrumgenomics/dagr | tasks/src/main/scala/dagr/tasks/picard/IlluminaBasecallsToSam.scala | Scala | mit | 5,087 |
/*
* A real-time collaborative tool to develop files over the network.
* Copyright (C) 2010 Mauro Ciancio and Leandro Gilioli
* {maurociancio,legilioli} at gmail dot com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ar.noxit.paralleleditor.client.converter
import ar.noxit.paralleleditor.common.messages.Response
import ar.noxit.paralleleditor.client.CommandFromKernel
trait ResponseConverter {
def convert(response: Response): CommandFromKernel
}
| maurociancio/parallel-editor | src/parallel-editor-client/src/main/scala/ar/noxit/paralleleditor/client/converter/ResponseConverter.scala | Scala | gpl-3.0 | 1,106 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.source.log.jul.xml
import java.io.StringReader
import java.util.Date
import org.scalatest.{Matchers, FunSuite}
import viper.source.log.jul.JULLogRecord
import viper.source.log.xml.JULXMLConsumer
/**
* Created by kieron on 06/09/2014.
*/
class JULXMLConsumerTest extends FunSuite with Matchers {
test("empty stream will return none") {
val xml = " <log> </log> "
val reader = new StringReader(xml)
val consumer = new JULXMLConsumer(reader)
for (_ <- 1 to 10) {
consumer.next() should equal (None)
}
}
test("stream with full record returns right details") {
val xml =
"""
|<record>
| <date>2014-09-06T13:43:22</date>
| <millis>1410007402801</millis>
| <sequence>161651</sequence>
| <logger>generated</logger>
| <level>INFO</level>
| <class>viper.util.LogFileGenerator</class>
| <method>logRandom</method>
| <thread>1</thread>
| <message>Now calculating the number of atoms in the universe</message>
|</record>
""".stripMargin
val reader = new StringReader(xml)
val consumer = new JULXMLConsumer(reader)
val record = consumer.nextExpected().asInstanceOf[JULLogRecord]
record.id should equal ("161651_1410007402801")
record.datetime should equal (new Date(1410007402801L))
record.timestamp should equal (1410007402801L)
record.level should equal ("INFO")
record.message should equal ("Now calculating the number of atoms in the universe")
}
}
| vyadh/viper | source-log/src/test/scala/viper/source/log/jul/xml/JULXMLConsumerTest.scala | Scala | apache-2.0 | 2,153 |
package com.example
import akka.NotUsed
import com.lightbend.lagom.scaladsl.api._
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import scala.concurrent.Future
trait A extends Service {
def hello(name: String): ServiceCall[NotUsed, String]
override def descriptor = {
import Service._
named("a").withCalls(
pathCall("/hello/:name", hello _)
).withAutoAcl(true)
}
}
class AImpl extends A {
override def hello(name: String) = ServiceCall { _ =>
Future.successful(s"Hello $name")
}
}
abstract class AApplication(context: LagomApplicationContext)
extends LagomApplication(context) with AhcWSComponents {
override def lagomServer = serverFor[A](new AImpl)
}
class ALoader extends LagomApplicationLoader {
override def load(context: LagomApplicationContext): LagomApplication =
new AApplication(context) {
override def serviceLocator = NoServiceLocator
}
override def loadDevMode(context: LagomApplicationContext): LagomApplication =
new AApplication(context) with LagomDevModeComponents
}
| rstento/lagom | dev/sbt-plugin/src/sbt-test/sbt-plugin/external-project-scaladsl/a/src/main/scala/com/example/A.scala | Scala | apache-2.0 | 1,231 |
package monocle.std
import monocle.MonocleSuite
import monocle.law.discipline.function.{Cons1Tests, EachTests, IndexTests}
import scalaz.OneAnd
class OneAndSpec extends MonocleSuite {
checkAll("each OneAnd", EachTests[OneAnd[List, Int], Int])
checkAll("index OneAnd", IndexTests[OneAnd[List, Int], Int, Int])
checkAll("cons1 OneAnd", Cons1Tests[OneAnd[List, Int], Int, List[Int]])
}
| rperry/Monocle | test/shared/src/test/scala/monocle/std/OneAndSpec.scala | Scala | mit | 392 |
/**
* Created by Romain Reuillon on 22/09/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.openmole.core
import java.util.zip.GZIPInputStream
import org.openmole.tool.file._
import org.openmole.tool.tar._
import gridscale.http
import org.openmole.core.context._
import org.openmole.core.expansion._
import org.openmole.core.fileservice.FileService
import org.openmole.core.preference.Preference
import org.openmole.core.workspace._
import org.openmole.tool.random._
import org.openmole.tool.tar.TarInputStream
import java.io.IOException
import org.openmole.core.exception.InternalProcessingError
package object market {
import org.json4s._
import org.json4s.jackson.Serialization
implicit val formats = Serialization.formats(NoTypeHints)
def indexURL(implicit preference: Preference, randomProvider: RandomProvider, newFile: TmpDirectory, fileService: FileService) =
ExpandedString(preference(MarketIndex.marketIndexLocation)).from(Context("version" → buildinfo.version))
def marketIndex(implicit preference: Preference, randomProvider: RandomProvider, newFile: TmpDirectory, fileService: FileService) =
Serialization.read[MarketIndex](http.get(indexURL))
def downloadEntry(entry: MarketIndexEntry, path: File) = try {
http.getStream(entry.url) { is ⇒
val tis = new TarInputStream(new GZIPInputStream(is))
try tis.extract(path)
finally tis.close
path.applyRecursive(_.setExecutable(true))
}
}
catch {
case e: IOException ⇒ throw new InternalProcessingError(s"Cannot download entry at url ${entry.url}", e)
}
}
| openmole/openmole | openmole/core/org.openmole.core.market/src/main/scala/org/openmole/core/market/package.scala | Scala | agpl-3.0 | 2,218 |
package org.ensime.test
import org.scalatest.Spec
import org.scalatest.matchers.ShouldMatchers
import util.Helpers._
class PackageInspectionSpec extends Spec with ShouldMatchers{
describe("Package Info") {
it("should get a package description that includes the member 'Vector'") {
withPresCompiler{ cc =>
val src = srcFile("Test1.scala", contents(
"import java.util.Vector",
"object Test1{",
"def main{",
"val dude = 1",
"}",
"}"
))
cc.askReloadAndTypeFiles(List(src))
val info = cc.askPackageByPath("java.util")
info.members.exists(m => m.name == "Vector") should be(true)
info.members.exists(m => m.name == "List") should be(true)
}
}
}
}
| bbatsov/ensime | src/test/scala/org/ensime/test/PackageInspectionSpec.scala | Scala | gpl-3.0 | 713 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.stubs.impl
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement}
import com.intellij.util.SofterReference
import com.intellij.util.io.StringRef
import org.argus.cit.intellij.jawa.lang.psi.JawaMethodDeclaration
import org.argus.cit.intellij.jawa.lang.psi.stubs.JawaMethodStub
import org.argus.jawa.core.Signature
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
class JawaMethodStubImpl[ParentPsi <: PsiElement](parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement])
extends StubBaseWrapper[JawaMethodDeclaration](parent, elemType) with JawaMethodStub {
private var name: StringRef = _
private var signatureText: StringRef = _
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
name: String, signatureText: String) = {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
this.name = StringRef.fromString(name)
this.signatureText = StringRef.fromString(signatureText)
}
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
name: StringRef, signatureText: StringRef) = {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
this.name = name
this.signatureText = signatureText
}
override def getName: String = StringRef.toString(name)
override def isDeclaration: Boolean = true
override def getSignature: Signature = new Signature(getSignatureText)
override def getSignatureText: String = StringRef.toString(signatureText)
}
| arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/psi/stubs/impl/JawaMethodStubImpl.scala | Scala | epl-1.0 | 2,223 |
/*
* Stratio Meta
*
* Copyright (c) 2014, Stratio, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library.
*/
package com.stratio.meta.driver
import akka.actor.{ ActorSelection, ActorSystem}
import com.stratio.meta.driver.config.{DriverSectionConfig, ServerSectionConfig, BasicDriverConfig, DriverConfig}
import akka.contrib.pattern.ClusterClient
import com.stratio.meta.driver.actor.ProxyActor
import com.stratio.meta.common.result._
import com.stratio.meta.common.ask.APICommand
import org.apache.log4j.Logger
import scala.concurrent.duration._
import java.util.UUID
import akka.pattern.ask
import com.stratio.meta.driver.result.SyncResultHandler
import com.stratio.meta.common.exceptions._
import com.stratio.meta.common.ask.Connect
import com.stratio.meta.common.ask.Command
import com.stratio.meta.common.ask.Query
import com.stratio.meta.communication.Disconnect
import com.stratio.meta.driver.utils.RetryPolitics
object BasicDriver extends DriverConfig {
/**
* Class logger.
*/
override lazy val logger = Logger.getLogger(getClass)
def getBasicDriverConfigFromFile ={
logger.debug("RetryTimes --> " + retryTimes)
logger.debug("RetryDuration --> " + retryDuration.duration.toMillis.toString)
logger.debug("ClusterName --> " + clusterName)
logger.debug("ClusterName --> " + clusterActor)
logger.debug("ClusterHosts --> " + clusterHosts.map(_.toString).toArray.toString)
new BasicDriverConfig(new DriverSectionConfig(retryTimes, retryDuration.duration.toMillis),
new ServerSectionConfig(clusterName, clusterActor, clusterHosts.map(_.toString).toArray))
}
}
class BasicDriver(basicDriverConfig: BasicDriverConfig) {
lazy val logger= BasicDriver.logger
lazy val queries: java.util.Map[String, IResultHandler] = new java.util.HashMap[String, IResultHandler]
lazy val system = ActorSystem("MetaDriverSystem",BasicDriver.config)
//For Futures
implicit val context = system.dispatcher
lazy val initialContacts: Set[ActorSelection] = contactPoints.map(contact=> system.actorSelection(contact)).toSet
lazy val clusterClientActor = system.actorOf(ClusterClient.props(initialContacts),"remote-client")
lazy val proxyActor = system.actorOf(ProxyActor.props(clusterClientActor,basicDriverConfig.serverSection.clusterActor, this), "proxy-actor")
lazy val retryPolitics: RetryPolitics = {
new RetryPolitics(basicDriverConfig.driverSection.retryTimes, basicDriverConfig.driverSection.retryDuration.millis)
}
lazy val contactPoints: List[String]= {
basicDriverConfig.serverSection.clusterHosts.toList.map(host=>"akka.tcp://" + basicDriverConfig.serverSection.clusterName + "@" + host + "/user/receptionist")
}
var userId: String = null
def this() {
this(BasicDriver.getBasicDriverConfigFromFile)
}
/**
* Establish a connection to the MetaServer.
* @param user The user login (audit only)
* @return ConnectResult
*/
@throws(classOf[ConnectionException])
def connect(user:String): Result = {
logger.info("Establishing connection with user: " + user + " to " + contactPoints)
val result = retryPolitics.askRetry(proxyActor,new Connect(user),5 second)
result match {
case errorResult: ErrorResult => {
throw new ConnectionException(errorResult.getErrorMessage)
}
case connectResult: ConnectResult => {
userId = connectResult.getSessionId
result
}
}
}
/**
* Close the connection to the MetaServer.
*/
@throws(classOf[ConnectionException])
def disconnect(): Unit = {
logger.info("Disconnecting user: " + userId + " to " + contactPoints)
val result = retryPolitics.askRetry(proxyActor, new Disconnect(userId), 5 second, retry = 1)
result match {
case errorResult: ErrorResult => {
throw new ConnectionException(errorResult.getErrorMessage)
}
case connectResult: DisconnectResult => {
userId = null
}
}
}
/**
* Execute a query in the Meta server asynchronously.
* @param user The user login.
* @param targetCatalog The target catalog.
* @param query The query.
* @param callback The callback object.
* @deprecated As of release 0.0.5, replaced by asyncExecuteQuery(targetCatalog, query, callback)
*/
@deprecated(message = "You should use asyncExecuteQuery(targetCatalog, query, callback)", since = "0.0.5")
@throws(classOf[ConnectionException])
def asyncExecuteQuery(user:String, targetCatalog: String, query: String, callback: IResultHandler) : String = {
logger.warn("You use a deprecated method. User parameter (" + user + ") will be ignored")
asyncExecuteQuery(targetCatalog, query, callback)
}
/**
* Execute a query in the Meta server asynchronously.
* @param targetCatalog The target catalog.
* @param query The query.
* @param callback The callback object.
*/
@throws(classOf[ConnectionException])
def asyncExecuteQuery(targetCatalog: String, query: String, callback: IResultHandler) : String = {
if(userId==null){
throw new ConnectionException("You must connect to cluster")
}
val queryId = UUID.randomUUID()
queries.put(queryId.toString, callback)
sendQuery(new Query(queryId.toString, targetCatalog, query, userId))
queryId.toString
}
/**
* Launch query in Meta Server
* @param user The user login (audit only)
* @param targetKs Target keyspace
* @param query Launched query
* @return QueryResult
* @deprecated As of release 0.0.5, replaced by executeQuery(targetKs, query)
*/
@throws(classOf[ConnectionException])
@throws(classOf[ParsingException])
@throws(classOf[ValidationException])
@throws(classOf[ExecutionException])
@throws(classOf[UnsupportedException])
@deprecated(message = "You should use executeQuery(targetKs, query)", since = "0.0.5")
def executeQuery(user: String, targetKs: String, query: String): Result = {
logger.warn("You use a deprecated method. User parameter (" + user + ") will be ignored")
executeQuery(targetKs, query)
}
/**
* Launch query in Meta Server
* @param targetKs Target keyspace
* @param query Launched query
* @return QueryResult
*/
@throws(classOf[ConnectionException])
@throws(classOf[ParsingException])
@throws(classOf[ValidationException])
@throws(classOf[ExecutionException])
@throws(classOf[UnsupportedException])
def executeQuery(targetKs: String, query: String): Result = {
if(userId==null){
throw new ConnectionException("You must connect to cluster")
}
val queryId = UUID.randomUUID()
val callback = new SyncResultHandler
queries.put(queryId.toString, callback)
sendQuery(new Query(queryId.toString, targetKs, query, userId))
val r = callback.waitForResult()
queries.remove(queryId.toString)
r
}
/**
* List the existing catalogs in the underlying database.
* @return A MetadataResult with a list of catalogs, or the object with hasError set
* containing the error message.
*/
def listCatalogs(): MetadataResult = {
val result = retryPolitics.askRetry(proxyActor, new Command(APICommand.LIST_CATALOGS, null))
result.asInstanceOf[MetadataResult]
}
/**
* List the existing tables in a database catalog.
* @return A MetadataResult with a list of tables, or the object with hasError set
* containing the error message.
*/
def listTables(catalogName: String): MetadataResult = {
val params: java.util.List[String] = new java.util.ArrayList[String]
params.add(catalogName)
val result = retryPolitics.askRetry(proxyActor, new Command(APICommand.LIST_TABLES, params))
result.asInstanceOf[MetadataResult]
}
/**
* List the existing columns of a table in a database catalog.
* @return A MetadataResult with a map of columns.
*/
def listFields(catalogName: String, tableName: String): MetadataResult = {
val params: java.util.List[String] = new java.util.ArrayList[String]
params.add(catalogName)
params.add(tableName)
val result = retryPolitics.askRetry(proxyActor, new Command(APICommand.LIST_COLUMNS, params))
result.asInstanceOf[MetadataResult]
}
def sendQuery(message: AnyRef){
proxyActor.ask(message)(5 second)
}
/**
* Get the IResultHandler associated with a query identifier.
* @param queryId Query identifier.
* @return The result handler.
*/
def getResultHandler(queryId: String): IResultHandler = {
queries.get(queryId)
}
/**
* Remove a result handler from the internal map of callbacks.
* @param queryId The query identifier associated with the callback.
* @return Whether the callback has been removed.
*/
def removeResultHandler(queryId: String) : Boolean = {
queries.remove(queryId) != null
}
/**
* Shutdown actor system
*/
def close() {
system.shutdown()
}
}
| dhiguero/stratio-meta | meta-driver/src/main/scala/com/stratio/meta/driver/BasicDriver.scala | Scala | gpl-3.0 | 9,481 |
/*
* This is free and unencumbered software released into the public domain.
*
* Anyone is free to copy, modify, publish, use, compile, sell, or
* distribute this software, either in source code form or as a compiled
* binary, for any purpose, commercial or non-commercial, and by any
* means.
*
* In jurisdictions that recognize copyright laws, the author or authors
* of this software dedicate any and all copyright interest in the
* software to the public domain. We make this dedication for the benefit
* of the public at large and to the detriment of our heirs and
* successors. We intend this dedication to be an overt act of
* relinquishment in perpetuity of all present and future rights to this
* software under copyright law.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* For more information, please refer to <http://unlicense.org/>
*/
package net.adamcin.vltpack.mojo
import java.io.File
import net.adamcin.vltpack.PutsBundles
import org.apache.maven.plugins.annotations.{LifecyclePhase, Mojo, Parameter}
/**
* PUT a bundle identified by the file parameter to the configured CQ instance
* @since 0.6.0
* @author Mark Adamcin
*/
@Mojo(name = "put-bundle-file",
defaultPhase = LifecyclePhase.PRE_INTEGRATION_TEST,
requiresProject = false,
threadSafe = true)
class PutBundleFileMojo
extends BaseMojo
with PutsBundles {
/**
* Specify a bundle file to be PUT
*/
@Parameter(property = "file", required = true)
val file: File = null
override def execute() {
putBundle(file) match {
case Left(t) => throw t
case Right(messages) => messages.foreach { getLog.info(_) }
}
}
}
| adamcin/vltpack-maven-plugin | src/main/scala/net/adamcin/vltpack/mojo/PutBundleFileMojo.scala | Scala | unlicense | 2,075 |
package spark.metrics.source
import com.codahale.metrics.MetricRegistry
trait Source {
def sourceName: String
def metricRegistry: MetricRegistry
}
| vax11780/spark | core/src/main/scala/spark/metrics/source/Source.scala | Scala | apache-2.0 | 153 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.index.TemporalIndexCheck
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TemporalIndexCheckTest extends Specification {
// setup the basic types
def noDTGType = SimpleFeatureTypes.createType("noDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point")
def oneDTGType = SimpleFeatureTypes.createType("oneDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point,dtg:Date")
def twoDTGType = SimpleFeatureTypes.createType("twoDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point,dtg:Date,dtg_end_time:Date")
val DEFAULT_DATE_KEY = org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.Configs.DefaultDtgField
def copy(sft: SimpleFeatureType) = {
val b = new SimpleFeatureTypeBuilder()
b.init(sft)
b.buildFeatureType()
}
"TemporalIndexCheck" should {
"detect no valid DTG" in {
val testType = copy(noDTGType)
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beNone
}
"detect no valid DTG even if DEFAULT_DATE_KEY is set incorrectly" in {
val testType = copy(noDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beNone
}
"detect a valid DTG if DEFAULT_DATE_KEY is not set" in {
val testType = copy(oneDTGType)
testType.getUserData.remove(DEFAULT_DATE_KEY)
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"detect a valid DTG if DEFAULT_DATE_KEY is not properly set" in {
val testType = copy(oneDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "no_such_dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"present no DTG candidate if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"detect valid DTG candidates and select the first if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(twoDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "no_such_dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"present no DTG candidate if DEFAULT_DATE_KEY is set properly and there are multiple Date attributes" in {
val testType = copy(twoDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
}
"getDTGFieldName" should {
"return a dtg field name if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
testType.getDtgField must beSome("dtg")
}
"not return a dtg field name if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(noDTGType)
testType.setDtgField("dtg") must throwAn[IllegalArgumentException]
testType.getDtgField must beNone
}
}
"getDTGDescriptor" should {
"return a dtg attribute descriptor if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
testType.getDtgIndex.map(testType.getDescriptor) must beSome(oneDTGType.getDescriptor("dtg"))
}
"not return a dtg attribute descriptor if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(noDTGType)
testType.setDtgField("dtg") must throwAn[IllegalArgumentException]
testType.getDtgIndex.map(testType.getDescriptor) must beNone
}
}
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/TemporalIndexCheckTest.scala | Scala | apache-2.0 | 4,580 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.plugins.transformer
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.FlatSpec
import de.fuberlin.wiwiss.silk.plugins.Plugins
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import de.fuberlin.wiwiss.silk.plugins.transformer.replace.ReplaceTransformer
@RunWith(classOf[JUnitRunner])
class ReplaceTransformerTest extends FlatSpec with ShouldMatchers {
Plugins.register()
val transformer = new ReplaceTransformer(search = " ", replace = "")
"ReplaceTransformer" should "return 'abc'" in {
transformer.evaluate("a b c") should equal("abc")
}
val transformer1 = new ReplaceTransformer(search = "abc", replace = "")
"ReplaceTransformer" should "return 'def'" in {
transformer1.evaluate("abcdef") should equal("def")
}
}
| fusepoolP3/p3-silk | silk-core/src/test/scala/de/fuberlin/wiwiss/silk/plugins/transformer/ReplaceTransformerTest.scala | Scala | apache-2.0 | 1,378 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import java.io.File
import sbt.Def.{ ScopedKey, Setting }
import sbt.Keys._
import sbt.SlashSyntax0._
import sbt.internal.Act
import sbt.internal.CommandStrings._
import sbt.internal.inc.ScalaInstance
import sbt.internal.util.AttributeKey
import sbt.internal.util.complete.DefaultParsers._
import sbt.internal.util.complete.{ DefaultParsers, Parser }
import sbt.io.IO
import sbt.librarymanagement.CrossVersion
/**
* Cross implements the Scala cross building commands:
* + ("cross") command and ++ ("switch") command.
*/
object Cross {
private[sbt] def spacedFirst(name: String) = opOrIDSpaced(name) ~ any.+
private case class Switch(version: ScalaVersion, verbose: Boolean, command: Option[String])
private trait ScalaVersion {
def force: Boolean
}
private case class NamedScalaVersion(name: String, force: Boolean) extends ScalaVersion
private case class ScalaHomeVersion(home: File, resolveVersion: Option[String], force: Boolean)
extends ScalaVersion
private def switchParser(state: State): Parser[Switch] = {
import DefaultParsers._
def versionAndCommand(spacePresent: Boolean) = {
val x = Project.extract(state)
import x._
val knownVersions = crossVersions(x, currentRef)
val version = token(StringBasic.examples(knownVersions: _*)).map { arg =>
val force = arg.endsWith("!")
val versionArg = if (force) arg.dropRight(1) else arg
versionArg.split("=", 2) match {
case Array(home) if new File(home).exists() =>
ScalaHomeVersion(new File(home), None, force)
case Array(v) => NamedScalaVersion(v, force)
case Array(v, home) =>
ScalaHomeVersion(new File(home), Some(v).filterNot(_.isEmpty), force)
}
}
val spacedVersion = if (spacePresent) version else version & spacedFirst(SwitchCommand)
val verboseOpt = Parser.opt(token(Space ~> "-v"))
val optionalCommand = Parser.opt(token(Space ~> matched(state.combinedParser)))
val switch1 = (token(Space ~> "-v") ~> (Space ~> version) ~ optionalCommand) map {
case v ~ command =>
Switch(v, true, command)
}
val switch2 = (spacedVersion ~ verboseOpt ~ optionalCommand) map {
case v ~ verbose ~ command =>
Switch(v, verbose.isDefined, command)
}
switch1 | switch2
}
token(SwitchCommand ~> OptSpace) flatMap { sp =>
versionAndCommand(sp.nonEmpty)
}
}
private case class CrossArgs(command: String, verbose: Boolean)
private def crossParser(state: State): Parser[CrossArgs] =
token(CrossCommand <~ OptSpace) flatMap { _ =>
(token(Parser.opt("-v" <~ Space)) ~ token(matched(state.combinedParser))).map {
case (verbose, command) => CrossArgs(command, verbose.isDefined)
}
}
private def crossRestoreSessionParser: Parser[String] = token(CrossRestoreSessionCommand)
private[sbt] def requireSession[T](p: State => Parser[T]): State => Parser[T] =
s => if (s get sessionSettings isEmpty) failure("No project loaded") else p(s)
private def resolveAggregates(extracted: Extracted): Seq[ProjectRef] = {
def findAggregates(project: ProjectRef): Seq[ProjectRef] = {
project :: (extracted.structure
.allProjects(project.build)
.find(_.id == project.project) match {
case Some(resolved) => resolved.aggregate.toList.flatMap(findAggregates)
case None => Nil
})
}
(extracted.currentRef +: extracted.currentProject.aggregate.flatMap(findAggregates)).distinct
}
private def crossVersions(extracted: Extracted, proj: ResolvedReference): Seq[String] = {
import extracted._
((proj / crossScalaVersions) get structure.data) getOrElse {
// reading scalaVersion is a one-time deal
((proj / scalaVersion) get structure.data).toSeq
}
}
/**
* Parse the given command into a list of aggregate projects and command to issue.
*/
private[sbt] def parseSlashCommand(
extracted: Extracted
)(command: String): (Seq[ProjectRef], String) = {
import extracted._
import DefaultParsers._
val parser = (OpOrID <~ charClass(_ == '/', "/")) ~ any.* map {
case seg1 ~ cmd => (seg1, cmd.mkString)
}
Parser.parse(command, parser) match {
case Right((seg1, cmd)) =>
structure.allProjectRefs.find(_.project == seg1) match {
case Some(proj) => (Seq(proj), cmd)
case _ => (resolveAggregates(extracted), command)
}
case _ => (resolveAggregates(extracted), command)
}
}
def crossBuild: Command =
Command.arb(requireSession(crossParser), crossHelp)(crossBuildCommandImpl)
private def crossBuildCommandImpl(state: State, args: CrossArgs): State = {
val extracted = Project.extract(state)
val parser = Act.aggregatedKeyParser(extracted) ~ matched(any.*)
val verbose = if (args.verbose) "-v" else ""
val allCommands = Parser.parse(args.command, parser) match {
case Left(_) =>
val (aggs, aggCommand) = parseSlashCommand(extracted)(args.command)
val projCrossVersions = aggs map { proj =>
proj -> crossVersions(extracted, proj)
}
// It's definitely not a task, check if it's a valid command, because we don't want to emit the warning
// message below for typos.
val validCommand = Parser.parse(aggCommand, state.combinedParser).isRight
val distinctCrossConfigs = projCrossVersions.map(_._2.toSet).distinct
if (validCommand && distinctCrossConfigs.size > 1) {
state.log.warn(
"Issuing a cross building command, but not all sub projects have the same cross build " +
"configuration. This could result in subprojects cross building against Scala versions that they are " +
"not compatible with. Try issuing cross building command with tasks instead, since sbt will be able " +
"to ensure that cross building is only done using configured project and Scala version combinations " +
"that are configured."
)
state.log.debug("Scala versions configuration is:")
projCrossVersions.foreach {
case (project, versions) => state.log.debug(s"$project: $versions")
}
}
// Execute using a blanket switch
projCrossVersions.toMap.apply(extracted.currentRef).flatMap { version =>
// Force scala version
Seq(s"$SwitchCommand $verbose $version!", aggCommand)
}
case Right((keys, taskArgs)) =>
def project(key: ScopedKey[_]): Option[ProjectRef] = key.scope.project.toOption match {
case Some(p: ProjectRef) => Some(p)
case _ => None
}
val fullArgs = if (taskArgs.trim.isEmpty) "" else s" ${taskArgs.trim}"
val keysByVersion = keys
.flatMap { k =>
project(k).toSeq.flatMap(crossVersions(extracted, _).map(v => v -> k))
}
.groupBy(_._1)
.mapValues(_.map(_._2).toSet)
val commandsByVersion = keysByVersion.toSeq
.flatMap {
case (v, keys) =>
val projects = keys.flatMap(project)
keys.toSeq.flatMap { k =>
project(k).filter(projects.contains).flatMap { p =>
if (p == extracted.currentRef || !projects.contains(extracted.currentRef)) {
val parts = project(k).map(_.project) ++ k.scope.config.toOption.map {
case ConfigKey(n) => n.head.toUpper + n.tail
} ++ k.scope.task.toOption.map(_.label) ++ Some(k.key.label)
Some(v -> parts.mkString("", "/", fullArgs))
} else None
}
}
}
.groupBy(_._1)
.mapValues(_.map(_._2))
.toSeq
.sortBy(_._1)
commandsByVersion.flatMap {
case (v, commands) =>
commands match {
case Seq(c) => Seq(s"$SwitchCommand $verbose $v $c")
case Seq() => Nil // should be unreachable
case multi if fullArgs.isEmpty =>
Seq(s"$SwitchCommand $verbose $v all ${multi.mkString(" ")}")
case multi => Seq(s"$SwitchCommand $verbose $v") ++ multi
}
}
}
allCommands.toList ::: CrossRestoreSessionCommand :: captureCurrentSession(state, extracted)
}
def crossRestoreSession: Command =
Command.arb(_ => crossRestoreSessionParser, crossRestoreSessionHelp)(
(s, _) => crossRestoreSessionImpl(s)
)
private def crossRestoreSessionImpl(state: State): State = {
restoreCapturedSession(state, Project.extract(state))
}
private val CapturedSession = AttributeKey[Seq[Setting[_]]]("crossCapturedSession")
private def captureCurrentSession(state: State, extracted: Extracted): State = {
state.put(CapturedSession, extracted.session.rawAppend)
}
private def restoreCapturedSession(state: State, extracted: Extracted): State = {
state.get(CapturedSession) match {
case Some(rawAppend) =>
val restoredSession = extracted.session.copy(rawAppend = rawAppend)
BuiltinCommands
.reapply(restoredSession, extracted.structure, state)
.remove(CapturedSession)
case None => state
}
}
def switchVersion: Command =
Command.arb(requireSession(switchParser), switchHelp)(switchCommandImpl)
private def switchCommandImpl(state: State, args: Switch): State = {
val (switchedState, affectedRefs) = switchScalaVersion(args, state)
val strictCmd =
if (args.version.force) {
// The Scala version was forced on the whole build, run as is
args.command
} else
args.command.map { rawCmd =>
// for now, treat `all` command specially
if (rawCmd.startsWith("all ")) rawCmd
else {
val (aggs, aggCommand) = parseSlashCommand(Project.extract(state))(rawCmd)
aggs
.intersect(affectedRefs)
.map({ case ProjectRef(_, proj) => s"$proj/$aggCommand" })
.mkString("all ", " ", "")
}
}
strictCmd.toList ::: switchedState
}
private def switchScalaVersion(switch: Switch, state: State): (State, Seq[ResolvedReference]) = {
val extracted = Project.extract(state)
import extracted._
type ScalaVersion = String
val (version, instance) = switch.version match {
case ScalaHomeVersion(homePath, resolveVersion, _) =>
val home = IO.resolve(extracted.currentProject.base, homePath)
if (home.exists()) {
val instance = ScalaInstance(home)(state.classLoaderCache.apply _)
val version = resolveVersion.getOrElse(instance.actualVersion)
(version, Some((home, instance)))
} else {
sys.error(s"Scala home directory did not exist: $home")
}
case NamedScalaVersion(v, _) => (v, None)
}
def logSwitchInfo(
included: Seq[(ProjectRef, Seq[ScalaVersion])],
excluded: Seq[(ProjectRef, Seq[ScalaVersion])]
) = {
instance.foreach {
case (home, instance) =>
state.log.info(s"Using Scala home $home with actual version ${instance.actualVersion}")
}
if (switch.version.force) {
state.log.info(s"Forcing Scala version to $version on all projects.")
} else {
state.log.info(s"Setting Scala version to $version on ${included.size} projects.")
}
if (excluded.nonEmpty && !switch.verbose) {
state.log.info(s"Excluded ${excluded.size} projects, run ++ $version -v for more details.")
}
def detailedLog(msg: => String) =
if (switch.verbose) state.log.info(msg) else state.log.debug(msg)
def logProject: (ProjectRef, Seq[ScalaVersion]) => Unit = (proj, scalaVersions) => {
val current = if (proj == currentRef) "*" else " "
detailedLog(s" $current ${proj.project} ${scalaVersions.mkString("(", ", ", ")")}")
}
detailedLog("Switching Scala version on:")
included.foreach(logProject.tupled)
detailedLog("Excluding projects:")
excluded.foreach(logProject.tupled)
}
val projects: Seq[(ResolvedReference, Seq[ScalaVersion])] = {
val projectScalaVersions =
structure.allProjectRefs.map(proj => proj -> crossVersions(extracted, proj))
if (switch.version.force) {
logSwitchInfo(projectScalaVersions, Nil)
projectScalaVersions ++ structure.units.keys
.map(BuildRef.apply)
.map(proj => proj -> crossVersions(extracted, proj))
} else {
val binaryVersion = CrossVersion.binaryScalaVersion(version)
val (included, excluded) = projectScalaVersions.partition {
case (_, scalaVersions) =>
scalaVersions.exists(v => CrossVersion.binaryScalaVersion(v) == binaryVersion)
}
if (included.isEmpty) {
sys.error(
s"""Switch failed: no subprojects list "$version" (or compatible version) in crossScalaVersions setting.
|If you want to force it regardless, call ++ $version!""".stripMargin
)
}
logSwitchInfo(included, excluded)
included
}
}
(setScalaVersionForProjects(version, instance, projects, state, extracted), projects.map(_._1))
}
private def setScalaVersionForProjects(
version: String,
instance: Option[(File, ScalaInstance)],
projects: Seq[(ResolvedReference, Seq[String])],
state: State,
extracted: Extracted
): State = {
import extracted._
val newSettings = projects.flatMap {
case (project, scalaVersions) =>
val scope = Scope(Select(project), Zero, Zero, Zero)
instance match {
case Some((home, inst)) =>
Seq(
scope / scalaVersion := version,
scope / crossScalaVersions := scalaVersions,
scope / scalaHome := Some(home),
scope / scalaInstance := inst
)
case None =>
Seq(
scope / scalaVersion := version,
scope / crossScalaVersions := scalaVersions,
scope / scalaHome := None
)
}
}
val filterKeys: Set[AttributeKey[_]] = Set(scalaVersion, scalaHome, scalaInstance).map(_.key)
val projectsContains: Reference => Boolean = projects.map(_._1).toSet.contains
// Filter out any old scala version settings that were added, this is just for hygiene.
val filteredRawAppend = session.rawAppend.filter(_.key match {
case ScopedKey(Scope(Select(ref), Zero, Zero, Zero), key)
if filterKeys.contains(key) && projectsContains(ref) =>
false
case _ => true
})
val newSession = session.copy(rawAppend = filteredRawAppend ++ newSettings)
BuiltinCommands.reapply(newSession, structure, state)
}
}
| xuwei-k/xsbt | main/src/main/scala/sbt/Cross.scala | Scala | apache-2.0 | 15,125 |
package model
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by salim on 16/09/2016.
*/
class PlayerSpec extends FlatSpec with Matchers {
"Player" should "be able to add pokemon" in {
val game = Game.boot()
val player: Player = game.spawnPlayer.named("Pootle")
val pikachu = Pokemon.spawn(game.pokedex, 25)
assert(player.getPokemon == Nil)
player.addPokemon(pikachu)
assert(player.getPokemon == List(pikachu))
}
it should "be able to add no more than 6" in {
val game = Game.boot()
val player: Player = game.spawnPlayer.named("Pootle")
player.addPokemon(Pokemon.spawn(game.pokedex, 1))
player.addPokemon(Pokemon.spawn(game.pokedex, 2))
player.addPokemon(Pokemon.spawn(game.pokedex, 3))
player.addPokemon(Pokemon.spawn(game.pokedex, 4))
player.addPokemon(Pokemon.spawn(game.pokedex, 5))
player.addPokemon(Pokemon.spawn(game.pokedex, 6))
intercept[PokemonLimitReached](
player.addPokemon(Pokemon.spawn(game.pokedex, 7))
)
}
it should "be able to battle if there is at least one battleable pokemon" in {
val game = Game.boot()
val player: Player = game.spawnPlayer.named("Pootle")
assert(!player.hasBattleablePokemon)
player.addPokemon(Pokemon.spawn(game.pokedex, 1))
assert(player.hasBattleablePokemon)
}
it should "know if you have at least one battleable pokemon" in {
val game = Game.boot()
val player: Player = game.spawnPlayer.named("Pootle")
player.addPokemon(Pokemon.spawn(game.pokedex, 1))
player.addPokemon(Pokemon.spawn(game.pokedex, 2))
player.addPokemon(Pokemon.spawn(game.pokedex, 3))
assert(player.hasBattleablePokemon)
}
}
|
salimfadhley/scalamoo
|
src/test/scala/model/PlayerSpec.scala
|
Scala
|
mit
| 1,655
|
package tastytest
object DelayedChained {
sealed trait Root
object Internal {
sealed trait Branch extends Root
object Deeper {
final class LeafA extends Branch
final class LeafB extends Branch
}
}
}
|
scala/scala
|
test/tasty/pos/src-3/tastytest/DelayedChained.scala
|
Scala
|
apache-2.0
| 236
|
package teststate.core
import teststate.core.Types.CheckShapeA
import teststate.data.Sack
import teststate.typeclass.PolyComposable
import teststate.typeclass.PolyComposable._
/**
* P = Point
* @ = Around
* I = Invariant
* C = Check
* A = Action
*
* P & P = P
* @ & @ = @
* I & I = I
* C & C = I
*/
object CoreComposition {
trait P0 {
implicit def checksPolyComposable[C[-_, _], D[-_, _], A, E](implicit
c: ToInvariants[CheckShapeA, C],
d: ToInvariants[CheckShapeA, D],
i: PolyComposable.Mono[AndOp, CheckShapeA[Invariant, A, E]])
: PolyComposable[AndOp, CheckShapeA[C, A, E], CheckShapeA[D, A, E], CheckShapeA[Invariant, A, E]] =
PolyComposable((fc, fd) => i.compose(c toInvariants fc, d toInvariants fd))
}
trait Implicits extends P0 {
implicit def checksMonoComposable[C[-_, _], A, E]: PolyComposable.Mono[AndOp, CheckShapeA[C, A, E]] =
PolyComposable(Sack.append)
implicit def checksCanAnd[C[-_, _], A, B]: Can[AndOp, CheckShapeA[C, A, B]] = Can
}
}
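// Illustrative, self-contained model of the table documented above (a plain ADT standing in for the
// real CheckShapeA types): composing two checks of the same shape keeps that shape, anything else
// widens to Invariant.
object CompositionTableSketch {
sealed trait Shape
case object Point extends Shape // P
case object Around extends Shape // @
case object Invariant extends Shape // I
def and(a: Shape, b: Shape): Shape = if (a == b) a else Invariant
}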
|
japgolly/test-state
|
core/shared/src/main/scala/teststate/core/CoreComposition.scala
|
Scala
|
apache-2.0
| 1,215
|
package com.github.zarkus13.slickgenerator.sbt
import com.etsy.sbt.CompileQuick._
import sbt._
import sbt.Keys._
/**
* Created by Alexis on 22/03/2014.
*/
object SlickGenerationSbtPlugin extends Plugin {
lazy val generatorPath = settingKey[String]("Path to the object containing a main method and generating the slick classes")
lazy val gentables = taskKey[Unit]("Generate slick files from tables.")
lazy val gentablesTask = (
runner in Compile,
compilers in Compile,
scalaSource in Compile,
classDirectory in Compile,
scalacOptions in Compile,
compileInputs in (Compile, compile),
generatorPath,
dependencyClasspath in Compile,
streams
) map {
(runner, compilers, scalaSource, classDirectory, scalacOptions, compileInputs, path, classpath, streams) => {
val classDirFile = classDirectory.getAbsoluteFile
if(!classDirFile.exists() || !classDirFile.isDirectory) {
classDirFile.delete()
classDirFile.mkdir()
}
compilers.scalac(
Seq(file(scalaSource.absolutePath + "/" + path.replaceAll("\\.", "/") + ".scala")),
noChanges,
classpath.files,
classDirectory,
scalacOptions,
noopCallback,
1000,
compileInputs.incSetup.cache,
streams.log
)
val realClasspath = classpath.files :+ classDirectory.getAbsoluteFile
toError(runner.run(path, realClasspath, Nil, streams.log))
}
}
lazy val settingsSlick = Seq(
generatorPath := "",
gentables <<= gentablesTask
)
}
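// Illustrative usage (hypothetical names): a build adds `settingsSlick` to its project settings and
// points `generatorPath` at a fully qualified object with a main method, e.g.
// generatorPath := "com.example.db.SlickCodeGenerator"
// after which the `gentables` task compiles that single source file and runs it to emit the Slick classes.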
|
Zarkus13/Slick-Generator
|
slick-generation-sbt-plugin/src/main/scala/com/github/zarkus13/slickgenerator/sbt/SlickGenerationSbtPlugin.scala
|
Scala
|
mit
| 1,569
|
package euler
package til70
import euler._
import contfract._
object Euler66 extends EulerProblem {
override def result = {
val res = (2 to 1000) filterNot isPerfectSquare map { d => (d, solveDiophantine(d).get._1) }
res.maxBy(_._2)._1
}
// The fundamental solution is the first convergent that is also a solution of the equation
// http://en.wikipedia.org/wiki/Pell%27s_equation
def solveDiophantine(n: Int) = {
val convIt = new ConvergentsIterator(Iterator.continually(1), new SqrContinuedFractionIterator(n))
convIt find { case (x, y) => x * x - n * y * y == 1 }
}
}
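// Illustrative, self-contained check of the note above (brute force, no continued fractions);
// assumes n is not a perfect square, as already filtered in result. E.g. fundamental(2) == (3, 2).
object PellBruteForceSketch {
def fundamental(n: Int): (Long, Long) =
Iterator.from(1).map(_.toLong)
.map(y => (math.round(math.sqrt(n.toDouble * y * y + 1)), y))
.find { case (x, y) => x * x - n * y * y == 1 }
.get
}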
|
TrustNoOne/Euler
|
scala/src/main/scala/euler/til70/Euler66.scala
|
Scala
|
mit
| 601
|
package io.getquill.quotation
import io.getquill.ast._
import io.getquill.ast.Implicits._
import collection.immutable.Set
case class State(seen: Set[IdentName], free: Set[IdentName])
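// Walks an AST collecting identifiers that are used but not bound within it: `seen` holds the
// identifiers bound by enclosing constructs (function parameters, assignment aliases, join aliases),
// while `free` accumulates the identifiers referenced without such a binding.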
case class FreeVariables(state: State)
extends StatefulTransformer[State] {
override def apply(ast: Ast): (Ast, StatefulTransformer[State]) =
ast match {
case ident: Ident if (!state.seen.contains(ident.idName)) =>
(ident, FreeVariables(State(state.seen, state.free + ident.idName)))
case f @ Function(params, body) =>
val (_, t) = FreeVariables(State(state.seen ++ params.map(_.idName), state.free))(body)
(f, FreeVariables(State(state.seen, state.free ++ t.state.free)))
case q @ Foreach(a, b, c) =>
(q, free(a, b, c))
case other =>
super.apply(other)
}
override def apply(o: OptionOperation): (OptionOperation, StatefulTransformer[State]) =
o match {
case q @ OptionTableFlatMap(a, b, c) =>
(q, free(a, b, c))
case q @ OptionTableMap(a, b, c) =>
(q, free(a, b, c))
case q @ OptionTableExists(a, b, c) =>
(q, free(a, b, c))
case q @ OptionTableForall(a, b, c) =>
(q, free(a, b, c))
case q @ OptionFlatMap(a, b, c) =>
(q, free(a, b, c))
case q @ OptionMap(a, b, c) =>
(q, free(a, b, c))
case q @ OptionForall(a, b, c) =>
(q, free(a, b, c))
case q @ OptionExists(a, b, c) =>
(q, free(a, b, c))
case other =>
super.apply(other)
}
override def apply(e: Assignment): (Assignment, StatefulTransformer[State]) =
e match {
case Assignment(a, b, c) =>
val t = FreeVariables(State(state.seen + a.idName, state.free))
val (bt, btt) = t(b)
val (ct, ctt) = t(c)
(Assignment(a, bt, ct), FreeVariables(State(state.seen, state.free ++ btt.state.free ++ ctt.state.free)))
}
override def apply(e: AssignmentDual): (AssignmentDual, StatefulTransformer[State]) =
e match {
case AssignmentDual(a1, a2, b, c) =>
val t = FreeVariables(State(state.seen + a1.idName + a2.idName, state.free))
val (bt, btt) = t(b)
val (ct, ctt) = t(c)
(AssignmentDual(a1, a2, bt, ct), FreeVariables(State(state.seen, state.free ++ btt.state.free ++ ctt.state.free)))
}
override def apply(action: Action): (Action, StatefulTransformer[State]) =
action match {
case q @ Returning(a, b, c) =>
(q, free(a, b, c))
case q @ ReturningGenerated(a, b, c) =>
(q, free(a, b, c))
case other =>
super.apply(other)
}
override def apply(e: OnConflict.Target): (OnConflict.Target, StatefulTransformer[State]) = (e, this)
override def apply(query: Query): (Query, StatefulTransformer[State]) =
query match {
case q @ Filter(a, b, c) => (q, free(a, b, c))
case q @ Map(a, b, c) => (q, free(a, b, c))
case q @ FlatMap(a, b, c) => (q, free(a, b, c))
case q @ ConcatMap(a, b, c) => (q, free(a, b, c))
case q @ SortBy(a, b, c, d) => (q, free(a, b, c))
case q @ GroupBy(a, b, c) => (q, free(a, b, c))
case q @ FlatJoin(t, a, b, c) => (q, free(a, b, c))
case q @ Join(t, a, b, iA, iB, on) =>
val (_, freeA) = apply(a)
val (_, freeB) = apply(b)
val (_, freeOn) = FreeVariables(State(state.seen + iA.idName + iB.idName, Set.empty))(on)
(q, FreeVariables(State(state.seen, state.free ++ freeA.state.free ++ freeB.state.free ++ freeOn.state.free)))
case _: Entity | _: Take | _: Drop | _: Union | _: UnionAll | _: Aggregation | _: Distinct | _: Nested =>
super.apply(query)
}
private def free(a: Ast, ident: Ident, c: Ast): FreeVariables =
free(a, ident.idName, c)
private def free(a: Ast, ident: IdentName, c: Ast) = {
val (_, ta) = apply(a)
val (_, tc) = FreeVariables(State(state.seen + ident, state.free))(c)
FreeVariables(State(state.seen, state.free ++ ta.state.free ++ tc.state.free))
}
}
object FreeVariables {
def apply(ast: Ast): Set[IdentName] =
new FreeVariables(State(Set.empty, Set.empty))(ast) match {
case (_, transformer) =>
transformer.state.free
}
}
|
getquill/quill
|
quill-core-portable/src/main/scala/io/getquill/quotation/FreeVariables.scala
|
Scala
|
apache-2.0
| 4,233
|
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.metamx.tranquility.test.common
import com.fasterxml.jackson.databind.ObjectMapper
import com.github.nscala_time.time.Imports._
import com.google.common.base.Charsets
import com.google.common.io.CharStreams
import com.google.common.io.Files
import com.google.inject.Injector
import com.metamx.collections.spatial.search.RectangularBound
import com.metamx.common.lifecycle.Lifecycle
import com.metamx.common.scala.concurrent._
import com.metamx.common.scala.control._
import com.metamx.common.scala.timekeeper.Timekeeper
import com.metamx.common.scala.untyped._
import com.metamx.common.scala.Jackson
import com.metamx.common.scala.Logging
import io.druid.cli.CliBroker
import io.druid.cli.CliCoordinator
import io.druid.cli.CliOverlord
import io.druid.cli.GuiceRunnable
import io.druid.granularity.QueryGranularities
import io.druid.guice.GuiceInjectors
import io.druid.query.aggregation.AggregatorFactory
import io.druid.query.aggregation.LongSumAggregatorFactory
import io.druid.query.filter.SpatialDimFilter
import io.druid.query.Druids
import io.druid.query.Query
import io.druid.server.ClientQuerySegmentWalker
import java.io.File
import java.io.InputStreamReader
import java.net.BindException
import java.net.URLClassLoader
import java.util.concurrent.atomic.AtomicInteger
import org.apache.curator.framework.CuratorFramework
import org.scalatest.FunSuite
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import scala.reflect.classTag
private object PortGenerator
{
private val port = new AtomicInteger(28000)
// Instead of using random ports with random failures, let's use an atomic counter
def reserveNext(count: Int = 1): Int = port.getAndAdd(count)
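// For example, with the defaults above: reserveNext() returns 28000, a following reserveNext(2)
// returns 28001 (claiming 28001-28002), and the next call returns 28003.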
}
trait DruidIntegrationSuite extends Logging with CuratorRequiringSuite
{
self: FunSuite =>
trait DruidServerHandle
{
def injector: Injector
def close()
}
def writeConfig(configResource: String, replacements: Map[String, String]): File = {
val stream = getClass.getClassLoader.getResourceAsStream(configResource)
val text = CharStreams.toString(new InputStreamReader(stream, Charsets.UTF_8))
val configFile = File.createTempFile("runtime-", ".properties")
Files.write(replacements.foldLeft(text)((a, kv) => a.replace(kv._1, kv._2)), configFile, Charsets.UTF_8)
configFile.deleteOnExit()
configFile
}
def spawnDruidServer[A <: GuiceRunnable : ClassTag](configFile: File): DruidServerHandle = {
val serverClass = classTag[A].runtimeClass
// Make the ForkingTaskRunner work under sbt
System.setProperty("java.class.path", getClass.getClassLoader.asInstanceOf[URLClassLoader].getURLs.mkString(":"))
// Would be better to have a way to easily pass Properties into the startup injector.
System.setProperty("druid.properties.file", configFile.toString)
val (serverInjector, lifecycle) = try {
val startupInjector = GuiceInjectors.makeStartupInjector()
val server: A = startupInjector.getInstance(serverClass).asInstanceOf[A]
server.configure(startupInjector)
val serverInjector = server.makeInjector()
(serverInjector, serverInjector.getInstance(classOf[Lifecycle]))
}
finally {
System.clearProperty("druid.properties.file")
}
lifecycle.start()
log.info("Server started up: %s", serverClass.getName)
val thread = loggingThread {
try {
lifecycle.join()
}
catch {
case e: Throwable =>
log.error(e, "Failed to run server: %s", serverClass.getName)
throw e
}
}
thread.start()
new DruidServerHandle
{
def injector = serverInjector
def close() {
try {
lifecycle.stop()
}
catch {
case e: Throwable =>
log.error(e, "Failed to stop lifecycle for server: %s", serverClass.getName)
}
thread.interrupt()
}
}
}
def withBroker[A](curator: CuratorFramework)(f: DruidServerHandle => A): A = {
// Reserve the next port from PortGenerator; retry on BindException in case something else grabbed it
retryOnErrors(ifException[BindException] untilCount 5) {
val brokerPort = PortGenerator.reserveNext()
val configFile = writeConfig(
"druid-broker.properties",
Map(
":DRUIDPORT:" -> brokerPort.toString,
":ZKCONNECT:" -> curator.getZookeeperClient.getCurrentConnectionString
)
)
val handle = spawnDruidServer[CliBroker](configFile)
try {
f(handle)
}
finally {
handle.close()
}
}
}
def withCoordinator[A](curator: CuratorFramework)(f: DruidServerHandle => A): A = {
// Reserve the next port from PortGenerator; retry on BindException in case something else grabbed it
retryOnErrors(ifException[BindException] untilCount 5) {
val coordinatorPort = PortGenerator.reserveNext()
val configFile = writeConfig(
"druid-coordinator.properties",
Map(
":DRUIDPORT:" -> coordinatorPort.toString,
":ZKCONNECT:" -> curator.getZookeeperClient.getCurrentConnectionString
)
)
val handle = spawnDruidServer[CliCoordinator](configFile)
try {
f(handle)
}
finally {
handle.close()
}
}
}
def withOverlord[A](curator: CuratorFramework)(f: DruidServerHandle => A): A = {
// Reserve the next port from PortGenerator; retry on BindException in case something else grabbed it
retryOnErrors(ifException[BindException] untilCount 5) {
val overlordPort = PortGenerator.reserveNext(2) // We need one more port for :DRUIDFORKPORT:
val configFile = writeConfig(
"druid-overlord.properties",
Map(
":DRUIDPORT:" -> overlordPort.toString,
":DRUIDFORKPORT:" -> (overlordPort + 1).toString,
":ZKCONNECT:" -> curator.getZookeeperClient.getCurrentConnectionString
)
)
val handle = spawnDruidServer[CliOverlord](configFile)
try {
f(handle)
}
finally {
handle.close()
}
}
}
def withDruidStack[A](f: (CuratorFramework, DruidServerHandle, DruidServerHandle, DruidServerHandle) => A): A = {
withLocalCurator {
curator =>
curator.create().forPath("/beams")
withBroker(curator) {
broker =>
withCoordinator(curator) {
coordinator =>
withOverlord(curator) {
overlord =>
f(curator, broker, coordinator, overlord)
}
}
}
}
}
def assertQueryResults(broker: DruidServerHandle, query: Query[_], expected: Seq[Dict]) {
val walker = broker.injector.getInstance(classOf[ClientQuerySegmentWalker])
val brokerObjectMapper = broker.injector.getInstance(classOf[ObjectMapper])
var got: Seq[Dict] = null
val start = System.currentTimeMillis()
while (got != expected && System.currentTimeMillis() < start + 300000L) {
got = Jackson.parse[Seq[Dict]](
brokerObjectMapper.writeValueAsBytes(query.run(walker, Map.empty[String, AnyRef].asJava))
)
val gotAsString = got.toString match {
case x if x.size > 1024 => x.take(1024) + " ..."
case x => x
}
if (got != expected) {
log.info("Query result[%s] != expected result[%s], waiting a bit...", gotAsString, expected)
Thread.sleep(500)
}
}
assert(got === expected)
}
def runTestQueriesAndAssertions(broker: DruidServerHandle, timekeeper: Timekeeper) {
val testQueries = Seq(
(Druids
.newTimeBoundaryQueryBuilder()
.dataSource("xxx")
.build(),
Seq(
Map(
"timestamp" -> timekeeper.now.toString(),
"result" ->
Map(
"minTime" -> timekeeper.now.toString(),
"maxTime" -> (timekeeper.now + 1.minute).toString()
)
)
)),
(Druids
.newTimeseriesQueryBuilder()
.dataSource("xxx")
.granularity(QueryGranularities.MINUTE)
.intervals("0000/3000")
.aggregators(Seq[AggregatorFactory](new LongSumAggregatorFactory("barr", "barr")).asJava)
.build(),
Seq(
Map(
"timestamp" -> timekeeper.now.withZone(DateTimeZone.UTC).toString(),
"result" -> Map("barr" -> 2)
),
Map(
"timestamp" -> (timekeeper.now + 1.minute).withZone(DateTimeZone.UTC).toString(),
"result" -> Map("barr" -> 3)
)
)),
(Druids
.newTimeseriesQueryBuilder()
.dataSource("xxx")
.granularity(QueryGranularities.MINUTE)
.intervals("0000/3000")
.aggregators(Seq[AggregatorFactory](new LongSumAggregatorFactory("barr", "barr")).asJava)
.filters(new SpatialDimFilter("coord.geo", new RectangularBound(Array(35f, 120f), Array(40f, 125f))))
.build(),
Seq(
Map(
"timestamp" -> timekeeper.now.withZone(DateTimeZone.UTC).toString(),
"result" -> Map("barr" -> 0)
),
Map(
"timestamp" -> (timekeeper.now + 1.minute).withZone(DateTimeZone.UTC).toString(),
"result" -> Map("barr" -> 3)
)
))
)
for ((query, expected) <- testQueries) {
assertQueryResults(broker, query, expected)
}
}
}
|
druid-io/tranquility
|
core/src/test/scala/com/metamx/tranquility/test/common/DruidIntegrationSuite.scala
|
Scala
|
apache-2.0
| 10,089
|
package org.orbeon.oxf.fr
import org.orbeon.oxf.fr.datamigration.PathElem
import org.orbeon.oxf.fr.process.SimpleProcessCommon
import org.orbeon.saxon.om.NodeInfo
trait FormRunnerCommon
extends FormRunnerPersistence
with FormRunnerPermissionsOps
with FormRunnerEmail
with FormRunnerLang
with FormRunnerBaseOps
with FormRunnerControlOps
with FormRunnerContainerOps
with FormRunnerSectionTemplateOps
with FormRunnerActionsOps
with FormRunnerResourcesOps
object FormRunnerCommon {
// Extensible records would be cool here. See:
//
// - https://github.com/lampepfl/dotty-feature-requests/issues/8
// - https://github.com/milessabin/shapeless/wiki/Feature-overview:-shapeless-2.0.0#extensible-records
//
case class BindPath ( bind: NodeInfo, path: List[PathElem])
case class BindPathHolders ( bind: NodeInfo, path: List[PathElem], holders: Option[List[NodeInfo]])
case class ControlBindPathHoldersResources(control: NodeInfo, bind: NodeInfo, path: List[PathElem], holders: Option[List[NodeInfo]], resources: Seq[(String, NodeInfo)])
// Do this to avoid IntelliJ failing to see the specific `FormRunner` instance
@inline def frc: FormRunnerCommon = org.orbeon.oxf.fr.FormRunner
@inline def spc: SimpleProcessCommon = org.orbeon.oxf.fr.process.SimpleProcess
}
|
orbeon/orbeon-forms
|
form-runner/shared/src/main/scala/org/orbeon/oxf/fr/FormRunnerCommon.scala
|
Scala
|
lgpl-2.1
| 1,383
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.netty
import java.io.IOException
import java.nio.ByteBuffer
import java.util.{HashMap => JHashMap, Map => JMap}
import scala.collection.JavaConverters._
import scala.concurrent.{Future, Promise}
import scala.reflect.ClassTag
import scala.util.{Success, Try}
import com.codahale.metrics.{Metric, MetricSet}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.ExecutorDeadException
import org.apache.spark.internal.config
import org.apache.spark.network._
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.client.{RpcResponseCallback, TransportClientBootstrap, TransportClientFactory}
import org.apache.spark.network.crypto.{AuthClientBootstrap, AuthServerBootstrap}
import org.apache.spark.network.server._
import org.apache.spark.network.shuffle.{BlockFetchingListener, DownloadFileManager, OneForOneBlockFetcher, RetryingBlockFetcher}
import org.apache.spark.network.shuffle.protocol.{UploadBlock, UploadBlockStream}
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.storage.BlockManagerMessages.IsExecutorAlive
import org.apache.spark.util.Utils
/**
* A BlockTransferService that uses Netty to fetch a set of blocks at a time.
*/
private[spark] class NettyBlockTransferService(
conf: SparkConf,
securityManager: SecurityManager,
bindAddress: String,
override val hostName: String,
_port: Int,
numCores: Int,
driverEndPointRef: RpcEndpointRef = null)
extends BlockTransferService {
// TODO: Don't use Java serialization, use a more cross-version compatible serialization format.
private val serializer = new JavaSerializer(conf)
private val authEnabled = securityManager.isAuthenticationEnabled()
private val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle", numCores)
private[this] var transportContext: TransportContext = _
private[this] var server: TransportServer = _
private[this] var clientFactory: TransportClientFactory = _
private[this] var appId: String = _
override def init(blockDataManager: BlockDataManager): Unit = {
val rpcHandler = new NettyBlockRpcServer(conf.getAppId, serializer, blockDataManager)
var serverBootstrap: Option[TransportServerBootstrap] = None
var clientBootstrap: Option[TransportClientBootstrap] = None
if (authEnabled) {
serverBootstrap = Some(new AuthServerBootstrap(transportConf, securityManager))
clientBootstrap = Some(new AuthClientBootstrap(transportConf, conf.getAppId, securityManager))
}
transportContext = new TransportContext(transportConf, rpcHandler)
clientFactory = transportContext.createClientFactory(clientBootstrap.toSeq.asJava)
server = createServer(serverBootstrap.toList)
appId = conf.getAppId
logInfo(s"Server created on ${hostName}:${server.getPort}")
}
/** Creates and binds the TransportServer, possibly trying multiple ports. */
private def createServer(bootstraps: List[TransportServerBootstrap]): TransportServer = {
def startService(port: Int): (TransportServer, Int) = {
val server = transportContext.createServer(bindAddress, port, bootstraps.asJava)
(server, server.getPort)
}
Utils.startServiceOnPort(_port, startService, conf, getClass.getName)._1
}
override def shuffleMetrics(): MetricSet = {
require(server != null && clientFactory != null, "NettyBlockTransferServer is not initialized")
new MetricSet {
val allMetrics = new JHashMap[String, Metric]()
override def getMetrics: JMap[String, Metric] = {
allMetrics.putAll(clientFactory.getAllMetrics.getMetrics)
allMetrics.putAll(server.getAllMetrics.getMetrics)
allMetrics
}
}
}
override def fetchBlocks(
host: String,
port: Int,
execId: String,
blockIds: Array[String],
listener: BlockFetchingListener,
tempFileManager: DownloadFileManager): Unit = {
logTrace(s"Fetch blocks from $host:$port (executor id $execId)")
try {
val blockFetchStarter = new RetryingBlockFetcher.BlockFetchStarter {
override def createAndStart(blockIds: Array[String],
listener: BlockFetchingListener): Unit = {
try {
val client = clientFactory.createClient(host, port)
new OneForOneBlockFetcher(client, appId, execId, blockIds, listener,
transportConf, tempFileManager).start()
} catch {
case e: IOException =>
Try {
driverEndPointRef.askSync[Boolean](IsExecutorAlive(execId))
} match {
case Success(v) if v == false =>
throw new ExecutorDeadException(s"The relative remote executor(Id: $execId)," +
" which maintains the block data to fetch is dead.")
case _ => throw e
}
}
}
}
val maxRetries = transportConf.maxIORetries()
if (maxRetries > 0) {
// Note this Fetcher will correctly handle maxRetries == 0; we avoid it just in case there's
// a bug in this code. We should remove the if statement once we're sure of the stability.
new RetryingBlockFetcher(transportConf, blockFetchStarter, blockIds, listener).start()
} else {
blockFetchStarter.createAndStart(blockIds, listener)
}
} catch {
case e: Exception =>
logError("Exception while beginning fetchBlocks", e)
blockIds.foreach(listener.onBlockFetchFailure(_, e))
}
}
override def port: Int = server.getPort
override def uploadBlock(
hostname: String,
port: Int,
execId: String,
blockId: BlockId,
blockData: ManagedBuffer,
level: StorageLevel,
classTag: ClassTag[_]): Future[Unit] = {
val result = Promise[Unit]()
val client = clientFactory.createClient(hostname, port)
// StorageLevel and ClassTag are serialized as bytes using our JavaSerializer.
// Everything else is encoded using our binary protocol.
val metadata = JavaUtils.bufferToArray(serializer.newInstance().serialize((level, classTag)))
val asStream = blockData.size() > conf.get(config.MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM)
val callback = new RpcResponseCallback {
override def onSuccess(response: ByteBuffer): Unit = {
logTrace(s"Successfully uploaded block $blockId${if (asStream) " as stream" else ""}")
result.success((): Unit)
}
override def onFailure(e: Throwable): Unit = {
logError(s"Error while uploading $blockId${if (asStream) " as stream" else ""}", e)
result.failure(e)
}
}
if (asStream) {
val streamHeader = new UploadBlockStream(blockId.name, metadata).toByteBuffer
client.uploadStream(new NioManagedBuffer(streamHeader), blockData, callback)
} else {
// Convert or copy nio buffer into array in order to serialize it.
val array = JavaUtils.bufferToArray(blockData.nioByteBuffer())
client.sendRpc(new UploadBlock(appId, execId, blockId.name, metadata, array).toByteBuffer,
callback)
}
result.future
}
override def close(): Unit = {
if (server != null) {
server.close()
}
if (clientFactory != null) {
clientFactory.close()
}
if (transportContext != null) {
transportContext.close()
}
}
}
|
caneGuy/spark
|
core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala
|
Scala
|
apache-2.0
| 8,353
|
/**
* *************************************************************************
* This file is part of GTuring. *
* *
* GTuring is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* GTuring is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with GTuring. If not, see <http://www.gnu.org/licenses/>. *
* *
* *************************************************************************
*/
package gnieh.turing.bytecode
/**
* @author Lucas Satabin
*
*/
class BytecodeFormatException(msg: String, inner: Throwable)
extends Exception(msg, inner) {
def this(msg: String) = this(msg, null)
}
|
satabin/gniehturing
|
bytecode/src/main/scala/gnieh/turing/bytecode/BytecodeFormatException.scala
|
Scala
|
gpl-3.0
| 1,526
|
package ch.epfl.gsn.data
import org.scalatest.FunSpec
import org.scalatest.Matchers
import ch.epfl.gsn.data.DoubleType
import ch.epfl.gsn.data.LongType
import ch.epfl.gsn.data.Sensor
import ch.epfl.gsn.data.format.XmlSerializer
class XmlTest extends FunSpec with Matchers {
describe("xml write"){
val fields=Seq(
Sensing("timbo",Output("time","s1",DataUnit("s","s"),LongType)),
Sensing("air-temperature",Output("temp","s1",DataUnit("C","C"),DoubleType)),
Sensing("relative-humidity",Output("humid","s1",DataUnit("Perc","Perc"),DoubleType)))
val values:Seq[Seq[Any]]=Seq(
Array(11,36.5,98.2),
Array(12,31.5,92.2),
Array(13,30.5,94.2),
Array(14,29.5,97.2),
Array(15,32.5,95.2))
val s=new Sensor("pipo",fields,null,Map("description"->"chochos"))
it("should serialize it"){
val ss=XmlSerializer.ser(s, Seq())
println(ss)
}
}
}
|
LSIR/gsn
|
gsn-tools/src/test/scala/ch/epfl/gsn/data/XmlTest.scala
|
Scala
|
gpl-3.0
| 968
|
package edu.umass.ciir.galago
import java.io.{StringReader, File, IOException}
import org.lemurproject.galago.core.retrieval.query.{AnnotatedNode, StructuredQuery, Node}
import org.lemurproject.galago.utility.Parameters
import org.lemurproject.galago.core.parse.Document
import scala.collection.JavaConversions._
import org.lemurproject.galago.core.retrieval.{Retrieval, RetrievalFactory, ScoredPassage, ScoredDocument}
import org.lemurproject.galago.core.index.stats.NodeStatistics
import org.lemurproject.galago.core.parse.Document.DocumentComponents
import com.google.common.cache.{CacheLoader, CacheBuilder, LoadingCache}
import java.util.concurrent.TimeUnit
object GalagoSearcher {
def apply(p: Parameters): GalagoSearcher = {
new GalagoSearcher(p)
}
def apply(index:String, p: Parameters): GalagoSearcher = {
p.set("index",index)
new GalagoSearcher(p)
}
def apply(index: String): GalagoSearcher = {
val p = Parameters.create()
p.set("index", index)
new GalagoSearcher(p)
}
def apply(jsonConfigFile: File): GalagoSearcher = {
val p = Parameters.parseFile(jsonConfigFile)
new GalagoSearcher(p)
}
def apply(server: String, port: Int): GalagoSearcher = {
val p = Parameters.create()
val remoteIndex = "http://" + server + ":" + port
p.set("index", remoteIndex)
new GalagoSearcher(p)
}
}
object GalagoParamTools{
def myParamCopyFrom(toParams:Parameters,fromParams:Parameters):Parameters = {
for(key <- fromParams.getKeys) {
if (fromParams.isBoolean(key)) toParams.set(key, fromParams.getBoolean(key))
else if (fromParams.isDouble(key)) toParams.set(key, fromParams.getDouble(key))
else if (fromParams.isLong(key)) toParams.set(key, fromParams.getLong(key))
else if (fromParams.isString(key)) toParams.set(key, fromParams.getString(key))
else if (fromParams.isMap(key)) toParams.set(key, fromParams.getMap(key))
else if (fromParams.isList(key)) toParams.set(key, fromParams.getAsList(key))
else {
throw new RuntimeException("Try to copy params: errornous key "+key+" has unknown type. "+fromParams.toPrettyString)
}
// else if (fromParams.isMap(key)){
// val mparams = Parameters.create()
// fromParams.getMap(key).copyTo(mparams)
// toParams.set(key,mparams)
// }
}
toParams
}
}
class GalagoSearcher(globalParameters: Parameters) {
import GalagoParamTools.myParamCopyFrom
if (globalParameters.isString("index")) println("** Loading index from: " + globalParameters.getString("index"))
val queryParams = Parameters.create()
val m_searcher = RetrievalFactory.instance(globalParameters)
val documentCache: LoadingCache[(String, Parameters), Document] = CacheBuilder.newBuilder()
.maximumSize(100)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(
new CacheLoader[(String, Parameters), Document]() {
def load(key: (String,Parameters)): Document = {
pullDocument(key._1, key._2)
}
})
val statsCache: LoadingCache[String, NodeStatistics] = CacheBuilder.newBuilder()
.maximumSize(1000)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(
new CacheLoader[String, NodeStatistics]() {
def load(key: String): NodeStatistics = {
statsUncached(key)
}
})
def resetDocumentCache() { documentCache.invalidateAll() } // invalidateAll() discards all cached entries; cleanUp() only runs pending maintenance
def getDocument(documentName: String, params: Parameters = Parameters.create()): Document = {
documentCache.get(Pair(documentName, params))
}
def pullDocument(documentName: String, params: Parameters = Parameters.create()): Document = {
val p = Parameters.create()
myParamCopyFrom(p,globalParameters)
myParamCopyFrom(p,params)
getDocuments_(Seq(documentName), p).values.head
}
def getDocuments(documentNames: Seq[String], params: Parameters = Parameters.create()): Map[String, Document] = {
val p = Parameters.create()
myParamCopyFrom(p,globalParameters)
myParamCopyFrom(p,params)
getDocuments_(documentNames, p)
}
private def getDocuments_(identifier: Seq[String], p: Parameters, tries: Int = 5): Map[String, Document] = {
try {
val docmap = m_searcher.getDocuments(seqAsJavaList(identifier), new DocumentComponents(p))
docmap.toMap
} catch {
case ex: NullPointerException => {
println("NPE while fetching documents " + identifier)
throw ex
}
case ex: IOException => {
if (tries > 0) {
try {
Thread.sleep(100)
} catch {
case e: InterruptedException => {}
}
return getDocuments_(identifier, p, tries - 1)
} else {
throw ex
}
}
}
}
def getStatistics(query: String): NodeStatistics = {
try {
statsCache.get(query)
} catch {
case e: Exception => {
println("Error getting statistics for query: " + query)
throw e
}
}
}
private def statsUncached(query:String) : NodeStatistics = {
val root = StructuredQuery.parse(query)
root.getNodeParameters.set("queryType", "count")
val transformed = m_searcher.transformQuery(root, queryParams)
m_searcher.getNodeStatistics(transformed)
}
/**
* Select a delimiter character that is not contained in the query, so that we can instruct Galago to leave special
* characters in the query alone by wrapping it. For example, if delim = '.', the query is wrapped as
*
* @.query.
*
* @param query
* @return
*/
def selectDelim(query:String):Char = {
// val delimSymbols = Seq('\\"','.','!').iterator
val delimSymbols = Seq('\\"').iterator
var found:Boolean = false
var delim:Char= ' '
while(!found && delimSymbols.hasNext){
val posDelim = delimSymbols.next()
if (query.indexOf(posDelim) < 0) {
delim = posDelim
found = true
}
}
if (!found){
// we are getting desperate here
val delim2Symbols = (Char.MinValue to Char.MaxValue).view.filter(_.isLetterOrDigit).iterator
while(!found && delim2Symbols.hasNext){
val posDelim = delim2Symbols.next()
if (query.indexOf(posDelim) < 0) {
delim = posDelim
found = true
}
}
}
if (!found) {
throw new RuntimeException(" failed to find delimiter char that is not contained in query "+query)
}
delim
}
def getFieldTermCount(cleanTerm: String, field: String): Long = {
if (cleanTerm.length > 0 || cleanTerm.indexOf('#')>=0) {
val delim = selectDelim(cleanTerm)
val transformedText = "@"+delim + cleanTerm+delim+"" + "." + field
val statistics = getStatistics(transformedText)
// println(statistics.nodeFrequency.toString+" = field term count for \\""+cleanTerm+"\\" in "+field+" (delim:"+delim)
statistics.nodeFrequency
} else {
0
}
}
// LD: this is the old version. Instead of dropping terms with weird symbols, we now escape everything with a delimiter.
// def getFieldTermCount(cleanTerm: String, field: String): Long = {
// if (cleanTerm.length > 0 && (cleanTerm.indexOf('@') == 0)) {
// val transformedText = "\\"" + cleanTerm.replaceAllLiterally("\\"","") + "\\"" + "." + field
// val statistics = getStatistics(transformedText)
// statistics.nodeFrequency
// } else {
// 0
// }
// }
def retrieveAnnotatedScoredDocuments(query: String, params: Parameters, resultCount: Int, debugQuery: ((Node, Node) => Unit) = ((x, y) => {})): Seq[(ScoredDocument, AnnotatedNode)] = {
params.set("annotate", true)
for (scoredAnnotatedDoc <- retrieveScoredDocuments(query, Some(params), resultCount, debugQuery)) yield {
(scoredAnnotatedDoc, scoredAnnotatedDoc.annotation)
}
}
def retrieveScoredDocuments(query: String, params: Option[Parameters] = None, resultCount: Int, debugQuery: ((Node, Node) => Unit) = ((x, y) => {})): Seq[ScoredDocument] = {
val p = Parameters.create()
myParamCopyFrom(p,globalParameters)
params match {
case Some(params) => myParamCopyFrom(p,params)
case None => {}
}
p.set("startAt", 0)
p.set("resultCount", resultCount)
p.set("requested", resultCount)
val root = StructuredQuery.parse(query)
val transformed = m_searcher.transformQuery(root, p)
debugQuery(root, transformed)
val results = m_searcher.executeQuery(transformed, p).scoredDocuments
if (results != null) {
results
} else {
Seq()
}
}
def retrieveScoredPassages(query: String, params: Option[Parameters], resultCount: Int, debugQuery: ((Node, Node) => Unit) = ((x, y) => {})): Seq[ScoredPassage] = {
retrieveScoredDocuments(query, params, resultCount, debugQuery).map(_.asInstanceOf[ScoredPassage])
}
/**
* Maintains the order of the search results but augments them with Document instances
* @param resultList
* @return
*/
def fetchDocuments(resultList: Seq[ScoredDocument]): Seq[FetchedScoredDocument] = {
val docNames = resultList.map(_.documentName)
val docs = getDocuments(docNames)
for (scoredDoc <- resultList) yield {
FetchedScoredDocument(scoredDoc,
docs.getOrElse(scoredDoc.documentName, {
throw new DocumentNotInIndexException(scoredDoc.documentName)
})
)
}
}
/**
* Maintains the order of the search results but augments them with Document instances
* @param resultList
* @return
*/
def fetchPassages(resultList: Seq[ScoredPassage]): Seq[FetchedScoredPassage] = {
val docNames = resultList.map(_.documentName)
val docs = getDocuments(docNames)
for (scoredPassage <- resultList) yield {
FetchedScoredPassage(scoredPassage,
docs.getOrElse(scoredPassage.documentName, {
throw new DocumentNotInIndexException(scoredPassage.documentName)
})
)
}
}
def getUnderlyingRetrieval() : Retrieval = {
m_searcher
}
def close() {
m_searcher.close()
}
}
case class FetchedScoredDocument(scored: ScoredDocument, doc: Document)
case class FetchedScoredPassage(scored: ScoredPassage, doc: Document)
class DocumentNotInIndexException(val docName: String) extends RuntimeException
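// Illustrative usage sketch (hypothetical index path and query; uses only the API defined above):
// retrieveScoredDocuments runs the query, fetchDocuments keeps the ranking order while attaching
// the full Document for each hit, and close() releases the underlying retrieval.
object GalagoSearcherUsageSketch {
def main(args: Array[String]): Unit = {
val searcher = GalagoSearcher("/path/to/galago/index") // hypothetical index location
val scored = searcher.retrieveScoredDocuments("#combine(information retrieval)", None, 10)
val fetched = searcher.fetchDocuments(scored)
fetched.foreach(f => println(f.scored.documentName))
searcher.close()
}
}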
|
daltonj/CIIRShared
|
src/main/scala/edu/umass/ciir/galago/GalagoSearcher.scala
|
Scala
|
apache-2.0
| 10,589
|
package nasa.nccs.cdas.engine
import java.io.{IOException, PrintWriter, StringWriter}
import java.nio.file.{ Paths, Files }
import scala.xml
import java.io.File
import nasa.nccs.cdapi.cdm.{Collection, PartitionedFragment, _}
import nasa.nccs.cdas.loaders.{Collections, Masks}
import nasa.nccs.esgf.process._
import scala.concurrent.ExecutionContext.Implicits.global
import nasa.nccs.utilities.{Loggable, ProfilingTool, cdsutils}
import nasa.nccs.cdas.kernels.{Kernel, KernelMgr, KernelModule}
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{Await, Future, Promise}
import nasa.nccs.cdapi.tensors.{CDArray, CDByteArray, CDFloatArray}
import nasa.nccs.caching._
import ucar.{ma2, nc2}
import nasa.nccs.cdas.utilities.{GeoTools, appParameters, runtime}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import nasa.nccs.cdas.engine.spark.CDSparkContext
import nasa.nccs.wps._
import ucar.nc2.Attribute
import scala.io.Source
class Counter(start: Int = 0) {
private val index = new AtomicReference(start)
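// Lock-free increment: retry the compare-and-set until we win the race, then return the value we claimed.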
def get: Int = {
val i0 = index.get
if(index.compareAndSet( i0, i0 + 1 )) i0 else get
}
}
object CDS2ExecutionManager extends Loggable {
val handler_type_key = "execution.handler.type"
def apply(): CDS2ExecutionManager = { new CDS2ExecutionManager }
def shutdown() = {
shutdown_python_workers()
}
def shutdown_python_workers() = {
import sys.process._
val slaves_file = Paths.get( sys.env("SPARK_HOME"), "conf", "slaves" ).toFile
val shutdown_script = Paths.get( sys.env("HOME"), ".cdas", "sbin", "shutdown_python_worker.sh" ).toFile
if( slaves_file.exists && slaves_file.canRead ) {
val shutdown_futures = for (slave <- Source.fromFile(slaves_file).getLines(); if !slave.isEmpty && !slave.startsWith("#") ) yield {
Future { "ssh %s \\"%s\\"".format(slave.trim,shutdown_script.toString) !! }
}
Future.sequence( shutdown_futures )
} else try {
logger.info( "No slaves file found, shutting down python workers locally:")
shutdown_script.toString !!
} catch {
case err: Exception => logger.error( "Error shutting down python workers: " + err.toString )
}
}
// appParameters( handler_type_key, "spark" ) match {
// case exeMgr if exeMgr.toLowerCase.startsWith("future") =>
// throw new Exception( "CDFuturesExecutionManager no currently supported.")
//// import nasa.nccs.cds2.engine.futures.CDFuturesExecutionManager
//// logger.info("\\nExecuting Futures manager: serverConfig = " + exeMgr)
//// new CDFuturesExecutionManager()
// case exeMgr if exeMgr.toLowerCase.startsWith("spark") =>
// logger.info("\\nExecuting Spark manager: serverConfig = " + exeMgr)
// new CDSparkExecutionManager()
// case x => throw new Exception("Unrecognized execution.manager.type: " + x)
// }
def getConfigParamValue( key: String, serverConfiguration: Map[String,String], default_val: String ): String =
serverConfiguration.get( key ) match {
case Some( htype ) => htype
case None => appParameters( key, default_val )
}
}
class CDS2ExecutionManager extends WPSServer with Loggable {
import CDS2ExecutionManager._
shutdown_python_workers()
val serverContext = new ServerContext( collectionDataCache, CDSparkContext() )
val kernelManager = new KernelMgr()
// def getOperationInputs( context: CDASExecutionContext ): Map[String,OperationInput] = {
// val items = for (uid <- context.operation.inputs) yield {
// context.request.getInputSpec(uid) match {
// case Some(inputSpec) =>
// logger.info("getInputSpec: %s -> %s ".format(uid, inputSpec.longname))
// uid -> context.server.getOperationInput(inputSpec)
// case None => collectionDataCache.getExistingResult(uid) match {
// case Some(tVar: RDDTransientVariable) =>
// logger.info("getExistingResult: %s -> %s ".format(uid, tVar.result.elements.values.head.metadata.mkString(",")))
// uid -> new OperationTransientInput(tVar)
// case None => throw new Exception("Unrecognized input id: " + uid)
// }
// }
// }
// Map(items:_*)
// }
def describeWPSProcess( process: String ): xml.Elem = DescribeProcess( process )
def getProcesses: Map[String,WPSProcess] = kernelManager.getKernelMap
def getKernelModule( moduleName: String ): KernelModule = {
kernelManager.getModule( moduleName.toLowerCase ) match {
case Some(kmod) => kmod
case None => throw new Exception("Unrecognized Kernel Module %s, modules = %s ".format( moduleName, kernelManager.getModuleNames.mkString("[ ",", "," ]") ) )
}
}
def getResourcePath( resource: String ): Option[String] = Option(getClass.getResource(resource)).map( _.getPath )
def getKernel( moduleName: String, operation: String ): Kernel = {
val kmod = getKernelModule( moduleName )
kmod.getKernel( operation ) match {
case Some(kernel) => kernel
case None =>
throw new Exception( s"Unrecognized Kernel %s in Module %s, kernels = %s ".format( operation, moduleName, kmod.getKernelNames.mkString("[ ",", "," ]")) )
}
}
def getKernel( kernelName: String ): Kernel = {
val toks = kernelName.split('.')
getKernel( toks.dropRight(1).mkString("."), toks.last )
}
def fatal(err: Throwable): String = {
logger.error( "\\nError Executing Kernel: %s\\n".format(err.getMessage) )
val sw = new StringWriter
err.printStackTrace(new PrintWriter(sw))
logger.error( sw.toString )
err.getMessage
}
// def createTargetGrid( request: TaskRequest ): TargetGrid = {
// request.metadata.get("id") match {
// case Some(varId) => request.variableMap.get(varId) match {
// case Some(dataContainer: DataContainer) =>
// serverContext.createTargetGrid( dataContainer, request.getDomain(dataContainer.getSource) )
// case None => throw new Exception( "Unrecognized variable id in Grid spec: " + varId )
// }
// case None => throw new Exception("Target grid specification method has not yet been implemented: " + request.metadata.toString)
// }
// }
def createRequestContext(request: TaskRequest, run_args: Map[String,String] ): RequestContext = {
val t0 = System.nanoTime
val profiler = ProfilingTool( serverContext.spark.sparkContext )
val sourceContainers = request.variableMap.values.filter(_.isSource)
val t1 = System.nanoTime
val sources = for (data_container: DataContainer <- request.variableMap.values; if data_container.isSource; domainOpt = request.getDomain(data_container.getSource) )
yield serverContext.createInputSpec( data_container, domainOpt, request )
val t2 = System.nanoTime
val sourceMap: Map[String,Option[DataFragmentSpec]] = Map(sources.toSeq:_*)
val rv = new RequestContext (request.domainMap, sourceMap, request, profiler, run_args )
val t3 = System.nanoTime
profiler.timestamp( " LoadInputDataT: %.4f %.4f %.4f, MAXINT: %.2f G".format( (t1-t0)/1.0E9, (t2-t1)/1.0E9, (t3-t2)/1.0E9, Int.MaxValue/1.0E9 ), true )
rv
}
def cacheInputData(request: TaskRequest, run_args: Map[String, String] ): Iterable[Option[(DataFragmentKey, Future[PartitionedFragment])]] = {
val sourceContainers = request.variableMap.values.filter(_.isSource)
for (data_container: DataContainer <- request.variableMap.values;
if data_container.isSource;
domainOpt = request.getDomain(data_container.getSource))
yield serverContext.cacheInputData(data_container, run_args, domainOpt, request.getTargetGrid(data_container), None )
}
def deleteFragments( fragIds: Iterable[String] ) = {
logger.info("Deleting frags: " + fragIds.mkString(", ") + "; Current Frags = " + FragmentPersistence.getFragmentIdList.mkString(", "))
serverContext.deleteFragments( fragIds )
}
def clearCache: Set[String] = serverContext.clearCache
def searchForAttrValue(metadata: Map[String, nc2.Attribute], keys: List[String], default_val: String): String = {
keys.length match {
case 0 => default_val
case x => metadata.get(keys.head) match {
case Some(valueAttr) => valueAttr.getStringValue()
case None => searchForAttrValue(metadata, keys.tail, default_val)
}
}
}
def searchForValue(metadata: Map[String,String], keys: List[String], default_val: String): String = {
keys.length match {
case 0 => default_val
case x => metadata.get(keys.head) match {
case Some(valueAttr) => valueAttr
case None => searchForValue(metadata, keys.tail, default_val)
}
}
}
def saveResultToFile( resultId: String, gridUid: String, maskedTensor: CDFloatArray, request: RequestContext, server: ServerContext, varMetadata: Map[String,String], dsetMetadata: List[nc2.Attribute] ): Option[String] = {
val optInputSpec: Option[DataFragmentSpec] = request.getInputSpec()
val targetGrid = request.getTargetGrid( gridUid ).getOrElse( throw new Exception( "Undefined Target Grid when saving result " + resultId ))
request.getCollection(server) map { collection =>
val varname = searchForValue(varMetadata, List("varname", "fullname", "standard_name", "original_name", "long_name"), "Nd4jMaskedTensor")
val resultFile = Kernel.getResultFile( resultId, true )
val writer: nc2.NetcdfFileWriter = nc2.NetcdfFileWriter.createNew(nc2.NetcdfFileWriter.Version.netcdf4, resultFile.getAbsolutePath)
assert(targetGrid.grid.getRank == maskedTensor.getRank, "Axes not the same length as data shape in saveResult")
val coordAxes = collection.grid.getCoordinateAxes
val dims: IndexedSeq[nc2.Dimension] = targetGrid.grid.axes.indices.map(idim => writer.addDimension(null, targetGrid.grid.getAxisSpec(idim).getAxisName, maskedTensor.getShape(idim)))
val dimsMap: Map[String, nc2.Dimension] = Map(dims.map(dim => (dim.getFullName -> dim)): _*)
val newCoordVars: List[(nc2.Variable, ma2.Array)] = (for (coordAxis <- coordAxes) yield optInputSpec flatMap { inputSpec => inputSpec.getRange(coordAxis.getFullName) match {
case Some(range) =>
val coordVar: nc2.Variable = writer.addVariable(null, coordAxis.getFullName, coordAxis.getDataType, coordAxis.getFullName)
for (attr <- coordAxis.getAttributes) writer.addVariableAttribute(coordVar, attr)
val newRange = dimsMap.get(coordAxis.getFullName) match {
case None => range;
case Some(dim) => if (dim.getLength < range.length) new ma2.Range(dim.getLength) else range
}
Some(coordVar, coordAxis.read(List(newRange)))
case None => None
} }).flatten
logger.info("Writing result %s to file '%s', varname=%s, dims=(%s), shape=[%s], coords = [%s]".format(
resultId, resultFile.getAbsolutePath, varname, dims.map(_.toString).mkString(","), maskedTensor.getShape.mkString(","),
newCoordVars.map { case (cvar, data) => "%s: (%s)".format(cvar.getFullName, data.getShape.mkString(",")) }.mkString(",")))
val variable: nc2.Variable = writer.addVariable(null, varname, ma2.DataType.FLOAT, dims.toList)
varMetadata map {case (key, value) => variable.addAttribute( new Attribute(key, value)) }
variable.addAttribute(new nc2.Attribute("missing_value", maskedTensor.getInvalid))
dsetMetadata.foreach(attr => writer.addGroupAttribute(null, attr))
try {
writer.create()
for (newCoordVar <- newCoordVars) {
newCoordVar match {
case (coordVar, coordData) =>
logger.info("Writing cvar %s: shape = [%s]".format(coordVar.getFullName, coordData.getShape.mkString(",")))
writer.write(coordVar, coordData)
}
}
writer.write(variable, maskedTensor)
// for( dim <- dims ) {
// val dimvar: nc2.Variable = writer.addVariable(null, dim.getFullName, ma2.DataType.FLOAT, List(dim) )
// writer.write( dimvar, dimdata )
// }
writer.close()
resultFile.getAbsolutePath
} catch {
case e: IOException => logger.error("ERROR creating file %s%n%s".format(resultFile.getAbsolutePath, e.getMessage()));
return None
}
}
}
def isCollectionPath( path: File ): Boolean = { path.isDirectory || path.getName.endsWith(".csv") }
def executeUtilityRequest(util_id: String, request: TaskRequest, run_args: Map[String, String]): WPSMergedEventReport = util_id match {
case "magg" =>
val collectionNodes = request.variableMap.values.flatMap( ds => {
val pcol = ds.getSource.collection
val base_dir = new File(pcol.dataPath)
val base_id = pcol.id
val col_dirs: Array[File] = base_dir.listFiles
for( col_path <- col_dirs; if isCollectionPath(col_path); col_id = base_id + "/" + col_path.getName ) yield {
Collection.aggregate( col_id, col_path )
}
})
new WPSMergedEventReport( collectionNodes.map( cnode => new UtilityExecutionResult( "aggregate", cnode )).toList )
case "agg" =>
val collectionNodes = request.variableMap.values.map( ds => Collection.aggregate( ds.getSource ) )
new WPSMergedEventReport( collectionNodes.map( cnode => new UtilityExecutionResult( "aggregate", cnode )).toList )
case "clearCache" =>
val fragIds = clearCache
new WPSMergedEventReport( List( new UtilityExecutionResult( "clearCache", <deleted fragments={fragIds.mkString(",")}/> ) ) )
case "cache" =>
val cached_data: Iterable[(DataFragmentKey,Future[PartitionedFragment])] = cacheInputData(request, run_args).flatten
FragmentPersistence.close()
new WPSMergedEventReport( cached_data.map( cache_result => new UtilityExecutionResult( cache_result._1.toStrRep, <cache/> ) ).toList )
case "dcol" =>
val colIds = request.variableMap.values.map( _.getSource.collection.id )
val deletedCollections = Collections.removeCollections( colIds.toArray )
new WPSMergedEventReport(List(new UtilityExecutionResult("dcol", <deleted collections={deletedCollections.mkString(",")}/> )))
case "dfrag" =>
val fragIds: Iterable[String] = request.variableMap.values.map( ds => Array( ds.getSource.name, ds.getSource.collection.id, ds.getSource.domain ).mkString("|") )
deleteFragments( fragIds )
new WPSMergedEventReport(List(new UtilityExecutionResult("dfrag", <deleted fragments={fragIds.mkString(",")}/> )))
case "dres" =>
val resIds: Iterable[String] = request.variableMap.values.map( ds => ds.uid )
logger.info( "Deleting results: " + resIds.mkString(", ") + "; Current Results = " + collectionDataCache.getResultIdList.mkString(", ") )
resIds.foreach( resId => collectionDataCache.deleteResult( resId ) )
new WPSMergedEventReport(List(new UtilityExecutionResult("dres", <deleted results={resIds.mkString(",")}/> )))
case x => throw new Exception( "Unrecognized Utility:" + x )
}
def futureExecute( request: TaskRequest, run_args: Map[String,String] ): Future[WPSResponse] = Future {
logger.info("ASYNC Execute { runargs: " + run_args.toString + ", request: " + request.toString + " }")
val requestContext = createRequestContext(request, run_args)
executeWorkflows(request, requestContext)
}
def blockingExecute( request: TaskRequest, run_args: Map[String,String] ): WPSResponse = {
logger.info("Blocking Execute { runargs: " + run_args.toString + ", request: " + request.toString + " }")
runtime.printMemoryUsage(logger)
val t0 = System.nanoTime
try {
val req_ids = request.name.split('.')
req_ids(0) match {
case "util" =>
logger.info("Executing utility request " + req_ids(1) )
executeUtilityRequest( req_ids(1), request, run_args )
case _ =>
logger.info("Executing task request " + request.name )
val requestContext = createRequestContext (request, run_args)
val response = executeWorkflows (request, requestContext )
requestContext.logTimingReport("Executed task request " + request.name)
response
}
} catch {
case err: Exception => new WPSExceptionReport(err)
}
}
// def futureExecute( request: TaskRequest, run_args: Map[String,String] ): Future[xml.Elem] = Future {
// try {
// val sourceContainers = request.variableMap.values.filter(_.isSource)
// val inputFutures: Iterable[Future[DataFragmentSpec]] = for (data_container: DataContainer <- request.variableMap.values; if data_container.isSource) yield {
// serverContext.dataLoader.loadVariableDataFuture(data_container, request.getDomain(data_container.getSource))
// }
// inputFutures.flatMap( inputFuture => for( input <- inputFuture ) yield executeWorkflows(request, run_args).toXml )
// } catch {
// case err: Exception => fatal(err)
// }
// }
def getResultVariable( resId: String ): Option[RDDTransientVariable] = collectionDataCache.getExistingResult( resId )
def getResultFilePath( resId: String ): Option[String] = getResultVariable( resId ) match {
case Some( tvar: RDDTransientVariable ) =>
val result = tvar.result.elements.values.head
val resultFile = Kernel.getResultFile( resId )
if(resultFile.exists) Some(resultFile.getAbsolutePath)
else { saveResultToFile(resId, tvar.getGridId, result.toCDFloatArray, tvar.request, serverContext, result.metadata, List.empty[nc2.Attribute] ) }
case None => None
}
def getResult( resId: String ): xml.Node = {
logger.info( "Locating result: " + resId )
collectionDataCache.getExistingResult( resId ) match {
case None =>
new WPSMergedExceptionReport( List( new WPSExceptionReport( new Exception("Unrecognized resId: " + resId + ", existing resIds: " + collectionDataCache.getResultIdList.mkString(", ") )) ) ).toXml
case Some( tvar: RDDTransientVariable ) =>
new WPSExecuteResult( "WPS", tvar ).toXml
}
}
def getResultStatus( resId: String ): xml.Node = {
logger.info( "Locating result: " + resId )
val message = collectionDataCache.getExistingResult( resId ) match {
case None => "CDAS Process has not yet completed"
case Some( tvar: RDDTransientVariable ) => "CDAS Process successfully completed"
}
new WPSExecuteStatus( "WPS", message, resId ).toXml
}
def asyncExecute( request: TaskRequest, run_args: Map[String,String] ): WPSReferenceExecuteResponse = {
logger.info("Execute { runargs: " + run_args.toString + ", request: " + request.toString + " }")
runtime.printMemoryUsage(logger)
val jobId = collectionDataCache.addJob( request.getJobRec(run_args) )
val req_ids = request.name.split('.')
req_ids(0) match {
case "util" =>
val util_result = executeUtilityRequest(req_ids(1), request, Map("jobId" -> jobId) ++ run_args )
Future(util_result)
case _ =>
val futureResult = this.futureExecute(request, Map("jobId" -> jobId) ++ run_args)
futureResult onSuccess { case results: WPSMergedEventReport =>
println("Process Completed: " + results.toString)
processAsyncResult(jobId, results)
}
futureResult onFailure { case e: Throwable => fatal(e); collectionDataCache.removeJob(jobId); throw e }
}
new AsyncExecutionResult( request.id.toString, request.getProcess, jobId )
}
def processAsyncResult( jobId: String, results: WPSMergedEventReport ) = {
collectionDataCache.removeJob( jobId )
}
// def execute( request: TaskRequest, runargs: Map[String,String] ): xml.Elem = {
// val async = runargs.getOrElse("async","false").toBoolean
// if(async) executeAsync( request, runargs ) else blockingExecute( request, runargs )
// }
def getWPSCapabilities( identifier: String ): xml.Elem =
identifier match {
case x if x.startsWith("ker") => kernelManager.toXml
case x if x.startsWith("frag") => FragmentPersistence.getFragmentListXml
case x if x.startsWith("res") => collectionDataCache.getResultListXml // collectionDataCache
case x if x.startsWith("job") => collectionDataCache.getJobListXml
case x if x.startsWith("coll") => {
val itToks = x.split(Array(':','|'))
if( itToks.length < 2 ) Collections.toXml
else <collection id={itToks(0)}> { Collections.getCollectionMetadata( itToks(1) ).map( attr => attrToXml( attr ) ) } </collection>
}
case x if x.startsWith("op") => kernelManager.getModulesXml
case x if x.startsWith("var") => {
println( "getCapabilities->identifier: " + identifier )
val itToks = x.split(Array(':','|'))
if( itToks.length < 2 ) <error message="Unspecified collection and variables" />
else Collections.getVariableListXml( itToks(1).split(',') )
}
case _ => GetCapabilities
}
def attrToXml( attr: nc2.Attribute ): xml.Elem = {
val sb = new StringBuffer()
for( index <- 0 until attr.getLength ) {
if( index > 0 ) sb.append(",")
if (attr.isString) sb.append(attr.getStringValue(index)) else sb.append(attr.getNumericValue(index))
}
<attr id={attr.getFullName.split("--").last}> { sb.toString } </attr>
}
def executeWorkflows( request: TaskRequest, requestCx: RequestContext ): WPSResponse = {
val results = request.operations.head.moduleName match {
case "util" => new WPSMergedEventReport( request.operations.map( utilityExecution( _, requestCx )))
case x =>
logger.info( "---------->>> Execute Workflows: " + request.operations.mkString(",") )
val responses = request.workflow.executeRequest( requestCx )
new MergedWPSExecuteResponse( request.id.toString, responses )
}
FragmentPersistence.close()
// logger.info( "---------->>> Execute Workflows: Created XML response: " + results.toXml.toString )
results
}
def executeUtility( operationCx: OperationContext, requestCx: RequestContext ): UtilityExecutionResult = {
val report: xml.Elem = <ReportText> {"Completed executing utility " + operationCx.name.toLowerCase } </ReportText>
new UtilityExecutionResult( operationCx.name.toLowerCase + "~u0", report )
}
def utilityExecution( operationCx: OperationContext, requestCx: RequestContext ): UtilityExecutionResult = {
logger.info( " ***** Utility Execution: utilName=%s, >> Operation = %s ".format( operationCx.name, operationCx.toString ) )
executeUtility( operationCx, requestCx )
}
}
//object SampleTaskRequests {
//
// def createTestData() = {
// var axes = Array("time","lev","lat","lon")
// var shape = Array(1,1,180,360)
// val maskedTensor: CDFloatArray = CDFloatArray( shape, Array.fill[Float](180*360)(1f), Float.MaxValue)
// val varname = "ta"
// val resultFile = "/tmp/SyntheticTestData.nc"
// val writer: nc2.NetcdfFileWriter = nc2.NetcdfFileWriter.createNew(nc2.NetcdfFileWriter.Version.netcdf4, resultFile )
// val dims: IndexedSeq[nc2.Dimension] = shape.indices.map( idim => writer.addDimension(null, axes(idim), maskedTensor.getShape(idim)))
// val variable: nc2.Variable = writer.addVariable(null, varname, ma2.DataType.FLOAT, dims.toList)
// variable.addAttribute( new nc2.Attribute( "missing_value", maskedTensor.getInvalid ) )
// writer.create()
// writer.write( variable, maskedTensor )
// writer.close()
// println( "Writing result to file '%s'".format(resultFile) )
// }
//
// def getSpatialAve(collection: String, varname: String, weighting: String, level_index: Int = 0, time_index: Int = 0): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List( Map("name" -> "d0", "lev" -> Map("start" -> level_index, "end" -> level_index, "system" -> "indices"), "time" -> Map("start" -> time_index, "end" -> time_index, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> s"collection:/$collection", "name" -> s"$varname:v0", "domain" -> "d0")),
// "operation" -> List( Map( "input"->"v0", "axes"->"xy", "weights"->weighting ) ))
// TaskRequest( "CDSpark.average", dataInputs )
// }
//
// def getMaskedSpatialAve(collection: String, varname: String, weighting: String, level_index: Int = 0, time_index: Int = 0): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List( Map("name" -> "d0", "mask" -> "#ocean50m", "lev" -> Map("start" -> level_index, "end" -> level_index, "system" -> "indices"), "time" -> Map("start" -> time_index, "end" -> time_index, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> s"collection:/$collection", "name" -> s"$varname:v0", "domain" -> "d0")),
// "operation" -> List( Map( "input"->"v0", "axes"->"xy", "weights"->weighting ) ))
// TaskRequest( "CDSpark.average", dataInputs )
// }
//
// def getConstant(collection: String, varname: String, level_index: Int = 0 ): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List( Map("name" -> "d0", "lev" -> Map("start" -> level_index, "end" -> level_index, "system" -> "indices"), "time" -> Map("start" -> 10, "end" -> 10, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> s"collection:/$collection", "name" -> s"$varname:v0", "domain" -> "d0")),
// "operation" -> List( Map( "input"->"v0") ))
// TaskRequest( "CDSpark.const", dataInputs )
// }
//
// def getAnomalyTest: TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> -7.0854263, "end" -> -7.0854263, "system" -> "values"), "lon" -> Map("start" -> 12.075, "end" -> 12.075, "system" -> "values"), "lev" -> Map("start" -> 1000, "end" -> 1000, "system" -> "values"))),
// "variable" -> List(Map("uri" -> "collection://merra_1/hourly/aggtest", "name" -> "t:v0", "domain" -> "d0")), // collection://merra300/hourly/asm_Cp
// "operation" -> List( Map( "input"->"v0", "axes"->"t" ) ))
// TaskRequest( "CDSpark.anomaly", dataInputs )
// }
//}
//abstract class SyncExecutor {
// val printer = new scala.xml.PrettyPrinter(200, 3)
//
// def main(args: Array[String]) {
// val executionManager = getExecutionManager
// val final_result = getExecutionManager.blockingExecute( getTaskRequest(args), getRunArgs )
// println(">>>> Final Result: " + printer.format(final_result.toXml))
// }
//
// def getTaskRequest(args: Array[String]): TaskRequest
// def getRunArgs = Map("async" -> "false")
// def getExecutionManager = CDS2ExecutionManager(Map.empty)
// def getCollection( id: String ): Collection = Collections.findCollection(id) match { case Some(collection) => collection; case None=> throw new Exception(s"Unknown Collection: $id" ) }
//}
//
//object TimeAveSliceTask extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 10, "end" -> 10, "system" -> "values"), "lon" -> Map("start" -> 10, "end" -> 10, "system" -> "values"), "lev" -> Map("start" -> 8, "end" -> 8, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "hur:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t")))
// TaskRequest("CDSpark.average", dataInputs)
// }
//}
//
//object YearlyCycleSliceTask extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "period" -> 1, "unit" -> "month", "mod" -> 12)))
// TaskRequest("CDSpark.bin", dataInputs)
// }
//}
//
////object AveTimeseries extends SyncExecutor {
//// def getTaskRequest(args: Array[String]): TaskRequest = {
//// import nasa.nccs.esgf.process.DomainAxis.Type._
//// val workflows = List[WorkflowContainer](new WorkflowContainer(operations = List( OperationContext("CDSpark.average", List("v0"), Map("axis" -> "t")))))
//// val variableMap = Map[String, DataContainer]("v0" -> new DataContainer(uid = "v0", source = Some(new DataSource(name = "hur", collection = getCollection("merra/mon/atmos"), domain = "d0"))))
//// val domainMap = Map[String, DomainContainer]("d0" -> new DomainContainer(name = "d0", axes = cdsutils.flatlist(DomainAxis(Z, 1, 1), DomainAxis(Y, 100, 100), DomainAxis(X, 100, 100)), None))
//// new TaskRequest("CDSpark.average", variableMap, domainMap, workflows, Map("id" -> "v0"))
//// }
////}
//
//object CreateVTask extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices")),
// Map("name" -> "d1", "time" -> Map("start" -> "2010-01-16T12:00:00", "end" -> "2010-01-16T12:00:00", "system" -> "values"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t", "name" -> "CDSpark.anomaly"), Map("input" -> "v0", "period" -> 1, "unit" -> "month", "mod" -> 12, "name" -> "CDSpark.timeBin"), Map("input" -> "v0", "domain" -> "d1", "name" -> "CDSpark.subset")))
// TaskRequest("CDSpark.workflow", dataInputs)
// }
//}
//
//object YearlyCycleTask extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "period" -> 1, "unit" -> "month", "mod" -> 12)))
// TaskRequest("CDSpark.timeBin", dataInputs)
// }
//}
//
//object SeasonalCycleRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "time" -> Map("start" -> 0, "end" -> 36, "system" -> "indices"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "period" -> 3, "unit" -> "month", "mod" -> 4, "offset" -> 2)))
// TaskRequest("CDSpark.timeBin", dataInputs)
// }
//}
//
//object YearlyMeansRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "period" -> 12, "unit" -> "month")))
// TaskRequest("CDSpark.timeBin", dataInputs)
// }
//}
//
//object SubsetRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 45, "end" -> 45, "system" -> "values"), "lon" -> Map("start" -> 30, "end" -> 30, "system" -> "values"), "lev" -> Map("start" -> 3, "end" -> 3, "system" -> "indices")),
// Map("name" -> "d1", "time" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "domain" -> "d1")))
// TaskRequest("CDSpark.subset", dataInputs)
// }
//}
//
//object TimeSliceAnomaly extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 10, "end" -> 10, "system" -> "values"), "lon" -> Map("start" -> 10, "end" -> 10, "system" -> "values"), "lev" -> Map("start" -> 8, "end" -> 8, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t")))
// TaskRequest("CDSpark.anomaly", dataInputs)
// }
//}
//
//object MetadataRequest extends SyncExecutor {
// val level = 0
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs: Map[String, Seq[Map[String, Any]]] = level match {
// case 0 => Map()
// case 1 => Map("variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0")))
// }
// TaskRequest("CDSpark.metadata", dataInputs)
// }
//}
//
//object CacheRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra300/hourly/asm_Cp", "name" -> "t:v0", "domain" -> "d0")))
// TaskRequest("util.cache", dataInputs)
// }
//}
//
//object AggregateAndCacheRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra_1/hourly/aggTest3", "path" -> "/Users/tpmaxwel/Dropbox/Tom/Data/MERRA/DAILY/", "name" -> "t", "domain" -> "d0")))
// TaskRequest("util.cache", dataInputs)
// }
//}
//
//object AggregateRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map( "variable" -> List(Map("uri" -> "collection://merra_1/hourly/aggTest37", "path" -> "/Users/tpmaxwel/Dropbox/Tom/Data/MERRA/DAILY/" ) ) )
// TaskRequest("util.agg", dataInputs)
// }
//}
//
//
//object MultiAggregateRequest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val baseCollectionId = args(0)
// val baseDirectory = new java.io.File(args(1))
// assert( baseDirectory.isDirectory, "Base directory is not a directory: " + args(1) )
// val dataInputs = Map( "variable" -> baseDirectory.listFiles.map( dir => Map("uri" -> Array("collection:",baseCollectionId,dir.getName).mkString("/"), "path" -> dir.toString ) ).toSeq )
// TaskRequest("util.agg", dataInputs)
// }
//}
//
//object AggregateAndCacheRequest2 extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra/daily/aggTest", "path" -> "/Users/tpmaxwel/Dropbox/Tom/Data/MERRA/DAILY", "name" -> "t", "domain" -> "d0")))
// TaskRequest("util.cache", dataInputs)
// }
//}
//
//object AggregateAndCacheRequest1 extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra2/hourly/M2T1NXLND-2004-04", "path" -> "/att/pubrepo/MERRA/remote/MERRA2/M2T1NXLND.5.12.4/2004/04", "name" -> "SFMC", "domain" -> "d0")))
// TaskRequest("util.cache", dataInputs)
// }
//}
//
//object Max extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 20, "end" -> 20, "system" -> "indices"), "time" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "xy")))
// TaskRequest("CDSpark.max", dataInputs)
// }
//}
//
//object Min extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lev" -> Map("start" -> 20, "end" -> 20, "system" -> "indices"), "time" -> Map("start" -> 0, "end" -> 0, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "xy")))
// TaskRequest("CDSpark.min", dataInputs)
// }
//}
//
//object AnomalyTest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> -7.0854263, "end" -> -7.0854263, "system" -> "values"), "lon" -> Map("start" -> 12.075, "end" -> 12.075, "system" -> "values"), "lev" -> Map("start" -> 1000, "end" -> 1000, "system" -> "values"))),
// "variable" -> List(Map("uri" -> "collection://merra_1/hourly/aggtest", "name" -> "t:v0", "domain" -> "d0")), // collection://merra300/hourly/asm_Cp
// "operation" -> List(Map("input" -> "v0", "axes" -> "t")))
// TaskRequest("CDSpark.anomaly", dataInputs)
// }
//}
//
//object AnomalyTest1 extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 20.0, "end" -> 20.0, "system" -> "values"), "lon" -> Map("start" -> 0.0, "end" -> 0.0, "system" -> "values"))),
// "variable" -> List(Map("uri" -> "collection://merra2/hourly/m2t1nxlnd-2004-04", "name" -> "SFMC:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t")))
// TaskRequest("CDSpark.anomaly", dataInputs)
// }
//}
//object AnomalyTest2 extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d0", "lat" -> Map("start" -> 0.0, "end" -> 0.0, "system" -> "values"), "lon" -> Map("start" -> 0.0, "end" -> 0.0, "system" -> "values"), "level" -> Map("start" -> 10, "end" -> 10, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://merra/daily/aggTest", "name" -> "t:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t")))
// TaskRequest("CDSpark.anomaly", dataInputs)
// }
//}
//
//object AnomalyArrayTest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d1", "lat" -> Map("start" -> 3, "end" -> 3, "system" -> "indices")), Map("name" -> "d0", "lat" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"), "lon" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"), "lev" -> Map("start" -> 30, "end" -> 30, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "collection://MERRA/mon/atmos", "name" -> "ta:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t", "name" -> "CDSpark.anomaly"), Map("input" -> "v0", "domain" -> "d1", "name" -> "CDSpark.subset")))
// TaskRequest("CDSpark.workflow", dataInputs)
// }
//}
//
//object AnomalyArrayNcMLTest extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val dataInputs = Map(
// "domain" -> List(Map("name" -> "d1", "lat" -> Map("start" -> 3, "end" -> 3, "system" -> "indices")), Map("name" -> "d0", "lat" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"), "lon" -> Map("start" -> 3, "end" -> 3, "system" -> "indices"), "lev" -> Map("start" -> 30, "end" -> 30, "system" -> "indices"))),
// "variable" -> List(Map("uri" -> "file://Users/tpmaxwel/data/AConaty/comp-ECMWF/ecmwf.xml", "name" -> "Temperature:v0", "domain" -> "d0")),
// "operation" -> List(Map("input" -> "v0", "axes" -> "t", "name" -> "CDSpark.anomaly"), Map("input" -> "v0", "domain" -> "d1", "name" -> "CDSpark.subset")))
// TaskRequest("CDSpark.workflow", dataInputs)
// }
//}
//
////object AveArray extends SyncExecutor {
//// def getTaskRequest(args: Array[String]): TaskRequest = {
//// import nasa.nccs.esgf.process.DomainAxis.Type._
////
//// val workflows = List[WorkflowContainer](new WorkflowContainer(operations = List( OperationContext("CDSpark.average", List("v0"), Map("axis" -> "xy")))))
//// val variableMap = Map[String, DataContainer]("v0" -> new DataContainer(uid = "v0", source = Some(new DataSource(name = "t", collection = getCollection("merra/daily"), domain = "d0"))))
//// val domainMap = Map[String, DomainContainer]("d0" -> new DomainContainer(name = "d0", axes = cdsutils.flatlist(DomainAxis(Z, 0, 0)), None))
//// new TaskRequest("CDSpark.average", variableMap, domainMap, workflows, Map("id" -> "v0"))
//// }
////}
//
//object SpatialAve1 extends SyncExecutor {
// def getTaskRequest(args: Array[String]): TaskRequest = SampleTaskRequests.getSpatialAve("/MERRA/mon/atmos", "ta", "cosine")
//}
//
//object cdscan extends App with Loggable {
// val printer = new scala.xml.PrettyPrinter(200, 3)
// val executionManager = CDS2ExecutionManager(Map.empty)
// val final_result = executionManager.blockingExecute( getTaskRequest(args), Map("async" -> "false") )
// println(">>>> Final Result: " + printer.format(final_result.toXml))
//
// def getTaskRequest(args: Array[String]): TaskRequest = {
// val baseCollectionId = args(0)
// val baseDirectory = new java.io.File(args(1))
// logger.info( s"Running cdscan with baseCollectionId $baseCollectionId and baseDirectory $baseDirectory")
// assert( baseDirectory.isDirectory, "Base directory is not a directory: " + args(1) )
// val dataInputs = Map( "variable" -> baseDirectory.listFiles.filter( f => Collections.hasChildNcFile(f) ).map(
// dir => Map("uri" -> Array("collection:",baseCollectionId,dir.getName).mkString("/"), "path" -> dir.toString ) ).toSeq )
// TaskRequest("util.agg", dataInputs)
// }
//}
//
//
//object IntMaxTest extends App {
// printf( " MAXINT: %.2f G, MAXLONG: %.2f G".format( Int.MaxValue/1.0E9, Long.MaxValue/1.0E9 ) )
//}
// TaskRequest: name= CWT.average, variableMap= Map(v0 -> DataContainer { id = hur:v0, dset = merra/mon/atmos, domain = d0 }, ivar#1 -> OperationContext { id = ~ivar#1, name = , result = ivar#1, inputs = List(v0), optargs = Map(axis -> xy) }), domainMap= Map(d0 -> DomainContainer { id = d0, axes = List(DomainAxis { id = lev, start = 0, end = 1, system = "indices", bounds = }) })
|
nasa-nccs-cds/CDAS2
|
src/main/scala/nasa/nccs/cdas/engine/manager.scala
|
Scala
|
gpl-2.0
| 43,404
|
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.frontend.bootstrap
import java.security.cert.X509Certificate
import org.scalatest.{Matchers, WordSpecLike}
import play.api.GlobalSettings
import play.api.http.HttpEntity
import play.api.mvc._
import play.api.test.FakeHeaders
import play.api.test.Helpers._
import play.twirl.api.Html
import uk.gov.hmrc.play.frontend.exceptions.ApplicationException
import org.scalatestplus.play.OneAppPerSuite
class ShowErrorPageSpec extends WordSpecLike with Matchers with OneAppPerSuite {
object TestShowErrorPage extends ShowErrorPage with GlobalSettings {
override def standardErrorTemplate(pageTitle: String, heading: String, message: String)(
implicit rh: Request[_]): Html = Html("error")
}
import TestShowErrorPage._
"resolving an error" should {
"return a generic InternalServerError result" in {
val exception = new Exception("Runtime exception")
val result = resolveError(FakeRequestHeader, exception)
result.header.status shouldBe INTERNAL_SERVER_ERROR
result.header.headers should contain(CACHE_CONTROL -> "no-cache")
}
"return a generic InternalServerError result if the exception cause is null" in {
val exception = new Exception("Runtime exception", null)
val result = resolveError(FakeRequestHeader, exception)
result.header.status shouldBe INTERNAL_SERVER_ERROR
result.header.headers should contain(CACHE_CONTROL -> "no-cache")
}
"return an InternalServerError result for an application error" in {
val responseCode = SEE_OTHER
val location = "http://some.test.location/page"
val theResult = Result(
ResponseHeader(responseCode, Map("Location" -> location)),
HttpEntity.NoEntity
)
val appException = new ApplicationException("paye", theResult, "application exception")
val result = resolveError(FakeRequestHeader, appException)
result shouldBe theResult
}
}
}
case object FakeRequestHeader extends RequestHeader {
override def id: Long = 0L
override def remoteAddress: String = ""
override def headers: Headers = FakeHeaders()
override def queryString: Map[String, Seq[String]] = Map.empty
override def version: String = ""
override def method: String = "GET"
override def path: String = "some-path"
override def uri: String = ""
override def tags: Map[String, String] = Map.empty
override def secure: Boolean = false
override def clientCertificateChain: Option[Seq[X509Certificate]] = None
}
|
hmrc/frontend-bootstrap
|
src/test/scala/uk/gov/hmrc/play/frontend/bootstrap/ShowErrorPageSpec.scala
|
Scala
|
apache-2.0
| 3,133
|
import scala.reflect.ClassManifest
// #1435
object t1435 {
implicit def a(s:String):String = sys.error("")
implicit def a(i:Int):String = sys.error("")
implicit def b(i:Int):String = sys.error("")
}
class C1435 {
val v:String = {
import t1435.a
2
}
}
// #1492
class C1492 {
class X
def foo(x: X => X): Unit = {}
foo ( implicit x => implicitly[X] )
foo { implicit x => implicitly[X] }
}
// #1579
object Test1579 {
class Column
class Query[E](val value: E)
class Invoker(q: Any) { val foo = null }
implicit def unwrap[C](q: Query[C]) = q.value
implicit def invoker(q: Query[Column]) = new Invoker(q)
val q = new Query(new Column)
q.foo
}
// #1625
object Test1625 {
class Wrapped(x:Any) {
def unwrap() = x
}
implicit def byName[A](x: => A) = new Wrapped(x)
implicit def byVal[A](x: A) = x
def main(args: Array[String]) = {
// val res:Wrapped = 7 // works
val res = 7.unwrap() // doesn't work
println("=> result: " + res)
}
}
object Test2188 {
implicit def toJavaList[A: ClassManifest](t:collection.Seq[A]):java.util.List[A] = java.util.Arrays.asList(t.toArray:_*)
val x: java.util.List[String] = List("foo")
}
object TestNumericWidening {
val y = 1
val x: java.lang.Long = y
}
// #2709
package foo2709 {
class A
class B
package object bar {
implicit def a2b(a: A): B = new B
}
package bar {
object test {
new A: B
}
}
}
// Problem with specs
object specsProblem {
println(implicitly[Manifest[Class[_]]])
}
|
scala/scala
|
test/files/pos/implicits-old.scala
|
Scala
|
apache-2.0
| 1,533
|
package com.softwaremill.bootzooka.passwordreset.application
import java.time.temporal.ChronoUnit
import java.time.{Instant, ZoneOffset}
import java.util.UUID
import com.softwaremill.bootzooka.passwordreset.domain.PasswordResetCode
import com.softwaremill.bootzooka.test.{FlatSpecWithDb, TestHelpersWithDb}
import com.softwaremill.bootzooka.user.domain.User
import com.typesafe.config.ConfigFactory
class PasswordResetServiceSpec extends FlatSpecWithDb with TestHelpersWithDb {
lazy val config = new PasswordResetConfig {
override def rootConfig = ConfigFactory.load()
}
val passwordResetCodeDao = new PasswordResetCodeDao(sqlDatabase)
val passwordResetService =
new PasswordResetService(
userDao,
passwordResetCodeDao,
emailService,
emailTemplatingEngine,
config,
passwordHashing
)
"sendResetCodeToUser" should "do nothing when login doesn't exist" in {
passwordResetService.sendResetCodeToUser("Does not exist").futureValue
}
"performPasswordReset" should "delete code after it was used once" in {
// given
val user = newRandomStoredUser()
val code = PasswordResetCode(randomString(), user)
passwordResetCodeDao.add(code).futureValue
val newPassword1 = randomString()
val newPassword2 = randomString()
// when
val result1 = passwordResetService.performPasswordReset(code.code, newPassword1).futureValue
val result2 = passwordResetService.performPasswordReset(code.code, newPassword2).futureValue
result1 should be('right)
result2 should be('left)
val updatedUser = userDao.findById(user.id).futureValue.get
passwordHashing.verifyPassword(updatedUser.password, newPassword1, updatedUser.salt) should be(true)
passwordHashing.verifyPassword(updatedUser.password, newPassword2, updatedUser.salt) should be(false)
passwordResetCodeDao.findByCode(code.code).futureValue should be(None)
}
"performPasswordReset" should "delete code and do nothing if the code expired" in {
// given
val user = newRandomStoredUser()
val previousDay = Instant.now().minus(24, ChronoUnit.HOURS).atOffset(ZoneOffset.UTC)
val code = PasswordResetCode(UUID.randomUUID(), randomString(), user, previousDay)
passwordResetCodeDao.add(code).futureValue
val newPassword = randomString()
// when
val result = passwordResetService.performPasswordReset(code.code, newPassword).futureValue
result should be('left)
val updatedUser = userDao.findById(user.id).futureValue.get
passwordHashing.verifyPassword(updatedUser.password, newPassword, updatedUser.salt) should be(false)
passwordResetCodeDao.findByCode(code.code).futureValue should be(None)
}
"performPasswordReset" should "calculate different hash values for the same passwords" in {
// given
val password = randomString()
val user = newRandomStoredUser(Some(password))
val originalPasswordHash = userDao.findById(user.id).futureValue.get.password
val code = PasswordResetCode(randomString(), user)
passwordResetCodeDao.add(code).futureValue
// when
val result = passwordResetService.performPasswordReset(code.code, password).futureValue
result should be('right)
val newPasswordHash = userDao.findById(user.id).futureValue.get.password
originalPasswordHash should not be equal(newPasswordHash)
}
}
|
ldrygala/bootzooka
|
backend/src/test/scala/com/softwaremill/bootzooka/passwordreset/application/PasswordResetServiceSpec.scala
|
Scala
|
apache-2.0
| 3,424
|
package gitbucket.core.api
import gitbucket.core.util.JGitUtil.TagInfo
import gitbucket.core.util.RepositoryName
import org.eclipse.jgit.lib.Ref
case class ApiRefCommit(
sha: String,
`type`: String,
url: ApiPath
)
case class ApiRef(
ref: String,
node_id: String = "",
url: ApiPath,
`object`: ApiRefCommit,
)
object ApiRef {
def fromRef(
repositoryName: RepositoryName,
ref: Ref
): ApiRef =
ApiRef(
ref = ref.getName,
url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/git/${ref.getName}"),
`object` = ApiRefCommit(
sha = ref.getObjectId.getName,
url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/commits/${ref.getObjectId.getName}"),
`type` = "commit"
)
)
def fromTag(
repositoryName: RepositoryName,
tagInfo: TagInfo
): ApiRef =
ApiRef(
ref = s"refs/tags/${tagInfo.name}",
url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/git/refs/tags/${tagInfo.name}"),
//the GH api distinguishes between "releases" and plain git tags
//for "releases", the api returns a reference to the release object (with type `tag`)
//this would be something like s"/api/v3/repos/${repositoryName.fullName}/git/tags/<hash-of-tag>"
//with a hash for the tag, which I do not fully understand
//since this is not yet implemented in GB, we always return a link to the plain `commit` object,
//which GH does for tags that are not annotated
`object` = ApiRefCommit(
sha = tagInfo.objectId,
url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/commits/${tagInfo.objectId}"),
`type` = "commit"
)
)
}
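/**
* Usage sketch (not part of the original file): the shape of the reference produced for a plain,
* non-annotated tag, built directly from the case classes above. The repository name "owner/repo",
* the tag name and the sha are hypothetical placeholder values.
*/
object ApiRefSketch {
val tagRef: ApiRef = ApiRef(
ref = "refs/tags/v1.0.0",
url = ApiPath("/api/v3/repos/owner/repo/git/refs/tags/v1.0.0"),
`object` = ApiRefCommit(
sha = "0123456789abcdef0123456789abcdef01234567",
url = ApiPath("/api/v3/repos/owner/repo/commits/0123456789abcdef0123456789abcdef01234567"),
`type` = "commit"
)
)
}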
|
gitbucket/gitbucket
|
src/main/scala/gitbucket/core/api/ApiRef.scala
|
Scala
|
apache-2.0
| 1,683
|
/*
* Copyright 2016 Cisco Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
object HelloWorldMain extends App with Service {
implicit val system = ActorSystem("my-system")
implicit val materializer = ActorMaterializer()
println("Hello world starting on http://localhost:8080/hello ...")
Http().bindAndHandle(routes, "0.0.0.0", 8080)
}
trait Service {
val routes =
path("hello") {
get {
complete {
<h1>Say hello to akka-http</h1>
}
}
}
}
|
cisco/elsy
|
examples/sbt-scala/src/main/scala/HelloWorldMain.scala
|
Scala
|
apache-2.0
| 1,241
|
/*
* Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/parser/AuthorizationHeader.scala
*
* Copyright (C) 2011-2012 spray.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package parser
import cats.data.NonEmptyList
import org.http4s.internal.parboiled2.{Rule0, Rule1, ParserInput}
import org.http4s.headers.Authorization
import org.http4s.syntax.string._
private[parser] trait AuthorizationHeader {
def AUTHORIZATION(value: String): ParseResult[`Authorization`] =
new AuthorizationParser(value).parse
// scalastyle:off public.methods.have.type
private class AuthorizationParser(input: ParserInput) extends Http4sHeaderParser[Authorization](input) {
def entry: Rule1[Authorization] = rule {
CredentialDef ~ EOI ~> { creds: Credentials => Authorization(creds) }
}
def CredentialDef = rule {
AuthParamsCredentialsDef |
TokenCredentialsDef
}
def TokenCredentialsDef = rule {
Token ~ LWS ~ token68 ~> {(scheme: String, value: String) =>
Credentials.Token(scheme.ci, value)
}
}
def AuthParamsCredentialsDef = rule {
Token ~ OptWS ~ CredentialParams ~> { (scheme: String, params: NonEmptyList[(String, String)]) =>
Credentials.AuthParams(scheme.ci, params) }
}
def CredentialParams: Rule1[NonEmptyList[(String, String)]] = rule {
oneOrMore(AuthParam).separatedBy(ListSep) ~> {
params: Seq[(String, String)] => NonEmptyList(params.head, params.tail.toList)
}
}
def AuthParam: Rule1[(String, String)] = rule {
Token ~ "=" ~ (Token | QuotedString) ~> { (s1: String, s2: String) => (s1, s2) }
}
def Base64Char: Rule0 = rule { Alpha | Digit | '+' | '/' | '=' }
// https://tools.ietf.org/html/rfc6750#page-5
def b64token: Rule1[String] = rule {
capture(oneOrMore(Alpha | Digit | anyOf("-._~+/")) ~ zeroOrMore('=') )
}
def token68: Rule1[String] = b64token
}
// scalastyle:on public.methods.have.type
}
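/**
* Usage sketch (not part of the original file): exercising the parser above by mixing in the trait.
* The header value is a hypothetical example; it is expected to parse as a token credential
* (scheme "Basic" with a base64 token), since the auth-param alternative cannot match it.
*/
private[parser] object AuthorizationHeaderSketch extends AuthorizationHeader {
val example: ParseResult[Authorization] = AUTHORIZATION("Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
}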
|
ZizhengTai/http4s
|
core/src/main/scala/org/http4s/parser/AuthorizationHeader.scala
|
Scala
|
apache-2.0
| 2,545
|
/**
* FILE: UpgradeBuilding.scala
* PATH: /Codice/sgad/servertier/src/main/scala/sgad/servertier/businesslogic/operations
* CREATION DATE: 25 February 2014
* AUTHOR: ProTech
* EMAIL: protech.unipd@gmail.com
*
* This file is property of the ProTech group and is released under the Apache v2 license.
*
* CHANGE LOG:
* 2014-02-25 - Class created - Segantin Fabio
*/
package sgad.servertier.businesslogic.operations
import sgad.servertier.dataaccess.data.userdata.{BuildingPossession, UserData}
import java.util.NoSuchElementException
import sgad.servertier.dataaccess.data.shareddata.{BuildingWithLevel, DataFactory}
/**
* Class representing the operation of upgrading a building owned by the user.
*/
class UpgradeBuilding extends Operation {
/**
* Method that performs the upgrade of a building.
* @param userData Data of the user on which the operation will be performed.
* @param data Data accompanying the operation request.
* @param loginAuthorization Authorization to serve login requests. Defaults to false.
* @param registrationAuthorization Authorization to serve registration requests. Defaults to false.
* @param userAuthorization Authorization to serve user requests. Defaults to false.
* @param internalAuthorization Authorization to serve internal requests. Defaults to false.
* @return The response string.
*/
def execute(userData: UserData, data: String, loginAuthorization: Boolean, registrationAuthorization: Boolean,
userAuthorization: Boolean, internalAuthorization: Boolean): String = {
OperationFactory.getOperation("UpdateUserData").execute(userData, "", internalAuthorization = true)
var answer = "data:false, unauthorized:true"
if (userAuthorization) {
// check the authorization
val dataMap = decodeData(data)
try {
if (userData.getAuthenticationData.getAuthenticationString == dataMap("authentication")) {
// check the authentication of the request
val building = userData.getOwnedBuilding(dataMap("key"))
val nextLevelBuilding = DataFactory.getBuilding(building.getBuilding.getNextLevelKey)
val preconditionCheck = nextLevelBuilding.getPrecondition.forall((building) => {
userData.buildingIsOwned(building.asInstanceOf[BuildingWithLevel].getKey)
}) // check the preconditions
val resourceCheck = nextLevelBuilding.getCost.getQuantityResource.forall((resource) => {
userData.getOwnedResource(resource.getResource.getKey).getQuantity >= resource.getQuantity
})
// check the resources
val workersavaible = userData.getOwnedUnitMap.values.foldLeft(0)((current, unit) => {
// add one for every owned unit that is a builder
var count = current
if (unit.getUnit.getIsBuilder)
count += unit.getQuantity
count
}) - userData.getOwnedBuildingsMap.values.foldLeft(0)((current: Int, building) => {
// for every owned building, add 1 if it is not finished
var count = current
if (!building.getIsFinished) {
count += 1
}
count
})
// and subtract the in-progress buildings from the builders to get the number of workers still available
answer = "data:false, resources:" + resourceCheck + ", precondition:" + preconditionCheck + ", workers:" + workersavaible + ", messages:" + parsePiggy(userData.getPiggy)
if (preconditionCheck && resourceCheck && workersavaible > 0) {
// entering this branch means all the conditions to upgrade a building are satisfied
upgradeBuilding(userData, building, nextLevelBuilding)
// call the helper method that performs the building upgrade
answer = "data:true, messages:" + parsePiggy(userData.getPiggy)
}
} else {
// authentication was not successful
answer = "data:false, authentication:false"
}
} catch {
case _: NoSuchElementException => answer = "data:false, parameters:false"
}
}
"{" + answer + "}"
}
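/*
 * Summary of the response strings produced by execute above (derived from the code):
 *   {data:false, unauthorized:true}      - the caller lacks the user authorization
 *   {data:false, authentication:false}   - the authentication string does not match
 *   {data:false, parameters:false}       - a required parameter or key is missing
 *   {data:false, resources:..., precondition:..., workers:..., messages:...}
 *                                        - one of the checks failed or no worker is free
 *   {data:true, messages:...}            - the upgrade was performed
 */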
/**
* Method used by the class to perform the upgrade; it requires that the consistency checks on the data have already been performed.
* @param data The user data of whoever requests the upgrade.
* @param possession The current BuildingPossession.
* @param level The model of the next-level building.
* @return Unit
*/
private def upgradeBuilding(data: UserData, possession: BuildingPossession, level: BuildingWithLevel) = {
val building = data.getOwnedBuilding(possession.getKey)
// take the building in question
data.removeBuildingPossession(possession)
// remove the current building from the owned ones
building.setBuilding(level)
// switch the building model of the building being upgraded
data.addBuildingPossession(building)
// and add it back to the owned buildings.
building.setTime(System.currentTimeMillis / 1000L)
// set the construction start time to the current time in seconds.
val unitInProgress = building.getUnitInProgress
// take the units that were queued
if (unitInProgress != null) // if there are any
building.getUnitInProgress.setQuantity(0) // set their quantity to zero
level.getCost.getQuantityResource.foreach((resource) => {
// for every required resource
val owned = data.getOwnedResource(resource.getResource.getKey)
owned.setQuantity(owned.getQuantity - resource.getQuantity)
// remove it from the owned ones.
})
}
}
|
protechunipd/SGAD
|
Codice/sgad/servertier/src/main/scala/sgad/servertier/businesslogic/operations/UpgradeBuilding.scala
|
Scala
|
apache-2.0
| 5,493
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.apache.accumulo.core.security.Authorizations
import org.geotools.data.{Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.index.z2.Z2Index
import org.locationtech.geomesa.index.index.z3.Z3Index
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.conf.IndexId
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.index.IndexMode
import org.locationtech.geomesa.utils.io.WithClose
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ConfigurableIndexesTest extends Specification with TestWithFeatureType {
sequential
override val spec = s"name:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled='${Z3Index.name}'"
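// The "geomesa.indices.enabled" hint in the spec above restricts the schema to the listed indices
// (only Z3 here); the "add another empty index" test below later extends it with Z2 via updateSchema.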
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, s"f-$i")
sf.setAttribute(0, s"name-$i")
sf.setAttribute(1, s"2016-01-01T0$i:01:00.000Z")
sf.setAttribute(2, s"POINT(4$i 5$i)")
sf
}
addFeatures(features)
"AccumuloDataStore" should {
"only create the z3 index" >> {
val indices = ds.manager.indices(sft)
indices must haveLength(1)
indices.head.name mustEqual Z3Index.name
val z3Tables = indices.head.getTableNames()
z3Tables must not(beEmpty)
foreach(z3Tables)(t => ds.connector.tableOperations().exists(t) must beTrue)
}
"be able to use z3 for spatial queries" >> {
val filter = "BBOX(geom,40,50,50,60)"
val query = new Query(sftName, ECQL.toFilter(filter))
val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must haveSize(10)
results.map(_.getID) must containTheSameElementsAs((0 until 10).map(i => s"f-$i"))
}
"be able to use z3 for spatial ors" >> {
val filter = "BBOX(geom,40,50,45,55) OR BBOX(geom,44,54,50,60) "
val query = new Query(sftName, ECQL.toFilter(filter))
val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must haveSize(10)
results.map(_.getID) must containTheSameElementsAs((0 until 10).map(i => s"f-$i"))
}
"be able to use z3 for spatial and attribute ors" >> {
val filter = "BBOX(geom,40,50,45,55) OR name IN ('name-6', 'name-7', 'name-8', 'name-9')"
val query = new Query(sftName, ECQL.toFilter(filter))
val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must haveSize(10)
results.map(_.getID) must containTheSameElementsAs((0 until 10).map(i => s"f-$i"))
}
"add another empty index" >> {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
val updated = SimpleFeatureTypes.mutable(sft)
updated.setIndices(sft.getIndices :+ IndexId(Z2Index.name, Z2Index.version, Seq("geom"), IndexMode.ReadWrite))
ds.updateSchema(sftName, updated)
val indices = ds.manager.indices(updated)
indices must haveLength(2)
indices.map(_.name) must containTheSameElementsAs(Seq(Z3Index.name, Z2Index.name))
forall(indices) { i =>
val tables = i.getTableNames()
tables must not(beEmpty)
foreach(tables)(t => ds.connector.tableOperations().exists(t) must beTrue)
if (i.name == Z2Index.name) {
foreach(tables) { table =>
WithClose(ds.connector.createScanner(table, new Authorizations))(_.iterator.hasNext must beFalse)
}
} else {
ok
}
}
}
"use another index" >> {
val filter = "BBOX(geom,40,50,51,61)"
val query = new Query(sftName, ECQL.toFilter(filter))
var results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must beEmpty
val sf = new ScalaSimpleFeature(ds.getSchema(sftName), s"f-10")
sf.setAttribute(0, "name-10")
sf.setAttribute(1, "2016-01-01T10:01:00.000Z")
sf.setAttribute(2, "POINT(50 60)")
addFeatures(Seq(sf))
results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must haveSize(1)
results.head.getID mustEqual "f-10"
}
"use the original index" >> {
val filter = "BBOX(geom,40,50,51,61) AND dtg DURING 2016-01-01T00:00:00.000Z/2016-01-02T00:00:00.000Z"
val query = new Query(sftName, ECQL.toFilter(filter))
val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results must haveSize(11)
results.map(_.getID) must containTheSameElementsAs((0 until 11).map(i => s"f-$i"))
}
"throw an exception if the indices are not valid" >> {
val schema = "*geom:LineString:srid=4326;geomesa.indices.enabled="
forall(Seq("z2", "xz3", "z3", "attr", "xz2,xz3", "foo")) { enabled =>
ds.createSchema(SimpleFeatureTypes.createType(sft.getTypeName + "_fail", s"$schema'$enabled'")) must
throwAn[IllegalArgumentException]
}
}
}
}
|
aheyne/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/ConfigurableIndexesTest.scala
|
Scala
|
apache-2.0
| 5,799
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import com.twitter.zipkin.Constants
/**
* A span represents one RPC request. A trace is made up of many spans.
*
* A span can contain multiple annotations; some are always included, such as
* Client send -> Server receive -> Server send -> Client receive.
*
* Some are created by users, describing application specific information,
* such as cache hits/misses.
*/
object Span {
def apply(span: Span): Span = Span(span.traceId, span.name, span.id,
span.parentId, span.annotations, span.binaryAnnotations)
}
/**
* @param traceId random long that identifies the trace, will be set in all spans in this trace
* @param name name of span, can be rpc method name for example
* @param id random long that identifies this span
* @param parentId reference to the parent span in the trace tree
* @param annotations annotations, containing a timestamp and some value. both user generated and
* some fixed ones from the tracing framework
* @param binaryAnnotations binary annotations, can contain more detailed information such as
* serialized objects
*/
case class Span(traceId: Long, name: String, id: Long, parentId: Option[Long],
annotations: List[Annotation], binaryAnnotations: Seq[BinaryAnnotation]) {
/**
* Order annotations by timestamp.
*/
val timestampOrdering = new Ordering[Annotation] {
def compare(a: Annotation, b: Annotation) = {a.timestamp.compare(b.timestamp)}
}
def serviceNames: Set[String] = {
annotations.flatMap(a => a.host.map(h => h.serviceName.toLowerCase)).toSet
}
/**
* Tries to extract the best possible service name
*/
def serviceName: Option[String] = {
if (annotations.isEmpty) {
None
} else {
val sName = serverSideAnnotations.flatMap(_.host).headOption.map(_.serviceName)
val cName = clientSideAnnotations.flatMap(_.host).headOption.map(_.serviceName)
sName match {
case Some(s) => Some(s)
case None => cName
}
}
}
/**
* Iterate through list of annotations and return the one with the given value.
*/
def getAnnotation(value: String): Option[Annotation] = {
annotations.find {a => a.value == value}
}
/**
* Take two spans with the same span id and merge all data into one of them.
*/
def mergeSpan(mergeFrom: Span): Span = {
if (id != mergeFrom.id) {
throw new IllegalArgumentException("Span ids must match")
}
// ruby tracing can give us an empty name in one part of the span
val selectedName = name match {
case "" => mergeFrom.name
case "Unknown" => mergeFrom.name
case _ => name
}
new Span(traceId, selectedName, id, parentId,
annotations ++ mergeFrom.annotations,
binaryAnnotations ++ mergeFrom.binaryAnnotations)
}
/**
* Get the first annotation by timestamp.
*/
def firstAnnotation: Option[Annotation] = {
try {
Some(annotations.min(timestampOrdering))
} catch {
case e: UnsupportedOperationException => None
}
}
/**
* Get the last annotation by timestamp.
*/
def lastAnnotation: Option[Annotation] = {
try {
Some(annotations.max(timestampOrdering))
} catch {
case e: UnsupportedOperationException => None
}
}
/**
* Endpoints involved in this span
*/
def endpoints: Set[Endpoint] = {
annotations.flatMap(a => a.host).toSet
}
/**
* Assuming this is an RPC span, is it from the client side?
*/
def isClientSide(): Boolean = {
annotations.exists(a => {
a.value.equals(Constants.ClientSend) || a.value.equals(Constants.ClientRecv)
})
}
/**
* Pick out the core client side annotations
*/
def clientSideAnnotations: Seq[Annotation] = {
annotations.filter(a => Constants.CoreClient.contains(a.value))
}
/**
* Pick out the core server side annotations
*/
def serverSideAnnotations: Seq[Annotation] = {
annotations.filter(a => Constants.CoreServer.contains(a.value))
}
/**
* Duration of this span. May be None if we cannot find any annotations.
*/
def duration: Option[Long] = {
for (first <- firstAnnotation; last <- lastAnnotation)
yield last.timestamp - first.timestamp
}
/**
* @return true if Span contains at most one of each core annotation
* false otherwise
*/
def isValid: Boolean = {
Constants.CoreAnnotations.map { c =>
annotations.filter { _.value == c }.length > 1
}.count {b => b} == 0
}
/**
* Get the annotations as a map with value to annotation bindings.
*/
def getAnnotationsAsMap(): Map[String, Annotation] = annotations.map{ a => a.value -> a}.toMap
}
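/**
* Usage sketch (not part of the original file): a minimal Span built from the case class above.
* With an empty annotation list, firstAnnotation and lastAnnotation are None, so duration and
* serviceName are None as well; adding core annotations (Constants.ClientSend, Constants.ClientRecv,
* ...) with timestamps and hosts is what makes duration, serviceName and isClientSide meaningful.
* The name "get" and the ids are hypothetical values.
*/
object SpanUsageSketch {
val bare: Span = Span(traceId = 1L, name = "get", id = 2L, parentId = None,
annotations = Nil, binaryAnnotations = Seq.empty)
// bare.duration == None, bare.serviceName == None, bare.isValid == true
}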
|
vdt/zipkin
|
zipkin-common/src/main/scala/com/twitter/zipkin/common/Span.scala
|
Scala
|
apache-2.0
| 5,290
|
/*
* Copyright (C) 2015 Language Technology Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package model
import model.queryable.impl.RelationshipQueryableImpl
import scalikejdbc.WrappedResultSet
/**
* Representation for relationships.
*
* @param id unique id and primary key of the relationship.
* @param e1 first entity. The order of entities is determined alphabetically. In particular, if (e1, e2) is a
* relationship, (e2, e1) is not.
* @param e2 second entity.
* @param frequency frequency of the relationship (i.e. co-occurrence) in the underlying data.
*/
case class Relationship(id: Long, e1: Long, e2: Long, var frequency: Int = 0)
/**
* Companion object for [[model.Relationship]] instances.
*/
object Relationship extends RelationshipQueryableImpl {
def apply(rs: WrappedResultSet): Relationship = Relationship(
rs.long("id"),
rs.long("entity1"),
rs.long("entity2"),
rs.int("frequency")
)
}
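/**
* Usage sketch (not part of the original file): constructing a relationship directly. The ids and
* the frequency are hypothetical values; by the convention documented above the (e1, e2) ordering
* is fixed, so the reversed pair is never stored as a separate relationship.
*/
object RelationshipSketch {
val rel: Relationship = Relationship(id = 1L, e1 = 10L, e2 = 42L, frequency = 3)
// frequency is a var, so it can be updated in place as new co-occurrences are counted:
def countOccurrence(): Unit = rel.frequency += 1
}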
|
tudarmstadt-lt/DIVID-DJ
|
common/src/main/scala/model/Relationship.scala
|
Scala
|
agpl-3.0
| 1,580
|
import stainless.annotation._
object MutateInside2 {
case class Mut[@mutable T](var t: T)
case class Thing(var field: Int)
def resetThing(m: Mut[Thing]): Unit = {
m.t = Thing(0)
}
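// Note: resetThing only rebinds the wrapper's field `t` to a fresh Thing; the Thing instance passed
// in by the caller is never mutated, which is why the assertion in main below still holds.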
def main(): Unit = {
val thing = Thing(123)
resetThing(Mut(thing))
assert(thing.field == 123)
}
}
|
epfl-lara/stainless
|
frontends/benchmarks/imperative/valid/MutateInside2.scala
|
Scala
|
apache-2.0
| 309
|
package ucesoft.cbm.misc
import ucesoft.cbm.cpu.Memory
import java.awt.Desktop
import java.io.PrintWriter
import java.io.FileWriter
object BasicListExplorer {
private[this] val TOKEN_MAP = Map(
0x80 -> "END",
0x81 -> "FOR",
0x82 -> "NEXT",
0x83 -> "DATA",
0x84 -> "INPUT#",
0x85 -> "INPUT",
0x86 -> "DIM",
0x87 -> "READ",
0x88 -> "LET",
0x89 -> "GOTO",
0x8a -> "RUN",
0x8b -> "IF",
0x8c -> "RESTORE",
0x8d -> "GOSUB",
0x8e -> "RETURN",
0x8f -> "REM",
0x90 -> "STOP",
0x91 -> "ON",
0x92 -> "WAIT",
0x93 -> "LOAD",
0x94 -> "SAVE",
0x95 -> "VERIFY",
0x96 -> "DEF",
0x97 -> "POKE",
0x98 -> "PRINT#",
0x99 -> "PRINT",
0x9a -> "CONT",
0x9b -> "LIST",
0x9c -> "CLR",
0x9d -> "CMD",
0x9e -> "SYS",
0x9f -> "OPEN",
0xa0 -> "CLOSE",
0xa1 -> "GET",
0xa2 -> "NEW",
0xa3 -> "TAB(",
0xa4 -> "TO",
0xa5 -> "FN",
0xa6 -> "SPC(",
0xa7 -> "THEN",
0xa8 -> "NOT",
0xa9 -> "STEP",
0xaa -> "+",
0xab -> "-",
0xac -> "*",
0xad -> "/",
0xae -> "^",
0xaf -> "AND",
0xb0 -> "OR",
0xb1 -> ">",
0xb2 -> "=",
0xb3 -> "<",
0xb4 -> "SGN",
0xb5 -> "INT",
0xb6 -> "ABS",
0xb7 -> "USR",
0xb8 -> "FRE",
0xb9 -> "POS",
0xba -> "SQR",
0xbb -> "RND",
0xbc -> "LOG",
0xbd -> "EXP",
0xbe -> "COS",
0xbf -> "SIN",
0xc0 -> "TAN",
0xc1 -> "ATN",
0xc2 -> "PEEK",
0xc3 -> "LEN",
0xc4 -> "STR$",
0xc5 -> "VAL",
0xc6 -> "ASC",
0xc7 -> "CHR$",
0xc8 -> "LEFT$",
0xc9 -> "RIGHT$",
0xca -> "MID$",
0xcb -> "GO",
0xff -> "{pi}",
0xcc -> "RGR",
0xcd -> "RCLR",
0xce -> "*PREFIX*",
0xcf -> "JOY",
0xd0 -> "RDOT",
0xd1 -> "DEC",
0xd2 -> "HEX$",
0xd3 -> "ERR$",
0xd4 -> "INSTR",
0xd5 -> "ELSE",
0xd6 -> "RESUME",
0xd7 -> "TRAP",
0xd8 -> "TRON",
0xd9 -> "TROFF",
0xda -> "SOUND",
0xdb -> "VOL",
0xdc -> "AUTO",
0xdd -> "PUDEF",
0xde -> "GRAPHIC",
0xdf -> "PAINT",
0xe0 -> "CHAR",
0xe1 -> "BOX",
0xe2 -> "CIRCLE",
0xe3 -> "GSHAPE",
0xe4 -> "SSHAPE",
0xe5 -> "DRAW",
0xe6 -> "LOCATE",
0xe7 -> "COLOR",
0xe8 -> "SCNCLR",
0xe9 -> "SCALE",
0xea -> "HELP",
0xeb -> "DO",
0xec -> "LOOP",
0xed -> "EXIT",
0xee -> "DIRECTORY",
0xef -> "DSAVE",
0xf0 -> "DLOAD",
0xf1 -> "HEADER",
0xf2 -> "SCRATCH",
0xf3 -> "COLLECT",
0xf4 -> "COPY",
0xf5 -> "RENAME",
0xf6 -> "BACKUP",
0xf7 -> "DELETE",
0xf8 -> "RENUMBER",
0xf9 -> "KEY",
0xfa -> "MONITOR",
0xfb -> "USING",
0xfc -> "UNTIL",
0xfd -> "WHILE",
0xfe -> "*PREFIX*"
)
private[this] val EXTENDED_TOKEN_MAP = Map(
0xce -> Map (
0x02 -> "POT",
0x03 -> "BUMP",
0x04 -> "PEN",
0x05 -> "RSPPOS",
0x06 -> "RSSPRITE",
0x07 -> "RSPCOLOR",
0x08 -> "XOR",
0x09 -> "RWINDOW",
0x0A -> "POINTER"),
0xfe -> Map(
0x02 -> "BANK",
0x03 -> "FILTER",
0x04 -> "PLAY",
0x05 -> "TEMPO",
0x06 -> "MOVSPR",
0x07 -> "SPRITE",
0x08 -> "SPRCOLOR",
0x09 -> "RREG",
0x0a -> "ENVELOPE",
0x0b -> "SLEEP",
0x0c -> "CATALOG",
0x0d -> "DOPEN",
0x0e -> "APPEND",
0x0f -> "DCLOSE",
0x10 -> "BSAVE",
0x11 -> "BLOAD",
0x12 -> "RECORD",
0x13 -> "CONCAT",
0x14 -> "DVERIFY",
0x15 -> "DCLEAR",
0x16 -> "SPRSAV",
0x17 -> "COLLISION",
0x18 -> "BEGIN",
0x19 -> "BEND",
0x1a -> "WINDOW",
0x1b -> "BOOT",
0x1c -> "WIDTH",
0x1d -> "SPRDEF",
0x1e -> "QUIT",
0x1f -> "STASH",
0x21 -> "FETCH",
0x23 -> "SWAP",
0x24 -> "OFF",
0x25 -> "FAST",
0x26 -> "SLOW"
)
)
private def findToken(token:Int,nextByte:Int) : (String,Int) = {
TOKEN_MAP get token match {
case None => "{Token not found}" -> 1
case Some(t) if t == "*PREFIX*" =>
EXTENDED_TOKEN_MAP get token match {
case None => "{Ext. token not found}" -> 1
case Some(emap) =>
emap get nextByte match {
case None => "{Ext. byte token not found}" -> 1
case Some(t) => t -> 2
}
}
case Some(t) => t -> 1
}
}
// taken from C64Studio
private def mapChar(c:Int) : String = {
c match {
case 5 => "{white}"
case 10 => "\\n"
case 17 => "{down}"
case 18 => "{rvon}"
case 19 => "{home}"
case 20 => "{del}"
case 28 => "{red}"
case 29 => "{right}"
case 30 => "{green}"
case 31 => "{blue}"
case 92 => "{pound}"
case 94 => "^"
case 95 => "{Shift-ArrowLeft}"
case 96 => "{Shift-*}"
case 97 => "{Shift-A}"
case 98 => "{Shift-B}"
case 99 => "{Shift-C}"
case 100 => "{Shift-D}"
case 101 => "{Shift-E}"
case 102 => "{Shift-F}"
case 103 => "{Shift-G}"
case 104 => "{Shift-H}"
case 105 => "{Shift-I}"
case 106 => "{Shift-J}"
case 107 => "{Shift-K}"
case 108 => "{Shift-L}"
case 109 => "{Shift-M}"
case 110 => "{Shift-N}"
case 111 => "{Shift-O}"
case 112 => "{Shift-P}"
case 113 => "{Shift-Q}"
case 114 => "{Shift-R}"
case 115 => "{Shift-S}"
case 116 => "{Shift-T}"
case 117 => "{Shift-U}"
case 118 => "{Shift-V}"
case 119 => "{Shift-W}"
case 120 => "{Shift-X}"
case 121 => "{Shift-Y}"
case 122 => "{Shift-Z}"
case 123 => "{Shift-+}"
case 124 => "{CBM--}"
case 125 => "{Shift--}"
case 127 => "{CBM-*}"
case 129 => "{orange}"
case 133 => "{F1}"
case 134 => "{F3}"
case 135 => "{F5}"
case 136 => "{F7}"
case 137 => "{F2}"
case 138 => "{F4}"
case 139 => "{F6}"
case 140 => "{F8}"
case 144 => "{black}"
case 145 => "{up}"
case 146 => "{rvof}"
case 147 => "{clr}"
case 148 => "{ins}"
case 149 => "{brown}"
case 150 => "{lred}"
case 151 => "{gry1}"
case 152 => "{gry2}"
case 153 => "{lgrn}"
case 154 => "{lblu}"
case 155 => "{gry3}"
case 156 => "{purple}"
case 157 => "{left}"
case 158 => "{yellow}"
case 159 => "{cyn}"
case 161 => "{CBM-K}"
case 162 => "{CBM-I}"
case 163 => "{CBM-T}"
case 164 => "{CBM-@}"
case 165 => "{CBM-G}"
case 166 => "{CBM-+}"
case 167 => "{CBM-N}"
case 168 => "{CBM-£}"
case 169 => "{Shift-£}"
case 170 => "{CBM-M}"
case 171 => "{CBM-Q}"
case 172 => "{CBM-D}"
case 173 => "{CBM-Z}"
case 174 => "{CBM-S}"
case 175 => "{CBM-P}"
case 176 => "{CBM-A}"
case 177 => "{CBM-E}"
case 178 => "{CBM-R}"
case 179 => "{CBM-W}"
case 180 => "{CBM-H}"
case 181 => "{CBM-J}"
case 182 => "{CBM-L}"
case 183 => "{CBM-Y}"
case 184 => "{CBM-U}"
case 185 => "{CBM-O}"
case 187 => "{CBM-F}"
case 188 => "{CBM-C}"
case 189 => "{CBM-X}"
case 190 => "{CBM-V}"
case 191 => "{CBM-B}"
case 255 => "{Shift-Arrowup}"
case p if p >= 32 && p < 95 => p.toChar.toString
case x => s"{$x}"
}
}
def createSource(ram:Memory,startAddress:Int): StringBuilder = {
val sb = new StringBuilder
var adr = startAddress
var keepListing = true
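    // A tokenized BASIC line is laid out as: a 2-byte pointer to the next line (0 marks
    // the end of the program), the 2-byte line number, then the tokenized text ending
    // with a 0 byte.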
while (keepListing && adr < 0x10000) {
val nextAdr = ram.read(adr) | ram.read(adr + 1) << 8
adr += 2
keepListing = nextAdr != 0
if (keepListing) {
val line = ram.read(adr) | ram.read(adr + 1) << 8
var stringMode = false
adr += 2
sb.append(s"$line ")
var token = ram.read(adr)
while (token != 0 && adr < 0x10000) {
if (token == 0x22) stringMode = !stringMode
val nextByte = ram.read(adr + 1)
if (!stringMode && (token & 0x80) > 0) {
val (text,length) = findToken(token,nextByte)
adr += length
sb.append(text)
}
else {
sb.append(mapChar(token))
adr += 1
}
token = ram.read(adr)
}
adr = nextAdr
sb.append("\\n")
}
}
sb
}
def list(ram:Memory,startAddress:Int) : Unit = {
val sb = createSource(ram, startAddress)
if (Desktop.isDesktopSupported) {
val file = java.io.File.createTempFile("kernal64",".txt")
file.deleteOnExit
val pw = new PrintWriter(new FileWriter(file))
pw.println(sb.toString)
pw.close
Desktop.getDesktop.edit(file)
}
else println(sb)
}
}
|
abbruzze/kernal64
|
Kernal64/src/ucesoft/cbm/misc/BasicListExplorer.scala
|
Scala
|
mit
| 9,222
|
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops
import org.platanios.tensorflow.api.core.Graph
import org.platanios.tensorflow.api.core.client.Session
import org.platanios.tensorflow.api.core.exception.InvalidArgumentException
import org.platanios.tensorflow.api.implicits.Implicits._
import org.platanios.tensorflow.api.ops.basic.Basic
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.api.utilities.using
import org.scalatestplus.junit.JUnitSuite
import org.junit.Test
/**
* @author Emmanouil Antonios Platanios
*/
class TextSuite extends JUnitSuite {
@Test def testRegexReplaceRemovePrefix(): Unit = using(Graph()) { graph =>
Op.createWith(graph) {
val input = Basic.constant(Tensor("a:foo", "a:bar", "a:foo", "b:baz", "b:qux", "ca:b"))
val output = Text.regexReplace(input, "^(a:|b:)", "", replaceGlobal = false)
val session = Session()
val result = session.run(fetches = output)
assert(result.entriesIterator.toSeq.map(_.asInstanceOf[String]) == Seq("foo", "bar", "foo", "baz", "qux", "ca:b"))
}
}
@Test def testRegexReplace(): Unit = using(Graph()) { graph =>
Op.createWith(graph) {
val input = Basic.constant(Tensor("aba\\naba", "abcdabcde"))
val output = Text.regexReplace(input, "a.*a", "(\\\\0)")
val session = Session()
val result = session.run(fetches = output)
assert(result.entriesIterator.toSeq.map(_.asInstanceOf[String]) == Seq("(aba)\\n(aba)", "(abcda)bcde"))
}
}
@Test def testRegexReplaceEmptyMatch(): Unit = using(Graph()) { graph =>
Op.createWith(graph) {
val input = Basic.constant(Tensor("abc", "1"))
val output = Text.regexReplace(input, "", "x")
val session = Session()
val result = session.run(fetches = output)
assert(result.entriesIterator.toSeq == Seq("xaxbxcx", "x1x"))
}
}
@Test def testRegexReplaceInvalidPattern(): Unit = using(Graph()) { graph =>
Op.createWith(graph) {
val input = Basic.constant(Tensor("abc", "1"))
val output = Text.regexReplace(input, "A[", "x")
val session = Session()
assert(intercept[InvalidArgumentException](session.run(fetches = output)).getMessage ===
"Invalid pattern: A[, error: missing ]: [\\n\\t [[{{node RegexReplace}}]]")
}
}
@Test def testRegexReplaceGlobal(): Unit = using(Graph()) { graph =>
Op.createWith(graph) {
val input = Basic.constant(Tensor("ababababab", "abcabcabc", ""))
val output = Text.regexReplace(input, "ab", "abc", replaceGlobal = true)
val session = Session()
val result = session.run(fetches = output)
assert(result.entriesIterator.toSeq == Seq("abcabcabcabcabc", "abccabccabcc", ""))
}
}
}
|
eaplatanios/tensorflow_scala
|
modules/api/src/test/scala/org/platanios/tensorflow/api/ops/TextSuite.scala
|
Scala
|
apache-2.0
| 3,362
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalautils
import org.scalatest._
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
import scala.collection.{mutable,immutable}
class ConversionCheckedMapEqualityConstraintsSpec extends Spec with NonImplicitAssertions with ConversionCheckedTripleEquals with MapEqualityConstraints {
case class Super(size: Int)
class Sub(sz: Int) extends Super(sz)
val super1: Super = new Super(1)
val sub1: Sub = new Sub(1)
val super2: Super = new Super(2)
val sub2: Sub = new Sub(2)
val nullSuper: Super = null
case class Fruit(name: String)
class Apple extends Fruit("apple")
class Orange extends Fruit("orange")
object `the MapEqualityConstraints trait` {
def `should allow any Map to be compared with any other Map, so long as the element types of the two Maps adhere to the equality constraint in force for those types` {
assert(mutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3) === immutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3))
assert(mutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3) === immutable.HashMap('a' -> 1L, 'b' -> 2L, 'c' -> 3L)) // does not compile last time I checked
assert(mutable.HashMap('a' -> 1L, 'b' -> 2L, 'c' -> 3L) === immutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3)) // does not compile last time I checked
assert(immutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3) === mutable.HashMap('a' -> 1L, 'b' -> 2L, 'c' -> 3L)) // does not compile last time I checked
assert(immutable.HashMap('a' -> 1L, 'b' -> 2L, 'c' -> 3L) === mutable.HashMap('a' -> 1, 'b' -> 2, 'c' -> 3)) // does not compile last time I checked
assert(mutable.HashMap('a' -> new Apple, 'b' -> new Apple) === immutable.HashMap('a' -> new Fruit("apple"), 'b' -> new Fruit("apple")))
assert(immutable.HashMap('a' -> new Fruit("apple"), 'b' -> new Fruit("apple")) === mutable.HashMap('a' -> new Apple, 'b' -> new Apple))
// assert(mutable.HashMap('a' -> new Apple, 'b' -> new Apple) === immutable.HashMap('a' -> new Orange, 'b' -> new Orange)) // does not compile last time I checked
// assert(immutable.HashMap('a' -> new Apple, 'b' -> new Apple) === mutable.HashMap('a' -> new Orange, 'b' -> new Orange)) // does not compile last time I checked
// assert(immutable.HashMap('a' -> new Orange, 'b' -> new Orange) === mutable.HashMap('a' -> new Apple, 'b' -> new Apple)) // does not compile last time I checked
// assert(mutable.HashMap('a' -> new Orange, 'b' -> new Orange) === immutable.HashMap('a' -> new Apple, 'b' -> new Apple)) // does not compile last time I checked
}
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalautils/ConversionCheckedMapEqualityConstraintsSpec.scala
|
Scala
|
apache-2.0
| 3,323
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.granturing.spark.powerbi
import org.apache.spark.sql.{SaveMode, SQLContext}
import org.apache.spark.{SparkContext, SparkConf}
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import scala.concurrent.Await
case class Person(name: String, age: Int, birthday: java.sql.Date, timestamp: java.sql.Timestamp)
class PowerBISuite extends FunSuite with BeforeAndAfterAll {
val dataset = "PowerBI Spark Test"
var datasetId: String = _
val table = "People"
val tableSchema = Table(
table, Seq(
Column("name", "string"),
Column("age", "Int64"),
Column("birthday", "Datetime"),
Column("timestamp", "Datetime")
))
val group = sys.env.get("POWERBI_GROUP")
val opts = {
val _opts = Map("dataset" -> dataset, "table" -> table)
group match {
case Some(g) => _opts ++ Map("group" -> g)
case None => _opts
}
}
val conf = new SparkConf().
setAppName("spark-powerbi-test").
setMaster("local[1]").
set("spark.task.maxFailures", "1")
val clientConf = ClientConf.fromSparkConf(new SparkConf())
val client: Client = new Client(clientConf)
val sc: SparkContext = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
override def beforeAll: Unit = {
val groupId = group match {
case Some(grp) => {
val grpOpt = Await.result(client.getGroups, clientConf.timeout).filter(g => grp.equals(g.name)).map(_.id).headOption
grpOpt match {
case Some(g) => Some(g)
case None => sys.error(s"group $grp not found")
}
}
case None => None
}
val ds = Await.result(client.getDatasets(groupId), clientConf.timeout)
datasetId = ds.filter(_.name == dataset).headOption match {
case Some(d) => {
Await.result(client.clearTable(d.id, table, groupId), clientConf.timeout)
Await.result(client.updateTableSchema(d.id, table, tableSchema, groupId), clientConf.timeout)
d.id
}
case None => {
val result = Await.result(client.createDataset(Schema(dataset, Seq(tableSchema)), groupId), clientConf.timeout)
result.id
}
}
}
override def afterAll: Unit = {
sc.stop()
}
test("RDD saves to PowerBI") {
val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime))))
data.saveToPowerBI(dataset, table, group = group)
}
test(s"RDD with over ${clientConf.maxPartitions} partitions saves to PowerBI") {
val data = sc.parallelize(
0 to clientConf.maxPartitions map { i =>
Person(s"Person$i", i, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime))
},
clientConf.maxPartitions+1)
data.saveToPowerBI(dataset, table, group = group)
}
test("RDD over batch size saves to PowerBI") {
val data = sc.parallelize(
1 to clientConf.batchSize + 1 map { i =>
Person(s"Person$i", i, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime))
}
, 1)
data.saveToPowerBI(dataset, table, group = group)
}
test("DataFrame saves with overwrite to PowerBI") {
val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF
data.
write.
format("com.granturing.spark.powerbi").
options(opts).
mode(SaveMode.Overwrite).save
}
test("DataFrame saves with append to PowerBI") {
val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF
data.
write.
format("com.granturing.spark.powerbi").
options(opts).
mode(SaveMode.Append).save
}
test("DataFrame save fails if exists") {
val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF
val ex = intercept[RuntimeException] {
data.
write.
format("com.granturing.spark.powerbi").
options(opts).
mode(SaveMode.ErrorIfExists).save
}
assertResult(ex.getMessage())(s"table $table already exists")
}
}
|
granturing/spark-power-bi
|
src/test/scala/com/granturing/spark/powerbi/PowerBISuite.scala
|
Scala
|
apache-2.0
| 4,851
|
///*
// * Copyright 2009-2010 LinkedIn, Inc
// *
// * Licensed under the Apache License, Version 2.0 (the "License"); you may not
// * use this file except in compliance with the License. You may obtain a copy of
// * the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// * License for the specific language governing permissions and limitations under
// * the License.
// */
//package com.linkedin.norbert
//package network
//package common
//
//import org.specs.Specification
//import org.specs.mock.Mockito
//import com.google.protobuf.Message
//import protos.NorbertExampleProtos
//
//class MessageRegistrySpec extends Specification with Mockito {
//// val messageRegistry = new MessageRegistry
//
// val proto = NorbertExampleProtos.Ping.newBuilder.setTimestamp(System.currentTimeMillis).build
//
// "MessageRegistry" should {
// "throw a NullPointerException if requestMessage is null" in {
//// messageRegistry.registerMessage(null, null) must throwA[NullPointerException]
// }
//
//    "throw an InvalidMessageException if the requestMessage isn't registered" in {
// messageRegistry.hasResponse(proto) must throwA[InvalidMessageException]
// messageRegistry.responseMessageDefaultInstanceFor(proto) must throwA[InvalidMessageException]
// }
//
// "contains returns true if the specified request message has been registered" in {
// val response = mock[Message]
//
// messageRegistry.contains(proto) must beFalse
// messageRegistry.registerMessage(proto, proto)
// messageRegistry.contains(proto) must beTrue
// }
//
// "return true for hasResponse if the responseMessage is not null" in {
// messageRegistry.registerMessage(proto, null)
// messageRegistry.hasResponse(proto) must beFalse
//
// messageRegistry.registerMessage(proto, proto)
// messageRegistry.hasResponse(proto) must beTrue
// }
//
// "return true if the response message is of the correct type" in {
// val name = "norbert.PingResponse"
// messageRegistry.registerMessage(proto, null)
// messageRegistry.validResponseFor(proto, name) must beFalse
//
// messageRegistry.registerMessage(proto, proto)
// messageRegistry.validResponseFor(proto, name) must beFalse
//
// messageRegistry.validResponseFor(proto, proto.getDescriptorForType.getFullName) must beTrue
// }
// }
//}
|
linkedin-sna/norbert
|
network/src/test/scala/com/linkedin/norbert/network/common/MessageRegistrySpec.scala
|
Scala
|
apache-2.0
| 2,616
|
package scala.lms
package epfl
package test4
import common.{BlockExp,EffectExp}
import common.ScalaGenEffect // don't import FunctionsExp
import test2._
import test3._
import util.ClosureCompare
import scala.reflect.SourceContext
import scala.collection.{immutable, mutable}
trait FunctionsExpClever extends test3.FunctionsExp {
def exec[A:Typ,B:Typ](fun: Exp[A]=>Exp[B], arg: Exp[A]): Exp[B]
override def doApply[A:Typ,B:Typ](fun: Exp[A => B], arg: Exp[A])(implicit pos: SourceContext): Exp[B] = fun match {
case Def(Lambda(fun)) =>
exec(fun, arg)
case _ => super.doApply(fun, arg)
}
}
trait FunctionExpUnfoldAll extends FunctionsExpClever {
def exec[A:Typ,B:Typ](fun: Exp[A]=>Exp[B], arg: Exp[A]): Exp[B] = {
fun(arg)
}
}
trait FunctionExpUnfoldFixedDepth extends FunctionsExpClever {
var curDepth: Int = 0
def maxDepth: Int = 5
def exec[A:Typ,B:Typ](fun: Exp[A]=>Exp[B], arg: Exp[A]): Exp[B] = {
if (curDepth < maxDepth) {
curDepth += 1
val res = fun(arg)
curDepth -= 1
res
} else
Apply(Sym[A=>B](-2), arg)
}
}
trait FunctionExpUnfoldRecursion extends FunctionsExpClever with FunctionsCanonical {
var recursion: List[(Function[_,_], Exp[Any], Int)] = List()
val maxDepth: Int = 1
def exec[A:Typ,B:Typ](f: Exp[A]=>Exp[B], x: Exp[A]): Exp[B] = {
recursion.find(m => m._1 == f) match {
case Some((_, y, `maxDepth`)) => // hit recursion bound!
println("-- hit recursion: " + f.getClass + " " + x + " <- "+ y)
// y should be a symbol, and it might take on value x
Apply(Sym[A=>B](-2), x)
case Some((_, y, recCount)) => // hit, but below depth bound
val saveRecursion = recursion
recursion = (f,x, recCount + 1)::recursion
val res = f(x) // look for recursion
recursion = saveRecursion
res
case None =>
val saveRecursion = recursion
recursion = (f,x, 1)::recursion
val res = f(x) // look for recursion
recursion = saveRecursion
res
}
}
}
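// FunctionsCanonical deduplicates staged lambdas: closures that canonicalize to the same
// value (via ClosureCompare) are mapped back to the first function encountered, so
// doLambda reuses a single IR node instead of creating a new one per syntactic occurrence.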
trait FunctionsCanonical extends FunctionsExp with ClosureCompare {
var funTable: List[(Function[_,_], Any)] = List()
def lookupFun[A:Typ,B:Typ](f: Exp[A]=>Exp[B]): (Exp[A]=>Exp[B]) = {
var can = canonicalize(f)
funTable.find(_._2 == can) match {
case Some((g, _)) =>
//println("-- found fun: " + g.getClass.getName)
g.asInstanceOf[Exp[A]=>Exp[B]]
case _ =>
funTable = (f,can)::funTable
f
}
}
override def doLambda[A:Typ,B:Typ](fun: Exp[A]=>Exp[B])(implicit pos: SourceContext) = {
super.doLambda(lookupFun(fun))
}
}
trait FunctionsExternalDef0 extends FunctionsExp with BlockExp {
case class DefineFun[A,B](res: Block[B])(val arg: Sym[A]) extends Def[A=>B]
override def boundSyms(e: Any): List[Sym[Any]] = e match {
case f@DefineFun(y) => f.arg::effectSyms(y)
case _ => super.boundSyms(e)
}
override def symsFreq(e: Any): List[(Sym[Any], Double)] = e match {
case DefineFun(y) => freqHot(y)
case _ => super.symsFreq(e)
}
}
trait FunctionsExternalDef01 extends FunctionsExternalDef0 { // not used
override def doLambda[A:Typ,B:Typ](f: Exp[A]=>Exp[B])(implicit pos: SourceContext): Exp[A=>B] = {
var funSym = fresh[A=>B]
var argSym = fresh[A]//Sym(-1)
createDefinition(funSym, DefineFun[A,B](Block(f(argSym)))(argSym)) //FIXME: use reify (conflict with test3.effects)
funSym
}
}
trait FunctionsExternalDef1 extends FunctionsExternalDef0 with ClosureCompare { // not used (New: used by TestMatcherNew)
var funTable: List[(Function[_,_], Any, Sym[_])] = List()
override def doLambda[A:Typ,B:Typ](f: Exp[A]=>Exp[B])(implicit pos: SourceContext): Exp[A=>B] = {
var can = canonicalize(f)
funTable.find(_._2 == can) match {
case Some((g, _, funSym)) =>
//println("-- found fun: " + g.getClass.getName)
funSym.asInstanceOf[Sym[A=>B]]
case _ =>
var funSym = fresh[A=>B]
var argSym = fresh[A]//Sym(-1)
val g = (x: Exp[A]) => Apply(funSym, x): Exp[B]
funTable = (g,can,funSym)::funTable
val y = Block(f(argSym)) // should use reifyEffects!
createDefinition(funSym, DefineFun[A,B](y)(argSym))
funSym
}
}
}
trait FunctionsExternalDef2 extends FunctionsCanonical with FunctionsExternalDef0 {
override def lookupFun[A:Typ,B:Typ](f: Exp[A]=>Exp[B]): (Exp[A]=>Exp[B]) = {
var can = canonicalize(f)
funTable.find(_._2 == can) match {
case Some((g, _)) =>
//println("-- found fun: " + g.getClass.getName)
g.asInstanceOf[Exp[A]=>Exp[B]]
case _ =>
var funSym = fresh[A=>B]
var argSym = fresh[A]//Sym(-1)
val g = (x: Exp[A]) => Apply(funSym, x): Exp[B]
funTable = (g,can)::funTable
Block(f(argSym)) match { //FIXME: use reify (conflict with test3.effects)
case Block(c @ Const(_)) =>
val g = (x: Exp[A]) => c
funTable = (g,can)::funTable // ok?
g
case e =>
createDefinition(funSym, DefineFun[A,B](e)(argSym))
g
}
}
}
}
trait ScalaGenFunctionsExternal extends ScalaGenEffect {
val IR: FunctionsExternalDef0 with EffectExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
case e@DefineFun(y) =>
emitValDef(sym, "{" + quote(e.arg) + ": (" + remap(e.arg.tp) + ") => "/*}*/)
emitBlock(y)
stream.println(quote(getBlockResult(y)))
stream.println("}")
case Apply(fun, arg) =>
emitValDef(sym, quote(fun) + "(" + quote(arg) + ")")
case Apply2(fun, arg1, arg2) =>
emitValDef(sym, quote(fun) + "(" + quote(arg1) + ", " + quote(arg2) + ")")
case _ => super.emitNode(sym, rhs)
}
}
|
astojanov/virtualization-lms-core
|
test-src/epfl/test4-functions/Functions.scala
|
Scala
|
bsd-3-clause
| 5,901
|
import akka.actor.ActorSystem
import akka.testkit.TestKit
import models.{Stock, LimitAskOrder, AskOrderBook}
import org.scalatest.{BeforeAndAfterAll, FeatureSpecLike, GivenWhenThen, Matchers}
import scala.util.Random
class AskOrderBookSpec extends TestKit(ActorSystem("AskOrderBookSpec")) with
FeatureSpecLike with
GivenWhenThen with
Matchers with
BeforeAndAfterAll {
/** Shutdown actor system when finished. */
override def afterAll(): Unit = {
system.shutdown()
}
/** Maximum share price for testing. */
val maxPrice = 1000.0
  /** Maximum number of shares for testing. */
val maxQuantity = 1000000
val testInstrument = Stock("AAPL")
feature("An AskOrderBook should maintain price priority") {
scenario("Multiple limit ask orders are added to an empty ask order book.") {
Given("An empty ask order book,")
val askOrderBook = AskOrderBook(testInstrument)
When("two limit ask orders are added to the book with the lower priced order first,")
val lowPrice = Random.nextDouble() * maxPrice
val quantity1 = Random.nextInt(maxQuantity)
val ask1 = LimitAskOrder(testActor, testInstrument, lowPrice, quantity1)
val highPrice = (1 + Random.nextDouble()) * lowPrice
val quantity2 = Random.nextInt(maxQuantity)
val ask2 = LimitAskOrder(testActor, testInstrument, highPrice, quantity2)
askOrderBook += (ask1, ask2)
Then("the lower priced order should be at the top of the ask order book queue.")
askOrderBook.dequeue() should be(ask1)
askOrderBook.dequeue() should be(ask2)
askOrderBook.headOption should be(None)
Given("An empty ask order book,")
assert(askOrderBook.isEmpty)
When("that two limit orders asks are added to the book with the higher priced order first,")
askOrderBook += (ask2, ask1)
Then("the lower priced order should be at the top of the ask order book queue.")
askOrderBook.dequeue() should be(ask1)
askOrderBook.dequeue() should be(ask2)
askOrderBook.headOption should be(None)
}
scenario("An aggressive limit ask order lands in an ask order book with existing orders.") {
val askOrderBook = AskOrderBook(testInstrument)
Given("An ask order book that contains existing orders")
val lowPrice = Random.nextDouble() * maxPrice
val quantity1 = Random.nextInt(maxQuantity)
val existingAsk1 = LimitAskOrder(testActor, testInstrument, lowPrice, quantity1)
val highPrice = (1 + Random.nextDouble()) * lowPrice
val quantity2 = Random.nextInt(maxQuantity)
val existingAsk2 = LimitAskOrder(testActor, testInstrument, highPrice, quantity2)
askOrderBook += (existingAsk1, existingAsk2)
When("an aggressive limit ask order lands in the book,")
val aggressivePrice = Random.nextDouble() * lowPrice
val quantity3 = Random.nextInt(maxQuantity)
val aggressiveAsk = LimitAskOrder(testActor, testInstrument, aggressivePrice, quantity3)
askOrderBook += aggressiveAsk
Then("the aggressive limit ask order should be at the top of the ask order book queue.")
askOrderBook.head should be(aggressiveAsk)
askOrderBook.clear()
}
scenario("A passive limit ask order lands in an ask order book with existing orders.") {
val askOrderBook = AskOrderBook(testInstrument)
Given("An ask order book that contains existing orders")
val lowPrice = Random.nextDouble() * maxPrice
val quantity1 = Random.nextInt(maxQuantity)
val existingAsk1 = LimitAskOrder(testActor, testInstrument, lowPrice, quantity1)
val highPrice = (1 + Random.nextDouble()) * lowPrice
val quantity2 = Random.nextInt(maxQuantity)
val existingAsk2 = LimitAskOrder(testActor, testInstrument, highPrice, quantity2)
askOrderBook += (existingAsk1, existingAsk2)
When("a passive limit ask order lands in the book,")
val passivePrice = 0.5 * (lowPrice + highPrice)
val quantity3 = Random.nextInt(maxQuantity)
val passiveAsk = LimitAskOrder(testActor, testInstrument, passivePrice, quantity3)
askOrderBook += passiveAsk
Then("the ask order book should maintain price priority.")
askOrderBook.dequeue() should be(existingAsk1)
askOrderBook.dequeue() should be(passiveAsk)
askOrderBook.dequeue() should be(existingAsk2)
}
}
feature("An AskOrderBook should maintaining time priority.") {
scenario("A limit ask order lands in an ask order book with existing orders.") {
val askOrderBook = AskOrderBook(testInstrument)
Given("An ask order book that contains existing orders")
val lowPrice = Random.nextDouble() * maxPrice
val quantity1 = Random.nextInt(maxQuantity)
val existingAsk1 = LimitAskOrder(testActor, testInstrument, lowPrice, quantity1)
val highPrice = (1 + Random.nextDouble()) * lowPrice
val quantity2 = Random.nextInt(maxQuantity)
val existingAsk2 = LimitAskOrder(testActor, testInstrument, highPrice, quantity2)
askOrderBook +=(existingAsk1, existingAsk2)
When("a limit ask order at the same price as the best existing limit ask order,")
val quantity3 = Random.nextInt(maxQuantity)
val incomingAsk = LimitAskOrder(testActor, testInstrument, lowPrice, quantity3)
askOrderBook += incomingAsk
Then("the best existing limit ask order should be at the top of the ask order book queue.")
askOrderBook.dequeue() should be(existingAsk1)
askOrderBook.dequeue() should be(incomingAsk)
askOrderBook.dequeue() should be(existingAsk2)
}
}
feature("An AskOrderBook with existing orders should have a best limit order") {
Given("An ask order book that contains existing orders")
val lowPrice = Random.nextDouble() * maxPrice
val quantity1 = Random.nextInt(maxQuantity)
val existingAsk1 = LimitAskOrder(testActor, testInstrument, lowPrice, quantity1)
val highPrice = (1 + Random.nextDouble()) * lowPrice
val quantity2 = Random.nextInt(maxQuantity)
val existingAsk2 = LimitAskOrder(testActor, testInstrument, highPrice, quantity2)
val askOrderBook = AskOrderBook(testInstrument)
askOrderBook +=(existingAsk1, existingAsk2)
Then("the best existing limit ask order should be at the top of the ask order book queue.")
askOrderBook.bestLimitOrder should be(askOrderBook.headOption)
askOrderBook.clear()
}
feature("An empty AskOrderBook should not have a best limit order") {
Given("An empty ask order book")
val askOrderBook = AskOrderBook(testInstrument)
Then("the best existing limit ask order should be None.")
askOrderBook.bestLimitOrder should be(None)
}
}
|
davidrpugh/play-securities-exchange
|
test/AskOrderBookSpec.scala
|
Scala
|
apache-2.0
| 6,744
|
package peregin.gpv.gui.gauge
import peregin.gpv.model.Telemetry
/**
 * In addition to the gauge painter, the chart painter is initialized with the current telemetry data, allowing
 * it to paint a chart based on specific data (such as elevation, speed, heart rate, etc.).
*/
trait ChartPainter extends GaugePainter {
protected var data: Option[Telemetry] = None
def telemetry: Telemetry = data.getOrElse(Telemetry.empty())
def telemetry_= (on: Telemetry): Unit = data = Some(on)
}
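// Illustrative usage sketch (hypothetical names, not part of the original trait): a concrete
// painter mixes in ChartPainter and receives its data before painting.
//
//   val painter: ChartPainter = new ElevationChartPainter   // ElevationChartPainter is assumed
//   painter.telemetry = loadedTelemetry                     // stores Some(loadedTelemetry)
//   painter.telemetry                                       // falls back to Telemetry.empty() when unset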
|
peregin/gps-overlay-on-video
|
src/main/scala/peregin/gpv/gui/gauge/ChartPainter.scala
|
Scala
|
mit
| 495
|
package models
import java.net.URI
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import play.api.Play.current
/**
 * Handles configuration, context and so on.
*
* @author Daniel Voigt Godoy.
*/
object SparkCommons {
val appName = "PlayBenford"
/*val hadoop = true
val localMode = false
val masterIP = "ip-172-31-0-199"*/
val hadoop = false
val localMode = true
val masterIP = "MONSTER"
val masterPort = 7077
val metricsPort = 4040
val masterURL = if (localMode) "local[*]" else "spark://" + masterIP + ":" + masterPort.toString
val metricsURL = "http://" + masterIP + ":" + metricsPort.toString + "/api/v1/applications/" + appName + "/jobs"
val tmpFolder = "/tmp"
def libs: Seq[String] = {
val libDir = play.api.Play.application.getFile("lib")
return if ( libDir.exists ) {
libDir.listFiles().map(_.getCanonicalFile().getAbsolutePath()).filter(_.endsWith(".jar"))
} else {
throw new IllegalStateException(s"lib dir is missing: $libDir")
}
}
//build the SparkConf object at once
lazy val conf = {
new SparkConf(false)
.setMaster(masterURL)
.setAppName(appName)
.set("spark.logConf", "true")
.setJars(libs)
}
lazy val sc = SparkContext.getOrCreate(conf)
lazy val sqlContext = new SQLContext(sc)
def copyToHdfs(localPath: String, fileName: String) = {
val hdfsConfig = new Configuration
val hdfsURI = "hdfs://" + SparkCommons.masterIP + ":9000"
val hdfs = FileSystem.get(new URI(hdfsURI), hdfsConfig)
val destFile = hdfsURI + SparkCommons.tmpFolder + "/" + fileName
val targetPath = new Path(destFile)
if (hdfs.exists(targetPath)) {
hdfs.delete(targetPath, true)
}
val oriPath = new Path(localPath + fileName)
hdfs.copyFromLocalFile(oriPath, targetPath)
hdfs.close()
}
}
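// Illustrative usage sketch (the file name below is hypothetical): the lazy members above are
// created on first access and then reused.
//
//   SparkCommons.copyToHdfs("/data/", "sample.json")   // ends up under hdfs://MONSTER:9000/tmp
//   val df = SparkCommons.sqlContext.read.json(SparkCommons.tmpFolder + "/sample.json")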
|
dvgodoy/play-benford-analysis
|
app/models/SparkCommons.scala
|
Scala
|
apache-2.0
| 1,953
|
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.accumulo.commands
import com.beust.jcommander.{JCommander, Parameters}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.tools.accumulo.GeoMesaConnectionParams
import org.locationtech.geomesa.tools.accumulo.commands.DescribeCommand._
import org.locationtech.geomesa.tools.common.FeatureTypeNameParam
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType._
import scala.collection.JavaConversions._
import scala.util.control.NonFatal
class DescribeCommand(parent: JCommander) extends CommandWithCatalog(parent) with LazyLogging {
override val command = "describe"
override val params = new DescribeParameters
def execute() = {
logger.info(s"Describing attributes of feature '${params.featureName}' from catalog table '$catalog'...")
try {
val sft = ds.getSchema(params.featureName)
val sb = new StringBuilder()
sft.getAttributeDescriptors.foreach { attr =>
sb.clear()
val name = attr.getLocalName
// TypeName
sb.append(name)
sb.append(": ")
sb.append(attr.getType.getBinding.getSimpleName)
if (sft.getDtgField.exists(_ == name)) sb.append(" (ST-Time-index)")
if (sft.getGeometryDescriptor == attr) sb.append(" (ST-Geo-index)")
if (attr.isIndexed) sb.append(" (Indexed)")
        if (attr.getDefaultValue != null) sb.append("- Default Value: ").append(attr.getDefaultValue)
println(sb.toString())
}
val userData = sft.getUserData
if (!userData.isEmpty) {
println("\\nUser data:")
userData.foreach {
case (KEYWORDS_KEY, v) => println(s" $KEYWORDS_KEY: " +
"[".concat(v.asInstanceOf[String].split(KEYWORDS_DELIMITER)
.map{ "\\"%s\\"".format(_)}.mkString(",").concat("]")))
case (key, value) => println(s" $key: $value")
}
}
} catch {
case npe: NullPointerException =>
logger.error(s"Error: feature '${params.featureName}' not found. Check arguments...", npe)
case e: Exception =>
logger.error(s"Error describing feature '${params.featureName}': " + e.getMessage, e)
case NonFatal(e) =>
logger.warn(s"Non fatal error encountered describing feature '${params.featureName}': ", e)
} finally {
ds.dispose()
}
}
}
object DescribeCommand {
@Parameters(commandDescription = "Describe the attributes of a given feature in GeoMesa")
class DescribeParameters extends GeoMesaConnectionParams
with FeatureTypeNameParam {}
}
|
mdzimmerman/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/accumulo/commands/DescribeCommand.scala
|
Scala
|
apache-2.0
| 3,151
|
package controllers
import org.scalatest._
import play.api.test._
import play.api.test.Helpers._
/**
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
class ApplicationIT extends FunSpec with Matchers {
describe ("Application") {
it ("should send 404 on a bad request") {
running(FakeApplication()) {
val bad = route(FakeRequest(GET, "/boum")).get
status(bad) should be (NOT_FOUND)
}
}
it ("should render the index page") {
running(FakeApplication()) {
val result = route(FakeRequest(GET, "/")).get
status(result) should be (SEE_OTHER)
redirectLocation(result) should be (Some("/signIn"))
}
}
}
}
|
phosphene/play-slick-silhouette-scalatest
|
test/controllers/ApplicationIT.scala
|
Scala
|
apache-2.0
| 757
|
def typePattern1(value: Any):String =
value match {
case _ :Int => "Int matched"
case x :String => "String matched. "+x.toUpperCase
case _ :Boolean => "Boolean matched"
case _ => "Another type matched"
}
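// Sample results of the match above:
//   typePattern1(42)     // "Int matched"
//   typePattern1("hi")   // "String matched. HI"
//   typePattern1(true)   // "Boolean matched"
//   typePattern1(3.14)   // "Another type matched"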
|
grzegorzbalcerek/scala-book-examples
|
examples/PatternsTypes1.scala
|
Scala
|
mit
| 224
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.net.{URI, URISyntaxException}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.hadoop.fs.{FileContext, FsConstants, Path}
import org.apache.hadoop.fs.permission.{AclEntry, AclEntryScope, AclEntryType, FsAction, FsPermission}
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, UnresolvedAttribute}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTableType._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.DescribeTableSchema
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier, CaseInsensitiveMap}
import org.apache.spark.sql.execution.datasources.{DataSource, PartitioningUtils}
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.csv.CSVDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.json.JsonDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetDataSourceV2
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
/**
* A command to create a table with the same definition of the given existing table.
* In the target table definition, the table comment is always empty but the column comments
* are identical to the ones defined in the source table.
*
* The CatalogTable attributes copied from the source table are storage(inputFormat, outputFormat,
* serde, compressed, properties), schema, provider, partitionColumnNames, bucketSpec by default.
*
* Use "CREATE TABLE t1 LIKE t2 USING file_format" to specify new provider for t1.
* For Hive compatibility, use "CREATE TABLE t1 LIKE t2 STORED AS hiveFormat"
* to specify new file storage format (inputFormat, outputFormat, serde) for t1.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* LIKE [other_db_name.]existing_table_name
* [USING provider |
* [
* [ROW FORMAT row_format]
* [STORED AS file_format] [WITH SERDEPROPERTIES (...)]
* ]
* ]
* [locationSpec]
* [TBLPROPERTIES (property_name=property_value, ...)]
* }}}
*/
case class CreateTableLikeCommand(
targetTable: TableIdentifier,
sourceTable: TableIdentifier,
fileFormat: CatalogStorageFormat,
provider: Option[String],
properties: Map[String, String] = Map.empty,
ifNotExists: Boolean) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val sourceTableDesc = catalog.getTempViewOrPermanentTableMetadata(sourceTable)
val newProvider = if (provider.isDefined) {
if (!DDLUtils.isHiveTable(provider)) {
// check the validation of provider input, invalid provider will throw
// AnalysisException, ClassNotFoundException, or NoClassDefFoundError
DataSource.lookupDataSource(provider.get, sparkSession.sessionState.conf)
}
provider
} else if (sourceTableDesc.tableType == CatalogTableType.VIEW) {
Some(sparkSession.sessionState.conf.defaultDataSourceName)
} else if (fileFormat.inputFormat.isDefined) {
Some(DDLUtils.HIVE_PROVIDER)
} else {
sourceTableDesc.provider
}
val newStorage = if (fileFormat.inputFormat.isDefined) {
fileFormat
} else {
sourceTableDesc.storage.copy(locationUri = fileFormat.locationUri)
}
// If the location is specified, we create an external table internally.
// Otherwise create a managed table.
val tblType = if (newStorage.locationUri.isEmpty) {
CatalogTableType.MANAGED
} else {
CatalogTableType.EXTERNAL
}
val newTableDesc =
CatalogTable(
identifier = targetTable,
tableType = tblType,
storage = newStorage,
schema = sourceTableDesc.schema,
provider = newProvider,
partitionColumnNames = sourceTableDesc.partitionColumnNames,
bucketSpec = sourceTableDesc.bucketSpec,
properties = properties)
catalog.createTable(newTableDesc, ifNotExists)
Seq.empty[Row]
}
}
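// Illustrative statements matching the syntax documented above (table names are hypothetical):
//   CREATE TABLE backup_db.events LIKE prod_db.events USING parquet
//   CREATE TABLE IF NOT EXISTS t1 LIKE t2 STORED AS ORC LOCATION '/tmp/t1'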
// TODO: move the rest of the table commands from ddl.scala to this file
/**
* A command to create a table.
*
* Note: This is currently used only for creating Hive tables.
* This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
* [(col1 data_type [COMMENT col_comment], ...)]
* [COMMENT table_comment]
* [PARTITIONED BY (col3 data_type [COMMENT col_comment], ...)]
* [CLUSTERED BY (col1, ...) [SORTED BY (col1 [ASC|DESC], ...)] INTO num_buckets BUCKETS]
* [SKEWED BY (col1, col2, ...) ON ((col_value, col_value, ...), ...)
* [STORED AS DIRECTORIES]
* [ROW FORMAT row_format]
* [STORED AS file_format | STORED BY storage_handler_class [WITH SERDEPROPERTIES (...)]]
* [LOCATION path]
* [TBLPROPERTIES (property_name=property_value, ...)]
* [AS select_statement];
* }}}
*/
case class CreateTableCommand(
table: CatalogTable,
ignoreIfExists: Boolean) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
sparkSession.sessionState.catalog.createTable(table, ignoreIfExists)
Seq.empty[Row]
}
}
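// An illustrative Hive-style statement for the syntax documented above (names are hypothetical):
//   CREATE EXTERNAL TABLE IF NOT EXISTS db1.raw_logs (line STRING COMMENT 'raw text')
//     STORED AS TEXTFILE
//     LOCATION '/data/raw_logs'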
/**
* A command that renames a table/view.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table1 RENAME TO table2;
* ALTER VIEW view1 RENAME TO view2;
* }}}
*/
case class AlterTableRenameCommand(
oldName: TableIdentifier,
newName: TableIdentifier,
isView: Boolean)
extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
// If this is a temp view, just rename the view.
// Otherwise, if this is a real table, we also need to uncache and invalidate the table.
if (catalog.isTemporaryTable(oldName)) {
catalog.renameTable(oldName, newName)
} else {
val table = catalog.getTableMetadata(oldName)
DDLUtils.verifyAlterTableType(catalog, table, isView)
// If an exception is thrown here we can just assume the table is uncached;
// this can happen with Hive tables when the underlying catalog is in-memory.
val wasCached = Try(sparkSession.catalog.isCached(oldName.unquotedString)).getOrElse(false)
if (wasCached) {
try {
sparkSession.catalog.uncacheTable(oldName.unquotedString)
} catch {
case NonFatal(e) => log.warn(e.toString, e)
}
}
// Invalidate the table last, otherwise uncaching the table would load the logical plan
// back into the hive metastore cache
catalog.refreshTable(oldName)
catalog.renameTable(oldName, newName)
if (wasCached) {
sparkSession.catalog.cacheTable(newName.unquotedString)
}
}
Seq.empty[Row]
}
}
/**
* A command that add columns to a table
* The syntax of using this command in SQL is:
* {{{
* ALTER TABLE table_identifier
* ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
* }}}
*/
case class AlterTableAddColumnsCommand(
table: TableIdentifier,
colsToAdd: Seq[StructField]) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val catalogTable = verifyAlterTableAddColumn(sparkSession.sessionState.conf, catalog, table)
try {
sparkSession.catalog.uncacheTable(table.quotedString)
} catch {
case NonFatal(e) =>
log.warn(s"Exception when attempting to uncache table ${table.quotedString}", e)
}
catalog.refreshTable(table)
SchemaUtils.checkColumnNameDuplication(
(colsToAdd ++ catalogTable.schema).map(_.name),
"in the table definition of " + table.identifier,
conf.caseSensitiveAnalysis)
DDLUtils.checkDataColNames(catalogTable, colsToAdd.map(_.name))
catalog.alterTableDataSchema(table, StructType(catalogTable.dataSchema ++ colsToAdd))
Seq.empty[Row]
}
/**
* ALTER TABLE ADD COLUMNS command does not support temporary view/table,
* view, or datasource table with text, orc formats or external provider.
* For datasource table, it currently only supports parquet, json, csv, orc.
*/
private def verifyAlterTableAddColumn(
conf: SQLConf,
catalog: SessionCatalog,
table: TableIdentifier): CatalogTable = {
val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table)
if (catalogTable.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"""
|ALTER ADD COLUMNS does not support views.
|You must drop and re-create the views for adding the new columns. Views: $table
""".stripMargin)
}
if (DDLUtils.isDatasourceTable(catalogTable)) {
DataSource.lookupDataSource(catalogTable.provider.get, conf).
getConstructor().newInstance() match {
// For datasource table, this command can only support the following File format.
// TextFileFormat only default to one column "value"
// Hive type is already considered as hive serde table, so the logic will not
// come in here.
case _: CSVFileFormat | _: JsonFileFormat | _: ParquetFileFormat =>
case _: JsonDataSourceV2 | _: CSVDataSourceV2 |
_: OrcDataSourceV2 | _: ParquetDataSourceV2 =>
case s if s.getClass.getCanonicalName.endsWith("OrcFileFormat") =>
case s =>
throw new AnalysisException(
s"""
|ALTER ADD COLUMNS does not support datasource table with type $s.
|You must drop and re-create the table for adding the new columns. Tables: $table
""".stripMargin)
}
}
catalogTable
}
}
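// An illustrative statement for the syntax documented above (names are hypothetical):
//   ALTER TABLE db1.events ADD COLUMNS (session_id STRING COMMENT 'added later', retries INT)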
/**
* A command that loads data into a Hive table.
*
* The syntax of this command is:
* {{{
* LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE tablename
* [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
case class LoadDataCommand(
table: TableIdentifier,
path: String,
isLocal: Boolean,
isOverwrite: Boolean,
partition: Option[TablePartitionSpec]) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val targetTable = catalog.getTableMetadata(table)
val tableIdentwithDB = targetTable.identifier.quotedString
val normalizedSpec = partition.map { spec =>
PartitioningUtils.normalizePartitionSpec(
spec,
targetTable.partitionColumnNames,
tableIdentwithDB,
sparkSession.sessionState.conf.resolver)
}
if (targetTable.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(s"Target table in LOAD DATA cannot be a view: $tableIdentwithDB")
}
if (DDLUtils.isDatasourceTable(targetTable)) {
throw new AnalysisException(
s"LOAD DATA is not supported for datasource tables: $tableIdentwithDB")
}
if (targetTable.partitionColumnNames.nonEmpty) {
if (partition.isEmpty) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
s"but no partition spec is provided")
}
if (targetTable.partitionColumnNames.size != partition.get.size) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
s"but number of columns in provided partition spec (${partition.get.size}) " +
s"do not match number of partitioned columns in table " +
s"(${targetTable.partitionColumnNames.size})")
}
} else {
if (partition.nonEmpty) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is not " +
s"partitioned, but a partition spec was provided.")
}
}
val loadPath = {
if (isLocal) {
val localFS = FileContext.getLocalFSFileContext()
LoadDataCommand.makeQualified(FsConstants.LOCAL_FS_URI, localFS.getWorkingDirectory(),
new Path(path))
} else {
val loadPath = new Path(path)
// Follow Hive's behavior:
// If no schema or authority is provided with non-local inpath,
// we will use hadoop configuration "fs.defaultFS".
val defaultFSConf = sparkSession.sessionState.newHadoopConf().get("fs.defaultFS")
val defaultFS = if (defaultFSConf == null) new URI("") else new URI(defaultFSConf)
// Follow Hive's behavior:
// If LOCAL is not specified, and the path is relative,
// then the path is interpreted relative to "/user/<username>"
val uriPath = new Path(s"/user/${System.getProperty("user.name")}/")
        // makeQualified() ignores the query-parameter part when creating a path, so the
        // entire string is taken into account when the Path instance is built. This is done
        // mainly with the wildcard scenario in mind: the old logic built a URI first, and if
        // the path contained the wildcard character '?', everything after it was dropped
        // when the URI instance was formed.
LoadDataCommand.makeQualified(defaultFS, uriPath, loadPath)
}
}
val fs = loadPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
    // Resolving invalid URLs that start with file:/// makes the globStatus API throw an
    // IllegalArgumentException, so the call is wrapped in a try/catch block and, once the
    // runtime exception is caught, a generic error is reported to the user.
try {
val fileStatus = fs.globStatus(loadPath)
if (fileStatus == null || fileStatus.isEmpty) {
throw new AnalysisException(s"LOAD DATA input path does not exist: $path")
}
} catch {
case e: IllegalArgumentException =>
log.warn(s"Exception while validating the load path $path ", e)
throw new AnalysisException(s"LOAD DATA input path does not exist: $path")
}
if (partition.nonEmpty) {
catalog.loadPartition(
targetTable.identifier,
loadPath.toString,
normalizedSpec.get,
isOverwrite,
inheritTableSpecs = true,
isSrcLocal = isLocal)
} else {
catalog.loadTable(
targetTable.identifier,
loadPath.toString,
isOverwrite,
isSrcLocal = isLocal)
}
    // Refresh the metadata cache to ensure the data is visible to the users
catalog.refreshTable(targetTable.identifier)
CommandUtils.updateTableStats(sparkSession, targetTable)
Seq.empty[Row]
}
}
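// Illustrative statements matching the syntax documented above (paths and names are hypothetical):
//   LOAD DATA LOCAL INPATH '/tmp/part-00000' OVERWRITE INTO TABLE sales
//   LOAD DATA INPATH 'data/2016/' INTO TABLE sales PARTITION (year=2016, month=1)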
object LoadDataCommand {
/**
* Returns a qualified path object. Method ported from org.apache.hadoop.fs.Path class.
*
* @param defaultUri default uri corresponding to the filesystem provided.
* @param workingDir the working directory for the particular child path wd-relative names.
* @param path Path instance based on the path string specified by the user.
* @return qualified path object
*/
private[sql] def makeQualified(defaultUri: URI, workingDir: Path, path: Path): Path = {
val newPath = new Path(workingDir, path)
val pathUri = if (path.isAbsolute()) path.toUri() else newPath.toUri()
if (pathUri.getScheme == null || pathUri.getAuthority == null &&
defaultUri.getAuthority != null) {
val scheme = if (pathUri.getScheme == null) defaultUri.getScheme else pathUri.getScheme
val authority = if (pathUri.getAuthority == null) {
if (defaultUri.getAuthority == null) "" else defaultUri.getAuthority
} else {
pathUri.getAuthority
}
try {
val newUri = new URI(scheme, authority, pathUri.getPath, null, pathUri.getFragment)
new Path(newUri)
} catch {
case e: URISyntaxException =>
throw new IllegalArgumentException(e)
}
} else {
newPath
}
}
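  // For example (hypothetical values): with defaultUri = hdfs://nn:9000, workingDir =
  // /user/alice and path = data/part-0, the returned path is
  // hdfs://nn:9000/user/alice/data/part-0.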
}
/**
* A command to truncate table.
*
* The syntax of this command is:
* {{{
* TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
case class TruncateTableCommand(
tableName: TableIdentifier,
partitionSpec: Option[TablePartitionSpec]) extends RunnableCommand {
override def run(spark: SparkSession): Seq[Row] = {
val catalog = spark.sessionState.catalog
val table = catalog.getTableMetadata(tableName)
val tableIdentWithDB = table.identifier.quotedString
if (table.tableType == CatalogTableType.EXTERNAL) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE on external tables: $tableIdentWithDB")
}
if (table.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE on views: $tableIdentWithDB")
}
if (table.partitionColumnNames.isEmpty && partitionSpec.isDefined) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE ... PARTITION is not supported " +
s"for tables that are not partitioned: $tableIdentWithDB")
}
if (partitionSpec.isDefined) {
DDLUtils.verifyPartitionProviderIsHive(spark, table, "TRUNCATE TABLE ... PARTITION")
}
val partCols = table.partitionColumnNames
val locations =
if (partCols.isEmpty) {
Seq(table.storage.locationUri)
} else {
val normalizedSpec = partitionSpec.map { spec =>
PartitioningUtils.normalizePartitionSpec(
spec,
partCols,
table.identifier.quotedString,
spark.sessionState.conf.resolver)
}
val partLocations =
catalog.listPartitions(table.identifier, normalizedSpec).map(_.storage.locationUri)
// Fail if the partition spec is fully specified (not partial) and the partition does not
// exist.
for (spec <- partitionSpec if partLocations.isEmpty && spec.size == partCols.length) {
throw new NoSuchPartitionException(table.database, table.identifier.table, spec)
}
partLocations
}
val hadoopConf = spark.sessionState.newHadoopConf()
val ignorePermissionAcl = SQLConf.get.truncateTableIgnorePermissionAcl
locations.foreach { location =>
if (location.isDefined) {
val path = new Path(location.get)
try {
val fs = path.getFileSystem(hadoopConf)
// Not all fs impl. support these APIs.
var optPermission: Option[FsPermission] = None
var optAcls: Option[java.util.List[AclEntry]] = None
if (!ignorePermissionAcl) {
val fileStatus = fs.getFileStatus(path)
try {
optPermission = Some(fileStatus.getPermission())
} catch {
case NonFatal(_) => // do nothing
}
try {
optAcls = Some(fs.getAclStatus(path).getEntries)
} catch {
case NonFatal(_) => // do nothing
}
}
fs.delete(path, true)
// We should keep original permission/acl of the path.
// For owner/group, only super-user can set it, for example on HDFS. Because
// current user can delete the path, we assume the user/group is correct or not an issue.
fs.mkdirs(path)
if (!ignorePermissionAcl) {
optPermission.foreach { permission =>
try {
fs.setPermission(path, permission)
} catch {
case NonFatal(e) =>
throw new SecurityException(
s"Failed to set original permission $permission back to " +
s"the created path: $path. Exception: ${e.getMessage}")
}
}
optAcls.foreach { acls =>
val aclEntries = acls.asScala.filter(_.getName != null).asJava
              // If the path doesn't have default ACLs, the `setAcl` API will throw an error
              // because it expects user/group/other permissions to be present in the ACL entries.
              // So we need to add the traditional user/group/other permissions
              // in the form of ACL entries.
optPermission.map { permission =>
aclEntries.add(newAclEntry(AclEntryScope.ACCESS,
AclEntryType.USER, permission.getUserAction()))
aclEntries.add(newAclEntry(AclEntryScope.ACCESS,
AclEntryType.GROUP, permission.getGroupAction()))
aclEntries.add(newAclEntry(AclEntryScope.ACCESS,
AclEntryType.OTHER, permission.getOtherAction()))
}
try {
fs.setAcl(path, aclEntries)
} catch {
case NonFatal(e) =>
throw new SecurityException(
s"Failed to set original ACL $aclEntries back to " +
s"the created path: $path. Exception: ${e.getMessage}")
}
}
}
} catch {
case NonFatal(e) =>
throw new AnalysisException(
s"Failed to truncate table $tableIdentWithDB when removing data of the path: $path " +
s"because of ${e.toString}")
}
}
}
// After deleting the data, invalidate the table to make sure we don't keep around a stale
// file relation in the metastore cache.
spark.sessionState.refreshTable(tableName.unquotedString)
// Also try to drop the contents of the table from the columnar cache
try {
spark.sharedState.cacheManager.uncacheQuery(spark.table(table.identifier), cascade = true)
} catch {
case NonFatal(e) =>
log.warn(s"Exception when attempting to uncache table $tableIdentWithDB", e)
}
if (table.stats.nonEmpty) {
// empty table after truncation
val newStats = CatalogStatistics(sizeInBytes = 0, rowCount = Some(0))
catalog.alterTableStats(tableName, Some(newStats))
}
Seq.empty[Row]
}
private def newAclEntry(
scope: AclEntryScope,
aclType: AclEntryType,
permission: FsAction): AclEntry = {
new AclEntry.Builder()
.setScope(scope)
.setType(aclType)
.setPermission(permission).build()
}
}
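// Illustrative statements matching the syntax documented above (names are hypothetical):
//   TRUNCATE TABLE logs
//   TRUNCATE TABLE logs PARTITION (dt='2016-01-01')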
abstract class DescribeCommandBase extends RunnableCommand {
override val output = DescribeTableSchema.describeTableAttributes()
protected def describeSchema(
schema: StructType,
buffer: ArrayBuffer[Row],
header: Boolean): Unit = {
if (header) {
append(buffer, s"# ${output.head.name}", output(1).name, output(2).name)
}
schema.foreach { column =>
append(buffer, column.name, column.dataType.simpleString, column.getComment().orNull)
}
}
protected def append(
buffer: ArrayBuffer[Row], column: String, dataType: String, comment: String): Unit = {
buffer += Row(column, dataType, comment)
}
}
/**
* Command that looks like
* {{{
* DESCRIBE [EXTENDED|FORMATTED] table_name partitionSpec?;
* }}}
*/
case class DescribeTableCommand(
table: TableIdentifier,
partitionSpec: TablePartitionSpec,
isExtended: Boolean)
extends DescribeCommandBase {
override def run(sparkSession: SparkSession): Seq[Row] = {
val result = new ArrayBuffer[Row]
val catalog = sparkSession.sessionState.catalog
if (catalog.isTemporaryTable(table)) {
if (partitionSpec.nonEmpty) {
throw new AnalysisException(
s"DESC PARTITION is not allowed on a temporary view: ${table.identifier}")
}
describeSchema(catalog.lookupRelation(table).schema, result, header = false)
} else {
val metadata = catalog.getTableMetadata(table)
if (metadata.schema.isEmpty) {
        // In older versions (prior to 2.1) of Spark, the table schema can be empty and should be
// inferred at runtime. We should still support it.
describeSchema(sparkSession.table(metadata.identifier).schema, result, header = false)
} else {
describeSchema(metadata.schema, result, header = false)
}
describePartitionInfo(metadata, result)
if (partitionSpec.nonEmpty) {
// Outputs the partition-specific info for the DDL command:
// "DESCRIBE [EXTENDED|FORMATTED] table_name PARTITION (partitionVal*)"
describeDetailedPartitionInfo(sparkSession, catalog, metadata, result)
} else if (isExtended) {
describeFormattedTableInfo(metadata, result)
}
}
result
}
private def describePartitionInfo(table: CatalogTable, buffer: ArrayBuffer[Row]): Unit = {
if (table.partitionColumnNames.nonEmpty) {
append(buffer, "# Partition Information", "", "")
describeSchema(table.partitionSchema, buffer, header = true)
}
}
private def describeFormattedTableInfo(table: CatalogTable, buffer: ArrayBuffer[Row]): Unit = {
    // The following information has already been shown in the previous outputs
val excludedTableInfo = Seq(
"Partition Columns",
"Schema"
)
append(buffer, "", "", "")
append(buffer, "# Detailed Table Information", "", "")
table.toLinkedHashMap.filterKeys(!excludedTableInfo.contains(_)).foreach {
s => append(buffer, s._1, s._2, "")
}
}
private def describeDetailedPartitionInfo(
spark: SparkSession,
catalog: SessionCatalog,
metadata: CatalogTable,
result: ArrayBuffer[Row]): Unit = {
if (metadata.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"DESC PARTITION is not allowed on a view: ${table.identifier}")
}
DDLUtils.verifyPartitionProviderIsHive(spark, metadata, "DESC PARTITION")
val partition = catalog.getPartition(table, partitionSpec)
if (isExtended) describeFormattedDetailedPartitionInfo(table, metadata, partition, result)
}
private def describeFormattedDetailedPartitionInfo(
tableIdentifier: TableIdentifier,
table: CatalogTable,
partition: CatalogTablePartition,
buffer: ArrayBuffer[Row]): Unit = {
append(buffer, "", "", "")
append(buffer, "# Detailed Partition Information", "", "")
append(buffer, "Database", table.database, "")
append(buffer, "Table", tableIdentifier.table, "")
partition.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
append(buffer, "", "", "")
append(buffer, "# Storage Information", "", "")
table.bucketSpec match {
case Some(spec) =>
spec.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
case _ =>
}
table.storage.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
}
}
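// A minimal usage sketch of the DESCRIBE forms handled by DescribeTableCommand above,
// with the same hypothetical `spark` session and partitioned table `db.sales`.
object DescribeTableUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("DESCRIBE TABLE db.sales").show(truncate = false)
    // EXTENDED appends the "# Detailed Table Information" section
    spark.sql("DESCRIBE EXTENDED db.sales").show(truncate = false)
    // With a partition spec, the partition-specific details are shown instead
    spark.sql("DESCRIBE EXTENDED db.sales PARTITION (ds = '2020-01-01')").show(truncate = false)
  }
}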
/**
* Command that looks like
* {{{
* DESCRIBE [QUERY] statement
* }}}
*
 * Parameter 'statement' can be one of the following types:
 * 1. SELECT statements
 * 2. SELECT statements inside set operators (UNION, INTERSECT etc.)
 * 3. VALUES statements
 * 4. TABLE statements. Example: TABLE table_name
 * 5. Statements of the form 'FROM table SELECT *'
* 6. Multi select statements of the following form:
* select * from (from a select * select *)
* 7. Common table expressions (CTEs)
*/
case class DescribeQueryCommand(queryText: String, plan: LogicalPlan)
extends DescribeCommandBase {
override def simpleString(maxFields: Int): String = s"$nodeName $queryText".trim
override def run(sparkSession: SparkSession): Seq[Row] = {
val result = new ArrayBuffer[Row]
val queryExecution = sparkSession.sessionState.executePlan(plan)
describeSchema(queryExecution.analyzed.schema, result, header = false)
result
}
}
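// A minimal usage sketch for DescribeQueryCommand above (hypothetical `spark` session);
// the output is just the schema of the analyzed query.
object DescribeQueryUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("DESCRIBE QUERY SELECT 1 AS id, 'a' AS name").show(truncate = false)
  }
}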
/**
* A command to list the info for a column, including name, data type, comment and column stats.
*
* The syntax of using this command in SQL is:
* {{{
* DESCRIBE [EXTENDED|FORMATTED] table_name column_name;
* }}}
*/
case class DescribeColumnCommand(
table: TableIdentifier,
colNameParts: Seq[String],
isExtended: Boolean)
extends RunnableCommand {
override val output: Seq[Attribute] = {
Seq(
AttributeReference("info_name", StringType, nullable = false,
new MetadataBuilder().putString("comment", "name of the column info").build())(),
AttributeReference("info_value", StringType, nullable = false,
new MetadataBuilder().putString("comment", "value of the column info").build())()
)
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val resolver = sparkSession.sessionState.conf.resolver
val relation = sparkSession.table(table).queryExecution.analyzed
val colName = UnresolvedAttribute(colNameParts).name
val field = {
relation.resolve(colNameParts, resolver).getOrElse {
throw new AnalysisException(s"Column $colName does not exist")
}
}
if (!field.isInstanceOf[Attribute]) {
// If the field is not an attribute after `resolve`, then it's a nested field.
throw new AnalysisException(
s"DESC TABLE COLUMN command does not support nested data types: $colName")
}
val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table)
val colStatsMap = catalogTable.stats.map(_.colStats).getOrElse(Map.empty)
val colStats = if (conf.caseSensitiveAnalysis) colStatsMap else CaseInsensitiveMap(colStatsMap)
val cs = colStats.get(field.name)
val comment = if (field.metadata.contains("comment")) {
Option(field.metadata.getString("comment"))
} else {
None
}
val buffer = ArrayBuffer[Row](
Row("col_name", field.name),
Row("data_type", field.dataType.catalogString),
Row("comment", comment.getOrElse("NULL"))
)
if (isExtended) {
// Show column stats when EXTENDED or FORMATTED is specified.
buffer += Row("min", cs.flatMap(_.min.map(_.toString)).getOrElse("NULL"))
buffer += Row("max", cs.flatMap(_.max.map(_.toString)).getOrElse("NULL"))
buffer += Row("num_nulls", cs.flatMap(_.nullCount.map(_.toString)).getOrElse("NULL"))
buffer += Row("distinct_count",
cs.flatMap(_.distinctCount.map(_.toString)).getOrElse("NULL"))
buffer += Row("avg_col_len", cs.flatMap(_.avgLen.map(_.toString)).getOrElse("NULL"))
buffer += Row("max_col_len", cs.flatMap(_.maxLen.map(_.toString)).getOrElse("NULL"))
val histDesc = for {
c <- cs
hist <- c.histogram
} yield histogramDescription(hist)
buffer ++= histDesc.getOrElse(Seq(Row("histogram", "NULL")))
}
buffer
}
private def histogramDescription(histogram: Histogram): Seq[Row] = {
val header = Row("histogram",
s"height: ${histogram.height}, num_of_bins: ${histogram.bins.length}")
val bins = histogram.bins.zipWithIndex.map {
case (bin, index) =>
Row(s"bin_$index",
s"lower_bound: ${bin.lo}, upper_bound: ${bin.hi}, distinct_count: ${bin.ndv}")
}
header +: bins
}
}
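// A minimal usage sketch for DescribeColumnCommand above, with the hypothetical table
// `db.sales` and column `price`; min/max/num_nulls and friends stay NULL until
// ANALYZE TABLE ... FOR COLUMNS has collected column statistics.
object DescribeColumnUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("ANALYZE TABLE db.sales COMPUTE STATISTICS FOR COLUMNS price")
    spark.sql("DESCRIBE EXTENDED db.sales price").show(truncate = false)
  }
}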
/**
* A command for users to get tables in the given database.
* If a databaseName is not given, the current database will be used.
* The syntax of using this command in SQL is:
* {{{
* SHOW TABLES [(IN|FROM) database_name] [[LIKE] 'identifier_with_wildcards'];
* SHOW TABLE EXTENDED [(IN|FROM) database_name] LIKE 'identifier_with_wildcards'
* [PARTITION(partition_spec)];
* }}}
*/
case class ShowTablesCommand(
databaseName: Option[String],
tableIdentifierPattern: Option[String],
isExtended: Boolean = false,
partitionSpec: Option[TablePartitionSpec] = None) extends RunnableCommand {
// The result of SHOW TABLES/SHOW TABLE has three basic columns: database, tableName and
// isTemporary. If `isExtended` is true, append column `information` to the output columns.
override val output: Seq[Attribute] = {
val tableExtendedInfo = if (isExtended) {
AttributeReference("information", StringType, nullable = false)() :: Nil
} else {
Nil
}
AttributeReference("database", StringType, nullable = false)() ::
AttributeReference("tableName", StringType, nullable = false)() ::
AttributeReference("isTemporary", BooleanType, nullable = false)() :: tableExtendedInfo
}
override def run(sparkSession: SparkSession): Seq[Row] = {
// Since we need to return a Seq of rows, we will call getTables directly
// instead of calling tables in sparkSession.
val catalog = sparkSession.sessionState.catalog
val db = databaseName.getOrElse(catalog.getCurrentDatabase)
if (partitionSpec.isEmpty) {
// Show the information of tables.
val tables =
tableIdentifierPattern.map(catalog.listTables(db, _)).getOrElse(catalog.listTables(db))
tables.map { tableIdent =>
val database = tableIdent.database.getOrElse("")
val tableName = tableIdent.table
val isTemp = catalog.isTemporaryTable(tableIdent)
if (isExtended) {
val information = catalog.getTempViewOrPermanentTableMetadata(tableIdent).simpleString
          Row(database, tableName, isTemp, s"$information\n")
} else {
Row(database, tableName, isTemp)
}
}
} else {
// Show the information of partitions.
//
// Note: tableIdentifierPattern should be non-empty, otherwise a [[ParseException]]
// should have been thrown by the sql parser.
val tableIdent = TableIdentifier(tableIdentifierPattern.get, Some(db))
val table = catalog.getTableMetadata(tableIdent).identifier
val partition = catalog.getPartition(tableIdent, partitionSpec.get)
val database = table.database.getOrElse("")
val tableName = table.table
val isTemp = catalog.isTemporaryTable(table)
val information = partition.simpleString
      Seq(Row(database, tableName, isTemp, s"$information\n"))
}
}
}
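// A minimal usage sketch for ShowTablesCommand above, hypothetical names as before;
// the EXTENDED form adds the `information` column built from the table metadata.
object ShowTablesUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("SHOW TABLES IN db LIKE 'sal*'").show(truncate = false)
    spark.sql("SHOW TABLE EXTENDED IN db LIKE 'sales'").show(truncate = false)
  }
}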
/**
* A command for users to list the properties for a table. If propertyKey is specified, the value
* for the propertyKey is returned. If propertyKey is not specified, all the keys and their
* corresponding values are returned.
* The syntax of using this command in SQL is:
* {{{
* SHOW TBLPROPERTIES table_name[('propertyKey')];
* }}}
*/
case class ShowTablePropertiesCommand(table: TableIdentifier, propertyKey: Option[String])
extends RunnableCommand {
override val output: Seq[Attribute] = {
val schema = AttributeReference("value", StringType, nullable = false)() :: Nil
propertyKey match {
case None => AttributeReference("key", StringType, nullable = false)() :: schema
case _ => schema
}
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalogTable = sparkSession.sessionState.catalog.getTableMetadata(table)
propertyKey match {
case Some(p) =>
val propValue = catalogTable
.properties
.getOrElse(p, s"Table ${catalogTable.qualifiedName} does not have property: $p")
Seq(Row(propValue))
case None =>
catalogTable.properties.map(p => Row(p._1, p._2)).toSeq
}
}
}
/**
* A command to list the column names for a table.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW COLUMNS (FROM | IN) table_identifier [(FROM | IN) database];
* }}}
*/
case class ShowColumnsCommand(
databaseName: Option[String],
tableName: TableIdentifier) extends RunnableCommand {
override val output: Seq[Attribute] = {
AttributeReference("col_name", StringType, nullable = false)() :: Nil
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val lookupTable = databaseName match {
case None => tableName
case Some(db) => TableIdentifier(tableName.identifier, Some(db))
}
val table = catalog.getTempViewOrPermanentTableMetadata(lookupTable)
table.schema.map { c =>
Row(c.name)
}
}
}
/**
* A command to list the partition names of a table. If the partition spec is specified,
 * partitions that match the spec are returned. An [[AnalysisException]] is thrown under
 * the following conditions:
*
* 1. If the command is called for a non partitioned table.
* 2. If the partition spec refers to the columns that are not defined as partitioning columns.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW PARTITIONS [db_name.]table_name [PARTITION(partition_spec)]
* }}}
*/
case class ShowPartitionsCommand(
tableName: TableIdentifier,
spec: Option[TablePartitionSpec]) extends RunnableCommand {
override val output: Seq[Attribute] = {
AttributeReference("partition", StringType, nullable = false)() :: Nil
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val table = catalog.getTableMetadata(tableName)
val tableIdentWithDB = table.identifier.quotedString
/**
     * Validate and throw an [[AnalysisException]] under the following conditions:
* 1. If the table is not partitioned.
* 2. If it is a datasource table.
* 3. If it is a view.
*/
if (table.tableType == VIEW) {
throw new AnalysisException(s"SHOW PARTITIONS is not allowed on a view: $tableIdentWithDB")
}
if (table.partitionColumnNames.isEmpty) {
throw new AnalysisException(
s"SHOW PARTITIONS is not allowed on a table that is not partitioned: $tableIdentWithDB")
}
DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "SHOW PARTITIONS")
/**
* Validate the partitioning spec by making sure all the referenced columns are
     * defined as partitioning columns in the table definition. An [[AnalysisException]] is
* thrown if the partitioning spec is invalid.
*/
if (spec.isDefined) {
val badColumns = spec.get.keySet.filterNot(table.partitionColumnNames.contains)
if (badColumns.nonEmpty) {
val badCols = badColumns.mkString("[", ", ", "]")
throw new AnalysisException(
s"Non-partitioning column(s) $badCols are specified for SHOW PARTITIONS")
}
}
val partNames = catalog.listPartitionNames(tableName, spec)
partNames.map(Row(_))
}
}
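// A minimal usage sketch for ShowPartitionsCommand above, hypothetical names as before;
// views, unpartitioned tables and non-partition columns are rejected as described in run.
object ShowPartitionsUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("SHOW PARTITIONS db.sales").show(truncate = false)
    spark.sql("SHOW PARTITIONS db.sales PARTITION (ds = '2020-01-01')").show(truncate = false)
  }
}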
/**
 * Provides common utilities between `ShowCreateTableCommand` and `ShowCreateTableAsSerdeCommand`.
*/
trait ShowCreateTableCommandBase {
protected val table: TableIdentifier
protected def showTableLocation(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.tableType == EXTERNAL) {
metadata.storage.locationUri.foreach { location =>
builder ++= s"LOCATION '${escapeSingleQuotedString(CatalogUtils.URIToString(location))}'\\n"
}
}
}
protected def showTableComment(metadata: CatalogTable, builder: StringBuilder): Unit = {
metadata
.comment
.map("COMMENT '" + escapeSingleQuotedString(_) + "'\\n")
.foreach(builder.append)
}
protected def showTableProperties(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.properties.nonEmpty) {
val props = metadata.properties.map { case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
builder ++= "TBLPROPERTIES "
builder ++= concatByMultiLines(props)
}
}
protected def concatByMultiLines(iter: Iterable[String]): String = {
    iter.mkString("(\n ", ",\n ", ")\n")
}
}
/**
* A command that shows the Spark DDL syntax that can be used to create a given table.
 * For a Hive serde table, this command will generate Spark DDL that can be used to
 * create a corresponding Spark table.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW CREATE TABLE [db_name.]table_name
* }}}
*/
case class ShowCreateTableCommand(table: TableIdentifier)
extends RunnableCommand with ShowCreateTableCommandBase {
override val output: Seq[Attribute] = Seq(
AttributeReference("createtab_stmt", StringType, nullable = false)()
)
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
if (catalog.isTemporaryTable(table)) {
throw new AnalysisException(
s"SHOW CREATE TABLE is not supported on a temporary view: ${table.identifier}")
} else {
val tableMetadata = catalog.getTableMetadata(table)
// TODO: [SPARK-28692] unify this after we unify the
// CREATE TABLE syntax for hive serde and data source table.
val metadata = if (DDLUtils.isDatasourceTable(tableMetadata)) {
tableMetadata
} else {
// For a Hive serde table, we try to convert it to Spark DDL.
if (tableMetadata.unsupportedFeatures.nonEmpty) {
throw new AnalysisException(
"Failed to execute SHOW CREATE TABLE against table " +
s"${tableMetadata.identifier}, which is created by Hive and uses the " +
"following unsupported feature(s)\\n" +
tableMetadata.unsupportedFeatures.map(" - " + _).mkString("\\n") + ". " +
s"Please use `SHOW CREATE TABLE ${tableMetadata.identifier} AS SERDE` " +
"to show Hive DDL instead."
)
}
if (tableMetadata.tableType == VIEW) {
throw new AnalysisException("Hive view isn't supported by SHOW CREATE TABLE")
}
if ("true".equalsIgnoreCase(tableMetadata.properties.getOrElse("transactional", "false"))) {
throw new AnalysisException(
"SHOW CREATE TABLE doesn't support transactional Hive table. " +
s"Please use `SHOW CREATE TABLE ${tableMetadata.identifier} AS SERDE` " +
"to show Hive DDL instead.")
}
convertTableMetadata(tableMetadata)
}
val stmt = showCreateDataSourceTable(metadata)
Seq(Row(stmt))
}
}
private def convertTableMetadata(tableMetadata: CatalogTable): CatalogTable = {
val hiveSerde = HiveSerDe(
serde = tableMetadata.storage.serde,
inputFormat = tableMetadata.storage.inputFormat,
outputFormat = tableMetadata.storage.outputFormat)
    // Looking for the Spark data source that maps to the Hive serde.
// TODO: some Hive fileformat + row serde might be mapped to Spark data source, e.g. CSV.
val source = HiveSerDe.serdeToSource(hiveSerde)
if (source.isEmpty) {
val builder = StringBuilder.newBuilder
hiveSerde.serde.foreach { serde =>
builder ++= s" SERDE: $serde"
}
hiveSerde.inputFormat.foreach { format =>
builder ++= s" INPUTFORMAT: $format"
}
hiveSerde.outputFormat.foreach { format =>
builder ++= s" OUTPUTFORMAT: $format"
}
throw new AnalysisException(
"Failed to execute SHOW CREATE TABLE against table " +
s"${tableMetadata.identifier}, which is created by Hive and uses the " +
"following unsupported serde configuration\\n" +
builder.toString()
)
} else {
// TODO: should we keep Hive serde properties?
val newStorage = tableMetadata.storage.copy(properties = Map.empty)
tableMetadata.copy(provider = source, storage = newStorage)
}
}
private def showDataSourceTableDataColumns(
metadata: CatalogTable, builder: StringBuilder): Unit = {
val columns = metadata.schema.fields.map(_.toDDL)
builder ++= concatByMultiLines(columns)
}
private def showDataSourceTableOptions(metadata: CatalogTable, builder: StringBuilder): Unit = {
    // For a data source table, there is a provider in the metadata.
    // If it is a Hive table, we have already converted its metadata and filled in a provider.
    builder ++= s"USING ${metadata.provider.get}\n"
val dataSourceOptions = SQLConf.get.redactOptions(metadata.storage.properties).map {
case (key, value) => s"${quoteIdentifier(key)} '${escapeSingleQuotedString(value)}'"
}
if (dataSourceOptions.nonEmpty) {
builder ++= "OPTIONS "
builder ++= concatByMultiLines(dataSourceOptions)
}
}
private def showDataSourceTableNonDataColumns(
metadata: CatalogTable, builder: StringBuilder): Unit = {
val partCols = metadata.partitionColumnNames
if (partCols.nonEmpty) {
builder ++= s"PARTITIONED BY ${partCols.mkString("(", ", ", ")")}\\n"
}
metadata.bucketSpec.foreach { spec =>
if (spec.bucketColumnNames.nonEmpty) {
builder ++= s"CLUSTERED BY ${spec.bucketColumnNames.mkString("(", ", ", ")")}\\n"
if (spec.sortColumnNames.nonEmpty) {
builder ++= s"SORTED BY ${spec.sortColumnNames.mkString("(", ", ", ")")}\\n"
}
builder ++= s"INTO ${spec.numBuckets} BUCKETS\\n"
}
}
}
private def showCreateDataSourceTable(metadata: CatalogTable): String = {
val builder = StringBuilder.newBuilder
builder ++= s"CREATE TABLE ${table.quotedString} "
showDataSourceTableDataColumns(metadata, builder)
showDataSourceTableOptions(metadata, builder)
showDataSourceTableNonDataColumns(metadata, builder)
showTableComment(metadata, builder)
showTableLocation(metadata, builder)
showTableProperties(metadata, builder)
builder.toString()
}
}
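// A minimal usage sketch for ShowCreateTableCommand above (hypothetical names); for
// transactional or otherwise unsupported Hive tables it directs the user to the
// AS SERDE variant defined below.
object ShowCreateTableUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("SHOW CREATE TABLE db.sales").show(truncate = false)
  }
}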
/**
 * This command generates the DDL for a Hive serde table.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW CREATE TABLE table_identifier AS SERDE;
* }}}
*/
case class ShowCreateTableAsSerdeCommand(table: TableIdentifier)
extends RunnableCommand with ShowCreateTableCommandBase {
override val output: Seq[Attribute] = Seq(
AttributeReference("createtab_stmt", StringType, nullable = false)()
)
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val tableMetadata = catalog.getTableMetadata(table)
val stmt = if (DDLUtils.isDatasourceTable(tableMetadata)) {
throw new AnalysisException(
s"$table is a Spark data source table. Use `SHOW CREATE TABLE` without `AS SERDE` instead.")
} else {
showCreateHiveTable(tableMetadata)
}
Seq(Row(stmt))
}
private def showCreateHiveTable(metadata: CatalogTable): String = {
def reportUnsupportedError(features: Seq[String]): Unit = {
throw new AnalysisException(
s"Failed to execute SHOW CREATE TABLE against table/view ${metadata.identifier}, " +
"which is created by Hive and uses the following unsupported feature(s)\\n" +
features.map(" - " + _).mkString("\\n")
)
}
if (metadata.unsupportedFeatures.nonEmpty) {
reportUnsupportedError(metadata.unsupportedFeatures)
}
val builder = StringBuilder.newBuilder
val tableTypeString = metadata.tableType match {
case EXTERNAL => " EXTERNAL TABLE"
case VIEW => " VIEW"
case MANAGED => " TABLE"
case t =>
throw new IllegalArgumentException(
s"Unknown table type is found at showCreateHiveTable: $t")
}
builder ++= s"CREATE$tableTypeString ${table.quotedString}"
if (metadata.tableType == VIEW) {
showViewDataColumns(metadata, builder)
showTableComment(metadata, builder)
showViewProperties(metadata, builder)
showViewText(metadata, builder)
} else {
showHiveTableHeader(metadata, builder)
showTableComment(metadata, builder)
showHiveTableNonDataColumns(metadata, builder)
showHiveTableStorageInfo(metadata, builder)
showTableLocation(metadata, builder)
showTableProperties(metadata, builder)
}
builder.toString()
}
private def showViewDataColumns(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.schema.nonEmpty) {
val viewColumns = metadata.schema.map { f =>
val comment = f.getComment()
.map(escapeSingleQuotedString)
.map(" COMMENT '" + _ + "'")
// view columns shouldn't have data type info
s"${quoteIdentifier(f.name)}${comment.getOrElse("")}"
}
builder ++= concatByMultiLines(viewColumns)
}
}
private def showViewProperties(metadata: CatalogTable, builder: StringBuilder): Unit = {
val viewProps = metadata.properties.filterKeys(!_.startsWith(CatalogTable.VIEW_PREFIX))
if (viewProps.nonEmpty) {
val props = viewProps.map { case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
builder ++= s"TBLPROPERTIES ${concatByMultiLines(props)}"
}
}
private def showViewText(metadata: CatalogTable, builder: StringBuilder): Unit = {
    builder ++= metadata.viewText.mkString("AS ", "", "\n")
}
private def showHiveTableHeader(metadata: CatalogTable, builder: StringBuilder): Unit = {
val columns = metadata.schema.filterNot { column =>
metadata.partitionColumnNames.contains(column.name)
}.map(_.toDDL)
if (columns.nonEmpty) {
builder ++= concatByMultiLines(columns)
}
}
private def showHiveTableNonDataColumns(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.partitionColumnNames.nonEmpty) {
val partCols = metadata.partitionSchema.map(_.toDDL)
      builder ++= partCols.mkString("PARTITIONED BY (", ", ", ")\n")
    }
    if (metadata.bucketSpec.isDefined) {
      val bucketSpec = metadata.bucketSpec.get
      builder ++= s"CLUSTERED BY (${bucketSpec.bucketColumnNames.mkString(", ")})\n"
      if (bucketSpec.sortColumnNames.nonEmpty) {
        builder ++= s"SORTED BY (${bucketSpec.sortColumnNames.map(_ + " ASC").mkString(", ")})\n"
      }
      builder ++= s"INTO ${bucketSpec.numBuckets} BUCKETS\n"
}
}
private def showHiveTableStorageInfo(metadata: CatalogTable, builder: StringBuilder): Unit = {
val storage = metadata.storage
storage.serde.foreach { serde =>
builder ++= s"ROW FORMAT SERDE '$serde'\\n"
val serdeProps = SQLConf.get.redactOptions(metadata.storage.properties).map {
case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
builder ++= s"WITH SERDEPROPERTIES ${concatByMultiLines(serdeProps)}"
}
if (storage.inputFormat.isDefined || storage.outputFormat.isDefined) {
builder ++= "STORED AS\\n"
storage.inputFormat.foreach { format =>
builder ++= s" INPUTFORMAT '${escapeSingleQuotedString(format)}'\\n"
}
storage.outputFormat.foreach { format =>
builder ++= s" OUTPUTFORMAT '${escapeSingleQuotedString(format)}'\\n"
}
}
}
}
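// A minimal usage sketch for ShowCreateTableAsSerdeCommand above (hypothetical names);
// this variant emits Hive DDL and refuses Spark data source tables.
object ShowCreateTableAsSerdeUsageSketch {
  def run(spark: SparkSession): Unit = {
    spark.sql("SHOW CREATE TABLE db.hive_sales AS SERDE").show(truncate = false)
  }
}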
|
darionyaphet/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
|
Scala
|
apache-2.0
| 52,008
|
package algorithms.Datastructures
import scala.annotation.tailrec
sealed trait HeapType
case object MaxHeap extends HeapType
case object MinHeap extends HeapType
class Heap(lst: Seq[Int], heapType: HeapType) {
import Heap.{insert, swapTwoElementsByIndex}
private val length: Int = lst.length
private val compare: (Int, Int) => Boolean = heapType match {
case MaxHeap => (parentValue, childValue) => parentValue > childValue
case MinHeap => (parentValue, childValue) => parentValue < childValue
}
def heapify(): Array[Int] = {
val heap: Array[Int] = Array.fill(length)(0)
for { (elem, ix) <- lst.view.zipWithIndex } insert(heap, elem, compare, ix)
heap
}
def heapSort(): Array[Int] = {
val heap: Array[Int] = heapify()
for { lastIndex <- (length - 1) to 1 by -1 } {
swapTwoElementsByIndex(heap, 0, lastIndex)
restoreHeapProperty(heap, lastIndex)
}
heap
}
def partialSort(k: Int): Array[Int] = {
if (k >= length) heapSort()
else {
val heap: Array[Int] = heapify()
for { lastIndex <- (length - 1) to (length - k) by -1 } {
swapTwoElementsByIndex(heap, 0, lastIndex)
restoreHeapProperty(heap, lastIndex)
}
heap.takeRight(k)
}
}
private def getIndexOfLargestChild(parentIx: Int, leftChildIx: Int, heap: Array[Int], length: Int): Int = {
val rightChildIx: Int = leftChildIx + 1
val changeWithLeft: Boolean = compare(heap(leftChildIx), heap(parentIx))
val swapWithParentIx: Int = if (changeWithLeft) leftChildIx else parentIx
val changeWithRight: Boolean = (rightChildIx < length) && compare(heap(rightChildIx), heap(swapWithParentIx))
if (changeWithRight) rightChildIx else swapWithParentIx
}
private def restoreHeapProperty(heap: Array[Int], reducedLength: Int): Unit = {
@tailrec
def siftDown(parentIx: Int, leftChildIx: Int): Unit = {
if (leftChildIx <= reducedLength - 1) {
val childIx: Int = getIndexOfLargestChild(parentIx, leftChildIx, heap, reducedLength)
if (childIx > parentIx) {
swapTwoElementsByIndex(heap, childIx, parentIx)
siftDown(childIx, 2 * childIx + 1)
}
}
}
siftDown(parentIx = 0, leftChildIx = 1)
}
}
object Heap {
private def getParentIndex(childIx: Int): Int = 0 max ((childIx - 1) / 2)
private def swapTwoElementsByIndex(heap: Array[Int], ix: Int, jy: Int): Unit = {
val elem: Int = heap(ix)
heap(ix) = heap(jy)
heap(jy) = elem
}
private def insert(heap: Array[Int], elem: Int, compare: (Int, Int) => Boolean, indexOfElem: Int): Unit = {
heap(indexOfElem) = elem
@tailrec
def bubbleUp(childIx: Int, parentIx: Int): Unit = {
if (compare(heap(childIx), heap(parentIx))) {
swapTwoElementsByIndex(heap, childIx, parentIx)
bubbleUp(parentIx, getParentIndex(parentIx))
}
}
val indexOfParent: Int = getParentIndex(indexOfElem)
bubbleUp(indexOfElem, indexOfParent)
}
}
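// A minimal usage sketch for the Heap class above (hypothetical object, same package
// assumed): heapSort on a MaxHeap yields ascending order, and partialSort(k) returns
// the k largest elements in ascending order.
object HeapUsageSketch {
  def main(args: Array[String]): Unit = {
    val data = Seq(5, 1, 9, 3, 7, 2)
    val heap = new Heap(data, MaxHeap)
    println(heap.heapSort().mkString(", "))     // 1, 2, 3, 5, 7, 9
    println(heap.partialSort(3).mkString(", ")) // 5, 7, 9
  }
}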
|
ghostrider77/Bioinformatics
|
Bioinformatics/src/main/scala-2.11/algorithms/Datastructures/Heap.scala
|
Scala
|
mit
| 3,088
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.util.NoSuchElementException
import java.util.zip.ZipOutputStream
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.util.control.NonFatal
import scala.xml.Node
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.status.api.v1.{ApiRootResource, ApplicationInfo, ApplicationsListResource, UIRoot}
import org.apache.spark.ui.{SparkUI, UIUtils, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.{ShutdownHookManager, SystemClock, Utils}
/**
* A web server that renders SparkUIs of completed applications.
*
* For the standalone mode, MasterWebUI already achieves this functionality. Thus, the
* main use case of the HistoryServer is in other deploy modes (e.g. Yarn or Mesos).
*
* The logging directory structure is as follows: Within the given base directory, each
* application's event logs are maintained in the application's own sub-directory. This
* is the same structure as maintained in the event log write code path in
* EventLoggingListener.
*/
class HistoryServer(
conf: SparkConf,
provider: ApplicationHistoryProvider,
securityManager: SecurityManager,
port: Int)
extends WebUI(securityManager, securityManager.getSSLOptions("historyServer"), port, conf)
with Logging with UIRoot with ApplicationCacheOperations {
// How many applications to retain
private val retainedApplications = conf.getInt("spark.history.retainedApplications", 50)
// How many applications the summary ui displays
  private[history] val maxApplications = conf.get(HISTORY_UI_MAX_APPS)
  // application cache
private val appCache = new ApplicationCache(this, retainedApplications, new SystemClock())
// and its metrics, for testing as well as monitoring
val cacheMetrics = appCache.metrics
private val loaderServlet = new HttpServlet {
protected override def doGet(req: HttpServletRequest, res: HttpServletResponse): Unit = {
// Parse the URI created by getAttemptURI(). It contains an app ID and an optional
// attempt ID (separated by a slash).
val parts = Option(req.getPathInfo()).getOrElse("").split("/")
if (parts.length < 2) {
res.sendError(HttpServletResponse.SC_BAD_REQUEST,
s"Unexpected path info in request (URI = ${req.getRequestURI()}")
return
}
val appId = parts(1)
val attemptId = if (parts.length >= 3) Some(parts(2)) else None
// Since we may have applications with multiple attempts mixed with applications with a
// single attempt, we need to try both. Try the single-attempt route first, and if an
// error is raised, then try the multiple attempt route.
if (!loadAppUi(appId, None) && (!attemptId.isDefined || !loadAppUi(appId, attemptId))) {
val msg = <div class="row-fluid">Application {appId} not found.</div>
res.setStatus(HttpServletResponse.SC_NOT_FOUND)
UIUtils.basicSparkPage(msg, "Not Found").foreach { n =>
res.getWriter().write(n.toString)
}
return
}
// Note we don't use the UI retrieved from the cache; the cache loader above will register
// the app's UI, and all we need to do is redirect the user to the same URI that was
// requested, and the proper data should be served at that point.
// Also, make sure that the redirect url contains the query string present in the request.
val requestURI = req.getRequestURI + Option(req.getQueryString).map("?" + _).getOrElse("")
res.sendRedirect(res.encodeRedirectURL(requestURI))
}
// SPARK-5983 ensure TRACE is not supported
protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse): Unit = {
res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
}
}
override def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T = {
appCache.withSparkUI(appId, attemptId)(fn)
}
initialize()
/**
* Initialize the history server.
*
* This starts a background thread that periodically synchronizes information displayed on
* this UI with the event logs in the provided base directory.
*/
def initialize() {
attachPage(new HistoryPage(this))
attachHandler(ApiRootResource.getServletHandler(this))
attachHandler(createStaticHandler(SparkUI.STATIC_RESOURCE_DIR, "/static"))
val contextHandler = new ServletContextHandler
contextHandler.setContextPath(HistoryServer.UI_PATH_PREFIX)
contextHandler.addServlet(new ServletHolder(loaderServlet), "/*")
attachHandler(contextHandler)
}
/** Bind to the HTTP server behind this web interface. */
override def bind() {
super.bind()
}
/** Stop the server and close the file system. */
override def stop() {
super.stop()
provider.stop()
}
/** Attach a reconstructed UI to this server. Only valid after bind(). */
override def attachSparkUI(
appId: String,
attemptId: Option[String],
ui: SparkUI,
completed: Boolean) {
assert(serverInfo.isDefined, "HistoryServer must be bound before attaching SparkUIs")
ui.getHandlers.foreach(attachHandler)
addFilters(ui.getHandlers, conf)
}
/** Detach a reconstructed UI from this server. Only valid after bind(). */
override def detachSparkUI(appId: String, attemptId: Option[String], ui: SparkUI): Unit = {
assert(serverInfo.isDefined, "HistoryServer must be bound before detaching SparkUIs")
ui.getHandlers.foreach(detachHandler)
provider.onUIDetached(appId, attemptId, ui)
}
/**
* Get the application UI and whether or not it is completed
* @param appId application ID
* @param attemptId attempt ID
* @return If found, the Spark UI and any history information to be used in the cache
*/
override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
provider.getAppUI(appId, attemptId)
}
/**
* Returns a list of available applications, in descending order according to their end time.
*
* @return List of all known applications.
*/
def getApplicationList(): Iterator[ApplicationHistoryInfo] = {
provider.getListing()
}
def getEventLogsUnderProcess(): Int = {
provider.getEventLogsUnderProcess()
}
def getLastUpdatedTime(): Long = {
provider.getLastUpdatedTime()
}
def getApplicationInfoList: Iterator[ApplicationInfo] = {
getApplicationList().map(ApplicationsListResource.appHistoryInfoToPublicAppInfo)
}
def getApplicationInfo(appId: String): Option[ApplicationInfo] = {
provider.getApplicationInfo(appId).map(ApplicationsListResource.appHistoryInfoToPublicAppInfo)
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
provider.writeEventLogs(appId, attemptId, zipStream)
}
/**
* @return html text to display when the application list is empty
*/
def emptyListingHtml(): Seq[Node] = {
provider.getEmptyListingHtml()
}
/**
* Returns the provider configuration to show in the listing page.
*
* @return A map with the provider's configuration.
*/
def getProviderConfig(): Map[String, String] = provider.getConfig()
/**
* Load an application UI and attach it to the web server.
* @param appId application ID
* @param attemptId optional attempt ID
* @return true if the application was found and loaded.
*/
private def loadAppUi(appId: String, attemptId: Option[String]): Boolean = {
try {
appCache.withSparkUI(appId, attemptId) { _ =>
// Do nothing, just force the UI to load.
}
true
} catch {
case NonFatal(e: NoSuchElementException) =>
false
}
}
/**
* String value for diagnostics.
* @return a multi-line description of the server state.
*/
override def toString: String = {
s"""
| History Server;
| provider = $provider
| cache = $appCache
""".stripMargin
}
}
/**
* The recommended way of starting and stopping a HistoryServer is through the scripts
* start-history-server.sh and stop-history-server.sh. The path to a base log directory,
* as well as any other relevant history server configuration, should be specified via
* the $SPARK_HISTORY_OPTS environment variable. For example:
*
* export SPARK_HISTORY_OPTS="-Dspark.history.fs.logDirectory=/tmp/spark-events"
* ./sbin/start-history-server.sh
*
* This launches the HistoryServer as a Spark daemon.
*/
object HistoryServer extends Logging {
private val conf = new SparkConf
val UI_PATH_PREFIX = "/history"
def main(argStrings: Array[String]): Unit = {
Utils.initDaemon(log)
new HistoryServerArguments(conf, argStrings)
initSecurity()
val securityManager = createSecurityManager(conf)
val providerName = conf.getOption("spark.history.provider")
.getOrElse(classOf[FsHistoryProvider].getName())
val provider = Utils.classForName(providerName)
.getConstructor(classOf[SparkConf])
.newInstance(conf)
.asInstanceOf[ApplicationHistoryProvider]
val port = conf.getInt("spark.history.ui.port", 18080)
val server = new HistoryServer(conf, provider, securityManager, port)
server.bind()
ShutdownHookManager.addShutdownHook { () => server.stop() }
    // Wait until the end of the world... or until the HistoryServer process is manually stopped.
    while (true) { Thread.sleep(Int.MaxValue) }
}
/**
* Create a security manager.
* This turns off security in the SecurityManager, so that the History Server can start
* in a Spark cluster where security is enabled.
* @param config configuration for the SecurityManager constructor
* @return the security manager for use in constructing the History Server.
*/
private[history] def createSecurityManager(config: SparkConf): SecurityManager = {
if (config.getBoolean(SecurityManager.SPARK_AUTH_CONF, false)) {
logDebug(s"Clearing ${SecurityManager.SPARK_AUTH_CONF}")
config.set(SecurityManager.SPARK_AUTH_CONF, "false")
}
if (config.getBoolean("spark.acls.enable", config.getBoolean("spark.ui.acls.enable", false))) {
logInfo("Either spark.acls.enable or spark.ui.acls.enable is configured, clearing it and " +
"only using spark.history.ui.acl.enable")
config.set("spark.acls.enable", "false")
config.set("spark.ui.acls.enable", "false")
}
new SecurityManager(config)
}
def initSecurity() {
// If we are accessing HDFS and it has security enabled (Kerberos), we have to login
// from a keytab file so that we can access HDFS beyond the kerberos ticket expiration.
// As long as it is using Hadoop rpc (hdfs://), a relogin will automatically
// occur from the keytab.
if (conf.getBoolean("spark.history.kerberos.enabled", false)) {
      // If Kerberos is enabled, the following two parameters must be set.
val principalName = conf.get("spark.history.kerberos.principal")
val keytabFilename = conf.get("spark.history.kerberos.keytab")
SparkHadoopUtil.get.loginUserFromKeytab(principalName, keytabFilename)
}
}
private[history] def getAttemptURI(appId: String, attemptId: Option[String]): String = {
val attemptSuffix = attemptId.map { id => s"/$id" }.getOrElse("")
s"${HistoryServer.UI_PATH_PREFIX}/${appId}${attemptSuffix}"
}
}
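// A minimal configuration sketch using the keys read above (log directory, UI port,
// retained application count); in practice these are usually supplied through
// SPARK_HISTORY_OPTS or spark-defaults.conf rather than built programmatically.
object HistoryServerConfSketch {
  def sketch(): SparkConf = new SparkConf()
    .set("spark.history.fs.logDirectory", "/tmp/spark-events")
    .set("spark.history.ui.port", "18080")
    .set("spark.history.retainedApplications", "50")
}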
|
cin/spark
|
core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala
|
Scala
|
apache-2.0
| 12,437
|
package com.seanshubin.schulze.persistence
import datomic.{Entity, Database}
import com.seanshubin.schulze.persistence.datomic_util.ScalaAdaptor._
import com.seanshubin.schulze.domain.{Ranking, Candidate}
import java.lang.{Long=>JavaLong}
class DatomicPersistenceSnapshotApi(db: Database) extends PersistenceSnapshotApi {
def electionNames(): Seq[String] = {
val query = "[:find ?electionName :where [?election :election/name ?electionName]]"
val rows = queryRows(singleString, query, db)
rows
}
def voterNames(): Seq[String] = {
val query = "[:find ?voterName :where [?voter :voter/name ?voterName]]"
val rows = queryRows(singleString, query, db)
rows
}
def candidates(electionName: String): Seq[Candidate] = {
val candidateIds = lookupCandidateIds(electionName)
val candidateDomainObjects = candidateIds.map(idToCandidate)
candidateDomainObjects
}
private def lookupCandidateIds(electionName: String): Seq[Long] = {
val query =
"[:find ?candidate :in $ ?electionName :where " +
"[?election :election/name ?electionName ]" +
"[?candidate :candidate/election ?election ]]"
val candidateIds = queryRows(singleLong, query, db, electionName)
candidateIds
}
private def idToCandidate(id:Long) = {
val entity = db.entity(id)
val name = entity.get(":candidate/name").asInstanceOf[String]
val maybeDescription = Option(entity.get(":candidate/description").asInstanceOf[String])
Candidate(name, maybeDescription)
}
def candidate(electionName: String, candidateName: String): Option[Candidate] = {
val query =
"[:find ?candidate :in $ ?electionName ?candidateName :where " +
"[?election :election/name ?electionName ]" +
"[?candidate :candidate/election ?election ]" +
"[?candidate :candidate/name ?candidateName ]]"
val candidateRows = queryRows(singleLong, query, db, electionName, candidateName)
    val maybeCandidate = if (candidateRows.size == 1) {
      val candidate = idToCandidate(candidateRows.head)
      Some(candidate)
    } else if (candidateRows.isEmpty) {
None
} else {
throw new RuntimeException(
s"Expected exactly one candidate matching election $electionName " +
s"and candidate $candidateName, got ${candidateRows.size}")
}
maybeCandidate
}
def votes(electionName: String): Map[String, Map[String, Long]] = {
val query =
"[:find ?voterName ?candidateName ?rank :in $ ?electionName :where " +
"[?election :election/name ?electionName ]" +
"[?ranking :ranking/election ?election ]" +
"[?ranking :ranking/voter ?voter ]" +
"[?voter :voter/name ?voterName ]" +
"[?ranking :ranking/candidate ?candidate ]" +
"[?candidate :candidate/name ?candidateName]" +
"[?ranking :ranking/rank ?rank ]]"
val rows = queryRows(tupleStringStringLong, query, db, electionName)
def addRow(soFar: Map[String, Map[String, Long]], row: (String, String, Long)): Map[String, Map[String, Long]] = {
val (voterName, candidateName, rank)= row
val oldRankings = soFar.getOrElse(voterName, Map())
val newRankings = oldRankings + (candidateName -> rank)
val newSoFar = soFar + (voterName -> newRankings)
newSoFar
}
rows.foldLeft(Map[String, Map[String, Long]]())(addRow)
}
def vote(electionName: String, voterName: String): Seq[Ranking] = {
val electionId:Long = querySingleRow(singleLong,
"[:find ?election :in $ ?electionName :where [?election :election/name ?electionName]]", db, electionName)
val candidateIdsQuery =
"[:find ?candidate :in $ ?election :where " +
"[?candidate :candidate/election ?election]]"
val candidateAndRankQuery =
"[:find ?candidate ?rank :in $ ?election ?voterName :where " +
"[?voter :voter/name ?voterName]" +
"[?ranking :ranking/candidate ?candidate]" +
"[?ranking :ranking/election ?election ]" +
"[?ranking :ranking/voter ?voter ]" +
"[?ranking :ranking/rank ?rank ]]"
val rankByCandidateId:Map[Long, Long] = queryRows(tupleLongLong, candidateAndRankQuery, db, electionId, voterName).toMap
val candidateIds:Seq[Long] = queryRows(singleLong, candidateIdsQuery, db, electionId)
def candidateIdToRanking(candidateId:Long):Ranking = {
val candidate:Entity = db.entity(candidateId)
val name:String = candidate.get(":candidate/name").asInstanceOf[String]
val maybeDescription:Option[String] = Option(candidate.get(":candidate/description").asInstanceOf[String])
val maybeRank:Option[Long] = rankByCandidateId.get(candidateId)
Ranking(name, maybeDescription, maybeRank)
}
val rankings = candidateIds.map(candidateIdToRanking)
rankings
}
}
|
SeanShubin/schulze
|
persistence/src/main/scala/com/seanshubin/schulze/persistence/DatomicPersistenceSnapshotApi.scala
|
Scala
|
unlicense
| 4,931
|
package edu.gemini.sp.vcs2
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
object SortSpec extends Specification with ScalaCheck {
case class Test(merged: List[Char], preferred: List[Char], alternate: List[Char]) {
val sorted = SortHeuristic.sort(merged, preferred, alternate)(identity)
override def toString: String =
s"""----------
|Merged...: ${merged.mkString}
|Preferred: ${preferred.mkString}
|Alternate: ${alternate.mkString}
|Sorted...: ${sorted.mkString}
""".stripMargin
}
val genTest = for {
p <- Gen.listOf(Gen.alphaChar)
a <- Gen.listOf(Gen.alphaChar)
m <- Gen.someOf(p ++ a)
} yield Test(m.toList.distinct, p.toList.distinct, a.toList.distinct)
// checks whether l is in order with respect to orderedList and contains no
// characters not contained in orderedList (though orderedList might contain
// characters not in l)
def isOrdered(l: List[Char], orderedList: List[Char]): Boolean = {
val order = orderedList.zipWithIndex.toMap
val indices = l.map(c => order.getOrElse(c, -1))
indices == indices.sorted && !indices.contains(-1)
}
"sort" should {
"not add or remove anything" !
forAll(genTest) { t => t.sorted.toSet == t.merged.toSet }
"respect preferred order" !
forAll(genTest) { t => isOrdered(t.sorted.filter(t.preferred.contains), t.preferred) }
"respect alternate order" !
forAll(genTest) { t =>
val isPref = t.preferred.toSet
def part(l: List[Char]): List[List[Char]] =
(l:\List(List.empty[Char])) { case (c, res) =>
if (isPref(c))
res match {
case Nil :: _ => res
case _ => Nil :: res
}
else (c :: res.head) :: res.tail
}
part(t.sorted).forall { isOrdered(_,t.alternate) }
}
"place alternate chars close to preceding character in alternate order" !
forAll(genTest) { t =>
val isPref = t.preferred.toSet
val isMissing = (isPref ++ t.alternate.toSet) &~ t.merged.toSet
def preds(l: List[Char]): Map[Char, List[Char]] = {
val rev = l.reverse
rev.zip(rev.tails.drop(1).toIterable).toMap
}
val altPreds = preds(t.alternate)
val sortPreds = preds(t.sorted)
t.sorted.forall { c =>
isPref(c) || (sortPreds(c).headOption == altPreds(c).dropWhile(isMissing).headOption)
}
}
}
}
|
arturog8m/ocs
|
bundle/edu.gemini.sp.vcs/src/test/scala/edu/gemini/sp/vcs2/SortSpec.scala
|
Scala
|
bsd-3-clause
| 2,579
|
// Worker.scala
//
//
package com.example.piakka
import akka.actor.{Actor, ActorRef, ActorPath, ActorLogging}
import scala.annotation.tailrec
import akka.pattern.pipe
import scala.concurrent.Future
object Worker {
case class WorkerCreated(worker: ActorRef)
case class SendWork(worker: ActorRef)
case class CalculationFinished(workerSender: ActorRef, calculation: Double)
}
class Worker(masterLocation: ActorPath) extends Actor with ActorLogging {
import Master._
import Worker._
// Find the location of the Master
val master = context.actorFor(masterLocation)
// Notify the Master that we're alive
override def preStart() = master ! WorkerCreated(self)
def calculatePi(start: Int, numberOfElements: Int) : Double = {
@tailrec
def calculatePiFor(start: Int, limit: Int, acc: Double, count: Int) : Double =
count match {
case x if x == limit => acc
case _ => calculatePiFor(start + 1, limit, acc + 4.0 * (1 - (start % 2) * 2) / (2 * start + 1),
count + 1)
}
calculatePiFor(start, numberOfElements , 0.0, 0)
}
implicit val ec = context.dispatcher
def doCalculation(workSender: ActorRef, start: Int, numberOfElements: Int ): Unit = {
Future {
CalculationFinished( workSender, calculatePi(start, numberOfElements) )
} pipeTo self
}
def busy: Receive = {
case WorkerIsReady => log.info("Received a message to do work but I'm busy")
case CalculationFinished(worker, result) => master ! Result(worker, result)
context.become(idle)
}
def idle: Receive = {
case WorkerIsReady => master ! SendWork(self)
case Calculate(worker, Work(start, numberOfElements )) =>
doCalculation(worker, start, numberOfElements)
context.become(busy)
}
def receive = idle
}
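// A minimal standalone sketch (hypothetical object, not wired into the actor system) of
// the same Leibniz partial sum that calculatePi above accumulates; summing the disjoint
// ranges handled by many workers approaches math.Pi.
object LeibnizSumSketch {
  def partialSum(start: Int, numberOfElements: Int): Double =
    (start until start + numberOfElements)
      .map(k => 4.0 * (1 - (k % 2) * 2) / (2 * k + 1))
      .sum
  def main(args: Array[String]): Unit =
    println(partialSum(0, 1000000)) // close to math.Pi for a large element count
}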
|
dtinblack/Scala-AKKACluster
|
PiWorkerPull/src/main/scala/Worker.scala
|
Scala
|
mit
| 2,021
|
package com.geishatokyo.smartmerger.injection
/**
* Created by takeshita on 2014/06/03.
*/
class InjectionRule {
/**
*
*/
var ignoreNotExistMergeBlock = false
/**
*
*/
var leftNotMergedBlock = true
}
|
geishatokyo/smart-merger
|
src/main/scala/com/geishatokyo/smartmerger/injection/InjectionRule.scala
|
Scala
|
mit
| 225
|
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.scope._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScPattern
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor
import scala.collection.mutable
/**
* @author Alexander Podkhalyuzin
* Date: 06.03.2008
*/
class ScEnumeratorsImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScEnumerators {
override def toString: String = "Enumerators"
def enumerators: mutable.WrappedArray[ScEnumerator] = findChildrenByClass[ScEnumerator](classOf[ScEnumerator])
def generators: mutable.WrappedArray[ScGenerator] = findChildrenByClass[ScGenerator](classOf[ScGenerator])
def guards: mutable.WrappedArray[ScGuard] = findChildrenByClass[ScGuard](classOf[ScGuard])
def namings: Seq[ScPatterned] =
for (c <- getChildren if c.isInstanceOf[ScGenerator] || c.isInstanceOf[ScEnumerator])
yield c.asInstanceOf[ScPatterned]
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
val reverseChildren = getChildren.reverse
val children =
if (reverseChildren.contains(lastParent)) reverseChildren.drop(reverseChildren.indexOf(lastParent) + (
lastParent match {
case _: ScGenerator => 1
case _ => 0
}
))
else reverseChildren
for (c <- children) {
c match {
case c: ScGenerator =>
for (b <- c.pattern.bindings) if (!processor.execute(b, state)) return false
processor match {
case b: BaseProcessor => b.changedLevel
case _ =>
}
case c: ScEnumerator =>
for (b <- c.pattern.bindings) if (!processor.execute(b, state)) return false
case _ =>
}
}
true
}
override def patterns: Seq[ScPattern] = namings.reverse.map(_.pattern)
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScEnumeratorsImpl.scala
|
Scala
|
apache-2.0
| 2,171
|
import org.specs2.mutable._
class MaxSumSubsetTests extends Specification {
import MaxSumSubset._
"maxSumSubArray" should {
"return 12 for [1, -3, 5, -2, 9, -8, -6, 4]" in {
maxSumSubArray(Array(1, -3, 5, -2, 9, -8, -6, 4)) must_== 12
}
}
}
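// A minimal Kadane-style sketch consistent with the expectation above; the real
// MaxSumSubset object under test is defined elsewhere and may differ.
object MaxSumSubsetSketch {
  // best sum ending at each position, then the overall best (requires a non-empty array)
  def maxSumSubArray(xs: Array[Int]): Int =
    xs.tail.scanLeft(xs.head)((best, x) => math.max(x, best + x)).max
}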
|
eflan/practice
|
TreesAndGraphs/eliot/scala/src/test/scala/MaxSumSubsetTests.scala
|
Scala
|
apache-2.0
| 263
|
abstract class Q[T] {
type t <: T
}
class User(q: Q[String]) {
def r(v : q.t) = {
v.<ref>toLowerCase
}
}
|
ilinum/intellij-scala
|
testdata/resolve/nonlocal/substAliasBound.scala
|
Scala
|
apache-2.0
| 116
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import javax.annotation.Nullable
import scala.collection.Map
import com.fasterxml.jackson.annotation.JsonTypeInfo
import org.apache.spark.TaskEndReason
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics}
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.storage.{BlockManagerId, BlockUpdatedInfo}
@DeveloperApi
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "Event")
trait SparkListenerEvent {
  /* Whether to output this event to the event log */
protected[spark] def logEvent: Boolean = true
}
@DeveloperApi
case class SparkListenerStageSubmitted(stageInfo: StageInfo, properties: Properties = null)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerStageCompleted(stageInfo: StageInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskStart(stageId: Int, stageAttemptId: Int, taskInfo: TaskInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskGettingResult(taskInfo: TaskInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerSpeculativeTaskSubmitted(stageId: Int) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskEnd(
stageId: Int,
stageAttemptId: Int,
taskType: String,
reason: TaskEndReason,
taskInfo: TaskInfo,
// may be null if the task has failed
@Nullable taskMetrics: TaskMetrics)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerJobStart(
jobId: Int,
time: Long,
stageInfos: Seq[StageInfo],
properties: Properties = null)
extends SparkListenerEvent {
// Note: this is here for backwards-compatibility with older versions of this event which
// only stored stageIds and not StageInfos:
val stageIds: Seq[Int] = stageInfos.map(_.stageId)
}
@DeveloperApi
case class SparkListenerJobEnd(
jobId: Int,
time: Long,
jobResult: JobResult)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerEnvironmentUpdate(environmentDetails: Map[String, Seq[(String, String)]])
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockManagerAdded(
time: Long,
blockManagerId: BlockManagerId,
maxMem: Long,
maxOnHeapMem: Option[Long] = None,
maxOffHeapMem: Option[Long] = None) extends SparkListenerEvent {
}
@DeveloperApi
case class SparkListenerBlockManagerRemoved(time: Long, blockManagerId: BlockManagerId)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerUnpersistRDD(rddId: Int) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorAdded(time: Long, executorId: String, executorInfo: ExecutorInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorRemoved(time: Long, executorId: String, reason: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorBlacklisted(
time: Long,
executorId: String,
taskFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorBlacklistedForStage(
time: Long,
executorId: String,
taskFailures: Int,
stageId: Int,
stageAttemptId: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeBlacklistedForStage(
time: Long,
hostId: String,
executorFailures: Int,
stageId: Int,
stageAttemptId: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorUnblacklisted(time: Long, executorId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeBlacklisted(
time: Long,
hostId: String,
executorFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeUnblacklisted(time: Long, hostId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockUpdated(blockUpdatedInfo: BlockUpdatedInfo) extends SparkListenerEvent
/**
* Periodic updates from executors.
* @param execId executor id
* @param accumUpdates sequence of (taskId, stageId, stageAttemptId, accumUpdates)
* @param executorUpdates executor level metrics updates
*/
@DeveloperApi
case class SparkListenerExecutorMetricsUpdate(
execId: String,
accumUpdates: Seq[(Long, Int, Int, Seq[AccumulableInfo])],
executorUpdates: Option[ExecutorMetrics] = None)
extends SparkListenerEvent
/**
* Peak metric values for the executor for the stage, written to the history log at stage
* completion.
* @param execId executor id
* @param stageId stage id
* @param stageAttemptId stage attempt
* @param executorMetrics executor level metrics, indexed by ExecutorMetricType.values
*/
@DeveloperApi
case class SparkListenerStageExecutorMetrics(
execId: String,
stageId: Int,
stageAttemptId: Int,
executorMetrics: ExecutorMetrics)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationStart(
appName: String,
appId: Option[String],
time: Long,
sparkUser: String,
appAttemptId: Option[String],
driverLogs: Option[Map[String, String]] = None,
driverAttributes: Option[Map[String, String]] = None) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationEnd(time: Long) extends SparkListenerEvent
/**
* An internal class that describes the metadata of an event log.
*/
@DeveloperApi
case class SparkListenerLogStart(sparkVersion: String) extends SparkListenerEvent
/**
* Interface for listening to events from the Spark scheduler. Most applications should probably
* extend SparkListener or SparkFirehoseListener directly, rather than implementing this class.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
private[spark] trait SparkListenerInterface {
/**
* Called when a stage completes successfully or fails, with information on the completed stage.
*/
def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit
/**
* Called when a stage is submitted
*/
def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit
/**
* Called when a task starts
*/
def onTaskStart(taskStart: SparkListenerTaskStart): Unit
/**
* Called when a task begins remotely fetching its result (will not be called for tasks that do
* not need to fetch the result remotely).
*/
def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit
/**
* Called when a task ends
*/
def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit
/**
* Called when a job starts
*/
def onJobStart(jobStart: SparkListenerJobStart): Unit
/**
* Called when a job ends
*/
def onJobEnd(jobEnd: SparkListenerJobEnd): Unit
/**
* Called when environment properties have been updated
*/
def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit
/**
* Called when a new block manager has joined
*/
def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit
/**
* Called when an existing block manager has been removed
*/
def onBlockManagerRemoved(blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit
/**
* Called when an RDD is manually unpersisted by the application
*/
def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit
/**
* Called when the application starts
*/
def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit
/**
* Called when the application ends
*/
def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit
/**
* Called when the driver receives task metrics from an executor in a heartbeat.
*/
def onExecutorMetricsUpdate(executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit
/**
* Called with the peak memory metrics for a given (executor, stage) combination. Note that this
* is only present when reading from the event log (as in the history server), and is never
* called in a live application.
*/
def onStageExecutorMetrics(executorMetrics: SparkListenerStageExecutorMetrics): Unit
/**
* Called when the driver registers a new executor.
*/
def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit
/**
* Called when the driver removes an executor.
*/
def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit
/**
* Called when the driver blacklists an executor for a Spark application.
*/
def onExecutorBlacklisted(executorBlacklisted: SparkListenerExecutorBlacklisted): Unit
/**
* Called when the driver blacklists an executor for a stage.
*/
def onExecutorBlacklistedForStage(
executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit
/**
* Called when the driver blacklists a node for a stage.
*/
def onNodeBlacklistedForStage(nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit
/**
* Called when the driver re-enables a previously blacklisted executor.
*/
def onExecutorUnblacklisted(executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit
/**
* Called when the driver blacklists a node for a Spark application.
*/
def onNodeBlacklisted(nodeBlacklisted: SparkListenerNodeBlacklisted): Unit
/**
* Called when the driver re-enables a previously blacklisted node.
*/
def onNodeUnblacklisted(nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit
/**
* Called when the driver receives a block update info.
*/
def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit
/**
* Called when a speculative task is submitted
*/
def onSpeculativeTaskSubmitted(speculativeTask: SparkListenerSpeculativeTaskSubmitted): Unit
/**
* Called when other events like SQL-specific events are posted.
*/
def onOtherEvent(event: SparkListenerEvent): Unit
}
/**
* :: DeveloperApi ::
* A default implementation for `SparkListenerInterface` that has no-op implementations for
* all callbacks.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
@DeveloperApi
abstract class SparkListener extends SparkListenerInterface {
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { }
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { }
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { }
override def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit = { }
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { }
override def onJobStart(jobStart: SparkListenerJobStart): Unit = { }
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = { }
override def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit = { }
override def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit = { }
override def onBlockManagerRemoved(
blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit = { }
override def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit = { }
override def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit = { }
override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { }
override def onExecutorMetricsUpdate(
executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit = { }
override def onStageExecutorMetrics(
executorMetrics: SparkListenerStageExecutorMetrics): Unit = { }
override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { }
override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { }
override def onExecutorBlacklisted(
executorBlacklisted: SparkListenerExecutorBlacklisted): Unit = { }
  override def onExecutorBlacklistedForStage(
      executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit = { }
  override def onNodeBlacklistedForStage(
      nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit = { }
override def onExecutorUnblacklisted(
executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit = { }
override def onNodeBlacklisted(
nodeBlacklisted: SparkListenerNodeBlacklisted): Unit = { }
override def onNodeUnblacklisted(
nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit = { }
override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = { }
override def onSpeculativeTaskSubmitted(
speculativeTask: SparkListenerSpeculativeTaskSubmitted): Unit = { }
override def onOtherEvent(event: SparkListenerEvent): Unit = { }
}
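// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a minimal listener that
// counts finished tasks by overriding a single callback; every other callback
// keeps the no-op default from `SparkListener` above. Registering it through
// `SparkContext.addSparkListener` is the assumed usage.
// ---------------------------------------------------------------------------
class TaskCountingListener extends SparkListener {
  private val completedTasks = new java.util.concurrent.atomic.AtomicLong(0L)

  // Invoked on the listener bus each time a task finishes, whether it succeeded or failed.
  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
    completedTasks.incrementAndGet()
  }

  def count: Long = completedTasks.get()
}
// Assumed usage: sc.addSparkListener(new TaskCountingListener)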
|
aosagie/spark
|
core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
|
Scala
|
apache-2.0
| 13,533
|
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Usman Nisar, Aravind Kalimurthy, John Miller
* @version 1.3
* @date Tue Dec 20 15:10:55 EST 2016
* @see LICENSE (MIT style license file).
*
* @see www2012.wwwconference.org/proceedings/proceedings/p949.pdf
*
* Graph Simulation CAR Using Mutable Sets
*/
package scalation.graphalytics.mutable
import scala.collection.mutable.{Set => SET}
import scala.reflect.ClassTag
import scalation.graphalytics.mutable.{ExampleGraphS => EX_GRAPH}
import scalation.util.MultiSet
import LabelFunctions._
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `GraphSimCAR` class provides an implementation for Simple Graph Simulation.
* @param g the data graph G(V, E, l)
* @param q the query graph Q(U, D, k)
*/
class GraphSimCAR [TLabel: ClassTag] (g: Graph [TLabel], q: Graph [TLabel])
extends GraphMatcher (g, q)
{
/** The DEBUG flag
*/
private val DEBUG = false
/** The Child labels for the query graph
*/
private val cLabel = Array.ofDim [MultiSet [TLabel]] (q.size)
for (u <- cLabel.indices) cLabel(u) = qChildLabels (q, u)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply the Graph Simulation pattern matching algorithm to find the mappings
* from the query graph 'q' to the data graph 'g'. These are represented by a
* multi-valued function 'phi' that maps each query graph vertex 'u' to a
* set of data graph vertices '{v}'.
*/
def mappings (): Array [SET [Int]] = nisarGraphSimCAR (feasibleMates ())
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given the mappings 'phi' produced by the 'feasibleMates' method,
* eliminate mappings 'u -> v' when v's children fail to match u's.
* @param phi array of mappings from a query vertex u to { graph vertices v }
*/
private def nisarGraphSimCAR (phi: Array [SET [Int]]): Array [SET [Int]] =
{
var alter = true
while (alter) { // check for matching children
alter = false
// loop over query vertices u, data vertices v in phi(u), and u's children u_c
for (u <- qRange; v <- phi(u)) {
val chu = cLabel(u)
val chv = gChildLabels(g, v, u, q.ch(u), phi)
val res = ! (chu ⊆ chv)
if (DEBUG) println("u : " + u + " v : " + v + " chu : " + chu + " chv : " + chv + " res : " + res)
if (res) {
phi(u) -= v // remove v due to lack of child match
alter = true
} // if
} // for
} // while
phi
} // nisarGraphSimCAR
} // GraphSimCAR class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `GraphSimCARTest` object is used to test the `GraphSimCAR` class.
 *  > run-main scalation.graphalytics.mutable.GraphSimCARTest
*/
object GraphSimCARTest extends App
{
val g = EX_GRAPH.g1
val q = EX_GRAPH.q1
println (s"g.checkEdges = ${g.checkEdges}")
q.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
g.printG ()
(new GraphSimCAR (g, q)).test ("GraphSimCAR") // Graph Simulation Pattern Matcher
} // GraphSimCARTest object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `GraphSimCARTest2` object is used to test the `GraphSimCAR` class.
 *  > run-main scalation.graphalytics.mutable.GraphSimCARTest2
*/
object GraphSimCARTest2 extends App
{
val g = EX_GRAPH.g2
val q = EX_GRAPH.q2
println (s"g.checkEdges = ${g.checkEdges}")
q.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
g.printG ()
(new GraphSimCAR (g, q)).test ("GraphSimCAR") // Graph Simulation Pattern Matcher
} // GraphSimCARTest2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `GraphSimCARTest3` object is used to test the `GraphSimCAR` class.
 *  > run-main scalation.graphalytics.mutable.GraphSimCARTest3
*
object GraphSimCARTest3 extends App
{
val g = EX_GRAPH.g3
val q = EX_GRAPH.q3
println (s"g.checkEdges = ${g.checkEdges}")
g.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
(new GraphSimCAR (g, q)).test ("GraphSimCAR") // Graph Simulation Pattern Matcher
} // GraphSimCARTest3 object
*/
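//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Illustrative sketch (not part of the original file): calling 'mappings'
 *  directly and printing the multi-valued function 'phi' it returns, using the
 *  same example graphs as `GraphSimCARTest`. The object name is hypothetical.
 *  > run-main scalation.graphalytics.mutable.GraphSimCARMappingsSketch
 */
object GraphSimCARMappingsSketch extends App
{
    val g   = EX_GRAPH.g1                              // data graph
    val q   = EX_GRAPH.q1                              // query graph
    val phi = (new GraphSimCAR (g, q)).mappings ()     // phi(u) = matching data graph vertices for query vertex u
    for (u <- phi.indices) println (s"phi($u) = ${phi(u)}")
} // GraphSimCARMappingsSketch object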
|
scalation/fda
|
scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/mutable/GraphSimCAR.scala
|
Scala
|
mit
| 4,526
|
package slamdata.engine.api
import slamdata.Predef._
import slamdata.engine._; import Backend._
import slamdata.engine.analysis.fixplate.{Term}
import slamdata.engine.config._
import slamdata.engine.fp._
import slamdata.engine.fs._; import Path._
import scala.concurrent.duration._
import scalaz._
import scalaz.concurrent._
import scalaz.stream._
import Scalaz._
import org.specs2.mutable._
import slamdata.specs2._
import argonaut._, Argonaut._
import dispatch._
import com.ning.http.client.{Response}
sealed trait Action
object Action {
final case class Save(path: Path, rows: List[Data]) extends Action
final case class Append(path: Path, rows: List[Data]) extends Action
final case class Reload(cfg: Config) extends Action
}
class ApiSpecs extends Specification with DisjunctionMatchers with PendingWithAccurateCoverage with org.specs2.time.NoTimeConversions {
sequential // Each test binds the same port
args.report(showtimes = true)
val port = 8888
var historyBuff = scala.collection.mutable.ListBuffer[Action]()
def history = historyBuff.toList
/**
Start a server, with the given backend, execute something, and then tear
down the server.
*/
def withServer[A](backend: Backend, config: Config)(body: => A): A = {
val srv = Server.run(port, 1.seconds, FileSystemApi(backend, ".", config, cfg => Task.delay {
historyBuff += Action.Reload(cfg)
()
})).run
try {
body
}
finally {
ignore(srv.shutdown.run)
historyBuff.clear
}
}
object Stub {
case class Plan(description: String)
implicit val PlanRenderTree = new RenderTree[Plan] {
def render(v: Plan) = Terminal(List("Stub.Plan"), None)
}
lazy val planner = new Planner[Plan] {
def plan(logical: Term[LogicalPlan]) = Planner.emit(Vector.empty, \\/-(Plan("logical: " + logical.toString)))
}
lazy val evaluator: Evaluator[Plan] = new Evaluator[Plan] {
def execute(physical: Plan) =
EitherT.right(Task.now(ResultPath.Temp(Path("tmp/out"))))
def compile(physical: Plan) = "Stub" -> Cord(physical.toString)
def checkCompatibility = ???
}
def showNative(plan: Plan): String = plan.toString
def backend(files: Map[Path, List[Data]]): Backend = new PlannerBackend[Plan] {
val planner = Stub.planner
val evaluator = Stub.evaluator
val RP = PlanRenderTree
def scan0(path: Path, offset: Long, limit: Option[Long]) =
files.get(path).fold(
Process.eval[Backend.ResTask, Data](EitherT.left(Task.now(Backend.ResultPathError(NonexistentPathError(path, Some("no backend")))))))(
Process.emitAll(_)
.drop(offset.toInt)
.take(limit.fold(Int.MaxValue)(_.toInt)))
def count0(path: Path) =
EitherT(Task.now[PathError \\/ List[Data]](files.get(path) \\/> NonexistentPathError(path, Some("no backend")))).map(_.length.toLong)
def save0(path: Path, values: Process[Task, Data]) =
if (path.pathname.contains("pathError"))
EitherT.left(Task.now(PPathError(InvalidPathError("simulated (client) error"))))
else if (path.pathname.contains("valueError"))
EitherT.left(Task.now(PWriteError(WriteError(Data.Str(""), Some("simulated (value) error")))))
else Errors.liftE[ProcessingError](values.runLog.map { rows =>
historyBuff += Action.Save(path, rows.toList)
()
})
def append0(path: Path, values: Process[Task, Data]) =
if (path.pathname.contains("pathError"))
Process.eval[Backend.PathTask, WriteError](EitherT.left(Task.now(InvalidPathError("simulated (client) error"))))
else if (path.pathname.contains("valueError"))
Process.eval(WriteError(Data.Str(""), Some("simulated (value) error")).point[Backend.PathTask])
else Process.eval_(Backend.liftP(values.runLog.map { rows =>
historyBuff += Action.Append(path, rows.toList)
()
}))
def delete0(path: Path) = ().point[Backend.PathTask]
def move0(src: Path, dst: Path, semantics: Backend.MoveSemantics) = ().point[Backend.PathTask]
def ls0(dir: Path): Backend.PathTask[Set[Backend.FilesystemNode]] = {
val children = files.keys.toList.map(_.rebase(dir).toOption.map(p => Backend.FilesystemNode(p.head, Backend.Plain))).flatten
children.toSet.point[Backend.PathTask]
}
def defaultPath = Path.Current
}
}
/** Handler for response bodies containing newline-separated JSON documents, for use with Dispatch. */
object asJson extends (Response => String \\/ (String, List[Json])) {
private def sequenceStrs[A](vs: scala.collection.Seq[String \\/ A]): String \\/ List[A] =
vs.toList.map(_.validation.toValidationNel).sequenceU.leftMap(_.list.mkString("; ")).disjunction
private def parseJsonLines(str: String): String \\/ List[Json] =
if (str == "") \\/-(Nil)
else sequenceStrs(str.split("\\n").map(Parse.parse(_)))
def apply(r: Response) =
(dispatch.as.String andThen parseJsonLines)(r).map((r.getContentType, _))
}
def asLines(r: Response): (String, List[String]) = (r.getContentType, dispatch.as.String(r).split("\\r\\n").toList)
/** Handlers for use with Dispatch. */
val code: Response => Int = _.getStatusCode
def header(name: String): Response => Option[String] = r => Option(r.getHeader(name))
def commaSep: Option[String] => List[String] = _.fold(List[String]())(_.split(", ").toList)
val svc = dispatch.host("localhost", port)
def errorFromBody(resp: Response): String \\/ String = {
val mt = resp.getContentType.split(";").head
(for {
_ <- if (mt == "application/json" || mt == "application/ldjson") \\/-(())
else -\\/("bad content-type: " + mt + " (body: " + resp.getResponseBody + ")")
json <- Parse.parse(resp.getResponseBody)
err <- json.field("error") \\/> ("`error` missing: " + json)
errStr <- err.string \\/> ("`error` not a string: " + err)
} yield errStr)
}
val files1 = ListMap(
Path("bar") -> List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Int(2))),
Data.Obj(ListMap("c" -> Data.Set(List(Data.Int(3)))))),
Path("dir/baz") -> List(),
Path("tmp/out") -> List(Data.Obj(ListMap("0" -> Data.Str("ok")))),
Path("tmp/dup") -> List(Data.Obj(ListMap("4" -> Data.Str("ok")))),
Path("a file") -> List(Data.Obj(ListMap("1" -> Data.Str("ok")))),
Path("quoting") -> List(
Data.Obj(ListMap(
"a" -> Data.Str("\\"Hey\\""),
"b" -> Data.Str("a, b, c")))),
Path("empty") -> List())
val noBackends = NestedBackend(Map())
val backends1 = NestedBackend(ListMap(
DirNode("empty") -> Stub.backend(ListMap()),
DirNode("foo") -> Stub.backend(files1),
DirNode("non") -> NestedBackend(ListMap(
DirNode("root") -> NestedBackend(ListMap(
DirNode("mounting") -> Stub.backend(files1))))),
DirNode("badPath1") -> Stub.backend(ListMap()),
DirNode("badPath2") -> Stub.backend(ListMap())))
val config1 = Config(SDServerConfig(Some(port)), ListMap(
Path("/foo/") -> MongoDbConfig("mongodb://localhost/foo"),
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting")))
val corsMethods = header("Access-Control-Allow-Methods") andThen commaSep
val corsHeaders = header("Access-Control-Allow-Headers") andThen commaSep
"OPTIONS" should {
val optionsRoot = svc.OPTIONS
"advertise GET and POST for /query path" in {
withServer(noBackends, config1) {
val methods = Http(optionsRoot / "query" / "fs" / "" > corsMethods)
methods() must contain(allOf("GET", "POST"))
}
}
"advertise Destination header for /query path and method POST" in {
withServer(noBackends, config1) {
val headers = Http((optionsRoot / "query" / "fs" / "").setHeader("Access-Control-Request-Method", "POST") > corsHeaders)
headers() must contain(allOf("Destination"))
}
}
"advertise GET, PUT, POST, DELETE, and MOVE for /data path" in {
withServer(noBackends, config1) {
val methods = Http(optionsRoot / "data" / "fs" / "" > corsMethods)
methods() must contain(allOf("GET", "PUT", "POST", "DELETE", "MOVE"))
}
}
"advertise Destination header for /data path and method MOVE" in {
withServer(noBackends, config1) {
val headers = Http((optionsRoot / "data" / "fs" / "").setHeader("Access-Control-Request-Method", "MOVE") > corsHeaders)
headers() must contain(allOf("Destination"))
}
}
}
val jsonContentType = "application/json"
val preciseContentType = "application/ldjson; mode=\\"precise\\"; charset=UTF-8"
val readableContentType = "application/ldjson; mode=\\"readable\\"; charset=UTF-8"
val arrayContentType = "application/json; mode=\\"readable\\"; charset=UTF-8"
val csvContentType = "text/csv"
val charsetParam = "; charset=UTF-8"
val csvResponseContentType = csvContentType + "; columnDelimiter=\\",\\"; rowDelimiter=\\"\\\\\\\\r\\\\\\\\n\\"; quoteChar=\\"\\\\\\"\\"; escapeChar=\\"\\\\\\"\\"" + charsetParam
"/metadata/fs" should {
val root = svc / "metadata" / "fs" / "" // Note: trailing slash required
"return no filesystems" in {
withServer(noBackends, config1) {
val meta = Http(root OK asJson)
meta() must beRightDisjunction((jsonContentType, List(Json("children" := List[Json]()))))
}
}
"be 404 with missing backend" in {
withServer(noBackends, config1) {
val req = root / "missing"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
resp.getResponseBody must_== ""
}
}
"return empty for null fs" in {
withServer(backends1, config1) {
val path = root / "empty" / ""
val meta = Http(path OK asJson)
meta() must beRightDisjunction((jsonContentType, List(Json("children" := List[Json]()))))
}
}
"return empty for missing path" in {
withServer(backends1, config1) {
val path = root / "foo" / "baz" / ""
val meta = Http(path OK asJson)
meta() must beRightDisjunction((jsonContentType, List(Json("children" := List[Json]()))))
}
}
"find stubbed filesystems" in {
withServer(backends1, config1) {
val meta = Http(root OK asJson)
meta() must beRightDisjunction((
jsonContentType,
List(
Json("children" := List(
Json("name" := "badPath1", "type" := "mount"),
Json("name" := "badPath2", "type" := "mount"),
Json("name" := "empty", "type" := "mount"),
Json("name" := "foo", "type" := "mount"),
Json("name" := "non", "type" := "directory"))))))
}
}
"find stubbed files" in {
withServer(backends1, config1) {
val path = root / "foo" / ""
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
jsonContentType,
List(
Json("children" := List(
Json("name" := "a file", "type" := "file"),
Json("name" := "bar", "type" := "file"),
Json("name" := "dir", "type" := "directory"),
Json("name" := "empty", "type" := "file"),
Json("name" := "quoting", "type" := "file"),
Json("name" := "tmp", "type" := "directory"))))))
}
}
"find intermediate directory" in {
withServer(backends1, config1) {
val path = root / "non" / ""
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
jsonContentType,
List(
Json("children" := List(
Json("name" := "root", "type" := "directory"))))))
}
}
"find nested mount" in {
withServer(backends1, config1) {
val path = root / "non" / "root" / ""
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
jsonContentType,
List(
Json("children" := List(
Json("name" := "mounting", "type" := "mount"))))))
}
}
"be 404 for file with same name as existing directory (minus the trailing slash)" in {
withServer(backends1, config1) {
val req = root / "foo"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
resp.getResponseBody must_== ""
}
}
"be empty for file" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
jsonContentType,
List(
Json())))
}
}
"also contain CORS headers" in {
withServer(noBackends, config1) {
val methods = Http(root > corsMethods)
methods() must contain(allOf("GET", "POST"))
}
}
}
"/data/fs" should {
val root = svc / "data" / "fs" / ""
"GET" should {
"be 404 for missing backend" in {
withServer(noBackends, config1) {
val req = root / "missing"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing: doesn't exist")
}
}
"be 404 for missing file" in {
withServer(backends1, config1) {
val req = root / "empty" / "anything"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./anything: no backend")
}
}
"read entire file readably by default" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
readableContentType,
List(Json("a" := 1), Json("b" := 2), Json("c" := List(3)))))
}
}
"read empty file" in {
withServer(backends1, config1) {
val path = root / "foo" / "empty"
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
readableContentType,
List()))
}
}
"read entire file precisely when specified" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path.setHeader("Accept", "application/ldjson;mode=precise") OK asJson)
meta() must beRightDisjunction((
preciseContentType,
List(Json("a" := 1), Json("b" := 2), Json("c" := Json("$set" := List(3))))))
}
}
"read entire file precisely when specified in request-headers" in {
withServer(backends1, config1) {
val req = root / "foo" / "bar" <<? Map("request-headers" -> """{"Accept": "application/ldjson; mode=precise" }""")
val meta = Http(req OK asJson)
meta() must beRightDisjunction((
preciseContentType,
List(Json("a" := 1), Json("b" := 2), Json("c" := Json("$set" := List(3))))))
}
}
"read entire file precisely with complicated Accept" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path.setHeader("Accept", "application/ldjson;q=0.9;mode=readable,application/json;boundary=NL;mode=precise") OK asJson)
meta() must beRightDisjunction((
preciseContentType,
List(Json("a" := 1), Json("b" := 2), Json("c" := Json("$set" := List(3))))))
}
}
"read entire file in JSON array when specified" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").setHeader("Accept", "application/json")
val meta = Http(req OK as.String)
meta() must_==
"""[
|{ "a": 1 },
|{ "b": 2 },
|{ "c": [ 3 ] }
|]
|""".stripMargin.replace("\\n", "\\r\\n")
}
}
"read entire file with gzip encoding" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").setHeader("Accept-Encoding", "gzip")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 200
resp.getHeader("Content-Encoding") must_== "gzip"
}
}
"read entire file (with space)" in {
withServer(backends1, config1) {
val path = root / "foo" / "a file"
val meta = Http(path OK asJson)
meta() must beRightDisjunction((readableContentType, List(Json("1" := "ok"))))
}
}
"read entire file as CSV" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path.setHeader("Accept", csvContentType) OK asLines)
meta() must_==
csvResponseContentType ->
List("a,b,c[0]", "1,,", ",2,", ",,3")
}
}
"read entire file as CSV with quoting" in {
withServer(backends1, config1) {
val path = root / "foo" / "quoting"
val meta = Http(path.setHeader("Accept", csvContentType) OK asLines)
meta() must_==
csvResponseContentType ->
List("a,b", "\\"\\"\\"Hey\\"\\"\\",\\"a, b, c\\"")
}
}
"read entire file as CSV with alternative delimiters" in {
val mt = List(
csvContentType,
"columnDelimiter=\\"\\t\\"",
"rowDelimiter=\\";\\"",
"quoteChar=\\"'\\"", // NB: probably doesn't need quoting, but http4s renders it that way
"escapeChar=\\"\\\\\\\\\\"").mkString("; ")
withServer(backends1, config1) {
val req = (root / "foo" / "bar")
.setHeader("Accept", mt)
val meta = Http(req OK asLines)
meta() must_==
mt + charsetParam ->
List("a\\tb\\tc[0];1\\t\\t;\\t2\\t;\\t\\t3;")
}
}
"read entire file as CSV with standard delimiters specified" in {
val mt = List(
csvContentType,
"columnDelimiter=\\",\\"",
"rowDelimiter=\\"\\\\\\\\r\\\\\\\\n\\"",
"quoteChar=\\"\\"",
"escapeChar=\\"\\\\\\"\\"").mkString("; ")
withServer(backends1, config1) {
val req = (root / "foo" / "bar")
.setHeader("Accept", mt)
val meta = Http(req OK asLines)
meta() must_==
csvResponseContentType ->
List("a,b,c[0]", "1,,", ",2,", ",,3")
}
}
"read with disposition" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar")
.setHeader("Accept", "application/ldjson; disposition=\\"attachment; filename=data.json\\"")
val meta = Http(req)
val resp = meta()
resp.getHeader("Content-Disposition") must_== "attachment; filename=\\"data.json\\""
}
}
"read partial file with offset and limit" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar" <<? Map("offset" -> "1", "limit" -> "1")
val meta = Http(path OK asJson)
meta() must beRightDisjunction((
readableContentType,
List(Json("b" := 2))))
}
}
"download zipped directory" in {
withServer(backends1, config1) {
val req = (root / "foo" / "")
.setHeader("Accept", "text/csv; disposition=\\"attachment; filename=foo.zip\\"")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 200
resp.getHeader("Content-Type") must_== "application/zip"
resp.getHeader("Content-Disposition") must_== "attachment; filename=\\"foo.zip\\""
}
}
"be 400 with negative offset" in {
withServer(backends1, config1) {
val req = root / "foo" / "bar" <<? Map("offset" -> "-10", "limit" -> "10")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("invalid offset: -10 (must be >= 0)")
}
}
"be 400 with negative limit" in {
withServer(backends1, config1) {
val req = root / "foo" / "bar" <<? Map("offset" -> "10", "limit" -> "-10")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("invalid limit: -10 (must be >= 1)")
}
}
"be 400 with unparsable limit" in {
withServer(backends1, config1) {
val req = root / "foo" / "bar" <<? Map("limit" -> "a")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("???")
}
}.pendingUntilFixed("#773")
}
"PUT" should {
"be 404 for missing backend" in {
withServer(noBackends, config1) {
val req = (root / "missing").PUT.setBody("{\\"a\\": 1}\\n{\\"b\\": 2}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing: doesn't exist")
}
}
"be 400 with no body" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
}
}
"be 400 with invalid JSON" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
.setBody("{")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
}
}
"accept valid (Precise) JSON" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path.PUT.setBody("{\\"a\\": 1}\\n{\\"b\\": \\"12:34:56\\"}") OK as.String)
meta() must_== ""
history must_== List(
Action.Save(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Str("12:34:56"))))))
}
}
"accept valid (Readable) JSON" in {
withServer(backends1, config1) {
val path = (root / "foo" / "bar").setHeader("Content-Type", readableContentType)
val meta = Http(path.PUT.setBody("{\\"a\\": 1}\\n{\\"b\\": \\"12:34:56\\"}") OK as.String)
meta() must_== ""
history must_== List(
Action.Save(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Time(org.threeten.bp.LocalTime.parse("12:34:56")))))))
}
}
"accept valid (standard) CSV" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
.setHeader("Content-Type", csvContentType)
.setBody("a,b\\n1,\\n,12:34:56")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Save(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Time(org.threeten.bp.LocalTime.parse("12:34:56")))))))
}
}
"accept valid (weird) CSV" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
.setHeader("Content-Type", csvContentType)
.setBody("a|b\\n1|\\n|'[1|2|3]'\\n")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Save(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Str("[1|2|3]"))))))
}
}
"be 400 with empty CSV (no headers)" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
.setHeader("Content-Type", csvContentType)
.setBody("")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
history must_== Nil
}
}
"be 400 with broken CSV (after the tenth data line)" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").PUT
.setHeader("Content-Type", csvContentType)
.setBody("\\"a\\",\\"b\\"\\n1,2\\n3,4\\n5,6\\n7,8\\n9,10\\n11,12\\n13,14\\n15,16\\n17,18\\n19,20\\n\\",\\n") // NB: missing quote char _after_ the tenth data row
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
history must_== Nil
}
}
"be 400 with simulated path error" in {
withServer(backends1, config1) {
val req = (root / "foo" / "pathError").PUT
.setBody("{\\"a\\": 1}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("simulated (client) error")
}
}
"be 500 with simulated error on a particular value" in {
withServer(backends1, config1) {
val req = (root / "foo" / "valueError").PUT
.setBody("{\\"a\\": 1}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 500
errorFromBody(resp) must_== \\/-("simulated (value) error; value: Str()")
}
}
}
"POST" should {
"be 404 for missing backend" in {
withServer(noBackends, config1) {
val req = (root / "missing").POST
.setBody("{\\"a\\": 1}\\n{\\"b\\": 2}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing: doesn't exist")
}
}
"be 400 with no body" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
}
}
"be 400 with invalid JSON" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setBody("{")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
}
}
"produce two errors with partially invalid JSON" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST.setBody(
"""{"a": 1}
|"unmatched
|{"b": 2}
|}
|{"c": 3}""".stripMargin)
val meta = Http(req > asJson)
meta() must beRightDisjunction { (resp: (String, List[Json])) =>
val (_, json) = resp
json.length == 1 &&
(for {
obj <- json.head.obj
errors <- obj("details")
eArr <- errors.array
} yield eArr.length == 2).getOrElse(false)
}
}
}
"accept valid (Precise) JSON" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setBody("{\\"a\\": 1}\\n{\\"b\\": \\"12:34:56\\"}")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Append(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Str("12:34:56"))))))
}
}
"accept valid (Readable) JSON" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setHeader("Content-Type", readableContentType)
.setBody("{\\"a\\": 1}\\n{\\"b\\": \\"12:34:56\\"}")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Append(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Time(org.threeten.bp.LocalTime.parse("12:34:56")))))))
}
}
"accept valid (standard) CSV" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setHeader("Content-Type", csvContentType)
.setBody("a,b\\n1,\\n,12:34:56")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Append(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Time(org.threeten.bp.LocalTime.parse("12:34:56")))))))
}
}
"accept valid (weird) CSV" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setHeader("Content-Type", csvContentType)
.setBody("a|b\\n1|\\n|'[1|2|3]'")
val meta = Http(req OK as.String)
meta() must_== ""
history must_== List(
Action.Append(
Path("./bar"),
List(
Data.Obj(ListMap("a" -> Data.Int(1))),
Data.Obj(ListMap("b" -> Data.Str("[1|2|3]"))))))
}
}
"be 400 with empty CSV (no headers)" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setHeader("Content-Type", csvContentType)
.setBody("")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
history must_== Nil
}
}
"be 400 with broken CSV (after the tenth data line)" in {
withServer(backends1, config1) {
val req = (root / "foo" / "bar").POST
.setHeader("Content-Type", csvContentType)
.setBody("\\"a\\",\\"b\\"\\n1,2\\n3,4\\n5,6\\n7,8\\n9,10\\n11,12\\n13,14\\n15,16\\n17,18\\n19,20\\n\\",\\n") // NB: missing quote char _after_ the tenth data row
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
history must_== Nil
}
}
"be 400 with simulated path error" in {
withServer(backends1, config1) {
val req = (root / "foo" / "pathError").POST
.setBody("{\\"a\\": 1}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("simulated (client) error")
}
}
"be 500 with simulated error on a particular value" in {
withServer(backends1, config1) {
val req = (root / "foo" / "valueError").POST
.setBody("{\\"a\\": 1}")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 500
errorFromBody(resp) must_== \\/-("some uploaded value(s) could not be processed")
}
}
}
"MOVE" should {
val moveRoot = root.setMethod("MOVE")
"be 400 for missing src backend" in {
withServer(noBackends, config1) {
val req = moveRoot / "foo"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The 'Destination' header must be specified")
}
}
"be 404 for missing source file" in {
withServer(backends1, config1) {
val req = (moveRoot / "missing" / "a" ).setHeader("Destination", "/foo/bar")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing/a: doesn't exist")
}
}
"be 404 for missing dst backend" in {
withServer(backends1, config1) {
val req = (moveRoot / "foo" / "bar").setHeader("Destination", "/missing/a")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing/a: doesn't exist")
}
}
"be 201 for file" in {
withServer(backends1, config1) {
val req = (moveRoot / "foo" / "bar").setHeader("Destination", "/foo/baz")
val meta = Http(req > code)
meta() must_== 201
}
}
"be 201 for dir" in {
withServer(backends1, config1) {
val req = (moveRoot / "foo" / "dir" / "").setHeader("Destination", "/foo/dir2/")
val meta = Http(req > code)
meta() must_== 201
}
}
"be 501 for src and dst not in same backend" in {
withServer(backends1, config1) {
val req = (moveRoot / "foo" / "bar").setHeader("Destination", "/empty/a")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 501
errorFromBody(resp) must_== \\/-("src and dst path not in the same backend")
}
}
}
"DELETE" should {
"be 404 for missing backend" in {
withServer(noBackends, config1) {
val req = (root / "missing").DELETE
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("./missing: doesn't exist")
}
}
"be 200 with existing file" in {
withServer(backends1, config1) {
val path = root / "foo" / "bar"
val meta = Http(path.DELETE > code)
meta() must_== 200
}
}
"be 200 with existing dir" in {
withServer(backends1, config1) {
val path = root / "foo" / "dir" / ""
val meta = Http(path.DELETE > code)
meta() must_== 200
}
}
"be 200 with missing file (idempotency)" in {
withServer(backends1, config1) {
val path = root / "foo" / "missing"
val meta = Http(path.DELETE > code)
meta() must_== 200
}
}
"be 200 with missing dir (idempotency)" in {
withServer(backends1, config1) {
val path = root / "foo" / "missingDir" / ""
val meta = Http(path.DELETE > code)
meta() must_== 200
}
}
}
}
"/query/fs" should {
val root = svc / "query" / "fs" / ""
"GET" should {
"be 404 for missing backend" in {
withServer(noBackends, config1) {
val req = root / "missing" <<? Map("q" -> "select * from bar")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("???")
}
}.pendingUntilFixed("#771")
"be 400 for missing query" in {
withServer(backends1, config1) {
val req = root / "foo" / ""
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The request must contain a query")
}
}
"execute simple query" in {
withServer(backends1, config1) {
val path = root / "foo" / "" <<? Map("q" -> "select * from bar")
val result = Http(path OK asJson)
result() must beRightDisjunction((
readableContentType,
List(Json("0" := "ok"))))
}
}
"be 400 for query error" in {
withServer(backends1, config1) {
val req = root / "foo" / "" <<? Map("q" -> "select date where")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("keyword 'case' expected; `where'")
}
}
}
"POST" should {
"be 404 with missing backend" in {
withServer(noBackends, config1) {
val req = (root / "missing" / "").POST.setBody("select * from bar").setHeader("Destination", "/tmp/gen0")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("???")
}
}.pendingUntilFixed("#771")
"be 400 with missing query" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST.setHeader("Destination", "/foo/tmp/gen0")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The body of the POST must contain a query")
}
}
"be 400 with missing Destination header" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST.setBody("select * from bar")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The 'Destination' header must be specified")
}
}
"execute simple query" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST.setBody("select * from bar").setHeader("Destination", "/foo/tmp/gen0")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 200
(for {
json <- Parse.parse(resp.getResponseBody).toOption
out <- json.field("out")
outStr <- out.string
} yield outStr) must beSome("/foo/tmp/gen0")
}
}
"be 400 for query error" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST
.setBody("select date where")
.setHeader("Destination", "tmp0")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("keyword 'case' expected; `where'")
}
}
}
}
"/compile/fs" should {
val root = svc / "compile" / "fs" / ""
"GET" should {
"be 404 with missing backend" in {
withServer(noBackends, config1) {
val req = root / "missing" / "" <<? Map("q" -> "select * from bar")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("???")
}
}.pendingUntilFixed("#771")
"be 400 with missing query" in {
withServer(backends1, config1) {
val req = root / "foo" / ""
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The request must contain a query")
}
}
"plan simple query" in {
withServer(backends1, config1) {
val path = root / "foo" / "" <<? Map("q" -> "select * from bar")
val result = Http(path OK as.String)
result() must_== "Stub\\nPlan(logical: Squash(Read(Path(\\"bar\\"))))"
}
}
"be 400 for query error" in {
withServer(backends1, config1) {
val req = root / "foo" / "" <<? Map("q" -> "select date where")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("keyword 'case' expected; `where'")
}
}
}
"POST" should {
"be 404 with missing backend" in {
withServer(noBackends, config1) {
val req = (root / "missing" / "").POST.setBody("select * from bar")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("???")
}
}.pendingUntilFixed("#771")
"be 400 with missing query" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The body of the POST must contain a query")
}
}
"plan simple query" in {
withServer(backends1, config1) {
val path = (root / "foo" / "").POST.setBody("select * from bar")
val result = Http(path OK as.String)
result() must_== "Stub\\nPlan(logical: Squash(Read(Path(\\"bar\\"))))"
}
}
"be 400 for query error" in {
withServer(backends1, config1) {
val req = (root / "foo" / "").POST.setBody("select date where")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("keyword 'case' expected; `where'")
}
}
}
}
"/mount/fs" should {
val root = svc / "mount" / "fs" / ""
"GET" should {
"be 404 with missing mount" in {
withServer(noBackends, config1) {
val req = root / "missing" / ""
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
resp.getResponseBody must_== "There is no mount point at /missing/"
}
}
"succeed with correct path" in {
withServer(noBackends, config1) {
val req = root / "foo" / ""
val result = Http(req OK asJson)
result() must beRightDisjunction((
jsonContentType,
List(Json("mongodb" := Json("connectionUri" := "mongodb://localhost/foo")))))
}
}
"be 404 with missing trailing slash" in {
withServer(noBackends, config1) {
val req = root / "foo"
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
resp.getResponseBody must_== "There is no mount point at /foo"
}
}
}
"MOVE" should {
"succeed with valid paths" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "")
.setMethod("MOVE")
.setHeader("Destination", "/foo2/")
val result = Http(req OK as.String)
result() must_== "moved /foo/ to /foo2/"
history must_== List(Action.Reload(Config(SDServerConfig(Some(port)), Map(
Path("/foo2/") -> MongoDbConfig("mongodb://localhost/foo"),
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting")))))
}
}
"be 404 with missing source" in {
withServer(noBackends, config1) {
val req = (root / "missing" / "")
.setMethod("MOVE")
.setHeader("Destination", "/foo/")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 404
errorFromBody(resp) must_== \\/-("There is no mount point at /missing/")
history must_== Nil
}
}
"be 400 with missing destination" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "")
.setMethod("MOVE")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The 'Destination' header must be specified")
history must_== Nil
}
}
"be 400 with relative path" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "")
.setMethod("MOVE")
.setHeader("Destination", "foo2/")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("Not an absolute path: ./foo2/")
history must_== Nil
}
}
"be 400 with non-directory path for MongoDB mount" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "")
.setMethod("MOVE")
.setHeader("Destination", "/foo2")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("Not a directory path: /foo2")
history must_== Nil
}
}
}
"POST" should {
"succeed with valid MongoDB config" in {
withServer(noBackends, config1) {
val req = root.POST
.setHeader("X-File-Name", "local/")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/test" } }""")
val result = Http(req OK as.String)
result() must_== "added /local/"
history must_== List(Action.Reload(Config(SDServerConfig(Some(port)), Map(
Path("/foo/") -> MongoDbConfig("mongodb://localhost/foo"),
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting"),
Path("/local/") -> MongoDbConfig("mongodb://localhost/test")))))
}
}
"be 409 with existing path" in {
withServer(noBackends, config1) {
val req = root.POST
.setHeader("X-File-Name", "foo/")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/foo2" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 409
errorFromBody(resp) must_== \\/-("Can't add a mount point above the existing mount point at /foo/")
history must_== Nil
}
}
"be 409 for conflicting mount above" in {
withServer (backends1, config1) {
val req = (root / "non" / "").POST
.setHeader("X-File-Name", "root/")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/root" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 409
errorFromBody(resp) must_== \\/-("Can't add a mount point above the existing mount point at /non/root/mounting/")
history must_== Nil
}
}
"be 409 for conflicting mount below" in {
withServer (backends1, config1) {
val req = (root / "foo" / "").POST
.setHeader("X-File-Name", "nope/")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/root" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 409
errorFromBody(resp) must_== \\/-("Can't add a mount point below the existing mount point at /foo/")
history must_== Nil
}
}
"be 400 with missing file-name" in {
withServer(noBackends, config1) {
val req = root.POST
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/test" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("The 'X-File-Name' header must be specified")
history must_== Nil
}
}
"be 400 with invalid MongoDB path (no trailing slash)" in {
withServer(noBackends, config1) {
val req = root.POST
.setHeader("X-File-Name", "local")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/test" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("Not a directory path: /local")
history must_== Nil
}
}
"be 400 with invalid JSON" in {
withServer(noBackends, config1) {
val req = root.POST
.setHeader("X-File-Name", "local/")
.setBody("""{ "mongodb":""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("input error: JSON terminates unexpectedly.")
history must_== Nil
}
}
"be 400 with invalid MongoDB URI (extra slash)" in {
withServer(noBackends, config1) {
val req = root.POST
.setHeader("X-File-Name", "local/")
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost:8080//test" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("invalid connection URI: mongodb://localhost:8080//test")
history must_== Nil
}
}
}
"PUT" should {
"succeed with valid MongoDB config" in {
withServer(noBackends, config1) {
val req = (root / "local" / "").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/test" } }""")
val result = Http(req OK as.String)
result() must_== "added /local/"
history must_== List(Action.Reload(Config(SDServerConfig(Some(port)), Map(
Path("/foo/") -> MongoDbConfig("mongodb://localhost/foo"),
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting"),
Path("/local/") -> MongoDbConfig("mongodb://localhost/test")))))
}
}
"succeed with valid, overwritten MongoDB config" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/foo2" } }""")
val result = Http(req OK as.String)
result() must_== "updated /foo/"
history must_== List(Action.Reload(Config(SDServerConfig(Some(port)), Map(
Path("/foo/") -> MongoDbConfig("mongodb://localhost/foo2"),
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting")))))
}
}
"be 409 for conflicting mount above" in {
withServer (backends1, config1) {
val req = (root / "non" / "root" / "").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/root" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 409
errorFromBody(resp) must_== \\/-("Can't add a mount point above the existing mount point at /non/root/mounting/")
history must_== Nil
}
}
"be 409 for conflicting mount below" in {
withServer (backends1, config1) {
val req = (root / "foo" / "nope" / "").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/root" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 409
errorFromBody(resp) must_== \\/-("Can't add a mount point below the existing mount point at /foo/")
history must_== Nil
}
}
"be 400 with invalid MongoDB path (no trailing slash)" in {
withServer(noBackends, config1) {
val req = (root / "local").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost/test" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("Not a directory path: /local")
history must_== Nil
}
}
"be 400 with invalid JSON" in {
withServer(noBackends, config1) {
val req = (root / "local" / "").PUT
.setBody("""{ "mongodb":""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("input error: JSON terminates unexpectedly.")
history must_== Nil
}
}
"be 400 with invalid MongoDB URI (extra slash)" in {
withServer(noBackends, config1) {
val req = (root / "local" / "").PUT
.setBody("""{ "mongodb": { "connectionUri": "mongodb://localhost:8080//test" } }""")
val meta = Http(req)
val resp = meta()
resp.getStatusCode must_== 400
errorFromBody(resp) must_== \\/-("invalid connection URI: mongodb://localhost:8080//test")
history must_== Nil
}
}
}
"DELETE" should {
"succeed with correct path" in {
withServer(noBackends, config1) {
val req = (root / "foo" / "").DELETE
val result = Http(req OK as.String)
result() must_== "deleted /foo/"
history must_== List(Action.Reload(Config(SDServerConfig(Some(port)), Map(
Path("/non/root/mounting/") -> MongoDbConfig("mongodb://localhost/mounting")))))
}
}
"succeed with missing path (no action)" in {
withServer(noBackends, config1) {
val req = (root / "missing" / "").DELETE
val result = Http(req OK as.String)
result() must_== ""
history must_== Nil
}
}
}
}
"/server" should {
val root = svc / "server"
"be capable of providing it's name and version" in {
withServer(noBackends, config1) {
val req = (root / "info").GET
val result = Http(req OK as.String)
result() must_== versionAndNameInfo.toString
history must_== Nil
}
}
}
step {
// Explicitly close dispatch's executor, since it no longer detects running in SBT properly.
Http.shutdown
}
}
class ResponseFormatSpecs extends Specification {
import org.http4s._, QValue._
import org.http4s.headers.{Accept}
import ResponseFormat._
"fromAccept" should {
"be Readable by default" in {
fromAccept(None) must_== JsonStream.Readable
}
"choose precise" in {
val accept = Accept(
new MediaType("application", "ldjson").withExtensions(Map("mode" -> "precise")))
fromAccept(Some(accept)) must_== JsonStream.Precise
}
"choose streaming via boundary extension" in {
val accept = Accept(
new MediaType("application", "json").withExtensions(Map("boundary" -> "NL")))
fromAccept(Some(accept)) must_== JsonStream.Readable
}
"choose precise list" in {
val accept = Accept(
new MediaType("application", "json").withExtensions(Map("mode" -> "precise")))
fromAccept(Some(accept)) must_== JsonArray.Precise
}
"choose streaming and precise via extensions" in {
val accept = Accept(
new MediaType("application", "json").withExtensions(Map("mode" -> "precise", "boundary" -> "NL")))
fromAccept(Some(accept)) must_== JsonStream.Precise
}
"choose CSV" in {
val accept = Accept(
new MediaType("text", "csv"))
fromAccept(Some(accept)) must_== Csv.Default
}
"choose CSV with custom format" in {
val accept = Accept(
new MediaType("text", "csv").withExtensions(Map(
"columnDelimiter" -> "\\t",
"rowDelimiter" -> ";",
"quoteChar" -> "'",
"escapeChar" -> "\\\\")))
fromAccept(Some(accept)) must_== Csv('\\t', ";", '\\'', '\\\\', None)
}
"choose CSV over JSON" in {
val accept = Accept(
new MediaType("text", "csv").withQValue(q(1.0)),
new MediaType("application", "ldjson").withQValue(q(0.9)))
fromAccept(Some(accept)) must_== Csv.Default
}
"choose JSON over CSV" in {
val accept = Accept(
new MediaType("text", "csv").withQValue(q(0.9)),
new MediaType("application", "ldjson"))
fromAccept(Some(accept)) must_== JsonStream.Readable
}
}
"Csv.escapeNewlines" should {
"""escape \\r\\n""" in {
Csv.escapeNewlines("\\r\\n") must_== """\\r\\n"""
}
"""not affect \\"""" in {
Csv.escapeNewlines("\\\\\\"") must_== "\\\\\\""
}
}
"Csv.unescapeNewlines" should {
"""unescape \\r\\n""" in {
Csv.unescapeNewlines("""\\r\\n""") must_== "\\r\\n"
}
"""not affect \\"""" in {
Csv.escapeNewlines("""\\"""") must_== """\\""""
}
}
}
class HeaderParamSpecs extends Specification {
import org.http4s.util._
import HeaderParam._
"parse" should {
"parse one" in {
parse("""{ "Accept": "text/csv" }""") must_==
\\/-(Map(CaseInsensitiveString("Accept") -> List("text/csv")))
}
"parse mulitple values" in {
parse("""{ "Foo": [ "bar", "baz" ] }""") must_==
\\/-(Map(CaseInsensitiveString("Foo") -> List("bar", "baz")))
}
"fail with invalid json" in {
parse("""{""") must_==
-\\/("parse error (JSON terminates unexpectedly.)")
}
"fail with non-object" in {
parse("""0""") must_==
-\\/("expected a JSON object; found: 0")
}
"fail with non-string/array value" in {
parse("""{ "Foo": 0 }""") must_==
-\\/("expected a string or array of strings; found: 0")
}
"fail with non-string value in array" in {
parse("""{ "Foo": [ 0 ] }""") must_==
-\\/("expected string in array; found: 0")
}
}
"rewrite" should {
import org.http4s._
import org.http4s.headers._
"overwrite conflicting header" in {
val headers = rewrite(
Headers(`Accept`(MediaType.`text/csv`)),
Map(CaseInsensitiveString("accept") -> List("application/json")))
headers.get(`Accept`) must beSome(`Accept`(MediaType.`application/json`))
}
"add non-conflicting header" in {
val headers = rewrite(
Headers(`Accept`(MediaType.`text/csv`)),
Map(CaseInsensitiveString("user-agent") -> List("some_phone_browser/0.0.1")))
headers.get(`Accept`) must beSome(`Accept`(MediaType.`text/csv`))
headers.get(`User-Agent`) must beSome(`User-Agent`(AgentProduct("some_phone_browser", Some("0.0.1"))))
}
}
}
|
wemrysi/quasar
|
web/src/test/scala/slamdata/engine/api/fs.scala
|
Scala
|
apache-2.0
| 58,541
|
/*
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.libs.json
import java.io.InputStream
import play.api.libs.iteratee.Execution.Implicits.defaultExecutionContext
import play.api.libs.json.jackson.JacksonJson
/**
* Helper functions to handle JsValues.
*/
object Json {
/**
 * Parse a String representing a JSON document, and return it as a JsValue.
*
* @param input a String to parse
* @return the JsValue representing the string
*/
def parse(input: String): JsValue = JacksonJson.parseJsValue(input)
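  // Illustrative sketch (not part of the original file): parsing a string and
  // reading fields back out, assuming the usual `\` / `as` JsValue lookup API.
  //
  // {{{
  // val js: JsValue = Json.parse("""{ "name": "alice", "age": 42 }""")
  // (js \ "name").as[String]   // "alice"
  // (js \ "age").as[Int]       // 42
  // }}}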
/**
 * Parse an InputStream representing a JSON document, and return it as a JsValue.
 *
 * @param input an InputStream to parse
* @return the JsValue representing the InputStream
*/
// def parse(input: InputStream): JsValue = JacksonJson.parseJsValue(input)
/**
 * Parse a byte array representing a JSON document, and return it as a JsValue.
*
* The character encoding used will be automatically detected as UTF-8, UTF-16 or UTF-32, as per the heuristics in
* RFC-4627.
*
* @param input a byte array to parse
* @return the JsValue representing the byte array
*/
def parse(input: Array[Byte]): JsValue = JacksonJson.parseJsValue(input)
/**
* Convert a JsValue to its string representation.
*
* {{{
* scala> Json.stringify(Json.obj(
* "field1" -> Json.obj(
* "field11" -> "value11",
* "field12" -> Json.arr("alpha", 123L)
* )
* ))
* res0: String = {"field1":{"field11":"value11","field12":["alpha",123]}}
*
* scala> Json.stringify(res0)
* res1: String = {"field1":{"field11":"value11","field12":["alpha",123]}}
* }}}
*
* @param json the JsValue to convert
* @return a String with the json representation
*/
def stringify(json: JsValue): String = JacksonJson.generateFromJsValue(json)
// We use unicode \u005C for a backslash in comments, because Scala will replace unicode escapes during lexing
//anywhere in the program.
/**
* Convert a JsValue to its string representation, escaping all non-ascii characters using \u005CuXXXX syntax.
*
* This is particularly useful when the output JSON will be executed as javascript, since JSON is not a strict
* subset of javascript
* (see <a href="http://timelessrepo.com/json-isnt-a-javascript-subset">JSON: The JavaScript subset that isn't</a>).
*
* {{{
* scala> Json.asciiStringify(JsString("some\u005Cu2028text\u005Cu2029"))
* res0: String = "some\u005Cu2028text\u005Cu2029"
*
* scala> Json.stringify(JsString("some\u005Cu2028text\u005Cu2029"))
* res1: String = "sometext"
* }}}
*
* @param json the JsValue to convert
* @return a String with the json representation with all non-ascii characters escaped.
*/
def asciiStringify(json: JsValue): String = JacksonJson.generateFromJsValue(json, true)
/**
* Convert a JsValue to its pretty string representation using default Jackson
* pretty printer (a line feed after each field and 2-space indentation).
*
* {{{
* scala> Json.stringify(Json.obj(
* "field1" -> Json.obj(
* "field11" -> "value11",
* "field12" -> Json.arr("alpha", 123L)
* )
* ))
* res0: String = {"field1":{"field11":"value11","field12":["alpha",123]}}
*
* scala> Json.prettyPrint(res0)
* res1: String =
* {
* "field1" : {
* "field11" : "value11",
* "field12" : [ "alpha", 123 ]
* }
* }
* }}}
*
* @param json the JsValue to convert
* @return a String with the json representation
*/
def prettyPrint(json: JsValue): String = JacksonJson.prettyPrint(json)
/**
* Provided a Writes implicit for its type is available, convert any object into a JsValue.
*
* @param o Value to convert in Json.
*/
def toJson[T](o: T)(implicit tjs: Writes[T]): JsValue = tjs.writes(o)
/**
* Provided a Reads implicit for that type is available, convert a JsValue to any type.
*
* @param json Json value to transform as an instance of T.
*/
def fromJson[T](json: JsValue)(implicit fjs: Reads[T]): JsResult[T] = fjs.reads(json)
/**
* The following trait enables the simplified JSON syntax:
*
* Example:
* {{{
* JsObject(Seq(
* "key1" -> JsString("value"),
* "key2" -> JsNumber(123),
* "key3" -> JsObject(Seq("key31" -> JsString("value31")))
* )) == Json.obj( "key1" -> "value", "key2" -> 123, "key3" -> obj("key31" -> "value31"))
*
* JsArray(JsString("value"), JsNumber(123), JsBoolean(true)) == Json.arr( "value", 123, true )
* }}}
*
* There is an implicit conversion from any type with a Json Writes to JsValueWrapper,
* an empty trait that shouldn't lead to unexpected implicit conversions.
*
* Note that because `JsValueWrapper` extends `NotNull`,
* `null` or `None` will result in a compile error: use JsNull instead.
*/
sealed trait JsValueWrapper extends NotNull
private case class JsValueWrapperImpl(field: JsValue) extends JsValueWrapper
import scala.language.implicitConversions
implicit def toJsFieldJsValueWrapper[T](field: T)(implicit w: Writes[T]): JsValueWrapper = JsValueWrapperImpl(w.writes(field))
def obj(fields: (String, JsValueWrapper)*): JsObject = JsObject(fields.map(f => (f._1, f._2.asInstanceOf[JsValueWrapperImpl].field)))
def arr(fields: JsValueWrapper*): JsArray = JsArray(fields.map(_.asInstanceOf[JsValueWrapperImpl].field))
import play.api.libs.iteratee.Enumeratee
/**
* Transform a stream of A to a stream of JsValue
* {{{
* val fooStream: Enumerator[Foo] = ???
* val jsonStream: Enumerator[JsValue] = fooStream &> Json.toJson
* }}}
*/
def toJson[A: Writes]: Enumeratee[A, JsValue] = Enumeratee.map[A](Json.toJson(_))
/**
* Transform a stream of JsValue to a stream of A, keeping only successful results
* {{{
* val jsonStream: Enumerator[JsValue] = ???
* val fooStream: Enumerator[Foo] = jsonStream &> Json.fromJson
* }}}
*/
def fromJson[A: Reads]: Enumeratee[JsValue, A] =
Enumeratee.map[JsValue]((json: JsValue) => Json.fromJson(json)) ><> Enumeratee.collect[JsResult[A]] { case JsSuccess(value, _) => value }
/**
* Experimental JSON extensions to replace asProductXXX by generating
* Reads[T]/Writes[T]/Format[T] from case class at COMPILE time using
* new Scala 2.10 macro & reflection features.
*/
import scala.reflect.macros.Context
import language.experimental.macros
/**
* Creates a Reads[T] by resolving case class fields & required implicits at COMPILE-time.
*
* If any required implicit is missing, the compiler fails with a corresponding error.
* {{{
* import play.api.libs.json.Json
*
* case class User(name: String, age: Int)
*
* implicit val userReads = Json.reads[User]
* // macro-compiler replaces Json.reads[User] by injecting into compile chain
* // the exact code you would write yourself. This is strictly equivalent to:
* implicit val userReads = (
* (__ \ 'name).read[String] and
* (__ \ 'age).read[Int]
* )(User)
* }}}
*/
def reads[A] = macro JsMacroImpl.readsImpl[A]
/**
* Creates a Writes[T] by resolving case class fields & required implicits at COMPILE-time.
*
* If any required implicit is missing, the compiler fails with a corresponding error.
* {{{
* import play.api.libs.json.Json
*
* case class User(name: String, age: Int)
*
* implicit val userWrites = Json.writes[User]
* // macro-compiler replaces Json.writes[User] by injecting into compile chain
* // the exact code you would write yourself. This is strictly equivalent to:
* implicit val userWrites = (
* (__ \ 'name).write[String] and
* (__ \ 'age).write[Int]
* )(unlift(User.unapply))
* }}}
*/
def writes[A] = macro JsMacroImpl.writesImpl[A]
/**
* Creates a Format[T] by resolving case class fields & required implicits at COMPILE-time
*
* If any required implicit is missing, the compiler fails with a corresponding error.
* {{{
* import play.api.libs.json.Json
*
* case class User(name: String, age: Int)
*
* implicit val userWrites = Json.format[User]
* // macro-compiler replaces Json.format[User] by injecting into compile chain
* // the exact code you would write yourself. This is strictly equivalent to:
* implicit val userWrites = (
* (__ \ 'name).format[String] and
* (__ \ 'age).format[Int]
* )(User.apply, unlift(User.unapply))
* }}}
*/
def format[A] = macro JsMacroImpl.formatImpl[A]
}
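// Editor's sketch, not part of the library source above: a minimal round trip through the
// API documented in `object Json`, assuming play-json is on the classpath. The `User` case
// class and the values are purely illustrative.
object JsonUsageSketch {
  import play.api.libs.json._

  case class User(name: String, age: Int)
  implicit val userFormat: Format[User] = Json.format[User] // macro-generated Reads + Writes

  def main(args: Array[String]): Unit = {
    val js: JsValue = Json.obj("name" -> "Ada", "age" -> 36) // build with the obj/arr DSL
    println(Json.stringify(js))                              // {"name":"Ada","age":36}
    println(Json.prettyPrint(js))                            // indented form
    println(Json.fromJson[User](js))                         // JsSuccess(User(Ada,36),...)
    println(Json.toJson(User("Bob", 41)))                    // {"name":"Bob","age":41}
    println(Json.parse("""{"name":"Eve","age":29}""") \ "name")
  }
}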
|
jeantil/play-json-extra
|
play-json-extra/js/src/main/scala/play/api/libs/json/Json.scala
|
Scala
|
apache-2.0
| 8,623
|
package com.aristocrat.mandrill.requests.Urls
import com.aristocrat.mandrill.requests.MandrillRequest
case class TrackingDomains(key: String) extends MandrillRequest
|
aristocratic/mandrill
|
src/main/scala/com/aristocrat/mandrill/requests/Urls/TrackingDomains.scala
|
Scala
|
mit
| 168
|
package se.meldrum.machine
import se.meldrum.machine.utils.Config
trait PsqlTestConfig extends Config {
val host = config.getString("postgres-test.host")
val port = config.getInt("postgres-test.port")
val dbName = config.getString("postgres-test.dbname")
val user = config.getString("postgres-test.user")
val password = config.getString("postgres-test.password")
val url = config.getString("postgres-test.url")
}
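// Editor's sketch, not part of the project above: PsqlTestConfig resolves the
// "postgres-test.*" keys, so the test configuration must provide one entry per field.
// The fragment below shows a hypothetical configuration (all values are illustrative)
// being parsed with Typesafe Config, which is what the `config` member of the mixed-in
// `Config` trait is assumed to wrap.
object PsqlTestConfigSketch {
  import com.typesafe.config.ConfigFactory

  def main(args: Array[String]): Unit = {
    val config = ConfigFactory.parseString(
      """postgres-test {
        |  host = "localhost"
        |  port = 5432
        |  dbname = "machine_test"
        |  user = "machine"
        |  password = "secret"
        |  url = "jdbc:postgresql://localhost:5432/machine_test"
        |}""".stripMargin)
    println(config.getString("postgres-test.url")) // jdbc:postgresql://localhost:5432/machine_test
  }
}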
|
Max-Meldrum/machine
|
src/test/scala/se/meldrum/machine/PsqlTestConfig.scala
|
Scala
|
apache-2.0
| 428
|
package es.weso.computex
import org.scalatest._
import org.scalatest.matchers.ShouldMatchers
import com.typesafe.config.ConfigFactory
import com.typesafe.config.Config
import es.weso.utils.JenaUtils._
import es.weso.utils.JenaUtils
import CexUtils._
import PREFIXES._
import scala.io.Source
import scala.util.parsing.json._
class SimpleNormalizationSuite extends FunSpec
with ShouldMatchers
with ComputeUtilsSuite {
val year = 2012
val model = JenaUtils.parseFromURI("file:examples/example-imputed.ttl")
val expanded = AddComputations.addComputations(model,year)
def ValidateNormalizedValues(country: String) : Unit = {
describe("Validate all normalized values for " + country) {
info("Validating " + country)
val map = getTableValues("simple_NormalizedValues"+country)
map.keys.foreach {
k => {
it("Should validate " + k + " for country " + country) {
val yearToCheck = if (isPrimary(expanded,k)) year
else year - 1
map(k) match {
case None => findValueCompType(expanded,k,yearToCheck,country,cex_Normalize) should be(None)
case Some(d) => matchDoublesCompType(expanded,k,yearToCheck,country,cex_Normalize,d)
}
}
}
}
}
}
ValidateNormalizedValues("ESP")
}
|
weso/wiCompute
|
src/test/scala/es/weso/computex/SimpleNormalizationSuite.scala
|
Scala
|
mit
| 1,332
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
import org.scalactic.TripleEquals
import org.scalactic.TypeCheckedTripleEquals
import org.scalactic.ConversionCheckedTripleEquals
import SharedHelpers._
import Matchers._
import exceptions.TestFailedException
class ShouldCollectedTripleEqualsSpec extends Spec with NonImplicitAssertions {
case class Super(size: Int)
class Sub(sz: Int) extends Super(sz)
val super1: Super = new Super(1)
val sub1: Sub = new Sub(1)
val super2: Super = new Super(2)
val sub2: Sub = new Sub(2)
val nullSuper: Super = null
object `the custom equality should === (operator` {
object `with TripleEquals` {
def `should compare anything with anything` {
new TripleEquals {
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(1, 1, 1)) should === (1L)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
all (List("1", "1", "1")) should !== (1)
intercept[TestFailedException] { all (List("1", "1", "1")) should === (1) }
all (List(1, 1, 1)) should !== ("1")
intercept[TestFailedException] { all (List(1, 1, 1)) should === ("1") }
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
val caught1 = intercept[TestFailedException] { all (List(super1, super1, super1)) should === (null) }
caught1.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, Super(1) did not equal null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(Super(1), Super(1), Super(1))"))
all (List(super1, super1, super1)) should !== (null)
all (List(nullSuper, nullSuper, nullSuper)) should === (null)
val caught2 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should !== (null) }
caught2.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null equaled null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
val caught3 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should === (super1) }
caught3.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null did not equal Super(1) (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
all (List(nullSuper, nullSuper, nullSuper)) should !== (super1)
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (Map("I" -> 1, "II" -> 2))
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (Map("1" -> 1, "2" -> 2))
intercept[TestFailedException] { all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (7) }
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (7)
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (Set(1, 2, 3))
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (Set(2, 3, 4))
intercept[TestFailedException] { all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (7) }
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (7)
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should === (List(1, 2, 3))
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should !== (List(2, 3, 4))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Array(1, 2, 3))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Array(2, 3, 4))
all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should === (Array(1, 2, 3))
all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should !== (Array(2, 3, 4))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Seq(1, 2, 3))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Seq(2, 3, 4))
all (List((), (),())) should === (())
all (List((), (),())) should !== (7)
}
}
def `should be overridable with TypeCheckedTripleEquals locally when TripleEquals imported` {
object O extends TripleEquals
import O._
new TypeCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
// The rest should not compile
// all (List(1, 1, 1)) should === (1L)
// all (List(1L, 1L, 1L)) should === (1)
// all (List(1, 1, 1)) should !== (1L)
// all (List(1L, 1L, 1L)) should !== (1)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
def `should be overridable with TypeCheckedTripleEquals locally when TripleEquals mixed in` {
object O extends TripleEquals {
new TypeCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
// The rest should not compile
// all (List(1, 1, 1)) should === (1L)
// all (List(1L, 1L, 1L)) should === (1)
// all (List(1, 1, 1)) should !== (1L)
// all (List(1L, 1L, 1L)) should !== (1)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
}
def `should be overridable with ConversionCheckedTripleEquals locally when TripleEquals imported` {
object O extends TripleEquals
import O._
new ConversionCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
// These should work with implicit conversions
all (List(1, 1, 1)) should === (1L)
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
// The rest should not compile
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
def `should be overridable with ConversionCheckedTripleEquals locally when TripleEquals mixed in` {
object O extends TripleEquals {
new ConversionCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
// These should work with implicit conversions
all (List(1, 1, 1)) should === (1L)
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
// The rest should not compile
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
}
}
object `with TypeCheckedTripleEquals` {
def `should compare supertypes with subtypes on either side` {
new TypeCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
val caught1 = intercept[TestFailedException] { all (List(super1, super1, super1)) should === (null) }
caught1.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, Super(1) did not equal null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(Super(1), Super(1), Super(1))"))
all (List(super1, super1, super1)) should !== (null)
all (List(nullSuper, nullSuper, nullSuper)) should === (null)
val caught2 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should !== (null) }
caught2.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null equaled null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
val caught3 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should === (super1) }
caught3.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null did not equal Super(1) (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
all (List(nullSuper, nullSuper, nullSuper)) should !== (super1)
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (Map("I" -> 1, "II" -> 2))
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (Map("1" -> 1, "2" -> 2))
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (Set(1, 2, 3))
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (Set(2, 3, 4))
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should === (List(1, 2, 3))
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should !== (List(2, 3, 4))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Array(1, 2, 3))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Array(2, 3, 4))
all (List((), (),())) should === (())
// The rest should not compile
// all (List((), (),())) should !== (7)
// all (List(1, 1, 1)) should === (1L)
// all (List(1L, 1L, 1L)) should === (1)
// all (List(1, 1, 1)) should !== (1L)
// all (List(1L, 1L, 1L)) should !== (1)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
// all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Seq(1, 2, 3))
// all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Seq(2, 3, 4))
// all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should === (Array(1, 2, 3))
// all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should !== (Array(2, 3, 4))
// intercept[TestFailedException] { all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (7) }
// all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (7)
// intercept[TestFailedException] { all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (7) }
// all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (7)
}
}
def `should be overridable with TripleEquals locally when TypeCheckedTripleEquals imported` {
object O extends TypeCheckedTripleEquals
import O._
new TripleEquals {
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(1, 1, 1)) should === (1L)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
all (List("1", "1", "1")) should !== (1)
intercept[TestFailedException] { all (List("1", "1", "1")) should === (1) }
all (List(1, 1, 1)) should !== ("1")
intercept[TestFailedException] { all (List(1, 1, 1)) should === ("1") }
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
// all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
// all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
}
}
def `should be overridable with TripleEquals locally when TypeCheckedTripleEquals mixed in` {
object O extends TypeCheckedTripleEquals {
new TripleEquals {
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(1, 1, 1)) should === (1L)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
all (List("1", "1", "1")) should !== (1)
intercept[TestFailedException] { all (List("1", "1", "1")) should === (1) }
all (List(1, 1, 1)) should !== ("1")
intercept[TestFailedException] { all (List(1, 1, 1)) should === ("1") }
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
// all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
// all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
}
}
}
def `should be overridable with ConversionCheckedTripleEquals locally when TypeCheckedTripleEquals imported` {
object O extends TypeCheckedTripleEquals
import O._
new ConversionCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) } // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (fr) // compiles on 2.10 but not 2.9
all (List(fr, fr, fr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (cr) // compiles on 2.10 but not 2.9
all (List(cr, cr, cr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (super2) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (sub2) // compiles on 2.10 but not 2.9
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (sub1) // compiles on 2.10 but not 2.9
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
// These should work with implicit conversions
all (List(1, 1, 1)) should === (1L)
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
// The rest should not compile
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
def `should be overridable with ConversionCheckedTripleEquals locally when TypeCheckedTripleEquals mixed in` {
object O extends TypeCheckedTripleEquals {
new ConversionCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) } // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (fr) // compiles on 2.10 but not 2.9
all (List(fr, fr, fr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (cr) // compiles on 2.10 but not 2.9
all (List(cr, cr, cr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (super2) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (sub2) // compiles on 2.10 but not 2.9
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (sub1) // compiles on 2.10 but not 2.9
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
// These should work with implicit conversions
all (List(1, 1, 1)) should === (1L)
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
// The rest should not compile
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
}
}
object `with ConversionCheckedTripleEquals` {
def `should compare supertypes with subtypes on either side as well as types with implicit conversions in either direction` {
new ConversionCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(ap, ap, ap)) should === (fr)
all (List(fr, fr, fr)) should === (ap)
all (List(ap, ap, ap)) should === (cr)
all (List(cr, cr, cr)) should === (ap)
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1)
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1)
// These should work with implicit conversions
all (List(1, 1, 1)) should === (1L)
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) }
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
// Should work sensibly with nulls
val caught1 = intercept[TestFailedException] { all (List(super1, super1, super1)) should === (null) }
caught1.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, Super(1) did not equal null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(Super(1), Super(1), Super(1))"))
all (List(super1, super1, super1)) should !== (null)
all (List(nullSuper, nullSuper, nullSuper)) should === (null)
val caught2 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should !== (null) }
caught2.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null equaled null (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
val caught3 = intercept[TestFailedException] { all (List(nullSuper, nullSuper, nullSuper)) should === (super1) }
caught3.message should be (Some("'all' inspection failed, because: \n" +
" at index 0, null did not equal Super(1) (ShouldCollectedTripleEqualsSpec.scala:" + (thisLineNumber - 2) + ") \n" +
"in List(null, null, null)"))
all (List(nullSuper, nullSuper, nullSuper)) should !== (super1)
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (Map("I" -> 1, "II" -> 2))
all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (Map("1" -> 1, "2" -> 2))
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (Set(1, 2, 3))
all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (Set(2, 3, 4))
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should === (List(1, 2, 3))
all (List(List(1, 2, 3), List(1, 2, 3), List(1, 2, 3))) should !== (List(2, 3, 4))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Array(1, 2, 3))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Array(2, 3, 4))
all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should === (Array(1, 2, 3))
all (List(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))) should !== (Array(2, 3, 4))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should === (Seq(1, 2, 3))
all (List(Array(1, 2, 3), Array(1, 2, 3), Array(1, 2, 3))) should !== (Seq(2, 3, 4))
all (List((), (),())) should === (())
// The rest should not compile
// all (List((), (),())) should !== (7)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
// intercept[TestFailedException] { all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should === (7) }
// all (List(Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2), Map("I" -> 1, "II" -> 2))) should !== (7)
// intercept[TestFailedException] { all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should === (7) }
// all (List(Set(1, 2, 3), Set(1, 2, 3), Set(1, 2, 3))) should !== (7)
}
}
def `should be overridable with TripleEquals locally when ConversionCheckedTripleEquals imported` {
object O extends ConversionCheckedTripleEquals
import O._
new TripleEquals {
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(1, 1, 1)) should === (1L) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) } // compiles on 2.10 but not 2.9
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
all (List("1", "1", "1")) should !== (1)
intercept[TestFailedException] { all (List("1", "1", "1")) should === (1) }
all (List(1, 1, 1)) should !== ("1")
intercept[TestFailedException] { all (List(1, 1, 1)) should === ("1") }
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
}
}
def `should be overridable with TripleEquals locally when ConversionCheckedTripleEquals mixed in` {
object O extends ConversionCheckedTripleEquals {
new TripleEquals {
all (List(1, 1, 1)) should === (1)
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) }
all (List(1, 1, 1)) should === (1L) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1L) } // compiles on 2.10 but not 2.9
all (List(1L, 1L, 1L)) should === (1)
intercept[TestFailedException] { all (List(1L, 1L, 1L)) should !== (1) }
all (List("1", "1", "1")) should !== (1)
intercept[TestFailedException] { all (List("1", "1", "1")) should === (1) }
all (List(1, 1, 1)) should !== ("1")
intercept[TestFailedException] { all (List(1, 1, 1)) should === ("1") }
all (List(super1, super1, super1)) should !== (super2)
all (List(super1, super1, super1)) should !== (sub2)
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1)
all (List(super1, super1, super1)) should === (sub1)
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
}
}
}
def `should be overridable with TypeCheckedTripleEquals locally when ConversionCheckedTripleEquals imported` {
object O extends ConversionCheckedTripleEquals
import O._
new TypeCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) } // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (fr) // compiles on 2.10 but not 2.9
all (List(fr, fr, fr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (cr) // compiles on 2.10 but not 2.9
all (List(cr, cr, cr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (super2) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (sub2) // compiles on 2.10 but not 2.9
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (sub1) // compiles on 2.10 but not 2.9
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
// The rest should not compile
// all (List(1, 1, 1)) should === (1L)
// all (List(1L, 1L, 1L)) should === (1)
// all (List(1, 1, 1)) should !== (1L)
// all (List(1L, 1L, 1L)) should !== (1)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
def `should be overridable with TypeCheckedTripleEquals locally when ConversionCheckedTripleEquals mixed in` {
object O extends ConversionCheckedTripleEquals {
new TypeCheckedTripleEquals {
class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
trait Crunchy
class Apple extends Fruit with Crunchy
val fr: Fruit = new Apple
val cr: Crunchy = new Apple
val ap: Apple = new Apple
all (List(1, 1, 1)) should === (1) // compiles on 2.10 but not 2.9
intercept[TestFailedException] { all (List(1, 1, 1)) should !== (1) } // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (fr) // compiles on 2.10 but not 2.9
all (List(fr, fr, fr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(ap, ap, ap)) should === (cr) // compiles on 2.10 but not 2.9
all (List(cr, cr, cr)) should === (ap) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (super2) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should !== (sub2) // compiles on 2.10 but not 2.9
all (List(sub2, sub2, sub2)) should !== (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (super1) // compiles on 2.10 but not 2.9
all (List(super1, super1, super1)) should === (sub1) // compiles on 2.10 but not 2.9
all (List(sub1, sub1, sub1)) should === (super1) // compiles on 2.10 but not 2.9
// The rest should not compile
// all (List(1, 1, 1)) should === (1L)
// all (List(1L, 1L, 1L)) should === (1)
// all (List(1, 1, 1)) should !== (1L)
// all (List(1L, 1L, 1L)) should !== (1)
// all (List("1", "1", "1")) should === (1)
// all (List(1, 1, 1)) should === ("1")
// all (List("1", "1", "1")) should !== (1)
// all (List(1, 1, 1)) should !== ("1")
// all (List(fr, fr, fr)) should === (cr)
// all (List(cr, cr, cr)) should === (fr)
}
}
}
}
}
}
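// Editor's sketch, not part of the spec above: stripped of ScalaTest's `all (...) should === (...)`
// layer, the three Scalactic equality styles exercised by this spec differ only in what the
// compiler accepts. A standalone illustration, assuming scalactic is on the classpath:
object EqualityStylesSketch {
  def main(args: Array[String]): Unit = {
    locally {
      import org.scalactic.TripleEquals._
      assert(1 === 1L)  // unconstrained: any two types compare via ==
      assert("1" !== 1) // compiles and evaluates to true
    }
    locally {
      import org.scalactic.TypeCheckedTripleEquals._
      assert(1 === 1)    // only identical (or sub/super) types compile
      // assert(1 === 1L) // would not compile: Int and Long are unrelated types
    }
    // ConversionCheckedTripleEquals, used above but deprecated in newer Scalactic releases,
    // additionally accepts types related by an implicit conversion, e.g. Int === Long.
  }
}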
|
SRGOM/scalatest
|
scalatest-test/src/test/scala/org/scalatest/ShouldCollectedTripleEqualsSpec.scala
|
Scala
|
apache-2.0
| 37,744
|
package lila.round
import actorApi._
import actorApi.round._
import akka.actor.{ ActorSystem, Cancellable, CoordinatedShutdown, Scheduler }
import chess.format.Uci
import chess.{ Black, Centis, Color, MoveMetrics, Speed, White }
import play.api.libs.json._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Promise }
import lila.chat.{ BusChan, Chat }
import lila.common.{ Bus, IpAddress, Lilakka }
import lila.game.Game.{ FullId, PlayerId }
import lila.game.{ Event, Game, Pov }
import lila.hub.actorApi.map.{ Exists, Tell, TellAll, TellIfExists, TellMany }
import lila.hub.actorApi.round.{ Abort, Berserk, RematchNo, RematchYes, Resign, TourStanding }
import lila.hub.actorApi.socket.remote.TellSriIn
import lila.hub.actorApi.tv.TvSelect
import lila.hub.AsyncActorConcMap
import lila.room.RoomSocket.{ Protocol => RP, _ }
import lila.socket.RemoteSocket.{ Protocol => P, _ }
import lila.socket.Socket.{ makeMessage, SocketVersion }
import lila.user.User
import reactivemongo.api.Cursor
final class RoundSocket(
remoteSocketApi: lila.socket.RemoteSocket,
roundDependencies: RoundAsyncActor.Dependencies,
proxyDependencies: GameProxy.Dependencies,
scheduleExpiration: ScheduleExpiration,
tournamentActor: lila.hub.actors.TournamentApi,
messenger: Messenger,
goneWeightsFor: Game => Fu[(Float, Float)],
shutdown: CoordinatedShutdown
)(implicit
ec: ExecutionContext,
system: ActorSystem
) {
import RoundSocket._
private var stopping = false
Lilakka.shutdown(shutdown, _.PhaseServiceUnbind, "Stop round socket") { () =>
stopping = true
rounds.tellAllWithAck(RoundAsyncActor.LilaStop.apply) map { nb =>
Lilakka.logger.info(s"$nb round asyncActors have stopped")
}
}
def getGame(gameId: Game.ID): Fu[Option[Game]] =
rounds.getOrMake(gameId).getGame addEffect { g =>
if (g.isEmpty) finishRound(Game.Id(gameId))
}
def getGames(gameIds: List[Game.ID]): Fu[List[(Game.ID, Option[Game])]] =
gameIds.map { id =>
rounds.getOrMake(id).getGame dmap { id -> _ }
}.sequenceFu
def gameIfPresent(gameId: Game.ID): Fu[Option[Game]] = rounds.getIfPresent(gameId).??(_.getGame)
// get the proxied version of the game
def upgradeIfPresent(game: Game): Fu[Game] =
rounds.getIfPresent(game.id).fold(fuccess(game))(_.getGame.dmap(_ | game))
// update the proxied game
def updateIfPresent(gameId: Game.ID)(f: Game => Game): Funit =
rounds.getIfPresent(gameId) ?? {
_ updateGame f
}
val rounds = new AsyncActorConcMap[RoundAsyncActor](
mkAsyncActor = id =>
makeRoundActor(id, SocketVersion(0), roundDependencies.gameRepo game id recoverDefault none),
initialCapacity = 65536
)
private def makeRoundActor(id: Game.ID, version: SocketVersion, gameFu: Fu[Option[Game]]) = {
val proxy = new GameProxy(id, proxyDependencies, gameFu)
val roundActor = new RoundAsyncActor(
dependencies = roundDependencies,
gameId = id,
socketSend = sendForGameId(id),
version = version
)(ec, proxy)
terminationDelay schedule Game.Id(id)
gameFu dforeach {
_ foreach { game =>
scheduleExpiration(game)
goneWeightsFor(game) dforeach { w =>
roundActor ! RoundAsyncActor.SetGameInfo(game, w)
}
}
}
roundActor
}
private def tellRound(gameId: Game.Id, msg: Any): Unit = rounds.tell(gameId.value, msg)
private lazy val roundHandler: Handler = {
case Protocol.In.PlayerMove(fullId, uci, blur, lag) if !stopping =>
tellRound(fullId.gameId, HumanPlay(fullId.playerId, uci, blur, lag, none))
case Protocol.In.PlayerDo(id, tpe) if !stopping =>
tpe match {
case "moretime" => tellRound(id.gameId, Moretime(id.playerId))
case "rematch-yes" => tellRound(id.gameId, RematchYes(id.playerId.value))
case "rematch-no" => tellRound(id.gameId, RematchNo(id.playerId.value))
case "takeback-yes" => tellRound(id.gameId, TakebackYes(id.playerId))
case "takeback-no" => tellRound(id.gameId, TakebackNo(id.playerId))
case "draw-yes" => tellRound(id.gameId, DrawYes(id.playerId))
case "draw-no" => tellRound(id.gameId, DrawNo(id.playerId))
case "draw-claim" => tellRound(id.gameId, DrawClaim(id.playerId))
case "resign" => tellRound(id.gameId, Resign(id.playerId.value))
case "resign-force" => tellRound(id.gameId, ResignForce(id.playerId))
case "draw-force" => tellRound(id.gameId, DrawForce(id.playerId))
case "abort" => tellRound(id.gameId, Abort(id.playerId.value))
case "outoftime" => tellRound(id.gameId, QuietFlag) // mobile app BC
case t => logger.warn(s"Unhandled round socket message: $t")
}
case Protocol.In.Flag(gameId, color, fromPlayerId) => tellRound(gameId, ClientFlag(color, fromPlayerId))
case Protocol.In.PlayerChatSay(id, Right(color), msg) =>
gameIfPresent(id.value) foreach {
_ foreach {
messenger.owner(_, color, msg).unit
}
}
case Protocol.In.PlayerChatSay(id, Left(userId), msg) =>
messenger.owner(id, userId, msg).unit
case Protocol.In.WatcherChatSay(id, userId, msg) =>
messenger.watcher(id, userId, msg).unit
case RP.In.ChatTimeout(roomId, modId, suspect, reason, text) =>
messenger.timeout(Chat.Id(s"$roomId/w"), modId, suspect, reason, text).unit
case Protocol.In.Berserk(gameId, userId) => tournamentActor ! Berserk(gameId.value, userId)
case Protocol.In.PlayerOnlines(onlines) =>
onlines foreach {
case (gameId, Some(on)) =>
tellRound(gameId, on)
terminationDelay cancel gameId
case (gameId, _) =>
if (rounds exists gameId.value) terminationDelay schedule gameId
}
case Protocol.In.Bye(fullId) => tellRound(fullId.gameId, ByePlayer(fullId.playerId))
case RP.In.TellRoomSri(_, P.In.TellSri(_, _, tpe, _)) =>
logger.warn(s"Unhandled round socket message: $tpe")
case hold: Protocol.In.HoldAlert => tellRound(hold.fullId.gameId, hold)
case r: Protocol.In.SelfReport => Bus.publish(r, "selfReport")
case P.In.TellSri(sri, userId, tpe, msg) => // eval cache
Bus.publish(TellSriIn(sri.value, userId, msg), s"remoteSocketIn:$tpe")
case RP.In.SetVersions(versions) =>
preloadRoundsWithVersions(versions)
send(Protocol.Out.versioningReady)
case P.In.Ping(id) => send(P.Out.pong(id))
case Protocol.In.WsLatency(millis) => MoveLatMonitor.wsLatency.set(millis)
case P.In.WsBoot =>
logger.warn("Remote socket boot")
// schedule termination for all game asyncActors
// until players actually reconnect
rounds foreachKey { id =>
terminationDelay schedule Game.Id(id)
}
rounds.tellAll(RoundAsyncActor.WsBoot)
}
private def finishRound(gameId: Game.Id): Unit =
rounds.terminate(gameId.value, _ ! RoundAsyncActor.Stop)
private lazy val send: Sender = remoteSocketApi.makeSender("r-out", parallelism = 8)
private lazy val sendForGameId: Game.ID => String => Unit = gameId => msg => send.sticky(gameId, msg)
remoteSocketApi.subscribeRoundRobin("r-in", Protocol.In.reader, parallelism = 8)(
roundHandler orElse remoteSocketApi.baseHandler
) >>- send(P.Out.boot)
Bus.subscribeFun("tvSelect", "roundSocket", "tourStanding", "startGame", "finishGame") {
case TvSelect(gameId, speed, json) => sendForGameId(gameId)(Protocol.Out.tvSelect(gameId, speed, json))
case Tell(gameId, e @ BotConnected(color, v)) =>
rounds.tell(gameId, e)
sendForGameId(gameId)(Protocol.Out.botConnected(gameId, color, v))
case Tell(gameId, msg) => rounds.tell(gameId, msg)
case TellIfExists(gameId, msg) => rounds.tellIfPresent(gameId, msg)
case TellMany(gameIds, msg) => rounds.tellIds(gameIds, msg)
case TellAll(msg) => rounds.tellAll(msg)
case Exists(gameId, promise) => promise success rounds.exists(gameId)
case TourStanding(tourId, json) => send(Protocol.Out.tourStanding(tourId, json))
case lila.game.actorApi.StartGame(game) if game.hasClock =>
game.userIds.some.filter(_.nonEmpty) foreach { usersPlaying =>
sendForGameId(game.id)(Protocol.Out.startGame(usersPlaying))
}
case lila.game.actorApi.FinishGame(game, _, _) if game.hasClock =>
game.userIds.some.filter(_.nonEmpty) foreach { usersPlaying =>
sendForGameId(game.id)(Protocol.Out.finishGame(game.id, game.winnerColor, usersPlaying))
}
}
{
import lila.chat.actorApi._
Bus.subscribeFun(BusChan.Round.chan, BusChan.Global.chan) {
case ChatLine(Chat.Id(id), l) =>
val line = RoundLine(l, id endsWith "/w")
rounds.tellIfPresent(if (line.watcher) id take Game.gameIdSize else id, line)
case OnTimeout(Chat.Id(id), userId) =>
send(RP.Out.tellRoom(RoomId(id take Game.gameIdSize), makeMessage("chat_timeout", userId)))
case OnReinstate(Chat.Id(id), userId) =>
send(RP.Out.tellRoom(RoomId(id take Game.gameIdSize), makeMessage("chat_reinstate", userId)))
}
}
system.scheduler.scheduleWithFixedDelay(25 seconds, tickInterval) { () =>
rounds.tellAll(RoundAsyncActor.Tick)
}
system.scheduler.scheduleWithFixedDelay(60 seconds, 60 seconds) { () =>
lila.mon.round.asyncActorCount.update(rounds.size).unit
}
private val terminationDelay = new TerminationDelay(system.scheduler, 1 minute, finishRound)
// on startup we get all ongoing game IDs and versions from lila-ws
// load them into round actors with batched DB queries
private def preloadRoundsWithVersions(rooms: Iterable[(Game.ID, SocketVersion)]) = {
val bootLog = lila log "boot"
// load all actors synchronously, giving them game futures from promises we'll fulfill later
val gamePromises: Map[Game.ID, Promise[Option[Game]]] = rooms.view.map { case (id, version) =>
val promise = Promise[Option[Game]]()
rounds.loadOrTell(
id,
load = () => makeRoundActor(id, version, promise.future),
tell = _ ! SetVersion(version)
)
id -> promise
}.toMap
// fulfill the promises with batched DB requests
rooms
.map(_._1)
.grouped(1024)
.map { ids =>
roundDependencies.gameRepo
.byIdsCursor(ids)
.foldWhile[Set[Game.ID]](Set.empty[Game.ID])(
(ids, game) =>
Cursor.Cont[Set[Game.ID]] {
gamePromises.get(game.id).foreach(_ success game.some)
ids + game.id
},
Cursor.ContOnError { (_, err) => bootLog.error("Can't load round game", err) }
)
.recover { case e: Exception =>
bootLog.error(s"RoundSocket Can't load ${ids.size} round games", e)
Set.empty
}
.chronometer
.log(bootLog)(loadedIds => s"RoundSocket Loaded ${loadedIds.size}/${ids.size} round games")
.result
}
.sequenceFu
.map(_.flatten.toSet)
.andThen {
case scala.util.Success(loadedIds) =>
val missingIds = gamePromises.keySet -- loadedIds
if (missingIds.nonEmpty) {
bootLog.warn(
s"RoundSocket ${missingIds.size} round games could not be loaded: ${missingIds.take(20) mkString " "}"
)
missingIds.foreach { id =>
gamePromises.get(id).foreach(_ success none)
}
}
case scala.util.Failure(err) =>
bootLog.error(s"RoundSocket Can't load ${gamePromises.size} round games", err)
}
.chronometer
.log(bootLog)(ids => s"RoundSocket Done loading ${ids.size}/${gamePromises.size} round games")
}
}
object RoundSocket {
val tickSeconds = 5
val tickInterval = tickSeconds.seconds
val ragequitTimeout = 10.seconds
val disconnectTimeout = 40.seconds
def povDisconnectTimeout(pov: Pov): FiniteDuration =
disconnectTimeout * {
pov.game.speed match {
case Speed.Classical => 3
case Speed.Rapid => 2
case _ => 1
}
} / {
import chess.variant._
(pov.game.chess.board.materialImbalance, pov.game.variant) match {
case (_, Antichess | Crazyhouse | Horde) => 1
case (i, _) if (pov.color.white && i <= -4) || (pov.color.black && i >= 4) => 3
case _ => 1
}
} / {
if (pov.player.hasUser) 1 else 2
}
object Protocol {
object In {
case class PlayerOnlines(onlines: Iterable[(Game.Id, Option[RoomCrowd])]) extends P.In
case class PlayerDo(fullId: FullId, tpe: String) extends P.In
case class PlayerMove(fullId: FullId, uci: Uci, blur: Boolean, lag: MoveMetrics) extends P.In
case class PlayerChatSay(gameId: Game.Id, userIdOrColor: Either[User.ID, Color], msg: String)
extends P.In
case class WatcherChatSay(gameId: Game.Id, userId: User.ID, msg: String) extends P.In
case class Bye(fullId: FullId) extends P.In
case class HoldAlert(fullId: FullId, ip: IpAddress, mean: Int, sd: Int) extends P.In
case class Flag(gameId: Game.Id, color: Color, fromPlayerId: Option[PlayerId]) extends P.In
case class Berserk(gameId: Game.Id, userId: User.ID) extends P.In
case class SelfReport(fullId: FullId, ip: IpAddress, userId: Option[User.ID], name: String) extends P.In
case class WsLatency(millis: Int) extends P.In
val reader: P.In.Reader = raw =>
raw.path match {
case "r/ons" =>
PlayerOnlines {
P.In.commas(raw.args) map {
_ splitAt Game.gameIdSize match {
case (gameId, cs) =>
(
Game.Id(gameId),
if (cs.isEmpty) None else Some(RoomCrowd(cs(0) == '+', cs(1) == '+'))
)
}
}
}.some
case "r/do" =>
raw.get(2) { case Array(fullId, payload) =>
for {
obj <- Json.parse(payload).asOpt[JsObject]
tpe <- obj str "t"
} yield PlayerDo(FullId(fullId), tpe)
}
case "r/move" =>
raw.get(5) { case Array(fullId, uciS, blurS, lagS, mtS) =>
Uci(uciS) map { uci =>
PlayerMove(FullId(fullId), uci, P.In.boolean(blurS), MoveMetrics(centis(lagS), centis(mtS)))
}
}
case "chat/say" =>
raw.get(3) { case Array(roomId, author, msg) =>
PlayerChatSay(Game.Id(roomId), readColor(author).toRight(author), msg).some
}
case "chat/say/w" =>
raw.get(3) { case Array(roomId, userId, msg) =>
WatcherChatSay(Game.Id(roomId), userId, msg).some
}
case "r/berserk" =>
raw.get(2) { case Array(gameId, userId) =>
Berserk(Game.Id(gameId), userId).some
}
case "r/bye" => Bye(Game.FullId(raw.args)).some
case "r/hold" =>
raw.get(4) { case Array(fullId, ip, meanS, sdS) =>
for {
mean <- meanS.toIntOption
sd <- sdS.toIntOption
ip <- IpAddress.from(ip)
} yield HoldAlert(FullId(fullId), ip, mean, sd)
}
case "r/report" =>
raw.get(4) { case Array(fullId, ip, user, name) =>
IpAddress.from(ip) map { ip =>
SelfReport(FullId(fullId), ip, P.In.optional(user), name)
}
}
case "r/flag" =>
raw.get(3) { case Array(gameId, color, playerId) =>
readColor(color) map {
Flag(Game.Id(gameId), _, P.In.optional(playerId) map PlayerId.apply)
}
}
case "r/latency" => raw.args.toIntOption map WsLatency
case _ => RP.In.reader(raw)
}
private def centis(s: String): Option[Centis] =
if (s == "-") none
else s.toIntOption map Centis.apply
private def readColor(s: String) =
if (s == "w") Some(White)
else if (s == "b") Some(Black)
else None
}
object Out {
def resyncPlayer(fullId: FullId) = s"r/resync/player $fullId"
def gone(fullId: FullId, gone: Boolean) = s"r/gone $fullId ${P.Out.boolean(gone)}"
def goneIn(fullId: FullId, millis: Long) = {
val seconds = Math.ceil(millis / 1000d / tickSeconds).toInt * tickSeconds
s"r/goneIn $fullId $seconds"
}
def tellVersion(roomId: RoomId, version: SocketVersion, e: Event) = {
val flags = new StringBuilder(2)
if (e.watcher) flags += 's'
else if (e.owner) flags += 'p'
else
e.only.map(_.fold('w', 'b')).orElse {
e.moveBy.map(_.fold('W', 'B'))
} foreach flags.+=
if (e.troll) flags += 't'
if (flags.isEmpty) flags += '-'
s"r/ver $roomId $version $flags ${e.typ} ${e.data}"
}
def tvSelect(gameId: Game.ID, speed: chess.Speed, data: JsObject) =
s"tv/select $gameId ${speed.id} ${Json stringify data}"
def botConnected(gameId: Game.ID, color: Color, v: Boolean) =
s"r/bot/online $gameId ${P.Out.color(color)} ${P.Out.boolean(v)}"
def tourStanding(tourId: String, data: JsValue) =
s"r/tour/standing $tourId ${Json stringify data}"
def startGame(users: List[User.ID]) = s"r/start ${P.Out.commas(users)}"
def finishGame(gameId: Game.ID, winner: Option[Color], users: List[User.ID]) =
s"r/finish $gameId ${P.Out.color(winner)} ${P.Out.commas(users)}"
def versioningReady = "r/versioning-ready"
}
}
final private class TerminationDelay(
scheduler: Scheduler,
duration: FiniteDuration,
terminate: Game.Id => Unit
)(implicit ec: scala.concurrent.ExecutionContext) {
import java.util.concurrent.ConcurrentHashMap
private[this] val terminations = new ConcurrentHashMap[String, Cancellable](65536)
def schedule(gameId: Game.Id): Unit =
terminations
.compute(
gameId.value,
(id, canc) => {
Option(canc).foreach(_.cancel())
scheduler.scheduleOnce(duration) {
terminations remove id
terminate(Game.Id(id))
}
}
)
.unit
def cancel(gameId: Game.Id): Unit =
Option(terminations remove gameId.value).foreach(_.cancel())
}
}
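// Editor's sketch, not part of lila: `povDisconnectTimeout` above scales the 40-second base
// timeout by game speed, divides it when the player is losing badly on material (except in a
// few variants), and halves it for anonymous players. The standalone function below mirrors
// that arithmetic with plain parameters so the scaling is easy to follow; the names are
// illustrative, not lila's API.
object DisconnectTimeoutSketch {
  import scala.concurrent.duration._

  def timeout(speedFactor: Int, losingBadly: Boolean, hasUser: Boolean): FiniteDuration =
    40.seconds * speedFactor / (if (losingBadly) 3 else 1) / (if (hasUser) 1 else 2)

  def main(args: Array[String]): Unit = {
    println(timeout(speedFactor = 3, losingBadly = false, hasUser = true))  // classical player: 120s
    println(timeout(speedFactor = 1, losingBadly = true, hasUser = true))   // losing badly: ~13s
    println(timeout(speedFactor = 1, losingBadly = false, hasUser = false)) // anonymous: 20s
  }
}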
|
luanlv/lila
|
modules/round/src/main/RoundSocket.scala
|
Scala
|
mit
| 18,948
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.util
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.conf.Configuration
object HadoopUtil {
def newJob(): Job = {
newJob(new Configuration())
}
def newJob(config: Configuration): Job = {
val jobClass: Class[_] = Class.forName("org.apache.hadoop.mapreduce.Job")
try {
// Use the getInstance method in Hadoop 2
jobClass.getMethod("getInstance", classOf[Configuration]).invoke(null, config).asInstanceOf[Job]
} catch {
case ex: NoSuchMethodException =>
// Drop back to Hadoop 1 constructor
jobClass.getConstructor(classOf[Configuration]).newInstance(config).asInstanceOf[Job]
}
}
  /**
   * Create a job using either the Hadoop 1 or 2 API.
   * @param sc A Spark context whose Hadoop configuration is used
   * @return a new Job instance
   */
def newJob(sc: SparkContext): Job = {
newJob(sc.hadoopConfiguration)
}
  /**
   * In Hadoop 2.x, isDir is deprecated in favor of isDirectory.
   * @param fs the file status to check
   * @return true if the status refers to a directory
   */
def isDirectory(fs: FileStatus): Boolean = {
val fsClass: Class[_] = fs.getClass
try {
// Use the isDirectory method in Hadoop 2
fsClass.getMethod("isDirectory").invoke(fs).asInstanceOf[Boolean]
} catch {
case ex: NoSuchMethodException =>
// Drop back to Hadoop 1 isDir method
fsClass.getMethod("isDir").invoke(fs).asInstanceOf[Boolean]
}
}
}
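// A brief usage sketch, not part of the original file: it assumes Spark and a Hadoop
// FileSystem are on the classpath and shows how the reflection helpers above keep callers
// agnostic of the Hadoop 1 vs Hadoop 2 API split.
object HadoopUtilUsageSketch {
  import org.apache.hadoop.fs.{FileSystem, Path}

  def listDirectories(sc: SparkContext, dir: String): Seq[Path] = {
    val job = HadoopUtil.newJob(sc)                // built via getInstance or the old constructor
    val fs = FileSystem.get(job.getConfiguration)
    fs.listStatus(new Path(dir)).toSeq
      .filter(HadoopUtil.isDirectory)              // version-agnostic isDirectory/isDir check
      .map(_.getPath)
  }
}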
|
tomwhite/adam
|
adam-core/src/main/scala/org/bdgenomics/adam/util/HadoopUtil.scala
|
Scala
|
apache-2.0
| 2,236
|
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package index
import _root_.org.jetbrains.plugins.scala.lang.psi.impl.search.ScSourceFilterScope
import api.statements.{ScValue, ScVariable}
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.stubs.{StringStubIndexExtension, StubIndexKey}
/**
* User: Alexander Podkhalyuzin
* Date: 18.10.2008
*/
class ScVariableNameIndex extends StringStubIndexExtension[ScVariable] {
override def get(key: String, project: Project, scope: GlobalSearchScope): java.util.Collection[ScVariable] =
super.get(key, project, new ScSourceFilterScope(scope, project))
def getKey: StubIndexKey[String, ScVariable] = ScVariableNameIndex.KEY
}
object ScVariableNameIndex {
val KEY = ScalaIndexKeys.VARIABLE_NAME_KEY
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/index/ScVariableNameIndex.scala
|
Scala
|
apache-2.0
| 849
|
package org.nisshiee.toban.controller.api
import play.api._
import play.api.mvc._
import play.api.db._
import play.api.Play.current
import play.api.libs.json._, Json._
import play.api.libs.Jsonp
import org.nisshiee.toban.model._
import org.nisshiee.toban.controller.ControllerHelper
object TaskController extends Controller with ControllerHelper {
def getAll(callback: String) = ApiAction {
val tasks = DB.withTransaction { implicit c =>
Task.all
}
callback match {
case "" => Ok(toJson(tasks))
case c => Ok(Jsonp(c, toJson(tasks)))
}
}
}
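// A standalone sketch (plain strings, no Play types) of the JSONP idea used by getAll above:
// when a callback name is supplied, the JSON payload is wrapped in a function call so the
// response can be consumed from a <script> tag on another origin.
object JsonpSketch {
  def render(json: String, callback: String): String =
    if (callback.isEmpty) json else s"$callback($json);"

  def demo(): Unit = {
    println(render("""[{"id":1}]""", ""))        // plain JSON body
    println(render("""[{"id":1}]""", "onTasks")) // onTasks([{"id":1}]);
  }
}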
|
nisshiee/to-ban
|
app/controllers/api/TaskController.scala
|
Scala
|
mit
| 581
|
package me.rjfarmer.rlh.client
import me.rjfarmer.rlh.api.CacheableResponse
/** A history item wraps a cacheable response and records when that response was received. */
class HistoryItem[T <: CacheableResponse] (val item: T ) extends HasResponseTimeAgo {
updateResponseTimestamp(item.timestamp)
}
/**
* Maintains result history for T
*
* Results are sorted by timestamps (DESC).
*
* @tparam T result type
*/
class History [T <: CacheableResponse] (_vector: Vector[HistoryItem[T]]){
private[this] val maxEntries = 16
private[this] var items: Vector[HistoryItem[T]] = _vector
def this() = {
this(Vector())
}
def add(item: T): HistoryItem[T] = {
val hItem = new HistoryItem(item)
val already = items.find(x => x.item.cacheKey == item.cacheKey)
already match {
case None =>
items = (items :+ hItem)
.sorted(ByTimestamp)
.take(maxEntries)
hItem
      case Some(existing) =>
        existing
}
}
def history: Vector[HistoryItem[T]] = items
object ByTimestamp extends Ordering[HasResponseTimeAgo] {
// descending!
override def compare(x: HasResponseTimeAgo, y: HasResponseTimeAgo): Int = y.timestamp.compareTo(x.timestamp)
}
}
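// A standalone illustration (plain tuples, not the project types) of the policy History.add
// implements above: deduplicate by cache key, keep newest-first order, cap at maxEntries.
object HistoryPolicySketch {
  val maxEntries = 16

  def add(items: Vector[(String, Long)], key: String, ts: Long): Vector[(String, Long)] =
    if (items.exists(_._1 == key)) items                      // existing entry wins, as in History.add
    else ((key, ts) +: items).sortBy(-_._2).take(maxEntries)  // timestamps descending, capped

  def demo(): Unit = {
    val h = add(add(Vector(), "a", 1L), "b", 5L)
    println(h)               // Vector((b,5), (a,1))
    println(add(h, "a", 9L)) // unchanged: duplicate keys are not re-added
  }
}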
|
random-j-farmer/little-helper
|
app/js/src/main/scala/me/rjfarmer/rlh/client/History.scala
|
Scala
|
mit
| 1,200
|
/**
* Copyright (C) 2012 Typesafe, Inc. <http://www.typesafe.com>
*/
package org.pantsbuild.zinc
import java.io.File
import sbt.{ Hash, IO }
object Util {
//
// Time
//
/**
* Current timestamp and time passed since start time.
*/
def timing(start: Long): String = {
val end = System.currentTimeMillis
"at %s [%s]" format (dateTime(end), duration(end - start))
}
/**
* Format a minutes:seconds.millis time.
*/
def duration(millis: Long): String = {
val secs = millis / 1000
val (m, s, ms) = (secs / 60, secs % 60, millis % 1000)
if (m > 0) "%d:%02d.%03ds" format (m, s, ms)
else "%d.%03ds" format (s, ms)
}
  /**
   * Create a readable timestamp.
   */
def dateTime(time: Long): String = {
java.text.DateFormat.getDateTimeInstance().format(new java.util.Date(time))
}
//
// Files
//
/**
* Normalise file in relation to actual current working directory.
*/
def normalise(cwd: Option[File])(file: File): File = {
if (cwd.isDefined && !file.isAbsolute) new File(cwd.get, file.getPath) else file
}
/**
* Normalise optional file in relation to actual current working directory.
*/
def normaliseOpt(cwd: Option[File])(optFile: Option[File]): Option[File] = {
if (cwd.isDefined) optFile map normalise(cwd) else optFile
}
/**
* Normalise file pair in relation to actual current working directory.
*/
def normalisePair(cwd: Option[File])(pair: (File, File)): (File, File) = {
if (cwd.isDefined) (normalise(cwd)(pair._1), normalise(cwd)(pair._2)) else pair
}
/**
* Normalise sequence of files in relation to actual current working directory.
*/
def normaliseSeq(cwd: Option[File])(files: Seq[File]): Seq[File] = {
if (cwd.isDefined) files map normalise(cwd) else files
}
/**
* Normalise file map in relation to actual current working directory.
*/
def normaliseMap(cwd: Option[File])(mapped: Map[File, File]): Map[File, File] = {
if (cwd.isDefined) mapped map { case (l, r) => (normalise(cwd)(l), normalise(cwd)(r)) } else mapped
}
/**
* Normalise file sequence map in relation to actual current working directory.
*/
def normaliseSeqMap(cwd: Option[File])(mapped: Map[Seq[File], File]): Map[Seq[File], File] = {
if (cwd.isDefined) mapped map { case (l, r) => (normaliseSeq(cwd)(l), normalise(cwd)(r)) } else mapped
}
/**
* Fully relativize a path, relative to any other base.
*/
def relativize(base: File, path: File): String = {
import scala.tools.nsc.io.Path._
(base relativize path).toString
}
  /**
   * Check whether a file is writable.
   */
def checkWritable(file: File) = {
if (file.exists) file.canWrite else file.getParentFile.canWrite
}
/**
* Clean all class files from a directory.
*/
def cleanAllClasses(dir: File): Unit = {
import sbt.Path._
IO.delete((dir ** "*.class").get)
}
/**
* Hash of a file's canonical path.
*/
def pathHash(file: File): String = {
Hash.toHex(Hash(file.getCanonicalPath))
}
//
// Properties
//
/**
* Create int from system property.
*/
def intProperty(name: String, default: Int): Int = {
val value = System.getProperty(name)
if (value ne null) try value.toInt catch { case _: Exception => default } else default
}
/**
* Create set of strings, split by comma, from system property.
*/
def stringSetProperty(name: String, default: Set[String]): Set[String] = {
val value = System.getProperty(name)
if (value ne null) (value split ",").toSet else default
}
  /**
   * Create a file from a system property, defaulting to the empty path.
   */
def fileProperty(name: String): File = new File(System.getProperty(name, ""))
/**
* Create an option file from system property.
*/
def optFileProperty(name: String): Option[File] = Option(System.getProperty(name, null)).map(new File(_))
/**
* Get a property from a properties file resource in the classloader.
*/
def propertyFromResource(resource: String, property: String, classLoader: ClassLoader): Option[String] = {
val props = propertiesFromResource(resource, classLoader)
Option(props.getProperty(property))
}
/**
* Get all properties from a properties file resource in the classloader.
*/
def propertiesFromResource(resource: String, classLoader: ClassLoader): java.util.Properties = {
val props = new java.util.Properties
val stream = classLoader.getResourceAsStream(resource)
try { props.load(stream) }
catch { case e: Exception => }
finally { if (stream ne null) stream.close }
props
}
/**
* Set system properties.
*/
def setProperties(props: Seq[String]): Unit = {
for (prop <- props) {
val kv = prop split "="
if (kv.length == 2) System.setProperty(kv(0), kv(1))
}
}
//
// Timers
//
/**
* Simple duration regular expression.
*/
val Duration = """(\d+)([hms])""".r
/**
* Milliseconds from string duration of the form Nh|Nm|Ns, otherwise default.
*/
def duration(arg: String, default: Long): Long =
arg match {
case Duration(length, unit) =>
val multiplier = unit match {
case "h" => 60 * 60 * 1000
case "m" => 60 * 1000
case "s" => 1000
case _ => 0
}
try { length.toLong * multiplier } catch { case _: Exception => default }
case _ => default
}
/**
* Schedule a resettable timer.
*/
def timer(delay: Long)(body: => Unit) = new Alarm(delay)(body)
/**
* Resettable timer.
*/
class Alarm(delay: Long)(body: => Unit) {
import java.util.{ Timer, TimerTask }
private[this] var timer: Timer = _
private[this] var task: TimerTask = _
schedule()
private[this] def schedule(): Unit = {
if ((task eq null) && delay > 0) {
if (timer eq null) timer = new Timer(true) // daemon = true
task = new TimerTask { def run = body }
timer.schedule(task, delay)
}
}
def reset(): Unit = synchronized {
if (task ne null) { task.cancel(); task = null }
schedule()
}
def cancel(): Unit = if (timer ne null) timer.cancel()
}
//
// Debug output
//
/**
* General utility for displaying objects for debug output.
*/
def show(thing: Any, output: String => Unit, prefix: String = "", level: Int = 0): Unit = {
def out(s: String) = output((" " * level) + s)
thing match {
case (label: Any, value: Any) => show(value, output, label.toString + " = ", level)
case Some(value: Any) => show(value, output, prefix, level)
case None => out(prefix)
case t: Traversable[_] if t.isEmpty => out(prefix + "{}")
case t: Traversable[_] =>
out(prefix + "{")
t foreach { a => show(a, output, "", level + 1) }
out("}")
case any => out(prefix + any.toString)
}
}
def counted(count: Int, prefix: String, single: String, plural: String): String = {
count.toString + " " + prefix + (if (count == 1) single else plural)
}
}
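// A small usage sketch for the helpers above (assumes it compiles next to Util in the same
// package); the expected values in the comments follow directly from the definitions above.
object UtilUsageSketch {
  def demo(): Unit = {
    println(Util.duration("90s", default = 0L))   // 90000 ms
    println(Util.duration("2h", default = 0L))    // 7200000 ms
    println(Util.duration("oops", default = 42L)) // 42, the fallback for unrecognised input

    // Resettable timer: the body runs once, `delay` ms after the most recent reset().
    val alarm = Util.timer(delay = 100L) { println("idle for 100ms") }
    Thread.sleep(50)
    alarm.reset()     // postpone the alarm
    Thread.sleep(200) // the body fires roughly 100ms after the reset
    alarm.cancel()
  }
}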
|
kwlzn/pants
|
src/scala/org/pantsbuild/zinc/Util.scala
|
Scala
|
apache-2.0
| 7,058
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.top
import org.apache.ignite._
import org.apache.ignite.cluster.ClusterNode
import org.apache.ignite.internal.IgniteNodeAttributes._
import org.apache.ignite.internal.util.scala.impl
import org.apache.ignite.internal.util.typedef.X
import org.apache.ignite.internal.util.{IgniteUtils => U}
import org.apache.ignite.lang.IgnitePredicate
import org.apache.ignite.visor.VisorTag
import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable}
import org.apache.ignite.visor.visor._
import java.net.{InetAddress, UnknownHostException}
import scala.collection.JavaConversions._
import scala.language.{implicitConversions, reflectiveCalls}
import scala.util.control.Breaks._
/**
* ==Overview==
* Contains Visor command `top` implementation.
*
* ==Help==
* {{{
* +--------------------------------+
* | top | Prints current topology. |
* +--------------------------------+
* }}}
*
* ====Specification====
* {{{
* top "{-c1=e1<num> -c2=e2<num> ... -ck=ek<num>} {-h=<host1> ... -h=<hostk>} {-a}"
* }}}
*
* ====Arguments====
* {{{
* -ck=ek<num>
* This defines a mnemonic for node filter:
* -cc Number of available CPUs on the node.
* -cl Average CPU load (in %) on the node.
* -aj Active jobs on the node.
* -cj Cancelled jobs on the node.
* -tc Thread count on the node.
* -it Idle time on the node.
* Note: <num> can have 's', 'm', or 'h' suffix indicating
* seconds, minutes, and hours. By default (no suffix provided)
* value is assumed to be in milliseconds.
* -ut Up time on the node.
* Note: <num> can have 's', 'm', or 'h' suffix indicating
* seconds, minutes, and hours. By default (no suffix provided)
* value is assumed to be in milliseconds.
* -je Job execute time on the node.
* -jw Job wait time on the node.
* -wj Waiting jobs count on the node.
* -rj Rejected jobs count on the node.
* -hu Heap memory used (in MB) on the node.
* -hm Heap memory maximum (in MB) on the node.
*
* Comparison part of the mnemonic predicate:
* =eq<num> Equal '=' to '<num>' number.
* =neq<num> Not equal '!=' to '<num>' number.
* =gt<num> Greater than '>' to '<num>' number.
* =gte<num> Greater than or equal '>=' to '<num>' number.
* =lt<num> Less than '<' to '<num>' number.
* =lte<num> Less than or equal '<=' to '<num>' number.
* -h=<host>
* This defines a host to show nodes from.
* Multiple hosts can be provided.
* -a
* This defines whether to show a separate table of nodes
* with detail per-node information.
* }}}
*
* ====Examples====
* {{{
* top "-cc=eq2"
* Prints topology for all nodes with two CPUs.
* top "-cc=eq2 -a"
* Prints full information for all nodes with two CPUs.
* top "-h=10.34.2.122 -h=10.65.3.11"
* Prints topology for provided hosts.
* top
* Prints full topology.
* }}}
*/
class VisorTopologyCommand extends VisorConsoleCommand {
@impl protected val name = "top"
/**
* ===Command===
* Prints full topology.
*
* ===Examples===
* <ex>top</ex>
* Prints full topology.
*/
def top() {
top("")
}
/**
* ===Command===
* Prints topology for provided mnemonic predicate.
*
* ===Examples===
* <ex>top "-cc=eq2"</ex>
* Prints topology for all nodes with two CPUs.
*
* <ex>top "-cc=eq2 -a"</ex>
* Prints full information for all nodes with two CPUs.
*
* <ex>top "-h=10.34.2.122 -h=10.65.3.11"</ex>
* Prints topology for provided hosts.
*
* @param args Command arguments.
*/
def top(args: String) = breakable {
assert(args != null)
if (!isConnected)
adviseToConnect()
else {
val argLst = parseArgs(args)
val hosts = argLst.filter(_._1 == "h").map((a: Arg) =>
try
InetAddress.getByName(a._2).getHostAddress
catch {
case e: UnknownHostException => scold("Unknown host: " + a._2).^^
"" // Never happens.
}
).filter(!_.isEmpty).toSet
val all = hasArgFlag("a", argLst)
var f: NodeFilter = (_: ClusterNode) => true
try {
argLst foreach (arg => {
val (n, v) = arg
n match {
case "cc" if v != null => f = make(v, f, _.metrics.getTotalCpus)
case "cl" if v != null => f = make(v, f, (n: ClusterNode) =>
(n.metrics.getCurrentCpuLoad * 100).toLong)
case "aj" if v != null => f = make(v, f, _.metrics.getCurrentActiveJobs)
case "cj" if v != null => f = make(v, f, _.metrics.getCurrentCancelledJobs)
case "tc" if v != null => f = make(v, f, _.metrics.getCurrentThreadCount)
case "ut" if v != null => f = make(v, f, _.metrics.getUpTime)
case "je" if v != null => f = make(v, f, _.metrics.getCurrentJobExecuteTime)
case "jw" if v != null => f = make(v, f, _.metrics.getCurrentJobWaitTime)
case "wj" if v != null => f = make(v, f, _.metrics.getCurrentWaitingJobs)
case "rj" if v != null => f = make(v, f, _.metrics.getCurrentRejectedJobs)
case "hu" if v != null => f = make(v, f, _.metrics.getHeapMemoryUsed)
case "hm" if v != null => f = make(v, f, _.metrics.getHeapMemoryMaximum)
case _ => ()
}
})
show(n => f(n), hosts, all)
}
catch {
case e: NumberFormatException => scold(e)
case e: IgniteException => scold(e)
}
}
}
/**
* @param exprStr Expression string.
* @param f Node filter
* @param v Value generator.
*/
private def make(exprStr: String, f: NodeFilter, v: ClusterNode => Long): NodeFilter = {
assert(exprStr != null)
assert(f != null)
assert(v != null)
val expr = makeExpression(exprStr)
// Note that if 'f(n)' is false - 'value' won't be evaluated.
if (expr.isDefined)
(n: ClusterNode) => f(n) && expr.get.apply(v(n))
else
throw new IgniteException("Invalid expression: " + exprStr)
}
/**
* Prints topology.
*
* @param f Node filtering predicate.
* @param hosts Set of hosts to take nodes from.
* @param all Whether to show full information.
*/
private def show(f: NodeFilter, hosts: Set[String], all: Boolean) = breakable {
assert(f != null)
assert(hosts != null)
var nodes = ignite.cluster.forPredicate(new IgnitePredicate[ClusterNode] {
override def apply(e: ClusterNode) = f(e)
}).nodes()
if (hosts.nonEmpty)
nodes = nodes.filter(n => n.addresses.toSet.intersect(hosts).nonEmpty)
if (nodes.isEmpty)
println("Empty topology.").^^
if (all) {
val nodesT = VisorTextTable()
nodesT #= ("Node ID8(@), IP", "Start Time", "Up Time",
//"Idle Time",
"CPUs", "CPU Load", "Free Heap")
nodes foreach ((n: ClusterNode) => {
val m = n.metrics
val usdMem = m.getHeapMemoryUsed
val maxMem = m.getHeapMemoryMaximum
val freeHeapPct = (maxMem - usdMem) * 100 / maxMem
val cpuLoadPct = m.getCurrentCpuLoad * 100
// Add row.
nodesT += (
nodeId8Addr(n.id),
formatDateTime(m.getStartTime),
X.timeSpan2HMS(m.getUpTime),
m.getTotalCpus,
safePercent(cpuLoadPct),
formatDouble(freeHeapPct) + " %"
)
})
println("Nodes: " + nodes.size)
nodesT.render()
nl()
}
val neighborhood = U.neighborhood(nodes)
val hostsT = VisorTextTable()
hostsT #= ("Int./Ext. IPs", "Node ID8(@)","Node Type", "OS", "CPUs", "MACs", "CPU Load")
neighborhood.foreach {
case (_, neighbors) =>
var ips = Set.empty[String]
var id8s = List.empty[String]
var nodeTypes = List.empty[String]
var macs = Set.empty[String]
var cpuLoadSum = 0.0
val n1 = neighbors.head
assert(n1 != null)
val cpus = n1.metrics.getTotalCpus
val os = "" +
n1.attribute("os.name") + " " +
n1.attribute("os.arch") + " " +
n1.attribute("os.version")
var i = 1
neighbors.foreach(n => {
id8s = id8s :+ (i.toString + ": " + nodeId8(n.id))
nodeTypes = nodeTypes :+ (if (n.isClient) "Client" else "Server")
i += 1
ips = ips ++ n.addresses()
cpuLoadSum += n.metrics().getCurrentCpuLoad
macs = macs ++ n.attribute[String](ATTR_MACS).split(", ").map(_.grouped(2).mkString(":"))
})
// Add row.
hostsT += (
ips.toSeq,
id8s,
nodeTypes,
os,
cpus,
macs.toSeq,
safePercent(cpuLoadSum / neighbors.size() * 100)
)
}
println("Hosts: " + neighborhood.size)
hostsT.render()
nl()
val m = ignite.cluster.forNodes(nodes).metrics()
val freeHeap = (m.getHeapMemoryTotal - m.getHeapMemoryUsed) * 100 / m.getHeapMemoryTotal
val sumT = VisorTextTable()
sumT += ("Total hosts", U.neighborhood(nodes).size)
sumT += ("Total nodes", m.getTotalNodes)
sumT += ("Total CPUs", m.getTotalCpus)
sumT += ("Avg. CPU load", safePercent(m.getAverageCpuLoad * 100))
sumT += ("Avg. free heap", formatDouble(freeHeap) + " %")
sumT += ("Avg. Up time", X.timeSpan2HMS(m.getUpTime))
sumT += ("Snapshot time", formatDateTime(System.currentTimeMillis))
println("Summary:")
sumT.render()
}
}
/**
* Companion object that does initialization of the command.
*/
object VisorTopologyCommand {
/** Singleton command. */
private val cmd = new VisorTopologyCommand
// Adds command's help to visor.
addHelp(
name = "top",
shortInfo = "Prints current topology.",
spec = List(
"top {-c1=e1<num> -c2=e2<num> ... -ck=ek<num>} {-h=<host1> ... -h=<hostk>} {-a}"
),
args = List(
"-ck=ek<num>" -> List(
"This defines a mnemonic for node filter:",
" -cc Number of available CPUs on the node.",
" -cl Average CPU load (in %) on the node.",
" -aj Active jobs on the node.",
" -cj Cancelled jobs on the node.",
" -tc Thread count on the node.",
// " -it Idle time on the node.",
// " Note: <num> can have 's', 'm', or 'h' suffix indicating",
// " seconds, minutes, and hours. By default (no suffix provided)",
// " value is assumed to be in milliseconds.",
" -ut Up time on the node.",
" Note: <num> can have 's', 'm', or 'h' suffix indicating",
" seconds, minutes, and hours. By default (no suffix provided)",
" value is assumed to be in milliseconds.",
" -je Job execute time on the node.",
" -jw Job wait time on the node.",
" -wj Waiting jobs count on the node.",
" -rj Rejected jobs count on the node.",
" -hu Heap memory used (in MB) on the node.",
" -hm Heap memory maximum (in MB) on the node.",
"",
"Comparison part of the mnemonic predicate:",
" =eq<num> Equal '=' to '<num>' number.",
" =neq<num> Not equal '!=' to '<num>' number.",
" =gt<num> Greater than '>' to '<num>' number.",
" =gte<num> Greater than or equal '>=' to '<num>' number.",
" =lt<num> Less than '<' to '<num>' number.",
" =lte<num> Less than or equal '<=' to '<num>' number."
),
"-h=<host>" -> List(
"This defines a host to show nodes from.",
"Multiple hosts can be provided."
),
"-a" -> List(
"This defines whether to show a separate table of nodes",
"with detail per-node information."
)
),
examples = List(
"top -cc=eq2" ->
"Prints topology for all nodes with two CPUs.",
"top -cc=eq2 -a" ->
"Prints full information for all nodes with two CPUs.",
"top -h=10.34.2.122 -h=10.65.3.11" ->
"Prints topology for provided hosts.",
"top" ->
"Prints full topology."
),
emptyArgs = cmd.top,
withArgs = cmd.top
)
/**
* Singleton.
*/
def apply() = cmd
/**
* Implicit converter from visor to commands "pimp".
*
* @param vs Visor tagging trait.
*/
implicit def fromTop2Visor(vs: VisorTag): VisorTopologyCommand = cmd
}
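// A standalone sketch (plain Scala functions, no Ignite types) of the filter-chaining idea
// behind `make` above: each mnemonic narrows the previous predicate with f(n) && expr(metric(n)).
object FilterChainSketch {
  type Filter[N] = N => Boolean

  def chain[N](f: Filter[N], metric: N => Long, expr: Long => Boolean): Filter[N] =
    n => f(n) && expr(metric(n))

  def demo(): Unit = {
    case class Node(cpus: Long, activeJobs: Long)
    var f: Filter[Node] = _ => true
    f = chain(f, _.cpus, _ == 2L)       // roughly "-cc=eq2"
    f = chain(f, _.activeJobs, _ < 10L) // roughly "-aj=lt10"
    println(f(Node(cpus = 2, activeJobs = 3))) // true
    println(f(Node(cpus = 4, activeJobs = 3))) // false
  }
}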
|
tkpanther/ignite
|
modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/top/VisorTopologyCommand.scala
|
Scala
|
apache-2.0
| 14,991
|
import scala.quoted.*
object Macro {
inline def foo: Unit = ${ fooImpl }
def fooImpl(using Quotes): Expr[Unit] = '{}
}
|
lampepfl/dotty
|
tests/run-macros/i4515b/Macro_1.scala
|
Scala
|
apache-2.0
| 124
|
package com.github.ghik.silencer
import scala.tools.nsc.Settings
object CrossTestUtils {
def enableUnusedImports(settings: Settings): Unit = {
settings.warnUnused.enable(settings.UnusedWarnings.Imports)
}
}
|
ghik/silencer
|
silencer-plugin/src/test/scala-2.12-13/com/github/ghik/silencer/CrossTestUtils.scala
|
Scala
|
apache-2.0
| 217
|
/*
* Copyright 2008 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package org.wso2.as
package snippet
import scala.xml.{NodeSeq,Text}
import net.liftweb._
import http._
import S._
import common._
import util._
import Helpers._
import javax.persistence.{EntityExistsException,PersistenceException}
import org.wso2.as.model._
import Model._
class AuthorOps extends Loggable {
def list (xhtml : NodeSeq) : NodeSeq = {
val authors = Model.createNamedQuery[Author]("findAllAuthors").getResultList()
authors.flatMap(author =>
bind("author", xhtml,
"name" -> Text(author.name),
"count" -> SHtml.link("/books/search.html", {() =>
BookOps.resultVar(Model.createNamedQuery[Book]("findBooksByAuthor", "id" ->author.id).getResultList().toList)
}, Text(author.books.size().toString)),
"edit" -> SHtml.link("add.html", () => authorVar(author), Text(?("Edit")))))
}
// Set up a requestVar to track the author object for edits and adds
object authorVar extends RequestVar(new Author())
def author = authorVar.is
def add (xhtml : NodeSeq) : NodeSeq = {
def doAdd () = {
if (author.name.length == 0) {
error("emptyAuthor", "The author's name cannot be blank")
} else {
try {
Model.mergeAndFlush(author)
redirectTo("list.html")
} catch {
case ee : EntityExistsException => error("Author already exists")
case pe : PersistenceException => error("Error adding author"); logger.error("Error adding author", pe)
}
}
}
// Hold a val here so that the "id" closure holds it when we re-enter this method
val currentId = author.id
bind("author", xhtml,
"id" -> SHtml.hidden(() => author.id = currentId),
"name" -> SHtml.text(author.name, author.name = _),
"submit" -> SHtml.submit(?("Save"), doAdd))
}
}
|
wso2as-developer/scala-samples
|
lift-jpa/web/src/main/scala/org/wso2/as/snippet/Author.scala
|
Scala
|
apache-2.0
| 2,428
|
package ee.cone.c4actor
import ee.cone.c4assemble.ToPrimaryKey
trait ModelAccessFactoryApp {
def modelAccessFactory: ModelAccessFactory = modelAccessFactoryImpl
private lazy val modelAccessFactoryImpl = ModelAccessFactoryImpl
}
object ModelAccessFactoryImpl extends ModelAccessFactory {
def to[P <: Product](product: P): Option[Access[P]] = {
val name = product.getClass.getName
val lens = TxProtoLens[P](product)
Option(AccessImpl(product,Option(lens),NameMetaAttr(name) :: Nil))
}
}
case class AccessImpl[P](
initialValue: P, updatingLens: Option[Lens[Context, P] with Product], metaList: List[AbstractMetaAttr]
) extends Access[P] {
def to[V](inner: ProdLens[P,V]): Access[V] = {
val rValue = inner.of(initialValue)
val rLens = updatingLens.map(l⇒ComposedLens(l,inner))
val rMeta = metaList ::: inner.metaList
AccessImpl[V](rValue,rLens,rMeta)
}
def zoom: Access[P] = AccessImpl[P](initialValue,
MakeTxProtoLens(initialValue),
metaList)
}
case class ComposedLens[C,T,I](
outer: Lens[C,T] with Product, inner: Lens[T,I] with Product
) extends AbstractLens[C,I] {
def set: I ⇒ C ⇒ C = item ⇒ outer.modify(inner.set(item))
def of: C ⇒ I = container ⇒ inner.of(outer.of(container))
}
case object MakeTxProtoLens {
def apply[P](initialValue: P): Option[Lens[Context, P] with Product] =
initialValue match {
case a:Product ⇒ Option(TxProtoLens(a)).asInstanceOf[Option[Lens[Context, P] with Product]]
case _ ⇒ None
}
}
case class TxProtoLens[V<:Product](initialValue: V) extends AbstractLens[Context,V] {
private def className = initialValue.getClass.getName
private def srcId = ToPrimaryKey(initialValue)
private def key = ByPrimaryKeyGetter(className)
def of: Context ⇒ V = local ⇒ key.of(local).getOrElse(srcId,initialValue)
def set: V ⇒ Context ⇒ Context = value ⇒ local ⇒ {
if(initialValue != of(local)) throw new Exception(s"'$initialValue' != '${of(local)}'")
val eventsC = List(UpdateLEvent(srcId, className, value))
val eventsA = LEvent.update(value)
if(eventsC != eventsA) throw new Exception(s"'$eventsC' != '$eventsA'")
TxAdd(eventsC)(local)
}
}
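// A self-contained sketch (its own minimal Lens trait, not the project's) of the composition
// pattern ComposedLens implements above: reads go outer-then-inner, writes rebuild the outer
// container through modify.
object LensCompositionSketch {
  trait Lens[C, I] {
    def of: C => I
    def set: I => C => C
    def modify(f: I => I): C => C = c => set(f(of(c)))(c)
  }
  case class Composed[C, T, I](outer: Lens[C, T], inner: Lens[T, I]) extends Lens[C, I] {
    def of: C => I = c => inner.of(outer.of(c))
    def set: I => C => C = i => outer.modify(inner.set(i))
  }

  case class Address(city: String)
  case class Person(name: String, address: Address)

  val addressLens: Lens[Person, Address] = new Lens[Person, Address] {
    def of: Person => Address = _.address
    def set: Address => Person => Person = a => p => p.copy(address = a)
  }
  val cityLens: Lens[Address, String] = new Lens[Address, String] {
    def of: Address => String = _.city
    def set: String => Address => Address = c => a => a.copy(city = c)
  }

  def demo(): Unit = {
    val personCity = Composed(addressLens, cityLens)
    val p = Person("Ada", Address("London"))
    println(personCity.of(p))           // London
    println(personCity.set("Paris")(p)) // Person(Ada,Address(Paris))
  }
}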
|
wregs/c4proto
|
c4actor-base/src/main/scala/ee/cone/c4actor/AccessImpl.scala
|
Scala
|
apache-2.0
| 2,203
|
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.exec
// Put this file in Hive's exec package to access package level visible fields and methods.
import java.util.{ArrayList => JArrayList, HashMap => JHashMap}
import scala.collection.immutable.BitSet.BitSet1
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.BeanProperty
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.abm.udaf.{GenericUDAFEvaluatorWithInstruction, Instruction}
import org.apache.hadoop.hive.ql.plan.AggregationDesc
import org.apache.hadoop.hive.ql.plan.{ExprNodeConstantDesc, ExprNodeDesc}
import org.apache.hadoop.hive.ql.plan.GroupByDesc
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory,
ObjectInspectorUtils, StructObjectInspector}
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import shark.execution.UnaryOperator
/**
* The pre-shuffle group by operator responsible for map side aggregations.
*/
class GroupByPreShuffleOperator extends UnaryOperator[GroupByDesc] {
@BeanProperty var conf: GroupByDesc = _
@BeanProperty var minReductionHashAggr: Float = _
@BeanProperty var numRowsCompareHashAggr: Int = _
@transient var keyFactory: KeyWrapperFactory = _
@transient var rowInspector: ObjectInspector = _
// The aggregation functions.
@transient var aggregationEvals: Array[GenericUDAFEvaluator] = _
@transient var aggregationObjectInspectors: Array[ObjectInspector] = _
// Key fields to be grouped.
@transient var keyFields: Array[ExprNodeEvaluator] = _
// A struct object inspector composing of all the fields.
@transient var keyObjectInspector: StructObjectInspector = _
@transient var aggregationParameterFields: Array[Array[ExprNodeEvaluator]] = _
@transient var aggregationParameterObjectInspectors: Array[Array[ObjectInspector]] = _
@transient var aggregationParameterStandardObjectInspectors: Array[Array[ObjectInspector]] = _
@transient var aggregationIsDistinct: Array[Boolean] = _
@transient var currentKeyObjectInspectors: Array[ObjectInspector] = _
// Grouping set related properties.
@transient var groupingSetsPresent: Boolean = _
@transient var groupingSets: java.util.List[java.lang.Integer] = _
@transient var groupingSetsPosition: Int = _
@transient var newKeysGroupingSets: Array[Object] = _
@transient var groupingSetsBitSet: Array[BitSet1] = _
@transient var cloneNewKeysArray: Array[Object] = _
def createLocals() {
aggregationEvals = conf.getAggregators.map(_.getGenericUDAFEvaluator).toArray
aggregationIsDistinct = conf.getAggregators.map(_.getDistinct).toArray
rowInspector = objectInspector.asInstanceOf[StructObjectInspector]
keyFields = conf.getKeys().map(k => ExprNodeEvaluatorFactory.get(k)).toArray
val keyObjectInspectors: Array[ObjectInspector] = keyFields.map(k => k.initialize(rowInspector))
currentKeyObjectInspectors = keyObjectInspectors.map { k =>
ObjectInspectorUtils.getStandardObjectInspector(k, ObjectInspectorCopyOption.WRITABLE)
}
aggregationParameterFields = conf.getAggregators.toArray.map { aggr =>
aggr.asInstanceOf[AggregationDesc].getParameters.toArray.map { param =>
ExprNodeEvaluatorFactory.get(param.asInstanceOf[ExprNodeDesc])
}
}
aggregationParameterObjectInspectors = aggregationParameterFields.map { aggr =>
aggr.map { param => param.initialize(rowInspector) }
}
aggregationParameterStandardObjectInspectors = aggregationParameterObjectInspectors.map { ois =>
ois.map { oi =>
ObjectInspectorUtils.getStandardObjectInspector(oi, ObjectInspectorCopyOption.WRITABLE)
}
}
aggregationEvals.zipWithIndex.map { pair =>
pair._1.init(conf.getAggregators.get(pair._2).getMode,
aggregationParameterObjectInspectors(pair._2))
}
aggregationObjectInspectors =
Array.tabulate[ObjectInspector](aggregationEvals.length) { i=>
val mode = conf.getAggregators()(i).getMode()
aggregationEvals(i).init(mode, aggregationParameterObjectInspectors(i))
}
val keyFieldNames = conf.getOutputColumnNames.slice(0, keyFields.length)
val totalFields = keyFields.length + aggregationEvals.length
val keyois = new JArrayList[ObjectInspector](totalFields)
keyObjectInspectors.foreach(keyois.add(_))
keyObjectInspector = ObjectInspectorFactory.
getStandardStructObjectInspector(keyFieldNames, keyois)
keyFactory = new KeyWrapperFactory(keyFields, keyObjectInspectors, currentKeyObjectInspectors)
// Initializations for grouping set.
groupingSetsPresent = conf.isGroupingSetsPresent()
if (groupingSetsPresent) {
groupingSets = conf.getListGroupingSets()
groupingSetsPosition = conf.getGroupingSetPosition()
newKeysGroupingSets = new Array[Object](groupingSets.size)
groupingSetsBitSet = new Array[BitSet1](groupingSets.size)
cloneNewKeysArray = new Array[Object](groupingSets.size)
groupingSets.zipWithIndex.foreach { case(groupingSet, i) =>
val groupingSetValueEvaluator: ExprNodeEvaluator =
ExprNodeEvaluatorFactory.get(new ExprNodeConstantDesc(String.valueOf(groupingSet)));
newKeysGroupingSets(i) = groupingSetValueEvaluator.evaluate(null)
groupingSetsBitSet(i) = new BitSet1(groupingSet.longValue())
}
}
// ABM: give all aggregate evaluators the same instruction
if (conf.isUncertain) {
val instruction: Instruction = new Instruction
aggregationEvals.foreach(_.asInstanceOf[GenericUDAFEvaluatorWithInstruction].setInstruction(instruction))
}
}
protected final def getNewKeysIterator (newKeysArray: Array[Object]): Iterator[Unit] = {
// This iterator abstracts the operation that gets an array of groupby keys for the next
// grouping set of the grouping sets and makes such logic re-usable in several places.
//
// Initially, newKeysArray is an array containing all groupby keys for the superset of
// grouping sets. next() method updates newKeysArray to be an array of groupby keys for
// the next grouping set.
new Iterator[Unit]() {
Array.copy(newKeysArray, 0, cloneNewKeysArray, 0, groupingSetsPosition)
var groupingSetIndex = 0
override def hasNext: Boolean = groupingSetIndex < groupingSets.size
// Update newKeys according to the current grouping set.
override def next(): Unit = {
for (i <- 0 until groupingSetsPosition) {
newKeysArray(i) = null
}
groupingSetsBitSet(groupingSetIndex).foreach {keyIndex =>
newKeysArray(keyIndex) = cloneNewKeysArray(keyIndex)
}
newKeysArray(groupingSetsPosition) = newKeysGroupingSets(groupingSetIndex)
groupingSetIndex += 1
}
}
}
def createRemotes() {
conf = desc
minReductionHashAggr = hconf.get(HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION.varname).toFloat
numRowsCompareHashAggr = hconf.get(HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL.varname).toInt
}
override def initializeOnMaster() {
super.initializeOnMaster()
createRemotes()
createLocals()
}
override def initializeOnSlave() {
super.initializeOnSlave()
createLocals()
}
// copied from the org.apache.hadoop.hive.ql.exec.GroupByOperator
override def outputObjectInspector() = {
val totalFields = keyFields.length + aggregationEvals.length
val ois = new ArrayBuffer[ObjectInspector](totalFields)
ois ++= (currentKeyObjectInspectors)
ois ++= (aggregationObjectInspectors)
val fieldNames = conf.getOutputColumnNames()
import scala.collection.JavaConversions._
ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, ois.toList)
}
override def processPartition(split: Int, iter: Iterator[_]) = {
logDebug("Running Pre-Shuffle Group-By")
var numRowsInput = 0
var numRowsHashTbl = 0
var useHashAggr = true
// Do aggregation on map side using hashAggregations hash table.
val hashAggregations = new JHashMap[KeyWrapper, Array[AggregationBuffer]]()
val newKeys: KeyWrapper = keyFactory.getKeyWrapper()
while (iter.hasNext && useHashAggr) {
val row = iter.next().asInstanceOf[AnyRef]
numRowsInput += 1
newKeys.getNewKey(row, rowInspector)
val newKeysIter =
if (groupingSetsPresent) getNewKeysIterator(newKeys.getKeyArray) else null
do {
if (groupingSetsPresent) {
newKeysIter.next
}
newKeys.setHashKey()
var aggs = hashAggregations.get(newKeys)
var isNewKey = false
if (aggs == null) {
isNewKey = true
val newKeyProber = newKeys.copyKey()
aggs = newAggregations()
hashAggregations.put(newKeyProber, aggs)
numRowsHashTbl += 1
}
if (isNewKey) {
aggregateNewKey(row, aggs)
} else {
aggregateExistingKey(row, aggs)
}
} while (groupingSetsPresent && newKeysIter.hasNext)
// Disable partial hash-based aggregation if desired minimum reduction is
// not observed after initial interval.
if (numRowsInput == numRowsCompareHashAggr) {
if (numRowsHashTbl > numRowsInput * minReductionHashAggr) {
useHashAggr = false
logInfo("Mapside hash aggregation disabled")
} else {
logInfo("Mapside hash aggregation enabled")
}
logInfo("#hash table=" + numRowsHashTbl + " #rows=" +
numRowsInput + " reduction=" + numRowsHashTbl.toFloat/numRowsInput +
" minReduction=" + minReductionHashAggr)
}
}
// Generate an iterator for the aggregation output from hashAggregations.
val outputCache = new Array[Object](keyFields.length + aggregationEvals.length)
hashAggregations.toIterator.map { case(key, aggrs) =>
val keyArr = key.getKeyArray()
var i = 0
while (i < keyArr.length) {
outputCache(i) = keyArr(i)
i += 1
}
i = 0
while (i < aggrs.length) {
outputCache(i + keyArr.length) = aggregationEvals(i).evaluate(aggrs(i))
i += 1
}
outputCache
} ++ {
// Concatenate with iterator for remaining rows not in hashAggregations.
val newIter = iter.map { case row: AnyRef =>
newKeys.getNewKey(row, rowInspector)
val newAggrKey = newKeys.copyKey()
val aggrs = newAggregations()
aggregateNewKey(row, aggrs)
val keyArr = newAggrKey.getKeyArray()
var i = 0
while (i < keyArr.length) {
outputCache(i) = keyArr(i)
i += 1
}
i = 0
while (i < aggrs.length) {
outputCache(i + keyArr.length) = aggregationEvals(i).evaluate(aggrs(i))
i += 1
}
outputCache
}
if (groupingSetsPresent) {
val outputBuffer = new Array[Array[Object]](groupingSets.size)
newIter.flatMap { row: Array[Object] =>
val newKeysIter = getNewKeysIterator(row)
var i = 0
while (newKeysIter.hasNext) {
newKeysIter.next
outputBuffer(i) = row.clone()
i += 1
}
outputBuffer
}
} else {
newIter
}
}
}
@inline protected final
def aggregateNewKey(row: Object, aggregations: Array[AggregationBuffer]) {
// ABM: evaluate the last one first, as it generates the instruction needed by others
var i = aggregations.length - 1
while (i >= 0) {
aggregationEvals(i).aggregate(
aggregations(i), aggregationParameterFields(i).map(_.evaluate(row)))
i -= 1
}
}
@inline protected final
def aggregateExistingKey(row: AnyRef, aggregations: Array[AggregationBuffer]) {
// ABM: evaluate the last one first, as it generates the instruction needed by others
var i = aggregations.length - 1
while (i >= 0) {
if (!aggregationIsDistinct(i)) {
aggregationEvals(i).aggregate(
aggregations(i), aggregationParameterFields(i).map(_.evaluate(row)))
}
i -= 1
}
}
protected def newAggregations(): Array[AggregationBuffer] = {
aggregationEvals.map(eval => eval.getNewAggregationBuffer)
}
}
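// A standalone illustration (plain Scala collections, no Hive types) of how getNewKeysIterator
// above projects the full key array for each grouping set: key positions outside the set's bit
// mask are nulled out, mirroring SQL GROUPING SETS semantics.
object GroupingSetMaskSketch {
  import scala.collection.immutable.BitSet

  def mask(keys: Array[String], groupingSet: BitSet): Array[String] =
    keys.indices.map(i => if (groupingSet(i)) keys(i) else null).toArray

  def demo(): Unit = {
    val keys = Array("2024", "shoes", "EU") // e.g. (year, category, region)
    println(mask(keys, BitSet(0, 2)).mkString(", ")) // 2024, null, EU
    println(mask(keys, BitSet(1)).mkString(", "))    // null, shoes, null
  }
}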
|
uclaabs/abs
|
src/main/scala/shark/execution/GroupByPreShuffleOperator.scala
|
Scala
|
apache-2.0
| 13,150
|
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.tuplejump.calliope.thrift
import org.apache.cassandra.hadoop.ColumnFamilyInputFormat
import scala.collection.JavaConversions._
import java.text.SimpleDateFormat
import java.util.Date
import com.tuplejump.calliope.ThriftCasBuilder
import org.apache.spark._
import org.apache.spark.rdd.RDD
import com.tuplejump.calliope.utils.{SparkHadoopMapReduceUtil, CassandraPartition}
import com.tuplejump.calliope.Types.{ThriftRowMap, ThriftRowKey}
import org.apache.hadoop.mapreduce.{InputSplit, TaskAttemptID, JobID}
import scala.reflect.ClassTag
class ThriftCassandraRDD[T: ClassTag](sc: SparkContext,
@transient cas: ThriftCasBuilder,
unmarshaller: (ThriftRowKey, ThriftRowMap) => T)
extends RDD[T](sc, Nil)
with SparkHadoopMapReduceUtil
with Logging {
// A Hadoop Configuration can be about 10 KB, which is pretty big, so broadcast it
@transient private val hadoopConf = cas.configuration
private val confBroadcast = sc.broadcast(new SerializableWritable(hadoopConf))
@transient val jobId = new JobID(System.currentTimeMillis().toString, id)
private val jobtrackerId: String = {
val formatter = new SimpleDateFormat("yyyyMMddHHmm")
formatter.format(new Date())
}
def compute(theSplit: Partition, context: TaskContext): Iterator[T] = new Iterator[T] {
val conf = confBroadcast.value.value
val format = new ColumnFamilyInputFormat
val split = theSplit.asInstanceOf[CassandraPartition]
//Set configuration
val attemptId = new TaskAttemptID(jobtrackerId, id, true, split.index, 0)
val hadoopAttemptContext = newTaskAttemptContext(conf, attemptId)
val reader = format.createRecordReader(
split.inputSplit.value, hadoopAttemptContext)
reader.initialize(split.inputSplit.value, hadoopAttemptContext)
context.addOnCompleteCallback(() => close())
var havePair = false
var finished = false
override def hasNext: Boolean = {
if (!finished && !havePair) {
finished = !reader.nextKeyValue
havePair = !finished
}
!finished
}
override def next: T = {
if (!hasNext) {
throw new java.util.NoSuchElementException("End of stream")
}
havePair = false
val rowAsMap = reader.getCurrentValue.map {
case (name, column) => column.name() -> column.value()
}.toMap
unmarshaller(reader.getCurrentKey, rowAsMap)
//return (keyUnmarshaller(reader.getCurrentKey), rowUnmarshaller(rowAsMap))
}
private def close() {
try {
reader.close()
} catch {
case e: Exception => logWarning("Exception in RecordReader.close()", e)
}
}
}
def getPartitions: Array[Partition] = {
val jc = newJobContext(hadoopConf, jobId)
val inputFormat = new ColumnFamilyInputFormat()
val rawSplits = inputFormat.getSplits(jc).toArray
val result = new Array[Partition](rawSplits.size)
for (i <- 0 until rawSplits.size) {
result(i) = new CassandraPartition(id, i, rawSplits(i).asInstanceOf[InputSplit])
}
result
}
override protected[calliope] def getPreferredLocations(split: Partition): Seq[String] = {
split.asInstanceOf[CassandraPartition].s.getLocations
}
}
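// A standalone sketch (no Hadoop or Cassandra types) of the iterator pattern compute() uses
// above: a pull-based reader exposing nextKeyValue() is adapted to a Scala Iterator by
// buffering the "already advanced?" state in two flags, exactly as hasNext/next do.
object ReaderIteratorSketch {
  trait KeyValueReader[A] {
    def nextKeyValue(): Boolean
    def current: A
  }

  def asIterator[A](reader: KeyValueReader[A]): Iterator[A] = new Iterator[A] {
    private var havePair = false
    private var finished = false
    override def hasNext: Boolean = {
      if (!finished && !havePair) {
        finished = !reader.nextKeyValue()
        havePair = !finished
      }
      !finished
    }
    override def next(): A = {
      if (!hasNext) throw new java.util.NoSuchElementException("End of stream")
      havePair = false
      reader.current
    }
  }

  def demo(): Unit = {
    val data = scala.collection.mutable.Queue(1, 2, 3)
    var cur = 0
    val reader = new KeyValueReader[Int] {
      def nextKeyValue(): Boolean = if (data.nonEmpty) { cur = data.dequeue(); true } else false
      def current: Int = cur
    }
    println(asIterator(reader).toList) // List(1, 2, 3)
  }
}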
|
brenttheisen/calliope
|
src/main/scala/com/tuplejump/calliope/thrift/ThriftCassandraRDD.scala
|
Scala
|
apache-2.0
| 4,099
|
package lila.event
import play.api.Configuration
import com.softwaremill.macwire._
import lila.common.config._
final class Env(
appConfig: Configuration,
db: lila.db.Db,
cacheApi: lila.memo.CacheApi
)(implicit ec: scala.concurrent.ExecutionContext) {
private lazy val eventColl = db(appConfig.get[CollName]("event.collection.event"))
lazy val api = wire[EventApi]
}
|
luanlv/lila
|
modules/event/src/main/Env.scala
|
Scala
|
mit
| 422
|
package org.orbeon.oxf.portlet.liferay
import org.orbeon.oxf.portlet.{RequestPrependHeaders, RequestRemoveHeaders}
import org.orbeon.oxf.util.CollectionUtils
import javax.portlet.filter._
import javax.portlet._
class AddLiferayUserHeadersFilter
extends PortletFilter
with RenderFilter
with ActionFilter
with ResourceFilter
with EventFilter {
import AddLiferayUserHeadersFilter._
def doFilter(req: RenderRequest, res: RenderResponse, chain: FilterChain): Unit =
chain.doFilter(amendRequestWithUser(req)(wrapWithLiferayUserHeaders), res)
def doFilter(req: ActionRequest, res: ActionResponse, chain: FilterChain): Unit =
chain.doFilter(amendRequestWithUser(req)(wrapWithLiferayUserHeaders), res)
def doFilter(req: ResourceRequest, res: ResourceResponse, chain: FilterChain): Unit =
chain.doFilter(amendRequestWithUser(req)(wrapWithLiferayUserHeaders), res)
def doFilter(req: EventRequest, res: EventResponse, chain: FilterChain): Unit =
chain.doFilter(amendRequestWithUser(req)(wrapWithLiferayUserHeaders), res)
def init(filterConfig: filter.FilterConfig) = ()
def destroy() = ()
}
object AddLiferayUserHeadersFilter {
// NOTE: request.getRemoteUser() can be configured in liferay-portlet.xml with user-principal-strategy to either
// userId (a number) or screenName (a string). It seems more reliable to use the API below to obtain the user.
def amendRequestWithUser[T <: PortletRequest](req: T)(amend: (T, LiferayUser) => T): T =
LiferaySupport.getLiferayUser(req) match {
case Some(user) => amend(req, user)
case None => req
}
def wrapWithLiferayUserHeaders[T <: PortletRequest](req: T, user: LiferayUser): T = {
val liferayUserHeaders =
CollectionUtils.combineValues[String, String, Array](user.userHeaders) map
{ case (name, value) => name.toLowerCase -> value } toMap
wrap(req, LiferaySupport.AllHeaderNamesLower, liferayUserHeaders)
}
def wrap[T <: PortletRequest](req: T, remove: Set[String], prepend: Map[String, Array[String]]): T = {
trait CustomProperties extends RequestRemoveHeaders with RequestPrependHeaders {
val headersToRemove = remove
val headersToPrepend = prepend
}
req match {
case r: RenderRequest => new RenderRequestWrapper(r) with CustomProperties
case r: ActionRequest => new ActionRequestWrapper(r) with CustomProperties
case r: ResourceRequest => new ResourceRequestWrapper(r) with CustomProperties
case r: EventRequest => new EventRequestWrapper(r) with CustomProperties
case r: PortletRequest => new PortletRequestWrapper(r) with CustomProperties
}
}.asInstanceOf[T] // We can prove that the types work out for us ;)
}
|
orbeon/orbeon-forms
|
portlet-support/src/main/scala/org/orbeon/oxf/portlet/liferay/AddLiferayUserHeadersFilter.scala
|
Scala
|
lgpl-2.1
| 2,757
|
package com.programmaticallyspeaking.ncd.testing
import java.nio.charset.Charset
import java.util.Base64
object StringUtils {
private val encoder = Base64.getEncoder
private val decoder = Base64.getDecoder
private val utf8 = Charset.forName("utf-8")
def toBase64(s: String): String = encoder.encodeToString(s.getBytes(utf8))
def fromBase64(s: String): String = new String(decoder.decode(s), utf8)
}
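// A tiny usage sketch for the helpers above: Base64 over UTF-8 bytes is reversible, so
// fromBase64(toBase64(s)) == s for any string, including non-ASCII input.
object StringUtilsSketch {
  def demo(): Unit = {
    val encoded = StringUtils.toBase64("hello, wörld")
    println(encoded)                         // aGVsbG8sIHfDtnJsZA==
    println(StringUtils.fromBase64(encoded)) // hello, wörld
  }
}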
|
provegard/ncdbg
|
src/test/scala/com/programmaticallyspeaking/ncd/testing/StringUtils.scala
|
Scala
|
bsd-3-clause
| 412
|
package org.jetbrains.plugins.scala
package refactoring.changeSignature
import com.intellij.psi.PsiMember
import com.intellij.refactoring.changeSignature.{ChangeSignatureProcessorBase, ParameterInfo}
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.{ScalaChangeSignatureHandler, ScalaParameterInfo}
import org.junit.Assert._
/**
* Nikolay.Tropin
* 2014-09-11
*/
class ChangeSignatureInScalaTest extends ChangeSignatureTestBase {
override def findTargetElement: PsiMember = {
val element = new ScalaChangeSignatureHandler().findTargetMember(getFileAdapter, getEditorAdapter)
assertTrue("<caret> is not on method name", element.isInstanceOf[ScMethodLike])
element.asInstanceOf[ScMethodLike]
}
override def folderPath: String = baseRootPath() + "changeSignature/inScala/"
override def processor(newVisibility: String,
newName: String,
newReturnType: String,
newParams: => Seq[Seq[ParameterInfo]]): ChangeSignatureProcessorBase = {
scalaProcessor(newVisibility, newName, newReturnType, newParams, isAddDefaultValue)
}
override def mainFileName(testName: String): String = testName + ".scala"
override def mainFileAfterName(testName: String): String = testName + "_after.scala"
override def secondFileName(testName: String): String = null
override def secondFileAfterName(testName: String): String = null
private def parameterInfo(name: String, oldIdx: Int, tpe: ScType, defVal: String = "", isRep: Boolean = false, isByName: Boolean = false) = {
new ScalaParameterInfo(name, oldIdx, tpe, getProjectAdapter, isRep, isByName, defVal)
}
def testVisibility(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", -1, Int, "1"))
doTest("protected", "foo", null, Seq(params))
}
def testAddRepeatedParam(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("b", 1, Boolean),
parameterInfo("xs", -1, Int, isRep = true, defVal = "1"))
doTest(null, "foo", null, Seq(params))
}
def testAddRepeatedWithoutDefault(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("xs", -1, Int, isRep = true))
doTest(null, "foo", null, Seq(params))
}
def testMakeRepeatedParam(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("b", 1, Boolean, isRep = true))
doTest(null, "foo", null, Seq(params))
}
def testRemoveRepeatedParam(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("b", 1, Boolean))
doTest(null, "foo", null, Seq(params))
}
def testNoDefaultArg(): Unit = {
isAddDefaultValue = true
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("j", -1, Int))
doTest(null, "foo", null, Seq(params))
}
def testNoDefaultArg2(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("j", -1, Int))
doTest(null, "foo", null, Seq(params))
}
def testAnonFunWithDefaultArg(): Unit = {
isAddDefaultValue = true
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("j", -1, Int, "0"))
doTest(null, "foo", null, Seq(params))
}
def testAnonFunModifyCall(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("j", -1, Int, "0"))
doTest(null, "foo", null, Seq(params))
}
def testAnonFunManyParams(): Unit = {
isAddDefaultValue = true
val params = Seq(parameterInfo("j", 1, Int),
parameterInfo("b", 2, Boolean),
parameterInfo("s", -1, AnyRef, "\"\""))
doTest(null,"foo", null, Seq(params))
}
def testLocalFunction(): Unit = {
isAddDefaultValue = true
val params = Seq(parameterInfo("i", 0, Int), parameterInfo("s", -1, Boolean, "true"))
doTest(null, "local", null, Seq(params))
}
def testImported(): Unit = {
isAddDefaultValue = false
val params = Seq(parameterInfo("i", -1, Int, "0"))
doTest(null, "foo", null, Seq(params))
}
def testAddClauseConstructorVararg(): Unit = {
isAddDefaultValue = false
val params = Seq(Seq(parameterInfo("b", 0, Boolean)), Seq(parameterInfo("x", -1, Int, "10"), parameterInfo("i", 1, Int, isRep = true)))
doTest(null, "AddClauseConstructorVararg", null, params)
}
def testCaseClass(): Unit = {
isAddDefaultValue = true
val params = Seq(
Seq(parameterInfo("ii", 0, Int), parameterInfo("argss", 2, Int, isRep = true)),
Seq(parameterInfo("cc", 1, Char), parameterInfo("b", -1, Boolean, "true"))
)
doTest(null, "CClass", null, params)
}
}
|
whorbowicz/intellij-scala
|
test/org/jetbrains/plugins/scala/refactoring/changeSignature/ChangeSignatureInScalaTest.scala
|
Scala
|
apache-2.0
| 4,917
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import org.json4s.{DefaultFormats, JObject}
import org.json4s.JsonDSL._
import org.apache.spark.annotation.Since
import org.apache.spark.ml.{PredictionModel, Predictor}
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.tree._
import org.apache.spark.ml.tree.impl.RandomForest
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.DefaultParamsReader.Metadata
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.mllib.tree.model.{RandomForestModel => OldRandomForestModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions._
/**
* <a href="http://en.wikipedia.org/wiki/Random_forest">Random Forest</a>
* learning algorithm for regression.
* It supports both continuous and categorical features.
*/
@Since("1.4.0")
class RandomForestRegressor @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends Predictor[Vector, RandomForestRegressor, RandomForestRegressionModel]
with RandomForestRegressorParams with DefaultParamsWritable {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("rfr"))
// Override parameter setters from parent trait for Java API compatibility.
// Parameters from TreeRegressorParams:
/** @group setParam */
@Since("1.4.0")
override def setMaxDepth(value: Int): this.type = set(maxDepth, value)
/** @group setParam */
@Since("1.4.0")
override def setMaxBins(value: Int): this.type = set(maxBins, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value)
/**
* Specifies how often to checkpoint the cached node IDs.
* E.g. 10 means that the cache will get checkpointed every 10 iterations.
* This is only used if cacheNodeIds is true and if the checkpoint directory is set in
* [[org.apache.spark.SparkContext]].
* Must be at least 1.
* (default = 10)
* @group setParam
*/
@Since("1.4.0")
override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/** @group setParam */
@Since("1.4.0")
override def setImpurity(value: String): this.type = set(impurity, value)
// Parameters from TreeEnsembleParams:
/** @group setParam */
@Since("1.4.0")
override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value)
/** @group setParam */
@Since("1.4.0")
override def setSeed(value: Long): this.type = set(seed, value)
// Parameters from RandomForestParams:
/** @group setParam */
@Since("1.4.0")
override def setNumTrees(value: Int): this.type = set(numTrees, value)
/** @group setParam */
@Since("1.4.0")
override def setFeatureSubsetStrategy(value: String): this.type =
set(featureSubsetStrategy, value)
override protected def train(dataset: Dataset[_]): RandomForestRegressionModel = {
val categoricalFeatures: Map[Int, Int] =
MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset)
val strategy =
super.getOldStrategy(categoricalFeatures, numClasses = 0, OldAlgo.Regression, getOldImpurity)
val instr = Instrumentation.create(this, oldDataset)
instr.logParams(labelCol, featuresCol, predictionCol, impurity, numTrees,
featureSubsetStrategy, maxDepth, maxBins, maxMemoryInMB, minInfoGain,
minInstancesPerNode, seed, subsamplingRate, cacheNodeIds, checkpointInterval)
val trees = RandomForest
.run(oldDataset, strategy, getNumTrees, getFeatureSubsetStrategy, getSeed, Some(instr))
.map(_.asInstanceOf[DecisionTreeRegressionModel])
val numFeatures = oldDataset.first().features.size
val m = new RandomForestRegressionModel(uid, trees, numFeatures)
instr.logSuccess(m)
m
}
@Since("1.4.0")
override def copy(extra: ParamMap): RandomForestRegressor = defaultCopy(extra)
}
@Since("1.4.0")
object RandomForestRegressor extends DefaultParamsReadable[RandomForestRegressor]{
/** Accessor for supported impurity settings: variance */
@Since("1.4.0")
final val supportedImpurities: Array[String] = TreeRegressorParams.supportedImpurities
/** Accessor for supported featureSubsetStrategy settings: auto, all, onethird, sqrt, log2 */
@Since("1.4.0")
final val supportedFeatureSubsetStrategies: Array[String] =
TreeEnsembleParams.supportedFeatureSubsetStrategies
@Since("2.0.0")
override def load(path: String): RandomForestRegressor = super.load(path)
}
/**
* <a href="http://en.wikipedia.org/wiki/Random_forest">Random Forest</a> model for regression.
* It supports both continuous and categorical features.
*
* @param _trees Decision trees in the ensemble.
* @param numFeatures Number of features used by this model
*/
@Since("1.4.0")
class RandomForestRegressionModel private[ml] (
override val uid: String,
private val _trees: Array[DecisionTreeRegressionModel],
override val numFeatures: Int)
extends PredictionModel[Vector, RandomForestRegressionModel]
with RandomForestRegressorParams with TreeEnsembleModel[DecisionTreeRegressionModel]
with MLWritable with Serializable {
require(_trees.nonEmpty, "RandomForestRegressionModel requires at least 1 tree.")
/**
* Construct a random forest regression model, with all trees weighted equally.
*
* @param trees Component trees
*/
private[ml] def this(trees: Array[DecisionTreeRegressionModel], numFeatures: Int) =
this(Identifiable.randomUID("rfr"), trees, numFeatures)
@Since("1.4.0")
override def trees: Array[DecisionTreeRegressionModel] = _trees
// Note: We may add support for weights (based on tree performance) later on.
private lazy val _treeWeights: Array[Double] = Array.fill[Double](_trees.length)(1.0)
@Since("1.4.0")
override def treeWeights: Array[Double] = _treeWeights
override protected def transformImpl(dataset: Dataset[_]): DataFrame = {
val bcastModel = dataset.sparkSession.sparkContext.broadcast(this)
val predictUDF = udf { (features: Any) =>
bcastModel.value.predict(features.asInstanceOf[Vector])
}
dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol))))
}
override def predict(features: Vector): Double = {
// TODO: When we add a generic Bagging class, handle transform there. SPARK-7128
// Predict average of tree predictions.
// Ignore the weights since all are 1.0 for now.
_trees.map(_.rootNode.predictImpl(features).prediction).sum / getNumTrees
}
@Since("1.4.0")
override def copy(extra: ParamMap): RandomForestRegressionModel = {
copyValues(new RandomForestRegressionModel(uid, _trees, numFeatures), extra).setParent(parent)
}
@Since("1.4.0")
override def toString: String = {
s"RandomForestRegressionModel (uid=$uid) with $getNumTrees trees"
}
/**
* Estimate of the importance of each feature.
*
* Each feature's importance is the average of its importance across all trees in the ensemble.
* The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
* (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
* and follows the implementation from scikit-learn.
*
* @see `DecisionTreeRegressionModel.featureImportances`
*/
@Since("1.5.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures)
/** (private[ml]) Convert to a model in the old API */
private[ml] def toOld: OldRandomForestModel = {
new OldRandomForestModel(OldAlgo.Regression, _trees.map(_.toOld))
}
@Since("2.0.0")
override def write: MLWriter =
new RandomForestRegressionModel.RandomForestRegressionModelWriter(this)
}
@Since("2.0.0")
object RandomForestRegressionModel extends MLReadable[RandomForestRegressionModel] {
@Since("2.0.0")
override def read: MLReader[RandomForestRegressionModel] = new RandomForestRegressionModelReader
@Since("2.0.0")
override def load(path: String): RandomForestRegressionModel = super.load(path)
private[RandomForestRegressionModel]
class RandomForestRegressionModelWriter(instance: RandomForestRegressionModel)
extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata: JObject = Map(
"numFeatures" -> instance.numFeatures,
"numTrees" -> instance.getNumTrees)
EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata)
}
}
private class RandomForestRegressionModelReader extends MLReader[RandomForestRegressionModel] {
/** Checked against metadata when loading model */
private val className = classOf[RandomForestRegressionModel].getName
private val treeClassName = classOf[DecisionTreeRegressionModel].getName
override def load(path: String): RandomForestRegressionModel = {
implicit val format = DefaultFormats
val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) =
EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName, false)
val numFeatures = (metadata.metadata \ "numFeatures").extract[Int]
val numTrees = (metadata.metadata \ "numTrees").extract[Int]
val trees: Array[DecisionTreeRegressionModel] = treesData.map { case (treeMetadata, root) =>
val tree = new DecisionTreeRegressionModel(treeMetadata.uid,
root.asInstanceOf[RegressionNode], numFeatures)
treeMetadata.getAndSetParams(tree)
tree
}
require(numTrees == trees.length, s"RandomForestRegressionModel.load expected $numTrees" +
s" trees based on metadata but found ${trees.length} trees.")
val model = new RandomForestRegressionModel(metadata.uid, trees, numFeatures)
metadata.getAndSetParams(model)
model
}
}
/** Convert a model from the old API */
private[ml] def fromOld(
oldModel: OldRandomForestModel,
parent: RandomForestRegressor,
categoricalFeatures: Map[Int, Int],
numFeatures: Int = -1): RandomForestRegressionModel = {
require(oldModel.algo == OldAlgo.Regression, "Cannot convert RandomForestModel" +
s" with algo=${oldModel.algo} (old API) to RandomForestRegressionModel (new API).")
val newTrees = oldModel.trees.map { tree =>
// parent for each tree is null since there is no good way to set this.
DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
}
val uid = if (parent != null) parent.uid else Identifiable.randomUID("rfr")
new RandomForestRegressionModel(uid, newTrees, numFeatures)
}
}
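A minimal usage sketch for the estimator and model defined above, assuming a local SparkSession and a toy DataFrame with the conventional `label`/`features` columns (the data values and parameter settings are illustrative only, not taken from this file):
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.RandomForestRegressor
import org.apache.spark.sql.SparkSession
object RandomForestRegressorSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("rfr-sketch").master("local[*]").getOrCreate()
    import spark.implicits._
    // Toy training data; a real job would load something like Parquet or LIBSVM instead.
    val training = Seq(
      (1.0, Vectors.dense(0.0, 1.1, 0.1)),
      (0.0, Vectors.dense(2.0, 1.0, -1.0)),
      (1.5, Vectors.dense(2.0, 1.3, 1.0))
    ).toDF("label", "features")
    val rf = new RandomForestRegressor()
      .setNumTrees(20)
      .setMaxDepth(5)
      .setFeatureSubsetStrategy("auto")
    val model = rf.fit(training)
    // predict() above averages the per-tree predictions.
    model.transform(training).select("features", "prediction").show()
    // Normalized per-feature importances, see featureImportances above.
    println(model.featureImportances)
    spark.stop()
  }
}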
|
lxsmnv/spark
|
mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
|
Scala
|
apache-2.0
| 11,992
|
case class i1[i1](i1:i1=>Either[i1,Nothing]){def i1(}
|
lampepfl/dotty
|
tests/pending/fuzzy/E-61466476fb2b670ec310e86fc2692b75ce455b09.scala
|
Scala
|
apache-2.0
| 54
|
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import scala.util.control.NoStackTrace
/** Exception dealing with invalid response
* @param msg description of what makes the response invalid
*/
final case class InvalidResponseException(msg: String) extends Exception(msg) with NoStackTrace
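A tiny, hedged sketch of how calling code might raise this exception when a response fails validation (the validation rule itself is illustrative):
import org.http4s.InvalidResponseException
object ResponseChecks {
  // Throws without filling in a stack trace, thanks to NoStackTrace.
  def requireNonEmptyBody(body: String): String =
    if (body.isEmpty) throw InvalidResponseException("empty response body")
    else body
}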
|
http4s/http4s
|
core/shared/src/main/scala/org/http4s/InvalidResponseException.scala
|
Scala
|
apache-2.0
| 862
|
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.testFramework.EditorTestUtil
/**
* @author Nikolay.Tropin
*/
class SizeToLengthTest extends OperationsOnCollectionInspectionTest {
import EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
override protected val classOfInspection: Class[_ <: OperationOnCollectionInspection] =
classOf[SizeToLengthInspection]
override protected val hint: String =
InspectionBundle.message("size.to.length")
def testString(): Unit = {
doTest(s"""|"".${START}size$END""".stripMargin, "\\"\\".size", "\\"\\".length")
doTest(
s"""
|object Foo {
| val s = ""
| s.${START}size$END
|}
""".stripMargin,
"""
|object Foo {
| val s = ""
| s.size
|}
""".stripMargin,
"""
|object Foo {
| val s = ""
| s.length
|}
""".stripMargin
)
}
def testArray(): Unit = {
doTest(s"Array(1, 2).${START}size$END", "Array(1, 2).size", "Array(1, 2).length")
doTest(s"Seq(1, 2).toArray.${START}size$END", "Seq(1, 2).toArray.size", "Seq(1, 2).toArray.length")
doTest(
s"""
|object Foo {
| val arr = Array(1, 2)
| arr.${START}size$END
|}
""".stripMargin,
"""
|object Foo {
| val arr = Array(1, 2)
| arr.size
|}
""".stripMargin,
"""
|object Foo {
| val arr = Array(1, 2)
| arr.length
|}
""".stripMargin
)
}
}
|
loskutov/intellij-scala
|
test/org/jetbrains/plugins/scala/codeInspection/collections/SizeToLengthTest.scala
|
Scala
|
apache-2.0
| 1,627
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.felix.servicediagnostics.webconsole
import scala.collection.JavaConversions._
import scala.collection.mutable.{Map => mMap}
import java.io.PrintStream
import javax.servlet.http._
import org.json.JSONObject
import org.osgi.service.http.HttpContext
import org.osgi.service.http.HttpService
import org.apache.felix.servicediagnostics.ServiceDiagnostics
/**
* This is the servlet responding to the ajax requests, using the ServiceDiagnostics service
*
* @author <a href="mailto:dev@felix.apache.org">Felix Project Team</a>
*/
class Servlet extends HttpServlet
{
var engine:ServiceDiagnostics = _ //dependency injection. see Activator.
/*
* dependency injection. see Activator.
* registers this servlet into the HttpService
*/
def setHttpService(hs:HttpService) =
{
val hc = hs.createDefaultHttpContext
hs.registerServlet("/servicegraph/json", this, null, hc)
hs.registerResources("/servicegraph", "/html", hc)
}
override def service(req:HttpServletRequest, resp:HttpServletResponse) =
{
val cmd = req.getPathInfo
if(cmd.endsWith("all")) reply(resp, engine.allServices)
else if(cmd.endsWith("notavail")) reply(resp, engine.notavail)
else println("Unrecognized cmd: "+cmd)
}
/**
* turn the ServiceDiagnostics output into a JSON representation.
*/
private def reply(resp:HttpServletResponse, map:Map[String,List[AnyRef]]) =
{
new PrintStream(resp.getOutputStream, true).println(
new JSONObject(asJavaMap(mMap() ++ map.map(kv => (kv._1, asJavaList(kv._2))))))
}
}
|
boneman1231/org.apache.felix
|
trunk/webconsole-plugins/servicediagnostics/core/src/main/scala/servicediagnostics/webconsole/Servlet.scala
|
Scala
|
apache-2.0
| 2,447
|
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.mongodb
import slamdata.Predef._
import quasar.{RenderTree, Terminal}
import matryoshka._
import matryoshka.data.Fix
import matryoshka.implicits._
import monocle.Prism
import scalaz._, Scalaz._
package object expression {
/** The type for expressions targeting MongoDB 2.6 specifically. */
type Expr2_6[A] = ExprOpCoreF[A]
/** The type for expressions targeting MongoDB 3.0 specifically. */
type Expr3_0[A] = Coproduct[ExprOp3_0F, ExprOpCoreF, A]
/** The type for expressions targeting MongoDB 3.2 specifically. */
type Expr3_2[A] = Coproduct[ExprOp3_2F, Expr3_0, A]
/** The type for expressions targeting MongoDB 3.4 specifically. */
type Expr3_4[A] = Coproduct[ExprOp3_4F, Expr3_2, A]
/** The type for expressions supporting the most advanced capabilities. */
type ExprOp[A] = Expr3_4[A]
val fixExprOp =
new ExprOpCoreF.fixpoint[Fix[ExprOp], ExprOp](_.embed)
val DocField = Prism.partial[DocVar, BsonField] {
case DocVar.ROOT(Some(tail)) => tail
} (DocVar.ROOT(_))
// FIXME: no way to put this in anybody's companion where it will be found?
implicit def exprOpRenderTree[T[_[_]]: RecursiveT, EX[_]: Functor](implicit ops: ExprOpOps.Uni[EX]): RenderTree[T[EX]] =
new RenderTree[T[EX]] {
def render(v: T[EX]) = Terminal(List("ExprOp"), v.cata(ops.bson).toJs.pprint(0).some)
}
}
|
drostron/quasar
|
mongodb/src/main/scala/quasar/physical/mongodb/expression/package.scala
|
Scala
|
apache-2.0
| 1,967
|
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.io.File
import java.nio.file.FileSystem
import java.nio.file.Path
import java.nio.file.{ Files => JFiles }
import com.google.common.jimfs.Configuration
import com.google.common.jimfs.Jimfs
import play.api.libs.Files.TemporaryFile
import play.api.libs.Files.TemporaryFileCreator
import scala.util.Try
class InMemoryTemporaryFile(val path: Path, val temporaryFileCreator: TemporaryFileCreator) extends TemporaryFile {
def file: File = path.toFile
}
class InMemoryTemporaryFileCreator(totalSpace: Long) extends TemporaryFileCreator {
private val fsConfig: Configuration = Configuration.unix.toBuilder
.setMaxSize(totalSpace)
.build()
private val fs: FileSystem = Jimfs.newFileSystem(fsConfig)
private val playTempFolder: Path = fs.getPath("/tmp")
def create(prefix: String = "", suffix: String = ""): TemporaryFile = {
JFiles.createDirectories(playTempFolder)
val tempFile = JFiles.createTempFile(playTempFolder, prefix, suffix)
new InMemoryTemporaryFile(tempFile, this)
}
def create(path: Path): TemporaryFile = new InMemoryTemporaryFile(path, this)
def delete(file: TemporaryFile): Try[Boolean] = Try(JFiles.deleteIfExists(file.path))
}
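A short usage sketch of the in-memory creator above, assuming this test helper is on the classpath (the size limit, prefix/suffix, and file contents are illustrative):
import java.nio.charset.StandardCharsets
import java.nio.file.{ Files => JFiles }
import play.api.mvc.InMemoryTemporaryFileCreator
object InMemoryTempFileSketch {
  def main(args: Array[String]): Unit = {
    // Backed by a Jimfs file system capped at 1 MiB of total space.
    val creator = new InMemoryTemporaryFileCreator(1024 * 1024)
    val tmp = creator.create(prefix = "upload", suffix = ".txt")
    JFiles.write(tmp.path, "hello".getBytes(StandardCharsets.UTF_8))
    println(JFiles.readAllLines(tmp.path)) // prints [hello]
    // delete returns Try[Boolean]: Success(true) when the path existed and was removed.
    println(creator.delete(tmp))
  }
}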
|
wegtam/playframework
|
core/play/src/test/scala/play/api/mvc/InMemoryTemporaryFileCreator.scala
|
Scala
|
apache-2.0
| 1,287
|
/**
* COPYRIGHT (C) 2015 Alpine Data Labs Inc. All Rights Reserved.
*/
package com.alpine.plugin.core.dialog
import com.alpine.plugin.core.annotation.AlpineSdkApi
/**
* :: AlpineSdkApi ::
*/
@AlpineSdkApi
trait DoubleBox extends DialogElement {
def setValue(value: Double): Unit
def getValue: Double
def getMin: Double
def getMax: Double
def getInclusiveMin: Boolean
def getInclusiveMax: Boolean
}
|
holdenk/PluginSDK
|
plugin-core/src/main/scala/com/alpine/plugin/core/dialog/DoubleBox.scala
|
Scala
|
apache-2.0
| 417
|
package de.guderlei.spray.database
import org.squeryl.{Session, SessionFactory}
import org.squeryl.adapters.{MySQLAdapter, H2Adapter}
import de.guderlei.spray.domain.TodoItem
import java.util.Date
import org.squeryl.PrimitiveTypeMode._
import scala.Some
import com.jolbox.bonecp.{BoneCP, BoneCPConfig}
import org.slf4j.LoggerFactory
/**
* sample Squeryl configuration (in-memory H2 by default; the MySQL variant is left commented out below)
*/
trait DatabaseConfiguration {
//Class.forName("com.mysql.jdbc.Driver")
Class.forName("org.h2.Driver")
// configure and instantiate the BoneCP connection pool
val poolConfig = new BoneCPConfig()
//poolConfig.setJdbcUrl("jdbc:mysql://localhost:3306/todoexample")
poolConfig.setJdbcUrl("jdbc:h2:mem:test")
poolConfig.setUsername("todouser")
poolConfig.setPassword("password")
poolConfig.setMinConnectionsPerPartition(5)
poolConfig.setMaxConnectionsPerPartition(1000)
poolConfig.setPartitionCount(1)
val connectionPool = new BoneCP(poolConfig)
// create a squeryl session factory
val log = LoggerFactory.getLogger("Database")
SessionFactory.concreteFactory = Some(
()=> Session.create(connectionPool.getConnection(), new H2Adapter())
)
// initialize the database schema on the fly
initializeSchema()
/**
* initialize the database schema. The schema is created iff it does not
* exist in the database.
*/
def initializeSchema() {
log.info("initialize database")
try {
transaction {
from( Todos.todos ) (s => select(s)).toList
}
} catch {
case e: Exception => {
try {
transaction{
log.info("create schema")
Todos.create
}
transaction{
from( Todos.todos ) (s => select(s)).toList
}
} catch {
case e:Exception => {
log.error(e.getMessage, e)
}
}
}
}
}
}
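A hedged sketch of mixing the trait above into an application object and querying the `Todos` schema it already references; the schema object's exact location and table definition are assumed here rather than shown in this file:
import org.squeryl.PrimitiveTypeMode._
import de.guderlei.spray.database.{DatabaseConfiguration, Todos} // Todos assumed to live in the same package
// Constructing this object runs the pool setup and initializeSchema() in the trait body.
object AppDatabase extends DatabaseConfiguration
object DatabaseSketch {
  def main(args: Array[String]): Unit = {
    AppDatabase // touch the object to force initialization
    val items = transaction {
      from(Todos.todos)(t => select(t)).toList
    }
    println(s"${items.size} todo items in the database")
  }
}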
|
KizuRos/spray-example
|
src/main/scala/de/guderlei/spray/database/DatabaseHandling.scala
|
Scala
|
mit
| 1,913
|
/**
* Created by Romain Reuillon on 07/02/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.openmole.plugin.environment.desktopgrid
import org.openmole.core.preference.{ ConfigurationLocation, Preference }
import org.openmole.tool.crypto.Cypher
object DesktopGridAuthentication {
val desktopGridPassword = ConfigurationLocation[String]("desktopgrid", "password", None)
def update(cypheredPassword: String)(implicit preference: Preference) = preference.setPreference(desktopGridPassword, cypheredPassword)
def password(implicit preference: Preference, cypher: Cypher) = cypher.decrypt(preference(desktopGridPassword))
def passwordOption(implicit preference: Preference, cypher: Cypher) = preference.preferenceOption(desktopGridPassword).map(cypher.decrypt(_))
def clear(implicit preference: Preference) = preference.clearPreference(desktopGridPassword)
}
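A brief usage sketch of the helper above; how a Preference and Cypher are obtained depends on the running OpenMOLE services, so they are taken as implicit parameters here, and the password passed to update is assumed to be cyphered already (as the parameter name indicates):
import org.openmole.core.preference.Preference
import org.openmole.plugin.environment.desktopgrid.DesktopGridAuthentication
import org.openmole.tool.crypto.Cypher
object DesktopGridAuthSketch {
  def store(alreadyCypheredPassword: String)(implicit preference: Preference): Unit =
    DesktopGridAuthentication.update(alreadyCypheredPassword)
  def show()(implicit preference: Preference, cypher: Cypher): Unit =
    // passwordOption decrypts the stored value if one is present.
    DesktopGridAuthentication.passwordOption.foreach(println)
  def reset()(implicit preference: Preference): Unit =
    DesktopGridAuthentication.clear
}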
|
openmole/openmole
|
openmole/plugins/org.openmole.plugin.environment.desktopgrid/src/main/scala/org/openmole/plugin/environment/desktopgrid/DesktopGridAuthentication.scala
|
Scala
|
agpl-3.0
| 1,496
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.logical
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.schema.DataStreamTable
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan._
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.TableScan
import org.apache.calcite.rel.logical.LogicalTableScan
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollation, RelCollationTraitDef, RelNode}
import java.util
import java.util.function.Supplier
/**
* Sub-class of [[TableScan]] that is a relational operator
* which returns the contents of a [[DataStreamTable]] in Flink.
*/
class FlinkLogicalDataStreamTableScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
table: RelOptTable)
extends TableScan(cluster, traitSet, table)
with FlinkLogicalRel {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new FlinkLogicalDataStreamTableScan(cluster, traitSet, table)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val rowCnt = mq.getRowCount(this)
val rowSize = mq.getAverageRowSize(this)
planner.getCostFactory.makeCost(rowCnt, rowCnt, rowCnt * rowSize)
}
}
class FlinkLogicalDataStreamTableScanConverter
extends ConverterRule(
classOf[LogicalTableScan],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalDataStreamTableScanConverter") {
override def matches(call: RelOptRuleCall): Boolean = {
val scan: TableScan = call.rel(0)
val dataStreamTable = scan.getTable.unwrap(classOf[DataStreamTable[_]])
dataStreamTable != null
}
def convert(rel: RelNode): RelNode = {
val scan = rel.asInstanceOf[TableScan]
FlinkLogicalDataStreamTableScan.create(rel.getCluster, scan.getTable)
}
}
object FlinkLogicalDataStreamTableScan {
val CONVERTER = new FlinkLogicalDataStreamTableScanConverter
def isDataStreamTableScan(scan: TableScan): Boolean = {
val dataStreamTable = scan.getTable.unwrap(classOf[DataStreamTable[_]])
dataStreamTable != null
}
def create(cluster: RelOptCluster, relOptTable: RelOptTable): FlinkLogicalDataStreamTableScan = {
val dataStreamTable = relOptTable.unwrap(classOf[DataStreamTable[_]])
val traitSet = cluster.traitSetOf(FlinkConventions.LOGICAL).replaceIfs(
RelCollationTraitDef.INSTANCE, new Supplier[util.List[RelCollation]]() {
def get: util.List[RelCollation] = {
if (dataStreamTable != null) {
dataStreamTable.getStatistic.getCollations
} else {
ImmutableList.of[RelCollation]
}
}
}).simplify()
new FlinkLogicalDataStreamTableScan(cluster, traitSet, dataStreamTable)
}
}
|
gyfora/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/logical/FlinkLogicalDataStreamTableScan.scala
|
Scala
|
apache-2.0
| 3,655
|