| code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package edu.berkeley.nlp.entity.wiki
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import edu.berkeley.nlp.entity.coref.Mention
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.futile.util.Counter
/**
* Simple data structure storing information about a query against the Wikipedia
* title-given-surface database, formed from a particular mention.
*
* @author gdurrett
*/
case class Query(val words: Seq[String],
val originalMent: Mention,
val finalSpan: (Int, Int),
val queryType: String,
val removePuncFromQuery: Boolean = true) {
def getFinalQueryStr = {
val wordsNoPunc = if (removePuncFromQuery) {
words.map(str => str.filter(c => !Query.PuncList.contains(c))).filter(!_.isEmpty);
} else {
words;
}
if (wordsNoPunc.isEmpty) "" else wordsNoPunc.reduce(_ + " " + _);
}
}
object Query {
def makeNilQuery(ment: Mention) = {
new Query(Seq[String]("XXNILXX"), ment, (ment.headIdx + 1, ment.headIdx + 1), "NIL");
}
// These parameter settings have been tuned to give best performance on query extraction
// for ACE, so are probably good there but might need to be revisited in other settings.
val CapitalizationQueryExpand = false;
val PluralQueryExpand = true;
val RemovePuncFromQuery = true;
val UseFirstHead = true;
val MaxQueryLen = 4;
val BlackList = Set("the", "a", "my", "your", "his", "her", "our", "their", "its", "this", "that", "these", "those")
val PuncList = Set(',', '.', '!', '?', ':', ';', '\'', '"', '(', ')', '[', ']', '{', '}', ' ');
/**
* Check if a token is "blacklisted", meaning that we shouldn't form a query that starts with
* it (such queries tend to do weird and bad things).
*/
def isBlacklisted(word: String, mentStartIdx: Int) = {
BlackList.contains(word) || (mentStartIdx == 0 && BlackList.contains(word.toLowerCase));
}
/**
* Very crappy stemmer
*/
def removePlural(word: String) = {
if (word.endsWith("sses")) {
word.dropRight(2);
} else if (word.endsWith("ies")) {
// Not quite right...
word.substring(0, word.size - 3) + "y";
} else if (word.endsWith("s")) {
word.dropRight(1);
} else {
word;
}
}
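// Illustrative behavior on hypothetical inputs:
//   removePlural("classes") == "class" // "sses" branch drops the final "es"
//   removePlural("cities") == "city" // "ies" branch rewrites to "y"
//   removePlural("dogs") == "dog" // bare trailing "s" is dropped
//   removePlural("bus") == "bu" // over-stems; hence "very crappy"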
/**
* Given a mention, extracts the set of possible queries that we'll consider. This is done by
* considering different subsets of the words in the mention and munging capitalization and
* stemming, since lowercasing and dropping a plural-marking "s" are useful for nominals.
*/
def extractQueriesBest(ment: Mention, addNilQuery: Boolean = false): Seq[Query] = {
val queries = new ArrayBuffer[Query];
val mentWords = ment.words;
// Try the whole query, then prefixes ending in the head
val relHeadIdx = (if (UseFirstHead) ment.contextTree.getSpanHeadACECustom(ment.startIdx, ment.endIdx) else ment.headIdx) - ment.startIdx;
val indicesToTry = (Seq((0, mentWords.size)) ++ (0 to relHeadIdx).map(i => (i, relHeadIdx + 1))).filter(indices => {
indices._2 - indices._1 == 1 || !isBlacklisted(mentWords(indices._1), ment.startIdx);
}).filter(indices => indices._2 - indices._1 > 0 && indices._2 - indices._1 <= MaxQueryLen).distinct;
for (indices <- indicesToTry) {
// Query the full thing as is
val queriesThisSlice = new ArrayBuffer[Query];
val query = new Query(mentWords.slice(indices._1, indices._2), ment, indices, "STD", RemovePuncFromQuery);
val firstWord = mentWords(indices._1);
val lastWord = mentWords(indices._2 - 1);
queriesThisSlice += query;
// Handle capitalization: if the first word has no uppercase characters and begins with a lowercase letter, also try a Wikipedia-cased variant
if (!firstWord.map(Character.isUpperCase(_)).reduce(_ || _) && Character.isLowerCase(firstWord(0))) {
queriesThisSlice += new Query(Seq(wikiCase(firstWord)) ++ mentWords.slice(indices._1 + 1, indices._2), ment, indices, "WIKICASED", RemovePuncFromQuery);
}
// Stemming (but only on head alone)
if (PluralQueryExpand && (indices._2 - indices._1) == 1 && firstWord.last == 's') {
queriesThisSlice ++= queriesThisSlice.map(query => new Query(Seq(removePlural(query.words(0))), ment, indices, query.queryType + "-STEM", RemovePuncFromQuery));
}
queries ++= queriesThisSlice;
}
// Finally, strip punctuation from queries; we don't do this earlier because it makes it hard
// to find the head
// val finalQueries = if (RemovePuncFromQuery) {
// queries.map(_.map(str => str.filter(c => !PuncList.contains(c))).filter(!_.isEmpty)).filter(!_.isEmpty)
// } else {
// queries;
// }
queries.filter(!_.getFinalQueryStr.isEmpty) ++ (if (addNilQuery) Seq(Query.makeNilQuery(ment)) else Seq[Query]());
}
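// Illustrative expansion (hypothetical mention ["the", "tall", "Buildings"], head "Buildings"):
// the full span is tried first, then suffixes ending at the head; "the tall Buildings" is
// dropped because "the" is blacklisted, "tall Buildings" also yields a WIKICASED variant
// with its first word re-cased, and the bare head "Buildings" spawns the stem "Building".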
def extractDenotationSetWithNil(queries: Seq[Query], queryDisambigs: Seq[Counter[String]], maxDenotations: Int): Seq[String] = {
val choicesEachQuery = queryDisambigs.map(_.getSortedKeys().asScala);
val optionsAndPriorities = (0 until queryDisambigs.size).flatMap(i => {
val sortedKeys = queryDisambigs(i).getSortedKeys().asScala
(0 until sortedKeys.size).map(j => (sortedKeys(j), j * 1000 + i));
});
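// The priority j * 1000 + i orders candidates primarily by their per-query rank j and
// breaks ties by query index i, so (assuming fewer than 1000 queries) every query's top
// choice precedes any query's second choice after the sort below.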
// choicesEachQuery.foreach(Logger.logss(_));
// Logger.logss(optionsAndPriorities);
val allFinalOptions = Seq(NilToken) ++ optionsAndPriorities.sortBy(_._2).map(_._1).distinct;
val finalOptionsTruncated = allFinalOptions.slice(0, Math.min(allFinalOptions.size, maxDenotations));
// Logger.logss(finalOptions);
finalOptionsTruncated;
}
}
| malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/wiki/Query.scala | Scala | gpl-3.0 | 5,611 |
package edu.ncrn.cornell.xml
import scala.xml.Node
/**
* @author Brandon Barker
* 9/15/2016
*/
object ScalaXmlExtra {
implicit class NodeExtra(val node: Node) extends AnyVal{
/**
* Convenience method for looking up attribute values using
* the attribute name.
*/
def attributeVal(attrName: String): Option[String] =
node.attributes.asAttrMap.get(attrName)
/**
*
* @return the qualified name of the node: "prefix:label";
* like Node.nameToString but doesn't require passing
* a StringBuilder
*/
def fullName: String = Option(node.prefix) match {
case Some(pfx) => pfx + ":" + node.label
case None => node.label
}
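// e.g. for an element <xs:element .../> fullName is "xs:element"; without a prefix it is just "element"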
def hasLabel: Boolean = !node.label.trim.isEmpty
}
}
| ncrncornell/XPathEnumerator | src/main/scala/edu/ncrn/cornell/xml/ScalaXmlExtra.scala | Scala | apache-2.0 | 798 |
package info.glennengstrand.news.resource
import javax.inject.Inject
import play.api.Logger
import play.api.data.Form
import play.api.libs.json.Json
import io.circe._, io.circe.generic.auto._, io.circe.parser._, io.circe.syntax._
import play.api._
import play.api.mvc._
import info.glennengstrand.news.model._
import scala.util.{Success, Failure}
import scala.concurrent.{ExecutionContext, Future}
/**
* Takes HTTP requests and produces JSON.
*/
class InboundController @Inject()(cc: NewsControllerComponents)(
implicit ec: ExecutionContext)
extends NewsBaseController(cc) {
private val logger = Logger(getClass)
def get(id: Int): Action[AnyContent] = NewsAction.async {
implicit request =>
logger.trace(s"show: id = $id")
inboundService.lookup(id).map { p =>
Ok(p.asJson.noSpaces)
}
}
}
| gengstrand/clojure-news-feed | server/feed12/app/info/glennengstrand/news/resource/InboundController.scala | Scala | epl-1.0 | 843 |
/*
* Copyright 2016 Thomas Puhl
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.digitalistbesser.diff.io
import de.digitalistbesser.diff.{Delete, Hunk, Insert, Match}
import org.scalatest.Inside._
import org.scalatest.Matchers._
class NormalFormatSpec extends LineBasedHunkFormatSpec {
private val casing: Seq[String] = List(
"1,2c1,2",
"< ABC",
"< XYZ",
"---",
"> XYZ",
"> ABC")
private val empty: Seq[String] = Nil
private val emptySource: Seq[String] = List(
"0a1,3",
"> a",
"> b",
"> c")
private val emptyTarget: Seq[String] = List(
"1,3d0",
"< a",
"< b",
"< c")
private val multipleHunks: Seq[String] = List(
"16c16",
"< p",
"---",
"> P",
"24c24",
"< x",
"---",
"> X")
private val singleHunk: Seq[String] = List(
"2c2,3",
"< b",
"---",
"> B",
"> C")
private val hunkWithMultipleEdits: Seq[String] = List(
"16c16",
"< p",
"---",
"> P",
"20,21c20",
"< t",
"< u",
"---",
"> T")
private val format = new NormalFormat with SeqBasedHunkFormat
import format._
"NormalFormat" should "write nothing for empty input" in {
val patch = write(EmptyData)
assert(patch == this.empty)
}
it should "write a single hunk correctly" in {
val hunks = Seq(Hunk(1, 1, Seq(Delete("b"), Insert("B"), Insert("C"))))
val patch = write(HunkData(hunks))
assert(patch == this.singleHunk)
}
it should "write multiple hunks correctly" in {
val hunks = Seq(Hunk(12, 12, Seq(Match("m"), Match("n"), Match("o"), Delete("p"), Insert("P"), Match("q"), Match("r"), Match("s"))), Hunk(20, 20, Seq(Match("u"), Match("v"), Match("w"), Delete("x"), Insert("X"), Match("y"), Match("z"))))
val patch = write(HunkData(hunks))
assert(patch == this.multipleHunks)
}
it should "write a hunk with multiple edits joined by context matches correctly" in {
val hunks = Seq(Hunk(12, 12, Seq(Match("m"), Match("n"), Match("o"), Delete("p"), Insert("P"), Match("q"), Match("r"), Match("s"), Delete("t"), Delete("u"), Insert("T"), Match("v"), Match("w"))))
val patch = write(HunkData(hunks))
assert(patch == this.hunkWithMultipleEdits)
}
it should "write an empty source correctly" in {
val hunks = Seq(Hunk(0, 0, Seq(Insert("a"), Insert("b"), Insert("c"))))
val patch = write(HunkData(hunks))
assert(patch == this.emptySource)
}
it should "write an empty target correctly" in {
val hunks = Seq(Hunk(0, 0, Seq(Delete("a"), Delete("b"), Delete("c"))))
val patch = write(HunkData(hunks))
assert(patch == this.emptyTarget)
}
it should "write using the supplied output transformation function" in {
val hunks = Seq(Hunk(0, 0, Seq(Delete("aBc"), Delete("xYz"), Insert("XyZ"), Insert("AbC"))))
val patch = write(HunkData(hunks))(_.toUpperCase)
assert(patch == this.casing)
}
it should "read nothing from an empty file" in {
val result = read(this.empty)
inside(result) { case ReadSuccess(EmptyData) =>
}
}
it should "read a single hunk correctly" in {
val result = read(this.singleHunk)
inside(result) { case ReadSuccess(HunkData(Seq(Hunk(1, 1, Seq(Delete("b"), Insert("B"), Insert("C")))))) =>
}
}
it should "read multiple hunks correctly" in {
val result1 = read(this.multipleHunks)
inside(result1) { case ReadSuccess(HunkData(Seq(Hunk(15, 15, Seq(Delete("p"), Insert("P"))), Hunk(23, 23, Seq(Delete("x"), Insert("X")))))) =>
}
val result2 = read(this.hunkWithMultipleEdits)
inside(result2) { case ReadSuccess(HunkData(Seq(Hunk(15, 15, Seq(Delete("p"), Insert("P"))), Hunk(19, 19, Seq(Delete("t"), Delete("u"), Insert("T")))))) =>
}
}
it should "read an empty source correctly" in {
val result = read(this.emptySource)
inside(result) { case ReadSuccess(HunkData(Seq(Hunk(0, 0, Seq(Insert("a"), Insert("b"), Insert("c")))))) =>
}
}
it should "read an empty target correctly" in {
val result = read(this.emptyTarget)
inside(result) { case ReadSuccess(HunkData(Seq(Hunk(0, 0, Seq(Delete("a"), Delete("b"), Delete("c")))))) =>
}
}
it should "read using the supplied input transformation function" in {
val result = read(this.casing)(_.toLowerCase)
inside(result) { case ReadSuccess(HunkData(Seq(Hunk(0, 0, Seq(Delete("abc"), Delete("xyz"), Insert("xyz"), Insert("abc")))))) =>
}
}
it should "fail reading input with missing hunk header" in {
val missingHunkHeader: Seq[String] = this.singleHunk.drop(1)
val result = read(missingHunkHeader)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 1))) =>
l should equal (missingHunkHeader.head)
}
}
it should "fail reading input with malformed hunk header" in {
val malformedHunkHeader: Seq[String] = this.singleHunk.updated(0, "abc")
val result = read(malformedHunkHeader)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 1))) =>
l should equal (malformedHunkHeader.head)
}
}
it should "fail reading input with malformed hunk data" in {
val malformedHunkData: Seq[String] = this.singleHunk.updated(3, "Test")
val result = read(malformedHunkData)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 4))) =>
l should equal (malformedHunkData(3))
}
}
it should "fail reading input with malformed hunk separator" in {
val malformedHunkData: Seq[String] = this.singleHunk.updated(2, "--")
val result = read(malformedHunkData)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 3))) =>
l should equal (malformedHunkData(2))
}
}
it should "fail reading input with invalid source edit" in {
val invalidSourceEdit: Seq[String] = this.emptyTarget.take(2) ++: "> b" +: this.emptyTarget.drop(3)
val result = read(invalidSourceEdit)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 3))) =>
l should equal (invalidSourceEdit(2))
}
}
it should "fail reading input with invalid target edit" in {
val invalidTargetEdit: Seq[String] = this.emptySource.take(2) ++: "< b" +: this.emptySource.drop(3)
val result = read(invalidTargetEdit)
inside(result) { case ReadFailure(_: HunkFormatException, Some(Line(l, 3))) =>
l should equal (invalidTargetEdit(2))
}
}
it should "fail reading input with missing deletions" in {
val invalidSourceEdit: Seq[String] = this.emptyTarget.take(3)
val result1 = read(invalidSourceEdit)
inside(result1) { case ReadFailure(_: HunkFormatException, None) =>
}
val invalidHunk: Seq[String] = this.singleHunk.take(1) ++: this.singleHunk.drop(2)
val result2 = read(invalidHunk)
inside(result2) { case ReadFailure(_: HunkFormatException, Some(Line(l, 2))) =>
l should equal (invalidHunk(1))
}
}
it should "fail reading input with additional deletions" in {
val invalidSourceEdit: Seq[String] = this.emptyTarget :+ "< d"
val result1 = read(invalidSourceEdit)
inside(result1) { case ReadFailure(_: HunkFormatException, Some(Line(l, 5))) =>
l should equal (invalidSourceEdit(4))
}
val invalidHunk: Seq[String] = this.singleHunk.take(2) ++: "< c" +: this.singleHunk.drop(2)
val result2 = read(invalidHunk)
inside(result2) { case ReadFailure(_: HunkFormatException, Some(Line(l, 3))) =>
l should equal (invalidHunk(2))
}
}
it should "fail reading input with missing insertions" in {
val invalidTargetEdit: Seq[String] = this.emptySource.take(3)
val result1 = read(invalidTargetEdit)
inside(result1) { case ReadFailure(_: HunkFormatException, None) =>
}
val invalidHunk: Seq[String] = this.singleHunk.take(4)
val result2 = read(invalidHunk)
inside(result2) { case ReadFailure(_: HunkFormatException, None) =>
}
}
it should "fail reading input with additional insertions" in {
val invalidTargetEdit: Seq[String] = this.emptySource:+ "> d"
val result1 = read(invalidTargetEdit)
inside(result1) { case ReadFailure(_: HunkFormatException, Some(Line(l, 5))) =>
l should equal (invalidTargetEdit(4))
}
val invalidHunk: Seq[String] = this.singleHunk :+ "< D"
val result2 = read(invalidHunk)
inside(result2) { case ReadFailure(_: HunkFormatException, Some(Line(l, 6))) =>
l should equal (invalidHunk(5))
}
}
}
| digitalistbesser/scaladiff | src/test/scala/de/digitalistbesser/diff/io/NormalFormatSpec.scala | Scala | apache-2.0 | 8,929 |
package app.circumstances
import utils.WithJsBrowser
import app.FunctionalTestCommon
import utils.pageobjects.circumstances.start_of_process.GReportChangesPage
import utils.pageobjects.xml_validation.{XMLBusinessValidation, XMLCircumstancesBusinessValidation}
import utils.pageobjects.{Page, PageObjects, TestData, XmlPage}
/**
* End-to-End functional tests using input files created by Steve Moody.
* @author Jorge Migueis
* Date: 02/08/2013
*/
class FunctionalTestCase21Spec extends FunctionalTestCommon {
isolated
section("functional")
"The application Circumstances" should {
"Successfully run absolute Circumstances Test Case 21" in new WithJsBrowser with PageObjects {
val page = GReportChangesPage(context)
val circs = TestData.readTestDataFromFile("/functional_scenarios/circumstances/TestCase21.csv")
page goToThePage()
val lastPage = page runClaimWith(circs)
lastPage match {
case p: XmlPage => {
val validator: XMLBusinessValidation = new XMLCircumstancesBusinessValidation
validateAndPrintErrors(p, circs, validator) should beTrue
}
case p: Page => println(p.source)
}
}
}
section("functional")
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/app/circumstances/FunctionalTestCase21Spec.scala | Scala | mit | 1,224 |
package de.erna.collectors
/**
* @author Eros Candelaresi <eros@candelaresi.de>
* @since 17.10.13 21:05
*
* Base trait for all collector actors.
*/
trait Collector {
}
| DerEros/krautmail | server/base/collectors/src/main/scala/de/erna/collectors/Collector.scala | Scala | gpl-3.0 | 182 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kinesis
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkException}
import org.apache.spark.storage.{BlockId, BlockManager, StorageLevel, StreamBlockId}
abstract class KinesisBackedBlockRDDTests(aggregateTestData: Boolean)
extends KinesisFunSuite with BeforeAndAfterEach with LocalSparkContext {
private val testData = 1 to 8
private var testUtils: KinesisTestUtils = null
private var shardIds: Seq[String] = null
private var shardIdToData: Map[String, Seq[Int]] = null
private var shardIdToSeqNumbers: Map[String, Seq[String]] = null
private var shardIdToDataAndSeqNumbers: Map[String, Seq[(Int, String)]] = null
private var shardIdToRange: Map[String, SequenceNumberRange] = null
private var allRanges: Seq[SequenceNumberRange] = null
private var blockManager: BlockManager = null
override def beforeAll(): Unit = {
super.beforeAll()
runIfTestsEnabled("Prepare KinesisTestUtils") {
testUtils = new KPLBasedKinesisTestUtils()
testUtils.createStream()
shardIdToDataAndSeqNumbers = testUtils.pushData(testData, aggregate = aggregateTestData)
require(shardIdToDataAndSeqNumbers.size > 1, "Need data to be sent to multiple shards")
shardIds = shardIdToDataAndSeqNumbers.keySet.toSeq
shardIdToData = shardIdToDataAndSeqNumbers.mapValues { _.map { _._1 }}
shardIdToSeqNumbers = shardIdToDataAndSeqNumbers.mapValues { _.map { _._2 }}
shardIdToRange = shardIdToSeqNumbers.map { case (shardId, seqNumbers) =>
val seqNumRange = SequenceNumberRange(
testUtils.streamName, shardId, seqNumbers.head, seqNumbers.last)
(shardId, seqNumRange)
}
allRanges = shardIdToRange.values.toSeq
}
}
override def beforeEach(): Unit = {
super.beforeEach()
val conf = new SparkConf().setMaster("local[4]").setAppName("KinesisBackedBlockRDDSuite")
sc = new SparkContext(conf)
blockManager = sc.env.blockManager
}
override def afterAll(): Unit = {
try {
if (testUtils != null) {
testUtils.deleteStream()
}
} finally {
super.afterAll()
}
}
testIfEnabled("Basic reading from Kinesis") {
// Verify all data using multiple ranges in a single RDD partition
val receivedData1 = new KinesisBackedBlockRDD[Array[Byte]](sc, testUtils.regionName,
testUtils.endpointUrl, fakeBlockIds(1),
Array(SequenceNumberRanges(allRanges.toArray))
).map { bytes => new String(bytes).toInt }.collect()
assert(receivedData1.toSet === testData.toSet)
// Verify all data using one range in each of the multiple RDD partitions
val receivedData2 = new KinesisBackedBlockRDD[Array[Byte]](sc, testUtils.regionName,
testUtils.endpointUrl, fakeBlockIds(allRanges.size),
allRanges.map { range => SequenceNumberRanges(Array(range)) }.toArray
).map { bytes => new String(bytes).toInt }.collect()
assert(receivedData2.toSet === testData.toSet)
// Verify ordering within each partition
val receivedData3 = new KinesisBackedBlockRDD[Array[Byte]](sc, testUtils.regionName,
testUtils.endpointUrl, fakeBlockIds(allRanges.size),
allRanges.map { range => SequenceNumberRanges(Array(range)) }.toArray
).map { bytes => new String(bytes).toInt }.collectPartitions()
assert(receivedData3.length === allRanges.size)
for (i <- 0 until allRanges.size) {
assert(receivedData3(i).toSeq === shardIdToData(allRanges(i).shardId))
}
}
testIfEnabled("Read data available in both block manager and Kinesis") {
testRDD(numPartitions = 2, numPartitionsInBM = 2, numPartitionsInKinesis = 2)
}
testIfEnabled("Read data available only in block manager, not in Kinesis") {
testRDD(numPartitions = 2, numPartitionsInBM = 2, numPartitionsInKinesis = 0)
}
testIfEnabled("Read data available only in Kinesis, not in block manager") {
testRDD(numPartitions = 2, numPartitionsInBM = 0, numPartitionsInKinesis = 2)
}
testIfEnabled("Read data available partially in block manager, rest in Kinesis") {
testRDD(numPartitions = 2, numPartitionsInBM = 1, numPartitionsInKinesis = 1)
}
testIfEnabled("Test isBlockValid skips block fetching from block manager") {
testRDD(numPartitions = 2, numPartitionsInBM = 2, numPartitionsInKinesis = 0,
testIsBlockValid = true)
}
testIfEnabled("Test whether RDD is valid after removing blocks from block manager") {
testRDD(numPartitions = 2, numPartitionsInBM = 2, numPartitionsInKinesis = 2,
testBlockRemove = true)
}
/**
* Tests the KinesisBackedBlockRDD by writing some partitions of the data to the block
* manager and the rest to Kinesis, and then reading it all back using the RDD.
* It can also test whether the partitions that were read from Kinesis were again stored in
* the block manager.
*
* @param numPartitions Number of partitions in RDD
* @param numPartitionsInBM Number of partitions to write to the BlockManager.
* Partitions 0 to (numPartitionsInBM-1) will be written to BlockManager
* @param numPartitionsInKinesis Number of partitions to write to the Kinesis.
* Partitions (numPartitions - 1 - numPartitionsInKinesis) to
* (numPartitions - 1) will be written to Kinesis
* @param testIsBlockValid Test whether setting isBlockValid to false skips block fetching
* @param testBlockRemove Test whether calling rdd.removeBlocks() makes the RDD still usable with
* reads falling back to Kinesis
* Example with numPartitions = 5, numPartitionsInBM = 3, and numPartitionsInKinesis = 4
*
* numPartitionsInBM = 3
* |------------------|
* | |
* 0 1 2 3 4
* | |
* |-------------------------|
* numPartitionsInKinesis = 4
*/
private def testRDD(
numPartitions: Int,
numPartitionsInBM: Int,
numPartitionsInKinesis: Int,
testIsBlockValid: Boolean = false,
testBlockRemove: Boolean = false
): Unit = {
require(shardIds.size > 1, "Need at least 2 shards to test")
require(numPartitionsInBM <= shardIds.size,
"Number of partitions in BlockManager cannot be more than the Kinesis test shards available")
require(numPartitionsInKinesis <= shardIds.size,
"Number of partitions in Kinesis cannot be more than the Kinesis test shards available")
require(numPartitionsInBM <= numPartitions,
"Number of partitions in BlockManager cannot be more than that in RDD")
require(numPartitionsInKinesis <= numPartitions,
"Number of partitions in Kinesis cannot be more than that in RDD")
// Put necessary blocks in the block manager
val blockIds = fakeBlockIds(numPartitions)
blockIds.foreach(blockManager.removeBlock(_))
(0 until numPartitionsInBM).foreach { i =>
val blockData = shardIdToData(shardIds(i)).iterator.map { _.toString.getBytes() }
blockManager.putIterator(blockIds(i), blockData, StorageLevel.MEMORY_ONLY)
}
// Create the necessary ranges to use in the RDD
val fakeRanges = Array.fill(numPartitions - numPartitionsInKinesis)(
SequenceNumberRanges(SequenceNumberRange("fakeStream", "fakeShardId", "xxx", "yyy")))
val realRanges = Array.tabulate(numPartitionsInKinesis) { i =>
val range = shardIdToRange(shardIds(i + (numPartitions - numPartitionsInKinesis)))
SequenceNumberRanges(Array(range))
}
val ranges = (fakeRanges ++ realRanges)
// Make sure that the first `numPartitionsInBM` blocks are in the block manager, and the others are not
require(
blockIds.take(numPartitionsInBM).forall(blockManager.get(_).nonEmpty),
"Expected blocks not in BlockManager"
)
require(
blockIds.drop(numPartitionsInBM).forall(blockManager.get(_).isEmpty),
"Unexpected blocks in BlockManager"
)
// Make sure that the last `numPartitionsInKinesis` ranges point at the real stream, and the others do not
require(
ranges.takeRight(numPartitionsInKinesis).forall {
_.ranges.forall { _.streamName == testUtils.streamName }
}, "Incorrect configuration of RDD, expected ranges not set: "
)
require(
ranges.dropRight(numPartitionsInKinesis).forall {
_.ranges.forall { _.streamName != testUtils.streamName }
}, "Incorrect configuration of RDD, unexpected ranges set"
)
val rdd = new KinesisBackedBlockRDD[Array[Byte]](
sc, testUtils.regionName, testUtils.endpointUrl, blockIds, ranges)
val collectedData = rdd.map { bytes =>
new String(bytes).toInt
}.collect()
assert(collectedData.toSet === testData.toSet)
// Verify that the block fetching is skipped when isBlockValid is set to false.
// This is done by using an RDD whose data is only in memory but is set to skip block fetching
// Using that RDD will throw an exception, as it skips block fetching even if the
// blocks are in BlockManager.
if (testIsBlockValid) {
require(numPartitionsInBM === numPartitions, "All partitions must be in BlockManager")
require(numPartitionsInKinesis === 0, "No partitions must be in Kinesis")
val rdd2 = new KinesisBackedBlockRDD[Array[Byte]](
sc, testUtils.regionName, testUtils.endpointUrl, blockIds.toArray, ranges,
isBlockIdValid = Array.fill(blockIds.length)(false))
intercept[SparkException] {
rdd2.collect()
}
}
// Verify that the RDD is still valid after the blocks are removed, and that it can
// still read data from Kinesis
if (testBlockRemove) {
require(numPartitions === numPartitionsInKinesis,
"All partitions must be in WAL for this test")
require(numPartitionsInBM > 0, "Some partitions must be in BlockManager for this test")
rdd.removeBlocks()
assert(rdd.map { bytes => new String(bytes).toInt }.collect().toSet === testData.toSet)
}
}
/** Generate fake block ids */
private def fakeBlockIds(num: Int): Array[BlockId] = {
Array.tabulate(num) { i => new StreamBlockId(0, i) }
}
}
class WithAggregationKinesisBackedBlockRDDSuite
extends KinesisBackedBlockRDDTests(aggregateTestData = true)
class WithoutAggregationKinesisBackedBlockRDDSuite
extends KinesisBackedBlockRDDTests(aggregateTestData = false)
| ZxlAaron/mypros | external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala | Scala | apache-2.0 | 11,291 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
package typedef
import com.intellij.execution.junit.JUnitUtil
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.project.DumbService
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi._
import com.intellij.psi.impl.PsiClassImplUtil.MemberType
import com.intellij.psi.impl.{PsiClassImplUtil, PsiSuperMethodImplUtil}
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.scope.processor.MethodsProcessor
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.util.{PsiTreeUtil, PsiUtil}
import org.jetbrains.plugins.scala.caches.{CachesUtil, ScalaShortNamesCacheManager}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil.isLineTerminator
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSelfTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers
import org.jetbrains.plugins.scala.lang.psi.light.ScFunctionWrapper
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScThisType
import org.jetbrains.plugins.scala.lang.psi.types.result.{TypeResult, Typeable, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInsidePsiElement, ModCount}
import scala.collection.JavaConverters._
/**
* @author ven
*/
trait ScTemplateDefinition extends ScNamedElement with PsiClass with Typeable {
import com.intellij.psi.PsiMethod
def qualifiedName: String = null
def additionalJavaNames: Array[String] = Array.empty
@Cached(synchronized = false, ModCount.anyScalaPsiModificationCount, this)
def extendsBlock: ScExtendsBlock = this.stubOrPsiChild(ScalaElementTypes.EXTENDS_BLOCK).orNull
def innerExtendsListTypes: Array[PsiClassType] = {
val eb = extendsBlock
if (eb != null) {
val tp = eb.templateParents
implicit val elementScope = ElementScope(getProject)
tp match {
case Some(tp1) => (for (te <- tp1.allTypeElements;
t = te.getType(TypingContext.empty).getOrAny;
asPsi = t.toPsiType()
if asPsi.isInstanceOf[PsiClassType]) yield asPsi.asInstanceOf[PsiClassType]).toArray[PsiClassType]
case _ => PsiClassType.EMPTY_ARRAY
}
} else PsiClassType.EMPTY_ARRAY
}
def showAsInheritor: Boolean = extendsBlock.templateBody.isDefined
override def findMethodBySignature(patternMethod: PsiMethod, checkBases: Boolean): PsiMethod = {
PsiClassImplUtil.findMethodBySignature(this, patternMethod, checkBases)
}
override def findMethodsBySignature(patternMethod: PsiMethod, checkBases: Boolean): Array[PsiMethod] = {
PsiClassImplUtil.findMethodsBySignature(this, patternMethod, checkBases)
}
override def findMethodsByName(name: String, checkBases: Boolean): Array[PsiMethod] = {
val toSearchWithIndices = Set("main", JUnitUtil.SUITE_METHOD_NAME) //these methods may be searched from EDT, search them without building a whole type hierarchy
def withIndices(): Array[PsiMethod] = {
val inThisClass = functionsByName(name)
val files = this.allSupers.flatMap(_.containingVirtualFile).asJava
val scope = GlobalSearchScope.filesScope(getProject, files)
val manager = ScalaShortNamesCacheManager.getInstance(getProject)
val candidates = manager.getMethodsByName(name, scope)
val inBaseClasses = candidates.filter(m => this.isInheritor(m.containingClass, deep = true))
(inThisClass ++ inBaseClasses).toArray
}
if (toSearchWithIndices.contains(name)) withIndices()
else PsiClassImplUtil.findMethodsByName(this, name, checkBases)
}
override def findFieldByName(name: String, checkBases: Boolean): PsiField = {
PsiClassImplUtil.findFieldByName(this, name, checkBases)
}
override def findInnerClassByName(name: String, checkBases: Boolean): PsiClass = {
PsiClassImplUtil.findInnerByName(this, name, checkBases)
}
import java.util.{Collection => JCollection, List => JList}
import com.intellij.openapi.util.{Pair => IPair}
def getAllFields: Array[PsiField] = {
PsiClassImplUtil.getAllFields(this)
}
override def findMethodsAndTheirSubstitutorsByName(name: String,
checkBases: Boolean): JList[IPair[PsiMethod, PsiSubstitutor]] = {
//the reordering is a hack to enable 'go to test location' for junit test methods defined in traits
import scala.collection.JavaConversions._
PsiClassImplUtil.findMethodsAndTheirSubstitutorsByName(this, name, checkBases).toList.sortBy(myPair =>
myPair.first match {
case wrapper: ScFunctionWrapper if wrapper.delegate.isInstanceOf[ScFunctionDeclaration] => 1
case wrapper: ScFunctionWrapper if wrapper.delegate.isInstanceOf[ScFunctionDefinition] => wrapper.containingClass match {
case myClass: ScTemplateDefinition if myClass.members.contains(wrapper.delegate) => 0
case _ => 1
}
case _ => 1
})
}
override def getAllMethodsAndTheirSubstitutors: JList[IPair[PsiMethod, PsiSubstitutor]] = {
PsiClassImplUtil.getAllWithSubstitutorsByMap(this, MemberType.METHOD)
}
@CachedInsidePsiElement(this, CachesUtil.getDependentItem(this)())
override def getVisibleSignatures: JCollection[HierarchicalMethodSignature] = {
PsiSuperMethodImplUtil.getVisibleSignatures(this)
}
def getTypeWithProjections(ctx: TypingContext, thisProjections: Boolean = false): TypeResult[ScType]
def members: Seq[ScMember] = extendsBlock.members ++ syntheticMembers
def functions: Seq[ScFunction] = extendsBlock.functions
def aliases: Seq[ScTypeAlias] = extendsBlock.aliases
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMethodsWithOverride: Seq[PsiMethod] = syntheticMethodsWithOverrideImpl
/**
* Implement it carefully to avoid recursion.
*/
protected def syntheticMethodsWithOverrideImpl: Seq[PsiMethod] = Seq.empty
def allSynthetics: Seq[PsiMethod] = syntheticMethodsNoOverride ++ syntheticMethodsWithOverride
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMethodsNoOverride: Seq[PsiMethod] = syntheticMethodsNoOverrideImpl
protected def syntheticMethodsNoOverrideImpl: Seq[PsiMethod] = Seq.empty
def typeDefinitions: Seq[ScTypeDefinition] = extendsBlock.typeDefinitions
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticTypeDefinitions: Seq[ScTypeDefinition] = syntheticTypeDefinitionsImpl
protected def syntheticTypeDefinitionsImpl: Seq[ScTypeDefinition] = Seq.empty
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def syntheticMembers: Seq[ScMember] = syntheticMembersImpl
protected def syntheticMembersImpl: Seq[ScMember] = Seq.empty
def selfTypeElement: Option[ScSelfTypeElement] = {
val qual = qualifiedName
if (qual != null && (qual == "scala.Predef" || qual == "scala")) return None
extendsBlock.selfTypeElement
}
def selfType: Option[ScType] = extendsBlock.selfType
def superTypes: List[ScType] = extendsBlock.superTypes
def supers: Seq[PsiClass] = extendsBlock.supers
def allTypeAliases: Seq[(PsiNamedElement, ScSubstitutor)] = TypeDefinitionMembers.getTypes(this).allFirstSeq().flatMap(n => n.map {
case (_, x) => (x.info, x.substitutor)
}) ++ syntheticTypeDefinitions.filter(!_.isObject).map((_, ScSubstitutor.empty))
def allTypeAliasesIncludingSelfType: Seq[(PsiNamedElement, ScSubstitutor)] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getTypes(c, Some(clazzType), this).allFirstSeq().
flatMap(_.map { case (_, n) => (n.info, n.substitutor) })
case _ =>
allTypeAliases
}
case _ =>
allTypeAliases
}
}
def allVals: Seq[(PsiNamedElement, ScSubstitutor)] =
TypeDefinitionMembers.getSignatures(this).allFirstSeq()
.flatMap(n => n.filter {
case (_, x) => !x.info.isInstanceOf[PhysicalSignature] &&
(x.info.namedElement match {
case v =>
ScalaPsiUtil.nameContext(v) match {
case _: ScVariable => v.name == x.info.name
case _: ScValue => v.name == x.info.name
case _ => true
}
})
})
.distinctBy { case (_, y) => y.info.namedElement }
.map { case (_, n) => (n.info.namedElement, n.substitutor) }
def allValsIncludingSelfType: Seq[(PsiNamedElement, ScSubstitutor)] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().flatMap(n => n.filter{
case (_, x) => !x.info.isInstanceOf[PhysicalSignature] &&
(x.info.namedElement match {
case v =>
ScalaPsiUtil.nameContext(v) match {
case _: ScVariable => v.name == x.info.name
case _: ScValue => v.name == x.info.name
case _ => true
}
})}).map { case (_, n) => (n.info.namedElement, n.substitutor) }
case _ =>
allVals
}
case _ =>
allVals
}
}
def allMethods: Iterable[PhysicalSignature] =
TypeDefinitionMembers.getSignatures(this).allFirstSeq().flatMap(_.filter {
case (_, n) => n.info.isInstanceOf[PhysicalSignature]}).
map { case (_, n) => n.info.asInstanceOf[PhysicalSignature] } ++
syntheticMethodsNoOverride.map(new PhysicalSignature(_, ScSubstitutor.empty))
def allMethodsIncludingSelfType: Iterable[PhysicalSignature] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().flatMap(_.filter {
case (_, n) => n.info.isInstanceOf[PhysicalSignature]}).
map { case (_, n) => n.info.asInstanceOf[PhysicalSignature] } ++
syntheticMethodsNoOverride.map(new PhysicalSignature(_, ScSubstitutor.empty))
case _ =>
allMethods
}
case _ =>
allMethods
}
}
def allSignatures: Seq[Signature] = TypeDefinitionMembers.getSignatures(this).allFirstSeq().flatMap(_.map { case (_, n) => n.info })
def allSignaturesIncludingSelfType: Seq[Signature] = {
selfType match {
case Some(selfType) =>
val clazzType = getTypeWithProjections(TypingContext.empty).getOrAny
selfType.glb(clazzType) match {
case c: ScCompoundType =>
TypeDefinitionMembers.getSignatures(c, Some(clazzType), this).allFirstSeq().
flatMap(_.map { case (_, n) => n.info })
case _ =>
allSignatures
}
case _ =>
allSignatures
}
}
def isScriptFileClass: Boolean = getContainingFile match {
case file: ScalaFile => file.isScriptFile
case _ => false
}
def processDeclarations(processor: PsiScopeProcessor,
oldState: ResolveState,
lastParent: PsiElement,
place: PsiElement) : Boolean = {
if (!processor.isInstanceOf[BaseProcessor]) {
val lastChild = this.lastChildStub.orNull
val languageLevel: LanguageLevel =
processor match {
case methodProcessor: MethodsProcessor => methodProcessor.getLanguageLevel
case _ => PsiUtil.getLanguageLevel(place)
}
return PsiClassImplUtil.processDeclarationsInClass(this, processor, oldState, null, lastChild, place, languageLevel, false)
}
if (extendsBlock.templateBody.isDefined &&
PsiTreeUtil.isContextAncestor(extendsBlock.templateBody.get, place, false) && lastParent != null) return true
processDeclarationsForTemplateBody(processor, oldState, lastParent, place)
}
def processDeclarationsForTemplateBody(processor: PsiScopeProcessor,
oldState: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (DumbService.getInstance(getProject).isDumb) return true
var state = oldState
//exception cases
this match {
case s: ScTypeParametersOwner => s.typeParametersClause match {
case Some(tpc) if PsiTreeUtil.isContextAncestor(tpc, place, false) => return true
case _ =>
}
case _ =>
}
// Process selftype reference
selfTypeElement match {
case Some(se) if se.name != "_" => if (!processor.execute(se, state)) return false
case _ =>
}
state = state.put(BaseProcessor.FROM_TYPE_KEY,
if (ScalaPsiUtil.isPlaceTdAncestor(this, place)) ScThisType(this)
else ScalaType.designator(this))
val eb = extendsBlock
eb.templateParents match {
case Some(p) if PsiTreeUtil.isContextAncestor(p, place, false) =>
eb.earlyDefinitions match {
case Some(ed) => for (m <- ed.members) {
ProgressManager.checkCanceled()
m match {
case _var: ScVariable => for (declared <- _var.declaredElements) {
ProgressManager.checkCanceled()
if (!processor.execute(declared, state)) return false
}
case _val: ScValue => for (declared <- _val.declaredElements) {
ProgressManager.checkCanceled()
if (!processor.execute(declared, state)) return false
}
}
}
case None =>
}
true
case _ =>
eb.earlyDefinitions match {
case Some(ed) if PsiTreeUtil.isContextAncestor(ed, place, true) =>
case _ =>
extendsBlock match {
case e: ScExtendsBlock if e != null =>
if (PsiTreeUtil.isContextAncestor(e, place, true) || !PsiTreeUtil.isContextAncestor(this, place, true)) {
this match {
case t: ScTypeDefinition if selfTypeElement.isDefined &&
!PsiTreeUtil.isContextAncestor(selfTypeElement.get, place, true) &&
PsiTreeUtil.isContextAncestor(e.templateBody.orNull, place, true) &&
processor.isInstanceOf[BaseProcessor] && !t.isInstanceOf[ScObject] =>
selfTypeElement match {
case Some(_) => processor.asInstanceOf[BaseProcessor].processType(ScThisType(t), place, state)
case _ =>
if (!TypeDefinitionMembers.processDeclarations(this, processor, state, lastParent, place)) {
return false
}
}
case _ =>
if (!TypeDefinitionMembers.processDeclarations(this, processor, state, lastParent, place)) return false
}
}
case _ =>
}
}
true
}
}
def addMember(member: ScMember, anchor: Option[PsiElement]): ScMember = {
implicit val projectContext = member.projectContext
extendsBlock.templateBody.map {
_.getNode
}.map { node =>
val beforeNode = anchor.map {
_.getNode
}.getOrElse {
val last = node.getLastChildNode
last.getTreePrev match {
case result if isLineTerminator(result.getPsi) => result
case _ => last
}
}
val before = beforeNode.getPsi
if (isLineTerminator(before))
node.addChild(createNewLineNode(), beforeNode)
node.addChild(member.getNode, beforeNode)
val newLineNode = createNewLineNode()
if (isLineTerminator(before)) {
node.replaceChild(beforeNode, newLineNode)
} else {
node.addChild(newLineNode, beforeNode)
}
member
}.getOrElse {
val node = extendsBlock.getNode
node.addChild(createWhitespace.getNode)
node.addChild(createBodyFromMember(member.getText).getNode)
members.head
}
}
def deleteMember(member: ScMember) {
member.getParent.getNode.removeChild(member.getNode)
}
def functionsByName(name: String): Seq[PsiMethod] = {
(for ((p: PhysicalSignature, _) <- TypeDefinitionMembers.getSignatures(this).forName(name)._1) yield p.method).
++(syntheticMethodsNoOverride.filter(_.name == name))
}
override def isInheritor(baseClass: PsiClass, deep: Boolean): Boolean = {
val basePath = Path.of(baseClass)
// These don't appear in the superTypes at the moment, so a special case is required.
if (basePath == Path.javaObject || (basePath == Path.scalaObject && !baseClass.isDeprecated)) return true
if (basePath.kind.isFinal) return false
if (deep) superPathsDeep.contains(basePath)
else superPaths.contains(basePath)
}
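// e.g. every definition counts as an inheritor of java.lang.Object, while a base of a
// final Kind (an object, anonymous template or final synthetic class) has no inheritors.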
@Cached(synchronized = false, ModCount.getModificationCount, this)
def cachedPath: Path = {
val kind = this match {
case _: ScTrait => Kind.ScTrait
case _: ScClass => Kind.ScClass
case _: ScObject => Kind.ScObject
case _: ScNewTemplateDefinition => Kind.ScNewTd
case s: ScSyntheticClass if s.className != "AnyRef" && s.className != "AnyVal" => Kind.SyntheticFinal
case _ => Kind.NonScala
}
Path(name, Option(qualifiedName), kind)
}
@Cached(synchronized = false, ModCount.getModificationCount, this)
private def superPaths: Set[Path] = {
if (DumbService.getInstance(getProject).isDumb) return Set.empty //to prevent failing during indexes
supers.map(Path.of).toSet
}
@Cached(synchronized = false, ModCount.getModificationCount, this)
private def superPathsDeep: Set[Path] = {
if (DumbService.getInstance(getProject).isDumb) return Set.empty //to prevent failing during indexes
var collected = Set[Path]()
def addForClass(c: PsiClass): Unit = {
val path = c match {
case td: ScTemplateDefinition => td.cachedPath
case _ => Path.of(c)
}
if (!collected.contains(path)) {
collected += path
c match {
case td: ScTemplateDefinition =>
val supersIterator = td.supers.iterator
while (supersIterator.hasNext) {
addForClass(supersIterator.next())
}
case other =>
val supersIterator = other.getSuperTypes.iterator
while (supersIterator.hasNext) {
val psiT = supersIterator.next()
val next = psiT.resolveGenerics.getElement
if (next != null) {
addForClass(next)
}
}
}
}
}
addForClass(this)
collected - cachedPath
}
def isMetaAnnotatationImpl: Boolean = {
members.exists(_.getModifierList.findChildrenByType(ScalaTokenTypes.kINLINE).nonEmpty) ||
members.exists({case ah: ScAnnotationsHolder => ah.hasAnnotation("scala.meta.internal.inline.inline")})
}
}
object ScTemplateDefinition {
object ExtendsBlock {
def unapply(definition: ScTemplateDefinition): Some[ScExtendsBlock] = Some(definition.extendsBlock)
}
sealed abstract class Kind(val isFinal: Boolean)
object Kind {
object ScClass extends Kind(false)
object ScTrait extends Kind(false)
object ScObject extends Kind(true)
object ScNewTd extends Kind(true)
object SyntheticFinal extends Kind(true)
object NonScala extends Kind(false)
}
case class Path(name: String, qName: Option[String], kind: Kind)
object Path {
def of(c: PsiClass): Path = {
c match {
case td: ScTemplateDefinition =>
td.cachedPath
case s: ScSyntheticClass if s.className != "AnyRef" && s.className != "AnyVal" =>
Path(c.name, Option(c.qualifiedName), Kind.SyntheticFinal)
case s: ScSyntheticClass =>
Path(c.name, Option(c.qualifiedName), Kind.ScClass)
case _ =>
Path(c.name, Option(c.qualifiedName), Kind.NonScala)
}
}
val javaObject = Path("Object", Some("java.lang.Object"), Kind.NonScala)
val scalaObject = Path("ScalaObject", Some("scala.ScalaObject"), Kind.ScTrait)
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/typedef/ScTemplateDefinition.scala | Scala | apache-2.0 | 21,554 |
package org.wartremover
package contrib.warts
import scala.collection.mutable
object ExposedTuples extends WartTraverser {
val message: String =
"Avoid using tuples in public interfaces, as they only supply type information. Consider using a custom case class to add semantic meaning."
private final case class LineInFile(path: String, line: Int)
def apply(u: WartUniverse): u.Traverser = {
import u.universe._
val linesWithError = mutable.Set.empty[LineInFile]
def addError(pos: Position): Unit = {
try {
error(u)(pos, message)
linesWithError.add(LineInFile(pos.source.path, pos.line))
} catch {
case _: UnsupportedOperationException =>
// Not supported in 2.10.x but we also don't need deduplication in that version anyway
}
}
def errorAlreadyExists(pos: Position): Boolean = {
try {
linesWithError.contains(LineInFile(pos.source.path, pos.line))
} catch {
case _: UnsupportedOperationException =>
// Not supported in 2.10.x but we also don't need deduplication in that version anyway
false
}
}
def typeRefContainsTuple(typeRef: TypeRef): Boolean = {
val TypeRef(_, sym, args) = typeRef
if (sym.fullName.matches("scala\\.Tuple[\\d]+")) {
true
} else {
args.exists {
case nextTypeTree: TypeTree => typeTreeContainsTuple(nextTypeTree)
case nextTypeRef: TypeRef => typeRefContainsTuple(nextTypeRef)
case _ => false
}
}
}
def typeTreeContainsTuple(typeTree: TypeTree): Boolean = {
typeTree.tpe match {
case typeRef: TypeRef => typeRefContainsTuple(typeRef)
case _ => false
}
}
def valDefContainsTuple(valDef: u.universe.ValDef): Boolean = {
valDef.tpt match {
case typeTree: TypeTree => typeTreeContainsTuple(typeTree)
case _ => false
}
}
// No FlagOps.& :(
val publicUnscopedValues = Seq(
NoFlags, Flag.IMPLICIT,
Flag.MUTABLE, Flag.MUTABLE | Flag.IMPLICIT,
Flag.LAZY, Flag.LAZY | Flag.IMPLICIT,
Flag.PROTECTED | Flag.LAZY, Flag.PROTECTED | Flag.LAZY | Flag.IMPLICIT
)
new u.Traverser {
override def traverse(tree: Tree): Unit = {
tree match {
// Ignore trees marked by SuppressWarnings
case t if hasWartAnnotation(u)(t) =>
// Do not print multiple errors for the same line, since the internal implementation of vals and vars may
// cause this. Do not traverse into these places, since we would not have done so anyway.
case _ if errorAlreadyExists(tree.pos) =>
// Return values
case DefDef(modifiers, name, _, _, returnType: TypeTree, _) if !modifiers.hasFlag(Flag.PRIVATE) && !modifiers.hasFlag(Flag.LOCAL) && name.toString != "unapply" && typeTreeContainsTuple(returnType) =>
addError(tree.pos)
// Parameters
case DefDef(modifiers, _, _, parameterLists, _, _) if !modifiers.hasFlag(Flag.PRIVATE) && !modifiers.hasFlag(Flag.LOCAL) && parameterLists.exists(_.exists(valDefContainsTuple)) =>
addError(tree.pos)
// Val/var declarations that are not covered by the above definitions
case ValDef(modifiers, _, returnType: TypeTree, _) if publicUnscopedValues.contains(modifiers.flags) && typeTreeContainsTuple(returnType) =>
addError(tree.pos)
// Do not traverse into value / variable / lazy values and method definitions since nothing inside them is
// publicly exposed.
case _: ValOrDefDef =>
case _ =>
super.traverse(tree)
}
}
}
}
}
| tim-zh/wartremover-contrib | core/src/main/scala/wartremover/contrib/warts/ExposedTuples.scala | Scala | apache-2.0 | 3,713 |
package org.tribbloid.spookystuff.example.forum
import org.tribbloid.spookystuff.SpookyContext
import org.tribbloid.spookystuff.actions.Wget
import org.tribbloid.spookystuff.example.QueryCore
import org.tribbloid.spookystuff.dsl._
/**
* Created by peng on 9/26/14.
*/
object Yelp extends QueryCore {
override def doMain(spooky: SpookyContext) = {
import spooky._
sc.parallelize(Seq(
"http://www.yelp.com/biz/bottega-louie-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/wurstk%C3%BCche-los-angeles-2?sort_by=date_desc",
"http://www.yelp.com/biz/daikokuya-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/pizzeria-mozza-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/sushi-gen-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/animal-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/blu-jam-caf%C3%A9-los-angeles-2?sort_by=date_desc",
"http://www.yelp.com/biz/langers-los-angeles-2?sort_by=date_desc",
"http://www.yelp.com/biz/roscoes-house-of-chicken-and-waffles-los-angeles-3?sort_by=date_desc",
"http://www.yelp.com/biz/masa-of-echo-park-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/bld-los-angeles?sort_by=date_desc",
"http://www.yelp.com/biz/providence-los-angeles-2?sort_by=date_desc"
))
.fetch(
Wget('_)
)
.wgetExplore($"a.page-option.prev-next:contains(→)", depthKey = 'page)
.flatSelect($"div.review", ordinalKey = 'row) (
A"p.review_comment".text ~ 'comment,
A"div.review-content span.rating-qualifier".text ~ 'date_status,
A"div.biz-rating div div.rating-very-large meta".attr("content") ~ 'stars,
A"div.review-wrapper > div.review-footer a.ybtn.useful span.i-wrap span.count".text ~ 'useful,
A"li.user-name a.user-display-name".text ~ 'user_name,
A"li.user-location".text ~ 'user_location,
A"li.friend-count b" ~ 'friend_count,
A"li.review-count b".text ~ 'review_count
)
.toDataFrame()
}
}
| chenUT/spookystuff | example/src/main/scala/org/tribbloid/spookystuff/example/forum/Yelp.scala | Scala | apache-2.0 | 2,059 |
package connectors
import com.github.tomakehurst.wiremock.client.WireMock._
import connectors.NonRepudiationConnector.StoreNrsPayloadSuccess
import fixtures.ITRegistrationFixtures
import itutil.IntegrationSpecBase
import play.api.libs.json.Json
import play.api.test.Helpers._
import support.AppAndStubs
import uk.gov.hmrc.http.InternalServerException
class NonRepudiationConnectorISpec extends IntegrationSpecBase with AppAndStubs with ITRegistrationFixtures {
val connector = app.injector.instanceOf[NonRepudiationConnector]
val storeNrsApiUrl = s"/vatreg/$testRegId/nrs-payload"
val testBase64 = "SFRNTCBUTyBFTkNPREUi"
val requestBody = Json.obj(
"payload" -> testBase64
)
"storeEncodedUserAnswers" must {
"return StoreNrsPayloadSuccess if the backend API returns OK" in {
stubPatch(storeNrsApiUrl, OK, "")
val res = await(connector.storeEncodedUserAnswers(testRegId, testBase64))
verify(patchRequestedFor(urlEqualTo(storeNrsApiUrl)).withRequestBody(equalToJson(requestBody.toString)))
res mustBe StoreNrsPayloadSuccess
}
"throw an exception for any other status" in {
stubPatch(storeNrsApiUrl, IM_A_TEAPOT, "")
intercept[InternalServerException] {
await(connector.storeEncodedUserAnswers(testRegId, testBase64))
}
verify(1, patchRequestedFor(urlEqualTo(storeNrsApiUrl)).withRequestBody(equalToJson(requestBody.toString)))
}
}
}
| hmrc/vat-registration-frontend | it/connectors/NonRepudiationConnectorISpec.scala | Scala | apache-2.0 | 1,430 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Sun Sep 25 22:39:34 EDT 2011
* @see LICENSE (MIT style license file).
*/
package apps.dynamics
import scala.util.control.Breaks.{breakable, break}
import scalation.dynamics.{DormandPrince, RungeKutta}
import scalation.dynamics.Derivatives.DerivativeV
import scalation.linalgebra.{MatrixD, VectorD}
import scalation.plot.Plot
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Pathway2` object is used to simulate a simple plant metabolic pathway.
* @see Experimental and mathematical approaches to modeling plant metabolic networks
* @see Phytochemistry Vol. 68 (2007) pp. 2351–2374, Elsevier Science Direct
* > run-main apps.dynamics.Pathway2
*/
object Pathway2 extends App
{
val t0 = 0.0 // initial time
val tf = 720.0 // final time
val n = 720 // number of time steps
val km1 = .8
val km2 = .1
val km3 = .004
val vm1 = .0018
val vm2 = .0018
val vm3 = .000012
// concentrations A B C
// 0 1 2
var c = VectorD (1.0, 0.0, 0.0)
// define the system of Ordinary Differential Equations (ODEs)
def dA_dt (t: Double, c: VectorD) = -vm1 * c(0) / (c(0) + km1) - vm2 * c(0) / (c(0) + km2)
def dB_dt (t: Double, c: VectorD) = vm1 * c(0) / (c(0) + km1) - vm3 * c(0) / (c(0) + km3)
def dC_dt (t: Double, c: VectorD) = -vm3 * c(0) / (c(0) + km3) + vm2 * c(0) / (c(0) + km2)
val odes: Array [DerivativeV] = Array (dA_dt, dB_dt, dC_dt)
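// The encoded system written out (note that, as coded above, every rate is driven by A = c(0)):
//   dA/dt = -vm1*A/(A+km1) - vm2*A/(A+km2)
//   dB/dt =  vm1*A/(A+km1) - vm3*A/(A+km3)
//   dC/dt = -vm3*A/(A+km3) + vm2*A/(A+km2)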
println ("dA_dt = " + dA_dt (0.0, c))
println ("dB_dt = " + dB_dt (0.0, c))
println ("dC_dt = " + dC_dt (0.0, c))
println (" A, B, C")
println ("> at t = " + "%6.3f".format (t0) + " c = " + c)
val dt = tf / n // time step
var t = t0 + dt // next time point to examine
breakable { for (i <- 1 to n) {
// c = RungeKutta.integrateVV (odes, c, dt) // compute new concentrations using RK
c = DormandPrince.integrateVV (odes, c, dt) // compute new concentrations using DP
println ("> at t = " + "%6.3f".format (t) + " c = " + c)
t += dt
}} // for
/*
new Plot (tt, p_r.col(0), p_d.col(0), "Plot x vs. t (black-RK, red-DP)")
new Plot (tt, p_r.col(1), p_d.col(1), "Plot y vs. t (black-RK, red-DP)")
new Plot (tt, p_r.col(1), p_e.col(1), "Plot y vs. t (black-RK, red-EX)")
new Plot (tt, p_d.col(1), p_e.col(1), "Plot y vs. t (black-DP, red-EX)")
new Plot (p_d.col(0), p_d.col(1))
*/
} // Pathway2 object
| NBKlepp/fda | scalation_1.3/scalation_models/src/main/scala/apps/dynamics/Pathway2.scala | Scala | mit | 2,779 |
package hu.frankdavid.ranking.workbench
import hu.frankdavid.ranking._
import org.apache.commons.math3.distribution.NormalDistribution
import org.apache.commons.math3.random.{RandomGenerator, Well19937c}
import org.apache.commons.math3.util.Precision
import scala.annotation.tailrec
import scala.collection.mutable
import scala.util.Random
case class TestRunner(numberOfPlayers: Int,
awardedPlayers: Int,
maxParallelism: Int,
playerPerformanceDeviation: Double,
resultPredictionDeviation: Double,
private val randomGenerator: RandomGenerator = new Well19937c()) {
private val playerSkillDistribution = new NormalDistribution(randomGenerator, 0, 1)
private val predictionDistribution = new NormalDistribution(randomGenerator, 0, resultPredictionDeviation max
Precision.EPSILON)
private lazy val players: (Seq[Player], Seq[Player]) = {
val strengths = (0 until numberOfPlayers).map(_ => playerSkillDistribution.sample()).sorted.reverse
val players = strengths.zipWithIndex.map {
case (strength, i) =>
val skill = new NormalDistribution(randomGenerator, strength, playerPerformanceDeviation max Precision.EPSILON)
(Player(i.toString, skill), strength + predictionDistribution.sample())
}
(players.map(_._1), players.sortBy(_._2).map(_._1).reverse)
}
def run(strategy: TournamentStrategy): SingleTestResult = {
val initialContext = TournamentContext(players._2, awardedPlayers, Random.nextLong(), maxParallelism, 0)
try {
runIteratively(strategy, initialContext, Map(), players._1)
} catch {
case e: Throwable => e.printStackTrace(); copy().run(strategy)
}
}
def runMany(strategy: TournamentStrategy, times: Int): TestResultLike = {
if (times == 1) {
run(strategy)
} else {
val results = (0 until times).map(_ => copy().run(strategy))
new AverageTestResult(results)
}
}
@tailrec
private def runIteratively(strategy: TournamentStrategy, context: TournamentContext,
playerGames: scala.collection.Map[Player, Int] = Map(),
expectedResults: Seq[Player]): SingleTestResult = {
val matchesOrResult = strategy.matchesOrResult(context)
matchesOrResult match {
case Matches(matches) =>
val updatedPlayerGames = new mutable.HashMap[Player, Int]().withDefaultValue(0) ++ playerGames
matches.foreach { m =>
updatedPlayerGames.update(m.player1, updatedPlayerGames(m.player1) + 1)
updatedPlayerGames.update(m.player2, updatedPlayerGames(m.player2) + 1)
}
val games = matches.toList.map(organizeGame)
val updatedContext = context.copy(round = context.round + 1).withGames(games)
runIteratively(strategy, updatedContext, updatedPlayerGames, expectedResults)
case Result(results) =>
SingleTestResult(strategy, results, expectedResults, context.round, playerGames)
}
}
private def organizeGame(matchup: MatchUp): Game = {
val perf1 = matchup.player1.randomPerformance
val perf2 = matchup.player2.randomPerformance
val epsilon = 0.1
if (matchup.enableDraw && math.abs(perf1 - perf2) < epsilon) {
val avg = (perf1 + perf2) / 2
Game(matchup, avg, avg)
} else {
Game(matchup, perf1, perf2)
}
}
}
|
frankdavid/ranking
|
src/main/scala/hu/frankdavid/ranking/workbench/TestRunner.scala
|
Scala
|
apache-2.0
| 3,415
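A hypothetical usage sketch of TestRunner; SwissStrategy is a stand-in name for whatever concrete TournamentStrategy implementation the project provides, not an identifier from this file:
// SwissStrategy is an assumed TournamentStrategy implementation
val runner = TestRunner(
  numberOfPlayers = 64,
  awardedPlayers = 3,
  maxParallelism = 4,
  playerPerformanceDeviation = 0.5,
  resultPredictionDeviation = 0.3)
val averaged = runner.runMany(new SwissStrategy, times = 100)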
|
package io.digitalmagic.akka.dsl
import java.time.Instant
import akka.actor.{ActorSelection, Props}
import io.digitalmagic.coproduct.{Cop, CopK, TNilK}
import io.digitalmagic.akka.dsl.API._
import io.digitalmagic.akka.dsl.EventSourcedActorWithInterpreter.IndexFuture
import io.digitalmagic.akka.dsl.context.ProgramContextOps
import scalaz._
import scalaz.Scalaz._
import scala.reflect.ClassTag
object Actor1 {
sealed trait Query[A] extends API.Query[A]
case object GetValue extends Query[Int]
case class SetValue(value: Int) extends Command[Unit]
class Api[Program[_]](implicit N: Query ~> Program) {
def getValue: Program[Int] = N(GetValue)
}
def interpreter(actorSelection: ActorSelection): Query ~> LazyFuture = Lambda[Query ~> LazyFuture] {
case q: GetValue.type => actorSelection query q
}
sealed trait Actor1Event extends Event
case class ValueSet(value: Int) extends Actor1Event {
override type TimestampType = Instant
override var timestamp: Instant = Instant.now
}
case class Actor1State(value: Int) extends PersistentState {
override type EventType = Actor1Event
}
def props: Props = Props(new Actor1)
}
trait Actor1Programs extends EventSourcedPrograms {
import Actor1._
override type Environment = Unit
override val contextOps: ProgramContextOps = new ProgramContextOps
override type EventType = Actor1Event
override lazy val eventTypeTag: ClassTag[Actor1Event] = implicitly
override type State = Actor1State
override lazy val stateTag: ClassTag[State] = implicitly
override lazy val persistentState: PersistentStateProcessor[State] = new PersistentStateProcessor[State] {
override def empty: State = Actor1State(0)
override def process(state: State, event: EventType): State = event match {
case ValueSet(value) => state.copy(value = value)
}
}
override type TransientState = Unit
override lazy val initialTransientState: TransientState = ()
override type EntityIdType = Unit
override type QueryList = TNilK
override type QueryAlgebra[A] = CopK[QueryList, A]
override val algebraIsQuery: IsQuery[QueryAlgebra] = implicitly
override type Index = EmptyIndexList
override val clientRuntime: ClientRuntime[Index#List, Index] = implicitly
def getValue: Program[Int] = gets(_.value)
def setValue(value: Int): Program[Unit] = for {
_ <- log(_.info("setting value"))
_ <- emit(ValueSet(value))
} yield ()
override def getEnvironment(r: Request[_]): Unit = ()
override def processSnapshot(s: Any): Option[State] = s match {
case x: State => Some(x)
case _ => None
}
override def getProgram: Request ~> MaybeProgram = Lambda[Request ~> MaybeProgram] {
case GetValue => Some(getValue)
case SetValue(value) => Some(setValue(value))
case _ => None
}
}
class Actor1 extends Actor1Programs with EventSourcedActorWithInterpreter {
override def entityId: Unit = ()
override def persistenceId: String = s"${context.system.name}.Actor1"
override def interpreter: QueryAlgebra ~> LazyFuture = CopK.NaturalTransformation.summon
override def indexInterpreter: Index#Algebra ~> IndexFuture = CopK.NaturalTransformation.summon
override def clientApiInterpreter: Index#ClientAlgebra ~> Const[Unit, *] = CopK.NaturalTransformation.summon
override def localApiInterpreter: Index#LocalAlgebra ~> Id = CopK.NaturalTransformation.summon
override def clientEventInterpreter: ClientEventInterpreter = Cop.Function.summon
}
|
digital-magic-io/akka-cqrs-dsl
|
akka-cqrs-dsl-core/src/test/scala/io/digitalmagic/akka/dsl/Actor1.scala
|
Scala
|
apache-2.0
| 3,484
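A self-contained scalaz sketch of the natural-transformation pattern behind the Query ~> LazyFuture interpreter above: a ~> value is a function between type constructors that is polymorphic in the element type (Option ~> List here, purely illustrative):
import scalaz.~>
val optionToList: Option ~> List = new (Option ~> List) {
  def apply[A](fa: Option[A]): List[A] = fa.toList   // works uniformly for every A
}
// optionToList(Some(1)) == List(1); optionToList(None) == Nil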
|
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.core.server.common
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import play.api.Logger
import play.api.mvc._
import play.api.http._
import play.api.http.HeaderNames._
import play.api.http.Status._
import play.api.mvc.request.RequestAttrKey
import play.core.utils.AsciiBitSet
import play.core.utils.AsciiRange
import play.core.utils.AsciiSet
import scala.annotation.tailrec
import scala.collection.immutable.ArraySeq
import scala.concurrent.Future
import scala.util.control.NonFatal
private[play] final class ServerResultUtils(
sessionBaker: SessionCookieBaker,
flashBaker: FlashCookieBaker,
cookieHeaderEncoding: CookieHeaderEncoding
) {
private val logger = Logger(getClass)
/**
* Determine whether the connection should be closed, and what header, if any, should be added to the response.
*/
def determineConnectionHeader(request: RequestHeader, result: Result): ConnectionHeader = {
if (request.version == HttpProtocol.HTTP_1_1) {
if (result.header.headers.get(CONNECTION).exists(_.equalsIgnoreCase(CLOSE))) {
// Close connection, header already exists
DefaultClose
} else if ((result.body.isInstanceOf[HttpEntity.Streamed] && result.body.contentLength.isEmpty)
|| request.headers.get(CONNECTION).exists(_.equalsIgnoreCase(CLOSE))) {
// We need to close the connection and set the header
SendClose
} else {
DefaultKeepAlive
}
} else {
if (result.header.headers.get(CONNECTION).exists(_.equalsIgnoreCase(CLOSE))) {
DefaultClose
} else if ((result.body.isInstanceOf[HttpEntity.Streamed] && result.body.contentLength.isEmpty) ||
request.headers.get(CONNECTION).forall(!_.equalsIgnoreCase(KEEP_ALIVE))) {
DefaultClose
} else {
SendKeepAlive
}
}
}
/**
* Validate the result.
*
* Returns the validated result, which may be an error result if validation failed.
*/
def validateResult(request: RequestHeader, result: Result, httpErrorHandler: HttpErrorHandler)(
implicit mat: Materializer
): Future[Result] = {
if (request.version == HttpProtocol.HTTP_1_0 && result.body.isInstanceOf[HttpEntity.Chunked]) {
cancelEntity(result.body)
val exception = new ServerResultException("HTTP 1.0 client does not support chunked response", result, null)
val errorResult: Future[Result] = httpErrorHandler.onServerError(request, exception)
import play.core.Execution.Implicits.trampoline
errorResult.map { (originalErrorResult: Result) =>
// Update the original error with a new status code and a "Connection: close" header
import originalErrorResult.{ header => h }
val newHeader = h.copy(
status = Status.HTTP_VERSION_NOT_SUPPORTED,
headers = h.headers + (CONNECTION -> CLOSE)
)
originalErrorResult.copy(header = newHeader)
}
} else if (!mayHaveEntity(result.header.status) && !result.body.isKnownEmpty) {
cancelEntity(result.body)
Future.successful(result.copy(body = HttpEntity.Strict(ByteString.empty, result.body.contentType)))
} else {
Future.successful(result)
}
}
/** Set of characters that are allowed in a header name. */
private[this] val allowedHeaderNameChars: AsciiBitSet = {
/*
* From https://tools.ietf.org/html/rfc7230#section-3.2:
* field-name = token
* From https://tools.ietf.org/html/rfc7230#section-3.2.6:
* token = 1*tchar
* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
* / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
* / DIGIT / ALPHA
*/
val TChar = AsciiSet('!', '#', '$', '%', '&', '\'', '*', '+', '-', '.', '^', '_', '`', '|',
'~') ||| AsciiSet.Sets.Digit ||| AsciiSet.Sets.Alpha
TChar.toBitSet
}
def validateHeaderNameChars(headerName: String): Unit =
validateString(allowedHeaderNameChars, "header name", headerName)
/** Set of characters that are allowed in a header value. */
private[this] val allowedHeaderValueChars: AsciiBitSet = {
/*
* From https://tools.ietf.org/html/rfc7230#section-3.2:
* field-value = *( field-content / obs-fold )
* field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
* field-vchar = VCHAR / obs-text
* From https://tools.ietf.org/html/rfc7230#section-3.2.6:
* obs-text = %x80-FF
*/
val ObsText = new AsciiRange(0x80, 0xFF)
val FieldVChar = AsciiSet.Sets.VChar ||| ObsText
val FieldContent = FieldVChar ||| AsciiSet(' ', '\t')
FieldContent.toBitSet
}
def validateHeaderValueChars(headerValue: String): Unit =
validateString(allowedHeaderValueChars, "header value", headerValue)
private def validateString(allowedSet: AsciiBitSet, setDescription: String, string: String): Unit = {
@tailrec def loop(i: Int): Unit = {
if (i < string.length) {
val c = string.charAt(i)
if (!allowedSet.get(c))
throw new InvalidHeaderCharacterException(
s"Invalid $setDescription character: '$c' (${c.toInt}) in string $string at position $i",
c
)
loop(i + 1)
}
}
loop(0)
}
/**
* Handles result conversion in a safe way.
*
* 1. Tries to convert the `Result`.
* 2. If there's an error, calls the `HttpErrorHandler` to get a new
* `Result`, then converts that.
* 3. If there's an error with *that* `Result`, uses the
* `DefaultHttpErrorHandler` to get another `Result`, then converts
* that.
* 4. Hopefully there are no more errors. :)
* 5. If calling an `HttpErrorHandler` throws an exception, then a
* fallback response is returned, without any conversion.
*/
def resultConversionWithErrorHandling[R](
requestHeader: RequestHeader,
result: Result,
errorHandler: HttpErrorHandler
)(resultConverter: Result => Future[R])(fallbackResponse: => R): Future[R] = {
import play.core.Execution.Implicits.trampoline
def handleConversionError(conversionError: Throwable): Future[R] = {
val isInvalidHeaderCharacter = conversionError.isInstanceOf[InvalidHeaderCharacterException]
val shouldLog = if (isInvalidHeaderCharacter) logger.isInfoEnabled else logger.isErrorEnabled
def log(message: String, error: Throwable) =
if (isInvalidHeaderCharacter) logger.info(message, error) else logger.error(message, error)
try {
// Log some information about the error
if (shouldLog) {
val prettyHeaders =
result.header.headers.map { case (name, value) => s"<$name>: <$value>" }.mkString("[", ", ", "]")
val msg =
s"Exception occurred while converting Result with headers $prettyHeaders. Calling HttpErrorHandler to get alternative Result."
log(msg, conversionError)
}
// Call the HttpErrorHandler to generate an alternative error
val futureErrorResult = if (isInvalidHeaderCharacter) {
errorHandler.onClientError(
requestHeader,
400,
s"Invalid header: ${conversionError.getMessage()}"
)
} else {
errorHandler.onServerError(
requestHeader,
new ServerResultException("Error converting Play Result for server backend", result, conversionError)
)
}
futureErrorResult.flatMap { errorResult =>
// Convert errorResult using normal conversion logic. This time use
// the DefaultErrorHandler if there are any problems, e.g. if the
// current HttpErrorHandler returns an invalid Result.
resultConversionWithErrorHandling(requestHeader, errorResult, DefaultHttpErrorHandler)(resultConverter)(
fallbackResponse
)
}
} catch {
case NonFatal(onErrorError) =>
// Conservatively handle exceptions thrown by HttpErrorHandlers by
// returning a fallback response.
logger.error("Error occurred during error handling. Original error: ", conversionError)
logger.error("Error occurred during error handling. Error handling error: ", onErrorError)
Future.successful(fallbackResponse)
}
}
try {
// Try to convert the result
resultConverter(result).recoverWith { case t => handleConversionError(t) }
} catch {
case NonFatal(e) => handleConversionError(e)
}
}
/** Whether the given status may have an entity or not. */
def mayHaveEntity(status: Int): Boolean = status match {
case CONTINUE | SWITCHING_PROTOCOLS | NO_CONTENT | NOT_MODIFIED =>
false
case _ =>
true
}
/**
* Cancel the entity.
*
* While theoretically, an Akka streams Source is not supposed to hold resources, in practice, this is very often not
* the case, for example, the response from an Akka HTTP client may have an associated Source that must be consumed
* (or cancelled) before the associated connection can be returned to the connection pool.
*/
def cancelEntity(entity: HttpEntity)(implicit mat: Materializer) = {
entity match {
case HttpEntity.Chunked(chunks, _) => chunks.runWith(Sink.cancelled)
case HttpEntity.Streamed(data, _, _) => data.runWith(Sink.cancelled)
case _ =>
}
}
/**
* The connection header logic to use for the result.
*/
sealed trait ConnectionHeader {
def willClose: Boolean
def header: Option[String]
}
/**
* A `Connection: keep-alive` header should be sent. Used to
* force an HTTP 1.0 connection to remain open.
*/
case object SendKeepAlive extends ConnectionHeader {
override def willClose = false
override def header = Some(KEEP_ALIVE)
}
/**
* A `Connection: close` header should be sent. Used to
* force an HTTP 1.1 connection to close.
*/
case object SendClose extends ConnectionHeader {
override def willClose = true
override def header = Some(CLOSE)
}
/**
* No `Connection` header should be sent. Used on an HTTP 1.0
* connection where the default behavior is to close the connection,
* or when the response already has a Connection: close header.
*/
case object DefaultClose extends ConnectionHeader {
override def willClose = true
override def header = None
}
/**
* No `Connection` header should be sent. Used on an HTTP 1.1
* connection where the default behavior is to keep the connection
* open.
*/
case object DefaultKeepAlive extends ConnectionHeader {
override def willClose = false
override def header = None
}
// Values for the Connection header
private val KEEP_ALIVE = "keep-alive"
private val CLOSE = "close"
/**
* Bake the cookies and prepare the new Set-Cookie header.
*/
def prepareCookies(requestHeader: RequestHeader, result: Result): Result = {
val requestHasFlash = requestHeader.attrs.get(RequestAttrKey.Flash) match {
case None =>
// The request didn't have a flash object in it, either because we
// used a custom RequestFactory which didn't install the flash object
// or because there was an error in request processing which caused
// us to bypass the application's RequestFactory. In this case we
// can assume that there is no flash object we need to clear.
false
case Some(flashCell) =>
// The request had a flash object and it was non-empty, so the flash
// cookie value may need to be cleared.
!flashCell.value.isEmpty
}
result.bakeCookies(cookieHeaderEncoding, sessionBaker, flashBaker, requestHasFlash)
}
/**
* Given a map of headers, split it into a sequence of individual headers.
* Most headers map into a single pair in the new sequence. The exception is
* the `Set-Cookie` header which we split into a pair for each cookie it
* contains. This allows us to work around issues with clients that can't
* handle combined headers. (Also RFC6265 says multiple headers shouldn't
* be folded together, which Play's API unfortunately does.)
*/
def splitSetCookieHeaders(headers: Map[String, String]): Iterable[(String, String)] = {
if (headers.contains(SET_COOKIE)) {
// Rewrite the headers with Set-Cookie split into separate headers
headers.toSeq.flatMap {
case (SET_COOKIE, value) =>
splitSetCookieHeaderValue(value)
.map { cookiePart =>
SET_COOKIE -> cookiePart
}
case (name, value) =>
Seq((name, value))
}
} else {
// No Set-Cookie header so we can just use the headers as they are
headers
}
}
def splitSetCookieHeaderValue(value: String): Seq[String] =
ArraySeq.unsafeWrapArray(cookieHeaderEncoding.SetCookieHeaderSeparatorRegex.split(value))
}
|
mkurz/playframework
|
transport/server/play-server/src/main/scala/play/core/server/common/ServerResultUtils.scala
|
Scala
|
apache-2.0
| 13,128
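A self-contained sketch (plain Scala, no Play types) of the RFC 7230 tchar test that allowedHeaderNameChars encodes as a bit set above:
val tchar: Char => Boolean = c =>
  (c < 0x80 && c.isLetterOrDigit) || "!#$%&'*+-.^_`|~".contains(c)
assert("Content-Type".forall(tchar))   // letters, digits and '-' are tchars
assert(!"Bad Header".forall(tchar))    // space is not a tchar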
|
/*
* Copyright 2014-2015 Sphonic Ltd. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sphonic.phantom.connector
import com.datastax.driver.core.Session
/**
* Represents a single Cassandra keySpace.
*
* Provides access to the associated `Session` as well as to a
* `Connector` trait that can be mixed into `CassandraTable`
* instances.
*
* @param name the name of the keySpace
* @param provider the provider for this keySpace
*/
class KeySpace (val name: String, val provider: SessionProvider) { outer =>
/**
* The Session associated with this keySpace.
*/
lazy val session: Session = provider.getSession(name)
/**
* Trait that can be mixed into `CassandraTable`
* instances.
*/
trait Connector extends com.sphonic.phantom.connector.Connector {
lazy val provider = outer.provider
lazy val keySpace = outer.name
}
}
|
melodious/sphonic-phantom
|
connector/src/main/scala/com/sphonic/phantom/connector/KeySpace.scala
|
Scala
|
apache-2.0
| 1,433
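An illustrative sketch of the `outer =>` self-alias idiom KeySpace relies on: an inner trait reaches the enclosing instance's members through the alias, so each mixed-in Connector is bound to one particular keyspace:
class Registry(val name: String) { outer =>
  trait Member {
    def registryName: String = outer.name   // refers to the enclosing Registry
  }
}
val r = new Registry("main")
val m = new r.Member {}                      // path-dependent instantiation
// m.registryName == "main"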
|
package com.twitter.util
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.annotation.tailrec
import scala.collection.generic.CanBuild
import scala.collection.immutable.Queue
import scala.language.higherKinds
/**
* Events are instantaneous values, defined only at particular
* instants in time (cf. [[com.twitter.util.Var Vars]], which are
* defined at all times). It is possible to view Events as the
* discrete counterpart to [[com.twitter.util.Var Var]]'s continuous
* nature.
*
* Events are observed by registering [[com.twitter.util.Witness Witnesses]]
* to which the Event's values are notified.
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.util.AbstractEvent]].
*/
trait Event[+T] { self =>
/**
* Register the given [[com.twitter.util.Witness Witness]] to
* this Event. Witnesses are notified of new values until it is
* deregistered by the returned [[com.twitter.util.Closable Closable]].
*/
def register(s: Witness[T]): Closable
/**
* Observe this event with function `s`. Equivalent to
* `register(Witness(s))`.
*/
final def respond(s: T => Unit): Closable = register(Witness(s))
/**
* Build a new Event by applying the given function to each value
* observed. Event values for which the partial function `f` does
* not apply are dropped; other values are transformed by `f`.
*/
def collect[U](f: PartialFunction[T, U]): Event[U] = new Event[U] {
def register(s: Witness[U]): Closable =
self.respond { t =>
f.runWith(s.notify)(t)
}
}
/**
* Build a new Event by keeping only those Event values that match
* the predicate `p`.
*/
def filter(p: T => Boolean): Event[T] =
collect { case t if p(t) => t }
/**
* Build a new Event by transforming each new event value with `f`.
*/
def map[U](f: T => U): Event[U] =
collect { case t => f(t) }
/**
* Build a new Event by incrementally accumulating over events,
* starting with value `z`. Each intermediate aggregate is notified
* to the derived event.
*/
def foldLeft[U](z: U)(f: (U, T) => U): Event[U] = new Event[U] {
def register(s: Witness[U]): Closable = {
var a = z
val mu = new Object()
self.respond { Function.synchronizeWith(mu) { t =>
a = f(a, t)
s.notify(a)
}}
}
}
/**
* Build a new Event representing a sliding window of at-most `n`.
* Each value notified by the parent is added to a queue of size
* at-most `n`; this queue is in turn notified to the witnesses
* registered on the returned event.
*/
def sliding(n: Int): Event[Seq[T]] = new Event[Seq[T]] {
require(n > 0)
def register(s: Witness[Seq[T]]): Closable = {
val mu = new Object()
var q = Queue.empty[T]
self.respond { t =>
s.notify(mu.synchronized {
q = q.enqueue(t)
while (q.length > n) {
val (_, q1) = q.dequeue
q = q1
}
q
})
}
}
}
/**
* The Event which merges the events resulting from `f` applied
* to each element in this Event.
*/
def mergeMap[U](f: T => Event[U]): Event[U] = new Event[U] {
def register(s: Witness[U]): Closable = {
@volatile var inners = Nil: List[Closable]
// synchronize on a fixed monitor: locking `inners` itself would lock the
// current list object, which changes on every reassignment
val mu = new Object()
val outer = self.respond { el =>
mu.synchronized { inners ::= f(el).register(s) }
}
Closable.make { deadline =>
outer.close(deadline) before {
Closable.all(inners:_*).close(deadline)
}
}
}
}
/**
* Merge two Events of different types.
*/
def select[U](other: Event[U]): Event[Either[T, U]] = new Event[Either[T, U]] {
def register(s: Witness[Either[T, U]]): Closable = Closable.all(
self.register(s.comap { t => Left(t) }),
other.register(s.comap { u => Right(u) })
)
}
/**
* Merge two event streams in lock-step, combining corresponding
* event values.
*
* @note This can be dangerous! Since the implementation needs to
* queue outstanding Event-values from the slower producer, if one
* Event outpaces another, this queue can grow in an unbounded
* fashion.
*/
def zip[U](other: Event[U]): Event[(T, U)] = new Event[(T, U)] {
def register(s: Witness[(T, U)]): Closable = {
val mu = new Object()
var state: Option[Either[Queue[T], Queue[U]]] = None
val left = self.respond { Function.synchronizeWith(mu) { t =>
state match {
case None =>
state = Some(Left(Queue(t)))
case Some(Left(q)) =>
state = Some(Left(q enqueue t))
case Some(Right(Queue(u, rest@_*))) =>
if (rest.isEmpty) state = None
else state = Some(Right(Queue(rest:_*)))
s.notify((t, u))
}
}}
val right = other.respond { Function.synchronizeWith(mu) { u =>
state match {
case None =>
state = Some(Right(Queue(u)))
case Some(Right(q)) =>
state = Some(Right(q enqueue u))
case Some(Left(Queue(t, rest@_*))) =>
if (rest.isEmpty) state = None
else state = Some(Left(Queue(rest:_*)))
s.notify((t, u))
}
}}
Closable.all(left, right)
}
}
/**
* Join two events into a new Event which notifies a tuple of the
* last value in each underlying event.
*/
def joinLast[U](other: Event[U]): Event[(T, U)] = new Event[(T, U)] {
def register(s: Witness[(T, U)]): Closable = {
import Event.JoinState
import JoinState._
var state: JoinState[T, U] = Empty
val mu = new Object()
val left = self.respond { Function.synchronizeWith(mu) { t =>
state match {
case Empty | LeftHalf(_) =>
state = LeftHalf(t)
case RightHalf(u) =>
state = Full(t, u)
s.notify((t, u))
case Full(_, u) =>
state = Full(t, u)
s.notify((t, u))
}
}}
val right = other.respond { Function.synchronizeWith(mu) { u =>
state match {
case Empty | RightHalf(_) =>
state = RightHalf(u)
case LeftHalf(t) =>
state = Full(t, u)
s.notify((t, u))
case Full(t, _) =>
state = Full(t, u)
s.notify((t, u))
}
}}
Closable.all(left, right)
}
}
/**
* An event which consists of the first `howmany` values
* in the parent Event.
*/
def take(howmany: Int): Event[T] = new Event[T] {
def register(s: Witness[T]): Closable = {
val n = new AtomicInteger(0)
val c = new AtomicReference(Closable.nop)
c.set(self.respond { t =>
if (n.incrementAndGet() <= howmany) s.notify(t)
else c.getAndSet(Closable.nop).close()
})
if (n.get() == howmany)
c.getAndSet(Closable.nop).close()
Closable.ref(c)
}
}
/**
* Merge two events; the resulting event interleaves events
* from this and `other`.
*/
def merge[U >: T](other: Event[U]): Event[U] = new Event[U] {
def register(s: Witness[U]): Closable = {
val c1 = self.register(s)
val c2 = other.register(s)
Closable.all(c1, c2)
}
}
/**
* Progressively build a collection of events using the passed-in
* builder. A value containing the current version of the collection
* is notified for each incoming event.
*/
def build[U >: T, That](implicit cbf: CanBuild[U, That]) = new Event[That] {
def register(s: Witness[That]): Closable = {
val b = cbf()
self.respond { t =>
b += t
s.notify(b.result())
}
}
}
/**
* A Future which is satisfied by the first value observed.
*/
def toFuture(): Future[T] = {
val p = new Promise[T]
val c = register(Witness(p))
p.setInterruptHandler { case exc =>
p.updateIfEmpty(Throw(exc))
}
p.ensure { c.close() }
}
/**
* The [[Event]] that stores the difference between successive
* updates to the parent event. This can be used to perform
* incremental computation on large data structures.
*/
def diff[CC[_]: Diffable, U](implicit toCC: T <:< CC[U]): Event[Diff[CC, U]] = new Event[Diff[CC, U]] {
def register(s: Witness[Diff[CC, U]]): Closable = {
var left: CC[U] = Diffable.empty[CC, U]
self.respond { t =>
synchronized {
val right = toCC(t)
val diff = Diffable.diff(left, right)
left = right
s.notify(diff)
}
}
}
}
/**
* Patch up an [[Event]] of differences (like those produced by
* [[Event.diff]]) into an [[Event]] that reflects the current
* version of a data structure. That is: `(event:
* Event[CC[T]]).diff.patch` is equivalent to `event`
*/
def patch[CC[_]: Diffable, U](implicit ev: T <:< Diff[CC, U]): Event[CC[U]] = new Event[CC[U]] {
def register(s: Witness[CC[U]]): Closable = {
var last: CC[U] = Diffable.empty[CC, U]
self.respond { diff =>
synchronized {
last = diff.patch(last)
s.notify(last)
}
}
}
}
/**
* Build a new Event by keeping only those Event values where the
* equality predicate `eq` applied to the current and new values
* does not match.
*/
def dedupWith(eq: (T, T) => Boolean): Event[T] =
sliding(2).collect {
case Seq(init) => init
case Seq(current, next) if !eq(current, next) => next
}
/**
* Builds a new Event by keeping only the Events where
* the previous and current values are not `==` to each other.
*/
def dedup: Event[T] = dedupWith { (a, b) => a == b }
}
/**
* Abstract `Event` class for Java compatibility.
*/
abstract class AbstractEvent[T] extends Event[T]
/**
* Note: There is a Java-friendly API for this object: [[com.twitter.util.Events]].
*/
object Event {
private sealed trait JoinState[+T, +U]
private object JoinState {
object Empty extends JoinState[Nothing, Nothing]
case class LeftHalf[T](t: T) extends JoinState[T, Nothing]
case class RightHalf[U](u: U) extends JoinState[Nothing, U]
case class Full[T, U](t: T, u: U) extends JoinState[T, U]
}
/**
* A new [[Event]] of type `T` which is also a [[Witness]].
*/
def apply[T](): Event[T] with Witness[T] = new Event[T] with Witness[T] {
private[this] val witnesses = new AtomicReference(Set.empty[Witness[T]])
def register(w: Witness[T]): Closable = {
casAdd(w)
Closable.make { _ =>
casRemove(w)
Future.Done
}
}
/**
* Notifies registered witnesses
*
* @note This method is synchronized to ensure that all witnesses
* receive notifications in the same order. Consequently it will block
* until the witnesses are notified.
*/
def notify(t: T): Unit = synchronized {
val current = witnesses.get
for (w <- current)
w.notify(t)
}
@tailrec
private def casAdd(w: Witness[T]): Unit = {
val current = witnesses.get
if (!witnesses.compareAndSet(current, current + w)) {
casAdd(w)
}
}
@tailrec
private def casRemove(w: Witness[T]): Unit = {
val current = witnesses.get
if (!witnesses.compareAndSet(current, current - w)) {
casRemove(w)
}
}
}
}
/**
* A witness is the recipient of [[com.twitter.util.Event Event]] values.
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.util.AbstractWitness]].
*/
trait Witness[-N] { self =>
/**
* Notify this Witness with the given note.
*/
def notify(note: N): Unit
def comap[M](f: M => N): Witness[M] = new Witness[M] {
def notify(m: M) = self.notify(f(m))
}
}
/**
* Abstract `Witness` class for Java compatibility.
*/
abstract class AbstractWitness[T] extends Witness[T]
/**
* Note: There is Java-friendly API for this object: [[com.twitter.util.Witnesses]].
*/
object Witness {
/**
* Create a Witness from an atomic reference.
*/
def apply[T](ref: AtomicReference[T]): Witness[T] = new Witness[T] {
def notify(t: T): Unit = ref.set(t)
}
/**
* Create a Witness from a [[com.twitter.util.Promise Promise]].
*/
def apply[T](p: Promise[T]): Witness[T] = new Witness[T] {
def notify(t: T): Unit = p.updateIfEmpty(Return(t))
}
/**
* Create a Witness from a function.
*/
def apply[T](f: T => Unit): Witness[T] = new Witness[T] {
def notify(t: T): Unit = f(t)
}
def apply[T](u: Updatable[T]): Witness[T] = new Witness[T] {
def notify(t: T): Unit = u() = t
}
/**
* A Witness which prints to the console.
*/
val printer: Witness[Any] = Witness(println(_))
}
/**
* A Java analog of `Event[A]()`.
*/
class WitnessedEvent[T] extends Event[T] with Witness[T] {
private[this] val underlying = Event[T]()
def register(s: Witness[T]): Closable = underlying.register(s)
def notify(note: T): Unit = underlying.notify(note)
}
|
tdyas/util
|
util-core/src/main/scala/com/twitter/util/Event.scala
|
Scala
|
apache-2.0
| 12,990
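A usage sketch built only from the API defined above: dedup drops the repeated value, and sliding(2) then notifies the growing window:
val e = Event[Int]()                            // an Event that is also a Witness
val c = e.dedup.sliding(2).respond(println)     // prints Queue(1), then Queue(1, 2)
e.notify(1); e.notify(1); e.notify(2)           // the second 1 is deduplicated
c.close()                                       // deregister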
|
package blended.itestsupport.ldap
import java.util
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import akka.actor.{ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import blended.itestsupport.condition.{AsyncChecker, AsyncCondition}
import javax.naming.directory.InitialDirContext
object LDAPAvailableCondition {
def apply(env: Map[String, String], t: Option[FiniteDuration])(implicit system: ActorSystem) =
AsyncCondition(LDAPChecker.props(env), "LDAPAvailableCondition", t)
}
private[ldap] object LDAPChecker {
def props(env: Map[String, String]): Props = Props(new LDAPChecker(env))
}
private[ldap] class LDAPChecker(env: Map[String, String]) extends AsyncChecker {
val connected: AtomicBoolean = new AtomicBoolean(false)
val connecting: AtomicBoolean = new AtomicBoolean(false)
override def supervisorStrategy = OneForOneStrategy() {
case _ => SupervisorStrategy.Stop
}
override def performCheck(condition: AsyncCondition): Future[Boolean] = {
// check-and-set atomically so two concurrent checks cannot both connect
if (!connected.get() && connecting.compareAndSet(false, true)) {
val ldapEnv = new util.Hashtable[String, String]
env.foreach { case (k, v) => ldapEnv.put(k, v) }
try {
val ctxt = new InitialDirContext(ldapEnv)
connected.set(true)
ctxt.close()
} catch {
case _: Throwable => connected.set(false)
}
connecting.set(false)
}
Future(connected.get())
}
}
|
woq-blended/blended
|
blended.itestsupport/src/main/scala/blended/itestsupport/ldap/LDAPAvailableCondition.scala
|
Scala
|
apache-2.0
| 1,519
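A hypothetical wiring sketch: the two environment keys are the standard JNDI property names; the host, port, actor-system name and timeout are arbitrary example values:
import scala.concurrent.duration._
implicit val system: ActorSystem = ActorSystem("ldapCheckDemo")
val env = Map(
  "java.naming.factory.initial" -> "com.sun.jndi.ldap.LdapCtxFactory",
  "java.naming.provider.url"    -> "ldap://localhost:389"
)
val condition = LDAPAvailableCondition(env, Some(10.seconds))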
|
package typeformation.cf
import CfExp.ParameterRef
import enum.Enum
sealed trait Parameter extends HasLogicalId with HasRef {
def logicalId: String
def Type: Parameter.DataType
def Description: Option[String]
def NoEcho: Option[Boolean]
def ConstraintDescription: Option[String]
override def ref: CfExp[String] = ParameterRef(this)
}
object Parameter {
sealed trait AwsParamType
object AwsParamType {
case object `AWS::EC2::AvailabilityZone::Name` extends AwsParamType
case object `AWS::EC2::Instance::Id` extends AwsParamType
case object `AWS::EC2::Image::Id` extends AwsParamType
case object `AWS::EC2::KeyPair::KeyName` extends AwsParamType
case object `AWS::EC2::SecurityGroup::GroupName` extends AwsParamType
case object `AWS::EC2::SecurityGroup::Id` extends AwsParamType
case object `AWS::EC2::Subnet::Id` extends AwsParamType
case object `AWS::EC2::Volume::Id` extends AwsParamType
case object `AWS::EC2::VPC::Id` extends AwsParamType
case object `AWS::Route53::HostedZone::Id` extends AwsParamType
implicit val awsParamEnum: Enum[AwsParamType] = Enum.derived[AwsParamType]
}
sealed trait DataType
object DataType {
case object String extends DataType
case object Number extends DataType
case object `List<Number>` extends DataType
case object CommaDelimitedList extends DataType
case class AwsType(tpe: Parameter.AwsParamType) extends DataType
case class AwsTypeList(tpe: Parameter.AwsParamType) extends DataType
}
case class Str(logicalId: String,
MaxLength: Option[Int] = None,
MinLength: Option[Int] = None,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
AllowedValues: Option[Set[String]] = None,
AllowedPattern: Option[String] = None,
Default: Option[String] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override def Type: DataType = DataType.String
}
case class Double(logicalId: String,
MaxValue: Option[scala.Double] = None,
MinValue: Option[scala.Double] = None,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
Default: Option[scala.Double] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override val Type = DataType.Number
}
case class Integer(logicalId: String,
MaxValue: Option[Int] = None,
MinValue: Option[Int] = None,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
Default: Option[Int] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override val Type = DataType.Number
}
case class CommaDelimited(logicalId: String,
AllowedValues: Option[Set[String]] = None,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
Default: Option[Set[String]] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override val Type = DataType.CommaDelimitedList
}
case class Aws(logicalId: String,
awsType: Parameter.AwsParamType,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
Default: Option[String] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override def Type = DataType.AwsType(awsType)
}
case class AwsList(logicalId: String,
awsType: Parameter.AwsParamType,
Description: Option[String] = None,
NoEcho: Option[Boolean] = None,
Default: Option[String] = None,
ConstraintDescription: Option[String] = None) extends Parameter {
override def Type = DataType.AwsTypeList(awsType)
}
}
|
typeformation/typeformation
|
cf/src/main/scala/typeformation/cf/Parameter.scala
|
Scala
|
mit
| 4,177
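A usage sketch using only the constructors defined above (logical ids and defaults are arbitrary example values):
val env = Parameter.Str("Environment",
  AllowedValues = Some(Set("dev", "prod")),
  Default = Some("dev"))
val subnets = Parameter.AwsList("SubnetIds",
  Parameter.AwsParamType.`AWS::EC2::Subnet::Id`)
val size = Parameter.Integer("ClusterSize",
  MinValue = Some(1), MaxValue = Some(10), Default = Some(3))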
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.common.metadata
import java.util.Properties
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.dbcp2.{PoolingDataSource, _}
import org.apache.commons.pool2.impl.{GenericObjectPool, GenericObjectPoolConfig}
import org.locationtech.geomesa.fs.storage.api._
import org.locationtech.geomesa.utils.io.{CloseQuietly, WithClose}
import org.locationtech.geomesa.utils.stats.MethodProfiling
import scala.util.control.NonFatal
class JdbcMetadataFactory extends StorageMetadataFactory {
override def name: String = JdbcMetadata.MetadataType
/**
* Creates a metadata instance from an existing root. The metadata connection info is persisted in a
* `metadata.json` file under the root path.
*
* If a previous check was made to load a file from this root, and the file did not exist, will not
* re-attempt to load it until after a configurable timeout.
*
* @see `org.locationtech.geomesa.fs.storage.common.utils.PathCache#CacheDurationProperty()`
* @param context file context
* @return
*/
override def load(context: FileSystemContext): Option[JdbcMetadata] = {
MetadataJson.readMetadata(context).collect {
case NamedOptions(name, opts) if name.equalsIgnoreCase(this.name) =>
val root = context.root.toUri.toString
val source = JdbcMetadataFactory.createDataSource(opts)
try {
val metadata = JdbcMetadata.load(source, root).getOrElse {
throw new IllegalArgumentException(s"Could not load metadata at root '$root'")
}
val sft = namespaced(metadata.sft, context.namespace)
new JdbcMetadata(source, root, sft, metadata)
} catch {
case NonFatal(e) => CloseQuietly(source).foreach(e.addSuppressed); throw e
}
}
}
override def create(context: FileSystemContext, config: Map[String, String], meta: Metadata): JdbcMetadata = {
// load the partition scheme first in case it fails
PartitionSchemeFactory.load(meta.sft, meta.scheme)
MetadataJson.writeMetadata(context, NamedOptions(name, config))
val root = context.root.toUri.toString
val sft = namespaced(meta.sft, context.namespace)
val source = JdbcMetadataFactory.createDataSource(config)
try {
JdbcMetadata.create(source, root, meta)
new JdbcMetadata(source, root, sft, meta)
} catch {
case NonFatal(e) => CloseQuietly(source).foreach(e.addSuppressed); throw e
}
}
}
object JdbcMetadataFactory extends MethodProfiling with LazyLogging {
/**
* Create a jdbc data source based on a configuration
*
* @param config config
* @return
*/
def createDataSource(config: Map[String, String]): PoolingDataSource[PoolableConnection] = {
import JdbcMetadata.Config._
val url = config.getOrElse(UrlKey, throw new IllegalArgumentException(s"JdbcMetadata requires '$UrlKey'"))
config.get(DriverKey).foreach(Class.forName) // required for older drivers
val props = new Properties()
config.get(UserKey).foreach(props.put("user", _))
config.get(PasswordKey).foreach(props.put("password", _))
val driver = new DriverManagerConnectionFactory(url, props)
// validate the connection parameters
WithClose(driver.createConnection()) { connection =>
if (!connection.isValid(10)) {
throw new IllegalArgumentException(
s"Could not create valid connection using configuration ${config.mkString(", ")}")
}
}
def setPoolConfig[T](key: String, conversion: String => T, method: T => Unit): Unit = {
config.get(key).foreach { v =>
try { method.apply(conversion(v)) } catch {
case NonFatal(e) => logger.warn(s"Invalid configuration value '$v' for key $key: $e")
}
}
}
val poolConfig = new GenericObjectPoolConfig[PoolableConnection]()
setPoolConfig[Int](MaxIdleKey, _.toInt, poolConfig.setMaxIdle)
setPoolConfig[Int](MinIdleKey, _.toInt, poolConfig.setMinIdle)
setPoolConfig[Int](MaxSizeKey, _.toInt, poolConfig.setMaxTotal)
setPoolConfig[Boolean](FairnessKey, _.toBoolean, poolConfig.setFairness)
setPoolConfig[Boolean](TestOnBorrowKey, _.toBoolean, poolConfig.setTestOnBorrow)
setPoolConfig[Boolean](TestOnCreateKey, _.toBoolean, poolConfig.setTestOnCreate)
setPoolConfig[Boolean](TestWhileIdlKey, _.toBoolean, poolConfig.setTestWhileIdle)
val factory = new PoolableConnectionFactory(driver, null)
val pool = new GenericObjectPool(factory, poolConfig)
factory.setPool(pool)
new PoolingDataSource[PoolableConnection](pool)
}
}
|
locationtech/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/main/scala/org/locationtech/geomesa/fs/storage/common/metadata/JdbcMetadataFactory.scala
|
Scala
|
apache-2.0
| 5,082
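An illustrative sketch of createDataSource: the keys come from JdbcMetadata.Config as referenced above, while the in-memory H2 URL and pool size are arbitrary example values:
import JdbcMetadata.Config._
val source = JdbcMetadataFactory.createDataSource(Map(
  UrlKey      -> "jdbc:h2:mem:fs-metadata",   // example JDBC URL
  UserKey     -> "sa",
  PasswordKey -> "",
  MaxSizeKey  -> "8"                          // pool max-total connections
))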
|
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.service.meta
import com.webtrends.harness.utils.{Json, JsonSerializable}
import org.joda.time.DateTime
case class ServiceMetaData(name: String, version: String, loaded: DateTime,
path: String, akkaPath: String, jar: String,
supportsHttp: Boolean, dependencies: List[String]) extends JsonSerializable {
override def toJson(): String = {
val props = Map[String, Any](
"name" -> name,
"version" -> version,
"loaded" -> loaded.toString,
"path" -> path,
"akkaPath" -> akkaPath,
"jar" -> jar,
"supportsHttp" -> supportsHttp,
"dependencies" -> dependencies
)
Json.build(props).toString
}
}
|
Crashfreak/wookiee
|
wookiee-core/src/main/scala/com/webtrends/harness/service/meta/ServiceMetaData.scala
|
Scala
|
apache-2.0
| 1,469
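A usage sketch of the JSON serialization (all field values are arbitrary examples):
val meta = ServiceMetaData(
  name = "demo-service", version = "1.0.0", loaded = new DateTime(),
  path = "services/demo", akkaPath = "/user/service/demo",
  jar = "demo-service.jar", supportsHttp = true, dependencies = List("core"))
println(meta.toJson())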
|
package ch.octo.cffpoc.gtfs.raw
import java.io.File
import ch.octo.cffpoc.gtfs._
import com.github.tototoshi.csv.CSVReader
/**
* Created by alex on 02/05/16.
*/
object RawTripReader extends RawDataCollectionReader[RawTrip] {
override def builReadFunction(header: Array[String]): (Array[String]) => RawTrip = {
val h2i = header.zipWithIndex.toMap
(line: Array[String]) => RawTrip(
RouteId(line(h2i("route_id"))),
ServiceId(line(h2i("service_id"))),
TripId(line(h2i("trip_id"))),
StopName(line(h2i("trip_headsign"))),
TripShortName(line(h2i("trip_short_name")))
)
}
}
|
alexmasselot/gtfs-simulation-play
|
src/main/scala/ch/octo/cffpoc/gtfs/raw/RawTripReader.scala
|
Scala
|
apache-2.0
| 620
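An illustrative sketch of the header-indexing closure builReadFunction builds: the header array fixes the column positions once, then every data line is parsed by index (field values are arbitrary examples):
val header = Array("route_id", "service_id", "trip_id", "trip_headsign", "trip_short_name")
val read   = RawTripReader.builReadFunction(header)
val trip   = read(Array("r-7", "weekdays", "t-4021", "Lausanne", "IC 709"))
// trip == RawTrip(RouteId("r-7"), ServiceId("weekdays"), TripId("t-4021"), ...)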
|
package freecli
package config
package api
import argument.{api => A}
import option.{api => O}
sealed trait Action
case class ArgumentAction(a: A.Action) extends Action {
def run(): Unit = a match {
case A.NoOp =>
sys.exit(0)
}
}
case class OptionAction(o: O.Action) extends Action {
def run(help: String) = {
o match {
case v: O.VersionAction =>
v.run()
case h: O.HelpAction.type =>
h.run(help)
}
sys.exit(0)
}
}
|
pavlosgi/freecli
|
core/src/main/scala/freecli/config/api/Action.scala
|
Scala
|
apache-2.0
| 484
|
package jigg.nlp.ccg
/*
Copyright 2013-2016 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.config.CommandLineParser
object TrainParser {
import ParserTrainer.Params
def main(args: Array[String]) = {
val params = CommandLineParser.readIn[Params](args)
val trainer = mkTrainer(params)
trainer.trainAndSave()
}
def mkTrainer(params: Params): ParserTrainer = params.bank.lang match {
case "ja" => new JapaneseParserTrainer(params)
case "en" => new EnglishParserTrainer(params)
case lang => sys.error(s"Unsupported language: $lang (expected ja or en)")
}
}
|
mynlp/jigg
|
src/main/scala/jigg/nlp/ccg/TrainParser.scala
|
Scala
|
apache-2.0
| 1,032
|
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class B750(value: Option[Int]) extends CtBoxIdentifier("") with CtOptionalInteger with Input
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B750.scala
|
Scala
|
apache-2.0
| 807
|
/*
* Copyright (c) 2014-2018 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.server
package model
// This project
import util.IgluPostgresDriver.simple._
// Scala
import scala.annotation.meta.field
// Java
import java.util.UUID
// Joda
import org.joda.time.LocalDateTime
// Json4s
import org.json4s.jackson.Serialization.writePretty
// Slick
import Database.dynamicSession
// Akka Http
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.StatusCodes._
// Swagger
import io.swagger.annotations.{ApiModel, ApiModelProperty}
/**
* Case class representing an API key in the database.
* @constructor create an API key object from required data
* @param uid API key uuid serving as primary key
* @param vendorPrefix of the API key
* @param permission API key permission in (read, write, super)
* @param createdAt date at which point the API key was created
*/
@ApiModel(value = "ApiKey", description = "represents an API key in the database")
case class ApiKey(
@(ApiModelProperty @field)(value = "Unique identifier of the key")
uid: UUID,
@(ApiModelProperty @field)(value = "Vendor of the key")
vendorPrefix: String,
@(ApiModelProperty @field)(value = "Permission of the key")
permission: String,
@(ApiModelProperty @field)(value = "Date at which this key was created", hidden = true)
createdAt: LocalDateTime
)
/**
* DAO for accessing the apikeys table in the database
* @constructor create an API key DAO with a reference to the database
* @param db a reference to a ``Database``
*/
class ApiKeyDAO(val db: Database) extends DAO {
private val uidRegex =
"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
/**
* Schema for the apikeys table.
*/
class ApiKeys(tag: Tag) extends Table[ApiKey](tag, "apikeys") {
def uid = column[UUID]("uid", O.PrimaryKey, O.DBType("uuid"))
def vendorPrefix = column[String]("vendor_prefix", O.DBType("varchar(200)"),
O.NotNull)
def permission = column[String]("permission",
O.DBType("varchar(20)"), O.NotNull, O.Default[String]("read"))
def createdAt = column[LocalDateTime]("createdat", O.DBType("timestamp"),
O.NotNull)
def * = (uid, vendorPrefix, permission, createdAt) <>
(ApiKey.tupled, ApiKey.unapply)
}
//Object used to access the table
val apiKeys = TableQuery[ApiKeys]
/**
* Creates the apikeys table.
*/
def createTable = db withDynSession { apiKeys.ddl.create }
/**
* Deletes the apikeys table.
*/
def dropTable = db withDynSession { apiKeys.ddl.drop }
/**
* Gets an API key from an uuid.
* @param uid the API key's uuid
* @return an option containing a (vendorPrefix, permission) pair
*/
def get(uid: String): Either[String, Option[(String, String)]] = {
if (uid matches uidRegex) {
val uuid = UUID.fromString(uid)
db withDynSession {
val tupleList: List[(String, String)] =
apiKeys
.filter(_.uid === uuid)
.map(k => (k.vendorPrefix, k.permission))
.list
tupleList match {
case single :: Nil => Right(Some(single))
case Nil => Right(None)
case _ => throw new RuntimeException("Multiple UUID keys")
}
}
} else Left(s"apikey [$uid] does not match UUID format")
}
/**
* Validates that a new vendorPrefix is not conflicting with an existing one
* (same prefix).
* @param vendorPrefix vendorPrefix of the new API keys being validated
* @return a boolean indicating whether or not we allow this new API key
* vendor prefix
*/
private def validate(vendorPrefix: String): Boolean =
db withDynSession {
!apiKeys
.map(_.vendorPrefix)
.list
.exists(o => (o.startsWith(vendorPrefix) || vendorPrefix.startsWith(o) || o == vendorPrefix) && o != "*")
}
/**
* Adds a new API key.
* @param vendorPrefix vendorPrefix of the new API key
* @param permission permission of the new API key
* @return a status code and a json response pair
*/
private def add(vendorPrefix: String, permission: String): (StatusCode, String) =
db withDynSession {
val uid = UUID.randomUUID()
apiKeys.insert(
ApiKey(uid, vendorPrefix, permission, new LocalDateTime())) match {
case 0 => (InternalServerError, "Something went wrong")
case _ => (OK, uid.toString)
}
}
def addRead(vendorPrefix: String): (StatusCode, String) =
db withDynSession {
if (validate(vendorPrefix)) {
val (_, keyRead) = add(vendorPrefix, "read")
(Created, writePretty(Map("read" -> keyRead)))
} else {
(Unauthorized, result(401, "This vendor prefix is conflicting with an existing one"))
}
}
/**
* Adds both read and write API keys for a vendor prefix after validating it.
* @param vendorPrefix vendorPrefix of the new pair of keys
* @return a status code and a json containing the pair of API keys.
*/
def addReadWrite(vendorPrefix: String): (StatusCode, String) =
db withDynSession {
if (validate(vendorPrefix)) {
val (statusRead, keyRead) = add(vendorPrefix, "read")
val (statusWrite, keyWrite) = add(vendorPrefix, "write")
if(statusRead == InternalServerError || statusWrite == InternalServerError) {
delete(keyRead)
delete(keyWrite)
(InternalServerError, result(500, "Something went wrong"))
} else {
(Created, writePretty(Map("read" -> keyRead, "write" -> keyWrite)))
}
} else {
(Unauthorized, result(401, "This vendor prefix is conflicting with an existing one"))
}
}
/**
* Deletes an API key from its uuid.
* @param uid the API key's uuid
* @return a status code and json response pair
*/
def delete(uid: String): (StatusCode, String) =
if (uid matches uidRegex) {
db withDynSession {
apiKeys.filter(_.uid === UUID.fromString(uid)).delete match {
case 0 => (NotFound, result(404, "API key not found"))
case 1 => (OK, result(200, "API key successfully deleted"))
case _ => (InternalServerError, result(500, "Something went wrong"))
}
}
} else {
(Unauthorized, result(401, "The API key provided is not an UUID"))
}
/**
* Deletes all API keys belonging to the specified vendor prefix.
* @param vendorPrefix vendor prefix of the API keys we want to delete
* @return a (status code, json response) pair
*/
def deleteFromVendorPrefix(vendorPrefix: String): (StatusCode, String) =
db withDynSession {
apiKeys.filter(_.vendorPrefix === vendorPrefix).delete match {
case 0 => (NotFound, result(404, "Vendor prefix not found"))
case 1 => (OK, result(200, "API key deleted for " + vendorPrefix))
case _ => (OK, result(200, "API keys deleted for " + vendorPrefix))
}
}
}
|
snowplow/iglu
|
2-repositories/iglu-server/src/main/scala/com.snowplowanalytics.iglu.server/model/ApiKey.scala
|
Scala
|
apache-2.0
| 7,767
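A usage sketch of the DAO (assumes an already-configured Slick Database instance db; the vendor prefix and uuid are arbitrary examples):
val dao = new ApiKeyDAO(db)
dao.createTable                                        // one-time setup
val (status, json) = dao.addReadWrite("com.example")   // Created + both keys as JSON
val lookup = dao.get("de305d54-75b4-431b-adb2-eb6b9e546014")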
|
package at.logic.gapt.expr.fol
import at.logic.gapt.expr._
import at.logic.gapt.expr.hol._
import at.logic.gapt.expr.schema._
import at.logic.gapt.proofs.HOLSequent
object reduceHolToFol extends reduceHolToFol
/**
* Creates a FOL formula from a HOL formula, but applies transformations which do _not_ preserve validity!
* Transformations applied:
*
* - Replace all subterms (\x.t) by a constant. The scope parameter is needed to pass existing term-constant mappings.
* - Change the type of constants and variables s.t. they are first order (i.e. Const("c", To->Ti) is mapped to FOLConst("c",Ti))
* - Logical operators inside the term structure are replaced by first order terms
*
* @note Make sure you need all of these tricks. To only replace abstraction subterms, use [[replaceAbstractions]].
*
*/
class reduceHolToFol {
private def folexp2term( exp: FOLExpression ) = exp match {
case e: FOLTerm => exp.asInstanceOf[FOLTerm]
case _ => throw new Exception( "Cannot cast " + exp + " to a fol term!" + exp.getClass )
}
/**
* Convenience method when only a single expression is converted. Multiple expressions need to pass a scope which
* holds the replacements which happened so far.
* @param term a HOL expression to convert
* @return the reduced FOL expression
*/
def apply( term: LambdaExpression ): FOLExpression = {
val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
val emptymap = Map[LambdaExpression, StringSymbol]()
apply( term, emptymap, counter )._1
}
/**
* Convenience method when only a single formula is converted. Multiple expressions need to pass a scope which
* holds the replacements which happened so far.
* @param formula a HOL formula to convert
* @return the reduced FOL formula
*/
def apply( formula: HOLFormula ): FOLFormula =
//inner cast needed to call the correct apply method
reduceHolToFol( formula.asInstanceOf[LambdaExpression] ).asInstanceOf[FOLFormula]
/**
* Convenience method when only a single fsequent is converted. Multiple expressions need to pass a scope which
* holds the replacements which happened so far.
* @param fs an fsequent to convert
* @return the reduced fsequent
*/
def apply( fs: HOLSequent ): HOLSequent = {
val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
val emptymap = Map[LambdaExpression, StringSymbol]()
apply( fs, emptymap, counter )._1
}
/**
* Convenience method when a single list of fsequents is converted. Multiple expressions need to pass a scope which
* holds the replacements which happened so far.
* @param fs an fsequent to convert
* @return the reduced fsequent
*/
def apply( fs: List[HOLSequent] ): List[HOLSequent] = {
val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
val emptymap = Map[LambdaExpression, StringSymbol]()
apply( fs, emptymap, counter )._1
}
/**
* Apply method for a formula when scope needs to be passed on in a recursion.
* @param formula the formula to convert
* @param scope a mapping of replaced subterms to the constant names which replaced them. you need this for chained applications, like
* sequents or lists of formulas.
* @param id an object with a function which nextId, which provides new numbers.
* @return a pair of the reduced formula and the updated scope
*/
def apply( formula: HOLFormula, scope: Map[LambdaExpression, StringSymbol], id: { def nextId: Int } ): ( FOLFormula, Map[LambdaExpression, StringSymbol] ) = {
val ( scope_, qterm ) = replaceAbstractions( formula, scope, id )
( apply_( qterm ).asInstanceOf[FOLFormula], scope_ )
}
/**
* Apply method for an expression when scope needs to be passed on in a recursion.
* @param term the expression to convert
* @param scope a mapping of replaced subterms to the constant names which replaced them. you need this for chained applications, like
* sequents or lists of formulas.
* @param id an object with a function which nextId, which provides new numbers.
* @return a pair of the reduced expression and the updated scope
*/
def apply( term: LambdaExpression, scope: Map[LambdaExpression, StringSymbol], id: { def nextId: Int } ) = {
val ( scope_, qterm ) = replaceAbstractions( term, scope, id )
( apply_( qterm ), scope_ )
}
/**
* Apply method for an FSequent when scope needs to be passed on in a recursion.
* @param s the fsequent to convert
* @param scope a mapping of replaced subterms to the constant names which replaced them. you need this for chained applications, like
* sequents or lists of formulas.
* @param id an object with a function which nextId, which provides new numbers.
* @return a pair of the reduced expression and the updated scope
*/
def apply( s: HOLSequent, scope: Map[LambdaExpression, StringSymbol], id: { def nextId: Int } ): ( HOLSequent, Map[LambdaExpression, StringSymbol] ) = {
val ( scope1, ant ) = s.antecedent.foldLeft( ( scope, List[HOLFormula]() ) )( ( r, formula ) => {
val ( scope_, f_ ) = replaceAbstractions( formula, r._1, id )
( scope_, f_.asInstanceOf[HOLFormula] :: r._2 )
} )
val ( scope2, succ ) = s.succedent.foldLeft( ( scope1, List[HOLFormula]() ) )( ( r, formula ) => {
val ( scope_, f_ ) = replaceAbstractions( formula, r._1, id )
( scope_, f_.asInstanceOf[HOLFormula] :: r._2 )
} )
( HOLSequent( ant.reverse map apply_, succ.reverse map apply_ ), scope ++ scope2 )
}
/**
* Apply method for a list of FSequents when scope needs to be passed on in a recursion.
* @param fss the fsequents to convert
* @param scope a mapping of replaced subterms to the constant names which replaced them. you need this for chained applications, like
* sequents or lists of formulas.
* @param id an object with a function which nextId, which provides new numbers.
* @return a pair of the reduced expression and the updated scope
*/
def apply( fss: List[HOLSequent], scope: Map[LambdaExpression, StringSymbol], id: { def nextId: Int } ): ( List[HOLSequent], Map[LambdaExpression, StringSymbol] ) = {
fss.foldRight( ( List[HOLSequent](), scope ) )( ( fs, pair ) => {
val ( list, scope ) = pair
val ( fs_, scope_ ) = apply( fs, scope, id )
( fs_ :: list, scope_ )
} )
}
private def apply_( f: HOLFormula ): FOLFormula =
apply_( f.asInstanceOf[LambdaExpression] ).asInstanceOf[FOLFormula]
//assumes we are on the logical level of the hol formula - all types are mapped to i, i>o or i>i>o respectively
private def apply_( term: LambdaExpression ): FOLExpression = {
term match {
case e: FOLExpression => e // if it's already FOL - great, we are done.
case indexedFOVar( name, index ) => FOLVar( name ++ intTermLength( index.asInstanceOf[IntegerTerm] ).toString )
case foVar( name ) => FOLVar( name )
case foConst( name ) => FOLConst( name )
case Const( n, To ) => FOLAtom( n, Nil )
case Var( n, _ ) => FOLVar( n )
case Const( n, _ ) => FOLConst( n )
case Top() => Top()
case Bottom() => Bottom()
case Neg( n ) => Neg( apply_( n ).asInstanceOf[FOLFormula] )
case And( n1, n2 ) => And( apply_( n1 ), apply_( n2 ) )
case Or( n1, n2 ) => Or( apply_( n1 ), apply_( n2 ) )
case Imp( n1, n2 ) => Imp( apply_( n1 ), apply_( n2 ) )
case All( v: Var, n ) => All( apply_( v ).asInstanceOf[FOLVar], apply_( n ) )
case Ex( v: Var, n ) => Ex( apply_( v ).asInstanceOf[FOLVar], apply_( n ) )
case HOLAtom( Const( n, _ ), ls ) =>
FOLAtom( n, ls.map( x => folexp2term( apply_termlevel( x ) ) ) )
case HOLAtom( Var( n, _ ), ls ) =>
FOLAtom( n, ls.map( x => folexp2term( apply_termlevel( x ) ) ) )
case HOLFunction( Const( n, _ ), ls ) =>
FOLFunction( n, ls.map( x => folexp2term( apply_( x ) ) ) )
case HOLFunction( Var( n, _ ), ls ) =>
FOLFunction( n, ls.map( x => folexp2term( apply_( x ) ) ) )
//this case is added for schema
case App( func, arg ) => {
func match {
case Var( sym, _ ) =>
val new_arg = apply_( arg ).asInstanceOf[FOLTerm]
return FOLFunction( sym, new_arg :: Nil )
case _ =>
println( "WARNING: FO schema term: " + term )
throw new Exception( "Probably unrecognized object from schema!" )
}
}
case _ => throw new IllegalArgumentException( "Cannot reduce hol term: " + term.toString + " to fol, as it is a higher-order variable, function or atom" )
}
}
//if we encountered an atom, we need to convert logical formulas to the term level too
private def apply_termlevel( term: LambdaExpression ): FOLTerm = {
term match {
case e: FOLTerm => e // if it's already FOL - great, we are done.
case indexedFOVar( name, index ) => FOLVar( name ++ intTermLength( index.asInstanceOf[IntegerTerm] ).toString )
case foVar( name ) => FOLVar( name.toString )
case foConst( name ) => FOLConst( name.toString )
case Var( n, _ ) => FOLVar( n )
case Const( n, _ ) => FOLConst( n )
//we cannot use the logical symbols directly because they are treated differently by the Function matcher
case Neg( n ) => FOLFunction( NegC.name, List( apply_termlevel( n ) ) )
case And( n1, n2 ) => FOLFunction( AndC.name, List( apply_termlevel( n1 ), apply_termlevel( n2 ) ) )
case Or( n1, n2 ) => FOLFunction( OrC.name, List( apply_termlevel( n1 ), apply_termlevel( n2 ) ) )
case Imp( n1, n2 ) => FOLFunction( ImpC.name, List( apply_termlevel( n1 ), apply_termlevel( n2 ) ) )
case All( v: Var, n ) =>
FOLFunction( ForallC.name, List( apply_termlevel( v ).asInstanceOf[FOLVar], apply_termlevel( n ) ) )
case Ex( v: Var, n ) =>
FOLFunction( ExistsC.name, List( apply_termlevel( v ).asInstanceOf[FOLVar], apply_termlevel( n ) ) )
case HOLAtom( head, ls ) =>
FOLFunction( head.toString, ls.map( x => folexp2term( apply_termlevel( x ) ) ) )
case HOLFunction( Const( name, _ ), ls ) =>
FOLFunction( name, ls.map( x => folexp2term( apply_termlevel( x ) ) ) )
//this case is added for schema
/*
case App(func,arg) => {
val nLine = sys.props("line.separator")
func match {
case Var(sym,_) => {
val new_arg = apply_(arg).asInstanceOf[FOLTerm]
return at.logic.gapt.language.fol.Function(new ConstantStringSymbol(sym.toString), new_arg::Nil)
}
case _ => println( nLine + "WARNING: FO schema term!" + nLine)
}
throw new Exception( nLine + "Probably unrecognized object from schema!" + nLine)
}
*/
// This case replaces an abstraction by a function term.
//
// the scope key we choose is the Abs itself, since we want all abstractions that are identical up to a variant (variable renaming) to use the same symbol
//
// TODO: at the moment, syntactic equality is used here... This means that alpha-equivalent terms may be replaced
// by different constants, which is undesirable.
/*
case a @ Abs(v, exp) => {
val sym = scope.getOrElseUpdate(a.variant(new VariantGenerator(new {var idd = 0; def nextId = {idd = idd+1; idd}}, "myVariantName")), ConstantStringSymbol("q_{" + id.nextId + "}"))
val freeVarList = a.getFreeVariables.toList.sortWith((x,y) => x.toString < y.toString).map(x => apply(x.asInstanceOf[LambdaExpression],scope,id))
if (freeVarList.isEmpty) FOLConst(sym) else Function(sym, freeVarList.asInstanceOf[List[FOLTerm]])
}
*/
case _ => throw new IllegalArgumentException( "Cannot reduce hol term: " + term.toString + " to fol, as it is a higher-order variable, function or atom" )
}
}
//transforms a ground integer term to Int
private def intTermLength( t: IntegerTerm ): Int = t match {
case IntZero() => 0
case Succ( t1 ) => 1 + intTermLength( t1 )
case _ => throw new Exception( sys.props( "line.separator" ) + "Error in reduceHolToFol.length(...) !" + sys.props( "line.separator" ) )
}
}
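// A minimal usage sketch. Hedged assumptions: the enclosing converter is exposed
// as an object named reduceHolToFol (as the error message above suggests), and
// f1, f2 are HOL formulas supplied by the caller. The scope and the id counter
// must be shared between the calls so that identical abstractions occurring in
// different formulas are replaced by the same fresh constant q_{i}.
object reduceHolToFolUsageSketch {
  def demo( f1: HOLFormula, f2: HOLFormula ): ( FOLFormula, FOLFormula ) = {
    val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
    val ( fol1, scope1 ) = reduceHolToFol( f1, Map.empty[LambdaExpression, StringSymbol], counter )
    // passing scope1 on reuses the q_{i} names already assigned while reducing f1
    val ( fol2, _ ) = reduceHolToFol( f2, scope1, counter )
    ( fol1, fol2 )
  }
}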
object replaceAbstractions extends replaceAbstractions
/**
* Replace lambda-abstractions by constants.
*
* Each abstraction in an [[at.logic.gapt.proofs.HOLSequent]] is replaced by a separate constant symbol; the used
* constants are returned in a Map.
*/
class replaceAbstractions {
type ConstantsMap = Map[LambdaExpression, StringSymbol]
def apply( l: List[HOLSequent] ): ( ConstantsMap, List[HOLSequent] ) = {
val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
l.foldLeft( ( Map[LambdaExpression, StringSymbol](), List[HOLSequent]() ) )( ( rec, el ) => {
val ( scope_, f ) = rec
val ( nscope, rfs ) = replaceAbstractions( el, scope_, counter )
( nscope, rfs :: f )
} )
}
def apply( f: HOLSequent, scope: ConstantsMap, id: { def nextId: Int } ): ( ConstantsMap, HOLSequent ) = {
val ( scope1, ant ) = f.antecedent.foldLeft( ( scope, List[HOLFormula]() ) )( ( rec, formula ) => {
val ( scope_, f ) = rec
val ( nscope, nformula ) = replaceAbstractions( formula, scope_, id )
( nscope, nformula.asInstanceOf[HOLFormula] :: f )
} )
val ( scope2, succ ) = f.succedent.foldLeft( ( scope1, List[HOLFormula]() ) )( ( rec, formula ) => {
val ( scope_, f ) = rec
val ( nscope, nformula ) = replaceAbstractions( formula, scope_, id )
( nscope, nformula.asInstanceOf[HOLFormula] :: f )
} )
( scope2, HOLSequent( ant.reverse, succ.reverse ) )
}
def apply( e: LambdaExpression ): LambdaExpression = {
val counter = new {
private var state = 0;
def nextId = {
state = state + 1; state
}
}
apply( e, Map[LambdaExpression, StringSymbol](), counter )._2
}
def apply( formula: HOLFormula ): HOLFormula =
apply( formula.asInstanceOf[LambdaExpression] ).asInstanceOf[HOLFormula]
// scope and id are used to give the same names for new functions and constants between different calls of this method
def apply( e: LambdaExpression, scope: ConstantsMap, id: { def nextId: Int } ): ( ConstantsMap, LambdaExpression ) = e match {
case Var( _, _ ) =>
( scope, e )
case Const( _, _ ) =>
( scope, e )
//quantifiers should be kept
case All( x, f ) =>
val ( scope_, e_ ) = replaceAbstractions( f, scope, id )
( scope_, All( x, e_.asInstanceOf[HOLFormula] ) )
case Ex( x, f ) =>
val ( scope_, e_ ) = replaceAbstractions( f, scope, id )
( scope_, Ex( x, e_.asInstanceOf[HOLFormula] ) )
case App( s, t ) =>
val ( scope1, s1 ) = replaceAbstractions( s, scope, id )
val ( scope2, t1 ) = replaceAbstractions( t, scope1, id )
( scope2, App( s1, t1 ) )
// This case replaces an abstraction by a function term.
// the scope key we choose is the Abs itself, since we want all abstractions that are identical up to a variant (variable renaming) to use the same symbol
case Abs( v, exp ) =>
//systematically rename free variables for the index
//val normalizeda = e.variant(new VariantGenerator(new {var idd = 0; def nextId = {idd = idd+1; idd}}, "myVariantName"))
//TODO: check if variable renaming is really what we want
val ( normalizeda, mapping ) = normalizeFreeVariables( e )
//println("e: "+e)
//println("norm: "+normalizeda)
//update scope with a new constant if neccessary
//println(scope)
val scope_ = if ( scope contains normalizeda ) scope else scope + ( ( normalizeda, StringSymbol( "q_{" + id.nextId + "}" ) ) )
//println(scope_)
val sym = scope_( normalizeda )
val freeVarList = freeVariables( e ).toList.sortBy( _.toString ).asInstanceOf[List[LambdaExpression]]
if ( freeVarList.isEmpty )
( scope_, Const( sym, e.exptype ) )
else {
val c = Const( sym, FunctionType( e.exptype, freeVarList.map( _.exptype ) ) )
( scope_, HOLFunction( c, freeVarList ) )
}
case _ =>
throw new Exception( "Unhandled case in abstraction replacement!" + e )
}
}
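// A small sketch of the effect of replaceAbstractions. Hedged assumptions: the
// identifiers x, y, P are hypothetical, and Ti/To denote the individual and
// boolean base types used elsewhere in this package. An abstraction with free
// variables is replaced by a function term whose head is a fresh constant q_{i}
// applied to those free variables.
object replaceAbstractionsSketch {
  def demo( y: Var ): LambdaExpression = {
    val x = Var( "x", Ti )
    val p = Const( "P", FunctionType( To, List( Ti, Ti ) ) )
    val abs = Abs( x, HOLAtom( p, List( x, y ) ) ) // lambda x. P(x, y), with y free
    // the result is (up to numbering) q_{1}(y); the Abs -> q_{1} mapping can be
    // recovered through the overloads that also return the ConstantsMap
    replaceAbstractions( abs )
  }
}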
object undoReplaceAbstractions extends undoReplaceAbstractions
/**
* Replaces the constants introduced by [[replaceAbstractions]] with the original lambda-abstractions.
*/
class undoReplaceAbstractions {
import at.logic.gapt.expr.fol.replaceAbstractions.ConstantsMap
def apply( fs: HOLSequent, map: ConstantsMap ): HOLSequent = HOLSequent(
fs.antecedent.map( apply( _, map ) ),
fs.succedent.map( apply( _, map ) )
)
def apply( f: HOLFormula, map: ConstantsMap ): HOLFormula = apply( f.asInstanceOf[LambdaExpression], map ).asInstanceOf[HOLFormula]
def apply( e: LambdaExpression, map: ConstantsMap ): LambdaExpression = {
val stringsmap = map.map( x => ( x._2.toString(), x._1 ) ) //inverting the map works because the symbols are unique
HOLPosition.getPositions( e ).foldLeft( e )( ( exp, position ) =>
//we check if the position is a constant with an abstraction symbol
e( position ) match {
case Const( name, _ ) if stringsmap.contains( name ) =>
//if yes, we replace it by the original expression
exp.replace( position, stringsmap( name ) )
case _ => exp
} )
}
}
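// Round-trip sketch (assuming f is an arbitrary HOLFormula): replacing the
// abstractions and then undoing the replacement should restore the formula up
// to the renaming performed by normalizeFreeVariables, since the ConstantsMap
// can be inverted (the introduced symbols are unique).
object abstractionRoundTripSketch {
  def demo( f: HOLFormula ): HOLFormula = {
    val counter = new { private var state = 0; def nextId = { state = state + 1; state } }
    val ( constants, replaced ) = replaceAbstractions( f, Map.empty[LambdaExpression, StringSymbol], counter )
    // the cast assumes the outermost structure remains a formula, which holds
    // unless f itself is an abstraction
    undoReplaceAbstractions( replaced.asInstanceOf[HOLFormula], constants )
  }
}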
/**
* Introducing abstractions and converting to fol changes more complex types to fol compatible ones. With changeTypeIn
* you can change them back.
*/
object changeTypeIn {
type TypeMap = Map[String, Ty]
/* TODO: this is broken, since e.g. for (a b) q with type(q)=alpha, type(b)=beta then type(a)=beta > (alpha > gamma)
we need to actually change the type of a when changing the type of q
*/
/*
def oldapply(e:LambdaExpression, tmap : TypeMap) : LambdaExpression = e match {
case Var(name, ta) =>
if (tmap.contains(name.toString()))
e.factory.createVar(name, tmap(name.toString()))
else
e
case App(s,t) => s.factory.createApp(oldapply(s,tmap), oldapply(t,tmap))
case Abs(x,t) => t.factory.createAbs(oldapply(x,tmap).asInstanceOf[Var], oldapply(t,tmap))
} */
//Remark: this only works for changing the type of leaves in the term tree!
def apply( e: LambdaExpression, tmap: TypeMap ): LambdaExpression = e match {
case Var( name, ta ) => if ( tmap contains name.toString() ) Var( name, tmap( name.toString() ) ) else
Var( name, ta )
case Const( name, ta ) => if ( tmap contains name.toString() ) Const( name, tmap( name.toString() ) ) else
Const( name, ta )
case HOLFunction( Const( f, exptype ), args ) =>
val args_ = args.map( x => apply( x, tmap ) )
val freturntype = exptype match { case FunctionType( r, _ ) => r }
val f_ = Const( f, FunctionType( freturntype, args.map( _.exptype ) ) )
HOLFunction( f_, args_ )
case HOLFunction( Var( f, exptype ), args ) =>
val args_ = args.map( x => apply( x, tmap ) )
val freturntype = exptype match { case FunctionType( r, _ ) => r }
val f_ = Var( f, FunctionType( freturntype, args.map( _.exptype ) ) )
HOLFunction( f_, args_ )
case HOLAtom( Const( f, exptype ), args ) =>
val args_ = args.map( x => apply( x, tmap ) )
val f_ = Const( f, FunctionType( To, args.map( _.exptype ) ) )
HOLAtom( f_, args_ )
case HOLAtom( Var( f, exptype ), args ) =>
val args_ = args.map( x => apply( x, tmap ) )
val f_ = Var( f, FunctionType( To, args.map( _.exptype ) ) )
HOLAtom( f_, args_ )
case Neg( x ) => Neg( apply( x, tmap ) )
case And( s, t ) => And( apply( s, tmap ), apply( t, tmap ) )
case Or( s, t ) => Or( apply( s, tmap ), apply( t, tmap ) )
case Imp( s, t ) => Imp( apply( s, tmap ), apply( t, tmap ) )
case All( x, t ) => All( apply( x.asInstanceOf[Var], tmap ).asInstanceOf[Var], apply( t, tmap ) )
case Ex( x, t ) => Ex( apply( x.asInstanceOf[Var], tmap ).asInstanceOf[Var], apply( t, tmap ) )
case Abs( x, t ) => Abs( apply( x.asInstanceOf[Var], tmap ).asInstanceOf[Var], apply( t, tmap ) )
case App( s, t ) => App( apply( s, tmap ), apply( t, tmap ) )
case _ => throw new Exception( "Unhandled case of a HOL Formula! " + e )
}
def apply( e: FOLTerm, tmap: TypeMap ): FOLTerm = apply( e.asInstanceOf[LambdaExpression], tmap ).asInstanceOf[FOLTerm]
def apply( e: HOLFormula, tmap: TypeMap ): HOLFormula = apply( e.asInstanceOf[LambdaExpression], tmap ).asInstanceOf[HOLFormula]
def apply( e: FOLFormula, tmap: TypeMap ): FOLFormula = apply( e.asInstanceOf[LambdaExpression], tmap ).asInstanceOf[FOLFormula]
def apply( fs: HOLSequent, tmap: TypeMap ): HOLSequent = HOLSequent(
fs.antecedent.map( x => apply( x, tmap ) ),
fs.succedent.map( x => apply( x, tmap ) )
)
//different names because of type erasure
private def holsub( s: Substitution, tmap: TypeMap ): Substitution = Substitution(
s.map.map( x =>
( apply( x._1, tmap ).asInstanceOf[Var], apply( x._2, tmap ) ) )
)
private def folsub( s: FOLSubstitution, tmap: TypeMap ): FOLSubstitution = FOLSubstitution( s.folmap.map( x =>
( apply( x._1, tmap ).asInstanceOf[FOLVar], apply( x._2, tmap ) ) ) )
}
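// Sketch with a hypothetical constant name "f": after the reduction to FOL every
// symbol lives on individuals, and changeTypeIn can restore a richer type at a
// leaf, e.g. turning f: i back into f: i > o. Only leaf types are given by the
// map; the types of surrounding function heads are recomputed from the arguments
// (see the HOLFunction/HOLAtom cases above). Ti denotes the individual type.
object changeTypeInSketch {
  def demo( e: LambdaExpression ): LambdaExpression =
    changeTypeIn( e, Map[String, Ty]( "f" -> FunctionType( To, List( Ti ) ) ) )
}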
|
loewenheim/gapt
|
src/main/scala/at/logic/gapt/expr/fol/hol2fol.scala
|
Scala
|
gpl-3.0
| 21,747
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnetexamples.infer.objectdetector
import java.io.File
import org.apache.mxnet.Context
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.slf4j.LoggerFactory
import scala.sys.process.Process
class ObjectDetectorExampleSuite extends FunSuite with BeforeAndAfterAll {
private val logger = LoggerFactory.getLogger(classOf[ObjectDetectorExampleSuite])
test("testObjectDetectionExample") {
logger.info("Downloading resnetssd model")
val tempDirPath = System.getProperty("java.io.tmpdir")
logger.info("tempDirPath: %s".format(tempDirPath))
val modelBase = "https://s3.amazonaws.com/model-server/models/resnet50_ssd/"
val synsetBase = "https://raw.githubusercontent.com/awslabs/mxnet-model-server/master/examples/"
val imageBase = "https://s3.amazonaws.com/model-server/inputs/"
Process("wget " + modelBase + "resnet50_ssd_model-symbol.json " + "-P " +
tempDirPath + "/resnetssd/ -q") !
Process("wget " + modelBase + "resnet50_ssd_model-0000.params " +
"-P " + tempDirPath + "/resnetssd/ -q") !
Process("wget " + synsetBase + "ssd/synset.txt " + "-P" +
tempDirPath + "/resnetssd/ -q") !
Process("wget " +
imageBase + "dog-ssd.jpg " +
"-P " + tempDirPath + "/inputImages/") !
val modelDirPath = tempDirPath + File.separator + "resnetssd/"
val inputImagePath = tempDirPath + File.separator +
"inputImages/dog-ssd.jpg"
val inputImageDir = tempDirPath + File.separator + "inputImages/"
var context = Context.cpu()
if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
System.getenv("SCALA_TEST_ON_GPU").toInt == 1) {
context = Context.gpu()
}
val output = SSDClassifierExample.runObjectDetectionSingle(modelDirPath + "resnet50_ssd_model",
inputImagePath, context)
assert(output(0)(0)._1 === "car")
val outputList = SSDClassifierExample.runObjectDetectionBatch(
modelDirPath + "resnet50_ssd_model",
inputImageDir, context)
assert(outputList(0)(0)._1 === "car")
Process("rm -rf " + modelDirPath + " " + inputImageDir) !
}
}
|
CodingCat/mxnet
|
scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala
|
Scala
|
apache-2.0
| 2,919
|
package nz.wicker.autoencoder.math.optimization
import scala.math.abs
import scala.math.max
import scala.math.min
import scala.math.sqrt
import nz.wicker.autoencoder.math.structure.VectorSpace
import nz.wicker.autoencoder.visualization.Observer
/**
* Implementation of the conjugate gradient descent described in the
* article "A new conjugate gradient method with guaranteed descent and
* an efficient line search" by William W. Hager and Hongchao Zhang.
*
* It uses a Polak-Ribiere-Polyak-like update for the calculation of the
* next direction and an inexact line search with approximate Wolfe conditions.
*
* This implementation seems broken: it does not outperform
* naive gradient descent on fairly simple 2D functions.
*/
class ConjugateGradientDescent_HagerZhang(
configuration: ConjugateGradientDescent_HagerZhangConfiguration
) extends Minimizer {
import configuration._
def minimize[V <: VectorSpace[V]](
f: DifferentiableFunction[V],
start: V,
progressObservers: List[Observer[V]] = Nil
): V = {
// we formulate the algorithm tail-recursively
def rec(
currentPosition: V,
currentGradient: V,
currentDirection: V,
remainingIterations: Int,
history: History
): V = {
if (remainingIterations <= 0) {
// force the algorithm to terminate
println("OPTI TERMINATED. Full history: " + history)
currentPosition
} else {
// TODO: add some "good-enough"-conditions
val (alpha, nextHistory) = lineSearch(
f,
currentPosition,
currentDirection,
history
)
val nextPosition = currentPosition + currentDirection * alpha
val nextGradient = f.grad(nextPosition)
// helper variables that are used to calculate the next direction
val gradientDifference = nextGradient - currentGradient
val denominator = currentDirection dot gradientDifference
val beta =
(gradientDifference -
currentDirection * 2 * gradientDifference.normSq / denominator
) dot nextGradient / denominator
val bound = -1 / sqrt(
currentDirection.normSq * min(eta * eta, currentGradient.normSq)
)
val boundedBeta = max(beta, bound)
val nextDirection = - nextGradient + currentDirection * boundedBeta
rec(
nextPosition,
nextGradient,
nextDirection,
remainingIterations - 1,
nextHistory
)
}
}
// call the recursive helper method with the start values
val startGradient = f.grad(start);
rec(start, startGradient, -startGradient, maxIters, new History())
}
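// Scalar sketch of the direction update computed in rec above, using plain
// Doubles instead of VectorSpace elements (the inputs are hypothetical).
// beta is the Polak-Ribiere-Polyak-like coefficient of Hager & Zhang, clipped
// from below by a bound that guarantees descent; the next direction would then
// be -gNew + d * boundedBeta.
private def hagerZhangBetaSketch(d: Double, gOld: Double, gNew: Double): Double = {
  val yk = gNew - gOld                           // gradient difference
  val denom = d * yk                             // currentDirection dot gradientDifference
  val beta = (yk - d * 2 * yk * yk / denom) * gNew / denom
  val bound = -1 / math.sqrt(d * d * math.min(eta * eta, gOld * gOld))
  math.max(beta, bound)                          // the bounded beta used above
}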
/**
* Sometimes it's possible to learn something about the problem while
* executing line searches, and to adjust some parameters. Such adjusted
* parameters are returned by line search, and passed to the next line
* search.
*/
protected case class History(
bisectionSteps: List[Double] = Nil
) {
/**
* adds the most recent bisection step to the log, returns the
* step together with new History
*/
def withBisectionStep(step: Double): (Double, History) = {
(step, History(step :: this.bisectionSteps))
}
def proposeInitialBisectionStep: Double = {
if (bisectionSteps.isEmpty) 1 else bisectionSteps.head
}
}
/**
* Hybrid line search algorithm, which starts with the dumb & brutal
* backtracking algorithm, and switches to a faster strategy if it
* finds appropriate conditions.
*/
protected def lineSearch[V <: VectorSpace[V]](
f: DifferentiableFunction[V],
position: V,
direction: V,
history: History
): (Double, History) = {
// define the differentiable function phi(alpha) = f(pos + alpha * dir)
val phi = new DifferentiableFunction[Double]() {
// dirty work-around to avoid multiple evaluation on interval
// boundaries. One should rewrite the method signatures instead...
private var cachedValues = List[(Double, Double, Double)]()
override def valueAndGrad(alpha: Double): (Double, Double) = {
for ((t, value, gradient) <- cachedValues) {
if (t == alpha) {
return (value, gradient)
}
}
val timeStart = System.currentTimeMillis()
val (value, fGrad) = f.valueAndGrad(position + direction * alpha)
val grad = fGrad dot direction
val duration = System.currentTimeMillis() - timeStart
// println("OPTI: " +
// "eval phi(" + alpha + ") = " + value + " phi' = " + grad +
// " [" + duration + "ms]")
cachedValues ::= (alpha, value, grad)
(value, grad)
}
override def apply(alpha: Double) = valueAndGrad(alpha)._1
override def grad(alpha: Double) = valueAndGrad(alpha)._2
}
// evaluate the function at 0, these values are used often
val (phiValueAtZero, phiGradAtZero) = phi.valueAndGrad(0)
// interval update
// The notation in the paper is just gut-wrenching...
// weird non-obviously complementary if-conditions and goto's... :(((
def intervalUpdate(a: Double, b: Double, c: Double): (Double, Double) = {
val scaledEpsilon = epsilon * abs(phiValueAtZero) // \\epsilon_k in paper
val (phiValueAtC, phiGradAtC) = phi.valueAndGrad(c)
if (c <= a || c >= b) {
(a, b)
} else {
if (phiGradAtC >= 0) {
(a, c)
} else {
if (phiValueAtC <= phiValueAtZero + scaledEpsilon) {
(c, b)
} else {
def rec(a: Double, b: Double): (Double, Double) = {
val d = (1 - theta) * a + theta * b
val (phiValueAtD, phiGradAtD) = phi.valueAndGrad(d)
if (phiGradAtD >= 0) {
(a, d)
} else {
if (phiValueAtD <= phiValueAtZero + scaledEpsilon)
rec(d, b)
else
rec(a, d)
}
}
rec(a, c)
}
}
}
}
// secant step: the root of the linear interpolant of phi' between a and b
def secant(a: Double, phiGradAtA: Double, b: Double, phiGradAtB: Double) = {
(a * phiGradAtB - b * phiGradAtA) / (phiGradAtB - phiGradAtA)
}
// double secant step
def doubleSecant(a: Double, b: Double) = {
  val (phiValueAtA, phiGradAtA) = phi.valueAndGrad(a)
  val (phiValueAtB, phiGradAtB) = phi.valueAndGrad(b)
  // the secant step interpolates the derivative phi', not the value
  val c = secant(a, phiGradAtA, b, phiGradAtB)
  val (nextA, nextB) = intervalUpdate(a, b, c)
  if (c != nextA && c != nextB) {
    (nextA, nextB)
  } else {
    // repeat the secant step on whichever endpoint c landed on
    val cBar = if (c == nextA) {
      val (phiValueAtNextA, phiGradAtNextA) = phi.valueAndGrad(nextA)
      secant(a, phiGradAtA, nextA, phiGradAtNextA)
    } else {
      val (phiValueAtNextB, phiGradAtNextB) = phi.valueAndGrad(nextB)
      secant(b, phiGradAtB, nextB, phiGradAtNextB)
    }
    intervalUpdate(nextA, nextB, cBar)
  }
}
// find an initial interval satisfying (4.4)
// (could NOT find any algorithm for that anywhere
// I can't even understand why this interval is supposed to
// exist [good luck searching this interval for exp(-x) starting at 0???])
// val (a0, b0) = initialInterval(phi)
// perform nesting interval updates until either the original or the
// approximate Wolfe conditions are satisfied
def fastLineSearch(
start: Double,
end: Double,
remainingEvaluations: Int,
history: History
): (Double, History) = {
if (remainingEvaluations <= 0) {
history.withBisectionStep((start + end) / 2)
} else {
val (phiValueAtStart, phiGradAtStart) = phi.valueAndGrad(start)
val (phiValueAtEnd, phiGradAtEnd) = phi.valueAndGrad(end)
if (wolfeConditions(start, phiValueAtStart, phiGradAtStart) ||
approximateWolfeConditions(start, phiValueAtStart, phiGradAtStart)
) {
history.withBisectionStep(start)
} else if(wolfeConditions(end, phiValueAtEnd, phiGradAtEnd) ||
approximateWolfeConditions(end, phiValueAtEnd, phiGradAtEnd)) {
history.withBisectionStep(end)
} else {
val (nextStart, nextEnd) = {
val (startCandidate, endCandidate) = doubleSecant(start, end)
val candidateLength = endCandidate - startCandidate
val length = end - start
if (candidateLength > gamma * length) {
intervalUpdate(
startCandidate,
endCandidate,
(startCandidate + endCandidate) / 2
)
} else {
(startCandidate, endCandidate)
}
}
fastLineSearch(
nextStart,
nextEnd,
remainingEvaluations - 1,
history
)
}
}
}
def wolfeConditions(
alpha: Double,
valueAtAlpha: Double,
derivativeAtAlpha: Double
): Boolean = {
(valueAtAlpha - phiValueAtZero <= phiGradAtZero * delta * alpha) &&
(derivativeAtAlpha >= phiGradAtZero * sigma)
}
def approximateWolfeConditions(
alpha: Double,
valueAtAlpha: Double,
derivativeAtAlpha: Double
): Boolean = {
(valueAtAlpha <= phiValueAtZero + epsilon * abs(valueAtAlpha)) &&
(phiGradAtZero * (2 * delta - 1) >= derivativeAtAlpha) &&
(derivativeAtAlpha >= sigma * phiGradAtZero)
}
// start with a simpler algorithm, as soon as the prerequisites for
// the more sophisticated algorithm are fulfilled, start the more
// sophisticated algorithm.
def brutalWolfeBisection(
start: Double,
t: Double,
end: Double,
remainingEvaluations: Int
): (Double, History) = {
if (remainingEvaluations == 0) {
// return the current value and terminate
history.withBisectionStep(t)
} else {
// keep iterating
val (phiValue, phiGrad) = phi.valueAndGrad(t)
if (phiValue < phiValueAtZero + epsilon * abs(phiValueAtZero) &&
phiGrad >= 0) {
// launch the faster algorithm of Hager & Zhang
fastLineSearch(start, t, remainingEvaluations, history)
} else {
// continue with the brutal bisection algorithm
if (phiValue > phiValueAtZero + delta * t * phiGradAtZero) {
brutalWolfeBisection(
start,
(start + t) / 2,
t,
remainingEvaluations - 1
)
} else if (phiGrad < sigma * phiGradAtZero) {
brutalWolfeBisection(
t,
if (end.isInfinite()) 2 * t else (t + end) / 2,
end,
remainingEvaluations - 1
)
} else {
history.withBisectionStep(t)
}
}
}
}
brutalWolfeBisection(
0,
history.proposeInitialBisectionStep,
Double.PositiveInfinity,
maxEvalsPerLineSearch
)
}
/**
* Finds an initial interval that satisfies condition (4.4)
*
* 1) Why should there be something like that
* 2) How am I supposed to find it?...
*/
private def initialInterval(
f: DifferentiableFunction[Double]
): (Double, Double) = {
(0, 1d/0d)
}
}
|
joergwicker/autoencoder
|
src/main/scala/nz/wicker/autoencoder/math/optimization/ConjugateGradientDescent_HagerZhang.scala
|
Scala
|
gpl-3.0
| 11,493
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.connector.read.{InputPartition, Scan}
import org.apache.spark.sql.connector.read.streaming.{ContinuousPartitionReaderFactory, ContinuousStream, Offset}
import org.apache.spark.sql.execution.streaming.continuous._
/**
* Physical plan node for scanning data from a streaming data source with continuous mode.
*/
case class ContinuousScanExec(
output: Seq[Attribute],
@transient scan: Scan,
@transient stream: ContinuousStream,
@transient start: Offset) extends DataSourceV2ScanExecBase {
// TODO: unify the equal/hashCode implementation for all data source v2 query plans.
override def equals(other: Any): Boolean = other match {
case other: ContinuousScanExec => this.stream == other.stream
case _ => false
}
override def hashCode(): Int = stream.hashCode()
override lazy val partitions: Seq[InputPartition] = stream.planInputPartitions(start)
override lazy val readerFactory: ContinuousPartitionReaderFactory = {
stream.createContinuousReaderFactory()
}
override lazy val inputRDD: RDD[InternalRow] = {
EpochCoordinatorRef.get(
sparkContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY),
sparkContext.env)
.askSync[Unit](SetReaderPartitions(partitions.size))
new ContinuousDataSourceRDD(
sparkContext,
conf.continuousStreamingExecutorQueueSize,
conf.continuousStreamingExecutorPollIntervalMs,
partitions,
schema,
readerFactory,
customMetrics)
}
}
|
ueshin/apache-spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ContinuousScanExec.scala
|
Scala
|
apache-2.0
| 2,503
|
package com.benkolera.Rt
import org.specs2._
import scalaz._
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
object QueryBuilderSpec extends mutable.Specification {
import QueryBuilder._
val dtz = DateTimeZone.UTC
val dt = new DateTime(2013,7,6,13,33,42,DateTimeZone.forOffsetHours(10))
val bqs = buildQueryString(dtz) _
"The query AST" should {
"Print a simple comparison with an unquoted value" in {
val s = bqs(Compare( TicketId , Eq , IntValue( 1337 ) ) )
s must_== ("Id = 1337")
}
"Print a simple comparison with an quoted value" in {
val s = bqs(Compare( TicketId , Eq , StringValue( "1337" )))
s must_== ("Id = '1337'")
}
"Print a simple comparison with a DateTime printed in UTC" in {
val s = bqs( Compare( Due , Eq , DateTimeValue( dt ) ) )
s must_== ("Due = '2013-07-06 03:33:42'")
}
"Print a conjunction" in {
val q1 = Compare( TicketId , Eq , IntValue( 1337 ) )
val q2 = Compare( Queue, Eq , StringValue( "dev.support" ) )
val q3 = Compare( Status, Eq , StringValue( "open" ) )
val s = bqs( And(q1,q2,q3) )
s must_== ("(Id = 1337 AND Queue = 'dev.support' AND Status = 'open')")
}
"Print a disjunction" in {
val q1 = Compare( TicketId , Eq , IntValue( 1337 ) )
val q2 = Compare( Queue, Eq , StringValue( "dev.support" ) )
val q3 = Compare( Status, Eq , StringValue( "open" ) )
val s = bqs( Or(q1,q2,q3) )
s must_== ("(Id = 1337 OR Queue = 'dev.support' OR Status = 'open')")
}
"Print nested queries" in {
val q1 = Compare( Queue, Eq , StringValue( "dev" ) )
val q2 = Compare( Status, Eq , StringValue( "open" ) )
val q3 = Compare( Status, Eq , StringValue( "new" ) )
val s = bqs( And(q1,Or(q2,q3)) )
s must_== ("(Queue = 'dev' AND (Status = 'open' OR Status = 'new'))")
}
"Print out an 'in' comparison" in {
val q1 = SetCompare(
Status, In , NonEmptyList( StringValue("open") , StringValue("new") )
)
val s = bqs( q1 )
s must_== ("(Status = 'open' OR Status = 'new')")
}
"Print out an 'not in' comparison" in {
val q1 = SetCompare(
Status,NotIn,NonEmptyList(StringValue("closed"),StringValue("rejected"))
)
val s = bqs( q1 )
s must_== ("(Status != 'closed' AND Status != 'rejected')")
}
}
"The query Implicit conversions" should {
import QueryBuilder._
"Coerce Strings to StringValues" in {
val q = Compare( Queue, Eq , "dev" )
bqs( q ) must_== ("Queue = 'dev'")
}
"Coerce CustomFieldNames to CF Identifiers" in {
val q = Compare( CustomFieldName("Custom"), Eq , "foo" )
bqs( q ) must_== ("'CF.{Custom}' = 'foo'")
}
"Coerce Ints to IntValues" in {
val q = Compare( TicketId, Eq , 1 )
bqs( q ) must_== ("Id = 1")
}
"Coerce Longs to LongValues" in {
val q = Compare( TicketId, Eq , 1L )
bqs( q ) must_== ("Id = 1")
}
"Coerce Doubles to DoubleValues" in {
val q = Compare( TicketId, Eq , 1.0 )
bqs( q ) must_== ("Id = 1.0")
}
"Coerce DateTimes to DateTimeValues" in {
val q = Compare( Due, Eq , dt )
bqs( q ) must_== ("Due = '2013-07-06 03:33:42'")
}
}
"The builder syntax" should {
import QueryBuilder._
"Build comparisions" in {
bqs( Due.eqs(dt) ) must_== ("Due = '2013-07-06 03:33:42'")
bqs(
CF("Power Level").gt(9000)
) must_== ("'CF.{Power Level}' > 9000")
}
"Build lte and gte queries" in {
bqs( Due.gte(dt) ) must_== ("(Due = '2013-07-06 03:33:42' OR Due > '2013-07-06 03:33:42')" )
bqs( Due.lte(dt) ) must_== ("(Due = '2013-07-06 03:33:42' OR Due < '2013-07-06 03:33:42')" )
}
"Build queries" in {
val q = Queue.matches("dev") AND CF("Power Level").gt(9000)
bqs( q ) must_== (
"(Queue LIKE 'dev' AND 'CF.{Power Level}' > 9000)"
)
}
"Build nested queries like a goddamn champion" in {
val q = Queue.in("dev","dev.projects") AND (Status.eqs("open") OR Status.eqs("new") )
bqs( q ) must_== (
"((Queue = 'dev' OR Queue = 'dev.projects') AND (Status = 'open' OR Status = 'new'))"
)
}
"Build inNel queries fine" in {
val q = Queue.in("dev","dev.projects") AND Status.inNel(NonEmptyList("open","new"))
bqs( q ) must_== (
"((Queue = 'dev' OR Queue = 'dev.projects') AND (Status = 'open' OR Status = 'new'))"
)
}
}
}
|
benkolera/scala-rt
|
src/test/scala/Rt/QueryBuilder.scala
|
Scala
|
mit
| 4,511
|
package me.shadaj.genalgo.sequences
import scala.collection.{mutable, IndexedSeqLike}
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.ArrayBuffer
import me.shadaj.genalgo.util.BitStorage
final class Protein private(storage: BitStorage, val length: Int) extends BioSequence[AminoAcid] with IndexedSeqLike[AminoAcid, Protein] {
type C = Protein
def self = this
override def seqBuilder: mutable.Builder[AminoAcid, Protein] = Protein.newBuilder
def apply(index: Int): AminoAcid = {
if (index < 0 || index >= length) throw new IndexOutOfBoundsException
storage(index, AminoAcid.fromInt)
}
}
object Protein {
def apply(str: String): Protein = {
Protein(str.map(AminoAcid.fromChar))
}
def apply(bases: AminoAcid*): Protein = {
Protein(bases.toIndexedSeq)
}
def apply(seq: IndexedSeq[AminoAcid]): Protein = {
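    // 5 bits per residue (assuming BitStorage packs fixed-width codes): enough
    // to encode the 20 standard amino acids plus extended codes in 2^5 = 32 slots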
new Protein(BitStorage(5, seq.toArray, AminoAcid.toInt), seq.length)
}
def newBuilder: mutable.Builder[AminoAcid, Protein] = (new ArrayBuffer).mapResult(apply)
implicit def canBuildFrom: CanBuildFrom[Protein, AminoAcid, Protein] = new CanBuildFrom[Protein, AminoAcid, Protein] {
def apply() = newBuilder
def apply(from: Protein) = newBuilder
}
}
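// Hypothetical usage sketch (assuming AminoAcid.fromChar accepts standard
// one-letter codes such as M, K, V): a Protein behaves like an indexed
// sequence, and collection operations rebuild a Protein via canBuildFrom.
object ProteinSketch {
  def demo(): Unit = {
    val p = Protein("MKV")
    val reversed: Protein = p.reverse // stays a Protein thanks to canBuildFrom
    println(p.length + " " + reversed.length)
  }
}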
|
shadaj/genalgo
|
shared/src/main/scala/me/shadaj/genalgo/sequences/Protein.scala
|
Scala
|
mit
| 1,245
|
package com.twitter.inject.server.tests
import com.twitter.finagle.http.Status
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.inject.server.{EmbeddedTwitterServer, FeatureTest}
import org.scalatest.concurrent.Eventually
/** Test a non-inject TwitterServer using an InMemoryStatsReceiver implementation with the [[FeatureTest]] trait */
class FeatureTestNonInjectionInMemoryStatsReceiverTest extends FeatureTest with Eventually {
private[this] val inMemoryStatsReceiver: InMemoryStatsReceiver = new InMemoryStatsReceiver
override val server: EmbeddedTwitterServer =
new EmbeddedTwitterServer(
twitterServer = new NonInjectionTestServer(Some(inMemoryStatsReceiver)),
args = Seq("http.port=:0"),
disableTestLogging = true,
statsReceiverOverride = Some(inMemoryStatsReceiver)
)
/* Works as we provide an override that is an InMemoryStatsReceiver */
override val printStats: Boolean = true
/**
* Explicitly start the server before all tests, close will be attempted
* by [[com.twitter.inject.server.FeatureTestMixin]] in `afterAll`.
*/
override def beforeAll(): Unit = {
server.start()
// the server starts and is marked as healthy as soon as the admin starts
// we need a little time for the exposed server to start and set up stats
eventually {
assert(
inMemoryStatsReceiver.gauges.nonEmpty
) /* we add a build revision gauge in startup of the server */
}
}
test("TestServer#starts up") {
server.assertHealthy()
}
test("TwitterServer#stats receivers") {
// even though the server under test is a non-injectable server, we
// have been provided with a stats receiver override which we return here
server.statsReceiver
// and the override is an in-memory stats receiver
server.inMemoryStatsReceiver should be(inMemoryStatsReceiver)
}
test("TestServer#feature test") {
server.httpGetAdmin(
"/admin/lint.json",
andExpect = Status.Ok
)
server.httpGetAdmin(
"/admin/registry.json",
andExpect = Status.Ok
)
}
}
|
twitter/finatra
|
inject/inject-server/src/test/scala/com/twitter/inject/server/tests/FeatureTestNonInjectionInMemoryStatsReceiverTest.scala
|
Scala
|
apache-2.0
| 2,108
|
package com.gmadorell.youtube_sync.module.synchronize.test.behaviour.sync
import com.gmadorell.youtube_sync.module.shared.stub.ListStub
import com.gmadorell.youtube_sync.module.synchronize.application.sync.{
PlayListSynchronizer,
SynchronizePlayListCommandHandler
}
import com.gmadorell.youtube_sync.module.synchronize.domain.error.PlayListNotFound
import com.gmadorell.youtube_sync.module.synchronize.domain.model.{PlayList, PlayListVideo}
import com.gmadorell.youtube_sync.module.synchronize.test.behaviour.YoutubeBehaviourSpec
import com.gmadorell.youtube_sync.module.synchronize.test.infrastructure.stub.{
PlayListIdStub,
PlayListStub,
PlayListVideoStub,
SynchronizePlayListCommandStub
}
final class SynchronizePlayListSpec extends YoutubeBehaviourSpec {
val handler = new SynchronizePlayListCommandHandler(
new PlayListSynchronizer(playListRepository, remotePlayListVideoRepository, localPlayListVideoRepository))
"A SynchronizePlayListCommandHandler" should {
"create videos that exist in the remote but not locally" in {
val command = SynchronizePlayListCommandStub.create()
val playListId = PlayListIdStub.create(command.playListId)
val playList = PlayListStub.create(playListId)
val videosBothInRemoteAndLocal = randomPlayListVideos(playList)
val onlyRemoteVideos = randomPlayListVideos(playList)
val onlyLocalVideos = randomPlayListVideos(playList)
val remoteVideos = onlyRemoteVideos ++ videosBothInRemoteAndLocal
val localVideos = onlyLocalVideos ++ videosBothInRemoteAndLocal
shouldFindPlayList(playListId, playList)
shouldFindRemotePlayListVideos(playList, remoteVideos)
shouldFindLocalPlayListVideos(playList, localVideos)
onlyRemoteVideos.foreach(shouldCreateLocalPlayListVideo)
handler.handle(command).futureValue.isRight should ===(true)
}
"fail when the playlist doesn't exist" in {
val command = SynchronizePlayListCommandStub.create()
val playListId = PlayListIdStub.create(command.playListId)
shouldNotFindPlayList(playListId)
val result = handler.handle(command).futureValue
result.isLeft should ===(true)
result.left.map(error => error should ===(PlayListNotFound(playListId)))
}
}
def randomPlayListVideos(playList: PlayList): List[PlayListVideo] =
ListStub.randomElements(() => PlayListVideoStub.create(playList))
}
|
GMadorell/youtube_sync
|
src/test/scala/com/gmadorell/youtube_sync/module/synchronize/test/behaviour/sync/SynchronizePlayListSpec.scala
|
Scala
|
mit
| 2,496
|
package marcin
import scala.collection.mutable
/**
* Created by m on 2015-08-22.
*/
class InteligentMap {
val dateCells = mutable.HashMap.empty[Long,mutable.HashMap[String,Cell]]
def add(date: Any, c: Cell): Unit = {
  if (date == null) return
  val ldate = date.asInstanceOf[Long]
  // group cells by date, keying each cell by its "Address" field
  val cells = dateCells.getOrElseUpdate(ldate, mutable.HashMap.empty[String, Cell])
  cells += ("" + c.getField("Address") -> c)
}
}
|
MarcinGrabowiecki/iwlistScanner2
|
src/main/scala/marcin/InteligentMap.scala
|
Scala
|
gpl-3.0
| 572
|
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.db.evolutions
import java.sql.ResultSet
import org.specs2.mutable.{ After, Specification }
import play.api.Configuration
import play.api.db.DefaultDBApi
// TODO: functional test with the InvalidDatabaseRevision exception
object EvolutionsSpec extends Specification {
sequential
import TestEvolutions._
"Evolutions" should {
"apply up scripts" in new WithEvolutions {
val scripts = evolutions.scripts("default", Seq(a1, a2, a3))
scripts must have length (3)
scripts must_== Seq(UpScript(a1), UpScript(a2), UpScript(a3))
evolutions.evolve("default", scripts, autocommit = true)
val resultSet = executeQuery("select * from test")
resultSet.next must beTrue
resultSet.getLong(1) must_== 1L
resultSet.getString(2) must_== "alice"
resultSet.getInt(3) must_== 42
resultSet.next must beFalse
}
"apply down scripts" in new WithEvolutions {
val original = evolutions.scripts("default", Seq(a1, a2, a3))
evolutions.evolve("default", original, autocommit = true)
val scripts = evolutions.scripts("default", Seq(b1, a2, b3))
scripts must have length (6)
scripts must_== Seq(DownScript(a3), DownScript(a2), DownScript(a1), UpScript(b1), UpScript(a2), UpScript(b3))
evolutions.evolve("default", scripts, autocommit = true)
val resultSet = executeQuery("select * from test")
resultSet.next must beTrue
resultSet.getLong(1) must_== 1L
resultSet.getString(2) must_== "bob"
resultSet.getInt(3) must_== 42
resultSet.next must beFalse
}
"report inconsistent state and resolve" in new WithEvolutions {
val broken = evolutions.scripts("default", Seq(c1, a2, a3))
val fixed = evolutions.scripts("default", Seq(a1, a2, a3))
evolutions.evolve("default", broken, autocommit = true) must throwAn[InconsistentDatabase]
// inconsistent until resolved
evolutions.evolve("default", fixed, autocommit = true) must throwAn[InconsistentDatabase]
evolutions.resolve("default", 1)
evolutions.evolve("default", fixed, autocommit = true)
}
}
trait WithEvolutions extends After {
lazy val db = new DefaultDBApi(
Configuration.from(Map(
"default.driver" -> "org.h2.Driver",
"default.url" -> "jdbc:h2:mem:evolutions-test"
))
)
lazy val evolutions = new DefaultEvolutionsApi(db)
lazy val connection = db.database("default").getConnection()
def executeQuery(sql: String): ResultSet = connection.createStatement.executeQuery(sql)
def after = {
connection.close()
db.shutdown()
}
}
object TestEvolutions {
val a1 = Evolution(
1,
"create table test (id bigint not null, name varchar(255));",
"drop table if exists test;"
)
val a2 = Evolution(
2,
"alter table test add (age int);",
"alter table test drop if exists age;"
)
val a3 = Evolution(
3,
"insert into test (id, name, age) values (1, 'alice', 42);",
"delete from test;"
)
val b1 = Evolution(
1,
"create table test (id bigint not null, content varchar(255));",
"drop table if exists test;"
)
val b3 = Evolution(
3,
"insert into test (id, content, age) values (1, 'bob', 42);",
"delete from test;"
)
val c1 = Evolution(
1,
"creaTYPOe table test (id bigint not null, name varchar(255));",
"drop table if exists test;"
)
}
}
|
jyotikamboj/container
|
pf-framework/src/play-jdbc/src/test/scala/play/api/db/evolutions/EvolutionsSpec.scala
|
Scala
|
mit
| 3,586
|
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.iterators
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.data._
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
/**
* Iterator for the record table. Applies transforms and ECQL filters.
*/
class RecordTableIterator
extends GeomesaFilteringIterator
with HasFeatureType
with SetTopInclude
with SetTopFilter
with SetTopTransform
with SetTopFilterTransform {
var setTopOptimized: (Key) => Unit = null
override def init(source: SortedKeyValueIterator[Key, Value],
options: java.util.Map[String, String],
env: IteratorEnvironment) = {
super.init(source, options, env)
initFeatureType(options)
init(featureType, options)
// pick the execution path once based on the filters and transforms we need to apply
// see org.locationtech.geomesa.core.iterators.IteratorFunctions
setTopOptimized = (filter, transform) match {
case (null, null) => setTopInclude
case (_, null) => setTopFilter
case (null, _) => setTopTransform
case (_, _) => setTopFilterTransform
}
}
override def setTopConditionally(): Unit = setTopOptimized(source.getTopKey)
}
|
mmatz-ccri/geomesa
|
geomesa-core/src/main/scala/org/locationtech/geomesa/core/iterators/RecordTableIterator.scala
|
Scala
|
apache-2.0
| 1,899
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.{Date, Timestamp}
import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.UTF8String
class UnsafeRowConverterSuite extends SparkFunSuite with Matchers {
private def roundedSize(size: Int) = ByteArrayMethods.roundNumberOfBytesToNearestWord(size)
test("basic conversion with only primitive types") {
val fieldTypes: Array[DataType] = Array(LongType, LongType, IntegerType)
val converter = UnsafeProjection.create(fieldTypes)
val row = new SpecificMutableRow(fieldTypes)
row.setLong(0, 0)
row.setLong(1, 1)
row.setInt(2, 2)
val unsafeRow: UnsafeRow = converter.apply(row)
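    // expected size per the assertion below: one 8-byte null-bitmap word plus
    // three fixed-width 8-byte field slots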
assert(unsafeRow.getSizeInBytes === 8 + (3 * 8))
assert(unsafeRow.getLong(0) === 0)
assert(unsafeRow.getLong(1) === 1)
assert(unsafeRow.getInt(2) === 2)
val unsafeRowCopy = unsafeRow.copy()
assert(unsafeRowCopy.getLong(0) === 0)
assert(unsafeRowCopy.getLong(1) === 1)
assert(unsafeRowCopy.getInt(2) === 2)
unsafeRow.setLong(1, 3)
assert(unsafeRow.getLong(1) === 3)
unsafeRow.setInt(2, 4)
assert(unsafeRow.getInt(2) === 4)
// Mutating the original row should not have changed the copy
assert(unsafeRowCopy.getLong(0) === 0)
assert(unsafeRowCopy.getLong(1) === 1)
assert(unsafeRowCopy.getInt(2) === 2)
// Make sure the converter can be reused, i.e. we correctly reset all states.
val unsafeRow2: UnsafeRow = converter.apply(row)
assert(unsafeRow2.getSizeInBytes === 8 + (3 * 8))
assert(unsafeRow2.getLong(0) === 0)
assert(unsafeRow2.getLong(1) === 1)
assert(unsafeRow2.getInt(2) === 2)
}
test("basic conversion with primitive, string and binary types") {
val fieldTypes: Array[DataType] = Array(LongType, StringType, BinaryType)
val converter = UnsafeProjection.create(fieldTypes)
val row = new SpecificMutableRow(fieldTypes)
row.setLong(0, 0)
row.update(1, UTF8String.fromString("Hello"))
row.update(2, "World".getBytes)
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.getSizeInBytes === 8 + (8 * 3) +
roundedSize("Hello".getBytes.length) +
roundedSize("World".getBytes.length))
assert(unsafeRow.getLong(0) === 0)
assert(unsafeRow.getString(1) === "Hello")
assert(unsafeRow.getBinary(2) === "World".getBytes)
}
test("basic conversion with primitive, string, date and timestamp types") {
val fieldTypes: Array[DataType] = Array(LongType, StringType, DateType, TimestampType)
val converter = UnsafeProjection.create(fieldTypes)
val row = new SpecificMutableRow(fieldTypes)
row.setLong(0, 0)
row.update(1, UTF8String.fromString("Hello"))
row.update(2, DateTimeUtils.fromJavaDate(Date.valueOf("1970-01-01")))
row.update(3, DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf("2015-05-08 08:10:25")))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.getSizeInBytes === 8 + (8 * 4) + roundedSize("Hello".getBytes.length))
assert(unsafeRow.getLong(0) === 0)
assert(unsafeRow.getString(1) === "Hello")
// Date is represented as Int in unsafeRow
assert(DateTimeUtils.toJavaDate(unsafeRow.getInt(2)) === Date.valueOf("1970-01-01"))
// Timestamp is represented as Long in unsafeRow
DateTimeUtils.toJavaTimestamp(unsafeRow.getLong(3)) should be (Timestamp.valueOf("2015-05-08 08:10:25"))
unsafeRow.setInt(2, DateTimeUtils.fromJavaDate(Date.valueOf("2015-06-22")))
assert(DateTimeUtils.toJavaDate(unsafeRow.getInt(2)) === Date.valueOf("2015-06-22"))
unsafeRow.setLong(3, DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf("2015-06-22 08:10:25")))
DateTimeUtils.toJavaTimestamp(unsafeRow.getLong(3)) should be (Timestamp.valueOf("2015-06-22 08:10:25"))
}
test("null handling") {
val fieldTypes: Array[DataType] = Array(
NullType,
BooleanType,
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
StringType,
BinaryType,
DecimalType.USER_DEFAULT,
DecimalType.SYSTEM_DEFAULT
// ArrayType(IntegerType)
)
val converter = UnsafeProjection.create(fieldTypes)
val rowWithAllNullColumns: InternalRow = {
val r = new SpecificMutableRow(fieldTypes)
for (i <- fieldTypes.indices) {
r.setNullAt(i)
}
r
}
val createdFromNull: UnsafeRow = converter.apply(rowWithAllNullColumns)
for (i <- fieldTypes.indices) {
assert(createdFromNull.isNullAt(i))
}
assert(createdFromNull.getBoolean(1) === false)
assert(createdFromNull.getByte(2) === 0)
assert(createdFromNull.getShort(3) === 0)
assert(createdFromNull.getInt(4) === 0)
assert(createdFromNull.getLong(5) === 0)
assert(createdFromNull.getFloat(6) === 0.0f)
assert(createdFromNull.getDouble(7) === 0.0d)
assert(createdFromNull.getUTF8String(8) === null)
assert(createdFromNull.getBinary(9) === null)
assert(createdFromNull.getDecimal(10, 10, 0) === null)
assert(createdFromNull.getDecimal(11, 38, 18) === null)
// assert(createdFromNull.get(11) === null)
// If we have an UnsafeRow with columns that are initially non-null and we null out those
// columns, then the serialized row representation should be identical to what we would get by
// creating an entirely null row via the converter
val rowWithNoNullColumns: InternalRow = {
val r = new SpecificMutableRow(fieldTypes)
r.setNullAt(0)
r.setBoolean(1, false)
r.setByte(2, 20)
r.setShort(3, 30)
r.setInt(4, 400)
r.setLong(5, 500)
r.setFloat(6, 600)
r.setDouble(7, 700)
r.update(8, UTF8String.fromString("hello"))
r.update(9, "world".getBytes)
r.setDecimal(10, Decimal(10), 10)
r.setDecimal(11, Decimal(10.00, 38, 18), 38)
// r.update(11, Array(11))
r
}
val setToNullAfterCreation = converter.apply(rowWithNoNullColumns)
assert(setToNullAfterCreation.isNullAt(0) === rowWithNoNullColumns.isNullAt(0))
assert(setToNullAfterCreation.getBoolean(1) === rowWithNoNullColumns.getBoolean(1))
assert(setToNullAfterCreation.getByte(2) === rowWithNoNullColumns.getByte(2))
assert(setToNullAfterCreation.getShort(3) === rowWithNoNullColumns.getShort(3))
assert(setToNullAfterCreation.getInt(4) === rowWithNoNullColumns.getInt(4))
assert(setToNullAfterCreation.getLong(5) === rowWithNoNullColumns.getLong(5))
assert(setToNullAfterCreation.getFloat(6) === rowWithNoNullColumns.getFloat(6))
assert(setToNullAfterCreation.getDouble(7) === rowWithNoNullColumns.getDouble(7))
assert(setToNullAfterCreation.getString(8) === rowWithNoNullColumns.getString(8))
assert(setToNullAfterCreation.getBinary(9) === rowWithNoNullColumns.getBinary(9))
assert(setToNullAfterCreation.getDecimal(10, 10, 0) ===
rowWithNoNullColumns.getDecimal(10, 10, 0))
assert(setToNullAfterCreation.getDecimal(11, 38, 18) ===
rowWithNoNullColumns.getDecimal(11, 38, 18))
for (i <- fieldTypes.indices) {
// Can't call setNullAt() on DecimalType
if (i == 11) {
setToNullAfterCreation.setDecimal(11, null, 38)
} else {
setToNullAfterCreation.setNullAt(i)
}
}
setToNullAfterCreation.setNullAt(0)
setToNullAfterCreation.setBoolean(1, false)
setToNullAfterCreation.setByte(2, 20)
setToNullAfterCreation.setShort(3, 30)
setToNullAfterCreation.setInt(4, 400)
setToNullAfterCreation.setLong(5, 500)
setToNullAfterCreation.setFloat(6, 600)
setToNullAfterCreation.setDouble(7, 700)
// setToNullAfterCreation.update(8, UTF8String.fromString("hello"))
// setToNullAfterCreation.update(9, "world".getBytes)
setToNullAfterCreation.setDecimal(10, Decimal(10), 10)
setToNullAfterCreation.setDecimal(11, Decimal(10.00, 38, 18), 38)
// setToNullAfterCreation.update(11, Array(11))
assert(setToNullAfterCreation.isNullAt(0) === rowWithNoNullColumns.isNullAt(0))
assert(setToNullAfterCreation.getBoolean(1) === rowWithNoNullColumns.getBoolean(1))
assert(setToNullAfterCreation.getByte(2) === rowWithNoNullColumns.getByte(2))
assert(setToNullAfterCreation.getShort(3) === rowWithNoNullColumns.getShort(3))
assert(setToNullAfterCreation.getInt(4) === rowWithNoNullColumns.getInt(4))
assert(setToNullAfterCreation.getLong(5) === rowWithNoNullColumns.getLong(5))
assert(setToNullAfterCreation.getFloat(6) === rowWithNoNullColumns.getFloat(6))
assert(setToNullAfterCreation.getDouble(7) === rowWithNoNullColumns.getDouble(7))
// assert(setToNullAfterCreation.getString(8) === rowWithNoNullColumns.getString(8))
// assert(setToNullAfterCreation.get(9) === rowWithNoNullColumns.get(9))
assert(setToNullAfterCreation.getDecimal(10, 10, 0) ===
rowWithNoNullColumns.getDecimal(10, 10, 0))
assert(setToNullAfterCreation.getDecimal(11, 38, 18) ===
rowWithNoNullColumns.getDecimal(11, 38, 18))
// assert(setToNullAfterCreation.get(11) === rowWithNoNullColumns.get(11))
}
test("NaN canonicalization") {
val fieldTypes: Array[DataType] = Array(FloatType, DoubleType)
val row1 = new SpecificMutableRow(fieldTypes)
row1.setFloat(0, java.lang.Float.intBitsToFloat(0x7f800001))
row1.setDouble(1, java.lang.Double.longBitsToDouble(0x7ff0000000000001L))
val row2 = new SpecificMutableRow(fieldTypes)
row2.setFloat(0, java.lang.Float.intBitsToFloat(0x7fffffff))
row2.setDouble(1, java.lang.Double.longBitsToDouble(0x7fffffffffffffffL))
val converter = UnsafeProjection.create(fieldTypes)
assert(converter.apply(row1).getBytes === converter.apply(row2).getBytes)
}
test("basic conversion with struct type") {
val fieldTypes: Array[DataType] = Array(
new StructType().add("i", IntegerType),
new StructType().add("nest", new StructType().add("l", LongType))
)
val converter = UnsafeProjection.create(fieldTypes)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, InternalRow(1))
row.update(1, InternalRow(InternalRow(2L)))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields == 2)
val row1 = unsafeRow.getStruct(0, 1)
assert(row1.getSizeInBytes == 8 + 1 * 8)
assert(row1.numFields == 1)
assert(row1.getInt(0) == 1)
val row2 = unsafeRow.getStruct(1, 1)
assert(row2.numFields() == 1)
val innerRow = row2.getStruct(0, 1)
{
assert(innerRow.getSizeInBytes == 8 + 1 * 8)
assert(innerRow.numFields == 1)
assert(innerRow.getLong(0) == 2L)
}
assert(row2.getSizeInBytes == 8 + 1 * 8 + innerRow.getSizeInBytes)
assert(unsafeRow.getSizeInBytes == 8 + 2 * 8 + row1.getSizeInBytes + row2.getSizeInBytes)
}
private def createArray(values: Any*): ArrayData = new GenericArrayData(values.toArray)
private def createMap(keys: Any*)(values: Any*): MapData = {
assert(keys.length == values.length)
new ArrayBasedMapData(createArray(keys: _*), createArray(values: _*))
}
private def testArrayInt(array: UnsafeArrayData, values: Seq[Int]): Unit = {
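    // assumed layout checked below: a 4-byte element count, then 4 bytes of
    // offset/size data and 4 bytes of payload per int element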
assert(array.numElements == values.length)
assert(array.getSizeInBytes == 4 + (4 + 4) * values.length)
values.zipWithIndex.foreach {
case (value, index) => assert(array.getInt(index) == value)
}
}
private def testMapInt(map: UnsafeMapData, keys: Seq[Int], values: Seq[Int]): Unit = {
assert(keys.length == values.length)
assert(map.numElements == keys.length)
testArrayInt(map.keyArray, keys)
testArrayInt(map.valueArray, values)
assert(map.getSizeInBytes == 4 + map.keyArray.getSizeInBytes + map.valueArray.getSizeInBytes)
}
test("basic conversion with array type") {
val fieldTypes: Array[DataType] = Array(
ArrayType(IntegerType),
ArrayType(ArrayType(IntegerType))
)
val converter = UnsafeProjection.create(fieldTypes)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, createArray(1, 2))
row.update(1, createArray(createArray(3, 4)))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields() == 2)
val unsafeArray1 = unsafeRow.getArray(0)
testArrayInt(unsafeArray1, Seq(1, 2))
val unsafeArray2 = unsafeRow.getArray(1)
assert(unsafeArray2.numElements == 1)
val nestedArray = unsafeArray2.getArray(0)
testArrayInt(nestedArray, Seq(3, 4))
assert(unsafeArray2.getSizeInBytes == 4 + 4 + nestedArray.getSizeInBytes)
val array1Size = roundedSize(unsafeArray1.getSizeInBytes)
val array2Size = roundedSize(unsafeArray2.getSizeInBytes)
assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + array1Size + array2Size)
}
test("basic conversion with map type") {
val fieldTypes: Array[DataType] = Array(
MapType(IntegerType, IntegerType),
MapType(IntegerType, MapType(IntegerType, IntegerType))
)
val converter = UnsafeProjection.create(fieldTypes)
val map1 = createMap(1, 2)(3, 4)
val innerMap = createMap(5, 6)(7, 8)
val map2 = createMap(9)(innerMap)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, map1)
row.update(1, map2)
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields == 2)
val unsafeMap1 = unsafeRow.getMap(0)
testMapInt(unsafeMap1, Seq(1, 2), Seq(3, 4))
val unsafeMap2 = unsafeRow.getMap(1)
assert(unsafeMap2.numElements == 1)
val keyArray = unsafeMap2.keyArray
testArrayInt(keyArray, Seq(9))
val valueArray = unsafeMap2.valueArray
{
assert(valueArray.numElements == 1)
val nestedMap = valueArray.getMap(0)
testMapInt(nestedMap, Seq(5, 6), Seq(7, 8))
assert(valueArray.getSizeInBytes == 4 + 4 + nestedMap.getSizeInBytes)
}
assert(unsafeMap2.getSizeInBytes == 4 + keyArray.getSizeInBytes + valueArray.getSizeInBytes)
val map1Size = roundedSize(unsafeMap1.getSizeInBytes)
val map2Size = roundedSize(unsafeMap2.getSizeInBytes)
assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + map1Size + map2Size)
}
test("basic conversion with struct and array") {
val fieldTypes: Array[DataType] = Array(
new StructType().add("arr", ArrayType(IntegerType)),
ArrayType(new StructType().add("l", LongType))
)
val converter = UnsafeProjection.create(fieldTypes)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, InternalRow(createArray(1)))
row.update(1, createArray(InternalRow(2L)))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields() == 2)
val field1 = unsafeRow.getStruct(0, 1)
assert(field1.numFields == 1)
val innerArray = field1.getArray(0)
testArrayInt(innerArray, Seq(1))
assert(field1.getSizeInBytes == 8 + 8 + roundedSize(innerArray.getSizeInBytes))
val field2 = unsafeRow.getArray(1)
assert(field2.numElements == 1)
val innerStruct = field2.getStruct(0, 1)
{
assert(innerStruct.numFields == 1)
assert(innerStruct.getSizeInBytes == 8 + 8)
assert(innerStruct.getLong(0) == 2L)
}
assert(field2.getSizeInBytes == 4 + 4 + innerStruct.getSizeInBytes)
assert(unsafeRow.getSizeInBytes ==
8 + 8 * 2 + field1.getSizeInBytes + roundedSize(field2.getSizeInBytes))
}
test("basic conversion with struct and map") {
val fieldTypes: Array[DataType] = Array(
new StructType().add("map", MapType(IntegerType, IntegerType)),
MapType(IntegerType, new StructType().add("l", LongType))
)
val converter = UnsafeProjection.create(fieldTypes)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, InternalRow(createMap(1)(2)))
row.update(1, createMap(3)(InternalRow(4L)))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields() == 2)
val field1 = unsafeRow.getStruct(0, 1)
assert(field1.numFields == 1)
val innerMap = field1.getMap(0)
testMapInt(innerMap, Seq(1), Seq(2))
assert(field1.getSizeInBytes == 8 + 8 + roundedSize(innerMap.getSizeInBytes))
val field2 = unsafeRow.getMap(1)
val keyArray = field2.keyArray
testArrayInt(keyArray, Seq(3))
val valueArray = field2.valueArray
{
assert(valueArray.numElements == 1)
val innerStruct = valueArray.getStruct(0, 1)
assert(innerStruct.numFields == 1)
assert(innerStruct.getSizeInBytes == 8 + 8)
assert(innerStruct.getLong(0) == 4L)
assert(valueArray.getSizeInBytes == 4 + 4 + innerStruct.getSizeInBytes)
}
assert(field2.getSizeInBytes == 4 + keyArray.getSizeInBytes + valueArray.getSizeInBytes)
assert(unsafeRow.getSizeInBytes ==
8 + 8 * 2 + field1.getSizeInBytes + roundedSize(field2.getSizeInBytes))
}
test("basic conversion with array and map") {
val fieldTypes: Array[DataType] = Array(
ArrayType(MapType(IntegerType, IntegerType)),
MapType(IntegerType, ArrayType(IntegerType))
)
val converter = UnsafeProjection.create(fieldTypes)
val row = new GenericMutableRow(fieldTypes.length)
row.update(0, createArray(createMap(1)(2)))
row.update(1, createMap(3)(createArray(4)))
val unsafeRow: UnsafeRow = converter.apply(row)
assert(unsafeRow.numFields() == 2)
val field1 = unsafeRow.getArray(0)
assert(field1.numElements == 1)
val innerMap = field1.getMap(0)
testMapInt(innerMap, Seq(1), Seq(2))
assert(field1.getSizeInBytes == 4 + 4 + innerMap.getSizeInBytes)
val field2 = unsafeRow.getMap(1)
assert(field2.numElements == 1)
val keyArray = field2.keyArray
testArrayInt(keyArray, Seq(3))
val valueArray = field2.valueArray
{
assert(valueArray.numElements == 1)
val innerArray = valueArray.getArray(0)
testArrayInt(innerArray, Seq(4))
assert(valueArray.getSizeInBytes == 4 + (4 + innerArray.getSizeInBytes))
}
assert(field2.getSizeInBytes == 4 + keyArray.getSizeInBytes + valueArray.getSizeInBytes)
assert(unsafeRow.getSizeInBytes ==
8 + 8 * 2 + roundedSize(field1.getSizeInBytes) + roundedSize(field2.getSizeInBytes))
}
}
|
pronix/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/UnsafeRowConverterSuite.scala
|
Scala
|
apache-2.0
| 19,192
|
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.function
import org.orbeon.oxf.xforms.NodeInfoFactory.elementInfo
import org.orbeon.saxon.expr.XPathContext
import org.orbeon.saxon.om.{EmptyIterator, Item}
import org.orbeon.scaxon.Implicits._
/**
* xf:element()
*/
class XFormsElement extends XFormsFunction {
override def evaluateItem(xpathContext: XPathContext): Item = {
// Element QName and content sequence
val qName = argument.lift(0) map (getQNameFromExpression(_)(xpathContext)) get
val content = argument.lift(1) map (_.iterate(xpathContext)) getOrElse EmptyIterator.getInstance
elementInfo(qName, asScalaIterator(content).toList)
}
}
|
brunobuzzi/orbeon-forms
|
xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/function/XFormsElement.scala
|
Scala
|
lgpl-2.1
| 1,301
|
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.MetadataAPI.Utility
import java.io.File
import com.ligadata.MetadataAPI.{MetadataAPIImpl,ApiResult,ErrorCodeConstants}
import scala.io.Source
import org.apache.logging.log4j._
/**
* Created by dhaval on 8/7/15.
*/
object ContainerService {
private val userid: Option[String] = Some("metadataapi")
val loggerName = this.getClass.getName
lazy val logger = LogManager.getLogger(loggerName)
def addContainer(input: String): String ={
var response = ""
var containerFileDir: String = ""
//val gitMsgFile = "https://raw.githubusercontent.com/ligadata-dhaval/Kamanja/master/HelloWorld_Msg_Def.json"
if (input == "") {
containerFileDir = MetadataAPIImpl.GetMetadataAPIConfig.getProperty("CONTAINER_FILES_DIR")
if (containerFileDir == null) {
response = "CONTAINER_FILES_DIR property missing in the metadata API configuration"
} else {
//verify the directory where containers can be present
IsValidDir(containerFileDir) match {
case true => {
//get all files with json extension
val containers: Array[File] = new java.io.File(containerFileDir).listFiles.filter(_.getName.endsWith(".json"))
containers.length match {
case 0 => {
response="Container not found at " + containerFileDir
}
case _ => {
val containerDefs = getUserInputFromMainMenu(containers)
for (containerDef <- containerDefs) {
response += MetadataAPIImpl.AddContainer(containerDef.toString, "JSON", userid)
}
}
}
}
case false => {
response = "Container directory is invalid."
}
}
}
} else {
//input provided
val container = new File(input.toString)
if( container.exists()){
val containerDef = Source.fromFile(container).mkString
response = MetadataAPIImpl.AddContainer(containerDef, "JSON", userid)
}else{
response = "Input container file does not exist"
}
}
//Got the container.
response
}
def updateContainer(input: String): String ={
var response = ""
var containerFileDir: String = ""
//val gitMsgFile = "https://raw.githubusercontent.com/ligadata-dhaval/Kamanja/master/HelloWorld_Msg_Def.json"
if (input == "") {
containerFileDir = MetadataAPIImpl.GetMetadataAPIConfig.getProperty("CONTAINER_FILES_DIR")
if (containerFileDir == null) {
response = "CONTAINER_FILES_DIR property missing in the metadata API configuration"
} else {
//verify the directory where containers can be present
IsValidDir(containerFileDir) match {
case true => {
//get all files with json extension
val containers: Array[File] = new java.io.File(containerFileDir).listFiles.filter(_.getName.endsWith(".json"))
containers.length match {
case 0 => {
response="Container not found at " + containerFileDir
}
case _ => {
val containerDefs = getUserInputFromMainMenu(containers)
for (containerDef <- containerDefs) {
response += MetadataAPIImpl.UpdateContainer(containerDef.toString, "JSON", userid)
}
}
}
}
case false => {
response = "Container directory is invalid."
}
}
}
} else {
//input provided
val container = new File(input.toString)
if (container.exists()) {
val containerDef = Source.fromFile(container).mkString
response = MetadataAPIImpl.UpdateContainer(containerDef, "JSON", userid)
} else {
response = "Input container file does not exist"
}
}
//Got the container.
response
}
def getContainer(param: String = ""): String ={
var response=""
if (param.length > 0) {
val(ns, name, ver) = com.ligadata.kamanja.metadata.Utils.parseNameToken(param)
try {
return MetadataAPIImpl.GetContainerDefFromCache(ns, name,"JSON", ver, userid)
} catch {
case e: Exception => e.printStackTrace()
}
}
val containerKeys = MetadataAPIImpl.GetAllContainersFromCache(true, None)
if (containerKeys.length == 0) {
response="Sorry, No containers available in the Metadata"
}else{
println("\\nPick the container from the following list: ")
var srNo = 0
for(containerKey <- containerKeys){
srNo+=1
println("["+srNo+"] "+containerKey)
}
print("\\nEnter your choice: ")
val choice: Int = readInt()
if (choice < 1 || choice > containerKeys.length) {
response="Invalid choice " + choice + ",start with main menu..."
}else{
val containerKey = containerKeys(choice - 1)
val contKeyTokens = containerKey.split("\\\\.")
val contNameSpace = contKeyTokens(0)
val contName = contKeyTokens(1)
val contVersion = contKeyTokens(2)
response=MetadataAPIImpl.GetContainerDefFromCache(contNameSpace, contName, "JSON", contVersion, userid)
}
}
response
}
def getAllContainers: String ={
var response = ""
var containerKeysList = ""
try {
val containerKeys: Array[String] = MetadataAPIImpl GetAllContainersFromCache(true, userid)
if (containerKeys.length == 0) {
val emptyAlert = "Sorry, no containers are available in the Metadata"
response = (new ApiResult(ErrorCodeConstants.Success, "ContainerService", null, emptyAlert)).toString
} else {
response = (new ApiResult(ErrorCodeConstants.Success, "ContainerService", containerKeys.mkString(", "), "Successfully retrieved all the containers")).toString
}
} catch {
case e: Exception => {
response = e.getStackTrace.mkString("\\n")
response = (new ApiResult(ErrorCodeConstants.Failure, "ContainerService", null, response)).toString
}
}
response
}
def removeContainer(parm: String = ""): String ={
var response = ""
try{
if (parm.length > 0) {
val(ns, name, ver) = com.ligadata.kamanja.metadata.Utils.parseNameToken(parm)
try {
return MetadataAPIImpl.RemoveContainer(ns, name, ver.toLong, userid)
} catch {
case e: Exception => e.printStackTrace()
}
}
val contKeys = MetadataAPIImpl.GetAllContainersFromCache(true, None)
if (contKeys.length == 0) {
response=("Sorry, No containers available in the Metadata")
}else{
println("\\nPick the container to be deleted from the following list: ")
var seq = 0
contKeys.foreach(key => { seq += 1; println("[" + seq + "] " + key) })
print("\\nEnter your choice: ")
val choice: Int = readInt()
if (choice < 1 || choice > contKeys.length) {
return ("Invalid choice " + choice + ",start with main menu...")
}else{
val contKey = contKeys(choice - 1)
val(contNameSpace, contName, contVersion) = com.ligadata.kamanja.metadata.Utils.parseNameToken(contKey)
return MetadataAPIImpl.RemoveContainer(contNameSpace, contName, contVersion.toLong, userid)
}
}
} catch {
case e: NumberFormatException => {
response=("\\n Entry not in desired format. Please enter only one choice correctly")
}
case e: Exception => {
response=(e.toString)
}
}
response
}
//utilities
def IsValidDir(dirName: String): Boolean = {
val iFile = new File(dirName)
if (!iFile.exists) {
println("The file path (" + dirName + ") was not found")
false
} else if (!iFile.isDirectory) {
println("The file path (" + dirName + ") is not a directory")
false
} else
true
}
def getUserInputFromMainMenu(containers: Array[File]): Array[String] = {
var listOfContainerDef: Array[String] = Array[String]()
var srNo = 0
println("\\nPick a Container Definition file(s) from below choices\\n")
for (container <- containers) {
srNo += 1
println("[" + srNo + "]" + container)
}
print("\\nEnter your choice(If more than 1 choice, please use commas to seperate them): \\n")
val userOptions: List[Int] = Console.readLine().filter(_ != '\\n').split(',').filter(ch => (ch != null && ch != "")).map(_.trim.toInt).toList
//check if user input valid. If not exit
for (userOption <- userOptions) {
userOption match {
case userOption if (1 to srNo).contains(userOption) => {
//find the file location corresponding to the container
val container = containers(userOption - 1)
//process the container definition
val containerDef = Source.fromFile(container).mkString
listOfContainerDef = listOfContainerDef :+ containerDef
}
case _ => {
println("Unknown option: ")
}
}
}
listOfContainerDef
}
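// Usage sketch (illustrative path, not part of the original API): add one
// container definition from an explicit file, bypassing the interactive menu.
def exampleAddFromFile(): String =
addContainer("/tmp/HelloWorld_Container_Def.json")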
}
|
traytonwhite/Kamanja
|
trunk/MetadataAPI/src/main/scala/com/ligadata/MetadataAPI/Utility/ContainerService.scala
|
Scala
|
apache-2.0
| 9,676
|
package chen.guo.dagexe.config
import chen.guo.test.common.UnitSpec
import com.typesafe.config.ConfigFactory
class DAGSpec extends UnitSpec {
"dag" should "give error for cycled graph" in {
val dag = new DAG()
val n1 = SleepNode("n1", "100")
val n2 = SleepNode("n2", "100")
dag.addEdge(n1, n2)
dag.addEdge(n2, n1)
dag.execute()
}
}
|
enjoyear/Simple-DAG-Execution
|
sde/src/test/scala/chen/guo/dagexe/config/DAGSpec.scala
|
Scala
|
apache-2.0
| 362
|
package ch.bsisa.hyperbird.report
import play.api.Play
class ReportConfig {
/**
* Wkhtmltopdf binary path
*/
lazy val wkhtmltopdfPath: String = Play.current.configuration.getString(ReportConfig.WkhtmltopdfPathKey) match {
case Some(path) => path
case None => throw ReportConfigException(s"Report HTML to PDF tool path configuration information ${ReportConfig.WkhtmltopdfPathKey} missing")
}
/**
* PDF merging binary path
*/
lazy val pdfMergingPath: String = Play.current.configuration.getString(ReportConfig.PdfMergingPathKey) match {
case Some(path) => path
case None => throw ReportConfigException(s"Report PDF merging tool path configuration information ${ReportConfig.PdfMergingPathKey} missing")
}
}
/**
 * Report configuration exception class
 */
case class ReportConfigException(message: String = null, cause: Throwable = null) extends Exception(message, cause)
object ReportConfig {
val WkhtmltopdfPathKey = "hb.report.wkhtmltopdf.path"
val PdfMergingPathKey = "hb.report.pdfmerging.path"
/**
 * Header message is an optional static string configured per report.
 * It is passed to the configured header template as the message template parameter.
 */
val CAR_NAME_HEADER_MESSAGE = "headerMessage"
/**
 * Page orientation is an optional string configured per report.
 * It defines the report's default page orientation.
 * If not set, `portrait` layout is used.
 */
val CAR_NAME_PAGE_ORIENTATION = "pageOrientation"
/**
* Page orientation `landscape` is currently the only page orientation accepted value.
* Any other value will be considered `portrait` page orientation.
*/
val CAR_VALUE_PAGE_ORIENTATION_LANDSCAPE = "landscape"
/**
 * PDF include first is a string containing an HB triplet uniquely identifying an ELFIN object (IDG/CLASS/Id).
 * This object's first ANNEX document in PDF format is merged at the first position.
 */
/**
 * PDF include last is a string containing an HB triplet uniquely identifying an ELFIN object (IDG/CLASS/Id).
 * This object's first ANNEX document in PDF format is merged at the last position.
 */
/**
 * Watermark Elfin is a string containing an HB triplet uniquely identifying an ELFIN object (IDG/CLASS/Id).
 * This object's first ANNEX document in an HTML-supported image format (SVG preferred)
 * is meant to be used as a background image in PDF reports.
 * Common usages are: 'This is a draft', 'Not validated', 'Confidential', ...
 */
val CAR_NAME_WATERMARK_ELFIN_REF = "watermarkElfin"
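/**
 * Minimal sketch (hypothetical helper, not part of the original API): how a
 * report parameter map would resolve the effective page orientation, given
 * that `landscape` is the only accepted value and anything else falls back
 * to `portrait`.
 */
def effectivePageOrientation(params: Map[String, String]): String =
params.get(CAR_NAME_PAGE_ORIENTATION) match {
case Some(CAR_VALUE_PAGE_ORIENTATION_LANDSCAPE) => CAR_VALUE_PAGE_ORIENTATION_LANDSCAPE
case _ => "portrait"
}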
}
|
bsisa/hb-api
|
app/ch/bsisa/hyperbird/report/ReportConfig.scala
|
Scala
|
gpl-2.0
| 2,770
|
package org.littlewings.javaee7.config
import javax.inject.Inject
import org.apache.deltaspike.core.api.config.ConfigResolver
import org.apache.deltaspike.core.api.projectstage.ProjectStage
import org.apache.deltaspike.testcontrol.api.junit.CdiTestRunner
import org.junit.{Before, Test}
import org.junit.runner.RunWith
import org.scalatest.Matchers
import org.scalatest.junit.JUnitSuite
@RunWith(classOf[CdiTestRunner])
class CustomConfigSpec extends JUnitSuite with Matchers {
@Inject
var myApplicationConfig: MyApplicationConfig = _
@Inject
var projectStage: ProjectStage = _
@Before
def setUp(): Unit = {
// ConfigResolver caches the ProjectStage in a private static field; reset it
// via reflection so the stage injected into this test is actually used.
val configResolverProjectStageField = classOf[ConfigResolver].getDeclaredField("projectStage")
configResolverProjectStageField.setAccessible(true)
configResolverProjectStageField.set(null, projectStage)
}
@Test
def test(): Unit = {
myApplicationConfig.applicationName should be("Default")
}
}
|
kazuhira-r/javaee7-scala-examples
|
cdi-deltaspike-configuration/src/test/scala/org/littlewings/javaee7/config/CustomConfigSpec.scala
|
Scala
|
mit
| 943
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import matchers.{BeMatcher, MatchResult, BePropertyMatcher, BePropertyMatchResult}
import exceptions.TestFailedException
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
class ShouldNotShorthandForAllSpec extends Spec with EmptyMocks with BookPropertyMatchers {
def errorMessage(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (ShouldNotShorthandForAllSpec.scala:" + lineNumber + ") \\n" +
"in " + decorateToStringValue(left)
object `The shouldNot syntax` {
def `should work with theSameInstanceAs` {
val string = "Hi"
val obj: AnyRef = string
val otherString = new String("Hi")
all(List(otherString)) shouldNot { be theSameInstanceAs (string) }
all(List(otherString)) shouldNot be theSameInstanceAs (string)
val list1: List[AnyRef] = List(obj)
val caught1 = intercept[TestFailedException] {
all(list1) shouldNot { be theSameInstanceAs (string) }
}
assert(caught1.message === Some(errorMessage(0, "\\"Hi\\" was the same instance as \\"Hi\\"", thisLineNumber - 2, list1)))
assert(caught1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val list2: List[AnyRef] = List(obj)
val caught2 = intercept[TestFailedException] {
all(list2) shouldNot be theSameInstanceAs string
}
assert(caught2.message === Some(errorMessage(0, "\\"Hi\\" was the same instance as \\"Hi\\"", thisLineNumber - 2, list2)))
assert(caught2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should work with any` {
all(List(1)) shouldNot { be (2) }
all(List(1)) shouldNot be (2)
all(List("hi")) shouldNot be (null)
val list1 = List(1)
val caught1 = intercept[TestFailedException] {
all(list1) shouldNot { be (1) }
}
assert(caught1.message === Some(errorMessage(0, "1 was equal to 1", thisLineNumber - 2, list1)))
assert(caught1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val list2 = List(1)
val caught2 = intercept[TestFailedException] {
all(list1) shouldNot be (1)
}
assert(caught2.message === Some(errorMessage(0, "1 was equal to 1", thisLineNumber - 2, list2)))
assert(caught2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val list3 = List[String](null)
val caught3 = intercept[TestFailedException] {
all(list3) shouldNot be (null)
}
assert(caught3.message === Some(errorMessage(0, "The reference was null", thisLineNumber - 2, list3)))
assert(caught3.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val list4 = List(1)
val caught4 = intercept[TestFailedException] {
all(list4) shouldNot be (1)
}
assert(caught4.message === Some(errorMessage(0, "1 was equal to 1", thisLineNumber - 2, list4)))
assert(caught4.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
val list5 = List(1)
val caught5 = intercept[TestFailedException] {
all(list5) shouldNot (not (not be (1)))
}
assert(caught5.message === Some(errorMessage(0, "1 was equal to 1", thisLineNumber - 2, list5)))
assert(caught5.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught5.failedCodeLineNumber === Some(thisLineNumber - 4))
all(List(6)) shouldNot be > 7
val list6 = List(8)
val caught6 = intercept[TestFailedException] {
all(list6) shouldNot be > 7
}
assert(caught6.message === Some(errorMessage(0, "8 was greater than 7", thisLineNumber - 2, list6)))
assert(caught6.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught6.failedCodeLineNumber === Some(thisLineNumber - 4))
all(List(8)) shouldNot be < 7
val list7 = List(5)
val caught7 = intercept[TestFailedException] {
all(list7) shouldNot be < 7
}
assert(caught7.message === Some(errorMessage(0, "5 was less than 7", thisLineNumber - 2, list7)))
assert(caught7.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught7.failedCodeLineNumber === Some(thisLineNumber - 4))
all(List(3)) shouldNot be >= 7
val list8 = List(8)
val caught8 = intercept[TestFailedException] {
all(list8) shouldNot be >= 7
}
assert(caught8.message === Some(errorMessage(0, "8 was greater than or equal to 7", thisLineNumber - 2, list8)))
assert(caught8.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught8.failedCodeLineNumber === Some(thisLineNumber - 4))
all(List(8)) shouldNot be <= 7
val list9 = List(3)
val caught9 = intercept[TestFailedException] {
all(list9) shouldNot be <= 7
}
assert(caught9.message === Some(errorMessage(0, "3 was less than or equal to 7", thisLineNumber - 2, list9)))
assert(caught9.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught9.failedCodeLineNumber === Some(thisLineNumber - 4))
all(List(true)) shouldNot be (false)
val list10 = List(true)
val caught10 = intercept[TestFailedException] {
all(list10) shouldNot be (true)
}
assert(caught10.message === Some(errorMessage(0, "true was true", thisLineNumber - 2, list10)))
assert(caught10.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught10.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should work with BeMatcher` {
class OddMatcher extends BeMatcher[Int] {
def apply(left: Int): MatchResult = {
MatchResult(
left % 2 == 1,
left.toString + " was even",
left.toString + " was odd"
)
}
}
val odd = new OddMatcher
val even = not (odd)
all(List(2)) shouldNot be (odd)
all(List(1)) shouldNot be (even)
all(List(22)) shouldNot (not (be (even)))
all(List(1)) shouldNot (not (be (odd)))
val list1 = List(3)
val caught1 = intercept[TestFailedException] {
all(list1) shouldNot be (odd)
}
assert(caught1.message === Some(errorMessage(0, "3 was odd", thisLineNumber - 2, list1)))
assert(caught1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val list2 = List(6)
val caught2 = intercept[TestFailedException] {
all(list2) shouldNot be (even)
}
assert(caught2.message === Some(errorMessage(0, "6 was even", thisLineNumber - 2, list2)))
assert(caught2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val list3 = List(6)
val caught3 = intercept[TestFailedException] {
all(list3) shouldNot (not (be (odd)))
}
assert(caught3.message === Some(errorMessage(0, "6 was even", thisLineNumber - 2, list3)))
assert(caught3.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should work with symbol` {
all(List(notEmptyMock)) shouldNot { be ('empty) }
all(List(notEmptyMock)) shouldNot be ('empty)
all(List(isNotEmptyMock)) shouldNot { be ('empty) }
all(List(isNotEmptyMock)) shouldNot be ('empty)
val list1 = List(noPredicateMock)
val ex1 = intercept[TestFailedException] {
all(list1) shouldNot { be ('empty) }
}
assert(ex1.message === Some(errorMessage(0, "NoPredicateMock has neither an empty nor an isEmpty method", thisLineNumber - 2, list1)))
assert(ex1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(ex1.failedCodeLineNumber === Some(thisLineNumber - 4))
val list2 = List(noPredicateMock)
val ex2 = intercept[TestFailedException] {
all(list2) shouldNot (be ('full))
}
assert(ex2.message === Some(errorMessage(0, "NoPredicateMock has neither a full nor an isFull method", thisLineNumber - 2, list2)))
assert(ex2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(ex2.failedCodeLineNumber === Some(thisLineNumber - 4))
val list3 = List(noPredicateMock)
val ex3 = intercept[TestFailedException] {
all(list3) shouldNot be ('empty)
}
assert(ex3.message === Some(errorMessage(0, "NoPredicateMock has neither an empty nor an isEmpty method", thisLineNumber - 2, list3)))
assert(ex3.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(ex3.failedCodeLineNumber === Some(thisLineNumber - 4))
val list4 = List(noPredicateMock)
val ex4 = intercept[TestFailedException] {
all(list4) shouldNot be ('full)
}
assert(ex4.message === Some(errorMessage(0, "NoPredicateMock has neither a full nor an isFull method", thisLineNumber - 2, list4)))
assert(ex4.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(ex4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should work with BePropertyMatcher` {
case class MyFile(
val name: String,
val file: Boolean,
val isDirectory: Boolean
)
class FileBePropertyMatcher extends BePropertyMatcher[MyFile] {
def apply(file: MyFile) = {
new BePropertyMatchResult(file.file, "file")
}
}
class DirectoryBePropertyMatcher extends BePropertyMatcher[MyFile] {
def apply(file: MyFile) = {
new BePropertyMatchResult(file.isDirectory, "directory")
}
}
def file = new FileBePropertyMatcher
def directory = new DirectoryBePropertyMatcher
val myFile = new MyFile("temp.txt", true, false)
val book = new Book("A Tale of Two Cities", "Dickens", 1859, 45, true)
val badBook = new Book("A Tale of Two Cities", "Dickens", 1859, 45, false)
all(List(badBook)) shouldNot be (goodRead)
all(List(badBook)) shouldNot be a goodRead
all(List(badBook)) shouldNot be an goodRead
val list1 = List(book)
val caught1 = intercept[TestFailedException] {
all(list1) shouldNot be (goodRead)
}
assert(caught1.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was goodRead", thisLineNumber - 2, list1)))
assert(caught1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val list2 = List(book)
val caught2 = intercept[TestFailedException] {
all(list2) shouldNot be a goodRead
}
assert(caught2.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was a goodRead", thisLineNumber - 2, list2)))
assert(caught2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val list3 = List(book)
val caught3 = intercept[TestFailedException] {
all(list3) shouldNot be an goodRead
}
assert(caught3.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was an goodRead", thisLineNumber - 2, list3)))
assert(caught3.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val list4 = List(book)
val caught4 = intercept[TestFailedException] {
all(list4) shouldNot (be (goodRead))
}
assert(caught4.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was goodRead", thisLineNumber - 2, list4)))
assert(caught4.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
val list5 = List(book)
val caught5 = intercept[TestFailedException] {
all(list5) shouldNot (be a (goodRead))
}
assert(caught5.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was a goodRead", thisLineNumber - 2, list5)))
assert(caught5.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught5.failedCodeLineNumber === Some(thisLineNumber - 4))
val list6 = List(book)
val caught6 = intercept[TestFailedException] {
all(list6) shouldNot (be an (goodRead))
}
assert(caught6.message === Some(errorMessage(0, "Book(A Tale of Two Cities,Dickens,1859,45,true) was an goodRead", thisLineNumber - 2, list6)))
assert(caught6.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught6.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should work with +-` {
val sevenDotOh = 7.0
val minusSevenDotOh = -7.0
val sevenDotOhFloat = 7.0f
val minusSevenDotOhFloat = -7.0f
val sevenLong = 7L
val minusSevenLong = -7L
val sevenInt = 7
val minusSevenInt = -7
val sevenShort: Short = 7
val minusSevenShort: Short = -7
val sevenByte: Byte = 7
val minusSevenByte: Byte = -7
// Double +- Double
all(List(sevenDotOh)) shouldNot { be (7.5 +- 0.2) }
all(List(sevenDotOh)) shouldNot be (7.5 +- 0.2)
all(List(sevenDotOh)) shouldNot be (6.5 +- 0.2)
all(List(minusSevenDotOh)) shouldNot { be (-7.5 +- 0.2) }
all(List(minusSevenDotOh)) shouldNot be (-7.5 +- 0.2)
all(List(minusSevenDotOh)) shouldNot be (-6.5 +- 0.2)
// Double +- Float
all(List(sevenDotOh)) shouldNot { be (7.5 +- 0.2f) }
all(List(sevenDotOh)) shouldNot be (7.5 +- 0.2f)
all(List(sevenDotOh)) shouldNot be (6.5 +- 0.2f)
all(List(minusSevenDotOh)) shouldNot { be (-7.5 +- 0.2f) }
all(List(minusSevenDotOh)) shouldNot be (-7.5 +- 0.2f)
all(List(minusSevenDotOh)) shouldNot be (-6.5 +- 0.2f)
// Double +- Long
all(List(sevenDotOh)) shouldNot { be (10.0 +- 2L) }
all(List(sevenDotOh)) shouldNot be (4.0 +- 2L)
all(List(sevenDotOh)) shouldNot be (9.1 +- 2L)
all(List(minusSevenDotOh)) shouldNot { be (-10.0 +- 2L) }
all(List(minusSevenDotOh)) shouldNot be (-4.0 +- 2L)
all(List(minusSevenDotOh)) shouldNot be (-9.1 +- 2L)
// Double +- Int
all(List(sevenDotOh)) shouldNot { be (10.0 +- 2) }
all(List(sevenDotOh)) shouldNot be (4.0 +- 2)
all(List(sevenDotOh)) shouldNot be (9.1 +- 2)
all(List(minusSevenDotOh)) shouldNot { be (-10.0 +- 2) }
all(List(minusSevenDotOh)) shouldNot be (-4.0 +- 2)
all(List(minusSevenDotOh)) shouldNot be (-9.1 +- 2)
// Double +- Short
all(List(sevenDotOh)) shouldNot { be (10.0 +- 2.toShort) }
all(List(sevenDotOh)) shouldNot be (4.0 +- 2.toShort)
all(List(sevenDotOh)) shouldNot be (9.1 +- 2.toShort)
all(List(minusSevenDotOh)) shouldNot { be (-10.0 +- 2.toShort) }
all(List(minusSevenDotOh)) shouldNot be (-4.0 +- 2.toShort)
all(List(minusSevenDotOh)) shouldNot be (-9.1 +- 2.toShort)
// Double +- Byte
all(List(sevenDotOh)) shouldNot { be (10.0 +- 2.toByte) }
all(List(sevenDotOh)) shouldNot be (4.0 +- 2.toByte)
all(List(sevenDotOh)) shouldNot be (9.1 +- 2.toByte)
all(List(minusSevenDotOh)) shouldNot { be (-10.0 +- 2.toByte) }
all(List(minusSevenDotOh)) shouldNot be (-4.0 +- 2.toByte)
all(List(minusSevenDotOh)) shouldNot be (-9.1 +- 2.toByte)
// Float +- Float
all(List(sevenDotOhFloat)) shouldNot { be (7.5f +- 0.2f) }
all(List(sevenDotOhFloat)) shouldNot be (7.5f +- 0.2f)
all(List(sevenDotOhFloat)) shouldNot be (6.5f +- 0.2f)
all(List(minusSevenDotOhFloat)) shouldNot { be (-7.5f +- 0.2f) }
all(List(minusSevenDotOhFloat)) shouldNot be (-7.5f +- 0.2f)
all(List(minusSevenDotOhFloat)) shouldNot be (-6.5f +- 0.2f)
// Float +- Long
all(List(sevenDotOhFloat)) shouldNot { be (10.0f +- 2L) }
all(List(sevenDotOhFloat)) shouldNot be (4.0f +- 2L)
all(List(sevenDotOhFloat)) shouldNot be (9.1f +- 2L)
all(List(minusSevenDotOhFloat)) shouldNot { be (-10.0f +- 2L) }
all(List(minusSevenDotOhFloat)) shouldNot be (-4.0f +- 2L)
all(List(minusSevenDotOhFloat)) shouldNot be (-9.1f +- 2L)
// Float +- Int
all(List(sevenDotOhFloat)) shouldNot { be (10.0f +- 2) }
all(List(sevenDotOhFloat)) shouldNot be (4.0f +- 2)
all(List(sevenDotOhFloat)) shouldNot be (9.1f +- 2)
all(List(minusSevenDotOhFloat)) shouldNot { be (-10.0f +- 2) }
all(List(minusSevenDotOhFloat)) shouldNot be (-4.0f +- 2)
all(List(minusSevenDotOhFloat)) shouldNot be (-9.1f +- 2)
// Float +- Short
all(List(sevenDotOhFloat)) shouldNot { be (10.0f +- 2.toShort) }
all(List(sevenDotOhFloat)) shouldNot be (4.0f +- 2.toShort)
all(List(sevenDotOhFloat)) shouldNot be (9.1f +- 2.toShort)
all(List(minusSevenDotOhFloat)) shouldNot { be (-10.0f +- 2.toShort) }
all(List(minusSevenDotOhFloat)) shouldNot be (-4.0f +- 2.toShort)
all(List(minusSevenDotOhFloat)) shouldNot be (-9.1f +- 2.toShort)
// Float +- Byte
all(List(sevenDotOhFloat)) shouldNot { be (10.0f +- 2.toByte) }
all(List(sevenDotOhFloat)) shouldNot be (4.0f +- 2.toByte)
all(List(sevenDotOhFloat)) shouldNot be (9.1f +- 2.toByte)
all(List(minusSevenDotOhFloat)) shouldNot { be (-10.0f +- 2.toByte) }
all(List(minusSevenDotOhFloat)) shouldNot be (-4.0f +- 2.toByte)
all(List(minusSevenDotOhFloat)) shouldNot be (-9.1f +- 2.toByte)
// Long +- Long
all(List(sevenLong)) shouldNot { be (10L +- 2L) }
all(List(sevenLong)) shouldNot be (4L +- 2L)
all(List(sevenLong)) shouldNot be (10L +- 2L)
all(List(minusSevenLong)) shouldNot { be (-10L +- 2L) }
all(List(minusSevenLong)) shouldNot be (-4L +- 2L)
all(List(minusSevenLong)) shouldNot be (-10L +- 2L)
// Long +- Int
all(List(sevenLong)) shouldNot { be (10L +- 2) }
all(List(sevenLong)) shouldNot be (4L +- 2)
all(List(sevenLong)) shouldNot be (10L +- 2)
all(List(minusSevenLong)) shouldNot { be (-10L +- 2) }
all(List(minusSevenLong)) shouldNot be (-4L +- 2)
all(List(minusSevenLong)) shouldNot be (-10L +- 2)
// Long +- Short
all(List(sevenLong)) shouldNot { be (10L +- 2.toShort) }
all(List(sevenLong)) shouldNot be (4L +- 2.toShort)
all(List(sevenLong)) shouldNot be (10L +- 2.toShort)
all(List(minusSevenLong)) shouldNot { be (-10L +- 2.toShort) }
all(List(minusSevenLong)) shouldNot be (-4L +- 2.toShort)
all(List(minusSevenLong)) shouldNot be (-10L +- 2.toShort)
// Long +- Byte
all(List(sevenLong)) shouldNot { be (10L +- 2.toByte) }
all(List(sevenLong)) shouldNot be (4L +- 2.toByte)
all(List(sevenLong)) shouldNot be (10L +- 2.toByte)
all(List(minusSevenLong)) shouldNot { be (-10L +- 2.toByte) }
all(List(minusSevenLong)) shouldNot be (-4L +- 2.toByte)
all(List(minusSevenLong)) shouldNot be (-10L +- 2.toByte)
// Int +- Int
all(List(sevenInt)) shouldNot { be (10 +- 2) }
all(List(sevenInt)) shouldNot be (4 +- 2)
all(List(sevenInt)) shouldNot be (10 +- 2)
all(List(minusSevenInt)) shouldNot { be (-10 +- 2) }
all(List(minusSevenInt)) shouldNot be (-4 +- 2)
all(List(minusSevenInt)) shouldNot be (-10 +- 2)
// Int +- Short
all(List(sevenInt)) shouldNot { be (10 +- 2.toShort) }
all(List(sevenInt)) shouldNot be (4 +- 2.toShort)
all(List(sevenInt)) shouldNot be (10 +- 2.toShort)
all(List(minusSevenInt)) shouldNot { be (-10 +- 2.toShort) }
all(List(minusSevenInt)) shouldNot be (-4 +- 2.toShort)
all(List(minusSevenInt)) shouldNot be (-10 +- 2.toShort)
// Int +- Byte
all(List(sevenInt)) shouldNot { be (10 +- 2.toByte) }
all(List(sevenInt)) shouldNot be (4 +- 2.toByte)
all(List(sevenInt)) shouldNot be (10 +- 2.toByte)
all(List(minusSevenInt)) shouldNot { be (-10 +- 2.toByte) }
all(List(minusSevenInt)) shouldNot be (-4 +- 2.toByte)
all(List(minusSevenInt)) shouldNot be (-10 +- 2.toByte)
// Short +- Short
all(List(sevenShort)) shouldNot { be (10.toShort +- 2.toShort) }
all(List(sevenShort)) shouldNot be (4.toShort +- 2.toShort)
all(List(sevenShort)) shouldNot be (10.toShort +- 2.toShort)
all(List(minusSevenShort)) shouldNot { be ((-10).toShort +- 2.toShort) }
all(List(minusSevenShort)) shouldNot be ((-4).toShort +- 2.toShort)
all(List(minusSevenShort)) shouldNot be ((-10).toShort +- 2.toShort)
// Short +- Byte
all(List(sevenShort)) shouldNot { be (10.toShort +- 2.toByte) }
all(List(sevenShort)) shouldNot be (4.toShort +- 2.toByte)
all(List(sevenShort)) shouldNot be (10.toShort +- 2.toByte)
all(List(minusSevenShort)) shouldNot { be ((-10).toShort +- 2.toByte) }
all(List(minusSevenShort)) shouldNot be ((-4).toShort +- 2.toByte)
all(List(minusSevenShort)) shouldNot be ((-10).toShort +- 2.toByte)
// Byte +- Byte
all(List(sevenByte)) shouldNot { be (10.toByte +- 2.toByte) }
all(List(sevenByte)) shouldNot be (4.toByte +- 2.toByte)
all(List(sevenByte)) shouldNot be (10.toByte +- 2.toByte)
all(List(minusSevenByte)) shouldNot { be ((-10).toByte +- 2.toByte) }
all(List(minusSevenByte)) shouldNot be ((-4).toByte +- 2.toByte)
all(List(minusSevenByte)) shouldNot be ((-10).toByte +- 2.toByte)
// Double +- Double
val list1 = List(sevenDotOh)
val caught1 = intercept[TestFailedException] {
all(list1) shouldNot be (7.1 +- 0.2)
}
assert(caught1.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 0.2", thisLineNumber - 2, list1)))
assert(caught1.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
// Double +- Float
val list2 = List(sevenDotOh)
val caught2 = intercept[TestFailedException] {
all(list2) shouldNot be (7.1 +- 0.2f)
}
assert(caught2.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 0.20000000298023224", thisLineNumber - 2, list2)))
assert(caught2.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
// Double +- Long
val list3 = List(sevenDotOh)
val caught3 = intercept[TestFailedException] {
all(list3) shouldNot be (7.1 +- 2L)
}
assert(caught3.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list3)))
assert(caught3.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
// Double +- Int
val list4 = List(sevenDotOh)
val caught4 = intercept[TestFailedException] {
all(list4) shouldNot be (7.1 +- 2)
}
assert(caught4.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list4)))
assert(caught4.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
// Double +- Short
val list5 = List(sevenDotOh)
val caught5 = intercept[TestFailedException] {
all(list5) shouldNot be (7.1 +- 2.toShort)
}
assert(caught5.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list5)))
assert(caught5.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught5.failedCodeLineNumber === Some(thisLineNumber - 4))
// Double +- Byte
val list6 = List(sevenDotOh)
val caught6 = intercept[TestFailedException] {
all(list6) shouldNot be (7.1 +- 2.toByte)
}
assert(caught6.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list6)))
assert(caught6.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught6.failedCodeLineNumber === Some(thisLineNumber - 4))
// Float +- Float
val list7 = List(sevenDotOhFloat)
val caught7 = intercept[TestFailedException] {
all(list7) shouldNot be (7.1f +- 0.2f)
}
assert(caught7.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 0.2", thisLineNumber - 2, list7)))
assert(caught7.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught7.failedCodeLineNumber === Some(thisLineNumber - 4))
// Float +- Long
val list8 = List(sevenDotOhFloat)
val caught8 = intercept[TestFailedException] {
all(list8) shouldNot be (7.1f +- 2L)
}
assert(caught8.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list8)))
assert(caught8.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught8.failedCodeLineNumber === Some(thisLineNumber - 4))
// Float +- Int
val list9 = List(sevenDotOhFloat)
val caught9 = intercept[TestFailedException] {
all(list9) shouldNot be (7.1f +- 2)
}
assert(caught9.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list9)))
assert(caught9.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught9.failedCodeLineNumber === Some(thisLineNumber - 4))
// Float +- Short
val list10 = List(sevenDotOhFloat)
val caught10 = intercept[TestFailedException] {
all(list10) shouldNot be (7.1f +- 2.toShort)
}
assert(caught10.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list10)))
assert(caught10.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught10.failedCodeLineNumber === Some(thisLineNumber - 4))
// Float +- Byte
val list11 = List(sevenDotOhFloat)
val caught11 = intercept[TestFailedException] {
all(list11) shouldNot be (7.1f +- 2.toByte)
}
assert(caught11.message === Some(errorMessage(0, "7.0 was 7.1 plus or minus 2.0", thisLineNumber - 2, list11)))
assert(caught11.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught11.failedCodeLineNumber === Some(thisLineNumber - 4))
// Long +- Long
val list12 = List(sevenLong)
val caught12 = intercept[TestFailedException] {
all(list12) shouldNot be (9L +- 2L)
}
assert(caught12.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list12)))
assert(caught12.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught12.failedCodeLineNumber === Some(thisLineNumber - 4))
// Long +- Int
val list13 = List(sevenLong)
val caught13 = intercept[TestFailedException] {
all(list13) shouldNot be (9L +- 2)
}
assert(caught13.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list13)))
assert(caught13.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught13.failedCodeLineNumber === Some(thisLineNumber - 4))
// Long +- Short
val list14 = List(sevenLong)
val caught14 = intercept[TestFailedException] {
all(list14) shouldNot be (9L +- 2.toShort)
}
assert(caught14.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list14)))
assert(caught14.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught14.failedCodeLineNumber === Some(thisLineNumber - 4))
// Long +- Byte
val list15 = List(sevenLong)
val caught15 = intercept[TestFailedException] {
all(list15) shouldNot be (9L +- 2.toByte)
}
assert(caught15.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list15)))
assert(caught15.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught15.failedCodeLineNumber === Some(thisLineNumber - 4))
// Int +- Int
val list16 = List(sevenInt)
val caught16 = intercept[TestFailedException] {
all(list16) shouldNot be (9 +- 2)
}
assert(caught16.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list16)))
assert(caught16.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught16.failedCodeLineNumber === Some(thisLineNumber - 4))
// Int +- Short
val list17 = List(sevenInt)
val caught17 = intercept[TestFailedException] {
all(list17) shouldNot be (9 +- 2.toShort)
}
assert(caught17.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list17)))
assert(caught17.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught17.failedCodeLineNumber === Some(thisLineNumber - 4))
// Int +- Byte
val list18 = List(sevenInt)
val caught18 = intercept[TestFailedException] {
all(list18) shouldNot be (9 +- 2.toByte)
}
assert(caught18.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list18)))
assert(caught18.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught18.failedCodeLineNumber === Some(thisLineNumber - 4))
// Short +- Short
val list19 = List(sevenShort)
val caught19 = intercept[TestFailedException] {
all(list19) shouldNot be (9.toShort +- 2.toShort)
}
assert(caught19.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list19)))
assert(caught19.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught19.failedCodeLineNumber === Some(thisLineNumber - 4))
// Short +- Byte
val list20 = List(sevenShort)
val caught20 = intercept[TestFailedException] {
all(list20) shouldNot be (9.toShort +- 2.toByte)
}
assert(caught20.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list20)))
assert(caught20.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught20.failedCodeLineNumber === Some(thisLineNumber - 4))
// Byte +- Byte
val list21 = List(sevenByte)
val caught21 = intercept[TestFailedException] {
all(list21) shouldNot be (9.toByte +- 2.toByte)
}
assert(caught21.message === Some(errorMessage(0, "7 was 9 plus or minus 2", thisLineNumber - 2, list21)))
assert(caught21.failedCodeFileName === Some("ShouldNotShorthandForAllSpec.scala"))
assert(caught21.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
|
cheeseng/scalatest
|
scalatest-test/src/test/scala/org/scalatest/ShouldNotShorthandForAllSpec.scala
|
Scala
|
apache-2.0
| 32,551
|
package com.mdataset.lib.worker.basic
import com.mdataset.excavator.core.ENode
import com.mdataset.lib.basic.model.MdsSourceMainDTO
import com.mdataset.lib.worker.basic.exchange.{MdsAPIExchangeWorker, MdsDataExchangeWorker, MdsDefaultAPIExchangeWorker, MdsDefaultDataExchangeWorker}
/**
 * Basic worker context
 */
object MdsWorkerBasicContext {
// Worker service adapter
var adapter: MdsAdapter = _
// Data source main body
var source: MdsSourceMainDTO = _
// API Service exchange implementation
var apiExchangeWorker: MdsAPIExchangeWorker = MdsDefaultAPIExchangeWorker
// BD Service exchange implementation
var dataExchangeWorker: MdsDataExchangeWorker = MdsDefaultDataExchangeWorker
var excavator: ENode = _
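// Minimal initialization sketch (hypothetical helper, not part of the
// original API): a worker bootstrap would populate the context once.
def init(a: MdsAdapter, s: MdsSourceMainDTO): Unit = {
adapter = a
source = s
}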
}
|
MDataSet/mds
|
modules/lib_worker_basic/src/main/scala/com/mdataset/lib/worker/basic/MdsWorkerBasicContext.scala
|
Scala
|
apache-2.0
| 707
|
package uk.gov.homeoffice.console
import org.specs2.mutable.Specification
class ConsoleSpec extends Specification with Console {
"Console" should {
"present some text" in {
println(present("Hello World"))
ok
}
}
}
|
UKHomeOffice/rtp-io-lib
|
src/test/scala/uk/gov/homeoffice/console/ConsoleSpec.scala
|
Scala
|
mit
| 239
|
package no.digipost.labs.items
import scala.language.postfixOps
import scala.collection.mutable.ListBuffer
import java.util.Date
import org.bson.types.ObjectId
class TestItemsRepository extends ItemsRepository {
lazy val items: ListBuffer[DbItem] = ListBuffer()
def search(query: Option[String]) = List()
def findAll(start: Option[Int] = Some(0)): (Seq[DbItem], Int) = {
val found = items.drop(start.getOrElse(0))
(found, found.size)
}
def findById(id: String): Option[DbItem] = {
items.find(_._id.toHexString == id)
}
def findByOldId(oldId: String): Option[DbItem] = items.find(_.oldId.contains(oldId))
def findByType(t: String, start: Option[Int] = Some(0)): (Seq[DbItem], Int) = {
val found = items.filter(_.`type` == t).drop(start.getOrElse(0))
(found, found.size)
}
override def insert(item: DbItem) = {
items += item
Some(item)
}
override def update(item: DbItem, id: String): Option[DbItem] =
updateItem(id, _.copy(title = item.title, body = item.body, source = item.source))
override def delete(id: String) = items.find(_._id.toHexString == id).foreach(i => items -= i)
override def insertComment(comment: DbComment, parentId: String): Option[DbItem] = {
updateItem(parentId, i => i.copy(comments = comment :: i.comments))
}
def findLatestComments(): Seq[(String, DbComment)] = items.flatMap(item => item.comments.map(comment => (item._id.toHexString, comment))).sortBy(_._2.date)(Ordering[Date].reverse)
override def deleteComment(parentId: String, commentId: String): Option[DbItem] = {
updateItem(parentId, i => i.copy(comments = i.comments.filter(_._id.toHexString != commentId)))
}
override def addVote(itemId: String, userId: String) = {
updateItem(itemId, i => i.copy(votes = i.votes + new ObjectId(userId)))
}
private def updateItem(itemId: String, action: DbItem => DbItem) = {
val oldItem = items.find(_._id.toHexString == itemId)
val newItem = oldItem.map(action)
oldItem foreach (items -=)
newItem foreach (items +=)
newItem
}
}
|
digipost/labs
|
backend/src/test/scala/no/digipost/labs/items/TestItemsRepository.scala
|
Scala
|
apache-2.0
| 2,036
|
package rpm4s.cli.repo
import org.http4s.Uri
case class RepoFile(
name: String,
baseurl: Uri,
enabled: Boolean = true,
autorefresh: Boolean = true,
gpgcheck: Boolean = false
)
object RepoFile {
def toFile(repoFile: RepoFile): String = {
s"""|[${repoFile.name}]
|type=rpm-md
|baseurl=${repoFile.baseurl}
|enabled=${if (repoFile.enabled) "1" else "0"}
|autorefresh=${if (repoFile.autorefresh) "1" else "0"}
|gpgcheck=${if (repoFile.gpgcheck) "1" else "0"}
""".stripMargin
}
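// Usage sketch (illustrative name and URL): render a repo entry to the
// INI-style text used for rpm-md repositories.
def exampleFile: String =
toFile(RepoFile("example-oss", Uri.unsafeFromString("http://example.org/repo/oss")))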
}
|
lucidd/rpm4s
|
cli/src/main/scala/rpm4s/cli/repo/RepoFile.scala
|
Scala
|
mit
| 548
|
/**
* Copyright (C) 2017-2018 the original author or authors.
* See the LICENSE file distributed with this work for additional
* information regarding copyright ownership.
*
* @author Matthew Cross <github.com/mcross1991>
*/
package mahjong
package test
import org.scalatest.{FlatSpec, Matchers}
class BaseSpec extends FlatSpec with Matchers
|
mcross1991/mahjong
|
src/test/scala/mahjong/BaseSpec.scala
|
Scala
|
mit
| 357
|
import io.gatling.core.Predef._
import io.gatling.http.Predef._
object TeamNotes {
def lookup() = exec(http("TeamNotes")
.get("""/patientNote/listTeam"""))
.pause(10)
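// Usage sketch (hypothetical scenario name): the chain above would typically
// be wired into a scenario like this.
def exampleScenario = scenario("Clinician team notes").exec(lookup())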
}
|
silverbullet-dk/opentele-performance-tests
|
src/test/scala/user-files/simulations/processes/clinician/TeamNotes.scala
|
Scala
|
apache-2.0
| 182
|
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.viz
import java.io.Writer
import com.twitter.summingbird.{Platform, Producer, Dependants, NamedProducer, IdentityKeyedProducer}
import com.twitter.summingbird.planner._
object VizGraph {
def apply[P <: Platform[P]](dag: Dag[P], writer: Writer): Unit = writer.write(apply(dag))
def apply[P <: Platform[P]](dag: Dag[P]): String = DagViz(dag).toString
def apply[P <: Platform[P]](tail: Producer[P, _], writer: Writer): Unit = writer.write(VizGraph(tail))
def apply[P <: Platform[P]](tail: Producer[P, _]): String = ProducerViz(tail).toString
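// Usage sketch (hypothetical helper): dump the producer graph to stdout for
// ad-hoc inspection; apply(tail) renders it via ProducerViz.
def dump[P <: Platform[P]](tail: Producer[P, _]): Unit = Console.out.println(apply(tail))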
}
|
sengt/summingbird-batch
|
summingbird-core/src/main/scala/com/twitter/summingbird/viz/Viz.scala
|
Scala
|
apache-2.0
| 1,150
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.util
import org.apache.flink.table.calcite.{FlinkTypeFactory, FlinkTypeSystem}
import org.apache.calcite.rex.{RexBuilder, RexUtil}
import org.apache.calcite.sql.`type`.SqlTypeName._
import org.apache.calcite.sql.fun.SqlStdOperatorTable._
import org.junit.Assert.{assertEquals, assertFalse}
import org.junit.Test
import java.math.BigDecimal
class FlinkRexUtilTest {
private val typeFactory: FlinkTypeFactory = new FlinkTypeFactory(new FlinkTypeSystem())
private val rexBuilder = new RexBuilder(typeFactory)
private val varcharType = typeFactory.createSqlType(VARCHAR)
private val intType = typeFactory.createSqlType(INTEGER)
@Test
def testToCnf_ComplexPredicate(): Unit = {
// From TPC-DS q41.sql
val i_manufact = rexBuilder.makeInputRef(varcharType, 0)
val i_category = rexBuilder.makeInputRef(varcharType, 1)
val i_color = rexBuilder.makeInputRef(varcharType, 2)
val i_units = rexBuilder.makeInputRef(varcharType, 3)
val i_size = rexBuilder.makeInputRef(varcharType, 4)
// This predicate contains 95 RexCalls. However, if it is converted to CNF,
// the result contains 736450 RexCalls.
val predicate = rexBuilder.makeCall(OR,
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_manufact, rexBuilder.makeLiteral("able")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Women")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("powder")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("khaki"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Ounce")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Oz"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("medium")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("extra large"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Women")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("brown")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("honeydew"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Bunch")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Ton"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("N/A")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("small"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Men")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("floral")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("deep"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("N/A")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Dozen"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("petite")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("large"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Men")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("light")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("cornflower"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Box")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Pound"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("medium")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("extra large"))
)
)
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_manufact, rexBuilder.makeLiteral("able")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Women")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("midnight")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("snow"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Pallet")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Gross"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("medium")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("extra large"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Women")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("cyan")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("papaya"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Cup")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Dram"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("N/A")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("small"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Men")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("orange")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("frosted"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Each")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Tbl"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("petite")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("large"))
)
),
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, i_category, rexBuilder.makeLiteral("Men")),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("forest")),
rexBuilder.makeCall(EQUALS, i_color, rexBuilder.makeLiteral("ghost"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Lb")),
rexBuilder.makeCall(EQUALS, i_units, rexBuilder.makeLiteral("Bundle"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("medium")),
rexBuilder.makeCall(EQUALS, i_size, rexBuilder.makeLiteral("extra large"))
)
)
)
)
)
    // the number of RexCalls in the CNF result exceeds 95 * 2 (the default limit when
    // maxCnfNodeCount is negative), so toCnf returns the original expression
val newPredicate1 = FlinkRexUtil.toCnf(rexBuilder, -1, predicate)
assertEquals(predicate.toString, newPredicate1.toString)
val newPredicate2 = FlinkRexUtil.toCnf(rexBuilder, 736449, predicate)
assertEquals(predicate.toString, newPredicate2.toString)
val newPredicate3 = FlinkRexUtil.toCnf(rexBuilder, 736450, predicate)
assertEquals(RexUtil.toCnf(rexBuilder, predicate).toString, newPredicate3.toString)
val newPredicate4 = FlinkRexUtil.toCnf(rexBuilder, Int.MaxValue, predicate)
assertFalse(RexUtil.eq(predicate, newPredicate4))
assertEquals(RexUtil.toCnf(rexBuilder, predicate).toString, newPredicate4.toString)
}
@Test
def testToCnf_SimplePredicate(): Unit = {
// (a="1" AND b="2") OR c="3"
val a = rexBuilder.makeInputRef(varcharType, 0)
val b = rexBuilder.makeInputRef(varcharType, 1)
val c = rexBuilder.makeInputRef(varcharType, 2)
val predicate = rexBuilder.makeCall(OR,
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, rexBuilder.makeLiteral("1")),
rexBuilder.makeCall(EQUALS, b, rexBuilder.makeLiteral("2"))
),
rexBuilder.makeCall(EQUALS, c, rexBuilder.makeLiteral("3"))
)
// (a="1" OR c="3") OR (b="2" OR c="3")
val expected = rexBuilder.makeCall(AND,
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, a, rexBuilder.makeLiteral("1")),
rexBuilder.makeCall(EQUALS, c, rexBuilder.makeLiteral("3"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, b, rexBuilder.makeLiteral("2")),
rexBuilder.makeCall(EQUALS, c, rexBuilder.makeLiteral("3"))
)
)
val newPredicate1 = FlinkRexUtil.toCnf(rexBuilder, -1, predicate)
assertEquals(expected.toString, newPredicate1.toString)
assertEquals(expected.toString, RexUtil.toCnf(rexBuilder, predicate).toString)
val newPredicate2 = FlinkRexUtil.toCnf(rexBuilder, 0, predicate)
assertEquals(predicate.toString, newPredicate2.toString)
}
@Test
def testSimplify(): Unit = {
val a = rexBuilder.makeInputRef(varcharType, 0)
val b = rexBuilder.makeInputRef(varcharType, 1)
val c = rexBuilder.makeInputRef(intType, 2)
val d = rexBuilder.makeInputRef(intType, 3)
// a = b AND a = b
val predicate0 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(EQUALS, a, b)
)
val newPredicate0 = FlinkRexUtil.simplify(rexBuilder, predicate0)
assertEquals(rexBuilder.makeCall(EQUALS, a, b).toString, newPredicate0.toString)
// a = b AND b = a
val predicate1 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(EQUALS, b, a)
)
val newPredicate1 = FlinkRexUtil.simplify(rexBuilder, predicate1)
assertEquals(rexBuilder.makeCall(EQUALS, a, b).toString, newPredicate1.toString)
// a = b OR b = a
val predicate2 = rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(EQUALS, b, a)
)
val newPredicate2 = FlinkRexUtil.simplify(rexBuilder, predicate2)
assertEquals(rexBuilder.makeCall(EQUALS, a, b).toString, newPredicate2.toString)
// a = b AND c < d AND b = a AND d > c
val predicate3 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d),
rexBuilder.makeCall(EQUALS, b, a),
rexBuilder.makeCall(GREATER_THAN, d, c)
)
val newPredicate3 = FlinkRexUtil.simplify(rexBuilder, predicate3)
assertEquals(rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d)).toString,
newPredicate3.toString)
    // cast(a as INTEGER) >= c AND c <= cast(a as INTEGER)
val predicate4 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(GREATER_THAN_OR_EQUAL,
rexBuilder.makeCast(typeFactory.createSqlType(INTEGER), a), c),
rexBuilder.makeCall(LESS_THAN_OR_EQUAL,
c, rexBuilder.makeCast(typeFactory.createSqlType(INTEGER), a))
)
val newPredicate4 = FlinkRexUtil.simplify(rexBuilder, predicate4)
assertEquals(rexBuilder.makeCall(GREATER_THAN_OR_EQUAL,
rexBuilder.makeCast(typeFactory.createSqlType(INTEGER), a), c).toString,
newPredicate4.toString)
    // (substring(a, 1, 3) = b OR c <= d + 1 OR d + 1 >= c)
    // AND
    // (b = substring(a, 1, 3) OR d + 1 >= c OR c <= d + 1)
val aSubstring13 = rexBuilder.makeCall(SUBSTRING, a,
rexBuilder.makeBigintLiteral(BigDecimal.ONE),
rexBuilder.makeBigintLiteral(BigDecimal.valueOf(3)))
val dPlus1 = rexBuilder.makeCall(PLUS, d, rexBuilder.makeBigintLiteral(BigDecimal.ONE))
val predicate5 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, aSubstring13, b),
rexBuilder.makeCall(LESS_THAN_OR_EQUAL, c, dPlus1),
rexBuilder.makeCall(GREATER_THAN_OR_EQUAL, dPlus1, c)),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, b, aSubstring13),
rexBuilder.makeCall(GREATER_THAN_OR_EQUAL, dPlus1, c),
rexBuilder.makeCall(LESS_THAN_OR_EQUAL, c, dPlus1))
)
val newPredicate5 = FlinkRexUtil.simplify(rexBuilder, predicate5)
assertEquals(rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, aSubstring13, b),
rexBuilder.makeCall(LESS_THAN_OR_EQUAL, c, dPlus1)).toString,
newPredicate5.toString)
// (a = b OR c < d OR a > 'l') AND (b = a OR d > c OR b < 'k')
val predicate6 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d),
rexBuilder.makeCall(GREATER_THAN, a, rexBuilder.makeLiteral("l"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, b, a),
rexBuilder.makeCall(GREATER_THAN, d, c),
rexBuilder.makeCall(LESS_THAN, b, rexBuilder.makeLiteral("k"))
)
)
val newPredicate6 = FlinkRexUtil.simplify(rexBuilder, predicate6)
assertEquals(rexBuilder.makeCall(AND,
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d),
rexBuilder.makeCall(GREATER_THAN, a, rexBuilder.makeLiteral("l"))
),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d),
rexBuilder.makeCall(LESS_THAN, b, rexBuilder.makeLiteral("k"))
)
).toString, newPredicate6.toString)
// (a = b AND c < d) AND b = a
val predicate7 = rexBuilder.makeCall(AND,
rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d)
),
rexBuilder.makeCall(EQUALS, b, a)
)
val newPredicate7 = FlinkRexUtil.simplify(rexBuilder, predicate7)
assertEquals(rexBuilder.makeCall(AND,
rexBuilder.makeCall(EQUALS, a, b),
rexBuilder.makeCall(LESS_THAN, c, d)).toString,
newPredicate7.toString)
// b >= a OR (a <= b OR c = d)
val predicate8 = rexBuilder.makeCall(OR,
rexBuilder.makeCall(GREATER_THAN_OR_EQUAL, b, a),
rexBuilder.makeCall(OR,
rexBuilder.makeCall(LESS_THAN_OR_EQUAL, a, b),
rexBuilder.makeCall(EQUALS, c, d)
)
)
val newPredicate8 = FlinkRexUtil.simplify(rexBuilder, predicate8)
assertEquals(rexBuilder.makeCall(OR,
rexBuilder.makeCall(GREATER_THAN_OR_EQUAL, b, a),
rexBuilder.makeCall(EQUALS, c, d)).toString,
newPredicate8.toString)
// true AND true
val predicate9 = rexBuilder.makeCall(AND,
rexBuilder.makeLiteral(true), rexBuilder.makeLiteral(true))
val newPredicate9 = FlinkRexUtil.simplify(rexBuilder, predicate9)
assertEquals(rexBuilder.makeLiteral(true).toString, newPredicate9.toString)
// false OR false
val predicate10 = rexBuilder.makeCall(OR,
rexBuilder.makeLiteral(false), rexBuilder.makeLiteral(false))
val newPredicate10 = FlinkRexUtil.simplify(rexBuilder, predicate10)
assertEquals(rexBuilder.makeLiteral(false).toString, newPredicate10.toString)
// a = a
val predicate11 = rexBuilder.makeCall(EQUALS, a, a)
val newPredicate11 = FlinkRexUtil.simplify(rexBuilder, predicate11)
assertEquals(rexBuilder.makeLiteral(true).toString, newPredicate11.toString)
// a >= a
val predicate12 = rexBuilder.makeCall(GREATER_THAN_OR_EQUAL, a, a)
val newPredicate12 = FlinkRexUtil.simplify(rexBuilder, predicate12)
assertEquals(rexBuilder.makeLiteral(true).toString, newPredicate12.toString)
// a <= a
val predicate13 = rexBuilder.makeCall(LESS_THAN_OR_EQUAL, a, a)
val newPredicate13 = FlinkRexUtil.simplify(rexBuilder, predicate13)
assertEquals(rexBuilder.makeLiteral(true).toString, newPredicate13.toString)
// a <> a
val predicate14 = rexBuilder.makeCall(NOT_EQUALS, a, a)
val newPredicate14 = FlinkRexUtil.simplify(rexBuilder, predicate14)
assertEquals(rexBuilder.makeLiteral(false).toString, newPredicate14.toString)
// a > a
val predicate15 = rexBuilder.makeCall(GREATER_THAN, a, a)
val newPredicate15 = FlinkRexUtil.simplify(rexBuilder, predicate15)
assertEquals(rexBuilder.makeLiteral(false).toString, newPredicate15.toString)
// a < a
val predicate16 = rexBuilder.makeCall(LESS_THAN, a, a)
val newPredicate16 = FlinkRexUtil.simplify(rexBuilder, predicate16)
assertEquals(rexBuilder.makeLiteral(false).toString, newPredicate16.toString)
}
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/plan/util/FlinkRexUtilTest.scala
|
Scala
|
apache-2.0
| 18,246
|
package provingground.library
import provingground._
import HoTT._
import induction._
object bool$cases_on {
lazy val value = lambda("'e_1326394388" :: FuncTyp("bool" :: Type, Type))(lambda("'f_363947866" :: "bool" :: Type)(lmbda("'g_1623762820" :: ("'e_1326394388" :: FuncTyp("bool" :: Type, Type))("bool.ff" :: "bool" :: Type))(lmbda("'h_1083310302" :: ("'e_1326394388" :: FuncTyp("bool" :: Type, Type))("bool.tt" :: "bool" :: Type))(({
val rxyz = boolInd.value.induc(lmbda("$ta_696879078" :: "bool" :: Type)(("'e_1326394388" :: FuncTyp("bool" :: Type, Type))("$ta_696879078" :: "bool" :: Type)))
rxyz
})("'g_1623762820" :: ("'e_1326394388" :: FuncTyp("bool" :: Type, Type))("bool.ff" :: "bool" :: Type))("'h_1083310302" :: ("'e_1326394388" :: FuncTyp("bool" :: Type, Type))("bool.tt" :: "bool" :: Type))("'f_363947866" :: "bool" :: Type)))))
}
|
siddhartha-gadgil/ProvingGround
|
leanlib/src/main/scala/provingground/library/definitions/bool.cases_on.scala
|
Scala
|
mit
| 859
|
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark
import org.apache.commons.csv.CSVFormat
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.spark.sql.{DataFrame, SQLContext}
import com.databricks.spark.csv.util.TextFile
package object csv {
val defaultCsvFormat =
    CSVFormat.DEFAULT.withRecordSeparator(System.getProperty("line.separator", "\n"))
/**
* Adds a method, `csvFile`, to SQLContext that allows reading CSV data.
*/
implicit class CsvContext(sqlContext: SQLContext) extends Serializable{
def csvFile(
filePath: String,
useHeader: Boolean = true,
delimiter: Char = ',',
quote: Char = '"',
escape: Character = null,
comment: Character = null,
mode: String = "PERMISSIVE",
parserLib: String = "COMMONS",
ignoreLeadingWhiteSpace: Boolean = false,
ignoreTrailingWhiteSpace: Boolean = false,
charset: String = TextFile.DEFAULT_CHARSET.name(),
inferSchema: Boolean = false): DataFrame = {
val csvRelation = CsvRelation(
() => TextFile.withCharset(sqlContext.sparkContext, filePath, charset),
location = Some(filePath),
useHeader = useHeader,
delimiter = delimiter,
quote = quote,
escape = escape,
comment = comment,
parseMode = mode,
parserLib = parserLib,
ignoreLeadingWhiteSpace = ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace = ignoreTrailingWhiteSpace,
treatEmptyValuesAsNulls = false,
inferCsvSchema = inferSchema)(sqlContext)
sqlContext.baseRelationToDataFrame(csvRelation)
}
def tsvFile(
filePath: String,
useHeader: Boolean = true,
parserLib: String = "COMMONS",
ignoreLeadingWhiteSpace: Boolean = false,
ignoreTrailingWhiteSpace: Boolean = false,
charset: String = TextFile.DEFAULT_CHARSET.name(),
inferSchema: Boolean = false): DataFrame = {
val csvRelation = CsvRelation(
() => TextFile.withCharset(sqlContext.sparkContext, filePath, charset),
location = Some(filePath),
useHeader = useHeader,
      delimiter = '\t',
      quote = '"',
      escape = '\\',
comment = '#',
parseMode = "PERMISSIVE",
parserLib = parserLib,
ignoreLeadingWhiteSpace = ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace = ignoreTrailingWhiteSpace,
treatEmptyValuesAsNulls = false,
inferCsvSchema = inferSchema)(sqlContext)
sqlContext.baseRelationToDataFrame(csvRelation)
}
}
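  // A minimal usage sketch for the implicit csvFile/tsvFile readers above.
  // The SQLContext instance and file paths are hypothetical, for illustration only:
  //
  //   import com.databricks.spark.csv._
  //   val cars = sqlContext.csvFile("/data/cars.csv", useHeader = true, delimiter = ',')
  //   val tabs = sqlContext.tsvFile("/data/cars.tsv", useHeader = false)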
implicit class CsvSchemaRDD(dataFrame: DataFrame) {
    /**
     * Saves the DataFrame as CSV files. By default uses ',' as the delimiter and writes no
     * header line (set the "header" parameter to "true" to include one).
     * If compressionCodec is not null the resulting output will be compressed.
     * Note that a codec entry in the parameters map will be ignored.
     */
def saveAsCsvFile(path: String, parameters: Map[String, String] = Map(),
compressionCodec: Class[_ <: CompressionCodec] = null): Unit = {
// TODO(hossein): For nested types, we may want to perform special work
val delimiter = parameters.getOrElse("delimiter", ",")
val delimiterChar = if (delimiter.length == 1) {
delimiter.charAt(0)
} else {
throw new Exception("Delimiter cannot be more than one character.")
}
val escape = parameters.getOrElse("escape", null)
val escapeChar: Character = if (escape == null) {
null
} else if (escape.length == 1) {
escape.charAt(0)
} else {
throw new Exception("Escape character cannot be more than one character.")
}
      val quote = parameters.getOrElse("quote", "\"")
val quoteChar: Character = if (quote == null) {
null
} else if (quote.length == 1) {
quote.charAt(0)
} else {
throw new Exception("Quotation cannot be more than one character.")
}
val nullValue = parameters.getOrElse("nullValue", "null")
val csvFormat = defaultCsvFormat
.withDelimiter(delimiterChar)
.withQuote(quoteChar)
.withEscape(escapeChar)
.withSkipHeaderRecord(false)
.withNullString(nullValue)
val generateHeader = parameters.getOrElse("header", "false").toBoolean
val header = if (generateHeader) {
csvFormat.format(dataFrame.columns.map(_.asInstanceOf[AnyRef]): _*)
} else {
"" // There is no need to generate header in this case
}
val strRDD = dataFrame.rdd.mapPartitionsWithIndex { case (index, iter) =>
val csvFormat = defaultCsvFormat
.withDelimiter(delimiterChar)
.withQuote(quoteChar)
.withEscape(escapeChar)
.withSkipHeaderRecord(false)
.withNullString(nullValue)
new Iterator[String] {
var firstRow: Boolean = generateHeader
override def hasNext: Boolean = iter.hasNext || firstRow
override def next: String = {
if (iter.nonEmpty) {
val row = csvFormat.format(iter.next().toSeq.map(_.asInstanceOf[AnyRef]): _*)
if (firstRow) {
firstRow = false
header + csvFormat.getRecordSeparator() + row
} else {
row
}
} else {
firstRow = false
header
}
}
}
}
compressionCodec match {
case null => strRDD.saveAsTextFile(path)
case codec => strRDD.saveAsTextFile(path, codec)
}
}
}
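  // A hedged sketch of saveAsCsvFile; `df` and the output path are assumed to
  // exist, and the GzipCodec choice is illustrative:
  //
  //   import org.apache.hadoop.io.compress.GzipCodec
  //   df.saveAsCsvFile("/tmp/cars-out",
  //     Map("header" -> "true", "delimiter" -> ";", "nullValue" -> ""),
  //     classOf[GzipCodec])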
}
|
abridgett/spark-csv
|
src/main/scala/com/databricks/spark/csv/package.scala
|
Scala
|
apache-2.0
| 6,189
|
package crawling
trait Crawler {
def siteMap(): Iterable[String]
}
|
mlucchini/wd-crawler
|
src/main/scala/crawling/Crawler.scala
|
Scala
|
gpl-3.0
| 69
|
package com.ctask.data
import com.ctask.utils.UniqueId
import play.api.libs.json.{JsValue, Json, Reads, Writes}
import play.api.libs.json._
import play.api.libs.functional.syntax._
object TaskListJsonUtils {
val idGenerator = new UniqueId(1)
  // JSON reads/writes for TaskList
implicit val taskListJsonRead: Reads[TaskList] = (
(__ \ "nameStr").read[String] and
(__ \ "tasks").read(Reads.seq[Task](TaskJsonUtils.reads)) and
(__ \ "email").readNullable[String]
)((name, tasks, email) => new TaskList(name, tasks.toArray, email.orNull))
implicit val taskListWrites: Writes[TaskList] = (taskList: TaskList) => Json.obj(
"nameStr" -> taskList.name,
"tasks" -> taskList.tasks.map(TaskJsonUtils.taskWrites.writes),
"email" -> taskList.email
)
}
|
modsrm/ctask
|
common/src/main/scala/com/ctask/data/TaskListJsonUtils.scala
|
Scala
|
gpl-3.0
| 762
|
class C1 {
def f {}
}
class C2 extends C1 {
println(/* line: 2 */ f)
}
|
ilinum/intellij-scala
|
testdata/resolve2/inheritance/element/Function.scala
|
Scala
|
apache-2.0
| 75
|
package ems
import org.joda.time.DateTime
import scala.language.implicitConversions
package object storage {
implicit def toDateTime(dt: java.util.Date) = new DateTime(dt)
}
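// With the implicit conversion above in scope, a java.util.Date can be used
// wherever a Joda DateTime is expected (illustrative):
//   import ems.storage._
//   val dt: DateTime = new java.util.Date()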
|
javaBin/ems-redux
|
src/main/scala/ems/storage/package.scala
|
Scala
|
apache-2.0
| 178
|
/*
* ******************************************************************************
* Copyright 2012-2013 SpotRight
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ******************************************************************************
*/
package com.spotright.polidoro.testing.keyspaces
import com.netflix.astyanax.Cluster
import com.netflix.astyanax.ddl.ColumnFamilyDefinition
import com.netflix.astyanax.serializers.ComparatorType._
/**
* Define a ColumnFamily given a Cluster.
*/
object ColumnFamilyDef {
def apply(
cfName: String,
keyType: String = BYTESTYPE.tyn,
nameType: String = BYTESTYPE.tyn,
valueType: String = BYTESTYPE.tyn,
columnDefs: Map[String, ColumnDef] = Map.empty[String,ColumnDef]
): Cluster => ColumnFamilyDefinition = (cluster: Cluster) => {
val cdef = cluster.makeColumnFamilyDefinition()
val keyValidator = if (keyType.startsWith("(")) "CompositeType" + keyType else keyType
val nameComparator = if (nameType.startsWith("(")) "CompositeType" + nameType else nameType
cdef.setName(cfName)
cdef.setKeyValidationClass(keyValidator)
cdef.setComparatorType(nameComparator)
cdef.setDefaultValidationClass(valueType)
columnDefs.foreach {
case (name, ColumnDef(vc, ki)) =>
cluster.makeColumnDefinition()
.setName(name)
.setValidationClass(vc)
.setKeysIndex(ki)
}
cdef
}
}
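// Illustrative use of ColumnFamilyDef; the Cluster instance and column family
// name are hypothetical (UTF8TYPE comes from ComparatorType, like BYTESTYPE above):
//   val mkUsers = ColumnFamilyDef("Users", keyType = UTF8TYPE.tyn)
//   val usersDef = mkUsers(cluster) // Cluster => ColumnFamilyDefinition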
|
SpotRight/Polidoro
|
src/main/scala/com/spotright/polidoro/testing/keyspaces/ColumnFamilyDef.scala
|
Scala
|
apache-2.0
| 2,018
|
package lara.epfl.scalasca.tests.unit.rules
import scala.reflect.runtime.universe.Tree
import scala.sys.process._
class BasicTest {
def outputToStrippedString(cmd: Seq[String]): (String, String) = {
val stdError = new StringBuffer()
val stdOutput = cmd lines_! ProcessLogger(stdError append _)
(stdOutput.toList.foldLeft("")((acc, item) => {
val str = item.toString.trim
if (str.length() > 0)
        acc + item.toString.trim + "\n"
else
acc
}), stdError.toString())
}
def runTest(prefix: String, test: String): Unit = {
val (producedOutput, producedErrors) = outputToStrippedString(Seq("scalac", "-d", "bin", "-Xplugin:target/scala-2.11/scalasca_2.11-0.1.jar", "-P:scalasca:testRule:" + prefix, "src/test/scala/lara/epfl/scalasca/tests/unit/executables/" + prefix + "/" + test + ".scala"))
val (expectedOutput, _) = outputToStrippedString(Seq("cat", "src/test/scala/lara/epfl/scalasca/tests/unit/executables/" + prefix + "/" + test + ".txt"))
    assert(producedOutput == expectedOutput, "Produced output:\n" + producedOutput + "\nProduced errors:\n" + producedErrors + "\nExpected output:\n" + expectedOutput)
}
}
|
jean-andre-gauthier/scalasca
|
src/test/scala/lara/epfl/scalasca/tests/unit/rules/BasicTest.scala
|
Scala
|
bsd-3-clause
| 1,144
|
package org.jetbrains.plugins.scala.testingSupport.specs2.specs2_2_10_2_4_6
import org.jetbrains.plugins.scala.testingSupport.specs2.Specs2GoToSourceTest
/**
* @author Roman.Shein
* @since 27.01.2015.
*/
class Specs2_2_10_2_4_6_GoToSourceTest extends Specs2GoToSourceTest with Specs2_2_10_2_4_6_Base{
}
|
katejim/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/specs2/specs2_2_10_2_4_6/Specs2_2_10_2_4_6_GoToSourceTest.scala
|
Scala
|
apache-2.0
| 309
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.listeners
import scala.collection.JavaConverters._
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.events._
import org.apache.carbondata.view.MVManagerInSpark
object ShowCacheEventListener extends OperationEventListener {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
   * Called when the specified event occurs
*/
override protected def onEvent(event: Event, operationContext: OperationContext): Unit = {
event match {
case showTableCacheEvent: ShowTableCacheEvent =>
val carbonTable = showTableCacheEvent.carbonTable
val internalCall = showTableCacheEvent.internalCall
if (carbonTable.isMV && !internalCall) {
throw new UnsupportedOperationException("Operation not allowed on child table.")
}
val childTables = operationContext.getProperty(carbonTable.getTableUniqueName)
.asInstanceOf[List[(String, String)]]
val views =
MVManagerInSpark.get(showTableCacheEvent.sparkSession).getSchemasOnTable(carbonTable)
if (!views.isEmpty) {
val mvTables = views.asScala.collect {
case view =>
(
s"${view.getIdentifier.getDatabaseName}-${view.getIdentifier.getTableName}",
"mv",
view.getIdentifier.getTableId
)
}
operationContext.setProperty(carbonTable.getTableUniqueName, childTables ++ mvTables)
}
}
}
}
|
zzcclp/carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/listeners/ShowCacheEventListener.scala
|
Scala
|
apache-2.0
| 2,349
|
package text.parser
import util.Config
/**
* @author ynupc
* Created on 2016/05/23
*/
object SentenceSplitter {
def split(text: String): Seq[String] = {
Config.sentenceSplitter match {
case "none" =>
Seq[String](text)
case _ =>
Seq[String](text)
}
}
}
|
ynupc/scalastringcourseday6
|
src/main/scala/text/parser/SentenceSplitter.scala
|
Scala
|
apache-2.0
| 308
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.memory.MemoryMode
import org.apache.spark.util.Utils
/**
* :: DeveloperApi ::
* Flags for controlling the storage of an RDD. Each StorageLevel records whether to use memory,
* or ExternalBlockStore, whether to drop the RDD to disk if it falls out of memory or
* ExternalBlockStore, whether to keep the data in memory in a serialized format, and whether
* to replicate the RDD partitions on multiple nodes.
*
* The [[org.apache.spark.storage.StorageLevel]] singleton object contains some static constants
* for commonly useful storage levels. To create your own storage level object, use the
* factory method of the singleton object (`StorageLevel(...)`).
*/
@DeveloperApi
class StorageLevel private(
private var _useDisk: Boolean,
private var _useMemory: Boolean,
private var _useOffHeap: Boolean,
private var _deserialized: Boolean,
private var _replication: Int = 1)
extends Externalizable {
// TODO: Also add fields for caching priority, dataset ID, and flushing.
private def this(flags: Int, replication: Int) = {
this((flags & 8) != 0, (flags & 4) != 0, (flags & 2) != 0, (flags & 1) != 0, replication)
}
def this() = this(false, true, false, false) // For deserialization
def useDisk: Boolean = _useDisk
def useMemory: Boolean = _useMemory
def useOffHeap: Boolean = _useOffHeap
def deserialized: Boolean = _deserialized
def replication: Int = _replication
assert(replication < 40, "Replication restricted to be less than 40 for calculating hash codes")
private[spark] def memoryMode: MemoryMode = {
if (useOffHeap) MemoryMode.OFF_HEAP
else MemoryMode.ON_HEAP
}
override def clone(): StorageLevel = {
new StorageLevel(useDisk, useMemory, useOffHeap, deserialized, replication)
}
override def equals(other: Any): Boolean = other match {
case s: StorageLevel =>
s.useDisk == useDisk &&
s.useMemory == useMemory &&
s.useOffHeap == useOffHeap &&
s.deserialized == deserialized &&
s.replication == replication
case _ =>
false
}
def isValid: Boolean = (useMemory || useDisk) && (replication > 0)
def toInt: Int = {
var ret = 0
if (_useDisk) {
ret |= 8
}
if (_useMemory) {
ret |= 4
}
if (_useOffHeap) {
ret |= 2
}
if (_deserialized) {
ret |= 1
}
ret
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
out.writeByte(toInt)
out.writeByte(_replication)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
val flags = in.readByte()
_useDisk = (flags & 8) != 0
_useMemory = (flags & 4) != 0
_useOffHeap = (flags & 2) != 0
_deserialized = (flags & 1) != 0
_replication = in.readByte()
}
@throws(classOf[IOException])
private def readResolve(): Object = StorageLevel.getCachedStorageLevel(this)
override def toString: String = {
val disk = if (useDisk) "disk" else ""
val memory = if (useMemory) "memory" else ""
val heap = if (useOffHeap) "offheap" else ""
val deserialize = if (deserialized) "deserialized" else ""
val output =
Seq(disk, memory, heap, deserialize, s"$replication replicas").filter(_.nonEmpty)
s"StorageLevel(${output.mkString(", ")})"
}
override def hashCode(): Int = toInt * 41 + replication
def description: String = {
var result = ""
result += (if (useDisk) "Disk " else "")
if (useMemory) {
result += (if (useOffHeap) "Memory (off heap) " else "Memory ")
}
result += (if (deserialized) "Deserialized " else "Serialized ")
result += s"${replication}x Replicated"
result
}
}
/**
* Various [[org.apache.spark.storage.StorageLevel]] defined and utility functions for creating
* new storage levels.
*/
object StorageLevel {
val NONE = new StorageLevel(false, false, false, false)
val DISK_ONLY = new StorageLevel(true, false, false, false)
val DISK_ONLY_2 = new StorageLevel(true, false, false, false, 2)
val DISK_ONLY_3 = new StorageLevel(true, false, false, false, 3)
val MEMORY_ONLY = new StorageLevel(false, true, false, true)
val MEMORY_ONLY_2 = new StorageLevel(false, true, false, true, 2)
val MEMORY_ONLY_SER = new StorageLevel(false, true, false, false)
val MEMORY_ONLY_SER_2 = new StorageLevel(false, true, false, false, 2)
val MEMORY_AND_DISK = new StorageLevel(true, true, false, true)
val MEMORY_AND_DISK_2 = new StorageLevel(true, true, false, true, 2)
val MEMORY_AND_DISK_SER = new StorageLevel(true, true, false, false)
val MEMORY_AND_DISK_SER_2 = new StorageLevel(true, true, false, false, 2)
val OFF_HEAP = new StorageLevel(true, true, true, false, 1)
/**
* :: DeveloperApi ::
* Return the StorageLevel object with the specified name.
*/
@DeveloperApi
def fromString(s: String): StorageLevel = s match {
case "NONE" => NONE
case "DISK_ONLY" => DISK_ONLY
case "DISK_ONLY_2" => DISK_ONLY_2
case "DISK_ONLY_3" => DISK_ONLY_3
case "MEMORY_ONLY" => MEMORY_ONLY
case "MEMORY_ONLY_2" => MEMORY_ONLY_2
case "MEMORY_ONLY_SER" => MEMORY_ONLY_SER
case "MEMORY_ONLY_SER_2" => MEMORY_ONLY_SER_2
case "MEMORY_AND_DISK" => MEMORY_AND_DISK
case "MEMORY_AND_DISK_2" => MEMORY_AND_DISK_2
case "MEMORY_AND_DISK_SER" => MEMORY_AND_DISK_SER
case "MEMORY_AND_DISK_SER_2" => MEMORY_AND_DISK_SER_2
case "OFF_HEAP" => OFF_HEAP
case _ => throw new IllegalArgumentException(s"Invalid StorageLevel: $s")
}
/**
* :: DeveloperApi ::
* Create a new StorageLevel object.
*/
@DeveloperApi
def apply(
useDisk: Boolean,
useMemory: Boolean,
useOffHeap: Boolean,
deserialized: Boolean,
replication: Int): StorageLevel = {
getCachedStorageLevel(
new StorageLevel(useDisk, useMemory, useOffHeap, deserialized, replication))
}
/**
* :: DeveloperApi ::
* Create a new StorageLevel object without setting useOffHeap.
*/
@DeveloperApi
def apply(
useDisk: Boolean,
useMemory: Boolean,
deserialized: Boolean,
replication: Int = 1): StorageLevel = {
getCachedStorageLevel(new StorageLevel(useDisk, useMemory, false, deserialized, replication))
}
/**
* :: DeveloperApi ::
* Create a new StorageLevel object from its integer representation.
*/
@DeveloperApi
def apply(flags: Int, replication: Int): StorageLevel = {
getCachedStorageLevel(new StorageLevel(flags, replication))
}
/**
* :: DeveloperApi ::
* Read StorageLevel object from ObjectInput stream.
*/
@DeveloperApi
def apply(in: ObjectInput): StorageLevel = {
val obj = new StorageLevel()
obj.readExternal(in)
getCachedStorageLevel(obj)
}
private[spark] val storageLevelCache = new ConcurrentHashMap[StorageLevel, StorageLevel]()
private[spark] def getCachedStorageLevel(level: StorageLevel): StorageLevel = {
storageLevelCache.putIfAbsent(level, level)
storageLevelCache.get(level)
}
}
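// A sketch of the factory-method usage mentioned in the class doc; the values
// are illustrative and the result is canonicalized through the cache:
//   val level = StorageLevel(useDisk = true, useMemory = true,
//     useOffHeap = false, deserialized = false, replication = 2)
//   assert(level == StorageLevel.MEMORY_AND_DISK_SER_2)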
|
ueshin/apache-spark
|
core/src/main/scala/org/apache/spark/storage/StorageLevel.scala
|
Scala
|
apache-2.0
| 8,013
|
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence
class NamedEntity() extends PersistentEntity {
override type Command = String
override type Event = String
override type State = String
override def entityTypeName: String = "some-name"
override def initialState: State = ""
override def behavior: Behavior = Actions.empty
}
|
edouardKaiser/lagom
|
persistence/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/persistence/NamedEntity.scala
|
Scala
|
apache-2.0
| 417
|
/* Copyright 2015 White Label Personal Clouds Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.welcomer.framework.pico.dsl
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import akka.actor.Actor
import akka.actor.ActorLogging
import play.api.libs.json._
import me.welcomer.framework.pico.EventedEvent
import me.welcomer.framework.pico.EventedFailure
import me.welcomer.framework.pico.EventedFunction
import me.welcomer.framework.pico.EventedResult
trait PicoEventHandlingDSL extends AnyRef { this: Actor with ActorLogging =>
import PicoEventSchema._
def handleEvent[T](
successHandler: (T, EventedEvent) => Unit,
errorHandler: (JsError, EventedEvent) => Unit = handleAttributeError)(implicit event: EventedEvent, readsT: Reads[T]): Unit = {
event.attributes.validate[T] match {
case JsSuccess(value, path) => successHandler(value, event)
case e: JsError => errorHandler(e, event)
}
}
def handleFunction[A, B](f: EventedFunction)(
success: A => Future[EventedResult[B]])(implicit ec: ExecutionContext, reads: Reads[A]): Future[EventedResult[B]] = {
f.args.validate[A] match {
case JsSuccess(value, _) => success(value)
case e: JsError => Future.successful(EventedFailure(e))
}
}
protected def handleAttributeError(e: JsError, event: EventedEvent): Unit = {
val errorJson = JsError.toFlatJson(e)
log.error("Error with attributes: {}", Json.prettyPrint(errorJson))
// TODO: Check if there's a replyTo and if so, use it to send an error event back?
}
}
|
welcomer/framework
|
src/main/scala/me/welcomer/framework/pico/dsl/PicoEventHandlingDSL.scala
|
Scala
|
apache-2.0
| 2,124
|
package sp.domain
import java.util.UUID
import org.json4s._
/**
*
 * Everything used in the domain should be an IDAble. When a new object is created, it gets a fresh random ID.
 *
 * When an object is updated, the model handler reuses the id and increments the version.
 * The plan is that only the model handler should do this.
*
* Created by Kristofer on 2014-06-07.
*/
trait IDAble {
val name: String
val id: ID
val attributes: SPAttributes
def |=(x: Any) = x match {
case m: IDAble => m.id.equals(id)
case _ => false
}
}
case class ID(value: UUID){
override def toString = value.toString
}
object ID {
implicit def uuidToID(id: UUID) = ID(id)
implicit def idToUUID(id: ID) = id.value
def newID = ID(UUID.randomUUID())
def makeID(id: String): Option[ID] = {
try {
Some(ID(UUID.fromString(id)))
} catch {
case e: IllegalArgumentException => None
}
}
def isID(str: String) = {
makeID(str) != None
}
}
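// A quick sketch of the ID helpers above (values are illustrative):
//   val id = ID.newID            // wraps a fresh random UUID
//   ID.makeID(id.toString)       // Some(id)
//   ID.makeID("not-a-uuid")      // None
//   ID.isID("not-a-uuid")        // false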
|
kristoferB/SP
|
sp1/src/main/scala/sp/domain/IDable.scala
|
Scala
|
mit
| 980
|
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.http.parsing
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import play.api.test._
import play.api.mvc.BodyParser
import play.api.mvc.PlayBodyParsers
import scala.xml.NodeSeq
import java.io.File
import java.nio.charset.StandardCharsets
import play.api.Application
import java.nio.file.Files
class XmlBodyParserSpec extends PlaySpecification {
"The XML body parser" should {
implicit def tolerantXmlBodyParser(implicit app: Application) =
app.injector.instanceOf[PlayBodyParsers].tolerantXml(1048576)
def xmlBodyParser(implicit app: Application) = app.injector.instanceOf[PlayBodyParsers].xml
def parse(xml: String, contentType: Option[String], encoding: String)(
implicit mat: Materializer,
bodyParser: BodyParser[NodeSeq]
) = {
await(
bodyParser(FakeRequest().withHeaders(contentType.map(CONTENT_TYPE -> _).toSeq: _*))
.run(Source.single(ByteString(xml, encoding)))
)
}
"parse XML bodies" in new WithApplication() {
parse("<foo>bar</foo>", Some("application/xml; charset=utf-8"), "utf-8") must beRight.like {
case xml => xml.text must_== "bar"
}
}
"honour the external charset for application sub types" in new WithApplication() {
parse("<foo>bär</foo>", Some("application/xml; charset=iso-8859-1"), "iso-8859-1") must beRight.like {
case xml => xml.text must_== "bär"
}
parse("<foo>bär</foo>", Some("application/xml; charset=utf-16"), "utf-16") must beRight.like {
case xml => xml.text must_== "bär"
}
}
"honour the external charset for text sub types" in new WithApplication() {
parse("<foo>bär</foo>", Some("text/xml; charset=iso-8859-1"), "iso-8859-1") must beRight.like {
case xml => xml.text must_== "bär"
}
parse("<foo>bär</foo>", Some("text/xml; charset=utf-16"), "utf-16") must beRight.like {
case xml => xml.text must_== "bär"
}
}
"default to iso-8859-1 for text sub types" in new WithApplication() {
parse("<foo>bär</foo>", Some("text/xml"), "iso-8859-1") must beRight.like {
case xml => xml.text must_== "bär"
}
}
"default to reading the encoding from the prolog for application sub types" in new WithApplication() {
parse("""<?xml version="1.0" encoding="utf-16"?><foo>bär</foo>""", Some("application/xml"), "utf-16") must beRight
.like {
case xml => xml.text must_== "bär"
}
parse("""<?xml version="1.0" encoding="iso-8859-1"?><foo>bär</foo>""", Some("application/xml"), "iso-8859-1") must beRight
.like {
case xml => xml.text must_== "bär"
}
}
"default to reading the encoding from the prolog for no content type" in new WithApplication() {
parse("""<?xml version="1.0" encoding="utf-16"?><foo>bär</foo>""", None, "utf-16") must beRight.like {
case xml => xml.text must_== "bär"
}
parse("""<?xml version="1.0" encoding="iso-8859-1"?><foo>bär</foo>""", None, "iso-8859-1") must beRight.like {
case xml => xml.text must_== "bär"
}
}
"accept all common xml content types" in new WithApplication() {
parse("<foo>bar</foo>", Some("application/xml; charset=utf-8"), "utf-8") must beRight.like {
case xml => xml.text must_== "bar"
}
parse("<foo>bar</foo>", Some("text/xml; charset=utf-8"), "utf-8") must beRight.like {
case xml => xml.text must_== "bar"
}
parse("<foo>bar</foo>", Some("application/xml+rdf; charset=utf-8"), "utf-8") must beRight.like {
case xml => xml.text must_== "bar"
}
}
"reject non XML content types" in new WithApplication() {
parse("<foo>bar</foo>", Some("text/plain; charset=utf-8"), "utf-8")(app.materializer, xmlBodyParser) must beLeft
parse("<foo>bar</foo>", Some("xml/xml; charset=utf-8"), "utf-8")(app.materializer, xmlBodyParser) must beLeft
parse("<foo>bar</foo>", None, "utf-8")(app.materializer, xmlBodyParser) must beLeft
}
"gracefully handle invalid xml" in new WithApplication() {
parse("<foo", Some("text/xml; charset=utf-8"), "utf-8") must beLeft
}
"parse XML bodies without loading in a related schema" in new WithApplication() {
val f = File.createTempFile("xxe", ".txt")
Files.write(f.toPath, "I shouldn't be there!".getBytes(StandardCharsets.UTF_8))
f.deleteOnExit()
val xml = s"""<?xml version="1.0" encoding="ISO-8859-1"?>
| <!DOCTYPE foo [
| <!ELEMENT foo ANY >
| <!ENTITY xxe SYSTEM "${f.toURI}">]><foo>hello&xxe;</foo>""".stripMargin
parse(xml, Some("text/xml; charset=iso-8859-1"), "iso-8859-1") must beLeft
}
"parse XML bodies without loading in a related schema from a parameter" in new WithApplication() {
val externalParameterEntity = File.createTempFile("xep", ".dtd")
val externalGeneralEntity = File.createTempFile("xxe", ".txt")
Files.write(
externalParameterEntity.toPath,
s"""
|<!ENTITY % xge SYSTEM "${externalGeneralEntity.toURI}">
|<!ENTITY % pe "<!ENTITY xxe '%xge;'>">
""".stripMargin.getBytes(StandardCharsets.UTF_8)
)
Files.write(externalGeneralEntity.toPath, "I shouldnt be there!".getBytes(StandardCharsets.UTF_8))
externalGeneralEntity.deleteOnExit()
externalParameterEntity.deleteOnExit()
val xml = s"""<?xml version="1.0" encoding="ISO-8859-1"?>
| <!DOCTYPE foo [
| <!ENTITY % xpe SYSTEM "${externalParameterEntity.toURI}">
| %xpe;
| %pe;
| ]><foo>hello&xxe;</foo>""".stripMargin
parse(xml, Some("text/xml; charset=iso-8859-1"), "iso-8859-1") must beLeft
}
"gracefully fail when there are too many nested entities" in new WithApplication() {
val nested = for (x <- 1 to 30) yield "<!ENTITY laugh" + x + " \"&laugh" + (x - 1) + ";&laugh" + (x - 1) + ";\">"
val xml = s"""<?xml version="1.0"?>
| <!DOCTYPE billion [
| <!ELEMENT billion (#PCDATA)>
| <!ENTITY laugh0 "ha">
| ${nested.mkString("\n")}
| ]>
| <billion>&laugh30;</billion>""".stripMargin
parse(xml, Some("text/xml; charset=utf-8"), "utf-8") must beLeft
success
}
"gracefully fail when an entity expands to be very large" in new WithApplication() {
val as = "a" * 50000
val entities = "&a;" * 50000
val xml = s"""<?xml version="1.0"?>
| <!DOCTYPE kaboom [
| <!ENTITY a "$as">
| ]>
| <kaboom>$entities</kaboom>""".stripMargin
parse(xml, Some("text/xml; charset=utf-8"), "utf-8") must beLeft
}
}
}
|
wegtam/playframework
|
core/play-integration-test/src/it/scala/play/it/http/parsing/XmlBodyParserSpec.scala
|
Scala
|
apache-2.0
| 7,041
|
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.servicemanager.services
import es.tid.cosmos.servicemanager.ClusterUser
import es.tid.cosmos.servicemanager.services.dependencies.ServiceDependencies
object CosmosUserService extends Service {
override type Parametrization = Seq[ClusterUser]
override val name: String = "COSMOS_USER"
override def defaultParametrization: Option[Parametrization] = Some(Seq.empty)
override val dependencies = ServiceDependencies.required(Hdfs)
}
|
telefonicaid/fiware-cosmos-platform
|
service-manager/src/main/scala/es/tid/cosmos/servicemanager/services/CosmosUserService.scala
|
Scala
|
apache-2.0
| 1,100
|
package beam.router
import java.awt.geom.Ellipse2D
import java.awt.{BasicStroke, Color}
import beam.agentsim.agents.vehicles.BeamVehicleType
import beam.agentsim.infrastructure.taz.{TAZ, TAZTreeMap}
import beam.analysis.plots.{GraphUtils, GraphsStatsAgentSimEventsListener}
import beam.router.Modes.BeamMode
import beam.router.Modes.BeamMode.{CAR, WALK}
import beam.router.model.{EmbodiedBeamLeg, EmbodiedBeamTrip}
import beam.sim.BeamScenario
import beam.sim.common.GeoUtils
import beam.utils.{FileUtils, GeoJsonReader, ProfilingUtils}
import com.google.inject.Inject
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Geometry
import org.jfree.chart.ChartFactory
import org.jfree.chart.annotations.{XYLineAnnotation, XYTextAnnotation}
import org.jfree.chart.plot.{PlotOrientation, XYPlot}
import org.jfree.data.statistics.{HistogramDataset, HistogramType}
import org.jfree.data.xy.{XYSeries, XYSeriesCollection}
import org.jfree.ui.RectangleInsets
import org.matsim.api.core.v01.{Coord, Id}
import org.matsim.core.controler.events.IterationEndsEvent
import org.matsim.core.utils.io.IOUtils
import org.opengis.feature.Feature
import org.opengis.feature.simple.SimpleFeature
import org.supercsv.io.{CsvMapReader, ICsvMapReader}
import org.supercsv.prefs.CsvPreference
import scala.collection.mutable
class TravelTimeObserved @Inject()(
val beamScenario: BeamScenario,
val geo: GeoUtils
) extends LazyLogging {
import TravelTimeObserved._
import beamScenario._
@volatile
private var skimmer: BeamSkimmer = new BeamSkimmer(beamScenario, geo)
private val observedTravelTimesOpt: Option[Map[PathCache, Float]] = {
val zoneBoundariesFilePath = beamConfig.beam.calibration.roadNetwork.travelTimes.zoneBoundariesFilePath
val zoneODTravelTimesFilePath = beamConfig.beam.calibration.roadNetwork.travelTimes.zoneODTravelTimesFilePath
if (zoneBoundariesFilePath.nonEmpty && zoneODTravelTimesFilePath.nonEmpty) {
val tazToMovId: Map[TAZ, Int] = buildTAZ2MovementId(
zoneBoundariesFilePath,
geo,
tazTreeMap
)
val movId2Taz: Map[Int, TAZ] = tazToMovId.map { case (k, v) => v -> k }
Some(buildPathCache2TravelTime(zoneODTravelTimesFilePath, movId2Taz))
} else None
}
val uniqueModes: List[BeamMode.CAR.type] = List(CAR)
val uniqueTimeBins: Range.Inclusive = 0 to 23
val dummyId: Id[BeamVehicleType] = Id.create("NA", classOf[BeamVehicleType])
def observeTrip(
trip: EmbodiedBeamTrip,
generalizedTimeInHours: Double,
generalizedCost: Double,
energyConsumption: Double
): Unit = {
val legs = trip.legs.filter(x => x.beamLeg.mode == BeamMode.CAR || x.beamLeg.mode == BeamMode.CAV)
legs.foreach { carLeg =>
val dummyHead = EmbodiedBeamLeg.dummyLegAt(
carLeg.beamLeg.startTime,
Id.createVehicleId(""),
isLastLeg = false,
carLeg.beamLeg.travelPath.startPoint.loc,
WALK,
dummyId
)
val dummyTail = EmbodiedBeamLeg.dummyLegAt(
carLeg.beamLeg.endTime,
Id.createVehicleId(""),
isLastLeg = true,
carLeg.beamLeg.travelPath.endPoint.loc,
WALK,
dummyId
)
// In case of `CAV` we have to override its mode to `CAR`
val fixedCarLeg = if (carLeg.beamLeg.mode == BeamMode.CAV) {
carLeg.copy(beamLeg = carLeg.beamLeg.copy(mode = BeamMode.CAR))
} else {
carLeg
}
val carTrip = EmbodiedBeamTrip(Vector(dummyHead, fixedCarLeg, dummyTail))
skimmer.observeTrip(carTrip, generalizedTimeInHours, generalizedCost, energyConsumption)
}
}
def notifyIterationEnds(event: IterationEndsEvent): Unit = {
writeTravelTimeObservedVsSimulated(event)
skimmer = new BeamSkimmer(beamScenario, geo)
}
def writeTravelTimeObservedVsSimulated(event: IterationEndsEvent): Unit = {
observedTravelTimesOpt.foreach { observedTravelTimes =>
ProfilingUtils.timed(
s"writeTravelTimeObservedVsSimulated on iteration ${event.getIteration}",
x => logger.info(x)
) {
write(event, observedTravelTimes)
}
}
}
private def write(event: IterationEndsEvent, observedTravelTimes: Map[PathCache, Float]): Unit = {
val filePathObservedVsSimulated = event.getServices.getControlerIO.getIterationFilename(
event.getServices.getIterationNumber,
"tazODTravelTimeObservedVsSimulated.csv.gz"
)
val writerObservedVsSimulated = IOUtils.getBufferedWriter(filePathObservedVsSimulated)
writerObservedVsSimulated.write("fromTAZId,toTAZId,hour,timeSimulated,timeObserved,counts")
writerObservedVsSimulated.write("\\n")
var series = new mutable.ListBuffer[(Int, Double, Double)]()
val categoryDataset = new HistogramDataset()
var deltasOfObservedSimulatedTimes = new mutable.ListBuffer[Double]
tazTreeMap.getTAZs
.foreach { origin =>
tazTreeMap.getTAZs.foreach { destination =>
uniqueModes.foreach { mode =>
uniqueTimeBins
.foreach { timeBin =>
val key = PathCache(origin.tazId, destination.tazId, timeBin)
observedTravelTimes.get(key).foreach { timeObserved =>
skimmer
.getSkimValue(timeBin * 3600, mode, origin.tazId, destination.tazId)
.map(_.toSkimExternal)
.foreach { theSkim =>
series += ((theSkim.count, theSkim.time, timeObserved))
for (count <- 1 to theSkim.count)
deltasOfObservedSimulatedTimes += theSkim.time - timeObserved
writerObservedVsSimulated.write(
s"${origin.tazId},${destination.tazId},${timeBin},${theSkim.time},${timeObserved},${theSkim.count}\\n"
)
}
}
}
}
}
}
categoryDataset.addSeries("Simulated-Observed", deltasOfObservedSimulatedTimes.toArray, histogramBinSize)
writerObservedVsSimulated.close()
val chartPath =
event.getServices.getControlerIO.getIterationFilename(event.getServices.getIterationNumber, chartName)
generateChart(series, chartPath)
val histogramPath =
event.getServices.getControlerIO.getIterationFilename(event.getServices.getIterationNumber, histogramName)
generateHistogram(categoryDataset, histogramPath)
}
}
object TravelTimeObserved extends LazyLogging {
val chartName: String = "scatterplot_simulation_vs_reference.png"
val histogramName: String = "simulation_vs_reference_histogram.png"
val histogramBinSize: Int = 200
val MaxDistanceFromBeamTaz: Double = 500.0 // 500 meters
case class PathCache(from: Id[TAZ], to: Id[TAZ], hod: Int)
def buildTAZ2MovementId(filePath: String, geo: GeoUtils, tazTreeMap: TAZTreeMap): Map[TAZ, Int] = {
ProfilingUtils.timed(s"buildTAZ2MovementId from '$filePath'", x => logger.info(x)) {
val mapper: Feature => (TAZ, Int, Double) = (feature: Feature) => {
val centroid = feature.asInstanceOf[SimpleFeature].getDefaultGeometry.asInstanceOf[Geometry].getCentroid
val wgsCoord = new Coord(centroid.getX, centroid.getY)
val utmCoord = geo.wgs2Utm(wgsCoord)
val movId = feature.getProperty("MOVEMENT_ID").getValue.toString.toInt
val taz: TAZ = tazTreeMap.getTAZ(utmCoord.getX, utmCoord.getY)
val distance = geo.distUTMInMeters(utmCoord, taz.coord)
(taz, movId, distance)
}
val xs: Array[(TAZ, Int, Double)] = GeoJsonReader.read(filePath, mapper)
val filterByMaxDistance = xs.filter { case (taz, movId, distance) => distance <= MaxDistanceFromBeamTaz }
val tazId2MovIdByMinDistance = filterByMaxDistance
.groupBy { case (taz, _, _) => taz }
.map {
case (taz, arr) =>
val (_, movId, _) = arr.minBy { case (_, _, distance) => distance }
(taz, movId)
}
val numOfUniqueMovId = tazId2MovIdByMinDistance.values.toSet.size
logger.info(
s"xs size is ${xs.length}. tazId2MovIdByMinDistance size is ${tazId2MovIdByMinDistance.keys.size}. numOfUniqueMovId: $numOfUniqueMovId"
)
tazId2MovIdByMinDistance
}
}
def buildPathCache2TravelTime(pathToAggregateFile: String, movId2Taz: Map[Int, TAZ]): Map[PathCache, Float] = {
val observedTravelTimes: mutable.HashMap[PathCache, Float] = scala.collection.mutable.HashMap.empty
ProfilingUtils.timed(s"buildPathCache2TravelTime from '$pathToAggregateFile'", x => logger.info(x)) {
val mapReader: ICsvMapReader =
new CsvMapReader(FileUtils.readerFromFile(pathToAggregateFile), CsvPreference.STANDARD_PREFERENCE)
try {
val header = mapReader.getHeader(true)
var line: java.util.Map[String, String] = mapReader.read(header: _*)
while (null != line) {
val sourceid = line.get("sourceid").toInt
val dstid = line.get("dstid").toInt
val mean_travel_time = line.get("mean_travel_time").toFloat
val hod = line.get("hod").toInt
if (movId2Taz.contains(sourceid) && movId2Taz.contains(dstid)) {
observedTravelTimes.put(PathCache(movId2Taz(sourceid).tazId, movId2Taz(dstid).tazId, hod), mean_travel_time)
}
line = mapReader.read(header: _*)
}
} finally {
if (null != mapReader)
mapReader.close()
}
}
logger.info(s"observedTravelTimesOpt size is ${observedTravelTimes.keys.size}")
observedTravelTimes.toMap
}
def generateHistogram(dataset: HistogramDataset, path: String): Unit = {
dataset.setType(HistogramType.FREQUENCY)
val chart = ChartFactory.createHistogram(
"Simulated-Observed Frequency",
"Simulated-Observed",
"Frequency",
dataset,
PlotOrientation.VERTICAL,
true,
false,
false
)
GraphUtils.saveJFreeChartAsPNG(
chart,
path,
GraphsStatsAgentSimEventsListener.GRAPH_WIDTH,
GraphsStatsAgentSimEventsListener.GRAPH_HEIGHT
)
}
def generateChart(series: mutable.ListBuffer[(Int, Double, Double)], path: String): Unit = {
def drawLineHelper(color: Color, percent: Int, xyplot: XYPlot, max: Double, text: Double) = {
xyplot.addAnnotation(
new XYLineAnnotation(
0,
0,
max * 2 * Math.cos(Math.toRadians(45 + percent)),
max * 2 * Math.sin(Math.toRadians(45 + percent)),
new BasicStroke(1f),
color
)
)
xyplot.addAnnotation(
new XYTextAnnotation(
s"$text%",
max * Math.cos(Math.toRadians(45 + percent)) / 2,
max * Math.sin(Math.toRadians(45 + percent)) / 2
)
)
}
val maxSkimCount = series.map(_._1).max
val bucketsNum = Math.min(maxSkimCount, 4)
val buckets = (1 to bucketsNum).map(_ * maxSkimCount / bucketsNum)
def getClosest(num: Double) = buckets.minBy(v => math.abs(v - num))
var dataset = new XYSeriesCollection()
val seriesPerCount = mutable.HashMap[Int, XYSeries]()
series.foreach {
case (count, simulatedTime, observedTime) =>
val closestBucket = getClosest(count)
if (!seriesPerCount.contains(closestBucket))
seriesPerCount(closestBucket) = new XYSeries(closestBucket.toString, false)
seriesPerCount(closestBucket).add(simulatedTime, observedTime)
}
seriesPerCount.toSeq.sortBy(_._1).foreach {
case (_, seriesToAdd) =>
dataset.addSeries(seriesToAdd)
}
val chart = ChartFactory.createScatterPlot(
"TAZ TravelTimes Observed Vs. Simulated",
"Simulated",
"Observed",
dataset,
PlotOrientation.VERTICAL,
true,
true,
false
)
val xyplot = chart.getPlot.asInstanceOf[XYPlot]
xyplot.setDomainCrosshairVisible(false)
xyplot.setRangeCrosshairVisible(false)
val colors = List(
new Color(125, 125, 250), // light blue
new Color(32, 32, 253), // dark blue
new Color(255, 87, 126), // light red
new Color(255, 0, 60) // dark red
)
(0 until seriesPerCount.size).map { counter =>
val renderer = xyplot
.getRendererForDataset(xyplot.getDataset(0))
renderer.setSeriesShape(counter, new Ellipse2D.Double(0, 0, 5, 5))
renderer.setSeriesPaint(counter, colors(counter % colors.length))
}
val max = Math.max(
dataset.getDomainLowerBound(false),
dataset.getRangeUpperBound(false)
)
if (max > 0) {
xyplot.getDomainAxis.setRange(0.0, max)
xyplot.getRangeAxis.setRange(0.0, max)
}
xyplot.getDomainAxis.setAutoRange(false)
xyplot.getRangeAxis.setAutoRange(false)
xyplot.getDomainAxis.setTickLabelInsets(new RectangleInsets(10.0, 10.0, 10.0, 10.0))
xyplot.getRangeAxis.setTickLabelInsets(new RectangleInsets(10.0, 10.0, 10.0, 10.0))
// diagonal line
chart.getXYPlot.addAnnotation(
new XYLineAnnotation(
0,
0,
xyplot.getDomainAxis.getRange.getUpperBound,
xyplot.getRangeAxis.getRange.getUpperBound
)
)
val percents = List(
(18, Color.RED, -50.0),
(-18, Color.RED, 100.0),
(36, Color.BLUE, -83.0),
(-36, Color.BLUE, 500.0)
)
percents.foreach {
case (percent: Int, color: Color, value: Double) =>
drawLineHelper(
color,
percent,
xyplot,
max,
value
)
}
GraphUtils.saveJFreeChartAsPNG(
chart,
path,
1000,
1000
)
}
}
|
colinsheppard/beam
|
src/main/scala/beam/router/TravelTimeObserved.scala
|
Scala
|
gpl-3.0
| 13,600
|
// Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.example
import org.scalamock.scalatest.MockFactory
import org.scalatest.funsuite.AnyFunSuite
import scala.math.{Pi, sqrt}
class ControllerTest extends AnyFunSuite with MockFactory {
test("draw line") {
val mockTurtle = mock[Turtle]
val controller = new Controller(mockTurtle)
inSequence {
inAnyOrder {
(() => mockTurtle.penUp()).expects()
(mockTurtle.getPosition _).expects().returning(0.0, 0.0)
(mockTurtle.getAngle _).expects().returning(0.0)
}
(mockTurtle.turn _).expects(~(Pi / 4))
(mockTurtle.forward _).expects(~sqrt(2.0))
(mockTurtle.getAngle _).expects().returning(Pi / 4)
(mockTurtle.turn _).expects(~(-Pi / 4))
(() => mockTurtle.penDown()).expects()
(mockTurtle.forward _).expects(1.0)
}
controller.drawLine((1.0, 1.0), (2.0, 1.0))
}
}
|
paulbutcher/ScalaMock
|
examples/src/test/scala/com/example/ControllerTest.scala
|
Scala
|
mit
| 2,053
|
package akka.pattern.extended
import akka.testkit.ThreadUtil
import akka.actor.ActorSystem
object Thread {
def sleep(duration: Long)(implicit system: ActorSystem): Unit = ThreadUtil.sleep(duration)(system)
}
|
unicredit/akka.js
|
akka-js-actor-tests/js/src/test/scala/akka/pattern/extended/Thread.scala
|
Scala
|
bsd-3-clause
| 216
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api.test
import java.util.{Collection, Collections, Properties}
import scala.collection.JavaConverters._
import org.junit.runners.Parameterized
import org.junit.runner.RunWith
import org.junit.runners.Parameterized.Parameters
import org.junit.{After, Before, Test}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.junit.Assert._
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.ByteArraySerializer
@RunWith(value = classOf[Parameterized])
class ProducerCompressionTest(compression: String) extends ZooKeeperTestHarness {
private val brokerId = 0
private val topic = "topic"
private val numRecords = 2000
private var server: KafkaServer = null
@Before
override def setUp() {
super.setUp()
val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
server = TestUtils.createServer(KafkaConfig.fromProps(props))
}
@After
override def tearDown() {
TestUtils.shutdownServers(Seq(server))
super.tearDown()
}
/**
* testCompression
*
* Compressed messages should be able to be sent and consumed correctly
*/
@Test
def testCompression() {
val producerProps = new Properties()
val bootstrapServers = TestUtils.getBrokerListStrFromServers(Seq(server))
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
producerProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compression)
producerProps.put(ProducerConfig.BATCH_SIZE_CONFIG, "66000")
producerProps.put(ProducerConfig.LINGER_MS_CONFIG, "200")
val producer = new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer)
val consumer = TestUtils.createConsumer(bootstrapServers, securityProtocol = SecurityProtocol.PLAINTEXT)
try {
// create topic
TestUtils.createTopic(zkClient, topic, 1, 1, List(server))
val partition = 0
// prepare the messages
val messageValues = (0 until numRecords).map(i => "value" + i)
// make sure the returned messages are correct
val now = System.currentTimeMillis()
val responses = for (message <- messageValues)
yield producer.send(new ProducerRecord(topic, null, now, null, message.getBytes))
for ((future, offset) <- responses.zipWithIndex) {
assertEquals(offset.toLong, future.get.offset)
}
val tp = new TopicPartition(topic, partition)
// make sure the fetched message count match
consumer.assign(Collections.singleton(tp))
consumer.seek(tp, 0)
val records = TestUtils.consumeRecords(consumer, numRecords)
for (((messageValue, record), index) <- messageValues.zip(records).zipWithIndex) {
assertEquals(messageValue, new String(record.value))
assertEquals(now, record.timestamp)
assertEquals(index.toLong, record.offset)
}
} finally {
producer.close()
consumer.close()
}
}
}
object ProducerCompressionTest {
@Parameters(name = "{index} compressionType = {0}")
def parameters: Collection[Array[String]] = {
Seq(
Array("none"),
Array("gzip"),
Array("snappy"),
Array("lz4")
).asJava
}
}
|
richhaase/kafka
|
core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala
|
Scala
|
apache-2.0
| 4,200
|
package com.twitter.inject.conversions
import com.twitter.util.{Duration => TwitterDuration}
import org.joda.time.Duration
object duration {
implicit class RichDuration(val self: Duration) extends AnyVal {
def toTwitterDuration: TwitterDuration = {
TwitterDuration.fromMilliseconds(self.getMillis)
}
}
implicit class RichTwitterDuration(val self: TwitterDuration) extends AnyVal {
def toJodaDuration: Duration = {
new Duration(self.inMillis)
}
}
}
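// A minimal usage sketch (values are illustrative): with the implicit classes
// above in scope, durations convert in both directions with millisecond precision.
//
//   import com.twitter.inject.conversions.duration._
//
//   val joda    = new org.joda.time.Duration(30000L) // 30 s
//   val twitter = joda.toTwitterDuration             // Twitter Duration of 30 s
//   val back    = twitter.toJodaDuration             // 30000 ms again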
|
twitter/finatra
|
inject/inject-utils/src/main/scala/com/twitter/inject/conversions/duration.scala
|
Scala
|
apache-2.0
| 489
|
package poker.core
/** Type safety for poker ranks without runtime overhead
*
* @param char Poker ranks (2-9, T, J, Q, K, A) where T represents 10
*/
final case class Rank(char: Char) extends AnyVal
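// A short sketch: because Rank extends AnyVal, the wrapper adds compile-time
// type safety without allocating a box in most contexts.
//
//   val ten = Rank('T')
//   ten.char // 'T'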
|
kyuksel/poker
|
src/main/scala/poker/core/Rank.scala
|
Scala
|
mit
| 206
|
package scutil.lang.tc
import minitest._
import scutil.core.implicits._
import scutil.lang._
object ApplicativeTest extends SimpleTestSuite {
test("map2 in the zip order when zippy") {
assertEquals(
Applicative[Option].map2(Some(1), Some(2))(_ -> _),
(Some(1) zip Some(2))
)
}
test("map2 in the same order as flatMap for Vector") {
val as = Vector(1,2)
val bs = Vector(3,4)
// 3,4,6,8
val x =
as flatMap { a =>
bs map { b =>
a*b
}
}
assertEquals(
x,
Vector(3,4,6,8)
)
val y = (as map2 bs)(_*_)
assertEquals(y, x)
}
test("map2 in the same order as flatMap for Nes") {
val as = Nes.of(1,2)
val bs = Nes.of(3,4)
// 3,4,6,8
val x =
as flatMap { a =>
bs map { b =>
a*b
}
}
assertEquals(
x,
Nes.of(3,4,6,8)
)
val y = (as map2 bs)(_*_)
assertEquals(y, x)
}
test("have ap do the same thing for native and instance") {
val f1:Int=>Int = _-1
val f2:Int=>Int = _+1
val as = Vector(3,4)
val bs = Vector(f1, f2)
// function effect first
val x = bs ap as
assertEquals(
x,
Vector(2,3,4,5)
)
val y = Applicative[Vector].ap(bs)(as)
assertEquals(y, x)
}
test("do the function effect first") {
val f:Either[String,Int=>Int] = Left("function")
val v:Either[String,Int] = Left("value")
assertEquals(
Applicative[Either[String,_]].ap(f)(v),
Left("function")
)
}
}
|
ritschwumm/scutil
|
modules/core/src/test/scala/scutil/lang/tc/ApplicativeTest.scala
|
Scala
|
bsd-2-clause
| 1,399
|
/*
* Copyright 2008-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
import xml.{NodeSeq, Text}
import common.Loggable
import util._
import Helpers._
import S.?
/**
* Base class for things that require pagination. Implements a contract
* for supplying the correct number of browsable pages etc
*
* @tparam T the type of item being paginated
* @author nafg and Timothy Perrett
*/
trait Paginator[T] extends Loggable {
/**
* The total number of items
*/
def count: Long
/**
* How many items to put on each page
*/
def itemsPerPage = 20
/**
* The record number this page starts at. Zero-based.
*/
def first = 0L
/**
* The items displayed on the current page
*/
def page: Seq[T]
/**
* Calculates the number of pages the items will be spread across
*/
def numPages =
(count/itemsPerPage).toInt +
(if(count % itemsPerPage > 0) 1 else 0)
/**
* Calculates the current page number, based on the value of 'first.'
*/
def curPage = (first / itemsPerPage).toInt
/**
* Returns a list of page numbers to be displayed in 'zoomed' mode, i.e.,
* as the page numbers get further from the current page, they are more sparse.
*/
def zoomedPages = (
List(curPage - 1020, curPage - 120, curPage - 20) ++
(curPage-10 to curPage+10) ++
List(curPage + 20, curPage + 120, curPage + 1020)
) filter { n=>
n >= 0 && n < numPages
}
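// Worked example (hypothetical numbers): with numPages = 50 and curPage = 30,
// zoomedPages yields List(10) ++ (20 to 40) -- sparse far pages plus a dense
// window around the current page; out-of-range candidates are filtered out.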
}
/**
* In many situations you'll want to sort things in your paginated view.
* <code>SortedPaginator</code> is a specialized paginator for doing such tasks.
*
* T: The type of the elements, accessed via def page within the listing snippet
* C: The type of the columns, used to specify sorting
*
* @author nafg and Timothy Perrett
*/
trait SortedPaginator[T, C] extends Paginator[T] {
/**
* Pair of (column index, ascending)
*/
type SortState = (Int, Boolean)
/**
* The sort headers: pairs of column labels, and column identifier objects of type C.
*/
def headers: List[(String, C)]
protected var _sort = (0, true)
/**
* Get the current sort state: Pair of (column index, ascending?)
*/
def sort: SortState = _sort
/**
* Set the current sort state: Pair of (column index, ascending?)
*/
def sort_=(s: SortState) = _sort = s
/**
* Returns a new SortState based on a column index.
* If the paginator is already sorted by that column, it
* toggles the direction; otherwise the direction is ascending.
* Note that this method does not alter the sort state in the
* paginator; it only calculates the direction toggle.
* Example usage:
* sortedPaginator.sort = sortedPaginator.sortedBy(columns.indexOf(clickedColumn))
*/
def sortedBy(column: Int): SortState = sort match {
case (`column`, true) => // descending is only if it was already sorted ascending
(column, false)
case _ =>
(column, true)
}
}
/**
* This is the paginator snippet. It provides page
* navigation and column sorting links.
* View XHTML is as follows:
* nav prefix (prefix is configurable by overriding def navPrefix)
* - <nav:first/> - a link to the first page
* - <nav:prev/> - a link to the previous page
* - <nav:allpages/> - individual links to all pages. The contents of this node are used to separate page links.
* - <nav:next/> - a link to the next page
* - <nav:last/> - a link to the last page
* - <nav:records/> - a description of which records are currently being displayed
* - <nav:recordsFrom/> - the first record number being displayed
* - <nav:recordsTo/> - the last record number being displayed
* - <nav:recordsCount/> - the total number of records on all pages
*
* @author nafg and Timothy Perrett
*/
trait PaginatorSnippet[T] extends Paginator[T] {
/**
* The "previous page" link text
*/
def prevXml: NodeSeq = Text(?("<"))
/**
* The "next page" link text
*/
def nextXml: NodeSeq = Text(?(">"))
/**
* The "first page" link text
*/
def firstXml: NodeSeq = Text(?("<<"))
/**
* The "last page" link text
*/
def lastXml: NodeSeq = Text(?(">>"))
/**
* How to display the page's starting record
*/
def recordsFrom: String = (first+1 min count).toString
/**
* How to display the page's ending record
*/
def recordsTo: String = ((first+itemsPerPage) min count).toString
/**
* The status displayed when using <nav:records/> in the template.
*/
def currentXml: NodeSeq =
if(count==0)
Text(S.?("paginator.norecords"))
else
Text(S.?("paginator.displayingrecords",
Array(recordsFrom, recordsTo, count).map(_.asInstanceOf[AnyRef]) : _*))
/**
* The template prefix for general navigation components
*/
def navPrefix = "nav"
/**
* The URL query parameter to propagate the record the page should start at
*/
def offsetParam = "offset"
protected var _first = 0L
/**
* Overrides the super's implementation so the first record can be overridden by a URL query parameter.
*/
override def first = S.param(offsetParam).map(toLong) openOr _first max 0
/**
* Sets the default starting record of the page (URL query parameters take precedence over this)
*/
def first_=(f: Long) = _first = f max 0 min (count-1)
/**
* Returns a URL used to link to a page starting at the given record offset.
*/
def pageUrl(offset: Long): String = {
def originalUri = S.originalRequest.map(_.uri).openOr(sys.error("No request"))
appendParams(originalUri, List(offsetParam -> offset.toString))
}
/**
* Returns XML that links to a page starting at the given record offset, if the offset is valid and not the current one.
* @param ns The link text, if the offset is valid and not the current offset; or, if that is not the case, the static unlinked text to display
*/
def pageXml(newFirst: Long, ns: NodeSeq): NodeSeq =
if(first==newFirst || newFirst < 0 || newFirst >= count)
ns
else
<a href={pageUrl(newFirst)}>{ns}</a>
/**
* Generates links to multiple pages with arbitrary XML delimiting them.
*/
def pagesXml(pages: Seq[Int])(sep: NodeSeq): NodeSeq = {
pages.toList map {n =>
pageXml(n*itemsPerPage, Text((n+1).toString))
} match {
case one :: Nil => one
case first :: rest => rest.foldLeft(first) {
case (a,b) => a ++ sep ++ b
}
case Nil => Nil
}
}
/**
* This method binds template HTML according to the specified
* configuration. You can reference this as a snippet method directly
* in your template; or you can call it directly as part of your binding
* code.
*
* Classes used to bind:
* - `first`: link to go back to the first page (populated by `firstXml`)
* - `prev`: link to go to previous page (populated by `prevXml`)
* - `all-pages`: container for all pages (populated by `pagesXml`)
* - `zoomed-pages`: container for `zoomedPages` (populated by `pagesXml`)
* - `next`: link to go to next page (populated by `nextXml`)
* - `last`: link to go to last page (populated by `lastXml`)
* - `records`: currently visible records + total count (populated by
* `currentXml`)
* - `records-start`: start of currently visible records
* - `records-end`: end of currently visible records
* - `records-count`: total records count
*/
def paginate: CssSel = {
import scala.math._
".first *" #> pageXml(0, firstXml) &
".prev *" #> pageXml(max(first - itemsPerPage, 0), prevXml) &
".all-pages *" #> pagesXml(0 until numPages) _ &
".zoomed-pages *" #> pagesXml(zoomedPages) _ &
".next *" #> pageXml(
max(0, min(first + itemsPerPage, itemsPerPage * (numPages - 1))),
nextXml
) &
".last *" #> pageXml(itemsPerPage * (numPages - 1), lastXml) &
".records *" #> currentXml &
".records-start *" #> recordsFrom &
".records-end *" #> recordsTo &
".records-count *" #> count
}
}
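// A minimal sketch (Item is a hypothetical type) of a concrete snippet: only
// `count` and `page` must be supplied; offset handling and navigation links
// come from the trait.
//
//   class ItemPaginator(items: List[Item]) extends PaginatorSnippet[Item] {
//     def count = items.length.toLong
//     def page = items.slice(first.toInt, (first + itemsPerPage).toInt)
//   }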
/**
* This trait adds snippet functionality for sorted paginators.
* You can place bind points in your template for column headers, and it turns them into links
* that you can click to sort by that column. Simply write, e.g.,
* <th><sort:name/></th><th><sort:email/></th> etc.
*/
trait SortedPaginatorSnippet[T, C] extends SortedPaginator[T, C] with PaginatorSnippet[T] {
/**
* The prefix to bind the sorting column headers
*/
def sortPrefix = "sort"
/**
* The URL query parameter to specify the sort column
*/
def sortParam = "sort"
/**
* The URL query parameter to specify the sort direction
*/
def ascendingParam = "asc"
/**
* Calculates the page url taking sorting into account.
*/
def sortedPageUrl(offset: Long, sort: (Int, Boolean)) = sort match {
case (col, ascending) =>
appendParams(super.pageUrl(offset), List(sortParam->col.toString, ascendingParam->ascending.toString))
}
/**
* Overrides pageUrl and delegates to sortedPageUrl using the current sort
*/
override def pageUrl(offset: Long) = sortedPageUrl(offset, sort)
/**
* Overrides sort, giving the URL query parameters precedence
*/
override def sort = super.sort match {
case (col, ascending) => (
S.param("sort").map(toInt) openOr col,
S.param("asc").map(toBoolean) openOr ascending
)
}
/**
* This method binds template HTML according to the specified
* configuration. You can reference this as a snippet method directly
* in your template; or you can call it directly as part of your binding
* code.
*
* In addition to the classes bound in {@link PaginatorSnippet}, for
* each header in the `headers` list, this will bind elements with that
* class name and put a link in them with their contents.
*
* For example, with a list of headers `List("foo", "bar")`, this would
* bind the `.foo` element's contents to contain a link to a page that
* renders that column sorted, as well as the `.bar` element's contents
* to contain a link to a page that renders that column sorted.
*/
override def paginate: CssSel = {
val headerTransforms =
headers.zipWithIndex.map {
case ((binding, _), colIndex) =>
s".$binding *" #> { ns: NodeSeq =>
<a href={sortedPageUrl(first, sortedBy(colIndex))}>{ns}</a>
}
}
headerTransforms.foldLeft(super.paginate)(_ & _)
}
}
/**
* Sort your paginated views using Lift's function mapping.
* The only downside of this style is that your links are session-specific
* and non-bookmarkable.
* If you mix this trait into a StatefulSnippet, it should work out of the box.
* Otherwise, implement `registerThisSnippet`.
* @author nafg and Timothy Perrett
*/
trait StatefulSortedPaginatorSnippet[T, C] extends SortedPaginatorSnippet[T, C] {
/**
* This method is called before the new page is served, to set up the state in advance.
* It is implemented by StatefulSnippet so you can just mix in StatefulSortedPaginatorSnippet to one;
* or you can implement it yourself, using things like S.mapSnippet.
*/
def registerThisSnippet: Unit
/**
* Overrides to use Lift state rather than URL query parameters.
*/
override def sortedPageUrl(offset: Long, sort: (Int, Boolean)) =
S.fmapFunc(S.NFuncHolder(() => registerThisSnippet)){ name =>
appendParams(super.sortedPageUrl(offset,sort), List(name -> "_"))
}
}
|
lzpfmh/framework-2
|
web/webkit/src/main/scala/net/liftweb/http/Paginator.scala
|
Scala
|
apache-2.0
| 12,283
|
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.batch
trait PrunedSpace[-T] extends java.io.Serializable {
// expire (remove) items before writing; T is often a (K, V) pair
def prune(item: T, writeTime: Timestamp): Boolean
}
object PrunedSpace extends java.io.Serializable {
val neverPruned: PrunedSpace[Any] =
new PrunedSpace[Any] { def prune(item: Any, writeTime: Timestamp) = false }
def apply[T](pruneFn: (T, Timestamp) => Boolean) = new PrunedSpace[T] {
def prune(item: T, writeTime: Timestamp) = pruneFn(item, writeTime)
}
}
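// A usage sketch (isTooOld is a hypothetical predicate): build a PrunedSpace
// via the apply factory to drop stale key-value pairs at write time.
//
//   val space: PrunedSpace[(String, Long)] =
//     PrunedSpace { (kv, writeTime) => isTooOld(writeTime) }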
|
nvoron23/summingbird
|
summingbird-batch/src/main/scala/com/twitter/summingbird/batch/PrunedSpace.scala
|
Scala
|
apache-2.0
| 1,084
|
/*
* Copyright 2001-2012 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/*
* If distributingTests comes through, then the DSR will not use
* a SuiteCompleted message to indicate that the suite is done, but instead
* will wait for a completedTests invocation.
*/
/**
* A sorter for the events of a run's distributed suites.
*
* <p>
* This trait is used, for example, when <code>-PS</code> is passed to <a href="tools/Runner$.html"><code>Runner</code></a>, to sort the
* events of distributed suites such that each suite's events are propagated together, with a timeout if an event takes too long.
* </p>
*/
trait DistributedSuiteSorter {
def distributingTests(suiteId: String)
def completedTests(suiteId: String)
}
|
hubertp/scalatest
|
src/main/scala/org/scalatest/DistributedSuiteSorter.scala
|
Scala
|
apache-2.0
| 1,281
|
import akka.actor.{Actor, ActorRef, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
class WinActor extends Actor {
// create an actor
val playerer = context.actorOf(Props[PlayerDestroyedActor], PlayerDestroyedActor.ACTOR_NAME)
// set timeout for ask -> ?
implicit val timeout = Timeout(5 seconds)
def handleMessage(player1: IPlayer, player2: IPlayer, ref: ActorRef) = {
// ? -> ask an ActorRef, returns a future
val pl1 = (playerer ? PlayerDestroyedMessage(player1, first = true)).mapTo[PlayerDestroyedResponse]
val pl2 = (playerer ? PlayerDestroyedMessage(player2, first = false)).mapTo[PlayerDestroyedResponse]
// use the FIRST future result with flatMap
val entireFuture = pl1 flatMap { first =>
// use the SECOND future result with map to keep the same nesting depth
pl2 map { second =>
if (first.destroyed || second.destroyed) {
// ! -> fire and forget a response
ref ! WinnerResponse(won = true, winner = if (first.destroyed) player2 else player1)
} else {
ref ! WinnerResponse(won = false, winner = null)
}
}
}
}
override def receive: Receive = {
case msg: WinMessage =>
// save sender now
handleMessage(msg.player1, msg.player2, sender())
case msg => unhandled(msg)
}
}
|
mosauter/WT_Battleship
|
public/presentation/architecture/code/winner.scala
|
Scala
|
gpl-2.0
| 1,359
|
package ml.combust.mleap.tensorflow
import ml.combust.bundle.serializer.FileUtil
import ml.combust.mleap.core.types.TensorType
import ml.combust.mleap.tensor.{DenseTensor, Tensor}
import ml.combust.mleap.tensorflow.converter.MleapConverter
import org.scalatest.FunSpec
import org.tensorflow.{SavedModelBundle, Signature}
import org.tensorflow.ndarray.Shape
import org.tensorflow.types.TFloat32
import java.io.ByteArrayOutputStream
import java.nio.file.Files
import java.util.zip.ZipOutputStream
import scala.collection.JavaConverters._
/**
* Created by hollinwilkins on 1/12/17.
*/
class TensorflowModelSpec extends FunSpec {
describe("with an adding tensorflow model") {
it("adds two floats together") {
val graph = TestUtil.createAddGraph()
val model = TensorflowModel(
graph = Some(graph),
inputs = Seq(("InputA", TensorType.Float()), ("InputB", TensorType.Float())),
outputs = Seq(("MyResult", TensorType.Float())),
modelBytes = graph.toGraphDef.toByteArray
)
assert(model(Tensor.scalar(23.4f), Tensor.scalar(45.6f)).head == Tensor.scalar(23.4f + 45.6f))
assert(model(Tensor.scalar(42.3f), Tensor.scalar(99.9f)).head == Tensor.scalar(42.3f + 99.9f))
assert(model(Tensor.scalar(65.8f), Tensor.scalar(34.6f)).head == Tensor.scalar(65.8f + 34.6f))
model.close()
}
}
describe("with a multiple tensorflow model") {
describe("with a float and a float vector") {
it("scales the float vector") {
val graph = TestUtil.createMultiplyGraph()
val model = TensorflowModel(
inputs = Seq(("InputA", TensorType.Float()), ("InputB", TensorType.Float())),
outputs = Seq(("MyResult", TensorType.Float(3))),
modelBytes = graph.toGraphDef.toByteArray)
val tensor1 = DenseTensor(Array(1.0f, 2.0f, 3.0f), Seq(3))
val scale1 = Tensor.scalar(2.0f)
assert(model(scale1, tensor1).head.asInstanceOf[DenseTensor[Float]].values sameElements Array(2.0f, 4.0f, 6.0f))
model.close()
}
}
}
describe("with an tensorflow model has variables") {
it("saved model") {
var reducedSum = 0.0f
val testFolder = Files.createTempDirectory("tf-saved-model-export")
val input = DenseTensor(Array(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f), Seq(2, 3))
val xyShape = Shape.of(2, 3L)
val f = TestUtil.createSessionFunctionWithVariables(xyShape)
val xTensor = MleapConverter.convert(input)
val zTensor = f.call(xTensor).asInstanceOf[TFloat32]
try {
reducedSum = zTensor.getFloat()
f.save(testFolder.toString)
} finally {
if (xTensor != null) xTensor.close()
if (zTensor != null) zTensor.close()
if (f != null) f.session.close()
}
// load it back
val bundle = SavedModelBundle.load(testFolder.toString)
try {
val signatureDef = bundle.metaGraphDef.getSignatureDefOrThrow(Signature.DEFAULT_KEY)
val inputMap = signatureDef.getInputsMap.asScala.map { case (k, v) => (k, v.getName) }
val outputMap = signatureDef.getOutputsMap.asScala.map { case (k, v) => (k, v.getName) }
assert(inputMap("input") == "Placeholder:0")
assert(outputMap("reducedSum") == "ReduceSum:0")
val inputs = Seq(("Placeholder:0", TensorType.Float(2, 3)))
val outputs = Seq(("ReduceSum:0", TensorType.Float()))
val format = Some("saved_model")
val byteStream = new ByteArrayOutputStream()
val zf = new ZipOutputStream(byteStream)
try FileUtil().zip(testFolder.toFile, zf) finally if (zf != null) zf.close()
FileUtil().rmRF(testFolder.toFile)
val model = TensorflowModel(
inputs = inputs,
outputs = outputs,
format = format,
modelBytes = byteStream.toByteArray
)
try {
val output = model(input)
assert(reducedSum == output.head.asInstanceOf[DenseTensor[Float]](0))
} finally if (model != null) model.close()
} finally if (bundle != null) bundle.close()
}
}
}
|
combust/mleap
|
mleap-tensorflow/src/test/scala/ml/combust/mleap/tensorflow/TensorflowModelSpec.scala
|
Scala
|
apache-2.0
| 4,101
|
abstract class RedBlack[A] extends Serializable {
abstract class Tree[+B] extends Serializable
case object Empty extends Tree[Nothing]
}
object Test {
def main(args: Array[String]): Unit = {
val r = classOf[RedBlack[_]].getMethod("Empty").getGenericReturnType.toString
// Output changed in JDK 1.8.0_172: https://github.com/scala/bug/issues/10835
assert(r == "RedBlack<A>.Empty$" || r == "RedBlack<A>$Empty$", r)
}
}
|
scala/scala
|
test/files/run/t2873.scala
|
Scala
|
apache-2.0
| 438
|
package com.jayway.saaloop.dsl
import org.specs2.mutable.Specification
import com.jayway.saaloop.dsl.Saaloop._
/**
* Copyright 2012 Amir Moulavi (amir.moulavi@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Amir Moulavi
*/
class HadoopConfigurationSpec extends Specification {
"Hadoop configuration" should {
"contain arbitrary number of properties" in {
val conf = hadoopConfig {
("fs.temp.dir" << "/tmp") +
("dfs.name.dir" << "/tmp") +
("dfs.info.port" << "13400")
}
conf.get("fs.temp.dir") mustEqual "/tmp"
conf.get("dfs.name.dir") mustEqual "/tmp"
conf.get("dfs.info.port") mustEqual "13400"
}
"be instantiable with default configurations" in {
hadoopConfig() mustNotEqual null
}
}
}
|
amir343/saaloop
|
saaloop-core/src/test/scala/com/jayway/saaloop/dsl/HadoopConfigurationSpec.scala
|
Scala
|
apache-2.0
| 1,342
|
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.examples
import com.stratio.datasource.mongodb.examples.MongoExampleFunctions._
object SQLExample extends App with MongoDefaultConstants {
val mongoClient = prepareEnvironment()
withSQLContext { sqlContext =>
sqlContext.sql(
s"""|CREATE TEMPORARY TABLE $Collection
|(id STRING, age INT, description STRING, enrolled BOOLEAN, name STRING, optionalField BOOLEAN)
|USING $MongoProvider
|OPTIONS (
|host '$MongoHost:$MongoPort',
|database '$Database',
|collection '$Collection'
|)
""".stripMargin.replaceAll("\\n", " "))
sqlContext.sql(s"SELECT id, name FROM $Collection WHERE age > 16").show(5)
}
cleanEnvironment(mongoClient)
}
|
pfcoperez/spark-mongodb
|
spark-mongodb-examples/src/main/scala/com/stratio/datasource/mongodb/examples/SQLExample.scala
|
Scala
|
apache-2.0
| 1,391
|
/*
* This source code is licensed under the MIT license found in the
* LICENSE.txt file in the root directory of this source tree
*/
package a14e.collz.mut
import scala.annotation.tailrec
import scala.collection.{AbstractIterator, mutable}
object PrefixMap {
sealed trait Node
class Leaf(val key: String,
val value: Any,
val startIndex: Int,
val validCount: Int) extends Node
class NonEmptyNode(val leaves: IntMap[Node],
val key: String,
val startIndex: Int,
val validCount: Int) extends Node
def leafIterator[T](leaf: Leaf): Iterator[(String, T)] = Iterator.single((leaf.key, leaf.value.asInstanceOf[T]))
def nodeIterator[T](leaves: IntMap[Node]): Iterator[(String, T)] = new AbstractIterator[(String, T)] {
private var stack: List[Iterator[(Int, Node)]] = Nil
private var currentIterator: Iterator[(Int, Node)] = leaves.iterator
@tailrec
private final def calcNext(): (String, T) = {
if (currentIterator.hasNext) {
currentIterator.next()._2 match {
case l: Leaf => (l.key, l.value.asInstanceOf[T])
case n: NonEmptyNode =>
stack = currentIterator :: stack
currentIterator = n.leaves.iterator
calcNext()
}
} else {
if (stack.isEmpty) null
else {
currentIterator = stack.head
stack = stack.tail
calcNext()
}
}
}
private var nextValue = calcNext()
override def next(): (String, T) =
if (hasNext) {
val res = nextValue
nextValue = calcNext()
res
} else Iterator.empty.next()
override def hasNext: Boolean = nextValue != null
}
private final def minOf3(x: Int, y: Int, z: Int): Int = {
math.min(math.min(x, y), z)
}
/**
* Computes the number of common characters in the strings
* first and second starting at index start; the number of compared
* characters will not exceed maxCount.
*
* @param first the first string
* @param second the second string
* @param start the starting index
* @param maxCount the maximum result
* @return the number of common characters in first and second starting at
*         index start, but no more than maxCount
*/
private[collz] def countEquals(first: String, second: String, start: Int, maxCount: Int): Int = {
val minLen = minOf3(first.length, second.length, start + maxCount)
if (minLen <= start)
return 0
var count = 0
var i = start
while (i < minLen) {
val char1 = first.charAt(i)
val char2 = second.charAt(i)
if (char1 != char2) return count
else count += 1
i += 1
}
count
}
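// Worked example: countEquals("abcdef", "abcxyz", 0, 10) == 3 (the common run
// "abc"); with start = 1 the comparison begins at index 1 and returns 2 ("bc").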
def apply[T](kvs: (String, T)*): PrefixMap[T] = new PrefixMap[T](new IntMap[Node](), 0) ++= kvs
// a special index for the "character" that represents the empty string:
// chosen this way because, when chars are treated as unsigned bytes,
// the largest real character index is 0xFF, so 0x100 is one past it
final val emptyStringIndex = 0x100
}
import PrefixMap._
/**
* A collection optimized for working with strings. It is built on a prefix tree
* (trie) whose every node holds a mut.IntMap. As a result, the worst-case time for
* lookup, insertion and removal does not depend on the number of elements: it is
* O(n * log16(k)), where n is the string length and k is the alphabet size. Since
* the alphabet size here is 256, the worst case is roughly 2 * O(n), depending only
* on the string length. More precisely, instead of the string length one can count
* the number of collisions for the key, which is usually far smaller than the
* string length.
* In the best case, when no other key starts with the same letter, most operations
* take roughly 2 * O(1), about the cost of an IntMap whose largest key is 256.
* For convenient lookup of empty strings, an extra index 0xFF + 1 = 256 was added
* for the empty string.
*
* In the description of this structure the terms "key" and "string" are used
* interchangeably.
*
* @param root the root node
* @param _size the size of the collection
* @tparam T the type of values in the collection
*/
class PrefixMap[T] private[collz](private val root: IntMap[Node],
private var _size: Int) extends mutable.Map[String, T] {
override def size: Int = _size
override def clear(): Unit = {
root.clear()
_size = 0
}
/**
* Computes the node index from the key and an offset from the start of the string.
* If startIndex equals the string length (the index for the empty character),
* the index is taken to be 0x100;
* otherwise the Char at index startIndex is simply converted to Int.
*
* @param key the string from which the key is taken
* @param startIndex the index within the string for this key
* @return the index within the node for the given key
*/
private def calcIndex(key: String, startIndex: Int): Int = {
if (key.length == startIndex)
emptyStringIndex
else
key.charAt(startIndex).toInt
}
/**
* Recursive lookup of a value in the prefix tree by key.
* The worst-case lookup cost depends only on the key (string) length:
* O(n), where n is the string length.
*
* @param startIndex the index within the string to check at the current node
* @param key the key to look up
* @param leaves the IntMap holding the data of non-leaf nodes
* @return the value found, typed as Any, or null
*/
@tailrec
private def recGetOrNull(startIndex: Int,
key: String,
leaves: IntMap[Node]): Any = {
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null => null
case l: Leaf =>
val count = countEquals(key, l.key, l.startIndex, l.validCount)
if (count == l.validCount) l.value
else null
case n: NonEmptyNode =>
val count = countEquals(key, n.key, n.startIndex, n.validCount)
if (count == n.validCount) recGetOrNull(startIndex + count, key, n.leaves)
else null
}
}
/**
* Returns the value for the given key if found, or null otherwise.
* The operation costs O(n), where n is the key length.
*
* @param key the key to look up
* @return the value for the given key if found, or null
*/
def getOrNull(key: String): T = recGetOrNull(0, key, root).asInstanceOf[T]
/**
* Returns Some(...) with the value for the given key if found, or None.
* The operation costs O(n), where n is the key length.
*
* @param key the key to look up
* @return Some(...) with the value for the given key if found, or None
*/
override def get(key: String): Option[T] = Option(getOrNull(key))
/**
* Returns true if this collection contains the given key, false otherwise.
* The operation costs O(n), where n is the key length.
*
* @param key the key to look up
* @return true if this collection contains the key, false otherwise
*/
override def contains(key: String): Boolean = getOrNull(key) != null
@tailrec
private def recHasPrefix(startIndex: Int,
key: String,
leaves: IntMap[Node]): Boolean = {
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null => false
case l: Leaf =>
val count = countEquals(key, l.key, l.startIndex, l.validCount)
count + startIndex == key.length
case n: NonEmptyNode =>
val count = countEquals(key, n.key, n.startIndex, n.validCount)
if (count == n.validCount) recHasPrefix(startIndex + count, key, n.leaves)
else false
}
}
def hasPrefix(key: String): Boolean = recHasPrefix(0, key, root)
@tailrec
private def recFindByPrefix(startIndex: Int,
key: String,
leaves: IntMap[Node]): Iterator[(String, T)] = {
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null => Iterator.empty
case leaf: Leaf =>
val count = countEquals(key, leaf.key, leaf.startIndex, leaf.validCount)
if (count + startIndex == key.length) leafIterator[T](leaf)
else Iterator.empty
case node: NonEmptyNode =>
val count = countEquals(key, node.key, node.startIndex, node.validCount)
if (count + startIndex == key.length)
PrefixMap.nodeIterator[T](node.leaves)
else if (count == node.validCount)
recFindByPrefix(startIndex + count, key, node.leaves)
else Iterator.empty
}
}
def findForPrefix(key: String): Iterator[(String, T)] = {
if (key.isEmpty) this.iterator
else recFindByPrefix(0, key, root)
}
@tailrec
private def recFindClosestPrefix(startIndex: Int,
key: String,
leaves: IntMap[Node],
lastIterator: () => (Iterator[(String, T)], Int)): (Iterator[(String, T)], Int) = {
if (startIndex == key.length)
return lastIterator()
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null => lastIterator()
case leaf: Leaf =>
val count = countEquals(key, leaf.key, leaf.startIndex, leaf.validCount)
val totalCount = count + startIndex
val iter = leafIterator[T](leaf)
(iter, totalCount)
case node: NonEmptyNode =>
val count = countEquals(key, node.key, node.startIndex, node.validCount)
def currentIterator(): (Iterator[(String, T)], Int) = {
val iter = PrefixMap.nodeIterator[T](node.leaves)
val totalCount = count + startIndex
(iter, totalCount)
}
if (count == node.validCount) {
recFindClosestPrefix(startIndex + count, key, node.leaves, () => currentIterator())
} else {
currentIterator()
}
}
}
// TODO: add tests
def findClosesByPrefix(key: String): (Iterator[(String, T)], Int) = {
def currentIterator() = (this.iterator, 0)
recFindClosestPrefix(0, key, root, () => currentIterator())
}
private def leafIndex(leaf: Leaf): Int = {
// check for the empty string
if (leaf.validCount == 0)
emptyStringIndex
else
leaf.key.charAt(leaf.startIndex).toInt
}
private def mergeLeaves(init: Leaf, toAdd: Leaf): Node = {
val maxCount = math.min(init.validCount, toAdd.validCount)
val count = countEquals(init.key, toAdd.key, init.startIndex, maxCount)
val firstInSecond = count == init.validCount
val secondInFirst = count == toAdd.validCount
if (firstInSecond && secondInFirst) // the strings are identical
return toAdd
val node =
new NonEmptyNode(IntMap[Node](), toAdd.key, init.startIndex, count)
val newStartIndex = init.startIndex + count
val newLeaf1 = new Leaf(init.key, init.value, newStartIndex, init.validCount - count)
val newLeaf2 = new Leaf(toAdd.key, toAdd.value, newStartIndex, toAdd.validCount - count)
val key1 = leafIndex(newLeaf1)
val key2 = leafIndex(newLeaf2)
node.leaves(key1) = newLeaf1
node.leaves(key2) = newLeaf2
node
}
private def mergeNodeAndLeaf(init: NonEmptyNode, toAdd: Leaf, count: Int): Node = {
val node =
new NonEmptyNode(IntMap[Node](), toAdd.key, init.startIndex, count)
val newStartIndex = init.startIndex + count
val node1 = new NonEmptyNode(init.leaves, init.key, newStartIndex, init.validCount - count)
val node2 = new Leaf(toAdd.key, toAdd.value, newStartIndex, toAdd.validCount - count)
val key1 = if (node1.validCount == 0) emptyStringIndex else node1.key.charAt(newStartIndex).toInt
val key2 = leafIndex(node2)
node.leaves(key1) = node1
node.leaves(key2) = node2
node
}
@tailrec
private def recAdd(startIndex: Int,
key: String,
value: Any,
leaves: IntMap[Node]): Unit = {
def newLeaf() = new Leaf(key, value, startIndex, key.length - startIndex)
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null =>
leaves(internalKey) = newLeaf()
_size += 1
case leaf: Leaf =>
val created = newLeaf()
val merged = mergeLeaves(leaf, created)
leaves(internalKey) = merged
if (created ne merged)
_size += 1
case foundNode: NonEmptyNode =>
val count = countEquals(foundNode.key, key, foundNode.startIndex, foundNode.validCount)
if (count == foundNode.validCount) {
recAdd(startIndex + count, key, value, foundNode.leaves)
} else {
leaves(internalKey) = mergeNodeAndLeaf(foundNode, newLeaf(), count)
_size += 1
}
}
}
override def update(key: String, value: T): Unit = {
recAdd(0, key, value, root)
}
override def +=(kv: (String, T)): PrefixMap.this.type = {
this (kv._1) = kv._2
this
}
@tailrec
private def recRemove(startIndex: Int,
key: String,
leaves: IntMap[Node],
previousLeaves: IntMap[Node],
previousLeaveKey: Int): Unit = {
val internalKey = calcIndex(key, startIndex)
leaves.getOrNull(internalKey) match {
case null =>
case leaf: Leaf =>
val leafCount = countEquals(leaf.key, key, leaf.startIndex, leaf.validCount)
val validDelete = leafCount == leaf.validCount && leafCount + startIndex == key.length
if (validDelete) {
leaves -= internalKey
_size -= 1
if (leaves.isEmpty && (previousLeaves ne null))
previousLeaves -= previousLeaveKey
}
case foundNode: NonEmptyNode =>
val count = countEquals(foundNode.key, key, foundNode.startIndex, foundNode.validCount)
if (count == foundNode.validCount) {
/** Occasionally clean up empty nodes to reduce leaked garbage;
* avoiding such leaks entirely would require a cleverer scheme.
* */
if (foundNode.leaves.isEmpty)
leaves -= internalKey
else
recRemove(startIndex + count, key, foundNode.leaves, leaves, internalKey)
}
}
}
override def -=(key: String): PrefixMap.this.type = {
recRemove(0, key, root, null, -1)
this
}
override def iterator: Iterator[(String, T)] = PrefixMap.nodeIterator[T](root)
}
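// A usage sketch (illustrative values):
//
//   val m = PrefixMap("car" -> 1, "card" -> 2, "care" -> 3)
//   m.getOrNull("card")            // 2
//   m.contains("ca")               // false -- "ca" is only a prefix, not a key
//   m.hasPrefix("car")             // true
//   m.findForPrefix("card").toList // List(("card", 2))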
|
a14e/collz
|
src/main/scala/a14e/collz/mut/PrefixMap.scala
|
Scala
|
mit
| 16,911
|
package eventstore
package core
sealed trait ConsumerStrategy
/**
* System supported consumer strategies for use with persistent subscriptions.
*/
object ConsumerStrategy {
val Values: Set[ConsumerStrategy] = Set(DispatchToSingle, RoundRobin)
def apply(name: String): ConsumerStrategy = {
Values find { _.toString equalsIgnoreCase name } getOrElse Custom(name)
}
/**
* Distributes events to a single client until it is full. Then round robin to the next client.
*/
@SerialVersionUID(1L) case object DispatchToSingle extends ConsumerStrategy
/**
* Distribute events to each client in a round robin fashion.
*/
@SerialVersionUID(1L) case object RoundRobin extends ConsumerStrategy
/**
* Unknown, not predefined strategy
*/
@SerialVersionUID(1L) final case class Custom private[eventstore] (value: String) extends ConsumerStrategy {
require(value != null, "value must not be null")
require(value.nonEmpty, "value must not be empty")
override def toString = value
}
}
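// Resolution sketch for apply: predefined strategies match by name,
// case-insensitively; any other name becomes a Custom strategy.
//
//   ConsumerStrategy("roundrobin")       // RoundRobin
//   ConsumerStrategy("dispatchtosingle") // DispatchToSingle
//   ConsumerStrategy("Pinned")           // Custom("Pinned")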
|
EventStore/EventStore.JVM
|
core/src/main/scala/eventstore/core/ConsumerStrategy.scala
|
Scala
|
bsd-3-clause
| 1,026
|
package epam.idobrovolskiy.wikipedia.trending.cli
import epam.idobrovolskiy.wikipedia.trending.time.WikiDate
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by Igor_Dobrovolskiy on 03.08.2017.
*/
class WikiQueryArgsParserTest extends FlatSpec with Matchers {
"CLI w/o any option" should "not be parsed as valid options" in {
val args = Array.empty[String]
WikiQueryArgsParser.parse(args) shouldEqual None
}
"CLI with single '--debug' option" should "not be parsed as valid options" in {
val args = Array("--debug")
WikiQueryArgsParser.parse(args) shouldEqual None
}
"CLI with single '--tokens' option" should "be parsed as TokensForPeriodQueryArgs with timeframe from Genesis till Now" in {
val args = Array("--tokens")
WikiQueryArgsParser.parse(args) shouldBe Some(TokensForPeriodQueryArgs(WikiQueryArgs.DefaultArgs)())
}
val tokensDebugExpected =
Some(TokensForPeriodQueryArgs(WikiQueryArgs.DefaultArgs)(d = true))
"CLI with '--tokens' and '--debug' option" should "be parsed as TokensForPeriodQueryArgs with timeframe from Genesis till Now and debug flag ON" in {
val args = Array("--tokens", "--debug")
WikiQueryArgsParser.parse(args) shouldEqual tokensDebugExpected
}
it should "work the same when options are placed in reversed order" in {
val args = Array("--debug", "--tokens")
WikiQueryArgsParser.parse(args) shouldEqual tokensDebugExpected
}
"CLI with '--tokens' and '--version=1' option" should "be parsed properly" in {
val args = Array("--tokens","--version=1")
WikiQueryArgsParser.parse(args) shouldEqual Some(TokensForPeriodQueryArgs(WikiQueryArgs.DefaultArgs)(qv = 1))
}
it should "work the same when options are placed in reversed order" in {
val args = Array("--version=1", "--tokens")
WikiQueryArgsParser.parse(args) shouldEqual Some(TokensForPeriodQueryArgs(WikiQueryArgs.DefaultArgs)(qv = 1))
}
"CLI with '--tokens' and '--version=2' option" should "be parsed properly" in {
val args = Array("--tokens","--version=2")
WikiQueryArgsParser.parse(args) shouldEqual Some(TokensForPeriodQueryArgs(WikiQueryArgs.DefaultArgs)(qv = 2))
}
}
|
igor-dobrovolskiy-epam/wikipedia-analysis-scala-core
|
src/test/scala/epam/idobrovolskiy/wikipedia/trending/cli/WikiQueryArgsParserTest.scala
|
Scala
|
apache-2.0
| 2,185
|
package org.globalwordnet.api.serialize
import eu.monnetproject.lang.{Language, Script}
import org.globalwordnet.api.wn._
import org.scalatest._
class WNDBSpec extends FlatSpec with Matchers {
var lr = new WNLMF(false).read(new java.io.FileReader("src/test/resources/example3.xml"))
it should "output a correct data.noun file" in {
val wndb = new WNDB(null, null, null, null, null, null, null, None, None, true, None, None)
import org.globalwordnet.api.MultiMap._
val entriesForSynset : Map[String, Seq[(LexicalEntry,Sense)]] = lr.lexicons(0).entries.flatMap({ entry =>
entry.senses.map({ sense =>
sense.synsetRef -> (entry, sense)
})
}).toMultiMap
val synsetLookup = collection.mutable.Map[String, (String, PartOfSpeech)]()
val data = (new ByteStringBuilder(), collection.mutable.Map[String, Seq[Int]]())
wndb.writeData(lr, lr.lexicons(0), noun, entriesForSynset, synsetLookup, data,
(oldId, newId) => {
wndb.replaceAll(data, oldId, newId)
})
data._1.toString should be (""" 1 This software and database is being provided to you, the LICENSEE, by
2 Princeton University under the following license. By obtaining, using
3 and/or copying this software and database, you agree that you have
4 read, understood, and will comply with these terms and conditions.:
5
6 Permission to use, copy, modify and distribute this software and
7 database and its documentation for any purpose and without fee or
8 royalty is hereby granted, provided that you agree to comply with
9 the following copyright notice and statements, including the disclaimer,
10 and that the same appear on ALL copies of the software, database and
11 documentation, including modifications that you make for internal
12 use or for distribution.
13
14 WordNet 3.1 Copyright 2011 by Princeton University. All rights reserved.
15
16 THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON
17 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
18 IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON
19 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT-
20 ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
21 OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT
22 INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
23 OTHER RIGHTS.
24
25 The name of Princeton University or Princeton may not be used in
26 advertising or publicity pertaining to distribution of the software
27 and/or database. Title to copyright in this software, database and
28 any associated documentation shall at all times remain with
29 Princeton University and LICENSEE agrees to preserve same.
00001740 45 n 01 paternal_grandfather 0 001 + 00001848 n 0101 | A father's father; a paternal grandfather
00001848 45 n 01 grandfather 0 001 @ 00001941 n 0000 | the father of your father or mother
00001941 45 n 00 000
""")
}
it should "output a correct index.noun file" in {
val wndb = new WNDB(null, null, null, null, null, null, null, None, None, true, None, None)
import org.globalwordnet.api.MultiMap._
val entriesForSynset : Map[String, Seq[(LexicalEntry,Sense)]] = lr.lexicons(0).entries.flatMap({ entry =>
entry.senses.map({ sense =>
sense.synsetRef -> (entry, sense)
})
}).toMultiMap
val synsetLookup = collection.mutable.Map[String, (String, PartOfSpeech)]()
val data = (new ByteStringBuilder(), collection.mutable.Map[String, Seq[Int]]())
val sw = new java.io.StringWriter()
val out = new java.io.PrintWriter(sw)
wndb.writeData(lr, lr.lexicons(0), noun, entriesForSynset, synsetLookup, data,
(oldId, newId) => {
wndb.replaceAll(data, oldId, newId)
})
wndb.writeIndex(lr.lexicons(0), noun, synsetLookup, out)
sw.toString should be (""" 1 This software and database is being provided to you, the LICENSEE, by
2 Princeton University under the following license. By obtaining, using
3 and/or copying this software and database, you agree that you have
4 read, understood, and will comply with these terms and conditions.:
5
6 Permission to use, copy, modify and distribute this software and
7 database and its documentation for any purpose and without fee or
8 royalty is hereby granted, provided that you agree to comply with
9 the following copyright notice and statements, including the disclaimer,
10 and that the same appear on ALL copies of the software, database and
11 documentation, including modifications that you make for internal
12 use or for distribution.
13
14 WordNet 3.1 Copyright 2011 by Princeton University. All rights reserved.
15
16 THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON
17 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
18 IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON
19 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT-
20 ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
21 OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT
22 INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
23 OTHER RIGHTS.
24
25 The name of Princeton University or Princeton may not be used in
26 advertising or publicity pertaining to distribution of the software
27 and/or database. Title to copyright in this software, database and
28 any associated documentation shall at all times remain with
29 Princeton University and LICENSEE agrees to preserve same.
grandfather n 1 1 @ 1 0 00001848
paternal_grandfather n 1 1 + 1 0 00001740
""")
}
it should "output a correct index.sense" in {
val wndb = new WNDB(null, null, null, null, null, null, null, None, None, true, None, None)
import org.globalwordnet.api.MultiMap._
val entriesForSynset : Map[String, Seq[(LexicalEntry,Sense)]] = lr.lexicons(0).entries.flatMap({ entry =>
entry.senses.map({ sense =>
sense.synsetRef -> (entry, sense)
})
}).toMultiMap
val synsetLookup = collection.mutable.Map[String, (String, PartOfSpeech)]()
val data = (new ByteStringBuilder(), collection.mutable.Map[String, Seq[Int]]())
val sw = new java.io.StringWriter()
val out = new java.io.PrintWriter(sw)
wndb.writeData(lr, lr.lexicons(0), noun, entriesForSynset, synsetLookup, data,
(oldId, newId) => {
wndb.replaceAll(data, oldId, newId)
})
wndb.writeSenseIndex(lr.lexicons(0), synsetLookup, entriesForSynset, out)
sw.toString should be ("""grandfather%1:01:00:: 00001848 1 0
paternal_grandfather%1:01:00:: 00001740 1 0
""")
}
}
|
jmccrae/gwn-scala-api
|
src/test/scala/org/globalwordnet/wnapi/test_wndb.scala
|
Scala
|
apache-2.0
| 6,795
|
import scala.quoted.*
import Macros.*
object Test {
def main(args: Array[String]): Unit = {
val sym = new Symantics {
type R = Int
def Meth(exp: Int): Int = exp
def Meth(): Int = 42
}
val test = m(sym)
}
}
|
lampepfl/dotty
|
tests/pos-macros/i7110b/Test_2.scala
|
Scala
|
apache-2.0
| 243
|
/*
* Copyright (C) 2015 Keith M. Hughes.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.robotbrains.data.cloud.timeseries.server.data
/**
* A data sample for a sensor.
*
* @author Keith M. Hughes
*/
class SensorDataSample(_sensor: String, _value: Double, _timestamp: Long) {
def sensor = _sensor
def value = _value
def timestamp = _timestamp
override def toString: String =
"SensorDataSample [sensor=" + sensor + ", value=" + value + ", timestamp=" + timestamp + "]"
}
|
kmhughes/robotbrains-examples
|
data/cloud/scala/org.robotbrains.data.cloud.timeseries.server/src/main/scala/org/robotbrains/data/cloud/timeseries/server/data/SensorDataSample.scala
|
Scala
|
apache-2.0
| 1,031
|
package org.joda
import org.joda.time.DateTime
import org.scalatest.FunSuite
class DatetimeTests extends FunSuite {
test("basic datetime test") {
val dt = new DateTime(2015, 8, 11, 12, 1, 2, 3)
assert(dt.year.get === 2015)
assert(dt.monthOfYear.get === 8)
assert(dt.dayOfMonth.get === 11)
assert(dt.hourOfDay.get === 12)
assert(dt.minuteOfHour.get === 1)
assert(dt.secondOfMinute.get === 2)
assert(dt.millisOfSecond.get === 3)
}
}
|
easel/play-json-extra
|
scalajs-joda-time/src/test/scala/org/joda/DatetimeTests.scala
|
Scala
|
apache-2.0
| 480
|
package io.surfkit.core.rabbitmq
import io.surfkit.model.Api
import scala.collection.JavaConversions._
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.util.ByteString
import com.rabbitmq.client.Connection
import com.rabbitmq.client.Channel
import com.rabbitmq.client.DefaultConsumer
import com.rabbitmq.client.Envelope
import com.rabbitmq.client.AMQP
object RabbitUserConsumer {
lazy val userExchange = RabbitConfig.userExchange
case class RabbitMessage(deliveryTag: Long, headers: Map[String, String], body: ByteString)
def props(userId: Long, userActor: ActorRef)(implicit connection: Connection) =
Props(new RabbitUserConsumer(userId, userActor))
}
class RabbitUserConsumer(userId: Long, userActor: ActorRef)(implicit connection: Connection) extends Actor with ActorLogging {
import io.surfkit.core.rabbitmq.RabbitUserConsumer._
val queue = s"${RabbitConfig.userExchange}.$userId"
private def initBindings(channel: Channel): Unit = {
channel.exchangeDeclare(userExchange, "direct", true)
channel.queueDeclare(queue, true, false, false, Map[String, java.lang.Object]())
channel.queueBind(queue, userExchange, queue)
}
private def initConsumer(channel: Channel): DefaultConsumer = new DefaultConsumer(channel) {
override def handleDelivery(
consumerTag: String,
envelope: Envelope,
properties: AMQP.BasicProperties,
body: Array[Byte]) = {
val rawHeaders = Option(properties.getHeaders).map(_.toMap).getOrElse(Map())
val headers = rawHeaders.mapValues(_.toString)
self ! RabbitMessage(envelope.getDeliveryTag, headers, ByteString(body))
}
}
var channel: Channel = null
var consumer: DefaultConsumer = null
override def receive = {
case msg: RabbitUserConsumer.RabbitMessage =>
println("USER :: @@@@@@@@@@@@@@@@@@")
log.debug(s"RabbitUserConsumer received msg with deliveryTag ${msg.deliveryTag}")
userActor ! msg
// This pushes data back into rabbit that will go down the web socket connections to the user.
case ret:Api.Result =>
val replyProps = new AMQP.BasicProperties
.Builder()
.correlationId(ret.routing.id)
.build()
println("In RabbitUserConsumer Result...")
println(ret.op)
println(s"replyTo: ${ret.routing.reply}")
println(s"ret.routing.id, ${ret.routing.id}")
// TODO: looks like i need a NAMED socket queue here... check into this more.
channel.basicPublish( "", ret.routing.reply, replyProps, upickle.write( ret ).getBytes )
}
override def preStart() = {
channel = connection.createChannel()
consumer = {
initBindings(channel)
val consumer = initConsumer(channel)
channel.basicConsume(queue, true, consumer)
log.info(s"$userActor started consuming from queue=$queue")
consumer
}
}
override def postStop() = {
// this might be too late and could cause the loss of some messages
channel.basicCancel(consumer.getConsumerTag())
channel.close()
}
}
|
coreyauger/surfkit
|
core/src/main/scala/core/rabbitmq/RabbitUserConsumer.scala
|
Scala
|
mit
| 3,087
|
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.modulej.phases
import ch.usi.inf.l3.sana
import sana.tiny.dsl._
import sana.tiny.core._
import sana.tiny.core.Implicits._
import sana.tiny.ast.{Tree, NoTree}
import sana.tiny.symbols.Symbol
import sana.modulej.Nodes
import sana.ooj.typechecker._
trait ConstructorsCheckerFamilyApi
extends CheckerFamily[(Tree, ConstructorCheckerEnv)] {
self =>
override def default = { case s => () }
def components: List[PartialFunction[(Tree, ConstructorCheckerEnv), Unit]] =
generateComponents[(Tree, ConstructorCheckerEnv), Unit](
"Program,CompilationUnit,PackageDef,ClassDef,Template,MethodDef,Block,Assign",
"ConstructorsCheckerComponent", "check", "")
def check: ((Tree, ConstructorCheckerEnv)) => Unit = family
}
case class ConstructorsCheckerFamily(compiler: CompilerInterface)
extends ConstructorsCheckerFamilyApi
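// Usage sketch (hypothetical values; assumes a CompilerInterface `compiler`, a Tree
// `tree` and a ConstructorCheckerEnv `env` are in scope):
// val family = ConstructorsCheckerFamily(compiler)
// family.check((tree, env))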
|
amanjpro/languages-a-la-carte
|
modulej/src/main/scala/phases/ConstructorsCheckerFamily.scala
|
Scala
|
bsd-3-clause
| 2,442
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.canvas
import sun.misc.Unsafe
/**
*/
private[canvas] object UnsafeUtil {
// Fetch theUnsafe object for Oracle and OpenJDK
private[canvas] val unsafe = {
import java.lang.reflect.Field
val field: Field = classOf[Unsafe].getDeclaredField("theUnsafe")
field.setAccessible(true)
field.get(null).asInstanceOf[Unsafe]
}
if (unsafe == null)
throw new RuntimeException("Unsafe is unavailable")
private[canvas] val arrayByteBaseOffset: Long = unsafe.arrayBaseOffset(classOf[Array[Byte]])
private[canvas] val arrayByteIndexScale: Int = unsafe.arrayIndexScale(classOf[Array[Byte]])
// Make sure the VM thinks bytes are only one byte wide
if (arrayByteIndexScale != 1)
throw new IllegalStateException("Byte array index scale must be 1, but is " + arrayByteIndexScale)
}
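// Minimal usage sketch: read elements of a byte array through Unsafe. The offset
// arithmetic relies on arrayByteIndexScale == 1, which is verified above. Both members
// are private[canvas], so this only works from within the wvlet.airframe.canvas package:
// val bytes = Array[Byte](7, 8, 9)
// val first = UnsafeUtil.unsafe.getByte(bytes, UnsafeUtil.arrayByteBaseOffset) // 7
// val third = UnsafeUtil.unsafe.getByte(bytes, UnsafeUtil.arrayByteBaseOffset + 2) // 9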
|
wvlet/airframe
|
airframe-canvas/src/main/scala/wvlet/airframe/canvas/UnsafeUtil.scala
|
Scala
|
apache-2.0
| 1,389
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.spark
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.geotools.data.{Query, Transaction}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.planning.QueryPlanner
import org.locationtech.geomesa.kudu.data.{KuduDataStore, KuduDataStoreFactory}
import org.locationtech.geomesa.spark.{DataStoreConnector, SpatialRDD, SpatialRDDProvider}
import org.locationtech.geomesa.utils.geotools.FeatureUtils
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeature
class KuduSpatialRDDProvider extends SpatialRDDProvider {
override def canProcess(params: java.util.Map[String, _ <: java.io.Serializable]): Boolean =
KuduDataStoreFactory.canProcess(params)
override def rdd(conf: Configuration, sc: SparkContext, params: Map[String, String], query: Query): SpatialRDD = {
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
val ds = DataStoreConnector[KuduDataStore](params)
// force loose bbox to be false
query.getHints.put(QueryHints.LOOSE_BBOX, false)
// get the query plan to set up the iterators, ranges, etc
lazy val sft = ds.getSchema(query.getTypeName)
lazy val transform = {
QueryPlanner.setQueryTransforms(sft, query)
query.getHints.getTransformSchema
}
if (ds == null || sft == null) {
SpatialRDD(sc.emptyRDD[SimpleFeature], transform.getOrElse(sft))
} else {
val jobConf = new JobConf(conf)
GeoMesaKuduInputFormat.configure(jobConf, params, query)
GeoMesaKuduInputFormat.addCredentials(jobConf, ds.client)
val rdd = sc.newAPIHadoopRDD(jobConf, classOf[GeoMesaKuduInputFormat],
classOf[NullWritable], classOf[SimpleFeature]).map(_._2)
SpatialRDD(rdd, transform.getOrElse(sft))
}
}
override def save(rdd: RDD[SimpleFeature], params: Map[String, String], typeName: String): Unit = {
val ds = DataStoreConnector[KuduDataStore](params)
require(ds.getSchema(typeName) != null,
"Feature type must exist before calling save. Call `createSchema` on the DataStore first.")
unsafeSave(rdd, params, typeName)
}
/**
* Writes this RDD to a GeoMesa table.
* The type must exist in the data store, and all of the features in the RDD must be of this type.
* This method assumes that the schema exists.
*
* @param rdd rdd
* @param params data store connection parameters
* @param typeName feature type name
*/
def unsafeSave(rdd: RDD[SimpleFeature], params: Map[String, String], typeName: String): Unit = {
rdd.foreachPartition { iter =>
val ds = DataStoreConnector[KuduDataStore](params)
WithClose(ds.getFeatureWriterAppend(typeName, Transaction.AUTO_COMMIT)) { writer =>
iter.foreach(FeatureUtils.write(writer, _, useProvidedFid = true))
}
}
}
}
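// Usage sketch (hypothetical parameter keys and values; the real keys come from
// KuduDataStoreFactory): build an RDD of SimpleFeatures for a feature type via a GeoTools Query.
// val params = Map("kudu.master" -> "localhost:7051", "kudu.catalog" -> "geomesa")
// val provider = new KuduSpatialRDDProvider()
// val spatialRdd = provider.rdd(new Configuration(), sc, params, new Query("my-feature-type"))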
|
aheyne/geomesa
|
geomesa-kudu/geomesa-kudu-spark/src/main/scala/org/locationtech/geomesa/kudu/spark/KuduSpatialRDDProvider.scala
|
Scala
|
apache-2.0
| 3,496
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen
import org.apache.flink.api.common.functions.Function
import org.apache.flink.cep.functions.PatternProcessFunction
import org.apache.flink.cep.pattern.conditions.{IterativeCondition, RichIterativeCondition}
import org.apache.flink.configuration.Configuration
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.data.{GenericRowData, RowData}
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.CodeGenUtils._
import org.apache.flink.table.planner.codegen.GenerateUtils.{generateNullLiteral, generateRowtimeAccess}
import org.apache.flink.table.planner.codegen.GeneratedExpression.{NEVER_NULL, NO_CODE}
import org.apache.flink.table.planner.codegen.Indenter.toISC
import org.apache.flink.table.planner.codegen.MatchCodeGenerator._
import org.apache.flink.table.planner.codegen.agg.AggsHandlerCodeGenerator
import org.apache.flink.table.planner.functions.sql.FlinkSqlOperatorTable._
import org.apache.flink.table.planner.plan.utils.AggregateUtil
import org.apache.flink.table.planner.plan.utils.MatchUtil.AggregationPatternVariableFinder
import org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore
import org.apache.flink.table.runtime.generated.GeneratedFunction
import org.apache.flink.table.runtime.operators.`match`.{IterativeConditionRunner, PatternProcessFunctionRunner}
import org.apache.flink.table.types.logical.{RowType, TimestampKind, TimestampType}
import org.apache.flink.table.utils.EncodingUtils
import org.apache.flink.util.Collector
import org.apache.flink.util.MathUtils.checkedDownCast
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlAggFunction
import org.apache.calcite.tools.RelBuilder
import org.apache.calcite.util.ImmutableBitSet
import java.lang.{Long => JLong}
import java.util
import _root_.scala.collection.JavaConversions._
import _root_.scala.collection.JavaConverters._
import _root_.scala.collection.mutable
/**
* A code generator for generating CEP related functions.
*
* Aggregates are generated as follows:
* 1. all aggregate [[RexCall]]s are grouped by their corresponding pattern variable
* 2. even if the same aggregation is used multiple times in an expression
* (e.g. SUM(A.price) > SUM(A.price) + 1) it will be calculated only once; to do so [[AggBuilder]]
* keeps a set of already seen aggregation calls and reuses the code that accesses the
* appropriate field of the aggregation result
* 3. after translating every expression (either in [[generateCondition]] or in
* [[generateOneRowPerMatchExpression]]) code will be generated for:
* - a [[GeneratedFunction]], which will be an inner class
* - said [[GeneratedFunction]] will be instantiated in the ctor and opened/closed
* in the corresponding methods of the top-level generated classes
* - a function that transforms input rows (row by row) into aggregate input rows
* - a function that calculates aggregates for a variable, using the previous function
* The generated code will look similar to this:
*
* @param ctx the context of the code generator
* @param nullableInput input(s) can be null.
* @param patternNames sorted sequence of pattern variables
* @param currentPattern if generating condition the name of pattern, which the condition will
* be applied to
*/
class MatchCodeGenerator(
ctx: CodeGeneratorContext,
relBuilder: RelBuilder,
nullableInput: Boolean,
patternNames: Seq[String],
currentPattern: Option[String] = None,
collectorTerm: String = CodeGenUtils.DEFAULT_COLLECTOR_TERM)
extends ExprCodeGenerator(ctx, nullableInput) {
private case class GeneratedPatternList(resultTerm: String, code: String)
/**
* Used to assign unique names for list of events per pattern variable name. Those lists
* are treated as inputs and are needed by input access code.
*/
private val reusablePatternLists: mutable.HashMap[String, GeneratedPatternList] =
mutable.HashMap[String, GeneratedPatternList]()
/**
* Used to deduplicate aggregations calculation. The deduplication is performed by
* [[RexNode#toString]]. Those expressions need to be accessible from splits, if any exist.
*/
private val reusableAggregationExpr = new mutable.HashMap[String, GeneratedExpression]()
/**
* Context information used by a pattern reference variable to index the rows mapped to it.
* Indexes an element at `offset`, either from the beginning or from the end, depending on the value of `first`.
*/
private var offset: Int = 0
private var first: Boolean = false
/**
* Flag that tells whether we generate expressions inside an aggregate; it determines how to
* access the input row.
*/
private var isWithinAggExprState: Boolean = false
/**
* Used to collect all aggregates per pattern variable.
*/
private val aggregatesPerVariable = new mutable.HashMap[String, AggBuilder]
/**
* Name of term in function used to transform input row into aggregate input row.
*/
private val inputAggRowTerm = "inAgg"
/** Term for row for key extraction */
private val keyRowTerm = "keyRow"
/**
* Term holding the generated array of pattern names.
*/
private val patternNamesTerm = newName("patternNames")
private lazy val eventTypeTerm = boxedTypeTermForType(input1Type)
/**
* Sets the new reference variable indexing context. This should be used when resolving logical
* offsets (LAST/FIRST).
*
* @param first true if indexing from the beginning, false otherwise
* @param offset offset from either beginning or the end
*/
private def updateOffsets(first: Boolean, offset: Int): Unit = {
this.first = first
this.offset = offset
}
/** Resets indexing context of Pattern variable. */
private def resetOffsets(): Unit = {
first = false
offset = 0
}
private def reusePatternLists(): String = {
reusablePatternLists.values.map(_.code).mkString("\\n")
}
private def addReusablePatternNames(): Unit = {
ctx.addReusableMember(s"private String[] $patternNamesTerm = new String[] { ${
patternNames.map(p => s""""${EncodingUtils.escapeJava(p)}"""").mkString(", ")
} };")
}
/**
* Generates a wrapper [[IterativeConditionRunner]] around code generated [[IterativeCondition]]
* for a single pattern definition defined in DEFINE clause.
*
* @param patternDefinition pattern definition as defined in DEFINE clause
* @return a code generated condition that can be used in constructing a
* [[org.apache.flink.cep.pattern.Pattern]]
*/
def generateIterativeCondition(patternDefinition: RexNode): IterativeCondition[RowData] = {
val condition = generateCondition(patternDefinition)
val body =
s"""
|${condition.code}
|return ${condition.resultTerm};
|""".stripMargin
val genCondition = generateMatchFunction(
"MatchRecognizeCondition",
classOf[RichIterativeCondition[RowData]],
body)
new IterativeConditionRunner(genCondition)
}
/**
* Generates a wrapper [[PatternProcessFunctionRunner]] around code generated
* [[PatternProcessFunction]] that transform found matches into expected output as defined
* in the MEASURES. It also accounts for fields used in PARTITION BY.
*
* @param returnType the row type of output row
* @param partitionKeys keys used for partitioning incoming data, they will be included in the
* output
* @param measures definitions from MEASURE clause
* @return a process function that can be applied to [[org.apache.flink.cep.PatternStream]]
*/
def generateOneRowPerMatchExpression(
returnType: RowType,
partitionKeys: ImmutableBitSet,
measures: util.Map[String, RexNode])
: PatternProcessFunctionRunner = {
val resultExpression = generateOneRowPerMatchExpression(
partitionKeys,
measures,
returnType)
val body =
s"""
|${resultExpression.code}
|$collectorTerm.collect(${resultExpression.resultTerm});
|""".stripMargin
val genFunction = generateMatchFunction(
"MatchRecognizePatternProcessFunction",
classOf[PatternProcessFunction[RowData, RowData]],
body)
new PatternProcessFunctionRunner(genFunction)
}
/**
* Generates a [[org.apache.flink.api.common.functions.Function]] that can be passed to Java
* compiler.
*
* @param name Class name of the Function. Does not need to be unique, but has to be a valid
* Java class identifier.
* @param clazz Flink Function to be generated.
* @param bodyCode code contents of the SAM (Single Abstract Method). Inputs, collector, or
* output record can be accessed via the given term methods.
* @tparam F Flink Function to be generated.
* @tparam T Return type of the Flink Function.
* @return instance of GeneratedFunction
*/
private def generateMatchFunction[F <: Function, T <: Any](
name: String,
clazz: Class[F],
bodyCode: String)
: GeneratedFunction[F] = {
val funcName = newName(name)
val collectorTypeTerm = classOf[Collector[Any]].getCanonicalName
val (functionClass, signature, inputStatements) =
if (clazz == classOf[RichIterativeCondition[_]]) {
val inputTypeTerm = boxedTypeTermForType(input1Type)
val baseClass = classOf[RichIterativeCondition[_]]
val contextType = classOf[IterativeCondition.Context[_]].getCanonicalName
(baseClass,
s"boolean filter(Object _in1, $contextType $contextTerm)",
List(s"$inputTypeTerm $input1Term = ($inputTypeTerm) _in1;"))
} else if (clazz == classOf[PatternProcessFunction[_, _]]) {
val baseClass = classOf[PatternProcessFunction[_, _]]
val inputTypeTerm =
s"java.util.Map<String, java.util.List<${boxedTypeTermForType(input1Type)}>>"
val contextTypeTerm = classOf[PatternProcessFunction.Context].getCanonicalName
(baseClass,
s"void processMatch($inputTypeTerm $input1Term, $contextTypeTerm $contextTerm, " +
s"$collectorTypeTerm $collectorTerm)",
List())
} else throw new CodeGenException("Unsupported Function.")
val funcCode =
j"""
public class $funcName extends ${functionClass.getCanonicalName} {
${ctx.reuseMemberCode()}
${ctx.reuseLocalVariableCode()}
public $funcName(Object[] references) throws Exception {
${ctx.reuseInitCode()}
}
@Override
public void open(${classOf[Configuration].getCanonicalName} parameters) throws Exception {
${ctx.reuseOpenCode()}
}
@Override
public $signature throws Exception {
${inputStatements.mkString("\\n")}
${reusePatternLists()}
${ctx.reusePerRecordCode()}
${ctx.reuseInputUnboxingCode()}
$bodyCode
}
@Override
public void close() throws Exception {
${ctx.reuseCloseCode()}
}
}
""".stripMargin
new GeneratedFunction[F](funcName, funcCode, ctx.references.toArray)
}
private def generateOneRowPerMatchExpression(
partitionKeys: ImmutableBitSet,
measures: java.util.Map[String, RexNode],
returnType: RowType): GeneratedExpression = {
// For "ONE ROW PER MATCH", the output columns include:
// 1) the partition columns;
// 2) the columns defined in the measures clause.
val resultExprs =
partitionKeys.toArray.map(generatePartitionKeyAccess) ++
returnType.getFieldNames
.filter(measures.containsKey(_))
.map { fieldName =>
generateExpression(measures.get(fieldName))
}
val resultCodeGenerator = new ExprCodeGenerator(ctx, nullableInput)
.bindInput(input1Type, inputTerm = input1Term)
val resultExpression = resultCodeGenerator.generateResultExpression(
resultExprs,
returnType,
classOf[GenericRowData])
aggregatesPerVariable.values.foreach(_.generateAggFunction())
resultExpression
}
private def generateCondition(call: RexNode): GeneratedExpression = {
val exp = call.accept(this)
aggregatesPerVariable.values.foreach(_.generateAggFunction())
exp
}
override def visitCall(call: RexCall): GeneratedExpression = {
call.getOperator match {
case PREV | NEXT =>
val countLiteral = call.getOperands.get(1).asInstanceOf[RexLiteral]
val count = checkedDownCast(countLiteral.getValueAs(classOf[JLong]))
if (count != 0) {
throw new TableException("Flink does not support physical offsets within partition.")
} else {
updateOffsets(first = false, 0)
val exp = call.getOperands.get(0).accept(this)
resetOffsets()
exp
}
case FIRST | LAST =>
val countLiteral = call.getOperands.get(1).asInstanceOf[RexLiteral]
val offset = checkedDownCast(countLiteral.getValueAs(classOf[JLong]))
updateOffsets(call.getOperator == FIRST, offset)
val patternExp = call.operands.get(0).accept(this)
resetOffsets()
patternExp
case FINAL => call.getOperands.get(0).accept(this)
case _: SqlAggFunction =>
val variable = call.accept(new AggregationPatternVariableFinder)
.getOrElse(throw new TableException("No pattern variable specified in aggregate"))
val matchAgg = aggregatesPerVariable.get(variable) match {
case Some(agg) => agg
case None =>
val agg = new AggBuilder(variable)
aggregatesPerVariable(variable) = agg
agg
}
matchAgg.generateDeduplicatedAggAccess(call)
case MATCH_PROCTIME =>
// attribute is proctime indicator.
// We use a null literal and generate a timestamp when we need it.
generateNullLiteral(
new TimestampType(true, TimestampKind.PROCTIME, 3),
ctx.nullCheck)
case MATCH_ROWTIME =>
generateRowtimeAccess(ctx, contextTerm)
case PROCTIME_MATERIALIZE =>
// override proctime materialize code generation
// because there is no timerService in PatternProcessFunction#Context
generateProctimeTimestamp()
case _ => super.visitCall(call)
}
}
private def generateProctimeTimestamp(): GeneratedExpression = {
val resultType = new TimestampType(3)
val resultTypeTerm = primitiveTypeTermForType(resultType)
val resultTerm = ctx.addReusableLocalVariable(resultTypeTerm, "result")
val resultCode =
s"""
|$resultTerm = $TIMESTAMP_DATA.fromEpochMillis($contextTerm.currentProcessingTime());
|""".stripMargin.trim
// the proctime has been materialized, so it's TIMESTAMP now, not PROCTIME_INDICATOR
GeneratedExpression(resultTerm, NEVER_NULL, resultCode, resultType)
}
/**
* Extracts partition keys from any element of the match
*
* @param partitionKeyIdx partition key index
* @return generated code for the given key
*/
private def generatePartitionKeyAccess(partitionKeyIdx: Int): GeneratedExpression = {
val keyRow = generateKeyRow()
GenerateUtils.generateFieldAccess(
ctx,
keyRow.resultType,
keyRow.resultTerm,
partitionKeyIdx
)
}
private def generateKeyRow(): GeneratedExpression = {
val exp = ctx.getReusableInputUnboxingExprs(keyRowTerm, 0) match {
case Some(expr) =>
expr
case None =>
val nullTerm = newName("isNull")
ctx.addReusableMember(s"$eventTypeTerm $keyRowTerm;")
val keyCode =
j"""
|boolean $nullTerm = true;
|for (java.util.Map.Entry entry : $input1Term.entrySet()) {
| java.util.List value = (java.util.List) entry.getValue();
| if (value != null && value.size() > 0) {
| $keyRowTerm = ($eventTypeTerm) value.get(0);
| $nullTerm = false;
| break;
| }
|}
|""".stripMargin
val exp = GeneratedExpression(keyRowTerm, nullTerm, keyCode, input1Type)
ctx.addReusableInputUnboxingExprs(keyRowTerm, 0, exp)
exp
}
exp.copy(code = NO_CODE)
}
override def visitPatternFieldRef(fieldRef: RexPatternFieldRef): GeneratedExpression = {
if (isWithinAggExprState) {
GenerateUtils.generateFieldAccess(ctx, input1Type, inputAggRowTerm, fieldRef.getIndex)
} else {
if (fieldRef.getAlpha.equals(ALL_PATTERN_VARIABLE) &&
currentPattern.isDefined && offset == 0 && !first) {
GenerateUtils.generateInputAccess(
ctx, input1Type, input1Term, fieldRef.getIndex, nullableInput)
} else {
generatePatternFieldRef(fieldRef)
}
}
}
private def generateDefinePatternVariableExp(
patternName: String,
currentPattern: String)
: GeneratedPatternList = {
val Seq(listName, eventNameTerm) = newNames("patternEvents", "event")
ctx.addReusableMember(s"java.util.List $listName;")
val addCurrent = if (currentPattern == patternName || patternName == ALL_PATTERN_VARIABLE) {
j"""
|$listName.add($input1Term);
|""".stripMargin
} else {
""
}
val listCode = if (patternName == ALL_PATTERN_VARIABLE) {
addReusablePatternNames()
val patternTerm = newName("pattern")
j"""
|$listName = new java.util.ArrayList();
|for (String $patternTerm : $patternNamesTerm) {
| for ($eventTypeTerm $eventNameTerm :
| $contextTerm.getEventsForPattern($patternTerm)) {
| $listName.add($eventNameTerm);
| }
|}
|""".stripMargin
} else {
val escapedPatternName = EncodingUtils.escapeJava(patternName)
j"""
|$listName = new java.util.ArrayList();
|for ($eventTypeTerm $eventNameTerm :
| $contextTerm.getEventsForPattern("$escapedPatternName")) {
| $listName.add($eventNameTerm);
|}
|""".stripMargin
}
val code =
j"""
|$listCode
|$addCurrent
|""".stripMargin
GeneratedPatternList(listName, code)
}
private def generateMeasurePatternVariableExp(patternName: String): GeneratedPatternList = {
val Seq(listName, patternTerm) = newNames("patternEvents", "pattern")
ctx.addReusableMember(s"java.util.List $listName;")
val code = if (patternName == ALL_PATTERN_VARIABLE) {
addReusablePatternNames()
j"""
|$listName = new java.util.ArrayList();
|for (String $patternTerm : $patternNamesTerm) {
| java.util.List rows = (java.util.List) $input1Term.get($patternTerm);
| if (rows != null) {
| $listName.addAll(rows);
| }
|}
|""".stripMargin
} else {
val escapedPatternName = EncodingUtils.escapeJava(patternName)
j"""
|$listName = (java.util.List) $input1Term.get("$escapedPatternName");
|if ($listName == null) {
| $listName = java.util.Collections.emptyList();
|}
|""".stripMargin
}
GeneratedPatternList(listName, code)
}
private def findEventByLogicalPosition(patternFieldAlpha: String): GeneratedExpression = {
val Seq(rowNameTerm, isRowNull) = newNames("row", "isRowNull")
val listName = findEventsByPatternName(patternFieldAlpha).resultTerm
val resultIndex = if (first) {
j"""$offset"""
} else {
j"""$listName.size() - $offset - 1"""
}
val funcCode =
j"""
|$eventTypeTerm $rowNameTerm = null;
|boolean $isRowNull = true;
|if ($listName.size() > $offset) {
| $rowNameTerm = (($eventTypeTerm) $listName.get($resultIndex));
| $isRowNull = false;
|}
|""".stripMargin
GeneratedExpression(rowNameTerm, "", funcCode, input1Type)
}
private def findEventsByPatternName(patternFieldAlpha: String): GeneratedPatternList = {
reusablePatternLists.get(patternFieldAlpha) match {
case Some(expr) =>
expr
case None =>
val exp = currentPattern match {
case Some(p) => generateDefinePatternVariableExp(patternFieldAlpha, p)
case None => generateMeasurePatternVariableExp(patternFieldAlpha)
}
reusablePatternLists(patternFieldAlpha) = exp
exp
}
}
private def generatePatternFieldRef(fieldRef: RexPatternFieldRef): GeneratedExpression = {
val escapedAlpha = EncodingUtils.escapeJava(fieldRef.getAlpha)
val patternVariableRef = ctx.getReusableInputUnboxingExprs(
s"$escapedAlpha#$first", offset) match {
case Some(expr) =>
expr
case None =>
val exp = findEventByLogicalPosition(fieldRef.getAlpha)
ctx.addReusableInputUnboxingExprs(s"$escapedAlpha#$first", offset, exp)
exp
}
GenerateUtils.generateNullableInputFieldAccess(
ctx,
patternVariableRef.resultType,
patternVariableRef.resultTerm,
fieldRef.getIndex)
}
class AggBuilder(variable: String) {
private val aggregates = new mutable.ListBuffer[RexCall]()
private val variableUID = newName("variable")
private val calculateAggFuncName = s"calculateAgg_$variableUID"
def generateDeduplicatedAggAccess(aggCall: RexCall): GeneratedExpression = {
reusableAggregationExpr.get(aggCall.toString) match {
case Some(expr) =>
expr
case None =>
val exp: GeneratedExpression = generateAggAccess(aggCall)
aggregates += aggCall
reusableAggregationExpr(aggCall.toString) = exp
ctx.addReusablePerRecordStatement(exp.code)
exp.copy(code = NO_CODE)
}
}
private def generateAggAccess(aggCall: RexCall): GeneratedExpression = {
val singleAggResultTerm = newName("result")
val singleAggNullTerm = newName("nullTerm")
val singleAggResultType = FlinkTypeFactory.toLogicalType(aggCall.`type`)
val primitiveSingleAggResultTypeTerm = primitiveTypeTermForType(singleAggResultType)
val boxedSingleAggResultTypeTerm = boxedTypeTermForType(singleAggResultType)
val allAggRowTerm = s"aggRow_$variableUID"
val rowsForVariableCode = findEventsByPatternName(variable)
val codeForAgg =
j"""
|$GENERIC_ROW $allAggRowTerm = $calculateAggFuncName(${rowsForVariableCode.resultTerm});
|""".stripMargin
ctx.addReusablePerRecordStatement(codeForAgg)
val defaultValue = primitiveDefaultValue(singleAggResultType)
val codeForSingleAgg = if (ctx.nullCheck) {
j"""
|boolean $singleAggNullTerm;
|$primitiveSingleAggResultTypeTerm $singleAggResultTerm;
|if ($allAggRowTerm.getField(${aggregates.size}) != null) {
| $singleAggResultTerm = ($boxedSingleAggResultTypeTerm) $allAggRowTerm
| .getField(${aggregates.size});
| $singleAggNullTerm = false;
|} else {
| $singleAggNullTerm = true;
| $singleAggResultTerm = $defaultValue;
|}
|""".stripMargin
} else {
j"""
|$primitiveSingleAggResultTypeTerm $singleAggResultTerm =
| ($boxedSingleAggResultTypeTerm) $allAggRowTerm.getField(${aggregates.size});
|""".stripMargin
}
ctx.addReusablePerRecordStatement(codeForSingleAgg)
GeneratedExpression(singleAggResultTerm, singleAggNullTerm, NO_CODE, singleAggResultType)
}
def generateAggFunction(): Unit = {
val matchAgg = extractAggregatesAndExpressions
val aggCalls = matchAgg.aggregations.map(a => AggregateCall.create(
a.sqlAggFunction,
false,
false,
a.exprIndices,
-1,
a.resultType,
a.sqlAggFunction.getName))
val needRetraction = matchAgg.aggregations.map(_ => false).toArray
val typeFactory = relBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory]
val inputRelType = typeFactory.createStructType(
matchAgg.inputExprs.map(_.getType),
matchAgg.inputExprs.indices.map(i => s"TMP$i"))
val aggInfoList = AggregateUtil.transformToStreamAggregateInfoList(
aggCalls,
inputRelType,
needRetraction,
needInputCount = false,
isStateBackendDataViews = false,
needDistinctInfo = false)
val inputFieldTypes = matchAgg.inputExprs
.map(expr => FlinkTypeFactory.toLogicalType(expr.getType))
val aggsHandlerCodeGenerator = new AggsHandlerCodeGenerator(
CodeGeneratorContext(new TableConfig),
relBuilder,
inputFieldTypes,
copyInputField = false).needAccumulate()
val generatedAggsHandler = aggsHandlerCodeGenerator.generateAggsHandler(
s"AggFunction_$variableUID",
aggInfoList)
val generatedTerm = ctx.addReusableObject(generatedAggsHandler, "generatedAggHandler")
val aggsHandlerTerm = s"aggregator_$variableUID"
val declareCode = s"private $AGGS_HANDLER_FUNCTION $aggsHandlerTerm;"
val initCode = s"$aggsHandlerTerm = ($AGGS_HANDLER_FUNCTION) " +
s"$generatedTerm.newInstance($CURRENT_CLASS_LOADER);"
ctx.addReusableMember(declareCode)
ctx.addReusableInitStatement(initCode)
val transformFuncName = s"transformRowForAgg_$variableUID"
val inputTransform: String = generateAggInputExprEvaluation(
matchAgg.inputExprs,
transformFuncName)
generateAggCalculation(aggsHandlerTerm, transformFuncName, inputTransform)
}
private def extractAggregatesAndExpressions: MatchAgg = {
val inputRows = new mutable.LinkedHashMap[String, (RexNode, Int)]
val singleAggregates = aggregates.map { aggCall =>
val callsWithIndices = aggCall.operands.asScala.map(innerCall => {
inputRows.get(innerCall.toString) match {
case Some(x) =>
x
case None =>
val callWithIndex = (innerCall, inputRows.size)
inputRows(innerCall.toString) = callWithIndex
callWithIndex
}
})
SingleAggCall(
aggCall.getOperator.asInstanceOf[SqlAggFunction],
aggCall.`type`,
callsWithIndices.map(callsWithIndice => Integer.valueOf(callsWithIndice._2)))
}
MatchAgg(singleAggregates, inputRows.values.map(_._1).toSeq)
}
private def generateAggCalculation(
aggsHandlerTerm: String,
transformFuncName: String,
inputTransformFunc: String): Unit = {
val code =
j"""
|$inputTransformFunc
|
|private $GENERIC_ROW $calculateAggFuncName(java.util.List input)
| throws Exception {
| $aggsHandlerTerm.setAccumulators($aggsHandlerTerm.createAccumulators());
| for ($ROW_DATA row : input) {
| $aggsHandlerTerm.accumulate($transformFuncName(row));
| }
| $GENERIC_ROW result = ($GENERIC_ROW) $aggsHandlerTerm.getValue();
| return result;
|}
|""".stripMargin
ctx.addReusableMember(code)
ctx.addReusableOpenStatement(
s"$aggsHandlerTerm.open(new $AGGS_HANDLER_CONTEXT(getRuntimeContext()));")
ctx.addReusableCloseStatement(s"$aggsHandlerTerm.close();")
}
private def generateAggInputExprEvaluation(
inputExprs: Seq[RexNode],
funcName: String): String = {
isWithinAggExprState = true
val resultTerm = newName("result")
val exprs = inputExprs.zipWithIndex.map {
case (inputExpr, outputIndex) =>
val expr = generateExpression(inputExpr)
s"""
| ${expr.code}
| if (${expr.nullTerm}) {
| $resultTerm.setField($outputIndex, null);
| } else {
| $resultTerm.setField($outputIndex, ${expr.resultTerm});
| }
""".stripMargin
}.mkString("\\n")
isWithinAggExprState = false
j"""
|private $GENERIC_ROW $funcName($ROW_DATA $inputAggRowTerm) {
| $GENERIC_ROW $resultTerm = new $GENERIC_ROW(${inputExprs.size});
| $exprs
| return $resultTerm;
|}
|""".stripMargin
}
private case class SingleAggCall(
sqlAggFunction: SqlAggFunction,
resultType: RelDataType,
exprIndices: Seq[Integer]
)
private case class MatchAgg(
aggregations: Seq[SingleAggCall],
inputExprs: Seq[RexNode]
)
}
}
object MatchCodeGenerator {
val ALL_PATTERN_VARIABLE = "*"
val AGGS_HANDLER_CONTEXT: String = className[PerKeyStateDataViewStore]
val CURRENT_CLASS_LOADER = "Thread.currentThread().getContextClassLoader()"
}
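// Illustrative example (not part of this file): a MATCH_RECOGNIZE query whose DEFINE and
// MEASURES clauses this generator compiles into IterativeCondition and
// PatternProcessFunction instances, respectively:
// SELECT * FROM Ticker MATCH_RECOGNIZE (
//   PARTITION BY symbol ORDER BY rowtime
//   MEASURES LAST(A.price) AS lastPrice
//   PATTERN (A+ B)
//   DEFINE A AS A.price > 10, B AS B.price < 15
// ) AS T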
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/MatchCodeGenerator.scala
|
Scala
|
apache-2.0
| 29,916
|
def factors(n: Long): List[Long] = (2 to math.sqrt(n).toInt)
.find(n % _ == 0).fold(List(n))(i => i.toLong :: factors(n / i))
val r = factors(600851475143L).last
assert(r == 6857) // 1 ms
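// Behaviour sketch: factors performs trial division up to sqrt(n); when no divisor is found,
// n itself is prime, so the last element is the largest prime factor.
// e.g. factors(12) == List(2, 2, 3)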
|
pavelfatin/projecteuler
|
p03.scala
|
Scala
|
gpl-3.0
| 192
|
package performance
import java.util.concurrent.{Callable, Executors}
import com.hadoop.compression.lzo.LzopCodec
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkContext, SparkConf}
/**
* Queries data from a Hive table and compares two approaches:
* 1. a single thread performing two save operations back to back
* 2. two threads performing the two save operations concurrently
* Created by jgh on 2017/8/30 030.
*/
object JobWithMultiThread {
val hdfsPath = "hdfs://ns1/work/bw/tech/test"
def main (args: Array[String]) {
val conf = new SparkConf().setAppName("MultiJobWithThread")
val sc = new SparkContext(conf)
val hiveContext = new HiveContext(sc)
val df = getInfo(hiveContext)
// Baseline without multithreading: execute two actions consecutively, producing two sequential jobs
val t1 = System.currentTimeMillis()
df.rdd.saveAsTextFile(hdfsPath + "testfile1", classOf[LzopCodec])
df.rdd.saveAsTextFile(hdfsPath + "testfile2", classOf[LzopCodec])
val t2 = System.currentTimeMillis()
println("没有多线程处理两个不相关Job的情况耗时:" + (t2-t1))
// Use an Executor to run the jobs on multiple threads
val executorService = Executors.newFixedThreadPool(2) // thread pool with two threads
executorService.submit(new Callable[Unit](){
override def call(): Unit = {
df.rdd.saveAsTextFile(hdfsPath + "testfile3", classOf[LzopCodec])
}
})
executorService.submit(new Callable[Unit](){
override def call(): Unit = {
df.rdd.saveAsTextFile(hdfsPath + "testfile4", classOf[LzopCodec])
}
})
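// Sketch: without an explicit shutdown, the fixed thread pool keeps non-daemon threads
// alive, so the driver JVM may not exit after main returns. One option:
// executorService.shutdown()
// executorService.awaitTermination(1, java.util.concurrent.TimeUnit.HOURS)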
}
def getInfo(hiveContext : HiveContext) : DataFrame = {
val sql = "select * from common.dict_hotel_ol"
hiveContext.sql(sql)
}
}
|
chocolateBlack/LearningSpark
|
src/main/scala/performance/JobWithMultiThread.scala
|
Scala
|
mit
| 1,784
|
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.ast
import laika.config.Config.ConfigResult
import laika.config.{ASTValue, ConfigError, ConfigValue, InvalidType, Key, SimpleConfigValue}
import laika.parse.{GeneratedSource, SourceFragment}
import laika.rewrite.ReferenceResolver.CursorKeys
import laika.rewrite.TemplateRewriter
/** Represents a placeholder inline element that needs
* to be resolved in a rewrite step.
* Useful for elements that need access to the
* document, structure, title or configuration before
* being fully resolved.
*/
trait SpanResolver extends Span with Unresolved {
def resolve (cursor: DocumentCursor): Span
}
/** Represents a placeholder block element that needs
* to be resolved in a rewrite step.
* Useful for elements that need access to the
* document, structure, title or configuration before
* being fully resolved.
*/
trait BlockResolver extends Block with Unresolved {
def resolve (cursor: DocumentCursor): Block
}
/** Represents a reference to a value from the context
* of the current document. The `ref` attribute
* is a simple path expression in dot notation
* for accessing properties of an object (or keys
* of a Map).
*
* The root elements accessible to such a reference are:
*
* - `document`: the current document with all of its public properties
* - `parent`: the parent tree of the current document
* - `root`: the root tree
* - `config`: all configuration values for the current document,
* including those inherited from parent trees
*/
abstract class ContextReference[T <: Span] (ref: Key, source: SourceFragment) extends SpanResolver {
def result (value: ConfigResult[Option[ConfigValue]]): T
def resolve (cursor: DocumentCursor): Span = {
cursor.resolveReference(ref) match {
case Right(Some(ASTValue(element: Element))) => result(Right(Some(ASTValue(TemplateRewriter.rewriteRules(cursor).rewriteElement(element)))))
case other => result(other)
}
}
protected def missing: InvalidSpan = InvalidSpan(s"Missing required reference: '$ref'", source)
protected def invalid(cError: ConfigError): InvalidSpan =
InvalidSpan(s"Error resolving reference: '$ref': ${cError.message}", source)
protected def invalidType(value: ConfigValue): InvalidSpan = InvalidSpan(s"Error resolving reference: '$ref': " +
InvalidType("AST Element or Simple Value", value).message, source)
}
/** A context reference specifically for use in template documents.
*/
case class TemplateContextReference (ref: Key, required: Boolean, source: SourceFragment, options: Options = NoOpt)
extends ContextReference[TemplateSpan](ref, source) with TemplateSpan {
type Self = TemplateContextReference
def result (value: ConfigResult[Option[ConfigValue]]): TemplateSpan = value match {
case Right(Some(ASTValue(s: TemplateSpan))) => s
case Right(Some(ASTValue(RootElement(content,_)))) => EmbeddedRoot(content)
case Right(Some(ASTValue(e: Element))) => TemplateElement(e)
case Right(Some(simple: SimpleConfigValue)) => TemplateString(simple.render)
case Right(None) if !required => TemplateString("")
case Right(None) => TemplateElement(missing)
case Right(Some(unsupported)) => TemplateElement(invalidType(unsupported))
case Left(configError) => TemplateElement(invalid(configError))
}
def withOptions (options: Options): TemplateContextReference = copy(options = options)
lazy val unresolvedMessage: String = s"Unresolved template context reference with key '${ref.toString}'"
}
/** A context reference specifically for use in markup documents.
*/
case class MarkupContextReference (ref: Key, required: Boolean, source: SourceFragment, options: Options = NoOpt) extends ContextReference[Span](ref, source) {
type Self = MarkupContextReference
def result (value: ConfigResult[Option[ConfigValue]]): Span = value match {
case Right(Some(ASTValue(s: Span))) => s
case Right(Some(ASTValue(e: Element))) => TemplateElement(e)
case Right(Some(simple: SimpleConfigValue)) => Text(simple.render)
case Right(None) if !required => Text("")
case Right(None) => missing
case Right(Some(unsupported)) => invalidType(unsupported)
case Left(configError) => invalid(configError)
}
def withOptions (options: Options): MarkupContextReference = copy(options = options)
lazy val unresolvedMessage: String = s"Unresolved markup context reference with key '${ref.toString}'"
}
/** The base type for all inline elements that
* can be found in a template.
*/
trait TemplateSpan extends Span
/** A container of other TemplateSpan elements.
*/
trait TemplateSpanContainer extends ElementContainer[TemplateSpan] with RewritableContainer {
type Self <: TemplateSpanContainer
/** Rewrites all template span children of this container based on the specified rules.
*
* Concrete types are expected to support rewriting at least for all standard block, span and template span
* elements they contain, plus optionally for any other elements that have custom support for rewriting.
*/
def rewriteTemplateSpans (rules: RewriteRule[TemplateSpan]): Self = rewriteChildren(RewriteRules(templateRules = Seq(rules)))
def rewriteChildren (rules: RewriteRules): Self = withContent(rules.rewriteTemplateSpans(content))
/** Creates a copy of this instance with the specified new content.
*
* Implementation note: This method exists to deal with the fact that there is no polymorphic copy method
* and trades a small bit of boilerplate for avoiding the compile time hit of using shapeless for this.
*/
def withContent (newContent: Seq[TemplateSpan]): Self
}
/** Common methods for simple template span containers (without additional parameters). */
trait TemplateSpanContainerCompanion {
type ContainerType
/** Creates an empty instance */
def empty: ContainerType = createSpanContainer(Nil)
/** Create an instance only containing a single TemplateString span */
def apply(text: String, texts: String*): ContainerType = createSpanContainer((text +: texts).map(TemplateString(_)))
/** Create an instance containing one or more spans */
def apply(span: TemplateSpan, spans: TemplateSpan*): ContainerType = createSpanContainer(span +: spans.toList)
protected def createSpanContainer (spans: Seq[TemplateSpan]): ContainerType
}
/** Wraps a generic element that otherwise could not be placed directly into
* a template document tree. Useful when custom tags which are placed inside
* a template produce non-template tree elements.
*/
case class TemplateElement (element: Element, indent: Int = 0, options: Options = NoOpt) extends TemplateSpan with ElementTraversal
with RewritableContainer {
type Self = TemplateElement
def rewriteChildren (rules: RewriteRules): TemplateElement = copy(element = rules.rewriteElement(element))
def withOptions (options: Options): TemplateElement = copy(options = options)
}
/** A generic container element containing a list of template spans. Can be used where a sequence
* of spans must be inserted in a place where a single element is required by the API.
* Usually renderers do not treat the container as a special element and render its children
* as a sub flow of the parent container.
*/
case class TemplateSpanSequence (content: Seq[TemplateSpan], options: Options = NoOpt) extends TemplateSpan with TemplateSpanContainer {
type Self = TemplateSpanSequence
def withContent (newContent: Seq[TemplateSpan]): TemplateSpanSequence = copy(content = newContent)
def withOptions (options: Options): TemplateSpanSequence = copy(options = options)
}
object TemplateSpanSequence extends TemplateSpanContainerCompanion {
type ContainerType = TemplateSpanSequence
protected def createSpanContainer (spans: Seq[TemplateSpan]): ContainerType = TemplateSpanSequence(spans)
}
/** A simple string element, representing the parts of a template
* that are not detected as special markup constructs and treated as raw text.
*/
case class TemplateString (content: String, options: Options = NoOpt) extends TemplateSpan with TextContainer {
type Self = TemplateString
def withOptions (options: Options): TemplateString = copy(options = options)
}
/** The root element of a template document tree.
*/
case class TemplateRoot (content: Seq[TemplateSpan], options: Options = NoOpt) extends Block with TemplateSpanContainer {
type Self = TemplateRoot
def withContent (newContent: Seq[TemplateSpan]): TemplateRoot = copy(content = newContent)
def withOptions (options: Options): TemplateRoot = copy(options = options)
}
/** Companion with a fallback instance for setups without a default template */
object TemplateRoot extends TemplateSpanContainerCompanion {
type ContainerType = TemplateRoot
protected def createSpanContainer (spans: Seq[TemplateSpan]): ContainerType = TemplateRoot(spans)
/** A fallback instance that can be used when no user-specified template
* is available. It simply inserts the content of the parsed markup document
* without any surrounding decoration. */
val fallback: TemplateRoot = TemplateRoot(TemplateContextReference(CursorKeys.documentContent, required = true, GeneratedSource))
}
/** The root element of a document tree (originating from text markup) inside a template.
* Usually created by a template reference like `\\${cursor.currentDocument.content}`.
*/
case class EmbeddedRoot (content: Seq[Block], indent: Int = 0, options: Options = NoOpt) extends TemplateSpan with BlockContainer {
type Self = EmbeddedRoot
def withContent (newContent: Seq[Block]): EmbeddedRoot = copy(content = newContent)
def withOptions (options: Options): EmbeddedRoot = copy(options = options)
}
object EmbeddedRoot extends BlockContainerCompanion {
type ContainerType = EmbeddedRoot
override protected def createBlockContainer (blocks: Seq[Block]) = EmbeddedRoot(blocks)
}
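// Usage sketch: a minimal template AST equivalent to the template source
// "Hello ${cursor.currentDocument.content}", built from the types above:
// val template = TemplateRoot(
//   TemplateString("Hello "),
//   TemplateContextReference(CursorKeys.documentContent, required = true, GeneratedSource)
// )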
|
planet42/Laika
|
core/shared/src/main/scala/laika/ast/templates.scala
|
Scala
|
apache-2.0
| 10,866
|
package org.jetbrains.plugins.scala
package editor.smartEnter.fixers
import com.intellij.psi._
import com.intellij.openapi.editor.{Editor, Document}
import editor.smartEnter.ScalaSmartEnterProcessor
import lang.psi.api.expr.{ScExpression, ScIfStmt}
import util.PsiTreeUtil
/**
* @author Ksenia.Sautina
* @since 1/28/13
*/
@SuppressWarnings(Array("HardCodedStringLiteral"))
class ScalaIfConditionFixer extends ScalaFixer {
def apply(editor: Editor, processor: ScalaSmartEnterProcessor, psiElement: PsiElement) {
val ifStatement = PsiTreeUtil.getParentOfType(psiElement, classOf[ScIfStmt], false)
if (ifStatement == null) return
val doc: Document = editor.getDocument
val leftParenthesis = ifStatement.getLeftParenthesis.orNull
val rightParenthesis = ifStatement.getRightParenthesis.orNull
val condition = ifStatement.condition.orNull
if (condition == null) {
if (leftParenthesis == null && rightParenthesis == null) {
var stopOffset: Int = doc.getLineEndOffset(doc.getLineNumber(ifStatement.getTextRange.getStartOffset))
val thenBranch: ScExpression = ifStatement.thenBranch.orNull
if (thenBranch != null) {
stopOffset = Math.min(stopOffset, thenBranch.getTextRange.getStartOffset)
}
doc.replaceString(ifStatement.getTextRange.getStartOffset, stopOffset, "if () {\\n}")
processor.registerUnresolvedError(ifStatement.getTextRange.getStartOffset + "if (".length)
}
else if (leftParenthesis != null && rightParenthesis == null) {
doc.insertString(ifStatement.getTextRange.getEndOffset, ") {\\n}")
processor.registerUnresolvedError(leftParenthesis.getTextRange.getEndOffset)
} else {
processor.registerUnresolvedError(leftParenthesis.getTextRange.getEndOffset)
}
} else if (rightParenthesis == null) {
doc.insertString(condition.getTextRange.getEndOffset, ")")
}
}
}
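// Behaviour sketch: on smart-enter over an incomplete `if`, the fixer completes it:
// "if"        -> "if () {\n}" with the caret placed inside the empty parentheses;
// "if (cond"  -> "if (cond)"  (the missing closing parenthesis is inserted after the condition).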
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/editor/smartEnter/fixers/ScalaIfConditionFixer.scala
|
Scala
|
apache-2.0
| 1,943
|