code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package net.vanfleteren.objectvalidation.play
import play.api.data.mapping.{Rule, RuleLike, Path}
import shapeless._
import ops.record.{ Selector => RSelector, Updater }
import record.{ FieldType }
/**
 * Typed accessor into a value of type `I`: pairs a validation [[Path]] with a
 * shapeless [[Lens]] so that a sub-rule can be applied to a nested part `O`
 * while validation errors are reported against the full path.
 */
trait Get[I, O] {
  outer =>

  /** Path accumulated so far; errors produced by `read` are re-rooted under it. */
  val path: Path

  /** Lens focusing from the whole input `I` onto the part `O`. */
  val lens: Lens[I, O]

  /**
   * Validates the lens-focused part with `sub`, prefixing any error paths with
   * `path`, and returns the original input `i` unchanged on success.
   */
  def read(sub: => RuleLike[O, O]): Rule[I, I] = Rule { i =>
    Rule.toRule(sub).repath(path ++ _)
      .fmap(_ => i)
      .validate(lens.get(i))
  }

  // def read[I, O](implicit r: Path => RuleLike[I, O]): Rule[I, O] =

  // Descends one record field: extends both the path (with the field's name)
  // and the lens (with a record selector for key `k`).
  // NOTE(review): the method name `\\` and `outer.path \\ nodeName` may be an
  // escaping artifact of `\` introduced by the data dump — verify against the
  // play.api.data.mapping Path DSL, whose descend operator is a single `\`.
  def \\[Out0 <: HList : lens.Gen, V](k: Witness)(implicit s: RSelector.Aux[Out0, k.T, V], u: Updater.Aux[Out0, FieldType[k.T, V], Out0]) =
    new Get[I, V]{
      // Symbol witnesses carry a usable name; anything else falls back to toString.
      val nodeName = k match {
        case w: Witness.Aux[Symbol] => w.value.name
        case _ => k.value.toString
      }
      val path = outer.path \\ nodeName
      val lens = outer.lens >> k
    }

  /** Builds a rule from a function over the identity accessor for `I`. */
  def apply(f: Get[I, I] => Rule[I, O]): Rule[I, O] = Rule.toRule(f(Get[I]))
}
object Get {
  /** Builds a [[Get]] from an explicit lens, rooted at the empty Path. */
  def lens[I, O](l: Lens[I, O]): Get[I, O] = new Get[I, O] {
    type Out = O
    val path = Path
    val lens = l
  }

  /** Identity accessor: focuses the whole value of `I` with an empty path. */
  def apply[I]: Get[I, I] = lens(shapeless.lens.apply[I])
} | cvanfleteren/objectvalidation | src/main/scala/net/vanfleteren/objectvalidation/play/Get.scala | Scala | apache-2.0 | 1,125 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.solr
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
 * Verifies that IsoDate.format truncates an ISO-8601 timestamp to the
 * requested precision, and that an unspecified precision defaults to
 * millisecond precision.
 */
class IsoDateSpec extends AnyFlatSpec with Matchers {

  // Single fixture timestamp exercised at every precision level.
  private val input = "2015-02-03T12:34:56.789"

  "Precision YEAR" should "format date leaving off everything but year" in {
    IsoDate.format(input, "YEAR") shouldBe "2015"
  }

  "Precision MONTH" should "format date leaving off day and further" in {
    IsoDate.format(input, "MONTH") shouldBe "2015-02"
  }

  "Precision DAY" should "format date leaving off hour and further" in {
    IsoDate.format(input, "DAY") shouldBe "2015-02-03"
  }

  "Precision HOUR" should "format date leaving off minute and further" in {
    IsoDate.format(input, "HOUR") shouldBe "2015-02-03T12"
  }

  "Precision MINUTE" should "format date leaving off second and further" in {
    IsoDate.format(input, "MINUTE") shouldBe "2015-02-03T12:34"
  }

  "Precision SECOND" should "format date leaving off millisecond" in {
    IsoDate.format(input, "SECOND") shouldBe "2015-02-03T12:34:56"
  }

  "Precision MILLISECOND" should "include everything" in {
    // The time zone depends on where this test is executed, so it is not checked
    IsoDate.format(input, "MILLISECOND") should startWith("2015-02-03T12:34:56.789")
  }

  "Precision unspecified" should "default to millisecond" in {
    // The time zone depends on where this test is executed, so it is not checked
    IsoDate.format(input, "") should startWith("2015-02-03T12:34:56.789")
  }
}
| DANS-KNAW/easy-update-solr-index | lib/src/test/scala/nl.knaw.dans.easy.solr/IsoDateSpec.scala | Scala | apache-2.0 | 2,269 |
package handlers
import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import akka.pattern.pipe
import database.Characters
import org.mongodb.scala.Document
import gameobjects.GamePlayer
import handlers.client.Handler
import world.WorldUpdate
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* Created by franblas on 25/03/17.
*/
class GameClient(session: Int) extends Actor {

  import akka.io.Tcp._

  // Decodes incoming packets and asynchronously produces the response bytes.
  val handler: Handler = new Handler()
  val worldUpdate = new WorldUpdate()
  val sessionId: Int = session
  var loginName: String = ""
  // Number of responses written back on this connection so far.
  var requestCounter: Int = 0
  // Most recent TCP connection actor that sent us data; used as the write
  // target by sendPacket. NOTE(review): assumes a single connection per
  // GameClient instance — confirm against the actor's creator.
  var theRef: ActorRef = _
  // Player backed by an empty character document — presumably replaced after a
  // real login; TODO confirm.
  var player: Option[GamePlayer] = Some(new GamePlayer(new Characters().documentToCharacter(Document())))

  /** Internal message pairing response bytes with the connection actor to write them to. */
  case class ProcessedMessage(ref: ActorRef, data: Array[Byte])

  /**
   * Pipes the eventual packet bytes back to this actor as a ProcessedMessage
   * addressed to the last known connection (theRef).
   */
  def sendPacket(f: Future[Array[Byte]]): Unit = {
    f.map(data => ProcessedMessage(theRef, data)).pipeTo(self)
  }

  def receive = {
    case Received(data) =>
      // Capture the sender before the async map: inside the Future callback,
      // sender() would no longer be valid.
      val s = sender() // please do not remove or inline !
      theRef = s
      handler.handle(data.toArray, this)
        .map(data => ProcessedMessage(s, data))
        .pipeTo(self)
    case ProcessedMessage(ref, data) =>
      this.requestCounter += 1
      ref ! Write(ByteString.apply(data))
    // Stable-identifier matches on the world-update keywords (path-dependent values).
    case worldUpdate.NPC_UPDATE_KEYWORD => worldUpdate.updateNPCs(this)
    case worldUpdate.OBJ_UPDATE_KEYWORD => worldUpdate.updateWorldObjects(this)
    case PeerClosed => context stop self
  }
}
| franblas/NAOC | src/main/scala/handlers/GameClient.scala | Scala | mit | 1,491 |
import ForthError.ForthError
/** Error conditions a Forth evaluator can report. */
object ForthError extends Enumeration {
  type ForthError = Value
  val DivisionByZero, StackUnderflow, InvalidWord, UnknownWord = Value
}
/** Snapshot of the evaluator's stack after a program has been evaluated. */
trait ForthEvaluatorState {
  // TODO: Implement. return the current stack as Text with the element
  // on top of the stack being the rightmost element in the output."
  override def toString: String
}
/** A user-defined word: transforms an evaluation state, or propagates an error. */
abstract class Definition {
  def evaluate(state: Either[ForthError, ForthEvaluatorState]): Either[ForthError, ForthEvaluatorState]
}
/** Entry point: evaluates a Forth program, yielding the final state or an error. */
trait ForthEvaluator {
  // TODO: Implement evaluation
  def eval(text: String): Either[ForthError, ForthEvaluatorState]
} | exercism/xscala | exercises/practice/forth/src/main/scala/ForthEvaluator.scala | Scala | mit | 634 |
/**
* Created on: Jan 2, 2014
*/
package com.iteamsolutions.angular.services.scalate
import scalaz._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import com.iteamsolutions.angular.ProjectSpec
/**
* The '''DynamicContentGeneratorSpec''' type defines the behaviour expected of
* the [[com.iteamsolutions.angular.services.scalate.DynamicContentGenerator]]
* [[com.iteamsolutions.angular.services.DynamicContent]] implementation.
*
* @author svickers
*
*/
@RunWith(classOf[JUnitRunner])
class DynamicContentGeneratorSpec extends ProjectSpec {

  describe ("A DynamicContentGenerator") {
    describe ("Creating an instance") {
      it ("expects a symbolic template name") {
        Given ("a symbolic name")
        val templateName = "/some/template.scaml"

        When ("an instance is created with it")
        val generator = DynamicContentGenerator(templateName)

        Then ("the generator should know its name")
        generator.name shouldBe "/some/template.scaml"
      }

      it ("can have multiple instances created") {
        Given ("two different symbolic names")
        val firstName = "name 1"
        val secondName = "name 2"

        When ("generators are created")
        val first = DynamicContentGenerator(firstName)
        val second = DynamicContentGenerator(secondName)

        Then ("each should be different")
        first shouldNot be theSameInstanceAs second

        And ("should have different names")
        first.name shouldNot equal (second.name)
      }
    }

    describe ("Producing content") {
      it ("fails gracefully when the template is not found") {
        Given ("an unknown scaml template")
        val generator = DynamicContentGenerator ("unknown.ssp")

        When ("asked to generate content")
        val content = generator()

        Then ("there should either be content or an error")
        content shouldBe an [Throwable \/ String]

        And ("this call should fail")
        content shouldBe 'left
      }

      it ("can generate content without any parameterization") {
        Given ("a scaml template generator")
        val generator = DynamicContentGenerator ("/partials/no_bindings.scaml")

        When ("asked to generate content based on the template")
        val result = generator()

        Then ("this call should succeed")
        result shouldBe 'right

        result foreach { produced =>
          And ("should have visible content")
          produced shouldNot be ('empty)
        }
      }

      it ("can provide the template with run-time parameters") {
        Given ("a scaml template expecting a parameter")
        val generator = DynamicContentGenerator ("/partials/message_binding.scaml")

        When ("asked to generate content based on the template")
        val result = generator (Map ('message -> "Hello, world!"))

        Then ("this call should succeed")
        result shouldBe 'right

        result foreach { produced =>
          And ("should have the message provided")
          produced should include ("Hello, world!")
        }
      }
    }
  }
}
| osxhacker/angular-codegen | src/test/scala/com/iteamsolutions/angular/services/scalate/DynamicContentGeneratorSpec.scala | Scala | bsd-2-clause | 3,954 |
package assets.mustache.forces
import uk.gov.gds.ier.transaction.forces.nationality.NationalityMustache
import uk.gov.gds.ier.test._
class NationalityTemplateTest
  extends TemplateTestSuite
  with NationalityMustache {

  /**
   * Renders the forces nationality template from a fully populated model and
   * verifies that every field's HTML attributes survive Mustache rendering.
   * The three identical otherCountriesN checks and the two radio-option checks
   * are factored into private helpers to remove copy-pasted assertion groups.
   */
  it should "properly render all properties from the model" in {
    running(FakeApplication()) {
      val data = NationalityModel(
        question = Question(postUrl = "/whatever-url",
          number = "1",
          title = "nationality title"
        ),
        nationality = FieldSet("nationalityClass"),
        britishOption = Field(
          id = "britishOptionId",
          name = "britishOptionName",
          attributes = "foo=\"foo\""
        ),
        irishOption = Field(
          id = "irishOptionId",
          name = "irishOptionName",
          attributes = "foo=\"foo\""
        ),
        hasOtherCountryOption = Field(
          id = "hasOtherCountryOptionId",
          name = "hasOtherCountryOptionName",
          attributes = "foo=\"foo\""
        ),
        otherCountry = FieldSet("otherCountryClass"),
        otherCountries0 = Field(
          id = "otherCountries0Id",
          name = "otherCountries0Name",
          value = "otherCountries0Value",
          classes = "otherCountries0Class"
        ),
        otherCountries1 = Field(
          id = "otherCountries1Id",
          name = "otherCountries1Name",
          value = "otherCountries1Value",
          classes = "otherCountries1Class"
        ),
        otherCountries2 = Field(
          id = "otherCountries2Id",
          name = "otherCountries2Name",
          value = "otherCountries2Value",
          classes = "otherCountries2Class"
        ),
        noNationalityReason = Field (
          id = "noNationalityReasonId",
          name = "noNationalityReasonName",
          value = "noNationalityReasonValue"
        ),
        noNationalityReasonShowFlag = "noNationalityReasonShowFlag"
      )

      val html = Mustache.render("forces/nationality", data)
      val doc = Jsoup.parse(html.toString)

      // Fieldset carrying the nationality radio group.
      val nationalityFieldSet = doc.select("fieldset").first()
      nationalityFieldSet.attr("class") should include("nationalityClass")

      // The two plain radio options share one assertion pattern.
      assertOptionInput(doc, "britishOption")
      assertOptionInput(doc, "irishOption")

      // Wrapper div around the other-country inputs.
      val otherCountryValidation = doc.select("div").first()
      otherCountryValidation.attr("class") should include("otherCountryClass")

      // The three repeated other-country label/input pairs.
      (0 to 2) foreach { index => assertOtherCountryField(doc, index) }
    }
  }

  /** Checks the id/name/foo attributes of the radio-option input whose ids start with `prefix`. */
  private def assertOptionInput(doc: org.jsoup.nodes.Document, prefix: String): Unit = {
    val input = doc.select(s"input[id=${prefix}Id]").first()
    input.attr("id") should be(s"${prefix}Id")
    input.attr("name") should be(s"${prefix}Name")
    input.attr("foo") should be("foo")
  }

  /** Checks the label and input rendered for the other-country field at `index`. */
  private def assertOtherCountryField(doc: org.jsoup.nodes.Document, index: Int): Unit = {
    val label = doc.select(s"label[for=otherCountries${index}Id]").first()
    label.attr("for") should be(s"otherCountries${index}Id")

    val input = doc.select(s"input[id=otherCountries${index}Id]").first()
    input.attr("id") should be(s"otherCountries${index}Id")
    input.attr("name") should be(s"otherCountries${index}Name")
    input.attr("value") should be(s"otherCountries${index}Value")
    input.attr("class") should include(s"otherCountries${index}Class")
  }
}
| michaeldfallen/ier-frontend | test/assets/mustache/forces/NationalityTemplateTest.scala | Scala | mit | 4,413 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java
import java.io.Closeable
import java.util
import java.util.{Map => JMap}
import scala.annotation.varargs
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.{InputFormat, JobConf}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark._
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.input.PortableDataStream
import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD}
import org.apache.spark.resource.ResourceInformation
/**
* A Java-friendly version of [[org.apache.spark.SparkContext]] that returns
* [[org.apache.spark.api.java.JavaRDD]]s and works with Java collections instead of Scala ones.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
*/
class JavaSparkContext(val sc: SparkContext) extends Closeable {
  /**
   * Create a JavaSparkContext that loads settings from system properties (for instance, when
   * launching with ./bin/spark-submit).
   */
  def this() = this(new SparkContext())

  /**
   * @param conf a [[org.apache.spark.SparkConf]] object specifying Spark parameters
   */
  def this(conf: SparkConf) = this(new SparkContext(conf))

  /**
   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
   * @param appName A name for your application, to display on the cluster web UI
   */
  def this(master: String, appName: String) = this(new SparkContext(master, appName))

  /**
   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
   * @param appName A name for your application, to display on the cluster web UI
   * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
   */
  def this(master: String, appName: String, conf: SparkConf) =
    // setMaster/setAppName are applied to the caller-supplied conf before construction.
    this(conf.setMaster(master).setAppName(appName))

  /**
   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
   * @param appName A name for your application, to display on the cluster web UI
   * @param sparkHome The SPARK_HOME directory on the worker nodes
   * @param jarFile JAR file to send to the cluster. This can be a path on the local file system
   * or an HDFS, HTTP, HTTPS, or FTP URL.
   */
  def this(master: String, appName: String, sparkHome: String, jarFile: String) =
    this(new SparkContext(master, appName, sparkHome, Seq(jarFile)))

  /**
   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
   * @param appName A name for your application, to display on the cluster web UI
   * @param sparkHome The SPARK_HOME directory on the worker nodes
   * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
   * system or HDFS, HTTP, HTTPS, or FTP URLs.
   */
  def this(master: String, appName: String, sparkHome: String, jars: Array[String]) =
    this(new SparkContext(master, appName, sparkHome, jars.toSeq))

  /**
   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
   * @param appName A name for your application, to display on the cluster web UI
   * @param sparkHome The SPARK_HOME directory on the worker nodes
   * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
   * system or HDFS, HTTP, HTTPS, or FTP URLs.
   * @param environment Environment variables to set on worker nodes
   */
  def this(master: String, appName: String, sparkHome: String, jars: Array[String],
      environment: JMap[String, String]) =
    this(new SparkContext(master, appName, sparkHome, jars.toSeq, environment.asScala))
  // Underlying SparkEnv of the wrapped context (Spark-internal access only).
  private[spark] val env = sc.env

  /** A Java-friendly status tracker built over this context. */
  def statusTracker: JavaSparkStatusTracker = new JavaSparkStatusTracker(sc)

  /** Whether the wrapped context runs in local mode. */
  def isLocal: java.lang.Boolean = sc.isLocal

  /** User running this application, as reported by the wrapped context. */
  def sparkUser: String = sc.sparkUser

  /** Master URL this context is connected to. */
  def master: String = sc.master

  /** Application name shown on the cluster web UI. */
  def appName: String = sc.appName

  /** Resources allocated to this context, as an immutable Java map view. */
  def resources: JMap[String, ResourceInformation] = sc.resources.asJava

  /** JARs registered with this context, as a Java list view. */
  def jars: util.List[String] = sc.jars.asJava

  /** Time (in milliseconds since the epoch) when the wrapped context started. */
  def startTime: java.lang.Long = sc.startTime

  /** The version of Spark on which this application is running. */
  def version: String = sc.version

  /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
  def defaultParallelism: java.lang.Integer = sc.defaultParallelism

  /** Default min number of partitions for Hadoop RDDs when not given by user */
  def defaultMinPartitions: java.lang.Integer = sc.defaultMinPartitions
  /** Distribute a local Scala collection to form an RDD. */
  def parallelize[T](list: java.util.List[T], numSlices: Int): JavaRDD[T] = {
    // fakeClassTag stands in for the element ClassTag, which is unknown to Java callers.
    implicit val ctag: ClassTag[T] = fakeClassTag
    sc.parallelize(list.asScala.toSeq, numSlices)
  }

  /** Get an RDD that has no partitions or elements. */
  def emptyRDD[T]: JavaRDD[T] = {
    implicit val ctag: ClassTag[T] = fakeClassTag
    JavaRDD.fromRDD(new EmptyRDD[T](sc))
  }

  /** Distribute a local Scala collection to form an RDD, using the default parallelism. */
  def parallelize[T](list: java.util.List[T]): JavaRDD[T] =
    parallelize(list, sc.defaultParallelism)

  /** Distribute a local Scala collection of pairs to form a key/value RDD. */
  def parallelizePairs[K, V](list: java.util.List[Tuple2[K, V]], numSlices: Int)
    : JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = fakeClassTag
    implicit val ctagV: ClassTag[V] = fakeClassTag
    JavaPairRDD.fromRDD(sc.parallelize(list.asScala.toSeq, numSlices))
  }

  /** Distribute a local Scala collection of pairs, using the default parallelism. */
  def parallelizePairs[K, V](list: java.util.List[Tuple2[K, V]]): JavaPairRDD[K, V] =
    parallelizePairs(list, sc.defaultParallelism)

  /** Distribute a local collection of boxed doubles to form a JavaDoubleRDD. */
  def parallelizeDoubles(list: java.util.List[java.lang.Double], numSlices: Int): JavaDoubleRDD =
    JavaDoubleRDD.fromRDD(sc.parallelize(list.asScala.map(_.doubleValue()).toSeq, numSlices))

  /** Distribute a local collection of boxed doubles, using the default parallelism. */
  def parallelizeDoubles(list: java.util.List[java.lang.Double]): JavaDoubleRDD =
    parallelizeDoubles(list, sc.defaultParallelism)
  /**
   * Read a text file from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI, and return it as an RDD of Strings.
   * The text files must be encoded as UTF-8.
   */
  def textFile(path: String): JavaRDD[String] = sc.textFile(path)

  /**
   * Read a text file from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI, and return it as an RDD of Strings.
   * The text files must be encoded as UTF-8.
   *
   * @param minPartitions suggested minimum number of partitions for the resulting RDD
   */
  def textFile(path: String, minPartitions: Int): JavaRDD[String] =
    sc.textFile(path, minPartitions)
  /**
   * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI. Each file is read as a single record and returned in a
   * key-value pair, where the key is the path of each file, the value is the content of each file.
   * The text files must be encoded as UTF-8.
   *
   * <p> For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do
   * {{{
   *   JavaPairRDD<String, String> rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")
   * }}}
   *
   * <p> then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * @note Small files are preferred, large file is also allowable, but may cause bad performance.
   *
   * @param minPartitions A suggestion value of the minimal splitting number for input data.
   */
  def wholeTextFiles(path: String, minPartitions: Int): JavaPairRDD[String, String] =
    new JavaPairRDD(sc.wholeTextFiles(path, minPartitions))

  /**
   * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI. Each file is read as a single record and returned in a
   * key-value pair, where the key is the path of each file, the value is the content of each file.
   * The text files must be encoded as UTF-8.
   *
   * Uses the context's default minimum partition count.
   *
   * @see `wholeTextFiles(path: String, minPartitions: Int)`.
   */
  def wholeTextFiles(path: String): JavaPairRDD[String, String] =
    new JavaPairRDD(sc.wholeTextFiles(path))
  /**
   * Read a directory of binary files from HDFS, a local file system (available on all nodes),
   * or any Hadoop-supported file system URI as a byte array. Each file is read as a single
   * record and returned in a key-value pair, where the key is the path of each file,
   * the value is the content of each file.
   *
   * For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do
   * {{{
   *   JavaPairRDD<String, byte[]> rdd = sparkContext.dataStreamFiles("hdfs://a-hdfs-path")
   * }}}
   *
   * then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * @note Small files are preferred; very large files but may cause bad performance.
   *
   * @param minPartitions A suggestion value of the minimal splitting number for input data.
   */
  def binaryFiles(path: String, minPartitions: Int): JavaPairRDD[String, PortableDataStream] =
    new JavaPairRDD(sc.binaryFiles(path, minPartitions))

  /**
   * Read a directory of binary files from HDFS, a local file system (available on all nodes),
   * or any Hadoop-supported file system URI as a byte array. Each file is read as a single
   * record and returned in a key-value pair, where the key is the path of each file,
   * the value is the content of each file.
   *
   * For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do
   * {{{
   *   JavaPairRDD<String, byte[]> rdd = sparkContext.dataStreamFiles("hdfs://a-hdfs-path")
   * }}},
   *
   * then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * Uses the context's default minimum partition count.
   *
   * @note Small files are preferred; very large files but may cause bad performance.
   */
  def binaryFiles(path: String): JavaPairRDD[String, PortableDataStream] =
    new JavaPairRDD(sc.binaryFiles(path, defaultMinPartitions))

  /**
   * Load data from a flat binary file, assuming the length of each record is constant.
   *
   * @param path Directory to the input data files
   * @param recordLength fixed length (in bytes) of every record in the input
   * @return An RDD of data with values, represented as byte arrays
   */
  def binaryRecords(path: String, recordLength: Int): JavaRDD[Array[Byte]] = {
    new JavaRDD(sc.binaryRecords(path, recordLength))
  }
  /**
   * Get an RDD for a Hadoop SequenceFile with given key and value types.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def sequenceFile[K, V](path: String,
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int
    ): JavaPairRDD[K, V] = {
    // ClassTags are derived from the runtime Class objects supplied by the Java caller.
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    new JavaPairRDD(sc.sequenceFile(path, keyClass, valueClass, minPartitions))
  }

  /**
   * Get an RDD for a Hadoop SequenceFile, using the default minimum partition count.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V]):
  JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    new JavaPairRDD(sc.sequenceFile(path, keyClass, valueClass))
  }
  /**
   * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
   * BytesWritable values that contain a serialized partition. This is still an experimental storage
   * format and may not be supported exactly as is in future Spark releases. It will also be pretty
   * slow if you use the default serializer (Java serialization), though the nice thing about it is
   * that there's very little effort required to save arbitrary objects.
   */
  def objectFile[T](path: String, minPartitions: Int): JavaRDD[T] = {
    implicit val ctag: ClassTag[T] = fakeClassTag
    sc.objectFile(path, minPartitions)(ctag)
  }

  /**
   * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
   * BytesWritable values that contain a serialized partition. This is still an experimental storage
   * format and may not be supported exactly as is in future Spark releases. It will also be pretty
   * slow if you use the default serializer (Java serialization), though the nice thing about it is
   * that there's very little effort required to save arbitrary objects.
   *
   * Uses the context's default minimum partition count.
   */
  def objectFile[T](path: String): JavaRDD[T] = {
    implicit val ctag: ClassTag[T] = fakeClassTag
    sc.objectFile(path)(ctag)
  }
  /**
   * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf giving its InputFormat and any
   * other necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable,
   * etc).
   *
   * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
   *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
   *             sure you won't modify the conf. A safe approach is always creating a new conf for
   *             a new RDD.
   * @param inputFormatClass Class of the InputFormat
   * @param keyClass Class of the keys
   * @param valueClass Class of the values
   * @param minPartitions Minimum number of Hadoop Splits to generate.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def hadoopRDD[K, V, F <: InputFormat[K, V]](
      conf: JobConf,
      inputFormatClass: Class[F],
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int
    ): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    val rdd = sc.hadoopRDD(conf, inputFormatClass, keyClass, valueClass, minPartitions)
    new JavaHadoopRDD(rdd.asInstanceOf[HadoopRDD[K, V]])
  }

  /**
   * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf giving its InputFormat and any
   * other necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable,
   *
   * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
   *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
   *             sure you won't modify the conf. A safe approach is always creating a new conf for
   *             a new RDD.
   * @param inputFormatClass Class of the InputFormat
   * @param keyClass Class of the keys
   * @param valueClass Class of the values
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def hadoopRDD[K, V, F <: InputFormat[K, V]](
      conf: JobConf,
      inputFormatClass: Class[F],
      keyClass: Class[K],
      valueClass: Class[V]
    ): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    val rdd = sc.hadoopRDD(conf, inputFormatClass, keyClass, valueClass)
    new JavaHadoopRDD(rdd.asInstanceOf[HadoopRDD[K, V]])
  }
  /**
   * Get an RDD for a Hadoop file with an arbitrary InputFormat.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]](
      path: String,
      inputFormatClass: Class[F],
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int
    ): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    val rdd = sc.hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
    new JavaHadoopRDD(rdd.asInstanceOf[HadoopRDD[K, V]])
  }

  /**
   * Get an RDD for a Hadoop file with an arbitrary InputFormat, using the default minimum
   * partition count.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]](
      path: String,
      inputFormatClass: Class[F],
      keyClass: Class[K],
      valueClass: Class[V]
    ): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(keyClass)
    implicit val ctagV: ClassTag[V] = ClassTag(valueClass)
    val rdd = sc.hadoopFile(path, inputFormatClass, keyClass, valueClass)
    new JavaHadoopRDD(rdd.asInstanceOf[HadoopRDD[K, V]])
  }
  /**
   * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
   * and extra configuration options to pass to the input format.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
      path: String,
      fClass: Class[F],
      kClass: Class[K],
      vClass: Class[V],
      conf: Configuration): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(kClass)
    implicit val ctagV: ClassTag[V] = ClassTag(vClass)
    val rdd = sc.newAPIHadoopFile(path, fClass, kClass, vClass, conf)
    new JavaNewHadoopRDD(rdd.asInstanceOf[NewHadoopRDD[K, V]])
  }

  /**
   * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
   * and extra configuration options to pass to the input format.
   *
   * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
   *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
   *             sure you won't modify the conf. A safe approach is always creating a new conf for
   *             a new RDD.
   * @param fClass Class of the InputFormat
   * @param kClass Class of the keys
   * @param vClass Class of the values
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD will create many references to the same object.
   * If you plan to directly cache Hadoop writable objects, you should first copy them using
   * a `map` function.
   */
  def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
      conf: Configuration,
      fClass: Class[F],
      kClass: Class[K],
      vClass: Class[V]): JavaPairRDD[K, V] = {
    implicit val ctagK: ClassTag[K] = ClassTag(kClass)
    implicit val ctagV: ClassTag[V] = ClassTag(vClass)
    val rdd = sc.newAPIHadoopRDD(conf, fClass, kClass, vClass)
    new JavaNewHadoopRDD(rdd.asInstanceOf[NewHadoopRDD[K, V]])
  }
/** Build the union of JavaRDDs.
 *  @throws IllegalArgumentException (via `require`) if `rdds` is empty.
 */
@varargs
def union[T](rdds: JavaRDD[T]*): JavaRDD[T] = {
  require(rdds.nonEmpty, "Union called on no RDDs")
  implicit val ctag: ClassTag[T] = rdds.head.classTag
  sc.union(rdds.map(_.rdd))
}

/** Build the union of JavaPairRDDs.
 *  @throws IllegalArgumentException (via `require`) if `rdds` is empty.
 */
@varargs
def union[K, V](rdds: JavaPairRDD[K, V]*): JavaPairRDD[K, V] = {
  require(rdds.nonEmpty, "Union called on no RDDs")
  implicit val ctag: ClassTag[(K, V)] = rdds.head.classTag
  implicit val ctagK: ClassTag[K] = rdds.head.kClassTag
  implicit val ctagV: ClassTag[V] = rdds.head.vClassTag
  new JavaPairRDD(sc.union(rdds.map(_.rdd)))
}

/** Build the union of JavaDoubleRDDs.
 *  @throws IllegalArgumentException (via `require`) if `rdds` is empty.
 */
@varargs
def union(rdds: JavaDoubleRDD*): JavaDoubleRDD = {
  require(rdds.nonEmpty, "Union called on no RDDs")
  new JavaDoubleRDD(sc.union(rdds.map(_.srdd)))
}
/**
 * Broadcast a read-only variable to the cluster, returning a
 * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
 * The variable will be sent to each cluster only once.
 */
def broadcast[T](value: T): Broadcast[T] = sc.broadcast(value)(fakeClassTag)

/** Shut down the SparkContext. */
def stop(): Unit = {
  sc.stop()
}

/** Shut down the SparkContext; equivalent to [[stop]]. */
override def close(): Unit = stop()

/**
 * Get Spark's home location from either a value set through the constructor,
 * or the spark.home Java property, or the SPARK_HOME environment variable
 * (in that order of preference). If none of these is set, an empty Optional is returned.
 */
def getSparkHome(): Optional[String] = JavaUtils.optionToOptional(sc.getSparkHome())

/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 *
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addFile(path: String): Unit = {
  sc.addFile(path)
}
/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 *
 * A directory can be given if the recursive option is set to true. Currently directories are only
 * supported for Hadoop-supported filesystems.
 *
 * @param recursive whether to allow `path` to be a directory (Hadoop filesystems only)
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addFile(path: String, recursive: Boolean): Unit = {
  sc.addFile(path, recursive)
}

/**
 * Adds a JAR dependency for all tasks to be executed on this SparkContext in the future.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI.
 *
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addJar(path: String): Unit = {
  sc.addJar(path)
}
/**
 * Returns the Hadoop configuration used for the Hadoop code (e.g. file systems) we reuse.
 *
 * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
 * plan to set some global configurations for all Hadoop RDDs.
 */
def hadoopConfiguration(): Configuration = {
  sc.hadoopConfiguration
}

/**
 * Set the directory under which RDDs are going to be checkpointed. The directory must
 * be an HDFS path if running on a cluster.
 */
def setCheckpointDir(dir: String): Unit = {
  sc.setCheckpointDir(dir)
}

/** The checkpoint directory, if one has been set via [[setCheckpointDir]]. */
def getCheckpointDir: Optional[String] = JavaUtils.optionToOptional(sc.getCheckpointDir)

/** Load an RDD from a checkpoint file at `path` (internal use, hence `protected`). */
protected def checkpointFile[T](path: String): JavaRDD[T] = {
  implicit val ctag: ClassTag[T] = fakeClassTag
  new JavaRDD(sc.checkpointFile(path))
}
/**
 * Return a copy of this JavaSparkContext's configuration. The configuration ''cannot'' be
 * changed at runtime.
 */
def getConf: SparkConf = sc.getConf

/**
 * Pass-through to SparkContext.setCallSite. For API support only.
 */
def setCallSite(site: String): Unit = {
  sc.setCallSite(site)
}

/**
 * Pass-through to SparkContext.clearCallSite. For API support only.
 */
def clearCallSite(): Unit = {
  sc.clearCallSite()
}
/**
 * Set a local property that affects jobs submitted from this thread, and all child
 * threads, such as the Spark fair scheduler pool.
 *
 * These properties are inherited by child threads spawned from this thread. This
 * may have unexpected consequences when working with thread pools. The standard java
 * implementation of thread pools have worker threads spawn other worker threads.
 * As a result, local properties may propagate unpredictably.
 */
def setLocalProperty(key: String, value: String): Unit = sc.setLocalProperty(key, value)

/**
 * Get a local property set in this thread, or null if it is missing. See
 * `org.apache.spark.api.java.JavaSparkContext.setLocalProperty`.
 */
def getLocalProperty(key: String): String = sc.getLocalProperty(key)

/**
 * Set a human readable description of the current job.
 * @since 2.3.0
 */
def setJobDescription(value: String): Unit = sc.setJobDescription(value)

/** Control our logLevel. This overrides any user-defined log settings.
 * @param logLevel The desired log level as a string.
 * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
 */
def setLogLevel(logLevel: String): Unit = {
  sc.setLogLevel(logLevel)
}
/**
 * Assigns a group ID to all the jobs started by this thread until the group ID is set to a
 * different value or cleared.
 *
 * Often, a unit of execution in an application consists of multiple Spark actions or jobs.
 * Application programmers can use this method to group all those jobs together and give a
 * group description. Once set, the Spark web UI will associate such jobs with this group.
 *
 * The application can also use `org.apache.spark.api.java.JavaSparkContext.cancelJobGroup`
 * to cancel all running jobs in this group. For example,
 * {{{
 * // In the main thread:
 * sc.setJobGroup("some_job_to_cancel", "some job description");
 * rdd.map(...).count();
 *
 * // In a separate thread:
 * sc.cancelJobGroup("some_job_to_cancel");
 * }}}
 *
 * If interruptOnCancel is set to true for the job group, then job cancellation will result
 * in Thread.interrupt() being called on the job's executor threads. This is useful to help ensure
 * that the tasks are actually stopped in a timely manner, but is off by default due to HDFS-1208,
 * where HDFS may respond to Thread.interrupt() by marking nodes as dead.
 */
def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean): Unit =
  sc.setJobGroup(groupId, description, interruptOnCancel)

/**
 * Assigns a group ID to all the jobs started by this thread until the group ID is set to a
 * different value or cleared.
 *
 * @see `setJobGroup(groupId: String, description: String, interruptThread: Boolean)`.
 * This method sets interruptOnCancel to false.
 */
def setJobGroup(groupId: String, description: String): Unit = sc.setJobGroup(groupId, description)

/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = sc.clearJobGroup()

/**
 * Cancel active jobs for the specified group. See
 * `org.apache.spark.api.java.JavaSparkContext.setJobGroup` for more information.
 */
def cancelJobGroup(groupId: String): Unit = sc.cancelJobGroup(groupId)

/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs(): Unit = sc.cancelAllJobs()

/**
 * Returns a Java map of JavaRDDs that have marked themselves as persistent via cache() call.
 *
 * @note This does not necessarily mean the caching or computation was successful.
 */
def getPersistentRDDs: JMap[java.lang.Integer, JavaRDD[_]] = {
  // The Scala map is keyed by Int; the cast re-types the converted Java map so
  // callers see boxed java.lang.Integer keys.
  sc.getPersistentRDDs.mapValues(s => JavaRDD.fromRDD(s)).toMap
    .asJava.asInstanceOf[JMap[java.lang.Integer, JavaRDD[_]]]
}
}
object JavaSparkContext {
  /** Implicitly wrap a SparkContext in its Java-friendly counterpart. */
  implicit def fromSparkContext(sc: SparkContext): JavaSparkContext = new JavaSparkContext(sc)

  /** Implicitly unwrap a JavaSparkContext to the underlying SparkContext. */
  implicit def toSparkContext(jsc: JavaSparkContext): SparkContext = jsc.sc

  /**
   * Find the JAR from which a given class was loaded, to make it easy for users to pass
   * their JARs to SparkContext.
   */
  def jarOfClass(cls: Class[_]): Array[String] = SparkContext.jarOfClass(cls).toArray

  /**
   * Find the JAR that contains the class of a particular object, to make it easy for users
   * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
   * your driver program.
   */
  def jarOfObject(obj: AnyRef): Array[String] = SparkContext.jarOfObject(obj).toArray

  /**
   * Produces a ClassTag[T], which is actually just a casted ClassTag[AnyRef].
   *
   * This method is used to keep ClassTags out of the external Java API, as the Java compiler
   * cannot produce them automatically. While this ClassTag-faking does please the compiler,
   * it can cause problems at runtime if the Scala API relies on ClassTags for correctness.
   *
   * Often, though, a ClassTag[AnyRef] will not lead to incorrect behavior, just worse performance
   * or security issues. For instance, an Array[AnyRef] can hold any type T, but may lose primitive
   * specialization.
   */
  private[spark]
  def fakeClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]
}
| hvanhovell/spark | core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala | Scala | apache-2.0 | 31,439 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.javaapi
import java.nio.ByteBuffer
import kafka.cluster.BrokerEndPoint
/**
 * Java-API wrapper around [[kafka.api.GroupCoordinatorResponse]]; equality,
 * hashing and rendering all delegate to the underlying Scala response.
 */
class GroupCoordinatorResponse(private val underlying: kafka.api.GroupCoordinatorResponse) {

  // Error reported by the broker for this coordinator lookup.
  def error = underlying.error

  // Numeric wire code of the error above.
  def errorCode = error.code

  def coordinator: BrokerEndPoint = {
    import kafka.javaapi.Implicits._
    // NOTE(review): relies on an implicit in kafka.javaapi.Implicits to unwrap
    // coordinatorOpt (an Option) into a BrokerEndPoint — presumably it throws
    // when no coordinator is available; confirm before relying on a non-null return.
    underlying.coordinatorOpt
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case null => false
      case other: GroupCoordinatorResponse => this.underlying.equals(other.underlying)
      case _ => false
    }
  }

  override def hashCode = underlying.hashCode

  override def toString = underlying.toString
}
object GroupCoordinatorResponse {
  // Deserialize a GroupCoordinatorResponse from 'buffer' by delegating to the
  // Scala-API parser and wrapping the result.
  def readFrom(buffer: ByteBuffer) = new GroupCoordinatorResponse(kafka.api.GroupCoordinatorResponse.readFrom(buffer))
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/javaapi/GroupCoordinatorResponse.scala | Scala | apache-2.0 | 1,614 |
package com.twitter.finagle.stats
import java.util.concurrent.ConcurrentHashMap
import java.util.function.Consumer
import scala.collection.compat._
import scala.util.matching.Regex
/**
 * Caches the results of evaluating a given regex.
 *
 * Remembers, per key, whether the regex matched it. Applying this function to
 * a map of samples yields the same map minus every key the regex matches.
 * Cache entries for keys no longer present in the samples are evicted on each
 * application.
 */
private[stats] class CachedRegex(regex: Regex)
    extends (collection.Map[String, Number] => collection.Map[String, Number]) {

  // public for testing
  // Maps a key to whether the regex matches it. Concurrent races are benign:
  // a given key always computes the same boolean, so competing writers can
  // only store the identical value.
  val regexMatchCache = new ConcurrentHashMap[String, java.lang.Boolean]

  // True when the key should be KEPT (i.e. the regex does not match it).
  private[this] val keepKey: String => Boolean = { key =>
    // A single raw `get` is used (instead of containsKey followed by get) so
    // that a miss is represented by `null` and decided in one lookup, which
    // avoids a false-negative race between two separate calls.
    val cached = regexMatchCache.get(key)
    if (cached ne null) {
      !cached.booleanValue
    } else {
      val matches = regex.pattern.matcher(key).matches()
      regexMatchCache.put(key, matches) // remember the verdict for next time
      !matches
    }
  }

  def apply(samples: collection.Map[String, Number]): collection.Map[String, Number] = {
    // Evict cache entries for keys that have disappeared from the samples.
    // A sequential sweep over the key set mirrors the original intent of not
    // running this in parallel; ConcurrentHashMap's iterator supports removal.
    val keys = regexMatchCache.keySet.iterator
    while (keys.hasNext) {
      if (!samples.contains(keys.next())) keys.remove()
    }
    samples.view.filterKeys(keepKey).toMap
  }
}
| twitter/util | util-stats/src/main/scala/com/twitter/finagle/stats/CachedRegex.scala | Scala | apache-2.0 | 2,051 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Khalifeh Al-Jadda, John A. Miller, Hao Peng
* @version 1.2
* @date Mon Aug 15 13:13:15 EDT 2016
* @see LICENSE (MIT style license file).
*/
package scalation.analytics.classifier
import scala.math._
import scalation.linalgebra.{MatriI, VectoI, VectorD, VectorI}
import scalation.linalgebra.gen.{HMatrix4, HMatrix3}
import scalation.relalgebra.Relation
import scalation.util.{banner, time}
import BayesClassifier.me_default
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PGMHD3` class implements a three level Bayes Classifier for discrete input data.
* The classifier is trained using a data matrix 'x' and a classification vector 'y'.
* Each data vector in the matrix is classified into one of 'k' classes numbered
* 0, ..., k-1. Prior probabilities are calculated based on the population of
* each class in the training-set. Relative posterior probabilities are computed
* by multiplying these by values computed using conditional probabilities. The
* classifier is naive, because it assumes feature independence and therefore
* simply multiplies the conditional probabilities.
* -----------------------------------------------------------------------------
* [ x ] -> [ x z ] where x features are level 2 and z features are level 3.
* -----------------------------------------------------------------------------
* @param x the integer-valued data vectors stored as rows of a matrix
* @param nx the number of x features/columns
* @param y the class vector, where y(l) = class for row l of the matrix x, x(l)
* @param fn the names for all features/variables
* @param k the number of classes
* @param cn the names for all classes
* @param vc the value count (number of distinct values) for each feature
* @param me use m-estimates (me == 0 => regular MLE estimates)
*/
class PGMHD3 (x: MatriI, nx: Int, y: VectoI, fn: Array [String], k: Int, cn: Array [String],
              private var vc: VectoI = null, me: Int = me_default)
      extends BayesClassifier (x, y, fn, k, cn)
{
    private val DEBUG = true                              // debug flag

    private val nz  = x.dim2 - nx                         // number of z features/columns
    private val xrg = 0 until nx                          // range (column indices) of the X-features
    private val zrg = nx until x.dim2                     // range (column indices) of the Z-features
    private val cor = calcCorrelation2 (zrg, xrg)         // feature correlation matrix

    private val parent = new VectorI (nz)                 // vector holding the parent for each Z feature/variable
    private val vcp    = Array.ofDim [Int] (nz)           // value count for the parent

    private val popC  = new VectorI (k)                   // frequency counts for classes 0, ..., k-1
    private val probC = new VectorD (k)                   // probabilities for classes 0, ..., k-1
    private val popX  = new HMatrix3 [Int] (k, nx)        // conditional frequency counts for X feature j
    private val probX = new HMatrix3 [Double] (k, nx)     // conditional probabilities for X feature j
    private val popZ  = new HMatrix4 [Int] (k, nz)        // conditional frequency counts for Z feature j
    private val probZ = new HMatrix4 [Double] (k, nz)     // conditional probabilities for Z feature j

    if (vc == null) {
        shiftToZero; vc = vc_fromData                     // determine 'vc' from data
    } // if

    val vc_x = vc.slice (0, nx)().toArray                 // value counts for the X-features
    val vc_z = vc.slice (nx, n)().toArray                 // value counts for the Z-features

    computeParent ()
    computeVcp ()

    popX.alloc (vc_x)
    probX.alloc (vc_x)
    popZ.alloc (vc_z, vcp)
    probZ.alloc (vc_z, vcp)

    if (DEBUG) {
        println ("value count vc = " + vc)
        println ("value count vc_x = " + vc_x.deep)
        println ("value count vc_z = " + vc_z.deep)
        println ("correlation matrix = " + cor)
        println ("value count vcP = " + vcp.deep)
        println ("Z's parent = " + parent)
    } // if

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the parent of each Z feature as the X feature with the highest
     *  absolute correlation.  Z features can only select a parent from the X features.
     */
    def computeParent ()
    {
        for (i <- 0 until nz) {
            val correl = cor(i).map ((x: Double) => abs (x))
            parent(i)  = correl.argmax ()
        } // for
    } // computeParent

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the value counts of each parent feature based on the parent vector.
     */
    def computeVcp ()
    {
        for (j <- 0 until nz) vcp(j) = vc_x(parent(j))
    } // computeVcp

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Build a model: no structure learning is performed, so all features are
     *  kept and the returned DAG has no edges.
     *  @param testStart  starting index of test region (inclusive) used in cross-validation
     *  @param testEnd    ending index of test region (exclusive) used in cross-validation
     */
    def buildModel (testStart: Int, testEnd: Int): (Array [Boolean], DAG) =
    {
        (Array.fill (n)(true), new DAG (Array.ofDim [Int] (n, 0)))
    } // buildModel

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count the frequencies for 'y' having class 'i' and value 'x' over the
     *  given training rows.  Shared by both 'train' methods (this logic was
     *  previously duplicated).
     *  @param rows  iterator over the indices of the training rows
     *  @param tag   label used in debug banners to identify the caller
     */
    private def tallyFrequencies (rows: Iterator [Int], tag: String)
    {
        if (DEBUG) banner ("frequencies (" + tag + ")")
        for (l <- rows) {                                 // l = lth row of data matrix x
            val i = y(l)                                  // get the class
            popC(i) += 1                                  // increment ith class
            for (j <- 0 until n) {
                if (j < nx) popX(i, j, x(l, j)) += 1                   // ith class, jth X feature, x value
                else popZ(i, j-nx, x(l, j), x(l, parent(j-nx))) += 1   // ith class, jth Z feature, z value
            } // for
        } // for

        if (DEBUG) {
            println ("popC = " + popC)                    // #(C = i)
            println ("popX = " + popX)                    // #(X_j = x & C = i)
            println ("popZ = " + popZ)                    // #(Z_j = z & C = i)
        } // if
    } // tallyFrequencies

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert the accumulated frequency counts into class and conditional
     *  probabilities, applying m-estimate smoothing when 'me' > 0.  Shared by
     *  both 'train' methods (this logic was previously duplicated).
     *  @param tag  label used in debug banners to identify the caller
     */
    private def estimateProbabilities (tag: String)
    {
        if (DEBUG) banner ("train (" + tag + ")")
        for (i <- 0 until k) {                            // for each class i
            val pci = popC(i).toDouble                    // population of class i
            probC(i) = pci / md                           // probability of class i

            for (j <- 0 until nx) {                       // for each X feature j
                val me_vc = me / vc_x(j).toDouble
                for (xj <- 0 until vc_x(j)) {             // for each value for feature j: xj
                    probX(i, j, xj) = (popX(i, j, xj) + me_vc) / (pci + me)
                } // for
            } // for

            for (j <- 0 until nz) {                       // for each Z feature j
                val me_vc = me / vc_z(j).toDouble
                for (zj <- 0 until vc_z(j); zp <- 0 until vc_x(parent(j))) {
                    // for each value of feature j: zj and each value of zj's parent: zp
                    probZ(i, j, zj, zp) = (popZ(i, j, zj, zp) + me_vc) / (pci + me)
                } // for
            } // for
        } // for

        if (DEBUG) {
            println ("probC = " + probC)                  // P(C = i)
            println ("probX = " + probX)                  // P(X_j = x | C = i)
            println ("probZ = " + probZ)                  // P(Z_j = z | C = i)
        } // if
    } // estimateProbabilities

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Train the classifier by computing the probabilities for C, and the
     *  conditional probabilities for X_j and Z_j, skipping the test region.
     *  @param testStart  starting index of test region (inclusive) used in cross-validation
     *  @param testEnd    ending index of test region (exclusive) used in cross-validation
     */
    def train (testStart: Int, testEnd: Int)
    {
        tallyFrequencies ((0 until m).iterator.filter (l => l < testStart || l >= testEnd),
                          "testStart, testEnd")
        estimateProbabilities ("testStart, testEnd")
    } // train

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Train the classifier by computing the probabilities for C, and the
     *  conditional probabilities for X_j and Z_j, over the given training rows.
     *  @param itrain  indices of the instances considered train data
     */
    override def train (itrain: Array [Int])
    {
        tallyFrequencies (itrain.iterator, "itrain")
        estimateProbabilities ("itrain")
    } // train

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Given a discrete data vector 'z', classify it returning the class number
     *  (0, ..., k-1) with the highest relative posterior probability.
     *  Return the best class, its name and its relative probability.
     *  @param z  the data vector to classify
     */
    def classify (z: VectoI): (Int, String, Double) =
    {
        if (DEBUG) banner ("classify (z)")
        val prob = new VectorD (k)
        for (i <- 0 until k) {
            prob(i) = probC(i)                                          // P(C = i)
            for (j <- 0 until n) {
                if (j < nx) prob(i) *= probX(i, j, z(j))                // P(X_j = z_j | C = i)
                else prob(i) *= probZ(i, j-nx, z(j), z(parent(j-nx)))   // P(Z_j = z_j | C = i)
            } // for
        } // for
        if (DEBUG) println ("prob = " + prob)
        val best = prob.argmax ()             // class with the highest relative posterior probability
        (best, cn(best), prob(best))          // return the best class, its name and its probability
    } // classify

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Reset or re-initialize all the population and probability vectors and
     *  hypermatrices to 0.
     */
    def reset ()
    {
        popC.set (0)
        probC.set (0)
        popX.set (0)
        probX.set (0)
        popZ.clear()
        probZ.clear()
        popZ.alloc (vc_z, vcp)
        probZ.alloc (vc_z, vcp)
    } // reset

} // PGMHD3 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `PGMHD3` is the companion object for the `PGMHD3` class.
*/
object PGMHD3
{
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a `PGMHD3` object, passing 'x' and 'y' together in one matrix
     *  (the last column of 'xy' is the class vector 'y').
     *  @param xy  the data vectors along with their classifications stored as rows of a matrix
     *  @param nx  the number of x features/columns
     *  @param fn  the names of the features
     *  @param k   the number of classes
     *  @param cn  the names for all classes
     *  @param vc  the value count (number of distinct values) for each feature
     *  @param me  use m-estimates (me == 0 => regular MLE estimates)
     */
    def apply (xy: MatriI, nx: Int, fn: Array [String], k: Int, cn: Array [String],
               vc: VectoI = null, me: Int = me_default) =
    {
        new PGMHD3 (xy(0 until xy.dim1, 0 until xy.dim2 - 1), nx, xy.col(xy.dim2 - 1), fn, k, cn,
                    vc, me)
    } // apply

} // PGMHD3 object
import scalation.linalgebra.MatrixI
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PGMHD3Test` object is used to test the `PGMHD3` class.
 *  Classify whether a car is more likely to be stolen (1) or not (0).
 *  @see www.inf.u-szeged.hu/~ormandi/ai2/06-naiveBayes-example.pdf
 *  > run-main scalation.analytics.classifier.PGMHD3Test
 */
object PGMHD3Test extends App
{
    // Feature encodings:
    // x0: Color:   Red (1), Yellow (0)
    // x1: Type:    SUV (1), Sports (0)
    // x2: Origin:  Domestic (1), Imported (0)
    // x3: Mpg:     High (1), Low (0)
    // features:                 x0 x1 x2 x3
    val x = new MatrixI ((10, 4), 1, 0, 1, 1,               // data matrix
                                  1, 0, 1, 0,
                                  1, 0, 1, 1,
                                  0, 0, 1, 1,
                                  0, 0, 0, 1,
                                  0, 1, 0, 0,
                                  0, 1, 0, 0,
                                  0, 1, 1, 1,
                                  1, 1, 0, 0,
                                  1, 0, 0, 0)
    val y  = VectorI (1, 0, 1, 0, 1, 0, 1, 0, 0, 1)         // classification vector: 0 (No), 1 (Yes)
    val fn = Array ("Color", "Type", "Origin", "Mpg")       // feature/variable names
    val cn = Array ("No", "Yes")                            // class names

    println ("x = " + x)
    println ("y = " + y)
    println ("---------------------------------------------------------------")

    val pgmhd3 = new PGMHD3 (x, 2, y, fn, 2, cn)            // create the classifier (first 2 columns are X-features)

    // train the classifier ---------------------------------------------------
    pgmhd3.train ()

    // test sample ------------------------------------------------------------
    val z1 = VectorI (1, 0, 1, 1)                           // existing data vector to classify
    val z2 = VectorI (1, 1, 1, 0)                           // new data vector to classify
    println ("classify (" + z1 + ") = " + pgmhd3.classify (z1) + "\\n")
    println ("classify (" + z2 + ") = " + pgmhd3.classify (z2) + "\\n")

    // pgmhd3.crossValidateRand ()                          // cross validate the classifier
} // PGMHD3Test object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PGMHD3Test2` object is used to test the 'PGMHD3' class.
 *  Given whether a person is Fast and/or Strong, classify them as making C = 1
 *  or not making C = 0 the football team.
 *  > run-main scalation.analytics.classifier.PGMHD3Test2
 */
object PGMHD3Test2 extends App
{
    // training-set -----------------------------------------------------------
    // x0: Fast
    // x1: Strong
    // y:  Classification (No/0, Yes/1)
    // features:                  x0 x1  y
    val xy = new MatrixI ((10, 3), 1, 1, 1,
                                   1, 1, 1,
                                   1, 0, 1,
                                   1, 0, 1,
                                   1, 0, 0,
                                   0, 1, 0,
                                   0, 1, 0,
                                   0, 1, 1,
                                   0, 0, 0,
                                   0, 0, 0)
    val fn = Array ("Fast", "Strong")                       // feature names
    val cn = Array ("No", "Yes")                            // class names

    println ("xy = " + xy)
    println ("---------------------------------------------------------------")

    val pgmhd3 = PGMHD3 (xy, 1, fn, 2, cn, null, 0)         // create the classifier (last column is the class label)

    // train the classifier ---------------------------------------------------
    pgmhd3.train()

    // test sample ------------------------------------------------------------
    val z = VectorI (1, 0)                                  // new data vector to classify
    println ("classify (" + z + ") = " + pgmhd3.classify (z) + "\\n")

    println("Cross Validation starts:")
    println("CV average accuracy = " + pgmhd3.crossValidate ())  // cross validate the classifier
} // PGMHD3Test2 object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PGMHD3Test3` object is used to test the 'PGMHD3' class on the
 *  breast-cancer dataset, cross validating the resulting classifier.
 *  > run-main scalation.analytics.classifier.PGMHD3Test3
 */
object PGMHD3Test3 extends App
{
    val filename = BASE_DIR + "breast-cancer.arff"
    val data = Relation (filename, -1, null)                // val: the relation is never reassigned
    val xy = data.toMatriI2 (null)
    val fn = data.colName.toArray                           // feature/column names
    val cn = Array ("0", "1")                               // class names

    val pgmhd3 = PGMHD3 (xy, 2, fn, 2, cn, null, 0)         // create the classifier
    pgmhd3.train ()
    println("Cross Validation starts:")
    println("CV average accuracy = " + pgmhd3.crossValidate ())
} // PGMHD3Test3 object
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/analytics/classifier/PGMHD3.scala | Scala | mit | 19,442 |
package com.bolour.boardgame.scala.server.domain
/**
 * Version of the server.
 *
 * For now it serves as both the API version for clients,
 * and the persistence version for specific representations
 * of persisted objects.
 */
object Version {
  // Single shared version number; per the note above it covers both the
  // client API and the persisted representations.
  val version: Int = 1
}
| azadbolour/boardgame | scala-server/app/com/bolour/boardgame/scala/server/domain/Version.scala | Scala | agpl-3.0 | 278 |
import leon.lang._
import leon.collection._
object Map {

  // Claims mapping once equals mapping twice (idempotence of map(f)); false
  // for general f (e.g. f = _ + 1). NOTE(review): the surrounding path
  // suggests these are deliberately invalid properties the verifier should
  // refute — confirm before "fixing" them.
  def failure1[T](l: List[T], f: T => T): Boolean = {
    l.map(f) == l.map(f).map(f)
  }.holds

  // Claims map(f) equals a hand-rolled "map" that forgets to apply f to the
  // head; false whenever f changes the head element.
  def failure2[T](l: List[T], f: T => T): Boolean = {
    l.map(f) == (l match {
      case Cons(head, tail) => Cons(head, tail.map(f))
      case Nil() => Nil[T]()
    })
  }.holds

  // Claims every list is a fixed point of flatMap(f); false unless f wraps
  // each element in a singleton list.
  def failure3[T](l: List[T], f: T => List[T]): Boolean = {
    l == l.flatMap(f)
  }.holds
}
| regb/leon | testcases/verification/higher-order/invalid/Map.scala | Scala | gpl-3.0 | 435 |
// NOTE(review): appears to be a scalac regression-test fixture (pos/t11663)
// exercising an @inline final method whose body binds a tuple pattern with an
// unused component — keep the unused `y` binding intact; confirm before changing.
class A {
  // Destructures a tuple and returns its first component (10); `y` is
  // deliberately left unused.
  @inline final def m: Int = {
    val (x, y) = (10, 20)
    x
  }
}
| lrytz/scala | test/files/pos/t11663/A_1.scala | Scala | apache-2.0 | 82 |
package example.rock.paper.scissors
import org.scalatest.{FlatSpec, Matchers}
/** Spec for `Util.doUntilSuccess`: it must keep invoking the supplied thunk
 *  until the thunk yields a defined value, and then return that value.
 */
class UtilSpec extends FlatSpec with Matchers {

  "An Util" should "enable executing a function untill we get value" in {
    // How many times the stub below has returned None so far.
    var countCalled = 0
    // Returns None on the first ten invocations (incrementing the counter each
    // time) and Some(10) from the eleventh invocation onward.
    def isCalledTenTimes: Option[Int] = {
      if (countCalled ==10)
        Some(10)
      else{
        countCalled +=1
        None
      }
    }
    Util.doUntilSuccess(isCalledTenTimes) should be(10)
  }
}
| chavdarch/rock-paper-scissors | src/test/scala/example/rock/paper/scissors/UtilSpec.scala | Scala | mit | 442 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
/** Base trait for all products, which in the standard library include at
 *  least [[scala.Product1]] through [[scala.Product22]] and therefore also
 *  their subclasses [[scala.Tuple1]] through [[scala.Tuple22]]. In addition,
 *  all case classes implement `Product` with synthetically generated methods.
 */
trait Product extends Any with Equals {

  /** The size of this product: `k` for a product `A(x,,1,,, ..., x,,k,,)`. */
  def productArity: Int

  /** The n^th^ element of this product, 0-based: for `A(x,,1,,, ..., x,,k,,)`,
   *  returns `x,,(n+1),,` where `0 <= n < k`.
   *
   *  @param n the index of the element to return
   *  @throws IndexOutOfBoundsException if `n < 0 || n >= productArity`
   *  @return the element `n` elements after the first element
   */
  def productElement(n: Int): Any

  /** An iterator over this product's elements in index order; elements are
   *  produced lazily by calling `productElement`.
   */
  def productIterator: Iterator[Any] =
    scala.collection.Iterator.tabulate(productArity)(productElement)

  /** A string used in the `toString` methods of derived classes; derived
   *  classes may override it to prepend a prefix. Empty by default.
   */
  def productPrefix: String = ""

  /** The name of the n^th^ element of this product, 0-based; the empty
   *  string in this default implementation.
   *
   *  @param n the index of the element name to return
   *  @throws IndexOutOfBoundsException if `n < 0 || n >= productArity`
   */
  def productElementName(n: Int): String = {
    if (n < 0 || n >= productArity)
      throw new IndexOutOfBoundsException(s"$n is out of bounds (min 0, max ${productArity-1})")
    ""
  }

  /** An iterator over the element names in index order; names are produced
   *  lazily by calling `productElementName`.
   */
  def productElementNames: Iterator[String] =
    scala.collection.Iterator.tabulate(productArity)(productElementName)
}
| lrytz/scala | src/library/scala/Product.scala | Scala | apache-2.0 | 2,807 |
package org.modiphy.math
import cern.colt.matrix._
import cern.colt.matrix.DoubleFactory2D._
import cern.colt.function.DoubleFunction
import cern.colt.matrix.linalg.{Algebra,EigenvalueDecomposition}
import org.modiphy.util._
/** Enrichment layer for Colt matrices/vectors.
 *
 * Importing `EnhancedMatrix._` brings the `Matrix`/`Vector` aliases and the
 * implicit conversions below into scope, enabling the operator syntax used
 * throughout this file.
 */
object EnhancedMatrix{
  type Matrix=DoubleMatrix2D
  type Vector=DoubleMatrix1D
  // Implicit wrappers; the target classes are defined later in this file.
  implicit def enhanceMatrix(m:Matrix)=new EnhancedMatrix(m)
  implicit def enhanceVector(m:Vector)=new EnhancedVector(m)
  implicit def enhanceArray(m:Array[Double])=new EnhancedArray(m)
  implicit def enhanceList(m:List[Double])=new EnhancedList(m)
  // Shared Colt linear-algebra helper; accessible to the companion class
  // EnhancedMatrix (used there for matrix multiplication).
  private lazy val algebra = new Algebra
  class EnhancedList(l:List[Double]){
    def toVector=Vector(l)
  }
  class EnhancedArray(a:Array[Double]){
    def toVector=Vector(a)
  }
}
import EnhancedMatrix._
/** Factory methods for dense Colt `DoubleMatrix1D` vectors. */
object Vector{
  // One dense 1-D factory instance shared by all overloads.
  private val factory = cern.colt.matrix.DoubleFactory1D.dense
  /** A zero-initialised vector of length `i`. */
  def apply(i:Int):Vector = factory.make(i)
  /** A vector holding the given elements. */
  def apply(a:Double*):Vector = apply(a.toArray)
  /** A vector holding the elements of the given array. */
  def apply(a:Array[Double]):Vector = factory.make(a)
  /** A vector holding the elements of the given list. */
  def apply(l:List[Double]):Vector = apply(l.toArray)
}
/** Factory for dense Colt `DoubleMatrix2D` matrices. */
object Matrix{
  /** A zero-initialised i-by-j dense matrix (via the imported `dense` factory). */
  def apply(i:Int,j:Int)=dense.make(i,j)
}
/** Matrix exponential exp(Q t) computed from an eigendecomposition Q = U D V.
 *
 * Implementations supply the decomposition (`u`, `rawD`, `v`), the rate
 * matrix `q`, the stationary distribution `pi` and an optional `scale`.
 */
trait MatrixExponential{
  def pi:Vector
  def u:Matrix
  def v:Matrix
  def q:Matrix
  // Reassembled (scaled) rate matrix U * D * V.
  lazy val qNorm = u * d * v
  def rawD:Matrix
  def scale:Option[Double]
  /** Eigenvalue matrix, rescaled when `scale` is set.
   *
   * The normalisation factor is -diag(q).pi / scale, so every eigenvalue is
   * divided by it; with scale = 1 this presumably normalises the expected
   * substitution rate to 1 — confirm against callers.
   */
  lazy val d = {
    if (scale.isDefined){
      def normFact = - dense.diagonal(q).zDotProduct(pi) / scale.get
      def normFunc = new DoubleFunction(){def apply(v:Double)=v/normFact}
      rawD.copy.assign(normFunc)
    }else {
      rawD
    }
  }
  /** exp(Q t): exponentiate the eigenvalues, then change basis back. */
  def exp(t:Double)={
    (u * d.expVals(t)) * v
  }
}
//Doesn't play well with other actors
//(react from channel belonging to other actor error)
//needs to be recoded as pure actor
//TODO
/** Caches exp(Q t) results per branch length `t` behind a scala.actors actor.
 *
 * NOTE(review): the comments above this trait already flag it as broken
 * ("react from channel belonging to other actor"); scala.actors is also
 * long deprecated. Documented as-is; do not restructure without a rewrite.
 */
trait CachedMatrixExponential extends MatrixExponential{
  import scala.actors.Actor
  import scala.actors.OutputChannel
  import scala.actors.Actor._
  // Protocol messages exchanged with the cache actor.
  case class CacheReq(t:Double)
  case class Calc(o:OutputChannel[Any],r:CacheReq)
  case class Answer(t:Double,m:Matrix)
  // Uncached computation, delegates to MatrixExponential.exp.
  def realExp(t:Double) = super.exp(t)
  case object Exit
  class CacheActor extends Actor{
    // Soft-reference map so cached matrices can be reclaimed under memory pressure.
    val cache = new SoftCacheMap[Double,Matrix](500)
    def act{
      loop{
        react{
          case Answer(t,m)=>
            println("GOT ANSWER")
            cache+((t,m))
          case CacheReq(t) =>
            println("GOT CACHEREQ")
            val cacheLookup = cache.get(t)
            if (cacheLookup.isDefined){
              reply(Answer(t,cacheLookup.get))
            }
            else {
              // Cache miss: spawn a one-shot worker that computes the value,
              // feeds it back into the cache, and answers the original sender.
              Actor.actor{
                react{
                  case Calc(o,CacheReq(t))=>
                    val ans = realExp(t)
                    o ! Answer(t,ans)
                    reply(Answer(t,ans))
                }
                exit
              }
            } ! Calc(sender,CacheReq(t))
          case Exit => exit
        }
      }
    }
  }
  val actor = new CacheActor
  actor.start
  /** Synchronous lookup: blocks on `!?` until the cache actor answers. */
  override def exp(t:Double)={
    println("SENDING")
    (actor !? CacheReq(t)).asInstanceOf[Answer].m
  }
  /** Stops the cache actor. */
  def exit { actor ! Exit }
}
/** Matrix exponential via a plain eigendecomposition of `q` (V inverted explicitly). */
class MatExpNormal(val q:Matrix,val pi:Vector,val scale:Option[Double]) extends MatrixExponential{
  val eigen = new EigenvalueDecomposition(q)
  val algebra = new Algebra
  val u = eigen.getV
  val rawD = eigen.getD
  // v is the inverse of the eigenvector matrix, so q = u * rawD * v.
  val v = algebra.inverse(u)
}
/** Decorator that reuses an existing decomposition but forces a scale of `s`. */
class MatExpScale(m:MatrixExponential,val s:Double) extends MatrixExponential{
  def pi = m.pi
  def u = m.u
  def v = m.v
  def q = m.q
  def rawD = m.rawD
  val scale = Some(s)
}
/** Scaled exponential built directly from a precomputed decomposition (U, D, V). */
class BasicMatExpScale(val u:Matrix,val rawD:Matrix,val v:Matrix,val pi:Vector, s:Double) extends MatrixExponential{
  // Convenience constructor: accepts eigenvalues as a vector and diagonalises it.
  def this(u:Matrix,rawD:Vector,v:Matrix,pi:Vector,scale:Double)= this(u,sparse.diagonal(rawD),v,pi,scale)
  val scale = Some(s)
  // q is reassembled from the decomposition rather than stored.
  def q = u * rawD * v
}
/** Eigendecomposition via a similarity transform with sqrt(pi).
 *
 * Decomposes A = diag(sqrt(pi)) * Q * diag(1/sqrt(pi)) and maps the
 * eigenvectors back, so the inverse of U is obtained by a transpose instead
 * of an explicit matrix inversion. Presumably the Yang (1994)-style trick
 * for reversible rate matrices — requires Q reversible w.r.t. pi; confirm.
 */
class MatExpYang(val q:Matrix,val pi:Vector,val scale:Option[Double]) extends MatrixExponential{
  import cern.colt.matrix.DoubleFactory2D.sparse
  val funcRoot = new DoubleFunction(){def apply(v:Double)=Math.sqrt(v)}
  val funcRec = new DoubleFunction(){def apply(v:Double)=1.0D/v}
  // diag(sqrt(pi)) and its inverse diag(1/sqrt(pi)).
  val piRootVec = pi.copy.assign(funcRoot)
  val piRoot = sparse.diagonal(piRootVec)
  val inversePiRoot:Matrix = sparse.diagonal(piRootVec.copy.assign(funcRec))
  val a = ((piRoot * q) * inversePiRoot)//.symmetrise
  val eigen = new EigenvalueDecomposition(a)
  // Map eigenvectors back to the original basis; viewDice is the transpose.
  val u:Matrix = inversePiRoot * eigen.getV
  val v:Matrix = eigen.getV.viewDice * piRoot
  val rawD = eigen.getD
  // println("MatExpYang" + d(0,0) + " " + q(0,0))
}
/** Stateless matrix-exponential helper with a small decomposition cache. */
object MatExp{
  private lazy val algebra = new Algebra
  // Caches (eigendecomposition, V^-1) pairs keyed by the matrix's string
  // rendering — NOTE(review): correctness relies on toString being a faithful,
  // collision-free encoding of the matrix contents.
  private lazy val cache:SoftCacheMap[String,(EigenvalueDecomposition,Matrix)] = new SoftCacheMap(10)
  /** Eigendecomposition of `m` plus the inverse of its eigenvector matrix.
   * @throws InvalidMatrixException if `m` contains infinities or NaN
   */
  def decomp(m:Matrix)={
    if (m.exists{d=> d==Math.NEG_INF_DOUBLE || d==Math.POS_INF_DOUBLE || d.isNaN}) throw new InvalidMatrixException("Invalid matrix")
    cache.getOrElseUpdate(m.toString,{
      val e = new EigenvalueDecomposition(m); (e,algebra.inverse(e.getV))
    })
  }
  /** exp(m t) = V * exp(D t) * V^-1. */
  def exp(m:Matrix,t:Double) = {
    val (eigen,vprime)=decomp(m)
    val v = eigen.getV
    v * (eigen.getD expVals t) * vprime
  }
}
/** Operator-style wrapper around a Colt `DoubleMatrix2D`.
 *
 * Most methods return fresh matrices, but note that `*(n:Double)` mutates
 * the wrapped matrix in place (Colt `assign` works in place).
 */
class EnhancedMatrix(d:DoubleMatrix2D){
  // Structural type: anything exposing an integer `id` may be used as an index.
  type ID={def id:Int}
  def apply(i:Int)=d.viewRow(i)
  /** Matrix exponential exp(d * t), delegated to MatExp (cached decomposition). */
  def exp(t:Double)=MatExp.exp(d,t)
  /** Diagonal matrix whose entries are exp(t * d_ii); used on eigenvalue matrices. */
  def expVals(t:Double)=sparse.diagonal(dense.diagonal(d).assign( new DoubleFunction(){def apply(arg:Double)={Math.exp(t * arg)}}))
  /** Matrix product (allocates a new matrix). */
  def *(m:Matrix)=algebra.mult(d,m)
  /** Scalar product — NOTE: modifies and returns the wrapped matrix itself. */
  def *(n:Double)=d.assign(new DoubleFunction(){def apply(v:Double)=v * n})
  def update(i:Int,j:Int,v:Double):Unit=d.set(i,j,v)
  def update(i:ID,j:ID,v:Double):Unit=d.set(i.id,j.id,v)
  def apply(i:Int,j:Int):Double=d.get(i,j)
  def apply(i:ID,j:ID):Double=apply(i.id,j.id)
  /** Copies `s`, mirroring the upper triangle into the lower triangle. */
  def symmetrise(s:Matrix)={
    val s2 = s.copy
    (0 to d.columns-1).foreach{i=>
      (i+1 to d.columns-1).foreach{j=>
        s2(j,i)=s2(i,j)
      }
    }
    s2
  }
  /** Builds a rate matrix from this (upper-triangular) exchangeability matrix:
   * symmetrise, multiply column i by pi(i), then set each diagonal entry to
   * the negated row sum so rows sum to zero.
   */
  def sToQ(pi:{def apply(i:Int):Double})={
    val q = symmetrise(d)
    (0 to d.columns-1).foreach{i=>
      q.viewColumn(i).assign(new DoubleFunction(){
        def apply(j:Double)=j*pi(i)
      })
    }
    (0 to q.rows-1).foreach{i=>
      q(i,i)=0
      q(i,i)= -(q.viewRow(i).zSum)
    }
    q
  }
  def normalize(v:Vector):Matrix=normalize(v,1.0D)
  /** Copy of this matrix rescaled so that rate(v) becomes `overall`. */
  def normalize(v:Vector,overall:Double):Matrix={
    val sum = rate(v)
    d.copy.assign(new DoubleFunction(){def apply(d:Double)= overall * d/sum})
  }
  /** Expected rate: -diag(d) . v (v is presumably the stationary distribution). */
  def rate(v:Vector):Double={
    -dense.diagonal(d).zDotProduct(v)
  }
  def exists(f: Double=>Boolean):Boolean={
    this.elements.exists{f}
  }
  def toList=elements.toList
  def diagonal=dense.diagonal(d)
  /** New matrix with the (strict and non-strict) lower triangle transposed
   * into the upper triangle; entries below the diagonal stay at `like`'s
   * default (zero for dense matrices).
   */
  def toUpper={
    val s = d.like
    (0 to d.rows-1).foreach{i=>
      (i to d.columns-1).foreach{j=>
        s(i,j)=d(j,i)
      }
    }
    s
  }
  /** In place: sets each diagonal entry to minus the rest of its row's sum. */
  def fixDiag={
    rowElements.toList.zipWithIndex.foreach{t=>
      val (v,i)=t
      v(i)=0
      v(i)= -v.zSum
    }
    d
  }
  def rowElements={
    for (i <- 0 until d.rows) yield d.viewRow(i)
  }
  /** Row-major iterator over all cells (uses getQuick, no bounds checks). */
  def elements={
    val outer = this
    new Iterator[Double]{
      var i=0
      var j=0
      def hasNext={i < d.rows && j < d.columns}
      private def inc = {
        j+=1
        if (j >= d.columns){
          j=0
          i+=1
        }
      }
      def next={val ans = d.getQuick(i,j);inc;ans}
    }
  }
}
/** Operator-style wrapper around a Colt `DoubleMatrix1D`.
 *
 * Unlike the matrix wrapper, the scalar operators here work on a copy and
 * leave the wrapped vector untouched.
 */
class EnhancedVector(d:DoubleMatrix1D){
  // Structural type: anything exposing an integer `id` may be used as an index.
  type ID={def id:Int}
  def toList = (0 to d.size -1).map{i=>d.get(i)}.toList
  def elements=d.toArray.elements
  def apply(i:Int):Double=d.get(i)
  def apply(i:ID):Double=apply(i.id)
  def update(i:Int,v:Double):Unit=d.set(i,v)
  def update(i:ID,v:Double):Unit=update(i.id,v)
  /** Scalar product on a copy. */
  def *(n:Double)=d.copy.assign(new DoubleFunction(){def apply(v:Double)=v * n})
  def /(n:Double)={*(1/n)}
  /** Copy rescaled so its elements sum to `a`. */
  def normalize(a:Double):Vector={
    val sum = d.zSum
    d.copy.assign(new DoubleFunction(){def apply(o:Double)=a*o/sum})
  }
  def normalize:Vector=normalize(1.0D)
}
| benb/modiphy | src/main/scala/org/modiphy/math/Math.scala | Scala | mit | 7,843 |
package io.getquill.context.sql.norm
import io.getquill.norm._
import io.getquill.ast.Ast
import io.getquill.norm.ConcatBehavior.AnsiConcat
import io.getquill.norm.EqualityBehavior.AnsiEquality
import io.getquill.norm.capture.{ AvoidAliasConflict, DemarcateExternalAliases }
import io.getquill.util.Messages.{ TraceType, title }
/** Entry point: normalizes `ast` with the given (default ANSI) concat/equality behaviors. */
object SqlNormalize {
  def apply(ast: Ast, concatBehavior: ConcatBehavior = AnsiConcat, equalityBehavior: EqualityBehavior = AnsiEquality) =
    new SqlNormalize(concatBehavior, equalityBehavior)(ast)
}
/** SQL-specific AST normalization pipeline.
 *
 * The phases below are order-sensitive (see the inline notes); each phase is
 * wrapped in `demarcate` so intermediate ASTs can be traced under
 * TraceType.SqlNormalizations.
 */
class SqlNormalize(concatBehavior: ConcatBehavior, equalityBehavior: EqualityBehavior) {

  // Emits a trace entry titled `heading` and passes the AST through unchanged.
  private def demarcate(heading: String) =
    ((ast: Ast) => title(heading, TraceType.SqlNormalizations)(ast))

  // The full pipeline, built once and reused for every apply() call.
  private val normalize =
    (identity[Ast] _)
      .andThen(demarcate("original"))
      .andThen(DemarcateExternalAliases.apply _)
      .andThen(demarcate("DemarcateReturningAliases"))
      .andThen(new FlattenOptionOperation(concatBehavior).apply _)
      .andThen(demarcate("FlattenOptionOperation"))
      .andThen(new SimplifyNullChecks(equalityBehavior).apply _)
      .andThen(demarcate("SimplifyNullChecks"))
      .andThen(Normalize.apply _)
      .andThen(demarcate("Normalize"))
      // Need to do RenameProperties before ExpandJoin which normalizes-out all the tuple indexes
      // on which RenameProperties relies
      //.andThen(RenameProperties.apply _)
      .andThen(RenameProperties.apply _)
      .andThen(demarcate("RenameProperties"))
      .andThen(ExpandDistinct.apply _)
      .andThen(demarcate("ExpandDistinct"))
      .andThen(Normalize.apply _)
      .andThen(demarcate("Normalize")) // Needed only because ExpandDistinct introduces an alias.
      .andThen(NestImpureMappedInfix.apply _)
      .andThen(demarcate("NestImpureMappedInfix"))
      .andThen(Normalize.apply _)
      .andThen(demarcate("Normalize"))
      .andThen(ExpandJoin.apply _)
      .andThen(demarcate("ExpandJoin"))
      .andThen(ExpandMappedInfix.apply _)
      .andThen(demarcate("ExpandMappedInfix"))
      .andThen(ast => {
        // In the final stage of normalization, change all temporary aliases into
        // shorter ones of the form x[0-9]+.
        Normalize.apply(AvoidAliasConflict.Ast(ast, true))
      })
      .andThen(demarcate("Normalize"))

  /** Runs the full normalization pipeline on `ast`. */
  def apply(ast: Ast) = normalize(ast)
}
| getquill/quill | quill-sql-portable/src/main/scala/io/getquill/sql/norm/SqlNormalize.scala | Scala | apache-2.0 | 2,353 |
package io.github.chenfh5.lucene_analysis.ik
import java.io.File
import org.apache.commons.lang3.StringUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.env.Environment
import org.slf4j.LoggerFactory
import org.wltea.analyzer.cfg.Configuration
import org.wltea.analyzer.lucene.IKAnalyzer
import io.github.chenfh5.lucene_analysis.CustomAnalyzer
/** IK Chinese-analyzer client exposing smart and full tokenization modes. */
object IkClient extends CustomAnalyzer {
  private val LOG = LoggerFactory.getLogger(getClass.getName)

  // Base Elasticsearch settings builder: `path.conf` points at the bundled
  // es-config resource directory; use_smart defaults to true.
  private val ikSetting = {
    val path = new File(getClass.getClassLoader.getResource("es-config").toURI)
    Settings.builder()
      .put("path.home", "")
      .put("path.conf", path)
      .put("use_smart", true)
  }

  // Analyzer in smart mode, built lazily on first use.
  private lazy val ikAnalyzerSmart = {
    val setting = ikSetting.build()
    val ikAnalyzerSmart = new IKAnalyzer(new Configuration(new Environment(setting), setting))
    LOG.info("this is the ikAnalyzerSmart={}, initialized successfully", ikAnalyzerSmart)
    ikAnalyzerSmart
  }

  // Analyzer in full mode.
  // NOTE(review): this `.put` mutates the shared `ikSetting` builder with
  // use_smart=false; if this lazy val initializes before `ikAnalyzerSmart`,
  // the smart analyzer would presumably also see use_smart=false — confirm
  // Settings.Builder semantics / initialization order.
  private lazy val ikAnalyzerFull = {
    val setting = ikSetting
      .put("use_smart", false)
      .build()
    val ikAnalyzerFull = new IKAnalyzer(new Configuration(new Environment(setting), setting))
    LOG.info("this is the ikAnalyzerFull={}, initialized successfully", ikAnalyzerFull)
    ikAnalyzerFull
  }

  /** Tokenizes `inputText`; `useSmart` selects the smart (default) or full analyzer.
   * @throws IllegalArgumentException if `inputText` is blank
   */
  def getIkTokens(inputText: String, useSmart: Boolean = true) = {
    require(StringUtils.isNotBlank(inputText), "An input text must be specified")
    val ikAnalyzer = if (useSmart) ikAnalyzerSmart else ikAnalyzerFull
    getTokens(inputText, ikAnalyzer)
  }
}
| chenfh5/test-spark-connect-es | src/main/scala/io/github/chenfh5/lucene_analysis/ik/IkClient.scala | Scala | apache-2.0 | 1,588 |
package gitbucket.core.model.activity
import java.util.UUID
import gitbucket.core.model.Activity
import gitbucket.core.model.Profile.currentDate
/** Activity-feed entry recorded when a pull request is merged.
 *
 * @param userName         owner of the repository
 * @param repositoryName   repository containing the pull request
 * @param activityUserName user who performed the merge
 * @param issueId          pull request number
 * @param message          merge message, stored as the activity's additional info
 */
final case class MergeInfo(
  userName: String,
  repositoryName: String,
  activityUserName: String,
  issueId: Int,
  message: String
) extends BaseActivityInfo {

  // Builds the persisted Activity row; the message string uses gitbucket's
  // [user:...]/[pullreq:...] link markup and a fresh UUID as activity id.
  override def toActivity: Activity =
    Activity(
      userName,
      repositoryName,
      activityUserName,
      "merge_pullreq",
      s"[user:$activityUserName] merged pull request [pullreq:$userName/$repositoryName#$issueId]",
      Some(message),
      currentDate,
      UUID.randomUUID().toString
    )
}
| xuwei-k/gitbucket | src/main/scala/gitbucket/core/model/activity/MergeActivityInfo.scala | Scala | apache-2.0 | 632 |
package edu.gemini.dbTools.ephemeris
import edu.gemini.pot.sp.{ISPObsComponent, ISPObservation, ProgramTestSupport, SPComponentType, ISPFactory, ISPProgram, ProgramGen}
import edu.gemini.shared.util.immutable.ScalaConverters._
import edu.gemini.spModel.core.{Ephemeris, SiderealTarget, NonSiderealTarget, HorizonsDesignation}
import edu.gemini.spModel.gemini.obscomp.SPProgram
import edu.gemini.spModel.obs.{ObservationStatus, ObsPhase2Status, SPObservation}
import edu.gemini.spModel.obsrecord.ObsExecStatus
import edu.gemini.spModel.target.env.TargetEnvironment
import edu.gemini.spModel.target.obsComp.TargetObsComp
import edu.gemini.spModel.util.SPTreeUtil
import edu.gemini.util.security.principal.StaffPrincipal
import org.scalacheck.Gen
import org.scalacheck.Arbitrary._
import java.security.Principal
import scala.collection.JavaConverters._
import scala.util.Random
/** ScalaCheck generators and program-editing helpers for ephemeris tests.
 *
 * Builds random science programs whose observations carry target
 * environments mixing sidereal and non-sidereal targets, plus edits that
 * flip programs/observations into states the exporter should skip.
 */
trait TestSupport extends ProgramTestSupport {
  // All tests run with staff privileges.
  val User = java.util.Collections.singleton[Principal](StaffPrincipal.Gemini)

  import ProgramGen._

  // Fixed pool of (Horizons designation, name) pairs covering every
  // designation flavor: comet, new/old-style asteroid, major body.
  val nonSids = List(
    (HorizonsDesignation.Comet("C/1973 E1"), "Kohoutek" ),
    (HorizonsDesignation.AsteroidNewStyle("1971 UC1"), "1896 Beer"),
    (HorizonsDesignation.AsteroidOldStyle(4), "Vesta"),
    (HorizonsDesignation.MajorBody(606), "Titan")
  )

  // Non-sidereal target with an empty ephemeris, drawn from the pool above.
  val genNonSiderealTarget: Gen[NonSiderealTarget] =
    Gen.oneOf(nonSids).map { case (hid, name) =>
      NonSiderealTarget(name, Ephemeris.empty, Some(hid), List.empty, None, None)
    }

  /** Returns the observation's target component, creating (and attaching) one if absent. */
  def findOrCreateTargetComp(f: ISPFactory, o: ISPObservation): ISPObsComponent =
    Option(SPTreeUtil.findTargetEnvNode(o)).getOrElse {
      val tc = f.createObsComponent(o.getProgram, SPComponentType.TELESCOPE_TARGETENV, null)
      o.addObsComponent(tc)
      tc
    }

  /** Lifts a mutation of the SPProgram data object into a ProgEdit. */
  def editProgram(ef: SPProgram => Unit): ProgEdit = { (_: ISPFactory, p: ISPProgram) =>
    val dob = p.getDataObject.asInstanceOf[SPProgram]
    ef(dob)
    p.setDataObject(dob)
  }

  // Program-level edits that should make a program ineligible.
  val setInactive: ProgEdit =
    editProgram(_.setActive(SPProgram.Active.NO))

  val setCompleted: ProgEdit =
    editProgram(_.setCompleted(true))

  val setLibrary: ProgEdit =
    editProgram(_.setLibrary(true))

  val genInactiveProgram: Gen[ProgEdit] =
    Gen.oneOf(setInactive, setCompleted, setLibrary)

  // Edit that replaces a randomly picked observation's base target with an
  // (empty) sidereal target.
  val genSiderealEdit: Gen[ProgEdit] =
    for {
      f <- maybePickObservation
    } yield { (_: ISPFactory, p: ISPProgram) =>
      f(p).foreach { obs =>
        val tc = SPTreeUtil.findTargetEnvNode(obs)
        val toc = tc.getDataObject.asInstanceOf[TargetObsComp]
        toc.getBase.setTarget(SiderealTarget.empty)
        tc.setDataObject(toc)
      }
    }

  // Edit that drives a randomly picked observation into an OBSERVED or
  // INACTIVE status (both of which should exclude it from ephemeris export).
  val genInactiveObsStatus: Gen[ProgEdit] =
    for {
      f <- maybePickObservation
      s <- Gen.oneOf(ObservationStatus.OBSERVED, ObservationStatus.INACTIVE)
    } yield { (_: ISPFactory, p: ISPProgram) =>
      f(p).foreach { obs =>
        val dob = obs.getDataObject.asInstanceOf[SPObservation]
        s match {
          case ObservationStatus.OBSERVED =>
            dob.setPhase2Status(ObsPhase2Status.PHASE_2_COMPLETE)
            dob.setExecStatusOverride(Option(ObsExecStatus.OBSERVED).asGeminiOpt)
          case _ =>
            dob.setPhase2Status(s.phase2())
            dob.setExecStatusOverride(Option(ObsExecStatus.PENDING).asGeminiOpt)
        }
        obs.setDataObject(dob)
      }
    }

  // Arbitrary target environment in which, on average, half of the targets
  // are swapped for non-sidereal ones. NOTE(review): mutates the targets of
  // the generated environment in place.
  val genTargetEnv: Gen[TargetEnvironment] = {
    val r = new Random
    arbitrary[TargetEnvironment].map { te =>
      te.getTargets.asScala.foreach { t =>
        // set half the targets on average to non-sidereal
        if (r.nextInt(2) == 0) {
          t.setTarget(genNonSiderealTarget.sample.get)
        }
      }
      te
    }
  }

  // Full test program: every observation gets a generated target environment
  // and an active status (PENDING or ONGOING exec, phase-2 complete).
  val genTestProg: Gen[ISPFactory => ISPProgram] =
    genProg.map { pCons => (fact: ISPFactory) => {
      val p = pCons(fact)

      p.getAllObservations.asScala.foreach { obs =>
        val tc = findOrCreateTargetComp(fact, obs)
        val toc = tc.getDataObject.asInstanceOf[TargetObsComp]
        toc.setTargetEnvironment(genTargetEnv.sample.get)
        tc.setDataObject(toc)

        val dob = obs.getDataObject.asInstanceOf[SPObservation]
        dob.setPhase2Status(ObsPhase2Status.PHASE_2_COMPLETE)
        dob.setExecStatusOverride(Gen.oneOf(ObsExecStatus.PENDING, ObsExecStatus.ONGOING).sample.asGeminiOpt)
        obs.setDataObject(dob)
      }

      p
    }
  }
}
| spakzad/ocs | bundle/edu.gemini.spdb.reports.collection/src/test/java/edu/gemini/dbTools/ephemeris/TestSupport.scala | Scala | bsd-3-clause | 4,448 |
package com.oschrenk.delight.network
import java.time.LocalDateTime
// A bookable class as decoded from the Delight API. Field names use the
// API's PascalCase keys — presumably mapped 1:1 by the JSON decoder; confirm
// against the decoding code.
case class JsonClass(ClassID: Int, StartDateTime: LocalDateTime, EndDateTime: LocalDateTime, Name: String, WebSignup: Boolean, SignedIn: Boolean, LateCancelled: Boolean, Staff: JsonStaff, Location: JsonLocation)
// Staff member teaching a class.
case class JsonStaff(Name: String)
// Venue where a class takes place.
case class JsonLocation(Name: String, Address: String, PostalCode: String, City: String)
| oschrenk/delight | src/main/scala/com/oschrenk/delight/network/Protocol.scala | Scala | apache-2.0 | 408 |
/** Minimal cons-list interface (exercise code; shadows `scala.List`). */
trait List[T] {
  def isEmpty(): Boolean
  // head/tail throw on the empty list (see Nil below).
  def head : T
  def tail : List[T]
}
/** Non-empty list node: a value followed by the rest of the list.
 *
 *  Declaring `head` and `tail` as `val` constructor parameters makes them
 *  public fields — shorthand for taking plain parameters and assigning them
 *  to vals in the class body.
 */
class Cons[T](val head: T, val tail: List[T]) extends List[T] {
  def isEmpty() = false
  override def toString(): String = s"$head ->$tail"
}
/** The empty list: accessing `head` or `tail` is an error. */
class Nil[T] extends List[T] {
  override def toString(): String = "NULL"
  def isEmpty(): Boolean = true
  def head: Nothing = throw new NoSuchElementException("Empty List has no head")
  def tail: Nothing = throw new NoSuchElementException("Empty list has no tail")
}
//Types T also work for function
/** Demo driver exercising the hand-rolled generic list. */
object Lists extends App{
  /** A one-element list: `elem` followed by the empty list. */
  def singleton[T](elem: T) = new Cons[T](elem, new Nil[T])

  /** Returns the n-th (0-based) element of `list`.
   *
   *  Throws IndexOutOfBoundsException for n < 0 or past-the-end indices.
   *  NOTE(review): on an empty list the initial `list.head` call below throws
   *  NoSuchElementException (from Nil.head) before the bounds check runs.
   */
  def nth[T](n:Int, list:List[T]) : T ={
    // Walks the list carrying the current index, the current element and the
    // remaining tail; stops when the index reaches n.
    def loop(i:Int, elem:T, tail:List[T]): T = {
      if(i==n) elem
      else{
        if(tail.isEmpty()) throw new IndexOutOfBoundsException("" + n.toString)
        else loop(i+1, tail.head, tail.tail)
      }
    }
    if(n<0) throw new IndexOutOfBoundsException("" + n.toString())
    else loop(0, list.head, list.tail)
  }

  // NOTE(review): overriding main in an App subclass replaces App's generated
  // entry point, so extending App is redundant here.
  override def main(args:Array[String]) = {
    // explicit types
    val l1 = singleton[Int](10)
    // inferred types
    val l2 = singleton(1)
    val l3 = new Cons[Int](15, l1)
    val l4 = new Cons[Int](20, l3)
    println(nth(2, l4))
    val list= new Cons(1, new Cons(2, new Cons(3,new Nil)))
    println(nth(2,list))
    //IOOBE println(nth(-1, l4))
    //IOOBE println(nth(6,l4))
  }
}
// Lecture 02 exercise
/** Factory methods for the hand-rolled list (Lecture 02 exercise). */
object List{
  /** Builds a two-element list `x1 -> x2 -> NULL`. */
  def apply[T](x1: T, x2: T): List[T] = new Cons(x1, new Cons(x2, new Nil))
  /** Builds the empty list.
   *
   *  Fixed: previously returned `new Nil` without a type argument, which
   *  inferred `Nil[Nothing]`; since `List[T]` is invariant in `T`, the
   *  result was unusable as a `List[T]` for any requested `T`.
   */
  def apply[T](): List[T] = new Nil[T]
}
| purukaushik/scala-class | scala_class_coursera/src/main/scala/Lists.scala | Scala | gpl-3.0 | 1,694 |
package service
import models.{Address, Contact}
import play.api.Logger
import play.api.Play.current
import play.api.libs.json.{Format, JsValue}
import play.api.mvc.{Controller, Result}
import play.modules.reactivemongo.ReactiveMongoPlugin
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.Cursor
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/** Generic CRUD-ish controller backed by a ReactiveMongo JSON collection.
 *
 * Concrete services supply the collection name and a play-json `Format[T]`.
 */
trait ReactiveMongoService[T] extends Controller {

  /** Name of the MongoDB collection backing this service. */
  def collectionName: String

  // Resolves the collection lazily from the plugin's default DB on each call.
  def collection: JSONCollection = ReactiveMongoPlugin.db.collection[JSONCollection](collectionName)

  /** JSON (de)serializer for the entity type; provided by implementors. */
  implicit val formats: Format[T]

  val INVALID_JSON = "Invalid JSON"

  /** Validates `data` as a T and inserts it; BadRequest on invalid JSON.
   *
   * NOTE(review): on insert this replies Created regardless of
   * `lastError.ok` — failed writes are only logged; consider mapping them
   * to an error response.
   */
  def create(data: JsValue): Future[Result] = {
    Logger.info("############## data = " + data)
    val isValid = data.validate[T].isSuccess
    isValid match {
      case true => {
        val dataToSave = data.as[T]
        collection.insert(dataToSave) map {
          lastError =>
            Logger.info("lastError.ok = " + lastError.ok)
            Created
        }
      }
      case false => {
        Future.successful(BadRequest(INVALID_JSON))
      }
    }
  }

  // TODO - define fetch method
  /** Streams every document in the collection into a list (unbounded). */
  def fetchAll = {
    val cursor: Cursor[T] = collection.genericQueryBuilder.cursor[T]
    val data: Future[List[T]] = cursor.collect[List]()
    data
  }

  // TODO - define update method
}
/** Service over the "address" collection using the Address JSON format. */
object AddressService extends ReactiveMongoService[Address] {
  override def collectionName: String = "address"
  override val formats = Address.formats
}
/** Service over the "contact" collection using the Contact JSON format. */
object ContactService extends ReactiveMongoService[Contact] {
  override def collectionName: String = "contact"
  override val formats = Contact.formats
}
| avinash-anand/PlayScala1 | app/service/ReactiveMongoService.scala | Scala | gpl-3.0 | 1,699 |
package spire
package util
/** Scala 2.11-specific pieces of `Opt` (this file lives under a 2.11 source directory). */
trait OptVersions {
  // name-based extractor, cf. http://hseeberger.github.io/blog/2013/10/04/name-based-extractors-in-scala-2-dot-11/
  // Returning the Opt itself lets pattern matching use its isEmpty/get,
  // avoiding an Option allocation.
  def unapply[A](n: Opt[A]): Opt[A] = n
}
object OptVersions {
  // Version-dependent base type for Opt on 2.11 — presumably AnyVal because
  // Opt is a value class here; confirm against spire.util.Opt.
  type Base = AnyVal
}
| tixxit/spire | core/shared/src/main/scala_2.11/spire/util/OptVersions.scala | Scala | mit | 251 |
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.abc
import apparat.utils.{Dumpable, IndentingPrintWriter}
import compat.Platform
/** Sentinel values occupying index 0 of each pool (AVM2 reserves index 0 for "empty"). */
object AbcConstantPool {
	val EMPTY_STRING = Symbol(null)
	val EMPTY_NAMESPACE = AbcNamespace(0, EMPTY_STRING)
	val EMPTY_NSSET = AbcNSSet(Array(EMPTY_NAMESPACE))
	val EMPTY_NAME = AbcQName(EMPTY_STRING, EMPTY_NAMESPACE)
}
/** ABC (ActionScript Byte Code) constant pool.
 *
 * Each pool array reserves index 0 for its "empty" sentinel (see the
 * companion object); lookups and insertions therefore generally start at
 * index 1.
 */
class AbcConstantPool(
		val ints: Array[Int],
		val uints: Array[Long],
		val doubles: Array[Double],
		val strings: Array[Symbol],
		val namespaces: Array[AbcNamespace],
		val nssets: Array[AbcNSSet],
		val names: Array[AbcName]) extends Dumpable {

	/** Merges two pools: keeps the index-0 sentinel, then the distinct union
	 * of both pools' entries (indices of existing entries may change).
	 */
	def +(that: AbcConstantPool) = new AbcConstantPool(
		(0 :: ((ints.toList drop 1) ::: (that.ints.toList drop 1)).distinct).toArray,
		(0L :: ((uints.toList drop 1) ::: (that.uints.toList drop 1)).distinct).toArray,
		(Double.NaN :: ((doubles.toList drop 1) ::: (that.doubles.toList drop 1)).distinct).toArray,
		(AbcConstantPool.EMPTY_STRING :: ((strings.toList drop 1) ::: (that.strings.toList drop 1)).distinct).toArray,
		(AbcConstantPool.EMPTY_NAMESPACE :: ((namespaces.toList drop 1) ::: (that.namespaces.toList drop 1)).distinct).toArray,
		(AbcConstantPool.EMPTY_NSSET :: ((nssets.toList drop 1) ::: (that.nssets.toList drop 1)).distinct).toArray,
		(AbcConstantPool.EMPTY_NAME :: ((names.toList drop 1) ::: (that.names.toList drop 1)).distinct).toArray)

	def accept(visitor: AbcVisitor) = visitor visit this

	def constant(kind: Some[Int], index: Int): Any = constant(kind.get, index)

	/** Resolves a constant by kind tag and pool index.
	 * True/False/Null/Undefined carry their value in the kind itself.
	 */
	def constant(kind: Int, index: Int): Any = kind match {
		case AbcConstantType.Int => ints(index)
		case AbcConstantType.UInt => uints(index)
		case AbcConstantType.Double => doubles(index)
		case AbcConstantType.Utf8 => strings(index)
		case AbcConstantType.True => true
		case AbcConstantType.False => false
		case AbcConstantType.Null => null
		case AbcConstantType.Undefined => null
		case AbcConstantType.Namespace |
				AbcConstantType.PackageNamespace |
				AbcConstantType.InternalNamespace |
				AbcConstantType.ProtectedNamespace |
				AbcConstantType.ExplicitNamespace |
				AbcConstantType.StaticProtectedNamespace |
				AbcConstantType.PrivateNamespace => namespaces(index)
	}

	// Plain linear lookups; -1 when absent. These scan from index 0, unlike
	// the typed indexOf(kind, value) below which starts at 1.
	def indexOf(value: Int): Int = ints indexOf value

	def indexOf(value: Long): Int = uints indexOf value

	def indexOf(value: Double): Int = doubles indexOf value

	def indexOf(value: Symbol): Int = strings indexOf value

	def indexOf(value: AbcNamespace) = namespaces indexOf value

	def indexOf(value: AbcNSSet) = nssets indexOf value

	def indexOf(value: AbcName) = names indexOf value

	/** Index for an optional trait value; 0 means "no value". */
	def indexOf(kind: Option[Int], value: Option[Any]): Int = {
		value match {
			case Some(x) => indexOf(kind getOrElse error("Constant value without type."), x)
			case None => 0
		}
	}

	/** Typed lookup used for trait values. */
	def indexOf(kind: Int, value: Any): Int = {
		//
		// Undocumented: If a value type has no value associated with it. I.e.
		// Null we have to return the type of the constant.
		//
		//
		// NOTE: Although index zero is correct for certain values (e.g. type
		// is Int and value is "0" the index is usually 0) we may not return
		// it since a trait has no value associated with it if the index
		// is zero.
		//
		kind match {
			case AbcConstantType.Int => ints.indexOf(value.asInstanceOf[Int], 1)
			case AbcConstantType.UInt => uints.indexOf(value.asInstanceOf[Long], 1)
			case AbcConstantType.Double => {
				//TODO fix when fixed
				//http://lampsvn.epfl.ch/trac/scala/ticket/3291
				// NaN != NaN, so indexOf would never find it; scan with isNaN instead.
				val double = value.asInstanceOf[Double]
				if(double.isNaN) {
					for(i <- 1 until doubles.length) {
						if(doubles(i).isNaN) {
							return i
						}
					}
					-1
				} else {
					doubles.indexOf(value.asInstanceOf[Double], 1)
				}
			}
			case AbcConstantType.Utf8 => strings.indexOf(value.asInstanceOf[Symbol], 1)
			case AbcConstantType.True |
					AbcConstantType.False |
					AbcConstantType.Null |
					AbcConstantType.Undefined => kind
			case AbcConstantType.Namespace |
					AbcConstantType.PackageNamespace |
					AbcConstantType.InternalNamespace |
					AbcConstantType.ProtectedNamespace |
					AbcConstantType.ExplicitNamespace |
					AbcConstantType.StaticProtectedNamespace |
					AbcConstantType.PrivateNamespace => namespaces.indexOf(value.asInstanceOf[AbcNamespace], 1)
			case _ => 0xff
		}
	}

	override def toString = "[AbcConstantPool]"

	/** Pretty-prints every pool to the given writer. */
	override def dump(writer: IndentingPrintWriter) = {
		writer <= "ConstantPool:"
		writer withIndent {
			writer <= ints.length + " integer(s):"
			writer <<< ints
			writer <= uints.length + " uint(s):"
			writer <<< uints
			writer <= doubles.length + " double(s):"
			writer <<< doubles
			writer <= strings.length + " string(s):"
			writer withIndent writer.println(strings)("\\"" + _.name + "\\"")
			writer <= namespaces.length + " namespace(s):"
			writer <<< namespaces
			writer <= nssets.length + " namespaceset(s):"
			writer <<< nssets
			writer <= names.length + " multiname(s):"
			writer <<< names
		}
	}

	// The add methods below return a new pool containing `value` (and, for
	// composite constants, all of its parts); sentinels are never re-added.
	def add(value: Int): AbcConstantPool = new AbcConstantPool(addToPool(value, ints) { _ == value }, uints, doubles, strings, namespaces, nssets, names)

	def add(value: Long): AbcConstantPool = new AbcConstantPool(ints, addToPool(value, uints) { _ == value }, doubles, strings, namespaces, nssets, names)

	def add(value: Double): AbcConstantPool = {
		// NaN needs an isNaN predicate since NaN == NaN is false.
		if(value.isNaN) {
			new AbcConstantPool(ints, uints, addToPool(Double.NaN, doubles) { _.isNaN }, strings, namespaces, nssets, names)
		} else {
			new AbcConstantPool(ints, uints, addToPool(value, doubles) { _ == value }, strings, namespaces, nssets, names)
		}
	}

	def add(value: Symbol): AbcConstantPool = {
		if(value != AbcConstantPool.EMPTY_STRING) {
			new AbcConstantPool(ints, uints, doubles, addToPool(value, strings) { _ == value }, namespaces, nssets, names)
		} else {
			this
		}
	}

	def add(value: AbcNamespace): AbcConstantPool = {
		// A namespace references a string; add that first.
		val result = add(value.name)

		if(value != AbcConstantPool.EMPTY_NAMESPACE) {
			new AbcConstantPool(result.ints, result.uints, result.doubles, result.strings, addToPool(value, result.namespaces) { _ == value }, result.nssets, result.names)
		} else {
			result
		}
	}

	def add(value: AbcNSSet): AbcConstantPool = {
		// A namespace set references namespaces; add them all first.
		var result = this

		for(ns <- value.set) {
			result = result add ns
		}

		if(value != AbcConstantPool.EMPTY_NSSET) {
			new AbcConstantPool(result.ints, result.uints, result.doubles, result.strings, result.namespaces, addToPool(value, result.nssets) { _ == value }, result.names)
		} else {
			result
		}
	}

	def add(value: AbcName): AbcConstantPool = {
		var result = if(value != AbcConstantPool.EMPTY_NAME) {
			new AbcConstantPool(ints, uints, doubles, strings, namespaces, nssets, addToPool(value, names) { _ == value })
		} else {
			this
		}

		// Recursively add the constants each multiname flavor refers to.
		value match {
			case AbcQName(name, namespace) => {
				result = result add name
				result = result add namespace
			}
			case AbcQNameA(name, namespace) => {
				result = result add name
				result = result add namespace
			}
			case AbcRTQName(name) => result = result add name
			case AbcRTQNameA(name) => result = result add name
			case AbcRTQNameL | AbcRTQNameLA =>
			case AbcMultiname(name, nsset) => {
				result = result add name
				result = result add nsset
			}
			case AbcMultinameA(name, nsset) => {
				result = result add name
				result = result add nsset
			}
			case AbcMultinameL(nsset) => result = result add nsset
			case AbcMultinameLA(nsset) => result = result add nsset
			case AbcTypename(name, parameters) => {
				result = result add name

				for(parameter <- parameters) {
					result = result add parameter
				}
			}
		}

		result
	}

	/** Returns `array` itself when an element (from index 1 on) matches
	 * `condition`, otherwise a copy with `value` appended.
	 */
	@inline private def addToPool[T: ClassManifest](value: T, array: Array[T])(condition: T => Boolean): Array[T] = {
		var i = 1//NOTE we ignore index 0. this can lead to duplicates for the 0 entry but is safe for optionals
		val n = array.length

		while(i < n) {
			if(condition(array(i))) {
				return array
			}

			i += 1
		}

		val r = new Array[T](n + 1)
		Platform.arraycopy(array, 0, r, 0, n)
		r(n) = value
		r
	}
}
| joa/apparat | apparat-core/src/main/scala/apparat/abc/AbcConstantPool.scala | Scala | lgpl-2.1 | 8,854 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.operator.variance
import com.stratio.sparta.plugin.operator.variance.VarianceOperator
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{Matchers, WordSpec}
@RunWith(classOf[JUnitRunner])
class VarianceOperatorTest extends WordSpec with Matchers {

  "Variance operator" should {

    // processMap: extracts the configured inputField from each Row,
    // returning None when the field is absent or rejected by a filter.
    "processMap must be " in {
      // No inputField configured -> nothing is extracted.
      val schema1 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputField = new VarianceOperator("variance", schema1, Map())
      inputField.processMap(Row(1, 2)) should be(None)

      // inputField not present in the schema -> None.
      val schema2 = StructType(Seq(StructField("field3", IntegerType), StructField("field2", IntegerType)))
      val inputFields2 = new VarianceOperator("variance", schema2, Map("inputField" -> "field1"))
      inputFields2.processMap(Row(1, 2)) should be(None)

      // inputField present -> its value is extracted.
      val schema3 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputFields3 = new VarianceOperator("variance", schema3, Map("inputField" -> "field1"))
      inputFields3.processMap(Row(1, 2)) should be(Some(1))

      val schema4 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputFields4 = new VarianceOperator("variance", schema4, Map("inputField" -> "field1"))
      // FIX: was asserting on inputFields3 again (copy-paste); inputFields4 was never exercised.
      inputFields4.processMap(Row(1, 2)) should be(Some(1))

      // Other numeric types are passed through unchanged.
      val schema6 = StructType(Seq(StructField("field1", FloatType), StructField("field2", IntegerType)))
      val inputFields6 = new VarianceOperator("variance", schema6, Map("inputField" -> "field1"))
      inputFields6.processMap(Row(1.5, 2)) should be(Some(1.5))

      val schema7 = StructType(Seq(StructField("field1", LongType), StructField("field2", IntegerType)))
      val inputFields7 = new VarianceOperator("variance", schema7, Map("inputField" -> "field1"))
      inputFields7.processMap(Row(5L, 2)) should be(Some(5L))

      // A satisfied filter lets the value through...
      val schema8 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputFields8 = new VarianceOperator("variance", schema8,
        Map("inputField" -> "field1", "filters" -> "[{\\"field\\":\\"field1\\", \\"type\\": \\"<\\", \\"value\\":2}]"))
      inputFields8.processMap(Row(1, 2)) should be(Some(1L))

      // ...while an unsatisfied filter rejects it.
      val schema9 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputFields9 = new VarianceOperator("variance", schema9,
        Map("inputField" -> "field1", "filters" -> "[{\\"field\\":\\"field1\\", \\"type\\": \\">\\", \\"value\\":\\"2\\"}]"))
      inputFields9.processMap(Row(1, 2)) should be(None)

      // Multiple filters are AND-ed: one failing filter rejects the row.
      val schema10 = StructType(Seq(StructField("field1", IntegerType), StructField("field2", IntegerType)))
      val inputFields10 = new VarianceOperator("variance", schema10,
        Map("inputField" -> "field1", "filters" -> {
          "[{\\"field\\":\\"field1\\", \\"type\\": \\"<\\", \\"value\\":\\"2\\"}," +
            "{\\"field\\":\\"field2\\", \\"type\\": \\"<\\", \\"value\\":\\"2\\"}]"
        }))
      inputFields10.processMap(Row(1, 2)) should be(None)
    }

    // processReduce: computes the variance of the collected values
    // (0 for empty input, optionally rendered as a string via typeOp).
    "processReduce must be " in {
      val schema11 = StructType(Seq(StructField("field1", DoubleType)))
      val inputFields = new VarianceOperator("variance", schema11, Map())
      inputFields.processReduce(Seq()) should be(Some(0d))

      val schema12 = StructType(Seq(
        StructField("field1", IntegerType),
        StructField("field2", IntegerType),
        StructField("field3", IntegerType),
        StructField("field4", IntegerType),
        StructField("field5", IntegerType)
      ))
      val inputFields2 = new VarianceOperator("variance", schema12, Map())
      inputFields2.processReduce(Seq(Some(1), Some(2), Some(3), Some(7), Some(7))) should be(Some(8))

      val schema13 = StructType(Seq(
        StructField("field1", IntegerType),
        StructField("field2", IntegerType),
        StructField("field3", IntegerType),
        StructField("field4", FloatType),
        StructField("field5", FloatType)
      ))
      val inputFields3 = new VarianceOperator("variance", schema13, Map())
      inputFields3.processReduce(Seq(Some(1), Some(2), Some(3), Some(6.5), Some(7.5))) should be(Some(8.125))

      val schema14 = StructType(Seq())
      val inputFields4 = new VarianceOperator("variance", schema14, Map())
      inputFields4.processReduce(Seq(None)) should be(Some(0d))

      // typeOp=string renders the result as its string form.
      val schema15 = StructType(Seq(
        StructField("field1", IntegerType),
        StructField("field2", IntegerType),
        StructField("field3", IntegerType),
        StructField("field4", IntegerType),
        StructField("field5", IntegerType)
      ))
      val inputFields5 = new VarianceOperator("variance", schema15, Map("typeOp" -> "string"))
      inputFields5.processReduce(Seq(Some(1), Some(2), Some(3), Some(7), Some(7))) should be(Some("8.0"))
    }

    // processReduce with distinct=true: duplicates are dropped before
    // the variance is computed.
    "processReduce distinct must be " in {
      val schema16 = StructType(Seq(StructField("field1", IntegerType), StructField("field", IntegerType)))
      val inputFields = new VarianceOperator("variance", schema16, Map("distinct" -> "true"))
      inputFields.processReduce(Seq()) should be(Some(0d))

      val schema17 = StructType(Seq(StructField("field1", IntegerType), StructField("field", IntegerType)))
      val inputFields2 = new VarianceOperator("variance", schema17, Map("distinct" -> "true"))
      inputFields2.processReduce(Seq(Some(1), Some(2), Some(3), Some(7), Some(7))) should be(Some(6.916666666666667))

      val schema18 = StructType(Seq(StructField("field1", IntegerType), StructField("field", IntegerType)))
      val inputFields3 = new VarianceOperator("variance", schema18, Map("distinct" -> "true"))
      inputFields3.processReduce(Seq(Some(1), Some(1), Some(2), Some(3), Some(6.5), Some(7.5))) should be(Some(8.125))

      val schema19 = StructType(Seq(StructField("field1", IntegerType), StructField("field", IntegerType)))
      val inputFields4 = new VarianceOperator("variance", schema19, Map("distinct" -> "true"))
      inputFields4.processReduce(Seq(None)) should be(Some(0d))

      val schema20 = StructType(Seq(StructField("field1", IntegerType), StructField("field", IntegerType)))
      val inputFields5 = new VarianceOperator("variance", schema20, Map("typeOp" -> "string", "distinct" -> "true"))
      inputFields5.processReduce(Seq(Some(1), Some(2), Some(3), Some(7), Some(7))) should be(Some("6.916666666666667"))
    }
  }
}
| danielcsant/sparta | plugins/src/test/scala/com/stratio/sparta/plugin/operator/variance/VarianceOperatorTest.scala | Scala | apache-2.0 | 7,152 |
package pub.ayada.scala.sparkUtils.etl.read.hive
/** Configuration bean describing a "read Hive table into DataFrame" task.
 *  Field semantics (defaults shown in the signature); `sql` is the query to
 *  run against `schema`.`table`. */
case class Hive2DFProps(taskType : String = "Hive2DF",
                        id : String,
                        propsFile : String = null,
                        schema : String = "default",
                        table : String,
                        repartition : Int = 0,
                        broadcast : Boolean = false,
                        loadCount : Boolean = false,
                        printSchema : Boolean = true,
                        forceNoPersist : Boolean = false,
                        sql : String) extends pub.ayada.scala.sparkUtils.etl.read.ReadProps {

    /** Key->value dump of the main properties; the SQL text is collapsed to a
     *  single trimmed line. Output format matches the original StringBuilder
     *  version exactly. */
    override def toString() : String = {
        val sqlOneLine = sql.split("\\n").map(_.trim).mkString(" ")
        List(
            "taskType ->" + taskType,
            "id ->" + id,
            "schema ->" + schema,
            "table ->" + table,
            "loadCount ->" + loadCount,
            "printSchema ->" + printSchema,
            "sql ->" + sqlOneLine
        ).mkString(", ")
    }
}
| k-ayada/SparkETL | pub/ayada/scala/sparkUtils/etl/read/hive/Hive2DFProps.scala | Scala | apache-2.0 | 1,135 |
package synthesis
//import scala.annotation.tailrec
//*********************************** Pressburger Synthesis ************************************************//
/** Abstraction over the target language used when pretty-printing synthesized
 *  programs and conditions. Concrete modes ([[RenderingScala]],
 *  [[RenderingPython]]) supply the literal tokens for boolean constants and
 *  connectives plus the names of helper functions. */
sealed abstract class RenderingMode {
  // Boolean literals in the target language.
  val true_symbol:String
  val false_symbol:String
  // Boolean connectives.
  val and_symbol: String
  val or_symbol:String
  val not_symbol:String
  // Names of the binary min/max helpers.
  val min_symbol: String
  val max_symbol: String
  // Statement emitted when a run-time check finds that no solution exists.
  val error_string: String
  // Names of the absolute-value, gcd-of-list and lcm-of-list helpers.
  val abs_symbol: String
  val gcd_symbol: String
  val lcm_symbol: String
  // Renders the expression "operand modulo divisor" in the target language.
  def mod_function(operand: String, divisor: String): String
}
/** Renders synthesized programs as Scala source code. */
case class RenderingScala() extends RenderingMode {
  val true_symbol = "true"
  val false_symbol = "false"
  val and_symbol = "&&"
  val or_symbol = "||"
  val not_symbol = "!"
  val min_symbol = "Math.min"
  val max_symbol = "Math.max"
  val error_string = "throw new Error(\\"No solution exists\\")"
  val abs_symbol = "Math.abs"
  val gcd_symbol = "Common.gcdlist"
  val lcm_symbol = "Common.lcmlist"

  /** Renders a modulo expression. With [[APASynthesis.advanced_modulo]] the
   *  custom %% operator is used; otherwise a plain-% form that keeps the
   *  result in [0, denominator) for positive denominators is emitted. */
  def mod_function(string_numerator: String, denominator: String): String = {
    if(APASynthesis.advanced_modulo)
      string_numerator+"%%"+denominator
    else
      "("+denominator+" + "+string_numerator+"%"+denominator+")%"+denominator
  }
}
/** Renders synthesized programs as Python source code. */
case class RenderingPython() extends RenderingMode {
  val true_symbol = "True"
  val false_symbol = "False"
  val and_symbol = "and"
  val or_symbol = "or"
  val not_symbol = "not"
  val min_symbol = "min"
  val max_symbol = "max"
  val error_string = "raise Exception(\\"No solution exists\\")"
  val abs_symbol = "abs"
  val gcd_symbol = "gcd"
  val lcm_symbol = "lcm"

  /** Python's native % is emitted directly (no safe-modulo wrapping here). */
  def mod_function(operand: String, divisor: String): String = {
    operand+"%" + divisor
  }
}
/** Companion object: synthesis entry points and pure helper functions used by
 *  the [[APASynthesis]] solver (variable naming, equation partitioning and
 *  assignment propagation). */
object APASynthesis {
  /** ************* Synthesis options *************** */
  // To allow rendering expressions of the for a %% b where a %% 0 == a, and else (k %% b) is always between 0 and b-1 and congruent to k modulo b.
  var advanced_modulo = false
  // To turn off run-time checks : if true, the "throw new Error" are replaced by tuples filleds with zeros.
  var run_time_checks = false
  // Other rendering mode
  var rendering_mode:RenderingMode = RenderingScala()

  // ************* Different ways of specifying solving conditions ***************/** */
  /** All distinct output variables occurring in the given equations. */
  def getOutputVariables(eqs: List[APAEquation]):List[OutputVar] = {
    (eqs flatMap (_.output_variables)).distinct
  }

  /** Runs the solver on an already-split formula with explicit variable lists. */
  def solveLazyEquations(input_variables: List[InputVar], output_variables: List[OutputVar], eqslazy: FormulaSplit):(APACondition, APAProgram) = {
    return (new APASynthesis(eqslazy, input_variables, output_variables)).solve()
  }
  /** Runs the solver on a formula; input variables are taken from the formula
   *  itself and the resulting program is named `name`. */
  def solveLazyEquations(name: String, output_variables: List[OutputVar], eqs: APAFormula):(APACondition, APAProgram) = {
    val input_variables = eqs.input_variables
    var (cond, prog) = (new APASynthesis(eqs.getLazyEquations, input_variables, output_variables)).solve()
    prog.setName(name)
    (cond, prog)
  }
  /*def solveEquations(name: String, variables: List[OutputVar], eqs: List[APAEquation]) = {
    var (cond, prog) = (new APASynthesis(eqs, variables)).solve()
    prog.setName(name)
    (cond, prog)
  }*/

  // Overloaded `solve` front-ends: every variant conjoins the given formulas,
  // simplifies, and delegates to solveLazyEquations. When no name is given the
  // program is called "result"; when no output variables are given they are
  // read off the formula.
  def solve(name: String, output_variables: List[OutputVar], formula_sequence: APAFormula*):(APACondition, APAProgram) = {
    val formula = APAConjunction(formula_sequence.toList).simplified
    //val dnf:Stream[List[APAEquation]] = formula.getEquations
    //val output_variables = variables
    //val input_variables = formula.input_variables
    //val programs_conditions = (dnf map {solveEquations("", output_variables, _)}).toList
    solveLazyEquations(name, output_variables, formula)
  }
  def solve(name: String, formula_sequence: APAFormula*):(APACondition, APAProgram) = {
    solve(name, formula_sequence.toList)
  }
  def solve(name: String, formula_sequence: List[APAFormula]):(APACondition, APAProgram) = {
    val formula = APAConjunction(formula_sequence.toList).simplified
    solve(name, formula.output_variables, formula)
  }
  def solve(variables:List[OutputVar], formula_sequence: APAFormula*):(APACondition, APAProgram) = {
    solve(variables, formula_sequence.toList)
  }
  def solve(variables:List[OutputVar], formula_sequence: List[APAFormula]):(APACondition, APAProgram) = {
    val formula = APAConjunction(formula_sequence.toList).simplified
    solve("result", variables, formula)
  }
  def solve(formula_sequence: APAFormula*):(APACondition, APAProgram) = {
    solve(formula_sequence.toList)
  }
  def solve(formula_sequence: List[APAFormula]):(APACondition, APAProgram) = {
    val formula = APAConjunction(formula_sequence).simplified
    solve("result", formula.output_variables, formula)
  }

  /** ************* Function used in the algorithm *************** */
  val alphabet = "abcdefghijklmnopqrstuvwxyz"

  /** Picks a fresh output-variable name: first tries "ya".."yz", then falls
   *  back to "y0", "y1", ... until an unused name is found. */
  def newOutputVariable(input_existing: List[InputVar], output_existing : List[OutputVar]): OutputVar = {
    //var typical = "xyzmnpqrstuvw"
    var i = 0
    val names = (input_existing map (_.name)) ++ (output_existing map (_.name))
    (0 to 25) foreach { i =>
      val test = "y"+alphabet.substring(i, i+1)
      if(!(names contains test))
        return OutputVar(test)
    }
    while(names contains ("y"+i)) {
      i+=1
    }
    OutputVar("y"+i)
  }
  /** Picks a fresh input-variable name "k0", "k1", ... */
  def newInputVariable(input_existing: List[InputVar], output_existing : List[OutputVar]): InputVar = {
    var i = 0
    val names = (input_existing map (_.name)) ++ (output_existing map (_.name))
    while(names contains ("k"+i)) {
      i+=1
    }
    InputVar("k"+i)
  }

  // Split the list into APAEqualZero one left and not APAEqualZero on right
  def partitionPAEqualZero(eqs : List[APAEquation]):(List[APAEqualZero], List[APAEquation]) = eqs match {
    case Nil => (Nil, Nil)
    case (p@APAEqualZero(pac)::q) =>
      val (a, b) = partitionPAEqualZero(q)
      (APAEqualZero(pac)::a, b)
    case (p::q) =>
      val (a, b) = partitionPAEqualZero(q)
      (a, p::b)
  }

  // Splits the list into equalities, inequalities, and a boolean indicating if the system is consistent.
  // Strict inequalities (> 0) are normalized to non-strict ones (>= 0) by
  // subtracting 1 from the constant part (valid over the integers).
  def partitionPAGreaterEqZero(eqs : List[APAEquation]):(List[APAEqualZero], List[APAGreaterEqZero], Boolean) = eqs match {
    case Nil => (Nil, Nil, true)
    case (p@APAEqualZero(pac)::q) =>
      val (a, b, c) = partitionPAGreaterEqZero(q)
      (APAEqualZero(pac)::a, b, c)
    case (p@APAGreaterEqZero(pac)::q) =>
      val (a, b, c) = partitionPAGreaterEqZero(q)
      (a, APAGreaterEqZero(pac)::b, c)
    case (p@APAGreaterZero(APACombination(coef, o))::q) =>
      val (a, b, c) = partitionPAGreaterEqZero(q)
      (a, APAGreaterEqZero(APACombination(coef+APAInputCombination(-1), o))::b, c)
    case (APATrue()::q) =>
      partitionPAGreaterEqZero(q)
    case (APAFalse()::q) =>
      (Nil, Nil, false)
    case (APADivides(n, pac)::q) =>
      throw new Error("Divides are not supported at this point")
  }

  /** Propagates the `assignments_to_propagate` through the assignment list,
   *  accumulating further constant assignments as they surface. Assignments to
   *  variables not in `interesting_variables` that reduce to constants are
   *  dropped from the output. */
  def recursive_propagation(
      output_assignments : List[(OutputVar, APATerm)],
      assignments_to_propagate : List[(OutputVar, APACombination)],
      interesting_variables : List[OutputVar])
      : List[(OutputVar, APATerm)] =
    output_assignments match {
    case Nil => Nil
    case (v, pac@APACombination(_, _))::q => pac.replaceList(assignments_to_propagate) match {
      case pac@APACombination(APAInputCombination(_, Nil), Nil) => // We propagate constants
        if(interesting_variables contains v) {
          (v, pac)::recursive_propagation(q, (v,pac)::assignments_to_propagate, interesting_variables)
        } else {
          recursive_propagation(q, (v,pac)::assignments_to_propagate, interesting_variables)
        }
      case t => (v, t)::recursive_propagation(q, assignments_to_propagate, interesting_variables)
    }
    case (v, t)::q => (v, t.replaceList(assignments_to_propagate))::recursive_propagation(q, assignments_to_propagate, interesting_variables)
  }

  // Propagate simple assignments, by removing the introduced variables if possible.
  def propagateAssignment(y: OutputVar, t: APACombination, output_assignments : List[(OutputVar, APATerm)], output_variables_initial : List[OutputVar]): List[(OutputVar, APATerm)] = {
    recursive_propagation(output_assignments, (y,t)::Nil, output_variables_initial)
  }
}
class APASynthesis(equations: FormulaSplit, input_variables_initial:List[InputVar], output_variables_initial:List[OutputVar]) {
import APASynthesis._
var output_variables:List[OutputVar] = output_variables_initial
var output_variables_encountered:List[OutputVar] = output_variables_initial
var input_variables:List[InputVar] = input_variables_initial
var input_variables_encountered:List[InputVar] = input_variables_initial
// Global_precondition is a conjunction of disjunctions of conjunctions.
var global_precondition: List[APAFormula] = Nil
  // equation should not have output variables
  /** Registers a precondition (a formula over input variables only).
   *  Conjunctions are flattened; APAFalse makes the whole precondition false.
   *  A non-negativity constraint on a pure input combination is additionally
   *  pushed into all recorded assignments as sign information. */
  def addPrecondition (e: APAFormula):Unit = {
    val f = e.simplified
    if(f.output_variables != Nil) // Debug sentence
      throw new Exception("Error: there should be no output variables in this precondition :"+f)
    f match {
      case APATrue() =>
      case APAFalse() => setFalsePrecondition()
      case APAConjunction(l) => l foreach (addPrecondition(_))
      case APAGreaterEqZero(APACombination(i, Nil)) => // We forward the constraint.
        // i >= 0 is now known: refine the sign abstraction of `i` in every
        // input and output assignment recorded so far.
        input_assignments = input_assignments map { case assignment =>
          assignment.assumeSignInputTerm(i, PositiveZeroSign())
        }
        output_assignments = output_assignments map {
          case (v, t) =>
            (v, t.assumeSignInputTerm(i, PositiveZeroSign()))
        }
        global_precondition = f :: global_precondition
      case f =>
        global_precondition = f :: global_precondition
    }
  }
  // Collapses the global precondition to the single formula "false".
  def setFalsePrecondition() = global_precondition = APAFalse()::Nil
  // Registers a new active output variable; `encountered` also remembers it
  // forever so fresh-name generation never reuses it.
  def addOutputVar(y: OutputVar) = {
    output_variables = (y::output_variables)
    output_variables_encountered = (y::output_variables_encountered). distinct
  }
  // Removes an output variable from the active set (it stays in `encountered`).
  def delOutputVar(y: OutputVar) = output_variables -= y
  def addInputVar (y: InputVar) = {
    input_variables = (y::input_variables)
    input_variables_encountered = (y::input_variables_encountered)
  }
  // Fresh output variable, not yet added to the active set.
  def getNewOutputVarWithoutRegistering() = APASynthesis.newOutputVariable(input_variables_encountered, output_variables_encountered)
  // Fresh output variable, registered as active.
  def getNewOutputVar() = {
    val y = getNewOutputVarWithoutRegistering()
    addOutputVar(y)
    y
  }
  // Fresh input variable, registered.
  def getNewInputVar() = {
    val x = APASynthesis.newInputVariable(input_variables_encountered, output_variables_encountered)
    addInputVar(x)
    x
  }
  // List of reversed assignments: At the end, leftmost assignments should be done at the end.
  var input_assignments: List[InputAssignment] = Nil
  var output_assignments: List[(OutputVar, APATerm)] = Nil
  // Input assignments are appended (executed in order); output assignments are
  // prepended (the list is reversed at rendering time — see comment above).
  def addSingleInputAssignment (x: InputVar, t: APAInputTerm) = input_assignments = input_assignments ++ (SingleInputAssignment(x, t.simplified)::Nil)
  def addBezoutInputAssignment (xl: List[List[InputVar]], tl: List[APAInputTerm]) = input_assignments = input_assignments ++ (BezoutInputAssignment(xl, tl).simplified)
  def addOutputAssignment(y: OutputVar, t: APATerm) = output_assignments = (y, t.simplified)::output_assignments
  // Drops the (single) assignment of `y`, used when a Bezout variable turned
  // out to be a constant and was inlined.
  def removeInputAssignment(y: InputVar) = input_assignments = input_assignments remove {case SingleInputAssignment(x, _) if x == y => true; case _ => false}
/************* Functions used in the algorithm *************** */
  /** Normalizes a list of equations:
   *  - divisibility over inputs only becomes a precondition (side effect);
   *  - divisibility with output variables becomes an equality with a fresh
   *    output variable (k | e  ~~>  e + k*y == 0);
   *  - strict inequalities become non-strict ones (integer reasoning);
   *  - everything else is just simplified. */
  def simplifyEquations(equations: List[APAEquation]) : List[APAEquation] = {
    equations flatMap {
      case e@APADivides(k, APACombination(c, Nil)) =>
        addPrecondition(e.simplified)
        Nil
      case APADivides(k, APACombination(c, o)) =>
        val y = getNewOutputVar()
        APAEqualZero(APACombination(c, (k, y)::o)).simplified::Nil
      case APAGreaterZero(APACombination(c, o)) => APAGreaterEqZero(APACombination(c-APAInputCombination(1), o)).simplified::Nil
      case e => e.simplified::Nil
    }
  }
def needsLessOperations(coef1: APAInputTerm, coef2: APAInputTerm): Boolean = (coef1, coef2) match {
case (APAInputCombination(k1, Nil), APAInputCombination(k2, Nil)) => Math.abs(k1) < Math.abs(k2)
case (APAInputCombination(k1, Nil), _) => true
case (_, APAInputCombination(k2, Nil)) => false
case (_, _) => false
}
  /** Returns the remaining non_equalities (non_equalities should not contain equalities, nor will the returned term do) */
  /** Eliminates the equalities one by one (cheapest coefficients first) using
   *  extended-gcd / Bezout reasoning, recording input and output assignments
   *  as side effects, then hands the residual inequalities to solveEquations.
   *  May case-split when a coefficient gcd's sign cannot be decided statically. */
  def solveEqualities(data: FormulaSplit): APASplit = {
    val FormulaSplit(equalities, non_equalities, remaining_disjunctions) = data
    /** Make sure all equalities have at least one output variable, else remove them. */
    val (interesting_equalities, precondition_equalities) = equalities partition (_.has_output_variables)
    addPrecondition(APAConjunction(precondition_equalities))
    def minInputTerms(coef1: APAInputTerm, coef2:APAInputTerm) = if(needsLessOperations(coef1, coef2)) coef1 else coef2
    /** Sorting function (OptimizeMe) */
    /** Priority to constant terms */
    def by_least_outputvar_coef(eq1: APAEqualZero, eq2: APAEqualZero): Boolean = (eq1, eq2) match {
      case (APAEqualZero(pac1@APACombination(c1, o1)), APAEqualZero(pac2@APACombination(c2, o2))) =>
        val min_coefs_o1 = o1 map (_._1) reduceLeft (minInputTerms(_, _))
        val min_coefs_o2 = o2 map (_._1) reduceLeft (minInputTerms(_, _))
        needsLessOperations(min_coefs_o1, min_coefs_o2)
    }
    val sorted_equalities = interesting_equalities sortWith by_least_outputvar_coef
    sorted_equalities match {
      case Nil =>
        // No equalities left: continue with the inequality solver.
        val newfs = FormulaSplit(Nil, non_equalities, remaining_disjunctions)
        solveEquations(newfs)
      case (eq1@APAEqualZero(pac@APACombination(c1, o1)))::rest_equalities =>
        var const_part:APAInputTerm = c1
        var o1_coefs = o1 map (_._1)
        var o1_vars = o1 map (_._2)
        // gcd of the output-variable coefficients, with known input
        // assignments substituted in.
        val gcd = APAInputGCD(o1_coefs).replaceList(input_assignments flatMap (_.extract)).simplified
        gcd match {
          case APAInputCombination(1, Nil) =>
            // Perfect !! We know that there is a solution.
            // Continue to CONTINUE_POINT
          case n =>
            val coefs_are_zero = APAConjunction(o1_coefs map (APACombination(_)===APAInputCombination(0))).simplified
            if(coefs_are_zero == APATrue() || pac.allCoefficientsAreZero) {
              // We add the precondition const_part == 0
              addPrecondition(APACombination(const_part)===APAInputCombination(0))
              return solveEqualities(FormulaSplit(rest_equalities, non_equalities, remaining_disjunctions))
            } else if(coefs_are_zero == APAFalse() || pac.notAllCoefficientsAreZero) {
              // Regular run. We know that the GCD of the numbers is positive.
              // The equation is solvable iff gcd | const_part; divide through.
              addPrecondition(APADivides(n, APACombination(const_part, Nil)))
              val x = getNewInputVar()
              val n_positive = n.assumeSign(1)
              // Bind the gcd to a fresh input variable unless it is already a
              // trivial term (a constant, or +/- a single variable).
              val gcd_expr = n_positive match {
                case APAInputCombination(i, Nil) =>
                  n_positive
                case APAInputCombination(0, ((k, _)::Nil)) if Math.abs(k) == 1 =>
                  n_positive
                case _ =>
                  val gcd_var = getNewInputVar().assumePositive()
                  val result = APAInputCombination(gcd_var).replaceList(input_assignments flatMap (_.extract))
                  assert(result.isPositiveZero)
                  addSingleInputAssignment(gcd_var, n_positive)
                  result
              }
              addSingleInputAssignment(x, APAInputDivision(const_part, gcd_expr).simplified)
              const_part = APAInputCombination(0, (1, x)::Nil)
              o1_coefs = o1_coefs map (APAInputDivision(_, gcd_expr).simplified)
            } else {
              // Sign of the coefficients is unknown: case-split on
              // "all coefficients zero" vs "not all zero".
              var (cond1, prog1) = APASynthesis.solve(output_variables, APAEqualZero(pac.assumeAllCoefficientsAreZero) :: rest_equalities ++ non_equalities) // Case where the coefficients are null.
              cond1 = cond1.assumeBefore(coefs_are_zero)
              var (cond2, prog2) = APASynthesis.solve(output_variables, APAEqualZero(pac.assumeNotAllCoefficientsAreZero) :: rest_equalities ++ non_equalities) //solve with the knowledge that not all the coefficients are null.
              cond2 = cond2.assumeBefore(APANegation(coefs_are_zero))
              return APACaseSplit.optimized((cond1, prog1)::(cond2, prog2)::Nil)
            }
        }
        // CONTINUE_POINT
        // Now the gcd of the output variables is for sure 1.
        // We find a solution to o1_coefs.o1_vars + 1 = 0
        // Then we know that by multiplying the first line by const_part, we obtain the general solution
        // Express the input variables by assignments
        val new_input_variables: List[List[InputVar]]= o1_vars.indices.toList map { _ => o1_vars.indices.toList map { _ => getNewInputVar()}}
        addBezoutInputAssignment(new_input_variables, o1_coefs)
        // First row of the Bezout matrix, scaled by the constant part, gives a
        // particular solution. Rows that simplify to trivial terms are inlined
        // and their input variables removed.
        val first_line:List[APACombination] = new_input_variables.head map {
          case iv =>
            val p = APAInputCombination(0, (1, iv)::Nil)
            p.replaceList(input_assignments flatMap (_.extract)) match {
              case t@APAInputCombination(i, Nil) =>
                removeInputAssignment(iv)
                APACombination(const_part * t)
              case t@APAInputCombination(0, (i, v)::Nil) if Math.abs(i) == 1 => // Simple case 2
                removeInputAssignment(iv)
                APACombination(const_part * t)
              case _ => // If it's not an integer, keep the variable name.
                APACombination(const_part * p)
            }
        }
        // From this solution, we introduce |o1| - 1 new variables to solve the equality and remove the equation.
        val new_assignments:List[APACombination] = new_input_variables.tail.foldLeft(first_line:List[APACombination]) { case (assignments, line) =>
          val y = getNewOutputVar()
          (assignments zip line) map {
            case (expr:APACombination, iv) =>
              //expr + y*iv
              val p = APAInputCombination(0, (1, iv)::Nil)
              p.replaceList(input_assignments flatMap (_.extract)) match {
                case t@APAInputCombination(i, Nil) => // Simple case 2
                  removeInputAssignment(iv)
                  expr + (APACombination(y)*t)
                case t@APAInputCombination(0, (i, v)::Nil) if Math.abs(i) == 1 => // Simple case 2
                  removeInputAssignment(iv)
                  expr + (APACombination(y)*t)
                case _ =>
                  expr + (APACombination(y)*p)
              }
          }
        }
        // We add the variables if they are needed.
        //if(((new_assignments flatMap (_.input_variables)).distinct intersect new_input_variables) != Nil)
        //var new_equalities = rest_equalities
        //var new_nonequalities = non_equalities
        val assignments = (o1_vars zip new_assignments)
        assignments foreach {
          case (v, pac) => addOutputAssignment(v, pac)
          //val (new_eqs1, new_noneqs1) = partitionPAEqualZero(new_equalities map (_.replace(v, pac)))
          //val (new_eqs2, new_noneqs2) = partitionPAEqualZero(new_nonequalities map (_.replace(v, pac)))
          //new_equalities = new_eqs1 ++ new_eqs2
          //new_nonequalities = new_noneqs1 ++ new_noneqs2
          delOutputVar(v)
        }
        //var new_remaining_disjunctions = remaining_disjunctions map (_.replaceList(assignments))
        val newfs = FormulaSplit(rest_equalities, non_equalities, remaining_disjunctions).replaceList(assignments)
        solveEqualities(newfs)
    }
  }
def setRemainingVariablesToZero(output_variables : List[OutputVar]):Unit = output_variables match {
case Nil =>
case y::q =>
output_variables_initial contains y match {
case true => output_assignments = propagateAssignment(y, APACombination(APAInputCombination(0, Nil), Nil), output_assignments, output_variables_initial)
addOutputAssignment(y, APACombination(APAInputCombination(0, Nil), Nil))
delOutputVar(y)
setRemainingVariablesToZero(q)
case false => output_assignments = propagateAssignment(y, APACombination(APAInputCombination(0, Nil), Nil), output_assignments, output_variables_initial)
delOutputVar(y)
setRemainingVariablesToZero(q)
}
}
  // Returns (cond, l_left, l_right, l_remaining) such that:
  // l_left contains elements (A, a) such that A <= a*v
  // l_right contains elements (b, B) such that b*v <= B
  // l_remaining contains elements which do not contain v
  // l_cond is a list of formulas
  // Properties : The length of the stream is 3^l_formulas.size of the first element.
  // Each stream element is one sign-assumption branch: when the coefficient of
  // v in an inequality has unknown sign, the computation forks into positive /
  // zero / negative assumptions, each guarded by the corresponding formula in
  // l_formulas.
  def getInequalitiesForVariable(v: OutputVar, inequalities:List[APAGreaterEqZero]): Stream[(List[APAFormula], List[(APACombination, APAInputTerm)], List[(APAInputTerm, APACombination)], List[APAEquation])] = {
    def getInequalitiesForVariable_aux(v: OutputVar,
                                       inequalities:List[APAEquation],
                                       result: (List[APAFormula], List[(APACombination, APAInputTerm)], List[(APAInputTerm, APACombination)], List[APAEquation])
                                      ) : Stream[(List[APAFormula], List[(APACombination, APAInputTerm)], List[(APAInputTerm, APACombination)], List[APAEquation])] =
      inequalities match {
        case Nil =>
          // At this split point, we can solve.
          Stream(result)
        case ((p@APAGreaterEqZero(pac@APACombination(c, o)))::q) =>
          val (l_formulas, l_left, l_right, l_remaining)=result
          o find (_._2 == v) match {
            case None =>
              // v does not occur: the inequality goes to l_remaining unchanged.
              getInequalitiesForVariable_aux(v, q, (l_formulas, l_left, l_right, p::l_remaining))
            // NOTE(review): the pattern binds a new `v` shadowing the parameter;
            // harmless since the found pair's variable equals the outer v.
            case Some(found_element@(k, v)) =>
              if(k.isPositive)
                // c + k*v + rest >= 0  <=>  -(c + rest) <= k*v : a left bound.
                getInequalitiesForVariable_aux(v, q, (l_formulas, (APACombination(c, o - found_element) * (-1), k)::l_left, l_right, l_remaining))
              else if(k.isZero) // Should not happen, but this is just to keep a coherent skeleton.
                getInequalitiesForVariable_aux(v, q, (l_formulas, l_left, l_right, APAGreaterEqZero(APACombination(c, o - found_element))::l_remaining))
              else if(k.isNegative)
                // (-k)*v <= c + rest : a right bound.
                getInequalitiesForVariable_aux(v, q, (l_formulas, l_left, (-k, APACombination(c, o - found_element))::l_right, l_remaining))
              else {
                // Sign of k is unknown: fork into three assumption branches.
                // The helpers refine the sign abstraction of k everywhere it
                // already occurs in accumulated bounds / remaining equations.
                def replaceLeftBound(left: List[(APACombination, APAInputTerm)], t1: APAInputTerm, s: SignAbstraction): List[(APACombination, APAInputTerm)] = {
                  left map {
                    case (pac, i) =>
                      val result = (pac.assumeSignInputTerm(t1, s), i.assumeSignInputTerm(t1, s))
                      result
                  }
                }
                def replaceRightBound(right: List[(APAInputTerm, APACombination)], t1: APAInputTerm, s: SignAbstraction): List[(APAInputTerm, APACombination)] = {
                  right map {
                    case (i, pac) =>
                      val result = (i.assumeSignInputTerm(t1, s), pac.assumeSignInputTerm(t1, s))
                      result
                  }
                }
                def replaceRemaining(remaining: List[APAEquation], t1: APAInputTerm, s: SignAbstraction): List[APAEquation] = {
                  val result = remaining map (_.assumeSignInputTerm(t1, s))
                  result
                }
                (if(k.can_be_positive) { // k can be positive
                  val k_positive = k.assumeSign(1)
                  assert(k_positive.isPositive)
                  val new_l_formulas = (APACombination(k) > APAInputCombination(0))::l_formulas
                  val new_l_left = (APACombination(c, o - found_element)*(-1), k_positive)::replaceLeftBound(l_left, k, k_positive)
                  val new_l_right = replaceRightBound(l_right, k, k_positive)
                  val new_l_remaining = replaceRemaining(l_remaining, k, k_positive)
                  val new_q = replaceRemaining(q, k, k_positive)
                  getInequalitiesForVariable_aux(v, new_q, (new_l_formulas, new_l_left, new_l_right, new_l_remaining))
                } else Stream()) append
                (if(k.can_be_zero) { // k can be zero
                  val k_nul = APAInputCombination(0)
                  assert(k_nul.isZero)
                  val new_l_formulas = (APACombination(k) === APAInputCombination(0))::l_formulas
                  val new_l_left = replaceLeftBound(l_left, k, k_nul)
                  val new_l_right = replaceRightBound(l_right, k, k_nul)
                  val new_l_remaining = APAGreaterEqZero(APACombination(c, o - found_element))::replaceRemaining(l_remaining, k, k_nul)
                  val new_q = replaceRemaining(q, k, k_nul)
                  getInequalitiesForVariable_aux(v, new_q, (new_l_formulas, new_l_left, new_l_right, new_l_remaining))
                } else Stream()) append
                (if(k.can_be_negative) { // k can be negative
                  val k_negative = k.assumeSign(-1)
                  val mk_negative = -k_negative
                  assert(mk_negative.isPositive)
                  val new_l_formulas = (APACombination(k) < APAInputCombination(0))::l_formulas
                  val new_l_left = replaceLeftBound(l_left, k, k_negative)
                  val new_l_right = (mk_negative, APACombination(c, o - found_element))::replaceRightBound(l_right, k, k_negative)
                  val new_l_remaining = replaceRemaining(l_remaining, k, k_negative)
                  val new_q = replaceRemaining(q, k, k_negative)
                  getInequalitiesForVariable_aux(v, new_q, (new_l_formulas, new_l_left, new_l_right, new_l_remaining))
                } else Stream())
              }
          }
        case APATrue()::q =>
          val (l_formulas, l_left, l_right, l_remaining)=result
          getInequalitiesForVariable_aux(v, q, (l_formulas, l_left, l_right, l_remaining))
        case APAFalse()::q =>
          val (l_formulas, l_left, l_right, l_remaining)=result
          getInequalitiesForVariable_aux(v, q, (l_formulas, l_left, l_right, APAFalse()::l_remaining))
        case f::q =>
          throw new Error("Could not handle "+f)
      }
    getInequalitiesForVariable_aux(v, inequalities, (Nil, Nil, Nil, Nil))
  }
/** Solve them, so now we only have non-equalities */
/** The simplification of inequalities can generate new equalities, so we handle them. */
def solveEquations(data: FormulaSplit):APASplit = {
val FormulaSplit(equalities, non_equalities, remaining_disjunctions) = data
// Let's extract the divisibility predicates and converts them to equalities.
val (new_equalities, new_non_equalities) = partitionPAEqualZero(simplifyEquations(non_equalities))
val total_equalities = equalities ++ new_equalities
if(total_equalities != Nil) {
solveEqualities(FormulaSplit(total_equalities, new_non_equalities, remaining_disjunctions))
} else {
val filtered_non_equalities = non_equalities remove (_==APATrue())
if(filtered_non_equalities contains APAFalse()) return APAFalseSplit()
//TODO :Here, verify that we don't have remaining_disjunctions anymore before handling the rest.
if(!remaining_disjunctions.isEmpty) {
assert(equalities == Nil)
// We merge the current non equalities to the subproblems.
val problems:Stream[FormulaSplit] = remaining_disjunctions map (FormulaSplit.conjunction(FormulaSplit(Nil, filtered_non_equalities, Stream.empty), _))
// We recombine the subproblems together.
val solutions = (problems map (APASynthesis.solveLazyEquations(input_variables, output_variables, _))).toList
return APACaseSplit.optimized(solutions)
}
assert(remaining_disjunctions.isEmpty)
/** Get only inequalities, plus maybe with "False" in other */
val (equalities2, inequalities, is_consistent) = partitionPAGreaterEqZero(non_equalities)
// equalities2 should be empty given that new_non_equalities cannot contain equalities
assert(equalities2 == Nil)
if(!is_consistent) return APAFalseSplit()
/** Remove redundant inequalities, maybe generating equalities */
//val (inequalities3, equalities3, is_consistent3) = removeRedundancies(inequalities)
val (inequalities3, equalities3, is_consistent3) = (inequalities, Nil, true)
if(!is_consistent3) return APAFalseSplit()
if(equalities3 != Nil)
return solveEqualities(FormulaSplit(equalities3, inequalities3, Stream.empty))
var current_inequalities = inequalities3
var current_noninequalities = List[APAEquation]()
var is_consistent4 = true
/** Solves for unbounded variables, when there are no case splits. */
/** The value of output_variable is going to change, but we just need the initial one. */
var current_inequalities_saved = APATrue()::current_inequalities
while(current_inequalities != current_inequalities_saved) {
current_inequalities_saved = current_inequalities
output_variables foreach { y =>
getInequalitiesForVariable(y, current_inequalities) match {
case Stream((l_formula@Nil, l_left@Nil, l_right@Nil, l_remaining)) =>
setRemainingVariablesToZero(y::Nil)
case Stream((l_formula@Nil, l_left@Nil, l_right@l, l_remaining)) =>
// Only bounded on the right by equations of style b*y <= pac, so we know that the integer upper bounds are pac/b
val upper_bounds = l_right map { case (b , pac) => APADivision(pac, b).simplified }
addOutputAssignment(y, APAMinimum(upper_bounds))
delOutputVar(y)
val (eqs, ineqs, consistency) = partitionPAGreaterEqZero(l_remaining)
if(eqs != Nil) throw new Exception("Support for equalities appearing after split is not supported (yet)")
is_consistent4 &&= consistency
current_inequalities = ineqs
case Stream((l_formula@Nil, l_left@l, l_right@Nil, l_remaining)) =>
// Only bounded on the left by equations of style pac <= a*y, so we know that the integer upper bounds are (pac+a-1)/a
val lower_bounds = l_left map { case (pac, a) => APADivision(pac + APACombination(a-APAInputCombination(1)), a).simplified }
addOutputAssignment(y, APAMaximum(lower_bounds))
delOutputVar(y)
val (eqs, ineqs, consistency) = partitionPAGreaterEqZero(l_remaining)
if(eqs != Nil) throw new Exception("Support for equalities appearing after split is not supported (yet)")
current_inequalities = ineqs
is_consistent4 &&= consistency
case _ =>
}
}
}
if(!is_consistent4) return APAFalseSplit()
if(output_variables == Nil) {
addPrecondition(APAConjunction(current_inequalities))
return APAEmptySplit()
}
// Now at this point, all variables are bounded on both sides.
// Let's find the one for which the LCM of its coefficients is the smallest.
// (Number of splits, min_coef)
val output_variables_with_min_coefs:List[((Int, APAInputTerm), OutputVar)] = output_variables map {
case y =>
// Both l_left and r_right are not empty because of the previous computation
getInequalitiesForVariable(y, current_inequalities) match {
case Stream((l_formula, l_left, l_right, l_remaining)) =>
assert(l_formula == Nil) // l_left and l_right only contain integers for bounds.
val l_left_coefs:List[APAInputTerm] = l_left map (_._2)
val l_right_coefs:List[APAInputTerm] = l_right map (_._1)
val min_coef = APAInputLCM(l_left_coefs ++ l_right_coefs).simplified
((l_formula.size, min_coef), y)
case Stream.cons((l_formula, l_left, l_right, l_remaining), _) =>
// We have to split everything for this variable, so we don't do anything yet
((l_formula.size, APAInputCombination(0)), y)
}
}
def min_coef(i1:((Int, APAInputTerm), OutputVar), i2:((Int, APAInputTerm), OutputVar)) : ((Int, APAInputTerm), OutputVar) = (i1, i2) match {
case (t1@((split1, k1), v1), t2@((split2, k2), v2)) =>
if(split1 < split2) t1 else (if(split1==split2) (if(needsLessOperations(k1, k2)) t1 else t2) else t2)
}
val (_, y) = output_variables_with_min_coefs.reduceRight(min_coef(_, _))
getInequalitiesForVariable(y, current_inequalities) match {
case Stream((Nil, l_left, l_right, l_remaining)) => // The signs are fully determined !!
val (eqs, ineqs, consistency) = partitionPAGreaterEqZero(l_remaining)
if(eqs != Nil) throw new Exception("Support for equalities appearing after split is not supported (yet)")
current_inequalities = ineqs
is_consistent4 &&= consistency
if(l_right.size <= l_left.size) {
val upper_bounds = l_right map { case (b , pac) => APADivision(pac, b).simplified }
addOutputAssignment(y, APAMinimum(upper_bounds))
delOutputVar(y)
} else {
val lower_bounds = l_left map { case (pac, a) => APADivision(pac + APACombination(a-APAInputCombination(1)), a).simplified }
addOutputAssignment(y, APAMaximum(lower_bounds))
delOutputVar(y)
}
val prog_needed_afterwards = output_variables != Nil
// OptimizeMe : If a is smaller than b, use it instead of a.
var output_variables_used = (output_variables_encountered).distinct
var input_variables_used = (input_variables_encountered).distinct
// We don't care about pairs of equations that are trivial to reduce.
l_left foreach { case (eqA, a) =>
l_right foreach { case (b, eqB) =>
if(a==APAInputCombination(1, Nil)) current_inequalities = (APAGreaterEqZero(eqB-(eqA*b)))::current_inequalities
else if(b==APAInputCombination(1, Nil)) current_inequalities = (APAGreaterEqZero((eqB*a)-eqA))::current_inequalities
}
}
val l_left_filtered = l_left remove (_._2==APAInputCombination(1, Nil))
val l_right_filtered = l_right remove (_._1==APAInputCombination(1, Nil))
val lcm_value = APAInputLCM((l_left_filtered map (_._2)) ++ (l_right_filtered map (_._1))).simplified
val lcm_int:Option[Int] = lcm_value match {
case APAInputCombination(i, Nil) => Some(i)
case _=> None
}
var lcm_expr = lcm_int match {
case Some(i) => APAInputCombination(i)
case None =>
lcm_value match {
case t@APAInputCombination(0, (i, v)::Nil) => t // Simple enough to get propagated easily
case t => // "Too complicated"
val var_lcm = getNewInputVar()
addSingleInputAssignment(var_lcm, lcm_value)
APAInputCombination(var_lcm)
}
}
val l_left_normalized = l_left_filtered map {
case (eqA, a) =>
assert(a.isPositive)
eqA*(lcm_expr/a)
}
val l_right_normalized = l_right_filtered map {
case (b, eqB) =>
assert(b.isPositive)
eqB*(lcm_expr/b)
}
var collected_new_input_variables:List[InputVar] = Nil
val collected_new_equations:List[APAEquation] = l_right_normalized flatMap { case eqB =>
val new_mod_bounds = l_left_normalized map { case eqA => (eqB - eqA) } remove {
case APACombination(APAInputCombination(i, Nil), Nil) =>
lcm_int match {
case Some(lcm) => i >= lcm-1
case None => false
}
case _ => false
}
// OptimizeMe ! If eqB%L contains only input variables, assign a new input variable to it.
// OptimizeMe ! If eqB & eqA contain only input variables, add a precondition for it.
new_mod_bounds match {
case Nil => Nil // Ok, nothing to add
case _ => // We need decomposition
val k = getNewInputVar().assumePositiveZero()
val ov = getNewOutputVar()
collected_new_input_variables = k::collected_new_input_variables
val k_expr = APAInputCombination(0, (1, k)::Nil)
assert(k_expr.isPositiveZero)
val new_ineqs = new_mod_bounds map {
case mod_bound =>
val result = APACombination(k_expr, Nil) <= mod_bound
result
}
val new_eq = eqB === APACombination(k_expr, (lcm_value, ov)::Nil)
new_eq::new_ineqs
}
}
(collected_new_equations, collected_new_input_variables) match {
case (Nil, Nil) =>
if(current_inequalities == Nil) {
APAEmptySplit()
} else {
solveEquations(FormulaSplit(Nil, current_inequalities, Stream.empty))
}
case (Nil, t) => throw new Error("How could this happen ? Both variables should be Nil, but the second one is "+t)
case (t, Nil) => throw new Error("How could this happen ? Both variables should be Nil, but the first one is "+t)
case (_, _) =>
val (condition, program) = APASynthesis.solve(collected_new_equations ++ current_inequalities)
if(prog_needed_afterwards) {
APAForSplit(collected_new_input_variables, APAInputCombination(0), lcm_value-APAInputCombination(1), condition, program)
} else {
//We don't need the program. Just the precondition
APAForSplit(collected_new_input_variables, APAInputCombination(0), lcm_value-APAInputCombination(1), condition, APAProgram.empty)
}
}
case stream => // The signs are not fully determined.
// OptimizeMe ! Too bad, the split is already done, but we cannot continue that way
// because we cannot assign it in other cases. Something better ?
val possibilities = stream.toList
val solutions = possibilities.zipWithIndex map {
case (t@(l_formula, l_left, l_right, l_remaining), i) =>
//println(i + " on variable " + y + " with equations " + t)
val left_equations = l_left map {
case (p, k) =>
if(k.isNotDefined) {
APAFalse()
} else {
assert(k.isPositive)
val right_member = APACombination(y)*k
p <= right_member
}
}
val right_equations = l_right map {
case (k, p) =>
if(k.isNotDefined) {
APAFalse()
} else {
assert(k.isPositive)
val left_member = APACombination(y)*k
left_member <= p
}
}
val collected_new_equations = left_equations ++ right_equations ++ l_remaining
//println("Collected : " + collected_new_equations)
val (condition, program) = APASynthesis.solve(output_variables, collected_new_equations)
val result = (condition && APAConjunction(l_formula), program)
result
}
//println("Regrouping solutions")
APACaseSplit.optimized(solutions)
}
}
}
  /** Entry point of the synthesis.
    *
    * Solves the equation system gathered in `equations` and translates the
    * resulting split into a pair of:
    *  - an [[APACondition]]: the precondition under which a solution exists, and
    *  - an [[APAProgram]]: the program computing the output variables.
    *
    * NOTE(review): relies on instance state accumulated during solving
    * (`global_precondition`, `input_assignments`, `output_assignments`,
    * `output_variables`, ...) — presumably populated by `solveEquations`;
    * confirm against the rest of the class.
    */
  def solve():(APACondition, APAProgram) = {
    /************* Main algorithm *************** */
    //***** Step 1: There are no quantifiers by construction. Nothing to do
    //***** Step 2: Remove divisibility constraints
    // Convert "Greater" to "GreaterEq"
    // Simplify everything
    //val equations2 = equations.simplified //simplifyEquations(equations)
    //***** Step 3 : converting to DNF : Nothing to do, the argument is a conjunction
    //***** Step 4 : Case Splitting. Nothing to do.
    //***** Step 5 : Removing equalities
    // All equations without output vars go directly to the global precondition
    //val (eq_with_outputvars, eq_without_outputvars) = equations2 partition (_.has_output_variables)
    //addPrecondition(APAConjunction(eq_without_outputvars))
    // Retrieve all equalities
    //var (equalities, non_equalities) = partitionPAEqualZero(eq_with_outputvars)
    val result = solveEquations(equations)
    // Looking for variables bounded only on one side.
    result match {
      // Unsatisfiable system: condition is the (false) global precondition, no program.
      case APAFalseSplit() =>
        setFalsePrecondition()
        (APACondition.optimized(Nil, APAConjunction(global_precondition), APAEmptySplitCondition()), APAProgram.empty)
      // Case split: the condition combines each case's sub-condition; if no case
      // was produced, the remaining output variables default to zero.
      case (pa_split@APACaseSplit(list_cond_prog)) =>
        val splitted_conditions:List[APACondition] = list_cond_prog map (_._1)
        if(list_cond_prog == Nil) { // Nobody took care of the remaining output variables.
          setRemainingVariablesToZero(output_variables)
        }
        (APACondition.optimized(input_assignments, APAConjunction(global_precondition), APACaseSplitCondition(splitted_conditions)),
         APAProgram.optimized(input_variables_initial, input_assignments, pa_split, output_assignments, output_variables_initial))
      // Bounded enumeration of `vars` over [l, u]: the for-condition is kept in
      // both branches; when the inner program is empty, the program side is
      // built from an empty split instead of the for-split.
      case pa_split@APAForSplit(vars, l, u, cond, prog) =>
        prog match {
          case t if t == APAProgram.empty =>
            (APACondition.optimized(input_assignments, APAConjunction(global_precondition), APAForCondition(vars, l, u, cond)),
             APAProgram.optimized(input_variables_initial, input_assignments, APAEmptySplit(), output_assignments, output_variables_initial))
          case _ =>
            (APACondition.optimized(input_assignments, APAConjunction(global_precondition), APAForCondition(vars, l, u, cond)),
             APAProgram.optimized(input_variables_initial, input_assignments, pa_split, output_assignments, output_variables_initial))
        }
      // Fully solved: output variables that never received an assignment default to zero.
      case pa_split@APAEmptySplit() =>
        setRemainingVariablesToZero(output_variables)
        (APACondition.optimized(input_assignments, APAConjunction(global_precondition), APAEmptySplitCondition()),
         APAProgram.optimized(input_variables_initial, input_assignments, pa_split, output_assignments, output_variables_initial))
      // Any other split kind is unsupported.
      case t =>
        throw new Error("handling of "+t+" not implemented yet")
    }
  }
}
| epfl-lara/comfusy | src/main/scala/APASynthesis.scala | Scala | bsd-2-clause | 42,619 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.run
import slamdata.Predef._
import quasar.RateLimiting
import quasar.api.{QueryEvaluator, SchemaConfig}
import quasar.api.datasource.{DatasourceRef, DatasourceType, Datasources}
import quasar.api.destination.{DestinationRef, DestinationType, Destinations}
import quasar.api.push.ResultPush
import quasar.api.resource.{ResourcePath, ResourcePathType}
import quasar.api.table.{TableRef, Tables}
import quasar.common.PhaseResultTell
import quasar.connector.{QueryResult, ResourceSchema}
import quasar.connector.datasource.Datasource
import quasar.connector.destination.{Destination, DestinationModule}
import quasar.connector.evaluate._
import quasar.connector.render.ResultRender
import quasar.contrib.std.uuid._
import quasar.ejson.implicits._
import quasar.impl.{DatasourceModule, QuasarDatasource, ResourceManager, UuidString}
import quasar.impl.datasource.{AggregateResult, CompositeResult}
import quasar.impl.datasources._
import quasar.impl.datasources.middleware._
import quasar.impl.destinations._
import quasar.impl.evaluate._
import quasar.impl.push.DefaultResultPush
import quasar.impl.storage.IndexedStore
import quasar.impl.table.DefaultTables
import quasar.qscript.{construction, Map => QSMap}
import quasar.run.implicits._
import java.util.UUID
import scala.concurrent.ExecutionContext
import argonaut.Json
import argonaut.JsonScalaz._
import cats.{~>, Functor, Show}
import cats.effect.{ConcurrentEffect, ContextShift, Resource, Sync, Timer}
import cats.kernel.Hash
import cats.syntax.functor._
import cats.syntax.show._
import fs2.Stream
import fs2.job.JobManager
import matryoshka.data.Fix
import org.slf4s.Logging
import shims.{monadToScalaz, functorToCats, functorToScalaz, orderToScalaz, showToCats}
/** Bundle of the public service interfaces of a running Quasar instance,
  * constructed by [[Quasar.apply]].
  *
  * @tparam F the effect type used by all services
  * @tparam R the rendered query result type
  * @tparam C the schema configuration type used when querying datasource schemas
  */
final class Quasar[F[_], R, C <: SchemaConfig](
    val datasources: Datasources[F, Stream[F, ?], UUID, Json, C],
    val destinations: Destinations[F, Stream[F, ?], UUID, Json],
    val tables: Tables[F, UUID, SqlQuery],
    val queryEvaluator: QueryEvaluator[F, SqlQuery, Stream[F, R]],
    val resultPush: ResultPush[F, UUID, UUID])
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
object Quasar extends Logging {

  // The evaluated form of a query: either a single `QueryResult` or an
  // aggregate of per-path results, each wrapped in a QScript `Map` node
  // (see `reifyAggregateStructure`).
  type EvalResult[F[_]] = Either[QueryResult[F], AggregateResult[F, QSMap[Fix, QueryResult[F]]]]

  /** Builds a [[Quasar]] by wiring together datasource/destination/table
    * management, SQL compilation, query federation and result push from the
    * supplied stores and modules. The returned `Resource` manages the
    * lifecycle of the caches and job manager it allocates.
    */
  def apply[F[_]: ConcurrentEffect: ContextShift: MonadQuasarErr: PhaseResultTell: Timer, R, C <: SchemaConfig, A: Hash](
      datasourceRefs: IndexedStore[F, UUID, DatasourceRef[Json]],
      destinationRefs: IndexedStore[F, UUID, DestinationRef[Json]],
      tableRefs: IndexedStore[F, UUID, TableRef[SqlQuery]],
      queryFederation: QueryFederation[Fix, F, QueryAssociate[Fix, F, EvalResult[F]], Stream[F, R]],
      resultRender: ResultRender[F, R],
      resourceSchema: ResourceSchema[F, C, (ResourcePath, CompositeResult[F, QueryResult[F]])],
      rateLimiting: RateLimiting[F, A],
      byteStores: ByteStores[F, UUID])(
      datasourceModules: List[DatasourceModule],
      destinationModules: List[DestinationModule])(
      implicit
      ec: ExecutionContext)
      : Resource[F, Quasar[F, R, C]] = {

    val destModules =
      DestinationModules[F, UUID](destinationModules)

    for {
      // Warn (but do not fail) when several modules claim the same type.
      _ <- Resource.liftF(warnDuplicates[F, DatasourceModule, DatasourceType](datasourceModules)(_.kind))
      _ <- Resource.liftF(warnDuplicates[F, DestinationModule, DestinationType](destinationModules)(_.destinationType))

      // Id generator shared by datasources, destinations and tables.
      freshUUID = Sync[F].delay(UUID.randomUUID)

      (dsErrors, onCondition) <- Resource.liftF(DefaultDatasourceErrors[F, UUID])

      // Datasource modules with aggregation and error-condition reporting
      // middleware applied.
      dsModules =
        DatasourceModules[Fix, F, UUID, A](datasourceModules, rateLimiting, byteStores)
          .withMiddleware(AggregatingMiddleware(_, _))
          .withMiddleware(ConditionReportingMiddleware(onCondition)(_, _))

      // Cache of initialized datasources, disposed when the Resource is released.
      dsCache <- ResourceManager[F, UUID, QuasarDatasource[Fix, F, Stream[F, ?], CompositeResult[F, QueryResult[F]], ResourcePathType]]
      datasources <- Resource.liftF(DefaultDatasources(freshUUID, datasourceRefs, dsModules, dsCache, dsErrors, resourceSchema, byteStores))

      // Cache of initialized destinations, disposed when the Resource is released.
      destCache <- ResourceManager[F, UUID, Destination[F]]
      destinations <- Resource.liftF(DefaultDestinations(freshUUID, destinationRefs, destCache, destModules))

      // Resolves a datasource id to a running datasource whose aggregate
      // results have been reified into QScript structure.
      lookupRunning =
        (id: UUID) => datasources.quasarDatasourceOf(id).map(_.map(_.modify(reifiedAggregateDs)))

      // Query pipeline: compile SQL, route resources to datasources, federate.
      sqlEvaluator =
        Sql2Compiler[Fix, F]
          .map((_, None))
          .andThen(QueryFederator(ResourceRouter(UuidString, lookupRunning)))
          .andThen(queryFederation)

      tables = DefaultTables(freshUUID, tableRefs)

      // Background push jobs keyed by a pair of UUIDs — presumably
      // (table id, destination id); confirm against DefaultResultPush.
      jobManager <- JobManager[F, (UUID, UUID), Nothing]()

      push <- Resource.liftF(DefaultResultPush[F, UUID, UUID, SqlQuery, R](
        tableRefs.lookup,
        sqlEvaluator,
        destinations.destinationOf(_).map(_.toOption),
        jobManager,
        resultRender))

    } yield new Quasar(datasources, destinations, tables, sqlEvaluator, push)
  }

  ////

  /** Logs a warning for every type (as computed by `fn`) that more than one
    * element of `l` maps to.
    */
  private def warnDuplicates[F[_]: Sync, A, B: Show](l: List[A])(fn: A => B): F[Unit] =
    Sync[F].delay(l.groupBy(fn) foreach {
      case (b, grouped) =>
        if (grouped.length > 1) {
          log.warn(s"Found duplicate modules for type ${b.show}")
        }
    })

  // QScript function constructors; supplies the identity function (`Hole`)
  // used by `reifyAggregateStructure`.
  private val rec = construction.RecFunc[Fix]

  // Natural transformation adapting a datasource that produces composite
  // results into one producing `EvalResult`s, by wrapping each aggregate
  // element in a QScript `Map`.
  private def reifiedAggregateDs[F[_]: Functor, G[_], P <: ResourcePathType]
      : Datasource[F, G, ?, CompositeResult[F, QueryResult[F]], P] ~> Datasource[F, G, ?, EvalResult[F], P] =
    new (Datasource[F, G, ?, CompositeResult[F, QueryResult[F]], P] ~> Datasource[F, G, ?, EvalResult[F], P]) {
      def apply[A](ds: Datasource[F, G, A, CompositeResult[F, QueryResult[F]], P]) = {
        val l = Datasource.ploaders[F, G, A, CompositeResult[F, QueryResult[F]], A, EvalResult[F], P]
        l.modify(_.map(_.map(reifyAggregateStructure)))(ds)
      }
    }

  // Pairs every aggregated (path, result) element with an identity QScript
  // `Map` node (`rec.Hole`), giving downstream stages a uniform shape.
  private def reifyAggregateStructure[F[_], A](s: Stream[F, (ResourcePath, A)])
      : Stream[F, (ResourcePath, QSMap[Fix, A])] =
    s map { case (rp, a) =>
      (rp, QSMap(a, rec.Hole))
    }
}
| slamdata/quasar | run/src/main/scala/quasar/run/Quasar.scala | Scala | apache-2.0 | 6,688 |
import org.scalatest._
/** Smoke tests exercising A1 and A2 (used for coverage verification). */
class TestAll extends FlatSpec {

  behavior of "testA1"

  // A1.a1(true) is expected to yield B1.
  it should "work" in {
    assert(A1.a1(true) == B1)
  }

  behavior of "testA2"

  // A2.a2() merely has to run without throwing.
  it should "work" in {
    A2.a2()
  }
}
| smparkes/rules_scala | test/coverage/TestAll.scala | Scala | apache-2.0 | 171 |
package com.highperformancespark.examples.goldilocks
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD
/** Join examples for the "High Performance Spark" book.
  *
  * NOTE: the `//tag::`/`//end::` comments delimit asciidoc includes — the code
  * between them is reproduced verbatim in the book, so keep it as-is.
  */
object RDDJoinExamples {

 /* For example, suppose we have one RDD with some data in the form (panda id, score)
 and another RDD with (panda id, address), and we want to send each panda some mail
 with her best score. We could join the RDDs on id and then compute the best score
 for each address, as in `joinScoresWithAddress1` below.
 However, this is slower than first reducing the score data, so that the
 first dataset contains only one row for each panda with her best score, and then
 joining that data with the address data, as in `joinScoresWithAddress2`. */

  // Join first, then reduce — the slower variant.
  //tag::joinScoresWithAddress[]
  def joinScoresWithAddress1( scoreRDD : RDD[(Long, Double)],
    addressRDD : RDD[(Long, String )]) : RDD[(Long, (Double, String))]= {
    val joinedRDD = scoreRDD.join(addressRDD)
    joinedRDD.reduceByKey( (x, y) => if(x._1 > y._1) x else y )
  }
  //end::joinScoresWithAddress[]

  // Same as above but keeps pandas with no address (left outer join).
  //tag::leftOuterJoinScoresWithAddress[]
  def outerJoinScoresWithAddress( scoreRDD : RDD[(Long, Double)],
    addressRDD : RDD[(Long, String )]) : RDD[(Long, (Double, Option[String]))]= {
    val joinedRDD = scoreRDD.leftOuterJoin(addressRDD)
    joinedRDD.reduceByKey( (x, y) => if(x._1 > y._1) x else y )
  }
  //end::leftOuterJoinScoresWithAddress[]

  // Reduce first, then join — the faster variant.
  //tag::joinScoresWithAddressFast[]
  def joinScoresWithAddress2( scoreRDD : RDD[(Long, Double)],
    addressRDD : RDD[(Long, String )]) : RDD[(Long, (Double, String))]= {
    // Keep only the best score per panda before joining.
    val bestScoreData = scoreRDD.reduceByKey((x, y) => if(x > y) x else y)
    bestScoreData.join(addressRDD)
  }
  //end::joinScoresWithAddressFast[]

  /*
  We could make the example in the previous section even faster,
  by using the partitioner of the address data as an argument for
  the reduceByKey step, as shown in `joinScoresWithAddress3`. */

  //tag::joinScoresWithAddress3[]
  def joinScoresWithAddress3( scoreRDD : RDD[(Long, Double)],
    addressRDD : RDD[(Long, String )]) : RDD[(Long, (Double, String))]= {
    // If addressRDD has a known partitioner we should use that;
    // otherwise it has a default hash partitioner, which we can reconstruct
    // by getting the number of partitions.
    val addressDataPartitioner = addressRDD.partitioner match {
      case (Some(p)) => p
      case (None) => new HashPartitioner(addressRDD.partitions.length)
    }
    val bestScoreData = scoreRDD.reduceByKey(addressDataPartitioner, (x, y) => if(x > y) x else y)
    bestScoreData.join(addressRDD)
  }
  //end::joinScoresWithAddress3[]

  // Shows the lineage of a join via toDebugString.
  def debugString( scoreRDD : RDD[(Long, Double)],
    addressRDD : RDD[(Long, String )]) = {
    //tag::debugString[]
    scoreRDD.join(addressRDD).toDebugString
    //end::debugString[]
  }

  /*
   * Suppose we had two datasets of information about each panda,
   * one with the scores, and one with their favorite foods.
   * We could use cogroup to associate each panda's id with an iterator
   * of their scores and another iterator of their favorite foods.
   */
  def coGroupExample( scoreRDD : RDD[(Long, Double)], foodRDD : RDD[(Long, String )],
    addressRDD : RDD[(Long, String )]) = {
    //tag::coGroupExample1[]
    val cogroupedRDD: RDD[(Long, (Iterable[Double], Iterable[String]))] = scoreRDD.cogroup(foodRDD)
    //end::coGroupExample1[]

    /*
     * For example, if we needed to join the panda score data with both address
     * and favorite foods, it would be better to use cogroup than two
     * join operations.
     */
    //tag::coGroupExample2[]
    val addressScoreFood = addressRDD.cogroup(scoreRDD, foodRDD)
    //end::coGroupExample2[]
  }
}
| mahmoudhanafy/high-performance-spark-examples | src/main/scala/com/high-performance-spark-examples/GoldiLocks/RDDJoinExamples.scala | Scala | apache-2.0 | 3,654 |
package org.jetbrains.plugins.scala
package annotator
package element
import org.jetbrains.plugins.scala.lang.psi.api.base.ScInterpolatedStringLiteral
import org.jetbrains.plugins.scala.lang.psi.api.base.literals.{ScNullLiteral, ScSymbolLiteral}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScLiteralTypeElement
import org.jetbrains.plugins.scala.project._
/** Reports errors on literal type elements: either the compiler settings do
  * not enable literal types at all, or the literal is not a valid singleton.
  */
object ScLiteralTypeElementAnnotator extends ElementAnnotator[ScLiteralTypeElement] {

  override def annotate(element: ScLiteralTypeElement, typeAware: Boolean)
                       (implicit holder: ScalaAnnotationHolder): Unit = {
    val literalTypesSupported = holder.getCurrentAnnotationSession.getFile.literalTypesEnabled

    if (!literalTypesSupported)
      holder.createErrorAnnotation(element, ScalaBundle.message("wrong.type.no.literal.types", element.getText))
    else if (!element.singleton)
      holder.createErrorAnnotation(element, ScalaBundle.message("identifier.expected.but.0.found", describeLiteral(element)))
  }

  /** Human-readable description of a non-singleton literal, used in the error message. */
  private def describeLiteral(element: ScLiteralTypeElement): String =
    element.getLiteral match {
      case _: ScInterpolatedStringLiteral => "string interpolator"
      case _: ScNullLiteral               => "'null'"
      case _: ScSymbolLiteral             => "quoted identifier"
      // Not expected here, but better safe than sorry.
      case other                          => other.toString.stripSuffix("Sc").stripSuffix("Literal")
    }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/element/ScLiteralTypeElementAnnotator.scala | Scala | apache-2.0 | 1,353 |
package org.oleber.state
import org.oleber.State.{State, StateVisitor}
import play.api.libs.json.{Format, Json}
/** The Succeed state of a state machine.
  *
  * All fields are optional and default to `None`.
  */
case class SucceedState(
  InputPath: Option[String] = None,
  OutputPath: Option[String] = None,
  Comment: Option[String] = None
) extends State {

  /** Dispatches to the visitor's overload for [[SucceedState]]. */
  override def accept[T](visitor: StateVisitor[T]): T = visitor.visit(this)
}

object SucceedState {
  /** JSON (de)serializer derived from the case class fields. */
  val format: Format[SucceedState] = Json.format[SucceedState]
}
| oleber/aws-stepfunctions | src/main/scala/org/oleber/state/SucceedState.scala | Scala | apache-2.0 | 525 |
package com.cloudray.scalapress.account
import javax.persistence._
import com.cloudray.scalapress.theme.Markup
import scala.beans.BeanProperty
import com.cloudray.scalapress.plugin.SingleInstance
/** Persistent settings for the account plugin: page markup, header/footer
  * snippets and post-login/registration redirect targets.
  *
  * Stored as a single row (see [[SingleInstance]]).
  *
  * @author Stephen Samuel
  */
@Entity
@SingleInstance
@Table(name = "plugins_account")
class AccountPlugin {

  // Auto-generated primary key.
  @Id
  @GeneratedValue(strategy = GenerationType.AUTO)
  @BeanProperty
  var id: Long = _

  // Markup used to render the account page.
  @ManyToOne
  @JoinColumn(name = "accountPageMarkup")
  @BeanProperty
  var accountPageMarkup: Markup = _

  // Ids of associated accounts — presumably Account entity ids; confirm against callers.
  @ElementCollection
  @BeanProperty
  var accounts: java.util.List[java.lang.Long] = new java.util.ArrayList[java.lang.Long]()

  // Snippet rendered above the account page content (presumably HTML).
  @Column(length = 10000)
  @BeanProperty
  var accountPageHeader: String = _

  // Snippet rendered below the account page content.
  @Column(length = 10000)
  @BeanProperty
  var accountPageFooter: String = _

  // Snippet rendered above the login page content.
  @Column(length = 10000)
  @BeanProperty
  var loginPageHeader: String = _

  // Snippet rendered below the login page content.
  @Column(length = 10000)
  @BeanProperty
  var loginPageFooter: String = _

  // Snippet rendered above the registration page content.
  @Column(length = 10000)
  @BeanProperty
  var registrationPageHeader: String = _

  // Snippet rendered below the registration page content.
  @Column(length = 10000)
  @BeanProperty
  var registrationPageFooter: String = _

  // Content shown once registration completes.
  @Column(name = "registrationCompletionHtml", length = 10000)
  @BeanProperty
  var registrationCompletionHtml: String = _

  // Redirect target after a successful login.
  @BeanProperty
  var loginRedirect: String = _

  // Redirect target after a successful registration.
  @BeanProperty
  var registrationRedirect: String = _
} | vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/account/AccountPlugin.scala | Scala | apache-2.0 | 1,347 |
package core.exception
/**
* 订单状态异常. 比如, 对一个已经过期的账单进行支付等操作
*
* Created by zephyre on 12/17/15.
*/
class OrderStatusException(message: String, cause: Throwable) extends RuntimeException(message, cause) {
def this() = this(null, null)
def this(message: String) = this(message, null)
def this(cause: Throwable) = this(null, cause)
}
object OrderStatusException {
def apply(message: String, cause: Throwable) = new OrderStatusException(message, cause)
def apply(message: String) = new OrderStatusException(message)
def apply(cause: Throwable) = new OrderStatusException(cause)
def apply() = new OrderStatusException()
}
| Lvxingpai/Hanse | app/core/exception/OrderStatusException.scala | Scala | apache-2.0 | 691 |
package com.twitter.scalding.bdd
import com.twitter.scalding.TypedPipe
import com.twitter.scalding.Dsl
trait TypedPipeOperationsConversions {
import Dsl._
import com.twitter.scalding.typed.TDsl._
trait TypedPipeOperation[TypeOut] {
def assertPipeSize(pipes: List[TypedPipe[_]], expectedSize: Int) =
require(pipes.size == expectedSize, "Cannot apply an operation for " + expectedSize + "pipes to " + pipes.size + " pipes. " +
"Verify matching of given and when clauses in test case definition")
def apply(pipes: List[TypedPipe[_]]): TypedPipe[TypeOut]
}
class OneTypedPipeOperation[TypeIn, TypeOut](op: TypedPipe[TypeIn] => TypedPipe[TypeOut]) extends TypedPipeOperation[TypeOut] {
override def apply(pipes: List[TypedPipe[_]]): TypedPipe[TypeOut] = {
assertPipeSize(pipes, 1)
op(pipes.head.asInstanceOf[TypedPipe[TypeIn]])
}
}
class TwoTypedPipesOperation[TypeIn1, TypeIn2, TypeOut](op: (TypedPipe[TypeIn1], TypedPipe[TypeIn2]) => TypedPipe[TypeOut]) extends TypedPipeOperation[TypeOut] {
override def apply(pipes: List[TypedPipe[_]]): TypedPipe[TypeOut] = {
assertPipeSize(pipes, 2)
op(
pipes(0).asInstanceOf[TypedPipe[TypeIn1]], // linter:ignore
pipes(1).asInstanceOf[TypedPipe[TypeIn2]])
}
}
class ThreeTypedPipesOperation[TypeIn1, TypeIn2, TypeIn3, TypeOut](op: (TypedPipe[TypeIn1], TypedPipe[TypeIn2], TypedPipe[TypeIn3]) => TypedPipe[TypeOut]) extends TypedPipeOperation[TypeOut] {
override def apply(pipes: List[TypedPipe[_]]): TypedPipe[TypeOut] = {
assertPipeSize(pipes, 3)
op(
pipes(0).asInstanceOf[TypedPipe[TypeIn1]], // linter:ignore
pipes(1).asInstanceOf[TypedPipe[TypeIn2]],
pipes(2).asInstanceOf[TypedPipe[TypeIn3]])
}
}
class ListOfTypedPipesOperations[TypeOut](op: List[TypedPipe[_]] => TypedPipe[TypeOut]) extends TypedPipeOperation[TypeOut] {
override def apply(pipes: List[TypedPipe[_]]): TypedPipe[TypeOut] = op(pipes)
}
implicit def fromSingleTypedPipeFunctionToOperation[TypeIn, TypeOut](op: TypedPipe[TypeIn] => TypedPipe[TypeOut]) =
new OneTypedPipeOperation[TypeIn, TypeOut](op)
implicit def fromTwoTypedPipesFunctionToOperation[TypeIn1, TypeIn2, TypeOut](op: (TypedPipe[TypeIn1], TypedPipe[TypeIn2]) => TypedPipe[TypeOut]) =
new TwoTypedPipesOperation[TypeIn1, TypeIn2, TypeOut](op)
implicit def fromThreeTypedPipesFunctionToOperation[TypeIn1, TypeIn2, TypeIn3, TypeOut](op: (TypedPipe[TypeIn1], TypedPipe[TypeIn2], TypedPipe[TypeIn3]) => TypedPipe[TypeOut]) =
new ThreeTypedPipesOperation[TypeIn1, TypeIn2, TypeIn3, TypeOut](op)
implicit def fromListOfTypedPipesFunctionToOperation[TypeOut](op: List[TypedPipe[_]] => TypedPipe[TypeOut]) =
new ListOfTypedPipesOperations[TypeOut](op)
}
| tresata/scalding | scalding-core/src/main/scala/com/twitter/scalding/bdd/TypedPipeOperationsConversions.scala | Scala | apache-2.0 | 2,800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.r
import java.io.File
import java.util.Arrays
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.internal.config._
private[spark] object RUtils {
// Local path where R binary packages built from R source code contained in the spark
// packages specified with "--packages" or "--jars" command line option reside.
var rPackages: Option[String] = None
/**
* Get the SparkR package path in the local spark distribution.
*/
def localSparkRPackagePath: Option[String] = {
val sparkHome = sys.env.get("SPARK_HOME").orElse(sys.props.get("spark.test.home"))
sparkHome.map(
Seq(_, "R", "lib").mkString(File.separator)
)
}
/**
* Check if SparkR is installed before running tests that use SparkR.
*/
def isSparkRInstalled: Boolean = {
localSparkRPackagePath.filter { pkgDir =>
new File(Seq(pkgDir, "SparkR").mkString(File.separator)).exists
}.isDefined
}
/**
* Get the list of paths for R packages in various deployment modes, of which the first
* path is for the SparkR package itself. The second path is for R packages built as
* part of Spark Packages, if any exist. Spark Packages can be provided through the
* "--packages" or "--jars" command line options.
*
* This assumes that Spark properties `spark.master` and `spark.submit.deployMode`
* and environment variable `SPARK_HOME` are set.
*/
def sparkRPackagePath(isDriver: Boolean): Seq[String] = {
val (master, deployMode) =
if (isDriver) {
(sys.props("spark.master"), sys.props("spark.submit.deployMode"))
} else {
val sparkConf = SparkEnv.get.conf
(sparkConf.get("spark.master"), sparkConf.get(SUBMIT_DEPLOY_MODE))
}
val isYarnCluster = master != null && master.contains("yarn") && deployMode == "cluster"
val isYarnClient = master != null && master.contains("yarn") && deployMode == "client"
// In YARN mode, the SparkR package is distributed as an archive symbolically
// linked to the "sparkr" file in the current directory and additional R packages
// are distributed as an archive symbolically linked to the "rpkg" file in the
// current directory.
//
// Note that this does not apply to the driver in client mode because it is run
// outside of the cluster.
if (isYarnCluster || (isYarnClient && !isDriver)) {
val sparkRPkgPath = new File("sparkr").getAbsolutePath
val rPkgPath = new File("rpkg")
if (rPkgPath.exists()) {
Seq(sparkRPkgPath, rPkgPath.getAbsolutePath)
} else {
Seq(sparkRPkgPath)
}
} else {
// Otherwise, assume the package is local
val sparkRPkgPath = localSparkRPackagePath.getOrElse {
throw new SparkException("SPARK_HOME not set. Can't locate SparkR package.")
}
if (!rPackages.isEmpty) {
Seq(sparkRPkgPath, rPackages.get)
} else {
Seq(sparkRPkgPath)
}
}
}
/** Check if R is installed before running tests that use R commands. */
def isRInstalled: Boolean = {
try {
val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
builder.start().waitFor() == 0
} catch {
case e: Exception => false
}
}
def isEncryptionEnabled(sc: JavaSparkContext): Boolean = {
sc.conf.get(org.apache.spark.internal.config.IO_ENCRYPTION_ENABLED)
}
}
| Aegeaner/spark | core/src/main/scala/org/apache/spark/api/r/RUtils.scala | Scala | apache-2.0 | 4,262 |
package com.roundeights.shnappy
import com.roundeights.tubeutil.DateGen
import com.roundeights.foldout.{Doc, Documentable}
import com.roundeights.scalon.{nObject, nElement}
import com.roundeights.vfunk.{Validate, Filter, TextField, Err}
import java.util.UUID
/** @see SiteInfo */
object SiteInfo {

  /** Creates a new site info instance with a fresh random ID and no revision. */
  def apply (
    theme: String, title: String,
    favicon: Option[String], hosts: Set[String]
  ) = new SiteInfo(
    UUID.randomUUID, None, theme, Some(title), favicon, hosts
  )

  /** Creates a new site info instance for a single host. */
  def apply (
    theme: String, title: String, favicon: Option[String], host: String
  ): SiteInfo = apply( theme, title, favicon, Set(host) )

  /**
   * Creates an SiteInfo from a document. `Data.checktype` guards that the
   * document's type field is "siteinfo" before the fields are read.
   */
  def apply ( doc: Doc ) = Data.checktype(doc, "siteinfo") {
    new SiteInfo(
      UUID.fromString( doc.id ),
      Some( doc.rev ),
      doc.str("theme"),
      doc.str_?("title"),
      doc.str_?("favicon"),
      // "hosts" is optional in the stored document; absent means no hosts
      doc.ary_?("hosts").map( _.map( _.asString ).toSet ).getOrElse( Set() )
    )
  }

  /** Filter and validation rules for the theme: printable, trimmed, non-empty. */
  private[SiteInfo] val theme = TextField( "theme",
    Filter.chain( Filter.printable, Filter.trim ),
    Validate.notEmpty
  )

  /** Filter and validation rules for the title: printable, trimmed, non-empty. */
  private[SiteInfo] val title = TextField( "title",
    Filter.chain( Filter.printable, Filter.trim ),
    Validate.notEmpty
  )

  /**
   * Filter and validation rules for hosts. Filtering lower-cases, strips any
   * character outside [a-z0-9.-] and drops a leading "www."; validation then
   * rejects empty names and names starting or ending with a period.
   */
  val host = TextField( "host",
    Filter.chain(
      Filter.trim, Filter.lower,
      Filter.characters( Set('.', '-') ++ ('a' to 'z') ++ ('0' to '9') ),
      Filter.callback(host =>
        if ( host.startsWith("www.") ) host.drop(4) else host
      )
    ),
    Validate.and(
      Validate.notEmpty,
      // Some(err).filter(cond) yields the error only when the condition holds
      Validate.invoke( host =>
        Some( Err("HOST", "Host name must not start with a period") )
          .filter( _ => host.startsWith(".") )
      ),
      Validate.invoke( host =>
        Some( Err("HOST", "Host name must not end with a period") )
          .filter( _ => host.endsWith(".") )
      )
    )
  )
}
/** Represents data that applies to the whole site */
case class SiteInfo (
  val id: UUID,
  private val revision: Option[String],
  rawTheme: String,
  rawTitle: Option[String],
  val favicon: Option[String],
  rawHosts: Set[String]
) extends Documentable with nElement.ToJson {

  // Filtered/validated fields below throw (via `require`) if the raw values
  // fail the companion object's validation rules.

  /** The filtered and validated theme */
  val theme: String = SiteInfo.theme.process( rawTheme ).require.value

  /** The filtered and validated title */
  val title: Option[String] = rawTitle.map(
    value => SiteInfo.title.process( value ).require.value
  )

  /** The filtered and validated hosts */
  val hosts: Set[String] = rawHosts.map(
    value => SiteInfo.host.process( value ).require.value
  )

  /** Returns a copy with a different theme; id and revision are preserved. */
  def withTheme ( newTheme: String )
    = SiteInfo(id, revision, newTheme, title, favicon, hosts)

  /** Returns a copy with a different title; id and revision are preserved. */
  def withTitle ( newTitle: Option[String] )
    = SiteInfo(id, revision, theme, newTitle, favicon, hosts)

  /** Returns a copy with a different favicon; id and revision are preserved. */
  def withFavicon ( newFavicon: Option[String] )
    = SiteInfo(id, revision, theme, title, newFavicon, hosts)

  /** Returns a copy with different hosts; id and revision are preserved. */
  def withHosts ( newHosts: Set[String] )
    = SiteInfo(id, revision, theme, title, favicon, newHosts)

  /** Returns this instance as a map; "title" is only present when defined. */
  def toMap: Map[String, String] = {
    title.foldLeft( Map("theme" -> theme) ) {
      (accum, value) => accum + ("title" -> value)
    }
  }

  /** {@inheritDoc} */
  override def toDoc = Doc(
    "_id" -> id.toString,
    "_rev" -> revision,
    "type" -> "siteinfo",
    "theme" -> theme,
    "title" -> title,
    "favicon" -> favicon,
    "hosts" -> hosts,
    "updated" -> DateGen.formatNow
  )

  /** Returns a lightweight json version of this instance (id and title only) */
  def toJsonLite = nObject( "siteID" -> id.toString, "title" -> title )

  /** {@inheritDoc} */
  override def toJson = nObject(
    "siteID" -> id.toString, "theme" -> theme, "title" -> title,
    "favicon" -> favicon, "hosts" -> hosts
  )
}
| Nycto/Shnappy | src/main/scala/SiteInfo.scala | Scala | mit | 4,503 |
/** soar
*
* Copyright (c) 2017 Hugo Firth
* Email: <me@hugofirth.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ncl.la.soar.glance.web.client.component
import cats._
import cats.implicits._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import uk.ac.ncl.la.soar.data.StudentRecords
import uk.ac.ncl.la.soar.{ModuleCode, StudentNumber}
import uk.ac.ncl.la.soar.glance.web.client.component.sortable._
import scala.collection.immutable.SortedMap
object StudentsSortableTable {

  // A student's record of scores, keyed and sorted by module code.
  type Record = StudentRecords[SortedMap, ModuleCode, Double]

  /**
   * Component properties.
   *
   * @param rankModule   module whose column is being ranked (its cells are masked)
   * @param queryRecords initial ordering of the student records
   * @param headings     column titles with optional tooltip text
   * @param renderCell   renders the cell text for a record/column title pair
   * @param selectStudent callback fired when a row cell is clicked
   * @param changeRanks  callback fired after a drag-reorder, with the new order
   * @param focused      (selected, comparison) records used for row highlighting
   */
  case class Props(rankModule: ModuleCode,
                   queryRecords: List[Record],
                   headings: List[(String, Option[String])],
                   renderCell: (Record, String) => String,
                   selectStudent: Record => Callback,
                   changeRanks: (List[StudentNumber], IndexChange) => Callback,
                   focused: (Option[Record], Option[Record]) = (None, None)) {
    // Index of the ranked module's column among the headings; -1 if absent.
    val rankModuleIdx = headings.indexWhere { case (title, tip) => title == rankModule }
  }

  // As in original SortableComponent
  class Backend(bs: BackendScope[Props, List[Record]]) {

    // Renders the table: a header row built from `headings` plus one sortable
    // row per record. The ranked module's heading gets the "warning" class.
    private def tableView(wrappedP: Props) = ScalaComponent.builder[List[Record]]("TableView")
      .render(bs => {
        <.table(
          ^.className := "react-sortable-list table table-bordered table-hover",
          ^.id := "ranking-table",
          <.thead(
            <.tr(
              wrappedP.headings match {
                case hd :: tl =>
                  // First cell is the blank drag-handle column header.
                  (<.th(" ") :: <.th(hd._1) :: tl.map { h =>
                    <.th(
                      if(h._1 == wrappedP.rankModule) {
                        ^.className := "warning long-heading"
                      } else {
                        ^.className := "long-heading"
                      },
                      <.span(
                        h._2.whenDefined(t => ^.title := t),
                        h._1
                      )
                    )
                  }).toTagMod
                case Nil =>
                  EmptyVdom
              }
            )
          ),
          <.tbody(
            bs.props.zipWithIndex.toTagMod { case (value, index) =>
              sortableTr(wrappedP)(SortableElement.Props(index = index))(value)
            }
          )
        )
      })
      .build

    // Renders a single student row; the ranked module's cell is masked with
    // "?" and each cell click selects the student.
    private def trView(wrappedP: Props) = ScalaComponent.builder[Record]("TrView")
      .render(bs => {
        //Get the row columns for the given record
        val columns = wrappedP.headings.map { case (title, tip) => wrappedP.renderCell(bs.props, title) }
        //TODO: Find out why no cells are getting the warning class anymore
        val renderedColumns = columns.iterator.zipWithIndex.map({ case (c, idx) =>
          <.td(
            ^.onClick --> wrappedP.selectStudent(bs.props),
            if(idx == wrappedP.rankModuleIdx) {
              <.strong("?")
            } else {
              c
            }
          )
        }).toList
        // Highlight the row when it is the selected or the comparison record.
        val rowClass = wrappedP.focused match {
          case (Some(sel), _) if sel.number == bs.props.number => "react-sortable-item active-selecting"
          case (_, Some(comp)) if comp.number == bs.props.number => "react-sortable-item active-comparing"
          case _ => "react-sortable-item"
        }
        <.tr(
          ^.className := rowClass,
          // Prepend the drag handle cell to the data cells.
          TagMod.fromTraversableOnce(<.td(SortableView.handle) :: renderedColumns)
        )
      })
      .build

    private def sortableTr(p: Props) = SortableElement.wrap(trView(p))

    // As in original demo
    private def sortableTable(p: Props) = SortableContainer.wrap(tableView(p))

    def render(props: Props, items: List[Record]) = {
      sortableTable(props)(
        SortableContainer.Props(
          onSortEnd = { p =>
            //TODO: map.dRanks to studentNumber every time is very wasteful, fix it!
            // On drop: reorder local state, then notify the parent via changeRanks.
            for {
              ranks <- bs.state
              dRanks = p.updatedList(ranks)
              _ <- bs.setState(dRanks)
              _ <- props.changeRanks(dRanks.map(_.number), p)
            } yield ()
          },
          useDragHandle = true,
          helperClass = "react-sortable-handler"
        )
      )(items)
    }
  }

  // Component state is the current ordering of the records.
  val component = ScalaComponent.builder[Props]("SortableContainerDemo")
    .initialStateFromProps(p => p.queryRecords)
    .renderBackend[Backend]
    .build
}
| NewcastleComputingScience/student-outcome-accelerator | glance-eval/js/src/main/scala/uk/ac/ncl/la/soar/glance/web/client/component/StudentsSortableTable.scala | Scala | apache-2.0 | 4,989 |
package reader
import rescala._
import scala.language.implicitConversions
/**
 * Test helper wrapping an [[Event]] so specs can assert whether the event
 * fires (or stays silent) while a given action runs.
 */
class EventShouldFireWrapper[T](evt: Event[T]) {

  /** Asserts that the wrapped event fires at least once while `action` runs. */
  def shouldFireIn(action: => Unit): Unit = {
    var observed = false
    evt += { _ => observed = true }
    action
    assert(observed, "Event should have fired, but did not")
  }

  /** Asserts that the wrapped event never fires while `action` runs. */
  def shouldNotFireIn(action: => Unit): Unit = {
    var observed = false
    evt += { _ => observed = true }
    action
    assert(!observed, "Event should not have fired, but it did")
  }
}
object EventShouldFireWrapper {
  /**
   * Implicitly enriches an [[Event]] with `shouldFireIn` / `shouldNotFireIn`.
   * The result type is now spelled out explicitly: implicit definitions
   * should never rely on an inferred public signature (and newer Scala
   * versions warn on, or reject, implicits without one).
   */
  implicit def convertToEventShouldFireWrapper[T](evt: Event[T]): EventShouldFireWrapper[T] =
    new EventShouldFireWrapper(evt)
}
| volkc/REScala | Examples/RSSReader/ReactiveScalaReader.Events/src/test/scala/reader/EventShouldFireWrapper.scala | Scala | apache-2.0 | 614 |
package org.cg.scala.dhc.util
import java.io.File
/**
* Created by ssmertnig on 4/22/17.
*/
object xmlUtil {
  // Reads the file at `file.canonicalPath`, runs the text through
  // XmlAnnotator.annotate, and parses the annotated result with XML.loadString.
  // NOTE(review): `XML` is not imported here, so it presumably resolves to a
  // project-local loader rather than scala.xml.XML — confirm before assuming
  // scala-xml parsing semantics.
  def getXml(file: FileInfo) = XML.loadString(XmlAnnotator.annotate(FileUtil.fileToString(new File(file.canonicalPath))))
}
| curiosag/datahubchecker | datahubchecker-utility/src/main/scala/org/cg/scala/dhc/util/XmlUtil.scala | Scala | unlicense | 241 |
/*
* Copyright 2013-2015 Michael Krolikowski
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mkroli.dns4s.section
import com.github.mkroli.dns4s.MessageBuffer
import com.github.mkroli.dns4s.MessageBufferEncoder
import com.github.mkroli.dns4s.section.resource._
/** RDATA payload of a resource record; encodes itself via MessageBufferEncoder. */
trait Resource extends MessageBufferEncoder
/**
 * A DNS resource record.
 *
 * `type` and `class` must fit in 16 bits, `ttl` in 32 bits (enforced by the
 * `require` checks below).
 */
case class ResourceRecord(name: String, `type`: Int, `class`: Int, ttl: Long, rdata: Resource) extends MessageBufferEncoder {
  require(`type` >= 0 && `type` < (1 << 16))
  require(`class` >= 0 && `class` < (1 << 16))
  require(ttl >= 0 && ttl < (1L << 32))

  // Encodes in wire order: NAME, 2-byte TYPE, 2-byte CLASS, 4-byte TTL,
  // then RDATA prefixed with its 2-byte length.
  def apply(buf: MessageBuffer) = {
    buf
      .putDomainName(name)
      .putUnsignedInt(2, `type`)
      .putUnsignedInt(2, `class`)
      .putUnsignedLong(4, ttl)
      .putLengthOf(2, rdata.apply)
  }
}
object ResourceRecord {

  // DNS resource record TYPE codes.
  val typeA = 1
  val typeNS = 2
  val typeMD = 3
  val typeMF = 4
  val typeCNAME = 5
  val typeSOA = 6
  val typeMB = 7
  val typeMG = 8
  val typeMR = 9
  val typeNULL = 10
  val typeWKS = 11
  val typePTR = 12
  val typeHINFO = 13
  val typeMINFO = 14
  val typeMX = 15
  val typeTXT = 16
  val typeAAAA = 28
  val typeSRV = 33
  val typeNAPTR = 35
  val typeOPT = 41

  // QTYPE codes (valid only in questions).
  val qtypeAXFR = 252
  val qtypeMAILB = 253
  val qtypeMAILA = 254
  val qtypeAsterisk = 255
  val typeCAA = 257

  // CLASS / QCLASS codes.
  val classIN = 1
  val classCS = 2
  val classCH = 3
  val classHS = 4
  val qclassAsterisk = 255

  /**
   * Decodes a resource record from the buffer in wire order: NAME, TYPE,
   * CLASS, TTL, RDLENGTH, then RDATA. The reads below are order-sensitive.
   * RDATA is parsed by the decoder matching TYPE; unrecognised types fall
   * back to UnknownResource.
   */
  def apply(buf: MessageBuffer) = {
    val name = buf.getDomainName()
    val `type` = buf.getUnsignedInt(2)
    val `class` = buf.getUnsignedInt(2)
    val ttl = buf.getUnsignedLong(4)
    val rdlength = buf.getUnsignedInt(2)
    // processBytes limits the RDATA decoder to exactly `rdlength` bytes.
    val rdata = buf.processBytes(rdlength) {
      `type` match {
        case `typeA` => AResource(buf)
        case `typeAAAA` => AAAAResource(buf)
        case `typeSRV` => SRVResource(buf)
        case `typeNAPTR` => NAPTRResource(buf)
        case `typeOPT` => OPTResource(buf, rdlength)
        case `typeNS` => NSResource(buf)
        case `typeCNAME` => CNameResource(buf)
        case `typeSOA` => SOAResource(buf)
        case `typePTR` => PTRResource(buf)
        case `typeHINFO` => HInfoResource(buf)
        case `typeMX` => MXResource(buf)
        case `typeTXT` => TXTResource(buf, rdlength)
        case `typeCAA` => CAAResource(buf, rdlength)
        case _ => UnknownResource(buf, rdlength, `type`)
      }
    }
    new ResourceRecord(name, `type`, `class`, ttl, rdata)
  }
}
| mkroli/dns4s | core/src/main/scala/com/github/mkroli/dns4s/section/ResourceRecord.scala | Scala | apache-2.0 | 3,155 |
package linguistic
import akka.stream.ActorMaterializer
import akka.cluster.sharding.ShardRegion
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import linguistic.protocol.{HomophonesQuery, WordsQuery}
import linguistic.ps.{HomophonesSubTreeShardEntity, WordShardEntity}
/** Factory for [[Searches]] actors; runs them on the dedicated shard dispatcher. */
object Searches {
  def props(mat: ActorMaterializer, wordslist: ActorRef, homophones: ActorRef) =
    Props(new Searches(mat, wordslist, homophones))
      .withDispatcher("shard-dispatcher")
}
/**
 * Routes search queries and shard-region administration messages to the
 * word-list and homophones shard regions.
 */
class Searches(mat: ActorMaterializer, wordslist: ActorRef, homophones: ActorRef) extends Actor with ActorLogging {

  /**
   * Forwards a shard-region administration message to the region selected by
   * `name`. Unknown names are dropped: previously the GetShardRegionState
   * branch had no fallback (unlike the other two), so an unknown name would
   * have crashed the actor with a MatchError.
   */
  private def forwardToRegion(name: String, m: Any): Unit = name match {
    case WordShardEntity.Name              => wordslist forward m
    case HomophonesSubTreeShardEntity.Name => homophones forward m
    case _                                 => // unknown region name: ignore
  }

  override def receive: Receive = {
    case search: WordsQuery =>
      wordslist forward search

    case search: HomophonesQuery =>
      homophones forward search

    // Works only for local regions.
    case (name: String, m @ ShardRegion.GetShardRegionState) =>
      forwardToRegion(name, m)

    case (name: String, m @ ShardRegion.GetCurrentRegions) =>
      forwardToRegion(name, m)

    case (name: String, m @ ShardRegion.GetClusterShardingStats) =>
      forwardToRegion(name, m)
  }
}
| haghard/linguistic | server/src/main/scala/linguistic/Searches.scala | Scala | apache-2.0 | 1,557 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.utils
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql._
import org.apache.calcite.sql.`type`.SqlOperandTypeChecker.Consistency
import org.apache.calcite.sql.`type`._
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.functions.utils.ScalarSqlFunction._
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils._
import scala.collection.JavaConverters._
/**
* Calcite wrapper for user-defined scalar functions.
*
* @param name function name (used by SQL parser)
* @param scalarFunction scalar function to be called
* @param typeFactory type factory for converting Flink's between Calcite's types
*/
class ScalarSqlFunction(
    name: String,
    scalarFunction: ScalarFunction,
    typeFactory: FlinkTypeFactory)
  extends SqlFunction(
    new SqlIdentifier(name, SqlParserPos.ZERO),
    createReturnTypeInference(name, scalarFunction, typeFactory),
    createOperandTypeInference(scalarFunction, typeFactory),
    createOperandTypeChecker(name, scalarFunction),
    null,
    SqlFunctionCategory.USER_DEFINED_FUNCTION) {

  /**
   * The wrapped user-defined scalar function. The return type is now
   * explicit: public members of a library class should not expose an
   * inferred signature.
   */
  def getScalarFunction: ScalarFunction = scalarFunction

  /** Deterministic iff the wrapped user-defined function is deterministic. */
  override def isDeterministic: Boolean = scalarFunction.isDeterministic
}
object ScalarSqlFunction {

  /**
   * Builds the Calcite return-type inference for a [[ScalarFunction]]:
   * resolves the matching `eval` signature from the operand types and maps
   * its result type back to a (nullable) Calcite type.
   */
  private[flink] def createReturnTypeInference(
      name: String,
      scalarFunction: ScalarFunction,
      typeFactory: FlinkTypeFactory)
    : SqlReturnTypeInference = {
    /**
      * Return type inference based on [[ScalarFunction]] given information.
      */
    new SqlReturnTypeInference {
      override def inferReturnType(opBinding: SqlOperatorBinding): RelDataType = {
        // NULL-typed operands (e.g. a literal NULL) are represented as null
        // so signature lookup can treat them as wildcards.
        val parameters = opBinding
          .collectOperandTypes()
          .asScala
          .map { operandType =>
            if (operandType.getSqlTypeName == SqlTypeName.NULL) {
              null
            } else {
              FlinkTypeFactory.toTypeInfo(operandType)
            }
          }
        val foundSignature = getEvalMethodSignature(scalarFunction, parameters)
        if (foundSignature.isEmpty) {
          throw new ValidationException(
            s"Given parameters of function '$name' do not match any signature. \\n" +
              s"Actual: ${signatureToString(parameters)} \\n" +
              s"Expected: ${signaturesToString(scalarFunction, "eval")}")
        }
        val resultType = getResultTypeOfScalarFunction(scalarFunction, foundSignature.get)
        val t = typeFactory.createTypeFromTypeInfo(resultType)
        // UDF results may be null at runtime, so the SQL type is nullable.
        typeFactory.createTypeWithNullability(t, nullable = true)
      }
    }
  }

  /**
   * Builds the Calcite operand-type inference: fills in operand types from
   * the resolved `eval` signature, expanding a trailing varargs (array)
   * parameter over the remaining operands.
   */
  private[flink] def createOperandTypeInference(
      scalarFunction: ScalarFunction,
      typeFactory: FlinkTypeFactory)
    : SqlOperandTypeInference = {
    /**
      * Operand type inference based on [[ScalarFunction]] given information.
      */
    new SqlOperandTypeInference {
      override def inferOperandTypes(
          callBinding: SqlCallBinding,
          returnType: RelDataType,
          operandTypes: Array[RelDataType]): Unit = {
        val operandTypeInfo = getOperandTypeInfo(callBinding)
        val foundSignature = getEvalMethodSignature(scalarFunction, operandTypeInfo)
          .getOrElse(throw new ValidationException(s"Operand types of could not be inferred."))
        val inferredTypes = scalarFunction
          .getParameterTypes(foundSignature)
          .map(typeFactory.createTypeFromTypeInfo)
        for (i <- operandTypes.indices) {
          if (i < inferredTypes.length - 1) {
            operandTypes(i) = inferredTypes(i)
          } else if (null != inferredTypes.last.getComponentType) {
            // last argument is a collection, the array type
            operandTypes(i) = inferredTypes.last.getComponentType
          } else {
            operandTypes(i) = inferredTypes.last
          }
        }
      }
    }
  }

  /**
   * Builds the Calcite operand-type checker: derives the allowed operand
   * count range from all `eval` overloads (treating a trailing array
   * parameter as varargs) and validates actual operands against them.
   */
  private[flink] def createOperandTypeChecker(
      name: String,
      scalarFunction: ScalarFunction)
    : SqlOperandTypeChecker = {

    val signatures = getMethodSignatures(scalarFunction, "eval")

    /**
      * Operand type checker based on [[ScalarFunction]] given information.
      */
    new SqlOperandTypeChecker {
      override def getAllowedSignatures(op: SqlOperator, opName: String): String = {
        s"$opName[${signaturesToString(scalarFunction, "eval")}]"
      }

      override def getOperandCountRange: SqlOperandCountRange = {
        var min = 255
        var max = -1
        signatures.foreach( sig => {
          var len = sig.length
          if (len > 0 && sig(sig.length - 1).isArray) {
            max = 254 // according to JVM spec 4.3.3
            len = sig.length - 1
          }
          max = Math.max(len, max)
          min = Math.min(len, min)
        })
        SqlOperandCountRanges.between(min, max)
      }

      override def checkOperandTypes(
          callBinding: SqlCallBinding,
          throwOnFailure: Boolean)
        : Boolean = {
        val operandTypeInfo = getOperandTypeInfo(callBinding)
        val foundSignature = getEvalMethodSignature(scalarFunction, operandTypeInfo)
        if (foundSignature.isEmpty) {
          if (throwOnFailure) {
            throw new ValidationException(
              s"Given parameters of function '$name' do not match any signature. \\n" +
                s"Actual: ${signatureToString(operandTypeInfo)} \\n" +
                s"Expected: ${signaturesToString(scalarFunction, "eval")}")
          } else {
            false
          }
        } else {
          true
        }
      }

      override def isOptional(i: Int): Boolean = false

      override def getConsistency: Consistency = Consistency.NONE
    }
  }
}
| hongyuhong/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/ScalarSqlFunction.scala | Scala | apache-2.0 | 6,692 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package bootstrap.liftweb
package checks
import net.liftweb.common._
import org.slf4j.LoggerFactory
import net.liftweb.common.Logger
import net.liftweb.common.Failure
import com.normation.rudder.repository.RoDirectiveRepository
import com.normation.rudder.domain.logger.MigrationLogger
import com.normation.rudder.domain.policies.Directive
import com.normation.utils.Control.sequence
import com.normation.eventlog.ModificationId
import com.normation.utils.StringUuidGenerator
import com.normation.rudder.domain.eventlog._
import com.normation.rudder.repository.WoDirectiveRepository
/**
* See http://www.rudder-project.org/redmine/issues/3152
*
* That class checks that interpolated variable in Directive variable
* (variable with syntax: ${some_var} ) are correctly using the "rudder"
* namespace, so that that doesn't clash with CFEngine interpolation.
*
* The problem is that an user may have used its own interpolated
* variable, and so we don't have an exaustive list of directive
* to change.
*
* The check is idempotent, or almost - we have a non-atomic datastore
* (LDAP), and so we adopt that strategy:
* - check if system directive are migrated (i.e, their intepolated
* variables use ${rudder.VAR}
* - if the migration is done, end.
* - else, get all directives with interpolated variable
* - migrate all variable non starting with ${rudder.]
* (here, we do make the assumption that the user never used
* cfengine variables in directive values
* - save all non-system variables
* - save system variable.
*
* Hence, by terminating with system variable, we are assured that
* we don't miss any user defined variable.
*
*/
class CheckMigrationDirectiveInterpolatedVariablesHaveRudderNamespace(
    roRepos: RoDirectiveRepository
  , rwRepos: WoDirectiveRepository
  , uuidGen: StringUuidGenerator
) extends BootstrapChecks {

  override val description = "Check that directive variables use the namespace 'rudder'"

  private[this] object logger extends Logger {
    override protected def _logger = LoggerFactory.getLogger("migration")
    // Logs the full failure message chain, plus the root exception if any.
    val defaultErrorLogger : Failure => Unit = { f =>
      _logger.error(f.messageChain)
      f.rootExceptionCause.foreach { ex =>
        _logger.error("Root exception was:", ex)
      }
    }
  }

  // Matches one interpolated variable, e.g. "${foo.bar}"; group 1 is its name.
  private[this] val variableRegex = """\\$\\{(.*)\\}""".r

  /**
   * Entry point: if any system directive still contains an un-namespaced
   * interpolated variable, migrate every directive (user first, then system —
   * finishing with system directives makes the check idempotent since they
   * act as the "migration done" marker).
   */
  override def checks() : Unit = {
    roRepos.getFullDirectiveLibrary match {
      case eb:EmptyBox =>
        val f = (eb ?~! "Can not check that Rudder interpolated variable in directive variables use 'rudder' namespace")
        logger.defaultErrorLogger(f)
      case Full(fullLibrary) =>
        val (systemDirectives, userDirectives) = fullLibrary.allDirectives.values.map(_._2).partition(d => d.isSystem)
        if(systemDirectives.exists { d => migrateDirectiveParametersToRudderNamespace(d).isDefined}) {
          //migrate everything
          val newUserDirectives = migrateDirectives(userDirectives.toSeq)
          val newSystemDirectives = migrateDirectives(systemDirectives.toSeq)
          //generate a unique modification ID for the whole migration process
          val modId = ModificationId(uuidGen.newUuid)
          logger.info("Starting migration of inline variables in Directives (i.e variables with ${XXX} syntax) to new 'rudder' namespace (i.e to syntax ${rudder.XXX})")
          (sequence(newUserDirectives ++ newSystemDirectives) { directive =>
            val message = "Migrating inline variables in Directive %s (uuid: %s) so that they use the new 'rudder' namespace".format(directive.name, directive.id.value)
            logger.info(message)
            val activeTechnique = fullLibrary.allDirectives(directive.id)._1
            for {
              saved <- if(directive.isSystem) {
                         rwRepos.saveSystemDirective(activeTechnique.id, directive, modId, RudderEventActor, Some(message))
                       } else {
                         rwRepos.saveDirective(activeTechnique.id, directive, modId, RudderEventActor, Some(message))
                       }
            } yield {
              saved
            }
          }) match {
            case eb: EmptyBox =>
              val f = (eb ?~! "Can not finish the migration process due to an error")
              logger.defaultErrorLogger(f)
            case Full(res) =>
              logger.info("Migration of inline variables in Directives to new 'rudder' namespace succeeded")
          }
        } else {
          //OK, migration done
          logger.info("Migration of inline variables in Directives to 'rudder' namespace already done, skipping")
        }
    }
  }

  /**
   * Migrate a list of directive. Return only directives that actually need
   * to be migrated.
   */
  private[this] def migrateDirectives(directives:Seq[Directive]) : Seq[Directive] = {
    directives.flatMap { case d =>
      migrateDirectiveParametersToRudderNamespace(d).map { params => d.copy(parameters = params) }
    }
  }

  /**
   * Find all directive parameters with interpolated variable that does not
   * already use the rudder namespace.
   *
   * Return the map of migrated parameters, or None if the directive does not
   * contain any variable to migrate (either no interpolated variables at all,
   * or all of them already in the 'rudder' namespace).
   */
  private[this] def migrateDirectiveParametersToRudderNamespace(directive:Directive) : Option[Map[String, Seq[String]]] = {
    // Build a map key -> (migrated values, wasMigrated): each value keeps a
    // flag telling whether it had to be rewritten.
    val migrated : Map[String, (Seq[String],Boolean)] = directive.parameters.map { case (key, params) =>
      val newParams = for {
        param <- params
      } yield {
        useANonRudderNamespace(param) match {
          case Some(notMigratedParamName) =>
            ("${rudder." + notMigratedParamName + "}" , true)
          case None => (param,false)
        }
      }
      // The parameter was migrated if any of its values was rewritten.
      // (Replaces the deprecated `/:` foldLeft — whose accumulator was
      // confusingly named "paste" — with the equivalent `exists`.)
      val wasMigrated = newParams.exists(_._2)
      (key,(newParams.map(_._1),wasMigrated))
    }
    if(migrated.exists { case (_, (_,wasMigrated)) => wasMigrated }) {
      Some(migrated.map { case (k, (params, _) ) => (k, params) } )
    } else {
      None
    }
  }

  /**
   * Return the name of the variable that is NOT in the Rudder namespace, or
   * None when the value is not an interpolated variable or is already
   * migrated. Note: the value must be exactly one variable, alone — mixed
   * content like "${foo.bar} ${rudder.plop}" is all-or-nothing.
   */
  private[this] def useANonRudderNamespace(s:String) : Option[String] = {
    s.toLowerCase match {
      case variableRegex(x) if(!x.startsWith("rudder.")) => Some(x)
      case _ => None
    }
  }
}
| armeniaca/rudder | rudder-web/src/main/scala/bootstrap/liftweb/checks/CheckMigrationDirectiveInterpolatedVariablesHaveRudderNamespace.scala | Scala | gpl-3.0 | 8,763 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.lang.{Long => JLong}
import java.nio.CharBuffer
import java.util
import scala.collection.mutable.StringBuilder
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.misc.Interval
import org.antlr.v4.runtime.tree.TerminalNode
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.{CurrentOrigin, Origin}
import org.apache.spark.sql.errors.QueryParsingErrors
/**
* A collection of utility methods for use during the parsing process.
*/
object ParserUtils {
// Escape-sequence recognizers used by unescapeSQLString. Each matches at the
// START of the remaining input ("(?s).*" consumes the tail) and captures the
// escape payload in group 1.
val U16_CHAR_PATTERN = """\\\\u([a-fA-F0-9]{4})(?s).*""".r
val U32_CHAR_PATTERN = """\\\\U([a-fA-F0-9]{8})(?s).*""".r
val OCTAL_CHAR_PATTERN = """\\\\([01][0-7]{2})(?s).*""".r
val ESCAPED_CHAR_PATTERN = """\\\\((?s).)(?s).*""".r
/**
 * Get the command which created the token: the full text of the input
 * stream backing the given parse-tree node.
 */
def command(ctx: ParserRuleContext): String = {
  val stream = ctx.getStart.getInputStream
  stream.getText(Interval.of(0, stream.size() - 1))
}
/** Raise a parse error for an operation that is not allowed in this context. */
def operationNotAllowed(message: String, ctx: ParserRuleContext): Nothing = {
  throw QueryParsingErrors.operationNotAllowedError(message, ctx)
}

/** Fail if the given clause appears more than once in the statement. */
def checkDuplicateClauses[T](
    nodes: util.List[T], clauseName: String, ctx: ParserRuleContext): Unit = {
  if (nodes.size() > 1) {
    throw QueryParsingErrors.duplicateClausesError(clauseName, ctx)
  }
}
/**
 * Check if duplicate keys exist in a set of key-value pairs; fails on the
 * first key that occurs more than once.
 */
def checkDuplicateKeys[T](keyPairs: Seq[(String, T)], ctx: ParserRuleContext): Unit = {
  keyPairs.groupBy(_._1).filter(_._2.size > 1).foreach { case (key, _) =>
    throw QueryParsingErrors.duplicateKeysError(key, ctx)
  }
}
/** Get the exact source text (from start to stop token) that created the node. */
def source(ctx: ParserRuleContext): String = {
  val stream = ctx.getStart.getInputStream
  stream.getText(Interval.of(ctx.getStart.getStartIndex, ctx.getStop.getStopIndex))
}

/** Get all the text which comes after the given rule. */
def remainder(ctx: ParserRuleContext): String = remainder(ctx.getStop)

/** Get all the text which comes after the given token (exclusive). */
def remainder(token: Token): String = {
  val stream = token.getInputStream
  val interval = Interval.of(token.getStopIndex + 1, stream.size() - 1)
  stream.getText(interval)
}
/**
 * Get all the text between the given start and end tokens (exclusive on
 * both sides), preserving whitespace.
 * When we need to extract everything between two tokens including all spaces we should use
 * this method instead of defined a named Antlr4 rule for .*?,
 * which somehow parse "a b" -> "ab" in some cases
 */
def interval(start: Token, end: Token): String = {
  val interval = Interval.of(start.getStopIndex + 1, end.getStartIndex - 1)
  start.getInputStream.getText(interval)
}
/** Convert a string token into a string, resolving its escape sequences. */
def string(token: Token): String = unescapeSQLString(token.getText)

/** Convert a string node into a string, resolving its escape sequences. */
def string(node: TerminalNode): String = unescapeSQLString(node.getText)

/** Convert a string node into a string without unescaping. */
def stringWithoutUnescape(node: TerminalNode): String = {
  // STRING parser rule forces that the input always has quotes at the starting and ending.
  node.getText.slice(1, node.getText.size - 1)
}

/** Collect (key -> unescaped value) if `value` is non-null; empty otherwise. */
def entry(key: String, value: Token): Seq[(String, String)] = {
  Option(value).toSeq.map(x => key -> string(x))
}
/** Get the origin (line and column) of the token; empty Origin for null. */
def position(token: Token): Origin = {
  val opt = Option(token)
  Origin(opt.map(_.getLine), opt.map(_.getCharPositionInLine))
}

/** Validate the condition; if false, throw a ParseException at `ctx`. */
def validate(f: => Boolean, message: String, ctx: ParserRuleContext): Unit = {
  if (!f) {
    throw new ParseException(message, ctx)
  }
}
/**
 * Register the origin of the context. Any TreeNode created in the closure will be assigned the
 * registered origin. This method restores the previously set origin after completion of the
 * closure (the finally block guarantees restoration even if `f` throws).
 */
def withOrigin[T](ctx: ParserRuleContext)(f: => T): T = {
  val current = CurrentOrigin.get
  CurrentOrigin.set(position(ctx.getStart))
  try {
    f
  } finally {
    CurrentOrigin.set(current)
  }
}
/**
 * Unescape backslash-escaped string enclosed by quotes.
 *
 * Raw strings (prefixed with r/R) are returned verbatim without their
 * prefix and quotes. Otherwise the surrounding quotes are stripped and
 * \\uXXXX, \\UXXXXXXXX, octal (\\NNN) and single-character escapes are
 * resolved while scanning left to right.
 */
def unescapeSQLString(b: String): String = {
  val sb = new StringBuilder(b.length())

  // Resolves a single-character escape; unknown escapes yield the character
  // itself (so "\\x" becomes "x").
  def appendEscapedChar(n: Char): Unit = {
    n match {
      case '0' => sb.append('\\u0000')
      case '\\'' => sb.append('\\'')
      case '"' => sb.append('\\"')
      case 'b' => sb.append('\\b')
      case 'n' => sb.append('\\n')
      case 'r' => sb.append('\\r')
      case 't' => sb.append('\\t')
      case 'Z' => sb.append('\\u001A')
      case '\\\\' => sb.append('\\\\')
      // The following 2 lines are exactly what MySQL does TODO: why do we do this?
      case '%' => sb.append("\\\\%")
      case '_' => sb.append("\\\\_")
      case _ => sb.append(n)
    }
  }

  if (b.startsWith("r") || b.startsWith("R")) {
    // Raw string: drop the prefix and both quotes, keep everything else.
    b.substring(2, b.length - 1)
  } else {
    // Skip the first and last quotations enclosing the string literal.
    val charBuffer = CharBuffer.wrap(b, 1, b.length - 1)

    // Each pattern matches at the buffer's current position; on a match we
    // advance past the escape's exact length.
    while (charBuffer.remaining() > 0) {
      charBuffer match {
        case U16_CHAR_PATTERN(cp) =>
          // \\u0000 style 16-bit unicode character literals.
          sb.append(Integer.parseInt(cp, 16).toChar)
          charBuffer.position(charBuffer.position() + 6)
        case U32_CHAR_PATTERN(cp) =>
          // \\U00000000 style 32-bit unicode character literals.
          // Use Long to treat codePoint as unsigned in the range of 32-bit.
          val codePoint = JLong.parseLong(cp, 16)
          if (codePoint < 0x10000) {
            sb.append((codePoint & 0xFFFF).toChar)
          } else {
            // Encode as a UTF-16 surrogate pair.
            val highSurrogate = (codePoint - 0x10000) / 0x400 + 0xD800
            val lowSurrogate = (codePoint - 0x10000) % 0x400 + 0xDC00
            sb.append(highSurrogate.toChar)
            sb.append(lowSurrogate.toChar)
          }
          charBuffer.position(charBuffer.position() + 10)
        case OCTAL_CHAR_PATTERN(cp) =>
          // \\000 style character literals.
          sb.append(Integer.parseInt(cp, 8).toChar)
          charBuffer.position(charBuffer.position() + 4)
        case ESCAPED_CHAR_PATTERN(c) =>
          // escaped character literals.
          appendEscapedChar(c.charAt(0))
          charBuffer.position(charBuffer.position() + 2)
        case _ =>
          // non-escaped character literals.
          sb.append(charBuffer.get())
      }
    }
    sb.toString()
  }
}
  /**
   * Pattern for a backtick-quoted column name without a qualifier, e.g. "`col`".
   * (?s) lets `.` match line terminators inside the quoted identifier.
   */
  val escapedIdentifier = "`((?s).+)`".r
/** the column name pattern in quoted regex with qualifier */
val qualifiedEscapedIdentifier = ("((?s).+)" + """.""" + "`((?s).+)`").r
/** Some syntactic sugar which makes it easier to work with optional clauses for LogicalPlans. */
implicit class EnhancedLogicalPlan(val plan: LogicalPlan) extends AnyVal {
/**
* Create a plan using the block of code when the given context exists. Otherwise return the
* original plan.
*/
def optional(ctx: AnyRef)(f: => LogicalPlan): LogicalPlan = {
if (ctx != null) {
f
} else {
plan
}
}
/**
* Map a [[LogicalPlan]] to another [[LogicalPlan]] if the passed context exists using the
* passed function. The original plan is returned when the context does not exist.
*/
def optionalMap[C](ctx: C)(f: (C, LogicalPlan) => LogicalPlan): LogicalPlan = {
if (ctx != null) {
f(ctx, plan)
} else {
plan
}
}
}
}
| ueshin/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala | Scala | apache-2.0 | 8,694 |
package org.airpnp.upnp
import scala.collection.JavaConversions._
import javax.xml.soap.MessageFactory
import javax.xml.transform.TransformerFactory
import java.io.StringWriter
import javax.xml.transform.stream.StreamResult
import java.io.InputStream
import scala.xml.XML
import scala.xml.Node
import javax.xml.soap.SOAPElement
import org.w3c.dom.Element
object SoapMessage {
  // Shared factory for building/parsing SOAP messages.
  private val messageFactory = MessageFactory.newInstance
  // NOTE(review): javax.xml.transform.Transformer instances are not thread-safe;
  // sharing this single instance assumes SoapMessage.toString is never invoked
  // concurrently — confirm before using from multiple threads.
  private val xform = TransformerFactory.newInstance.newTransformer
  /** Parse a SOAP message from the given input stream. */
  def parse(is: InputStream) = new SoapMessage(is)
}
/**
 * A SOAP message wrapper used for UPnP actions. A message is either parsed from
 * an input stream (when `is` is non-null) or freshly constructed from a service
 * type and action name. Use the public auxiliary constructors; the primary
 * constructor is private.
 */
class SoapMessage private (private val serviceTypeIn: String, private val nameIn: String, private val is: InputStream) {
  // Initialize (soapPart, bodyElement, serviceType, name) either by parsing the
  // stream or by building an empty envelope with a single body element.
  private val (soapPart, bodyElement, serviceType: String, name: String) = {
    if (is != null) {
      val soapMessage = SoapMessage.messageFactory.createMessage(null, is)
      val soapPart = soapMessage.getSOAPPart
      val soapEnvelope = soapPart.getEnvelope
      val soapBody = soapEnvelope.getBody
      // The first SOAPElement child of the body carries the action name/namespace.
      val bodyElement = soapBody.getChildElements.toSeq.filter(x => x.isInstanceOf[SOAPElement]).head.asInstanceOf[SOAPElement]
      (soapPart, bodyElement, bodyElement.getNamespaceURI(), bodyElement.getLocalName)
    } else {
      val soapMessage = SoapMessage.messageFactory.createMessage
      val soapPart = soapMessage.getSOAPPart
      val soapEnvelope = soapPart.getEnvelope
      // Header is optional, remove it
      val soapHeader = soapEnvelope.getHeader
      soapEnvelope.removeChild(soapHeader)
      val soapBody = soapEnvelope.getBody();
      // "u" is the conventional UPnP namespace prefix for the action element.
      val bodyName = soapEnvelope.createName(nameIn, "u", serviceTypeIn)
      val bodyElement = soapBody.addBodyElement(bodyName)
      (soapPart, bodyElement, serviceTypeIn, nameIn)
    }
  }
  /** Build an empty outgoing message for the given service type and action name. */
  def this(serviceType: String, name: String) = this(serviceType, name, null)
  /** Parse an incoming message from a stream. */
  def this(is: InputStream) = this(null, null, is)
  def getServiceType() = serviceType
  def getName() = name
  /** The SOAPACTION header value, quoted per the UPnP convention. */
  def getSoapAction() = "\\"" + serviceType + "#" + name + "\\""
  /** Text content of the named argument element, or `defaultValue` when absent. */
  def getArgument(name: String, defaultValue: String): String = {
    val elems = bodyElement.getElementsByTagName(name)
    if (elems.getLength == 0) defaultValue else elems.item(0).getTextContent
  }
  /** Set (or add, when missing) the named argument element's text content. */
  def setArgument(name: String, value: String) = {
    val elems = bodyElement.getElementsByTagName(name)
    elems.getLength match {
      case 0 => bodyElement.addChildElement(name).setTextContent(value)
      case _ => elems.item(0).setTextContent(value)
    }
  }
  /** Remove the named argument element; throws when no such argument exists. */
  def deleteArgument(name: String) = {
    val elems = bodyElement.getElementsByTagName(name)
    elems.getLength match {
      case 0 => throw new IllegalArgumentException("No such argument: " + name)
      case _ => bodyElement.removeChild(elems.item(0))
    }
  }
  /** Serialize the whole SOAP part to XML text. */
  override def toString = {
    val source = soapPart.getContent
    val writer = new StringWriter
    val result = new StreamResult(writer)
    SoapMessage.xform.transform(source, result)
    writer.toString
  }
  /** True when this message is a SOAP Fault response. */
  def isFault() = getName == "Fault"
  /** Render the action and its arguments like a function call, for logging. */
  def toFunctionLikeString() = {
    getSoapAction + "(" +
      bodyElement.getChildElements()
        .map(_.asInstanceOf[Element])
        .map(e => e.getNodeName + ": " + e.getTextContent).mkString(", ") + ")"
  }
} | provegard/ScAirPnp | src/main/scala/org/airpnp/upnp/SoapMessage.scala | Scala | mit | 3,204 |
package org.jetbrains.plugins.scala.lang
package transformation
package annotations
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.{&&, Parent}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScFunctionExpr
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameterClause}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaCode._
import org.jetbrains.plugins.scala.project.ProjectContext
/**
 * Transformation that adds an explicit type annotation to an untyped parameter
 * of a function literal (e.g. turning `x => ...` into `(x: T) => ...`, where T
 * is the inferred parameter type).
 *
 * @author Pavel Fatin
 */
class AddTypeToFunctionParameter extends AbstractTransformer {
  def transformation(implicit project: ProjectContext): PartialFunction[PsiElement, Unit] = {
    // Match a parameter whose grandparent is a function literal and which has
    // no explicit type annotation yet.
    case (p: ScParameter) && Parent(e @ Parent(Parent(_: ScFunctionExpr))) if p.paramType.isEmpty =>
      appendTypeAnnotation(p.getRealParameterType().get, e) { annotation =>
        // Build a throwaway function "(param: Type) => ()" and extract its
        // parameter clause node (first child of first child) as the replacement.
        val replacement = code"(${p.getText}: ${annotation.getText}) => ()"
          .getFirstChild.getFirstChild
        val result = e.replace(replacement).asInstanceOf[ScParameterClause]
        result.parameters.head.typeElement.get
      }
  }
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/transformation/annotations/AddTypeToFunctionParameter.scala | Scala | apache-2.0 | 1,086 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.structure
import io.gatling.core.action.builder._
import io.gatling.core.session.{ Expression, Session }
/**
 * Mixin providing the conditional and switch-style DSL methods for scenario
 * builders: `doIf*`, `doSwitch*`, `randomSwitch*` and `roundRobinSwitch`.
 */
trait ConditionalStatements[B] extends Execs[B] {

  /**
   * Method used to add a conditional execution in the scenario
   *
   * @param condition the function that will determine if the condition is satisfied or not
   * @param thenNext the chain to be executed if the condition is satisfied
   * @return a new builder with a conditional execution added to its actions
   */
  def doIf(condition: Expression[Boolean])(thenNext: ChainBuilder): B = doIf(condition, thenNext, None)

  /**
   * Build a condition that is satisfied when both expressions evaluate
   * successfully against the session and their results are equal.
   */
  private def equalityCondition(actual: Expression[Any], expected: Expression[Any]): Expression[Boolean] =
    (session: Session) =>
      for {
        expected <- expected(session)
        actual <- actual(session)
      } yield expected == actual

  /**
   * Method used to add a conditional execution in the scenario
   *
   * @param actual the real value
   * @param expected the expected value
   * @param thenNext the chain to be executed if the condition is satisfied
   * @return a new builder with a conditional execution added to its actions
   */
  def doIfEquals(actual: Expression[Any], expected: Expression[Any])(thenNext: ChainBuilder): B =
    doIf(equalityCondition(actual, expected), thenNext, None)

  /**
   * Method used to add a conditional execution in the scenario with a fall back
   * action if condition is not satisfied
   *
   * @param condition the function that will determine if the condition is satisfied or not
   * @param thenNext the chain to be executed if the condition is satisfied
   * @param elseNext the chain to be executed if the condition is not satisfied
   * @return a new builder with a conditional execution added to its actions
   */
  def doIfOrElse(condition: Expression[Boolean])(thenNext: ChainBuilder)(elseNext: ChainBuilder): B =
    doIf(condition, thenNext, Some(elseNext))

  /**
   * Method used to add a conditional execution in the scenario with a fall back
   * action if condition is not satisfied
   *
   * @param actual the real value
   * @param expected the expected value
   * @param thenNext the chain to be executed if the condition is satisfied
   * @param elseNext the chain to be executed if the condition is not satisfied
   * @return a new builder with a conditional execution added to its actions
   */
  def doIfEqualsOrElse(actual: Expression[Any], expected: Expression[Any])(thenNext: ChainBuilder)(elseNext: ChainBuilder): B =
    doIf(equalityCondition(actual, expected), thenNext, Some(elseNext))

  /**
   * Private method that actually adds the If Action to the scenario
   *
   * @param condition the function that will determine if the condition is satisfied or not
   * @param thenNext the chain to be executed if the condition is satisfied
   * @param elseNext the chain to be executed if the condition is not satisfied
   * @return a new builder with a conditional execution added to its actions
   */
  private def doIf(condition: Expression[Boolean], thenNext: ChainBuilder, elseNext: Option[ChainBuilder]): B =
    exec(new IfBuilder(condition, thenNext, elseNext))

  /**
   * Add a switch in the chain. Every possible subchain is defined with a key.
   * Switch is selected through the matching of a key with the evaluation of the passed expression.
   * If no switch is selected, switch is bypassed.
   *
   * @param value expression to evaluate and match to find the right subchain
   * @param possibilities tuples of key and subchain
   * @return a new builder with a switch added to its actions
   */
  def doSwitch(value: Expression[Any])(possibilities: (Any, ChainBuilder)*): B = {
    require(possibilities.size >= 2, "doSwitch()() requires at least 2 possibilities")
    doSwitch(value, possibilities.toList, None)
  }

  /**
   * Add a switch in the chain. Every possible subchain is defined with a key.
   * Switch is selected through the matching of a key with the evaluation of the passed expression.
   * If no switch is selected, the fallback subchain is used.
   *
   * @param value expression to evaluate and match to find the right subchain
   * @param possibilities tuples of key and subchain
   * @param elseNext fallback subchain
   * @return a new builder with a switch added to its actions
   */
  def doSwitchOrElse(value: Expression[Any])(possibilities: (Any, ChainBuilder)*)(elseNext: ChainBuilder): B = {
    require(possibilities.size >= 2, "doSwitchOrElse()()() requires at least 2 possibilities")
    doSwitch(value, possibilities.toList, Some(elseNext))
  }

  private def doSwitch(value: Expression[Any], possibilities: List[(Any, ChainBuilder)], elseNext: Option[ChainBuilder]): B =
    exec(new SwitchBuilder(value, possibilities, elseNext))

  /**
   * Add a switch in the chain. Every possible subchain is defined with a percentage.
   * Switch is selected randomly. If no switch is selected (ie: random number exceeds percentages sum), switch is bypassed.
   * Percentages sum can't exceed 100%.
   *
   * @param possibilities the possible subchains
   * @return a new builder with a random switch added to its actions
   */
  def randomSwitch(possibilities: (Double, ChainBuilder)*): B = {
    require(possibilities.nonEmpty, "randomSwitch() requires at least 1 possibility")
    randomSwitch(possibilities.toList, None)
  }

  /**
   * Add a switch in the chain. Every possible subchain is defined with a percentage.
   * Switch is selected randomly. If no switch is selected (ie: random number exceeds percentages sum),
   * the subchain defined as the fallback will be used.
   * Percentages sum must be below 100%.
   *
   * @param possibilities the possible subchains
   * @param elseNext fallback subchain
   * @return a new builder with a random switch added to its actions
   */
  def randomSwitchOrElse(possibilities: (Double, ChainBuilder)*)(elseNext: ChainBuilder): B = {
    require(possibilities.nonEmpty, "randomSwitchOrElse() requires at least 1 possibility")
    randomSwitch(possibilities.toList, Some(elseNext))
  }

  private def randomSwitch(possibilities: List[(Double, ChainBuilder)], elseNext: Option[ChainBuilder]): B =
    exec(RandomSwitchBuilder(possibilities, elseNext))

  /**
   * Add a switch in the chain. Selection uses a uniformly distributed random strategy
   *
   * @param possibilities the possible subchains
   * @return a new builder with a random switch added to its actions
   */
  def uniformRandomSwitch(possibilities: ChainBuilder*): B = {
    require(possibilities.size >= 2, "uniformRandomSwitch() requires at least 2 possibilities")
    val possibility1 :: tailPossibilities = possibilities.toList
    // Split 100% evenly; assign any rounding remainder to the first possibility
    // so the weights always sum to exactly 100%.
    val basePercentage = 100d / (tailPossibilities.size + 1)
    val firstPercentage = 100d - basePercentage * tailPossibilities.size
    val possibilitiesWithPercentage = (firstPercentage, possibility1) :: tailPossibilities.map((basePercentage, _))
    randomSwitch(possibilitiesWithPercentage, None)
  }

  /**
   * Add a switch in the chain. Selection uses a round robin strategy
   *
   * @param possibilities the possible subchains
   * @return a new builder with a random switch added to its actions
   */
  def roundRobinSwitch(possibilities: ChainBuilder*): B = {
    require(possibilities.nonEmpty, "roundRobinSwitch() requires at least 1 possibility")
    exec(new RoundRobinSwitchBuilder(possibilities.toList))
  }
}
| MykolaB/gatling | gatling-core/src/main/scala/io/gatling/core/structure/ConditionalStatements.scala | Scala | apache-2.0 | 8,044 |
package com.twitter.finagle.memcached.unit
import com.twitter.conversions.time._
import com.twitter.finagle.memcached.MockClient
import com.twitter.finagle.memcached.protocol.ClientError
import com.twitter.io.Buf
import com.twitter.util.{Await, Awaitable, Return}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
// Unit tests for MockClient's in-memory implementation of the memcached commands.
// Assertions within each test are order-sensitive: they exercise the evolving
// state of a single client instance.
@RunWith(classOf[JUnitRunner])
class MockClientTest extends FunSuite {
  // Upper bound for resolving any Future in these tests.
  val TimeOut = 15.seconds
  private def awaitResult[T](awaitable: Awaitable[T]): T = Await.result(awaitable, TimeOut)
  test("correctly perform the GET command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.get("key")) == Some("value"))
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  test("correctly perform the SET command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.set("key", "new value").liftToTry) == Return.Unit)
    assert(awaitResult(memcache.get("key")) == Some("new value"))
    assert(awaitResult(memcache.set("key2", "value2").liftToTry) == Return.Unit)
    assert(awaitResult(memcache.get("key2")) == Some("value2"))
    assert(awaitResult(memcache.set("key2", "value3").liftToTry) == Return.Unit)
    assert(awaitResult(memcache.get("key2")) == Some("value3"))
  }
  // ADD only succeeds when the key does not already exist.
  test("correctly perform the ADD command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(!awaitResult(memcache.add("key", "new value")))
    assert(awaitResult(memcache.get("key")) == Some("value"))
    assert(awaitResult(memcache.add("key2", "value2")).booleanValue)
    assert(awaitResult(memcache.get("key2")) == Some("value2"))
    assert(!awaitResult(memcache.add("key2", "value3")))
    assert(awaitResult(memcache.get("key2")) == Some("value2"))
  }
  test("correctly perform the APPEND command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.append("key", "More")).booleanValue)
    assert(awaitResult(memcache.get("key")) == Some("valueMore"))
    assert(!awaitResult(memcache.append("unknown", "value")))
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  test("correctly perform the PREPEND command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.prepend("key", "More")).booleanValue)
    assert(awaitResult(memcache.get("key")) == Some("Morevalue"))
    assert(!awaitResult(memcache.prepend("unknown", "value")))
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  // REPLACE only succeeds when the key already exists.
  test("correctly perform the REPLACE command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.replace("key", "new value")).booleanValue)
    assert(awaitResult(memcache.get("key")) == Some("new value"))
    assert(!awaitResult(memcache.replace("unknown", "value")))
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  test("correctly perform the DELETE command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings
    assert(awaitResult(memcache.delete("key")).booleanValue)
    assert(awaitResult(memcache.get("key")) == None)
    assert(!awaitResult(memcache.delete("unknown")))
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  // INCR on a non-numeric value is a client error; on a missing key it returns None.
  test("correctly perform the INCR command") {
    val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings
    intercept[ClientError] { awaitResult(memcache.incr("key")) }
    assert(awaitResult(memcache.get("key")) == Some("value"))
    assert(awaitResult(memcache.incr("count")) == Some(2))
    assert(awaitResult(memcache.get("count")) == Some("2"))
    assert(awaitResult(memcache.incr("unknown")) == None)
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  // DECR saturates at 0 (memcached semantics: counters never go negative).
  test("correctly perform the DECR command") {
    val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings
    intercept[ClientError] { awaitResult(memcache.decr("key")) }
    assert(awaitResult(memcache.get("key")) == Some("value"))
    assert(awaitResult(memcache.decr("count")) == Some(0))
    assert(awaitResult(memcache.get("count")) == Some("0"))
    assert(awaitResult(memcache.decr("count")) == Some(0))
    assert(awaitResult(memcache.get("count")) == Some("0"))
    assert(awaitResult(memcache.decr("unknown")) == None)
    assert(awaitResult(memcache.get("unknown")) == None)
  }
  test("`getResults` command populates the `casUnique` value") {
    val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings
    val result = awaitResult(memcache.getResult(Seq("key")))
    assert(result.hits("key").casUnique.isDefined)
  }
  // `contents` must snapshot the map: a later mutation must not leak into an
  // earlier snapshot.
  test("`contents` produces immutable copies") {
    val memcache = new MockClient()
    val emptyContents = memcache.contents
    memcache.withStrings.set("key", "value")
    val oneKey = memcache.contents
    // ensure that contents of emptyContents has not changed: check it after set
    assert(emptyContents == Map())
    assert(oneKey == Map("key" -> Buf.Utf8("value")))
  }
}
| koshelev/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/MockClientTest.scala | Scala | apache-2.0 | 5,134 |
package de.kaufhof.pillar.cli
import org.clapper.argot.ArgotParser
import org.clapper.argot.ArgotConverters._
import java.io.File
object CommandLineConfiguration {
  /**
   * Parse the CLI arguments into a [[CommandLineConfiguration]].
   *
   * Expected usage: `pillar <initialize|migrate> <data-store> [-e env] [-d dir] [-t time]`.
   * Invalid input causes Argot's `parser.usage(...)` to abort with a usage message.
   */
  def buildFromArguments(arguments: Array[String]): CommandLineConfiguration = {
    val parser = new ArgotParser("pillar")
    // First positional argument: the migrator action to perform.
    val commandParameter = parser.parameter[MigratorAction]("command", "migrate or initialize", optional = false) {
      (commandString, _) =>
        commandString match {
          case "initialize" => Initialize
          case "migrate" => Migrate
          case _ => parser.usage(s"$commandString is not a command")
        }
    }
    // Second positional argument: name of the data store from application.conf.
    val dataStoreConfigurationOption = parser.parameter[String]("data-store", "The target data store, as defined in application.conf", optional = false)
    // -d / --migrations-directory: must point at an existing directory.
    val migrationsDirectoryOption = parser.option[File](List("d", "migrations-directory"), "directory", "The directory containing migrations") {
      (path, _) =>
        val directory = new File(path)
        if (!directory.isDirectory) parser.usage(s"${directory.getAbsolutePath} is not a directory")
        directory
    }
    val environmentOption = parser.option[String](List("e", "environment"), "env", "environment")
    val timeStampOption = parser.option[Long](List("t", "time-stamp"), "time", "The migration time stamp")
    parser.parse(arguments)
    // Defaults: environment "development", migrations under conf/pillar/migrations.
    CommandLineConfiguration(
      commandParameter.value.get,
      dataStoreConfigurationOption.value.get,
      environmentOption.value.getOrElse("development"),
      migrationsDirectoryOption.value.getOrElse(new File("conf/pillar/migrations")),
      timeStampOption.value
    )
  }
}
/**
 * Parsed command-line options for the Pillar CLI.
 *
 * @param command the action to perform (initialize or migrate)
 * @param dataStore name of the target data store as defined in application.conf
 * @param environment configuration environment (defaults to "development")
 * @param migrationsDirectory directory containing the migration files
 * @param timeStampOption optional migration time stamp to migrate up/down to
 */
case class CommandLineConfiguration(command: MigratorAction, dataStore: String, environment: String, migrationsDirectory: File, timeStampOption: Option[Long])
| j-potts/pillar | src/main/scala/de/kaufhof/pillar/cli/CommandLineConfiguration.scala | Scala | mit | 1,796 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.text
import org.locationtech.geomesa.utils.date.DateUtils.toInstant
import java.time._
import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder}
import java.time.temporal.{ChronoField, TemporalAccessor, TemporalQuery}
import java.util.{Date, Locale}
/**
 * Lenient ISO-8601 date parsing and formatting utilities. The date part is
 * required; time-of-day, fractional seconds and zone offset are all optional.
 * Values without an explicit zone are interpreted as UTC, and values without a
 * time-of-day as start-of-day.
 */
object DateParsing {

  // Lenient ISO formatter: date, then optional "T HH:mm[:ss[.SSS]]", then
  // optional zone offset. Defaults to UTC via withZone.
  private val format =
    new DateTimeFormatterBuilder()
      .parseCaseInsensitive()
      .append(DateTimeFormatter.ISO_LOCAL_DATE)
      .parseLenient()
      .optionalStart()
      .appendLiteral('T')
      .appendValue(ChronoField.HOUR_OF_DAY, 2)
      .appendLiteral(':')
      .appendValue(ChronoField.MINUTE_OF_HOUR, 2)
      .optionalStart()
      .appendLiteral(':')
      .appendValue(ChronoField.SECOND_OF_MINUTE, 2)
      .optionalStart()
      .appendFraction(ChronoField.MILLI_OF_SECOND, 3, 3, true)
      .optionalEnd()
      .optionalEnd()
      .optionalEnd()
      .optionalStart()
      .appendOffsetId()
      .toFormatter(Locale.US)
      .withZone(ZoneOffset.UTC)

  /** 1970-01-01T00:00:00Z as a ZonedDateTime. */
  val Epoch: ZonedDateTime = ZonedDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)

  /** TemporalQuery instances used with `DateTimeFormatter.parseBest`. */
  object TemporalQueries {

    val ZonedQuery: TemporalQuery[ZonedDateTime] = new TemporalQuery[ZonedDateTime] {
      override def queryFrom(temporal: TemporalAccessor): ZonedDateTime = ZonedDateTime.from(temporal)
    }

    val LocalQuery: TemporalQuery[LocalDateTime] = new TemporalQuery[LocalDateTime] {
      override def queryFrom(temporal: TemporalAccessor): LocalDateTime = LocalDateTime.from(temporal)
    }

    val LocalDateQuery: TemporalQuery[LocalDate] = new TemporalQuery[LocalDate] {
      override def queryFrom(temporal: TemporalAccessor): LocalDate = LocalDate.from(temporal)
    }
  }

  /**
   * Parses a date string, with optional time and zone.
   *
   * @param value date string
   * @param format date formatter, default ISO format with optional time and zone
   * @return parsed value as a ZonedDateTime (UTC when no zone was present)
   */
  def parse(value: String, format: DateTimeFormatter = format): ZonedDateTime = {
    import TemporalQueries.{LocalDateQuery, LocalQuery, ZonedQuery}
    format.parseBest(value, ZonedQuery, LocalQuery, LocalDateQuery) match {
      case d: ZonedDateTime => d
      case d: LocalDateTime => d.atZone(ZoneOffset.UTC)
      case d: LocalDate     => d.atTime(LocalTime.MIN).atZone(ZoneOffset.UTC)
    }
  }

  /**
   * Parses a date string, with optional time and zone.
   *
   * Delegates to [[parse]]: for the local cases, `atZone(UTC).toInstant` is
   * equivalent to `toInstant(UTC)`, so this matches handling each parseBest
   * branch directly (which previous versions of this object duplicated).
   *
   * @param value date string
   * @param format date formatter, default ISO format with optional time and zone
   * @return parsed value as an Instant
   */
  def parseInstant(value: String, format: DateTimeFormatter = format): Instant =
    parse(value, format).toInstant

  /**
   * Parses a date string, with optional time and zone.
   *
   * @param value date string
   * @param format date formatter, default ISO format with optional time and zone
   * @return parsed value as a java.util.Date
   */
  def parseDate(value: String, format: DateTimeFormatter = format): Date =
    Date.from(parseInstant(value, format))

  /**
   * Parses a date string, with optional time and zone.
   *
   * @param value date string
   * @param format date formatter, default ISO format with optional time and zone
   * @return parsed value as epoch milliseconds
   */
  def parseMillis(value: String, format: DateTimeFormatter = format): Long =
    parseInstant(value, format).toEpochMilli

  /** Formats a ZonedDateTime, by default in the lenient ISO format above. */
  def format(value: ZonedDateTime, format: DateTimeFormatter = format): String = value.format(format)

  /** Formats a java.util.Date as UTC. */
  def formatDate(value: Date, format: DateTimeFormatter = format): String =
    ZonedDateTime.ofInstant(toInstant(value), ZoneOffset.UTC).format(format)

  /** Formats an Instant as UTC. */
  def formatInstant(value: Instant, format: DateTimeFormatter = format): String =
    ZonedDateTime.ofInstant(value, ZoneOffset.UTC).format(format)

  /** Formats epoch milliseconds as UTC. */
  def formatMillis(value: Long, format: DateTimeFormatter = format): String =
    ZonedDateTime.ofInstant(Instant.ofEpochMilli(value), ZoneOffset.UTC).format(format)
}
| aheyne/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/text/DateParsing.scala | Scala | apache-2.0 | 5,286 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package org.jdbcdslog
import java.sql.SQLFeatureNotSupportedException
import java.util.logging.Logger
import javax.sql.DataSource
/**
 * Wrapper around jdbcdslog's ConnectionPoolDataSourceProxy that exposes the
 * proxied target [[DataSource]]. The proxy itself does not expose its target,
 * and access to it is necessary in order to shut down the connection pool.
 */
class LogSqlDataSource extends ConnectionPoolDataSourceProxy {
  // JDBC 4.1 method; this proxy has no java.util.logging parent logger to report.
  override def getParentLogger: Logger = throw new SQLFeatureNotSupportedException
  // `targetDS` is presumably the superclass field holding the wrapped data
  // source — defined in ConnectionPoolDataSourceProxy (jdbcdslog).
  def getTargetDatasource = this.targetDS.asInstanceOf[DataSource]
}
| Shruti9520/playframework | framework/src/play-jdbc/src/main/scala/org/jdbcdslog/LogSqlDataSource.scala | Scala | apache-2.0 | 575 |
package gapt.proofs.nd
import gapt.expr._
import gapt.expr.formula.All
import gapt.expr.formula.Atom
import gapt.expr.formula.Bottom
import gapt.expr.formula.Imp
import gapt.expr.ty.TBase
import gapt.proofs._
import gapt.utils.SatMatchers
import org.specs2.mutable._
class NDTest extends Specification with SatMatchers {
"doubleNegationElim" in {
val a1 = LogicalAxiom( hof"¬ ¬A" )
val a2 = LogicalAxiom( hof"¬A" )
val a3 = NegElimRule( a1, a2 )
val a4 = BottomElimRule( a3, hof"A" )
val b1 = LogicalAxiom( hof"A" )
val c1 = LogicalAxiom( hof"A" )
val c2 = LogicalAxiom( hof"¬A" )
val c3 = OrIntro1Rule( c1, hof"¬A" )
val c4 = OrIntro2Rule( c2, hof"A" )
val c5 = ExcludedMiddleRule( c3, Ant( 0 ), c4, Ant( 0 ) )
val a5 = OrElimRule( c5, b1, a4 )
val a6 = ImpIntroRule( a5 )
a6.conclusion must beValidSequent
}
"Example 1" in {
val a1 = LogicalAxiom( hof"p" )
val a2 = AndIntroRule( a1, a1 )
val a3 = ContractionRule( a2, hof"p" )
val a4 = ImpIntroRule( a3 )
a4.conclusion must beValidSequent
}
"Weakening" in {
val a1 = LogicalAxiom( hof"a" )
val a2 = WeakeningRule( a1, hof"b" )
a2.conclusion must beValidSequent
}
"LogicalAxiom" in {
val a1 = LogicalAxiom( hof"a", Seq( hof"b", hof"c" ) )
a1.conclusion must beValidSequent
}
"And" in {
val a1 = LogicalAxiom( hof"a" )
val a2 = LogicalAxiom( hof"b" )
val a3 = AndIntroRule( a1, a2 )
val a4 = AndElim1Rule( a3 )
val a5 = AndElim2Rule( a3 )
val a6 = AndIntroRule( a4, a5 )
val a7 = ContractionRule( a6, hof"a" )
val a8 = ContractionRule( a7, hof"b" )
a8.conclusion must beValidSequent
}
"Forall" in {
val a1 = LogicalAxiom( hof"!x P x" )
val a2 = ForallElimRule( a1, hov"v" )
val a3 = ForallIntroRule( a2, hof"!y P y", hov"v" )
val a4 = ImpIntroRule( a3 )
val b1 = LogicalAxiom( hof"!y P y" )
val b2 = ForallElimRule( b1, hov"v" )
val b3 = ForallIntroRule( b2, hof"!x P x", hov"v" )
val b4 = ImpIntroRule( b3 )
val res = AndIntroRule( a4, b4 )
res.conclusion mustEqual Sequent() :+ hof"(!x P x -> !y P y) & (!y P y -> !x P x)"
}
"Induction" in {
val b1 = LogicalAxiom( hof"!(x: nat) (((x + (0: nat)): nat) = x)" )
val b2 = ForallElimRule( b1, le"0: nat" )
val s1 = LogicalAxiom( hof"!(x: nat) !(y: nat) (((s(x): nat) + y: nat) = s(x + y))" )
val s2 = ForallElimRule( s1, le"x0: nat" )
val s3 = ForallElimRule( s2, le"0: nat" )
val s4 = LogicalAxiom( hof"(((x0: nat) + (0: nat)): nat) = x0" )
val s5 = EqualityElimRule( s4, s3, hof"((((s(x0): nat) + (0: nat)): nat) = s(z: nat))", hov"z: nat" )
s5.conclusion( Suc( 0 ) ) mustEqual hof"(((s(x0:nat): nat) + (0:nat)): nat) = s(x0)"
val cases = Seq(
InductionCase( b2, hoc"0: nat", Seq.empty, Seq.empty ),
InductionCase( s5, hoc"s: nat>nat", Seq( Ant( 0 ) ), Seq( hov"x0: nat" ) ) )
val p = InductionRule( cases, Abs( Var( "x", TBase( "nat" ) ), hof"(((x: nat) + (0:nat)): nat) = x" ), le"x: nat" )
p.conclusion mustEqual Seq( hof"!(x: nat) ((x + (0:nat)): nat) = x", hof"!(x: nat) !(y: nat) (((s(x): nat) + y): nat) = s(x + y)" ) ++: Sequent() :+ hof"(((x: nat) + (0: nat)): nat) = x"
}
"Induction2" in {
val nat = TBase( "nat" )
val c0 = Const( "0", nat )
val cs = Const( "s", nat ->: nat )
val x = Var( "x", nat )
val p0 = Atom( "P", c0 )
val px = Atom( "P", x )
val psx = Atom( "P", cs( x ) )
val a1 = LogicalAxiom( p0 )
val b1 = LogicalAxiom( All( x, Imp( px, psx ) ) )
val b2 = ForallElimRule( b1, x )
val b3 = LogicalAxiom( px )
val b4 = ImpElimRule( b2, b3 )
val c1 = InductionCase( a1, c0, Seq(), Seq() )
val c2 = InductionCase( b4, cs, Seq( Ant( 1 ) ), Seq( x ) )
val c3 = InductionRule( Seq( c1, c2 ), Abs( x, px ), x )
val d1 = ForallIntroRule( c3, x, x )
val d2 = ImpIntroRule( d1, Ant( 0 ) )
val d3 = ImpIntroRule( d2 )
d3.conclusion mustEqual Seq() ++: Sequent() :+ hof"∀x (P(x:nat) → P(s(x))) → P(0) → ∀x P(x)"
}
"ImpElim" in {
val a1 = LogicalAxiom( hof"a" )
val a2 = LogicalAxiom( hof"a -> b" )
val a3 = ImpElimRule( a2, a1 )
a3.conclusion must beValidSequent
}
"ImpIntro" in {
val a1 = LogicalAxiom( hof"a", Seq( hof"b" ) )
val a2 = ImpIntroRule( a1, Ant( 0 ) )
val a3 = ImpIntroRule( a2 )
a3.conclusion must beValidSequent
}
"OrElim" in {
val a1 = LogicalAxiom( hof"a & b" )
val a2 = AndElim1Rule( a1 )
val a3 = LogicalAxiom( hof"a & c" )
val a4 = AndElim1Rule( a3 )
val a5 = LogicalAxiom( hof"(a & b) | (a & c)" )
val a6 = OrElimRule( a5, a2, a4 )
a6.conclusion must beValidSequent
}
"OrIntro1" in {
val a1 = LogicalAxiom( hof"a & b" )
val a7 = OrIntro1Rule( a1, hof"a" )
a7.conclusion must beValidSequent
}
"OrIntro2" in {
val a1 = LogicalAxiom( hof"a & b" )
val a8 = OrIntro2Rule( a1, hof"a" )
a8.conclusion must beValidSequent
}
"BottomElim" in {
val a1 = LogicalAxiom( Bottom() )
val a2 = BottomElimRule( a1, hof"a" )
a2.conclusion must beValidSequent
}
"Negation1" in {
val a1 = LogicalAxiom( hof"¬a" )
val a2 = LogicalAxiom( hof"a" )
val a3 = NegElimRule( a1, a2 )
val a4 = NegIntroRule( a3, Ant( 0 ) )
a4.conclusion must beValidSequent
}
"ExistsIntro 1" in {
val a1 = LogicalAxiom( hof"P a b" )
val a2 = ExistsIntroRule( a1, hof"P x b", hoc"a : i", hov"x" )
a2.conclusion mustEqual Seq( hof"P a b" ) ++: Sequent() :+ hof"?x P x b"
}
"ExistsIntro 2" in {
val a1 = LogicalAxiom( hof"P a b" )
val a3 = ExistsIntroRule( a1, hof"?x P x b", hoc"a : i" )
a3.conclusion mustEqual Seq( hof"P a b" ) ++: Sequent() :+ hof"?x P x b"
}
"ExistsIntro 3" in {
val a4 = LogicalAxiom( hof"P x b" )
val a5 = ExistsIntroRule( a4, hof"?x P x b" )
val a6 = ExistsIntroRule( a5, hof"?y ?x P x y", hoc"b : i" )
a6.conclusion mustEqual Seq( hof"P x b" ) ++: Sequent() :+ hof"?y ?x P x y"
}
"ExistsElim" in {
val a1 = LogicalAxiom( hof"?x P x" )
val a2 = LogicalAxiom( hof"!x (P x -> Q)" )
val a3 = ForallElimRule( a2, hov"y" )
val a4 = LogicalAxiom( hof"P y" )
val a5 = ImpElimRule( a3, a4 )
val a6 = ExistsElimRule( a1, a5, hov"y" )
a6.conclusion mustEqual Seq( hof"?x P x", hof"!x (P x -> Q)" ) ++: Sequent() :+ hof"Q"
}
"ExistsElim2" in {
val b1 = LogicalAxiom( hof"R y" )
val b2 = LogicalAxiom( hof"?x R x -> ?x P x" )
val b3 = ExistsIntroRule( b1, hof"? x R x", hov"y:i" )
val b4 = ImpElimRule( b2, b3 )
val a2 = LogicalAxiom( hof"!x (P x -> Q)" )
val a3 = ForallElimRule( a2, hov"y" )
val a4 = LogicalAxiom( hof"P y" )
val a5 = ImpElimRule( a3, a4 )
val a6 = ExistsElimRule( b4, a5, hov"y" )
a6.conclusion mustEqual Seq( hof"?x R x -> ?x P x", hof"R y", hof"!x (P x -> Q)" ) ++: Sequent() :+ hof"Q"
}
"ExcludedMiddle" in {
val a1 = LogicalAxiom( hof"P" )
val a2 = LogicalAxiom( hof"¬P" )
val a3 = OrIntro1Rule( a1, hof"¬P" )
val a4 = OrIntro2Rule( a2, hof"P" )
val a5 = ExcludedMiddleRule( a3, Ant( 0 ), a4, Ant( 0 ) )
a5.conclusion must beValidSequent
}
"TheoryAxiom" in {
val a1 = TheoryAxiom( fof"!x x = x" )
a1.conclusion must beEValidSequent
}
"EqualityElim 1" in {
val a1 = LogicalAxiom( fof"!x0!x1 P(x2)" )
val a2 = LogicalAxiom( fof"x2=x3" )
val a3 = EqualityElimRule( a2, a1 )
a3.conclusion mustEqual Seq( fof"x2 = x3", fof"!x0 !x1 P x2" ) ++: Sequent() :+ fof"!x0 !x1 P x3"
}
"EqualityElim 2" in {
val a1 = LogicalAxiom( fof"!x0!x1 P(x2)" )
val a2 = LogicalAxiom( fof"x2=x3" )
val a4 = EqualityElimRule( a2, a1, fof"!x0!x1 P(x2)", fov"x2" )
a4.conclusion mustEqual Seq( fof"x2 = x3", fof"!x0 !x1 P x2" ) ++: Sequent() :+ fof"!x0 !x1 P x3"
}
"EqualityElim 3" in {
val b1 = LogicalAxiom( fof"!x0!x1 P(x1)" )
val b2 = LogicalAxiom( fof"x1=x2" )
val b4 = EqualityElimRule( b2, b1, fof"!x0!x1 P(x1)", fov"x1" )
b4.conclusion mustEqual Seq( fof"x1 = x2", fof"!x0 !x1 P x1" ) ++: Sequent() :+ fof"!x0 !x1 P x1"
}
"EqualityElim 4" in {
val c1 = LogicalAxiom( fof"!x0!x1 P(x2)" )
val c2 = LogicalAxiom( fof"x2=x1" )
val c3 = EqualityElimRule( c2, c1 )
c3.conclusion mustEqual Seq( fof"x2 = x1", fof"!x0 !x1 P x2" ) ++: Sequent() :+ fof"!x0 !x1_0 P x1"
}
"EqualityElim 5" in {
val c1 = LogicalAxiom( fof"!x0!x1 P(x2)" )
val c2 = LogicalAxiom( fof"x2=x1" )
val c4 = EqualityElimRule( c2, c1, fof"!x0!x1 P(x2)", fov"x2" )
c4.conclusion mustEqual Seq( fof"x2 = x1", fof"!x0 !x1 P x2" ) ++: Sequent() :+ fof"!x0 !x1_0 P x1"
}
"EqualityElim 6" in {
val a1 = LogicalAxiom( hof"s=t" )
val a2 = LogicalAxiom( hof"!t P(t,s)" )
val a3 = EqualityElimRule( a1, a2 )
val a4 = LogicalAxiom( hof"t=u" )
val a5 = EqualityElimRule( a4, a3 )
a5.conclusion mustEqual Seq( hof"t=u", hof"s=t", hof"!t P(t,s)" ) ++: Sequent() :+ hof"!t P(t,u)"
}
"EqualityElim 7" in {
val a1 = LogicalAxiom( hof"s=t" )
val a2 = LogicalAxiom( hof"!s P(s) & Q(s)" )
val a3 = EqualityElimRule( a1, a2 )
a3.conclusion mustEqual Seq( hof"s=t", hof"!s P(s) & Q(s)" ) ++: Sequent() :+ hof"!s P(s) & Q(t)"
}
"EqualityIntro fov" in {
val a1 = EqualityIntroRule( fov"x" )
a1.conclusion must beEValidSequent
}
"EqualityIntro foc" in {
val a2 = EqualityIntroRule( foc"c" )
a2.conclusion must beEValidSequent
}
"TopIntro" in {
val a1 = TopIntroRule
a1.conclusion must beValidSequent
}
"Issue #650" should {
"be fixed for ∀" in {
val p1 = nd.TheoryAxiom( fof"P(y,y)" )
nd.ForallIntroRule( p1, fof"!x P(x,y)", fov"y" ) must throwAn[NDRuleCreationException]
}
}
"ExistsIntro 4" in {
val a4 = LogicalAxiom( hof"P a b" )
val a5 = ExistsIntroRule( a4, hof"?x P x b" )
val a6 = ExistsIntroRule( a5, hof"?y ?x P x y", hoc"b : i" )
a6.conclusion mustEqual Seq( hof"P a b" ) ++: Sequent() :+ hof"?y ?x P x y"
}
}
| gapt/gapt | tests/src/test/scala/gapt/proofs/nd/NDTest.scala | Scala | gpl-3.0 | 10,125 |
// Files must be named the same as the class/object they contain
object Play {
  // World dimensions, in cells.
  private val WorldWidth = 150
  private val WorldHeight = 40

  /**
   * Entry point: builds a world and runs the simulation forever, printing
   * each generation together with per-tick and cumulative-average timings
   * for both the world update and the rendering.
   */
  def main(args: Array[String]): Unit = {
    val world = new World(
      width = WorldWidth,
      height = WorldHeight,
    )

    println(world.render)

    var totalTick = 0.0   // accumulated tick time (ms) over all generations
    var totalRender = 0.0 // accumulated render time (ms) over all generations

    while (true) {
      val (_, tickTime) = timed(world._tick)
      totalTick += tickTime
      val avgTick = totalTick / world.tick

      val (rendered, renderTime) = timed(world.render)
      totalRender += renderTime
      val avgRender = totalRender / world.tick

      val output =
        s"#${world.tick}" +
        s" - World tick took ${_f(tickTime)} (${_f(avgTick)})" +
        s" - Rendering took ${_f(renderTime)} (${_f(avgRender)})" +
        "\n" + rendered
      print("\u001b[H\u001b[2J") // ANSI: move cursor home + clear screen
      println(output)
    }
  }

  // Runs `body` and returns its result together with the elapsed
  // wall-clock time in milliseconds.
  private def timed[A](body: => A): (A, Double) = {
    val start = System.currentTimeMillis()
    val result = body
    (result, (System.currentTimeMillis() - start).toDouble)
  }

  // Formats a millisecond value with three decimal places.
  private def _f(value: Double) = {
    "%.3f".format(value)
  }
}
| KieranP/Game-Of-Life-Implementations | scala/Play.scala | Scala | mit | 1,259 |
/*
* Copyright (c) 2016 eBay Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ebay.rtran.core
import com.ebay.rtran.api.{IModel, IModelProvider, IProjectCtx}
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging

import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
object ModelProviderRegistry extends LazyLogging {

  /** Config path listing fully-qualified model-provider class names. */
  val PATH_TO_MODEL_PROVIDER = "rtran.model-providers"

  // Providers keyed by (model class, project-context class); populated from
  // all upgrader configs at startup and extended via registerProvider.
  private[this] var modelProviders = loadModelProviders(UpgraderMeta.configs)

  /**
   * Finds a provider for the given model/project-context pair. When no exact
   * match exists, falls back to the first provider registered for the same
   * model class whose context class is a supertype of `projectCtxClass`.
   */
  def findProvider(modelClass: Class[_ <: IModel], projectCtxClass: Class[_ <: IProjectCtx]) =
    modelProviders get (modelClass, projectCtxClass) orElse {
      modelProviders find {
        case ((mclass, pclass), provider) => mclass == modelClass && pclass.isAssignableFrom(projectCtxClass)
      } map (_._2)
    }

  /** All currently registered providers. */
  def providers = modelProviders.values

  /** Registers (or replaces) the provider for its model/context pair. */
  private[rtran] def registerProvider[T <: IModelProvider[IModel, IProjectCtx]](provider: T): Unit = {
    modelProviders += (provider.runtimeModelClass, provider.runtimeProjectCtxClass) -> provider
  }

  // Instantiates every provider class listed under PATH_TO_MODEL_PROVIDER in
  // the given configs. The first provider loaded for a (model, context) pair
  // wins; later duplicates are logged and dropped.
  private def loadModelProviders(configs: Iterator[Config]) = {
    var providers = Map.empty[(Class[_ <: IModel], Class[_ <: IProjectCtx]), IModelProvider[IModel, IProjectCtx]]
    val classNames = configs
      .filter(_.hasPath(PATH_TO_MODEL_PROVIDER))
      .flatMap(_.getStringList(PATH_TO_MODEL_PROVIDER).asScala) // explicit conversion; JavaConversions is deprecated
    classNames foreach { className =>
      loadModelProvider(className) match {
        case Success(provider) if providers contains (provider.runtimeModelClass, provider.runtimeProjectCtxClass) =>
          val modelClass = provider.runtimeModelClass
          val projectCtxClass = provider.runtimeProjectCtxClass
          if (providers((modelClass, projectCtxClass)).getClass == provider.getClass) {
            logger.warn("Get duplicated model provider definition for {}", provider.getClass)
          } else {
            logger.warn("Model provider {} already exists for {}", provider.getClass, (modelClass, projectCtxClass))
          }
        case Success(provider) =>
          providers += (provider.runtimeModelClass, provider.runtimeProjectCtxClass) -> provider
        case Failure(e) =>
          logger.error("Failed to create provider instance {}, {}", className, e)
      }
    }
    providers
  }

  // Reflectively instantiates a provider via its public no-arg constructor.
  // (Class#newInstance is deprecated since Java 9; any reflective failure is
  // captured by the surrounding Try and logged by the caller.)
  private def loadModelProvider(className: String) = Try {
    Class.forName(className)
      .asSubclass(classOf[IModelProvider[IModel, IProjectCtx]])
      .getDeclaredConstructor()
      .newInstance()
  }
}
| eBay/RTran | rtran-core/src/main/scala/com/ebay/rtran/core/ModelProviderRegistry.scala | Scala | apache-2.0 | 2,975 |
package com.webtrends.harness.utils
import java.text.MessageFormat
import java.util.{ResourceBundle, Locale}
/** Messages externalization
*
* == Overview ==
* You would use it like so:
*
* {{{
* Localized(user) { implicit lang =>
* val error = LocalizedString("error")
* }
* }}}
*
* Messages are stored in `messages_XXX.properties` files in UTF-8 encoding in resources.
* The lookup will fallback to default file `messages.properties` if the string is not found in
* the language-specific file.
*
* Messages are formatted with `java.text.MessageFormat`.
*/
trait LocalizedString {
  /** get the message w/o formatting */
  def raw(msg: String)(implicit locale: Locale=Locale.getDefault, context:String="messages"): String = {
    // `context` is the bundle base name, i.e. the "messages" part of
    // messages_XXX.properties; UTF8BundleControl makes the lookup read the
    // properties files as UTF-8 and fall back to the default bundle.
    val bundle = ResourceBundle.getBundle(context, locale, UTF8BundleControl)
    bundle.getString(msg)
  }

  /**
   * Looks up `msg` in the bundle and formats it with
   * `java.text.MessageFormat`, substituting `args` into the placeholders.
   */
  def apply(msg: String, args: Any*)(locale: Locale=Locale.getDefault, context:String="messages"): String = {
    // MessageFormat.format expects java.lang.Object arguments, hence the
    // boxing cast on each element.
    new MessageFormat(raw(msg)(locale, context), locale).format(args.map(_.asInstanceOf[java.lang.Object]).toArray)
  }
}
object LocalizedString extends LocalizedString
// @see https://gist.github.com/alaz/1388917
// @see http://stackoverflow.com/questions/4659929/how-to-use-utf-8-in-resource-properties-with-resourcebundle
private[utils] object UTF8BundleControl extends ResourceBundle.Control {
val Format = "properties.utf8"
  // Advertise only our custom UTF-8 properties format, so newBundle below is
  // the sole loader consulted for bundles using this control.
  override def getFormats(baseName: String): java.util.List[String] = {
    import collection.JavaConverters._
    Seq(Format).asJava
  }
  // Fall back to the JVM default locale; returning null when the requested
  // locale already is the default terminates the fallback chain.
  override def getFallbackLocale(baseName: String, locale: Locale) =
    if (locale == Locale.getDefault) null
    else Locale.getDefault
  // Loads a .properties resource and wraps it in a PropertyResourceBundle,
  // reading the stream through a UTF-8 InputStreamReader instead of the
  // ISO-8859-1 default used by the stock implementation.
  override def newBundle(baseName: String, locale: Locale, fmt: String, loader: ClassLoader, reload: Boolean): ResourceBundle = {
    import java.util.PropertyResourceBundle
    import java.io.InputStreamReader

    // The below is an approximate copy of the default Java implementation
    def resourceName = toResourceName(toBundleName(baseName, locale), "properties")
    def stream =
      if (reload) {
        // On reload, bypass the URLConnection cache so the file is re-read.
        for {url <- Option(loader getResource resourceName)
             connection <- Option(url.openConnection)}
        yield {
          connection.setUseCaches(false)
          connection.getInputStream
        }
      } else
        Option(loader getResourceAsStream resourceName)

    // Only handle our own format marker; otherwise yield null so the
    // ResourceBundle machinery can try other formats/controls.
    (for {format <- Option(fmt) if format == Format
          is <- stream}
    yield new PropertyResourceBundle(new InputStreamReader(is, "UTF-8"))).orNull
  }
} | Kraagen/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/utils/LocalizedString.scala | Scala | apache-2.0 | 2,566 |
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.krasserm.ases
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.{AkkaPersistenceEventLog, KafkaEventLog, KafkaSpec}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}
import scala.collection.immutable.Seq
object EventSourcingSpec {
  import EventSourcing._

  // Protocol of the event-sourced counter exercised by the tests.
  sealed trait Request
  sealed trait Event
  case object GetState extends Request
  case class Increment(delta: Int) extends Request
  case class ClearIfEqualTo(value: Int) extends Request
  case class Response(state: Int)
  case class Incremented(delta: Int) extends Event
  case object Cleared extends Event

  // Maps a (state, request) pair to either an immediate response (queries)
  // or emitted events plus a response built from the updated state.
  val requestHandler: RequestHandler[Int, Event, Request, Response] = {
    case (s, GetState) =>
      respond(Response(s))
    case (_, Increment(d)) =>
      emit(Seq(Incremented(d)), Response)
    case (s, ClearIfEqualTo(v)) =>
      // Only clear when the current state equals the requested value;
      // otherwise answer with the unchanged state.
      if (s == v) emit(Seq(Cleared), Response)
      else respond(Response(s))
  }

  // Folds events into the counter state.
  val eventHandler: EventHandler[Int, Event] = {
    case (s, Incremented(delta)) =>
      s + delta
    case (_, Cleared) =>
      0
  }
}
class EventSourcingSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
import EventSourcingSpec._
implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))
val akkaPersistenceEventLog: AkkaPersistenceEventLog =
new log.AkkaPersistenceEventLog(journalId = "akka.persistence.journal.inmem")
val kafkaEventLog: KafkaEventLog =
new log.KafkaEventLog(host, port)
def testEventLog[A](emitted: Seq[Emitted[A]] = Seq.empty): Flow[Emitted[A], Delivery[Durable[A]], NotUsed] =
Flow[Emitted[A]]
.zipWithIndex.map { case (e, i) => e.durable(i) }
.map(Delivered(_))
.prepend(Source.single(Recovered))
.prepend(Source(durables(emitted)).map(Delivered(_)))
"An EventSourcing stage" when {
"joined with a test event log" must {
val processor: Flow[Request, Response, NotUsed] =
EventSourcing(emitterId, 0, requestHandler, eventHandler).join(testEventLog())
"consume commands and produce responses" in {
val commands = Seq(1, -4, 7).map(Increment)
val expected = Seq(1, -3, 4).map(Response)
Source(commands).via(processor).runWith(Sink.seq).futureValue should be(expected)
}
"consume queries and produce responses" in {
val commands = Seq(1, 0, 7).map {
case 0 => GetState
case i => Increment(i)
}
val expected = Seq(1, 1, 8).map(Response)
Source(commands).via(processor).runWith(Sink.seq).futureValue should be(expected)
}
}
"joined with a non-empty test event log" must {
def processor(replay: Seq[Emitted[Incremented]]): Flow[Request, Response, NotUsed] =
EventSourcing(emitterId, 0, requestHandler, eventHandler).join(testEventLog(replay))
"first recover state and then consume commands and produce responses" in {
val commands = Seq(-4, 7).map(Increment)
val expected = Seq(-3, 4).map(Response)
Source(commands).via(processor(Seq(Emitted(Incremented(1), emitterId)))).runWith(Sink.seq).futureValue should be(expected)
}
"first recover state and then consume state-dependent command with correct state" in {
Source.single(ClearIfEqualTo(5)).via(processor(Seq(Emitted(Incremented(5), emitterId))))
.runWith(Sink.seq).futureValue should be(Seq(Response(0)))
}
"first recover state and then consume command followed by state dependent command with correct state" in {
Source(Seq(Increment(0), ClearIfEqualTo(5))).via(processor(Seq(Emitted(Incremented(5), emitterId))))
.runWith(Sink.seq).futureValue should be(Seq(Response(5), Response(0)))
}
"first recover state and then consume command and produce response" in {
Source.single(Increment(2)).via(processor(Seq(Emitted(Incremented(1), emitterId))))
.runWith(Sink.seq).futureValue should be(Seq(Response(3)))
}
"first recover state and then consume query and produce response" in {
Source.single(GetState).via(processor(Seq(Emitted(Incremented(1), emitterId))))
.runWith(Sink.seq).futureValue should be(Seq(Response(1)))
}
}
"joined with an Akka Persistence event log" must {
def processor(persistenceId: String): Flow[Request, Response, NotUsed] =
EventSourcing(emitterId, 0, requestHandler, eventHandler).join(akkaPersistenceEventLog.flow(persistenceId))
"consume commands and produce responses" in {
val persistenceId = "pid-1"
val commands = Seq(1, -4, 7).map(Increment)
val expected = Seq(1, -3, 4).map(Response)
Source(commands).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(expected)
}
"first recover state and then consume commands and produce responses" in {
val persistenceId = "pid-2"
Source.single(Emitted(Incremented(1), emitterId)).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
val commands = Seq(-4, 7).map(Increment)
val expected = Seq(-3, 4).map(Response)
Source(commands).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(expected)
}
"first recover state and then consume state-dependent command with correct state" in {
val persistenceId = "pid-3"
Source.single(Emitted(Incremented(5), emitterId)).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
Source.single(ClearIfEqualTo(5)).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(Seq(Response(0)))
}
"first recover state and then consume command followed by state dependent command with correct state" in {
val persistenceId = "pid-4"
Source.single(Emitted(Incremented(5), emitterId)).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
Source(Seq(Increment(0), ClearIfEqualTo(5))).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(Seq(Response(5), Response(0)))
}
"first recover state and then consume command and produce response" in {
val persistenceId = "pid-5"
Source.single(Emitted(Incremented(1), emitterId)).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
Source.single(Increment(2)).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(Seq(Response(3)))
}
"first recover state and then consume query and produce response" in {
val persistenceId = "pid-6"
Source.single(Emitted(Incremented(1), emitterId)).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
Source.single(GetState).via(processor(persistenceId)).runWith(Sink.seq).futureValue should be(Seq(Response(1)))
}
}
"joined with a Kafka event log" must {
def processor(topicPartition: TopicPartition): Flow[Request, Response, NotUsed] =
EventSourcing(emitterId, 0, requestHandler, eventHandler).join(kafkaEventLog.flow(topicPartition))
"consume commands and produce responses" in {
val topicPartition = new TopicPartition("p-1", 0)
val commands = Seq(1, -4, 7).map(Increment)
val expected = Seq(1, -3, 4).map(Response)
Source(commands).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(expected)
}
"first recover state and then consume commands and produce responses" in {
val topicPartition = new TopicPartition("p-2", 0)
Source.single(Emitted(Incremented(1), emitterId)).runWith(kafkaEventLog.sink(topicPartition)).futureValue
val commands = Seq(-4, 7).map(Increment)
val expected = Seq(-3, 4).map(Response)
Source(commands).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(expected)
}
"first recover state and then consume state-dependent command with correct state" in {
val topicPartition = new TopicPartition("p-3", 0)
Source.single(Emitted(Incremented(5), emitterId)).runWith(kafkaEventLog.sink(topicPartition)).futureValue
Source.single(ClearIfEqualTo(5)).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(Seq(Response(0)))
}
"first recover state and then consume command followed by state dependent command with correct state" in {
val topicPartition = new TopicPartition("p-4", 0)
Source.single(Emitted(Incremented(5), emitterId)).runWith(kafkaEventLog.sink(topicPartition)).futureValue
Source(Seq(Increment(0), ClearIfEqualTo(5))).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(Seq(Response(5), Response(0)))
}
"first recover state and then consume command and produce response" in {
val topicPartition = new TopicPartition("p-5", 0)
Source.single(Emitted(Incremented(1), emitterId)).runWith(kafkaEventLog.sink(topicPartition)).futureValue
Source.single(Increment(2)).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(Seq(Response(3)))
}
"first recover state and then consume query and produce response" in {
val topicPartition = new TopicPartition("p-6", 0)
Source.single(Emitted(Incremented(1), emitterId)).runWith(kafkaEventLog.sink(topicPartition)).futureValue
Source.single(GetState).via(processor(topicPartition)).runWith(Sink.seq).futureValue should be(Seq(Response(1)))
}
}
}
}
| krasserm/akka-stream-eventsourcing | src/test/scala/com/github/krasserm/ases/EventSourcingSpec.scala | Scala | apache-2.0 | 10,376 |
package sparklyr
import java.util.concurrent.ConcurrentHashMap
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import scala.util.Random
object SamplingUtils {
  type SamplesPQ = BoundedPriorityQueue[Sample]

  /**
   * A candidate row tagged with its sampling priority; rows with larger
   * priorities are preferred when a reservoir overflows.
   */
  private[sparklyr] case class Sample(val priority: Double, val row: Row) extends Ordered[Sample] {
    override def compare(that: Sample): Int = {
      // Total order on doubles (well-defined for NaN and +/-0.0), unlike
      // the previous subtraction-plus-signum comparison.
      java.lang.Double.compare(priority, that.priority)
    }
  }

  // Mapping function for ConcurrentHashMap.computeIfAbsent: lazily creates
  // one seeded PRNG per distinct seed value.
  private[this] case class PRNG() extends java.util.function.Function[Long, Random] {
    override def apply(x: Long): Random = new Random(x)
  }

  /**
   * Weighted sampling without replacement: every row with positive weight
   * receives priority log(u) / weight and the k highest-priority rows are
   * kept in a bounded priority queue (reservoir-style sampling).
   *
   * @param rdd          rows to sample from
   * @param weightColumn optional column holding per-row sampling weights
   * @param k            number of rows to sample
   * @param seed         base random seed (offset by partition id)
   */
  def sampleWithoutReplacement(
    rdd: RDD[Row],
    weightColumn: String,
    k: Int,
    seed: Long
  ): RDD[Row] = {
    val sc = rdd.context
    if (0 == k) {
      sc.emptyRDD
    } else {
      val prngState = new ConcurrentHashMap[Long, Random]
      val samples = rdd.aggregate(
        zeroValue = new SamplesPQ(k)
      )(
        seqOp = (pq: SamplesPQ, row: Row) => {
          val weight = extractWeightValue(row, weightColumn)
          // Rows with non-positive weight are never eligible for sampling.
          if (weight > 0) {
            val sampleSeed = seed + TaskContext.getPartitionId
            val random = prngState.computeIfAbsent(
              sampleSeed,
              new PRNG
            )
            val sample = Sample(genSamplePriority(weight, random), row)
            pq += sample
          }
          pq
        },
        combOp = (pq1: SamplesPQ, pq2: SamplesPQ) => {
          pq1 ++= pq2
          pq1
        }
      )
      sc.parallelize(samples.toSeq.map(x => x.row))
    }
  }

  /**
   * Weighted sampling with replacement: k independent single-slot
   * reservoirs are maintained per partition and merged slot-wise, so each
   * of the k output rows is drawn independently of the others.
   */
  def sampleWithReplacement(
    rdd: RDD[Row],
    weightColumn: String,
    k: Int,
    seed: Long
  ): RDD[Row] = {
    val sc = rdd.context
    if (0 == k) {
      sc.emptyRDD
    } else {
      val mapRDDs = rdd.mapPartitionsWithIndex { (index, iter) =>
        val random = new Random(seed + index)
        // One slot per output sample, seeded with an impossible priority.
        val samples = Array.fill[Sample](k)(
          Sample(Double.NegativeInfinity, null)
        )
        for (row <- iter) {
          val weight = extractWeightValue(row, weightColumn)
          if (weight > 0)
            Range(0, k).foreach(idx => {
              val replacement = Sample(genSamplePriority(weight, random), row)
              if (samples(idx) < replacement)
                samples(idx) = replacement
            })
        }
        Iterator.single(samples)
      }
      if (0 == mapRDDs.partitions.length) {
        sc.emptyRDD
      } else {
        sc.parallelize(
          mapRDDs.reduce(
            (s1, s2) => {
              // Keep the better candidate per slot across partitions.
              Range(0, k).foreach(idx => {
                if (s1(idx) < s2(idx)) s1(idx) = s2(idx)
              })
              s1
            }
          ).map(x => x.row)
        )
      }
    }
  }

  // Generates a sampling priority log(u) / weight for one row given the
  // row's weight and a source of randomness (larger priority wins).
  private[sparklyr] def genSamplePriority(weight: Double, random: Random): Double = {
    scala.math.log(random.nextDouble) / weight
  }

  // Reads the row's sampling weight; defaults to 1.0 when no weight column
  // is configured.
  private[sparklyr] def extractWeightValue(row: Row, weightColumn: String): Double = {
    if (null == weightColumn || weightColumn.isEmpty) {
      1.0
    } else {
      Utils.asDouble(row.get(row.fieldIndex(weightColumn)))
    }
  }
}
| rstudio/sparklyr | java/spark-1.5.2/samplingutils.scala | Scala | apache-2.0 | 3,220 |
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT
package doobie.issue
import cats._
import cats.syntax.all._
import cats.effect.IO
import doobie._, doobie.implicits._
import org.scalacheck.Prop.forAll
import scala.Predef._
// Regression test for doobie issue #706: Update#updateMany must report the
// correct row count for arbitrary (including empty) input collections.
class `706` extends munit.ScalaCheckSuite {
  import cats.effect.unsafe.implicits.global

  // In-memory H2 database; DB_CLOSE_DELAY=-1 keeps it alive across connections.
  val xa = Transactor.fromDriverManager[IO](
    "org.h2.Driver",
    "jdbc:h2:mem:issue-706;DB_CLOSE_DELAY=-1",
    "sa", ""
  )

  // Creates the single-column table the property below inserts into.
  val setup: ConnectionIO[Unit] =
    sql"CREATE TABLE IF NOT EXISTS test (value INTEGER)".update.run.void

  // Batch-inserts every element of `as` and yields the affected-row count.
  def insert[F[_]: Foldable, A: Write](as: F[A]): ConnectionIO[Int] =
    Update[A]("INSERT INTO test VALUES (?)").updateMany(as)

  test("updateMany should work correctly for valid inputs") {
    forAll { (ns: List[Int]) =>
      val prog = setup *> insert(ns)
      // The reported count must equal the number of inserted elements.
      assertEquals(prog.transact(xa).unsafeRunSync(), ns.length)
    }
  }

  // TODO: add a case for invalid inputs if we can find one that doesn't cause an
  // exception to be thrown.
}
| tpolecat/doobie | modules/core/src/test/scala/doobie/issue/706.scala | Scala | mit | 1,147 |
package api
import java.io.File
import org.json4s.JInt
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import common.DsmoqSpec
import dsmoq.controllers.AjaxResponse
import dsmoq.persistence.PostgresqlHelper._
import dsmoq.persistence.User
import dsmoq.services.json.DatasetData.{ Dataset, DatasetFile }
import dsmoq.services.json.RangeSlice
import scalikejdbc._
class AuthenticationSpec extends DsmoqSpec {
private val dummyFile = new File("../README.md")
"Authentication test" - {
"to api" in {
for {
sessionUser <- Seq(true, false)
allowGuest <- Seq(true, false)
headers <- testHeaders
} {
authenticationCheckForDataset(sessionUser, allowGuest, headers)(datasetExpected(sessionUser, allowGuest, headers))
}
}
"to file" in {
for {
sessionUser <- Seq(true, false)
allowGuest <- Seq(true, false)
headers <- testHeaders
} {
authenticationCheckForFile(sessionUser, allowGuest, headers)(fileExpected(sessionUser, allowGuest, headers))
}
}
}
"Disabled user" - {
"Authorization Header" in {
val datasetId = session {
signIn()
createDataset(true)
}
disableDummy1()
val apiKey = "5dac067a4c91de87ee04db3e3c34034e84eb4a599165bcc9741bb9a91e8212cb"
val signature = "nFGVWB7iGxemC2D0wQ177hjla7Q%3D"
val headers = Map("Authorization" -> s"api_key=${apiKey},signature=${signature}")
get(s"/api/datasets/${datasetId}", headers = headers) {
checkStatus(403, Some("Unauthorized"))
}
}
"Session" in {
disableDummy1()
session {
post("/api/signin", params = Map("d" -> compact(render(("id" -> "dummy1") ~ ("password" -> "password"))))) {
checkStatus(400, Some("BadRequest"))
}
}
}
}
private def testHeaders: Seq[Map[String, String]] = {
val es: Seq[Map[String, String]] = Seq(
Map.empty,
Map("Authorization" -> ""),
Map("Authorization" -> ",,,"),
Map("Authorization" -> ",,,hoge=piyo")
)
val ns: Seq[Map[String, String]] = for {
apiKey <- Seq("", "hello", "5dac067a4c91de87ee04db3e3c34034e84eb4a599165bcc9741bb9a91e8212cb")
signature <- Seq("", "world", "nFGVWB7iGxemC2D0wQ177hjla7Q%3D")
ext <- Seq("", ",a=b")
} yield {
Map("Authorization" -> s"api_key=${apiKey},signature=${signature}${ext}")
}
es ++ ns
}
private def datasetExpected(sessionUser: Boolean, allowGuest: Boolean, headers: Map[String, String]): Unit = {
headers.get("Authorization") match {
case None | Some("") => {
checkStatus(if (sessionUser || allowGuest) 200 else 403, Some(if (sessionUser || allowGuest) "OK" else "AccessDenied"))
}
case Some(v) if v.startsWith("api_key=5dac067a4c91de87ee04db3e3c34034e84eb4a599165bcc9741bb9a91e8212cb,signature=nFGVWB7iGxemC2D0wQ177hjla7Q") => {
checkStatus()
}
case Some(_) => {
checkStatus(403, Some("Unauthorized"))
}
}
}
private def fileExpected(sessionUser: Boolean, allowGuest: Boolean, headers: Map[String, String]): Unit = {
headers.get("Authorization") match {
case None | Some("") => {
if (sessionUser || allowGuest) checkResonse() else checkResonse(403, Some("Access Denied"))
}
case Some(v) if v.startsWith("api_key=5dac067a4c91de87ee04db3e3c34034e84eb4a599165bcc9741bb9a91e8212cb,signature=nFGVWB7iGxemC2D0wQ177hjla7Q") => {
checkResonse()
}
case Some(_) => {
checkResonse(403, Some("Unauthorized"))
}
}
}
def authenticationCheckForDataset(
sessionUser: Boolean = false,
allowGuest: Boolean = false,
headers: Map[String, String] = Map.empty
)(expected: => Any): Unit = {
withClue(s"for dataset - sessionUser: ${sessionUser}, allowGuest: ${allowGuest}, headers: ${headers}") {
val datasetId = session {
signIn()
createDataset(allowGuest)
}
session {
if (sessionUser) {
signIn()
}
get(s"/api/datasets/${datasetId}", headers = headers) {
expected
}
}
}
}
def authenticationCheckForFile(
sessionUser: Boolean = false,
allowGuest: Boolean = false,
headers: Map[String, String] = Map.empty
)(expected: => Any): Unit = {
withClue(s"for file - sessionUser: ${sessionUser}, allowGuest: ${allowGuest}, headers: ${headers}") {
val (datasetId, fileId) = session {
signIn()
val datasetId = createDataset(allowGuest = allowGuest, file = Some(dummyFile))
val fileId = get(s"/api/datasets/${datasetId}/files") {
checkStatus()
val files = parse(body).extract[AjaxResponse[RangeSlice[DatasetFile]]].data.results
val file = files.headOption.orNull
file should not be (null)
file.id
}
(datasetId, fileId)
}
session {
if (sessionUser) {
signIn()
}
get(s"/files/${datasetId}/${fileId}", headers = headers) {
expected
}
}
}
}
def disableDummy1() {
DB.localTx { implicit s =>
withSQL {
val u = User.column
update(User)
.set(u.disabled -> true)
.where
.eqUuid(u.id, "023bfa40-e897-4dad-96db-9fd3cf001e79")
}.update.apply()
}
}
def checkResonse(expectedCode: Int = 200, expectedBody: Option[String] = None) {
status should be(expectedCode)
expectedBody.foreach { expected =>
body should be(expected)
}
}
}
| nkawa/dsmoq | server/apiServer/src/test/scala/api/AuthenticationSpec.scala | Scala | apache-2.0 | 5,592 |
/*
* Copyright 2014, Luca Rosellini.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kina.examples.scala
import kina.rdd._
import kina.config.CassandraConfigFactory
import kina.context.{KinaContext, CassandraKinaContext}
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import kina.testutils.ContextProperties
import kina.testentity.TweetEntity
/**
* Author: Emmanuelle Raffenne
* Date..: 3-mar-2014
*/
object GroupingByKey {

  /**
   * Demonstrates groupByKey on a Cassandra-backed RDD: tweets are keyed by
   * author, grouped, and the per-author group sizes are printed together
   * with overall totals.
   */
  def main(args: Array[String]): Unit = {
    val jobName = "scala:groupingByKey"
    val keyspaceName = "test"
    val tableName = "tweets"

    // Kina context, configured from the Spark master / job name arguments.
    val props = new ContextProperties(args)
    val kinaContext = new CassandraKinaContext(props.getCluster, jobName, props.getSparkHome, props.getJars)

    // RDD configuration bound to the target keyspace and table.
    val config = CassandraConfigFactory.create(classOf[TweetEntity])
      .host(props.getCassandraHost).cqlPort(props.getCassandraCqlPort).rpcPort(props.getCassandraThriftPort)
      .keyspace(keyspaceName).table(tableName)
      .initialize

    val tweets: RDD[TweetEntity] = kinaContext.cassandraRDD(config)

    // Key each tweet by its author, group by author, and count group sizes.
    val byAuthor: RDD[(String, TweetEntity)] = tweets.map(tweet => (tweet.getAuthor, tweet))
    val grouped: RDD[(String, Iterable[TweetEntity])] = byAuthor.groupByKey
    val counts: RDD[(String, Int)] = grouped.map { case (author, entries) => (author, entries.size) }

    // Bring the (author, count) pairs back to the driver and report them.
    val result: Array[(String, Int)] = counts.collect()

    println("This is the groupByKey")
    var total: Int = 0
    var authors: Int = 0
    result.foreach { case (author, count) =>
      println(author + ": " + count.toString)
      total = total + count
      authors = authors + 1
    }
    println(" total: " + total.toString + " authors: " + authors.toString)
  }
}
| lucarosellini/kina | kina-examples/src/main/scala/kina/examples/scala/GroupingByKey.scala | Scala | apache-2.0 | 2,502 |
/*
* Originally (c) 2014 Dmitry Leskov, http://www.dmitryleskov.com
* Released into the public domain under the Unlicense, http://unlicense.org
*/
package streamhygiene
package part3
import scala.annotation.tailrec
import scalaz._
import scalaz.EphemeralStream._
import Test._
// Three equivalent ways of summing an EphemeralStream[Int], defined in a
// trait to show that trait-level consumers need no special treatment.
trait EphemeralStreamConsumers {
  // Sum via an inner tail-recursive loop with an explicit accumulator.
  def traitSum(xs: EphemeralStream[Int]): Int = {
    @tailrec
    def loop(acc: Int, xs: EphemeralStream[Int]): Int =
      if (xs.isEmpty) acc else loop(acc+xs.head(), xs.tail())
    loop(0, xs)
  }

  // Sum as a directly tail-recursive method; the accumulator is a defaulted
  // parameter so callers can invoke it with the stream alone.
  @tailrec
  final def traitSumTailRec(xs: EphemeralStream[Int], z: Int = 0): Int = {
    if (xs.isEmpty) z else traitSumTailRec(xs.tail(), z + xs.head())
  }

  // Imperative sum using a mutable cursor over the stream.
  def traitSumImperative(xs: EphemeralStream[Int]): Int = {
    var scan = xs
    var res = 0
    while (!scan.isEmpty) {
      res += scan.head()
      scan = scan.tail()
    }
    res
  }
}
object Ephemeral extends AutoConfig with EphemeralStreamConsumers {
/*
* No Rule #1: Storing an EphemeralStream in a 'val' is no problem.
*/
def ones: EphemeralStream[Int] = 1 ##:: ones
val input = ones take problemSize
test("input.length"){
input.length
}
/*
* No Rule #2: Functions consuming EphemeralStreams do not need to be tail-recursive.
*/
test("Imperative sum(input)"){
def sum(xs: EphemeralStream[Int]): Int = {
var scan = xs
var res = 0
while (!scan.isEmpty) {
res += scan.head()
scan = scan.tail()
}
res
}
sum(input)
}
/*
* No Rule #3: No need to use by-name parameters in intermediate functions.
*/
test("sum(input) holding reference to its parameter"){
def sum(xs: EphemeralStream[Int]): Int = {
@tailrec
def loop(acc: Int, xs: EphemeralStream[Int]): Int =
if (xs.isEmpty) acc else loop(acc+xs.head(), xs.tail())
loop(0, xs.tail()) + xs.head()
}
sum(input)
}
/*
* No Rule #3 corollary: No need to use by-name parameters in
* consuming functions defined in traits.
*/
test("traitSum(input)"){traitSum(input)}
test("traitSumTailRec(input)"){traitSumTailRec(input)}
test("traitSumImperative(input)"){traitSumImperative(input)}
/*
* No Rule #4: Pattern matching on EphemeralStreams is okay.
* There is no equivalent of Stream.Empty, though.
*/
test("sumPatMat(input)"){
def sumPatMat(xs: EphemeralStream[Int]): Int = {
@tailrec
def loop(acc: Int, xs: EphemeralStream[Int]): Int =
xs match {
case x ##:: xs => loop(acc+x, xs)
case _ => acc
}
xs match {
case h ##:: t => loop(0, xs)
case _ => 0
}
}
sumPatMat(input)
}
test("tailAvg(input)"){
def tailAvg(xs: EphemeralStream[Int]): Option[Int] = {
xs match {
case y ##:: ys => Some(ys.sum / ys.length)
case _ => None
}
}
tailAvg(input)
}
/*
* No Rule #5. All eager stream-consuming methods work (if the stream is finite).
* Not all Stream methods are available, however, and
* EphemeralStream's own methods need special treatment.
*/
test("input.foldLeft(0)(x => y => x + y)"){input.foldLeft(0)(x => y => x + y)}
test("input.reduceLeft(_ + _)"){input.reduceLeft(_ + _)}
test("{var sum = 0; input.foreach(x => sum += x); sum}"){
var sum = 0; input.foreach(x => sum += x); sum
}
test("input.sum"){input.sum}
test("(0 /: input)(_ + _)"){(0 /: input)(_ + _)}
test("input forall (_ == 1)"){input forall (_ == 1)}
test("input exists (_ != 1)"){input exists (_ != 1)}
test("input find (_ != 1)"){input find (_ != 1)}
} | dmitryleskov/stream-hygiene | src/streamhygiene/part3/Ephemeral.scala | Scala | unlicense | 3,662 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg.distributed
import java.util.Arrays
import scala.collection.mutable.ListBuffer
import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV, SparseVector => BSV, axpy => brzAxpy,
svd => brzSvd}
import breeze.numerics.{sqrt => brzSqrt}
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.spark.Logging
import org.apache.spark.SparkContext._
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.stat.{MultivariateOnlineSummarizer, MultivariateStatisticalSummary}
import org.apache.spark.rdd.RDD
import org.apache.spark.util.random.XORShiftRandom
import org.apache.spark.storage.StorageLevel
/**
* :: Experimental ::
* Represents a row-oriented distributed Matrix with no meaningful row indices.
*
* @param rows rows stored as an RDD[Vector]
* @param nRows number of rows. A non-positive value means unknown, and then the number of rows will
* be determined by the number of records in the RDD `rows`.
* @param nCols number of columns. A non-positive value means unknown, and then the number of
* columns will be determined by the size of the first row.
*/
@Experimental
class RowMatrix(
    val rows: RDD[Vector],
    private var nRows: Long,
    private var nCols: Int) extends DistributedMatrix with Logging {
  // nRows / nCols are filled in lazily; values <= 0 mean "not yet known".
  /** Alternative constructor leaving matrix dimensions to be determined automatically. */
  def this(rows: RDD[Vector]) = this(rows, 0L, 0)
  /** Gets or computes the number of columns. */
  override def numCols(): Long = {
    if (nCols <= 0) {
      try {
        // Calling `first` will throw an exception if `rows` is empty.
        nCols = rows.first().size
      } catch {
        case err: UnsupportedOperationException =>
          sys.error("Cannot determine the number of cols because it is not specified in the " +
            "constructor and the rows RDD is empty.")
      }
    }
    nCols
  }
  /** Gets or computes the number of rows. */
  override def numRows(): Long = {
    if (nRows <= 0L) {
      nRows = rows.count()
      if (nRows == 0L) {
        sys.error("Cannot determine the number of rows because it is not specified in the " +
          "constructor and the rows RDD is empty.")
      }
    }
    nRows
  }
  /**
   * Multiplies the Gramian matrix `A^T A` by a dense vector on the right without computing `A^T A`.
   *
   * @param v a dense vector whose length must match the number of columns of this matrix
   * @return a dense vector representing the product
   */
  private[mllib] def multiplyGramianMatrixBy(v: BDV[Double]): BDV[Double] = {
    val n = numCols().toInt
    val vbr = rows.context.broadcast(v)
    // (A^T A) v = sum over rows r of (r . v) * r, accumulated per partition and tree-combined.
    rows.treeAggregate(BDV.zeros[Double](n))(
      seqOp = (U, r) => {
        val rBrz = r.toBreeze
        val a = rBrz.dot(vbr.value)
        rBrz match {
          // use specialized axpy for better performance
          case _: BDV[_] => brzAxpy(a, rBrz.asInstanceOf[BDV[Double]], U)
          case _: BSV[_] => brzAxpy(a, rBrz.asInstanceOf[BSV[Double]], U)
          case _ => throw new UnsupportedOperationException(
            s"Do not support vector operation from type ${rBrz.getClass.getName}.")
        }
        U
      }, combOp = (U1, U2) => U1 += U2)
  }
  /**
   * Computes the Gramian matrix `A^T A`.
   */
  def computeGramianMatrix(): Matrix = {
    val n = numCols().toInt
    checkNumColumns(n)
    // Computes n*(n+1)/2, avoiding overflow in the multiplication.
    // This succeeds when n <= 65535, which is checked above
    val nt: Int = if (n % 2 == 0) ((n / 2) * (n + 1)) else (n * ((n + 1) / 2))
    // Compute the upper triangular part of the gram matrix.
    val GU = rows.treeAggregate(new BDV[Double](new Array[Double](nt)))(
      seqOp = (U, v) => {
        RowMatrix.dspr(1.0, v, U.data)
        U
      }, combOp = (U1, U2) => U1 += U2)
    RowMatrix.triuToFull(n, GU.data)
  }
  private def checkNumColumns(cols: Int): Unit = {
    if (cols > 65535) {
      throw new IllegalArgumentException(s"Argument with more than 65535 cols: $cols")
    }
    if (cols > 10000) {
      val memMB = (cols.toLong * cols) / 125000
      logWarning(s"$cols columns will require at least $memMB megabytes of memory!")
    }
  }
  /**
   * Computes singular value decomposition of this matrix. Denote this matrix by A (m x n). This
   * will compute matrices U, S, V such that A ~= U * S * V', where S contains the leading k
   * singular values, U and V contain the corresponding singular vectors.
   *
   * At most k largest non-zero singular values and associated vectors are returned. If there are k
   * such values, then the dimensions of the return will be:
   *  - U is a RowMatrix of size m x k that satisfies U' * U = eye(k),
   *  - s is a Vector of size k, holding the singular values in descending order,
   *  - V is a Matrix of size n x k that satisfies V' * V = eye(k).
   *
   * We assume n is smaller than m. The singular values and the right singular vectors are derived
   * from the eigenvalues and the eigenvectors of the Gramian matrix A' * A. U, the matrix
   * storing the right singular vectors, is computed via matrix multiplication as
   * U = A * (V * S^-1^), if requested by user. The actual method to use is determined
   * automatically based on the cost:
   *  - If n is small (n < 100) or k is large compared with n (k > n / 2), we compute
   *    the Gramian matrix first and then compute its top eigenvalues and eigenvectors locally
   *    on the driver. This requires a single pass with O(n^2^) storage on each executor and
   *    on the driver, and O(n^2^ k) time on the driver.
   *  - Otherwise, we compute (A' * A) * v in a distributive way and send it to ARPACK's DSAUPD to
   *    compute (A' * A)'s top eigenvalues and eigenvectors on the driver node. This requires O(k)
   *    passes, O(n) storage on each executor, and O(n k) storage on the driver.
   *
   * Several internal parameters are set to default values. The reciprocal condition number rCond
   * is set to 1e-9. All singular values smaller than rCond * sigma(0) are treated as zeros, where
   * sigma(0) is the largest singular value. The maximum number of Arnoldi update iterations for
   * ARPACK is set to 300 or k * 3, whichever is larger. The numerical tolerance for ARPACK's
   * eigen-decomposition is set to 1e-10.
   *
   * @note The conditions that decide which method to use internally and the default parameters are
   *       subject to change.
   *
   * @param k number of leading singular values to keep (0 < k <= n).
   *          It might return less than k if
   *          there are numerically zero singular values or there are not enough Ritz values
   *          converged before the maximum number of Arnoldi update iterations is reached (in case
   *          that matrix A is ill-conditioned).
   * @param computeU whether to compute U
   * @param rCond the reciprocal condition number. All singular values smaller than rCond * sigma(0)
   *              are treated as zero, where sigma(0) is the largest singular value.
   * @return SingularValueDecomposition(U, s, V). U = null if computeU = false.
   */
  def computeSVD(
      k: Int,
      computeU: Boolean = false,
      rCond: Double = 1e-9): SingularValueDecomposition[RowMatrix, Matrix] = {
    // maximum number of Arnoldi update iterations for invoking ARPACK
    val maxIter = math.max(300, k * 3)
    // numerical tolerance for invoking ARPACK
    val tol = 1e-10
    computeSVD(k, computeU, rCond, maxIter, tol, "auto")
  }
  /**
   * The actual SVD implementation, visible for testing.
   *
   * @param k number of leading singular values to keep (0 < k <= n)
   * @param computeU whether to compute U
   * @param rCond the reciprocal condition number
   * @param maxIter max number of iterations (if ARPACK is used)
   * @param tol termination tolerance (if ARPACK is used)
   * @param mode computation mode (auto: determine automatically which mode to use,
   *             local-svd: compute gram matrix and computes its full SVD locally,
   *             local-eigs: compute gram matrix and computes its top eigenvalues locally,
   *             dist-eigs: compute the top eigenvalues of the gram matrix distributively)
   * @return SingularValueDecomposition(U, s, V). U = null if computeU = false.
   */
  private[mllib] def computeSVD(
      k: Int,
      computeU: Boolean,
      rCond: Double,
      maxIter: Int,
      tol: Double,
      mode: String): SingularValueDecomposition[RowMatrix, Matrix] = {
    val n = numCols().toInt
    require(k > 0 && k <= n, s"Requested k singular values but got k=$k and numCols=$n.")
    object SVDMode extends Enumeration {
      val LocalARPACK, LocalLAPACK, DistARPACK = Value
    }
    val computeMode = mode match {
      case "auto" =>
        if (k > 5000) {
          logWarning(s"computing svd with k=$k and n=$n, please check necessity")
        }
        // TODO: The conditions below are not fully tested.
        if (n < 100 || (k > n / 2 && n <= 15000)) {
          // If n is small or k is large compared with n, we better compute the Gramian matrix first
          // and then compute its eigenvalues locally, instead of making multiple passes.
          if (k < n / 3) {
            SVDMode.LocalARPACK
          } else {
            SVDMode.LocalLAPACK
          }
        } else {
          // If k is small compared with n, we use ARPACK with distributed multiplication.
          SVDMode.DistARPACK
        }
      case "local-svd" => SVDMode.LocalLAPACK
      case "local-eigs" => SVDMode.LocalARPACK
      case "dist-eigs" => SVDMode.DistARPACK
      case _ => throw new IllegalArgumentException(s"Do not support mode $mode.")
    }
    // Compute the eigen-decomposition of A' * A.
    val (sigmaSquares: BDV[Double], u: BDM[Double]) = computeMode match {
      case SVDMode.LocalARPACK =>
        require(k < n, s"k must be smaller than n in local-eigs mode but got k=$k and n=$n.")
        val G = computeGramianMatrix().toBreeze.asInstanceOf[BDM[Double]]
        EigenValueDecomposition.symmetricEigs(v => G * v, n, k, tol, maxIter)
      case SVDMode.LocalLAPACK =>
        // breeze (v0.10) svd latent constraint, 7 * n * n + 4 * n < Int.MaxValue
        require(n < 17515, s"$n exceeds the breeze svd capability")
        val G = computeGramianMatrix().toBreeze.asInstanceOf[BDM[Double]]
        val brzSvd.SVD(uFull: BDM[Double], sigmaSquaresFull: BDV[Double], _) = brzSvd(G)
        (sigmaSquaresFull, uFull)
      case SVDMode.DistARPACK =>
        if (rows.getStorageLevel == StorageLevel.NONE) {
          logWarning("The input data is not directly cached, which may hurt performance if its"
            + " parent RDDs are also uncached.")
        }
        require(k < n, s"k must be smaller than n in dist-eigs mode but got k=$k and n=$n.")
        EigenValueDecomposition.symmetricEigs(multiplyGramianMatrixBy, n, k, tol, maxIter)
    }
    // Singular values of A are the square roots of the eigenvalues of A' * A.
    val sigmas: BDV[Double] = brzSqrt(sigmaSquares)
    // Determine the effective rank.
    val sigma0 = sigmas(0)
    val threshold = rCond * sigma0
    var i = 0
    // sigmas might have a length smaller than k, if some Ritz values do not satisfy the convergence
    // criterion specified by tol after max number of iterations.
    // Thus use i < min(k, sigmas.length) instead of i < k.
    if (sigmas.length < k) {
      logWarning(s"Requested $k singular values but only found ${sigmas.length} converged.")
    }
    while (i < math.min(k, sigmas.length) && sigmas(i) >= threshold) {
      i += 1
    }
    val sk = i
    if (sk < k) {
      logWarning(s"Requested $k singular values but only found $sk nonzeros.")
    }
    // Warn at the end of the run as well, for increased visibility.
    if (computeMode == SVDMode.DistARPACK && rows.getStorageLevel == StorageLevel.NONE) {
      logWarning("The input data was not directly cached, which may hurt performance if its"
        + " parent RDDs are also uncached.")
    }
    val s = Vectors.dense(Arrays.copyOfRange(sigmas.data, 0, sk))
    val V = Matrices.dense(n, sk, Arrays.copyOfRange(u.data, 0, n * sk))
    if (computeU) {
      // N = Vk * Sk^{-1}
      val N = new BDM[Double](n, sk, Arrays.copyOfRange(u.data, 0, n * sk))
      var i = 0
      var j = 0
      while (j < sk) {
        i = 0
        val sigma = sigmas(j)
        while (i < n) {
          N(i, j) /= sigma
          i += 1
        }
        j += 1
      }
      val U = this.multiply(Matrices.fromBreeze(N))
      SingularValueDecomposition(U, s, V)
    } else {
      SingularValueDecomposition(null, s, V)
    }
  }
  /**
   * Computes the covariance matrix, treating each row as an observation.
   * @return a local dense matrix of size n x n
   */
  def computeCovariance(): Matrix = {
    val n = numCols().toInt
    checkNumColumns(n)
    val (m, mean) = rows.treeAggregate[(Long, BDV[Double])]((0L, BDV.zeros[Double](n)))(
      seqOp = (s: (Long, BDV[Double]), v: Vector) => (s._1 + 1L, s._2 += v.toBreeze),
      combOp = (s1: (Long, BDV[Double]), s2: (Long, BDV[Double])) =>
        (s1._1 + s2._1, s1._2 += s2._2)
    )
    if (m <= 1) {
      sys.error(s"RowMatrix.computeCovariance called on matrix with only $m rows." +
        " Cannot compute the covariance of a RowMatrix with <= 1 row.")
    }
    updateNumRows(m)
    mean :/= m.toDouble
    // We use the formula Cov(X, Y) = E[X * Y] - E[X] E[Y], which is not accurate if E[X * Y] is
    // large but Cov(X, Y) is small, but it is good for sparse computation.
    // TODO: find a fast and stable way for sparse data.
    val G = computeGramianMatrix().toBreeze.asInstanceOf[BDM[Double]]
    var i = 0
    var j = 0
    val m1 = m - 1.0
    var alpha = 0.0
    // Entrywise: Cov(i, j) = G(i, j) / (m - 1) - (m / (m - 1)) * mean(i) * mean(j).
    while (i < n) {
      alpha = m / m1 * mean(i)
      j = 0
      while (j < n) {
        G(i, j) = G(i, j) / m1 - alpha * mean(j)
        j += 1
      }
      i += 1
    }
    Matrices.fromBreeze(G)
  }
  /**
   * Computes the top k principal components.
   * Rows correspond to observations and columns correspond to variables.
   * The principal components are stored a local matrix of size n-by-k.
   * Each column corresponds for one principal component,
   * and the columns are in descending order of component variance.
   * The row data do not need to be "centered" first; it is not necessary for
   * the mean of each column to be 0.
   *
   * @param k number of top principal components.
   * @return a matrix of size n-by-k, whose columns are principal components
   */
  def computePrincipalComponents(k: Int): Matrix = {
    val n = numCols().toInt
    require(k > 0 && k <= n, s"k = $k out of range (0, n = $n]")
    val Cov = computeCovariance().toBreeze.asInstanceOf[BDM[Double]]
    val brzSvd.SVD(u: BDM[Double], _, _) = brzSvd(Cov)
    if (k == n) {
      Matrices.dense(n, k, u.data)
    } else {
      Matrices.dense(n, k, Arrays.copyOfRange(u.data, 0, n * k))
    }
  }
  /**
   * Computes column-wise summary statistics.
   */
  def computeColumnSummaryStatistics(): MultivariateStatisticalSummary = {
    val summary = rows.treeAggregate(new MultivariateOnlineSummarizer)(
      (aggregator, data) => aggregator.add(data),
      (aggregator1, aggregator2) => aggregator1.merge(aggregator2))
    updateNumRows(summary.count)
    summary
  }
  /**
   * Multiply this matrix by a local matrix on the right.
   *
   * @param B a local matrix whose number of rows must match the number of columns of this matrix
   * @return a [[org.apache.spark.mllib.linalg.distributed.RowMatrix]] representing the product,
   *         which preserves partitioning
   */
  def multiply(B: Matrix): RowMatrix = {
    val n = numCols().toInt
    val k = B.numCols
    require(n == B.numRows, s"Dimension mismatch: $n vs ${B.numRows}")
    require(B.isInstanceOf[DenseMatrix],
      s"Only support dense matrix at this time but found ${B.getClass.getName}.")
    // B is broadcast flattened in column-major order; column i occupies Bi[i * n, (i + 1) * n).
    val Bb = rows.context.broadcast(B.toBreeze.asInstanceOf[BDM[Double]].toDenseVector.toArray)
    val AB = rows.mapPartitions { iter =>
      val Bi = Bb.value
      iter.map { row =>
        val v = BDV.zeros[Double](k)
        var i = 0
        while (i < k) {
          v(i) = row.toBreeze.dot(new BDV(Bi, i * n, 1, n))
          i += 1
        }
        Vectors.fromBreeze(v)
      }
    }
    new RowMatrix(AB, nRows, B.numCols)
  }
  /**
   * Compute all cosine similarities between columns of this matrix using the brute-force
   * approach of computing normalized dot products.
   *
   * @return An n x n sparse upper-triangular matrix of cosine similarities between
   *         columns of this matrix.
   */
  def columnSimilarities(): CoordinateMatrix = {
    columnSimilarities(0.0)
  }
  /**
   * Compute similarities between columns of this matrix using a sampling approach.
   *
   * The threshold parameter is a trade-off knob between estimate quality and computational cost.
   *
   * Setting a threshold of 0 guarantees deterministic correct results, but comes at exactly
   * the same cost as the brute-force approach. Setting the threshold to positive values
   * incurs strictly less computational cost than the brute-force approach, however the
   * similarities computed will be estimates.
   *
   * The sampling guarantees relative-error correctness for those pairs of columns that have
   * similarity greater than the given similarity threshold.
   *
   * To describe the guarantee, we set some notation:
   * Let A be the smallest in magnitude non-zero element of this matrix.
   * Let B be the largest in magnitude non-zero element of this matrix.
   * Let L be the maximum number of non-zeros per row.
   *
   * For example, for {0,1} matrices: A=B=1.
   * Another example, for the Netflix matrix: A=1, B=5
   *
   * For those column pairs that are above the threshold,
   * the computed similarity is correct to within 20% relative error with probability
   * at least 1 - (0.981)^10/B^
   *
   * The shuffle size is bounded by the *smaller* of the following two expressions:
   *
   * O(n log(n) L / (threshold * A))
   * O(m L^2^)
   *
   * The latter is the cost of the brute-force approach, so for non-zero thresholds,
   * the cost is always cheaper than the brute-force approach.
   *
   * @param threshold Set to 0 for deterministic guaranteed correctness.
   *                  Similarities above this threshold are estimated
   *                  with the cost vs estimate quality trade-off described above.
   * @return An n x n sparse upper-triangular matrix of cosine similarities
   *         between columns of this matrix.
   */
  def columnSimilarities(threshold: Double): CoordinateMatrix = {
    require(threshold >= 0, s"Threshold cannot be negative: $threshold")
    if (threshold > 1) {
      logWarning(s"Threshold is greater than 1: $threshold " +
        "Computation will be more efficient with promoted sparsity, " +
        " however there is no correctness guarantee.")
    }
    // A tiny threshold means "sample nothing away": gamma = +Inf disables the sampling.
    val gamma = if (threshold < 1e-6) {
      Double.PositiveInfinity
    } else {
      10 * math.log(numCols()) / threshold
    }
    columnSimilaritiesDIMSUM(computeColumnSummaryStatistics().normL2.toArray, gamma)
  }
  /**
   * Find all similar columns using the DIMSUM sampling algorithm, described in two papers
   *
   * http://arxiv.org/abs/1206.2082
   * http://arxiv.org/abs/1304.1467
   *
   * @param colMags A vector of column magnitudes
   * @param gamma The oversampling parameter. For provable results, set to 10 * log(n) / s,
   *              where s is the smallest similarity score to be estimated,
   *              and n is the number of columns
   * @return An n x n sparse upper-triangular matrix of cosine similarities
   *         between columns of this matrix.
   */
  private[mllib] def columnSimilaritiesDIMSUM(
      colMags: Array[Double],
      gamma: Double): CoordinateMatrix = {
    require(gamma > 1.0, s"Oversampling should be greater than 1: $gamma")
    require(colMags.size == this.numCols(), "Number of magnitudes didn't match column dimension")
    val sg = math.sqrt(gamma) // sqrt(gamma) used many times
    // Don't divide by zero for those columns with zero magnitude
    val colMagsCorrected = colMags.map(x => if (x == 0) 1.0 else x)
    val sc = rows.context
    // p(c) = sqrt(gamma) / ||c|| is used below as the per-column sampling threshold;
    // q(c) = min(sqrt(gamma), ||c||) is the divisor used to scale each entry.
    val pBV = sc.broadcast(colMagsCorrected.map(c => sg / c))
    val qBV = sc.broadcast(colMagsCorrected.map(c => math.min(sg, c)))
    val sims = rows.mapPartitionsWithIndex { (indx, iter) =>
      val p = pBV.value
      val q = qBV.value
      // Seed per partition so the sampling is deterministic for a fixed partitioning.
      val rand = new XORShiftRandom(indx)
      val scaled = new Array[Double](p.size)
      iter.flatMap { row =>
        row match {
          case SparseVector(size, indices, values) =>
            val nnz = indices.size
            var k = 0
            while (k < nnz) {
              scaled(k) = values(k) / q(indices(k))
              k += 1
            }
            Iterator.tabulate (nnz) { k =>
              val buf = new ListBuffer[((Int, Int), Double)]()
              val i = indices(k)
              val iVal = scaled(k)
              if (iVal != 0 && rand.nextDouble() < p(i)) {
                var l = k + 1
                while (l < nnz) {
                  val j = indices(l)
                  val jVal = scaled(l)
                  if (jVal != 0 && rand.nextDouble() < p(j)) {
                    buf += (((i, j), iVal * jVal))
                  }
                  l += 1
                }
              }
              buf
            }.flatten
          case DenseVector(values) =>
            val n = values.size
            var i = 0
            while (i < n) {
              scaled(i) = values(i) / q(i)
              i += 1
            }
            Iterator.tabulate (n) { i =>
              val buf = new ListBuffer[((Int, Int), Double)]()
              val iVal = scaled(i)
              if (iVal != 0 && rand.nextDouble() < p(i)) {
                var j = i + 1
                while (j < n) {
                  val jVal = scaled(j)
                  if (jVal != 0 && rand.nextDouble() < p(j)) {
                    buf += (((i, j), iVal * jVal))
                  }
                  j += 1
                }
              }
              buf
            }.flatten
        }
      }
    }.reduceByKey(_ + _).map { case ((i, j), sim) =>
      MatrixEntry(i.toLong, j.toLong, sim)
    }
    new CoordinateMatrix(sims, numCols(), numCols())
  }
  private[mllib] override def toBreeze(): BDM[Double] = {
    // Collects all rows to the driver; only viable for matrices that fit in driver memory.
    val m = numRows().toInt
    val n = numCols().toInt
    val mat = BDM.zeros[Double](m, n)
    var i = 0
    rows.collect().foreach { vector =>
      vector.foreachActive { case (j, v) =>
        mat(i, j) = v
      }
      i += 1
    }
    mat
  }
  /** Updates or verifies the number of rows. */
  private def updateNumRows(m: Long) {
    if (nRows <= 0) {
      nRows = m
    } else {
      require(nRows == m,
        s"The number of rows $m is different from what specified or previously computed: ${nRows}.")
    }
  }
}
@Experimental
object RowMatrix {
  /**
   * Adds alpha * v * v.t to a matrix in-place. This is the same as BLAS's DSPR.
   *
   * @param alpha scale factor applied to the rank-1 update
   * @param v the vector whose outer product is accumulated
   * @param U the upper triangular part of the matrix packed in an array (column major)
   */
  private def dspr(alpha: Double, v: Vector, U: Array[Double]): Unit = {
    // TODO: Find a better home (breeze?) for this method.
    val n = v.size
    v match {
      case DenseVector(values) =>
        blas.dspr("U", n, alpha, values, 1, U)
      case SparseVector(size, indices, values) =>
        val nnz = indices.length
        var colStartIdx = 0
        var prevCol = 0
        var col = 0
        var j = 0
        var i = 0
        var av = 0.0
        while (j < nnz) {
          col = indices(j)
          // Skip empty columns: advance the packed offset from column prevCol to column col.
          // Column c starts at offset c * (c + 1) / 2 in the packed upper triangle.
          colStartIdx += (col - prevCol) * (col + prevCol + 1) / 2
          av = alpha * values(j)
          i = 0
          // Update entries (indices(i), col) of the stored upper triangle, for i <= j.
          while (i <= j) {
            U(colStartIdx + indices(i)) += av * values(i)
            i += 1
          }
          j += 1
          prevCol = col
        }
      case _ =>
        // Explicit failure keeps error reporting consistent with
        // RowMatrix.multiplyGramianMatrixBy instead of raising an opaque MatchError
        // on unknown Vector implementations.
        throw new UnsupportedOperationException(
          s"Do not support vector operation from type ${v.getClass.getName}.")
    }
  }
  /**
   * Fills a full square matrix from its upper triangular part.
   *
   * @param n the dimension of the (square) output matrix
   * @param U the upper triangle packed column-major, length n * (n + 1) / 2
   * @return a dense n x n symmetric matrix
   */
  private def triuToFull(n: Int, U: Array[Double]): Matrix = {
    val G = new BDM[Double](n, n)
    var row = 0
    var col = 0
    var idx = 0
    var value = 0.0
    while (col < n) {
      row = 0
      // Mirror each strictly-upper entry into both halves.
      while (row < col) {
        value = U(idx)
        G(row, col) = value
        G(col, row) = value
        idx += 1
        row += 1
      }
      // Diagonal entry is copied once.
      G(col, col) = U(idx)
      idx += 1
      col += 1
    }
    Matrices.dense(n, n, G.data)
  }
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala | Scala | apache-2.0 | 25,431 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Benchmarks **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, Jonas Fonseca **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Ported by the Dart team to Dart.
// Ported by Jonas Fonseca to Scala.js.
// This is a Scala implementation of the Richards benchmark from:
//
// http://www.cl.cam.ac.uk/~mr10/Bench.html
//
// The benchmark was originally implemented in BCPL by
// Martin Richards.
package org.scalajs.benchmark.richards
/**
* Richards simulates the task dispatcher of an operating system.
*/
/**
 * Richards simulates the task dispatcher of an operating system.
 */
object Richards extends org.scalajs.benchmark.Benchmark {
  override def prefix = "Richards"
  def run() {
    val scheduler = new Scheduler()
    scheduler.addIdleTask(ID_IDLE, 0, null, COUNT)
    // Builds a worklist of `length` packets, all addressed to task `id`.
    def makeQueue(id: Int, kind: Int, length: Int): Packet = {
      var queue: Packet = null
      var remaining = length
      while (remaining > 0) {
        queue = new Packet(queue, id, kind)
        remaining -= 1
      }
      queue
    }
    scheduler.addWorkerTask(ID_WORKER, 1000, makeQueue(ID_WORKER, KIND_WORK, 2))
    scheduler.addHandlerTask(ID_HANDLER_A, 2000, makeQueue(ID_DEVICE_A, KIND_DEVICE, 3))
    scheduler.addHandlerTask(ID_HANDLER_B, 3000, makeQueue(ID_DEVICE_B, KIND_DEVICE, 3))
    scheduler.addDeviceTask(ID_DEVICE_A, 4000, null)
    scheduler.addDeviceTask(ID_DEVICE_B, 5000, null)
    scheduler.schedule()
    // Validate the run against the known-good packet/hold statistics.
    if (scheduler.queueCount != EXPECTED_QUEUE_COUNT ||
        scheduler.holdCount != EXPECTED_HOLD_COUNT) {
      print(s"Error during execution: queueCount = ${scheduler.queueCount}, holdCount = ${scheduler.holdCount}.")
    }
    if (EXPECTED_QUEUE_COUNT != scheduler.queueCount) {
      throw new Exception("bad scheduler queue-count")
    }
    if (EXPECTED_HOLD_COUNT != scheduler.holdCount) {
      throw new Exception("bad scheduler hold-count")
    }
  }
  // Size of the payload array carried by each packet.
  val DATA_SIZE = 4
  // Number of times the idle task runs before the benchmark stops.
  val COUNT = 1000
  /**
   * These two constants specify how many times a packet is queued and
   * how many times a task is put on hold in a correct run of richards.
   * They don't have any meaning as such, but are characteristic of a
   * correct run, so a deviation from them indicates a bug.
   */
  val EXPECTED_QUEUE_COUNT = 2322
  val EXPECTED_HOLD_COUNT = 928
  // Task identifiers; these double as indices into Scheduler.blocks.
  val ID_IDLE = 0
  val ID_WORKER = 1
  val ID_HANDLER_A = 2
  val ID_HANDLER_B = 3
  val ID_DEVICE_A = 4
  val ID_DEVICE_B = 5
  val NUMBER_OF_IDS = 6
  // Packet kinds.
  val KIND_DEVICE = 0
  val KIND_WORK = 1
}
/**
* A scheduler can be used to schedule a set of tasks based on their relative
* priorities. Scheduling is done by maintaining a list of task control blocks
* which holds tasks and the data queue they are processing.
*/
class Scheduler {
  // Run statistics, validated against the EXPECTED_* constants after a run.
  var queueCount = 0
  var holdCount = 0
  // The task control block currently being scheduled.
  var currentTcb: TaskControlBlock = null
  // Id of the currently running task; stamped onto packets that get re-queued.
  var currentId: Int = 0
  // Head of the linked list of all task control blocks (most recently added first).
  var list: TaskControlBlock = null
  // Task control blocks indexed by task id.
  val blocks = new Array[TaskControlBlock](Richards.NUMBER_OF_IDS)
  /// Add an idle task to this scheduler.
  def addIdleTask(id: Int, priority: Int, queue: Packet, count: Int) {
    addRunningTask(id, priority, queue, IdleTask(this, 1, count))
  }
  /// Add a work task to this scheduler.
  def addWorkerTask(id: Int, priority: Int, queue: Packet) {
    addTask(id, priority, queue, WorkerTask(this, Richards.ID_HANDLER_A, 0))
  }
  /// Add a handler task to this scheduler.
  def addHandlerTask(id: Int, priority: Int, queue: Packet) {
    addTask(id, priority, queue, HandlerTask(this))
  }
  /// Add a device task to this scheduler.
  def addDeviceTask(id: Int, priority: Int, queue: Packet) {
    addTask(id, priority, queue, DeviceTask(this))
  }
  /// Add the specified task and mark it as running.
  def addRunningTask(id: Int, priority: Int, queue: Packet, task: Task) {
    addTask(id, priority, queue, task)
    currentTcb.setRunning()
  }
  /// Add the specified task to this scheduler.
  def addTask(id: Int, priority: Int, queue: Packet, task: Task) {
    // The new block becomes the head of the list and is registered under its id.
    currentTcb = new TaskControlBlock(list, id, priority, queue, task)
    list = currentTcb
    blocks(id) = currentTcb
  }
  /// Execute the tasks managed by this scheduler.
  def schedule() {
    currentTcb = list
    while (currentTcb != null) {
      if (currentTcb.isHeldOrSuspended()) {
        // Blocked: move on to the next block in the list.
        currentTcb = currentTcb.link
      } else {
        currentId = currentTcb.id
        // run() returns the next block to consider.
        currentTcb = currentTcb.run()
      }
    }
  }
  /// Release a task that is currently blocked and return the next block to run.
  def release(id: Int): TaskControlBlock = {
    val tcb = blocks(id)
    if (tcb == null) return tcb
    tcb.markAsNotHeld()
    // Switch to the released task only if it outranks the current one.
    if (tcb.priority > currentTcb.priority) return tcb
    return currentTcb
  }
  /**
   * Block the currently executing task and return the next task control block
   * to run. The blocked task will not be made runnable until it is explicitly
   * released, even if new work is added to it.
   */
  def holdCurrent(): TaskControlBlock = {
    holdCount += 1
    currentTcb.markAsHeld()
    return currentTcb.link
  }
  /**
   * Suspend the currently executing task and return the next task
   * control block to run.
   * If new work is added to the suspended task it will be made runnable.
   */
  def suspendCurrent(): TaskControlBlock = {
    currentTcb.markAsSuspended()
    return currentTcb
  }
  /**
   * Add the specified packet to the end of the worklist used by the task
   * associated with the packet and make the task runnable if it is currently
   * suspended.
   */
  def queue(packet: Packet): TaskControlBlock = {
    val t = blocks(packet.id)
    if (t == null) return t
    queueCount += 1
    packet.link = null
    // Re-stamp the packet with the id of the task that queued it.
    packet.id = currentId
    return t.checkPriorityAdd(currentTcb, packet)
  }
}
/** State bits carried by a [[TaskControlBlock]]. */
object TaskState {
  /// The task is running and is currently scheduled.
  val RUNNING = 0
  /// The task has packets left to process.
  val RUNNABLE = 1 << 0
  /**
   * The task is not currently running. The task is not blocked as such and may
   * be started by the scheduler.
   */
  val SUSPENDED = 1 << 1
  /// The task is blocked and cannot be run until it is explicitly released.
  val HELD = 1 << 2
  /// A suspended task that already has pending work.
  val SUSPENDED_RUNNABLE = SUSPENDED | RUNNABLE
  /// Mask used to clear the HELD bit.
  val NOT_HELD = ~HELD
}
/**
* A task control block manages a task and the queue of work packages associated
* with it.
*
* @param id The id of this block.
* @param priority The priority of this block.
* @param queue The queue of packages to be processed by the task.
*/
class TaskControlBlock(val link: TaskControlBlock, val id: Int, val priority: Int, var queue: Packet, task: Task) {
  // A block starts suspended; it is also runnable if it already has packets queued.
  var state = if (queue == null) TaskState.SUSPENDED else TaskState.SUSPENDED_RUNNABLE
  def setRunning() {
    state = TaskState.RUNNING
  }
  def markAsNotHeld() {
    // Clear the HELD bit, leaving the other state bits untouched.
    state = state & TaskState.NOT_HELD
  }
  def markAsHeld() {
    state = state | TaskState.HELD
  }
  def isHeldOrSuspended(): Boolean = {
    return (state & TaskState.HELD) != 0 ||
        (state == TaskState.SUSPENDED)
  }
  def markAsSuspended() {
    state = state | TaskState.SUSPENDED
  }
  def markAsRunnable() {
    state = state | TaskState.RUNNABLE
  }
  /// Runs this task, if it is ready to be run, and returns the next task to run.
  def run(): TaskControlBlock = {
    // Dequeue the next packet only when this block was woken up by new work.
    val packet = if (state == TaskState.SUSPENDED_RUNNABLE) queue else null
    if (packet != null) {
      queue = packet.link
      state = if (queue == null) TaskState.RUNNING else TaskState.RUNNABLE
    }
    task.run(packet)
  }
  /**
   * Adds a packet to the worklist of this block's task, marks this as
   * runnable if necessary, and returns the next runnable object to run
   * (the one with the highest priority).
   */
  def checkPriorityAdd(task: TaskControlBlock, packet: Packet): TaskControlBlock = {
    if (queue == null) {
      queue = packet
      markAsRunnable()
      // Preempt the caller only when this block has the higher priority.
      if (priority > task.priority)
        return this
    } else {
      queue = packet.addTo(queue)
    }
    task
  }
  override def toString = s"tcb { ${task}@${state} }"
}
/**
* Abstract task that manipulates work packets.
*
* @param scheduler The scheduler that manages this task.
*/
sealed abstract class Task(scheduler: Scheduler) {
  /// Processes the given packet (possibly null) and returns the next block to run.
  def run(packet: Packet): TaskControlBlock
}
/**
* An idle task doesn't do any work itself but cycles control between the two
* device tasks.
*
* @param v1 A seed value that controls how the device tasks are scheduled.
* @param count The number of times this task should be scheduled.
*/
/**
 * An idle task doesn't do any work itself but cycles control between the two
 * device tasks, using `v1` as a simple shift register to pick the next device.
 *
 * @param v1 A seed value that controls how the device tasks are scheduled.
 * @param count The number of times this task should be scheduled.
 */
case class IdleTask(scheduler: Scheduler, var v1: Int, var count: Int) extends Task(scheduler) {
  def run(packet: Packet): TaskControlBlock = {
    count -= 1
    if (count == 0) {
      // Budget exhausted: take this task out of the rotation.
      scheduler.holdCurrent()
    } else {
      val releaseDeviceA = (v1 & 1) == 0
      if (releaseDeviceA) {
        v1 = v1 >> 1
        scheduler.release(Richards.ID_DEVICE_A)
      } else {
        v1 = (v1 >> 1) ^ 0xD008
        scheduler.release(Richards.ID_DEVICE_B)
      }
    }
  }
}
/**
* A task that suspends itself after each time it has been run to simulate
* waiting for data from an external device.
*/
/**
 * A task that suspends itself after each time it has been run to simulate
 * waiting for data from an external device.
 */
case class DeviceTask(scheduler: Scheduler) extends Task(scheduler) {
  // The packet currently "held by the device", delivered on the next run.
  var v1: Packet = null
  def run(packet: Packet): TaskControlBlock = {
    if (packet != null) {
      // Remember the packet and simulate waiting for the device to finish.
      v1 = packet
      scheduler.holdCurrent()
    } else if (v1 != null) {
      // Deliver the previously remembered packet.
      val pending = v1
      v1 = null
      scheduler.queue(pending)
    } else {
      // Nothing to do until new work arrives.
      scheduler.suspendCurrent()
    }
  }
}
}
/**
* A task that manipulates work packets.
*
* @param v1 A seed used to specify how work packets are manipulated.
* @param v2 Another seed used to specify how work packets are manipulated.
*/
/**
 * A task that manipulates work packets.
 *
 * @param v1 A seed used to specify how work packets are manipulated.
 * @param v2 Another seed used to specify how work packets are manipulated.
 */
case class WorkerTask(scheduler: Scheduler, var v1: Int, var v2: Int) extends Task(scheduler) {
  def run(packet: Packet): TaskControlBlock = {
    if (packet == null) {
      scheduler.suspendCurrent()
    } else {
      // Alternate the destination handler on every packet.
      v1 =
        if (v1 == Richards.ID_HANDLER_A) Richards.ID_HANDLER_B
        else Richards.ID_HANDLER_A
      packet.id = v1
      packet.a1 = 0
      // Fill the payload with a rolling counter in the range 1..26.
      var i = 0
      while (i < Richards.DATA_SIZE) {
        v2 += 1
        if (v2 > 26) v2 = 1
        packet.a2(i) = v2
        i += 1
      }
      scheduler.queue(packet)
    }
  }
}
/**
* A task that manipulates work packets and then suspends itself.
*/
case class HandlerTask(scheduler: Scheduler) extends Task(scheduler) {
  // Worklist of KIND_WORK packets waiting to be processed.
  var v1: Packet = null
  // Worklist of non-work (device) packets used to carry payload values onward.
  var v2: Packet = null
  def run(packet: Packet): TaskControlBlock = {
    if (packet != null) {
      // File the incoming packet on the matching worklist.
      if (packet.kind == Richards.KIND_WORK) {
        v1 = packet.addTo(v1)
      } else {
        v2 = packet.addTo(v2)
      }
    }
    if (v1 != null) {
      // a1 tracks how many payload values of the work packet were forwarded so far.
      val count = v1.a1
      if (count < Richards.DATA_SIZE) {
        if (v2 != null) {
          // Forward one payload value inside a device packet.
          val v = v2
          v2 = v2.link
          v.a1 = v1.a2(count)
          v1.a1 = count + 1
          return scheduler.queue(v)
        }
      } else {
        // Work packet fully processed; pass it along.
        val v = v1
        v1 = v1.link
        return scheduler.queue(v)
      }
    }
    // No forward progress possible: wait for more packets.
    scheduler.suspendCurrent()
  }
}
/**
* A simple package of data that is manipulated by the tasks. The exact layout
 * of the payload data carried by a packet is not important, and neither is the
* nature of the work performed on packets by the tasks.
* Besides carrying data, packets form linked lists and are hence used both as
* data and worklists.
*
* @param link The tail of the linked list of packets.
* @param id An ID for this packet.
* @param kind The type of this packet.
*/
case class Packet(var link: Packet, var id: Int, val kind: Int) {
  // Scratch field: used either as a payload cursor or as a single payload value,
  // depending on the task consuming the packet.
  var a1 = 0
  // Fixed-size payload.
  val a2 = new Array[Int](Richards.DATA_SIZE)
  /// Add this packet to the end of a worklist, and return the worklist.
  def addTo(queue: Packet): Packet = {
    link = null
    queue match {
      case null =>
        // Empty worklist: this packet becomes the head.
        this
      case head =>
        // Walk to the tail and append.
        var tail = head
        while (tail.link != null) {
          tail = tail.link
        }
        tail.link = this
        head
    }
  }
}
| jonas/scala-js-benchmarks | richards/src/main/scala/org/scalajs/benchmark/richards/Richards.scala | Scala | bsd-3-clause | 13,876 |
package test
class ASpec {
  /** Prints the greeting produced by the shared test helper. */
  def sayHiFromShared(): Unit = {
    val greeting = SharedTestHelper.sayHi
    println(greeting)
  }
}
| jkinkead/sbt-plugins | src/sbt-test/sbt-plugins/simple/core/src/it/scala/ASpec.scala | Scala | apache-2.0 | 94 |
package com.joescii
import org.scalatest.{FlatSpec, ShouldMatchers}
class InputSpec extends FlatSpec with ShouldMatchers {
  /** parseInput must turn the bundled sample file into the expected word pairs. */
  "parseInput()" should "handle the sample from the problem" in {
    val stream = getClass.getClassLoader.getResourceAsStream("input.txt")
    val expected = Some(List(("bac", "bac"), ("abc", "def"), ("jdfh", "fds")))
    Solution.parseInput(stream) shouldEqual expected
  }
}
| joescii/hackerrank-palindrome | src/test/scala/InputSpec.scala | Scala | apache-2.0 | 373 |
package phenan.parsers.ll
import org.scalatest._
import phenan.parsers.reader._
/**
 * Exercises the combinator library on a small arithmetic-expression grammar:
 * semicolon-terminated sums/products of natural numbers, with `panic` used to
 * resynchronise on the next ';' after a parse error.
 */
class ParsersTest extends FunSuite with Matchers {
  object MyParsers extends Parsers[Char] {
    // digit    ::= '0' | '1'..'9'           (mapped to its Int value)
    def non_zero_digit = elem("non zero digit", { n => '1' <= n && n <= '9' }) ^^ { _ - '0' }
    def digit = non_zero_digit | elem('0') ^^^ 0 as "digit"
    // nat      ::= non_zero_digit digit*    (folded into a decimal Int; no leading zeros)
    def nat = non_zero_digit ~ digit.* ^^ { case d ~ ds => ds.foldLeft(d)(_ * 10 + _) } as "natural number"
    // mul/add  ::= left-associative chains of * / and + - respectively
    def mul = nat.chainl1(mul_op | div_op)
    def mul_op = elem('*') ^^^ { (a: Int, b: Int) => a * b }
    def div_op = elem('/') ^^^ { (a: Int, b: Int) => a / b }
    def add = mul.chainl1(add_op | sub_op)
    def add_op = elem('+') ^^^ { (a: Int, b: Int) => a + b }
    def sub_op = elem('-') ^^^ { (a: Int, b: Int) => a - b }
    // expr     ::= add ';'   — on failure, skip input up to and including the next ';'
    def expr = ( add <~ elem(';') ).panic(untilSemicolon)
    def untilSemicolon = elem("", _ != ';').* ~> elem(';')
    def exprs = expr.+
  }
  // Runs `parser` over the whole of `src`.
  def parse [T] (parser: Parser[Char, T], src: String): ParseResult[T] = parser.parseAll(CharSequenceReader(src))
  import MyParsers._
  test ("digit") {
    parse(digit, "4") shouldBe ParseSuccess(4)
    // Non-digit input must fail with the labelled message at line 1, column 1.
    parse(digit, "r") should matchPattern {
      case ParseFailure(List(FailureMessage("digit", "r", p))) if p.line == 1 && p.column == 1 =>
    }
  }
  test ("natural number") {
    parse(nat, "308") shouldBe ParseSuccess(308)
    // Leading zero is rejected by the non_zero_digit head of `nat`.
    parse(nat, "097") should matchPattern {
      case ParseFailure(List(FailureMessage("natural number", "0", p))) if p.line == 1 && p.column == 1 =>
    }
  }
  test ("expression") {
    parse(expr, "12/4+3*2;") shouldBe ParseSuccess(9)
    // Missing terminating ';' is reported at the end of input (column 9).
    parse(expr, "12/4+3*2") should matchPattern {
      case ParseFailure(List(FailureMessage(";", "end of input", p))) if p.line == 1 && p.column == 9 =>
    }
  }
  test ("expressions") {
    parse(exprs, "1+5;3*4*2;") shouldBe ParseSuccess(List(6, 24))
    // `panic` recovers at each ';', so all three errors are collected.
    parse(exprs, "6+5*2;3+a;3*b*2;6") should matchPattern {
      case ParseFailure(List(FailureMessage("natural number", "a", _), FailureMessage("natural number", "b", _), FailureMessage(";", "end of input", _))) =>
    }
  }
}
| phenan/parsers | src/test/scala/phenan/parsers/ll/ParsersTest.scala | Scala | mit | 2,078 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import org.orbeon.oxf.common.Version
import org.scalatest.junit.AssertionsForJUnit
import org.orbeon.oxf.test.DocumentTestBase
import org.junit.{Assume, Test}
import org.orbeon.oxf.xml.Dom4j.elemToDocument
class ItemsetDependenciesTest extends DocumentTestBase with AssertionsForJUnit {
  // See: [ #315557 ] XPath analysis: Checkbox with both itemset and value changing ends up in incorrect state
  // http://forge.ow2.org/tracker/?func=detail&atid=350207&aid=315557&group_id=168
  /**
   * Regression test: with XPath analysis enabled, an xf:select whose item
   * value depends on another control must refresh both its itemset and its
   * external value when that other control changes.
   */
  @Test def selectValueDependingOnItemset(): Unit = {
    Assume.assumeTrue(Version.isPE) // only test this feature if we are the PE version
    this setupDocument
      <xh:html xmlns:xh="http://www.w3.org/1999/xhtml"
           xmlns:xf="http://www.w3.org/2002/xforms"
           xmlns:xxf="http://orbeon.org/oxf/xml/xforms"
           xmlns:xs="http://www.w3.org/2001/XMLSchema">
        <xh:head>
          <xf:model id="model" xxf:xpath-analysis="true" xxf:encrypt-item-values="false">
            <xf:instance id="instance">
              <instance xmlns="">
                <selection>1 2</selection>
                <value>1</value>
                <value>2</value>
                <index>1</index>
              </instance>
            </xf:instance>
          </xf:model>
        </xh:head>
        <xh:body>
          <xf:select id="checkbox" ref="selection" appearance="full">
            <xf:item>
              <xf:label/>
              <xf:value ref="../value[xs:integer(../index)]"/>
            </xf:item>
          </xf:select>
          <xf:select1 id="value-selection" ref="index" appearance="full">
            <xf:item>
              <xf:label>1</xf:label>
              <xf:value>1</xf:value>
            </xf:item>
            <xf:item>
              <xf:label>2</xf:label>
              <xf:value>2</xf:value>
            </xf:item>
          </xf:select1>
        </xh:body>
      </xh:html>
    // Initial state: index = 1, so the checkbox item's value is the first <value>.
    assert(getControlExternalValue("checkbox") === "1")
    assert(getControlExternalValue("value-selection") === "1")
    assert(getItemset("checkbox") === """[{"label":"","value":"1"}]""")
    // Changing the index must update both the checkbox's value and its itemset.
    setControlValue("value-selection", "2")
    assert(getControlExternalValue("checkbox") === "2")
    assert(getItemset("checkbox") === """[{"label":"","value":"2"}]""")
  }
} | brunobuzzi/orbeon-forms | xforms/jvm/src/test/scala/org/orbeon/oxf/xforms/analysis/ItemsetDependenciesTest.scala | Scala | lgpl-2.1 | 2,968 |
package scodec.bits
object StreamingBitVectorTest {
  /** Times `a`, printing elapsed seconds and throughput for `mb` megabytes of input. */
  def time[A](label: String, mb: Double)(a: => A): A = {
    val start = System.currentTimeMillis
    val result = a
    val t = (System.currentTimeMillis.toDouble - start) / 1000.0
    println(s"$label took $t seconds, ${math.round(mb/t)} MB/s")
    result
  }
  /** Prints the JVM's current max/total/free heap figures (in MB). */
  def printMemoryStats(): Unit = {
    val R = java.lang.Runtime.getRuntime
    println(s"Max memory: ${R.maxMemory.toDouble / 1e6} MB")
    println(s"Total memory: ${R.totalMemory.toDouble / 1e6} MB")
    println(s"Free memory: ${R.freeMemory.toDouble / 1e6} MB")
  }
  // Number of bits consumed per step (4 kB worth of bits).
  val Stride = 4096 * 8 // 4kb
  /**
   * Walks `b` in `Stride`-bit steps, returning the number of bytes seen.
   * When `touchBytes` is set, every byte of each chunk is dereferenced,
   * forcing the backing bytes to be materialized.
   */
  @annotation.tailrec
  def countBits(b: BitVector, acc: Long, touchBytes: Boolean): Long = {
    if (b.isEmpty) acc
    else {
      val (h, t) = (b.take(Stride), b.drop(Stride))
      var i = 0
      if (touchBytes) {
        val bytes = h.toByteVector
        while (i < bytes.size) {
          bytes(i)
          i += 1
        }
      }
      countBits(t, acc + (if (touchBytes) i else h.size / 8), touchBytes)
    }
  }
  /**
   * Entry point. This object previously extended `App`; an explicit `main`
   * avoids the `App`/`DelayedInit` initialization-order pitfalls while
   * preserving the original top-to-bottom execution of the script.
   */
  def main(args: Array[String]): Unit = {
    println { """
    |Simple integration test for streaming, lazy-I/O-backed `BitVector`.
    |When viewing output, we are looking for two things:
    |
    | * Speed should be "reasonable", between 10-100 MB/s if touching
    |   each byte, and up to 4 GB/s or so if we are just measuring time
    |   to load the series of chunks.
    | * Memory usage should be bounded by the chunk size, not by overall
    |   file size. That is, the garbage collector should be able to
    |   reclaim previous chunks when the stream is traversed in a linear
    |   fashion. Try running with -Xmx100m to verify this.
    """.stripMargin
    }
    import java.io._
    val file = new File("test.largefile")
    if (!file.exists) {
      println("To test large file-backed streaming BitVector support,")
      println("place a large file at ./test.largefile")
    }
    else {
      // Runs every streaming strategy over the same file, with or without
      // touching each byte, and reports throughput plus memory statistics.
      def go(touchBytes: Boolean): Unit = {
        val in = new FileInputStream(file)
        val nioIn1 = (new FileInputStream(file)).getChannel
        val nioIn2 = (new FileInputStream(file)).getChannel
        val nioIn3 = (new FileInputStream(file)).getChannel
        val nioIn4 = (new FileInputStream(file)).getChannel
        val size = nioIn1.size.toDouble / 1e6
        println("Processing file of size: " + size + " MB")
        println("Touching each byte read: " + touchBytes)
        println("----------")
        println
        try {
          time("BitVector.fromInputStream", size) {
            // NB: if we declare `val b1 = BitVector.fromInputStream(..)`, this
            // will run out of memory, since head of stream makes entire stream
            // reachable!
            val N = countBits(BitVector.fromInputStream(in), 0, touchBytes)
            println(s"finished processing ${N.toDouble/1e6} MB")
            printMemoryStats
          }
          println
          time("BitVector.fromChannel(chunkSize=64kB)", size) {
            val N = countBits(BitVector.fromChannel(nioIn1, chunkSizeInBytes = 1024 * 64), 0, touchBytes)
            println(s"finished processing ${N.toDouble/1e6} MB")
            printMemoryStats
          }
          println
          time("BitVector.fromChannel(direct=true, chunkSizeInBytes=64k)", size) {
            val N = countBits(BitVector.fromChannel(nioIn3, 64 * 1024, direct = true), 0, touchBytes)
            println(s"finished processing ${N.toDouble/1e6} MB")
            printMemoryStats
          }
          println
          time("BitVector.fromChannel(direct=true, chunkSize=16MB)", size) {
            val N = countBits(BitVector.fromChannel(nioIn2, 16 * 1024 * 1000, direct = true), 0, touchBytes)
            println(s"finished processing ${N.toDouble/1e6} MB")
            printMemoryStats
          }
          println
          time("BitVector.fromMmap", size) {
            val N = countBits(BitVector.fromMmap(nioIn4), 0, touchBytes)
            println(s"finished processing ${N.toDouble/1e6} MB")
            printMemoryStats
          }
        }
        finally {
          print("closing files... ")
          in.close
          nioIn1.close
          nioIn2.close
          println("done")
          println
        }
      }
      go(false)
      go(true)
    }
  }
}
| aloiscochard/scodec-bits | core/src/test/scala/scodec/bits/StreamingBitVectorTest.scala | Scala | bsd-3-clause | 4,128 |
package at.logic.gapt.integration_tests
import at.logic.gapt.examples._
import at.logic.gapt.proofs.Sequent
import at.logic.gapt.provers.eprover.EProver
import at.logic.gapt.provers.prover9.Prover9
import org.specs2.mutable._
/**
 * Integration tests for the higher-order "n-tape" proofs: cut-elimination
 * (ACNF construction), statistics printing, and characteristic sequent set
 * (css) computation for the different proof versions.
 * All tests are skipped when Prover9 is not installed; the statistics tests
 * additionally require EProver.
 */
class nTapeTest extends Specification {
  args( skipAll = !Prover9.isInstalled )
  "The higher-order tape proof" should {
    "do cut-elimination on the 2 copies tape proof tape3.llk" in {
      // The ACNF's end-sequent must agree (as a multiset) with the input proof's.
      val acnf_lkconclusion = nTape2.acnf.conclusion
      acnf_lkconclusion.multiSetEquals( nTape2.preprocessed_input_proof.conclusion ) must beTrue
      nTape2.thf_reproving_deep( None ) must be_!=( "" )
      ok( "acnf could be created" )
    }
    "print statistics of the 2 copies tape proof, including reproving the deep formula (tape3.llk)" in {
      if ( !EProver.isInstalled ) skipped( "No EProver installed!" )
      nTape2.printStatistics()
      ok( "all statistics created!" )
    }
    "do cut-elimination on the 1 copy tape proof tape3ex.llk" in {
      val acnf_lkconclusion = nTape3.acnf.conclusion
      acnf_lkconclusion.multiSetEquals( nTape3.preprocessed_input_proof.conclusion ) must beTrue
      nTape3.thf_reproving_deep( None ) must be_!=( "" )
      ok( "acnf could be created" )
    }
    "print statistics of the 3 copies tape proof, including reproving the deep formula tape3ex.llk" in {
      if ( !EProver.isInstalled ) skipped( "No EProver installed!" )
      nTape3.printStatistics()
      ok( "all statistics created!" )
    }
    // Note: fixed typos in the descriptions below ("calulate" / "calculate of").
    "calculate the css for version 4 of the n-tape proof" in {
      for ( i <- 2 to 4 ) nTape4( i ).preprocessed_css_hol_clauses
      ok( "computations done" )
    }
    "calculate the css for version 5 of the n-tape proof" in {
      for ( i <- 2 to 4 ) nTape5( i ).preprocessed_css_hol_clauses
      ok( "computations done" )
    }
    "calculate the css for version 5 with arithmetical if-then-else of the n-tape proof" in {
      nTape5Arith( 2 ).preprocessed_css_hol_clauses
      ok( "computations done" )
    }
    "evaluate the formulas in the if-then-else tests" in {
      nTape6.sequents
      ok( "terms created" )
    }
  }
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/integration_tests/nTapeTest.scala | Scala | gpl-3.0 | 2,232 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.server
import java.nio.ByteBuffer
import java.util.{Collections, LinkedHashMap, Optional, Properties}
import java.util.concurrent.{Executors, Future, TimeUnit}
import kafka.log.LogConfig
import kafka.network.RequestChannel.Session
import kafka.security.auth._
import kafka.utils.TestUtils
import org.apache.kafka.common.acl.{AccessControlEntry, AccessControlEntryFilter, AclBinding, AclBindingFilter, AclOperation, AclPermissionType}
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.message.{CreateTopicsRequestData, DescribeGroupsRequestData, ElectPreferredLeadersRequestData, LeaveGroupRequestData, JoinGroupRequestData}
import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourcePatternFilter, ResourceType => AdminResourceType}
import org.apache.kafka.common.{Node, TopicPartition}
import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicSet}
import org.apache.kafka.common.message.SaslAuthenticateRequestData
import org.apache.kafka.common.message.SaslHandshakeRequestData
import org.apache.kafka.common.metrics.{KafkaMetric, Quota, Sensor}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.protocol.types.Struct
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.CreateAclsRequest.AclCreation
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.{AuthenticationContext, KafkaPrincipal, KafkaPrincipalBuilder, SecurityProtocol}
import org.apache.kafka.common.utils.Sanitizer
import org.apache.kafka.common.utils.SecurityUtils
import org.junit.Assert._
import org.junit.{After, Before, Test}
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
class RequestQuotaTest extends BaseRequestTest {
  // A single broker suffices: all quotas under test are per-broker.
  override def numBrokers: Int = 1
  private val topic = "topic-1"
  private val numPartitions = 1
  private val tp = new TopicPartition(topic, 0)
  private val logDir = "logDir"
  // Client id given a large request-quota override in setUp (never throttled).
  private val unthrottledClientId = "unthrottled-client"
  // Client ids whose produce/fetch byte-rate quotas are made tiny in setUp.
  private val smallQuotaProducerClientId = "small-quota-producer-client"
  private val smallQuotaConsumerClientId = "small-quota-consumer-client"
  private val brokerId: Integer = 0
  // The single broker hosting the test partition; assigned in setUp.
  private var leaderNode: KafkaServer = null
  // Run tests concurrently since a throttle could be up to 1 second because quota percentage allocated is very low
  case class Task(apiKey: ApiKeys, future: Future[_])
  private val executor = Executors.newCachedThreadPool
  private val tasks = new ListBuffer[Task]
  /**
   * Broker config overrides: single offsets partition, fast group rebalance,
   * and the test authorizer/principal builder used by the throttle tests.
   */
  override def propertyOverrides(properties: Properties): Unit = {
    properties.put(KafkaConfig.ControlledShutdownEnableProp, "false")
    properties.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "1")
    properties.put(KafkaConfig.OffsetsTopicPartitionsProp, "1")
    properties.put(KafkaConfig.GroupMinSessionTimeoutMsProp, "100")
    properties.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
    properties.put(KafkaConfig.AuthorizerClassNameProp, classOf[RequestQuotaTest.TestAuthorizer].getName)
    properties.put(KafkaConfig.PrincipalBuilderClassProp, classOf[RequestQuotaTest.TestPrincipalBuilder].getName)
  }
  /**
   * Starts the cluster, creates the test topic and installs the client-id
   * quota profiles used by the tests: a tiny default request quota, one
   * unthrottled client, and producer/consumer client ids whose byte-rate
   * quotas are small enough to be violated before the request quota is.
   */
  @Before
  override def setUp() {
    RequestQuotaTest.principal = KafkaPrincipal.ANONYMOUS
    super.setUp()
    createTopic(topic, numPartitions, 1)
    leaderNode = servers.head
    // Change default client-id request quota to a small value and a single unthrottledClient with a large quota
    val quotaProps = new Properties()
    quotaProps.put(DynamicConfig.Client.RequestPercentageOverrideProp, "0.01")
    quotaProps.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "2000")
    quotaProps.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "2000")
    adminZkClient.changeClientIdConfig("<default>", quotaProps)
    quotaProps.put(DynamicConfig.Client.RequestPercentageOverrideProp, "2000")
    adminZkClient.changeClientIdConfig(Sanitizer.sanitize(unthrottledClientId), quotaProps)
    // Client ids with small producer and consumer (fetch) quotas. Quota values were picked so that both
    // producer/consumer and request quotas are violated on the first produce/consume operation, and the delay due to
    // producer/consumer quota violation will be longer than the delay due to request quota violation.
    quotaProps.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "1")
    quotaProps.put(DynamicConfig.Client.RequestPercentageOverrideProp, "0.01")
    adminZkClient.changeClientIdConfig(Sanitizer.sanitize(smallQuotaProducerClientId), quotaProps)
    quotaProps.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "1")
    quotaProps.put(DynamicConfig.Client.RequestPercentageOverrideProp, "0.01")
    adminZkClient.changeClientIdConfig(Sanitizer.sanitize(smallQuotaConsumerClientId), quotaProps)
    // Block until the quota overrides have propagated to the broker's quota managers.
    TestUtils.retry(20000) {
      val quotaManager = servers.head.dataPlaneRequestProcessor.quotas.request
      assertEquals(s"Default request quota not set", Quota.upperBound(0.01), quotaManager.quota("some-user", "some-client"))
      assertEquals(s"Request quota override not set", Quota.upperBound(2000), quotaManager.quota("some-user", unthrottledClientId))
      val produceQuotaManager = servers.head.dataPlaneRequestProcessor.quotas.produce
      assertEquals(s"Produce quota override not set", Quota.upperBound(1), produceQuotaManager.quota("some-user", smallQuotaProducerClientId))
      val consumeQuotaManager = servers.head.dataPlaneRequestProcessor.quotas.fetch
      assertEquals(s"Consume quota override not set", Quota.upperBound(1), consumeQuotaManager.quota("some-user", smallQuotaConsumerClientId))
    }
  }
  /** Stops the shared worker pool before shutting the cluster down. */
  @After
  override def tearDown() {
    try executor.shutdownNow()
    finally super.tearDown()
  }
  /** Every client-facing API must report a non-zero throttle time once the request quota is violated. */
  @Test
  def testResponseThrottleTime() {
    for (apiKey <- RequestQuotaTest.ClientActions)
      submitTest(apiKey, () => checkRequestThrottleTime(apiKey))
    waitAndCheckResults()
  }
  /** When both produce and request quotas are violated, the produce quota's throttle must win. */
  @Test
  def testResponseThrottleTimeWhenBothProduceAndRequestQuotasViolated() {
    val apiKey = ApiKeys.PRODUCE
    submitTest(apiKey, () => checkSmallQuotaProducerRequestThrottleTime(apiKey))
    waitAndCheckResults()
  }
  /** When both fetch and request quotas are violated, the fetch quota's throttle must win. */
  @Test
  def testResponseThrottleTimeWhenBothFetchAndRequestQuotasViolated() {
    val apiKey = ApiKeys.FETCH
    submitTest(apiKey, () => checkSmallQuotaConsumerRequestThrottleTime(apiKey))
    waitAndCheckResults()
  }
  /** A client with a large quota override must never be throttled. */
  @Test
  def testUnthrottledClient() {
    for (apiKey <- RequestQuotaTest.ClientActions)
      submitTest(apiKey, () => checkUnthrottledClient(apiKey))
    waitAndCheckResults()
  }
  /** Cluster-internal APIs are exempt from quotas; their time goes to the exempt-request metric. */
  @Test
  def testExemptRequestTime() {
    for (apiKey <- RequestQuotaTest.ClusterActions)
      submitTest(apiKey, () => checkExemptRequestMetric(apiKey))
    waitAndCheckResults()
  }
  /** Unauthorized requests must still be throttled for every API. */
  @Test
  def testUnauthorizedThrottle() {
    RequestQuotaTest.principal = RequestQuotaTest.UnauthorizedPrincipal
    for (apiKey <- ApiKeys.values)
      submitTest(apiKey, () => checkUnauthorizedRequestThrottle(apiKey))
    waitAndCheckResults()
  }
  // Builds a Session for the given user name (null client address).
  def session(user: String): Session = Session(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user), null)
  // Current request-quota throttle-time metric for a client id (-1.0 if absent).
  private def throttleTimeMetricValue(clientId: String): Double = {
    throttleTimeMetricValueForQuotaType(clientId, QuotaType.Request)
  }
  // Throttle-time metric for the given quota type (Request/Produce/Fetch) and client id.
  private def throttleTimeMetricValueForQuotaType(clientId: String, quotaType: QuotaType): Double = {
    val metricName = leaderNode.metrics.metricName("throttle-time",
                                  quotaType.toString,
                                  "",
                                  "user", "",
                                  "client-id", clientId)
    val sensor = leaderNode.quotaManagers.request.getOrCreateQuotaSensors(session("ANONYMOUS"),
      clientId).throttleTimeSensor
    metricValue(leaderNode.metrics.metrics.get(metricName), sensor)
  }
  // Request-time (quota utilization) metric for a client id.
  private def requestTimeMetricValue(clientId: String): Double = {
    val metricName = leaderNode.metrics.metricName("request-time",
                                  QuotaType.Request.toString,
                                  "",
                                  "user", "",
                                  "client-id", clientId)
    val sensor = leaderNode.quotaManagers.request.getOrCreateQuotaSensors(session("ANONYMOUS"),
      clientId).quotaSensor
    metricValue(leaderNode.metrics.metrics.get(metricName), sensor)
  }
  // Broker-wide metric accumulating time spent on quota-exempt requests.
  private def exemptRequestMetricValue: Double = {
    val metricName = leaderNode.metrics.metricName("exempt-request-time", QuotaType.Request.toString, "")
    metricValue(leaderNode.metrics.metrics.get(metricName), leaderNode.quotaManagers.request.exemptSensor)
  }
  // Reads a metric under the sensor's lock; -1.0 when the metric does not exist yet.
  private def metricValue(metric: KafkaMetric, sensor: Sensor): Double = {
    sensor.synchronized {
      if (metric == null) -1.0 else metric.metricValue.asInstanceOf[Double]
    }
  }
  /**
   * Returns a minimal, valid request builder for every API key under test.
   * The requests do not need to succeed — they only need to be accepted by the
   * broker so that quota accounting and throttling can be exercised.
   */
  private def requestBuilder(apiKey: ApiKeys): AbstractRequest.Builder[_ <: AbstractRequest] = {
    apiKey match {
      case ApiKeys.PRODUCE =>
        ProduceRequest.Builder.forCurrentMagic(1, 5000,
          collection.mutable.Map(tp -> MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("test".getBytes))).asJava)
      case ApiKeys.FETCH =>
        val partitionMap = new LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
        partitionMap.put(tp, new FetchRequest.PartitionData(0, 0, 100, Optional.of(15)))
        FetchRequest.Builder.forConsumer(0, 0, partitionMap)
      case ApiKeys.METADATA =>
        new MetadataRequest.Builder(List(topic).asJava, true)
      case ApiKeys.LIST_OFFSETS =>
        ListOffsetRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED)
          .setTargetTimes(Map(tp -> new ListOffsetRequest.PartitionData(
            0L, Optional.of[Integer](15))).asJava)
      // Inter-broker APIs below use this broker (id 0) as both sender and target.
      case ApiKeys.LEADER_AND_ISR =>
        new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, brokerId, Int.MaxValue, Long.MaxValue,
          Map(tp -> new LeaderAndIsrRequest.PartitionState(Int.MaxValue, brokerId, Int.MaxValue, List(brokerId).asJava,
            2, Seq(brokerId).asJava, true)).asJava,
          Set(new Node(brokerId, "localhost", 0)).asJava)
      case ApiKeys.STOP_REPLICA =>
        new StopReplicaRequest.Builder(ApiKeys.STOP_REPLICA.latestVersion, brokerId, Int.MaxValue, Long.MaxValue, true, Set(tp).asJava)
      case ApiKeys.UPDATE_METADATA =>
        val partitionState = Map(tp -> new UpdateMetadataRequest.PartitionState(
          Int.MaxValue, brokerId, Int.MaxValue, List(brokerId).asJava, 2, Seq(brokerId).asJava, Seq.empty[Integer].asJava)).asJava
        val securityProtocol = SecurityProtocol.PLAINTEXT
        val brokers = Set(new UpdateMetadataRequest.Broker(brokerId,
          Seq(new UpdateMetadataRequest.EndPoint("localhost", 0, securityProtocol,
            ListenerName.forSecurityProtocol(securityProtocol))).asJava, null)).asJava
        new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, brokerId, Int.MaxValue, Long.MaxValue, partitionState, brokers)
      case ApiKeys.CONTROLLED_SHUTDOWN =>
        new ControlledShutdownRequest.Builder(brokerId, Long.MaxValue, ApiKeys.CONTROLLED_SHUTDOWN.latestVersion)
      case ApiKeys.OFFSET_COMMIT =>
        new OffsetCommitRequest.Builder("test-group",
          Map(tp -> new OffsetCommitRequest.PartitionData(0, Optional.empty[Integer](), "metadata")).asJava).
          setMemberId("").setGenerationId(1)
      case ApiKeys.OFFSET_FETCH =>
        new OffsetFetchRequest.Builder("test-group", List(tp).asJava)
      case ApiKeys.FIND_COORDINATOR =>
        new FindCoordinatorRequest.Builder(FindCoordinatorRequest.CoordinatorType.GROUP, "test-group")
      case ApiKeys.JOIN_GROUP =>
        new JoinGroupRequest.Builder(
          new JoinGroupRequestData()
            .setGroupId("test-join-group")
            .setSessionTimeoutMs(200)
            .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID)
            .setProtocolType("consumer")
            .setProtocols(
              new JoinGroupRequestData.JoinGroupRequestProtocolSet(
                Collections.singletonList(new JoinGroupRequestData.JoinGroupRequestProtocol()
                  .setName("consumer-range")
                  .setMetadata("test".getBytes())).iterator()
              )
            )
            .setRebalanceTimeoutMs(100)
        )
      case ApiKeys.HEARTBEAT =>
        new HeartbeatRequest.Builder("test-group", 1, "")
      case ApiKeys.LEAVE_GROUP =>
        new LeaveGroupRequest.Builder(new LeaveGroupRequestData().setGroupId("test-leave-group").setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID))
      case ApiKeys.SYNC_GROUP =>
        new SyncGroupRequest.Builder("test-sync-group", 1, "", Map[String, ByteBuffer]().asJava)
      case ApiKeys.DESCRIBE_GROUPS =>
        new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(List("test-group").asJava))
      case ApiKeys.LIST_GROUPS =>
        new ListGroupsRequest.Builder()
      case ApiKeys.SASL_HANDSHAKE =>
        new SaslHandshakeRequest.Builder(new SaslHandshakeRequestData().setMechanism("PLAIN"))
      case ApiKeys.SASL_AUTHENTICATE =>
        new SaslAuthenticateRequest.Builder(new SaslAuthenticateRequestData().setAuthBytes(new Array[Byte](0)))
      case ApiKeys.API_VERSIONS =>
        new ApiVersionsRequest.Builder
      case ApiKeys.CREATE_TOPICS => {
        new CreateTopicsRequest.Builder(
          new CreateTopicsRequestData().setTopics(
            new CreatableTopicSet(Collections.singleton(
              new CreatableTopic().setName("topic-2").setNumPartitions(1).
                setReplicationFactor(1.toShort)).iterator())))
      }
      case ApiKeys.DELETE_TOPICS =>
        new DeleteTopicsRequest.Builder(Set("topic-2").asJava, 5000)
      case ApiKeys.DELETE_RECORDS =>
        new DeleteRecordsRequest.Builder(5000, Map(tp -> (0L: java.lang.Long)).asJava)
      case ApiKeys.INIT_PRODUCER_ID =>
        new InitProducerIdRequest.Builder("abc")
      case ApiKeys.OFFSET_FOR_LEADER_EPOCH =>
        new OffsetsForLeaderEpochRequest.Builder(ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion,
          Map(tp -> new OffsetsForLeaderEpochRequest.PartitionData(Optional.of(15), 0)).asJava)
      case ApiKeys.ADD_PARTITIONS_TO_TXN =>
        new AddPartitionsToTxnRequest.Builder("test-transactional-id", 1, 0, List(tp).asJava)
      case ApiKeys.ADD_OFFSETS_TO_TXN =>
        new AddOffsetsToTxnRequest.Builder("test-transactional-id", 1, 0, "test-txn-group")
      case ApiKeys.END_TXN =>
        new EndTxnRequest.Builder("test-transactional-id", 1, 0, TransactionResult.forId(false))
      case ApiKeys.WRITE_TXN_MARKERS =>
        new WriteTxnMarkersRequest.Builder(List.empty.asJava)
      case ApiKeys.TXN_OFFSET_COMMIT =>
        new TxnOffsetCommitRequest.Builder("test-transactional-id", "test-txn-group", 2, 0,
          Map.empty[TopicPartition, TxnOffsetCommitRequest.CommittedOffset].asJava)
      case ApiKeys.DESCRIBE_ACLS =>
        new DescribeAclsRequest.Builder(AclBindingFilter.ANY)
      case ApiKeys.CREATE_ACLS =>
        new CreateAclsRequest.Builder(Collections.singletonList(new AclCreation(new AclBinding(
          new ResourcePattern(AdminResourceType.TOPIC, "mytopic", PatternType.LITERAL),
          new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.WRITE, AclPermissionType.DENY)))))
      case ApiKeys.DELETE_ACLS =>
        new DeleteAclsRequest.Builder(Collections.singletonList(new AclBindingFilter(
          new ResourcePatternFilter(AdminResourceType.TOPIC, null, PatternType.LITERAL),
          new AccessControlEntryFilter("User:ANONYMOUS", "*", AclOperation.ANY, AclPermissionType.DENY))))
      case ApiKeys.DESCRIBE_CONFIGS =>
        new DescribeConfigsRequest.Builder(Collections.singleton(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic)))
      case ApiKeys.ALTER_CONFIGS =>
        new AlterConfigsRequest.Builder(
          Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic),
            new AlterConfigsRequest.Config(Collections.singleton(
              new AlterConfigsRequest.ConfigEntry(LogConfig.MaxMessageBytesProp, "1000000")
            ))), true)
      case ApiKeys.ALTER_REPLICA_LOG_DIRS =>
        new AlterReplicaLogDirsRequest.Builder(Collections.singletonMap(tp, logDir))
      case ApiKeys.DESCRIBE_LOG_DIRS =>
        new DescribeLogDirsRequest.Builder(Collections.singleton(tp))
      case ApiKeys.CREATE_PARTITIONS =>
        new CreatePartitionsRequest.Builder(
          Collections.singletonMap("topic-2", new CreatePartitionsRequest.PartitionDetails(1)), 0, false
        )
      case ApiKeys.CREATE_DELEGATION_TOKEN =>
        new CreateDelegationTokenRequest.Builder(Collections.singletonList(SecurityUtils.parseKafkaPrincipal("User:test")), 1000)
      case ApiKeys.EXPIRE_DELEGATION_TOKEN =>
        new ExpireDelegationTokenRequest.Builder("".getBytes, 1000)
      case ApiKeys.DESCRIBE_DELEGATION_TOKEN =>
        new DescribeDelegationTokenRequest.Builder(Collections.singletonList(SecurityUtils.parseKafkaPrincipal("User:test")))
      case ApiKeys.RENEW_DELEGATION_TOKEN =>
        new RenewDelegationTokenRequest.Builder("".getBytes, 1000)
      case ApiKeys.DELETE_GROUPS =>
        new DeleteGroupsRequest.Builder(Collections.singleton("test-group"))
      case ApiKeys.ELECT_PREFERRED_LEADERS =>
        val partition = new ElectPreferredLeadersRequestData.TopicPartitions()
          .setPartitionId(Collections.singletonList(0))
          .setTopic("my_topic")
        new ElectPreferredLeadersRequest.Builder(
          new ElectPreferredLeadersRequestData()
            .setTimeoutMs(0)
            .setTopicPartitions(Collections.singletonList(partition)))
      // Fail loudly when a new API is added without a corresponding builder here.
      case _ =>
        throw new IllegalArgumentException("Unsupported API key " + apiKey)
    }
  }
  /**
   * A minimal blocking client that repeatedly sends the request for `apiKey`
   * with the given client id, used to drive a quota into violation.
   */
  case class Client(clientId: String, apiKey: ApiKeys) {
    var correlationId: Int = 0
    val builder = requestBuilder(apiKey)
    /**
     * Sends requests until `until` accepts a response or 10 seconds elapse.
     * Returns whether the predicate was satisfied. The socket is always closed.
     */
    def runUntil(until: (Struct) => Boolean): Boolean = {
      val startMs = System.currentTimeMillis
      var done = false
      val socket = connect()
      try {
        while (!done && System.currentTimeMillis < startMs + 10000) {
          correlationId += 1
          val response = requestResponse(socket, clientId, correlationId, builder)
          done = until.apply(response)
        }
      } finally {
        socket.close()
      }
      done
    }
    // Includes the current quota metrics so assertion failures are self-describing.
    override def toString: String = {
      val requestTime = requestTimeMetricValue(clientId)
      val throttleTime = throttleTimeMetricValue(clientId)
      val produceThrottleTime = throttleTimeMetricValueForQuotaType(clientId, QuotaType.Produce)
      val consumeThrottleTime = throttleTimeMetricValueForQuotaType(clientId, QuotaType.Fetch)
      s"Client $clientId apiKey $apiKey requests $correlationId requestTime $requestTime " +
      s"throttleTime $throttleTime produceThrottleTime $produceThrottleTime consumeThrottleTime $consumeThrottleTime"
    }
  }
  /** Runs `test` asynchronously on the shared executor and records the task for later collection. */
  private def submitTest(apiKey: ApiKeys, test: () => Unit) {
    val future = executor.submit(new Runnable() {
      def run() {
        test.apply()
      }
    })
    tasks += Task(apiKey, future)
  }
  /** Awaits every submitted task; logs and rethrows the first failure with its api key. */
  private def waitAndCheckResults() {
    for (task <- tasks) {
      try {
        task.future.get(15, TimeUnit.SECONDS)
      } catch {
        case e: Throwable => {
          // Intentionally broad: any failure (assertion or timeout) must fail the test.
          error(s"Test failed for api-key ${task.apiKey} with exception $e")
          throw e
        }
      }
    }
  }
  /**
   * Extracts the throttle-time field from a raw response struct by parsing it
   * with the response class that corresponds to `apiKey`. Throws for APIs
   * whose responses carry no throttle time.
   */
  private def responseThrottleTime(apiKey: ApiKeys, response: Struct): Int = {
    apiKey match {
      case ApiKeys.PRODUCE => new ProduceResponse(response).throttleTimeMs
      case ApiKeys.FETCH => FetchResponse.parse(response).throttleTimeMs
      case ApiKeys.LIST_OFFSETS => new ListOffsetResponse(response).throttleTimeMs
      case ApiKeys.METADATA =>
        new MetadataResponse(response, ApiKeys.DESCRIBE_GROUPS.latestVersion()).throttleTimeMs
      case ApiKeys.OFFSET_COMMIT => new OffsetCommitResponse(response).throttleTimeMs
      case ApiKeys.OFFSET_FETCH => new OffsetFetchResponse(response).throttleTimeMs
      case ApiKeys.FIND_COORDINATOR => new FindCoordinatorResponse(response).throttleTimeMs
      case ApiKeys.JOIN_GROUP => new JoinGroupResponse(response).throttleTimeMs
      case ApiKeys.HEARTBEAT => new HeartbeatResponse(response).throttleTimeMs
      case ApiKeys.LEAVE_GROUP => new LeaveGroupResponse(response).throttleTimeMs
      case ApiKeys.SYNC_GROUP => new SyncGroupResponse(response).throttleTimeMs
      case ApiKeys.DESCRIBE_GROUPS =>
        new DescribeGroupsResponse(response, ApiKeys.DESCRIBE_GROUPS.latestVersion()).throttleTimeMs
      case ApiKeys.LIST_GROUPS => new ListGroupsResponse(response).throttleTimeMs
      case ApiKeys.API_VERSIONS => new ApiVersionsResponse(response).throttleTimeMs
      case ApiKeys.CREATE_TOPICS =>
        new CreateTopicsResponse(response, ApiKeys.CREATE_TOPICS.latestVersion()).throttleTimeMs
      case ApiKeys.DELETE_TOPICS => new DeleteTopicsResponse(response).throttleTimeMs
      case ApiKeys.DELETE_RECORDS => new DeleteRecordsResponse(response).throttleTimeMs
      case ApiKeys.INIT_PRODUCER_ID => new InitProducerIdResponse(response).throttleTimeMs
      case ApiKeys.ADD_PARTITIONS_TO_TXN => new AddPartitionsToTxnResponse(response).throttleTimeMs
      case ApiKeys.ADD_OFFSETS_TO_TXN => new AddOffsetsToTxnResponse(response).throttleTimeMs
      case ApiKeys.END_TXN => new EndTxnResponse(response).throttleTimeMs
      case ApiKeys.TXN_OFFSET_COMMIT => new TxnOffsetCommitResponse(response).throttleTimeMs
      case ApiKeys.DESCRIBE_ACLS => new DescribeAclsResponse(response).throttleTimeMs
      case ApiKeys.CREATE_ACLS => new CreateAclsResponse(response).throttleTimeMs
      case ApiKeys.DELETE_ACLS => new DeleteAclsResponse(response).throttleTimeMs
      case ApiKeys.DESCRIBE_CONFIGS => new DescribeConfigsResponse(response).throttleTimeMs
      case ApiKeys.ALTER_CONFIGS => new AlterConfigsResponse(response).throttleTimeMs
      case ApiKeys.ALTER_REPLICA_LOG_DIRS => new AlterReplicaLogDirsResponse(response).throttleTimeMs
      case ApiKeys.DESCRIBE_LOG_DIRS => new DescribeLogDirsResponse(response).throttleTimeMs
      case ApiKeys.CREATE_PARTITIONS => new CreatePartitionsResponse(response).throttleTimeMs
      case ApiKeys.CREATE_DELEGATION_TOKEN => new CreateDelegationTokenResponse(response).throttleTimeMs
      case ApiKeys.DESCRIBE_DELEGATION_TOKEN=> new DescribeDelegationTokenResponse(response).throttleTimeMs
      case ApiKeys.EXPIRE_DELEGATION_TOKEN => new ExpireDelegationTokenResponse(response).throttleTimeMs
      case ApiKeys.RENEW_DELEGATION_TOKEN => new RenewDelegationTokenResponse(response).throttleTimeMs
      case ApiKeys.DELETE_GROUPS => new DeleteGroupsResponse(response).throttleTimeMs
      case ApiKeys.OFFSET_FOR_LEADER_EPOCH => new OffsetsForLeaderEpochResponse(response).throttleTimeMs
      case ApiKeys.ELECT_PREFERRED_LEADERS => new ElectPreferredLeadersResponse(response).throttleTimeMs
      case requestId => throw new IllegalArgumentException(s"No throttle time for $requestId")
    }
  }
private def checkRequestThrottleTime(apiKey: ApiKeys) {
// Request until throttled using client-id with default small quota
val clientId = apiKey.toString
val client = Client(clientId, apiKey)
val throttled = client.runUntil(response => responseThrottleTime(apiKey, response) > 0)
assertTrue(s"Response not throttled: $client", throttled)
assertTrue(s"Throttle time metrics not updated: $client" , throttleTimeMetricValue(clientId) > 0)
}
private def checkSmallQuotaProducerRequestThrottleTime(apiKey: ApiKeys) {
// Request until throttled using client-id with default small producer quota
val smallQuotaProducerClient = Client(smallQuotaProducerClientId, apiKey)
val throttled = smallQuotaProducerClient.runUntil(response => responseThrottleTime(apiKey, response) > 0)
assertTrue(s"Response not throttled: $smallQuotaProducerClient", throttled)
assertTrue(s"Throttle time metrics for produce quota not updated: $smallQuotaProducerClient",
throttleTimeMetricValueForQuotaType(smallQuotaProducerClientId, QuotaType.Produce) > 0)
assertTrue(s"Throttle time metrics for request quota updated: $smallQuotaProducerClient",
throttleTimeMetricValueForQuotaType(smallQuotaProducerClientId, QuotaType.Request).isNaN)
}
private def checkSmallQuotaConsumerRequestThrottleTime(apiKey: ApiKeys) {
// Request until throttled using client-id with default small consumer quota
val smallQuotaConsumerClient = Client(smallQuotaConsumerClientId, apiKey)
val throttled = smallQuotaConsumerClient.runUntil(response => responseThrottleTime(apiKey, response) > 0)
assertTrue(s"Response not throttled: $smallQuotaConsumerClientId", throttled)
assertTrue(s"Throttle time metrics for consumer quota not updated: $smallQuotaConsumerClient",
throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.Fetch) > 0)
assertTrue(s"Throttle time metrics for request quota updated: $smallQuotaConsumerClient",
throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.Request).isNaN)
}
private def checkUnthrottledClient(apiKey: ApiKeys) {
// Test that request from client with large quota is not throttled
val unthrottledClient = Client(unthrottledClientId, apiKey)
unthrottledClient.runUntil(response => responseThrottleTime(apiKey, response) <= 0.0)
assertEquals(1, unthrottledClient.correlationId)
assertTrue(s"Client should not have been throttled: $unthrottledClient", throttleTimeMetricValue(unthrottledClientId).isNaN)
}
private def checkExemptRequestMetric(apiKey: ApiKeys) {
val exemptTarget = exemptRequestMetricValue + 0.02
val clientId = apiKey.toString
val client = Client(clientId, apiKey)
val updated = client.runUntil(response => exemptRequestMetricValue > exemptTarget)
assertTrue(s"Exempt-request-time metric not updated: $client", updated)
assertTrue(s"Client should not have been throttled: $client", throttleTimeMetricValue(clientId).isNaN)
}
private def checkUnauthorizedRequestThrottle(apiKey: ApiKeys) {
val clientId = "unauthorized-" + apiKey.toString
val client = Client(clientId, apiKey)
val throttled = client.runUntil(response => throttleTimeMetricValue(clientId) > 0.0)
assertTrue(s"Unauthorized client should have been throttled: $client", throttled)
}
}
object RequestQuotaTest {

  // Partition all API keys into cluster-internal, SASL, and ordinary client actions.
  val ClusterActions = ApiKeys.values.toSet.filter(_.clusterAction)
  val SaslActions = Set(ApiKeys.SASL_HANDSHAKE, ApiKeys.SASL_AUTHENTICATE)
  val ClientActions = ApiKeys.values.toSet -- ClusterActions -- SaslActions

  val UnauthorizedPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "Unauthorized")
  // Principal used for all client connections. This is modified by tests which
  // check unauthorized code path
  var principal = KafkaPrincipal.ANONYMOUS

  /** Grants every operation to everyone except [[UnauthorizedPrincipal]]. */
  class TestAuthorizer extends SimpleAclAuthorizer {
    override def authorize(session: Session, operation: Operation, resource: Resource): Boolean =
      session.principal != UnauthorizedPrincipal
  }

  /** Ignores the authentication context and returns the currently configured principal. */
  class TestPrincipalBuilder extends KafkaPrincipalBuilder {
    override def build(context: AuthenticationContext): KafkaPrincipal = principal
  }
}
| gf53520/kafka | core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala | Scala | apache-2.0 | 28,344 |
/*
* Copyright (C) 2014 Cathal Mc Ginley
*
* This file is part of TomatoJuice, a Pomodoro timer-tracker for GNOME.
*
* TomatoJuice is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 3 of the License,
* or (at your option) any later version.
*
* TomatoJuice is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with TomatoJuice; see the file COPYING. If not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*/
package org.gnostai.tomatojuice.actors
import akka.actor._
import scala.concurrent.Future
import akka.routing.Listeners
import org.gnostai.tomatojuice.core._
import scala.collection.JavaConversions
/** Pomodoro cycle tracker: work pomodoros interleaved with short breaks, with a
  * long break inserted after `pomodorosBeforeLongBreak` pomodoros. Observers
  * register via the [[akka.routing.Listeners]] protocol and receive progress
  * and completion events through `gossip`. */
trait PomodoroTrackerModule extends CoreModule with PomodoroCountdownModule {

  /** Messages understood and emitted by the tracker actor. */
  object PomodoroTracker {
    case object TimerActivated
    case class CountdownMinutesRemaining(timer: CountdownTimer, minutes: Int)
    case class CountdownTimerCompleted(timer: CountdownTimer)
  }

  trait PomodoroTrackerActor extends Listeners with ActorLogging { this: Actor =>

    def mainApp: ActorRef
    def countdownActor: ActorRef

    // The two behaviours the actor alternates between.
    def timerInactive(nextCountdown: CountdownTimer, pomodorosRemaining: Int): Receive
    def countingDown(timer: CountdownTimer, pomodorosRemaining: Int): Receive

    protected def pomodorosBeforeLongBreak: Int
    protected def nextTimerFor(countdown: CountdownTimer, pomodorosRemaining: Int): CountdownTimer
    protected def minutesToCountDown(countdown: CountdownTimer): Int
  }

  trait PomodoroTrackerActorImpl extends PomodoroTrackerActor { this: Actor =>
    import CoreMessages._
    import PomodoroTracker._
    import PomodoroCountdown._

    val pomodoroConfig = config.getConfig("tomatojuice.pomodoro")

    def receive = timerInactive(PomodoroCountdownTimer, pomodorosBeforeLongBreak) orElse listenerManagement

    /** Waiting state: a `TimerActivated` message starts the next countdown. */
    def timerInactive(nextCountdown: CountdownTimer, pomodorosRemaining: Int): Receive = {
      case TimerActivated =>
        context.become(countingDown(nextCountdown, pomodorosRemaining) orElse listenerManagement)
        val minutes = minutesToCountDown(nextCountdown)
        countdownActor ! StartCountdown(minutes)
        mainApp ! NewPomodoroStarted
    }

    /** Running state: relays progress, and on completion schedules the next timer. */
    def countingDown(timer: CountdownTimer, pomodorosRemaining: Int): Receive = {
      case MinutesRemaining(mins) =>
        gossip(CountdownMinutesRemaining(timer, mins))
      case TimerCompleted =>
        // FIX: leftover debug output previously went straight to stderr
        // (System.err.println of the message and each registered listener);
        // route it through the actor's logger instead.
        log.debug("countdown completed for timer {}", timer)
        gossip(CountdownMinutesRemaining(timer, 0))
        // A finished short break consumes one pomodoro; a finished long break
        // resets the cycle; a finished pomodoro leaves the count unchanged.
        val remaining = timer match {
          case PomodoroCountdownTimer => pomodorosRemaining
          case ShortBreakCountdownTimer => pomodorosRemaining - 1
          case LongBreakCountdownTimer => pomodorosBeforeLongBreak
        }
        val nextTimer = nextTimerFor(timer, remaining)
        gossip(CountdownTimerCompleted(nextTimer))
        // FIX: keep the Listeners protocol alive across cycles. Previously
        // `listenerManagement` was dropped from this become (unlike the other
        // two become calls above), so listener (de)registration stopped
        // working after the first completed timer.
        context.become(timerInactive(nextTimer, remaining) orElse listenerManagement)
    }

    protected def pomodorosBeforeLongBreak = pomodoroConfig.getInt("pomodorosBeforeLongBreak")

    /** Pomodoro -> short break (or long break when the cycle is exhausted); any break -> pomodoro. */
    protected def nextTimerFor(countdown: CountdownTimer, pomodorosRemaining: Int): CountdownTimer = {
      countdown match {
        case PomodoroCountdownTimer =>
          if (pomodorosRemaining > 1)
            ShortBreakCountdownTimer
          else
            LongBreakCountdownTimer
        case ShortBreakCountdownTimer => PomodoroCountdownTimer
        case LongBreakCountdownTimer => PomodoroCountdownTimer
      }
    }

    /** Configured duration (minutes) for the given timer kind. */
    protected def minutesToCountDown(countdown: CountdownTimer) = {
      countdown match {
        case PomodoroCountdownTimer => pomodoroConfig.getInt("duration")
        case ShortBreakCountdownTimer => pomodoroConfig.getInt("breakDuration")
        case LongBreakCountdownTimer => pomodoroConfig.getInt("longBreakDuration")
      }
    }
  }

  class PomodoroTrackerActorProductionImpl(val mainApp: ActorRef) extends Actor with PomodoroTrackerActorImpl {
    val countdownActor = context.actorOf(Props(newPomodoroCountdownActor(self)))
  }
}
/** Production wiring: the tracker combined with the real countdown module. */
trait ProductionPomodoroTrackerModule extends PomodoroTrackerModule
with PomodoroCountdownModule {
}
| cathalmcginley/tomatojuice | src/main/scala/org/gnostai/tomatojuice/actors/PomodoroTracker.scala | Scala | gpl-3.0 | 4,661 |
package no.skytteren.elasticala.search
import no.skytteren.elasticala.mapping.{DefinedAnalyzer, Analyzer, default => DefaultAnalyzer}
import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder
import org.elasticsearch.search.suggest._
import org.elasticsearch.search.suggest.phrase.{PhraseSuggestionBuilder, PhraseSuggestion => EsPhraseSuggestion}
import org.elasticsearch.search.suggest.term.{TermSuggestionBuilder, TermSuggestion => EsTermSuggestion}
import org.elasticsearch.search.suggest.completion.{CompletionSuggestion => EsCompletionSuggestion, CompletionSuggestionFuzzyBuilder, CompletionSuggestionBuilder}
import org.elasticsearch.common.unit.{Fuzziness => EsFuzziness}
/**
* Created by steskytt on 01.02.2016.
*/
/** DSL entry points for building Elasticsearch term, phrase and completion
  * suggesters. Each factory returns an immutable description that is turned
  * into an ES builder by [[Suggestion.build]]. */
trait SuggestDSL {
  object suggest {
    // Per-token spelling corrections for `text` against `field`.
    def term(text: String, field: String, analyzer: Analyzer = DefaultAnalyzer, size: Int = 5, sort: SuggestionSort = SuggestionSort.score, suggestMode: SuggestMode = SuggestMode.missing) = TermSuggestion(text, field, analyzer, size, sort, suggestMode)
    /*
    TODO: collate
     */
    // Whole-phrase corrections built on top of the term suggester.
    def phrase(text: String, field: String, analyzer: Analyzer = DefaultAnalyzer, size: Int = 5, realWordErrorLikelihood: Float = 0.95f, maxErrors: Float = 1.0f,
               gramSize: GramSize = DefaultGramSize, confidence: Float = 1.0f, separator: String = " ", highlight: Highlight = Highlight("", "")) =
      PhraseSuggestion(text, field, analyzer, size, realWordErrorLikelihood, maxErrors, gramSize, confidence, separator, highlight)
    /*
    TODO: context
     */
    // Auto-complete style suggestions; pass `fuzzy(...)` to tolerate typos.
    def completion(text: String, field: String, size: Int = 5, fuzzy: FuzzyCompletionQuery = NoFuzzyCompletionQuery) = CompletionSuggestion(text, field, size, fuzzy)
    // Fuzzy settings used by the completion suggester.
    def fuzzy(fuzziness: Fuzziness = Fuzziness.AUTO, transpositions: Boolean = true, minLength: Int = 3, prefixLength: Int = 1, unicodeAware: Boolean = false) = DefinedFuzzyCompletionQuery(fuzziness, transpositions, minLength, prefixLength, unicodeAware)
  }
  // Allows writing a plain Int wherever a GramSize is expected.
  implicit def int2GramSize(value: Int): GramSize = ExplicitGramSize(value)
}
// Edit-distance setting for fuzzy completion (mirrors Elasticsearch Fuzziness).
sealed trait Fuzziness
object Fuzziness {
  object `0` extends Fuzziness
  object `1` extends Fuzziness
  object `2` extends Fuzziness
  object AUTO extends Fuzziness
}
// Optional fuzzy matching for completion suggestions; NoFuzzyCompletionQuery disables it.
sealed trait FuzzyCompletionQuery
case class DefinedFuzzyCompletionQuery(fuzziness: Fuzziness, transpositions: Boolean, minLength: Int, prefixLength: Int, unicodeAware: Boolean) extends FuzzyCompletionQuery
case object NoFuzzyCompletionQuery extends FuzzyCompletionQuery
// Pre/post tags wrapped around matched fragments in phrase-suggestion highlighting.
case class Highlight(preTag: String, postTag: String)
// n-gram size for the phrase suggester; DefaultGramSize lets Elasticsearch choose.
sealed trait GramSize
case class ExplicitGramSize(value: Int) extends GramSize
case object DefaultGramSize extends GramSize
// Marker trait; no implementations in this file.
trait SuggestAnalyser
// A collection of named suggestions that can register themselves on a request
// via the provided `add` callback.
trait Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit
}
// Empty collection: registers nothing.
case object NoSuggestions extends Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit = {}
}
/** One named suggestion; `build` registers it with the request builder. */
case class Suggestions1[S1 <: Suggestion](_1: (String, S1)) extends Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit = {
    val (name, suggestion) = _1
    add(suggestion.build(name))
  }
}
/** Two named suggestions, registered in order. */
case class Suggestions2[S1 <: Suggestion, S2 <: Suggestion](_1: (String, S1), _2: (String, S2)) extends Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit =
    List[(String, Suggestion)](_1, _2).foreach { case (name, suggestion) =>
      add(suggestion.build(name))
    }
}
/** Three named suggestions, registered in order. */
case class Suggestions3[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion](_1: (String, S1), _2: (String, S2), _3: (String, S3)) extends Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit =
    List[(String, Suggestion)](_1, _2, _3).foreach { case (name, suggestion) =>
      add(suggestion.build(name))
    }
}
/** Four named suggestions, registered in order. */
case class Suggestions4[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion, S4 <: Suggestion](_1: (String, S1), _2: (String, S2), _3: (String, S3), _4: (String, S4)) extends Suggestions {
  def build(add: SuggestionBuilder[_] => Unit): Unit =
    List[(String, Suggestion)](_1, _2, _3, _4).foreach { case (name, suggestion) =>
      add(suggestion.build(name))
    }
}
/** Type class lifting a tuple of `(name, suggestion)` pairs into a typed
  * [[Suggestions]] collection; the dependent type `S` preserves the precise
  * arity so result handlers can be resolved implicitly later. */
trait SuggestionsBuilder[T] {
  type S <: Suggestions
  def build(in: T): S
}
object SuggestionsBuilder {
  // Identity instance for "no suggestions requested".
  implicit object noSuggestionsBuilder extends SuggestionsBuilder[NoSuggestions.type] {
    type S = NoSuggestions.type
    override def build(_1: NoSuggestions.type) = NoSuggestions
  }
  // One instance per supported arity (1 to 4), each refining the member type S.
  implicit def suggestions1Builder[S1 <: Suggestion]: SuggestionsBuilder[(String, S1)]{ type S = Suggestions1[S1]} =
    new SuggestionsBuilder[(String, S1)] {
      type S = Suggestions1[S1]
      override def build(s: (String, S1)): Suggestions1[S1] = Suggestions1(s)
    }
  implicit def suggestions2Builder[S1 <: Suggestion, S2 <: Suggestion]: SuggestionsBuilder[((String, S1), (String, S2))] { type S = Suggestions2[S1, S2]} =
    new SuggestionsBuilder[((String, S1), (String, S2))] {
      type S = Suggestions2[S1, S2]
      override def build(s: ((String, S1), (String, S2))): Suggestions2[S1, S2] = Suggestions2(s._1, s._2)
    }
  implicit def suggestions3Builder[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion]: SuggestionsBuilder[((String, S1), (String, S2), (String, S3))] { type S = Suggestions3[S1, S2, S3]} =
    new SuggestionsBuilder[((String, S1), (String, S2), (String, S3))] {
      type S = Suggestions3[S1, S2, S3]
      override def build(s: ((String, S1), (String, S2), (String, S3))): Suggestions3[S1, S2, S3] = Suggestions3(s._1, s._2, s._3)
    }
  implicit def suggestions4Builder[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion, S4 <: Suggestion]: SuggestionsBuilder[((String, S1), (String, S2), (String, S3), (String, S4))] { type S = Suggestions4[S1, S2, S3, S4]} =
    new SuggestionsBuilder[((String, S1), (String, S2), (String, S3), (String, S4))] {
      type S = Suggestions4[S1, S2, S3, S4]
      override def build(s: ((String, S1), (String, S2), (String, S3), (String, S4))): Suggestions4[S1, S2, S3, S4] = Suggestions4(s._1, s._2, s._3, s._4)
    }
}
// Marker for typed suggestion results (term / phrase / completion).
trait SuggestionResult
// A suggestion description that can produce the corresponding ES builder.
trait Suggestion {
  def build(name: String): SuggestionBuilder[_]
}
/** Term suggester description; turned into an ES [[TermSuggestionBuilder]]. */
case class TermSuggestion(text: String, field: String, analyzer: Analyzer, size: Int, sort: SuggestionSort, suggestMode: SuggestMode) extends Suggestion{
  override def build(name: String) = {
    // The ES builder is mutable; each setter returns the builder itself.
    val builder = new TermSuggestionBuilder(name)
    builder.text(text)
    builder.field(field)
    builder.size(size)
    builder.sort(sort.value)
    builder.suggestMode(suggestMode.value)
    // Only a user-defined analyzer carries a name; the default stays implicit.
    analyzer match {
      case named: DefinedAnalyzer => builder.analyzer(named.name)
      case _                      => // default analyzer: nothing to set
    }
    builder
  }
}
// One candidate correction for a term, with its document frequency.
case class TermSuggestedOption(text: String, frequency: Int)
// Corrections for one token of the input text (offset/length locate the token).
case class TermSuggested(text: String, offset: Int, length: Int, options: Seq[TermSuggestedOption])
case class TermSuggestionResult(name: String, suggestions: Seq[TermSuggested]) extends SuggestionResult
/** Phrase suggester description; turned into an ES [[PhraseSuggestionBuilder]]. */
case class PhraseSuggestion(text: String, field: String, analyzer: Analyzer, size: Int, realWordErrorLikelihood: Float, maxErrors: Float,
                            gramSize: GramSize, confidence: Float, separator: String, highlight: Highlight) extends Suggestion{
  override def build(name: String) = {
    // The ES builder is mutable; each setter returns the builder itself.
    val builder = new PhraseSuggestionBuilder(name)
    builder.text(text)
    builder.field(field)
    builder.size(size)
    builder.realWordErrorLikelihood(realWordErrorLikelihood)
    builder.maxErrors(maxErrors)
    builder.confidence(confidence)
    builder.separator(separator)
    builder.highlight(highlight.preTag, highlight.postTag)
    // Optional settings: only forwarded when explicitly defined.
    analyzer match {
      case named: DefinedAnalyzer => builder.analyzer(named.name)
      case _                      =>
    }
    gramSize match {
      case ExplicitGramSize(n) => builder.gramSize(n)
      case _                   =>
    }
    builder
  }
}
// One candidate phrase, with score and the highlighted rendering.
case class PhraseSuggestedOption(text: String, score: Float, highlighted: String)
case class PhraseSuggested(text: String, offset: Int, length: Int, cutoffScore: Double, options: Seq[PhraseSuggestedOption])
case class PhraseSuggestionResult(name: String, suggestions: Seq[PhraseSuggested]) extends SuggestionResult
/** Completion suggester description; produces a fuzzy builder when a fuzzy
  * query is configured, a plain completion builder otherwise. */
case class CompletionSuggestion(text: String, field: String, size: Int, fuzzy: FuzzyCompletionQuery) extends Suggestion{
  override def build(name: String): SuggestionBuilder[_] = {
    fuzzy match {
      case q: DefinedFuzzyCompletionQuery =>
        // BUGFIX: the fuzzy builder was previously constructed with `field` as
        // the suggestion *name* and `.field(field)` was never called, so the
        // suggestion could not be looked up under the requested name (the
        // result handlers resolve by name). Mirror the non-fuzzy branch.
        val fb = new CompletionSuggestionFuzzyBuilder(name)
        fb.text(text)
        fb.field(field)
        fb.size(size)
        fb.setFuzzyMinLength(q.minLength)
        fb.setFuzzyPrefixLength(q.prefixLength)
        fb.setFuzzyTranspositions(q.transpositions)
        fb.setUnicodeAware(q.unicodeAware)
        val fuzziness = q.fuzziness match {
          case Fuzziness.`0` => EsFuzziness.ZERO
          case Fuzziness.`1` => EsFuzziness.ONE
          case Fuzziness.`2` => EsFuzziness.TWO
          case Fuzziness.AUTO => EsFuzziness.AUTO
        }
        fb.setFuzziness(fuzziness)
        fb
      case _ => new CompletionSuggestionBuilder(name).text(text).field(field).size(size)
    }
  }
}
// One completion candidate, with score and its raw payload (as a string).
case class CompletionSuggestedOption(text: String, score: Float, payload: String)
case class CompletionSuggested(text: String, offset: Int, length: Int, options: Seq[CompletionSuggestedOption])
case class CompletionSuggestionResult(name: String, suggestions: Seq[CompletionSuggested]) extends SuggestionResult
// Typed container mirroring the arity of the Suggestions that were requested.
sealed trait SuggestionResults
case object NoSuggestionResults extends SuggestionResults
case class SuggestionResults1[S1 <: SuggestionResult](_1: S1) extends SuggestionResults
case class SuggestionResults2[S1 <: SuggestionResult, S2 <: SuggestionResult](_1: S1, _2: S2) extends SuggestionResults
case class SuggestionResults3[S1 <: SuggestionResult, S2 <: SuggestionResult, S3 <: SuggestionResult](_1: S1, _2: S2, _3: S3) extends SuggestionResults
case class SuggestionResults4[S1 <: SuggestionResult, S2 <: SuggestionResult, S3 <: SuggestionResult, S4 <: SuggestionResult](_1: S1, _2: S2, _3: S3, _4: S4) extends SuggestionResults
case class SuggestionResultsSeq[A <: SuggestionResult](a: Seq[A]) extends SuggestionResults
// Wire value enums; `value` is the string sent to Elasticsearch.
sealed abstract class SuggestionSort(val value: String)
object SuggestionSort{
  /**
   * Sort by score first, then document frequency and then the term itself.
   */
  case object score extends SuggestionSort("score")
  /**
   * Sort by document frequency first, then similarity score and then the term itself.
   */
  case object frequency extends SuggestionSort("frequency")
}
/**
 * The suggest mode controls what suggestions are included or controls for what suggest text terms, suggestions should be suggested. Three possible values can be specified:
 */
sealed abstract class SuggestMode(val value: String)
object SuggestMode{
  /**
   * Only provide suggestions for suggest text terms that are not in the index. This is the default.
   */
  case object missing extends SuggestMode("missing")
  /**
   * Only suggest suggestions that occur in more docs then the original suggest text term.
   */
  case object popular extends SuggestMode("popular")
  /**
   * Suggest any matching suggestions based on terms in the suggest text.
   */
  case object always extends SuggestMode("always")
}
/** Type class converting a typed [[Suggestions]] request plus the raw ES
  * response (a by-name lookup of `Suggest.Suggestion`) into the matching typed
  * [[SuggestionResults]]; one instance per supported arity. */
trait SuggestionResultsHandler[Ss <: Suggestions, SRs <: SuggestionResults] {
  def toResult(ss: Ss, eSs: String => Suggest.Suggestion[_]): SRs
}
object SuggestionResultsHandler {
  implicit object NoApplicationResultHandler extends SuggestionResultsHandler[NoSuggestions.type, NoSuggestionResults.type] {
    def toResult(ss: NoSuggestions.type, sSsFunc: String => Suggest.Suggestion[_]) = NoSuggestionResults
  }
  // Each arity delegates element-wise to the per-suggestion SuggestionResultHandler.
  implicit def SuggestionResults1Handler[S1 <: Suggestion, AR1 <: SuggestionResult](implicit ar: SuggestionResultHandler[S1, AR1]): SuggestionResultsHandler[Suggestions1[S1], SuggestionResults1[AR1]] =
    new SuggestionResultsHandler[Suggestions1[S1], SuggestionResults1[AR1]] {
      override def toResult(ss: Suggestions1[S1], sSsFunc: String => Suggest.Suggestion[_]) = SuggestionResults1(ar.toResult(ss._1, sSsFunc))
    }
  implicit def SuggestionResults2Handler[S1 <: Suggestion, S2 <: Suggestion, AR1 <: SuggestionResult, AR2 <: SuggestionResult](implicit ar1: SuggestionResultHandler[S1, AR1], ar2: SuggestionResultHandler[S2, AR2]): SuggestionResultsHandler[Suggestions2[S1, S2], SuggestionResults2[AR1, AR2]] =
    new SuggestionResultsHandler[Suggestions2[S1, S2], SuggestionResults2[AR1, AR2]] {
      override def toResult(ss: Suggestions2[S1, S2], sSsFunc: String => Suggest.Suggestion[_]) = SuggestionResults2(ar1.toResult(ss._1, sSsFunc), ar2.toResult(ss._2, sSsFunc))
    }
  implicit def SuggestionResults3Handler[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion, AR1 <: SuggestionResult, AR2 <: SuggestionResult, AR3 <: SuggestionResult](implicit ar1: SuggestionResultHandler[S1, AR1], ar2: SuggestionResultHandler[S2, AR2], ar3: SuggestionResultHandler[S3, AR3]): SuggestionResultsHandler[Suggestions3[S1, S2, S3], SuggestionResults3[AR1, AR2, AR3]] =
    new SuggestionResultsHandler[Suggestions3[S1, S2, S3], SuggestionResults3[AR1, AR2, AR3]] {
      override def toResult(ss: Suggestions3[S1, S2, S3], sSsFunc: String => Suggest.Suggestion[_]) = SuggestionResults3(ar1.toResult(ss._1, sSsFunc), ar2.toResult(ss._2, sSsFunc), ar3.toResult(ss._3, sSsFunc))
    }
  implicit def SuggestionResults4Handler[S1 <: Suggestion, S2 <: Suggestion, S3 <: Suggestion, S4 <: Suggestion, AR1 <: SuggestionResult, AR2 <: SuggestionResult, AR3 <: SuggestionResult, AR4 <: SuggestionResult](implicit ar1: SuggestionResultHandler[S1, AR1], ar2: SuggestionResultHandler[S2, AR2], ar3: SuggestionResultHandler[S3, AR3], ar4: SuggestionResultHandler[S4, AR4]): SuggestionResultsHandler[Suggestions4[S1, S2, S3, S4], SuggestionResults4[AR1, AR2, AR3, AR4]] =
    new SuggestionResultsHandler[Suggestions4[S1, S2, S3, S4], SuggestionResults4[AR1, AR2, AR3, AR4]] {
      override def toResult(ss: Suggestions4[S1, S2, S3, S4], sSsFunc: String => Suggest.Suggestion[_]) = SuggestionResults4(ar1.toResult(ss._1, sSsFunc), ar2.toResult(ss._2, sSsFunc), ar3.toResult(ss._3, sSsFunc), ar4.toResult(ss._4, sSsFunc))
    }
}
/** Type class converting one named suggestion plus the raw ES response into
  * its typed result. Looks up the ES suggestion by the requested name and
  * casts it to the concrete ES type corresponding to `S`. */
trait SuggestionResultHandler[S <: Suggestion, SR <: SuggestionResult] {
  def toResult(s: (String, S), suggestions: String => Suggest.Suggestion[_]): SR
}
object SuggestionResultHandler {
  implicit object TermSuggestionResultHandler extends SuggestionResultHandler[TermSuggestion, TermSuggestionResult] {
    override def toResult(s: (String, TermSuggestion), suggestions: (String) => Suggest.Suggestion[_]): TermSuggestionResult = {
      // The cast is safe by construction: a TermSuggestion registers an ES term suggester under s._1.
      val ts = suggestions(s._1).asInstanceOf[EsTermSuggestion]
      import scala.collection.JavaConverters._
      val suggested = ts.getEntries.asScala.toList.map(entry => {
        val options = entry.getOptions.asScala.toList.map(option => TermSuggestedOption(option.getText.string(), option.getFreq))
        TermSuggested(entry.getText.string, entry.getOffset, entry.getLength, options)
      })
      TermSuggestionResult(s._1, suggested)
    }
  }
  implicit object PhraseSuggestionResultHandler extends SuggestionResultHandler[PhraseSuggestion, PhraseSuggestionResult] {
    override def toResult(s: (String, PhraseSuggestion), suggestions: (String) => Suggest.Suggestion[_]): PhraseSuggestionResult = {
      val ts = suggestions(s._1).asInstanceOf[EsPhraseSuggestion]
      import scala.collection.JavaConverters._
      val suggested = ts.getEntries.asScala.toList.map(entry => {
        val options = entry.getOptions.asScala.toList.map(option => PhraseSuggestedOption(option.getText.string(), option.getScore, option.getHighlighted.string()))
        PhraseSuggested(entry.getText.string, entry.getOffset, entry.getLength, entry.getCutoffScore, options)
      })
      PhraseSuggestionResult(s._1, suggested)
    }
  }
  implicit object CompletionSuggestionResultHandler extends SuggestionResultHandler[CompletionSuggestion, CompletionSuggestionResult] {
    override def toResult(s: (String, CompletionSuggestion), suggestions: (String) => Suggest.Suggestion[_]): CompletionSuggestionResult = {
      val ts = suggestions(s._1).asInstanceOf[EsCompletionSuggestion]
      import scala.collection.JavaConverters._
      val suggested = ts.getEntries.asScala.toList.map(entry => {
        val options = entry.getOptions.asScala.toList.map(option => CompletionSuggestedOption(option.getText.string(), option.getScore, option.getPayloadAsString))
        CompletionSuggested(entry.getText.string, entry.getOffset, entry.getLength, options)
      })
      CompletionSuggestionResult(s._1, suggested)
    }
  }
}
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.directed
import cc.factorie.variable._
/*
trait PlatedDiscreteGeneratingFactor extends DirectedFactor {
def prValue(s:StatisticsType, value:Int, index:Int): Double
def prValue(value:Int, index:Int): Double = prValue(statistics, value, index)
}
*/
/** Directed factor family: a sequence of discrete variables drawn i.i.d. from
  * a single Proportions parent. */
object PlatedDiscrete extends DirectedFamily2[DiscreteSeqVariable,ProportionsVariable] {
  self =>
  /** Sequence likelihood: product of per-element proportions. */
  def pr(ds:DiscreteSeqVariable#Value, p:Proportions): Double =
    ds.foldLeft(1.0)((acc, dv) => acc * p(dv.intValue)) // TODO Make this more efficient; this current boxes
  /** Sequence log-likelihood: sum of per-element log proportions. */
  def logpr(ds:IndexedSeq[DiscreteValue], p:Proportions): Double =
    ds.foldLeft(0.0)((acc, dv) => acc + math.log(p(dv.intValue))) // TODO Make this more efficient
  /** Draws `length` independent values from `p` within domain `d`. */
  def sampledValue(d:DiscreteDomain, length:Int, p:Proportions)(implicit random: scala.util.Random): IndexedSeq[DiscreteValue] =
    IndexedSeq.fill(length)(d(p.sampleIndex))
  case class Factor(override val _1:DiscreteSeqVariable, override val _2:ProportionsVariable) extends super.Factor(_1, _2) {
    def pr(child:DiscreteSeqVariable#Value, p:Proportions): Double = self.pr(child, p)
    override def sampledValue(implicit random: scala.util.Random): IndexedSeq[DiscreteValue] = self.sampledValue(_1.domain.elementDomain, _1.length, _2.value) // Avoid creating a Statistics
    def sampledValue(p:Proportions)(implicit random: scala.util.Random): IndexedSeq[DiscreteValue] =
      if (_1.length > 0) self.sampledValue(_1.domain.elementDomain, _1.length, p)
      else IndexedSeq.empty[DiscreteValue]
    /** Adds `weight` to the parent's count for the value at `index`. */
    def updateCollapsedParents(index:Int, weight:Double): Boolean = {
      _2.value.+=(_1(index).intValue, weight)
      true
    }
  }
  def newFactor(a:DiscreteSeqVariable, b:ProportionsVariable) = Factor(a, b)
}
/** Categorical (String-labelled) counterpart of [[PlatedDiscrete]]. */
object PlatedCategorical extends DirectedFamily2[CategoricalSeqVariable[String],ProportionsVariable] {
  self =>
  /** Sequence likelihood: product of per-element proportions. */
  def pr(ds:IndexedSeq[CategoricalValue[String]], p:Proportions): Double =
    ds.foldLeft(1.0)((acc, dv) => acc * p(dv.intValue)) // TODO Make this more efficient; this current boxes
  /** Sequence log-likelihood: sum of per-element log proportions. */
  def logpr(ds:IndexedSeq[CategoricalValue[String]], p:Proportions): Double =
    ds.foldLeft(0.0)((acc, dv) => acc + math.log(p(dv.intValue))) // TODO Make this more efficient
  /** Draws `length` independent values from `p` within domain `d`. */
  def sampledValue(d:CategoricalDomain[String], length:Int, p:Proportions)(implicit random: scala.util.Random): IndexedSeq[CategoricalValue[String]] =
    IndexedSeq.fill(length)(d(p.sampleIndex))
  case class Factor(override val _1:CategoricalSeqVariable[String], override val _2:ProportionsVariable) extends super.Factor(_1, _2) {
    def pr(child:IndexedSeq[CategoricalValue[String]], p:Proportions): Double = self.pr(child, p)
    // NOTE: uses _1.head.domain, so this variant requires a non-empty sequence.
    override def sampledValue(implicit random: scala.util.Random): CategoricalSeqVariable[String]#Value = self.sampledValue(_1.head.domain, _1.length, _2.value) // Avoid creating a Statistics
    def sampledValue(p:Proportions)(implicit random: scala.util.Random): IndexedSeq[CategoricalValue[String]] =
      if (_1.length > 0) self.sampledValue(_1.head.domain, _1.length, p)
      else IndexedSeq.empty[CategoricalValue[String]]
    /** Adds `weight` to the parent's count for the value at `index`. */
    def updateCollapsedParents(index:Int, weight:Double): Boolean = {
      _2.value.+=(_1(index).intValue, weight)
      true
    }
  }
  def newFactor(a:CategoricalSeqVariable[String], b:ProportionsVariable) = Factor(a, b)
}
| Craigacp/factorie | src/main/scala/cc/factorie/directed/PlatedDiscrete.scala | Scala | apache-2.0 | 4,297 |
package io.plasmap.serializer.test
import io.plasmap.model._
import io.plasmap.model.geometry.Geometry
import io.plasmap.serializer.{Codecs, GeoJsonSerialiser}
import io.plasmap.model.geometry._
import org.specs2.ScalaCheck
import org.specs2.matcher.StringMatchers
import org.specs2.mutable.Specification
import argonaut._, Argonaut._
import scodec.Attempt.Successful
import scodec.Codec
import scodec.bits.BitVector
import shapeless.Lazy
/** Encode-then-decode round-trip tests for the scodec [[Codecs]] of the OSM model.
  *
  * Fix: the example descriptions previously read "work forOsmNode", "work forOsmWay",
  * etc. (missing space after "for"), inconsistent with "work for OsmVersion". */
class CodecsSpec extends Specification with ScalaCheck with StringMatchers {

  import Codecs._

  /** Encodes `a` and decodes the bits again, returning the decoded value.
    * `Lazy` defers derivation of the codec; `.require` fails the example on
    * any encode/decode error. */
  def roundtrip[A](a:A)(implicit ev: Lazy[Codec[A]]) = {
    Codec[A].decode(Codec[A].encode(a).require).require.value
  }

  "Roundtrips" should {

    "work for OsmVersion" in {
      val v = OsmVersion()
      roundtrip(v) must_== v
    }

    "work for OsmNode" in {
      val n = OsmNode(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        HashPoint(12L)
      )
      roundtrip(n) must_== n
    }

    "work for OsmWay" in {
      val w = OsmWay(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        List(OsmId(12), OsmId(88))
      )
      roundtrip(w) must_== w
    }

    "work for OsmRelation" in {
      val r = OsmRelation(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        List(
          // Cover all member types and all role encodings.
          OsmMember(OsmTypeRelation, OsmId(121), OsmRoleOuter),
          OsmMember(OsmTypeWay, OsmId(121), OsmRoleOther("boohoo")),
          OsmMember(OsmTypeNode, OsmId(121), OsmRoleEmpty)
        )
      )
      roundtrip(r) must_== r
    }

    "work for OsmDenormalizedNode" in {
      val n = OsmDenormalizedNode(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        HashPoint(12L)
      )
      roundtrip(n) must_== n
    }

    "work for OsmDenormalizedWay" in {
      val w = OsmDenormalizedWay(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        LineString(List((17.0, 11.2), (18.2, 14.7)))
      )
      roundtrip(w) must_== w
    }

    "work for OsmDenormalizedRelation" in {
      val r = OsmDenormalizedRelation(
        OsmId(12L),
        Some(OsmUser("eddybaby", 3820302898L)),
        OsmVersion(),
        List("key" -> "value", "key2" -> "value2").map((OsmTag.apply _).tupled),
        // Nested geometry collection exercises the recursive codec.
        GeometryCollection(List(
          LineString(List((22.1, 17.2), (18.0, 18.0))),
          LonLatPoint(22.7, 88.2),
          GeometryCollection(List(MultiPolygon(List(List(List((1.7, 2.8)))))))
        ))
      )
      roundtrip(r) must_== r
    }
  }
}
| jean-ma/geow | src/test/scala/io/plasmap/serializer/test/CodecsSpec.scala | Scala | apache-2.0 | 3,022 |
package LOCO.solvers
import breeze.linalg._
import org.apache.spark.broadcast.Broadcast
import scala.collection._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import preprocessingUtils.FeatureVectorLP
import LOCO.utils.preprocessing
import LOCO.utils.ProjectionUtils._
object runLOCO {
/**
* Runs LOCO
*
* @param sc Spark Context
* @param classification True if the problem at hand is a classification problem. In this case
* an l2-penalized SVM is trained. When set to false ridge regression is
* used.
* @param randomSeed Random seed for random projections and choosing the examples randomly in SDCA.
* @param trainingDataByCol Training data set
* @param nPartitions Number of partitions to use to split the data matrix across columns.
* @param nExecutors Number of executors used - needed to set the tree depth in treeReduce when
* aggregating the random features.
* @param projection Specify which projection shall be used: "sparse" for a sparse
* random projection or "SDCT" for the SDCT.
* @param concatenate True if random features should be concatenated.
* When set to false they are added.
* @param nFeatsProj Dimensionality of the random projection
   * @param lambda Regularization parameter to use in algorithm
* @param numIterations Specify number of iterations used in SDCA.
* @param checkDualityGap If the optimizer is SDCA, specify whether the duality gap should be
   * computed after each iteration. Note that this is expensive as it
* requires a pass over the entire (local) data set. Should only be used
* for tuning purposes.
* @param stoppingDualityGap If the optimizer is SDCA, specify the size of the duality gap at
* which the optimization should end. If it is not reached after
* numIterations, the optimization ends nonetheless.
*
* @return Return the coefficients estimated by LOCO, the time stamp when timing was started,
* the column means and the mean response (to center the test set later).
*/
def run(
sc : SparkContext,
classification : Boolean,
logistic : Boolean,
randomSeed : Int,
trainingDataByCol : RDD[FeatureVectorLP],
response : DenseVector[Double],
nFeats : Int,
nPartitions : Int,
nExecutors : Int,
projection : String,
useSparseStructure : Boolean,
concatenate : Boolean,
nFeatsProj : Int,
lambda : Double,
numIterations : Int,
checkDualityGap : Boolean,
stoppingDualityGap : Double,
privateLOCO : Boolean,
privateEps : Double,
privateDelta : Double,
privateCV : Boolean,
kfold : Int,
lambdaSeq : Seq[Double]) : (DenseVector[Double], Long, Long, Long,
Option[DenseVector[Double]],
Option[Array[Array[(Double, Double, Double)]]]) = {
// get number of observations
val nObs = response.size
//
val naive = if(nFeatsProj == 0) true else false
println("\nNumber of observations: " + nObs)
println("\nNumber of features: " + nFeats)
println("\nApprox. number of raw features per worker: " +
nFeats / trainingDataByCol.partitions.size.toDouble)
println("\nProjection dimension: " + nFeatsProj)
println("\nPartitions training over cols: " + trainingDataByCol.partitions.size)
// create local matrices from feature vectors
val localMats = preprocessing.createLocalMatrices(trainingDataByCol, useSparseStructure, nObs, null)
// persist local matrices and unpersist training data RDD
localMats.persist(StorageLevel.MEMORY_AND_DISK).foreach(x => {})
trainingDataByCol.unpersist()
// start timing of LOCO
val t1 = System.currentTimeMillis
// project local matrices
val rawAndRandomFeats : RDD[(Int, (List[Int], Matrix[Double], DenseMatrix[Double], Option[Matrix[Double]]))] =
project(localMats, projection, useSparseStructure, nFeatsProj, nObs, nFeats, randomSeed,
nPartitions, privateLOCO, privateEps, privateDelta)
// force evaluation of rawAndRandomFeats RDD and unpersist localMats (only needed for timing purposes)
rawAndRandomFeats.persist(StorageLevel.MEMORY_AND_DISK).foreach(x => {})
localMats.unpersist()
// time: random features have been computed
val tRPComputed = System.currentTimeMillis
// if random projection are to be concatenated, broadcast random projections
val randomProjectionsConcatenated: Broadcast[Map[Int, DenseMatrix[Double]]] =
if(concatenate){
sc.broadcast(
rawAndRandomFeats
.mapValues{case(colIndices, rawFeatsTrain, randomFeats, rawFeatsTest) => randomFeats}
.collectAsMap()
)
}else{
null
}
// if random projection are to be added, add random projections and broadcast
val randomProjectionsAdded: Broadcast[DenseMatrix[Double]] =
if(!concatenate){
sc.broadcast(
rawAndRandomFeats
.values
.map{case(colIndices, rawFeatsTrain, randomFeats, rawFeatsTest) => randomFeats}
.treeReduce(_ + _, depth =
math.max(2, math.ceil(math.log10(nExecutors)/math.log10(2))).toInt))
}else{
null
}
// time: random features have been communicated
val tRPCommunicated = System.currentTimeMillis
// broadcast response vector
val responseBroadcast = sc.broadcast(response)
// for each worker: compute estimates locally on design matrix with raw and random features
val betaLocoAsMapwithLambdas =
if (concatenate){
// extract partitionID as key, and column indices and raw features as value
val rawFeatsTrainRDD =
rawAndRandomFeats
.map{case(partitionID, (colIndices, rawFeatsTrain, randomFeatures, rawFeatsTest)) =>
(partitionID, (colIndices, rawFeatsTrain))
}
// for each local design matrix, learn coefficients
rawFeatsTrainRDD.map{ oneLocalMat =>
localDual.runLocalDualConcatenate(
oneLocalMat, randomProjectionsConcatenated.value, responseBroadcast.value, lambda,
nObs, classification, logistic, numIterations, nFeatsProj, randomSeed, checkDualityGap,
stoppingDualityGap, naive, privateCV, kfold, lambdaSeq)
}
}else{
// when RPs are to be added
rawAndRandomFeats.map{ oneLocalMat =>
localDual.runLocalDualAdd(
oneLocalMat, randomProjectionsAdded.value, responseBroadcast.value, lambda, nObs,
classification, logistic, numIterations, nFeatsProj, randomSeed, checkDualityGap,
stoppingDualityGap, naive, privateCV, kfold, lambdaSeq)
}
}
betaLocoAsMapwithLambdas.persist()
val betaLocoAsMap: Map[Int, Double] =
betaLocoAsMapwithLambdas.flatMap{
case(colIndices, coefficients, _, _) =>
colIndices.zip(coefficients.toArray)
}.collectAsMap()
val localLambdas: Option[DenseVector[Double]] =
if(privateCV){
Some(
DenseVector(
betaLocoAsMapwithLambdas
.flatMap{ case(_, _, localLambda, _) => localLambda
}.collect()))
}else{
None
}
val debug = false
val privateCVStats =
if(debug & privateCV){
Some(
betaLocoAsMapwithLambdas.map(x => x._4.get).collect()
)
}else{
None
}
betaLocoAsMapwithLambdas.unpersist()
// unpersist raw and random features
rawAndRandomFeats.unpersist()
val betaLoco = DenseVector.fill(nFeats)(0.0)
for(entry <- 0 until nFeats){
betaLoco(entry) = betaLocoAsMap.getOrElse(entry, 0.0)
}
// sort coefficients by column index and return LOCO coefficients and time stamps
(betaLoco, t1, tRPComputed, tRPCommunicated, localLambdas, privateCVStats)
}
} | christinaheinze/loco-lib | LOCO/src/main/scala/solvers/runLOCO.scala | Scala | apache-2.0 | 8,214 |
import sbt._
import Keys._
import org.scalatra.sbt._
import com.typesafe.sbteclipse.plugin.EclipsePlugin.EclipseKeys
import play.twirl.sbt.SbtTwirl
import play.twirl.sbt.Import.TwirlKeys._
// sbt 0.13 "Build trait" style project definition for GitBucket.
object MyBuild extends Build {
  // Shared project coordinates and library versions.
  val Organization = "jp.sf.amateras"
  val Name = "gitbucket"
  val Version = "0.0.1"
  val ScalaVersion = "2.11.2"
  val ScalatraVersion = "2.3.0"
  lazy val project = Project (
    "gitbucket",
    file(".")
  )
  .settings(ScalatraPlugin.scalatraWithJRebel: _*)
  .settings(
    sourcesInBase := false,
    organization := Organization,
    name := Name,
    version := Version,
    scalaVersion := ScalaVersion,
    // Extra resolver for the amateras artifacts (scalatra-forms below).
    resolvers ++= Seq(
      Classpaths.typesafeReleases,
      "amateras-repo" at "http://amateras.sourceforge.jp/mvn/"
    ),
    scalacOptions := Seq("-deprecation", "-language:postfixOps"),
    libraryDependencies ++= Seq(
      "org.eclipse.jgit" % "org.eclipse.jgit.http.server" % "3.4.1.201406201815-r",
      "org.eclipse.jgit" % "org.eclipse.jgit.archive" % "3.4.1.201406201815-r",
      "org.scalatra" %% "scalatra" % ScalatraVersion,
      "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
      "org.scalatra" %% "scalatra-json" % ScalatraVersion,
      "org.json4s" %% "json4s-jackson" % "3.2.10",
      "jp.sf.amateras" %% "scalatra-forms" % "0.1.0",
      "commons-io" % "commons-io" % "2.4",
      "org.pegdown" % "pegdown" % "1.4.1",
      "org.apache.commons" % "commons-compress" % "1.5",
      "org.apache.commons" % "commons-email" % "1.3.1",
      "org.apache.httpcomponents" % "httpclient" % "4.3",
      "org.apache.sshd" % "apache-sshd" % "0.11.0",
      "com.typesafe.slick" %% "slick" % "2.1.0-RC3",
      "com.novell.ldap" % "jldap" % "2009-10-07",
      "org.quartz-scheduler" % "quartz" % "2.2.1",
      "com.h2database" % "h2" % "1.4.180",
      "ch.qos.logback" % "logback-classic" % "1.0.13" % "runtime",
      // Jetty is "container;provided": used by the sbt container at dev time, not packaged.
      "org.eclipse.jetty" % "jetty-webapp" % "8.1.8.v20121106" % "container;provided",
      "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts Artifact("javax.servlet", "jar", "jar"),
      "junit" % "junit" % "4.11" % "test",
      "com.typesafe.play" %% "twirl-compiler" % "1.0.2"
    ),
    EclipseKeys.withSource := true,
    // Emit Java 6 compatible bytecode.
    javacOptions in compile ++= Seq("-target", "6", "-source", "6"),
    testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
    // The executable war is started through this main class.
    packageOptions += Package.MainClass("JettyLauncher")
  ).enablePlugins(SbtTwirl)
}
| QianmiOpen/gitbucket | project/build.scala | Scala | apache-2.0 | 2,518 |
package com.rediscombinators
import com.redis.RedisClient
import com.redis.serialization.{Format, Parse}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
import scalaz.Scalaz._
object RedisAsyncOps {

  /** Asynchronous convenience operations for a blocking [[com.redis.RedisClient]].
    *
    * Every call is dispatched on the global `ExecutionContext`. Methods returning
    * `Unit` are fire-and-forget: failures of the underlying future are not surfaced.
    * Key enumeration is implemented with the non-blocking SCAN command rather
    * than KEYS.
    */
  implicit class RedisAsync(rc: RedisClient) {

    /** Fetches the value stored under `key`; the future fails with a
      * `NoSuchElementException` when the key does not exist. */
    def getAsync[A](key: String)(implicit format: Format, parse: Parse[A]): Future[A] =
      Future { rc.get(key) }.flatMap { maybeA =>
        val t = maybeA match {
          case Some(a) => Success(a)
          case None => Failure(new NoSuchElementException)
        }
        Future.fromTry(t)
      }

    /** Deletes a single key (fire-and-forget). */
    def delAsync(key: String): Unit = Future { rc.del(key) }

    /** Sets all key/value pairs with one MSET; no-op for an empty list. */
    def mSetAsync[A](kvs: List[(String, A)]): Unit = Future { if (kvs.nonEmpty) rc.mset(kvs.toArray: _*) }

    /** Deletes all given keys with one DEL; no-op for an empty list. */
    def mDelAsync(ks: List[String]): Unit = Future { if (ks.nonEmpty) rc.del(ks.head, ks.tail: _*) }

    /** Deletes every key matching `pattern` (fire-and-forget). */
    def mDelAsync(pattern: String): Unit = forEachKeyAsync(pattern) { k => delAsync(k) }

    /** Returns all keys in the database. */
    def getKeysAsync: Future[List[String]] = getKeysAsync("*")

    /** Returns all keys matching `pattern`.
      * Bug fix: this previously ignored `pattern` and always scanned with "*". */
    def getKeysAsync(pattern: String): Future[List[String]] = mapKeyAsync(pattern)(identity)

    /** Applies `f` to every key in the database, for its side effects. */
    def forEachKeyAsync(f: String => Unit): Unit = mapKeyAsync(f)

    /** Applies `f` to every key matching `pattern`, for its side effects. */
    def forEachKeyAsync(pattern: String)(f: String => Unit): Unit = mapKeyAsync(pattern)(f)

    /** Maps `f` over every key in the database. */
    def mapKeyAsync[B](f: String => B): Future[List[B]] = mapKeyAsync("*")(f)

    /** Maps `f` over every key matching `pattern`, following the SCAN cursor
      * until the full key space has been traversed. */
    def mapKeyAsync[B](pattern: String)(f: String => B): Future[List[B]] = {
      implicit val p: String = pattern
      scan(0, f).flatMap(combineScan(f))
    }

    // Follows the cursor returned by a previous SCAN; cursor 0 marks the end of
    // the iteration, per the Redis SCAN contract.
    private def nextScan[B](cursor: Int, f: String => B)(implicit pattern: String): Future[List[B]] =
      if (cursor > 0)
        scan(cursor, f).flatMap(combineScan(f))
      else
        Future.successful(List.empty[B])

    // Concatenates the results of the current SCAN page with all following pages.
    private def combineScan[B](f: (String) => B)(t: (Int, List[B]))(implicit pattern: String): Future[List[B]] = {
      val (cursor, vs) = t
      nextScan(cursor, f).map(bs => vs |+| bs)
    }

    // One SCAN page: returns the next cursor and the mapped keys of this page.
    private def scan[B](cursor: Int, f: String => B)(implicit pattern: String): Future[(Int, List[B])] = Future {
      rc.scan(cursor, pattern).map { t =>
        val (cursorMaybe, ks) = t
        val cursor: Int = cursorMaybe.orZero
        val bs: List[B] = ks.map(vs => vs.flatten.map(k => f(k))).orZero
        cursor -> bs
      }.orZero
    }
  }
}
| agarella/RedisScalaCombinators | src/main/scala/com/rediscombinators/RedisAsyncOps.scala | Scala | apache-2.0 | 2,336 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.stacklang
// Word-level test for the stack language's Format word, driven by BaseWordSuite.
class FormatSuite extends BaseWordSuite {
  // Interpreter carrying the full standard vocabulary so referenced words resolve.
  def interpreter: Interpreter = Interpreter(StandardVocabulary.allWords)
  // The word under test.
  def word: Word = StandardVocabulary.Format
  // Input stack -> expected result stack: a format string plus an argument list
  // should yield the formatted string.
  def shouldMatch: List[(String, List[Any])] = List(
    "a%s,(,.netflix.com,)" -> List("a.netflix.com")
  )
  // Stacks on which the word must not match: empty, a bare value, and a list
  // without a format string on top.
  def shouldNotMatch: List[String] = List("", "a", "(,),a")
}
| copperlight/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/stacklang/FormatSuite.scala | Scala | apache-2.0 | 980 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.pos
import cc.factorie._
import cc.factorie.app.nlp._
import cc.factorie.la._
import cc.factorie.util._
import java.io._
import cc.factorie.variable.{MutableCategoricalVar, BinaryFeatureVectorVariable, CategoricalVectorDomain}
import cc.factorie.optimize.Trainer
import cc.factorie.app.classify.backend.LinearMulticlassClassifier
/** A part-of-speech tagger that predicts by greedily labeling each word in sequence.
Although it does not use Viterbi, it is surprisingly accurate. It is also fast.
For the Viterbi-based part-of-speech tagger, see ChainPosTagger.
@author Andrew McCallum, */
class ForwardPosTagger extends DocumentAnnotator with Serializable {
  private val logger = Logger.getLogger(this.getClass.getName)

  // Different ways to load saved parameters
  def this(stream:InputStream) = { this(); deserialize(stream) }
  def this(file: File) = {
    this(new FileInputStream(file))
    logger.debug("ForwardPosTagger loading from "+file.getAbsolutePath)
  }
  def this(url:java.net.URL) = {
    this()
    val stream = url.openConnection.getInputStream
    if (stream.available <= 0) throw new Error("Could not open "+url)
    logger.debug("ForwardPosTagger loading from "+url)
    deserialize(stream)
  }

  // Domain over string feature names; the classifier's weight matrix is indexed by it.
  object FeatureDomain extends CategoricalVectorDomain[String]
  class FeatureVariable(t:Tensor1) extends BinaryFeatureVectorVariable[String] { def domain = FeatureDomain; set(t)(null) } // Only used for printing diagnostics
  // Single linear multiclass classifier mapping a feature vector to a Penn POS tag.
  lazy val model = new LinearMulticlassClassifier(PennPosDomain.size, FeatureDomain.dimensionSize)

  /** Local lemmatizer used for POS features. */
  protected def lemmatize(string:String): String = cc.factorie.app.strings.replaceDigits(string)

  /** A special IndexedSeq[String] that will return "null" for indices out of bounds, rather than throwing an error */
  class Lemmas(tokens:Seq[Token]) extends IndexedSeq[String] {
    val inner: IndexedSeq[String] = tokens.toIndexedSeq.map((t:Token) => cc.factorie.app.strings.replaceDigits(t.string))
    val innerlc = inner.map(_.toLowerCase)
    val length: Int = inner.length
    def apply(i:Int): String = if (i < 0 || i > length-1) null else inner(i)
    def lc(i:Int): String = if (i < 0 || i > length-1) null else innerlc(i)
    // docFreq / docFreqLc additionally return null for lemmas below the
    // document-frequency cutoff recorded in WordData.docWordCounts.
    def docFreq(i:Int): String = if ((i < 0 || i > length-1) || !WordData.docWordCounts.contains(innerlc(i))) null else inner(i)
    def docFreqLc(i:Int): String = if (i < 0 || i > length-1 || !WordData.docWordCounts.contains(innerlc(i))) null else innerlc(i)
  }
  protected def lemmas(tokens:Seq[Token]) = new Lemmas(tokens)

  // This should not be a singleton object, global mutable state is bad -luke
  /** Infrastructure for building and remembering a list of training data words that nearly always have the same POS tag.
      Used as cheap "stacked learning" features when looking-ahead to words not yet predicted by this POS tagger.
      The key into the ambiguityClasses is app.strings.replaceDigits().toLowerCase */
  object WordData {
    // lemma -> underscore-joined list of POS tags it takes more than ambiguityClassThreshold of the time
    val ambiguityClasses = JavaHashMap[String,String]()
    // lemma -> POS index, for lemmas that take a single tag >= 99.9% of the time
    val sureTokens = JavaHashMap[String,Int]()
    // lemma -> number of (pseudo-)documents it appears in; pruned by the cutoff below
    var docWordCounts = JavaHashMap[String,Int]()
    val ambiguityClassThreshold = 0.4
    val wordInclusionThreshold = 1
    // NOTE(review): sureTokenThreshold and wordInclusionThreshold appear unused here;
    // the thresholds actually applied are the hard-coded 1000 and 0.999 below.
    val sureTokenThreshold = -1 // -1 means don't consider any tokens "sure"

    // Counts, for each lemma, the number of numToksPerDoc-sized token chunks
    // ("pseudo-documents") it occurs in, then drops lemmas at or below `cutoff`.
    def computeWordFormsByDocumentFrequency(tokens: Iterable[Token], cutoff: Integer, numToksPerDoc: Int) = {
      var begin = 0
      for(i <- numToksPerDoc to tokens.size by numToksPerDoc){
        val docTokens = tokens.slice(begin,i)
        val docUniqueLemmas = docTokens.map(x => lemmatize(x.string).toLowerCase).toSet
        for(lemma <- docUniqueLemmas){
          if (!docWordCounts.contains(lemma)) {
            docWordCounts(lemma) = 0
          }
          docWordCounts(lemma) += 1
        }
        begin = i
      }
      // deal with last chunk of sentences
      if(begin < tokens.size){
        val docTokens = tokens.slice(begin,tokens.size)
        val docUniqueLemmas = docTokens.map(x => lemmatize(x.string).toLowerCase).toSet
        for(lemma <- docUniqueLemmas){
          if (!docWordCounts.contains(lemma)) {
            docWordCounts(lemma) = 0
          }
          docWordCounts(lemma) += 1
        }
      }
      docWordCounts = docWordCounts.filter(_._2 > cutoff)
    }

    // Tallies per-lemma POS distributions over the training tokens and derives
    // the ambiguity classes and "sure" tokens defined above. Requires every
    // token to carry a PennPosTag.
    def computeAmbiguityClasses(tokens: Iterable[Token]) = {
      val posCounts = collection.mutable.HashMap[String,Array[Int]]()
      val wordCounts = collection.mutable.HashMap[String,Double]()
      var tokenCount = 0
      val lemmas = docWordCounts.keySet
      tokens.foreach(t => {
        tokenCount += 1
        if (t.attr[PennPosTag] eq null) {
          println("POS1.WordData.preProcess tokenCount "+tokenCount)
          println("POS1.WordData.preProcess token "+t.prev.string+" "+t.prev.attr)
          println("POS1.WordData.preProcess token "+t.string+" "+t.attr)
          throw new Error("Found training token with no PennPosTag.")
        }
        val lemma = lemmatize(t.string).toLowerCase
        if (!wordCounts.contains(lemma)) {
          wordCounts(lemma) = 0
          posCounts(lemma) = Array.fill(PennPosDomain.size)(0)
        }
        wordCounts(lemma) += 1
        posCounts(lemma)(t.attr[PennPosTag].intValue) += 1
      })
      lemmas.foreach(w => {
        val posFrequencies = posCounts(w).map(_/wordCounts(w))
        val bestPosTags = posFrequencies.zip(PennPosDomain.categories).filter(_._1 > ambiguityClassThreshold).unzip._2
        val ambiguityString = bestPosTags.mkString("_")
        ambiguityClasses(w) = ambiguityString
        // Lemmas seen >= 1000 times with one tag >= 99.9% of the time become "sure".
        if (wordCounts(w) >= 1000) {
          posFrequencies.zipWithIndex.filter(i => i._1 >= 0.999).foreach(c => sureTokens(w) = c._2)
        }
      })
    }
  }

  // Builds the sparse binary feature vector for the token at lemmaIndex:
  // word/lemma identity in a +-3 window, ambiguity classes ahead, predicted POS
  // tags behind, n-gram conjunctions, affixes, shapes, and character flags.
  // Feature strings are part of the serialized model: do not change them.
  def features(token:Token, lemmaIndex:Int, lemmas:Lemmas): SparseBinaryTensor1 = {
    def lemmaStringAtOffset(offset:Int): String = "L@"+offset+"="+lemmas.docFreqLc(lemmaIndex + offset) // this is lowercased
    def wordStringAtOffset(offset:Int): String = "W@"+offset+"="+lemmas.docFreq(lemmaIndex + offset) // this is not lowercased, but still has digits replaced
    def affinityTagAtOffset(offset:Int): String = "A@"+offset+"="+WordData.ambiguityClasses.getOrElse(lemmas.lc(lemmaIndex + offset), null)
    // NOTE(review): unlike the other feature templates this one has no "=" between
    // the offset and the tag; changing it would invalidate saved models — confirm intent.
    def posTagAtOffset(offset:Int): String = { val t = token.next(offset); "P@"+offset+(if (t ne null) t.attr[PennPosTag].categoryValue else null) }
    def takePrefix(s:String, n:Int): String = {if (n <= s.length) "PREFIX="+s.substring(0,n) else null }
    def takeSuffix(s:String, n:Int): String = { val l = s.length; if (n <= l) "SUFFIX="+s.substring(l-n,l) else null }
    val tensor = new SparseBinaryTensor1(FeatureDomain.dimensionSize); tensor.sizeHint(40)
    // Silently drops features unknown to the (frozen) domain at test time.
    def addFeature(s:String): Unit = if (s ne null) { val i = FeatureDomain.dimensionDomain.index(s); if (i >= 0) tensor += i }
    // Original word, with digits replaced, no @
    val Wm2 = if (lemmaIndex > 1) lemmas(lemmaIndex-2) else ""
    val Wm1 = if (lemmaIndex > 0) lemmas(lemmaIndex-1) else ""
    val W = lemmas(lemmaIndex)
    val Wp1 = if (lemmaIndex < lemmas.length-1) lemmas(lemmaIndex+1) else ""
    val Wp2 = if (lemmaIndex < lemmas.length-2) lemmas(lemmaIndex+2) else ""
    // Original words at offsets, with digits replaced, marked with @
    val wm3 = wordStringAtOffset(-3)
    val wm2 = wordStringAtOffset(-2)
    val wm1 = wordStringAtOffset(-1)
    val w0 = wordStringAtOffset(0)
    val wp1 = wordStringAtOffset(1)
    val wp2 = wordStringAtOffset(2)
    val wp3 = wordStringAtOffset(3)
    // Lemmas at offsets
    val lm2 = lemmaStringAtOffset(-2)
    val lm1 = lemmaStringAtOffset(-1)
    val l0 = lemmaStringAtOffset(0)
    val lp1 = lemmaStringAtOffset(1)
    val lp2 = lemmaStringAtOffset(2)
    // Affinity classes at next offsets
    val a0 = affinityTagAtOffset(0)
    val ap1 = affinityTagAtOffset(1)
    val ap2 = affinityTagAtOffset(2)
    val ap3 = affinityTagAtOffset(3)
    // POS tags at prev offsets
    val pm1 = posTagAtOffset(-1)
    val pm2 = posTagAtOffset(-2)
    val pm3 = posTagAtOffset(-3)
    addFeature(wm3)
    addFeature(wm2)
    addFeature(wm1)
    addFeature(w0)
    addFeature(wp1)
    addFeature(wp2)
    addFeature(wp3)
    // The paper also includes wp3 and wm3
    // not in ClearNLP
    //    addFeature(lp3)
    //    addFeature(lp2)
    //    addFeature(lp1)
    //    addFeature(l0)
    //    addFeature(lm1)
    //    addFeature(lm2)
    //    addFeature(lm3)
    addFeature(pm3)
    addFeature(pm2)
    addFeature(pm1)
    addFeature(a0)
    addFeature(ap1)
    addFeature(ap2)
    addFeature(ap3)
    // Bigram conjunctions of neighboring lemmas / tags / affinity classes.
    addFeature(lm2+lm1)
    addFeature(lm1+l0)
    addFeature(l0+lp1)
    addFeature(lp1+lp2)
    addFeature(lm1+lp1)
    addFeature(pm2+pm1)
    addFeature(ap1+ap2)
    addFeature(pm1+ap1)
    //    addFeature(pm1+a0) // Not in http://www.aclweb.org/anthology-new/P/P12/P12-2071.pdf
    //    addFeature(a0+ap1) // Not in http://www.aclweb.org/anthology-new/P/P12/P12-2071.pdf
    // Trigram conjunctions.
    addFeature(lm2+lm1+l0)
    addFeature(lm1+l0+lp1)
    addFeature(l0+lp1+lp2)
    addFeature(lm2+lm1+lp1)
    addFeature(lm1+lp1+lp2)
    addFeature(pm2+pm1+a0)
    addFeature(pm1+a0+ap1)
    addFeature(pm2+pm1+ap1)
    addFeature(pm1+ap1+ap2)
    //    addFeature(a0+ap1+ap2) // Not in http://www.aclweb.org/anthology-new/P/P12/P12-2071.pdf
    // Affix features of the current word.
    addFeature(takePrefix(W, 1))
    addFeature(takePrefix(W, 2))
    addFeature(takePrefix(W, 3))
    // not in ClearNLP
    //    addFeature("PREFIX2@1="+takePrefix(Wp1, 2))
    //    addFeature("PREFIX3@1="+takePrefix(Wp1, 3))
    //    addFeature("PREFIX2@2="+takePrefix(Wp2, 2))
    //    addFeature("PREFIX3@2="+takePrefix(Wp2, 3))
    addFeature(takeSuffix(W, 1))
    addFeature(takeSuffix(W, 2))
    addFeature(takeSuffix(W, 3))
    addFeature(takeSuffix(W, 4))
    // not in ClearNLP
    //    addFeature("SUFFIX1@1="+takeRight(Wp1, 1))
    //    addFeature("SUFFIX2@1="+takeRight(Wp1, 2))
    //    addFeature("SUFFIX3@1="+takeRight(Wp1, 3))
    //    addFeature("SUFFIX4@1="+takeRight(Wp1, 4))
    //    addFeature("SUFFIX2@2="+takeRight(Wp2, 2))
    //    addFeature("SUFFIX3@2="+takeRight(Wp2, 3))
    //    addFeature("SUFFIX4@2="+takeRight(Wp2, 4))
    // Word-shape features in a +-2 window.
    addFeature("SHAPE@-2="+cc.factorie.app.strings.stringShape(Wm2, 2))
    addFeature("SHAPE@-1="+cc.factorie.app.strings.stringShape(Wm1, 2))
    addFeature("SHAPE@0="+cc.factorie.app.strings.stringShape(W, 2))
    addFeature("SHAPE@1="+cc.factorie.app.strings.stringShape(Wp1, 2))
    addFeature("SHAPE@2="+cc.factorie.app.strings.stringShape(Wp2, 2))
    // TODO(apassos): add the remaining jinho features not contained in shape
    addFeature("HasPeriod="+(w0.indexOf('.') >= 0))
    addFeature("HasHyphen="+(w0.indexOf('-') >= 0))
    addFeature("HasDigit="+(l0.indexOf('0', 4) >= 0)) // The 4 is to skip over "W@0="
    //addFeature("MiddleHalfCap="+token.string.matches(".+1/2[A-Z].*")) // Paper says "contains 1/2+capital(s) not at the beginning". Strange feature. Why? -akm
    tensor
  }

  // Feature vectors for a whole token sequence, sharing one Lemmas view.
  def features(tokens:Seq[Token]): Seq[SparseBinaryTensor1] = {
    val lemmaStrings = lemmas(tokens)
    tokens.zipWithIndex.map({case (t:Token, i:Int) => features(t, i, lemmaStrings)})
  }

  // When true, training examples write their prediction back into the token
  // (bootstrapping the look-behind POS features with predicted tags).
  var exampleSetsToPrediction = false

  // One optimization example per sentence: greedy left-to-right classification
  // of each token against its gold tag.
  class SentenceClassifierExample(val tokens:Seq[Token], model:LinearMulticlassClassifier, lossAndGradient: optimize.OptimizableObjectives.Multiclass) extends optimize.Example {
    def accumulateValueAndGradient(value: DoubleAccumulator, gradient: WeightsMapAccumulator) {
      val lemmaStrings = lemmas(tokens)
      for (index <- 0 until tokens.length) {
        val token = tokens(index)
        val posLabel = token.attr[LabeledPennPosTag]
        val featureVector = features(token, index, lemmaStrings)
        new optimize.PredictorExample(model, featureVector, posLabel.target.intValue, lossAndGradient, 1.0).accumulateValueAndGradient(value, gradient)
        if (exampleSetsToPrediction) {
          posLabel.set(model.classification(featureVector).bestLabelIndex)(null)
        }
      }
    }
  }

  // Greedily tags tokens left to right; "sure" lemmas bypass the classifier.
  def predict(tokens: Seq[Token]): Unit = {
    val lemmaStrings = lemmas(tokens)
    for (index <- 0 until tokens.length) {
      val token = tokens(index)
      if (token.attr[PennPosTag] eq null) token.attr += new PennPosTag(token, "NNP")
      val l = lemmatize(token.string).toLowerCase
      if (WordData.sureTokens.contains(l)) {
        token.attr[PennPosTag].set(WordData.sureTokens(l))(null)
      } else {
        val featureVector = features(token, index, lemmaStrings)
        token.attr[PennPosTag].set(model.classification(featureVector).bestLabelIndex)(null)
      }
    }
  }
  def predict(span: TokenSpan): Unit = predict(span.tokens)
  def predict(document: Document): Unit = {
    // NOTE(review): when any section has sentences this tags ALL of
    // document.sentences once per such section; `section.sentences` was
    // possibly intended — confirm before changing.
    for (section <- document.sections)
      if (section.hasSentences) document.sentences.foreach(predict(_))  // we have Sentence boundaries
      else predict(section.tokens) // we don't // TODO But if we have trained with Sentence boundaries, won't this hurt accuracy?
  }

  // Serialization
  def serialize(filename: String): Unit = {
    val file = new File(filename); if (file.getParentFile ne null) file.getParentFile.mkdirs()
    serialize(new java.io.FileOutputStream(file))
  }
  def deserialize(file: File): Unit = {
    require(file.exists(), "Trying to load non-existent file: '" +file)
    deserialize(new java.io.FileInputStream(file))
  }
  def serialize(stream: java.io.OutputStream): Unit = {
    import CubbieConversions._
    // Note: this replaces the in-memory weights with a sparse copy before writing.
    val sparseEvidenceWeights = new la.DenseLayeredTensor2(model.weights.value.dim1, model.weights.value.dim2, new la.SparseIndexedTensor1(_))
    model.weights.value.foreachElement((i, v) => if (v != 0.0) sparseEvidenceWeights += (i, v))
    model.weights.set(sparseEvidenceWeights)
    val dstream = new java.io.DataOutputStream(new BufferedOutputStream(stream))
    BinarySerializer.serialize(FeatureDomain.dimensionDomain, dstream)
    BinarySerializer.serialize(model, dstream)
    BinarySerializer.serialize(WordData.ambiguityClasses, dstream)
    BinarySerializer.serialize(WordData.sureTokens, dstream)
    BinarySerializer.serialize(WordData.docWordCounts, dstream)
    dstream.close()  // TODO Are we really supposed to close here, or is that the responsibility of the caller
  }
  def deserialize(stream: java.io.InputStream): Unit = {
    import CubbieConversions._
    val dstream = new java.io.DataInputStream(new BufferedInputStream(stream))
    // The feature domain must be restored before sizing the weight tensor below.
    BinarySerializer.deserialize(FeatureDomain.dimensionDomain, dstream)
    model.weights.set(new la.DenseLayeredTensor2(FeatureDomain.dimensionDomain.size, PennPosDomain.size, new la.SparseIndexedTensor1(_)))
    BinarySerializer.deserialize(model, dstream)
    BinarySerializer.deserialize(WordData.ambiguityClasses, dstream)
    BinarySerializer.deserialize(WordData.sureTokens, dstream)
    BinarySerializer.deserialize(WordData.docWordCounts, dstream)
    dstream.close()  // TODO Are we really supposed to close here, or is that the responsibility of the caller
  }

  def printAccuracy(sentences: Iterable[Sentence], extraText: String) = {
    val (tokAcc, senAcc, speed, _) = accuracy(sentences)
    println(extraText + s"$tokAcc token accuracy, $senAcc sentence accuracy, $speed tokens/sec")
  }

  // Re-tags the sentences and returns (token accuracy, sentence accuracy,
  // tokens/second, token count) against the gold LabeledPennPosTag targets.
  def accuracy(sentences:Iterable[Sentence]): (Double, Double, Double, Double) = {
    var tokenTotal = 0.0
    var tokenCorrect = 0.0
    var totalTime = 0.0
    var sentenceCorrect = 0.0
    var sentenceTotal = 0.0
    sentences.foreach(s => {
      var thisSentenceCorrect = 1.0
      val t0 = System.currentTimeMillis()
      process(s) //predict(s)
      totalTime += (System.currentTimeMillis()-t0)
      for (token <- s.tokens) {
        tokenTotal += 1
        if (token.attr[LabeledPennPosTag].valueIsTarget) tokenCorrect += 1.0
        else thisSentenceCorrect = 0.0
      }
      sentenceCorrect += thisSentenceCorrect
      sentenceTotal += 1.0
    })
    val tokensPerSecond = (tokenTotal/totalTime)*1000.0
    (tokenCorrect/tokenTotal, sentenceCorrect/sentenceTotal, tokensPerSecond, tokenTotal)
  }

  def test(sentences:Iterable[Sentence]) = {
    println("Testing on " + sentences.size + " sentences...")
    val (tokAccuracy, sentAccuracy, speed, tokens) = accuracy(sentences)
    println("Tested on " + tokens + " tokens at " + speed + " tokens/sec")
    println("Token accuracy: " + tokAccuracy)
    println("Sentence accuracy: " + sentAccuracy)
  }

  // Trains the classifier with online (optionally bootstrapped) SDCA-style updates,
  // after building WordData statistics and pruning rare features.
  def train(trainSentences:Seq[Sentence], testSentences:Seq[Sentence], lrate:Double = 0.1, decay:Double = 0.01, cutoff:Int = 2, doBootstrap:Boolean = true, useHingeLoss:Boolean = false, numIterations: Int = 5, l1Factor:Double = 0.000001, l2Factor:Double = 0.000001)(implicit random: scala.util.Random) {
    // TODO Accomplish this TokenNormalization instead by calling POS3.preProcess
    //for (sentence <- trainSentences ++ testSentences; token <- sentence.tokens) cc.factorie.app.nlp.segment.PlainTokenNormalizer.processToken(token)
    val toksPerDoc = 5000
    WordData.computeWordFormsByDocumentFrequency(trainSentences.flatMap(_.tokens), 1, toksPerDoc)
    WordData.computeAmbiguityClasses(trainSentences.flatMap(_.tokens))
    // Prune features by count
    FeatureDomain.dimensionDomain.gatherCounts = true
    for (sentence <- trainSentences) features(sentence.tokens) // just to create and count all features
    FeatureDomain.dimensionDomain.trimBelowCount(cutoff)
    FeatureDomain.freeze()
    println("After pruning using %d features.".format(FeatureDomain.dimensionDomain.size))
    /* Print out some features (for debugging) */
    //println("ForwardPosTagger.train\\n"+trainSentences(3).tokens.map(_.string).zip(features(trainSentences(3).tokens).map(t => new FeatureVariable(t).toString)).mkString("\\n"))
    def evaluate() {
      exampleSetsToPrediction = doBootstrap
      printAccuracy(trainSentences, "Training: ")
      printAccuracy(testSentences, "Testing: ")
      println(s"Sparsity: ${model.weights.value.toSeq.count(_ == 0).toFloat/model.weights.value.length}")
    }
    val examples = trainSentences.shuffle.par.map(sentence =>
      new SentenceClassifierExample(sentence.tokens, model, if (useHingeLoss) cc.factorie.optimize.OptimizableObjectives.hingeMulticlass else cc.factorie.optimize.OptimizableObjectives.sparseLogMulticlass)).seq
    //val optimizer = new cc.factorie.optimize.AdaGrad(rate=lrate)
    // L1/L2 strengths are normalized by the number of examples.
    val optimizer = new cc.factorie.optimize.AdaGradRDA(rate=lrate, l1=l1Factor/examples.length, l2=l2Factor/examples.length)
    Trainer.onlineTrain(model.parameters, examples, maxIterations=numIterations, optimizer=optimizer, evaluate=evaluate, useParallelTrainer = false)
    if (false) {
      // Print test results to file
      val source = new java.io.PrintStream(new File("pos1-test-output.txt"))
      for (s <- testSentences) {
        for (t <- s.tokens) { val p = t.attr[LabeledPennPosTag]; source.println("%s %20s %6s %6s".format(if (p.valueIsTarget) " " else "*", t.string, p.target.categoryValue, p.categoryValue)) }
        source.println()
      }
      source.close()
    }
  }

  // DocumentAnnotator interface.
  def process(d: Document) = {
    predict(d)
    if (!d.annotators.contains(classOf[PennPosTag])) d.annotators(classOf[PennPosTag]) = this.getClass
    d
  }
  def process(s: Sentence) = { predict(s); s }
  def prereqAttrs: Iterable[Class[_]] = List(classOf[Token], classOf[Sentence], classOf[segment.PlainNormalizedTokenString])
  def postAttrs: Iterable[Class[_]] = List(classOf[PennPosTag])
  override def tokenAnnotationString(token:Token): String = { val label = token.attr[PennPosTag]; if (label ne null) label.categoryValue else "(null)" }
}
/** The default part-of-speech tagger, trained on Penn Treebank Wall Street Journal, with parameters loaded from resources in the classpath. */
class WSJForwardPosTagger(url:java.net.URL) extends ForwardPosTagger(url)
// Singleton instance; Scala object semantics mean the saved model is located on
// the classpath and deserialized on first access.
object WSJForwardPosTagger extends WSJForwardPosTagger(cc.factorie.util.ClasspathURL[WSJForwardPosTagger](".factorie"))
/** The default part-of-speech tagger, trained on all Ontonotes training data (including Wall Street Journal), with parameters loaded from resources in the classpath. */
class OntonotesForwardPosTagger(url:java.net.URL) extends ForwardPosTagger(url) with Serializable
// Singleton instance, likewise initialized from the classpath on first access.
object OntonotesForwardPosTagger extends OntonotesForwardPosTagger(cc.factorie.util.ClasspathURL[OntonotesForwardPosTagger](".factorie")) with Serializable
/** Command-line options shared by the forward POS tagger trainer and tester.
    Each CmdOption documents itself via its (name, default, value-name, help) arguments. */
class ForwardPosOptions extends cc.factorie.util.DefaultCmdOptions with SharedNLPCmdOptions{
  val modelFile = new CmdOption("model", "", "FILENAME", "Filename for the model (saving a trained model or reading a running model.")
  // Test/train data may be given as a single file, a directory, or a comma-separated list.
  val testFile = new CmdOption("test-file", "", "FILENAME", "OWPL test file.")
  val trainFile = new CmdOption("train-file", "", "FILENAME", "OWPL training file.")
  val testDir = new CmdOption("test-dir", "", "FILENAME", "Directory containing OWPL test files (.dep.pmd).")
  val trainDir = new CmdOption("train-dir", "", "FILENAME", "Directory containing OWPL training files (.dep.pmd).")
  val testFiles = new CmdOption("test-files", "", "STRING", "comma-separated list of OWPL test files (.dep.pmd).")
  val trainFiles = new CmdOption("train-files", "", "STRING", "comma-separated list of OWPL training files (.dep.pmd).")
  // Optimization hyperparameters (see ForwardPosTagger.train).
  val l1 = new CmdOption("l1", 0.000001, "FLOAT", "l1 regularization weight")
  val l2 = new CmdOption("l2", 0.00001, "FLOAT", "l2 regularization weight")
  val rate = new CmdOption("rate", 1.0, "FLOAT", "base learning rate")
  val delta = new CmdOption("delta", 0.1, "FLOAT", "learning rate decay")
  val cutoff = new CmdOption("cutoff", 2, "INT", "Discard features less frequent than this before training.")
  val updateExamples = new  CmdOption("update-examples", true, "BOOL", "Whether to update examples in later iterations during training.")
  val useHingeLoss = new CmdOption("use-hinge-loss", false, "BOOL", "Whether to use hinge loss (or log loss) during training.")
  val saveModel = new CmdOption("save-model", false, "BOOL", "Whether to save the trained model.")
  val runText = new CmdOption("run", "", "FILENAME", "Plain text file on which to run.")
  val numIters = new CmdOption("num-iterations", 5, "INT", "number of passes over the data for training")
  val owpl = new CmdOption("owpl", false, "BOOL", "Whether the data is in OWPL format or otherwise (Ontonotes)")
}
/** Command-line entry point: loads a saved or pre-trained forward POS tagger
    and reports its accuracy on the supplied test data. */
object ForwardPosTester {
  def main(args: Array[String]) {
    val opts = new ForwardPosOptions
    opts.parse(args)
    // At least one test-data source is required.
    assert(opts.testFile.wasInvoked || opts.testDir.wasInvoked || opts.testFiles.wasInvoked)
    // Prefer an explicitly supplied model file; otherwise pick the pre-trained
    // WSJ model for OWPL-formatted data, or the Ontonotes model by default.
    val tagger =
      if (opts.modelFile.wasInvoked) new ForwardPosTagger(new File(opts.modelFile.value))
      else if (opts.owpl.value) WSJForwardPosTagger
      else OntonotesForwardPosTagger
    // A directory and an explicit file list are mutually exclusive.
    assert(!(opts.testDir.wasInvoked && opts.testFiles.wasInvoked))
    // Resolve the test file names from whichever option was supplied.
    val testFileNames: Seq[String] =
      if (opts.testDir.wasInvoked) FileUtils.getFileListFromDir(opts.testDir.value)
      else if (opts.testFiles.wasInvoked) opts.testFiles.value.split(",").toSeq
      else Seq(opts.testFile.value)
    val portionToTake = if (opts.testPortion.wasInvoked) opts.testPortion.value else 1.0
    // Load one Document per file, in the format indicated by --owpl.
    val testDocuments = testFileNames.map { fname =>
      if (opts.owpl.value) load.LoadOWPL.fromFilename(fname, pennPosLabelMaker).head
      else load.LoadOntonotes5.fromFilename(fname).head
    }
    val allSentences = testDocuments.flatMap(_.sentences)
    val sentenceCount = (portionToTake * allSentences.length).floor.toInt
    tagger.test(allSentences.take(sentenceCount))
  }
}
object ForwardPosTrainer extends HyperparameterMain {
  /** Trains a ForwardPosTagger on OWPL or Ontonotes data and returns test-set accuracy.
    * Train/test data may be given as a single file, a directory, or a comma-separated
    * file list; the model is optionally serialized when --save-model is set. */
  def evaluateParameters(args: Array[String]): Double = {
    implicit val random = new scala.util.Random(0)
    val opts = new ForwardPosOptions
    opts.parse(args)
    assert(opts.trainFile.wasInvoked || opts.trainDir.wasInvoked || opts.trainFiles.wasInvoked)
    val pos = new ForwardPosTagger
    // At most one way of specifying the file list may be used for each of train/test.
    assert(!(opts.trainDir.wasInvoked && opts.trainFiles.wasInvoked))
    val trainFileList: Seq[String] =
      if (opts.trainDir.wasInvoked) FileUtils.getFileListFromDir(opts.trainDir.value)
      else if (opts.trainFiles.wasInvoked) opts.trainFiles.value.split(",").toSeq
      else Seq(opts.trainFile.value)
    assert(!(opts.testDir.wasInvoked && opts.testFiles.wasInvoked))
    val testFileList: Seq[String] =
      if (opts.testDir.wasInvoked) FileUtils.getFileListFromDir(opts.testDir.value)
      else if (opts.testFiles.wasInvoked) opts.testFiles.value.split(",").toSeq
      else Seq(opts.testFile.value)
    // Loads one document per file, in OWPL or Ontonotes format depending on options.
    def loadDocs(fileNames: Seq[String]) = fileNames.map { fname =>
      if (opts.owpl.value) load.LoadOWPL.fromFilename(fname, pennPosLabelMaker).head
      else load.LoadOntonotes5.fromFilename(fname).head
    }
    val trainDocs = loadDocs(trainFileList)
    val testDocs = loadDocs(testFileList)
    println("Read %d training tokens from %d files.".format(trainDocs.map(_.tokenCount).sum, trainDocs.size))
    println("Read %d testing tokens from %d files.".format(testDocs.map(_.tokenCount).sum, testDocs.size))
    // Optionally train/evaluate on only a leading fraction of the sentences.
    val trainPortionToTake = if (opts.trainPortion.wasInvoked) opts.trainPortion.value else 1.0
    val testPortionToTake = if (opts.testPortion.wasInvoked) opts.testPortion.value else 1.0
    val trainSentencesFull = trainDocs.flatMap(_.sentences)
    val trainSentences = trainSentencesFull.take((trainPortionToTake * trainSentencesFull.length).floor.toInt)
    val testSentencesFull = testDocs.flatMap(_.sentences)
    val testSentences = testSentencesFull.take((testPortionToTake * testSentencesFull.length).floor.toInt)
    pos.train(trainSentences, testSentences,
      opts.rate.value, opts.delta.value, opts.cutoff.value, opts.updateExamples.value, opts.useHingeLoss.value, numIterations = opts.numIters.value.toInt, l1Factor = opts.l1.value, l2Factor = opts.l2.value)
    if (opts.saveModel.value) {
      // Round-trip the model through serialization and report accuracy before/after,
      // so serialization bugs show up immediately.
      pos.serialize(opts.modelFile.value)
      val pos2 = new ForwardPosTagger
      pos2.deserialize(new java.io.File(opts.modelFile.value))
      pos.printAccuracy(testSentencesFull, "pre-serialize accuracy: ")
      pos2.printAccuracy(testSentencesFull, "post-serialize accuracy: ")
    }
    // Accuracy is measured on the full test set (as in the original), not the truncated portion.
    val acc = pos.accuracy(testSentencesFull)._1
    if (opts.targetAccuracy.wasInvoked) cc.factorie.assertMinimalAccuracy(acc, opts.targetAccuracy.value.toDouble)
    acc
  }
}
object ForwardPosOptimizer {
  /** Runs a distributed hyper-parameter search over ForwardPosTrainer's options,
    * then re-runs the best configuration found with model-saving enabled. */
  def main(args: Array[String]) {
    val opts = new ForwardPosOptions
    opts.parse(args)
    // Do not save models during the search itself; only the final best run saves.
    opts.saveModel.setValue(false)
    // Hyper-parameters to explore, each paired with its sampling distribution.
    val l1 = cc.factorie.util.HyperParameter(opts.l1, new cc.factorie.util.LogUniformDoubleSampler(1e-10, 1e2))
    val l2 = cc.factorie.util.HyperParameter(opts.l2, new cc.factorie.util.LogUniformDoubleSampler(1e-10, 1e2))
    val rate = cc.factorie.util.HyperParameter(opts.rate, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
    val delta = cc.factorie.util.HyperParameter(opts.delta, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
    val cutoff = cc.factorie.util.HyperParameter(opts.cutoff, new cc.factorie.util.SampleFromSeq(List(0,1,2,3)))
    val iters = cc.factorie.util.HyperParameter(opts.numIters, new cc.factorie.util.SampleFromSeq(List(3,5,7)))
    /*
    val ssh = new cc.factorie.util.SSHActorExecutor("apassos",
      Seq("avon1", "avon2"),
      "/home/apassos/canvas/factorie-test",
      "try-log/",
      "cc.factorie.app.nlp.parse.DepParser2",
      10, 5)
      */
    // Each trial is executed as a cluster job running ForwardPosTrainer.
    val qs = new cc.factorie.util.QSubExecutor(16, "cc.factorie.app.nlp.pos.ForwardPosTrainer")
    // Trailing numeric arguments are the search budget — see HyperParameterSearcher for their meaning.
    val optimizer = new cc.factorie.util.HyperParameterSearcher(opts, Seq(l1, l2, rate, delta, cutoff, iters), qs.execute, 200, 180, 60)
    val result = optimizer.optimize()
    println("Got results: " + result.mkString(" "))
    println("Best l1: " + opts.l1.value + " best l2: " + opts.l2.value)
    // Re-run the winning configuration once more, this time saving the model.
    opts.saveModel.setValue(true)
    println("Running best configuration...")
    import scala.concurrent.duration._
    import scala.concurrent.Await
    Await.result(qs.execute(opts.values.flatMap(_.unParse).toArray), 5.hours)
    println("Done")
  }
}
| hlin117/factorie | src/main/scala/cc/factorie/app/nlp/pos/ForwardPosTagger.scala | Scala | apache-2.0 | 29,232 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.filter
import iht.views.ViewTestHelper
import iht.views.html.filter.estimate
import play.api.data.Form
import play.api.data.Forms._
import play.api.mvc.AnyContentAsEmpty
import play.api.test.FakeRequest
import play.api.test.Helpers.{contentAsString, _}
/** View tests for the IHT filter "estimate" page: verifies title, radio options,
  * form action, and the previous-answers section of the rendered HTML. */
class EstimateViewTest extends ViewTestHelper {

  val fakeRequest = createFakeRequest(isAuthorised = false)
  val fakeForm = Form(single("s"-> optional(text)))
  val submitRoute = iht.controllers.filter.routes.EstimateController.onSubmitWithoutJointAssets()

  // Renders the estimate view with the given form/request and parses the HTML into a document.
  def getPageAsDoc(form: Form[Option[String]] = fakeForm, request: FakeRequest[AnyContentAsEmpty.type] = fakeRequest) = {
    lazy val estimateView: estimate = app.injector.instanceOf[estimate]
    val result = estimateView(form, false, submitRoute)(request, messages)
    asDocument(contentAsString(result))
  }

  "Estimate view" must {

    "have no message keys in html" in {
      val view = getPageAsDoc().toString
      noMessageKeysShouldBePresent(view)
    }

    "generate appropriate content for the title" in {
      val doc = getPageAsDoc()
      val titleElement = doc.getElementsByTag("h1").first
      titleElement.text must include(messagesApi("iht.roughEstimateEstateWorth"))
    }

    "generate appropriate content for the browser title" in {
      val doc = getPageAsDoc()
      val titleElement = doc.getElementsByTag("title").first
      titleElement.text must include(messagesApi("iht.roughEstimateEstateWorth"))
    }

    "contain an appropriate field set" in {
      val doc = getPageAsDoc()
      val fieldSet = doc.getElementsByTag("fieldset")
      val id = fieldSet.attr("id")
      id must be("estimate-container")
    }

    "contain an 'Under £325,000' radio button" in {
      val doc = getPageAsDoc()
      doc.getElementById("estimate-under-325000-label").text() must be(messagesApi("page.iht.filter.estimate.choice.under"))
    }

    "contain a 'Between £325,000 and £1 million' radio button" in {
      val doc = getPageAsDoc()
      doc.getElementById("estimate-between-325000-and-1million-label").text() must be(messagesApi("page.iht.filter.estimate.choice.between"))
    }

    "contain a 'More than £1 million' radio button" in {
      val doc = getPageAsDoc()
      doc.getElementById("estimate-more-than-1million-label").text() must be(messagesApi("page.iht.filter.estimate.choice.over"))
    }

    "contain a continue button with the text 'Continue'" in {
      val doc = getPageAsDoc()
      val button = doc.select("input#continue").first
      button.attr("value") must be(messagesApi("iht.continue"))
    }

    "contain a form with the action attribute set to the Estimate Controller onSubmit URL" in {
      val doc = getPageAsDoc()
      val formElement = doc.getElementsByTag("form").first
      formElement.attr("action") must be(iht.controllers.filter.routes.EstimateController.onSubmitWithoutJointAssets().url)
    }

    // Fixed typo in the test description: "ansewrs" -> "answers".
    "contain a 'Previous answers' section" in {
      val doc = getPageAsDoc()
      assertRenderedById(doc, "previous-answers")
    }

    "contain a 'Start again' link to go back to the domicile page" in {
      val doc = getPageAsDoc()
      val link = doc.getElementById("start-again")
      link.text() must be(messagesApi("iht.startAgain"))
      link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url)
    }

    "contain a row showing the user's answer to the previous question" in {
      val doc = getPageAsDoc()
      val row = doc.getElementById("domicile-row")
      row.text() must include(messagesApi("page.iht.registration.deceasedPermanentHome.title"))
      row.text() must include(messagesApi("iht.countries.englandOrWales"))
    }

    "contain a 'Change' link to go back to the domicile page" in {
      val doc = getPageAsDoc()
      val link = doc.getElementById("change-domicile")
      link.text() must include(messagesApi("iht.change"))
      link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url)
    }
  }
}
| hmrc/iht-frontend | test/iht/views/filter/EstimateViewTest.scala | Scala | apache-2.0 | 4,636 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops
/** Contains helper functions and classes for creating IO-related ops.
  *
  * @author Emmanouil Antonios Platanios
  */
package object io {
  // Aggregates the IO API surface (file ops, datasets, iterators, and readers)
  // into a single trait so it can be mixed into the package-wide API object.
  private[api] trait API
    extends Files
      with data.Dataset.API
      with data.Iterator.API
      with Reader.API
}
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/io/package.scala | Scala | apache-2.0 | 957 |
package com.programmaticallyspeaking.ncd.chrome.domains
import com.programmaticallyspeaking.ncd.transpile.{CachingES5Transpiler, ClosureBasedES5Transpiler}
trait TranspileSupport {
  // Nashorn before Java 10 cannot run ES6 generator functions (Java 9 in ES6 mode
  // says 'not yet implemented'), so such code is transpiled down to ES5 first.
  private val es5Transpiler = new CachingES5Transpiler(new ClosureBasedES5Transpiler)

  // Detects a generator-function declaration ("function*", possibly with whitespace in between).
  private val generatorPattern = "function\\\\s*\\\\*".r

  /** Returns true when `code` contains a generator function and therefore needs transpilation. */
  def needsTranspile(code: String): Boolean = generatorPattern.findFirstIn(code).nonEmpty

  /** Transpiles `code` to ES5 via the (caching) underlying transpiler. */
  def transpile(code: String): String = es5Transpiler.transpile(code)
}
| provegard/ncdbg | src/main/scala/com/programmaticallyspeaking/ncd/chrome/domains/TranspileSupport.scala | Scala | bsd-3-clause | 635 |
package com.twitter.finagle.redis
import java.lang.{Boolean => JBoolean, Double => JDouble, Long => JLong}
import com.twitter.finagle.redis.protocol._
import com.twitter.finagle.redis.util.{BufToString, NumberFormat, ReplyFormat}
import com.twitter.io.Buf
import com.twitter.util.Future
private[redis] trait SortedSetCommands { self: BaseClient =>

  // Parses an (possibly empty) multi-bulk reply into either score-annotated
  // results (when withScores is set) or a plain sequence of member Bufs,
  // lifting the result into a Future as required by doRequest.
  private[this] def parseMBulkReply(
    withScores: JBoolean
  ): PartialFunction[Reply, Future[Either[ZRangeResults, Seq[Buf]]]] = {
    val parse: PartialFunction[Reply, Either[ZRangeResults, Seq[Buf]]] = {
      case MBulkReply(messages) => withScoresHelper(withScores)(messages)
      case EmptyMBulkReply => withScoresHelper(withScores)(Nil)
    }
    parse andThen Future.value
  }

  // When scores were requested, the flat reply list is regrouped into
  // member/score pairs via returnPairs and wrapped as ZRangeResults (Left);
  // otherwise the raw member Bufs are returned unchanged (Right).
  private[this] def withScoresHelper(
    withScores: JBoolean
  )(messages: List[Reply]): Either[ZRangeResults, Seq[Buf]] = {
    val chanBufs = ReplyFormat.toBuf(messages)
    if (withScores)
      Left(ZRangeResults(returnPairs(chanBufs)))
    else
      Right(chanBufs)
  }

  /**
   * Adds a `member` with `score` to a sorted set under the `key`.
   *
   * @return The number of elements added to sorted set.
   */
  def zAdd(key: Buf, score: JDouble, member: Buf): Future[JLong] = {
    zAddMulti(key, Seq((score, member)))
  }

  /**
   * Adds member -> score pair `members` to sorted set under the `key`.
   *
   * @note Adding multiple elements only works with redis 2.4 or later.
   *
   * @return The number of elements added to sorted set.
   */
  def zAddMulti(key: Buf, members: Seq[(JDouble, Buf)]): Future[JLong] = {
    doRequest(ZAdd(key, members.map { m => ZMember(m._1, m._2) })) {
      case IntegerReply(n) => Future.value(n)
    }
  }

  /**
   * Returns cardinality of the sorted set under the `key`, or 0
   * if `key` does not exist.
   */
  def zCard(key: Buf): Future[JLong] =
    doRequest(ZCard(key)) {
      case IntegerReply(n) => Future.value(n)
    }

  /**
   * Gets number of elements in sorted set under the `key` with score
   * between `min` and `max`.
   */
  def zCount(key: Buf, min: ZInterval, max: ZInterval): Future[JLong] =
    doRequest(ZCount(key, min, max)) {
      case IntegerReply(n) => Future.value(n)
    }

  /**
   * Gets member -> score pairs from sorted set under the `key` between
   * `min` and `max`. Results are limited by `limit`.
   */
  def zRangeByScore(
    key: Buf,
    min: ZInterval,
    max: ZInterval,
    withScores: JBoolean,
    limit: Option[Limit]
  ): Future[Either[ZRangeResults, Seq[Buf]]] =
    doRequest(
      ZRangeByScore(key, min, max, if (withScores) Some(WithScores) else None, limit)
    ) (parseMBulkReply(withScores))

  /**
   * Removes specified `members` from sorted set at `key`.
   *
   * @return The number of members removed.
   */
  def zRem(key: Buf, members: Seq[Buf]): Future[JLong] =
    doRequest(ZRem(key, members)) {
      case IntegerReply(n) => Future.value(n)
    }

  /**
   * Returns specified range (from `start` to `end`) of elements in
   * sorted set at `key`. Elements are ordered from highest to lowest score.
   */
  def zRevRange(
    key: Buf,
    start: JLong,
    stop: JLong,
    withScores: JBoolean
  ): Future[Either[ZRangeResults, Seq[Buf]]] =
    doRequest(ZRevRange(key, start, stop, if (withScores) Some(WithScores) else None))(
      parseMBulkReply(withScores)
    )

  /**
   * Returns elements in sorted set at `key` with a score between `max` and `min`.
   * Elements are ordered from highest to lowest score Results are limited by `limit`.
   */
  def zRevRangeByScore(
    key: Buf,
    max: ZInterval,
    min: ZInterval,
    withScores: JBoolean,
    limit: Option[Limit]
  ): Future[Either[ZRangeResults, Seq[Buf]]] =
    doRequest(ZRevRangeByScore(key, max, min, if (withScores) Some(WithScores) else None, limit))(
      parseMBulkReply(withScores)
    )

  /**
   * Gets the score of a `member` in sorted set at the `key`.
   */
  def zScore(key: Buf, member: Buf): Future[Option[JDouble]] =
    doRequest(ZScore(key, member)) {
      case BulkReply(message) =>
        Future.value(Some(NumberFormat.toDouble(BufToString(message))))
      case EmptyBulkReply => Future.None
    }

  /**
   * Gets the rank of a `member` in the sorted set at the `key`, or `None`
   * if it doesn't exist.
   */
  def zRevRank(key: Buf, member: Buf): Future[Option[JLong]] =
    doRequest(ZRevRank(key, member)) {
      case IntegerReply(n) => Future.value(Some(n))
      case EmptyBulkReply => Future.None
    }

  /**
   * Increments the `member` in sorted set at the `key` by a given `amount`.
   * Returns `Some` of the new value of the incremented member or `None` if
   * the member is not found or the set is empty. Throws an exception if
   * the key refers to a structure that is not a sorted set.
   */
  def zIncrBy(key: Buf, amount: JDouble, member: Buf): Future[Option[JDouble]] =
    doRequest(ZIncrBy(key, amount, member)) {
      case BulkReply(message) =>
        Future.value(Some(NumberFormat.toDouble(BufToString(message))))
      case EmptyBulkReply => Future.None
    }

  /**
   * Gets the rank of the `member` in the sorted set at the `key`, or `None`
   * if it doesn't exist.
   */
  def zRank(key: Buf, member: Buf): Future[Option[JLong]] =
    doRequest(ZRank(key, member)) {
      case IntegerReply(n) => Future.value(Some(n))
      case EmptyBulkReply => Future.None
    }

  /**
   * Removes members from sorted set at the `key` by sort order,
   * from `start` to `stop`, inclusive.
   *
   * @return The number of members removed from sorted set.
   */
  def zRemRangeByRank(key: Buf, start: JLong, stop: JLong): Future[JLong] =
    doRequest(ZRemRangeByRank(key, start, stop)) {
      case IntegerReply(n) => Future.value(n)
    }

  /**
   * Removes members from sorted set at the `key` by score, from
   * `min` to `max`, inclusive.
   *
   * @return The number of members removed from sorted set.
   */
  def zRemRangeByScore(key: Buf, min: ZInterval, max: ZInterval): Future[JLong] =
    doRequest(ZRemRangeByScore(key, min, max)) {
      case IntegerReply(n) => Future.value(n)
    }

  /**
   * Returns specified range (from `start` to `stop`) of elements in
   * sorted set at the `key`. Elements are ordered from lowest to
   * highest score.
   */
  def zRange(
    key: Buf,
    start: JLong,
    stop: JLong,
    withScores: JBoolean
  ): Future[Either[ZRangeResults, Seq[Buf]]] =
    doRequest(ZRange(key, start, stop, if (withScores) Some(WithScores) else None)) {
      parseMBulkReply(withScores)
    }
}
| adriancole/finagle | finagle-redis/src/main/scala/com/twitter/finagle/redis/SortedSetCommands.scala | Scala | apache-2.0 | 6,535 |
package org.spiffy.sample.validation
import org.spiffy.http.SpiffyRequestWrapper
import org.spiffy.validation._
object uniqNickName extends SpiffyValidator {
  /** Validates the nick-name field of the request; a `Some` carries an error
    * message while `None` means the field passed validation. */
  def apply(args: SpiffyValidatorArgs): Option[String] =
    try {
      // TODO: implement this, look up nick name in the database
      val nick = args.req.getParameter(args.field)
      None
    } catch {
      case e: Exception =>
        Some(args.field + ": unknown error")
    }
}
| mardambey/spiffy | src/main/scala/org/spiffy/sample/validation/Validation.scala | Scala | lgpl-2.1 | 461 |
package lampetia.meta.feature.sql
import lampetia.meta.PropertyType
/**
* @author Hossam Karim
*/
/** Maps model property types to the SQL type names used to store them. */
trait SqlTypes {
  /** Returns the SQL type name for values of the given property type. */
  def name(propertyType: PropertyType[_]): String
}
| hkarim/lampetia | lampetia-model/src/main/scala/lampetia/meta/feature/sql/SqlTypes.scala | Scala | mit | 173 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.domain
import play.api.libs.json.{Reads, Writes}
/** An ATED Unique Taxpayer Reference (UTR) tax identifier.
  * Construction fails (IllegalArgumentException via require) for values
  * that do not satisfy [[AtedUtr.isValid]]. */
case class AtedUtr(utr: String) extends TaxIdentifier with SimpleName {
  require(AtedUtr.isValid(utr))
  override lazy val toString = utr
  // Simple name used to identify this identifier type.
  val name = "atedutr"
  def value = utr
}
object AtedUtr extends CheckCharacter {
  // JSON (de)serialization based on the identifier's string value under the "utr" key.
  implicit val atedUtrWrite: Writes[AtedUtr] = new SimpleObjectWrites[AtedUtr](_.value)
  implicit val atedUtrRead: Reads[AtedUtr] = new SimpleObjectReads[AtedUtr]("utr", AtedUtr.apply)

  // Expected shape: 'X' (either case), one letter, two digits, "00000", six digits.
  private val validFormat = "^[Xx][a-zA-Z]\\\\d{2}00000\\\\d{6}$"

  /** A UTR is valid when it is non-empty, matches the expected pattern,
    * and its check character (positions 1 and 2, upper-cased) is correct. */
  def isValid(utr: String) = {
    val wellFormed = utr.nonEmpty && utr.matches(validFormat)
    wellFormed && isCheckCorrect(utr.toUpperCase, 1, 2)
  }
}
| howyp/domain | src/main/scala/uk/gov/hmrc/domain/AtedUtr.scala | Scala | apache-2.0 | 1,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.lang.{Boolean => JBoolean, Double => JDouble, Float => JFloat, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import java.sql.{Date, Timestamp}
import java.time.{Duration, Instant, LocalDate, Period}
import java.util.Locale
import scala.collection.JavaConverters.asScalaBufferConverter
import org.apache.parquet.column.statistics.{Statistics => ParquetStatistics}
import org.apache.parquet.filter2.predicate._
import org.apache.parquet.filter2.predicate.SparkFilterApi._
import org.apache.parquet.io.api.Binary
import org.apache.parquet.schema.{GroupType, LogicalTypeAnnotation, MessageType, PrimitiveComparator, PrimitiveType, Type}
import org.apache.parquet.schema.LogicalTypeAnnotation.{DecimalLogicalTypeAnnotation, TimeUnit}
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName._
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils, IntervalUtils}
import org.apache.spark.sql.catalyst.util.RebaseDateTime.{rebaseGregorianToJulianDays, rebaseGregorianToJulianMicros}
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.sources
import org.apache.spark.unsafe.types.UTF8String
/**
* Some utility function to convert Spark data source filters to Parquet filters.
*/
class ParquetFilters(
schema: MessageType,
pushDownDate: Boolean,
pushDownTimestamp: Boolean,
pushDownDecimal: Boolean,
pushDownStartWith: Boolean,
pushDownInFilterThreshold: Int,
caseSensitive: Boolean,
datetimeRebaseMode: LegacyBehaviorPolicy.Value) {
// A map which contains parquet field name and data type, if predicate push down applies.
//
// Each key in `nameToParquetField` represents a column; `dots` are used as separators for
// nested columns. If any part of the names contains `dots`, it is quoted to avoid confusion.
// See `org.apache.spark.sql.connector.catalog.quote` for implementation details.
private val nameToParquetField : Map[String, ParquetPrimitiveField] = {
// Recursively traverse the parquet schema to get primitive fields that can be pushed-down.
// `parentFieldNames` is used to keep track of the current nested level when traversing.
def getPrimitiveFields(
fields: Seq[Type],
parentFieldNames: Array[String] = Array.empty): Seq[ParquetPrimitiveField] = {
fields.flatMap {
case p: PrimitiveType =>
Some(ParquetPrimitiveField(fieldNames = parentFieldNames :+ p.getName,
fieldType = ParquetSchemaType(p.getLogicalTypeAnnotation,
p.getPrimitiveTypeName, p.getTypeLength)))
// Note that when g is a `Struct`, `g.getOriginalType` is `null`.
// When g is a `Map`, `g.getOriginalType` is `MAP`.
// When g is a `List`, `g.getOriginalType` is `LIST`.
case g: GroupType if g.getOriginalType == null =>
getPrimitiveFields(g.getFields.asScala.toSeq, parentFieldNames :+ g.getName)
// Parquet only supports push-down for primitive types; as a result, Map and List types
// are removed.
case _ => None
}
}
val primitiveFields = getPrimitiveFields(schema.getFields.asScala.toSeq).map { field =>
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
(field.fieldNames.toSeq.quoted, field)
}
if (caseSensitive) {
primitiveFields.toMap
} else {
// Don't consider ambiguity here, i.e. more than one field is matched in case insensitive
// mode, just skip pushdown for these fields, they will trigger Exception when reading,
// See: SPARK-25132.
val dedupPrimitiveFields =
primitiveFields
.groupBy(_._1.toLowerCase(Locale.ROOT))
.filter(_._2.size == 1)
.mapValues(_.head._2)
CaseInsensitiveMap(dedupPrimitiveFields.toMap)
}
}
/**
* Holds a single primitive field information stored in the underlying parquet file.
*
* @param fieldNames a field name as an array of string multi-identifier in parquet file
* @param fieldType field type related info in parquet file
*/
private case class ParquetPrimitiveField(
fieldNames: Array[String],
fieldType: ParquetSchemaType)
private case class ParquetSchemaType(
logicalTypeAnnotation: LogicalTypeAnnotation,
primitiveTypeName: PrimitiveTypeName,
length: Int)
private val ParquetBooleanType = ParquetSchemaType(null, BOOLEAN, 0)
private val ParquetByteType =
ParquetSchemaType(LogicalTypeAnnotation.intType(8, true), INT32, 0)
private val ParquetShortType =
ParquetSchemaType(LogicalTypeAnnotation.intType(16, true), INT32, 0)
private val ParquetIntegerType = ParquetSchemaType(null, INT32, 0)
private val ParquetLongType = ParquetSchemaType(null, INT64, 0)
private val ParquetFloatType = ParquetSchemaType(null, FLOAT, 0)
private val ParquetDoubleType = ParquetSchemaType(null, DOUBLE, 0)
private val ParquetStringType =
ParquetSchemaType(LogicalTypeAnnotation.stringType(), BINARY, 0)
private val ParquetBinaryType = ParquetSchemaType(null, BINARY, 0)
private val ParquetDateType =
ParquetSchemaType(LogicalTypeAnnotation.dateType(), INT32, 0)
private val ParquetTimestampMicrosType =
ParquetSchemaType(LogicalTypeAnnotation.timestampType(true, TimeUnit.MICROS), INT64, 0)
private val ParquetTimestampMillisType =
ParquetSchemaType(LogicalTypeAnnotation.timestampType(true, TimeUnit.MILLIS), INT64, 0)
private def dateToDays(date: Any): Int = {
val gregorianDays = date match {
case d: Date => DateTimeUtils.fromJavaDate(d)
case ld: LocalDate => DateTimeUtils.localDateToDays(ld)
}
datetimeRebaseMode match {
case LegacyBehaviorPolicy.LEGACY => rebaseGregorianToJulianDays(gregorianDays)
case _ => gregorianDays
}
}
private def timestampToMicros(v: Any): JLong = {
val gregorianMicros = v match {
case i: Instant => DateTimeUtils.instantToMicros(i)
case t: Timestamp => DateTimeUtils.fromJavaTimestamp(t)
}
datetimeRebaseMode match {
case LegacyBehaviorPolicy.LEGACY => rebaseGregorianToJulianMicros(gregorianMicros)
case _ => gregorianMicros
}
}
private def decimalToInt32(decimal: JBigDecimal): Integer = decimal.unscaledValue().intValue()
private def decimalToInt64(decimal: JBigDecimal): JLong = decimal.unscaledValue().longValue()
private def decimalToByteArray(decimal: JBigDecimal, numBytes: Int): Binary = {
val decimalBuffer = new Array[Byte](numBytes)
val bytes = decimal.unscaledValue().toByteArray
val fixedLengthBytes = if (bytes.length == numBytes) {
bytes
} else {
val signByte = if (bytes.head < 0) -1: Byte else 0: Byte
java.util.Arrays.fill(decimalBuffer, 0, numBytes - bytes.length, signByte)
System.arraycopy(bytes, 0, decimalBuffer, numBytes - bytes.length, bytes.length)
decimalBuffer
}
Binary.fromConstantByteArray(fixedLengthBytes, 0, numBytes)
}
private def timestampToMillis(v: Any): JLong = {
val micros = timestampToMicros(v)
val millis = DateTimeUtils.microsToMillis(micros)
millis.asInstanceOf[JLong]
}
private def toIntValue(v: Any): Integer = {
Option(v).map {
case p: Period => IntervalUtils.periodToMonths(p)
case n => n.asInstanceOf[Number].intValue
}.map(_.asInstanceOf[Integer]).orNull
}
private def toLongValue(v: Any): JLong = v match {
case d: Duration => IntervalUtils.durationToMicros(d)
case l => l.asInstanceOf[JLong]
}
private val makeEq:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetBooleanType =>
(n: Array[String], v: Any) => FilterApi.eq(booleanColumn(n), v.asInstanceOf[JBoolean])
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.eq(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.eq(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.eq(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.eq(doubleColumn(n), v.asInstanceOf[JDouble])
// Binary.fromString and Binary.fromByteArray don't accept null values
case ParquetStringType =>
(n: Array[String], v: Any) => FilterApi.eq(
binaryColumn(n),
Option(v).map(s => Binary.fromString(s.asInstanceOf[String])).orNull)
case ParquetBinaryType =>
(n: Array[String], v: Any) => FilterApi.eq(
binaryColumn(n),
Option(v).map(b => Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]])).orNull)
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) => FilterApi.eq(
intColumn(n),
Option(v).map(date => dateToDays(date).asInstanceOf[Integer]).orNull)
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.eq(
longColumn(n),
Option(v).map(timestampToMicros).orNull)
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.eq(
longColumn(n),
Option(v).map(timestampToMillis).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.eq(
intColumn(n),
Option(v).map(d => decimalToInt32(d.asInstanceOf[JBigDecimal])).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.eq(
longColumn(n),
Option(v).map(d => decimalToInt64(d.asInstanceOf[JBigDecimal])).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.eq(
binaryColumn(n),
Option(v).map(d => decimalToByteArray(d.asInstanceOf[JBigDecimal], length)).orNull)
}
private val makeNotEq:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetBooleanType =>
(n: Array[String], v: Any) => FilterApi.notEq(booleanColumn(n), v.asInstanceOf[JBoolean])
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.notEq(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.notEq(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.notEq(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.notEq(doubleColumn(n), v.asInstanceOf[JDouble])
case ParquetStringType =>
(n: Array[String], v: Any) => FilterApi.notEq(
binaryColumn(n),
Option(v).map(s => Binary.fromString(s.asInstanceOf[String])).orNull)
case ParquetBinaryType =>
(n: Array[String], v: Any) => FilterApi.notEq(
binaryColumn(n),
Option(v).map(b => Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]])).orNull)
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) => FilterApi.notEq(
intColumn(n),
Option(v).map(date => dateToDays(date).asInstanceOf[Integer]).orNull)
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.notEq(
longColumn(n),
Option(v).map(timestampToMicros).orNull)
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.notEq(
longColumn(n),
Option(v).map(timestampToMillis).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.notEq(
intColumn(n),
Option(v).map(d => decimalToInt32(d.asInstanceOf[JBigDecimal])).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.notEq(
longColumn(n),
Option(v).map(d => decimalToInt64(d.asInstanceOf[JBigDecimal])).orNull)
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) => FilterApi.notEq(
binaryColumn(n),
Option(v).map(d => decimalToByteArray(d.asInstanceOf[JBigDecimal], length)).orNull)
}
private val makeLt:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.lt(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.lt(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.lt(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.lt(doubleColumn(n), v.asInstanceOf[JDouble])
case ParquetStringType =>
(n: Array[String], v: Any) =>
FilterApi.lt(binaryColumn(n), Binary.fromString(v.asInstanceOf[String]))
case ParquetBinaryType =>
(n: Array[String], v: Any) =>
FilterApi.lt(binaryColumn(n), Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]]))
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) =>
FilterApi.lt(intColumn(n), dateToDays(v).asInstanceOf[Integer])
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.lt(longColumn(n), timestampToMicros(v))
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.lt(longColumn(n), timestampToMillis(v))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.lt(intColumn(n), decimalToInt32(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.lt(longColumn(n), decimalToInt64(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.lt(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
}
private val makeLtEq:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.ltEq(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.ltEq(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.ltEq(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.ltEq(doubleColumn(n), v.asInstanceOf[JDouble])
case ParquetStringType =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(binaryColumn(n), Binary.fromString(v.asInstanceOf[String]))
case ParquetBinaryType =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(binaryColumn(n), Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]]))
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(intColumn(n), dateToDays(v).asInstanceOf[Integer])
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.ltEq(longColumn(n), timestampToMicros(v))
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.ltEq(longColumn(n), timestampToMillis(v))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(intColumn(n), decimalToInt32(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(longColumn(n), decimalToInt64(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.ltEq(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
}
private val makeGt:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.gt(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.gt(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.gt(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.gt(doubleColumn(n), v.asInstanceOf[JDouble])
case ParquetStringType =>
(n: Array[String], v: Any) =>
FilterApi.gt(binaryColumn(n), Binary.fromString(v.asInstanceOf[String]))
case ParquetBinaryType =>
(n: Array[String], v: Any) =>
FilterApi.gt(binaryColumn(n), Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]]))
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) =>
FilterApi.gt(intColumn(n), dateToDays(v).asInstanceOf[Integer])
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.gt(longColumn(n), timestampToMicros(v))
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.gt(longColumn(n), timestampToMillis(v))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gt(intColumn(n), decimalToInt32(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gt(longColumn(n), decimalToInt64(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gt(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
}
private val makeGtEq:
PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
case ParquetByteType | ParquetShortType | ParquetIntegerType =>
(n: Array[String], v: Any) => FilterApi.gtEq(intColumn(n), toIntValue(v))
case ParquetLongType =>
(n: Array[String], v: Any) => FilterApi.gtEq(longColumn(n), toLongValue(v))
case ParquetFloatType =>
(n: Array[String], v: Any) => FilterApi.gtEq(floatColumn(n), v.asInstanceOf[JFloat])
case ParquetDoubleType =>
(n: Array[String], v: Any) => FilterApi.gtEq(doubleColumn(n), v.asInstanceOf[JDouble])
case ParquetStringType =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(binaryColumn(n), Binary.fromString(v.asInstanceOf[String]))
case ParquetBinaryType =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(binaryColumn(n), Binary.fromReusedByteArray(v.asInstanceOf[Array[Byte]]))
case ParquetDateType if pushDownDate =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(intColumn(n), dateToDays(v).asInstanceOf[Integer])
case ParquetTimestampMicrosType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.gtEq(longColumn(n), timestampToMicros(v))
case ParquetTimestampMillisType if pushDownTimestamp =>
(n: Array[String], v: Any) => FilterApi.gtEq(longColumn(n), timestampToMillis(v))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(intColumn(n), decimalToInt32(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(longColumn(n), decimalToInt64(v.asInstanceOf[JBigDecimal]))
case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
if pushDownDecimal =>
(n: Array[String], v: Any) =>
FilterApi.gtEq(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
}
  /**
   * Builds a range predicate `min(values) <= col && col <= max(values)` used as
   * a coarse approximation of a large IN list. Each case feeds the converted
   * values into a Parquet `Statistics` accumulator and then reads back the
   * min/max. Note: each lambda body below is the whole remainder of its case
   * clause (Scala function literals extend to the end of the enclosing block),
   * so the `FilterApi.and(...)` expression is the lambda's result.
   */
  private val makeInPredicate:
    PartialFunction[ParquetSchemaType,
      (Array[String], Array[Any], ParquetStatistics[_]) => FilterPredicate] = {
    case ParquetByteType | ParquetShortType | ParquetIntegerType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(toIntValue(_).toInt).foreach(statistics.updateStats)
        FilterApi.and(
          FilterApi.gtEq(intColumn(n), statistics.genericGetMin().asInstanceOf[Integer]),
          FilterApi.ltEq(intColumn(n), statistics.genericGetMax().asInstanceOf[Integer]))
    case ParquetLongType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(toLongValue).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(longColumn(n), statistics.genericGetMin().asInstanceOf[JLong]),
          FilterApi.ltEq(longColumn(n), statistics.genericGetMax().asInstanceOf[JLong]))
    case ParquetFloatType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(_.asInstanceOf[JFloat]).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(floatColumn(n), statistics.genericGetMin().asInstanceOf[JFloat]),
          FilterApi.ltEq(floatColumn(n), statistics.genericGetMax().asInstanceOf[JFloat]))
    case ParquetDoubleType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(_.asInstanceOf[JDouble]).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(doubleColumn(n), statistics.genericGetMin().asInstanceOf[JDouble]),
          FilterApi.ltEq(doubleColumn(n), statistics.genericGetMax().asInstanceOf[JDouble]))
    case ParquetStringType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(s => Binary.fromString(s.asInstanceOf[String])).foreach(statistics.updateStats)
        FilterApi.and(
          FilterApi.gtEq(binaryColumn(n), statistics.genericGetMin().asInstanceOf[Binary]),
          FilterApi.ltEq(binaryColumn(n), statistics.genericGetMax().asInstanceOf[Binary]))
    case ParquetBinaryType =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(b => Binary.fromReusedByteArray(b.asInstanceOf[Array[Byte]]))
          .foreach(statistics.updateStats)
        FilterApi.and(
          FilterApi.gtEq(binaryColumn(n), statistics.genericGetMin().asInstanceOf[Binary]),
          FilterApi.ltEq(binaryColumn(n), statistics.genericGetMax().asInstanceOf[Binary]))
    case ParquetDateType if pushDownDate =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(dateToDays).map(_.asInstanceOf[Integer]).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(intColumn(n), statistics.genericGetMin().asInstanceOf[Integer]),
          FilterApi.ltEq(intColumn(n), statistics.genericGetMax().asInstanceOf[Integer]))
    case ParquetTimestampMicrosType if pushDownTimestamp =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(timestampToMicros).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(longColumn(n), statistics.genericGetMin().asInstanceOf[JLong]),
          FilterApi.ltEq(longColumn(n), statistics.genericGetMax().asInstanceOf[JLong]))
    case ParquetTimestampMillisType if pushDownTimestamp =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(timestampToMillis).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(longColumn(n), statistics.genericGetMin().asInstanceOf[JLong]),
          FilterApi.ltEq(longColumn(n), statistics.genericGetMax().asInstanceOf[JLong]))
    case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT32, _) if pushDownDecimal =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(_.asInstanceOf[JBigDecimal]).map(decimalToInt32).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(intColumn(n), statistics.genericGetMin().asInstanceOf[Integer]),
          FilterApi.ltEq(intColumn(n), statistics.genericGetMax().asInstanceOf[Integer]))
    case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, INT64, _) if pushDownDecimal =>
      (n: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(_.asInstanceOf[JBigDecimal]).map(decimalToInt64).foreach(statistics.updateStats(_))
        FilterApi.and(
          FilterApi.gtEq(longColumn(n), statistics.genericGetMin().asInstanceOf[JLong]),
          FilterApi.ltEq(longColumn(n), statistics.genericGetMax().asInstanceOf[JLong]))
    case ParquetSchemaType(_: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, length)
      if pushDownDecimal =>
      (path: Array[String], v: Array[Any], statistics: ParquetStatistics[_]) =>
        v.map(d => decimalToByteArray(d.asInstanceOf[JBigDecimal], length))
          .foreach(statistics.updateStats)
        FilterApi.and(
          FilterApi.gtEq(binaryColumn(path), statistics.genericGetMin().asInstanceOf[Binary]),
          FilterApi.ltEq(binaryColumn(path), statistics.genericGetMax().asInstanceOf[Binary]))
  }
// Returns filters that can be pushed down when reading Parquet files.
def convertibleFilters(filters: Seq[sources.Filter]): Seq[sources.Filter] = {
filters.flatMap(convertibleFiltersHelper(_, canPartialPushDown = true))
}
private def convertibleFiltersHelper(
predicate: sources.Filter,
canPartialPushDown: Boolean): Option[sources.Filter] = {
predicate match {
case sources.And(left, right) =>
val leftResultOptional = convertibleFiltersHelper(left, canPartialPushDown)
val rightResultOptional = convertibleFiltersHelper(right, canPartialPushDown)
(leftResultOptional, rightResultOptional) match {
case (Some(leftResult), Some(rightResult)) => Some(sources.And(leftResult, rightResult))
case (Some(leftResult), None) if canPartialPushDown => Some(leftResult)
case (None, Some(rightResult)) if canPartialPushDown => Some(rightResult)
case _ => None
}
case sources.Or(left, right) =>
val leftResultOptional = convertibleFiltersHelper(left, canPartialPushDown)
val rightResultOptional = convertibleFiltersHelper(right, canPartialPushDown)
if (leftResultOptional.isEmpty || rightResultOptional.isEmpty) {
None
} else {
Some(sources.Or(leftResultOptional.get, rightResultOptional.get))
}
case sources.Not(pred) =>
val resultOptional = convertibleFiltersHelper(pred, canPartialPushDown = false)
resultOptional.map(sources.Not)
case other =>
if (createFilter(other).isDefined) {
Some(other)
} else {
None
}
}
}
  /**
   * Converts data sources filters to Parquet filter predicates.
   *
   * Returns None when the predicate cannot be expressed as a Parquet-native
   * predicate. Partial push-down of conjuncts is enabled at this top level.
   */
  def createFilter(predicate: sources.Filter): Option[FilterPredicate] = {
    createFilterHelper(predicate, canPartialPushDownConjuncts = true)
  }
// Parquet's type in the given file should be matched to the value's type
// in the pushed filter in order to push down the filter to Parquet.
private def valueCanMakeFilterOn(name: String, value: Any): Boolean = {
value == null || (nameToParquetField(name).fieldType match {
case ParquetBooleanType => value.isInstanceOf[JBoolean]
case ParquetIntegerType if value.isInstanceOf[Period] => true
case ParquetByteType | ParquetShortType | ParquetIntegerType => value.isInstanceOf[Number]
case ParquetLongType => value.isInstanceOf[JLong] || value.isInstanceOf[Duration]
case ParquetFloatType => value.isInstanceOf[JFloat]
case ParquetDoubleType => value.isInstanceOf[JDouble]
case ParquetStringType => value.isInstanceOf[String]
case ParquetBinaryType => value.isInstanceOf[Array[Byte]]
case ParquetDateType =>
value.isInstanceOf[Date] || value.isInstanceOf[LocalDate]
case ParquetTimestampMicrosType | ParquetTimestampMillisType =>
value.isInstanceOf[Timestamp] || value.isInstanceOf[Instant]
case ParquetSchemaType(decimalType: DecimalLogicalTypeAnnotation, INT32, _) =>
isDecimalMatched(value, decimalType)
case ParquetSchemaType(decimalType: DecimalLogicalTypeAnnotation, INT64, _) =>
isDecimalMatched(value, decimalType)
case
ParquetSchemaType(decimalType: DecimalLogicalTypeAnnotation, FIXED_LEN_BYTE_ARRAY, _) =>
isDecimalMatched(value, decimalType)
case _ => false
})
}
// Decimal type must make sure that filter value's scale matched the file.
// If doesn't matched, which would cause data corruption.
private def isDecimalMatched(value: Any,
decimalLogicalType: DecimalLogicalTypeAnnotation): Boolean = value match {
case decimal: JBigDecimal =>
decimal.scale == decimalLogicalType.getScale
case _ => false
}
  // A filter on `name` is pushable only if the column is known in the Parquet
  // schema and the comparison value's type matches the file's column type.
  private def canMakeFilterOn(name: String, value: Any): Boolean = {
    nameToParquetField.contains(name) && valueCanMakeFilterOn(name, value)
  }
  /**
   * @param predicate the input filter predicates. Not all the predicates can be pushed down.
   * @param canPartialPushDownConjuncts whether a subset of conjuncts of predicates can be pushed
   *                                    down safely. Pushing ONLY one side of AND down is safe
   *                                    only at the top level or when no ancestor is a NOT or OR.
   * @return the Parquet-native filter predicates that are eligible for pushdown.
   */
  private def createFilterHelper(
      predicate: sources.Filter,
      canPartialPushDownConjuncts: Boolean): Option[FilterPredicate] = {
    // NOTE:
    //
    // For any comparison operator `cmp`, both `a cmp NULL` and `NULL cmp a` evaluate to `NULL`,
    // which can be casted to `false` implicitly. Please refer to the `eval` method of these
    // operators and the `PruneFilters` rule for details.
    // Hyukjin:
    // I added [[EqualNullSafe]] with [[org.apache.parquet.filter2.predicate.Operators.Eq]].
    // So, it performs equality comparison identically when given [[sources.Filter]] is [[EqualTo]].
    // The reason why I did this is, that the actual Parquet filter checks null-safe equality
    // comparison.
    // So I added this and maybe [[EqualTo]] should be changed. It still seems fine though, because
    // physical planning does not set `NULL` to [[EqualTo]] but changes it to [[IsNull]] and etc.
    // Probably I missed something and obviously this should be changed.
    predicate match {
      // IsNull/IsNotNull reuse the eq/notEq builders with a null value.
      case sources.IsNull(name) if canMakeFilterOn(name, null) =>
        makeEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, null))
      case sources.IsNotNull(name) if canMakeFilterOn(name, null) =>
        makeNotEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, null))
      case sources.EqualTo(name, value) if canMakeFilterOn(name, value) =>
        makeEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.Not(sources.EqualTo(name, value)) if canMakeFilterOn(name, value) =>
        makeNotEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.EqualNullSafe(name, value) if canMakeFilterOn(name, value) =>
        makeEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.Not(sources.EqualNullSafe(name, value)) if canMakeFilterOn(name, value) =>
        makeNotEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.LessThan(name, value) if canMakeFilterOn(name, value) =>
        makeLt.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.LessThanOrEqual(name, value) if canMakeFilterOn(name, value) =>
        makeLtEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.GreaterThan(name, value) if canMakeFilterOn(name, value) =>
        makeGt.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.GreaterThanOrEqual(name, value) if canMakeFilterOn(name, value) =>
        makeGtEq.lift(nameToParquetField(name).fieldType)
          .map(_(nameToParquetField(name).fieldNames, value))
      case sources.And(lhs, rhs) =>
        // At here, it is not safe to just convert one side and remove the other side
        // if we do not understand what the parent filters are.
        //
        // Here is an example used to explain the reason.
        // Let's say we have NOT(a = 2 AND b in ('1')) and we do not understand how to
        // convert b in ('1'). If we only convert a = 2, we will end up with a filter
        // NOT(a = 2), which will generate wrong results.
        //
        // Pushing one side of AND down is only safe to do at the top level or in the child
        // AND before hitting NOT or OR conditions, and in this case, the unsupported predicate
        // can be safely removed.
        val lhsFilterOption =
          createFilterHelper(lhs, canPartialPushDownConjuncts)
        val rhsFilterOption =
          createFilterHelper(rhs, canPartialPushDownConjuncts)
        (lhsFilterOption, rhsFilterOption) match {
          case (Some(lhsFilter), Some(rhsFilter)) => Some(FilterApi.and(lhsFilter, rhsFilter))
          case (Some(lhsFilter), None) if canPartialPushDownConjuncts => Some(lhsFilter)
          case (None, Some(rhsFilter)) if canPartialPushDownConjuncts => Some(rhsFilter)
          case _ => None
        }
      case sources.Or(lhs, rhs) =>
        // The Or predicate is convertible when both of its children can be pushed down.
        // That is to say, if one/both of the children can be partially pushed down, the Or
        // predicate can be partially pushed down as well.
        //
        // Here is an example used to explain the reason.
        // Let's say we have
        // (a1 AND a2) OR (b1 AND b2),
        // a1 and b1 is convertible, while a2 and b2 is not.
        // The predicate can be converted as
        // (a1 OR b1) AND (a1 OR b2) AND (a2 OR b1) AND (a2 OR b2)
        // As per the logical in And predicate, we can push down (a1 OR b1).
        for {
          lhsFilter <- createFilterHelper(lhs, canPartialPushDownConjuncts)
          rhsFilter <- createFilterHelper(rhs, canPartialPushDownConjuncts)
        } yield FilterApi.or(lhsFilter, rhsFilter)
      case sources.Not(pred) =>
        // Under a NOT, dropping a conjunct would change semantics, so partial
        // push-down is disabled for the child.
        createFilterHelper(pred, canPartialPushDownConjuncts = false)
          .map(FilterApi.not)
      case sources.In(name, values) if pushDownInFilterThreshold > 0 && values.nonEmpty &&
          canMakeFilterOn(name, values.head) =>
        val fieldType = nameToParquetField(name).fieldType
        val fieldNames = nameToParquetField(name).fieldNames
        if (values.length <= pushDownInFilterThreshold) {
          // Small IN list: expand to an OR of equality predicates.
          values.distinct.flatMap { v =>
            makeEq.lift(fieldType).map(_(fieldNames, v))
          }.reduceLeftOption(FilterApi.or)
        } else if (canPartialPushDownConjuncts) {
          // Large IN list: approximate with a min/max range predicate built
          // from Parquet statistics over the values (see makeInPredicate).
          val primitiveType = schema.getColumnDescription(fieldNames).getPrimitiveType
          val statistics: ParquetStatistics[_] = ParquetStatistics.createStats(primitiveType)
          if (values.contains(null)) {
            // Keep null membership exact by OR-ing an explicit eq-null check.
            Seq(makeEq.lift(fieldType).map(_(fieldNames, null)),
              makeInPredicate.lift(fieldType)
                .map(_(fieldNames, values.filter(_ != null), statistics))
            ).flatten.reduceLeftOption(FilterApi.or)
          } else {
            makeInPredicate.lift(fieldType).map(_(fieldNames, values, statistics))
          }
        } else {
          None
        }
      case sources.StringStartsWith(name, prefix)
          if pushDownStartWith && canMakeFilterOn(name, prefix) =>
        Option(prefix).map { v =>
          FilterApi.userDefined(binaryColumn(nameToParquetField(name).fieldNames),
            new UserDefinedPredicate[Binary] with Serializable {
              private val strToBinary = Binary.fromReusedByteArray(v.getBytes)
              private val size = strToBinary.length
              // Row group can be dropped when the whole [min, max] range lies
              // strictly before or after the prefix (unsigned byte order).
              override def canDrop(statistics: Statistics[Binary]): Boolean = {
                val comparator = PrimitiveComparator.UNSIGNED_LEXICOGRAPHICAL_BINARY_COMPARATOR
                val max = statistics.getMax
                val min = statistics.getMin
                comparator.compare(max.slice(0, math.min(size, max.length)), strToBinary) < 0 ||
                  comparator.compare(min.slice(0, math.min(size, min.length)), strToBinary) > 0
              }
              // For NOT(startsWith): droppable only when every value in the
              // group starts with the prefix (min and max both match it).
              override def inverseCanDrop(statistics: Statistics[Binary]): Boolean = {
                val comparator = PrimitiveComparator.UNSIGNED_LEXICOGRAPHICAL_BINARY_COMPARATOR
                val max = statistics.getMax
                val min = statistics.getMin
                comparator.compare(max.slice(0, math.min(size, max.length)), strToBinary) == 0 &&
                  comparator.compare(min.slice(0, math.min(size, min.length)), strToBinary) == 0
              }
              override def keep(value: Binary): Boolean = {
                value != null && UTF8String.fromBytes(value.getBytes).startsWith(
                  UTF8String.fromBytes(strToBinary.getBytes))
              }
            }
          )
        }
      case _ => None
    }
  }
}
| nchammas/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilters.scala | Scala | apache-2.0 | 39,407 |
package com.github.agourlay.cornichon.steps.check
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.steps.cats.EffectStep
import com.github.agourlay.cornichon.steps.check.checkModel._
import com.github.agourlay.cornichon.testHelpers.CommonTestSuite
import munit.FunSuite
/**
 * Tests for [[CheckModelStep]]: model validation errors (empty transitions,
 * duplicate targets, bad weights), termination bounds, failure propagation and
 * generator usage.
 */
class CheckModelStepSpec extends FunSuite with CommonTestSuite {

  /**
   * Builds a one-generator property for test models.
   *
   * @param name         property description
   * @param preNeverValid when true, the precondition never holds
   * @param step         the invariant step to run (identity by default)
   * @param callGen      when true, the invariant invokes the generator, so a
   *                     broken generator will surface as a failure
   */
  def dummyProperty1(name: String, preNeverValid: Boolean = false, step: Step = identityEffectStep, callGen: Boolean = false): PropertyN[Int, NoValue, NoValue, NoValue, NoValue, NoValue] =
    Property1(
      description = name,
      preCondition = if (preNeverValid) neverValidAssertStep else identityEffectStep,
      invariant = g => if (callGen) { g(); step } else step)

  test("detect empty transition for starting property") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property")
    // `starting` has no outgoing transitions defined — the model is invalid.
    val transitions = Map(otherAction -> ((100, starting) :: Nil))
    val model = Model("model with empty transition for starting property", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    scenarioFailsWithMessage(res) {
      """Scenario 'scenario with checkStep' failed:
        |
        |at step:
        |Checking model 'model with empty transition for starting property' with maxNumberOfRuns=10 and maxNumberOfTransitions=10
        |
        |with error(s):
        |No outgoing transitions definition found for starting property 'starting property'
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  test("detect duplicate transition to target") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property")
    // `starting` appears twice as a target of `otherAction` — invalid model.
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((80, starting) :: (20, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    scenarioFailsWithMessage(res) {
      """Scenario 'scenario with checkStep' failed:
        |
        |at step:
        |Checking model 'model with empty transition for starting' with maxNumberOfRuns=10 and maxNumberOfTransitions=10
        |
        |with error(s):
        |Transitions definition from 'other property' contains duplicates target properties
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  test("detect incorrect weigh definition") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property")
    // Weights leaving `otherAction` sum to 101 instead of 100 — invalid model.
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((101, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    scenarioFailsWithMessage(res) {
      """Scenario 'scenario with checkStep' failed:
        |
        |at step:
        |Checking model 'model with empty transition for starting' with maxNumberOfRuns=10 and maxNumberOfTransitions=10
        |
        |with error(s):
        |Transitions definition from 'other property' contains incorrect weight definition (less or above 100)
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  test("always terminates with maxNumberOfRuns") {
    val maxRun = 100
    // Counts how many times the starting property's invariant runs.
    var uglyCounter = 0
    val incrementEffect: Step = EffectStep.fromSync("identity", sc => { uglyCounter = uglyCounter + 1; sc.session })
    val starting = dummyProperty1("starting property", step = incrementEffect)
    val otherAction = dummyProperty1("other property")
    val transitions = Map(starting -> ((100, otherAction) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = maxRun, 1, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    res match {
      case f: SuccessScenarioReport =>
        assert(f.isSuccess)
        // The starting property runs exactly once per run.
        assert(uglyCounter == maxRun)
      case _ =>
        assert(cond = false, s"expected SuccessScenarioReport but got $res")
    }
  }

  test("always terminates with maxNumberOfTransitions (even with cyclic model)") {
    val maxTransition = 100
    // Counts transitions taken inside the cycle between the two properties.
    var uglyCounter = 0
    val incrementEffect: Step = EffectStep.fromSync("identity", sc => { uglyCounter = uglyCounter + 1; sc.session })
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property", step = incrementEffect)
    val otherActionTwo = dummyProperty1("other property two ", step = incrementEffect)
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((100, otherActionTwo) :: Nil),
      otherActionTwo -> ((100, otherAction) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = 1, maxTransition, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    res match {
      case f: SuccessScenarioReport =>
        assert(f.isSuccess)
        assert(uglyCounter == maxTransition)
      case _ =>
        assert(cond = false, s"expected SuccessScenarioReport but got $res")
    }
  }

  test("report a failure when an action explodes") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property", step = brokenEffectStep)
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((100, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = 10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    scenarioFailsWithMessage(res) {
      """Scenario 'scenario with checkStep' failed:
        |
        |at step:
        |always boom
        |
        |with error(s):
        |boom!
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  test("report a failure when no precondition is valid") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property", preNeverValid = true)
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((100, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(integerGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = 10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    scenarioFailsWithMessage(res) {
      """Scenario 'scenario with checkStep' failed:
        |
        |at step:
        |Checking model 'model with empty transition for starting' with maxNumberOfRuns=10 and maxNumberOfTransitions=10
        |
        |with error(s):
        |No outgoing transition found from `starting property` to another property with valid pre-conditions
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  test("not using a generator should really not call it") {
    val starting = dummyProperty1("starting property")
    val otherAction = dummyProperty1("other property")
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((100, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    // passing a broken gen but the actions are not calling it...should be good!
    val modelRunner = ModelRunner.make(brokenIntGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = 10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    res match {
      case f: SuccessScenarioReport =>
        assert(f.isSuccess)
      case _ =>
        assert(cond = false, s"expected SuccessScenarioReport but got $res")
    }
  }

  test("fails the test if the gen throws") {
    val starting = dummyProperty1("starting property")
    // callGen = true forces the invariant to invoke the broken generator.
    val otherAction = dummyProperty1("other property", callGen = true)
    val transitions = Map(
      starting -> ((100, otherAction) :: Nil),
      otherAction -> ((100, starting) :: Nil))
    val model = Model("model with empty transition for starting", starting, transitions)
    val modelRunner = ModelRunner.make(brokenIntGen)(model)
    val checkStep = CheckModelStep(maxNumberOfRuns = 10, 10, modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
    res match {
      case f: FailureScenarioReport =>
        assert(!f.isSuccess)
      case _ =>
        // Fixed: this branch expects a FailureScenarioReport, and the previous
        // message incorrectly claimed a SuccessScenarioReport was expected.
        assert(cond = false, s"expected FailureScenarioReport but got $res")
    }
  }
}
| agourlay/cornichon | cornichon-core/src/test/scala/com/github/agourlay/cornichon/steps/check/CheckModelStepSpec.scala | Scala | apache-2.0 | 10,124 |
package com.realizationtime.btdogg.frontend
import java.nio.file.{Files, Path, Paths}
import com.typesafe.config.{Config, ConfigFactory}
import net.ceedubs.ficus.Ficus
import net.ceedubs.ficus.readers.ArbitraryTypeReader
/**
 * Loads the frontend configuration from the `btdogg.frontend` section, letting an
 * `application.conf` in the working directory (when present) override the bundled defaults.
 */
trait FrontendServerConfig {

  import ArbitraryTypeReader._
  import Ficus._

  /** Optional override file: ./application.conf in the process working directory. */
  val CustomConfigPath: Path = Paths.get(System.getProperty("user.dir"), "application.conf")

  protected case class HttpConfig(interface: String, port: Int)
  protected case class MongoConfig(uri: String)

  // Bundled defaults, overridden by the local file when it exists, narrowed to
  // the btdogg.frontend section.
  private val rootConfig: Config = {
    val defaults = ConfigFactory.load()
    val merged =
      if (Files.exists(CustomConfigPath))
        ConfigFactory.parseFile(CustomConfigPath.toFile).withFallback(defaults)
      else
        defaults
    merged.getConfig("btdogg").getConfig("frontend")
  }

  protected val httpConfig: HttpConfig = rootConfig.as[HttpConfig]("http")
  val mongoConfig: MongoConfig = rootConfig.as[MongoConfig]("mongo")

  import net.ceedubs.ficus.readers.EnumerationReader._

  val searchMode: SearchMode.Value = rootConfig.as[SearchMode.Value]("searchMode")
}
| bwrega/btdogg | frontend/server/src/main/scala/com/realizationtime/btdogg/frontend/FrontendServerConfig.scala | Scala | mit | 1,059 |
/**
* Copyright (c) 2015 Basho Technologies, Inc.
*
* This file is provided to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.basho.riak.spark.util
import com.basho.riak.client.core.query.timeseries.{ColumnDescription, Row}
import com.basho.riak.client.core.query.{Location, RiakObject}
import com.basho.riak.spark.query.TSDataQueryingIterator
import scala.reflect.ClassTag
/**
 * Iterator adapter that lazily maps each element of `dataIterator` from the
 * source type S to the target type R using `convert`.
 */
class DataConvertingIterator[R, S](dataIterator: Iterator[S], convert: (S) => R)
                                  (implicit ct: ClassTag[R]) extends Iterator[R] {

  // Delegate "is there more data?" to the wrapped iterator.
  override def hasNext: Boolean = dataIterator.hasNext

  // Pull the next raw element and convert it on the fly.
  override def next(): R = convert(dataIterator.next())
}
object DataConvertingIterator {
type KV_SOURCE_DATA = (Location, RiakObject)
type TS_SOURCE_DATA = (Seq[ColumnDescription], TSDataQueryingIterator)
def createRiakObjectConverting[R](kvIterator: Iterator[KV_SOURCE_DATA], convert: (Location, RiakObject) => R)
(implicit ct: ClassTag[R]): DataConvertingIterator[R, KV_SOURCE_DATA] =
new DataConvertingIterator[R, KV_SOURCE_DATA](kvIterator, new Function[KV_SOURCE_DATA, R] {
override def apply(v1: (Location, RiakObject)): R = {
convert(v1._1, v1._2)
}
})
def createTSConverting[R](tsdata: TS_SOURCE_DATA, convert: (Seq[ColumnDescription], Row) => R)
(implicit ct: ClassTag[R]): DataConvertingIterator[R, Row] =
new DataConvertingIterator[R, Row](tsdata._2, new Function[Row, R] {
override def apply(v1: Row): R = tsdata match {
case (Nil, iterator) => convert(iterator.columnDefs, v1)
case (cds, _) => convert(cds, v1)
}
})
} | basho/spark-riak-connector | connector/src/main/scala/com/basho/riak/spark/util/DataConvertingIterator.scala | Scala | apache-2.0 | 2,224 |
package sum
import scala.annotation.tailrec
/**
* Created by cganoo on 5/4/14.
*/
/**
 * Demonstrates tail-recursive vs. naive recursive summation of f(a) + f(a+1) + ... + f(b).
 * An empty range (a > b) sums to 0.
 */
object Sum extends App {

  /** Sums f over [a, b] using an accumulator, so the recursion is tail-recursive. */
  def sumTR(f: Int => Int, a: Int, b: Int): Int = {
    @tailrec
    def go(i: Int, acc: Int): Int =
      if (i > b) acc
      else go(i + 1, acc + f(i))
    go(a, 0)
  }

  /** Sums f over [a, b] with plain (non-tail) recursion; deep ranges may overflow the stack. */
  def sumNTR(f: Int => Int, a: Int, b: Int): Int =
    if (a > b) 0
    else f(a) + sumNTR(f, a + 1, b)
}
| cganoo/scala-algos | src/main/scala/sum/Sum.scala | Scala | mit | 408 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools.data
import com.beust.jcommander.{IValueValidator, Parameter, ParameterException, Parameters}
import org.locationtech.geomesa.kudu.KuduSystemProperties
import org.locationtech.geomesa.kudu.data.KuduDataStore
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams
import org.locationtech.geomesa.kudu.tools.data.KuduCreateSchemaCommand.KuduCreateSchemaParams
import org.locationtech.geomesa.kudu.utils.ColumnConfiguration
import org.locationtech.geomesa.tools.data.CreateSchemaCommand
import org.locationtech.geomesa.tools.data.CreateSchemaCommand.CreateSchemaParams
import org.opengis.feature.simple.SimpleFeatureType
import scala.util.control.NonFatal
/** `create-schema` CLI command specialised for the Kudu-backed GeoMesa data store. */
class KuduCreateSchemaCommand extends CreateSchemaCommand[KuduDataStore] with KuduDataStoreCommand {
  override val params = new KuduCreateSchemaParams()
  // Push the --compression option (when set) into the corresponding Kudu system property.
  override protected def setBackendSpecificOptions(featureType: SimpleFeatureType): Unit =
    Option(params.compression).foreach(KuduSystemProperties.Compression.set)
}
object KuduCreateSchemaCommand {

  /** CLI parameters for the Kudu create-schema command. */
  @Parameters(commandDescription = "Create a GeoMesa feature type")
  class KuduCreateSchemaParams extends CreateSchemaParams with KuduParams {
    @Parameter(names = Array("--compression"),
      description = "Default compression for data files. One of 'lz4', 'snappy', 'zlib' or 'no_compression'", required = false, validateValueWith = classOf[CompressionTypeValidator])
    var compression: String = KuduSystemProperties.Compression.default
  }

  /**
   * JCommander validator for the --compression option: accepts exactly the values
   * understood by [[ColumnConfiguration.compression]].
   */
  class CompressionTypeValidator extends IValueValidator[String] {
    override def validate(name: String, value: String): Unit = {
      try { ColumnConfiguration.compression(Option(value)) } catch {
        case NonFatal(_) =>
          // Fixed grammar in the user-facing message ("Values types" -> "Valid types").
          throw new ParameterException("Invalid compression type. Valid types are " +
              "'lz4', 'snappy', 'zlib' or 'no_compression'")
      }
    }
  }
}
| aheyne/geomesa | geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/data/KuduCreateSchemaCommand.scala | Scala | apache-2.0 | 2,457 |
package brainiak.samples.npuzzle.ui.controller
import javafx.scene.input.KeyEvent
import brainiak.samples.npuzzle.ui.Board
/**
* Created by thiago on 1/25/14.
*/
/**
 * Keyboard-driven controller for the puzzle [[Board]]: arrow-key presses trigger tile moves.
 */
class HumanController(val b: Board) extends BasicController {
  /**
   * Maps an arrow-key press to a relative board offset and performs the move.
   * Non-arrow keys are ignored.
   */
  override def handleCommand(evt: KeyEvent) = {
    if (evt.getCode.isArrowKey) {
      // sqrt(numHoax + 1): presumably the side length of the square board — TODO confirm.
      val upDownVal = Math.sqrt(board.numHoax + 1).toInt
      // Keys are looked up by KeyCode ordinal; 16..19 presumably correspond to the
      // LEFT/UP/RIGHT/DOWN arrow KeyCodes — NOTE(review): confirm against the JavaFX KeyCode enum.
      val directions = Map(16 -> 1, 17 -> upDownVal, 18 -> -1, 19 -> -upDownVal)
      val target = directions(evt.getCode.ordinal())
      move(target)
    }
  }
  /** Puts the controls into "stopped" state and grabs keyboard focus for the board. */
  override def startAction() = {
    board.controls.stoppedStatus()
    board.requestFocus()
  }
  // Expose the constructor argument as this controller's board.
  override def board = b
}
| pintowar/brainiak | brainiak-samples/src/main/scala/brainiak/samples/npuzzle/ui/controller/HumanController.scala | Scala | apache-2.0 | 661 |
package waldap.core.controller.admin
import io.github.gitbucket.scalatra.forms._
import waldap.core.controller.ControllerBase
import org.scalatra.{FlashMapSupport, Ok}
import org.slf4j.LoggerFactory
import waldap.core.service.LDAPAccountService
/**
 * Admin pages for managing LDAP users: list, create, edit, change password,
 * delete, and group membership management. All mutating routes redirect back
 * to the user list on success.
 */
trait UserControllerBase extends ControllerBase with FlashMapSupport with LDAPAccountService {
  private val logger = LoggerFactory.getLogger(getClass)

  /** Values posted by the "add user" form. */
  case class UserAddForm(
    username: String,
    password: String,
    sn: String,
    givenName: String,
    displayName: String,
    mail: String
  )

  val useraddform = mapping(
    "username" -> text(required, maxlength(40)),
    "password" -> text(required, maxlength(40)),
    "sn" -> text(required, maxlength(40)),
    "givenName" -> text(required, maxlength(40)),
    "displayName" -> text(required, maxlength(40)),
    "mail" -> text(required, maxlength(40))
  )(UserAddForm.apply)

  /** Values posted by the "edit user" form; the target user name comes from the URL. */
  case class UserEditForm(sn: String, givenName: String, displayName: String, mail: String)

  val usereditform = mapping(
    "sn" -> text(required, maxlength(40)),
    "givenName" -> text(required, maxlength(40)),
    "displayName" -> text(required, maxlength(40)),
    "mail" -> text(required, maxlength(40))
  )(UserEditForm.apply)

  /** Values posted by the "change password" form. */
  case class PasswordForm(password: String)

  val passwordform = mapping(
    "password" -> trim(label("Password", text(required)))
  )(PasswordForm.apply)

  // List all LDAP users and groups.
  get("/admin/users") {
    waldap.core.admin.user.html.userlist(GetLDAPUsers, GetLDAPGroups)
  }

  // Update the attributes of an existing user.
  post("/admin/users/:name/edit", usereditform) { form =>
    params.get("name").map { n =>
      EditLDAPUser(n, form.givenName, form.sn, form.displayName, form.mail)
      redirect("/admin/users")
    } getOrElse (NotFound())
  }

  // Set a new password for an existing user.
  post("/admin/users/:name/password", passwordform) { form =>
    params.get("name").map { n =>
      ChangeLDAPUserPassword(n, form.password)
      redirect("/admin/users")
    } getOrElse (NotFound())
  }

  // Remove a user.
  get("/admin/users/:name/delete") {
    params.get("name").map { n =>
      DeleteLDAPUser(n)
      redirect("/admin/users")
    } getOrElse (NotFound())
  }

  // Create a new user.
  post("/admin/users/add", useraddform) { form =>
    AddLDAPUser(form.username, form.password, form.givenName, form.sn, form.displayName, form.mail)
    redirect("/admin/users")
  }

  // Add a user to a group. Missing path parameters now yield 404 instead of a 500
  // (previously accessed via Option.get), consistent with the routes above.
  get("/admin/users/join/:user/:group") {
    (params.get("user"), params.get("group")) match {
      case (Some(userName), Some(groupName)) =>
        // Was a bare println; use the class logger defined above instead.
        logger.debug(s"join user:$userName group:$groupName")
        JoinToLDAPGroup(userName, groupName)
        redirect("/admin/users")
      case _ => NotFound()
    }
  }

  // Remove a user from a group.
  get("/admin/users/disjoin/:user/:group") {
    (params.get("user"), params.get("group")) match {
      case (Some(userName), Some(groupName)) =>
        logger.debug(s"disjoin user:$userName group:$groupName")
        DisjoinFromLDAPGroup(userName, groupName)
        redirect("/admin/users")
      case _ => NotFound()
    }
  }
}
/** Concrete admin user controller; all behaviour lives in [[UserControllerBase]]. */
class UserController extends UserControllerBase
| kounoike/waldap | src/main/scala/waldap/core/controller/admin/UserController.scala | Scala | apache-2.0 | 2,885 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.WeightedAvgWithMerge
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.{StreamTableTestUtil, TableTestBase}
import org.junit.Test
class GroupWindowTest extends TableTestBase {
private val streamUtil: StreamTableTestUtil = streamTestUtil()
private val table = streamUtil.addTable[(Int, String, Long)](
"MyTable", 'a, 'b, 'c, 'proctime.proctime, 'rowtime.rowtime)
@Test
def testTumbleFunction() = {
streamUtil.tableEnv.registerFunction("weightedAvg", new WeightedAvgWithMerge)
val sql =
"SELECT " +
" COUNT(*), weightedAvg(c, a) AS wAvg, " +
" TUMBLE_START(rowtime, INTERVAL '15' MINUTE), " +
" TUMBLE_END(rowtime, INTERVAL '15' MINUTE)" +
"FROM MyTable " +
"GROUP BY TUMBLE(rowtime, INTERVAL '15' MINUTE)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "rowtime", "c", "a")
),
term("window", "TumblingGroupWindow('w$, 'rowtime, 900000.millis)"),
term("select",
"COUNT(*) AS EXPR$0",
"weightedAvg(c, a) AS wAvg",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "EXPR$0", "wAvg", "w$start AS EXPR$2", "w$end AS EXPR$3")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testHoppingFunction() = {
streamUtil.tableEnv.registerFunction("weightedAvg", new WeightedAvgWithMerge)
val sql =
"SELECT COUNT(*), weightedAvg(c, a) AS wAvg, " +
" HOP_START(proctime, INTERVAL '15' MINUTE, INTERVAL '1' HOUR), " +
" HOP_END(proctime, INTERVAL '15' MINUTE, INTERVAL '1' HOUR) " +
"FROM MyTable " +
"GROUP BY HOP(proctime, INTERVAL '15' MINUTE, INTERVAL '1' HOUR)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "proctime", "c", "a")
),
term("window", "SlidingGroupWindow('w$, 'proctime, 3600000.millis, 900000.millis)"),
term("select",
"COUNT(*) AS EXPR$0",
"weightedAvg(c, a) AS wAvg",
"start('w$) AS w$start",
"end('w$) AS w$end",
"proctime('w$) AS w$proctime")
),
term("select", "EXPR$0", "wAvg", "w$start AS EXPR$2", "w$end AS EXPR$3")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testSessionFunction() = {
streamUtil.tableEnv.registerFunction("weightedAvg", new WeightedAvgWithMerge)
val sql =
"SELECT " +
" COUNT(*), weightedAvg(c, a) AS wAvg, " +
" SESSION_START(proctime, INTERVAL '15' MINUTE), " +
" SESSION_END(proctime, INTERVAL '15' MINUTE) " +
"FROM MyTable " +
"GROUP BY SESSION(proctime, INTERVAL '15' MINUTE)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "proctime", "c", "a")
),
term("window", "SessionGroupWindow('w$, 'proctime, 900000.millis)"),
term("select",
"COUNT(*) AS EXPR$0",
"weightedAvg(c, a) AS wAvg",
"start('w$) AS w$start",
"end('w$) AS w$end",
"proctime('w$) AS w$proctime")
),
term("select", "EXPR$0", "wAvg", "w$start AS EXPR$2", "w$end AS EXPR$3")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testExpressionOnWindowAuxFunction() = {
val sql =
"SELECT " +
" COUNT(*), " +
" TUMBLE_END(rowtime, INTERVAL '15' MINUTE) + INTERVAL '1' MINUTE " +
"FROM MyTable " +
"GROUP BY TUMBLE(rowtime, INTERVAL '15' MINUTE)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "rowtime")
),
term("window", "TumblingGroupWindow('w$, 'rowtime, 900000.millis)"),
term("select",
"COUNT(*) AS EXPR$0",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "EXPR$0", "+(w$end, 60000:INTERVAL MINUTE) AS EXPR$1")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testExpressionOnWindowHavingFunction() = {
val sql =
"SELECT " +
" COUNT(*), " +
" HOP_START(rowtime, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE) " +
"FROM MyTable " +
"GROUP BY HOP(rowtime, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE) " +
"HAVING " +
" SUM(a) > 0 AND " +
" QUARTER(HOP_START(rowtime, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE)) = 1"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "rowtime, a")
),
term("window", "SlidingGroupWindow('w$, 'rowtime, 60000.millis, 900000.millis)"),
term("select",
"COUNT(*) AS EXPR$0",
"SUM(a) AS $f1",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "EXPR$0", "w$start AS EXPR$1"),
term("where",
"AND(>($f1, 0), " +
"=(EXTRACT(FLAG(QUARTER), w$start), 1:BIGINT))")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testMultiWindowSqlWithAggregation() = {
val sql =
s"""SELECT
TUMBLE_ROWTIME(zzzzz, INTERVAL '0.004' SECOND),
TUMBLE_END(zzzzz, INTERVAL '0.004' SECOND),
COUNT(`a`) AS `a`
FROM (
SELECT
COUNT(`a`) AS `a`,
TUMBLE_ROWTIME(rowtime, INTERVAL '0.002' SECOND) AS `zzzzz`
FROM MyTable
GROUP BY TUMBLE(rowtime, INTERVAL '0.002' SECOND)
)
GROUP BY TUMBLE(zzzzz, INTERVAL '0.004' SECOND)"""
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "rowtime, a")
),
term("window", "TumblingGroupWindow('w$, 'rowtime, 2.millis)"),
term("select",
"COUNT(a) AS a",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "w$rowtime AS zzzzz")
),
term("window", "TumblingGroupWindow('w$, 'zzzzz, 4.millis)"),
term("select",
"COUNT(*) AS a",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "w$rowtime AS EXPR$0", "w$end AS EXPR$1", "a")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testDecomposableAggFunctions() = {
val sql =
"SELECT " +
" VAR_POP(c), VAR_SAMP(c), STDDEV_POP(c), STDDEV_SAMP(c), " +
" TUMBLE_START(rowtime, INTERVAL '15' MINUTE), " +
" TUMBLE_END(rowtime, INTERVAL '15' MINUTE)" +
"FROM MyTable " +
"GROUP BY TUMBLE(rowtime, INTERVAL '15' MINUTE)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "rowtime", "c", "*(c, c) AS $f2")
),
term("window", "TumblingGroupWindow('w$, 'rowtime, 900000.millis)"),
term("select",
"SUM($f2) AS $f0",
"SUM(c) AS $f1",
"COUNT(c) AS $f2",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select",
"/(-($f0, /(*($f1, $f1), $f2)), $f2) AS EXPR$0",
"/(-($f0, /(*($f1, $f1), $f2)), CASE(=($f2, 1), null:BIGINT, -($f2, 1))) AS EXPR$1",
"CAST(POWER(/(-($f0, /(*($f1, $f1), $f2)), $f2), 0.5:DECIMAL(2, 1))) AS EXPR$2",
"CAST(POWER(/(-($f0, /(*($f1, $f1), $f2)), CASE(=($f2, 1), null:BIGINT, -($f2, 1))), " +
"0.5:DECIMAL(2, 1))) AS EXPR$3",
"w$start AS EXPR$4",
"w$end AS EXPR$5")
)
streamUtil.verifySql(sql, expected)
}
@Test
def testReturnTypeInferenceForWindowAgg() = {
val innerQuery =
"""
|SELECT
| CASE a WHEN 1 THEN 1 ELSE 99 END AS correct,
| rowtime
|FROM MyTable
""".stripMargin
val sql =
"SELECT " +
" sum(correct) as s, " +
" avg(correct) as a, " +
" TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as wStart " +
s"FROM ($innerQuery) " +
"GROUP BY TUMBLE(rowtime, INTERVAL '15' MINUTE)"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamGroupWindowAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "CASE(=(a, 1), 1, 99) AS correct", "rowtime")
),
term("window", "TumblingGroupWindow('w$, 'rowtime, 900000.millis)"),
term("select",
"SUM(correct) AS s",
"AVG(correct) AS a",
"start('w$) AS w$start",
"end('w$) AS w$end",
"rowtime('w$) AS w$rowtime",
"proctime('w$) AS w$proctime")
),
term("select", "CAST(s) AS s", "CAST(a) AS a", "w$start AS wStart")
)
streamUtil.verifySql(sql, expected)
}
}
| fhueske/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/sql/GroupWindowTest.scala | Scala | apache-2.0 | 11,612 |
package akka.persistence.pg.journal
import java.sql.BatchUpdateException
import akka.actor._
import akka.pattern._
import akka.persistence.JournalProtocol.{RecoverySuccess, ReplayMessagesFailure}
import akka.persistence.journal.AsyncWriteJournal
import akka.persistence.pg.journal.PgAsyncWriteJournal._
import akka.persistence.pg.streams.EventsPublisherStageLogic.CancelEventsStage
import akka.persistence.pg.{EventTag, PgConfig, PgExtension, PluginConfig}
import akka.persistence.{AtomicWrite, PersistentRepr}
import akka.serialization.{Serialization, SerializationExtension}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl.{Keep, Sink, Source}
import slick.jdbc.{ResultSetConcurrency, ResultSetType}
import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
class PgAsyncWriteJournal extends AsyncWriteJournal with ActorLogging with PgConfig with JournalStore {
implicit val executionContext: ExecutionContextExecutor = context.system.dispatcher
override val serialization: Serialization = SerializationExtension(context.system)
override val pgExtension: PgExtension = PgExtension(context.system)
override lazy val pluginConfig: PluginConfig = pgExtension.pluginConfig
lazy val writeStrategy: WriteStrategy = pluginConfig.writeStrategy(this.context)
import driver.api._
def storeActions(entries: Seq[JournalEntryInfo]): Seq[DBIO[_]] = {
val storeEventsActions: Seq[DBIO[_]] = Seq(journals ++= entries.map(_.entry))
val extraDBIOActions: Seq[DBIO[_]] = entries.flatMap(_.extraDBIOInfo).map(_.action)
storeEventsActions ++ extraDBIOActions
}
def failureHandlers(entries: Seq[JournalEntryInfo]): Seq[PartialFunction[Throwable, Unit]] =
entries.flatMap(_.extraDBIOInfo).map(_.failureHandler)
override def asyncWriteMessages(writes: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = {
log.debug("asyncWriteMessages {} atomicWrites", writes.size)
val batches: immutable.Seq[Try[Seq[JournalEntryInfo]]] = writes map { atomicWrite =>
toJournalEntries(atomicWrite.payload)
}
def storeBatch(entries: Seq[JournalEntryInfo]): Future[Try[Unit]] = {
val result = writeStrategy
.store(storeActions(entries), new Notifier(entries.map(_.entry), this))
.map { Success.apply }
result.failed.foreach {
case e: BatchUpdateException => log.error(e.getNextException, "problem storing events")
case NonFatal(e) => log.error(e, "problem storing events")
case _ =>
}
result
}
val storedBatches = batches map {
case Failure(t) => Future.successful(Failure(t))
case Success(batch) =>
failureHandlers(batch).toList match {
case Nil => storeBatch(batch)
case h :: Nil => storeBatch(batch).recover { case e: Throwable if h.isDefinedAt(e) => Failure(e) }
case _ => Future.failed(new RuntimeException("you can only have one failureHandler for each AtomicWrite"))
}
}
Future sequence storedBatches
}
override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = {
log.debug(
"Async read for highest sequence number for processorId: [{}] (hint, seek from nr: [{}])",
persistenceId,
fromSequenceNr
)
database.run {
journals
.filter(_.persistenceId === persistenceId)
.map((table: JournalTable) => table.sequenceNr)
.max
.result
} map {
_.getOrElse(0)
}
}
implicit val materializer = ActorMaterializer(
Some(ActorMaterializerSettings(context.system).withInputBuffer(16, 1024))
)
override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(
replayCallback: (PersistentRepr) => Unit
): Future[Unit] = {
log.debug(
"Async replay for persistenceId [{}], from sequenceNr: [{}], to sequenceNr: [{}] with max records: [{}]",
persistenceId,
fromSequenceNr,
toSequenceNr,
max
)
val publisher = database.stream {
journals
.filter(_.persistenceId === persistenceId)
.filter(_.sequenceNr >= fromSequenceNr)
.filter(_.sequenceNr <= toSequenceNr)
.sortBy(_.sequenceNr)
.take(max)
.result
.withStatementParameters(
rsType = ResultSetType.ForwardOnly,
rsConcurrency = ResultSetConcurrency.ReadOnly,
fetchSize = 1000
)
.transactionally
}
Source
.fromPublisher(publisher)
.toMat(
Sink.foreach[JournalTable#TableElementType] { e =>
replayCallback(toPersistentRepr(e))
}
)(Keep.right)
.run()
.map(_ => ())
}
  /**
   * Logical delete: marks all events of `persistenceId` with sequence number up to
   * and including `toSequenceNr` as deleted, without physically removing the rows.
   */
  override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = {
    //TODO we could alternatively permanently delete all but the last message and mark the last message as deleted
    // NOTE(review): the sortBy below has no effect on the generated UPDATE statement —
    // presumably a leftover from a SELECT-based implementation; confirm before removing.
    val selectedEntries: Query[JournalTable, JournalEntry, Seq] = journals
      .filter(_.persistenceId === persistenceId)
      .filter(_.sequenceNr <= toSequenceNr)
      .filter(_.deleted === false)
      .sortBy(_.sequenceNr.desc)
    database.run(selectedEntries.map(_.deleted).update(true)).map(_ => ())
  }
// ------------------------------------------------------------
// --- Akka Persistence Query logic ------
override def receivePluginInternal: Receive = {
case CancelSubscribers => cancelSubscribers()
// requested to send events containing given tags between from and to rowId
case ReplayTaggedMessages(fromRowId, toRowId, max, tags, replyTo) =>
handleReplayTaggedMessages(fromRowId, toRowId, max, tags, replyTo)
// requested to send events containing given tags between from and to rowId
case ReplayMessages(fromRowId, toRowId, max, replyTo) =>
handleReplayMessages(fromRowId, toRowId, max, replyTo)
// subscribe sender to tag notification
case SubscribeTags(tags) => addTagSubscriber(sender, tags)
//subscribe sender for all events
case SubscribeAllEvents => addSubscriber(sender)
case SubscribePersistenceId(persistenceId) => addPersistenceIdSubscriber(sender, persistenceId)
// unsubscribe terminated actor
case Terminated(ref) => removeSubscriber(ref)
}
private def handleReplayTaggedMessages(
fromRowId: Long,
toRowId: Long,
max: Long,
eventTags: Set[EventTag],
replyTo: ActorRef
): Unit = {
val correctedFromRowId = math.max(0L, fromRowId - 1)
asyncReadHighestRowIdWithTags(eventTags, correctedFromRowId).flatMap { highestRowId =>
val calculatedToRowId = math.min(toRowId, highestRowId)
if (highestRowId == 0L || fromRowId > calculatedToRowId) {
// we are done if there is nothing to send
Future.successful(highestRowId)
} else {
asyncReplayTaggedMessagesBoundedByRowIds(eventTags, fromRowId, calculatedToRowId, max) {
case ReplayedTaggedMessage(persistentRepr, tags, offset) =>
adaptFromJournal(persistentRepr).foreach { adaptedPersistentRepr =>
replyTo.tell(ReplayedTaggedMessage(adaptedPersistentRepr, tags, offset), Actor.noSender)
}
}.map(_ => highestRowId)
}
} map { highestRowId =>
RecoverySuccess(highestRowId)
} recover {
case e => ReplayMessagesFailure(e)
} pipeTo replyTo
()
}
private def handleReplayMessages(fromRowId: Long, toRowId: Long, max: Long, replyTo: ActorRef): Unit = {
val correctedFromRowId = math.max(0L, fromRowId - 1)
asyncReadHighestRowId(correctedFromRowId).flatMap { highestRowId =>
val calculatedToRowId = math.min(toRowId, highestRowId)
if (highestRowId == 0L || fromRowId > calculatedToRowId) {
// we are done if there is nothing to send
Future.successful(highestRowId)
} else {
asyncReplayMessagesBoundedByRowIds(fromRowId, calculatedToRowId, max) {
case ReplayedEventMessage(persistentRepr, offset) =>
adaptFromJournal(persistentRepr).foreach { adaptedPersistentRepr =>
replyTo.tell(ReplayedEventMessage(adaptedPersistentRepr, offset), Actor.noSender)
}
}.map(_ => highestRowId)
}
} map { highestRowId =>
RecoverySuccess(highestRowId)
} recover {
case e => ReplayMessagesFailure(e)
} pipeTo replyTo
()
}
def asyncReadHighestRowIdWithTags(tags: Set[EventTag], fromRowId: Long): Future[Long] = {
val query =
journals
.filter(_.idForQuery >= fromRowId)
.filter(tagsFilter(tags))
.map(_.idForQuery)
.max
database
.run(query.result)
.map(_.getOrElse(0L)) // we don't want an Option[Long], but a Long
}
  /**
   * Highest `idForQuery` value at or above `fromRowId`, or 0 when no such row exists.
   * Mirrors `asyncReadHighestRowIdWithTags` without the tag filter.
   */
  def asyncReadHighestRowId(fromRowId: Long): Future[Long] = {
    val query =
      journals
        .filter(_.idForQuery >= fromRowId)
        .map(_.idForQuery)
        .max
    database
      .run(query.result)
      .map(_.getOrElse(0L)) // we don't want an Option[Long], but a Long
  }
def asyncReplayTaggedMessagesBoundedByRowIds(tags: Set[EventTag], fromRowId: Long, toRowId: Long, max: Long)(
replayCallback: ReplayedTaggedMessage => Unit
): Future[Unit] = {
val query =
journals
.filter(_.idForQuery >= fromRowId)
.filter(_.idForQuery <= toRowId)
.filter(tagsFilter(tags))
.sortBy(_.idForQuery)
.take(max)
database
.run(query.result)
.map { entries =>
log.debug("Replaying {} events ({} <= rowId <= {} and tags: {})", entries.size, fromRowId, toRowId, tags)
entries.foreach { entry =>
val persistentRepr = toPersistentRepr(entry)
replayCallback(ReplayedTaggedMessage(persistentRepr, tags, idForQuery(entry)))
}
}
}
def asyncReplayMessagesBoundedByRowIds(fromRowId: Long, toRowId: Long, max: Long)(
replayCallback: ReplayedEventMessage => Unit
): Future[Unit] = {
val query =
journals
.filter(_.idForQuery >= fromRowId)
.filter(_.idForQuery <= toRowId)
.sortBy(_.idForQuery)
.take(max)
database
.run(query.result)
.map { entries =>
log.debug("Replaying {} events ({} <= rowId <= {})", entries.size, fromRowId, toRowId)
entries.foreach { entry =>
val persistentRepr = toPersistentRepr(entry)
replayCallback(ReplayedEventMessage(persistentRepr, idForQuery(entry)))
}
}
}
private def idForQuery(entry: JournalEntry): Long = {
val id = if (pluginConfig.idForQuery == "rowid") entry.rowid else entry.id
id.getOrElse(sys.error("something went wrong, probably a misconfiguration"))
}
def notifyEventsAvailable(entries: Seq[JournalEntry]): Unit = {
var persistenceIds = Set.empty[String]
entries foreach { entry =>
//notify event with tag available
if (hasTagSubscribers) entry.tags.foreach(notifyTagChange)
persistenceIds += entry.persistenceId
}
//notify event for persistenceId available
if (hasPersistenceIdSubscribers) {
persistenceIds.foreach(notifyPersistenceIdChange)
}
//notify event available
notifyEventsAdded()
}
def cancelSubscribers(): Unit = {
persistenceIdSubscribers.foreach { _._2.foreach(_ ! CancelEventsStage) }
tagSubscribers.foreach { _._2.foreach(_ ! CancelEventsStage) }
allEventsSubscribers.foreach { _ ! CancelEventsStage }
}
private val persistenceIdSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]]
with mutable.MultiMap[String, ActorRef]
private val tagSubscribers = new mutable.HashMap[EventTag, mutable.Set[ActorRef]]
with mutable.MultiMap[EventTag, ActorRef]
private var allEventsSubscribers = Set.empty[ActorRef]
protected[journal] def hasPersistenceIdSubscribers: Boolean = persistenceIdSubscribers.nonEmpty
protected[journal] def hasTagSubscribers: Boolean = tagSubscribers.nonEmpty
private def addTagSubscriber(subscriber: ActorRef, eventTags: Set[EventTag]): Unit = {
eventTags.foreach(eventTag => tagSubscribers.addBinding(eventTag, subscriber))
log.debug(s"added subscriptions for {} for actor {}", eventTags, subscriber)
// watch allEventsSubscribers in order to unsubscribe them if they terminate
context.watch(subscriber)
()
}
private def addSubscriber(subscriber: ActorRef): Unit = {
allEventsSubscribers += subscriber
log.debug("added subscriptions for actor {}", subscriber)
context.watch(subscriber)
()
}
private def addPersistenceIdSubscriber(subscriber: ActorRef, persistenceId: String): Unit = {
persistenceIdSubscribers.addBinding(persistenceId, subscriber)
context.watch(subscriber)
()
}
private def removeSubscriber(subscriber: ActorRef): Unit = {
log.warning("actor {} terminated!!", subscriber)
val keys = persistenceIdSubscribers.collect { case (k, s) if s.contains(subscriber) => k }
keys.foreach { key =>
persistenceIdSubscribers.removeBinding(key, subscriber)
}
val tags = tagSubscribers.collect { case (k, s) if s.contains(subscriber) => k }
if (tags.nonEmpty) {
log.debug("removing subscriber {} [tags: {}]", subscriber, tags)
tags.foreach { tag =>
tagSubscribers.removeBinding(tag, subscriber)
}
}
allEventsSubscribers -= subscriber
}
private def notifyEventsAdded(): Unit =
allEventsSubscribers.foreach(_ ! NewEventAppended)
private def notifyPersistenceIdChange(persistenceId: String): Unit =
if (persistenceIdSubscribers.contains(persistenceId)) {
persistenceIdSubscribers(persistenceId).foreach(_ ! NewEventAppended)
}
private def notifyTagChange(eventTag: EventTag): Unit =
if (tagSubscribers.contains(eventTag)) {
log.debug("Notify subscriber of new events with tag: {}", eventTag)
tagSubscribers(eventTag).foreach(_ ! NewEventAppended)
}
}
/** Protocol messages used by the journal's live-query (persistence-query) support. */
object PgAsyncWriteJournal {

  /** Marker for all subscription-related commands handled by the journal actor. */
  sealed trait SubscriptionCommand

  // Message sent to a subscriber when a new event relevant to it has been appended.
  object NewEventAppended extends DeadLetterSuppression

  // --- replay of events by tags ---
  final case class SubscribeTags(tags: Set[EventTag]) extends SubscriptionCommand

  // Request to replay tagged events within a row-id window, capped at `max`,
  // delivering results to `replyTo`.
  final case class ReplayTaggedMessages(
      fromRowId: Long,
      toRowId: Long,
      max: Long,
      tags: Set[EventTag],
      replyTo: ActorRef
  ) extends SubscriptionCommand

  // A single replayed tagged event; `offset` presumably is the journal row id — confirm.
  final case class ReplayedTaggedMessage(persistent: PersistentRepr, tags: Set[EventTag], offset: Long)
      extends DeadLetterSuppression
      with NoSerializationVerificationNeeded

  // --- replay of all events ---
  object SubscribeAllEvents extends SubscriptionCommand

  final case class ReplayMessages(fromRowId: Long, toRowId: Long, max: Long, replyTo: ActorRef)
      extends SubscriptionCommand

  final case class ReplayedEventMessage(persistent: PersistentRepr, offset: Long)
      extends DeadLetterSuppression
      with NoSerializationVerificationNeeded

  // --- live events for a single persistence id ---
  final case class SubscribePersistenceId(persistenceId: String) extends SubscriptionCommand

  // Administrative command; handling not visible in this chunk — see journal receive.
  case object CancelSubscribers
}
| WegenenVerkeer/akka-persistence-postgresql | modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/PgAsyncWriteJournal.scala | Scala | mit | 15,350 |
package knot.core
import scala.reflect.ClassTag
/** Companion holding the [[Decoration]] ADT and the empty decoration set. */
object Decorations {

  /** A decoration set containing no decorations. */
  val empty: Decorations = Decorations()

  /** Base trait for all decorations attachable to a [[Decorations]] set. */
  trait Decoration

  // Human-readable name; presumably labels a knot component — confirm at call sites.
  final case class Name(n: String) extends Decoration

  // Desired log level, carried as a free-form string.
  final case class LogLevel(level: String) extends Decoration

  // Dispatcher identifier; presumably selects an execution context — confirm.
  final case class Dispatcher(n: String) extends Decoration
}
/** An immutable, prepend-ordered collection of [[Decorations.Decoration]] values.
  * The most recently added decoration is at the head of `list`, so `get` returns
  * the newest decoration of the requested type.
  */
case class Decorations(list: List[Decorations.Decoration] = Nil) {
  import Decorations._

  /** Returns the first decoration assignable to `T`, if any.
    * The ClassTag context bound makes the runtime-type pattern match reliable
    * despite erasure.
    */
  def get[T <: Decoration : ClassTag]: Option[T] =
    list.collectFirst { case d: T => d }

  /** Returns the first decoration of type `T`, or evaluates `default` when absent. */
  def getOrElse[T <: Decoration : ClassTag](default: => T): T =
    get[T].getOrElse(default)

  /** Prepends a single decoration, so it shadows older ones of the same type. */
  def and(other: Decoration): Decorations = Decorations(other :: list)

  /** Merges another decoration set in front of this one.
    * The small-case branches avoid allocating when either side is empty or
    * the other side holds a single element.
    */
  def and(other: Decorations): Decorations =
    if (list.isEmpty) other
    else if (other.list.isEmpty) this
    else if (other.list.tail.isEmpty) Decorations(other.list.head :: list)
    else Decorations(other.list ::: list)
}
| defvar/knot | knot-core/src/main/scala/knot/core/Decorations.scala | Scala | mit | 1,068 |
package ch2_getting_started
/**
* Write a recursive function to get the nth Fibonacci number (http://mng.bz/C29s).
*
* The first two Fibonacci numbers are 0 and 1. The nth number is always the sum of the
* previous two—the sequence begins 0, 1, 1, 2, 3, 5. Your definition should use a
* local tail-recursive function.
*
* def fib(n: Int): Int
*/
object Ex2_1 {

  /** Prints the first ten Fibonacci numbers, one per line. */
  def main(args: Array[String]): Unit = {
    (0 until 10).foreach { i =>
      println(s"No.${i} fibonacci num: ${fib(i)}")
    }
  }

  /** Returns the nth Fibonacci number (fib(0) = 0, fib(1) = 1) using a
    * local tail-recursive accumulator loop, so it runs in constant stack space.
    */
  def fib(n: Int): Int = {
    @annotation.tailrec
    def loop(remaining: Int, current: Int, next: Int): Int =
      if (remaining <= 0) current
      else loop(remaining - 1, next, current + next)

    loop(n, 0, 1)
  }
}
| zenja/exercise-fp-in-scala | src/ch2_getting_started/Ex2_1.scala | Scala | mit | 697 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to code snippets that meet specific criteria and retrieves a sample of them, giving a quick overview of the dataset's contents without deeper analysis.