code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package model
import org.joda.time.DateTime
/** Storage abstraction for the link-shortener: users, folders, short links and click stats. */
trait AbstractDatabase{
  /** Returns the user for `userId` (implementations may create one on first lookup). */
  def user(userId: String): User
  /** Folders belonging to the given user. */
  def userFolders(userId: String): Seq[Folder]
  /** Links inside a folder, paged by `offset`/`limit`; limit 0 means the configured default. */
  def folderLinks(idFolder: Long, offset: Long, limit: Long): Seq[Link]
  /** Links created by the user identified by `userToken`, paged like [[folderLinks]]. */
  def userLinks(userToken: String, offset: Long, limit: Long): Seq[Link]
  /** Registers `url` under `code` (or a generated code) and returns the code actually used. */
  def addLink(token: String, url: String, code: String, folder_id: Long) : String
  // Resolving a code also records (calculates and fixes) the click.
  /** Resolves a short code to its URL, recording the click; None when the code is unknown. */
  def link4code(code: String, referer: String, remoteIp: String): Option[String]
  /** Metadata stored for a short code, if any. */
  def codeInfo(code: String): Option[CodeInfo]
  /** Click history for a code, paged by `offset`/`limit`; limit 0 means the configured default. */
  def linkClicks(code: String, offset: Long, limit: Long): Seq[Click]
}
/** Volatile, in-memory [[AbstractDatabase]] backed by mutable maps; all state is lost on restart. */
class MemoryDatabase extends AbstractDatabase {
  import scala.collection.mutable
  import Generator.genNewId

  // userId -> User; a fresh user is created lazily on first lookup.
  private val usersById = mutable.Map.empty[String, User]
  override def user(userId: String) = usersById.getOrElseUpdate(userId, User(genNewId))

  // userId -> ids of the folders owned by that user (ids index foldersWithLinks).
  private val foldersByUser = mutable.Map.empty[String, Seq[Long]]
  override def userFolders(user: String) =
    foldersByUser.getOrElse(user, Seq.empty).map(id => foldersWithLinks(id.toInt)._1)

  // The folder id doubles as the index into this buffer.
  private val foldersWithLinks = mutable.ArrayBuffer.empty[(Folder, Seq[Link])]
  override def folderLinks(idFolder: Long, offset: Long, limit: Long): Seq[Link] =
    if (idFolder == -1) Seq.empty
    else {
      val effectiveLimit = if (limit == 0) Config.defaultLimit else limit.toInt
      foldersWithLinks(idFolder.toInt)._2.drop(offset.toInt).take(effectiveLimit)
    }

  // user token -> that user's links, newest first.
  private val linksByUser = mutable.Map.empty[String, List[Link]]
  override def userLinks(userToken: String, offset: Long, limit: Long) = {
    val effectiveLimit = if (limit == 0) Config.defaultLimit else limit.toInt
    linksByUser.getOrElse(userToken, Nil).drop(offset.toInt).take(effectiveLimit)
  }

  // short code -> url / folder / click history.
  private val infoByCode = mutable.Map.empty[String, CodeInfo]
  override def codeInfo(code: String) = infoByCode.get(code)

  override def addLink(token: String, url: String, code: String, folder_id: Long) = {
    // Fall back to a generated id when no code was requested or the requested one is taken.
    val linkCode =
      if (code == "" || infoByCode.contains(code)) genNewId
      else code
    infoByCode += linkCode -> CodeInfo(url, folder_id, Nil)
    val link = Link(url, linkCode)
    linksByUser(token) = link :: linksByUser.getOrElse(token, Nil)
    linkCode
  }

  override def link4code(code: String, referer: String, remoteIp: String) =
    infoByCode.get(code).map { info =>
      // Record the click as a side effect of resolving the code.
      info.clicks ::= Click(DateTime.now(), remoteIp)
      info.url
    }

  override def linkClicks(code: String, offset: Long, limit: Long) = {
    val effectiveLimit = if (limit == 0) Config.defaultLimit else limit.toInt
    infoByCode.get(code).map(_.clicks).getOrElse(Nil).drop(offset.toInt).take(effectiveLimit)
  }
}
/** Process-wide database handle; currently backed by the in-memory implementation. */
object Database{
// Eagerly constructed singleton; swap the right-hand side for a persistent implementation.
val instance: AbstractDatabase = new MemoryDatabase
}
| Claus1/play-test | app/model/Database.scala | Scala | mit | 2,535 |
package dbpedia.mappings
import com.fasterxml.jackson.annotation.JsonFormat.Value
import dbpedia.dataparsers.ontology.{OntologyClass, OntologyProperty}
import dbpedia.dataparsers.util.wikiparser.WikiTitle
import dbpedia.dataparsers.util.{Language, WikidataUtil}
import org.wikidata.wdtk.datamodel.interfaces.GlobeCoordinatesValue
import scala.collection.mutable
/**
* Created by ali on 12/20/14.
* Command pattern is used to set mappings parameters (property,value) and
* execute mapping commands.
*/
/**
 * Command-pattern interface: each implementation triggers one mapping
 * operation on a receiver that holds the (property, value) parameters.
 */
trait WikidataTransformationCommands {
  /** Runs the mapping operation this command encapsulates. */
  def execute()
}
/** Command that triggers the receiver's one-to-one property mapping. */
class WikidataOneToOneCommand(receiver: WikidataCommandReceiver) extends WikidataTransformationCommands {
  override def execute(): Unit = receiver.oneToOne()
}
/** Command that triggers the receiver's one-to-many (template expansion) mapping. */
class WikidataOneToManyCommand(receiver: WikidataCommandReceiver) extends WikidataTransformationCommands {
  override def execute(): Unit = receiver.oneToMany()
}
/**
 * Receiver of the command pattern: holds the current (property, value) pair plus
 * the equivalent DBpedia classes/properties and a template map, and renders the
 * corresponding DBpedia (key -> value) pairs into `MapResult`.
 */
class WikidataCommandReceiver() {
  // Accumulated output: DBpedia key -> rendered string value.
  var MapResult = mutable.Map.empty[String, String]
  private var property: String = ""
  private var value: Value = _
  // One-to-many template map; values may contain "$1"/"$2" placeholders or
  // "$get..." directives, interpreted in oldMapToNewMap.
  private var map = Map.empty[String, String]
  private var equivClassSet = Set[OntologyClass]()
  private var equivPropertySet = Set[OntologyProperty]()
  /** Installs the parameters used by the next oneToOne()/oneToMany() call. */
  def setParameters(property: String, value: Value, equivClassSet: Set[OntologyClass], equivPropSet: Set[OntologyProperty], map: Map[String, String]): Unit = {
    this.property = property
    this.value = value
    this.equivClassSet = equivClassSet
    this.equivPropertySet = equivPropSet
    this.map = map
  }
  /** Returns the (mutable) accumulated mapping results. */
  def getMap(): mutable.Map[String, String] = {
    MapResult
  }
  /** One-to-one mapping: emit the value under every equivalent DBpedia property. */
  def oneToOne() {
    getDBpediaProperties(property, value)
  }
  /** One-to-many mapping: expand the template map against the current value. */
  def oneToMany(): Unit = {
    oldMapToNewMap()
  }
  // Interprets each template entry: "$1"/"$2" substitution, "$get..." directives,
  // literal values, or (for null templates) the cleaned-up raw value.
  private def oldMapToNewMap(): Unit = {
    map.foreach {
      keyVal => {
        if (keyVal._2 != null) {
          if (keyVal._2.contains("$1")) {
            // $1: substitute the cleaned value, spaces replaced by underscores.
            val v = substitute(keyVal._2, WikidataUtil.replacePunctuation(value.toString).replace(" ", "_").trim)
            MapResult += (keyVal._1 -> v)
          } else if (keyVal._2.contains("$2")) {
            // $2: substitute the value encoded as a Wikimedia Commons wiki title.
            Language.get("commons") match {
              case Some(dbpedia_lang) => {
                val wikiTitle = WikiTitle.parse(WikidataUtil.replacePunctuation(value.toString), dbpedia_lang)
                val v = substitute(keyVal._2, wikiTitle.encoded.toString)
                MapResult += (keyVal._1 -> v)
              }
              case _=> // "commons" language unknown: entry is silently skipped
            }
          } else {
            keyVal._2 match {
              case "$getLatitude" => MapResult += (keyVal._1 -> getLatitude(value).toString)
              case "$getLongitude" => MapResult += (keyVal._1 -> getLongitude(value).toString)
              case "$getGeoRss" => MapResult += (keyVal._1 -> getGeoRss(value))
              case "$getDBpediaClass" => if (!equivClassSet.isEmpty) MapResult ++= getDBpediaClass(keyVal._1)
              case _ => MapResult += (keyVal._1 -> keyVal._2) // literal template value
            }
          }
        } else {
          // No template: emit the raw value, stripped of punctuation and item-id noise.
          if (value.toString!="") MapResult += (keyVal._1 -> WikidataUtil.replacePunctuation(WikidataUtil.replaceItemId(value.toString)))
          else MapResult += (keyVal._1 -> "")
        }
      }
    }
  }
  /** Maps `key` to each equivalent DBpedia class name. NOTE(review): all entries
   * share the same key, so a plain Map retains only the last class — confirm
   * whether multiple equivalent classes were intended to survive. */
  def getDBpediaClass(key: String): mutable.Map[String, String] = {
    var classMap = mutable.Map.empty[String, String]
    equivClassSet.foreach {
      mappedClass => classMap += (key -> mappedClass.toString)
    }
    classMap
  }
  /** Adds one MapResult entry per equivalent DBpedia property, keyed by the
   * property name with the ontology namespace prefix stripped. */
  def getDBpediaProperties(key: String, value: Value): Unit = {
    if (!equivPropertySet.isEmpty) {
      equivPropertySet.foreach {
        mappedProperty => {
          val propKey = mappedProperty.toString.replace("http://dbpedia.org/ontology/", "")
          MapResult += (propKey -> WikidataUtil.getValue(value))
        }
      }
    }
  }
  // Returns the latitude (Double) for coordinate values, "" otherwise; the
  // mixed result type is Any and callers stringify it.
  def getLatitude(value: Value) = value match {
    case v: GlobeCoordinatesValue => {
      v.getLatitude
    }
    case _ => ""
  }
  // Returns the longitude (Double) for coordinate values, "" otherwise.
  def getLongitude(value: Value) = value match {
    case v: GlobeCoordinatesValue => {
      v.getLongitude
    }
    case _ => ""
  }
  // "lat lon" string for coordinate values (georss:point style), "" otherwise.
  def getGeoRss(value: Value) = value match {
    case v: GlobeCoordinatesValue => {
      v.getLatitude + " " + v.getLongitude
    }
    case _ => ""
  }
  /** Replaces the "$2" placeholder if present, otherwise the "$1" placeholder. */
  def substitute(newValue: String, value: String): String = {
    if (newValue.contains("$2")) newValue.replace("$2", value)
    else newValue.replace("$1", value)
  }
}
| FnOio/dbpedia-parsing-functions-scala | src/main/scala/dbpedia/mappings/WikidataTransformationCommands.scala | Scala | gpl-2.0 | 4,391 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactBootstrap
import scala.scalajs.js
import scala.scalajs.js._
import com.glipka.easyReactJS.react._
import ReactBootstrap._
/** Props facade for the FormControlFeedback component; adds nothing beyond the standard HTML props. */
@js.native trait FormControlFeedbackProps extends HTMLProps[FormControlFeedback] with js.Any {
}
/** Scala.js facade for a react-bootstrap feedback component (presumably FormControl.Feedback — verify against the JS library). */
@js.native
class FormControlFeedback(props: FormControlFeedbackProps) extends Component[FormControlFeedbackProps, Any](props) with js.Any{
} | glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactBootstrap/FormControlFeedback.scala | Scala | apache-2.0 | 983 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.effect
import slamdata.Predef._
import quasar.fp.TaskRef
import scalaz._, Scalaz._
import scalaz.concurrent.Task
/** Writer-style effect algebra: an instruction set (for a free/coproduct interpreter)
  * whose only operation appends a value of type `W`. */
sealed abstract class Write[W, A]

object Write {
  /** Appends `w` to the accumulated output; produces no result. */
  final case class Tell[W](w: W) extends Write[W, Unit]

  /** Smart constructors for programs over an effect set `S` that contains `Write[W, ?]`. */
  final class Ops[W, S[_]](implicit S: Write[W, ?] :<: S) extends LiftedOps[Write[W, ?], S] {
    def tell(w: W): FreeS[Unit] = lift(Tell(w))
  }
  object Ops {
    implicit def apply[W, S[_]](implicit S: Write[W, ?] :<: S): Ops[W, S] = new Ops[W, S]
  }

  /** Interprets `Write` into `Task` by semigroup-appending each told value onto a `TaskRef`. */
  def fromTaskRef[W: Semigroup](tr: TaskRef[W]): Write[W, ?] ~> Task =
    λ[Write[W, ?] ~> Task] {
      case Tell(w) => tr.modify(_ ⊹ w).void
    }
}
| jedesah/Quasar | effect/src/main/scala/quasar/effect/Write.scala | Scala | apache-2.0 | 1,258 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.uberstore.seqindex
/** Index from sequence numbers to byte offsets within a log segment. */
trait SeqIndex {
  /**
   * Get the offset for a particular sequence number
   *
   * @param seq Sequence number to find offset for
   *
   * @return Some(offset) if found, None if not
   */
  def getOffsetFor(seq: Long): Option[Long]
  /**
   * Get the maximum sequence number stored, if such exists
   *
   * @return Some(sequence) or None if none such exists
   */
  def getMaxSeq: Option[Long]
  /**
   * Map seq -> offset, persisting to disk and memory
   *
   * This operation is not thread safe relative to other
   * put operations.
   *
   * Subsequent Seq/Offset pairs should be strictly increasing;
   * for now behavior is undefined if they are not, in the future
   * we may enforce this more vigorously.
   *
   * @param seq sequence number
   * @param offset offset
   */
  def put(seq: Long, offset: Long): Unit
  /**
   * Get the range of offsets for entries for sequence numbers between
   * firstSeq and lastSeq, inclusive.
   *
   * @param firstSeq first sequence number to look for
   * @param lastSeq last sequence number to look for
   *
   * @return Tuple2[Long, Long] with the first item being the offset of
   *         the first entry within the range.
   *         If the range is empty, (0, -1) is returned
   */
  def getOffsetRange(firstSeq: Long, lastSeq: Long): (Long, Long)
  /**
   * Returns whether or not index is closed for use. Closed indexes
   * should not be used.
   * @return true if index is closed, false otherwise
   */
  def isClosed: Boolean
  /**
   * Returns the segment size. If the index is empty then the size is 0.
   * @return the size of the segment as a Long
   */
  def size: Long
  /**
   * Close open file handles. This SeqIndex should not be used after
   * close is called.
   */
  def close(): Unit
}
| Comcast/sirius | src/main/scala/com/comcast/xfinity/sirius/uberstore/seqindex/SeqIndex.scala | Scala | apache-2.0 | 2,467 |
package glint.serialization
/**
 * Raw relation record as read from storage, before typed deserialization.
 * Created by CAB on 07.03.2015.
 */
case class RawRelation(
  id:Long,              // unique relation id
  typeName:String,      // name of the relation's type
  data:Map[String,Any]) // untyped field payload, keyed by field name
| AlexCAB/Glint | src/main/scala/glint/serialization/RawRelation.scala | Scala | mit | 181 |
/* sbt -- Simple Build Tool
* Copyright 2008 Mark Harrah
*/
package sbt
import scala.reflect.Manifest
/** An environment of persisted, optionally inherited build properties.
  * Factory methods create [[Property]] instances whose values survive between
  * builds when assigned to a 'val' in the implementing 'Environment'. */
trait Environment
{
	/** A single named, typed property with explicit assignment and resolution. */
	abstract class Property[T]
	{
		/** Explicitly sets the value of this property to 'v'.*/
		def update(v: T): Unit
		/** Returns the current value of this property or throws an exception if the value could not be obtained.*/
		def value: T = resolve.value
		/** Returns the current value of this property in an 'Option'. 'None' is used to indicate that the
		* value could not be obtained.*/
		def get: Option[T] = resolve.toOption
		/** Returns full information about this property's current value. */
		def resolve: PropertyResolution[T]
		/** Applies 'f' to this property's value, if it can be obtained. */
		def foreach(f: T => Unit): Unit = resolve.foreach(f)
	}

	/** Creates a system property with the given name and no default value.*/
	def system[T](propName: String)(implicit format: Format[T]): Property[T]
	/** Creates a system property with the given name and the given default value to use if no value is explicitly specified.*/
	def systemOptional[T](propName: String, defaultValue: => T)(implicit format: Format[T]): Property[T]

	/** Creates a user-defined property that has no default value. The property will try to inherit its value
	* from a parent environment (if one exists) if its value is not explicitly specified. An explicitly specified
	* value will persist between builds if the object returned by this method is assigned to a 'val' in this
	* 'Environment'.*/
	def property[T](implicit manifest: Manifest[T], format: Format[T]): Property[T]
	/** Creates a user-defined property that has no default value. The property will try to inherit its value
	* from a parent environment (if one exists) if its value is not explicitly specified. An explicitly specified
	* value will persist between builds if the object returned by this method is assigned to a 'val' in this
	* 'Environment'. The given 'format' is used to convert an instance of 'T' to and from the 'String' representation
	* used for persistence.*/
	def propertyF[T](format: Format[T])(implicit manifest: Manifest[T]): Property[T] = property(manifest, format)

	/** Creates a user-defined property with no default value and no value inheritance from a parent environment.
	* Its value will persist between builds if the returned object is assigned to a 'val' in this 'Environment'.*/
	def propertyLocal[T](implicit manifest: Manifest[T], format: Format[T]): Property[T]
	/** Creates a user-defined property with no default value and no value inheritance from a parent environment.
	* The property's value will persist between builds if the object returned by this method is assigned to a
	* 'val' in this 'Environment'. The given 'format' is used to convert an instance of 'T' to and from the
	* 'String' representation used for persistence.*/
	def propertyLocalF[T](format: Format[T])(implicit manifest: Manifest[T]): Property[T] = propertyLocal(manifest, format)

	/** Creates a user-defined property that uses the given default value if no value is explicitly specified for this property. The property's value will persist between builds
	* if the object returned by this method is assigned to a 'val' in this 'Environment'.*/
	def propertyOptional[T](defaultValue: => T)(implicit manifest: Manifest[T], format: Format[T]): Property[T]
	/** Creates a user-defined property with no value inheritance from a parent environment but with the given default
	* value if no value is explicitly specified for this property. The property's value will persist between builds
	* if the object returned by this method is assigned to a 'val' in this 'Environment'. The given 'format' is used
	* to convert an instance of 'T' to and from the 'String' representation used for persistence.*/
	def propertyOptionalF[T](defaultValue: => T, format: Format[T])(implicit manifest: Manifest[T]): Property[T] =
		propertyOptional(defaultValue)(manifest, format)
}
private object Environment
{
	/** Collects every 'val' of type 'clazz' declared on 'obj' into a map, keying each
	* value by its val name converted from camelCase to dot.separated form. */
	def reflectiveMappings[T](obj: AnyRef, clazz: Class[T]): Map[String, T] =
		ReflectUtilities.allValsC(obj, clazz).foldLeft(Map.empty[String, T]) {
			case (acc, (name, value)) =>
				acc.updated(ReflectUtilities.transformCamelCase(name, '.'), value)
		}
}
| olove/xsbt | util/env/src/main/scala/sbt/Environment.scala | Scala | bsd-3-clause | 4,200 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule.examples.io.stopwatch
import molecule._
import io._
import stream._
import channel.Timer
import java.util.concurrent.TimeUnit
/** User-interface events driving the stopwatch state machine. */
sealed abstract class Event
case object Start extends Event   // begin timing (from the ready state)
case object Split extends Event   // freeze the displayed time while timing continues
case object Unsplit extends Event // resume the live display after a split
case object Reset extends Event   // return to the initial display (accepted once stopped)
case object Stop extends Event    // stop timing
/** Stopwatch state machine: consumes [[Event]]s and emits [[DisplayTime]] updates.
  * States: ready -> running <-> paused, running/paused -> stopped -> ready. */
object Controller extends ProcessType1x1[Event, DisplayTime, Unit] {
  val initTime = DisplayTime(0, 0, 0, 0)

  // Endless stream of display times, advancing once per 100 ms timer tick.
  def displayTimeFeed: IChan[DisplayTime] =
    Timer.every(100, TimeUnit.MILLISECONDS).scan(initTime)((time, tick) => time.increment)

  def main(events: Input[Event], display: Output[DisplayTime]) = {
    /**
     * Ready state: show the reset display, then wait for Start and attach a fresh time feed.
     */
    def ready(): IO[Unit] = {
      def waitForStart: IO[Unit] = events.read() >>\\ {
        case Start =>
          use(displayTimeFeed) >>\\ running
        case _ => waitForStart
      }
      display.write(initTime) >> waitForStart
    }

    // Running state: forward time updates to the display; Split pauses, Stop stops.
    def running(timeFeed: Input[DisplayTime]): IO[Unit] = {
      (events <%+> timeFeed).read() >>\\ {
        case Left(Split) => paused(timeFeed)
        case Left(Stop) => stopped(timeFeed)
        case Left(_) => running(timeFeed)
        case Right(timeUpdate) => display.write(timeUpdate) >> running(timeFeed)
      }
    }

    // Paused (split) state: display frozen, but time updates are still consumed and dropped.
    def paused(timeFeed: Input[DisplayTime]): IO[Unit] = {
      (events <%+> timeFeed).read() >>\\ {
        case Left(Unsplit) => running(timeFeed)
        case Left(Stop) => stopped(timeFeed)
        case _ => paused(timeFeed)
      }
    }

    // Stopped state: release the time feed, then wait for Reset to return to ready.
    def stopped(timeFeed: Input[DisplayTime]): IO[Unit] = {
      lazy val waitForReset: IO[Unit] = events.read() >>\\ {
        case Reset => ready()
        case _ => waitForReset
      }
      timeFeed.poison() >> waitForReset
    }

    ready() orCatch {
      case EOS =>
        ioLog("closed") // timer is automatically closed thanks to ARM
    }
  }
} | molecule-labs/molecule | molecule-io-examples/src/main/scala/molecule/examples/io/stopwatch/Controller.scala | Scala | apache-2.0 | 2,610 |
package Tutorial
import Chisel._
import Node._
import Literal._
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
/** One-stage pipeline that adds 1 to each 32-bit input value.
  * Both the data and the valid flag are registered, so the output lags the
  * input by one clock cycle; ready/valid flow control is gated by the
  * downstream consumer via io.out.ready. */
class IncgComponent extends gComponentLeaf (() => UFix(width = 32)) (() => UFix(width = 32)) (ArrayBuffer(("testOff", () => UFix(width = 32), () => UFix(width = 32)))) {
  val outputData = Reg(UFix(width = 32))
  val outputValid = Reg(Bool(false))
  outputValid := io.in.valid
  outputData := io.in.bits + UFix(1, 32)
  io.in.ready := io.out.ready
  io.out.bits := outputData
  // Fix: the original connected io.out.bits := outputData twice (identical
  // statements); under Chisel's last-connection-wins semantics the first was
  // dead, so the redundant duplicate has been removed.
  io.out.valid := outputValid && io.out.ready
}
/** Chains two IncgComponent stages back to back, so the overall pipeline
  * adds 2 to each 32-bit input value. */
class Chained extends Component with GorillaUtil {
  val io = new gInOutBundle (() => UFix(width = 32), () => UFix(width = 32))
  // Two metadata descriptors, one per stage (distinct offset names).
  val a = new gComponentMD (() => UFix(width = 32),
    () => UFix(width = 32),
    ArrayBuffer(("testOff1", () => UFix(width = 32), () => UFix(width = 32))))
  val b = new gComponentMD (() => UFix(width = 32),
    () => UFix(width = 32),
    ArrayBuffer(("testOff2", () => UFix(width = 32), () => UFix(width = 32))))
  // Pair each descriptor with a factory for its +1 stage, then chain them.
  val c = (a, () => new IncgComponent)
  val d = (b, () => new IncgComponent)
  val e = Chain(c,d)
  // Expose the chained pipeline's I/O as this component's I/O.
  io <> e._2().io
}
/** Simulation test for [[Chained]]: drives four inputs through the two-stage
  * pipeline and checks each output equals the input plus 2, within 200 cycles. */
class ChainedTests(c: Chained) extends Tester(c, Array(c.io)) {
  defTests {
    val inputs_data = List(1, 2, 4, 8)
    val svars = new HashMap[Node, Node]() // stimulus: signal -> driven value
    val ovars = new HashMap[Node, Node]() // observed outputs after each step
    // let it spin for a bit
    for (time <- 0 until 5) {
      svars(c.io.in.valid) = Bool(false)
      svars(c.io.in.bits) = UFix(0)
      step(svars, ovars, false)
    }
    var sourced = 0 // inputs accepted by the pipeline so far
    var sinked = 0  // outputs checked so far
    var time = 0
    //var sinkStarted = false
    var allPassed = true
    // Run until all four values are sourced and sinked, or the cycle budget expires.
    while(time < 200 && (sourced < 4 || sinked < 4)) {
      if (sourced < 4) {
        svars(c.io.in.bits) = Bits(inputs_data(sourced))
        svars(c.io.in.valid) = Bool(true)
        svars(c.io.out.ready) = Bool(true)
      } else {
        // All inputs delivered: stop driving valid data.
        svars(c.io.in.bits) = UFix(0)
        svars(c.io.in.valid) = Bool(false)
      }
      // this advances the clock
      step(svars, ovars)
      // bump counters and check outputs after advancing clock
      if (ovars(c.io.in.ready).litValue() == 1) sourced += 1
      if (ovars(c.io.out.valid).litValue() == 1) {
        // Two chained +1 stages: expect input + 2.
        allPassed = allPassed && (ovars(c.io.out.bits).litValue() == (inputs_data(sinked) + 2))
        if (allPassed == false) {
          println("Test failed because output is " + ovars(c.io.out.bits).litValue() +
            " expected " + (inputs_data(sinked) +2))
          println("Sinked is " + sinked)
        }
        //allPassed = allPassed
        sinked += 1
      }
      //if (time >10) {
      //  svars(c.io.out.ready) = UFix(1)
      //  sinkStarted = true
      //}
      time += 1
    }
    // Pass only if every output matched and we finished within the cycle budget.
    allPassed && time < 200
  }
}
| seyedmaysamlavasani/GorillaPP | chisel/KmeansAndMesh/src/Chained.scala | Scala | bsd-3-clause | 2,788 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.ErrorMsg
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.{CatalogTable, ExternalCatalog}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.CommandUtils
import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc}
import org.apache.spark.sql.hive.client.HiveClientImpl
/**
* Command for writing data out to a Hive table.
*
* This class is mostly a mess, for legacy reasons (since it evolved in organic ways and had to
* follow Hive's internal implementations closely, which itself was a mess too). Please don't
* blame Reynold for this! He was just moving code around!
*
* In the future we should converge the write path for Hive with the normal data source write path,
* as defined in `org.apache.spark.sql.execution.datasources.FileFormatWriter`.
*
* @param table the metadata of the table.
* @param partition a map from the partition key to the partition value (optional). If the partition
* value is optional, dynamic partition insert will be performed.
* As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS ...` would have
*
* {{{
* Map('a' -> Some('1'), 'b' -> Some('2'))
* }}}
*
* and `INSERT INTO tbl PARTITION (a=1, b) AS ...`
* would have
*
* {{{
* Map('a' -> Some('1'), 'b' -> None)
* }}}.
* @param query the logical plan representing data to write to.
* @param overwrite overwrite existing table or partitions.
* @param ifPartitionNotExists If true, only write if the partition does not exist.
* Only valid for static partitions.
*/
case class InsertIntoHiveTable(
    table: CatalogTable,
    partition: Map[String, Option[String]],
    query: LogicalPlan,
    overwrite: Boolean,
    ifPartitionNotExists: Boolean,
    outputColumns: Seq[Attribute]) extends SaveAsHiveFile {

  /**
   * Inserts all the rows in the table into Hive. Row objects are properly serialized with the
   * `org.apache.hadoop.hive.serde2.SerDe` and the
   * `org.apache.hadoop.mapred.OutputFormat` provided by the table definition.
   *
   * Rows are first written to a temporary staging location, then loaded into the
   * target table/partitions via the external catalog (see [[processInsert]]).
   */
  override def run(sparkSession: SparkSession, child: SparkPlan): Seq[Row] = {
    val externalCatalog = sparkSession.sharedState.externalCatalog
    val hadoopConf = sparkSession.sessionState.newHadoopConf()

    val hiveQlTable = HiveClientImpl.toHiveTable(table)
    // Have to pass the TableDesc object to RDD.mapPartitions and then instantiate new serializer
    // instances within the closure, since Serializer is not serializable while TableDesc is.
    val tableDesc = new TableDesc(
      hiveQlTable.getInputFormatClass,
      // The class of table should be org.apache.hadoop.hive.ql.metadata.Table because
      // getOutputFormatClass will use HiveFileFormatUtils.getOutputFormatSubstitute to
      // substitute some output formats, e.g. substituting SequenceFileOutputFormat to
      // HiveSequenceFileOutputFormat.
      hiveQlTable.getOutputFormatClass,
      hiveQlTable.getMetadata
    )
    val tableLocation = hiveQlTable.getDataLocation
    // Staging directory for the write; cleaned up in the finally block below.
    val tmpLocation = getExternalTmpPath(sparkSession, hadoopConf, tableLocation)

    try {
      processInsert(sparkSession, externalCatalog, hadoopConf, tableDesc, tmpLocation, child)
    } finally {
      // Attempt to delete the staging directory and the inclusive files. If failed, the files are
      // expected to be dropped at the normal termination of VM since deleteOnExit is used.
      deleteExternalTmpPath(hadoopConf)
    }

    // un-cache this table since its contents have changed.
    sparkSession.catalog.uncacheTable(table.identifier.quotedString)
    sparkSession.sessionState.catalog.refreshTable(table.identifier)

    CommandUtils.updateTableStats(sparkSession, table)

    // It would be nice to just return the childRdd unchanged so insert operations could be chained,
    // however for now we return an empty list to simplify compatibility checks with hive, which
    // does not return anything for insert operations.
    // TODO: implement hive compatibility as rules.
    Seq.empty[Row]
  }

  /**
   * Writes the child plan's output to `tmpLocation` and then loads it into the
   * table (or its static/dynamic partitions) through the external catalog,
   * after validating the partition spec against the table definition.
   */
  private def processInsert(
      sparkSession: SparkSession,
      externalCatalog: ExternalCatalog,
      hadoopConf: Configuration,
      tableDesc: TableDesc,
      tmpLocation: Path,
      child: SparkPlan): Unit = {
    val fileSinkConf = new FileSinkDesc(tmpLocation.toString, tableDesc, false)

    // A partition key with no value (None) is a dynamic partition.
    val numDynamicPartitions = partition.values.count(_.isEmpty)
    val numStaticPartitions = partition.values.count(_.nonEmpty)
    val partitionSpec = partition.map {
      case (key, Some(value)) => key -> value
      case (key, None) => key -> ""
    }

    // All partition column names in the format of "<column name 1>/<column name 2>/..."
    val partitionColumns = fileSinkConf.getTableInfo.getProperties.getProperty("partition_columns")
    val partitionColumnNames = Option(partitionColumns).map(_.split("/")).getOrElse(Array.empty)

    // By this time, the partition map must match the table's partition columns
    if (partitionColumnNames.toSet != partition.keySet) {
      throw new SparkException(
        s"""Requested partitioning does not match the ${table.identifier.table} table:
           |Requested partitions: ${partition.keys.mkString(",")}
           |Table partitions: ${table.partitionColumnNames.mkString(",")}""".stripMargin)
    }

    // Validate partition spec if there exist any dynamic partitions
    if (numDynamicPartitions > 0) {
      // Report error if dynamic partitioning is not enabled
      if (!hadoopConf.get("hive.exec.dynamic.partition", "true").toBoolean) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg)
      }

      // Report error if dynamic partition strict mode is on but no static partition is found
      if (numStaticPartitions == 0 &&
        hadoopConf.get("hive.exec.dynamic.partition.mode", "strict").equalsIgnoreCase("strict")) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg)
      }

      // Report error if any static partition appears after a dynamic partition
      val isDynamic = partitionColumnNames.map(partitionSpec(_).isEmpty)
      if (isDynamic.init.zip(isDynamic.tail).contains((true, false))) {
        throw new AnalysisException(ErrorMsg.PARTITION_DYN_STA_ORDER.getMsg)
      }
    }

    table.bucketSpec match {
      case Some(bucketSpec) =>
        // Writes to bucketed hive tables are allowed only if user does not care about maintaining
        // table's bucketing ie. both "hive.enforce.bucketing" and "hive.enforce.sorting" are
        // set to false
        val enforceBucketingConfig = "hive.enforce.bucketing"
        val enforceSortingConfig = "hive.enforce.sorting"

        val message = s"Output Hive table ${table.identifier} is bucketed but Spark " +
          "currently does NOT populate bucketed output which is compatible with Hive."

        if (hadoopConf.get(enforceBucketingConfig, "true").toBoolean ||
          hadoopConf.get(enforceSortingConfig, "true").toBoolean) {
          throw new AnalysisException(message)
        } else {
          logWarning(message + s" Inserting data anyways since both $enforceBucketingConfig and " +
            s"$enforceSortingConfig are set to false.")
        }
      case _ => // do nothing since table has no bucketing
    }

    // Resolve the dynamic-partition columns (the trailing partition columns)
    // against the query's output attributes.
    val partitionAttributes = partitionColumnNames.takeRight(numDynamicPartitions).map { name =>
      query.resolve(name :: Nil, sparkSession.sessionState.analyzer.resolver).getOrElse {
        throw new AnalysisException(
          s"Unable to resolve $name given [${query.output.map(_.name).mkString(", ")}]")
      }.asInstanceOf[Attribute]
    }

    saveAsHiveFile(
      sparkSession = sparkSession,
      plan = child,
      hadoopConf = hadoopConf,
      fileSinkConf = fileSinkConf,
      outputLocation = tmpLocation.toString,
      allColumns = outputColumns,
      partitionAttributes = partitionAttributes)

    if (partition.nonEmpty) {
      if (numDynamicPartitions > 0) {
        externalCatalog.loadDynamicPartitions(
          db = table.database,
          table = table.identifier.table,
          tmpLocation.toString,
          partitionSpec,
          overwrite,
          numDynamicPartitions)
      } else {
        // scalastyle:off
        // ifNotExists is only valid with static partition, refer to
        // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries
        // scalastyle:on
        val oldPart =
          externalCatalog.getPartitionOption(
            table.database,
            table.identifier.table,
            partitionSpec)

        var doHiveOverwrite = overwrite

        if (oldPart.isEmpty || !ifPartitionNotExists) {
          // SPARK-18107: Insert overwrite runs much slower than hive-client.
          // Newer Hive largely improves insert overwrite performance. As Spark uses older Hive
          // version and we may not want to catch up new Hive version every time. We delete the
          // Hive partition first and then load data file into the Hive partition.
          if (oldPart.nonEmpty && overwrite) {
            oldPart.get.storage.locationUri.foreach { uri =>
              val partitionPath = new Path(uri)
              val fs = partitionPath.getFileSystem(hadoopConf)
              if (fs.exists(partitionPath)) {
                if (!fs.delete(partitionPath, true)) {
                  throw new RuntimeException(
                    "Cannot remove partition directory '" + partitionPath.toString)
                }
                // Don't let Hive do overwrite operation since it is slower.
                doHiveOverwrite = false
              }
            }
          }

          // inheritTableSpecs is set to true. It should be set to false for an IMPORT query
          // which is currently considered as a Hive native command.
          val inheritTableSpecs = true
          externalCatalog.loadPartition(
            table.database,
            table.identifier.table,
            tmpLocation.toString,
            partitionSpec,
            isOverwrite = doHiveOverwrite,
            inheritTableSpecs = inheritTableSpecs,
            isSrcLocal = false)
        }
      }
    } else {
      // Non-partitioned table: load the staged files directly into the table.
      externalCatalog.loadTable(
        table.database,
        table.identifier.table,
        tmpLocation.toString, // TODO: URI
        overwrite,
        isSrcLocal = false)
    }
  }
}
| brad-kaiser/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala | Scala | apache-2.0 | 11,802 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogdebugger.ui.components
import cogdebugger.ui.fieldvisualizations.Zoomable
import org.jfree.chart.axis.NumberAxis
import org.jfree.data.xy.{XYSeriesCollection, XYSeries}
import org.jfree.chart.renderer.xy.StandardXYItemRenderer
import org.jfree.chart.plot.{XYPlot => JFreeXYPlot, PlotOrientation, CombinedDomainXYPlot}
import scala.swing.{Component, BorderPanel}
import org.jfree.chart.{ChartPanel, JFreeChart}
/*
* Created with IntelliJ IDEA.
* User: gonztobi
* Date: 9/6/13
* Time: 1:30 PM
*/
/** A data visualization that displays a number of timeseries plots stacked on
* top of each other. Each chart has its own set of y/range values and range
* axis, but all share the same x/domain values and axis. This class is
* intended to provide the same functionality as Cog 3's MultiXYPlot class.
*
 * All data is sorted by the domain value prior to rendering. Thus,
* the lines drawn on the timeseries plots can never double back.
*
* @param title The chart's title, displayed at the top of the panel.
* @param xLabel Label for the domain axis
* @param yLabels An array of labels for the range axes, one per series.
* @param initialXData An array of Floats containing the initial domain
* values.
 * @param initialYData A two-dimensional array of floats containing the range
* data for each series. The first index into the array
* selects the data for an individual series, the
* second/innermost index selects an element in that
* series.
*/
class StackedTimeseriesPlot(title: String,
                            xLabel: String,
                            yLabels: Array[String],
                            initialXData: Array[Float],
                            initialYData: Array[Array[Float]])
        extends BorderPanel
        with Zoomable {

  /** Convenience constructor for a plot that starts out with no data. */
  def this(title: String, xLabel: String, yLabels: Array[String]) =
    this(title, xLabel, yLabels, new Array[Float](0), new Array[Array[Float]](0))

  // One range label per series; every series must match the domain's length.
  require(initialYData.length == yLabels.length)
  require(initialYData.length == 0 || initialYData(0).length == initialXData.length)

  def ZoomFactor = zDelta // New axis size, relative to before zoom (should be < 1)
  val PanFactor = 0.5 // Fraction of current xAxis size to shift by

  // Extremes of the initial domain data; used to clamp panning/zooming at the edges.
  val fullScaleXAxisLowerBound = if (initialXData.length > 0) initialXData(0) else 0
  val fullScaleXAxisUpperBound = if (initialXData.length > 0) initialXData(initialXData.length - 1) else 0

  // Combined plot: all sub-plots are stacked vertically and share one domain axis.
  val plot = new CombinedDomainXYPlot(new NumberAxis(xLabel))
  plot.setGap(10.0)
  plot.setOrientation(PlotOrientation.VERTICAL)

  val xAxis = plot.getDomainAxis
  xAxis.setLowerMargin(0)
  xAxis.setUpperMargin(0)
  //xAxis.setRange(...)

  // One sub-plot per range label, each with its own range (y) axis.
  val subPlots = for (i <- 0 until yLabels.length) yield {
    val label = yLabels(i)
    require(label != null)
    val data = createDataSet(label, initialXData, initialYData(i))
    new JFreeXYPlot(data, null, new NumberAxis(label), new StandardXYItemRenderer())
    //subPlot.setRangeAxisLocation(AxisLocation.BOTTOM_OR_LEFT)
    //subPlot.setRangeZeroBaselineVisible(true)
    //subPlot
  }
  for (subPlot <- subPlots) plot.add(subPlot)

  val chart = new JFreeChart(title, JFreeChart.DEFAULT_TITLE_FONT, plot, false)
  add(Component.wrap(new ChartPanel(chart)), BorderPanel.Position.Center)

  /** Width of the currently visible domain (x-axis) range. */
  def xRangeSize = xAxis.getUpperBound - xAxis.getLowerBound

  /** Resizes the visible domain range to `size`, keeping the upper bound
    * fixed and clamping the lower bound at the full-scale minimum. */
  def xRangeSize_=(size: Double) {
    val upperBound = xAxis.getUpperBound
    val lowerBound = (upperBound - size) max fullScaleXAxisLowerBound
    xAxis.setRange(lowerBound, upperBound)
  }

  /** Shifts the visible X-axis range by `shiftBy` units. */
  def shiftXAxisRange(shiftBy: Double) {
    var lowerBound = xAxis.getLowerBound + shiftBy
    var upperBound = xAxis.getUpperBound + shiftBy
    // Clamp at either edge while preserving the visible range's width.
    if (lowerBound < fullScaleXAxisLowerBound) {
      lowerBound = fullScaleXAxisLowerBound
      upperBound = lowerBound + xRangeSize
    } else if (upperBound > fullScaleXAxisUpperBound) {
      upperBound = fullScaleXAxisUpperBound
      lowerBound = upperBound - xRangeSize
    }
    xAxis.setRange(lowerBound, upperBound)
  }

  // Pan by PanFactor (half) of the visible range per invocation.
  def panLeft() { shiftXAxisRange(-xRangeSize * PanFactor) }
  def panRight() { shiftXAxisRange( xRangeSize * PanFactor) }

  // Zooming in shrinks the visible range (ZoomFactor < 1); zooming out grows it.
  override def zoomIn() { changeZoomLevel(xRangeSize * ZoomFactor) }
  override def zoomOut() { changeZoomLevel(xRangeSize / ZoomFactor) }

  /** Updates the data set by shifting the y value of each (x1, y) pair in each
    * data series to the previous (x0, y) pair, and then dropping the `yData`
    * argument values into the now vacant y component of the last xy pairs.
    *
    * E.g., consider the 2-series data set here:
    * {{{
    *   series 0 y values -- | 1 | 2 | 3 | 4 | 5 |
    *   series 1 y values -- | 4 | 7 | 2 | 9 | 1 |
    *       domain values -- | 2 | 4 | 5 | 7 | 9 |
    * }}}
    * Calling `updateData` with Array(3, 4) yields:
    * {{{
    *   series 0 y values -- | 2 | 3 | 4 | 5 | 3 |
    *   series 1 y values -- | 7 | 2 | 9 | 1 | 4 |
    *       domain values -- | 2 | 4 | 5 | 7 | 9 |
    * }}}
    */
  def updateData(yData: Array[Float]) {
    // This sort of operation could be made *much* faster with a special purpose
    // data set that maintains a circular buffer, so as to not require all this
    // shifting.
    require(yData.length == subPlots.length)
    for ((subPlot, idx) <- subPlots.zipWithIndex) {
      val series = subPlot.getDataset.asInstanceOf[XYSeriesCollection].getSeries(0)
      // Suppress per-item change events while shifting; re-enabling below
      // lets the chart repaint once for the whole update.
      series.setNotify(false)
      for (i <- 0 until series.getItemCount - 1)
        series.updateByIndex(i, series.getDataItem(i + 1).getY)
      series.updateByIndex(series.getItemCount - 1, yData(idx))
      series.setNotify(true)
    }
  }

  /** Updates the data set by dropping the first/left-most item from each data
    * series and then appending the values given as arguments to this method.
    * Keep in mind that the data will be sorted by domain value prior to
    * rendering.
    *
    * E.g., consider the 2-series data set here:
    * {{{
    *   series 0 y values -- | 1 | 2 | 3 | 4 | 5 |
    *   series 1 y values -- | 4 | 7 | 2 | 9 | 1 |
    *       domain values -- | 2 | 4 | 5 | 7 | 9 |
    * }}}
    * Calling `updateData` with (3, Array(3, 4)) yields:
    * {{{
    *   series 0 y values -- | 3 | 4 | 5 | 3 | 3 |
    *   series 1 y values -- | 2 | 9 | 1 | 4 | 4 |
    *       domain values -- | 4 | 5 | 7 | 9 | 3 |
    * }}}
    * But sorting will cause values to be rendered in this order:
    * {{{
    *   series 0 y values -- | 3 | 3 | 4 | 5 | 3 |
    *   series 1 y values -- | 4 | 2 | 9 | 1 | 4 |
    *       domain values -- | 3 | 4 | 5 | 7 | 9 |
    * }}}
    */
  def updateData(x: Float, yData: Array[Float]) {
    require(yData.length == subPlots.length)
    for ((subPlot, idx) <- subPlots.zipWithIndex) {
      val series = subPlot.getDataset.asInstanceOf[XYSeriesCollection].getSeries(0)
      // NOTE(review): assumes every series is non-empty; remove(0) on an
      // empty series would fail — confirm callers never reach that state.
      series.remove(0)
      series.add(x, yData(idx))
    }
  }

  /** Overwrites existing Y data with the contents of the given array. Assumes
    * (series, col) indexing. This translates into a chart that looks like
    * this:
    * {{{
    *            +-----------+---------------+---------------+------
    *   series 0 | T - times | T - times + 1 | T - times + 2 | ...
    *            +-----------+---------------+---------------+------
    *   series 1 | T - times | T - times + 1 | T - times + 2 | ...
    *            +-----------+---------------+---------------+------
    *   ...      | ...
    * }}}
    * */
  def replaceYData(yData: Array[Array[Float]]) {
    //require(yData(0).length == subPlots.length)
    //    for ((subPlot, idx) <- subPlots.zipWithIndex) {
    //      val series = subPlot.getDataset.asInstanceOf[XYSeriesCollection].getSeries(0)
    //      for (i <- 0 until series.getItemCount)
    //        series.updateByIndex(i, yData(i)(idx))
    //    }
    for ((subPlot, idx) <- subPlots.zipWithIndex) {
      val series = subPlot.getDataset.asInstanceOf[XYSeriesCollection].getSeries(0)
      series.setNotify(false)
      for (i <- 0 until series.getItemCount) series.updateByIndex(i, yData(idx)(i))
      series.setNotify(true) // Fires series changed event
    }
  }

  /** Builds a single-series dataset from parallel x/y arrays. */
  private def createDataSet(title: String, xData: Array[Float], yData: Array[Float]) = {
    require(xData.length == yData.length)
    val series = new XYSeries(title)
    for ((x, y) <- xData zip yData) series.add(x, y)
    new XYSeriesCollection(series)
    //TODO Switch to new DynamicTimeSeriesCollection(..., ...)
  }

  // Zoomable implementation: each zoom step scales the visible range by 0.8.
  def zDelta: Float = 0.8f
  def changeZoomLevel(delta: Double) { changeZoomLevel(delta.toFloat) }
  // Here `delta` is the new absolute width of the visible domain range.
  def changeZoomLevel(delta: Float) { xRangeSize = delta }
}
| hpe-cct/cct-core | src/main/scala/cogdebugger/ui/components/StackedTimeseriesPlot.scala | Scala | apache-2.0 | 9,360 |
/*-
* #%L
* FWAPP Framework
* %%
* Copyright (C) 2016 - 2017 Open Design Flow
* %%
* This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.odfi.wsb.fwapp.lib.security.provider.passwordless
import org.odfi.indesign.core.module.IndesignModule
import org.odfi.wsb.fwapp.lib.security.SecurityLibModule
import org.odfi.wsb.fwapp.lib.security.UserTraitFederatedIdentity
import com.idyria.osi.ooxoo.core.buffers.datatypes.DateTimeBuffer
import java.util.UUID
object PasswordLessModule extends IndesignModule {

  /** Pending one-shot login tokens, keyed by the email they were issued for. */
  var availableTokens = Map[String, UserTraitFederatedIdentity]()

  /** Issues a fresh passwordless token for `email`, valid for 5 minutes,
    * replacing any token previously issued for the same address.
    */
  def generateToken(email: String) = synchronized {
    val identity = new UserTraitFederatedIdentity
    identity.providerID = "passwordless"
    identity.validity = DateTimeBuffer()
    identity.validity.addMinutes(5)
    identity.token = UUID.randomUUID().toString()
    availableTokens += (email -> identity)
    identity
  }

  /** Authenticates against a previously generated token.
    *
    * Returns the matching `(email, token)` pair on success. The token is
    * single-use: it is removed from the pending map whether it matched and
    * was valid, or matched but had already expired.
    */
  def checkToken(token: String) = synchronized {
    val lookup = availableTokens.find {
      case (email, id) =>
        println("PL testing: " + id.token.toString() + " against: " + token)
        id.token.toString() == token
    }
    lookup match {
      case Some((email, id)) if id.validity.isBeforeNow =>
        availableTokens -= email
        sys.error("Token not valid anymore")
      case Some((email, id)) =>
        availableTokens -= email
        (email, id.token.toString)
      case None =>
        sys.error("No Token available, please generate one before authentication")
    }
  }

  this.onLoad {
    // Passwordless authentication builds on the shared security library.
    requireModule(SecurityLibModule)
  }
}
| opendesignflow/fwapp | src/main/scala/org/odfi/wsb/fwapp/lib/security/provider/passwordless/PasswordLessModule.scala | Scala | agpl-3.0 | 2,326 |
package io.buoyant.telemetry.statsd
import com.timgroup.statsd.StatsDClient
import com.twitter.finagle.stats.{Metadata, NoMetadata, Counter => FCounter, Stat => FStat}
private[statsd] object Metric {

  /** Finagle counter that forwards increments to StatsD.
    * Counts only send when Math.random() <= sampleRate (client-side sampling). */
  class Counter(statsDClient: StatsDClient, name: String, sampleRate: Double) extends FCounter {
    def incr(delta: Long): Unit = statsDClient.count(name, delta, sampleRate)
    def metadata: Metadata = NoMetadata
  }

  /** Gauge whose by-name thunk `f` is re-evaluated on every `send`. */
  class Gauge(statsDClient: StatsDClient, name: String, f: => Float) {
    // gauges simply evaluate on send
    def send: Unit = statsDClient.recordGaugeValue(name, f)
  }

  /** Finagle stat recorded as a StatsD execution time (value truncated to Long).
    * Stats (timing/histograms) only send when Math.random() <= sampleRate. */
  class Stat(statsDClient: StatsDClient, name: String, sampleRate: Double) extends FStat {
    def add(value: Float): Unit =
      // would prefer `recordHistogramValue`, but that's an extension, supported by Datadog and InfluxDB
      statsDClient.recordExecutionTime(name, value.toLong, sampleRate)
    def metadata: Metadata = NoMetadata
  }
}
| linkerd/linkerd | telemetry/statsd/src/main/scala/io/buoyant/telemetry/statsd/Metric.scala | Scala | apache-2.0 | 1,092 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser.ScalaElementType
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScPatternList
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScDeclaredElementsHolder
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createWildcardPattern
import org.jetbrains.plugins.scala.lang.psi.stubs.ScBindingPatternStub
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.result._
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
/** PSI element for a "reference pattern" — a bare name binding in a pattern,
  * e.g. `x` in `val x = ...` or `case x => ...`.
  *
  * May be backed either by an AST node (parsed sources) or by a stub (indexes).
  */
class ScReferencePatternImpl private(stub: ScBindingPatternStub[ScReferencePattern], node: ASTNode)
  extends ScalaStubBasedElementImpl(stub, ScalaElementType.REFERENCE_PATTERN, node) with ScPatternImpl with ScReferencePattern with ContributedReferenceHost {

  // AST-backed constructor (no stub).
  def this(node: ASTNode) = this(null, node)

  // Stub-backed constructor (no AST node).
  def this(stub: ScBindingPatternStub[ScReferencePattern]) = this(stub, null)

  // A bare name binding always matches, whatever the scrutinee's type.
  override def isIrrefutableFor(t: Option[ScType]): Boolean = true

  override def nameId: PsiElement = findChildByType[PsiElement](TokenSets.ID_SET)

  // `ifReadAllowed` avoids touching the PSI outside a read action.
  override def toString: String = "ReferencePattern: " + ifReadAllowed(name)("")

  // The pattern's type is entirely determined by the expected type from context.
  override def `type`(): TypeResult = {
    this.expectedType match {
      case Some(x) => Right(x)
      case _ => Failure(ScalaBundle.message("cannot.define.expected.type"))
    }
  }

  override def getReferences: Array[PsiReference] = {
    PsiReferenceService.getService.getContributedReferences(this)
  }

  // In compiled (decompiled) files, navigate to the declared element with the
  // same name inside the containing member instead of this synthetic pattern.
  override def getNavigationElement: PsiElement = getContainingFile match {
    case sf: ScalaFile if sf.isCompiled =>
      val parent = PsiTreeUtil.getParentOfType(this, classOf[ScMember]) // there is no complicated pattern-based declarations in decompiled files
      if (parent != null) {
        val navElem = parent.getNavigationElement
        navElem match {
          case holder: ScDeclaredElementsHolder => holder.declaredElements.find(_.name == name).getOrElse(navElem)
          case x => x
        }
      }
      else super.getNavigationElement
    case _ => super.getNavigationElement
  }

  override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = {
    ScalaPsiUtil.processImportLastParent(processor, state, place, lastParent, `type`())
  }

  /** Removes this binding from its containing pattern list.
    *
    *  - Sole pattern in the list: delete the whole surrounding declaration.
    *  - First of several simple patterns: delete this pattern together with
    *    the following comma.
    *  - A later simple pattern: delete it together with the preceding comma.
    *  - Otherwise (e.g. inside a tuple pattern) the name cannot simply be
    *    removed, so it is replaced by a wildcard `_`.
    */
  override def delete(): Unit = {
    getContext match {
      case pList: ScPatternList if pList.patterns == Seq(this) =>
        val context: PsiElement = pList.getContext
        context.getContext.deleteChildRange(context, context)
      case pList: ScPatternList if pList.simplePatterns && pList.patterns.startsWith(Seq(this)) =>
        val end = this.nextSiblings.find(_.getNode.getElementType == ScalaTokenTypes.tCOMMA).get.getNextSiblingNotWhitespace.getPrevSibling
        pList.deleteChildRange(this, end)
      case pList: ScPatternList if pList.simplePatterns =>
        val start = this.prevSiblings.find(_.getNode.getElementType == ScalaTokenTypes.tCOMMA).get.getPrevSiblingNotWhitespace.getNextSibling
        pList.deleteChildRange(start, this)
      case _ =>
        // val (a, b) = t
        // val (_, b) = t
        replace(createWildcardPattern)
    }
  }

  override def getOriginalElement: PsiElement = super[ScReferencePattern].getOriginalElement
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScReferencePatternImpl.scala | Scala | apache-2.0 | 3,862 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
import monix.execution.exceptions.DummyException
import monix.execution.internal.Platform
import monix.tail.batches.BatchCursor
import org.scalacheck.Test
import org.scalacheck.Test.Parameters
import scala.annotation.tailrec
object IterantDropWhileSuite extends BaseTestSuite {
  // Use larger generated inputs on the JVM; keep them small on JS for speed.
  override lazy val checkConfig: Parameters = {
    if (Platform.isJVM)
      Test.Parameters.default.withMaxSize(256)
    else
      Test.Parameters.default.withMaxSize(32)
  }

  /** Reference model: `List.dropWhile` written as an explicit recursion. */
  @tailrec
  def dropFromList(p: Int => Boolean)(list: List[Int]): List[Int] =
    list match {
      case x :: xs =>
        if (p(x)) dropFromList(p)(xs)
        else list
      case Nil =>
        Nil
    }

  test("Iterant.dropWhile equivalence with List.dropWhile") { implicit s =>
    check3 { (list: List[Int], idx: Int, p: Int => Boolean) =>
      val iter = arbitraryListToIterant[Coeval, Int](list, math.abs(idx) + 1, allowErrors = false)
      // Append a fixed non-empty suffix so the stream is never empty.
      val stream = iter ++ Iterant[Coeval].of(1, 2, 3)
      val received = stream.dropWhile(p).toListL.runTry()
      val expected = stream.toListL.map(dropFromList(p)).runTry()
      if (received != expected) {
        // Debug aid: print the mismatching values before the property fails.
        println(s"$received != $expected")
      }
      received <-> expected
    }
  }

  test("Iterant.dropWhile protects against broken batches") { implicit s =>
    check1 { (iter: Iterant[Task, Int]) =>
      val dummy = DummyException("dummy")
      // A batch that throws when consumed, appended after a well-behaved prefix.
      val suffix = Iterant[Task].nextBatchS[Int](new ThrowExceptionBatch(dummy), Task.now(Iterant[Task].empty))
      val stream = iter.onErrorIgnore ++ suffix
      val received = stream.dropWhile(_ => true)
      received <-> Iterant[Task].haltS[Int](Some(dummy))
    }
  }

  test("Iterant.dropWhile protects against broken cursors") { implicit s =>
    check1 { (iter: Iterant[Task, Int]) =>
      val dummy = DummyException("dummy")
      // A cursor that throws when consumed, appended after a well-behaved prefix.
      val suffix = Iterant[Task].nextCursorS[Int](new ThrowExceptionCursor(dummy), Task.now(Iterant[Task].empty))
      val stream = iter.onErrorIgnore ++ suffix
      val received = stream.dropWhile(_ => true)
      received <-> Iterant[Task].haltS[Int](Some(dummy))
    }
  }

  test("Iterant.dropWhile protects against user code") { implicit s =>
    check1 { (iter: Iterant[Task, Int]) =>
      val dummy = DummyException("dummy")
      val suffix = Iterant[Task].nextCursorS[Int](BatchCursor(1,2,3), Task.now(Iterant[Task].empty))
      val stream = iter.onErrorIgnore ++ suffix
      // The predicate itself throws; the error must surface as a halt.
      val received = stream.dropWhile(_ => throw dummy)
      received <-> Iterant[Task].haltS[Int](Some(dummy))
    }
  }

  test("Iterant.dropWhile preserves the source earlyStop") { implicit s =>
    var effect = 0
    val stop = Coeval.eval(effect += 1)
    val source = Iterant[Coeval].nextCursorS(BatchCursor(1,2,3), Coeval.now(Iterant[Coeval].empty[Int])).guarantee(stop)
    val stream = source.dropWhile(_ => true)
    stream.completedL.value()
    // The guarantee (earlyStop) finalizer must run exactly once.
    assertEquals(effect, 1)
  }
} | Wogan/monix | monix-tail/shared/src/test/scala/monix/tail/IterantDropWhileSuite.scala | Scala | apache-2.0 | 3,659 |
package com.github.karlhigley.spark.neighbors.collision
import org.apache.spark.mllib.linalg.SparseVector
import org.apache.spark.rdd.RDD
import com.github.karlhigley.spark.neighbors.lsh.HashTableEntry
/**
* Abstract base class for approaches to identifying collisions from
* the pre-computed hash tables. This should be sufficiently
* general to support a variety of collision and candidate identification
* strategies, including multi-probe (for scalar-random-projection LSH),
* and banding (for minhash LSH).
*/
private[neighbors] abstract class CollisionStrategy {

  // An indexed data point: (point id, sparse feature vector).
  type Point = (Long, SparseVector)

  /** Regroups the pre-computed hash table entries into key/point pairs,
    * so that points sharing a key are candidate neighbors.
    *
    * NOTE(review): the exact shape of the `Product` key is strategy-specific;
    * see the concrete subclasses for its meaning.
    */
  def apply(hashTables: RDD[_ <: HashTableEntry[_]]): RDD[(Product, Point)]
}
| L2V/like2vec | src/prediction/src/main/scala/com/github/karlhigley/spark/neighbors/collision/CollisionStrategy.scala | Scala | apache-2.0 | 693 |
/*
* Copyright 2014 Cisco Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cisco.oss.foundation.orchestration.scope.utils
import com.cisco.oss.foundation.ip.utils.IpUtils
import com.cisco.oss.foundation.orchestration.scope.model.{Instance, Network, Node}
import com.google.common.collect.ImmutableList
import org.jclouds.scriptbuilder.domain.Statements._
import org.jclouds.scriptbuilder.domain.{OsFamily, Statement, StatementList}
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext
import scala.io.Source
/**
* Created with IntelliJ IDEA.
* User: igreenfi
* Date: 3/10/14
* Time: 12:08 PM
*/
class LoadBalancerUtils(port: Int, applicationName: String, urlPrefix: String, instanceName: String, systemName: String, rsaKeyPair: Map[String, String]) {

  // Shell statements accumulated for the nginx provisioning script.
  val statements: ImmutableList.Builder[Statement] = ImmutableList.builder[Statement]

  // "host:port" entries that the generated upstream block proxies to.
  private var backendServers: List[String] = Nil

  /** Registers a backend server ("host:port") to be load-balanced. */
  def addBackendServer(server: String) {
    backendServers = server :: backendServers
  }

  /**
   *
   * @example {{{
   * upstream <applicationName>_units {
   *  server 10.45.37.146:6040 weight=10 max_fails=3 fail_timeout=30s; # Reverse proxy to BES1
   *  server 10.45.37.52:6040 weight=10 max_fails=3 fail_timeout=30s; # Reverse proxy to BES1
   * }
   * server {
   *  listen <ip>:<port>; # Listen on the external interface
   *  server_name <app>.<instance>.<system>.vcs-foundation.com; # The server name
   *  access_log /var/log/nginx/nginx.access.log;
   *  error_log /var/log/nginx/nginx_error.log debug;
   *  location /<urlPrefix> {
   *    proxy_pass http://<applicationName>_units; # Load balance the URL location "/" to the upstream upm_units
   *  }
   *  error_page 500 502 503 504 /50x.html;
   *  location = /50x.html {
   *    root /var/www/nginx-default;
   *  }
   * }
   * }}}
   * @param host NOTE(review): currently unused — the generated config listens
   *             on 0.0.0.0; confirm whether binding to `host` was intended.
   * @param port the port nginx listens on (note: shadows the class-level `port`).
   * @return the rendered UNIX shell script as a string.
   */
  def nginxConfigurationScript(host: String, port: String) = {
    statements.add(exec("yum -y install nginx"))
    statements.add(exec("service nginx start"))
    // The single quote opened in the next echo stays open across all the
    // following exec lines: when the rendered script runs, the shell treats
    // everything up to the closing `'` below as one multi-line string that
    // is appended to the nginx config file.
    statements.add(exec(s"echo 'upstream ${applicationName}_units {"))
    backendServers.foreach {
      case server => {
        statements.add(exec(s" server $server weight=10 max_fails=3 fail_timeout=30s;"))
      }
    }
    statements.add(exec("}"))
    statements.add(exec("server {"))
    statements.add(exec(s" listen 0.0.0.0:$port; # Listen on the external interface"))
    statements.add(exec(s" server_name $applicationName.$instanceName.$systemName.vcs-foundation.com; # The server name"))
    statements.add(exec(s" access_log /var/log/nginx/nginx.access.log;"))
    statements.add(exec(s" error_log /var/log/nginx/nginx_error.log debug;"))
    statements.add(exec(s" location /$urlPrefix {"))
    statements.add(exec(s" proxy_pass http://${applicationName}_units; # Load balance the URL location '/' to the upstream upm_units"))
    statements.add(exec(s" }"))
    statements.add(exec(s" error_page 500 502 503 504 /50x.html;"))
    statements.add(exec(s" location = /50x.html {"))
    statements.add(exec(s" root /var/www/nginx-default;"))
    statements.add(exec(s" }"))
    statements.add(exec(s"}' >> /etc/nginx/conf.d/$applicationName.conf"))
    statements.add(exec("service nginx reload"))
    statements.add(exec("chkconfig nginx on"))
    new StatementList(statements.build).render(OsFamily.UNIX)
  }

  /** Provisions a VM named "<system>-<instance>-lb-<app>" from the
    * `loadbalancer.json` description at `productRepoUrl`, opens the
    * load-balancer port on its networks, and configures nginx on it
    * asynchronously once the VM is up.
    */
  def createLoadBalancer(productRepoUrl: String, instance: Instance)(implicit ec:ExecutionContext) {
    val utils = new VMUtils
    val loadBalancerDescription = s"${productRepoUrl}loadbalancer.json"
    val nodeString = Source.fromFile(loadBalancerDescription).getLines().mkString
    val node = ScopeUtils.mapper.readValue(nodeString, classOf[Node])
    // Rebuild the node's network list with the LB port opened on each network.
    var newNetwork : List[Network] = Nil
    node.network.foreach{
      case net => {
        newNetwork = net.copy(openPorts = Option(List(port.toString))) :: newNetwork
      }
    }
    val createVMFuture = utils.createVM(systemName, instanceName, "load-balancer", node.copy(name = s"$systemName-$instanceName-lb-$applicationName", network = newNetwork), rsaKeyPair, IpUtils.getHostName, ScopeUtils.configuration.getInt("scope.http.port"), instance.instanceId)
    createVMFuture onSuccess {
      case nodeMetadata => {
        // Run the nginx setup script over SSH with the generated private key.
        val results = utils.runScriptOnNode(nginxConfigurationScript(nodeMetadata.hostname, port.toString), "configure_load_balancer", nodeMetadata, rsaKeyPair.get("private").get, true)
        results.getExitStatus match {
          case 0 =>
          case _ => throw new IllegalStateException("Failed to configure load balancer.")
        }
      }
    }
    createVMFuture onFailure {
      case t => throw new IllegalStateException("Failed to create load balancer machine.", t)
    }
  }
}
| foundation-runtime/orchestration | src/main/java/com/cisco/oss/foundation/orchestration/scope/utils/LoadBalancerUtils.scala | Scala | apache-2.0 | 5,852 |
package controllers
import play.api.mvc._
import play.api.mvc.Results.Redirect
import lila.api.Context
import lila.app._
import lila.game.{ Pov, AnonCookie }
import lila.security.Granter
import views._
private[controllers] trait TheftPrevention {

  /** Runs `ok` only when the current request may act as this game player;
    * otherwise redirects to the spectator (watcher) view of the game. */
  protected def PreventTheft(pov: Pov)(ok: => Fu[Result])(implicit ctx: Context): Fu[Result] =
    isTheft(pov).fold(fuccess(Redirect(routes.Round.watcher(pov.gameId, pov.color.name))), ok)

  /** True when the request does not come from the player of `pov`.
    * Imported games and AI players can never be "stolen". */
  protected def isTheft(pov: Pov)(implicit ctx: Context) = pov.game.isPgnImport || pov.player.isAi || {
    (pov.player.userId, ctx.userId) match {
      // game belongs to a registered player but the request is anonymous
      case (Some(_), None) => true
      // another logged-in user, unless they are a super admin
      case (Some(playerId), Some(userId)) =>
        playerId != userId && !(ctx.me ?? Granter.superAdmin)
      // anonymous game: web clients must present the anonymous player cookie
      // (mobile API requests are exempt)
      case (None, _) =>
        lila.api.Mobile.Api.requestVersion(ctx.req).isEmpty &&
          !ctx.req.cookies.get(AnonCookie.name).map(_.value).contains(pov.playerId)
    }
  }
}
| ccampo133/lila | app/controllers/TheftPrevention.scala | Scala | mit | 918 |
object Problem3 {

  /** Returns the largest prime factor of `b`.
    *
    * Trial division: starting from f = 2, each factor is divided out
    * completely; once `f` reaches the remaining value `n`, that value is
    * the largest prime factor.
    *
    * @param b the number to factor; must be >= 2 (the loop below would
    *          never terminate for smaller inputs)
    */
  def largestPrimeFactor(b: BigInt): BigInt = {
    require(b >= 2, "largestPrimeFactor is only defined for integers >= 2")
    @scala.annotation.tailrec
    def loop(f: BigInt, n: BigInt): BigInt =
      if (f == n) n
      else if (n % f == 0) loop(f, n / f)
      else loop(f + 1, n)
    loop(BigInt(2), b)
  }

  // Project Euler problem 3: largest prime factor of 600851475143.
  // Captured in a val so the result is not silently discarded.
  val answer: BigInt = largestPrimeFactor(BigInt(600851475143L))
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.Cast
import org.apache.spark.sql.catalyst.expressions.postgreSQL.PostgreCastStringToBoolean
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{BooleanType, StringType}
object PostgreSQLDialect {
  // Extra analyzer rules enabled only under the PostgreSQL dialect.
  val postgreSQLDialectRules: List[Rule[LogicalPlan]] =
    List(CastStringToBoolean)

  /** Replaces string-to-boolean casts with the PostgreSQL-compatible cast. */
  object CastStringToBoolean extends Rule[LogicalPlan] with Logging {
    override def apply(plan: LogicalPlan): LogicalPlan = {
      // The SQL configuration `spark.sql.dialect` can be changed in runtime.
      // To make sure the configuration is effective, we have to check it during rule execution.
      if (!SQLConf.get.usePostgreSQLDialect) {
        plan
      } else {
        plan.transformExpressions {
          case Cast(child, BooleanType, _) if child.dataType == StringType =>
            PostgreCastStringToBoolean(child)
        }
      }
    }
  }
}
| bdrillard/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/PostgreSQLDialect.scala | Scala | apache-2.0 | 1,996 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. with significant
* portions copyright 2012-2014 Amazon.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.kinesis.consumer
import java.util.List
import com.amazonaws.services.kinesis.clientlibrary.exceptions.{
InvalidStateException,
ShutdownException,
ThrottlingException
}
import com.amazonaws.services.kinesis.clientlibrary.interfaces.{
IRecordProcessor,
IRecordProcessorCheckpointer
}
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason
import com.amazonaws.services.kinesis.model.Record
import scala.util.control.Breaks._
import scala.collection.JavaConversions._
// Thrift.
import org.apache.thrift.TDeserializer
class RecordProcessor(config: KinesisConsumerConfig)
  extends IRecordProcessor {

  // TDeserializer is stateful; all use is guarded by `this.synchronized`
  // in printDataThrift.
  private val thriftDeserializer = new TDeserializer()

  private var kinesisShardId: String = _
  private var nextCheckpointTimeInMillis: Long = _

  // Backoff and retry settings.
  private val BACKOFF_TIME_IN_MILLIS = 3000L
  private val NUM_RETRIES = 10
  private val CHECKPOINT_INTERVAL_MILLIS = 1000L

  /** Called once by the KCL when this processor is assigned a shard. */
  override def initialize(shardId: String) = {
    println("Initializing record processor for shard: " + shardId)
    this.kinesisShardId = shardId
  }

  // Payload printer, resolved once at construction from the configured type.
  private val printData: (Array[Byte] => Unit) =
    if (config.streamDataType == "string") printDataString
    else if (config.streamDataType == "thrift") printDataThrift
    else throw new RuntimeException(
      "data-type configuration must be 'string' or 'thrift'.")

  /** Prints each record in the batch, then checkpoints at most once per
    * CHECKPOINT_INTERVAL_MILLIS. */
  override def processRecords(records: List[Record],
      checkpointer: IRecordProcessorCheckpointer) = {
    println(s"Processing ${records.size} records from $kinesisShardId")
    processRecordsWithRetries(records)

    if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
      checkpoint(checkpointer)
      nextCheckpointTimeInMillis =
        System.currentTimeMillis + CHECKPOINT_INTERVAL_MILLIS
    }
  }

  // NOTE(review): despite the name, this does not retry individual records;
  // it logs and skips any record whose processing throws.
  private def processRecordsWithRetries(records: List[Record]) = {
    for (record <- records) {
      try {
        println(s"Sequence number: ${record.getSequenceNumber}")
        printData(record.getData.array)
        println(s"Partition key: ${record.getPartitionKey}")
      } catch {
        case t: Throwable =>
          println(s"Caught throwable while processing record $record")
          println(t)
      }
    }
  }

  // Payload printer for plain string records.
  private def printDataString(data: Array[Byte]) =
    println("data: " + new String(data))

  // Payload printer for Thrift-serialized StreamData records.
  private def printDataThrift(data: Array[Byte]) = {
    val deserializedData: generated.StreamData = new generated.StreamData()
    // Serialize access to the shared (non-thread-safe) deserializer.
    this.synchronized {
      thriftDeserializer.deserialize(deserializedData, data)
    }
    println("data: " + deserializedData.toString)
  }

  /** Called by the KCL when the lease is lost or the shard is closed.
    * Checkpoint only on TERMINATE (end of shard). */
  override def shutdown(checkpointer: IRecordProcessorCheckpointer,
      reason: ShutdownReason) = {
    println(s"Shutting down record processor for shard: $kinesisShardId")
    if (reason == ShutdownReason.TERMINATE) {
      checkpoint(checkpointer)
    }
  }

  /** Checkpoints the shard, retrying up to NUM_RETRIES times with a fixed
    * backoff; throttling is retried, shutdown/invalid-state are logged. */
  private def checkpoint(checkpointer: IRecordProcessorCheckpointer) = {
    println(s"Checkpointing shard $kinesisShardId")
    breakable { for (i <- 0 to NUM_RETRIES-1) {
      try {
        checkpointer.checkpoint()
        break
      } catch {
        case se: ShutdownException =>
          println(s"Caught shutdown exception, skipping checkpoint. $se")
        case e: ThrottlingException =>
          if (i >= (NUM_RETRIES - 1)) {
            println(s"Checkpoint failed after ${i + 1} attempts. $e")
          } else {
            println(s"Transient issue when checkpointing - attempt ${i + 1} of $NUM_RETRIES. $e")
          }
        case e: InvalidStateException =>
          println("Cannot save checkpoint to the DynamoDB table used by " +
            s"the Amazon Kinesis Client Library. $e")
      }
      Thread.sleep(BACKOFF_TIME_IN_MILLIS)
    } }
  }
}
| snowplow/kinesis-example-scala-consumer | src/main/scala/com.snowplowanalytics.kinesis.consumer/RecordProcessor.scala | Scala | apache-2.0 | 4,569 |
import sbt._
import Settings._
// Central catalogue of third-party dependencies for the sbt build.
// `Version.*` constants come from Settings (imported at the top of this file).
object Dependencies {
  // Date/time handling: Joda-Time plus its type-converter companion.
  val jodaTime = Seq(
    "joda-time" % "joda-time" % "2.3",
    "org.joda" % "joda-convert" % "1.5"
  )
  val guava = "com.google.guava" % "guava" % "15.0"
  // Async HTTP client (dispatch) with jsoup-based HTML handling.
  val httpClient = Seq(
    "net.databinder.dispatch" %% "dispatch-core" % "0.11.0",
    "net.databinder.dispatch" %% "dispatch-jsoup" % "0.11.0"
  )
  // Article text extraction.
  val boilerpipe = "com.syncthemall" % "boilerpipe" % "1.2.1"
  // Numerical processing / NLP libraries.
  val breeze = "org.scalanlp" %% "breeze" % "0.5.2"
  val chalk = "org.scalanlp" % "chalk" % "1.3.0"
  val rxjava = "com.netflix.rxjava" % "rxjava-scala" % "0.15.1"
  // Akka modules grouped by purpose.
  object akka {
    val core = Seq(
      "com.typesafe.akka" %% "akka-actor" % Version.Akka,
      "com.typesafe.akka" %% "akka-cluster" % Version.Akka,
      "com.typesafe.akka" %% "akka-contrib" % Version.Akka
    )
    val logging = Seq(
      "com.typesafe.akka" %% "akka-slf4j" % Version.Akka,
      "org.slf4j" % "slf4j-log4j12" % "1.7.5"
    )
    val kernel = "com.typesafe.akka" %% "akka-kernel" % Version.Akka
  }
  object db {
    val mongo = "org.reactivemongo" %% "reactivemongo" % "0.10.0-SNAPSHOT"
  }
  // Scala modules that ship separately from the core library.
  object scala {
    val reflect = "org.scala-lang" % "scala-reflect" % Version.Scala
    val swing = "org.scala-lang" % "scala-swing" % Version.Scala
  }
  // Scalatra web stack plus embedded Jetty for the `container` configuration.
  val scalatra = Seq(
    "org.eclipse.jetty" % "jetty-webapp" % "9.0.4.v20130625" % "container",
    "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts Artifact("javax.servlet", "jar", "jar"),
    "org.json4s" %% "json4s-jackson" % "3.2.5",
    "org.json4s" %% "json4s-ext" % "3.2.5",
    "org.scalatra" %% "scalatra" % Version.Scalatra,
    "org.scalatra" %% "scalatra-json" % Version.Scalatra,
    "org.scalatra" %% "scalatra-scalate" % Version.Scalatra
  )
  // Test-scoped dependencies.
  object tests {
    val scalatest = "org.scalatest" %% "scalatest" % "2.0" % "test"
    val akka = Seq(
      "com.typesafe.akka" %% "akka-testkit" % Version.Akka % "test",
      "com.typesafe.akka" %% "akka-multi-node-testkit" % Version.Akka % "test"
    )
    val scalatra = "org.scalatra" %% "scalatra-scalatest" % Version.Scalatra % "test"
  }
} | kjanosz/stock-market-sherlock | project/Dependencies.scala | Scala | apache-2.0 | 2,144 |
/**
* Copyright (C) 2016 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf
package object webapp {
  // NOTE: both aliases below are structural types, so every call through them
  // is dispatched via runtime reflection (scala.language.reflectiveCalls).
  // They appear to mirror a subset of the servlet request/session API without
  // a compile-time dependency on it — confirm against the call sites.

  /** Minimal view of an authenticated request: remote user plus role checks. */
  type UserRolesFacade = {
    def getRemoteUser(): String
    def isUserInRole(role: String): Boolean
  }

  /** Minimal key/value session view. */
  type SessionFacade = {
    def getAttribute(name: String): AnyRef
    def setAttribute(name: String, value: AnyRef): Unit
  }
}
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/webapp/package.scala | Scala | lgpl-2.1 | 949 |
package scalaxy.js
import scala.reflect.api.Universe
class global extends scala.annotation.StaticAnnotation
/** Mixin providing detection of the `scalaxy.js.global` annotation on
  * symbols of the bound reflection universe.
  */
trait Globals {
  val global: Universe
  import global._

  /** True when `sym` is non-null and carries the `scalaxy.js.global` annotation. */
  def hasGlobalAnnotation(sym: Symbol): Boolean =
    sym != null && sym.annotations.exists { ann =>
      // `ann.tpe` may be null for malformed annotations; treat that as no match.
      Option(ann.tpe) match {
        case Some(tpe) => tpe =:= typeOf[scalaxy.js.global]
        case None      => false
      }
    }
}
| nativelibs4java/Scalaxy | Experiments/JS/Compiler/src/main/scala/scalaxy/js/conversion/global.scala | Scala | bsd-3-clause | 329 |
package featurebee.impl
import java.util.UUID
import featurebee.ClientInfoImpl
import org.scalatest.{FeatureSpec, MustMatchers}
/** Specs for UuidDistributionCondition range validation and application. */
class UuidDistributionConditionSpec extends FeatureSpec with MustMatchers {

  // Both invalid-range scenarios expect the same validation message.
  private val boundsMessage = "Range should describe a range between 1 and 100 inclusive"

  feature("Invalid ranges") {
    scenario("Lower boundary is too low") {
      val thrown = intercept[IllegalArgumentException] {
        UuidDistributionCondition.apply(0 to 10)
      }
      thrown.getMessage must be(boundsMessage)
    }

    scenario("Upper boundary is too high") {
      val thrown = intercept[IllegalArgumentException] {
        UuidDistributionCondition.apply(90 to 101)
      }
      thrown.getMessage must be(boundsMessage)
    }
  }

  feature("Valid ranges") {
    scenario("max range") {
      // The full 1..100 range must apply to any client, whatever its UUID.
      (1 to 1000).foreach { _ =>
        val info = ClientInfoImpl(uuid = Some(UUID.randomUUID()))
        UuidDistributionCondition.apply(1 to 100).applies(info) must be(true)
      }
    }
  }
}
| AutoScout24/featurebee-scala | src/test/scala/featurebee/impl/UuidDistributionConditionSpec.scala | Scala | mit | 1,000 |
package core
import core.ModuloOperations._
import integer.integers
import org.scalatest.{FunSuite, Matchers}
/** Unit tests for ResidueClass: equality, hashing, set membership, printing. */
class ResidueClassTest extends FunSuite with Matchers {
  // Fixes the modulus and returns a constructor for residue classes of Int.
  def intToResidueClass(modulus: Int): Int => ResidueClass[Int] = x => ResidueClass(x, modulus)
  test("Residue class equality holds as expected") {
    implicit def intsMod4 = integers modulo_r 4
    def classOf = intToResidueClass(4)
    // Representatives that differ by a multiple of the modulus are equal.
    classOf(2) == classOf(2) should be (true)
    classOf(2) == classOf(6) should be (true)
    classOf(2) == classOf(-2) should be (true)
  }
  test("Residue class equality does not hold as expected") {
    implicit def intsMod4 = integers modulo_r 4
    def classOf = intToResidueClass(4)
    classOf(2) != classOf(3) should be (true)
    classOf(7) != classOf(0) should be (true)
  }
  test("Residue classes of different moduli cannot be equal") {
    def mod4 = intToResidueClass(4)
    def mod6 = intToResidueClass(6)
    mod4(2) != mod6(2) should be (true)
  }
  // TODO: suppress the compiler warning. See https://github.com/dkettlestrings/thunder/issues/60
  test("Residue class equality takes types into account") {
    implicit def intsMod4 = integers modulo_r 4
    def classOf = intToResidueClass(4)
    // Comparing a residue class to a raw Int must never be true.
    classOf(2) != 2 should be (true)
  }
  test("Hashcode respects equality") {
    implicit def intsMod4 = integers modulo_r 4
    def classOf = intToResidueClass(4)
    // Equal classes (same residue mod 4) must hash identically.
    classOf(2).hashCode == classOf(2).hashCode should be (true)
    classOf(2).hashCode == classOf(6).hashCode should be (true)
    classOf(2).hashCode == classOf(-2).hashCode should be (true)
  }
  test("Residue classes can be used in Sets") {
    implicit def intsMod4 = integers modulo_r 4
    def classOf = intToResidueClass(4)
    // 1 and 5 are the same class mod 4, so the set collapses to two elements.
    val set = Set(classOf(0), classOf(1), classOf(5))
    set.size should be (2)
    set.contains(classOf(8)) should be (true)
    set.contains(classOf(7)) should be (false)
  }
  test("Residue classes are printed in square bracket notation") {
    def mod4 = intToResidueClass(4)
    mod4(3).toString should be ("[3]_4")
  }
}
| dkettlestrings/thunder | src/test/scala/core/ResidueClassTest.scala | Scala | gpl-3.0 | 2,067 |
package gitbucket.core.util
import java.io._
import java.sql._
import java.text.SimpleDateFormat
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
import scala.util.Using
/**
* Provides implicit class which extends java.sql.Connection.
* This is used in following points:
*
* - Automatic migration in [[gitbucket.core.servlet.InitializeListener]]
* - Data importing / exporting in [[gitbucket.core.controller.SystemSettingsController]] and [[gitbucket.core.controller.FileUploadController]]
*/
object JDBCUtil {
implicit class RichConnection(private val conn: Connection) extends AnyVal {
def update(sql: String, params: Any*): Int = {
execute(sql, params: _*) { stmt =>
stmt.executeUpdate()
}
}
def find[T](sql: String, params: Any*)(f: ResultSet => T): Option[T] = {
execute(sql, params: _*) { stmt =>
Using.resource(stmt.executeQuery()) { rs =>
if (rs.next) Some(f(rs)) else None
}
}
}
def select[T](sql: String, params: Any*)(f: ResultSet => T): Seq[T] = {
execute(sql, params: _*) { stmt =>
Using.resource(stmt.executeQuery()) { rs =>
val list = new ListBuffer[T]
while (rs.next) {
list += f(rs)
}
list.toSeq
}
}
}
def selectInt(sql: String, params: Any*): Int = {
execute(sql, params: _*) { stmt =>
Using.resource(stmt.executeQuery()) { rs =>
if (rs.next) rs.getInt(1) else 0
}
}
}
private def execute[T](sql: String, params: Any*)(f: (PreparedStatement) => T): T = {
Using.resource(conn.prepareStatement(sql)) { stmt =>
params.zipWithIndex.foreach {
case (p, i) =>
p match {
case x: Int => stmt.setInt(i + 1, x)
case x: String => stmt.setString(i + 1, x)
}
}
f(stmt)
}
}
    /** Replays a SQL dump from `in`: splits the byte stream on `;` statement
      * terminators (ignoring semicolons inside string literals) and executes
      * each statement inside a single transaction, rolling back on failure.
      */
    def importAsSQL(in: InputStream): Unit = {
      conn.setAutoCommit(false)
      try {
        Using.resource(in) { in =>
          var out = new ByteArrayOutputStream()
          var length = 0
          val bytes = new scala.Array[Byte](1024 * 8)
          // Tracks whether we are inside a '...' literal so that semicolons
          // inside values do not terminate the current statement.
          var stringLiteral = false
          while ({ length = in.read(bytes); length != -1 }) {
            for (i <- 0 until length) {
              val c = bytes(i)
              if (c == '\\'') {
                stringLiteral = !stringLiteral
              }
              if (c == ';' && !stringLiteral) {
                val sql = new String(out.toByteArray, "UTF-8")
                if (sql != null && !sql.isEmpty()) {
                  conn.update(sql.trim)
                }
                // Start accumulating the next statement.
                out = new ByteArrayOutputStream()
              } else {
                out.write(c)
              }
            }
          }
          // Execute whatever remains after the last semicolon, if anything.
          val remain = out.toByteArray
          if (remain.length != 0) {
            val sql = new String(remain, "UTF-8")
            conn.update(sql.trim)
          }
        }
        conn.commit()
      } catch {
        case e: Exception => {
          conn.rollback()
          throw e
        }
      }
    }
    /** Dumps the contents of `targetTables` as a SQL script (DELETE + INSERT
      * statements) into a temp file and returns that file.
      *
      * DELETEs are emitted in reverse dependency order and INSERTs in forward
      * dependency order so the script can replay without violating foreign keys.
      */
    def exportAsSQL(targetTables: Seq[String]): File = {
      val dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss")
      val file = File.createTempFile("gitbucket-export-", ".sql")
      Using.resource(new FileOutputStream(file)) { out =>
        val dbMeta = conn.getMetaData
        val allTablesInDatabase = allTablesOrderByDependencies(dbMeta)
        allTablesInDatabase.reverse.foreach { tableName =>
          if (targetTables.contains(tableName)) {
            out.write(s"DELETE FROM ${tableName};\\n".getBytes("UTF-8"))
          }
        }
        allTablesInDatabase.foreach { tableName =>
          if (targetTables.contains(tableName)) {
            val sb = new StringBuilder()
            select(s"SELECT * FROM ${tableName}") { rs =>
              sb.append(s"INSERT INTO ${tableName} (")
              val rsMeta = rs.getMetaData
              val columns = (1 to rsMeta.getColumnCount).map { i =>
                (rsMeta.getColumnName(i), rsMeta.getColumnType(i))
              }
              sb.append(columns.map(_._1).mkString(", "))
              sb.append(") VALUES (")
              // Read each column with a type-appropriate accessor; note that
              // a column type outside this list fails with a MatchError.
              val values = columns.map {
                case (columnName, columnType) =>
                  if (rs.getObject(columnName) == null) {
                    null
                  } else {
                    columnType match {
                      case Types.BOOLEAN | Types.BIT => rs.getBoolean(columnName)
                      case Types.VARCHAR | Types.CLOB | Types.CHAR | Types.LONGVARCHAR => rs.getString(columnName)
                      case Types.INTEGER => rs.getInt(columnName)
                      case Types.BIGINT => rs.getLong(columnName)
                      case Types.TIMESTAMP => rs.getTimestamp(columnName)
                    }
                  }
              }
              // Render SQL literals: escape quotes in strings, format
              // timestamps as ISO-like text, NULL for missing values.
              val columnValues = values.map {
                case x: String => "'" + x.replace("'", "''") + "'"
                case x: Timestamp => "'" + dateFormat.format(x) + "'"
                case null => "NULL"
                case x => x
              }
              sb.append(columnValues.mkString(", "))
              sb.append(");\\n")
            }
            out.write(sb.toString.getBytes("UTF-8"))
          }
        }
      }
      file
    }
def allTableNames(): Seq[String] = {
Using.resource(conn.getMetaData.getTables(null, null, "%", Seq("TABLE").toArray)) { rs =>
val tableNames = new ListBuffer[String]
while (rs.next) {
val name = rs.getString("TABLE_NAME").toUpperCase
if (name != "VERSIONS" && name != "PLUGIN") {
tableNames += name
}
}
tableNames.toSeq
}
}
    /** Recursively collects the (upper-cased) names of tables that reference
      * `tableName` through foreign keys — its transitive children.
      *
      * The lookup name is lower-cased for PostgreSQL only, presumably because
      * that driver folds unquoted identifiers to lower case — confirm.
      */
    private def childTables(meta: DatabaseMetaData, tableName: String): Seq[String] = {
      val normalizedTableName =
        if (meta.getDatabaseProductName == "PostgreSQL") {
          tableName.toLowerCase
        } else {
          tableName
        }
      Using.resource(meta.getExportedKeys(null, null, normalizedTableName)) { rs =>
        val children = new ListBuffer[String]
        while (rs.next) {
          val childTableName = rs.getString("FKTABLE_NAME").toUpperCase
          // Guard against cycles / duplicates before recursing.
          if (!children.contains(childTableName)) {
            children += childTableName
            children ++= childTables(meta, childTableName)
          }
        }
        children.distinct.toSeq
      }
    }
private def allTablesOrderByDependencies(meta: DatabaseMetaData): Seq[String] = {
val tables = allTableNames().map { tableName =>
TableDependency(tableName, childTables(meta, tableName))
}
val edges = tables.flatMap { table =>
table.children.map { child =>
(table.tableName, child)
}
}
val ordered = tsort(edges).toSeq
val orphans = tables.collect { case x if !ordered.contains(x.tableName) => x.tableName }
ordered ++ orphans
}
def tsort[A](edges: Iterable[(A, A)]): Iterable[A] = {
@tailrec
def tsort(toPreds: Map[A, Set[A]], done: Iterable[A]): Iterable[A] = {
val (noPreds, hasPreds) = toPreds.partition { _._2.isEmpty }
if (noPreds.isEmpty) {
if (hasPreds.isEmpty) done else sys.error(hasPreds.toString)
} else {
val found = noPreds.map { _._1 }
tsort(hasPreds.map { case (k, v) => (k, v -- found) }, done ++ found)
}
}
val toPred = edges.foldLeft(Map[A, Set[A]]()) { (acc, e) =>
acc + (e._1 -> acc.getOrElse(e._1, Set())) + (e._2 -> (acc.getOrElse(e._2, Set()) + e._1))
}
tsort(toPred, Seq())
}
}
private case class TableDependency(tableName: String, children: Seq[String])
}
| xuwei-k/gitbucket | src/main/scala/gitbucket/core/util/JDBCUtil.scala | Scala | apache-2.0 | 8,029 |
package com.gu.automation.signin
import com.gu.automation.core.WebDriverFeatureSpec
import com.gu.automation.support.LogIn
import org.scalatest.Matchers
/** Tests for the log-in helper; only the cookie-domain derivation runs here
  * (the end-to-end browser test below is kept but disabled — see its note).
  */
class LoggingInTest extends WebDriverFeatureSpec with Matchers {
  info("Tests for the API Logging in function")
  feature("should be able to log in to the browser") {
    scenario("check we can get the right cookie domains") { _ =>
      // The cookie domain is the host with the leading subdomain replaced
      // by a dot, independent of scheme.
      LogIn.getCookieDomain("http://www.theguardian.com/uk") should be (".theguardian.com")
      LogIn.getCookieDomain("https://www.theguardian.com/uk") should be (".theguardian.com")
      LogIn.getCookieDomain("https://m.code.dev-theguardian.com/") should be (".code.dev-theguardian.com")
    }
    // could add another test with a fake AuthApi checking the cookies really are set
    /**
     * This is an end to end test that we really end up logged in.
     *
     * To pass it needs a local.conf containing something like
     *
     * "idApiRoot" : "https://idapi.code.dev-theguardian.com"
     * testBaseUrl: "http://m.code.dev-theguardian.com"
     * memberLogin: {
     *   "loginEmail" : "regidqa@gmail.com"
     *   "loginPassword" : "ask_gwyn!"
     * }
     * browser: chrome
     */
    // REMOVED because it can't run in the jenkins env, and we can't run sbt 'test-only -- -l needsBrowser' from the sbt-release task easily
    //    scenarioWeb("check we are logged in when we have added the cookies", Tag("needsBrowser")) { implicit driver: WebDriver =>
    //
    //      LogIn("memberLogin")
    //
    //      // now go to a URL where we are probably logged in
    //      driver.get(Config().getTestBaseUrl())
    //      val userSpan = driver.findElement(By.xpath("//div[@data-component='identity-profile']")).findElement(By.className("js-profile-info"))
    //      userSpan.getText should be ("Reg Idtester")
    //    }
  }
}
| guardian/scala-automation-web-signin | src/test/scala/com/gu/automation/signin/LoggingInTest.scala | Scala | apache-2.0 | 1,873 |
package pfds.number
import org.scalatest.FunSuite
/** Shared test suite for all natural-number representations.
  *
  * `Number` is a structural type (reflective call) standing in for the
  * representation's companion object; each concrete suite below binds it
  * to one implementation of `Nat`.
  */
trait NumberSuite[T <: Nat[T]] extends FunSuite {
  val Number: {
    def apply(num: Int): T
  }
  test("to int") {
    assert(Number(0).toInt == 0)
    assert(Number(1).toInt == 1)
    assert(Number(10).toInt == 10)
    assert(Number(453).toInt == 453)
    // Naturals cannot be negative.
    assertThrows[NegNatException.type](Number(-1))
  }
  test("increase number") {
    val zero = Number(0)
    assert(zero.toInt == 0)
    assert(zero.inc.toInt == 1)
    assert(zero.inc.inc.toInt == 2)
    // `inc` must be non-destructive: the original value is unchanged.
    assert(zero.toInt == 0)
  }
  test("decrease number") {
    val two = Number(2)
    assert(two.toInt == 2)
    assert(two.dec.toInt == 1)
    assert(two.dec.dec.toInt == 0)
    // Decrementing below zero is rejected.
    assertThrows[NegNatException.type](two.dec.dec.dec)
    assert(two.toInt == 2)
  }
  test("plus numbers") {
    val one = Number(1)
    val two = Number(2)
    // Addition is commutative and agrees with Int addition.
    assert((one + two).toInt == 3)
    assert((two + one).toInt == 3)
    assert((one.inc.dec + Number(0)).toInt == 1)
    assert((Number(12) + Number(21)).toInt == (12 + 21))
  }
}
// One concrete suite per number representation; each simply binds `Number`
// to the corresponding companion object and inherits all shared tests.
class NormalRepresentationSuite extends NumberSuite[NormalRepresentation] {
  val Number = NormalRepresentation
}
class DenseRepresentationSuite extends NumberSuite[DenseRepresentation] {
  val Number = DenseRepresentation
}
class SparseRepresentationSuite extends NumberSuite[SparseRepresentation] {
  val Number = SparseRepresentation
}
class SegmentedRepresentationSuite extends NumberSuite[SegmentedRepresentation] {
  val Number = SegmentedRepresentation
}
class SkewRepresentationSuite extends NumberSuite[SkewRepresentation] {
  val Number = SkewRepresentation
}
| ZhiruiLi/PfdsInScala | src/test/scala/pfds/number/NumberSuite.scala | Scala | mit | 1,609 |
package kotlin
import java.io.File
import java.lang.reflect.{Field, Method}
import java.util.jar.JarEntry
import sbt.Keys.{Classpath, TaskStreams}
import sbt._
import sbt.io._
import sbt.internal.inc.classpath.ClasspathUtilities
import collection.JavaConverters._
import scala.util.Try
/**
* @author pfnguyen
*/
object KotlinCompile {
  /** True when `jarfile` contains at least one entry matching `pred`. */
  def grepjar(jarfile: File)(pred: JarEntry => Boolean): Boolean =
    jarfile.isFile && Using.jarFile(false)(jarfile) { in =>
      in.entries.asScala exists pred
    }

  // Memoizes the reflective compiler handle per compiler classpath so the
  // compiler classes are resolved only once per distinct classpath.
  lazy val kotlinMemo = scalaz.Memo.immutableHashMapMemo[Classpath, KotlinReflection](cp =>
    KotlinReflection.fromClasspath(cp))

  /** Compiles the Kotlin (and co-located Java) sources under `sourceDirs`
    * into `output`, driving the reflectively-loaded Kotlin compiler.
    *
    * Jars on `classpath` whose entries register a compiler-plugin service
    * are passed as plugin classpath entries rather than compile classpath.
    * Does nothing (debug log only) when no Kotlin sources exist.
    */
  def compile(options: Seq[String],
    jvmTarget: String,
    sourceDirs: Seq[File],
    kotlinPluginOptions: Seq[String],
    classpath: Classpath,
    compilerClasspath: Classpath,
    output: File, s: TaskStreams): Unit = {
    import language.reflectiveCalls
    val stub = KotlinStub(s, kotlinMemo(compilerClasspath))
    val args = stub.compilerArgs
    stub.parse(args.instance, options.toList)
    val kotlinFiles = "*.kt" || "*.kts"
    val javaFiles = "*.java"
    val kotlinSources = sourceDirs.flatMap(d => (d ** kotlinFiles).get).distinct
    // Only keep source roots that are not nested inside another root, so
    // each Java file is collected exactly once.
    val javaSources = sourceDirs.filterNot(f => sourceDirs.exists(f0 =>
      f0.relativeTo(f).isDefined && f != f0)) map (d =>
      (d, (d ** javaFiles).get)) filter (_._2.nonEmpty)
    if (kotlinSources.isEmpty) {
      s.log.debug("No sources found, skipping kotlin compile")
    } else {
      s.log.debug(s"Compiling sources $kotlinSources")
      def pluralizeSource(count: Int) =
        if (count == 1) "source" else "sources"
      val message =
        s"Compiling ${kotlinSources.size} Kotlin ${pluralizeSource(kotlinSources.size)}"
      s.log.info(message)
      args.freeArgs = (kotlinSources ++ javaSources.map(_._1)).map(_.getAbsolutePath).asJava
      args.noStdlib = true
      args.jvmTarget = jvmTarget
      // Split the classpath: jars declaring a compiler-plugin service go to
      // the plugin classpath, everything else to the compile classpath.
      val fcpjars = classpath.map(_.data.getAbsoluteFile)
      val (pluginjars, cpjars) = fcpjars.partition {
        grepjar(_)(_.getName.startsWith(
          "META-INF/services/org.jetbrains.kotlin.compiler.plugin"))
      }
      val cp = cpjars.mkString(File.pathSeparator)
      val pcp = pluginjars.map(_.getAbsolutePath).toArray
      // Append to whatever the parsed options already configured.
      args.classpath = Option(args.classpath[String]).fold(cp)(_ + File.pathSeparator + cp)
      args.pluginClasspaths = Option(args.pluginClasspaths[Array[String]]).fold(pcp)(_ ++ pcp)
      args.pluginOptions = Option(args.pluginOptions[Array[String]]).fold(
        kotlinPluginOptions.toArray)(_ ++ kotlinPluginOptions.toArray[String])
      output.mkdirs()
      args.destination = output.getAbsolutePath
      stub.compile(args.instance)
    }
  }
}
object KotlinReflection {
  /** Resolves the Kotlin compiler entry points reflectively from `cp`.
    *
    * Tries the older `K2JVMCompiler.exec(MessageCollector, Services,
    * CommonCompilerArguments)` overload first and falls back to the newer
    * `CLITool.exec(..., CommonToolArguments)` signature, since the CLI API
    * changed between Kotlin releases.
    */
  def fromClasspath(cp: Classpath): KotlinReflection = {
    val cl = ClasspathUtilities.toLoader(cp.map(_.data))
    val compilerClass = cl.loadClass("org.jetbrains.kotlin.cli.jvm.K2JVMCompiler")
    val servicesClass = cl.loadClass("org.jetbrains.kotlin.config.Services")
    val messageCollectorClass = cl.loadClass("org.jetbrains.kotlin.cli.common.messages.MessageCollector")
    val commonCompilerArgsClass = cl.loadClass("org.jetbrains.kotlin.cli.common.arguments.CommonCompilerArguments")
    val compilerExec = Try(
      compilerClass.getMethod("exec",
        messageCollectorClass, servicesClass, commonCompilerArgsClass)
    ).toOption.getOrElse {
      val commonToolArguments = cl.loadClass(
        "org.jetbrains.kotlin.cli.common.arguments.CommonToolArguments")
      val clitool = cl.loadClass(
        "org.jetbrains.kotlin.cli.common.CLITool")
      clitool.getMethod("exec",
        messageCollectorClass, servicesClass, commonToolArguments)
    }
    KotlinReflection(
      cl,
      servicesClass,
      compilerClass,
      cl.loadClass("org.jetbrains.kotlin.cli.common.arguments.K2JVMCompilerArguments"),
      messageCollectorClass,
      commonCompilerArgsClass,
      compilerExec,
      servicesClass.getDeclaredField("EMPTY"))
  }
}
/** Reflective handles into one particular Kotlin compiler distribution. */
case class KotlinReflection(cl: ClassLoader,
  servicesClass: Class[_],
  compilerClass: Class[_],
  compilerArgsClass: Class[_],
  messageCollectorClass: Class[_],
  commonCompilerArgsClass: Class[_],
  compilerExec: Method,
  servicesEmptyField: Field)
/** Reflective facade over one Kotlin compiler instance; diagnostics are
  * routed to the given sbt task-stream logger.
  */
case class KotlinStub(s: TaskStreams, kref: KotlinReflection) {
  import language.reflectiveCalls
  import kref._
  /** Dynamic proxy implementing the compiler's MessageCollector interface;
    * each reported diagnostic is forwarded to the sbt logger at the level
    * matching its severity.
    */
  def messageCollector: AnyRef = {
    type CompilerMessageLocation = {
      def getPath: String
      def getLine: Int
      def getColumn: Int
    }
    import java.lang.reflect.{Proxy, InvocationHandler}
    val messageCollectorInvocationHandler = new InvocationHandler {
      override def invoke(proxy: scala.Any, method: Method, args: Array[AnyRef]) = {
        if (method.getName == "report") {
          val Array(severity, message, location) = args
          // Location may be null; prefix "path: line, column: " when present.
          val l = location.asInstanceOf[CompilerMessageLocation]
          val msg = Option(l).map(x => x.getPath).fold(message.toString)(loc =>
            loc + ": " + l.getLine + ", " + l.getColumn + ": " + message)
          severity.toString match {
            case "INFO" => s.log.info(msg)
            case "WARNING" => s.log.warn(msg)
            case "STRONG_WARNING" => s.log.warn(msg)
            case "ERROR" | "EXCEPTION" => s.log.error(msg)
            case "OUTPUT" | "LOGGING" => s.log.debug(msg)
          }
        }
        null
      }
    }
    Proxy.newProxyInstance(cl, Array(messageCollectorClass), messageCollectorInvocationHandler)
  }
  /** Parses command-line `options` into the given compiler-arguments bean
    * via the compiler's own parser (located reflectively).
    */
  def parse(args: Object, options: List[String]): Unit = {
    // TODO FIXME, this is much worse than it used to be, the parsing api has been
    // deeply in flux since 1.1.x
    val parser = kref.cl.loadClass(
      "org.jetbrains.kotlin.cli.common.arguments.ParseCommandLineArgumentsKt")
    val commonToolArguments = cl.loadClass(
      "org.jetbrains.kotlin.cli.common.arguments.CommonToolArguments")
    val parserMethod = parser.getMethod("parseCommandLineArguments", classOf[java.util.List[java.lang.String]], commonToolArguments)
    import collection.JavaConverters._
    parserMethod.invoke(null, options.asJava, args)
  }
  /** Dynamic facade over a fresh K2JVMCompilerArguments instance: field
    * reads/writes go through getter/setter reflection, falling back to
    * direct public-field access when no accessor exists.
    */
  def compilerArgs = {
    import language.dynamics
    new Dynamic {
      def withFirstUpper(string: String): String = string.head.toUpper + string.tail
      def getterName(field: String) = s"get${withFirstUpper(field)}"
      def setterName(field: String) = s"set${withFirstUpper(field)}"
      def selectDynamic[A](field: String): A = {
        val methodName = getterName(field)
        val getterOpt = compilerArgsClass.getMethods.find(_.getName == methodName)
        getterOpt match {
          case Some(getter) => getter.invoke(instance).asInstanceOf[A]
          case None => compilerArgsClass.getField(field).get(instance).asInstanceOf[A]
        }
      }
      def updateDynamic(field: String)(value: Any): Unit = {
        val methodName = setterName(field)
        val setterOpt = compilerArgsClass.getMethods.find(_.getName == methodName)
        setterOpt match {
          case Some(setter) => setter.invoke(instance, value.asInstanceOf[Object])
          case None => compilerArgsClass.getField(field).set(instance, value)
        }
      }
      val instance = compilerArgsClass.getDeclaredConstructor().newInstance().asInstanceOf[AnyRef]
    }
  }
  /** Runs the compiler with the given argument bean; fails the sbt task
    * (MessageOnlyException) when the exit code signals an error.
    */
  def compile(args: AnyRef): Unit = {
    val compiler = compilerClass.getDeclaredConstructor().newInstance()
    val result = compilerExec.invoke(compiler,
      messageCollector, servicesEmptyField.get(null), args: java.lang.Object)
    result.toString match {
      case "COMPILATION_ERROR" | "INTERNAL_ERROR" =>
        throw new MessageOnlyException("Compilation failed. See log for more details")
      case _ =>
    }
  }
}
| pfn/kotlin-plugin | src/main/scala/KotlinCompile.scala | Scala | mit | 8,045 |
//package org.tearne.crosser.spike
//
////import scala.slick.session.Database
////import Database.threadLocalSession
//import scala.slick.driver.ExtendedProfile
//
//object SlickH2Test extends App {
// def newInstance[T](name: String)(implicit m: Manifest[T]): T =
// Class.forName(name + "$").getField("MODULE$").get(m.runtimeClass).asInstanceOf[T]
//
// val driver = "org.h2.Driver"
// val profile = "scala.slick.driver.H2Driver"
// //val url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1"
// val url = "jdbc:h2:crosserDB"
//
// val ep: ExtendedProfile = newInstance[ExtendedProfile](driver)
// import ep.simple._
//
// Database.forURL(url, driver = driver) withSession { implicit session: Session =>
//
//// new DAO()
// CrossLociTable.ddl.create
//
// val data = List(
// CrossLoci(1, 1, "f1", 3, 1, true, "donor1"),
// CrossLoci(1, 1, "f1", 3, 1, false, "donor1"),
// CrossLoci(1, 1, "f1", 3, 2, true, "donor1"),
// CrossLoci(1, 1, "f1", 3, 2, false, "donor1"),
// CrossLoci(1, 2, "f1", 3, 2, true, "donor2"),
// CrossLoci(1, 1, "f1", 3, 3, false, "donor1")
// )
//
// CrossLociTable.insertAll(data: _*)
//
// val q = Query(CrossLociTable)
//
// println(q.selectStatement)
// q.foreach(println)
// }
//
// case class CrossLoci(sessionId: Int, realisationId: Int, crossName: String, chromosomeId: Int, cMId: Int, isLeftSide: Boolean, geneName: String)
//
// object CrossLociTable extends Table[CrossLoci]("cross_loci"){
// def sessionId = column[Int]("session_id")
// def realisationId = column[Int]("realisation_id")
// def crossName = column[String]("cross_name")
// def chromosomeId = column[Int]("chromosome_id")
// def cMId = column[Int]("centimorgan_id")
// def isLeftSide = column[Boolean]("is_left_side")
// def geneName = column[String]("gene_name")
//
// def pk = primaryKey("pk", (sessionId, realisationId, chromosomeId, cMId, isLeftSide))
//
// def * = sessionId ~ realisationId ~ crossName ~ chromosomeId ~ cMId ~ isLeftSide ~ geneName <> (CrossLoci, CrossLoci.unapply _)
// }
//} | tearne/Crosser | src/main/scala/org/tearne/crosser/spike/SlickH2Test.scala | Scala | apache-2.0 | 2,052 |
package scrabble
/** Specs for Pos: board boundary checks and coordinate labelling. */
class PosTest extends ScrabbleTest {

  "a position should" should {

    "be within the 15 x 15 boundary of the board" in {
      // Probe a grid extending well beyond the board on every side.
      val candidates = for {
        x <- List.range(-10, 30)
        y <- List.range(-10, 30)
      } yield (x, y)

      candidates.foreach { case (x, y) =>
        val inBounds = x >= 1 && x <= 15 && y >= 1 && y <= 15
        if (inBounds) Pos.at(x, y) must beSome
        else Pos.at(x, y) must beNone
      }
    }

    "Correctly label the X axis with a letter" in {
      // Column 1 -> 'A', column 2 -> 'B', ..., column 15 -> 'O'.
      val gridLetters = ((1 to 15) zip ('A' until 'P')).toMap

      Pos.allPositionsMap.foreach {
        case (_, Pos(x, y, coord)) =>
          val letter = gridLetters.get(x)
          letter should beSome
          letter.foreach { l =>
            coord must be equalTo (l.toString + y)
          }
      }
    }
  }
} | Happy0/scalascrabble | src/test/scala/PosTest.scala | Scala | gpl-2.0 | 804 |
// Solution-7.scala
// Solution to Exercise 7 in "Class Exercises"
import com.atomicscala.AtomicTest._
// A simple hour/minute time whose constructor arguments can be passed by
// name; both default to 0. Minutes are assumed to be in 0..59.
class SimpleTimeNamed(val hours:Int=0, val minutes:Int = 0) {
  /** Returns `this - aTime`, clamped to 0:00 when the result would be
    * negative.
    *
    * The previous branch-based version produced a negative hour count when
    * hours were equal but the subtrahend's minutes were larger (e.g.
    * 1:00 - 1:30 == -1:30); computing via total minutes fixes that while
    * preserving all previously-correct results.
    */
  def subtract(aTime:SimpleTimeNamed):SimpleTimeNamed = {
    val totalMinutes = (hours * 60 + minutes) - (aTime.hours * 60 + aTime.minutes)
    if (totalMinutes <= 0) {
      new SimpleTimeNamed(0, 0)
    } else {
      new SimpleTimeNamed(totalMinutes / 60, totalMinutes % 60)
    }
  }
}
// Exercise named/default constructor arguments: each instance supplies only
// one parameter and relies on the default (0) for the other.
val namedST = new SimpleTimeNamed(hours=9)
val anotherNamedST = new SimpleTimeNamed(minutes=30)
namedST.hours is 9
namedST.minutes is 0
anotherNamedST.hours is 0
anotherNamedST.minutes is 30
/* OUTPUT_SHOULD_BE
9
0
0
30
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/27_ClassExercises/Solution-7.scala | Scala | apache-2.0 | 687 |
package de.choffmeister.microserviceutils.auth
import java.security.Key
import java.time.Instant
import akka.http.scaladsl.model.headers.{BasicHttpCredentials, HttpChallenge, OAuth2BearerToken}
import akka.http.scaladsl.model.{FormData, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{AuthenticationFailedRejection, Route}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import de.choffmeister.microserviceutils.auth.consumer.models._
import de.choffmeister.microserviceutils.auth.consumer.{AuthConsumer, AuthConsumerSettings}
import de.choffmeister.microserviceutils.auth.models.AccessToken
import de.choffmeister.microserviceutils.auth.utils.SecretGenerator
import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport
import io.jsonwebtoken.SignatureAlgorithm
import io.jsonwebtoken.security.Keys
import org.scalatest.EitherValues
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import play.api.libs.json.{JsNumber, JsObject, JsString}
import scala.concurrent.duration._
/**
 * End-to-end tests for the OAuth2 AuthProvider routes together with the
 * AuthConsumer that verifies the issued JWT access tokens.
 *
 * Covers: grant-type and client validation on the token endpoint, scope
 * limiting/normalization, refresh-token issuance, JWT signing/verification
 * with multiple algorithms and key setups, and the consumer-side token
 * extraction directive.
 */
class AuthProviderConsumerTest
extends AnyWordSpec
with ScalatestRouteTest
with Matchers
with EitherValues
with ScalaFutures
with PlayJsonSupport {
// Builds a fresh provider/consumer pair that share a single HMAC signing key.
def prepare = {
val signKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val verifyKey = signKey
val providerSettings = AuthProviderSettings(5.minutes, signKey)
val provider = new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings) with TestAuthProvider
val consumerRealm = "test"
val consumerSettings = AuthConsumerSettings(consumerRealm, verifyKey)
val consumer = new AuthConsumer(consumerSettings)
(provider, consumer)
}
"fails for unknown grant type" in {
val (provider, _) = prepare
Post("/oauth/access_token", FormData("grant_type" -> "unknown")) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("unsupported_grant_type")
}
}
// Token-endpoint behaviour: resource-owner/client validation, scopes, refresh tokens.
"general" should {
"verifies resource owner" in {
val (provider, _) = prepare
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user4", "password" -> "pass4", "client_id" -> "public")
) ~> Route
.seal(provider.routes) ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_grant")
}
}
// Exercises every combination of client_id/client_secret form fields vs.
// HTTP Basic credentials, for public and confidential clients.
"verifies client" in {
val (provider, _) = prepare
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> Route
.seal(provider.routes) ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "invalid")
) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> addCredentials(BasicHttpCredentials("invalid", "")) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
) ~> provider.routes ~> check {
responseAs[AccessTokenResponse]
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> addCredentials(BasicHttpCredentials("public", "")) ~> provider.routes ~> check {
responseAs[AccessTokenResponse]
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "private")
) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> addCredentials(BasicHttpCredentials("private", "")) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData(
"grant_type" -> "password",
"username" -> "user1",
"password" -> "pass1",
"client_id" -> "private",
"client_secret" -> "invalid"
)
) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> addCredentials(BasicHttpCredentials("private", "invalid")) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("invalid_client")
}
Post(
"/oauth/access_token",
FormData(
"grant_type" -> "password",
"username" -> "user1",
"password" -> "pass1",
"client_id" -> "private",
"client_secret" -> "private-secret"
)
) ~> provider.routes ~> check {
responseAs[AccessTokenResponse]
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1")
) ~> addCredentials(BasicHttpCredentials("private", "private-secret")) ~> provider.routes ~> check {
responseAs[AccessTokenResponse]
}
}
"respects client grant type limitations" in {
val (provider, _) = prepare
Post(
"/oauth/access_token",
FormData(
"grant_type" -> "password",
"username" -> "user1",
"password" -> "pass1",
"client_id" -> "limited-grant-types"
)
) ~> provider.routes ~> check {
status should be(StatusCodes.BadRequest)
responseAs[AccessTokenErrorResponse].error should be("unauthorized_client")
}
}
"respects client scope limitations" in {
val (provider, consumer) = prepare
Post(
"/oauth/access_token",
FormData(
"grant_type" -> "password",
"username" -> "user1",
"password" -> "pass1",
"client_id" -> "limited-scopes"
)
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
val accessToken = consumer.verifyAccessToken(res.access_token).value
(accessToken \\ "scopes").as[Set[String]] should be(Set("read"))
}
}
"includes custom claims" in {
val (provider, consumer) = prepare
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
val accessToken = consumer.verifyAccessToken(res.access_token).value
accessToken.value("team") should be(JsString("team1"))
}
}
"includes access token id" in {
val (provider, consumer) = prepare
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
val accessToken = consumer.verifyAccessToken(res.access_token).value
// The stored token is looked up via the JWT "jti" claim; ignoring the
// time-dependent claims, the JWT payload must equal the stored payload.
val accessToken2 = provider.accessTokens.find(_.accessTokenId == (accessToken \\ "jti").as[String]).get
accessToken - "jti" - "iat" - "exp" should be(accessToken2.payload)
}
}
"limits granted scopes if requested" in {
val (provider, consumer) = prepare
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.scope should be("admin read write")
val accessToken = consumer.verifyAccessToken(res.access_token).value
(accessToken \\ "scopes").as[Set[String]] should be(Set("admin", "read", "write"))
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user2", "password" -> "pass2", "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.scope should be("read write")
val accessToken = consumer.verifyAccessToken(res.access_token).value
(accessToken \\ "scopes").as[Set[String]] should be(Set("read", "write"))
}
Post(
"/oauth/access_token",
FormData(
"grant_type" -> "password",
"username" -> "user1",
"password" -> "pass1",
"client_id" -> "public",
"scope" -> "read write"
)
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.scope should be("read write")
val accessToken = consumer.verifyAccessToken(res.access_token).value
(accessToken \\ "scopes").as[Set[String]] should be(Set("read", "write"))
}
}
// Scope normalization must apply both on initial issuance and on refresh.
"normalizes scopes" in {
val key = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val providerSettings = AuthProviderSettings(5.minutes, key)
val provider =
new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings) with TestAuthProvider {
override def normalizeScopes(scopes: Set[String]): Set[String] =
if (scopes.contains("admin")) scopes + "admin2" else scopes
}
val refreshToken = Post(
"/oauth/access_token",
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.scope.split(" ").toSet should be(Set("admin", "admin2", "read", "write"))
res.refresh_token.get
}
Post(
"/oauth/access_token",
FormData("grant_type" -> "refresh_token", "refresh_token" -> refreshToken, "client_id" -> "public")
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.scope.split(" ").toSet should be(Set("admin", "admin2", "read", "write"))
}
}
"only provide refresh token when access type is offline" in {
val (provider, _) = prepare
val baseData =
FormData("grant_type" -> "password", "username" -> "user1", "password" -> "pass1", "client_id" -> "public")
Post("/oauth/access_token", baseData) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.refresh_token should not be (empty)
}
Post(
"/oauth/access_token",
FormData(baseData.fields.toMap ++ Map("access_type" -> "offline"))
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.refresh_token should not be (empty)
}
Post(
"/oauth/access_token",
FormData(baseData.fields.toMap ++ Map("access_type" -> "online"))
) ~> provider.routes ~> check {
val res = responseAs[AccessTokenResponse]
res.refresh_token should be(empty)
}
}
}
// JWT signing/verification round trips across algorithms and key configurations.
"json web token" should {
// Variant of prepare that allows distinct signing and verification keys.
def prepare(
signKey: Key,
verifyKey: Key
): (AuthProvider[TestResourceOwner, TestClient, AccessToken], AuthConsumer) = {
val providerSettings = AuthProviderSettings(5.minutes, signKey)
val provider = new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings)
with TestAuthProvider
val consumerRealm = "test"
val consumerSettings = AuthConsumerSettings(consumerRealm, verifyKey)
val consumer = new AuthConsumer(consumerSettings)
(provider, consumer)
}
"signs and verifies tokens (with HS256, RS256, ES256)" in {
def test(algorithm: SignatureAlgorithm): Unit = {
val (signKey, verifyKey) = generateKeyPair(algorithm)
val (provider, consumer) = prepare(signKey, verifyKey)
val token = JsObject.empty
val str = provider.signAccessToken(token)
consumer.verifyAccessToken(str).value should be(token)
}
test(SignatureAlgorithm.HS256)
test(SignatureAlgorithm.RS256)
test(SignatureAlgorithm.ES256)
}
"signs and verifies tokens (for additional verification keys)" in {
val token = JsObject.empty
// Returns whether the consumer accepts a token signed by the provider,
// given the provider's key id and the consumer's key map.
def test(
providerKey: Key,
providerKeyId: Option[String],
consumerKey: Key,
consumerAdditionalKeys: Map[String, Key]
): Boolean = {
val providerSettings = AuthProviderSettings(5.minutes, providerKey, providerKeyId)
val provider = new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings)
with TestAuthProvider
val consumerRealm = "test"
val consumer = new AuthConsumer(AuthConsumerSettings(consumerRealm, consumerKey, consumerAdditionalKeys))
val str = provider.signAccessToken(token)
consumer.verifyAccessToken(str).isRight
}
val keyPair1 = generateKeyPair(SignatureAlgorithm.HS256)
val keyPair2 = generateKeyPair(SignatureAlgorithm.RS256)
test(keyPair1._1, None, keyPair1._2, Map.empty) should be(true)
test(keyPair2._1, None, keyPair1._2, Map.empty) should be(false)
test(keyPair2._1, None, keyPair1._2, Map("extra" -> keyPair2._1)) should be(false)
test(keyPair2._1, Some("extra"), keyPair1._2, Map("extra" -> keyPair2._1)) should be(true)
test(keyPair1._1, Some("extra"), keyPair1._2, Map("extra" -> keyPair2._1)) should be(false)
test(keyPair1._1, Some("unknown"), keyPair1._2, Map("extra" -> keyPair2._1)) should be(true)
test(keyPair1._1, Some("unknown"), keyPair2._2, Map("extra" -> keyPair2._1)) should be(false)
test(keyPair1._1, Some("extra"), keyPair1._2, Map.empty) should be(true)
test(keyPair1._1, Some("extra"), keyPair2._2, Map.empty) should be(false)
}
"ensures valid signature" in {
val signKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val verifyKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val (provider, consumer) = prepare(signKey, verifyKey)
val token = JsObject.empty
val str = provider.signAccessToken(token)
consumer.verifyAccessToken(str) should be(Left("The token signature is invalid"))
}
"ensures not expired" in {
val signKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val verifyKey = signKey
val (provider, consumer) = prepare(signKey, verifyKey)
// Truncated to whole seconds, matching JWT "exp" claim resolution.
val now = Instant.ofEpochSecond(Instant.now.getEpochSecond)
val token1 = JsObject(Seq("exp" -> JsNumber(now.plusSeconds(60).getEpochSecond)))
val str1 = provider.signAccessToken(token1)
consumer.verifyAccessToken(str1) should be(Right(token1))
val token2 = JsObject(Seq("exp" -> JsNumber(now.minusSeconds(60).getEpochSecond)))
val str2 = provider.signAccessToken(token2)
consumer.verifyAccessToken(str2) should be(Left("The token has expired"))
}
}
// Consumer-side extraction directive: bearer header and "token" query parameter.
"extracts access token" should {
val signKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
val verifyKey = signKey
val providerSettings = AuthProviderSettings(5.minutes, signKey)
val provider = new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings) with TestAuthProvider
val consumerRealm = "test"
val consumerSettings = AuthConsumerSettings(consumerRealm, verifyKey)
val consumer = new AuthConsumer(consumerSettings)
val token = provider.signAccessToken(JsObject.empty)
val route = consumer.extractAccessToken { _ => complete("yes") }
val routeOptional = consumer.extractAccessToken.optional {
case Some(_) => complete("yes")
case None => complete("no")
}
"reject missing token" in {
Get("/") ~> route ~> check {
rejection should be(
AuthenticationFailedRejection(
AuthenticationFailedRejection.CredentialsMissing,
HttpChallenge(
"Bearer",
"test",
Map("error" -> "missing_token", "error_description" -> "The access token is missing")
)
)
)
}
Get("/") ~> Route.seal(routeOptional) ~> check {
status should be(StatusCodes.OK)
responseAs[String] should be("no")
}
}
"reject malformed token (bearer authorization header)" in {
Get("/") ~> addCredentials(OAuth2BearerToken("malformed")) ~> route ~> check {
rejection should be(
AuthenticationFailedRejection(
AuthenticationFailedRejection.CredentialsRejected,
HttpChallenge(
"Bearer",
"test",
Map("error" -> "invalid_token", "error_description" -> "The token is malformed")
)
)
)
}
Get("/") ~> addCredentials(OAuth2BearerToken("malformed")) ~> Route.seal(routeOptional) ~> check {
status should be(StatusCodes.Unauthorized)
}
}
"reject malformed token (token query parameter)" in {
Get("/?token=malformed") ~> route ~> check {
rejection should be(
AuthenticationFailedRejection(
AuthenticationFailedRejection.CredentialsRejected,
HttpChallenge(
"Bearer",
"test",
Map("error" -> "invalid_token", "error_description" -> "The token is malformed")
)
)
)
}
Get("/?token=malformed") ~> Route.seal(routeOptional) ~> check {
status should be(StatusCodes.Unauthorized)
}
}
"accept valid token (bearer authorization header)" in {
Get("/") ~> addCredentials(OAuth2BearerToken(token)) ~> route ~> check {
status should be(StatusCodes.OK)
responseAs[String] should be("yes")
}
}
"accept valid token (token query parameter)" in {
Get("/?token=" + token) ~> route ~> check {
status should be(StatusCodes.OK)
responseAs[String] should be("yes")
}
}
}
// Returns a (signKey, verifyKey) pair: symmetric for HS256, private/public otherwise.
def generateKeyPair(algorithm: SignatureAlgorithm) =
algorithm match {
case a if a == SignatureAlgorithm.HS256 =>
val key = Keys.hmacShaKeyFor(SecretGenerator.generate(32))
(key, key)
case a if a == SignatureAlgorithm.RS256 =>
val keyPair = Keys.keyPairFor(a)
(keyPair.getPrivate, keyPair.getPublic())
case a if a == SignatureAlgorithm.ES256 =>
val keyPair = Keys.keyPairFor(a)
(keyPair.getPrivate, keyPair.getPublic())
case _ =>
???
}
}
| choffmeister/microservice-utils | microservice-utils-auth/src/test/scala/de/choffmeister/microserviceutils/auth/AuthProviderConsumerTest.scala | Scala | mit | 19,510 |
package co.rc.smservice.api.routing
import akka.actor.ActorSystem
import com.typesafe.config.Config
import scala.concurrent.ExecutionContext
import spray.routing.HttpServiceActor
import co.rc.smservice.api.routing.routes.SessionRoutes
/**
 * Actor that exposes the REST API of the session manager service.
 *
 * Every incoming HTTP request is dispatched through the session routes.
 */
class SmServiceRouter()(implicit system: ActorSystem,
                        executionContext: ExecutionContext,
                        config: Config) extends HttpServiceActor {

  // Route definitions for all session related endpoints.
  private val routeProvider: SessionRoutes = new SessionRoutes()

  /**
   * Service router receive method
   * @return Service router receive strategy
   */
  override def receive: Receive = runRoute(routeProvider.routes)
}
| rodricifuentes1/session-manager-service | src/main/scala/co/rc/smservice/api/routing/SmServiceRouter.scala | Scala | mit | 690 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.index
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.common.exceptions.MetadataProcessException
import org.apache.carbondata.common.exceptions.sql.MalformedIndexCommandException
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
 * Tests for the CREATE INDEX / SHOW INDEXES DDL commands: rejection of
 * non-existent index-provider classes, SHOW INDEXES output for bloomfilter
 * indexes, and the restriction that lucene indexes only support String columns.
 */
class TestIndexCommand extends QueryTest with BeforeAndAfterAll {
val testData = s"$resourcesPath/sample.csv"
override def beforeAll {
sql("drop table if exists indextest")
sql("drop table if exists indexshowtest")
sql("drop table if exists uniqdata")
sql("create table indextest (a string, b string, c string) STORED AS carbondata")
}
// Deliberately not an IndexClassProvider; used to trigger provider-lookup failures.
val newClass = "org.apache.spark.sql.CarbonSource"
test("test index create: don't support using non-exist class") {
assert(intercept[MetadataProcessException] {
sql(s"CREATE INDEX index1 ON indextest (a) AS '$newClass'")
}.getMessage
.contains(
"failed to create IndexClassProvider 'org.apache.spark.sql.CarbonSource'"))
}
test("test index create with properties: don't support using non-exist class") {
assert(intercept[MetadataProcessException] {
sql(s"CREATE INDEX index2 ON indextest (a) AS '$newClass' PROPERTIES('key'='value')")
}.getMessage
.contains(
"failed to create IndexClassProvider 'org.apache.spark.sql.CarbonSource'"))
}
test("test index create with existing name: don't support using non-exist class") {
assert(intercept[MetadataProcessException] {
sql(
s"CREATE INDEX index2 ON indextest (a) AS '$newClass' PROPERTIES('key'='value')")
}.getMessage
.contains(
"failed to create IndexClassProvider 'org.apache.spark.sql.CarbonSource'"))
}
test("test show indexes with no index") {
sql("drop table if exists indexshowtest")
sql("create table indexshowtest (a string, b string, c string) STORED AS carbondata")
assert(sql("show indexes on indexshowtest").collect().length == 0)
}
// SHOW INDEXES must list name, provider, indexed columns, properties and status.
test("test show indexes: show index property related information") {
val tableName = "indexshowtest"
val indexName = "bloomindex"
val indexName2 = "bloomindex2"
val indexName3 = "bloomindex3"
sql(s"drop table if exists $tableName")
// for index
sql(s"create table $tableName (a string, b string, c string) STORED AS carbondata")
sql(
s"""
| create index $indexName
| on $tableName (a)
| as 'bloomfilter'
| PROPERTIES ('bloom_size'='32000', 'bloom_fpp'='0.001')
""".stripMargin)
sql(
s"""
| CREATE INDEX $indexName2
| on table $tableName (b)
| as 'bloomfilter'
""".stripMargin)
sql(
s"""
| CREATE INDEX $indexName3
| on table $tableName (c)
| as 'bloomfilter'
""".stripMargin)
val result = sql(s"show indexes on $tableName").cache()
checkAnswer(sql(s"show indexes on $tableName"),
Seq(Row(indexName, "bloomfilter", "a",
"'INDEX_COLUMNS'='a','bloom_fpp'='0.001','bloom_size'='32000'", "ENABLED", "NA"),
Row(indexName2, "bloomfilter", "b", "'INDEX_COLUMNS'='b'", "ENABLED", "NA"),
Row(indexName3, "bloomfilter", "c", "'INDEX_COLUMNS'='c'", "ENABLED", "NA")))
result.unpersist()
sql(s"drop table if exists $tableName")
}
// Lucene indexes only support String columns; BINARY must be rejected, and a
// failed CREATE INDEX must not affect existing indexes or query results.
test("test don't support lucene on binary data type") {
val tableName = "indexshowtest20"
sql(s"drop table if exists $tableName")
sql(s"CREATE TABLE $tableName(id int, name string, city string, age string, image binary)" +
s" STORED AS carbondata")
sql(s"insert into $tableName values(1,'a3','b3','c1','image2')")
sql(s"insert into $tableName values(2,'a3','b2','c2','image2')")
sql(s"insert into $tableName values(3,'a1','b2','c1','image3')")
sql(
s"""
| CREATE INDEX agg10
| ON $tableName (name)
| AS 'lucene'
| """.stripMargin)
checkAnswer(sql(s"show indexes on $tableName"),
Seq(Row("agg10", "lucene", "name", "'INDEX_COLUMNS'='name'", "ENABLED", "NA")))
val e = intercept[MalformedIndexCommandException] {
sql(
s"""
| CREATE INDEX agg1
| ON $tableName (image)
| AS 'lucene'
| """.stripMargin)
}
assert(e.getMessage.contains("Only String column is supported, column 'image' is BINARY type."))
checkAnswer(sql(s"show indexes on table $tableName"),
Seq(Row("agg10", "lucene", "name", "'INDEX_COLUMNS'='name'", "ENABLED", "NA")))
val pre = sql(
s"""
| select name,image, id
| from $tableName
| where name = 'a3'
""".stripMargin)
assert(2 == pre.collect().length)
pre.collect().foreach { each =>
assert(3 == each.length)
assert("a3".equals(each.get(0)))
assert("image2".equals(new String(each.getAs[Array[Byte]](1))))
assert(2 == each.get(2) || 1 == each.get(2))
}
sql(s"drop table if exists $tableName")
}
override def afterAll {
sql("DROP TABLE IF EXISTS maintable")
sql("drop table if exists uniqdata")
// Restore the hive-metastore property possibly modified by other suites.
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_HIVE_SCHEMA_META_STORE,
CarbonCommonConstants.ENABLE_HIVE_SCHEMA_META_STORE_DEFAULT)
sql("drop table if exists indextest")
sql("drop table if exists indexshowtest")
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/TestIndexCommand.scala | Scala | apache-2.0 | 6,321 |
package fpscala.c03
import fpscala.datastructures.{List => FpList, Nil => FpNil}
import org.scalatest.{FlatSpec, Matchers}
// Verifies Exercise09.length on a non-empty list, the empty list (FpNil),
// and a two-element list.
class Exercise09Spec extends FlatSpec with Matchers {
"length" should "work" in {
Exercise09.length(FpList(0.0, 1.0, 2.0, 3.0)) should equal (4)
Exercise09.length(FpNil) should equal (0)
Exercise09.length(FpList(1, 2)) should equal (2)
}
}
| willtaylor/fpscala | src/test/scala/fpscala/c03/Exercise09Spec.scala | Scala | gpl-3.0 | 383 |
/**
* How-To: Layouts for large Graphs using GraphX
*
* Here we implement the Force-Directed Layout. Plausible positions have do be found
* by taking aesthetic criteria into account. The algorithm was defined by
* "Fruchterman Reingold (1991)".
*
* For more details see: http://en.wikipedia.org/wiki/Force-directed_graph_drawing
*
* Draft for the BLOG-POST is in Google-Docs.
*
* https://docs.google.com/document/d/1O3XcuKVmxoM_WDNKD0wUck2K3-DfmyBryeJtMlHYVss/edit#
*/
import org.apache.spark._
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import scala.util.Random
import java.io._
import sys.process._
import java.util.Hashtable
/**
* Some inspiration for this project came from:
*
* https://github.com/foowie/Graph-Drawing-by-Force-directed-Placement
*
* here I reuse the Vector implementation. The goal was not to reinvent the
* core implementation, but to provide one for GraphX.
*/
/**
 * Minimal mutable 2D vector used for node positions and displacements
 * in the force-directed layout.
 *
 * NOTE: the misspelled `lenght` is kept because other functions in this
 * script call it; `length` is provided as a correctly spelled alias.
 */
class Vector(var x: Double = 0.0, var y: Double = 0.0) {

  /** Component-wise addition; returns a new vector. */
  def +(operand: Vector): Vector = new Vector(x + operand.x, y + operand.y)

  /** Component-wise subtraction; returns a new vector. */
  def -(operand: Vector): Vector = new Vector(x - operand.x, y - operand.y)

  /** Component-wise (Hadamard) product; returns a new vector. */
  def *(operand: Vector): Vector = new Vector(x * operand.x, y * operand.y)

  /** Scalar multiplication; returns a new vector. */
  def *(operand: Double): Vector = new Vector(x * operand, y * operand)

  /** Scalar division; no guard against operand == 0 (yields Infinity/NaN). */
  def /(operand: Double): Vector = new Vector(x / operand, y / operand)

  /** True when either component is NaN. */
  def isNaN: Boolean = x.isNaN || y.isNaN

  /** Mutates both components in place and returns this vector for chaining. */
  def set(x: Double, y: Double): Vector = {
    this.x = x
    this.y = y
    this
  }

  /** Resets both components to zero in place. */
  def clear = {
    x = 0.0
    y = 0.0
  }

  /** Euclidean length; kept under its historical (misspelled) name for callers. */
  def lenght = math.sqrt(x * x + y * y)

  /** Correctly spelled alias for [[lenght]]. */
  def length: Double = lenght

  /** Debug representation including the vector length. */
  def asString: String = "x=" + x + " y=" + y + " l=" + this.lenght
}
//
// Working directory for all generated dump/layout files.
//
val WD = "."
// Scaling factors for the force contributions.
// NOTE(review): REP_SCALE is not referenced by the visible force functions
// (only ATT_SCALE is) — confirm whether repulsionForce should apply it.
var REP_SCALE = 0.0001
var ATT_SCALE = 0.0001
val fileNameDebugDump = WD + "/fdl/demo-graph.debug.dump"
//
// count the iterations
//
var ci = 0
//
// the spring constant
//
var k: Double = 1.0
//
// nodes with no connection are always linked to the origin
//
val defaultNode = ("o", 0.0, 0.0, (0.0, 0.0, 0.0, 0.0) )
//
// minimal distance (avoids division by zero in the force calculations)
//
val epsilon = 0.0001D
//
// the area to plot in
//
val width = 1000
val height = 1000
val area = width * height // area of graph
def getWidth: Double = width
def getHeight: Double = height
def getK: Double = k
val iterations = 10
//
// during graph preparation we reduce the size a bit ...
//
val percentage: Double = 1.0
// val percentage: Double = 0.125
var currentIteration = 1 // current iteration
//
// Initial temperature for simulated annealing; lowered each step by cool().
//
var temperature = 0.1 * math.sqrt(area)
// Column separator used in all input/output files.
val tab: String = "\\t"
/**
 * Opens a PrintWriter on `f`, passes it to `op`, and always closes it,
 * even when `op` throws.
 *
 * (Pattern borrowed from:
 * http://stackoverflow.com/questions/4604237/how-to-write-to-a-file-in-scala)
 */
def printToFile(f: java.io.File)(op: java.io.PrintWriter => Unit) {
  val writer = new java.io.PrintWriter(f)
  try {
    op(writer)
  } finally {
    writer.close()
  }
}
/**
 * Load the graph from a tab-separated edge-list file.
 *
 * - only the first two columns (source id, target id) are interpreted;
 *   a constant link strength of "1.0" is assigned to every edge
 * - lines starting with "#" are treated as comments and skipped
 * - the edge set is down-sampled by `percentage` with a fixed salt, so
 *   repeated runs see the same sample
 */
def loadEdges(fn: String): Graph[Any, String] = {
  val strength: String = "1.0"
  // Fixed salt => reproducible sampling. (Use System.currentTimeMillis()
  // instead for a different sample per run.)
  val salt: Long = 1
  val edges: RDD[Edge[String]] = sc.textFile(fn)
    .filter(line => !line.startsWith("#"))
    .sample(false, percentage, salt)
    .map { line =>
      val fields = line.split(tab)
      Edge(fields(0).toLong, fields(1).toLong, strength)
    }
  val graph: Graph[Any, String] = Graph.fromEdges(edges, "defaultProperty")
  println("num edges = " + graph.numEdges);
  println("num vertices = " + graph.numVertices);
  graph
}
/**
 * Simulated-annealing cooling schedule: the global temperature decays
 * linearly towards zero as `iteration` approaches `iterations`.
 */
def cool(iteration: Int) = {
  val progress = iteration.toDouble / iterations
  temperature = (1.0 - progress) * 0.1 * math.sqrt(area)
}
/**
 * Prints one line per edge: source label and position, target label, and
 * the link strength. Collects to the driver, so small graphs only.
 */
def inspect(g: Graph[(String, Double, Double, (Double, Double, Double, Double)), Double]) = {
  val lines: RDD[String] = g.triplets.map { t =>
    t.srcAttr._1 + " (" + t.srcAttr._2 + "," + t.srcAttr._3 + "), linkTo( " + t.dstAttr._1 + " )=" + t.attr
  }
  lines.collect.foreach(println(_))
}
/**
 * Dump the graph as a simple tab-separated edge list (with header) into
 * the local file system at "<fn>.triples.csv".
 *
 * This works only for small graphs: it materializes the whole edge RDD on
 * the driver via RDD.collect(). For larger graphs use a scalable graph
 * writer / output format instead.
 */
def dump(g: Graph[(String, Double, Double, (Double, Double, Double, Double)), Double], fn: String) = {
  println( "!!! WARNING !!! => dump() WORKS FOR SMALL GRAPHS ONLY. It uses the RDD.collect() function. " )
  val edgeLines: RDD[String] = g.triplets.map(t => t.srcAttr._1 + tab + t.dstAttr._1 + tab + t.attr)
  val header: RDD[String] = sc.parallelize(Array("Source" + tab + "Target" + tab + "Strength"))
  val combined: RDD[String] = header.union(edgeLines)
  printToFile(new File(fn + ".triples.csv")) { p =>
    combined.collect.foreach(p.println(_))
  }
  println( "### Dumplocation : " + fn + ".triples.csv" )
}
/**
 * Dump the graph with layout information into the local file system as a
 * node list ("<fn>_<ci>_NL.csv") and an edge list ("<fn>_<ci>_EL.csv").
 *
 * This works only for small graphs: it materializes both RDDs on the
 * driver via RDD.collect().
 *
 * The pref should be "#" if gnuplot or other tools should read the file.
 * To export the data to Gephi, we MUST remove the "#" from the first line.
 *
 * BUG FIX: the node-list header was missing a tab between the "Y" and "dX"
 * columns, producing a fused "YdX" column and six header fields while each
 * node row writes seven tab-separated fields.
 */
def dumpWithLayout( g: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ], fn: String, pref: String ) = {
  println( "!!! WARNING !!! => dumpWithLayout() WORKS FOR SMALL GRAPHS ONLY. It uses the RDD.collect() function. " )
  val fnEdgelist = fn + "_" + ci + "_EL.csv"
  val fnNodelist = fn + "_" + ci + "_NL.csv"
  val headerEL: RDD[String] = sc.parallelize( Array( pref + "Source" + tab + "Target" + tab + "Strength") )
  // Seven columns, matching the node rows emitted below.
  val headerNL: RDD[String] = sc.parallelize( Array( pref + "Id" + tab + "X" + tab + "Y" + tab + "dX" + tab + "dY" + tab + "k" + tab + "m") )
  // TODO: Print also k and module of the node (currently written as 0) ...
  val e: RDD[String] = g.triplets.map(triplet => triplet.srcAttr._1 + tab + triplet.dstAttr._1 + tab + triplet.attr )
  val n: RDD[String] = g.vertices.map(v => v._2._1 + tab + v._2._2 + tab + v._2._3 + tab + v._2._4._1 + tab + v._2._4._2 + tab + 0 + tab + 0 )
  val allE: RDD[String] = headerEL.union( e )
  val allN: RDD[String] = headerNL.union( n )
  printToFile(new File( fnNodelist )) {
    p => allN.collect.foreach(p.println(_))
  }
  printToFile(new File( fnEdgelist )) {
    p => allE.collect.foreach(p.println(_))
  }
}
/**
 * Brings an arbitrary graph into the schema this layouter requires:
 * each node becomes (label, x, y, (md1, md2, md3, md4)) with a random
 * position inside the plotting area and zeroed metadata; each edge's
 * strength is parsed to Double.
 */
def convert(g: Graph[Any, String]): Graph[(String, Double, Double, (Double, Double, Double, Double)), Double] = {
  val nodes: RDD[(VertexId, (String, Double, Double, (Double, Double, Double, Double)))] =
    g.vertices.map { case (id, _) =>
      val rnd = new scala.util.Random
      (id, (id.toString, rnd.nextDouble * getWidth, rnd.nextDouble * getHeight, (0.0, 0.0, 0.0, 0.0)))
    }
  val links: RDD[Edge[Double]] = g.edges.map(e => Edge(e.srcId, e.dstId, e.attr.toDouble))
  Graph(nodes, links, defaultNode)
}
/**
 * Re-randomizes every vertex position: new coordinates are drawn uniformly
 * from [0, width) x [0, height). Labels, metadata and edges are unchanged.
 */
def shuffle(g: Graph[(String, Double, Double, (Double, Double, Double, Double)), Double]): Graph[(String, Double, Double, (Double, Double, Double, Double)), Double] = {
  val randomizedNodes: RDD[(VertexId, (String, Double, Double, (Double, Double, Double, Double)))] =
    g.vertices.map { case (id, (label, _, _, meta)) =>
      val rnd = new scala.util.Random
      (id, (label, rnd.nextDouble * getWidth, rnd.nextDouble * getHeight, meta))
    }
  Graph(randomizedNodes, g.edges, defaultNode)
}
/**
 * Clamps `value` into the closed range [min, max]: returns min when value
 * is below it, max when value is above it, and value otherwise.
 */
def between(min: Double, value: Double, max: Double): Double =
  if (value < min) min
  else if (value > max) max
  else value
/**
 * Final per-iteration position update for one vertex.
 *
 * `a` is (label, x, y, (md1, md2, md3, md4)), where md1/md2 hold the total
 * displacement accumulated during this iteration. The displacement is
 * applied, the result is clamped to the plotting area, and the metadata is
 * reset to zero for the next layout step. md3/md4 are currently unused.
 */
def updatePos(a: (String, Double, Double, (Double, Double, Double, Double))): (String, Double, Double, (Double, Double, Double, Double)) = {
  val (label, x0, y0, (dx, dy, _, _)) = a
  (label, between(0, x0 + dx, width), between(0, y0 + dy, height), (0.0, 0.0, 0.0, 0.0))
}
/**
 * Accumulates one displacement contribution into a vertex's metadata.
 *
 * `a` is (label, x, y, (md1, md2, md3, md4)); `b` is the (dx, dy)
 * contribution of one force. Position stays untouched here — md1/md2 are
 * increased and later merged with the position in the final update step.
 * md3/md4 are reset to zero (reserved for future layout influencers).
 */
def preUpdatePos(a: (String, Double, Double, (Double, Double, Double, Double)), b: (Double, Double)): (String, Double, Double, (Double, Double, Double, Double)) = {
  val (label, x, y, (dx, dy, _, _)) = a
  val (incX, incY) = b
  (label, x, y, (dx + incX, dy + incY, 0.0, 0.0))
}
/**
 * Repulsive force between two vertex positions mp = ((x1,y1),(x2,y2)),
 * following Fruchterman-Reingold: F_r(d) = k^2 / d.
 * Returns the (x, y) displacement to apply to the first point.
 */
def repulsionForce(mp: ((Double,Double),(Double,Double)) ) : (Double,Double) = {
  val v1 = new Vector( mp._1._1, mp._1._2 )
  val v2 = new Vector( mp._2._1, mp._2._2 )
  val delta = v1 - v2
  // `val` instead of `var`: deltaLength is never reassigned.
  // epsilon avoids division by zero when both points coincide.
  // NOTE: `lenght` is the (misspelled) accessor of the project-local Vector class.
  val deltaLength = math.max(epsilon, delta.lenght)
  val force = k * k / deltaLength
  val disp = delta * force / deltaLength
  (disp.x, disp.y)
}
/**
 * Attractive force along an edge between two points mp = ((x1,y1),(x2,y2)),
 * following Fruchterman-Reingold: F_a(d) = d^2 / k, scaled by ATT_SCALE.
 * Returns the (x, y) displacement to apply to the first point.
 */
def attractionForce(mp: ((Double,Double),(Double,Double)) ) : (Double,Double) = {
  val ((x1, y1), (x2, y2)) = mp
  val delta = new Vector(x1, y1) - new Vector(x2, y2)
  val dist = math.max(epsilon, delta.lenght) // epsilon guards against x/0
  val force = dist * dist / k
  val displacement = delta * force * ATT_SCALE / dist
  (displacement.x, displacement.y)
}
/**
 * Same magnitude as attractionForce but with the direction inverted;
 * applied to the opposite endpoint of an edge.
 */
def attractionForceInverted(mp: ((Double,Double),(Double,Double)) ) : (Double,Double) = {
  val ((x1, y1), (x2, y2)) = mp
  val delta = new Vector(x1, y1) - new Vector(x2, y2)
  val dist = math.max(epsilon, delta.lenght) // epsilon guards against x/0
  val force = dist * dist / k
  val displacement = delta * ( -1.0 * force * ATT_SCALE) / dist
  (displacement.x, displacement.y)
}
/**
 * Calc Repulsion Force for all pairs of Vertexes in the cluster ... no PREGEL
 *
 * CURRENTLY A NO-OP PLACEHOLDER: the distributed implementation sketched in
 * the commented-out code below is incomplete (mapReduceTriplets only visits
 * neighbors, while repulsion must consider ALL vertex pairs), so the input
 * graph is returned unchanged. The working all-pairs computation lives in
 * layoutFDFRLocally() instead.
 */
def calcRepulsion( g: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] ) : Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] = {
//
//
// THIS MUST BE CHANGED !!!
//
// val repV: VertexRDD[(Double, Double)] = g.mapReduceTriplets[(Double, Double)](
//
// triplet => {
// Iterator( (triplet.dstId, ( triplet.srcAttr._2, triplet.srcAttr._3)) )
// },
// (m1, m2) => repulsionForce( (m1,m2) ) // Reduce Function for all contributions from all neighbors
// ) // repulsionV is now the VertexRDD
//
//
// val disp: VertexRDD[(Double, Double)] = repV.aggregateUsingIndex(repV, (p1, p2) => ( p1._1 + p2._1 , p1._2 + p2._2 ))
//
// val setC: VertexRDD[(String, Double, Double, (Double,Double,Double,Double))] = g.vertices.innerJoin(disp)( (id, a, b) => preUpdatePos(a,b) )
//
// def cartesian[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[((VertexId, VD), U)]
// all pairs
// val pairsV: VertexRDD[(Double, Double)] = g.vertices.cartesian
// map over all pairs and calc force, which consists of two components, in opposite
// direction. Since cartesian contains all pairs we can use the "ID-Orientation" to
// define the orientation of the force component. In case of ID_a > ID_b we use +
// otherwise - as sign.
// This means we emit: ID, forceOnID( ID, ID2 ) in order to calc the total
// contribution to ID from all nodes.
// Finally, build a new Graph.
// val graphN = Graph(setC, g.edges, defaultNode)
// graphN
// Placeholder: return the input graph unchanged.
g
}
/**
 * Calc Attraction Force for linked pairs of Vertexes in the cluster sing PREGEL
 *
 * CURRENTLY A NO-OP PLACEHOLDER: the distributed implementation below is
 * commented out, so the input graph is returned unchanged. The working
 * edge-wise attraction computation lives in layoutFDFRLocally() instead.
 */
def calcAttraction( g: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] ) : Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] = {
/*
val attr1: VertexRDD[(Double, Double)] = g.mapReduceTriplets[(Double, Double)](
triplet => {
Iterator( (triplet.dstId, ( triplet.srcAttr._2, triplet.srcAttr._3)) )
},
(m1, m2) => attractionForce( (m1,m2) ) // Reduce Function for all contributions from all neighbors
) // attraction to one component is now the VertexRDD
val attr2: VertexRDD[(Double, Double)] = g.mapReduceTriplets[(Double, Double)](
triplet => {
Iterator( (triplet.srcId, ( triplet.dstAttr._2, triplet.dstAttr._3)) )
},
(m1, m2) => attractionForceInverted( (m1,m2) ) // Reduce Function for all contributions from all neighbors
) // inverted attraction to other component is now the VertexRDD
*/
// val disp1: VertexRDD[(Double, Double)] = attr1.aggregateUsingIndex(attr1, (p1, p2) => ( p1._1 + p2._1 , p1._2 + p2._2 ))
// val disp2: VertexRDD[(Double, Double)] = attr2.aggregateUsingIndex(attr2, (p1, p2) => ( p1._1 + p2._1 , p1._2 + p2._2 ))
// val setC: VertexRDD[(String, Double, Double, (Double,Double,Double,Double))] = g.vertices.innerJoin(disp1)( (id, a, b) => preUpdatePos(a,b) )
// val g2 = Graph(setC, g.edges, defaultNode)
// val setD: VertexRDD[(String, Double, Double, (Double,Double,Double,Double))] = g2.vertices.innerJoin(disp2)( (id, a, b) => preUpdatePos(a,b) )
// val graphN = Graph(setD, g.edges, defaultNode)
// graphN
// Placeholder: return the input graph unchanged.
g
}
/**
* The Fruchetman Reingold Layout is calculated with n = 10 iterations.
*
*
*/
//def layoutFDFR2( g: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] ) : Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] = {
// ci = 0
// g.cache()
// println( "> Start the Layout procedure: n=" + iterations + " (nr of iterations)." )
// var gs = shuffle( g )
// println( "> Shuffled the graph." )
// temperature = 0.1 * math.sqrt(area) // current temperature
// for(iteration <- 1 to iterations) {
// ci = iteration
// println( "> Temperature: (T=" + temperature + ")" )
// Repulsion is usually for all pairs of vertexes if they are different vertexs ...
// But for simplification we use only the neighbors.
// val gRep = calcRepulsion( gs )
// Attraction is along the links only
// val gAttr = calcAttraction( gRep )
// WE CAN DEBUG EACH STEP
// dumpWithLayout( gAttr, fileNameDebugDump, "#" )
// Repulsion and Attraction are in super position as they are overlaing forces
// def mapValues[VD2](map: (VertexId, VD) => VD2): VertexRDD[VD2]
// val vNewPositions = gAttr.vertices.mapValues( (id, v) => updatePos( v ) )
// gs = Graph(vNewPositions, g.edges, defaultNode)
// cool
// cool(iteration)
// }
// gs // this is the last state of our layout
// g
//}
/**
 * Computes a Fruchterman-Reingold force-directed layout LOCALLY on the driver:
 * the vertex and edge RDDs are collected into arrays and iterated in plain
 * Scala (no Pregel, no distributed aggregation).
 *
 * @param g  the graph to lay out; vertex attribute is (label, x, y, (md1,md2,md3,md4))
 * @param i  number of layout iterations to run (0 = just dump the input positions)
 * @param fn output folder; "nodes.csv" and "links.csv" with the final layout are written there
 * @return   the original graph `g` — the computed positions are only written to
 *           disk, they are NOT folded back into the returned graph
 */
def layoutFDFRLocally( g: Graph[ (String, Double, Double, (Double,Double,Double,Double)),Double ],
                       i: Integer,
                       fn: String ) : Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] = {
  println( "### FOLDER: " + fn )
  val file = new File( fn )
  file.mkdirs()
  println( "### " + file.exists )
  println( ">>> Layout : layoutFDFRLocally() ..." )
  // Per-node displacement accumulator, keyed by vertex id rendered as String.
  // NOTE(review): this table is never cleared between iterations, so later
  // iterations accumulate attraction on top of stale values — confirm intended.
  val displacements = new java.util.Hashtable[String,Vector]()
  // Vertex lookup by id, filled during the first repulsion pass.
  val verts = new java.util.Hashtable[Long,(Long,(String, Double, Double, (Double,Double,Double,Double)))]()
  println( "> Start the Layout procedure: n=" + i + " (nr of iterations)." )
  // var gs = shuffle( g )
  var gs = g
  // println( "> Shuffled the graph g into gs." )
  // Pull everything to the driver; all further work is on local arrays.
  val edges = gs.edges
  val vertices= gs.vertices
  var eA = edges.collect
  var vA = vertices.collect
  temperature = 0.1 * math.sqrt(area) // current temperature
  for(iteration <- 1 to i) {
    ci = iteration
    println( "> Temperature: (T=" + temperature + ")" )
    // Repulsion is calculated for all pairs of vertexes if they are
    // different vertexes with different IDs ...
    /***
     * FOR THE PARALLEL VERSION WE USE THIS:
     *
     * val gRep = calcRepulsion( vertices )
     *
     */
    // --- Repulsion: all ordered vertex pairs (i, j), i != j ---
    for(i <- 0 until vA.length){
      // set vertex.disp to zero
      var vertex_disp = new Vector( 0.0, 0.0 )
      val P1 = vA(i)
      var contribs = 0
      for(j <- 0 until vA.length){
        println("i'th element is: " + P1 + " >>> " + P1.getClass + " >>>> " + P1._2.getClass );
        if ( i != j ) {
          contribs = contribs + 1
          val P2 = vA(j)
          //println("j'th element is: (VERTEX) " + p2);
          // Force on P1 from P2, based on their current (x, y) positions.
          val disp = repulsionForce( ( (P1._2._2,P1._2._3),(P2._2._2,P2._2._3)) )
          // increase vertice.disp by disp
          vertex_disp = vertex_disp + new Vector( REP_SCALE * disp._1, REP_SCALE * disp._2 )
          println(" => F_REP( " + i + " " + j + " ): " + disp)
        }
      }
      println(" => F_REP_total( " + i + " ): {" + contribs + "} <" + P1 + "> " + vertex_disp.asString)
      val nodeDispl = displacements.get( ""+P1._1 );
      // NOTE(review): the repulsion total is only stored when no entry exists
      // yet — i.e. effectively only in the FIRST iteration, since the table is
      // never cleared. Later iterations discard their repulsion totals here.
      if ( nodeDispl == null ) {
        displacements.put( ""+P1._1, vertex_disp );
        verts.put( P1._1, P1 )
      }
    }
    //
    // Attraction is along the links only, so we iterate on the linklist
    //
    // val gAttr = calcAttraction( edges )
    for(i <- 0 until eA.length){
      val p1 = eA(i).srcId
      val p2 = eA(i).dstId
      val P1 = verts.get( p1 )
      val P2 = verts.get( p2 )
      // Attraction acts on both endpoints, in opposite directions.
      val coords = ( (P1._2._2,P1._2._3) ,(P2._2._2,P2._2._3) )
      val f1 = attractionForce( coords )
      val f2 = attractionForceInverted( coords )
      val vertex_dispF1 = new Vector( f1._1, f1._2 )
      val vertex_dispF2 = new Vector( f2._1, f2._2 )
      println("i'th element is: (EDGE) " + eA(i));
      println(" => FORCE 1: ... " + f1);
      println(" => FORCE 2: ... " + f2);
      // Add each endpoint's contribution to its accumulated displacement.
      val nodeDispl1 = displacements.get( ""+P1._1 );
      val nodeDispl2 = displacements.get( ""+P2._1 );
      if ( nodeDispl1 == null ) {
        displacements.put( ""+P1._1, vertex_dispF1 );
      }
      else {
        displacements.put( ""+P1._1, nodeDispl1 + vertex_dispF1 );
      }
      if ( nodeDispl2 == null ) {
        displacements.put( ""+P2._1, vertex_dispF2 );
      }
      else {
        displacements.put( ""+P2._1, nodeDispl2 + vertex_dispF2 );
      }
    }
    // --- Apply the accumulated displacement to every vertex position ---
    for( i <- 0 until vA.length ){
      var p1 = vA(i)
      val f1 = displacements.get( ""+p1._1 )
      println ( "MOVE: " + p1 + " by " + f1.asString );
      // println ( " : " + p1.getClass + " *** " + vA.getClass);
      // New position = old position + displacement; metadata reset to zero.
      val nP = (p1._1,(p1._2._1, p1._2._2 + f1.x, p1._2._3 + f1.y, (0.0,0.0,0.0,0.0)))
      vA(i) = nP;
    }
    // OPTIONALLY WE CAN DEBUG EACH STEP
    // dumpWithLayout( gAttr, fileNameDebugDump, "#" )
    // Repulsion and Attraction are in super position
    // so lets simply add all contributions
    // cool the system and go on to next step ...
    cool(iteration)
  }
  ///// WRITE THE final layout
  // NOTE(review): "\\t" and "\\n" are literal backslash sequences, NOT tab and
  // newline characters — this looks like an over-escaping artifact; confirm the
  // intended CSV separators before relying on these files.
  val bwN = new BufferedWriter( new FileWriter( fn + "/nodes.csv" ) )
  println( vA )
  bwN.write( "label" + "\\t" + "posX" + "\\t" + "posY" + "\\n" )
  for(i <- 0 until vA.length){
    println("i'th element is: (VERTEX) " + vA(i));
    val n = vA(i)
    bwN.write( n._2._1 + "\\t" + n._2._2 + "\\t" + n._2._3 + "\\n" )
  }
  bwN.close()
  val bwL = new BufferedWriter( new FileWriter( fn + "/links.csv" ) )
  println( eA );
  bwL.write( "SOURCE" + "\\t" + "TARGET" + "\\t" + "WEIGHT" + "\\n" )
  for(i <- 0 until eA.length){
    println("i'th element is: (EDGE) " + eA(i));
    val e = eA(i)
    bwL.write( e.srcId + "\\t" + e.dstId + "\\t1.0\\n" )
  }
  bwL.close()
  println( "> Final Temperature: (T=" + temperature + ")" )
  // finally, we create another graph since we want to continue in Spark ...
  // gs = Graph(vNewPositions, g.edges, defaultNode)
  g // this is the last state of our layout
}
/**
 * Builds a small 5-node / 8-edge demo graph with fixed initial positions.
 *
 * We create a graph from two layers. One is the stationary link layer and one is the
 * functional layer, calculated as an "Edit-Activity Correlation" network.
 * (Only the static layer is constructed here; the functional layer is kept
 * below as commented-out reference data.)
 *
 * @param gn symbolic name of the demo graph (currently unused in construction)
 * @return   the static-link graph; vertex attribute is (label, x, y, (md1,md2,md3,md4))
 */
def createDemoGraph( gn: String ) : Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double ] = {
  // name, x, y, f.x, f.y, att1,att2
  val nodes: RDD[(VertexId, ( String, Double, Double, (Double,Double,Double,Double)))] =
    sc.parallelize(Array((1L, ("a", 180.0, 50.0, (0.0 ,0.0 ,0.0 ,0.0 ))),
                         (2L, ("b", 600.0, 100.0, (0.0 ,0.0 ,0.0 ,0.0 ))),
                         (3L, ("c", 30.0, 130.0, (0.0 ,0.0 ,0.0 ,0.0 ))),
                         (4L, ("d", 130.0, 830.0, (0.0 ,0.0 ,0.0 ,0.0 ))),
                         (5L, ("e", 400.0, 400.0, (0.0 ,0.0 ,0.0 ,0.0 )))))
  val statLink: RDD[Edge[Double]] =
    sc.parallelize(Array(Edge(1L, 2L, 1.0),
                         Edge(1L, 3L, 1.0),
                         Edge(2L, 3L, 1.0),
                         Edge(3L, 5L, 1.0),
                         Edge(4L, 1L, 1.0),
                         Edge(4L, 2L, 1.0),
                         Edge(4L, 3L, 1.0),
                         Edge(5L, 4L, 1.0)))
  //val functLink: RDD[Edge[Double]] =
  // sc.parallelize(Array(Edge(3L, 7L, 1.0), Edge(5L, 3L, 1.0),
  // Edge(3L, 5L, 0.5), Edge(5L, 2L, 0.15),
  // Edge(3L, 2L, 0.1), Edge(5L, 7L, 0.25),
  // Edge(2L, 5L, 0.2), Edge(7L, 5L, 0.25),
  // Edge(2L, 7L, 0.3), Edge(2L, 3L, 0.15)))
  //
  //var graphF: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double] = Graph(nodes, functLink, defaultNode)
  val defaultNode = ("O", 0.0, 0.0, (0.0, 0.0, 0.0, 0.0) )
  // `val` instead of `var`: graphS is never reassigned.
  val graphS: Graph[ (String, Double, Double, (Double,Double,Double,Double)), Double] = Graph(nodes, statLink, defaultNode)
  graphS
}
/**
*
* This Snippet is an implementation of the Fruchterman-Reingold Force Directed Graph Layout Algorithm.
*
* Layout calculations for large graphs is still a problem. Spark allows us local processing and simple scaling
* if a cluster is available.
*
 * A force directed layout represents to some extent a physical reality of the system. Maybe the restriction to a
 * two dimensional plane is not optimal. But as a first step we use the static link structure of LNNs to calculate
 * the layout. Functional links are then plotted into this layout.
 *
 * We now investigate the dependency between the distance in the 'static layout' and the link strength of the 'functional network'.
 * Furthermore one can calculate the distance between the nodes in both layouts. If the ratio of both distances is 1, they are
 * not different. If the distances are very different, one can find node pairs for which a longer distance exists and such with
 * a shorter distance. Machine learning algorithms can use such link properties for classification models. It would be interesting
 * to find out if this measure has an influence on page separation or re-linking.
 *
 * In this work we measure the force on nodes coming from functional links. The functional displacement of a node is calculated as the
 * difference between its location calculated in the static link network and the location found in the functional network.
*
* For a simple interpretation we use the absolute displacement. But one can also think about a vector representation or a complex number
* which also contains an angle.
*
*/
/**
*
* Load the Wiki-Talk Graph
*
*/
//val fileName = "blog/wiki-Talk.txt"
//val fileNameDump = WD + "/fdl/wiki-talk-graph.dump"
//val graphS = loadEdges( fileName )
//val cGraphS = convert( graphS )
/**
* Create the DEMO-Graph
*
* simple:
*
*/
// Create the demo graph and prepare the layout parameters.
val gn = "simple"
val cGraphS = createDemoGraph( gn )
val fileNameDump = WD + "/fdl/demo-graph" + gn + ".dump"
//
// Just to be sure what was loaded ...
//
dump( cGraphS, fileNameDump + gn + ".PREP" )
// dumpWithLayout( cGraphS, fileNameDump + gn + ".PREP", "#" )
val sizeOfGraph = cGraphS.vertices.count()
println( "> Size of the graph : " + sizeOfGraph + " nodes." )
//
// define the spring constant
//
k = 0.8 * math.sqrt(area / sizeOfGraph) // force constant
REP_SCALE = 0.0001
ATT_SCALE = 0.00001
println( "> Area : " + area )
println( "> Spring constant : " + k + " a.u." )
println( "> Graph data and area were prepared successfully." )
println( "> Ready to do a layout." )
// BUGFIX: both layout runs previously bound the same name `gLSL`, which is a
// duplicate definition in a compiled file (only legal in the REPL). The first
// run (0 iterations) merely dumps the original positions, so bind it apart.
val gOriginal = layoutFDFRLocally( cGraphS, 0 , "/GITHUB/ETOSHA.WS/tmp/original_" + gn )
val gLSL = layoutFDFRLocally( cGraphS, 500 , "/GITHUB/ETOSHA.WS/tmp/fdl_" + gn )
// val gLS = layoutFDFR2( cGraphS )
dumpWithLayout( gLSL, fileNameDump , "" )
println( "> DONE!" )
println( "> Created EDGE list: " + fileNameDump )
// val result1 = "gnuplot ./blog/fdlplot.gnuplot" !!
val result2 = "gnuplot ./blog/fdlplot-local.gnuplot" !!
println(result2)
| kamir/graphx-layouts | src/blog/fdlayout2.scala | Scala | apache-2.0 | 26,980 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO2
package com.google.protobuf.descriptor
import _root_.scalapb.internal.compat.JavaConverters._
/** Describes a complete .proto file.
*
* @param name
* file name, relative to root of source tree
* @param package
* e.g. "foo", "foo.bar", etc.
* @param dependency
* Names of files imported by this file.
* @param publicDependency
* Indexes of the public imported files in the dependency list above.
* @param weakDependency
* Indexes of the weak imported files in the dependency list.
* For Google-internal migration only. Do not use.
* @param messageType
* All top-level definitions in this file.
* @param sourceCodeInfo
* This field contains optional information about the original source code.
* You may safely remove this entire field without harming runtime
* functionality of the descriptors -- the information is needed only by
* development tools.
* @param syntax
* The syntax of the proto file.
* The supported values are "proto2" and "proto3".
*/
@SerialVersionUID(0L)
final case class FileDescriptorProto(
    name: _root_.scala.Option[_root_.scala.Predef.String] = _root_.scala.None,
    `package`: _root_.scala.Option[_root_.scala.Predef.String] = _root_.scala.None,
    dependency: _root_.scala.Seq[_root_.scala.Predef.String] = _root_.scala.Seq.empty,
    publicDependency: _root_.scala.Seq[_root_.scala.Int] = _root_.scala.Seq.empty,
    weakDependency: _root_.scala.Seq[_root_.scala.Int] = _root_.scala.Seq.empty,
    messageType: _root_.scala.Seq[com.google.protobuf.descriptor.DescriptorProto] = _root_.scala.Seq.empty,
    enumType: _root_.scala.Seq[com.google.protobuf.descriptor.EnumDescriptorProto] = _root_.scala.Seq.empty,
    service: _root_.scala.Seq[com.google.protobuf.descriptor.ServiceDescriptorProto] = _root_.scala.Seq.empty,
    extension: _root_.scala.Seq[com.google.protobuf.descriptor.FieldDescriptorProto] = _root_.scala.Seq.empty,
    options: _root_.scala.Option[com.google.protobuf.descriptor.FileOptions] = _root_.scala.None,
    sourceCodeInfo: _root_.scala.Option[com.google.protobuf.descriptor.SourceCodeInfo] = _root_.scala.None,
    syntax: _root_.scala.Option[_root_.scala.Predef.String] = _root_.scala.None,
    unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
    ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[FileDescriptorProto] {
    // Cached wire size; 0 is the "not yet computed" sentinel. Recomputation is
    // deterministic for an immutable message, so a racy double-compute is harmless.
    @transient
    private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
    // Sums the proto2 wire-format size of every present field (generated code).
    private[this] def __computeSerializedValue(): _root_.scala.Int = {
      var __size = 0
      if (name.isDefined) {
        val __value = name.get
        __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(1, __value)
      };
      if (`package`.isDefined) {
        val __value = `package`.get
        __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(2, __value)
      };
      dependency.foreach { __item =>
        val __value = __item
        __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(3, __value)
      }
      publicDependency.foreach { __item =>
        val __value = __item
        __size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(10, __value)
      }
      weakDependency.foreach { __item =>
        val __value = __item
        __size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(11, __value)
      }
      messageType.foreach { __item =>
        val __value = __item
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      }
      enumType.foreach { __item =>
        val __value = __item
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      }
      service.foreach { __item =>
        val __value = __item
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      }
      extension.foreach { __item =>
        val __value = __item
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      }
      if (options.isDefined) {
        val __value = options.get
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      };
      if (sourceCodeInfo.isDefined) {
        val __value = sourceCodeInfo.get
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      };
      if (syntax.isDefined) {
        val __value = syntax.get
        __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(12, __value)
      };
      __size += unknownFields.serializedSize
      __size
    }
    override def serializedSize: _root_.scala.Int = {
      var read = __serializedSizeCachedValue
      if (read == 0) {
        read = __computeSerializedValue()
        __serializedSizeCachedValue = read
      }
      read
    }
    // Writes all present fields in ascending field-number order (1 .. 12).
    def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
      name.foreach { __v =>
        val __m = __v
        _output__.writeString(1, __m)
      };
      `package`.foreach { __v =>
        val __m = __v
        _output__.writeString(2, __m)
      };
      dependency.foreach { __v =>
        val __m = __v
        _output__.writeString(3, __m)
      };
      messageType.foreach { __v =>
        val __m = __v
        _output__.writeTag(4, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      enumType.foreach { __v =>
        val __m = __v
        _output__.writeTag(5, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      service.foreach { __v =>
        val __m = __v
        _output__.writeTag(6, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      extension.foreach { __v =>
        val __m = __v
        _output__.writeTag(7, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      options.foreach { __v =>
        val __m = __v
        _output__.writeTag(8, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      sourceCodeInfo.foreach { __v =>
        val __m = __v
        _output__.writeTag(9, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
      publicDependency.foreach { __v =>
        val __m = __v
        _output__.writeInt32(10, __m)
      };
      weakDependency.foreach { __v =>
        val __m = __v
        _output__.writeInt32(11, __m)
      };
      syntax.foreach { __v =>
        val __m = __v
        _output__.writeString(12, __m)
      };
      unknownFields.writeTo(_output__)
    }
    // Generated accessors: getX returns the field or its proto2 default,
    // clearX resets it, withX / addX / addAllX return an updated copy.
    def getName: _root_.scala.Predef.String = name.getOrElse("")
    def clearName: FileDescriptorProto = copy(name = _root_.scala.None)
    def withName(__v: _root_.scala.Predef.String): FileDescriptorProto = copy(name = Option(__v))
    def getPackage: _root_.scala.Predef.String = `package`.getOrElse("")
    def clearPackage: FileDescriptorProto = copy(`package` = _root_.scala.None)
    def withPackage(__v: _root_.scala.Predef.String): FileDescriptorProto = copy(`package` = Option(__v))
    def clearDependency = copy(dependency = _root_.scala.Seq.empty)
    def addDependency(__vs: _root_.scala.Predef.String*): FileDescriptorProto = addAllDependency(__vs)
    def addAllDependency(__vs: Iterable[_root_.scala.Predef.String]): FileDescriptorProto = copy(dependency = dependency ++ __vs)
    def withDependency(__v: _root_.scala.Seq[_root_.scala.Predef.String]): FileDescriptorProto = copy(dependency = __v)
    def clearPublicDependency = copy(publicDependency = _root_.scala.Seq.empty)
    def addPublicDependency(__vs: _root_.scala.Int*): FileDescriptorProto = addAllPublicDependency(__vs)
    def addAllPublicDependency(__vs: Iterable[_root_.scala.Int]): FileDescriptorProto = copy(publicDependency = publicDependency ++ __vs)
    def withPublicDependency(__v: _root_.scala.Seq[_root_.scala.Int]): FileDescriptorProto = copy(publicDependency = __v)
    def clearWeakDependency = copy(weakDependency = _root_.scala.Seq.empty)
    def addWeakDependency(__vs: _root_.scala.Int*): FileDescriptorProto = addAllWeakDependency(__vs)
    def addAllWeakDependency(__vs: Iterable[_root_.scala.Int]): FileDescriptorProto = copy(weakDependency = weakDependency ++ __vs)
    def withWeakDependency(__v: _root_.scala.Seq[_root_.scala.Int]): FileDescriptorProto = copy(weakDependency = __v)
    def clearMessageType = copy(messageType = _root_.scala.Seq.empty)
    def addMessageType(__vs: com.google.protobuf.descriptor.DescriptorProto*): FileDescriptorProto = addAllMessageType(__vs)
    def addAllMessageType(__vs: Iterable[com.google.protobuf.descriptor.DescriptorProto]): FileDescriptorProto = copy(messageType = messageType ++ __vs)
    def withMessageType(__v: _root_.scala.Seq[com.google.protobuf.descriptor.DescriptorProto]): FileDescriptorProto = copy(messageType = __v)
    def clearEnumType = copy(enumType = _root_.scala.Seq.empty)
    def addEnumType(__vs: com.google.protobuf.descriptor.EnumDescriptorProto*): FileDescriptorProto = addAllEnumType(__vs)
    def addAllEnumType(__vs: Iterable[com.google.protobuf.descriptor.EnumDescriptorProto]): FileDescriptorProto = copy(enumType = enumType ++ __vs)
    def withEnumType(__v: _root_.scala.Seq[com.google.protobuf.descriptor.EnumDescriptorProto]): FileDescriptorProto = copy(enumType = __v)
    def clearService = copy(service = _root_.scala.Seq.empty)
    def addService(__vs: com.google.protobuf.descriptor.ServiceDescriptorProto*): FileDescriptorProto = addAllService(__vs)
    def addAllService(__vs: Iterable[com.google.protobuf.descriptor.ServiceDescriptorProto]): FileDescriptorProto = copy(service = service ++ __vs)
    def withService(__v: _root_.scala.Seq[com.google.protobuf.descriptor.ServiceDescriptorProto]): FileDescriptorProto = copy(service = __v)
    def clearExtension = copy(extension = _root_.scala.Seq.empty)
    def addExtension(__vs: com.google.protobuf.descriptor.FieldDescriptorProto*): FileDescriptorProto = addAllExtension(__vs)
    def addAllExtension(__vs: Iterable[com.google.protobuf.descriptor.FieldDescriptorProto]): FileDescriptorProto = copy(extension = extension ++ __vs)
    def withExtension(__v: _root_.scala.Seq[com.google.protobuf.descriptor.FieldDescriptorProto]): FileDescriptorProto = copy(extension = __v)
    def getOptions: com.google.protobuf.descriptor.FileOptions = options.getOrElse(com.google.protobuf.descriptor.FileOptions.defaultInstance)
    def clearOptions: FileDescriptorProto = copy(options = _root_.scala.None)
    def withOptions(__v: com.google.protobuf.descriptor.FileOptions): FileDescriptorProto = copy(options = Option(__v))
    def getSourceCodeInfo: com.google.protobuf.descriptor.SourceCodeInfo = sourceCodeInfo.getOrElse(com.google.protobuf.descriptor.SourceCodeInfo.defaultInstance)
    def clearSourceCodeInfo: FileDescriptorProto = copy(sourceCodeInfo = _root_.scala.None)
    def withSourceCodeInfo(__v: com.google.protobuf.descriptor.SourceCodeInfo): FileDescriptorProto = copy(sourceCodeInfo = Option(__v))
    def getSyntax: _root_.scala.Predef.String = syntax.getOrElse("")
    def clearSyntax: FileDescriptorProto = copy(syntax = _root_.scala.None)
    def withSyntax(__v: _root_.scala.Predef.String): FileDescriptorProto = copy(syntax = Option(__v))
    def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
    def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
    // Reflective field access by proto field number; the match is unchecked,
    // so an unknown field number throws a MatchError.
    def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
      (__fieldNumber: @_root_.scala.unchecked) match {
        case 1 => name.orNull
        case 2 => `package`.orNull
        case 3 => dependency
        case 10 => publicDependency
        case 11 => weakDependency
        case 4 => messageType
        case 5 => enumType
        case 6 => service
        case 7 => extension
        case 8 => options.orNull
        case 9 => sourceCodeInfo.orNull
        case 12 => syntax.orNull
      }
    }
    // Reflective field access returning scalapb PValue wrappers; requires the
    // descriptor to belong to this message's companion.
    def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
      _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
      (__field.number: @_root_.scala.unchecked) match {
        case 1 => name.map(_root_.scalapb.descriptors.PString).getOrElse(_root_.scalapb.descriptors.PEmpty)
        case 2 => `package`.map(_root_.scalapb.descriptors.PString).getOrElse(_root_.scalapb.descriptors.PEmpty)
        case 3 => _root_.scalapb.descriptors.PRepeated(dependency.iterator.map(_root_.scalapb.descriptors.PString).toVector)
        case 10 => _root_.scalapb.descriptors.PRepeated(publicDependency.iterator.map(_root_.scalapb.descriptors.PInt).toVector)
        case 11 => _root_.scalapb.descriptors.PRepeated(weakDependency.iterator.map(_root_.scalapb.descriptors.PInt).toVector)
        case 4 => _root_.scalapb.descriptors.PRepeated(messageType.iterator.map(_.toPMessage).toVector)
        case 5 => _root_.scalapb.descriptors.PRepeated(enumType.iterator.map(_.toPMessage).toVector)
        case 6 => _root_.scalapb.descriptors.PRepeated(service.iterator.map(_.toPMessage).toVector)
        case 7 => _root_.scalapb.descriptors.PRepeated(extension.iterator.map(_.toPMessage).toVector)
        case 8 => options.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty)
        case 9 => sourceCodeInfo.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty)
        case 12 => syntax.map(_root_.scalapb.descriptors.PString).getOrElse(_root_.scalapb.descriptors.PEmpty)
      }
    }
    def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
    def companion = com.google.protobuf.descriptor.FileDescriptorProto
}
object FileDescriptorProto extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.FileDescriptorProto] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.FileDescriptorProto, com.google.protobuf.DescriptorProtos.FileDescriptorProto] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.FileDescriptorProto] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.FileDescriptorProto, com.google.protobuf.DescriptorProtos.FileDescriptorProto] = this
def toJavaProto(scalaPbSource: com.google.protobuf.descriptor.FileDescriptorProto): com.google.protobuf.DescriptorProtos.FileDescriptorProto = {
val javaPbOut = com.google.protobuf.DescriptorProtos.FileDescriptorProto.newBuilder
scalaPbSource.name.foreach(javaPbOut.setName)
scalaPbSource.`package`.foreach(javaPbOut.setPackage)
javaPbOut.addAllDependency(scalaPbSource.dependency.asJava)
javaPbOut.addAllPublicDependency(_root_.scalapb.internal.compat.toIterable(scalaPbSource.publicDependency.iterator.map(_root_.scala.Int.box)).asJava)
javaPbOut.addAllWeakDependency(_root_.scalapb.internal.compat.toIterable(scalaPbSource.weakDependency.iterator.map(_root_.scala.Int.box)).asJava)
javaPbOut.addAllMessageType(_root_.scalapb.internal.compat.toIterable(scalaPbSource.messageType.iterator.map(com.google.protobuf.descriptor.DescriptorProto.toJavaProto)).asJava)
javaPbOut.addAllEnumType(_root_.scalapb.internal.compat.toIterable(scalaPbSource.enumType.iterator.map(com.google.protobuf.descriptor.EnumDescriptorProto.toJavaProto)).asJava)
javaPbOut.addAllService(_root_.scalapb.internal.compat.toIterable(scalaPbSource.service.iterator.map(com.google.protobuf.descriptor.ServiceDescriptorProto.toJavaProto)).asJava)
javaPbOut.addAllExtension(_root_.scalapb.internal.compat.toIterable(scalaPbSource.extension.iterator.map(com.google.protobuf.descriptor.FieldDescriptorProto.toJavaProto)).asJava)
scalaPbSource.options.map(com.google.protobuf.descriptor.FileOptions.toJavaProto).foreach(javaPbOut.setOptions)
scalaPbSource.sourceCodeInfo.map(com.google.protobuf.descriptor.SourceCodeInfo.toJavaProto).foreach(javaPbOut.setSourceCodeInfo)
scalaPbSource.syntax.foreach(javaPbOut.setSyntax)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.DescriptorProtos.FileDescriptorProto): com.google.protobuf.descriptor.FileDescriptorProto = com.google.protobuf.descriptor.FileDescriptorProto(
name = if (javaPbSource.hasName) Some(javaPbSource.getName) else _root_.scala.None,
`package` = if (javaPbSource.hasPackage) Some(javaPbSource.getPackage) else _root_.scala.None,
dependency = javaPbSource.getDependencyList.asScala.iterator.map(_root_.scala.Predef.identity).toSeq,
publicDependency = javaPbSource.getPublicDependencyList.asScala.iterator.map(_.intValue).toSeq,
weakDependency = javaPbSource.getWeakDependencyList.asScala.iterator.map(_.intValue).toSeq,
messageType = javaPbSource.getMessageTypeList.asScala.iterator.map(com.google.protobuf.descriptor.DescriptorProto.fromJavaProto).toSeq,
enumType = javaPbSource.getEnumTypeList.asScala.iterator.map(com.google.protobuf.descriptor.EnumDescriptorProto.fromJavaProto).toSeq,
service = javaPbSource.getServiceList.asScala.iterator.map(com.google.protobuf.descriptor.ServiceDescriptorProto.fromJavaProto).toSeq,
extension = javaPbSource.getExtensionList.asScala.iterator.map(com.google.protobuf.descriptor.FieldDescriptorProto.fromJavaProto).toSeq,
options = if (javaPbSource.hasOptions) Some(com.google.protobuf.descriptor.FileOptions.fromJavaProto(javaPbSource.getOptions)) else _root_.scala.None,
sourceCodeInfo = if (javaPbSource.hasSourceCodeInfo) Some(com.google.protobuf.descriptor.SourceCodeInfo.fromJavaProto(javaPbSource.getSourceCodeInfo)) else _root_.scala.None,
syntax = if (javaPbSource.hasSyntax) Some(javaPbSource.getSyntax) else _root_.scala.None
)
/** Reads fields from `_input__` and merges them into `_message__`, returning a
 *  new message. Repeated int32 fields accept both unpacked and packed wire
 *  forms; unrecognized tags are accumulated into the unknown-field set.
 *  (ScalaPB generated parser -- do not hand-edit logic.)
 */
def merge(`_message__`: com.google.protobuf.descriptor.FileDescriptorProto, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.FileDescriptorProto = {
var __name = `_message__`.name
var __package = `_message__`.`package`
val __dependency = (_root_.scala.collection.immutable.Vector.newBuilder[_root_.scala.Predef.String] ++= `_message__`.dependency)
val __publicDependency = (_root_.scala.collection.immutable.Vector.newBuilder[_root_.scala.Int] ++= `_message__`.publicDependency)
val __weakDependency = (_root_.scala.collection.immutable.Vector.newBuilder[_root_.scala.Int] ++= `_message__`.weakDependency)
val __messageType = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.DescriptorProto] ++= `_message__`.messageType)
val __enumType = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.EnumDescriptorProto] ++= `_message__`.enumType)
val __service = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.ServiceDescriptorProto] ++= `_message__`.service)
val __extension = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.FieldDescriptorProto] ++= `_message__`.extension)
var __options = `_message__`.options
var __sourceCodeInfo = `_message__`.sourceCodeInfo
var __syntax = `_message__`.syntax
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true // tag 0 marks end of input
case 10 =>
__name = Option(_input__.readStringRequireUtf8())
case 18 =>
__package = Option(_input__.readStringRequireUtf8())
case 26 =>
__dependency += _input__.readStringRequireUtf8()
case 80 => // unpacked public_dependency (field 10)
__publicDependency += _input__.readInt32()
case 82 => { // packed public_dependency (field 10): length-delimited run of varints
val length = _input__.readRawVarint32()
val oldLimit = _input__.pushLimit(length)
while (_input__.getBytesUntilLimit > 0) {
__publicDependency += _input__.readInt32()
}
_input__.popLimit(oldLimit)
}
case 88 => // unpacked weak_dependency (field 11)
__weakDependency += _input__.readInt32()
case 90 => { // packed weak_dependency (field 11)
val length = _input__.readRawVarint32()
val oldLimit = _input__.pushLimit(length)
while (_input__.getBytesUntilLimit > 0) {
__weakDependency += _input__.readInt32()
}
_input__.popLimit(oldLimit)
}
case 34 =>
__messageType += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.DescriptorProto.defaultInstance)
case 42 =>
__enumType += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.EnumDescriptorProto.defaultInstance)
case 50 =>
__service += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.ServiceDescriptorProto.defaultInstance)
case 58 =>
__extension += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.FieldDescriptorProto.defaultInstance)
case 66 => // options: merged into any previously-read options message
__options = Option(_root_.scalapb.LiteParser.readMessage(_input__, __options.getOrElse(com.google.protobuf.descriptor.FileOptions.defaultInstance)))
case 74 =>
__sourceCodeInfo = Option(_root_.scalapb.LiteParser.readMessage(_input__, __sourceCodeInfo.getOrElse(com.google.protobuf.descriptor.SourceCodeInfo.defaultInstance)))
case 98 =>
__syntax = Option(_input__.readStringRequireUtf8())
case tag => // anything else is preserved verbatim as an unknown field
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.descriptor.FileDescriptorProto(
name = __name,
`package` = __package,
dependency = __dependency.result(),
publicDependency = __publicDependency.result(),
weakDependency = __weakDependency.result(),
messageType = __messageType.result(),
enumType = __enumType.result(),
service = __service.result(),
extension = __extension.result(),
options = __options,
sourceCodeInfo = __sourceCodeInfo,
syntax = __syntax,
unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
)
}
/** Deserializes a ScalaPB descriptor-level `PMessage` into a FileDescriptorProto.
 *  Fails fast if any field descriptor belongs to a different message type.
 *  (ScalaPB generated code -- do not hand-edit logic.)
 */
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.FileDescriptorProto] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.descriptor.FileDescriptorProto(
name = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Predef.String]]),
`package` = __fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Predef.String]]),
dependency = __fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).map(_.as[_root_.scala.Seq[_root_.scala.Predef.String]]).getOrElse(_root_.scala.Seq.empty),
publicDependency = __fieldsMap.get(scalaDescriptor.findFieldByNumber(10).get).map(_.as[_root_.scala.Seq[_root_.scala.Int]]).getOrElse(_root_.scala.Seq.empty),
weakDependency = __fieldsMap.get(scalaDescriptor.findFieldByNumber(11).get).map(_.as[_root_.scala.Seq[_root_.scala.Int]]).getOrElse(_root_.scala.Seq.empty),
messageType = __fieldsMap.get(scalaDescriptor.findFieldByNumber(4).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.DescriptorProto]]).getOrElse(_root_.scala.Seq.empty),
enumType = __fieldsMap.get(scalaDescriptor.findFieldByNumber(5).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.EnumDescriptorProto]]).getOrElse(_root_.scala.Seq.empty),
service = __fieldsMap.get(scalaDescriptor.findFieldByNumber(6).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.ServiceDescriptorProto]]).getOrElse(_root_.scala.Seq.empty),
extension = __fieldsMap.get(scalaDescriptor.findFieldByNumber(7).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.FieldDescriptorProto]]).getOrElse(_root_.scala.Seq.empty),
options = __fieldsMap.get(scalaDescriptor.findFieldByNumber(8).get).flatMap(_.as[_root_.scala.Option[com.google.protobuf.descriptor.FileOptions]]),
sourceCodeInfo = __fieldsMap.get(scalaDescriptor.findFieldByNumber(9).get).flatMap(_.as[_root_.scala.Option[com.google.protobuf.descriptor.SourceCodeInfo]]),
syntax = __fieldsMap.get(scalaDescriptor.findFieldByNumber(12).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Predef.String]])
)
case _ => throw new RuntimeException("Expected PMessage")
}
// FileDescriptorProto is the message at index 1 of the compiled descriptor.proto
// file descriptor, in both the Java and the ScalaPB descriptor representations.
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DescriptorProtoCompanion.javaDescriptor.getMessageTypes().get(1)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DescriptorProtoCompanion.scalaDescriptor.messages(1)
/** Returns the companion for the message type stored in the given field number.
 *  The match is deliberately non-exhaustive (`@unchecked`): calling it with a
 *  non-message field number throws a MatchError.
 */
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 4 => __out = com.google.protobuf.descriptor.DescriptorProto
case 5 => __out = com.google.protobuf.descriptor.EnumDescriptorProto
case 6 => __out = com.google.protobuf.descriptor.ServiceDescriptorProto
case 7 => __out = com.google.protobuf.descriptor.FieldDescriptorProto
case 8 => __out = com.google.protobuf.descriptor.FileOptions
case 9 => __out = com.google.protobuf.descriptor.SourceCodeInfo
}
__out
}
// FileDescriptorProto declares no nested message types of its own.
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
// No enum-typed fields either, so any field number is an error here.
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
// Canonical empty instance: every optional field is None, every repeated field empty.
lazy val defaultInstance = com.google.protobuf.descriptor.FileDescriptorProto(
name = _root_.scala.None,
`package` = _root_.scala.None,
dependency = _root_.scala.Seq.empty,
publicDependency = _root_.scala.Seq.empty,
weakDependency = _root_.scala.Seq.empty,
messageType = _root_.scala.Seq.empty,
enumType = _root_.scala.Seq.empty,
service = _root_.scala.Seq.empty,
extension = _root_.scala.Seq.empty,
options = _root_.scala.None,
sourceCodeInfo = _root_.scala.None,
syntax = _root_.scala.None
)
/** Lenses for functional field access/update on FileDescriptorProto.
 *  For each optional field `x`, the `x` lens reads through the `getX` accessor
 *  and writes by wrapping in `Option`, while `optionalX` exposes the `Option`
 *  itself. (ScalaPB generated code -- do not hand-edit logic.)
 */
implicit class FileDescriptorProtoLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.FileDescriptorProto]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.FileDescriptorProto](_l) {
def name: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.getName)((c_, f_) => c_.copy(name = Option(f_)))
def optionalName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Predef.String]] = field(_.name)((c_, f_) => c_.copy(name = f_))
def `package`: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.getPackage)((c_, f_) => c_.copy(`package` = Option(f_)))
def optionalPackage: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Predef.String]] = field(_.`package`)((c_, f_) => c_.copy(`package` = f_))
def dependency: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[_root_.scala.Predef.String]] = field(_.dependency)((c_, f_) => c_.copy(dependency = f_))
def publicDependency: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[_root_.scala.Int]] = field(_.publicDependency)((c_, f_) => c_.copy(publicDependency = f_))
def weakDependency: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[_root_.scala.Int]] = field(_.weakDependency)((c_, f_) => c_.copy(weakDependency = f_))
def messageType: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.DescriptorProto]] = field(_.messageType)((c_, f_) => c_.copy(messageType = f_))
def enumType: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.EnumDescriptorProto]] = field(_.enumType)((c_, f_) => c_.copy(enumType = f_))
def service: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.ServiceDescriptorProto]] = field(_.service)((c_, f_) => c_.copy(service = f_))
def extension: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.FieldDescriptorProto]] = field(_.extension)((c_, f_) => c_.copy(extension = f_))
def options: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.FileOptions] = field(_.getOptions)((c_, f_) => c_.copy(options = Option(f_)))
def optionalOptions: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.descriptor.FileOptions]] = field(_.options)((c_, f_) => c_.copy(options = f_))
def sourceCodeInfo: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.SourceCodeInfo] = field(_.getSourceCodeInfo)((c_, f_) => c_.copy(sourceCodeInfo = Option(f_)))
def optionalSourceCodeInfo: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.descriptor.SourceCodeInfo]] = field(_.sourceCodeInfo)((c_, f_) => c_.copy(sourceCodeInfo = f_))
def syntax: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.getSyntax)((c_, f_) => c_.copy(syntax = Option(f_)))
def optionalSyntax: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Predef.String]] = field(_.syntax)((c_, f_) => c_.copy(syntax = f_))
}
// Protobuf field numbers as declared for FileDescriptorProto in descriptor.proto.
final val NAME_FIELD_NUMBER = 1
final val PACKAGE_FIELD_NUMBER = 2
final val DEPENDENCY_FIELD_NUMBER = 3
final val PUBLIC_DEPENDENCY_FIELD_NUMBER = 10
final val WEAK_DEPENDENCY_FIELD_NUMBER = 11
final val MESSAGE_TYPE_FIELD_NUMBER = 4
final val ENUM_TYPE_FIELD_NUMBER = 5
final val SERVICE_FIELD_NUMBER = 6
final val EXTENSION_FIELD_NUMBER = 7
final val OPTIONS_FIELD_NUMBER = 8
final val SOURCE_CODE_INFO_FIELD_NUMBER = 9
final val SYNTAX_FIELD_NUMBER = 12
/** Convenience constructor mirroring the case-class `apply`, passing every
 *  field through positionally. (ScalaPB generated code.)
 */
def of(
name: _root_.scala.Option[_root_.scala.Predef.String],
`package`: _root_.scala.Option[_root_.scala.Predef.String],
dependency: _root_.scala.Seq[_root_.scala.Predef.String],
publicDependency: _root_.scala.Seq[_root_.scala.Int],
weakDependency: _root_.scala.Seq[_root_.scala.Int],
messageType: _root_.scala.Seq[com.google.protobuf.descriptor.DescriptorProto],
enumType: _root_.scala.Seq[com.google.protobuf.descriptor.EnumDescriptorProto],
service: _root_.scala.Seq[com.google.protobuf.descriptor.ServiceDescriptorProto],
extension: _root_.scala.Seq[com.google.protobuf.descriptor.FieldDescriptorProto],
options: _root_.scala.Option[com.google.protobuf.descriptor.FileOptions],
sourceCodeInfo: _root_.scala.Option[com.google.protobuf.descriptor.SourceCodeInfo],
syntax: _root_.scala.Option[_root_.scala.Predef.String]
): _root_.com.google.protobuf.descriptor.FileDescriptorProto = _root_.com.google.protobuf.descriptor.FileDescriptorProto(
name,
`package`,
dependency,
publicDependency,
weakDependency,
messageType,
enumType,
service,
extension,
options,
sourceCodeInfo,
syntax
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.FileDescriptorProto])
}
| trueaccord/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/descriptor/FileDescriptorProto.scala | Scala | apache-2.0 | 32,015 |
package bootstrap.liftweb
import _root_.net.liftweb.util._
import _root_.net.liftweb.http._
import _root_.net.liftweb.sitemap._
import _root_.net.liftweb.sitemap.Loc._
import Helpers._
import _root_.net.liftweb.mapper._
import _root_.java.sql.{Connection, DriverManager}
import _root_.com.hellolift.model._
/**
* A class that's instantiated early and run. It allows the application
* to modify lift's environment
*/
class Boot {
  def boot {
    // Register our connection manager unless a JNDI-bound JDBC connection already exists.
    if (!DB.jndiJdbcConnAvailable_?)
      DB.defineConnectionManager(DefaultConnectionIdentifier, DBVendor)

    // Tell Lift where to search for snippets, comet widgets, etc.
    LiftRules.addToPackages("com.hellolift")

    // Bring the database schema in sync with the mapper definitions.
    Schemifier.schemify(true, Log.infoF _, User, Entry)

    // The locale comes from the logged-in user when available, otherwise it is
    // derived from the incoming HTTP request.
    LiftRules.localeCalculator =
      r => User.currentUser.map(_.locale.isAsLocale).openOr(LiftRules.defaultLocaleCalculator(r))

    // Assemble the site map: two static pages followed by the model-provided entries.
    val staticMenus = List(
      Menu(Loc("Home", List("index"), "Home")),
      Menu(Loc("Request Details", List("request"), "Request Details")))
    val entries = staticMenus ::: User.sitemap ::: Entry.sitemap
    LiftRules.setSiteMap(SiteMap(entries: _*))
  }
}
object DBVendor extends ConnectionManager {
  // Load (and thereby register) the embedded Derby driver exactly once.
  // Previously Class.forName ran on every newConnection call, which is
  // redundant work: driver registration is a one-time side effect.
  private lazy val driverLoaded: Boolean = {
    Class.forName("org.apache.derby.jdbc.EmbeddedDriver")
    true
  }

  /** Opens a new embedded-Derby connection, creating the database on first use.
   *  Returns Empty (after printing the stack trace) if the driver cannot be
   *  loaded or the connection fails. */
  def newConnection(name: ConnectionIdentifier): Box[Connection] = {
    try {
      driverLoaded
      Full(DriverManager.getConnection("jdbc:derby:lift_example;create=true"))
    } catch {
      case e : Exception => e.printStackTrace; Empty
    }
  }

  /** There is no pooling here: releasing a connection simply closes it. */
  def releaseConnection(conn: Connection) {conn.close}
}
| beni55/liftweb | sites/hellolift/src/main/scala/bootstrap/liftweb/Boot.scala | Scala | apache-2.0 | 1,782 |
package integrationtest
import model._
import controller.Controllers
import skinny.test.{ FactoryGirl, SkinnyFlatSpec }
/**
 * HTTP-level CRUD tests for the programmers controller, exercising the HTML,
 * JSON and XML representations plus the CSRF-token protection on mutating verbs.
 */
class ProgrammersControllerSpec extends SkinnyFlatSpec with unit.SkinnyTesting {
addFilter(Controllers.programmers, "/*")
// Fixture accessors: reuse an existing row when one is present, otherwise
// create a fresh one via FactoryGirl.
def skill = Skill.findAllWithLimitOffset(1, 0).headOption.getOrElse {
FactoryGirl(Skill).create()
}
def company = Company.findAllWithLimitOffset(1, 0).headOption.getOrElse {
FactoryGirl(Company).create()
}
def programmer = Programmer.findAllWithLimitOffset(1, 0).headOption.getOrElse {
FactoryGirl(Programmer).create()
}
it should "show programmers" in {
get("/programmers") {
status should equal(200)
}
get("/programmers/") {
status should equal(200)
}
get("/programmers.json") {
logger.debug(body)
status should equal(200)
}
get("/programmers.xml") {
logger.debug(body)
status should equal(200)
}
}
it should "show a programmer in detail" in {
get(s"/programmers/${programmer.id}") {
status should equal(200)
}
get(s"/programmers/${programmer.id}.xml") {
logger.debug(body)
status should equal(200)
}
get(s"/programmers/${programmer.id}.json") {
logger.debug(body)
status should equal(200)
}
}
it should "show new entry form" in {
get(s"/programmers/new") {
status should equal(200)
}
}
it should "create a programmer" in {
val newName = s"Created at ${System.currentTimeMillis}"
// A mutating request without a CSRF token must be rejected with 403.
post(s"/programmers", "name" -> newName) {
status should equal(403)
}
withSession("csrf-token" -> "12345") {
post(s"/programmers",
"name" -> newName,
"favoriteNumber" -> "123",
"companyId" -> company.id.toString,
"plainTextPassword" -> "1234567890",
"csrf-token" -> "12345") {
status should equal(302)
// The redirect Location header ends with the new resource id.
val id = header("Location").split("/").last.toLong
val created = Programmer.findById(id)
created.isDefined should equal(true)
created.get.hashedPassword.verify(PlainPassword("1234567890"), "dummy salt") should equal(true)
}
}
}
it should "show the edit form" in {
get(s"/programmers/${programmer.id}/edit") {
status should equal(200)
}
}
it should "update a programmer" in {
val newName = s"Updated at ${System.currentTimeMillis}"
put(s"/programmers/${programmer.id}", "name" -> newName) {
status should equal(403)
}
Programmer.findById(programmer.id).get.name should not equal (newName)
withSession("csrf-token" -> "12345") {
put(s"/programmers/${programmer.id}",
"name" -> newName,
"favoriteNumber" -> "123",
"companyId" -> company.id.toString,
"csrf-token" -> "12345") {
status should equal(302)
}
// Missing required parameters -> validation failure (400).
put(s"/programmers/${programmer.id}", "csrf-token" -> "12345") {
status should equal(400)
}
}
Programmer.findById(programmer.id).get.name should equal(newName)
}
it should "delete a programmer" in {
val id = Programmer.createWithAttributes('name -> "Unit Test Programmer", 'favoriteNumber -> 123)
delete(s"/programmers/${id}") {
status should equal(403)
}
withSession("csrf-token" -> "aaaaaa") {
delete(s"/programmers/${id}?csrf-token=aaaaaa") {
status should equal(200)
}
// Once deleted, further operations on the id must 404 in every format,
// with the matching content type for .json/.xml.
post(s"/programmers/${id}?csrf-token=aaaaaa") {
status should equal(404)
}
post(s"/programmers/${id}.json?csrf-token=aaaaaa") {
status should equal(404)
header("Content-Type") should fullyMatch regex ("application/json;\\\\s*charset=utf-8")
}
post(s"/programmers/${id}.xml?csrf-token=aaaaaa") {
status should equal(404)
header("Content-Type") should fullyMatch regex ("application/xml;\\\\s*charset=utf-8")
}
}
}
it should "add a programmer to a company" in {
val id = Programmer.createWithAttributes('name -> "JoinCompany Test Programmer", 'favoriteNumber -> 123)
try {
withSession("csrf-token" -> "aaaaaa") {
post(s"/programmers/${id}/company/${company.id}", "csrf-token" -> "aaaaaa") {
status should equal(200)
}
}
} finally {
// Always clean up the temporary row, even when an assertion fails.
Programmer.deleteById(id)
}
}
it should "remove a programmer from a company" in {
val id = Programmer.createWithAttributes('name -> "LeaveCompany Test Programmer", 'favoriteNumber -> 123)
try {
withSession("csrf-token" -> "aaaaaa") {
post(s"/programmers/${id}/company/${company.id}", "csrf-token" -> "aaaaaa") {
status should equal(200)
}
delete(s"/programmers/${id}/company?csrf-token=aaaaaa") {
status should equal(200)
}
}
} finally {
Programmer.deleteById(id)
}
}
it should "add a skill to a programmer" in {
val id = FactoryGirl(Programmer).create().id
try {
withSession("csrf-token" -> "aaaaaa") {
post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
status should equal(200)
}
// Adding the same skill twice is a conflict.
post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
status should equal(409)
}
}
} finally {
Programmer.deleteById(id)
}
}
it should "remove a skill from a programmer" in {
val id = FactoryGirl(Programmer).create().id
try {
withSession("csrf-token" -> "aaaaaa") {
post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
status should equal(200)
}
delete(s"/programmers/${id}/skills/${skill.id}?csrf-token=aaaaaa") {
status should equal(200)
}
}
} finally {
Programmer.deleteById(id)
}
}
}
| seratch/skinny-framework | example/src/test/scala/integrationtest/ProgrammersControllerSpec.scala | Scala | mit | 5,843 |
package com.freshsoft.matterbridge.routing
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.freshsoft.matterbridge.service.database.NineGagService
import model.{DatabaseEntityJsonSupport, NineGagUpload}
import scala.concurrent.ExecutionContext
/**
* The nine gag specific service routes
*/
class NineGagRoute(service: NineGagService)(implicit executionContext: ExecutionContext)
    extends DatabaseEntityJsonSupport {

  // NOTE: route order matters. The literal segments ("count", "add", "exists",
  // "last") and the UUID matcher must be tried before the trailing catch-all
  // name matcher, which would otherwise shadow them.
  val route: Route = pathPrefix("9gag") {
    path("count") {
      get {
        complete(service.count.map(_.toString))
      }
    } ~
      path("add") {
        post {
          entity(as[NineGagUpload]) { upload =>
            complete(service.add(upload.name, upload.gifUrl, upload.categoryId).map(_.toString))
          }
        }
      } ~
      path("exists" / Remaining) { name =>
        get {
          complete(service.exists(name).map(_.toString))
        }
      } ~
      path(JavaUUID) { id =>
        get {
          complete(service.byId(id))
        } ~
          delete {
            complete(service.delete(id).map(_.toString))
          }
      } ~
      path("last") {
        get {
          complete(service.last)
        }
      } ~
      path(Remaining) { name =>
        get {
          complete(service.byName(name))
        }
      }
  }
}
| Freshwood/matterbridge | src/main/scala/com/freshsoft/matterbridge/routing/NineGagRoute.scala | Scala | mit | 1,401 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala.observables
import scala.collection.JavaConverters._
import scala.concurrent.{Future, Promise}
import rx.lang.scala.ImplicitFunctionConversions._
import rx.lang.scala.Observable
import rx.observables.{BlockingObservable => JBlockingObservable}
/**
* An Observable that provides blocking operators.
*
* You can obtain a BlockingObservable from an Observable using [[rx.lang.scala.Observable.toBlocking]]
*/
// constructor is private because users should use Observable.toBlocking
class BlockingObservable[+T] private[scala] (val o: Observable[T])
extends AnyVal
{
// This is def because "field definition is not allowed in value class"
// (each call re-wraps the underlying Observable via toBlocking; the wrapper is cheap).
private def asJava: JBlockingObservable[_ <: T] = o.asJavaObservable.toBlocking
/**
* Invoke a method on each item emitted by the {@link Observable}; block until the Observable
* completes.
*
* NOTE: This will block even if the Observable is asynchronous.
*
* This is similar to {@link Observable#subscribe(Observer)}, but it blocks. Because it blocks it does
* not need the {@link Observer#onCompleted()} or {@link Observer#onError(Throwable)} methods.
*
* <img width="640" height="330" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/B.forEach.png" alt="" />
*
* @param f
* the {@link Action1} to invoke for every item emitted by the {@link Observable}
* @throws RuntimeException
* if an error occurs
*/
def foreach(f: T => Unit): Unit = {
asJava.forEach(f)
}
/**
* Enables `if` guards in `for` comprehensions over this blocking observable;
* the returned wrapper applies the predicate `p` before each `foreach` callback.
*/
def withFilter(p: T => Boolean): WithFilter[T] = {
new WithFilter[T](p, asJava)
}
/**
* Returns the last item emitted by a specified [[Observable]], or
* throws `NoSuchElementException` if it emits no items.
*
* <img width="640" height="315" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/B.last.png" alt="" />
*
* @return the last item emitted by the source [[Observable]]
* @throws NoSuchElementException
* if source contains no elements
* @see <a href="https://github.com/ReactiveX/RxJava/wiki/Blocking-Observable-Operators#last-and-lastordefault">RxJava Wiki: last()</a>
* @see <a href="http://msdn.microsoft.com/en-us/library/system.reactive.linq.observable.last.aspx">MSDN: Observable.Last</a>
*/
def last : T = {
asJava.last : T
}
/**
* Returns an `Option` with the last item emitted by the source Observable,
* or `None` if the source Observable completes without emitting any items.
*
* @return an `Option` with the last item emitted by the source Observable,
* or `None` if the source Observable is empty
*/
def lastOption: Option[T] = {
o.lastOption.toBlocking.single
}
/**
* Returns the last item emitted by the source Observable, or a default item
* if the source Observable completes without emitting any items.
*
* <img width="640" height="305" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/lastOrDefault.png" alt="" />
*
* @param default the default item to emit if the source Observable is empty.
* This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
* @return the last item emitted by the source Observable, or a default item if the source Observable is empty
*/
def lastOrElse[U >: T](default: => U): U = {
lastOption getOrElse default
}
/**
* Returns the first item emitted by a specified [[Observable]], or
* `NoSuchElementException` if source contains no elements.
*
* @return the first item emitted by the source [[Observable]]
* @throws NoSuchElementException
* if source contains no elements
* @see <a href="https://github.com/ReactiveX/RxJava/wiki/Blocking-Observable-Operators#first-and-firstordefault">RxJava Wiki: first()</a>
* @see <a href="http://msdn.microsoft.com/en-us/library/hh229177.aspx">MSDN: Observable.First</a>
*/
def first : T = {
asJava.first : T
}
/**
* Returns the first item emitted by a specified [[Observable]], or
* `NoSuchElementException` if source contains no elements.
*
* @return the first item emitted by the source [[Observable]]
* @throws NoSuchElementException
* if source contains no elements
* @see <a href="https://github.com/ReactiveX/RxJava/wiki/Blocking-Observable-Operators#first-and-firstordefault">RxJava Wiki: first()</a>
* @see <a href="http://msdn.microsoft.com/en-us/library/hh229177.aspx">MSDN: Observable.First</a>
* @see [[BlockingObservable.first]]
*/
def head : T = first
/**
* Returns an `Option` with the very first item emitted by the source Observable,
* or `None` if the source Observable is empty.
*
* @return an `Option` with the very first item from the source,
* or `None` if the source Observable completes without emitting any item.
*/
def headOption: Option[T] = {
o.headOption.toBlocking.single
}
/**
* Returns the very first item emitted by the source Observable, or a default value if the source Observable is empty.
*
* <img width="640" height="305" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/firstOrDefault.png" alt="" />
*
* @param default The default value to emit if the source Observable doesn't emit anything.
* This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
* @return the very first item from the source, or a default value if the source Observable completes without emitting any item.
*/
def headOrElse[U >: T](default: => U): U = {
headOption getOrElse default
}
/**
* Returns an `Iterable` that always returns the item most recently emitted by an [[Observable]].
* <p>
* <img width="640" height="490" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/B.mostRecent.png" alt="" />
*
* @param initialValue
* the initial value that will be yielded by the `Iterable` sequence if the [[Observable]] has not yet emitted an item
* @return an `Iterable` that on each iteration returns the item that the [[Observable]] has most recently emitted
*/
def mostRecent[U >: T](initialValue: U): Iterable[U] = {
val asJavaU = asJava.asInstanceOf[rx.observables.BlockingObservable[U]]
asJavaU.mostRecent(initialValue).asScala: Iterable[U] // useless ascription because of compiler bug
}
/**
* Returns an `Iterable` that blocks until the [[Observable]] emits another item,
* then returns that item.
* <p>
* <img width="640" height="490" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/B.next.png" alt="" />
*
* @return an `Iterable` that blocks upon each iteration until the [[Observable]] emits a new item, whereupon the Iterable returns that item
*/
def next: Iterable[T] = {
asJava.next().asScala: Iterable[T] // useless ascription because of compiler bug
}
/**
* If the source Observable completes after emitting a single item, return that item. If the source Observable
* emits more than one item or no items, notify of an `IllegalArgumentException` or `NoSuchElementException` respectively.
*
* <img width="640" height="315" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/single.png" alt="" />
*
* @return an Observable that emits the single item emitted by the source Observable
* @throws IllegalArgumentException if the source emits more than one item
* @throws NoSuchElementException if the source emits no items
*/
def single: T = {
asJava.single(): T // useless ascription because of compiler bug
}
/**
* If the source Observable completes after emitting a single item, return an `Option` with that item;
* if the source Observable is empty, return `None`. If the source Observable emits more than one item,
* throw an `IllegalArgumentException`.
*
* @return an `Option` with the single item emitted by the source Observable, or
* `None` if the source Observable is empty
* @throws IllegalArgumentException if the source Observable emits more than one item
*/
def singleOption: Option[T] = {
o.singleOption.toBlocking.single
}
/**
* If the source Observable completes after emitting a single item, return that item;
* if the source Observable is empty, return a default item. If the source Observable
* emits more than one item, throw an `IllegalArgumentException`.
*
* <img width="640" height="315" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/singleOrDefault.png" alt="" />
*
* @param default a default value to emit if the source Observable emits no item.
* This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
* @return the single item emitted by the source Observable, or a default item if
* the source Observable is empty
* @throws IllegalArgumentException if the source Observable emits more than one item
*/
def singleOrElse[U >: T](default: => U): U = {
singleOption getOrElse default
}
/**
* Returns an `Iterator` that iterates over all items emitted by this [[Observable]].
*/
def toIterable: Iterable[T] = {
asJava.toIterable.asScala: Iterable[T] // useless ascription because of compiler bug
}
/**
* Returns a `List` that contains all items emitted by this [[Observable]].
*/
def toList: List[T] = {
asJava.toIterable.asScala.toList: List[T] // useless ascription because of compiler bug
}
/**
* Returns an `Iterable` that returns the latest item emitted by this `BlockingObservable`,
* waiting if necessary for one to become available.
*
* If this `BlockingObservable` produces items faster than `Iterator.next` takes them,
* `onNext` events might be skipped, but `onError` or `onCompleted` events are not.
*
* Note also that an `onNext` directly followed by `onCompleted` might hide the `onNext` event.
*
* @return an `Iterable` that always returns the latest item emitted by this `BlockingObservable`
*/
def latest: Iterable[T] = {
asJava.latest.asScala: Iterable[T] // useless ascription because of compiler bug
}
/**
* Returns a `Future` representing the single value emitted by this `BlockingObservable`.
*
* The returned `Future` will be completed with an `IllegalArgumentException` if the `BlockingObservable`
* emits more than one item. And it will be completed with an `NoSuchElementException` if the `BlockingObservable`
* is empty. Use `Observable.toSeq.toBlocking.toFuture` if you are not sure about the size of `BlockingObservable`
* and do not want to handle these `Exception`s.
*
* <img width="640" height="395" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/B.toFuture.png" alt="" />
*
* @return a `Future` that expects a single item to be emitted by this `BlockingObservable`.
*/
def toFuture: Future[T] = {
val p = Promise[T]()
o.single.subscribe(t => p.success(t), e => p.failure(e))
p.future
}
}
// Cannot yet have inner class because of this error message:
// "implementation restriction: nested class is not allowed in value class.
// This restriction is planned to be removed in subsequent releases."
/** Filtered view over a blocking observable, produced by `BlockingObservable.withFilter`.
 *  Only `withFilter` (predicate conjunction) and `foreach` are supported here;
 *  `map`/`flatMap` remain on `Observable` itself.
 */
private[observables] class WithFilter[+T] (p: T => Boolean, asJava: rx.observables.BlockingObservable[_ <: T]) {
import rx.lang.scala.ImplicitFunctionConversions._
// there's no map and flatMap here, they're only available on Observable
def withFilter(q: T => Boolean) = new WithFilter[T]((x: T) => p(x) && q(x), asJava)
def foreach(f: T => Unit): Unit = {
asJava.forEach((e: T) => {
if (p(e)) f(e)
})
}
}
| jbripley/RxScala | src/main/scala/rx/lang/scala/observables/BlockingObservable.scala | Scala | apache-2.0 | 12,555 |
package dk.gp.gpc
import org.junit._
import Assert._
import breeze.linalg._
import java.io.File
import dk.gp.cov.CovSEiso
import breeze.numerics._
/** Smoke test: trains a GP classifier on the binary-input fixture and prints the learned hyperparameters. */
class gpcTrainBinaryInputTest {
  // Fixture CSV: column 0 is the single input feature, column 1 the label.
  val data = csvread(new File("src/test/resources/gpc/gpc_binary_input.csv"), skipLines = 1)
  val x: DenseMatrix[Double] = data(::, 0 to 0)
  val y = data(::, 1)
  val covFunc = CovSEiso()
  val covFuncParams = DenseVector(log(0.1), log(0.3)) //log sf, logEll
  val mean = 0
  @Test def test = {
    val priorModel = GpcModel(x, y, covFunc, covFuncParams, mean)
    val fittedModel = gpcTrain(priorModel, maxIter = 10)
    println(s"prior params=${priorModel.covFuncParams}")
    println(s"learned params=${fittedModel.covFuncParams}")
    println(s"learned gp mean=${fittedModel.gpMean}")
  }
}
package org.improving.scalify
import Scalify._
import org.eclipse.jdt.core.dom
import scala.collection.mutable.HashMap
import org.eclipse.jdt.core.dom.{ PrimitiveType => PT }
// import scalaz.OptionW._
// All type nodes represent *references* to types. Declarations are elsewhere.
// public class Foo[A] {
// Bar blah;
// public Foo[B](x: B) {
// this.blah = new Bar[B](x)
// }
// }
//
// class Foo[A, B](x: B) { var blah: Bar[_]; this.blah = new Bar(x) }
//
// Foo[String] x = new Foo[Int](5)
//
// Foo[Int, String]
// Foo[_ <: Int, _ <: String]
// Foo[_, _]
// Foo[_ >: Null]
//
// WildCardType:
/** Emits a reference to a Java primitive type (int, boolean, ...) as its Scala `AnyVal` counterpart. */
class PrimitiveType(node: dom.PrimitiveType) extends Type(node)
{
  // extractors over the JDT node: the primitive type code, and its mapped Scala AnyVal
  lazy val PrimitiveType(code) = node
  lazy val JPrimitive(anyVal) = node
  def emitDirect: Emission = anyVal.emit
}
/** Emits a Java boxed type (java.lang.Integer, ...) as the corresponding Scala `AnyVal` type. */
class BoxedType(node: dom.SimpleType) extends SimpleType(node)
{
  lazy val JBoxed(anyVal) = node
  // the boxed type always emits the unboxed Scala equivalent, regardless of context
  override def emitDirect(context: ASTNode): Emission = emitDirect
  override def emitDirect: Emission = anyVal.emit
}
/** Emits a Java array type `T[]` as Scala's `Array[T]`. */
class ArrayType(node: dom.ArrayType) extends Type(node)
{
  lazy val ArrayType(componentType) = node
  // element type of the innermost dimension, and the number of dimensions
  lazy val elementType = node.getElementType
  lazy val dims = node.getDimensions
  def emitDirect: Emission = ARRAY <~> BRACKETS(componentType)
  // override def emitWithoutBounds: Emission = ARRAY <~> BRACKETS(componentType.emitWithoutBounds)
}
/**
 * Emits a reference to a generic type applied to type arguments,
 * e.g. `Foo<Int, String>` becomes `Foo[Int, String]`.
 */
class ParameterizedType(node: dom.ParameterizedType) extends Type(node)
{
  lazy val ParameterizedType(jtype, typeArgs) = node
  // number of type parameters on the corresponding declaration, if it can be resolved
  lazy val typeArgCount = findTypeDeclaration.map(_.emitTypeParameters.size) getOrElse 0
  override def emitDirect(context: ASTNode): Emission = jtype.emitDirect(context) ~ TYPEARGS(typeArgs)
  override def emitDirect: Emission = jtype.emitDirect ~ TYPEARGS(typeArgs)
  // NOTE(review): removed a private emitTypeArgs helper that was dead code (never
  // referenced anywhere in this class) and relied on the deprecated List.make.
}
/**
 * Emits a simple (unqualified) type reference.
 *
 * Raw uses of generic Java types are patched up with stand-in type arguments:
 * `_` in method declarations, `AnyRef` elsewhere, and nothing at all in
 * instance creations and casts (see emitDirect(context)).
 */
class SimpleType(node: dom.SimpleType) extends Type(node) with NameInfo
{
  lazy val SimpleType(name) = node
  // type parameters of the erased (raw) form of this type's binding
  lazy val typeParameters = tb.getErasure.getTypeParameters
  lazy val nameUnadorned = name.emitNameAsOrig <~> emitRawTypeArgs(ANYREF)
  override def emitDirect(context: ASTNode): Emission = {
    // `new Foo` and `(Foo)` casts must not carry synthesized type arguments
    context match {
      case _: dom.ClassInstanceCreation => return nameUnadorned
      case _: dom.CastExpression => return nameUnadorned
      case _ =>
    }
    // stand-in used for missing (raw) type arguments depends on the context
    val standin: Emission = context match {
      case _: dom.MethodDeclaration => UNDERSCORE
      case _ => ANYREF
    }
    emitName(None) <~> emitRawTypeArgs(standin)
  }
  override def toString: String = name.toString
  def emitDirect: Emission = emitName(None) <~> emitRawTypeArgs(ANYREF)
  override def emitNameAsOrig = name.emitNameAsOrig
  // emit the possibly-rewritten name, unless we're inside the same enclosing type
  private def emitName(context: Option[ASTNode]): Emission =
    if (context.isEmpty || !context.get.isEnclosedInSameType(node)) name.emit
    else emitNameAsOrig
  // Class => Class[_] and etc.
  private def emitRawTypeArgs(standin: Emission): Emission =
    if (!tb.isRawType || typeParameters.isEmpty || standin.isEmpty) Nil
    else NOS ~ BRACKETS(REPSEP(List.make(typeParameters.size, standin), NOS ::: COMMA))
}
/** Emits a qualified type reference such as `A.B` (the node itself acts as the name). */
class QualifiedType(node: dom.QualifiedType) extends Type(node) with NameInfo
{
  val name = node
  def emitDirect: Emission = name
  override def emitNameAsOrig = name.emitNameAsOrig
  override def toString: String = name.toString
}
/**
 * Emits a Java wildcard type reference (`?`, `? extends T`, `? super T`)
 * as Scala's `_` with the corresponding `<:` / `>:` bound.
 */
class WildcardType(node: dom.WildcardType) extends Type(node)
{
  lazy val WildcardType(bound, isUpperBound) = node
  // the bound portion: "<:"/"">:" followed by the bound type, or nothing if unbounded
  lazy val boundEmission = bound match {
    case Some(b) => (if (isUpperBound) BOUNDUPPER else BOUNDLOWER) ~ b
    case None    => Nil
  }
  def emitDirect: Emission = UNDERSCORE ~ boundEmission
}
// trait NamedType extends NameInfo
// {
// self: Type =>
//
// // given a particular supercall pointed at this type, returns correct emission
// // the level of complication here is due to factory types having variable names for the superclass
// def emitTypeNameWhenSuper(sc: Option[dom.SuperConstructorInvocation]): Emission = {
// log.trace("emitTypeNameWhenSuper: %s", name)
//
// emitNameAsOrig
// // name
// // INVOKE(ROOTPKG, INVOKE(emitString(pkgName), name))
// }
//
// // def emitExprWhenSuper: Emission = emitTypeNameWhenSuper
// def emitExprWhenSuper(sc: Option[dom.SuperConstructorInvocation]): Emission = {
// if (sc.isEmpty) return emitTypeNameWhenSuper(None)
// lazy val SuperConstructorInvocation(expr, typeArgs, args) = sc.get
//
// emitTypeNameWhenSuper(sc) <~>
// emitOpt(expr, BRACKETS(_) ~ NOS) ~
// METHODARGS(args)
// }
// }
// * A type like "A.B" can be represented either of two ways:
// * <ol>
// * <li>
// * <code>QualifiedType(SimpleType(SimpleName("A")),SimpleName("B"))</code>
// * </li>
// * <li>
// * <code>SimpleType(QualifiedName(SimpleName("A"),SimpleName("B")))</code>
// * </li>
// * </ol>
//
// Somewhere there need be a list of SimpleNames
/**
 * Base class for all type-reference nodes (declarations live elsewhere).
 *
 * Also hosts the logic for emitting a type in a super-constructor context,
 * which must account for "factory types" that carry their own label/imports.
 */
abstract class Type(node: dom.Type) extends Node(node) with TypeBound
{
  // the resolved JDT type binding for this reference
  def tb = node.resolveBinding
  def emitNameAsOrig = emitDirect
  // name of this type as used in an `extends ... ` super call; factory types append a label
  def emitTypeNameWhenSuper(sc: Option[dom.SuperConstructorInvocation]): Emission = {
    tb.getFactoryType match {
      case None => node.emitNameAsOrig
      case Some(x) => node.emitNameAsOrig <~> x.emitLabelForSuperCall(sc)
    }
  }
  // full superclass expression: name plus (optionally) qualifier and constructor arguments
  def emitExprWhenSuper(sc: Option[dom.SuperConstructorInvocation]): Emission = {
    if (sc.isEmpty) return emitTypeNameWhenSuper(None)
    val SuperConstructorInvocation(expr, typeArgs, args) = sc.get
    emitTypeNameWhenSuper(sc) <~>
    emitOpt(expr, BRACKETS(_) ~ NOS) ~
    METHODARGS(args)
  }
  // extra imports required when this type appears as a superclass (factory types only)
  def emitImportsWhenSuper: Emission =
    tb.getFactoryType match {
      case None => Nil
      case Some(x) => x.emitImportsWhenSuper ~ NL
    }
  // def needsQualification = scalaPredefTypes contains leftTypeSegment
  //
  // // unroll the confusion into a list of SimpleNames
  // def leftTypeSegment: String = typeSegments match { case SimpleName(ident) :: _ => ident ; case _ => "" }
  // def pkgQualifier: String = ""
  // override def toString = node match {
  //   case SimpleType(SimpleName(ident)) => ident
  //   case _ => node.toString
  // }
  // def isVoid: Boolean = node match {
  //   case x: dom.PrimitiveType if x.toString == "void" => true
  //   case _ => false
  // }
  // emits NEW unless we used a factory on the declaring type
  def emitNew: Emission = if (tb.isFactoryType) Nil else NEW
}
| mbana/scalify | src/main/ast/Type.scala | Scala | isc | 6,251 |
import org.specs2.mutable._
import play.api.libs.iteratee.Enumerator
import reactivemongo.api._
import reactivemongo.api.gridfs.{ReadFile, DefaultFileToSave, GridFS}
import reactivemongo.api.gridfs.Implicits._
import reactivemongo.bson._
import scala.concurrent._
import reactivemongo.api.gridfs
import scala.concurrent.duration._
/**
 * Integration spec for ReactiveMongo's GridFS support: stores, finds and
 * removes a single file. The examples run sequentially because each one
 * depends on the state left behind by the previous one.
 */
class GridfsSpec extends Specification {
  import Common._
  sequential
  lazy val gfs = GridFS(db)
  lazy val file = DefaultFileToSave("somefile", Some("application/file"))
  // 100 bytes of deterministic content (1..100)
  lazy val fileContent = Enumerator((1 to 100).view.map(_.toByte).toArray)
  "ReactiveMongo" should {
    "store a file in gridfs" in {
      val actual = Await.result(gfs.save(fileContent, file), timeout)
      actual.filename mustEqual "somefile"
    }
    "find this file in gridfs" in {
      val futureFile = gfs.find(BSONDocument("filename" -> "somefile")).collect[List]()
      // assumes exactly one match was stored by the previous example
      val actual = Await.result(futureFile, timeout).head
      (actual.filename mustEqual file.filename) and
        (actual.uploadDate must beSome) and
        (actual.contentType mustEqual file.contentType)
    }
    "delete this file from gridfs" in {
      val actual = Await.result(gfs.remove(file.id), timeout)
      // n = number of documents affected by the remove
      actual.n mustEqual 1
    }
  }
}
| qubell/ReactiveMongo | driver/src/test/scala/GridfsSpec.scala | Scala | apache-2.0 | 1,245 |
package ammonite.main
import ammonite.util.Util
/**
* Constants used in the default configuration for the Ammonite REPL
*/
object Defaults{
  // Banner printed on REPL startup; versions are resolved lazily at call time.
  val welcomeBanner = {
    def ammoniteVersion = ammonite.Constants.version
    def scalaVersion = scala.util.Properties.versionNumberString
    def javaVersion = System.getProperty("java.version")
    val link = "www.patreon.com/lihaoyi"
    Util.normalizeNewlines(
      s"""Welcome to the Ammonite Repl $ammoniteVersion
|(Scala $scalaVersion Java $javaVersion)
|If you like Ammonite, please support our development at $link""".stripMargin
    )
  }
  // Import selector list hiding java.lang.Object/Any members that would otherwise
  // pollute completion. NOTE(review): no stripMargin is applied here; presumably
  // the consumer strips the margin — verify before reformatting this literal.
  val ignoreUselessImports = """
|notify => _,
|  wait => _,
|  equals => _,
|  asInstanceOf => _,
|  synchronized => _,
|  notifyAll => _,
|  isInstanceOf => _,
|  == => _,
|  != => _,
|  getClass => _,
|  ne => _,
|  eq => _,
|  ## => _,
|  hashCode => _,
|  _
|"""
  // Need to import stuff from ammonite.ops manually, rather than from the
  // ammonite.ops.Extensions bundle, because otherwise they result in ambiguous
  // imports if someone else imports manually
  val predefString = s"""
|import ammonite.ops.{
|  Pipeable,
|  FilterMapExt,
|  FilterMapArrays,
|  FilterMapIterators,
|  FilterMapGenerators,
|  SeqFactoryFunc,
|  ChainableConversions,
|  RegexContextMaker,
|  Callable1,
|  Callable2
|}
|import ammonite.runtime.tools._
|import ammonite.repl.tools._
|import ammonite.runtime.tools.DependencyConstructor.{ArtifactIdExt, GroupIdExt}
|""".stripMargin
  // Extra predef compiled only for the interactive REPL (exit, pretty-printing, etc.)
  val replPredef = """
|import ammonite.main.Router.{doc, main}
|import ammonite.main.Scripts.pathScoptRead
| import ammonite.repl.ReplBridge.value.{
|  exit,
|  codeColorsImplicit,
|  tprintColorsImplicit,
|  pprinterImplicit,
|  show,
|  typeOf
|}
  """.stripMargin
  // Default Ammonite home directory: ~/.ammonite
  def ammoniteHome = ammonite.ops.Path(System.getProperty("user.home"))/".ammonite"
}
| alexarchambault/ammonium | amm/src/main/scala/ammonite/main/Defaults.scala | Scala | mit | 2,019 |
package ignition.jobs.setups
import java.security.MessageDigest
import ignition.core.jobs.CoreJobRunner.RunnerContext
object UsersPasswordsSetup {

  // NOTE: the triple-quoted literals previously ended with a margin-less line
  // holding only the closing quotes, so stripMargin.split left a bogus
  // whitespace-only element at the end of each list. The literals now end
  // directly after the last entry.

  /** Candidate user names. */
  val usersList: Seq[String] =
    """allan
      |aws
      |chaordic""".stripMargin.split("\\n")

  /** Candidate passwords. */
  val passwordsList: Seq[String] =
    """123456
      |1234567
      |12345678
      |abracadabra""".stripMargin.split("\\n")

  /** Target MD5 digests (lowercase hex) to crack. */
  val md5sList: Seq[String] =
    """b05cc4c5d9c2da10c463ba4edf48d4c9
      |770f98821764790ce6dae1f1b9ca84e3
      |ddead3c9d25d6f31d0ff9eb97ab293c4
      |f637f075da94cc346b2b49bae68ed306""".stripMargin.split("\\n")

  /**
   * Brute-forces which "user:password" pair hashes to each target MD5 over the
   * full cartesian product, and prints the matching triples.
   *
   * @param runnerContext ignition context supplying the SparkContext
   */
  def run(runnerContext: RunnerContext) {
    val sc = runnerContext.sparkContext
    val usersRDD = sc.parallelize(usersList)
    val passwordsRDD = sc.parallelize(passwordsList)
    val md5sRDD = sc.parallelize(md5sList)
    usersRDD.cartesian(passwordsRDD).cartesian(md5sRDD)
      .filter { case ((user, password), md5) =>
        val pair = s"${user}:${password}"
        // MessageDigest is not thread-safe, so a fresh instance is created per
        // record rather than shared across executor threads.
        // NOTE(review): getBytes uses the platform charset; fine for the ASCII
        // fixtures above, but pass an explicit charset if inputs may vary.
        val currentMD5 =
          MessageDigest.getInstance("MD5").digest(pair.getBytes).map("%02x".format(_)).mkString
        md5 == currentMD5
      }
      .collect()
      .foreach(println)
  }
}
| chaordic/ignition-template | src/main/scala/ignition/jobs/setups/UsersPasswordsSetup.scala | Scala | mit | 1,247 |
/* LogEventsWithJavaSpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2015 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.monitor
import java.util.logging.{ Logger, Level }
import org.scalatest._
import org.scalamock.scalatest.MockFactory
/**
* Test suite for [[atmos.monitor.LogEventsWithJava]].
*/
class LogEventsWithJavaSpec extends FlatSpec with Matchers with MockFactory {
  // shared throwable reused across all (level, enabled, throwable) combinations
  val thrown = new RuntimeException
  "LogEventsWithJava" should "forward log entries to a standard Java logger" in {
    val fixture = new LoggerFixture
    val monitor = LogEventsWithJava(fixture.mock)
    // exercise every combination of level, loggable-flag, and optional throwable
    for {
      level <- Seq(Level.SEVERE, Level.WARNING, Level.INFO, Level.CONFIG)
      enabled <- Seq(true, false)
      t <- Seq(Some(thrown), None)
    } {
      fixture.isLoggable.expects(level).returns(enabled).once
      monitor.isLoggable(level) shouldBe enabled
      // with a throwable the 3-arg Logger.log overload must be used, otherwise the 2-arg one
      t match {
        case Some(tt) => fixture.logThrown.expects(level, "MSG", tt).once
        case None => fixture.log.expects(level, "MSG").once
      }
      monitor.log(level, "MSG", t)
    }
  }
  // Anonymous Logger subclass whose overridden methods delegate to mock functions,
  // since java.util.logging.Logger itself cannot be mocked directly.
  class LoggerFixture { self =>
    val isLoggable = mockFunction[Level, Boolean]
    val log = mockFunction[Level, String, Unit]
    val logThrown = mockFunction[Level, String, Throwable, Unit]
    val mock = new Logger(null, null) {
      override def isLoggable(level: Level) = self.isLoggable(level)
      override def log(level: Level, message: String) = self.log(level, message)
      override def log(level: Level, message: String, thrown: Throwable) = self.logThrown(level, message, thrown)
    }
  }
}
package org.juanitodread.pitayafinch.nlp.tools.models.entities
import org.juanitodread.pitayafinch.UnitSpec
/** Shared fixture: loading the NER model is expensive, so it is created once per suite. */
object PersonEntityModelFixture {
  val model: PersonEntityModel = new PersonEntityModel()
}
/** Checks the metadata exposed by the bundled person named-entity model. */
class PersonEntityModelSpec extends UnitSpec {
  // single shared model instance; loading it per test would be wasteful
  import PersonEntityModelFixture.model

  "A PersonEntityModel" should "have a defined name" in {
    assert(model.getName === "Person")
  }

  it should "have a valid OpenNLP Entity Model in English" in {
    assert(model.getNlpModel.getLanguage === "en")
  }

  it should "have a valid OpenNLP Entity Model version" in {
    assert(model.getNlpModel.getVersion.toString === "1.5.0")
  }
}
| juanitodread/pitaya-finch | src/test/scala/org/juanitodread/pitayafinch/nlp/tools/models/entities/PersonEntityModelSpec.scala | Scala | apache-2.0 | 743 |
package blended.samples.camel.internal
import blended.camel.utils.BlendedCamelContextFactory
import blended.util.logging.Logger
import domino.DominoActivator
import javax.jms.ConnectionFactory
import org.apache.camel.builder.RouteBuilder
import org.apache.camel.component.jms.JmsComponent
/**
 * OSGi activator (Domino DSL) that waits for the ActiveMQ ConnectionFactory
 * service and then starts a Camel context with a single sample route
 * (SampleIn -> SampleOut, adding a "Description" header).
 */
class CamelSampleActivator extends DominoActivator {
  whenBundleActive {
    val log = Logger[CamelSampleActivator]
    // only activate once a ConnectionFactory with provider=activemq is registered
    whenAdvancedServicePresent[ConnectionFactory]("(provider=activemq)") { cf =>
      val ctxt = BlendedCamelContextFactory.createContext(name = "BlendedSampleContext", withJmx = true)
      ctxt.addComponent("activemq", JmsComponent.jmsComponent(cf))
      ctxt.addRoutes(new RouteBuilder() {
        override def configure(): Unit = {
          from("activemq:queue:SampleIn").id("SampleRoute")
            .setHeader("Description", constant("BlendedSample"))
            .to("activemq:queue:SampleOut")
        }
      })
      ctxt.start()
      // stop the context when the service disappears or the bundle stops
      onStop {
        log.debug("Stopping Camel Context")
        ctxt.stop()
      }
    }
  }
}
| lefou/blended | blended.samples/blended.samples.camel/src/main/scala/blended/samples/camel/internal/CamelSampleActivator.scala | Scala | apache-2.0 | 1,058 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.physical.stream
import org.apache.flink.table.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalRank
import org.apache.flink.table.plan.nodes.physical.stream.{StreamExecDeduplicate, StreamExecRank}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
/**
* Rule that converts [[FlinkLogicalRank]] with fetch to [[StreamExecRank]].
* NOTES: the rank can not be converted to [[StreamExecDeduplicate]].
*/
class StreamExecRankRule
  extends ConverterRule(
    classOf[FlinkLogicalRank],
    FlinkConventions.LOGICAL,
    FlinkConventions.STREAM_PHYSICAL,
    "StreamExecRankRule") {

  /** Only rank nodes that cannot be expressed as deduplication are handled here. */
  override def matches(call: RelOptRuleCall): Boolean = {
    val logicalRank: FlinkLogicalRank = call.rel(0)
    !StreamExecDeduplicateRule.canConvertToDeduplicate(logicalRank)
  }

  override def convert(rel: RelNode): RelNode = {
    val logicalRank = rel.asInstanceOf[FlinkLogicalRank]
    val input = logicalRank.getInput

    // hash-distribute by the partition key; a global rank must run on one task
    val requiredDistribution =
      if (logicalRank.partitionKey.isEmpty) FlinkRelDistribution.SINGLETON
      else FlinkRelDistribution.hash(logicalRank.partitionKey.toList)

    // input must be in the stream physical convention with the required distribution
    val requiredInputTraits = input.getTraitSet
      .replace(FlinkConventions.STREAM_PHYSICAL)
      .replace(requiredDistribution)
    val convertedInput: RelNode = RelOptRule.convert(input, requiredInputTraits)

    val providedTraits = logicalRank.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
    new StreamExecRank(
      logicalRank.getCluster,
      providedTraits,
      convertedInput,
      logicalRank.partitionKey,
      logicalRank.orderKey,
      logicalRank.rankType,
      logicalRank.rankRange,
      logicalRank.rankNumberType,
      logicalRank.outputRankNumber)
  }
}
object StreamExecRankRule {
  /** Shared, stateless instance of the rule. */
  val INSTANCE: RelOptRule = new StreamExecRankRule
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/physical/stream/StreamExecRankRule.scala | Scala | apache-2.0 | 2,728 |
package de.christofreichardt.scala.ellipticcurve
import de.christofreichardt.scalatest.MyFunSuite
package affine {
  /**
   * Tests for affine Montgomery-curve arithmetic on curve M-221
   * (By^2 = x^3 + Ax^2 + x with A = 117050, B = 1 over GF(2^221 - 3))
   * and for the conversion to/from short Weierstrass form.
   */
  class MontgomerySuite extends MyFunSuite {
    // Hand-rolled Montgomery->Weierstrass conversion, then scalar multiplication
    // on the resulting short Weierstrass curve.
    testWithTracing(this, "Montgomery2Weierstrass (1)") {
      val tracer = getCurrentTracer()
      val groupLaw = ShortWeierstrass
      // a = (3 - A^2)/(3B^2), b = (2A^3 - 9A)/(27B^3) mod p
      def convertCoefficients(coefficients: (BigInt, BigInt), prime: BigInt): (BigInt, BigInt) = {
        val A = coefficients._1
        val B = coefficients._2
        val numeratorA = (3 - A.modPow(2, prime)).mod(prime)
        val denominatorA = (3*B.modPow(2, prime)).mod(prime)
        val numeratorB = (2*A.modPow(3, prime) - 9*A).mod(prime)
        val denominatorB = (27*B.modPow(3, prime)).mod(prime)
        ((numeratorA*denominatorA.modInverse(prime)).mod(prime), (numeratorB*denominatorB.modInverse(prime)).mod(prime))
      }
      // (x, y) -> (x/B + A/(3B), y/B) mod p
      def convertCoordinates(coordinate: (BigInt, BigInt), coefficients: (BigInt, BigInt), prime: BigInt): (BigInt, BigInt) = {
        val A = coefficients._1
        val B = coefficients._2
        val x = coordinate._1
        val y = coordinate._2
        val transformedX = ((x*B.modInverse(prime)).mod(prime) + (A*(3*B).modInverse(prime))).mod(prime)
        val transformedY = (y*B.modInverse(prime)).mod(prime)
        (transformedX, transformedY)
      }
      val montgomeryCoefficients: (BigInt, BigInt) = (117050, 1)
      val prime = BigInt(2).pow(221) - 3
      assert(prime.isProbablePrime(Constants.CERTAINTY))
      val (a, b) = convertCoefficients(montgomeryCoefficients, prime)
      val (x, y) = convertCoordinates((BigInt(4), BigInt("1630203008552496124843674615123983630541969261591546559209027208557")), montgomeryCoefficients, prime)
      val M_221 = groupLaw.makeCurve(groupLaw.OddCharCoefficients(a, b), groupLaw.PrimeField(prime))
      val solver = new QuadraticResidue(prime)
      tracer.out().printfIndentln("x = %s, y = %s", x, y)
      tracer.out().printfIndentln("x = %s, y = %s", x, solver.solve(M_221.evaluateCurveEquation(x)))
      val basePoint = groupLaw.makePoint(groupLaw.AffineCoordinates(x, y), M_221)
      // NOTE(review): the product is only traced, not asserted against a known value
      val point = basePoint.multiply(BigInt("421249166674228746791672110734682167926895081980396304944335052891"))
      tracer.out().printfIndentln("point = %s", point)
    }
    // Smoke test: a random point can be generated on the curve.
    testWithTracing(this, "Random point") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val curve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val randomPoint = curve.randomPoint
      tracer.out().printfIndentln("randomPoint = %s", randomPoint)
    }
    // Multiplying the base point by its (prime) order must yield the neutral element.
    testWithTracing(this, "P * order(P) = 0") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val curve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val basePoint = groupLaw.makePoint(groupLaw.AffineCoordinates(4, BigInt("1630203008552496124843674615123983630541969261591546559209027208557")), curve)
      val order = BigInt("421249166674228746791672110734682167926895081980396304944335052891")
      val product = basePoint multiply order
      // NOTE(review): testPoint below is never used in an assertion
      val testPoint = groupLaw.makePoint(groupLaw.AffineCoordinates(BigInt("1606415676813498058014924151313595965130794692739589994783694077531"),
          BigInt("2134935430760076865959898766833545863303276772166762027696560530099")), curve)
      tracer.out().printfIndentln("curve = %s", curve)
      tracer.out().printfIndentln("product = %s", product)
      assert(product.isNeutralElement, "Expected the NeutralElement.")
    }
    // Projective (x-only ladder) multiplication must agree with affine doubling/multiplication.
    testWithTracing(this, "Differential Multiplication (1)") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val curve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val basePoint = groupLaw.makePoint(groupLaw.AffineCoordinates(4, BigInt("1630203008552496124843674615123983630541969261591546559209027208557")), curve)
      val double = basePoint add basePoint
      val double2 = basePoint.multiply(2)
      tracer.out().printfIndentln("(%s == %s) = %b", double, double2, (double == double2): java.lang.Boolean)
      val projectiveGroupLaw = de.christofreichardt.scala.ellipticcurve.projective.Montgomery
      val projectiveCurve = new projectiveGroupLaw.Curve(curve)
      val projectiveBasePoint = new projectiveGroupLaw.Point(basePoint.x, basePoint.y, 1, projectiveCurve)
      val test1 = projectiveBasePoint.multiply(2)
      tracer.out().printfIndentln("test1 = %s", test1)
      val scalar = BigInt(11785)
      val affineProduct: Montgomery.Point = basePoint.multiply(scalar)
      val projectiveProduct = projectiveBasePoint.multiply(scalar)
      tracer.out().printfIndentln("affineProduct = %s", affineProduct)
      tracer.out().printfIndentln("projectiveProduct = %s", projectiveProduct)
      assert(affineProduct == (projectiveProduct: Montgomery.Point), "Wrong product.")
    }
    // Point doubling on a random Montgomery point must commute with the
    // built-in conversion to short Weierstrass form.
    testWithTracing(this, "Montgomery2Weierstrass (2)") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val montgomeryCurve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val montgomeryPoint = montgomeryCurve.randomPoint
      // skip 2-torsion points, where doubling degenerates
      if (montgomeryPoint != montgomeryPoint.negate) {
        val double1: Montgomery.Point = montgomeryPoint add montgomeryPoint
        tracer.out().printfIndentln("%s = %s add %s", double1, montgomeryPoint, montgomeryPoint)
        val shortWeierstrassCurve = montgomeryCurve.toShortWeierstrassCurve
        val shortWeierstrassPoint = montgomeryPoint.toShortWeierstrassPoint
        val double2: ShortWeierstrass.Point = shortWeierstrassPoint add shortWeierstrassPoint
        tracer.out().printfIndentln("%s = %s add %s", double2, shortWeierstrassPoint, shortWeierstrassPoint)
        assert(double1.toShortWeierstrassPoint == double2, "Wrong point.")
      }
    }
    // Randomized consistency check of affine multiplication against the Weierstrass form.
    testWithTracing(this, "Affine Multiplication") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val montgomeryCurve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val order = BigInt("421249166674228746791672110734682167926895081980396304944335052891")
      val randomGenerator = new RandomGenerator
      val TESTS = 10
      (0 until TESTS).foreach(i => {
        tracer.out().printfIndentln("%d. Test", i: Integer)
        val montgomeryPoint = montgomeryCurve.randomPoint(randomGenerator)
        val shortWeierstrassPoint = montgomeryPoint.toShortWeierstrassPoint
        val scalar = randomGenerator.bigIntStream(order.bitLength * 2, montgomeryCurve.p).find(n => n != order && n != 0).get
        val product1: Montgomery.Point = montgomeryPoint multiply scalar
        val product2: ShortWeierstrass.Point = shortWeierstrassPoint multiply scalar
        tracer.out().printfIndentln("%s * %s = %s", montgomeryPoint, scalar, product1)
        tracer.out().printfIndentln("(%s == %s) = %b", product1, product2, (product1 == product2): java.lang.Boolean)
        assert(product1.toShortWeierstrassPoint == product2, "Wrong product.")
      })
    }
    // NOTE(review): only traces the ladder product of basePoint * order; no assertion.
    testWithTracing(this, "Differential Multiplication (2)") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val curve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val basePoint = groupLaw.makePoint(groupLaw.AffineCoordinates(4, BigInt("1630203008552496124843674615123983630541969261591546559209027208557")), curve)
      val order = BigInt("421249166674228746791672110734682167926895081980396304944335052891")
      val projectiveGroupLaw = de.christofreichardt.scala.ellipticcurve.projective.Montgomery
      val projectiveCurve = new projectiveGroupLaw.Curve(curve)
      val projectiveBasePoint = new projectiveGroupLaw.Point(basePoint.x, basePoint.y, 1, projectiveCurve)
      val product = projectiveBasePoint multiply order
      tracer.out().printfIndentln("(%s * %s) = %s", projectiveBasePoint, order, product)
    }
    // Randomized consistency check of projective (ladder) multiplication
    // against the Weierstrass form.
    testWithTracing(this, "Differential Multiplication (3)") {
      val tracer = getCurrentTracer()
      val groupLaw = Montgomery
      val curve = groupLaw.makeCurve(groupLaw.OddCharCoefficients(117050, 1), groupLaw.PrimeField(BigInt(2).pow(221) - 3))
      val basePoint = groupLaw.makePoint(groupLaw.AffineCoordinates(4, BigInt("1630203008552496124843674615123983630541969261591546559209027208557")), curve)
      val order = BigInt("421249166674228746791672110734682167926895081980396304944335052891")
      val projectiveGroupLaw = de.christofreichardt.scala.ellipticcurve.projective.Montgomery
      val projectiveCurve = new projectiveGroupLaw.Curve(curve)
      val projectiveBasePoint = new projectiveGroupLaw.Point(basePoint.x, basePoint.y, 1, projectiveCurve)
      val randomGenerator = new RandomGenerator
      val TESTS = 10
      (0 until TESTS).foreach(i => {
        tracer.out().printfIndentln("%d. Test", i: Integer)
        val montgomeryPoint = projectiveCurve.randomPoint(randomGenerator)
        val shortWeierstrassPoint = montgomeryPoint.toShortWeierstrassPoint
        val scalar = randomGenerator.bigIntStream(order.bitLength * 2, projectiveCurve.affineCurve.p).find(n => n != order && n != 0).get
        val product1: Montgomery.Point = montgomeryPoint multiply scalar
        val product2: ShortWeierstrass.Point = shortWeierstrassPoint multiply scalar
        tracer.out().printfIndentln("%s * %s = %s", montgomeryPoint, scalar, product1)
        tracer.out().printfIndentln("(%s == %s) = %b", product1, product2, (product1 == product2): java.lang.Boolean)
        assert(product1.toShortWeierstrassPoint == product2, "Wrong product.")
      })
    }
  }
} | chr78rm/jca-provider | elliptic-curve-arithmetic/src/test/scala/de/christofreichardt/scala/ellipticcurve/MontgomerySuite.scala | Scala | gpl-3.0 | 10,110 |
package org.scaladebugger.api.profiles.swappable.requests.monitors
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.profiles.ProfileManager
import org.scaladebugger.api.profiles.swappable.SwappableDebugProfile
import org.scaladebugger.api.profiles.traits.DebugProfile
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.RequestInfoBuilder
/**
 * Verifies that the swappable profile delegates every monitor-wait request
 * operation to the currently active underlying profile, and fails with an
 * AssertionError when no profile is active.
 *
 * NOTE(review): two test names below read "if there remove no underlying
 * profile" — these are runtime strings, so the typo is only flagged here.
 */
class SwappableMonitorWaitRequestSpec extends ParallelMockFunSpec
{
  private val mockDebugProfile = mock[DebugProfile]
  private val mockProfileManager = mock[ProfileManager]
  // profile under test: resolves its delegate through the mocked manager
  private val swappableDebugProfile = new Object with SwappableDebugProfile {
    override protected val profileManager: ProfileManager = mockProfileManager
  }
  describe("SwappableMonitorWaitRequest") {
    describe("#monitorWaitRequests") {
      it("should invoke the method on the underlying profile") {
        (mockProfileManager.retrieve _).expects(*)
          .returning(Some(mockDebugProfile)).once()
        (mockDebugProfile.monitorWaitRequests _).expects().once()
        swappableDebugProfile.monitorWaitRequests
      }
      it("should throw an exception if there is no underlying profile") {
        (mockProfileManager.retrieve _).expects(*).returning(None).once()
        intercept[AssertionError] {
          swappableDebugProfile.monitorWaitRequests
        }
      }
    }
    describe("#removeMonitorWaitRequestWithArgs") {
      it("should invoke the method on the underlying profile") {
        val expected = Some(RequestInfoBuilder.newMonitorWaitRequestInfo())
        val extraArguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*)
          .returning(Some(mockDebugProfile)).once()
        (mockDebugProfile.removeMonitorWaitRequestWithArgs _)
          .expects(extraArguments)
          .returning(expected).once()
        val actual = swappableDebugProfile.removeMonitorWaitRequestWithArgs(
          extraArguments: _*
        )
        actual should be (expected)
      }
      it("should throw an exception if there remove no underlying profile") {
        val extraArguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*).returning(None).once()
        intercept[AssertionError] {
          swappableDebugProfile.removeMonitorWaitRequestWithArgs(
            extraArguments: _*
          )
        }
      }
    }
    describe("#removeAllMonitorWaitRequests") {
      it("should invoke the method on the underlying profile") {
        val expected = Seq(RequestInfoBuilder.newMonitorWaitRequestInfo())
        (mockProfileManager.retrieve _).expects(*)
          .returning(Some(mockDebugProfile)).once()
        (mockDebugProfile.removeAllMonitorWaitRequests _).expects()
          .returning(expected).once()
        val actual = swappableDebugProfile.removeAllMonitorWaitRequests()
        actual should be (expected)
      }
      it("should throw an exception if there remove no underlying profile") {
        (mockProfileManager.retrieve _).expects(*).returning(None).once()
        intercept[AssertionError] {
          swappableDebugProfile.removeAllMonitorWaitRequests()
        }
      }
    }
    describe("#isMonitorWaitRequestWithArgsPending") {
      it("should invoke the method on the underlying profile") {
        val expected = true
        val extraArguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*)
          .returning(Some(mockDebugProfile)).once()
        (mockDebugProfile.isMonitorWaitRequestWithArgsPending _).expects(
          extraArguments
        ).returning(expected).once()
        val actual = swappableDebugProfile.isMonitorWaitRequestWithArgsPending(
          extraArguments: _*
        )
        actual should be (expected)
      }
      it("should throw an exception if there is no underlying profile") {
        val extraArguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*).returning(None).once()
        intercept[AssertionError] {
          swappableDebugProfile.isMonitorWaitRequestWithArgsPending(
            extraArguments: _*
          )
        }
      }
    }
    describe("#tryGetOrCreateMonitorWaitRequestWithData") {
      it("should invoke the method on the underlying profile") {
        val arguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*)
          .returning(Some(mockDebugProfile)).once()
        (mockDebugProfile.tryGetOrCreateMonitorWaitRequestWithData _).expects(arguments).once()
        swappableDebugProfile.tryGetOrCreateMonitorWaitRequestWithData(arguments: _*)
      }
      it("should throw an exception if there is no underlying profile") {
        val arguments = Seq(mock[JDIArgument])
        (mockProfileManager.retrieve _).expects(*).returning(None).once()
        intercept[AssertionError] {
          swappableDebugProfile.tryGetOrCreateMonitorWaitRequestWithData(arguments: _*)
        }
      }
    }
  }
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/swappable/requests/monitors/SwappableMonitorWaitRequestSpec.scala | Scala | apache-2.0 | 5,098 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import java.util.regex.Pattern
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Threads
import org.openjdk.jmh.infra.Blackhole
/**
 * JMH benchmarks comparing string-matching strategies — StartsWith,
 * IndexOf, and precompiled regexes, each in case-sensitive and
 * case-insensitive variants — against a fixed 66-character input.
 *
 * Run with:
 *
 * ```
 * > run -wi 10 -i 10 -f1 -t1 .*StringMatching.*
 * ```
 *
 * NOTE(review): the original scaladoc here discussed computing max via bit
 * manipulation, which does not match anything in this class — it appears to
 * be a stale copy/paste from another benchmark. Confirm and remove upstream.
 */
@State(Scope.Thread)
class StringMatching {

  // Fixed input under test: all ASCII letters and digits plus '-', '_', '.'.
  private val value = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_."
  // Prefix matchers use the entire input as the prefix (longest-match case).
  private val prefix = value
  // Substring located past the middle of the input (uppercase run).
  private val substr = "XYZ"
  private val flags = Pattern.CASE_INSENSITIVE

  private val startsWithMatcher = StringMatcher.StartsWith(prefix)
  // NOTE(review): indexOfMatcher is never consumed by any benchmark below —
  // testSubstrIndexOf uses ssIndexOfMatcher. Dead field; confirm and remove.
  private val indexOfMatcher = StringMatcher.IndexOf(prefix)
  private val ssIndexOfMatcher = StringMatcher.IndexOf(substr)
  private val icIndexOfMatcher = StringMatcher.IndexOfIgnoreCase(substr)

  private val regexMatcher = StringMatcher.Regex(None, Pattern.compile(s"^$prefix"))
  private val icRegexMatcher = StringMatcher.Regex(None, Pattern.compile(s"^$prefix", flags))
  private val ssRegexMatcher = StringMatcher.Regex(None, Pattern.compile(s"^.*$substr"))
  private val ssRegexMatcher2 = StringMatcher.Regex(None, Pattern.compile(substr))
  private val ssICRegexMatcher = StringMatcher.Regex(None, Pattern.compile(s"^.*$substr", flags))
  private val ssICRegexMatcher2 = StringMatcher.Regex(None, Pattern.compile(substr, flags))

  // --- prefix matching, case-sensitive ---

  @Threads(1)
  @Benchmark
  def testPrefixRegex(bh: Blackhole): Unit = {
    bh.consume(regexMatcher.matches(value))
  }

  // Same regex, but allocating a fresh Matcher per invocation.
  @Threads(1)
  @Benchmark
  def testPrefixRegexNewMatcher(bh: Blackhole): Unit = {
    bh.consume(regexMatcher.pattern.matcher(value).find)
  }

  @Threads(1)
  @Benchmark
  def testPrefixStartsWith(bh: Blackhole): Unit = {
    bh.consume(startsWithMatcher.matches(value))
  }

  // --- prefix matching, case-insensitive ---

  @Threads(1)
  @Benchmark
  def testPrefixICRegex(bh: Blackhole): Unit = {
    bh.consume(icRegexMatcher.matches(value))
  }

  // regionMatches is slower than regex when ignoring case. This seems to be mostly due to
  // the regex doing simple ascii conversion for case (not using UNICODE_CASE flag) where the
  // String class is trying to do a unicode aware case conversion.
  @Threads(1)
  @Benchmark
  def testPrefixICStartsWith(bh: Blackhole): Unit = {
    bh.consume(value.regionMatches(true, 0, prefix, 0, prefix.length))
  }

  // --- substring matching, case-sensitive ---

  @Threads(1)
  @Benchmark
  def testSubstrIndexOf(bh: Blackhole): Unit = {
    bh.consume(ssIndexOfMatcher.matches(value))
  }

  // Anchored ".*substr" form of the substring regex.
  @Threads(1)
  @Benchmark
  def testSubstrRegex(bh: Blackhole): Unit = {
    bh.consume(ssRegexMatcher.matches(value))
  }

  // Unanchored form of the substring regex.
  @Threads(1)
  @Benchmark
  def testSubstrRegex2(bh: Blackhole): Unit = {
    bh.consume(ssRegexMatcher2.matches(value))
  }

  // --- substring matching, case-insensitive ---

  @Threads(1)
  @Benchmark
  def testSubstrICIndexOf(bh: Blackhole): Unit = {
    bh.consume(icIndexOfMatcher.matches(value))
  }

  @Threads(1)
  @Benchmark
  def testSubstrICRegex(bh: Blackhole): Unit = {
    bh.consume(ssICRegexMatcher.matches(value))
  }

  @Threads(1)
  @Benchmark
  def testSubstrICRegex2(bh: Blackhole): Unit = {
    bh.consume(ssICRegexMatcher2.matches(value))
  }
}
| gorcz/atlas | atlas-jmh/src/main/scala/com/netflix/atlas/core/util/StringMatching.scala | Scala | apache-2.0 | 3,850 |
package org.nikosoft.oanda.bot.streaming
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Broadcast, Flow, Framing, GraphDSL, RunnableGraph, Sink, Source, ZipWith}
import akka.util.ByteString
import org.json4s.jackson.Serialization.read
import org.nikosoft.oanda.GlobalProperties
import org.nikosoft.oanda.api.ApiModel.PrimitivesModel.InstrumentName
import org.nikosoft.oanda.api.JsonSerializers
import org.nikosoft.oanda.api.`def`.InstrumentApi.CandlesResponse
import org.nikosoft.oanda.bot.streaming.InvestingComStreamer.{Advices, SummaryAverages, SummaryTechnical, advisorSource}
import org.nikosoft.oanda.instruments.Model.CandleStick
import scala.concurrent.duration.DurationDouble
import scala.util.{Failure, Success}
import org.json4s.jackson.Serialization._
/**
 * Standalone app that, once a minute, fetches the latest EUR/USD one-minute
 * candle from the Oanda REST API and zips it with investing.com advisor
 * snapshots at five time frames (60s, 300s, 900s, 1800s, 3600s), printing
 * the combined result to stdout.
 */
object CandleWithInvestingComStreamer extends App {
  implicit val formats = JsonSerializers.formats
  implicit val actorSystem = ActorSystem("streamer")
  implicit val materializer = ActorMaterializer()

  // Candles endpoint; currently fetches only the single latest M1 candle
  // (the commented-out lines show an earlier from/to date-range variant).
  def url = s"/v3/instruments/${InstrumentName("EUR_USD").value}/candles?" +
    // s"from=${LocalDateTime.now.format(DateTimeFormatter.ISO_DATE_TIME) + "Z"}&" +
    // s"to=${LocalDateTime.now.plusDays(1).format(DateTimeFormatter.ISO_DATE_TIME) + "Z"}&" +
    s"count=1&granularity=M1"

  // On each tick: issue an authorized GET through a cached HTTPS connection
  // pool, parse the framed JSON body into CandlesResponse objects, and
  // flatten them into CandleStick values (mid prices only).
  // NOTE(review): requests that complete with Failure are silently dropped
  // by the collect below — confirm that is intentional.
  val flow: Flow[String, CandleStick, NotUsed] = Flow[String].map(_ => HttpRequest(uri = url, headers = List(RawHeader("Authorization", GlobalProperties.OandaToken))) -> 1)
    .via(Http().cachedHostConnectionPoolHttps("api-fxtrade.oanda.com"))
    .collect { case (Success(response), _) => response }
    .flatMapConcat(_.entity.dataBytes.via(
      // NOTE(review): the delimiter is a literal backslash-n as written here;
      // presumably a newline ("\n") was intended — confirm against the
      // original source (this may be a transcription/escaping artifact).
      Framing.delimiter(ByteString("\\n"), maximumFrameLength = 999999, allowTruncation = true)
        .map(_.utf8String)
        .map(read[CandlesResponse])
        .mapConcat(_.candles.toList)))
    .mapConcat(candle => candle.mid.map(CandleStick.toCandleStick(candle, _)).toList)

  RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    // Join one candle with advisor snapshots for each of the five frames.
    val zip = builder.add(ZipWith[CandleStick, Advices, Advices, Advices, Advices, Advices, (CandleStick, Advices, Advices, Advices, Advices, Advices)]((a, b, c, d, e, f) => (a, b, c, d, e, f)))
    // Drives the whole graph: one dummy element per minute, fanned out to
    // the candle flow and the five advisor sources.
    val tick = Source.tick(0.minutes, 1.minute, "_")
    val broadcast = builder.add(Broadcast[String](6))

    // Prints the candle plus the averages/technical summaries per frame.
    val printlnSink = Sink.foreach[(CandleStick, Advices, Advices, Advices, Advices, Advices)](a => {
      println(s"${a._1}")
      println(s"${a._2.find(_._1 == SummaryAverages).get} - ${a._2.find(_._1 == SummaryTechnical).get}")
      println(s"${a._3.find(_._1 == SummaryAverages).get} - ${a._3.find(_._1 == SummaryTechnical).get}")
      println(s"${a._4.find(_._1 == SummaryAverages).get} - ${a._4.find(_._1 == SummaryTechnical).get}")
      println(s"${a._5.find(_._1 == SummaryAverages).get} - ${a._5.find(_._1 == SummaryTechnical).get}")
      println(s"${a._6.find(_._1 == SummaryAverages).get} - ${a._6.find(_._1 == SummaryTechnical).get}")
      println("-----")
    })

    // @formatter:off
    tick ~> broadcast
            broadcast ~> flow                 ~> zip.in0
            broadcast ~> advisorSource(60)    ~> zip.in1
            broadcast ~> advisorSource(300)   ~> zip.in2
            broadcast ~> advisorSource(900)   ~> zip.in3
            broadcast ~> advisorSource(1800)  ~> zip.in4
            broadcast ~> advisorSource(3600)  ~> zip.in5
                                                 zip.out ~> printlnSink
    // @formatter:on

    ClosedShape
  }).run()
}
| cnnickolay/forex-trader | trading-bot/src/main/scala/org/nikosoft/oanda/bot/streaming/CandleWithInvestingComStreamer.scala | Scala | mit | 3,762 |
package sp
import akka.actor._
import sp.runners._
/**
 * Entry point for the operation-runner service node. Joins the Akka
 * cluster, starts the [[OperationRunner]] actor once cluster membership is
 * established, and shuts down interactively from the console.
 */
object Launch extends App {
  implicit val system = ActorSystem("SP")
  val cluster = akka.cluster.Cluster(system)

  // Defer actor creation until this node is an Up member of the cluster.
  cluster.registerOnMemberUp {
    // Start all you actors here.
    println("ExampleService node has joined the cluster")
    system.actorOf(OperationRunner.props)
  }

  cluster.registerOnMemberRemoved{
    println("ExampleService node has been removed from the cluster")
  }

  // Two-stage interactive shutdown: first leave the cluster gracefully,
  // then terminate the actor system.
  // NOTE(review): the "\\n" in the prompts below are literal backslash-n
  // sequences as written; presumably "\n" (newline) was intended — confirm
  // against the original source.
  scala.io.StdIn.readLine("Press ENTER to exit cluster.\\n")
  cluster.leave(cluster.selfAddress)

  scala.io.StdIn.readLine("Press ENTER to exit application.\\n")
  system.terminate()
}
| kristoferB/SP | spservices/operationRunners/src/main/scala/sp/Launch.scala | Scala | mit | 639 |
package mesosphere.marathon.core.task.update.impl.steps
import javax.inject.Named
import akka.event.EventStream
import com.google.inject.Inject
import mesosphere.marathon.core.task.Task.Terminated
import mesosphere.marathon.core.task.bus.MarathonTaskStatus
import mesosphere.marathon.core.task.bus.TaskChangeObservables.TaskChanged
import mesosphere.marathon.core.task.update.TaskUpdateStep
import mesosphere.marathon.core.task.{ Task, TaskStateOp }
import mesosphere.marathon.event.{ EventModule, MesosStatusUpdateEvent }
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.TaskState.TASK_RUNNING
import org.apache.mesos.Protos.{ TaskState, TaskStatus }
import org.slf4j.LoggerFactory
import scala.collection.immutable.Seq
import scala.concurrent.Future
/**
 * Post this update to the internal event stream.
 *
 * Task-update step that publishes a [[MesosStatusUpdateEvent]] to the
 * internal event bus for status transitions that observers care about:
 * terminal states, and the first TASK_RUNNING of a previously staged task.
 * All other transitions are logged at debug level and not published.
 */
class PostToEventStreamStepImpl @Inject() (
    @Named(EventModule.busName) eventBus: EventStream) extends TaskUpdateStep {
  private[this] val log = LoggerFactory.getLogger(getClass)

  override def name: String = "postTaskStatusEvent"

  override def processUpdate(taskChanged: TaskChanged): Future[_] = {
    taskChanged.stateOp match {
      // Only Mesos-originated status updates are eligible for publishing.
      case TaskStateOp.MesosUpdate(task, MarathonTaskStatus.WithMesosStatus(mesosStatus), timestamp) =>
        mesosStatus.getState match {
          // Terminal states are always published.
          case Terminated(_) =>
            postEvent(timestamp, mesosStatus, task)
          case TASK_RUNNING if task.launched.exists(!_.hasStartedRunning) => // staged, not running
            postEvent(timestamp, mesosStatus, task)
          // Everything else is intentionally suppressed.
          case state: TaskState =>
            val taskId = task.taskId
            log.debug(s"Do not post event $state for $taskId of app [${taskId.appId}].")
        }
      case _ =>
      // ignore
    }

    // Publishing is fire-and-forget; this step never fails the update chain.
    Future.successful(())
  }

  // Builds and publishes the MesosStatusUpdateEvent. Only tasks with launch
  // information are published (task.launched drives networking/ports/version).
  private[this] def postEvent(timestamp: Timestamp, status: TaskStatus, task: Task): Unit = {
    val taskId = task.taskId
    task.launched.foreach { launched =>
      log.info(
        "Sending event notification for {} of app [{}]: {}",
        Array[Object](taskId, taskId.appId, status.getState): _*
      )

      eventBus.publish(
        MesosStatusUpdateEvent(
          slaveId = status.getSlaveId.getValue,
          taskId = Task.Id(status.getTaskId),
          taskStatus = status.getState.name,
          message = if (status.hasMessage) status.getMessage else "",
          appId = taskId.appId,
          host = task.agentInfo.host,
          // IP addresses are only known for list-style network info.
          ipAddresses = launched.networking match {
            case networkInfoList: Task.NetworkInfoList => networkInfoList.addresses.to[Seq]
            case _ => Seq.empty
          },
          // Ports are only known for host-port networking.
          ports = launched.networking match {
            case Task.HostPorts(ports) => ports
            case _ => Iterable.empty
          },
          version = launched.appVersion.toString,
          timestamp = timestamp.toString
        )
      )
    }
  }
}
| vivekjuneja/marathon | src/main/scala/mesosphere/marathon/core/task/update/impl/steps/PostToEventStreamStepImpl.scala | Scala | apache-2.0 | 2,961 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.testFramework.EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
/**
 * Base class for tests of [[FilterOtherContainsInspection]]: concrete
 * subclasses supply the expected quick-fix hint and the test cases.
 *
 * @author Nikolay.Tropin
 */
abstract class FilterOtherTest extends OperationsOnCollectionInspectionTest {

  override val classOfInspection: Class[_ <: OperationOnCollectionInspection] =
    classOf[FilterOtherContainsInspection]
}
/**
 * Tests the "replace filter with intersect" quick-fix:
 * `set.filter(other.contains)` — in closure, underscore, infix, and
 * method-value forms — should be rewritten to `set.intersect(other)`.
 */
class FilterOtherContainsTest extends FilterOtherTest {

  override protected val hint: String =
    ScalaInspectionBundle.message("replace.filter.with.intersect")

  // Explicit lambda form: filter(x => others.contains(x)).
  def testFunExpr(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(x => others.contains(x))$END",
      "val others = Set(1,2); Set().filter(x => others.contains(x))",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // Placeholder form: filter(others.contains(_)).
  def testUnderscore(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(others.contains(_))$END",
      "val others = Set(1,2); Set().filter(others.contains(_))",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // Eta-expansion form: filter(others.contains _).
  def testUnderscore2(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(others.contains _)$END",
      "val others = Set(1,2); Set().filter(others.contains(_))",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // Infix placeholder form: filter(others contains _).
  def testUnderscoreInfix(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(others contains _)$END",
      "val others = Set(1,2); Set().filter(others contains _)",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // Method-value form: filter(others.contains).
  def testMethodValue(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(others.contains)$END",
      "val others = Set(1,2); Set().filter(others.contains)",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // Double negation: filterNot(x => !contains(x)) is equivalent to intersect.
  def testFilterNotNotContains(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filterNot(x => !others.contains(x))$END",
      "val others = Set(1,2); Set().filterNot(x => !others.contains(x))",
      "val others = Set(1,2); Set().intersect(others)"
    )
  }

  // The rewrite only applies when the receiver is a Set ...
  def testNotASet1(): Unit = {
    checkTextHasNoErrors("val others = Set(1,2); Seq().filter(others.contains)")
  }

  // ... and the other collection is a Set as well.
  def testNotASet2(): Unit = {
    checkTextHasNoErrors("val others = Seq(1,2); Set().filter(others.contains)")
  }

  // No rewrite when the inner set is built from the element being tested.
  def testInnerSetDependsOnElement(): Unit = {
    checkTextHasNoErrors("Set().filter(x => Set(1 - x, 2 - x).contains(x))")
  }
}
/**
 * Tests the "replace filter with diff" quick-fix:
 * `set.filter(x => !other.contains(x))` and the equivalent `filterNot`
 * forms should be rewritten to `set.diff(other)`.
 */
class FilterOtherNotContainsTest extends FilterOtherTest {

  override protected val hint: String =
    ScalaInspectionBundle.message("replace.filter.with.diff")

  // Negated lambda form: filter(x => !others.contains(x)).
  def testFunExpr(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filter(x => !others.contains(x))$END",
      "val others = Set(1,2); Set().filter(x => !others.contains(x))",
      "val others = Set(1,2); Set().diff(others)"
    )
  }

  // filterNot with placeholder: filterNot(others.contains(_)).
  def testUnderscore(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filterNot(others.contains(_))$END",
      "val others = Set(1,2); Set().filterNot(others.contains(_))",
      "val others = Set(1,2); Set().diff(others)"
    )
  }

  // filterNot with infix placeholder: filterNot(others contains _).
  def testUnderscoreInfix(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filterNot(others contains _)$END",
      "val others = Set(1,2); Set().filterNot(others contains _)",
      "val others = Set(1,2); Set().diff(others)"
    )
  }

  // filterNot with method value: filterNot(others.contains).
  def testMethodValue(): Unit = {
    doTest(
      s"val others = Set(1,2); Set().${START}filterNot(others.contains)$END",
      "val others = Set(1,2); Set().filterNot(others.contains)",
      "val others = Set(1,2); Set().diff(others)"
    )
  }

  // The rewrite only applies when the receiver is a Set ...
  def testNotASet1(): Unit = {
    checkTextHasNoErrors("val others = Set(1,2); Seq().filterNot(others.contains)")
  }

  // ... and the other collection is a Set as well.
  def testNotASet2(): Unit = {
    checkTextHasNoErrors("val others = Seq(1,2); Set().filter(x => !others.contains(x))")
  }

  // No rewrite when the inner set is built from the element being tested.
  def testInnerSetDependsOnElement(): Unit = {
    checkTextHasNoErrors("Set().filter(x => !Set(1 - x, 2 - x).contains(x))")
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/collections/FilterOtherContainsTest.scala | Scala | apache-2.0 | 4,076 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.util.NoSuchElementException
import java.util.zip.ZipOutputStream
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.util.control.NonFatal
import scala.xml.Node
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.History
import org.apache.spark.internal.config.UI._
import org.apache.spark.status.api.v1.{ApiRootResource, ApplicationInfo, UIRoot}
import org.apache.spark.ui.{SparkUI, UIUtils, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.{ShutdownHookManager, SystemClock, Utils}
/**
 * A web server that renders SparkUIs of completed applications.
 *
 * For the standalone mode, MasterWebUI already achieves this functionality. Thus, the
 * main use case of the HistoryServer is in other deploy modes (e.g. Yarn or Mesos).
 *
 * The logging directory structure is as follows: Within the given base directory, each
 * application's event logs are maintained in the application's own sub-directory. This
 * is the same structure as maintained in the event log write code path in
 * EventLoggingListener.
 *
 * @param conf Spark configuration.
 * @param provider source of application listings and reconstructed UIs.
 * @param securityManager security manager for the underlying web server.
 * @param port port to bind the HTTP server to.
 */
class HistoryServer(
    conf: SparkConf,
    provider: ApplicationHistoryProvider,
    securityManager: SecurityManager,
    port: Int)
  extends WebUI(securityManager, securityManager.getSSLOptions("historyServer"), port, conf)
  with Logging with UIRoot with ApplicationCacheOperations {

  // How many applications to retain
  private val retainedApplications = conf.get(History.RETAINED_APPLICATIONS)

  // How many applications the summary ui displays
  private[history] val maxApplications = conf.get(HISTORY_UI_MAX_APPS)

  // application
  private val appCache = new ApplicationCache(this, retainedApplications, new SystemClock())

  // and its metrics, for testing as well as monitoring
  val cacheMetrics = appCache.metrics

  // Servlet that lazily loads an application's UI on first access and then
  // redirects the client back to the originally requested URI.
  private val loaderServlet = new HttpServlet {
    protected override def doGet(req: HttpServletRequest, res: HttpServletResponse): Unit = {
      // Parse the URI created by getAttemptURI(). It contains an app ID and an optional
      // attempt ID (separated by a slash).
      val parts = Option(req.getPathInfo()).getOrElse("").split("/")
      if (parts.length < 2) {
        res.sendError(HttpServletResponse.SC_BAD_REQUEST,
          s"Unexpected path info in request (URI = ${req.getRequestURI()})")
        return
      }

      val appId = parts(1)
      val attemptId = if (parts.length >= 3) Some(parts(2)) else None

      // Since we may have applications with multiple attempts mixed with applications with a
      // single attempt, we need to try both. Try the single-attempt route first, and if an
      // error is raised, then try the multiple attempt route.
      if (!loadAppUi(appId, None) && (!attemptId.isDefined || !loadAppUi(appId, attemptId))) {
        val msg = <div class="row-fluid">Application {appId} not found.</div>
        res.setStatus(HttpServletResponse.SC_NOT_FOUND)
        UIUtils.basicSparkPage(req, msg, "Not Found").foreach { n =>
          res.getWriter().write(n.toString)
        }
        return
      }

      // Note we don't use the UI retrieved from the cache; the cache loader above will register
      // the app's UI, and all we need to do is redirect the user to the same URI that was
      // requested, and the proper data should be served at that point.
      // Also, make sure that the redirect url contains the query string present in the request.
      val requestURI = req.getRequestURI + Option(req.getQueryString).map("?" + _).getOrElse("")
      res.sendRedirect(res.encodeRedirectURL(requestURI))
    }

    // SPARK-5983 ensure TRACE is not supported
    protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse): Unit = {
      res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
    }
  }

  override def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T = {
    appCache.withSparkUI(appId, attemptId)(fn)
  }

  initialize()

  /**
   * Initialize the history server.
   *
   * This starts a background thread that periodically synchronizes information displayed on
   * this UI with the event logs in the provided base directory.
   */
  def initialize(): Unit = {
    attachPage(new HistoryPage(this))

    attachHandler(ApiRootResource.getServletHandler(this))

    addStaticHandler(SparkUI.STATIC_RESOURCE_DIR)

    val contextHandler = new ServletContextHandler
    contextHandler.setContextPath(HistoryServer.UI_PATH_PREFIX)
    contextHandler.addServlet(new ServletHolder(loaderServlet), "/*")
    attachHandler(contextHandler)
  }

  /** Bind to the HTTP server behind this web interface. */
  override def bind(): Unit = {
    super.bind()
  }

  /** Stop the server and close the file system. */
  override def stop(): Unit = {
    super.stop()
    provider.stop()
  }

  /** Attach a reconstructed UI to this server. Only valid after bind(). */
  override def attachSparkUI(
      appId: String,
      attemptId: Option[String],
      ui: SparkUI,
      completed: Boolean): Unit = {
    assert(serverInfo.isDefined, "HistoryServer must be bound before attaching SparkUIs")
    ui.getHandlers.foreach { handler =>
      serverInfo.get.addHandler(handler, ui.securityManager)
    }
  }

  /** Detach a reconstructed UI from this server. Only valid after bind(). */
  override def detachSparkUI(appId: String, attemptId: Option[String], ui: SparkUI): Unit = {
    assert(serverInfo.isDefined, "HistoryServer must be bound before detaching SparkUIs")
    ui.getHandlers.foreach(detachHandler)
    provider.onUIDetached(appId, attemptId, ui)
  }

  /**
   * Get the application UI and whether or not it is completed
   * @param appId application ID
   * @param attemptId attempt ID
   * @return If found, the Spark UI and any history information to be used in the cache
   */
  override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
    provider.getAppUI(appId, attemptId)
  }

  /**
   * Returns a list of available applications, in descending order according to their end time.
   *
   * @return List of all known applications.
   */
  def getApplicationList(): Iterator[ApplicationInfo] = {
    provider.getListing()
  }

  def getEventLogsUnderProcess(): Int = {
    provider.getEventLogsUnderProcess()
  }

  def getLastUpdatedTime(): Long = {
    provider.getLastUpdatedTime()
  }

  def getApplicationInfoList: Iterator[ApplicationInfo] = {
    getApplicationList()
  }

  def getApplicationInfo(appId: String): Option[ApplicationInfo] = {
    provider.getApplicationInfo(appId)
  }

  override def writeEventLogs(
      appId: String,
      attemptId: Option[String],
      zipStream: ZipOutputStream): Unit = {
    provider.writeEventLogs(appId, attemptId, zipStream)
  }

  /**
   * @return html text to display when the application list is empty
   */
  def emptyListingHtml(): Seq[Node] = {
    provider.getEmptyListingHtml()
  }

  /**
   * Returns the provider configuration to show in the listing page.
   *
   * @return A map with the provider's configuration.
   */
  def getProviderConfig(): Map[String, String] = provider.getConfig()

  /**
   * Load an application UI and attach it to the web server.
   * @param appId application ID
   * @param attemptId optional attempt ID
   * @return true if the application was found and loaded.
   */
  private def loadAppUi(appId: String, attemptId: Option[String]): Boolean = {
    try {
      appCache.withSparkUI(appId, attemptId) { _ =>
        // Do nothing, just force the UI to load.
      }
      true
    } catch {
      // A missing application surfaces as NoSuchElementException from the cache.
      case NonFatal(e: NoSuchElementException) =>
        false
    }
  }

  /**
   * String value for diagnostics.
   * @return a multi-line description of the server state.
   */
  override def toString: String = {
    s"""
      | History Server;
      | provider = $provider
      | cache = $appCache
    """.stripMargin
  }
}
/**
 * The recommended way of starting and stopping a HistoryServer is through the scripts
 * start-history-server.sh and stop-history-server.sh. The path to a base log directory,
 * as well as any other relevant history server configuration, should be specified via
 * the $SPARK_HISTORY_OPTS environment variable. For example:
 *
 *   export SPARK_HISTORY_OPTS="-Dspark.history.fs.logDirectory=/tmp/spark-events"
 *   ./sbin/start-history-server.sh
 *
 * This launches the HistoryServer as a Spark daemon.
 */
object HistoryServer extends Logging {
  private val conf = new SparkConf

  val UI_PATH_PREFIX = "/history"

  def main(argStrings: Array[String]): Unit = {
    Utils.initDaemon(log)
    new HistoryServerArguments(conf, argStrings)
    initSecurity()
    val securityManager = createSecurityManager(conf)

    // Instantiate the configured history provider (defaults to the
    // filesystem-backed provider) via its SparkConf constructor.
    val providerName = conf.get(History.PROVIDER)
      .getOrElse(classOf[FsHistoryProvider].getName())
    val provider = Utils.classForName(providerName)
      .getConstructor(classOf[SparkConf])
      .newInstance(conf)
      .asInstanceOf[ApplicationHistoryProvider]

    val port = conf.get(History.HISTORY_SERVER_UI_PORT)

    val server = new HistoryServer(conf, provider, securityManager, port)
    server.bind()

    ShutdownHookManager.addShutdownHook { () => server.stop() }

    // Wait until the end of the world... or if the HistoryServer process is manually stopped
    while (true) { Thread.sleep(Int.MaxValue) }
  }

  /**
   * Create a security manager.
   * This turns off security in the SecurityManager, so that the History Server can start
   * in a Spark cluster where security is enabled.
   * @param config configuration for the SecurityManager constructor
   * @return the security manager for use in constructing the History Server.
   */
  private[history] def createSecurityManager(config: SparkConf): SecurityManager = {
    if (config.getBoolean(SecurityManager.SPARK_AUTH_CONF, false)) {
      logDebug(s"Clearing ${SecurityManager.SPARK_AUTH_CONF}")
      config.set(SecurityManager.SPARK_AUTH_CONF, "false")
    }

    if (config.get(ACLS_ENABLE)) {
      logInfo(s"${ACLS_ENABLE.key} is configured, " +
        s"clearing it and only using ${History.HISTORY_SERVER_UI_ACLS_ENABLE.key}")
      config.set(ACLS_ENABLE, false)
    }

    new SecurityManager(config)
  }

  /**
   * Log in from a keytab when Kerberos is enabled, so the server can keep
   * accessing HDFS beyond the initial ticket's expiration.
   */
  def initSecurity(): Unit = {
    // If we are accessing HDFS and it has security enabled (Kerberos), we have to login
    // from a keytab file so that we can access HDFS beyond the kerberos ticket expiration.
    // As long as it is using Hadoop rpc (hdfs://), a relogin will automatically
    // occur from the keytab.
    if (conf.get(History.KERBEROS_ENABLED)) {
      // if you have enabled kerberos the following 2 params must be set
      val principalName = conf.get(History.KERBEROS_PRINCIPAL)
        .getOrElse(throw new NoSuchElementException(History.KERBEROS_PRINCIPAL.key))
      val keytabFilename = conf.get(History.KERBEROS_KEYTAB)
        .getOrElse(throw new NoSuchElementException(History.KERBEROS_KEYTAB.key))
      SparkHadoopUtil.get.loginUserFromKeytab(principalName, keytabFilename)
    }
  }

  // Builds the UI path for a given application attempt, e.g. /history/<app>/<attempt>.
  private[history] def getAttemptURI(appId: String, attemptId: Option[String]): String = {
    val attemptSuffix = attemptId.map { id => s"/$id" }.getOrElse("")
    s"${HistoryServer.UI_PATH_PREFIX}/${appId}${attemptSuffix}"
  }
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala | Scala | apache-2.0 | 12,343 |
// Databricks notebook source exported at Wed, 14 Sep 2016 07:10:53 UTC
// MAGIC %md
// MAGIC # Convert string date into TimestampType in Spark SQL
// MAGIC This can be done by converting date as string into timestamp (including time zone) using `unix_timestamp` and casting it as `TimestampType`, see example below. Note that you might need to convert with some specific timezone.
// COMMAND ----------
1+1
// COMMAND ----------
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
// COMMAND ----------
val df = Seq(
(1, "2014/01/01 23:00:01"),
(1, "2014/11/31 12:40:32"),
(1, "2016/12/29 09:54:00"),
(1, "2016/05/09 10:12:43")).toDF("id", "date")
// COMMAND ----------
val res = df.select($"id", $"date", unix_timestamp($"date", "yyyy/MM/dd HH:mm:ss").cast(TimestampType).as("timestamp"), current_timestamp(), current_date())
// COMMAND ----------
res.printSchema
// COMMAND ----------
res.show(false)
// COMMAND ----------
val df = Seq(
(1, "1/1/2014 23:00"),
(1, "11/31/2014 12:40"),
(1, "12/29/2016 09:54"),
(1, "5/9/2016 10:12")).toDF("id", "date")
// COMMAND ----------
val res = df.select($"id", $"date", unix_timestamp($"date", "MM/dd/yyyy HH:mm").cast(TimestampType).as("timestamp"), current_timestamp(), current_date())
// COMMAND ----------
res.show(false)
// COMMAND ----------
| raazesh-sainudiin/scalable-data-science | meme-evolution/db/timestampConversion.scala | Scala | unlicense | 1,357 |
package provingground.library
import provingground._
import HoTT._
import induction._
import implicits._
import shapeless._
import Fold._
// NOTE(review): this object appears to be machine-generated (an inductive
// type definition for `pprod A B`, a universe-polymorphic product with a
// single constructor `pprod.mk : A -> B -> pprod A B`, exported into the
// ProvingGround HoTT encoding). Do not edit the term by hand; regenerate
// from the source export instead.
object pprodInd {
  // Lambda over the two type parameters ($mpdiqi, $mpdiqj), yielding the
  // constructor-sequence description of `pprod $mpdiqi $mpdiqj`.
  lazy val value = Subst.Lambda(
    "$mpdiqi" :: Type,
    Subst.Lambda(
      "$mpdiqj" :: Type,
      ConstructorSeqTL(
        ConstructorSeqDom.Cons(
          // The single constructor `pprod.mk`, specialized to the two
          // bound type parameters.
          ApplnSym(
            ("pprod.mk" :: piDefn("'f_1394520732" :: Type)(
              piDefn("'g_1655163109" :: Type)(
                FuncTyp("'f_1394520732" :: Type,
                  FuncTyp("'g_1655163109" :: Type,
                    ("pprod" :: FuncTyp(Type, FuncTyp(Type, Type)))(
                      "'f_1394520732" :: Type)(
                      "'g_1655163109" :: Type))))))(
              "$mpdiqi" :: Type),
            "$mpdiqj" :: Type
          ),
          // Shape: takes one value of each type parameter, then lands in
          // the inductive type itself.
          ConstructorShape.CnstFuncConsShape(
            "$mpdiqi" :: Type,
            ConstructorShape.CnstFuncConsShape(
              "$mpdiqj" :: Type,
              ConstructorShape.IdShape.byTyp(
                ("pprod" :: FuncTyp(Type, FuncTyp(Type, Type)))(
                  "$mpdiqi" :: Type)("$mpdiqj" :: Type)))
          ),
          // No further constructors.
          ConstructorSeqDom.Empty.byTyp(
            ("pprod" :: FuncTyp(Type, FuncTyp(Type, Type)))("$mpdiqi" :: Type)(
              "$mpdiqj" :: Type))
        ),
        ("pprod" :: FuncTyp(Type, FuncTyp(Type, Type)))("$mpdiqi" :: Type)(
          "$mpdiqj" :: Type)
      )
    )
  )
}
| siddhartha-gadgil/ProvingGround | leanlib/src/main/scala/provingground/library/inductive-types/pprodInd.scala | Scala | mit | 1,458 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.models
import org.apache.spark.ml.attribute.{ AttributeGroup, NumericAttribute, NominalAttribute }
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.DataFrame
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
class FrameFunctions(self: FrameRdd) extends Serializable {

  /**
   * Convert FrameRdd to Spark dataframe with feature vector and label
   *
   * The observation columns are assembled into a single ML vector column, and both the
   * feature vector and the label column are annotated with ML attribute metadata
   * (nominal for categorical features/labels, numeric otherwise).
   *
   * @param observationColumns List of observation column names
   * @param labelColumn Label column name
   * @param outputFeatureVectorName Column name of output feature vector
   * @param categoricalFeatures Optional arity of categorical features. Entry (name -> k) indicates that
   *                            feature 'name' is categorical with 'k' categories indexed from 0:{0,1,...,k-1}
   * @param labelNumClasses Optional number of categories in label column. If None, label column is continuous.
   * @return Dataframe with feature vector and label
   */
  def toLabeledDataFrame(observationColumns: List[String],
                         labelColumn: String,
                         outputFeatureVectorName: String,
                         categoricalFeatures: Option[Map[String, Int]] = None,
                         labelNumClasses: Option[Int] = None): DataFrame = {
    require(labelColumn != null, "label column name must not be null")
    require(observationColumns != null, "feature column names must not be null")
    require(outputFeatureVectorName != null, "output feature vector name must not be null")
    require(labelNumClasses.isEmpty || labelNumClasses.get >= 2,
      "number of categories in label column must be greater than 1")

    // Assemble all observation columns into one ML vector column.
    val assembled = new VectorAssembler()
      .setInputCols(observationColumns.toArray)
      .setOutputCol(outputFeatureVectorName)
      .transform(self.toDataFrame)

    // Per-feature attributes: nominal for declared categorical features, numeric otherwise.
    val categoricalArity = categoricalFeatures.getOrElse(Map.empty[String, Int])
    val featureAttrs = observationColumns.zipWithIndex.map {
      case (featureName, featureIndex) if categoricalArity.contains(featureName) =>
        NominalAttribute.defaultAttr.withIndex(featureIndex)
          .withNumValues(categoricalArity(featureName))
      case (_, featureIndex) =>
        NumericAttribute.defaultAttr.withIndex(featureIndex)
    }.toArray

    // Label attribute: nominal when a class count is given, numeric (continuous) otherwise.
    val labelAttr = labelNumClasses match {
      case Some(numClasses) =>
        NominalAttribute.defaultAttr.withName(labelColumn).withNumValues(numClasses)
      case None =>
        NumericAttribute.defaultAttr.withName(labelColumn)
    }

    // Attach the attribute metadata and keep only the feature vector and label columns.
    val featuresMetadata = new AttributeGroup(outputFeatureVectorName, featureAttrs).toMetadata()
    val labelMetadata = labelAttr.toMetadata()
    assembled.select(
      assembled(outputFeatureVectorName).as(outputFeatureVectorName, featuresMetadata),
      assembled(labelColumn).as(labelColumn, labelMetadata))
  }
}
/** Implicit enrichment of [[FrameRdd]] with [[FrameFunctions]] operations. */
object FrameImplicits {
  // Implicit conversions should always declare an explicit return type to
  // keep implicit resolution predictable (and to satisfy stricter compilers).
  implicit def frameRddToFrameFunctions(frameRdd: FrameRdd): FrameFunctions = new FrameFunctions(frameRdd)
}
package org.codeswarm.aksync
import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable}
import scala.collection.mutable.{Queue, HashMap, Stack}
import scala.concurrent.duration.FiniteDuration
import Server._
/** A `Server` mediates access to a pool of "tokens", responding to each client request by
  * issuing a lease that grants temporary exclusive access to one token until the lease is
  * released.
  *
  * A client actor initiates communication with the server by sending the [[Lease.Request]]
  * message. The requestor's `ActorRef` is held in a FIFO queue until a token is available,
  * at which point the server replies with a [[Lease]]. The client is required to
  * [[Lease.Acknowledge acknowledge]] the lease, and to [[Lease.Release release]] it when
  * finished using it. [[Lease.apply]] is a convenient way to ensure that your client handles
  * its leases properly.
  *
  * @tparam Token Token type. This is typically a resource that cannot be used concurrently,
  *   such as a database connection.
  *
  * @param lifecycle Strategy for creating and destroying tokens.
  * @param poolSizeRange Minimum and maximum number of tokens in the pool. Defaults to 2-8.
  * @param poolStructure Constructor for the collection data structure which will hold unleased
  *   tokens. Defaults to [[Pool.Stack]]. Another good choice is [[Pool.Queue]].
  * @param leaseTimeout Amount of time that a lease is allowed to persist without acknowledgement.
  *   Defaults to a short time for the first acknowledgement and a longer duration subsequently.
  * @param tokenRetryInterval Amount of time to wait between retries when token creation fails.
  *   Defaults to an exponential backoff.
  * @param leaseTransform A function to apply to outgoing `Lease` messages. Defaults to the
  *   identity. This can be a useful way to work around limitations in pattern-matching on
  *   generic types that pose a problem when trying to receive the message type `Lease[Token]`.
  */
class Server[Token](lifecycle: Lifecycle[Token], poolSizeRange: PoolSizeRange = 2 to 8,
    poolStructure: => Pool[Token] = new Pool.Stack[Token](),
    leaseTimeout: LeaseTimeout = LeaseTimeout.FirstAndSubsequent(),
    tokenRetryInterval: TokenRetryInterval = TokenRetryInterval.ExponentialBackoff(),
    leaseTransform: (Lease[Token]) => Any = conforms[Lease[Token]])
    (implicit tokenManifest: Manifest[Token]) extends Actor with ActorLogging {

  val system = context.system
  import system.{dispatcher, scheduler}

  // Requestors waiting for a lease, served in FIFO order.
  private val clients = Queue[ActorRef]()
  // Unleased tokens; the concrete structure (stack/queue) is chosen by the caller.
  private val tokens = poolStructure
  // Outstanding leases and their acknowledgement/expiration state.
  private val leases = HashMap[StandardLease[Token], LeaseState]()
  // Tracks whether a token request to the lifecycle actor is currently in flight.
  private var tokenCreationState: TokenCreationState = TokenCreationState.NotDoingAnything
  // Monotonically increasing lease ids, used only for identification/logging.
  private val leaseIds = (1 to Int.MaxValue).iterator
  private val lifecycleActor = lifecycle.actor

  override def preStart() {
    // Kick off initial token creation so the pool can grow to its minimum size.
    self ! Internal.MaybeRequestToken
  }

  def receive = {

    case Lease.Request =>
      log debug "Received Lease.Request"
      if (poolSizeRange.isZero) {
        /* If the pool size is fixed at 0, this server will never do anything,
           so there is no point in enqueuing any requests. */
      } else {
        clients enqueue sender
        self ! Internal.MaybeIssueLeases
        self ! Internal.MaybeRequestToken
      }

    case Lease.Acknowledge(lease: StandardLease[Token]) =>
      log debug "Received Lease.Acknowledge[%d]".format(lease.id)
      leases.get(lease) match {
        case Some(state) => state.ack()
        case None => log warning "Received Acknowledge for unknown lease from %s".format(sender)
      }

    case Lease.Release(lease: StandardLease[Token]) =>
      log debug "Received Lease.Release[%d]".format(lease.id)
      // Returning the token to the pool makes it available for the next lease.
      leases.remove(lease) match {
        case Some(state) => tokens add lease.token
        case None => log warning "Received Release for unknown lease from %s".format(sender)
      }
      self ! Internal.MaybeIssueLeases

    case Internal.MaybeIssueLeases =>
      // Issue leases one at a time, re-sending MaybeIssueLeases until either
      // the client queue or the token pool is exhausted.
      createLease() match {
        case Some(lease) =>
          log debug "Issuing %s".format(lease)
          leases += lease -> new LeaseState(lease)
          lease.client ! leaseTransform(lease)
          self ! Internal.MaybeIssueLeases
        case None =>
      }
      self ! Internal.MaybeRequestToken

    case Internal.MaybeRevoke(lease: StandardLease[Token], nrOfAcks: Int) =>
      // Only revoke if the lease has not been acknowledged since the timer was set
      // (i.e. its ack count is unchanged).
      leases.get(lease) foreach { state =>
        if (state.nrOfAcks == nrOfAcks) {
          log warning "Revoking %s (acks: %d) that was issued to %s".
            format(lease, nrOfAcks, lease.client)
          leases -= lease
          lifecycleActor ! Lifecycle.Revoked(lease.token)
          self ! Internal.MaybeRequestToken
        }
      }

    case Internal.MaybeRequestToken =>
      // Request a new token when below the pool minimum, or when clients are
      // waiting and the pool maximum has not been reached. Only one request
      // may be outstanding at a time.
      if (tokenCreationState == TokenCreationState.NotDoingAnything) {
        val size = tokens.size + leases.size
        val needsAnotherToken = (poolSizeRange requiresMoreThan size) ||
          (clients.nonEmpty && (poolSizeRange allowsMoreThan size))
        if (needsAnotherToken) {
          tokenCreationState = TokenCreationState.AnticipatingNewToken()
          self ! Internal.RequestToken
        }
      }

    case Internal.RequestToken =>
      log debug "Sending Lifecycle.TokenRequest"
      lifecycleActor ! Lifecycle.TokenRequest

    case Lifecycle.NewToken(token) =>
      log debug "Received Lifecycle.NewToken"
      // Runtime type check is required because the message payload is untyped
      // after pattern matching (type erasure on the actor mailbox).
      if (!tokenManifest.runtimeClass.isAssignableFrom(token.getClass)) {
        log warning "Received NewToken of incorrect type %s".format(token.getClass)
      } else {
        tokenCreationState match {
          case TokenCreationState.NotDoingAnything =>
            log warning "Received unexpected NewToken from %s".format(sender)
          case _: TokenCreationState.AnticipatingNewToken =>
            tokenCreationState = TokenCreationState.NotDoingAnything
            tokens add token.asInstanceOf[Token]
            self ! Internal.MaybeIssueLeases
            self ! Internal.MaybeRequestToken
        }
      }

    case Lifecycle.TokenUnavailable =>
      log debug("Received Lifecycle.TokenUnavailable")
      tokenCreationState match {
        case TokenCreationState.NotDoingAnything =>
          log warning "Received unexpected TokenUnavailable from %s".format(sender)
        case x: TokenCreationState.AnticipatingNewToken =>
          // Retry later; the delay grows with the number of consecutive failures.
          tokenCreationState = x.fail
          scheduler.scheduleOnce(
            delay = tokenRetryInterval(tokenCreationState.nrOfFails),
            receiver = self,
            message = Internal.RequestToken
          )
      }

    case m =>
      log warning "Received unrecognized message: %s".format(m)

  }

  // Attempts to pair the next waiting client with an available token.
  // Returns None when either side is missing.
  private def createLease(): Option[StandardLease[Token]] = {

    // Remove terminated requestors to avoid wasting time issuing a lease to a dead actor.
    // This does not guarantee that it will never happen (there is a race condition), but
    // it's unlikely.
    while (clients.headOption.exists(_.isTerminated)) {
      log debug "Removing dead actor"
      clients.dequeue()
    }

    // No one is currently waiting for a lease.
    if (clients.isEmpty) {
      log debug "There are no requestors waiting"
      return None
    }

    // Remove dead tokens as a best effort toward avoiding giving a client a dead token
    // (for example, if the token is a database connection that has timed out).
    while (tokens.peek.exists(lifecycle.isDead(_))) {
      log debug "Removing dead token"
      lifecycleActor ! Lifecycle.Dead(tokens.remove())
    }

    // There are no free connections available.
    if (tokens.isEmpty) {
      log debug "There are no tokens available for lease"
      return None
    }

    // Create a new lease.
    Some(new StandardLease(
      token = tokens.remove(),
      id = leaseIds.next(),
      client = clients.dequeue(),
      server = self
    ))
  }

  /** State about a lease that is used internally by the server to manage lease expiration.
    */
  private class LeaseState(lease: Lease[Token]) {

    // The currently-running expiration timer, if there is one. This is None if the lease
    // will never be revoked due to an indefinite timeout duration.
    private var timer: Option[Cancellable] = None

    // The number of times this lease has been acknowledged.
    private[Server] var nrOfAcks = 0

    setTimer()

    // Records an acknowledgement and restarts the expiration timer.
    def ack() {
      nrOfAcks += 1
      timer foreach (_.cancel())
      setTimer()
    }

    private def setTimer() {
      // A finite timeout schedules a revocation check; anything else disables expiry.
      timer = leaseTimeout(nrOfAcks) match {
        case delay: FiniteDuration =>
          Some(scheduler.scheduleOnce(
            delay = delay,
            receiver = self,
            message = Internal.MaybeRevoke(lease, nrOfAcks)
          ))
        case _ =>
          None
      }
    }

  }

}
private object Server {

  /** Messages that the server sends to itself.
    */
  private object Internal {

    // Prompts the server to try pairing waiting clients with available tokens.
    case object MaybeIssueLeases

    /** @param nrOfAcks The number of acknowledgements the lease had at the time this message
      * was scheduled. When this message is received, if the lease has not been acknowledged
      * since then (its `nrOfAcks` has not changed), then the lease shall be revoked.
      */
    case class MaybeRevoke(lease: Lease[_], nrOfAcks: Int)

    // Prompts the server to decide whether a new token should be requested.
    case object MaybeRequestToken

    // Instructs the server to actually send a TokenRequest to the lifecycle actor.
    case object RequestToken

  }

  /** An enumeration of where the server is in its conversation with the lifecycle actor
    * in regards to creating new tokens.
    */
  private trait TokenCreationState {
    // Number of consecutive failed token-creation attempts (0 unless overridden).
    def nrOfFails: Int = 0
  }

  private object TokenCreationState {

    /** Nothing is going on. We're not interested in getting new tokens, and the lifecycle
      * actor shouldn't be doing anything.
      */
    case object NotDoingAnything extends TokenCreationState

    /** A token has been requested, and we are waiting for the lifecycle actor to reply.
      */
    case class AnticipatingNewToken(override val nrOfFails: Int = 0) extends TokenCreationState {
      // Returns the next state after a failed attempt, with the failure count incremented.
      def fail = AnticipatingNewToken(nrOfFails + 1)
    }

  }

} | chris-martin/aksync | src/main/scala/Server.scala | Scala | apache-2.0 | 10,068 |
package aia.routing
import akka.actor.{ Props, ActorRef, Actor }
import scala.collection.mutable.ListBuffer
/** Optional extras that can be requested when ordering a car. */
object CarOptions extends Enumeration {
  val CAR_COLOR_GRAY, NAVIGATION, PARKING_SENSORS = Value
}
/** An order for one car, listing the requested [[CarOptions]]. */
case class Order(options: Seq[CarOptions.Value])
/** The product being assembled; each routing step fills in one attribute. */
case class Car(color: String = "",
               hasNavigation: Boolean = false,
               hasParkingSensors: Boolean = false)
/** Envelope pairing a message with the remaining processing steps on its route slip. */
case class RouteSlipMessage(routeSlip: Seq[ActorRef],
                            message: AnyRef)
/** Mixin implementing the routing-slip pattern: forwards a message to the next
  * step on the slip, stripping the envelope when the last step is reached.
  */
trait RouteSlip {

  def sendMessageToNextTask(routeSlip: Seq[ActorRef],
                            message: AnyRef) {
    val nextTask = routeSlip.head
    val remainingSlip = routeSlip.tail
    if (remainingSlip.nonEmpty)
      // More steps remain: keep the envelope with the shortened slip.
      nextTask ! RouteSlipMessage(routeSlip = remainingSlip, message = message)
    else
      // Final step: deliver the bare message.
      nextTask ! message
  }

}
/** Routing step that paints the car in the configured color. */
class PaintCar(color: String) extends Actor with RouteSlip {
  def receive = {
    case RouteSlipMessage(remainingSteps, car: Car) =>
      val paintedCar = car.copy(color = color)
      sendMessageToNextTask(remainingSteps, paintedCar)
  }
}
/** Routing step that installs the navigation option. */
class AddNavigation() extends Actor with RouteSlip {
  def receive = {
    case RouteSlipMessage(remainingSteps, car: Car) =>
      val upgradedCar = car.copy(hasNavigation = true)
      sendMessageToNextTask(remainingSteps, upgradedCar)
  }
}
/** Routing step that installs the parking-sensor option. */
class AddParkingSensors() extends Actor with RouteSlip {
  def receive = {
    case RouteSlipMessage(remainingSteps, car: Car) =>
      val upgradedCar = car.copy(hasParkingSensors = true)
      sendMessageToNextTask(remainingSteps, upgradedCar)
  }
}
/** Builds a route slip from the options on an incoming [[Order]] and starts a
  * blank [[Car]] down that route. Every route ends at `endStep`.
  */
class SlipRouter(endStep: ActorRef) extends Actor with RouteSlip {
  val paintBlack = context.actorOf(
    Props(new PaintCar("black")), "paintBlack")
  val paintGray = context.actorOf(
    Props(new PaintCar("gray")), "paintGray")
  val addNavigation = context.actorOf(
    Props[AddNavigation], "navigation")
  val addParkingSensor = context.actorOf(
    Props[AddParkingSensors], "parkingSensors")

  def receive = {
    case order: Order =>
      sendMessageToNextTask(createRouteSlip(order.options), new Car)
  }

  private def createRouteSlip(options: Seq[CarOptions.Value]):
      Seq[ActorRef] = {
    // Every car needs a color: paint black unless gray was explicitly ordered.
    val colorDefault =
      if (options.contains(CarOptions.CAR_COLOR_GRAY)) Seq.empty[ActorRef]
      else Seq(paintBlack)
    // collect preserves the order of the requested options and drops unknown ones.
    val requestedSteps = options.collect {
      case CarOptions.CAR_COLOR_GRAY => paintGray
      case CarOptions.NAVIGATION => addNavigation
      case CarOptions.PARKING_SENSORS => addParkingSensor
    }
    (colorDefault ++ requestedSteps) :+ endStep
  }
}
| RayRoestenburg/akka-in-action | chapter-routing/src/main/scala/aia/routing/SlipRouter.scala | Scala | mit | 2,652 |
package com.twitter.finagle.param
import com.twitter.finagle.Stack
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.ssl.TrustCredentials
import com.twitter.finagle.ssl.client.{
SslClientConfiguration, SslClientEngineFactory,
SslClientSessionVerifier, SslContextClientEngineFactory}
import com.twitter.finagle.transport.Transport
import com.twitter.util.Duration
import javax.net.ssl.SSLContext
/**
 * A collection of methods for configuring the [[Transport]] for Finagle clients.
 *
 * @tparam A a [[Stack.Parameterized]] client to configure
 *
 * @see [[com.twitter.finagle.param.TransportParams]]
 */
class ClientTransportParams[A <: Stack.Parameterized[A]](self: Stack.Parameterized[A])
  extends TransportParams(self) {

  /**
   * Configures the TCP connection `timeout` of this client (default: 1 second).
   *
   * The connection timeout is the maximum amount of time a transport is allowed
   * to spend establishing a TCP connection.
   */
  def connectTimeout(timeout: Duration): A =
    self.configured(Transporter.ConnectTimeout(timeout))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   */
  def tls(config: SslClientConfiguration): A =
    self.configured(Transport.ClientSsl(Some(config)))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   */
  def tls(config: SslClientConfiguration, engineFactory: SslClientEngineFactory): A =
    self
      .configured(Transport.ClientSsl(Some(config)))
      .configured(SslClientEngineFactory.Param(engineFactory))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   */
  def tls(config: SslClientConfiguration, sessionVerifier: SslClientSessionVerifier): A =
    self
      .configured(Transport.ClientSsl(Some(config)))
      .configured(SslClientSessionVerifier.Param(sessionVerifier))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   */
  def tls(
    config: SslClientConfiguration,
    engineFactory: SslClientEngineFactory,
    sessionVerifier: SslClientSessionVerifier
  ): A =
    self
      .configured(Transport.ClientSsl(Some(config)))
      .configured(SslClientEngineFactory.Param(engineFactory))
      .configured(SslClientSessionVerifier.Param(sessionVerifier))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   *
   * @note Given that this uses default [[SSLContext]], all configuration params (trust/key stores)
   *       should be passed as Java system properties.
   */
  def tls: A =
    self
      .configured(Transport.ClientSsl(Some(SslClientConfiguration())))

  /**
   * Enables SSL/TLS support (connection encrypting) on this client.
   * Hostname verification will be provided against the given `hostname`.
   */
  def tls(hostname: String): A =
    self
      .configured(Transport.ClientSsl(
        Some(SslClientConfiguration(hostname = Some(hostname)))))

  /**
   * Enables SSL/TLS support (connection encrypting) with no hostname validation
   * on this client. The SSL/TLS are configured using the given `context`.
   *
   * @note It's recommended to not use [[SSLContext]] directly, but rely on Finagle to pick
   *       the most efficient SSL/TLS available on your platform.
   */
  def tls(context: SSLContext): A =
    self
      .configured(SslClientEngineFactory.Param(
        new SslContextClientEngineFactory(context)))
      .configured(Transport.ClientSsl(Some(SslClientConfiguration())))

  /**
   * Enables the TLS/SSL support (connection encrypting) with hostname validation
   * on this client. The TLS/SSL sessions are configured using the given `context`.
   */
  def tls(context: SSLContext, hostname: String): A =
    self
      .configured(SslClientEngineFactory.Param(
        new SslContextClientEngineFactory(context)))
      .configured(Transport.ClientSsl(
        Some(SslClientConfiguration(hostname = Some(hostname)))))

  /**
   * Enables the TLS/SSL support (connection encrypting) with no certificate validation
   * on this client.
   *
   * @note This makes a client trust any certificate sent by a server, which invalidates the entire
   *       idea of TLS/SSL. Use this carefully.
   */
  def tlsWithoutValidation: A = {
    self
      .configured(Transport.ClientSsl(
        Some(SslClientConfiguration(trustCredentials = TrustCredentials.Insecure))))
  }

  /**
   * Enables TCP tunneling via `HTTP CONNECT` through an HTTP proxy [1] on this client
   * (default: disabled).
   *
   * TCP tunneling might be used to flow any TCP traffic (not only HTTP), but is mostly used to
   * establish an HTTPS (TLS/SSL over HTTP) connection to a remote HTTP server through a proxy.
   *
   * When enabled, a Finagle client treats the server it connects to as a proxy server and asks it
   * to proxy the traffic to a given ultimate destination, specified as `host`.
   *
   * [1]: http://www.web-cache.com/Writings/Internet-Drafts/draft-luotonen-web-proxy-tunneling-01.txt
   *
   * @param host the ultimate host a proxy server connects to
   */
  def httpProxyTo(host: String): A =
    self.configured(Transporter.HttpProxyTo(Some(host -> None)))

  /**
   * Enables TCP tunneling via `HTTP CONNECT` through an HTTP proxy [1] on this client
   * (default: disabled).
   *
   * TCP tunneling might be used to flow any TCP traffic (not only HTTP), but is mostly used to
   * establish an HTTPS (TLS/SSL over HTTP) connection to a remote HTTP server through a proxy.
   *
   * When enabled, a Finagle client treats the server it connects to as a proxy server and asks it
   * to proxy the traffic to a given ultimate destination, specified as `host`.
   *
   * [1]: http://www.web-cache.com/Writings/Internet-Drafts/draft-luotonen-web-proxy-tunneling-01.txt
   *
   * @param host the ultimate host a proxy server connects to
   *
   * @param credentials credentials for a proxy server
   */
  def httpProxyTo(host: String, credentials: Transporter.Credentials): A =
    self.configured(Transporter.HttpProxyTo(Some(host -> Some(credentials))))

  /**
   * Enables TCP tunneling via `HTTP CONNECT` through an HTTP proxy [1] on this client
   * (default: disabled).
   *
   * TCP tunneling might be used to flow any TCP traffic (not only HTTP), but is mostly used to
   * establish an HTTPS (TLS/SSL over HTTP) connection to a remote HTTP server through a proxy.
   *
   * When enabled, a Finagle client treats the server it connects to as a proxy server and asks it
   * to proxy the traffic to a given ultimate destination, specified as `host`.
   *
   * [1]: http://www.web-cache.com/Writings/Internet-Drafts/draft-luotonen-web-proxy-tunneling-01.txt
   *
   * @param host the ultimate host a proxy server connects to
   *
   * @param credentials optional credentials for a proxy server
   */
  // Fixed typo in the deprecation message: "Tansporter" -> "Transporter".
  @deprecated("Use httpProxyTo(String, Transporter.Credentials) instead", "2017-7-11")
  def httpProxyTo(
    host: String,
    credentials: Option[Transporter.Credentials]
  ): A = self.configured(Transporter.HttpProxyTo(Some(host -> credentials)))
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/param/ClientTransportParams.scala | Scala | apache-2.0 | 7,039 |
/*
* Copyright 2014 Andrey Kutyrev
*
* Licensed under the the GNU Public License v3.0;
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.gnu.org/licenses/gpl.html
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ========================================================================
*/
package squ1b3r.thingummies.integration.FMP
import codechicken.microblock.{BlockMicroMaterial, MicroMaterialRegistry}
import net.minecraft.block.Block
import squ1b3r.thingummies.blocks.ModBlocks
import squ1b3r.thingummies.helper.ColorHelper
/** Registers this mod's colourable blocks as Forge MultiPart micro-materials. */
object ThingummiesFMP {

  // The four colour-block variants that should each expose 16 colour metas.
  private def colorBlocks: Seq[Block] =
    Seq(ModBlocks.slickBlock, ModBlocks.stainedBlock, ModBlocks.shabbyBlock, ModBlocks.noisyBlock)

  def registerBlocks(): Unit = colorBlocks.foreach(registerColorBlock)

  // Registers one micro-material per colour meta of the given block.
  def registerColorBlock(block: Block): Unit =
    ColorHelper.COLORS.keys.foreach { meta =>
      MicroMaterialRegistry.registerMaterial(new ColoredMicroMaterial(block, meta), getMaterialKey(block, meta))
    }

  def getMaterialKey(block: Block, meta: Int): String = BlockMicroMaterial.materialKey(block, meta)
}
| squ1b3r/Thingummies | src/main/scala/squ1b3r/thingummies/integration/FMP/ThingummiesFMP.scala | Scala | gpl-3.0 | 1,462 |
package chapter5
import chapter5.Stream._
object Exercise5_8 {

  /**
   * Returns an infinite stream that repeats the value `a`.
   *
   * Uses a single self-referencing cell so the whole stream shares one cons
   * object, instead of allocating a new one for every tail as the naive
   * `cons(a, constant(a))` does.
   * NOTE(review): this relies on `Stream.cons` taking its tail by name — which
   * must already hold, since the original recursive definition could not
   * terminate otherwise — but confirm against chapter5.Stream.
   */
  def constant[A](a: A): Stream[A] = {
    lazy val self: Stream[A] = cons(a, self)
    self
  }

  /** Smoke tests for `constant`; prints a confirmation when all pass. */
  def main(args: Array[String]): Unit = {
    assert(constant(1).take(3).toList == List(1, 1, 1))
    assert(constant(1).take(3).map(_ + 1).toList == List(2, 2, 2))
    assert(constant('a').take(5).toList == List('a', 'a', 'a', 'a', 'a'))
    println("All tests successful")
  }
} | amolnayak311/functional-programming-in-scala | src/chapter5/Exercise5_8.scala | Scala | unlicense | 446 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.supervision
import controllers.{AmlsBaseController, CommonPlayDependencies}
import javax.inject.Inject
import play.api.mvc.MessagesControllerComponents
import utils.AuthAction
import views.html.supervision.what_you_need
import scala.concurrent.Future
/** Serves the static "what you need" guidance page of the supervision journey.
  * All dependencies are constructor-injected; `authAction` gates access to
  * authenticated users.
  */
class WhatYouNeedController @Inject() (val authAction: AuthAction,
                                       val ds: CommonPlayDependencies,
                                       val cc: MessagesControllerComponents,
                                       what_you_need: what_you_need) extends AmlsBaseController(ds, cc) {

  // Renders the what-you-need view; no form handling or state involved.
  def get() =
    authAction.async {
      implicit request =>
        Future.successful(Ok(what_you_need()))
    }
}
| hmrc/amls-frontend | app/controllers/supervision/WhatYouNeedController.scala | Scala | apache-2.0 | 1,321 |
// NOTE(review): judging by its path (testdata/changeSignature/fromJava/
// ParameterlessOverriders2_after.scala), this file appears to be the expected
// "after" fixture of a change-signature refactoring test. If the test compares
// file contents verbatim, any edits here (including these comments) must be
// mirrored in the corresponding fixture files — confirm before changing.
object Test {
  class Test1 extends ParameterlessOverriders2 {
    override def bar(b: Boolean): Int = 1
    val x = bar(true)
    this.bar(true)
  }
  object Test2 extends ParameterlessOverriders2 {
    override def bar(b: Boolean): Int = 1
    val x = bar(true)
    this.bar(true)
  }
  class Test3 extends ParameterlessOverriders2 {
    override def bar(b: Boolean): Int = 1
    val x = bar(true)
  }
  trait Test4 extends ParameterlessOverriders2 {
    override def bar(b: Boolean): Int = 1
    val x = bar(true)
  }
} | ilinum/intellij-scala | testdata/changeSignature/fromJava/ParameterlessOverriders2_after.scala | Scala | apache-2.0 | 531 |
import sbt._
import Keys._
import play.Project._
/** sbt build definition (pre-0.13.13 `Build` trait style) for the Play frontend. */
object ApplicationBuild extends Build {

  val appName = "frontend"
  val appVersion = "1.0-SNAPSHOT"

  // Runtime dependencies: Slick/Postgres persistence, statsd metrics, and
  // WebJars-packaged client assets, plus Play's jdbc/anorm modules.
  val appDependencies = Seq(
    "com.typesafe" % "play-slick_2.10" % "0.3.0",
    "postgresql" % "postgresql" % "9.1-901.jdbc4",
    //"securesocial" %% "securesocial" % "master-SNAPSHOT",
    "com.typesafe.play.plugins" %% "play-statsd" % "2.1.0",
    "org.webjars" % "webjars-play" % "2.1.0",
    "org.webjars" % "jquery" % "1.9.1",
    "org.webjars" % "highlightjs" % "7.3",
    "org.webjars" % "font-awesome" % "3.0.2",
    "org.webjars" % "bootstrap" % "2.3.1",
    "org.webjars" % "chosen" % "0.9.12",
    jdbc,
    anorm,
    "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test"
  )

  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Extra resolvers for snapshot plugins and ivy-style GitHub releases.
    resolvers += Resolver.url("sbt-plugin-snapshots", new URL("http://repo.scala-sbt.org/scalasbt/sbt-plugin-snapshots/"))(Resolver.ivyStylePatterns),
    resolvers += Resolver.url("github repo for play-slick", url("http://loicdescotte.github.com/releases/"))(Resolver.ivyStylePatterns),
    resolvers += "github repo for Chosen 0.9.12" at "http://codeblock.github.io/chosen/"
  ).dependsOn(uri("git://github.com/eval-so/minibcs")).settings(
    // Reset test options, then use ScalaTest's durations/short-stacks reporter flags.
    testOptions in Test := Nil,
    testOptions in Test += Tests.Argument("-oDS")
  )

}
| eval-so/frontend | project/Build.scala | Scala | apache-2.0 | 1,364 |
package ch.wsl.box.rest.routes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.{Directives, Route}
import scribe.Logging
/** Minimal HTTP routes served while the application is still booting. */
object Preloading extends Logging {
  import Directives._

  val route: Route = {
    // Health-style endpoint reporting that the app is not ready yet.
    val bootingStatus =
      path("status") {
        get {
          complete("BOOTING")
        }
      }
    // Root path serves the static preloading page from the classpath.
    val preloadingPage =
      path("") {
        getFromResource("preloading.html")
      }
    bootingStatus ~ preloadingPage
  }
}
| Insubric/box | server/src/main/scala/ch/wsl/box/rest/routes/Preloading.scala | Scala | apache-2.0 | 372 |
package codeforces.round_281_div2
import test.utils.{StdoutTester, UnitSpec}
/** Sample-based tests for Codeforces Round 281 Div 2, problem B.
  * Each case feeds the raw problem input via stdin and checks the exact
  * stdout produced by `parseSolveAndPrint`.
  */
class B_VasyaAndWrestling$Test extends UnitSpec {
  // The function under test: reads input, solves, prints the winner.
  val testable = B_VasyaAndWrestling.parseSolveAndPrint _

  "test_1" should "print correct output on test" in {
    StdoutTester {
      "5\n1\n2\n-3\n-4\n3"
    } {
      "second"
    } {
      testable
    }
  }

  "test_2" should "print correct output on test" in {
    StdoutTester {
      "3\n-1\n-2\n3"
    } {
      "first"
    } {
      testable
    }
  }

  "test_3" should "print correct output on test" in {
    StdoutTester {
      "2\n4\n-4"
    } {
      "second"
    } {
      testable
    }
  }

  "test_4" should "print correct output on test" in {
    StdoutTester {
      "6\n-3\n-1\n-2\n3\n1\n2\n"
    } {
      "first"
    } {
      testable
    }
  }
}
| VitalyKalinkin/stdout_sites_scala | src/test/scala/codeforces/round_281_div2/B_VasyaAndWrestling$Test.scala | Scala | bsd-2-clause | 805 |
package net.lemonmodel.patterns
import java.net.URI
import scala.xml._
import net.lemonmodel.rdfutil.RDFUtil._
/**
 * An adjective pattern: carries the lemma (adjective phrase), any inflected
 * forms, and knows how to serialize itself as OntoLex or lemon RDF/XML.
 */
trait Adjective extends Pattern {
  // Returns a copy of this adjective with one extra inflected form.
  def makeWithForm(form : Form) : Adjective
  // Returns a copy of this adjective with several extra inflected forms.
  protected def makeWithForms(forms : Seq[Form]) : Adjective
  // Sense-specific XML fragments, supplied by each concrete subclass.
  protected def senseOntoLexXML(namer : URINamer) : NodeSeq
  protected def senseXML(namer : URINamer) : NodeSeq

  // Flattens a nested (property name, property value) -> (form | sub-table)
  // structure into a flat list of Forms, accumulating the property/value URI
  // pairs along the path in `props`.
  // NOTE(review): the `Map[_,_]` and `(_,_)` cases are unchecked at runtime
  // due to type erasure; the asInstanceOf casts assume the caller supplies
  // correctly-shaped tables.
  def extractForms(namespace : Namespace, table : Map[(String,String),Any], props : List[(URI,URI)]) : Seq[Form] = {
    (for(((prop,propVal),subtable) <- table) yield {
      val propURI = namespace(prop)
      val propValURI = namespace(propVal)
      subtable match {
        case form : String => Seq(Form(form,(props :+ (propURI,propValURI)).toMap))
        case st : Map[_,_] => extractForms(namespace,st.asInstanceOf[Map[(String,String),Any]],props :+ (propURI,propValURI))
        case st : (_,_) => extractForms(namespace,Map(st).asInstanceOf[Map[(String,String),Any]],props :+ (propURI,propValURI))
        case fail => throw new IllegalArgumentException("Invalid value in a table " + fail.toString())
      }
    }).flatten.toSeq
  }

  // Adds all forms described by an inflection table to this adjective.
  def withTable(namespace : Namespace, table : Map[(String,String),Any]) : Adjective = {
    val forms = extractForms(namespace,table,Nil)
    makeWithForms(forms)
  }

  // Convenience constructors for the two degree forms (lexinfo vocabulary).
  def withComparative(comparativeForm : String) = makeWithForm(Form(comparativeForm,Map(lexinfo("degree")->lexinfo("comparative"))))
  def withSuperlative(superlativeForm : String) = makeWithForm(Form(superlativeForm,Map(lexinfo("degree")->lexinfo("superlative"))))

  def lemma : AP
  def forms : Seq[Form]

  // OntoLex serialization: canonical form from the lemma, one ontolex:otherForm
  // per inflected form (with its property/value pairs), plus the subclass's
  // sense fragment.
  def toOntoLexXML(namer : URINamer, lang : String) = <ontolex:LexicalEntry rdf:about={namer("adjective",lemma.toString())}>
    <ontolex:canonicalForm>
      <ontolex:Form rdf:about={namer("adjective",lemma.toString(),Some("canonicalForm"))}>
        <ontolex:writtenRep xml:lang={lang}>{lemma.toString()}</ontolex:writtenRep>
      </ontolex:Form>
    </ontolex:canonicalForm>
    { lemma.toOntoLexXML(namer,lang) }
    {
      for(form <- forms) yield {
        <ontolex:otherForm>
          <ontolex:Form rdf:about={namer("adjective",lemma.toString(),Some("form"))}>
            <ontolex:writtenRep xml:lang={lang}>{form.writtenRep}</ontolex:writtenRep>
            {
              for((prop,propVal) <- form.props) yield {
                val (prefixUri,prefix,suffix) = prefixURI(prop)
                <lingonto:prop rdf:resource={propVal}/>.copy(prefix=prefix, label=suffix) %
                  Attribute("","xmlns:"+prefix,prefixUri,Null)
              }
            }
          </ontolex:Form>
        </ontolex:otherForm>
      }
    }
    {senseOntoLexXML(namer)}
  </ontolex:LexicalEntry>

  // Legacy lemon serialization; mirrors toOntoLexXML with the lemon vocabulary.
  def toXML(namer : URINamer, lang : String) = <lemon:LexicalEntry rdf:about={namer("adjective",lemma.toString())}>
    <lemon:canonicalForm>
      <lemon:Form rdf:about={namer("adjective",lemma.toString(),Some("canonicalForm"))}>
        <lemon:writtenRep xml:lang={lang}>{lemma.toString()}</lemon:writtenRep>
      </lemon:Form>
    </lemon:canonicalForm>
    { lemma.toXML(namer,lang) }
    {
      for(form <- forms) yield {
        <lemon:otherForm>
          <lemon:Form rdf:about={namer("adjective",lemma.toString(),Some("form"))}>
            <lemon:writtenRep xml:lang={lang}>{form.writtenRep}</lemon:writtenRep>
            {
              for((prop,propVal) <- form.props) yield {
                val (prefixUri,prefix,suffix) = prefixURI(prop)
                <lingonto:prop rdf:resource={propVal}/>.copy(prefix=prefix, label=suffix) %
                  Attribute("","xmlns:"+prefix,prefixUri,Null)
              }
            }
          </lemon:Form>
        </lemon:otherForm>
      }
    }
    {senseXML(namer)}
  </lemon:LexicalEntry>
}
/** An intersective adjective whose sense is a plain OWL class reference;
  * emits both predicative and attributive syntactic frames.
  */
case class IntersectiveAdjective(val lemma : AP,
    val sense : URI = null,
    val forms : Seq[Form] = Nil,
    val register : Option[Register] = None) extends Adjective {
  def makeWithForm(form : Form) = IntersectiveAdjective(lemma,sense,forms :+ form,register)
  protected def makeWithForms(otherForms : Seq[Form]) = IntersectiveAdjective(lemma,sense, forms ++ otherForms,register)
  def withRegister(register : Register) = IntersectiveAdjective(lemma,sense,forms,Some(register))

  // Sense (OntoLex): class reference plus the shared subject argument, linked
  // from both a predicative and an attributive frame.
  protected def senseOntoLexXML(namer : URINamer) = {
    val subjURI = namer("adjective",lemma.toString(),Some("subject"))
    <ontolex:sense>
      <ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
        <rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
        <ontolex:reference>
          <owl:Class rdf:about={sense}/>
        </ontolex:reference>
        {registerXML(register)}
        <synsem:isA>
          <synsem:SyntacticArgument rdf:about={subjURI}/>
        </synsem:isA>
      </ontolex:LexicalSense>
    </ontolex:sense> :+
    <synsem:synBehavior>
      <synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
        <lexinfo:copulativeSubject rdf:resource={subjURI}/>
      </synsem:SyntacticFrame>
    </synsem:synBehavior> :+
    <synsem:synBehavior>
      <synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
        <lexinfo:attributiveArg rdf:resource={subjURI}/>
      </synsem:SyntacticFrame>
    </synsem:synBehavior>
  }

  // Sense (legacy lemon): mirrors senseOntoLexXML with the lemon vocabulary.
  protected def senseXML(namer : URINamer) = {
    val subjURI = namer("adjective",lemma.toString(),Some("subject"))
    <lemon:sense>
      <lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
        <lemon:reference>
          <owl:Class rdf:about={sense}/>
        </lemon:reference>
        {registerXML(register)}
        <lemon:isA>
          <lemon:Argument rdf:about={subjURI}/>
        </lemon:isA>
      </lemon:LexicalSense>
    </lemon:sense> :+
    <lemon:synBehavior>
      <lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
        <lexinfo:copulativeSubject rdf:resource={subjURI}/>
      </lemon:Frame>
    </lemon:synBehavior> :+
    <lemon:synBehavior>
      <lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
        <lexinfo:attributiveArg rdf:resource={subjURI}/>
      </lemon:Frame>
    </lemon:synBehavior>
  }
}
/** An intersective adjective whose sense is an OWL restriction: the set of
  * individuals whose `property` has the individual `value`.
  */
case class IntersectiveObjectPropertyAdjective(val lemma : AP,
    val property : URI,
    val value : URI,
    val forms : Seq[Form] = Nil,
    val register : Option[Register] = None) extends Adjective {
  def makeWithForm(form : Form) = IntersectiveObjectPropertyAdjective(lemma,property,value,forms :+ form,register)
  protected def makeWithForms(otherForms : Seq[Form]) = IntersectiveObjectPropertyAdjective(lemma,property,value, forms ++ otherForms,register)
  def withRegister(register : Register) = IntersectiveObjectPropertyAdjective(lemma,property,value,forms,Some(register))

  // Sense (OntoLex): owl:Restriction(onProperty, hasValue) reference plus the
  // shared subject argument used by both syntactic frames.
  protected def senseOntoLexXML(namer : URINamer) = {
    val subjURI = namer("adjective",lemma.toString(),Some("subject"))
    <ontolex:sense>
      <ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
        <rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
        <ontolex:reference>
          <owl:Restriction rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
            <owl:onProperty rdf:resource={property}/>
            <owl:hasValue rdf:resource={value}/>
          </owl:Restriction>
        </ontolex:reference>
        {registerXML(register)}
        <synsem:isA>
          <synsem:SyntacticArgument rdf:about={subjURI}/>
        </synsem:isA>
      </ontolex:LexicalSense>
    </ontolex:sense> :+
    <synsem:synBehavior>
      <synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
        <lexinfo:copulativeSubject rdf:resource={subjURI}/>
      </synsem:SyntacticFrame>
    </synsem:synBehavior> :+
    <synsem:synBehavior>
      <synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
        <lexinfo:attributiveArg rdf:resource={subjURI}/>
      </synsem:SyntacticFrame>
    </synsem:synBehavior>
  }

  // Sense (legacy lemon): mirrors senseOntoLexXML with the lemon vocabulary.
  protected def senseXML(namer : URINamer) = {
    val subjURI = namer("adjective",lemma.toString(),Some("subject"))
    <lemon:sense>
      <lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
        <lemon:reference>
          <owl:Restriction rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
            <owl:onProperty rdf:resource={property}/>
            <owl:hasValue rdf:resource={value}/>
          </owl:Restriction>
        </lemon:reference>
        {registerXML(register)}
        <lemon:isA>
          <lemon:Argument rdf:about={subjURI}/>
        </lemon:isA>
      </lemon:LexicalSense>
    </lemon:sense> :+
    <lemon:synBehavior>
      <lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
        <lexinfo:copulativeSubject rdf:resource={subjURI}/>
      </lemon:Frame>
    </lemon:synBehavior> :+
    <lemon:synBehavior>
      <lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
        <rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
        <lexinfo:attributiveArg rdf:resource={subjURI}/>
      </lemon:Frame>
    </lemon:synBehavior>
  }
}
/**
 * Like [[IntersectiveObjectPropertyAdjective]] but the restriction value is a
 * data (literal) value: `value` is emitted as element text inside
 * owl:hasValue rather than as an rdf:resource reference.
 */
case class IntersectiveDataPropertyAdjective(val lemma : AP,
val property : URI,
val value : String,
val forms : Seq[Form] = Nil,
val register : Option[Register] = None) extends Adjective {
// Copy-style builders: each returns a new instance with the extra form(s)/register.
def makeWithForm(form : Form) = IntersectiveDataPropertyAdjective(lemma,property,value,forms :+ form,register)
protected def makeWithForms(otherForms : Seq[Form]) = IntersectiveDataPropertyAdjective(lemma,property,value, forms ++ otherForms,register)
def withRegister(register : Register) = IntersectiveDataPropertyAdjective(lemma,property,value,forms,Some(register))
// OntoLex-Lemon rendering: one sense + predicative and attributive frames.
protected def senseOntoLexXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
<ontolex:sense>
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference>
<owl:Restriction rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
<owl:onProperty rdf:resource={property}/>
<owl:hasValue>{value}</owl:hasValue>
</owl:Restriction>
</ontolex:reference>
{registerXML(register)}
<synsem:isA>
<synsem:SyntacticArgument rdf:about={subjURI}/>
</synsem:isA>
</ontolex:LexicalSense>
</ontolex:sense> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior>
}
// Legacy lemon rendering; mirrors senseOntoLexXML with the lemon vocabulary.
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<owl:Restriction rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
<owl:onProperty rdf:resource={property}/>
<owl:hasValue>{value}</owl:hasValue>
</owl:Restriction>
</lemon:reference>
{registerXML(register)}
<lemon:isA>
<lemon:Argument rdf:about={subjURI}/>
</lemon:isA>
</lemon:LexicalSense>
</lemon:sense> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
</lemon:Frame>
</lemon:synBehavior> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</lemon:Frame>
</lemon:synBehavior>
}
}
/**
 * An adjective that denotes a property directly: the sense maps the lexical
 * entry to an rdf:Property with explicit subject and object arguments, and a
 * single AdjectivePropertyModifyingFrame binds the copulative subject and the
 * attribute argument.
 */
case class PropertyModifyingAdjective(val lemma : AP,
val property : URI,
val forms : Seq[Form] = Nil,
val register : Option[Register] = None) extends Adjective {
// Copy-style builders: each returns a new instance with the extra form(s)/register.
def makeWithForm(form : Form) = PropertyModifyingAdjective(lemma,property,forms :+ form, register)
protected def makeWithForms(otherForms : Seq[Form]) = PropertyModifyingAdjective(lemma,property,forms ++ otherForms,register)
def withRegister(register : Register) = PropertyModifyingAdjective(lemma,property,forms,Some(register))
// OntoLex-Lemon rendering.
protected def senseOntoLexXML(namer : URINamer) = {
// Argument URIs shared between the sense mapping and the frame below.
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val objURI = namer("adjective",lemma.toString(),Some("attributive"))
<ontolex:sense>
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference>
<rdf:Property rdf:about={property}/>
</ontolex:reference>
{registerXML(register)}
<synsem:subjOfProp>
<synsem:SyntacticArgument rdf:about={subjURI}/>
</synsem:subjOfProp>
<synsem:objOfProp>
<synsem:SyntacticArgument rdf:about={objURI}/>
</synsem:objOfProp>
</ontolex:LexicalSense>
</ontolex:sense> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePropertyModifyingFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
<lexinfo:attributeArg rdf:resource={objURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior>
}
// Legacy lemon rendering; mirrors senseOntoLexXML with the lemon vocabulary.
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val objURI = namer("adjective",lemma.toString(),Some("attributive"))
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<rdf:Property rdf:about={property}/>
</lemon:reference>
{registerXML(register)}
<lemon:subjOfProp>
<lemon:Argument rdf:about={subjURI}/>
</lemon:subjOfProp>
<lemon:objOfProp>
<lemon:Argument rdf:about={objURI}/>
</lemon:objOfProp>
</lemon:LexicalSense>
</lemon:sense> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePropertyModifyingFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
<lexinfo:attributeArg rdf:resource={objURI}/>
</lemon:Frame>
</lemon:synBehavior>
}
}
/**
 * An adjective that relates its subject to a prepositional argument
 * (serialized with an AdjectivePPFrame); the sense maps to `property` with
 * the relational argument as object.
 *
 * NOTE(review): `property` defaults to null (callers after it must use named
 * arguments); a null property would emit rdf:about={null} — confirm callers
 * always supply it.
 */
case class RelationalAdjective(val lemma : AP,
val property : URI = null,
val relationalArg : Arg,
val forms : Seq[Form] = Nil,
val register : Option[Register] = None) extends Adjective {
// Copy-style builders: each returns a new instance with the extra form(s)/register.
def makeWithForm(form : Form) = RelationalAdjective(lemma,property,relationalArg,forms :+ form,register)
protected def makeWithForms(otherForms : Seq[Form]) = RelationalAdjective(lemma,property,relationalArg,forms ++ otherForms,register)
def withRegister(register : Register) = RelationalAdjective(lemma,property,relationalArg,forms,Some(register))
// OntoLex-Lemon rendering.
protected def senseOntoLexXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val objURI = namer("adjective",lemma.toString(),Some("attributive"))
<ontolex:sense>
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference>
<rdf:Property rdf:about={property}/>
</ontolex:reference>
{registerXML(register)}
<synsem:subjOfProp>
<synsem:SyntacticArgument rdf:about={subjURI}/>
</synsem:subjOfProp>
<synsem:objOfProp>
<synsem:SyntacticArgument rdf:about={objURI}/>
</synsem:objOfProp>
{
// NOTE(review): `if` without `else` inside an XML literal yields Unit when
// the restriction is absent; verify scala-xml does not render it as "()".
if(relationalArg.restriction != None) {
<synsem:propertyDomain rdf:resource={relationalArg.restriction.get}/>
}
}
</ontolex:LexicalSense>
</ontolex:sense> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePPFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
{ relationalArg.toXML(objURI,namer,true) }
</synsem:SyntacticFrame>
</synsem:synBehavior>
}
// Legacy lemon rendering; mirrors senseOntoLexXML (note toXML flag is false here).
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val objURI = namer("adjective",lemma.toString(),Some("attributive"))
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<rdf:Property rdf:about={property}/>
</lemon:reference>
{registerXML(register)}
<lemon:subjOfProp>
<lemon:Argument rdf:about={subjURI}/>
</lemon:subjOfProp>
<lemon:objOfProp>
<lemon:Argument rdf:about={objURI}/>
</lemon:objOfProp>
{
if(relationalArg.restriction != None) {
<lemon:propertyDomain rdf:resource={relationalArg.restriction.get}/>
}
}
</lemon:LexicalSense>
</lemon:sense> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePPFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
{ relationalArg.toXML(objURI,namer,false) }
</lemon:Frame>
</lemon:synBehavior>
}
}
/**
 * A gradable adjective whose meaning is one or more memberships on a scale
 * (e.g. bounded regions of a numeric datatype property). For every
 * [[ScalarMembership]] two senses are emitted: an owl:Class (optionally with
 * an equivalent-class datatype restriction built from the boundaries) marked
 * as a co-/contravariant oils scalar, and a direct mapping to the scale
 * property. Three frames follow: predicative, attributive, and a scale frame.
 *
 * NOTE(review): every membership calls namer(...) with the same "sense" /
 * "reference" keys, and the scale frame reuses the "attrFrame" key — whether
 * distinct URIs result depends on the URINamer implementation; confirm.
 */
case class ScalarAdjective(val lemma : AP,
val scalarMemberships : Seq[ScalarMembership] = Nil,
val forms : Seq[Form] = Nil,
val register : Option[Register] = None) extends Adjective {
// Copy-style builders: each returns a new instance with the extra form(s)/register.
def makeWithForm(form : Form) = ScalarAdjective(lemma,scalarMemberships,forms :+ form,register)
protected def makeWithForms(otherForms : Seq[Form]) = ScalarAdjective(lemma,scalarMemberships,forms ++ otherForms,register)
def withRegister(register : Register) = ScalarAdjective(lemma,scalarMemberships,forms,Some(register))
// OntoLex-Lemon rendering.
protected def senseOntoLexXML(namer : URINamer) = {
// Argument URIs: predicative/attributive subject plus the scale-sense pair.
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val scaleSubjURI = namer("adjective",lemma.toString(),Some("scaleSubj"))
val scaleObjURI = namer("adjective",lemma.toString(),Some("scaleObj"))
// Two senses per scalar membership, flattened into a single node sequence.
(for(ScalarMembership(property,forClass,boundary,direction,boundary2) <- scalarMemberships) yield {
<ontolex:sense>
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference>
<owl:Class rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
{
if(forClass != null) {
<rdfs:subClassOf rdf:resource={forClass}/>
}
}
{
if(!boundary.isNaN) {
<owl:equivalentClass>
<owl:Restriction>
<owl:onProperty rdf:resource={property}/>
<owl:someValuesFrom>
<rdfs:Datatype>
<owl:withRestrictions rdf:parseType="Collection">
<rdf:Description>{ if(direction == positive) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
} else if(direction == central) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
<xsd:maxExclusive>{boundary2}</xsd:maxExclusive>
} else {
<xsd:maxExclusive>{boundary}</xsd:maxExclusive>
}
}</rdf:Description>
</owl:withRestrictions>
</rdfs:Datatype>
</owl:someValuesFrom>
</owl:Restriction>
</owl:equivalentClass>
}
}
<oils:boundTo rdf:resource={property}/>
{
if(direction == positive) {
<rdfs:subClassOf rdf:resource="http://lemon-model.net/oils#CovariantScalar"/>
} else {
<rdfs:subClassOf rdf:resource="http://lemon-model.net/oils#ContravariantScalar"/>
}
}
</owl:Class>
</ontolex:reference>
{registerXML(register)}
<synsem:isA>
<synsem:SyntacticArgument rdf:about={subjURI}/>
</synsem:isA>
</ontolex:LexicalSense>
</ontolex:sense> :+
<ontolex:sense>
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference rdf:resource={property}/>
<synsem:subjOfProp rdf:resource={scaleSubjURI}/>
<synsem:objOfProp rdf:resource={scaleObjURI}/>
</ontolex:LexicalSense>
</ontolex:sense>
}).flatten :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveScaleFrame")}/>
<lexinfo:copulativeSubject rdf:resource={scaleSubjURI}/>
<lexinfo:adverbialComplement rdf:resource={scaleObjURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior>
}
// Legacy lemon rendering; mirrors senseOntoLexXML with the lemon vocabulary.
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val scaleSubjURI = namer("adjective",lemma.toString(),Some("scaleSubj"))
val scaleObjURI = namer("adjective",lemma.toString(),Some("scaleObj"))
(for(ScalarMembership(property,forClass,boundary,direction,boundary2) <- scalarMemberships) yield {
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<owl:Class rdf:about={namer("adjective",lemma.toString(),Some("reference"))}>
{
if(forClass != null) {
<rdfs:subClassOf rdf:resource={forClass}/>
}
}
{
if(!boundary.isNaN) {
<owl:equivalentClass>
<owl:Restriction>
<owl:onProperty rdf:resource={property}/>
<owl:someValuesFrom>
<rdfs:Datatype>
<owl:withRestrictions rdf:parseType="Collection">
<rdf:Description>{ if(direction == positive) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
} else if(direction == central) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
<xsd:maxExclusive>{boundary2}</xsd:maxExclusive>
} else {
<xsd:maxExclusive>{boundary}</xsd:maxExclusive>
}
}</rdf:Description>
</owl:withRestrictions>
</rdfs:Datatype>
</owl:someValuesFrom>
</owl:Restriction>
</owl:equivalentClass>
}
}
<oils:boundTo rdf:resource={property}/>
{
if(direction == positive) {
<rdfs:subClassOf rdf:resource="http://lemon-model.net/oils#CovariantScalar"/>
} else {
<rdfs:subClassOf rdf:resource="http://lemon-model.net/oils#ContravariantScalar"/>
}
}
</owl:Class>
</lemon:reference>
{registerXML(register)}
<lemon:isA>
<lemon:Argument rdf:about={subjURI}/>
</lemon:isA>
</lemon:LexicalSense>
</lemon:sense> :+
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference rdf:resource={property}/>
<lemon:subjOfProp rdf:resource={scaleSubjURI}/>
<lemon:objOfProp rdf:resource={scaleObjURI}/>
</lemon:LexicalSense>
</lemon:sense>
}).flatten :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("predFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
</lemon:Frame>
</lemon:synBehavior> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</lemon:Frame>
</lemon:synBehavior> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("attrFrame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveScaleFrame")}/>
<lexinfo:copulativeSubject rdf:resource={scaleSubjURI}/>
<lexinfo:adverbialComplement rdf:resource={scaleObjURI}/>
</lemon:Frame>
</lemon:synBehavior>
}
}
/*case class ScalarQuantifyingAdjective(val lemma : AP,
val scalarMembership : ScalarMembership,
val forms : Seq[Form] = Nil) extends Adjective {
def makeWithForm(form : Form) = ScalarQuantifyingAdjective(lemma,scalarMembership,forms :+ form)
protected def makeWithForms(otherForms : Seq[Form]) = ScalarQuantifyingAdjective(lemma,scalarMembership,forms ++ otherForms)
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val objURI = namer("adjective",lemma.toString(),Some("object"))
<lemon:sense>
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<owl:DatatypeProperty rdf:resource={scalarMembership.property}>
{
if(!scalarMembership.boundary.isNaN) {
<rdfs:range>
<owl:Restriction>
<owl:onProperty rdf:resource={scalarMembership.property}/>
<owl:someValuesForm>
<rdfs:Datatype>
<owl:withRestrictions rdf:parseType="Collection">
<rdf:Description>{
if(scalarMembership.direction == positive) {
<xsd:minExclusive>{scalarMembership.boundary}</xsd:minExclusive>
} else {
<xsd:maxExclusive>{scalarMembership.boundary}</xsd:maxExclusive>
}
}</rdf:Description>
</owl:withRestrictions>
</rdfs:Datatype>
</owl:someValuesForm>
</owl:Restriction>
</rdfs:range>
}
}
</owl:DatatypeProperty>
</lemon:reference>
<lemon:subjOfProp>
<lemon:Argument rdf:about={subjURI}/>
</lemon:subjOfProp>
<lemon:objOfProp>
<lemon:Argument rdf:about={subjURI}/>
</lemon:objOfProp>
</lemon:LexicalSense>
</lemon:sense> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveScaleFrame")}/>
<lexinfo:copulativeSubject rdf:resource={subjURI}/>
<lexinfo:adverbialComplement rdf:resource={objURI}/>
</lemon:Frame>
</lemon:synBehavior>
}
}*/
/*
case class ScalarParticleAdjective(val lemma : AP,
val scalarMemberships : Seq[ScalarMembership] = Nil,
val forms : Seq[Form] = Nil) extends Adjective[ScalarParticleAdjective] {
def makeWithForm(form : Form) = ScalarAdjective(lemma,scalarMemberships,forms :+ form)
protected def makeWithForms(otherForms : Seq[Form]) = ScalarAdjective(lemma,scalarMemberships,forms ++ otherForms)
protected def senseOntoLexXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val subjURI = namer("adjective",lemma.toString(),Some("object"))
<ontolex:sense>
{
for(ScalarMembership(property,boundary,direction) <- scalarMemberships) yield {
<ontolex:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<rdf:type rdf:resource="http://www.w3.org/ns/lemon/synsem#OntoMap"/>
<ontolex:reference>
<rdfs:Datatype>
<owl:withRestrictions rdf:parseType="Collection">
<rdf:Description>{
if(direction == positive) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
} else {
<xsd:maxExclusive>{boundary}</xsd:maxExclusive>
}
}
</rdf:Description>
</owl:withRestrictions>
</rdfs:Datatype>
</ontolex:reference>
<synsem:isA>
<synsem:SyntacticArgument rdf:about={subjURI}/>
</synsem:isA>
</ontolex:LexicalSense>
}
}
</ontolex:sense> :+
<synsem:synBehavior>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:subject rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</synsem:SyntacticFrame>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveComparativeFrame")}/>
<lexinfo:subject rdf:resource={subjURI}/>
<lexinfo:comparativeAdjunct rdf:resource={objURI}/>
</synsem:SyntacticFrame>
<synsem:SyntacticFrame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveSuperlativeFrame")}/>
<lexinfo:superlativeAdjunct rdf:resource={objURI}/>
</synsem:SyntacticFrame>
</synsem:synBehavior>
}
protected def senseXML(namer : URINamer) = {
val subjURI = namer("adjective",lemma.toString(),Some("subject"))
val subjURI = namer("adjective",lemma.toString(),Some("object"))
<lemon:sense>
{
for(ScalarMembership(property,boundary,direction) <- scalarMemberships) yield {
<lemon:LexicalSense rdf:about={namer("adjective",lemma.toString(),Some("sense"))}>
<lemon:reference>
<rdfs:Datatype>
<owl:withRestrictions rdf:parseType="Collection">
<rdf:Description>{
if(direction == positive) {
<xsd:minExclusive>{boundary}</xsd:minExclusive>
} else {
<xsd:maxExclusive>{boundary}</xsd:maxExclusive>
}
}
</rdf:Description>
</owl:withRestrictions>
</rdfs:Datatype>
</lemon:reference>
<lemon:isA>
<lemon:Argument rdf:about={subjURI}/>
</lemon:isA>
</lemon:LexicalSense>
}
}
</lemon:sense> :+
<lemon:synBehavior>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectivePredicativeFrame")}/>
<lexinfo:subject rdf:resource={subjURI}/>
</lemon:Frame>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveAttributiveFrame")}/>
<lexinfo:attributiveArg rdf:resource={subjURI}/>
</lemon:Frame>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveComparativeFrame")}/>
<lexinfo:subject rdf:resource={subjURI}/>
<lexinfo:comparativeAdjunct rdf:resource={objURI}/>
</lemon:Frame>
<lemon:Frame rdf:about={namer("adjective",lemma.toString(),Some("frame"))}>
<rdf:type rdf:resource={lexinfo("AdjectiveSuperlativeFrame")}/>
<lexinfo:superlativeAdjunct rdf:resource={objURI}/>
</lemon:Frame>
</lemon:synBehavior>
}
}*/
| jmccrae/lemon.patterns | src/main/scala/Adjectives.scala | Scala | apache-2.0 | 35,565 |
package org.jetbrains.plugins.scala.lang.psi.api.statements
/**
 * PSI element for a function *declaration* — a `def` without a body — as
 * opposed to a definition. Combines [[ScFunction]] behaviour with that of a
 * typed declaration.
 *
 * @author Alexander Podkhalyuzin
 * Date: 22.02.2008
 * Time: 9:49:23
 */
trait ScFunctionDeclaration extends ScFunction with ScTypedDeclaration
package basics.variance
import org.scalatest.FunSuite
/**
 * Exercises variance behaviour of the ItemHolder types: an invariant holder
 * accepting a subtype argument, and a covariant holder parameterised by the
 * subtype directly.
 *
 * NOTE(review): the first test reads `.msg` while the second reads
 * `.message` — presumably Item and SubItem expose differently named fields;
 * confirm against their definitions.
 */
class VarianceTest extends FunSuite {
test("VarianceTest") {
val item = new ItemHolder(new Item("GGG"))
//item.getItem returns Item()
assert(item.getItem(new SubItem("item")).msg == "item")
}
test("VarianceTest2") {
assert(new ItemHolderCovariant[SubItem]().getItem(new SubItem("Item")).message == "Item");
//new ItemHolderCovariant[SubItem]().getItem(new Item("Item")).message == "Item" <--Won't compile as expected
}
}
package com.alexknvl.btce.api
import spray.json._
/** A single currency, identified by its ticker name (e.g. "btc"). */
case class Currency(name: String) {
  /** Renders as the bare currency name, matching the API wire format. */
  override def toString: String = name
}
/** An ordered currency pair, rendered in the API's "first_second" wire form. */
case class Pair(first: Currency, second: Currency) {
  /** Joins the two currency names with an underscore (e.g. "btc_usd"). */
  override def toString: String = first.name + "_" + second.name
}
object Pair {
  /** Builds a pair from two bare currency names. */
  def apply(firstName: String, secondName: String): Pair =
    Pair(Currency(firstName), Currency(secondName))

  /** Parses an API pair name of the form "xxx_yyy"; any other shape is rejected. */
  def apply(pairName: String): Pair = {
    val parts = pairName.split('_')
    if (parts.length == 2) Pair(parts(0), parts(1))
    else throw new IllegalArgumentException(
      "Pair name should consist of currency names separated by '_'.")
  }
}
/**
 * Per-currency account balances as reported by the BTC-e API.
 * Field names and order must match the JSON object keys (see FundsFormat,
 * which uses spray-json's jsonFormat11).
 */
case class Funds(
usd: BigDecimal,
btc: BigDecimal,
ltc: BigDecimal,
nmc: BigDecimal,
rur: BigDecimal,
eur: BigDecimal,
nvc: BigDecimal,
trc: BigDecimal,
ppc: BigDecimal,
ftc: BigDecimal,
xpm: BigDecimal)
/**
 * spray-json formats shared by the public and trade API protocols:
 * currencies and pairs travel as plain strings, funds as a flat object.
 */
private[btce] trait CommonApiFormats extends DefaultJsonProtocol {
// Currencies serialize to their bare name; names are lower-cased on read.
implicit object CurrencyFormat extends RootJsonFormat[Currency] {
def write(currency: Currency) = JsString(currency.name)
def read(value: JsValue) =
value match {
case JsString(name) => Currency(name.toLowerCase)
case _ => throw new DeserializationException("Expected currency name.")
}
}
// Pairs use the "xxx_yyy" form produced by Pair.toString; lower-cased on read.
implicit object PairFormat extends RootJsonFormat[Pair] {
def write(pair: Pair) = JsString(pair.toString())
def read(value: JsValue) =
value match {
case JsString(name) => Pair(name.toLowerCase)
case _ => throw new DeserializationException("Expected pair of currencies.")
}
}
// Derived format: field order must match the Funds constructor.
implicit val FundsFormat = jsonFormat11(Funds)
}
/** Exchange metadata: server time plus per-pair trading parameters. */
case class Info(
serverTime: Long,
pairs: Map[Pair, PairInfo])
/** Trading parameters for one pair (see PairInfoFormat for the wire keys). */
case class PairInfo(
decimalPlaces: Int,
minPrice: BigDecimal,
maxPrice: BigDecimal,
minAmount: BigDecimal,
hidden: Boolean,
fee: BigDecimal)
/** API-key permissions; decoded from 0/1 integers on the wire (see RightsFormat). */
case class Rights(
info: Boolean,
trade: Boolean,
withdraw: Boolean)
/** Snapshot of the authenticated account's state. */
case class AccountInfo(
funds: Funds,
rights: Rights,
transactionCount: Long,
openOrders: Long,
serverTime: Long)
// Market ticker. NOTE(review): `updated` is presumably a Unix timestamp — confirm.
case class Ticker(
high: BigDecimal,
low: BigDecimal,
avg: BigDecimal,
vol: BigDecimal,
vol_cur: BigDecimal,
last: BigDecimal,
buy: BigDecimal,
sell: BigDecimal,
updated: Long)
// A public trade. `tpe` is the API's "type" string (renamed: `type` is a keyword).
case class Trade(
tpe: String,
price: BigDecimal,
amount: BigDecimal,
tid: BigInt,
timestamp: Long)
/** Order book: (price, amount) levels for asks and bids. */
case class Depth(
asks: List[(BigDecimal, BigDecimal)],
bids: List[(BigDecimal, BigDecimal)])
/**
 * spray-json formats for the public BTC-e API responses.
 *
 * Hand-written formats here encode booleans as 0/1 integers, matching the
 * API's wire representation.
 */
private[btce] trait PublicApiFormats extends CommonApiFormats {
implicit object PairInfoFormat extends RootJsonFormat[PairInfo] {
def write(pairInfo: PairInfo) = JsObject(
"decimal_places" -> JsNumber(pairInfo.decimalPlaces),
"min_price" -> JsNumber(pairInfo.minPrice),
"max_price" -> JsNumber(pairInfo.maxPrice),
"min_amount" -> JsNumber(pairInfo.minAmount),
"hidden" -> JsNumber(if (pairInfo.hidden) 1 else 0),
"fee" -> JsNumber(pairInfo.fee))
def read(value: JsValue): PairInfo =
value.asJsObject.getFields(
"decimal_places", "min_price", "max_price", "min_amount", "hidden", "fee"
) match {
case Seq(JsNumber(decimalPlaces), JsNumber(minPrice), JsNumber(maxPrice), JsNumber(minAmount),
JsNumber(hidden), JsNumber(fee)) =>
// `hidden` arrives as 0/1; any non-zero value counts as hidden.
PairInfo(decimalPlaces.toInt, minPrice, maxPrice, minAmount, hidden != BigDecimal(0), fee)
// Fixed error message: previously copy-pasted from PairFormat
// ("Expected pair of currencies."), which was misleading here.
case _ => throw new DeserializationException("Expected PairInfo object.")
}
}
implicit val InfoFormat = jsonFormat(Info,
"server_time", "pairs")
implicit object RightsFormat extends RootJsonFormat[Rights] {
// Bug fix: write() previously omitted "withdraw", so a written Rights
// object could not be read back (read requires all three fields).
def write(rights: Rights) = JsObject(
"info" -> JsNumber(if (rights.info) 1 else 0),
"trade" -> JsNumber(if (rights.trade) 1 else 0),
"withdraw" -> JsNumber(if (rights.withdraw) 1 else 0))
def read(value: JsValue) =
value.asJsObject.getFields("info", "trade", "withdraw") match {
case Seq(JsNumber(info), JsNumber(trade), JsNumber(withdraw)) =>
Rights(info != BigDecimal(0), trade != BigDecimal(0), withdraw != BigDecimal(0))
case _ => throw new DeserializationException("Expected Rights object.")
}
}
implicit val AccountInfoFormat = jsonFormat(AccountInfo,
"funds", "rights", "transaction_count", "open_orders", "server_time")
implicit val TickerFormat = jsonFormat9(Ticker)
// "type" is a Scala keyword, so the case-class field is `tpe`.
implicit val TradeFormat = jsonFormat(Trade,
"type", "price", "amount", "tid", "timestamp")
implicit val DepthFormat = jsonFormat2(Depth)
}
// One row of the account's transaction history. NOTE(review): `tpe` and
// `status` are raw numeric codes defined by the BTC-e API; their meanings
// are not visible in this file.
case class TransactionHistoryEntry(
tpe: Int,
amount: BigDecimal,
currency: Currency,
desc: String,
status: Int,
timestamp: Long)
/** One row of the account's trade history (see TradeHistoryEntryFormat). */
case class TradeHistoryEntry(
orderId: BigInt,
pair: Pair,
tpe: String,
amount: BigDecimal,
rate: BigDecimal,
isMine: Boolean,
timestamp: Long)
/** One currently open order. */
case class OrderListEntry(
pair: Pair,
tpe: String,
amount: BigDecimal,
rate: BigDecimal,
createdTimestamp: Long,
status: Int)
/** Result of placing a trade: filled/remaining amounts and updated balances. */
case class TradeResponse(
received: BigDecimal,
remains: BigDecimal,
orderId: Long,
funds: Funds)
/** Result of cancelling an order: its id and the updated balances. */
case class CancelOrderResponse(
orderId: Long,
funds: Funds)
/**
 * spray-json formats for the authenticated (trade) BTC-e API responses.
 * Wire keys use snake_case; the "type"/"is_your_order" fields are mapped to
 * `tpe`/`isMine`, and booleans travel as 0/1 integers.
 */
private[btce] trait TradeApiFormats extends CommonApiFormats {
implicit val TransactionHistoryEntryFormat = jsonFormat(TransactionHistoryEntry,
"type", "amount", "currency", "desc", "status", "timestamp")
// Hand-written because is_your_order is a 0/1 integer, not a JSON boolean.
implicit object TradeHistoryEntryFormat extends RootJsonFormat[TradeHistoryEntry] {
def write(order: TradeHistoryEntry) = JsObject(
"order_id" -> JsNumber(order.orderId),
"pair" -> order.pair.toJson,
"type" -> JsString(order.tpe),
"amount" -> JsNumber(order.amount),
"rate" -> JsNumber(order.rate),
"is_your_order" -> JsNumber(if (order.isMine) 1 else 0),
"timestamp" -> JsNumber(order.timestamp))
def read(value: JsValue) =
value.asJsObject.getFields(
"order_id",
"pair",
"type",
"amount",
"rate",
"is_your_order",
"timestamp"
) match {
case Seq(JsNumber(orderId), JsString(pair), JsString(tpe), JsNumber(amount),
JsNumber(rate), JsNumber(isMine), JsNumber(timestamp)) =>
TradeHistoryEntry(orderId.toBigInt(), Pair(pair), tpe, amount, rate, isMine != BigDecimal(0),
timestamp.toLong)
case _ => throw new DeserializationException("Expected TradeHistoryEntry object.")
}
}
implicit val OrderListEntryFormat = jsonFormat(OrderListEntry,
"pair", "type", "amount", "rate", "timestamp_created", "status")
implicit val TradeResponseFormat = jsonFormat(TradeResponse,
"received", "remains", "order_id", "funds")
implicit val CancelOrderResponseFormat = jsonFormat(CancelOrderResponse,
"order_id", "funds")
}
// Single import point bundling every JSON format (public + trade) for the API.
private[btce] object ApiFormats extends PublicApiFormats with TradeApiFormats
package daos
import models.User
import play.api.libs.json.JsValue
import scala.concurrent.Future
/**
 * Data-access contract for [[User]] persistence. All operations are
 * asynchronous and complete the returned Future when the store responds.
 */
trait UserDAO {
/** Looks up a user by id; None when no such user exists. */
def find(id: Long): Future[Option[User]]
/** Returns every stored user. */
def findAll(): Future[List[User]]
/** Persists a new user and yields an id. NOTE(review): presumably the generated id — confirm with the implementation. */
def insert(user: User): Future[Long]
/** Deletes the user with the given id. */
def remove(id: Long): Future[Unit]
/** Applies `update` to the stored user. NOTE(review): presumably a partial JSON patch — confirm with the implementation. */
def update(id: Long, update: JsValue): Future[Unit]
}
| luongbalinh/play-mongo | app/daos/UserDAO.scala | Scala | apache-2.0 | 331 |
package cwe.scala.library.code.templates
import cwe.scala.library.serviceproviders._
import cwe.scala.library.audit._
/**
 * A service provider for the package: lazily creates and caches singleton
 * service instances, auditing each creation.
 *
 * Override the protected `create*` factory methods in a subclass to
 * substitute alternative service implementations.
 */
class DummyServiceProvider {
  private val auditor = AuditServiceProvider.createAuditor(this)

  // Lazily created singletons. These fields MUST be @volatile for the
  // double-checked locking below to be correct under the JVM memory model;
  // without it another thread may observe a partially constructed instance.
  @volatile private var dummyServiceInstance: DummyService = null
  @volatile private var dependentServiceInstance: DependentService = null

  /** Returns the shared DummyService, creating (and auditing) it on first use. */
  def getDummyService(): DummyService = {
    if (dummyServiceInstance == null) {
      synchronized {
        if (dummyServiceInstance == null) {
          dummyServiceInstance = createDummyService()
          auditor.createSingleton(dummyServiceInstance)
        }
      }
    }
    dummyServiceInstance
  }

  /** Returns the shared DependentService, creating (and auditing) it on first use. */
  def getDependentService(): DependentService = {
    if (dependentServiceInstance == null) {
      synchronized {
        if (dependentServiceInstance == null) {
          dependentServiceInstance = createDependentService()
          auditor.createSingleton(dependentServiceInstance)
        }
      }
    }
    dependentServiceInstance
  }

  // Factory methods — override to provide custom implementations.
  protected def createDummyService(): DummyService = new DummyService()
  protected def createDependentService(): DependentService = new DependentService()
}
/**
 * Companion access point: holds the globally configured provider instance
 * (injectable for testing via setInstance) and delegates service lookups
 * to it, creating a default provider lazily on first use.
 */
object DummyServiceProvider extends IServiceProvider {
  // @volatile is required for the double-checked read in getInstance() to be
  // safe under the JVM memory model (previously a plain var — a data race).
  @volatile private var instance: DummyServiceProvider = null

  // dependency injection
  /**
   * Injects a DummyServiceProvider to be used.
   * @throws IllegalArgumentException if `sp` is null
   */
  def setInstance(sp: DummyServiceProvider) {
    if (sp == null) throw new IllegalArgumentException("DummyServiceProvider cannot be null")
    else synchronized {
      instance = sp
      AuditServiceProvider.createAuditor(sp).createSingleton(sp)
      ServiceProviders.registerServiceProvider(this)
    }
  }

  private def getInstance(): DummyServiceProvider = {
    if (instance == null) { synchronized { if (instance == null) setInstance(new DummyServiceProvider) } }
    instance
  }

  /**
   * Resets ServiceProvider to built in default (recreated lazily on next use).
   * The null-check now happens inside the lock to avoid a racy check-then-act.
   */
  def reset() = synchronized { if (instance != null) instance = null }

  // Methods delegation to the configured DummyServiceProvider instance.
  def getDummyService(): DummyService = getInstance().getDummyService()
  def getDependentService(): DependentService = getInstance().getDependentService()
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{Experimental, InterfaceStability}
import org.apache.spark.sql.streaming.{ProcessingTime, Trigger}
import org.apache.spark.unsafe.types.CalendarInterval
/**
 * A [[Trigger]] that continuously processes streaming data, asynchronously checkpointing at
 * the specified interval.
 *
 * @param intervalMs checkpoint interval in milliseconds; must be non-negative
 */
@InterfaceStability.Evolving
case class ContinuousTrigger(intervalMs: Long) extends Trigger {
require(intervalMs >= 0, "the interval of trigger should not be negative")
}
private[sql] object ContinuousTrigger {

  /**
   * Parses a duration string (e.g. "10 seconds" or "interval 10 seconds") into a
   * [[ContinuousTrigger]]. Month/year intervals are rejected because they do not
   * have a fixed length in milliseconds.
   *
   * @throws IllegalArgumentException if the string is blank, unparsable, or
   *                                  contains a month/year component.
   */
  def apply(interval: String): ContinuousTrigger = {
    if (StringUtils.isBlank(interval)) {
      throw new IllegalArgumentException(
        "interval cannot be null or blank.")
    }
    // CalendarInterval.fromString requires the "interval " prefix; add it when absent.
    val normalized =
      if (interval.startsWith("interval")) interval
      else "interval " + interval
    val parsed = CalendarInterval.fromString(normalized)
    if (parsed == null) {
      throw new IllegalArgumentException(s"Invalid interval: $interval")
    }
    if (parsed.months > 0) {
      throw new IllegalArgumentException(s"Doesn't support month or year interval: $interval")
    }
    // CalendarInterval stores microseconds; the trigger works in milliseconds.
    ContinuousTrigger(parsed.microseconds / 1000)
  }

  /** Converts a Scala [[Duration]] into a trigger. */
  def apply(interval: Duration): ContinuousTrigger = {
    ContinuousTrigger(interval.toMillis)
  }

  /** Java-friendly alias of `apply(String)`. */
  def create(interval: String): ContinuousTrigger = {
    apply(interval)
  }

  /** Java-friendly factory taking an explicit [[TimeUnit]]. */
  def create(interval: Long, unit: TimeUnit): ContinuousTrigger = {
    ContinuousTrigger(unit.toMillis(interval))
  }
}
| ahnqirage/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTrigger.scala | Scala | apache-2.0 | 2,467 |
package greetings1 {

  package hello {

    /** Simple greeter object addressed as `greetings1.hello.Hi`. */
    object Hi {
      def speak: Unit = println("Hi!")
    }
  }

  /** Package object: `talk` is visible as `greetings1.hello.talk`. */
  package object hello {
    def talk: Unit = println("Hello!!")
  }
}
| grzegorzbalcerek/scala-book-examples | examples/PackageObject1.scala | Scala | mit | 163 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, IdentityOutputShape}
import com.intel.analytics.bigdl.nn.keras.KerasLayer
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.Net
import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Applies the randomized leaky rectified linear unit element-wise to the input.
*
* f(x) = max(0,x) + a * min(0, x) where a ~ U(l, u).
*
* In the training mode, negative inputs are multiplied by a factor drawn
* from a uniform random distribution U(l, u).
* In the evaluation mode, a RReLU behaves like a LeakyReLU with a constant mean
* factor a = (l + u) / 2.
* If l == u, a RReLU essentially becomes a LeakyReLU.
* Regardless of operating in in-place mode a RReLU will internally
* allocate an input-sized noise tensor to store random factors for negative inputs.
* For reference, see [Empirical Evaluation of Rectified Activations in Convolutional
* Network](http://arxiv.org/abs/1505.00853).
*
* When you use this layer as the first layer of a model, you need to provide
* the argument inputShape (a Single Shape, does not include the batch dimension).
*
* Remark: This layer is from Torch and wrapped in Keras style.
*
* @param lower Lower boundary of the uniform random distribution. Default is 1.0/8.
* @param upper Upper boundary of the uniform random distribution. Default is 1.0/3.
* @param inputShape A Single Shape, does not include the batch dimension.
* @tparam T The numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class RReLU[T: ClassTag](
    val lower: Double = 1.0/8,
    val upper: Double = 1.0/3,
    val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
  extends KerasLayer[Tensor[T], Tensor[T], T](KerasUtils.addBatch(inputShape))
    with IdentityOutputShape with Net {

  // Instantiates the wrapped BigDL RReLU module with this layer's bounds.
  // The output shape equals the input shape (see IdentityOutputShape above).
  override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
    val layer = com.intel.analytics.bigdl.nn.RReLU(lower, upper)
    // Upcast to the generic module interface required by doBuild's signature.
    layer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
  }
}
object RReLU {
  /**
   * Factory for [[RReLU]]; mirrors the class constructor and its defaults.
   */
  def apply[@specialized(Float, Double) T: ClassTag](
      lower: Double = 1.0/8,
      upper: Double = 1.0/3,
      inputShape: Shape = null)(implicit ev: TensorNumeric[T]): RReLU[T] = {
    val instance = new RReLU[T](lower = lower, upper = upper, inputShape = inputShape)
    instance
  }
}
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/RReLU.scala | Scala | apache-2.0 | 3,156 |
package org.crudible.lift.util
object PropertyHelper {
  // `labelOf` takes a structural type, so the `label()` call below is made via
  // reflection; this import enables the reflectiveCalls language feature and
  // silences the corresponding -feature warning.
  import scala.language.reflectiveCalls

  /**
   * Returns the label of `base`, or "Unknown label" when none is set.
   *
   * NOTE(review): structural-type dispatch is reflection-based and slower than a
   * virtual call; consider introducing a `Labeled` trait if this becomes hot.
   */
  def labelOf(base: { def label(): Option[String] }): String =
    base.label().getOrElse("Unknown label")
}
package net.tixxit.contract
package web
import java.net.URL
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Try, Success, Failure }
import akka.actor.{ Actor, Props, ActorRefFactory }
import spray.util.actorSystem
import spray.routing._
import spray.http._
import spray.http.StatusCodes.{ OK, NotFound, BadRequest, InternalServerError,
MovedPermanently, Found }
import spray.http.MediaTypes._
import spray.http.HttpHeaders._
import spray.httpx.marshalling._
import argonaut._
import Argonaut._
import ArgonautMarshallers._
/**
 * HTTP routes for a URL shortener: a static UI, a POST endpoint that shortens a
 * URL, and GET endpoints that expand a key (301 redirect) or return its metadata
 * as JSON. Failures from the backing [[Shortener]] map to 500; unknown keys to 404.
 */
trait ShortenerService extends HttpService {

  // Future composition runs on the actor system's default dispatcher.
  private implicit def executionContext: ExecutionContext = actorSystem.dispatcher

  /** Asynchronous backing store mapping keys to URLs. */
  def shortener: Shortener[Future]

  // Looks up `key` and feeds the resolved URL to `f`; completes 404 for an
  // unknown key and 500 when the lookup itself fails.
  def requireURL(key: String)
                (f: URL => RequestContext => Unit): RequestContext => Unit = { ctx =>
    shortener.expand(key) onComplete {
      case Success(Some(url)) => f(url)(ctx)
      case Success(None) => ctx.complete(NotFound)
      case Failure(e) => ctx.complete(InternalServerError)  // NOTE(review): cause is not logged
    }
  }

  /** GET /:key/meta — responds with {"key": ..., "url": ...} as JSON. */
  def meta(key: String): RequestContext => Unit =
    requireURL(key) { url => _.complete(Map("key" -> key, "url" -> url.toString).asJson) }

  /** GET /:key — 301 (MovedPermanently) redirect to the stored URL. */
  def expand(key: String): RequestContext => Unit =
    requireURL(key) { url => _.redirect(Uri(url.toString), MovedPermanently) }

  // Only plain http/https targets are accepted for shortening.
  private final def isHttp(url: URL): Boolean =
    url.getProtocol == "http" || url.getProtocol == "https"

  // Parses the raw form value; malformed or non-http(s) URLs are rejected with 400.
  def parseURL(f: URL => RequestContext => Unit): String => RequestContext => Unit = { url0 =>
    Try(new URL(url0)) match {
      case Success(url) if isHttp(url) => f(url)
      case _ => _.complete(BadRequest)
    }
  }

  // POST / — shortens the URL and 302 (Found) redirects to the new key's metadata.
  def shorten: String => RequestContext => Unit = parseURL { url => ctx =>
    shortener.shorten(url) onComplete {
      case Success(key) => ctx.redirect(Uri(s"/$key/meta"), Found)
      case Failure(e) => ctx.complete(InternalServerError)
    }
  }

  // Directive matching POST requests that carry a `url` form field.
  def postWithUrl = post & formField('url.as[String])

  // Static UI: index page at / plus assets under /static, served from resources.
  val uiRoute =
    path("") {
      getFromResource("web/index.html", `text/html`)
    } ~
    pathPrefix("static" ~ Slash) {
      getFromResourceDirectory("web/static")
    }

  // API routes: shorten (POST /), metadata (GET /:key/meta), expansion (GET /:key).
  // Order matters: the more specific "/:key/meta" must precede the "/:key" catch-all.
  val shortenRoute =
    path("") {
      postWithUrl(shorten)
    } ~
    path(Segment / "meta") { key =>
      get(meta(key))
    } ~
    path(Segment) { key =>
      get(expand(key))
    }
}
/** Actor hosting the shortener routes; spray drives it via `runRoute`. */
final class ShortenerServiceActor(val shortener: Shortener[Future])
    extends HttpServiceActor with ShortenerService {
  // UI routes are tried first, then the shortener API routes.
  def receive = runRoute(uiRoute ~ shortenRoute)
}
object ShortenerServiceActor {
  /**
   * Props factory — keeps actor construction details out of callers.
   * Uses the (Class, args) overload so the Props stays serializable-friendly.
   */
  def props(shortener: Shortener[Future]) =
    Props(classOf[ShortenerServiceActor], shortener)
}
| tixxit/contract | src/main/scala/net/tixxit/contract/web/ShortenerService.scala | Scala | mit | 2,655 |
package com.bokland.rubbercube.measure.kpi.mobile
import org.scalatest._
import com.bokland.rubbercube.sliceanddice.es.EsExecutionEngine
import org.elasticsearch.common.settings.ImmutableSettings
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.transport.InetSocketTransportAddress
import com.bokland.rubbercube.{DateAggregationType, DateAggregation, Dimension}
import com.bokland.rubbercube.sliceanddice.RequestResult
/**
* Created by remeniuk on 5/3/14.
*/
/**
 * Integration spec for mobile KPI measures (revenue per user, sessions per user).
 *
 * NOTE(review): these tests require a live Elasticsearch node on localhost:9300
 * with a populated "rubbercube" index — they are integration tests, not unit tests,
 * and the expected values below are tied to that fixture data.
 */
class MobileMeasuresSpec extends WordSpec with ShouldMatchers with BeforeAndAfterAll {

  // Assigned once in beforeAll, after the ES transport client is connected.
  var engine: EsExecutionEngine = _

  override protected def beforeAll = {
    val settings = ImmutableSettings.settingsBuilder()
      .put("cluster.name", "elasticsearch")
      .put("network.server", true).build()

    val client = new TransportClient(settings)
      .addTransportAddress(new InetSocketTransportAddress("localhost", 9300))

    engine = new EsExecutionEngine(client, "rubbercube")
  }

  // NOTE(review): "Calcualte" is a typo of "Calculate" in the test name; it is a
  // runtime string (the test's display name), so it is left unchanged here.
  "Calcualte ARPDAU" in {
    // Revenue-per-user query over session and purchase cubes, grouped by day.
    val query = RevenuePerUser(
      sessionCube = "session",
      purchaseCube = "purchase",
      sessionDateField = "date",
      purchaseDateField = "date",
      purchaseAmountField = "amount")
      .generateQuery(Seq(Dimension("date") -> DateAggregation(DateAggregationType.Day)))

    val result = engine.execute(query)

    result should be(RequestResult(
      Seq(
        Map("date" -> 1388534400000l, "total_revenue" -> 21.979999999999997, "active_users" -> 2, "revenue_per_user" -> 10.989999999999998),
        Map("date" -> 1388534400000l, "total_revenue" -> 6.98, "active_users" -> 2, "revenue_per_user" -> 3.49),
        Map("date" -> 1388534400000l, "total_revenue" -> 99.99, "active_users" -> 2, "revenue_per_user" -> 49.995)),
      None)
    )
  }

  "Calculate count of sessions per user" in {
    // Sessions-per-user ratio per day: session count / distinct parent (user) count.
    val query = SessionsPerUsers(
      sessionCube = "session",
      idField = "date")
      .generateQuery(Seq(Dimension("date") -> DateAggregation(DateAggregationType.Day)))

    val result = engine.execute(query)

    result should be(RequestResult(Seq(
      Map("date" -> 1388534400000l, "count-date" -> 3, "countdistinct-_parent" -> 2, "sessions_per_users" -> 1.5),
      Map("date" -> 1388620800000l, "count-date" -> 2, "countdistinct-_parent" -> 2, "sessions_per_users" -> 1.0),
      Map("date" -> 1388707200000l, "count-date" -> 2, "countdistinct-_parent" -> 2, "sessions_per_users" -> 1.0)),
      Some("session"))
    )
  }
}
| remeniuk/rubbercube | src/test/scala/com/bokland/rubbercube/measure/kpi/mobile/MobileMeasuresSpec.scala | Scala | mit | 2,483 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.streams.circuitbreaker
import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState
import java.lang.management.ManagementFactory
import javax.management.ObjectName
import scala.language.postfixOps
/**
 * Verifies that [[AtomicCircuitBreakerState]] exposes its configuration via JMX,
 * covering explicit parameters, config-file construction, and default fallbacks.
 * Each test creates a state under a unique name and reads the values back through
 * the platform MBean server.
 */
class CircuitBreakerStateSpec extends TestKit(ActorSystem("CircuitBreakerStateSpec"))
  with AnyFlatSpecLike with Matchers {

  // AtomicCircuitBreakerState needs a scheduler and an ExecutionContext in scope.
  implicit val scheduler = system.scheduler
  import system.dispatcher
  import scala.concurrent.duration._

  it should "use default exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-default-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds)
    assertJmxValue("params-with-default-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-default-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-default-exponential-backoff", "ResetTimeout", "20 milliseconds")
    // Defaults: effectively-unbounded max reset timeout and no exponential growth.
    assertJmxValue("params-with-default-exponential-backoff", "MaxResetTimeout", "36500 days")
    assertJmxValue("params-with-default-exponential-backoff", "ExponentialBackoffFactor", 1.0)
  }

  it should "create circuit breaker state with provided exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-custom-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds,
      2.minutes,
      16.0)
    assertJmxValue("params-with-custom-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-custom-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "ResetTimeout", "20 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "MaxResetTimeout", "2 minutes")
    assertJmxValue("params-with-custom-exponential-backoff", "ExponentialBackoffFactor", 16.0)
  }

  it should "create circuit breaker state from configuration" in {
    val config = ConfigFactory.parseString(
      """
        |max-failures = 1
        |call-timeout = 50 ms
        |reset-timeout = 20 ms
        |max-reset-timeout = 1 minute
        |exponential-backoff-factor = 16.0
      """.stripMargin)
    AtomicCircuitBreakerState("from-config", config)
    assertJmxValue("from-config", "MaxFailures", 1)
    assertJmxValue("from-config", "CallTimeout", "50 milliseconds")
    assertJmxValue("from-config", "ResetTimeout", "20 milliseconds")
    assertJmxValue("from-config", "MaxResetTimeout", "1 minute")
    assertJmxValue("from-config", "ExponentialBackoffFactor", 16.0)
  }

  it should "fallback to default values when configuration is empty" in {
    AtomicCircuitBreakerState("empty-config", ConfigFactory.empty())
    assertJmxValue("empty-config", "MaxFailures", 5)
    assertJmxValue("empty-config", "CallTimeout", "1 second")
    assertJmxValue("empty-config", "ResetTimeout", "5 seconds")
    assertJmxValue("empty-config", "MaxResetTimeout", "36500 days")
    assertJmxValue("empty-config", "ExponentialBackoffFactor", 1.0)
  }

  // Reads attribute `key` from the circuit breaker's registered MBean and
  // compares it with `expectedValue`. The name is quoted because breaker names
  // may contain characters that are special in JMX ObjectNames.
  def assertJmxValue(name: String, key: String, expectedValue: Any) = {
    val oName = ObjectName.getInstance(
      s"org.squbs.configuration:type=squbs.circuitbreaker,name=${ObjectName.quote(name)}")
    val actualValue = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key)
    actualValue shouldEqual expectedValue
  }
}
| akara/squbs | squbs-ext/src/test/scala/org/squbs/streams/circuitbreaker/CircuitBreakerStateSpec.scala | Scala | apache-2.0 | 4,118 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi
/**
 * Triplestore type identifiers as they appear in 'application.conf' and are
 * read through the application 'Settings'.
 */
object TriplestoreTypes {
  val EmbeddedJenaTdb = "embedded-jena-tdb"
  // NOTE(review): name says "GraphDB" but the value says "jena-graphdb" — looks
  // inconsistent with the other entries; confirm against application.conf.
  val EmbeddedGraphDBSE= "embedded-jena-graphdb"
  val HttpGraphDBSE = "graphdb-se"
  val HttpGraphDBFree = "graphdb-free"
  val HttpFuseki = "fuseki"
}
/** Names of the Akka dispatchers used by Knora (configured in application.conf). */
object KnoraDispatchers {

  /**
   * All normal actors should run on this dispatcher (non-blocking work only)
   */
  val KnoraActorDispatcher = "knora-actor-dispatcher"

  /**
   * All blocking operations should run on this dispatcher (blocking)
   */
  val KnoraBlockingDispatcher = "knora-blocking-dispatcher"
}
| musicEnfanthen/Knora | webapi/src/main/scala/org/knora/webapi/SettingsConstants.scala | Scala | agpl-3.0 | 1,532 |
package perceptlab
import java.io.Closeable
import javax.sound.midi.MidiSystem
import Game._
/**
 * Renders sonar levels as sustained MIDI notes: each sonar gets its own channel,
 * a pitch descending from C6 in 4-semitone steps, and a stereo pan derived from
 * its angle. Opening the synthesizer happens at object initialization.
 */
object SoundDisplay extends Closeable {
  private val C6 = 84                // MIDI note number for C6
  private val Flute = 73             // General MIDI program number used for all channels
  private val StringEnsemble1 = 48   // NOTE(review): unused in this object — dead constant?
  private val PanControl = 10        // MIDI controller #10 = pan
  private val MidPan = 64            // center pan position
  // One pan value per sonar: sin of the sonar's angle offset shifts it left/right of center.
  private val Pans = SonarAngleOffsets.map(angle => (MidPan - MidPan * math.sin(angle)).toInt)

  private val synth = MidiSystem.getSynthesizer
  synth.open()

  // One Note per sonar, created eagerly; each starts silent (velocity 0).
  private val sonarChannels = List.tabulate(SonarNumber)(Note(_))

  /** Updates every sonar note's loudness from `levels` (indexed by sonar index). */
  def setSonarLevels(levels: Seq[Int]) =
    sonarChannels.foreach { note =>
      note.adjustVolume(levels(note.sonarIndex))
    }

  /** Silences all channels and releases the synthesizer. */
  def close() = {
    sonarChannels.foreach(_.close())
    synth.close()
  }

  // A sustained note bound to one sonar's MIDI channel; loudness tracks the level.
  private case class Note(sonarIndex: Int) {
    private val note = C6 - sonarIndex * 4  // 4 semitones (a major third) lower per sonar
    private val chan = synth.getChannels()(sonarIndex)
    chan.programChange(0, Flute)
    chan.controlChange(PanControl, Pans(sonarIndex))
    chan.noteOn(note, 0) // Starts at C6, and then go third by third down for each sonar.

    private var previousVolume = 0

    // Re-strike the note at the new velocity, but only when the level changed.
    def adjustVolume(newVolume: Int) =
      if (newVolume != previousVolume) {
        chan.noteOff(note)
        chan.noteOn(note, newVolume)
        previousVolume = newVolume
      }

    def close() =
      chan.allSoundOff()
  }
}
| jletroui/perceptlab | src/main/scala/perceptlab/SoundDisplay.scala | Scala | mit | 1,301 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.calcite.rel.rules._
import org.apache.calcite.sql.fun.{OracleSqlOperatorTable, SqlStdOperatorTable}
import org.apache.calcite.sql2rel.SqlToRelConverter
import org.apache.calcite.tools.RuleSets
import org.apache.flink.table.plan.rules.datastream.DataStreamRetractionRules
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConverters._
/**
 * Tests for [[CalciteConfigBuilder]]: for each rule-set phase (normalization,
 * logical opt, physical opt, decoration) and for the SQL operator table, it
 * checks the three builder modes — default (nothing set), `replaceX` (overrides
 * built-ins, `replacesX` flag true) and `addX` (appended, flag false) — plus
 * their combination, and finally the SqlToRelConverter config override.
 */
class CalciteConfigBuilderTest {

  // --- defaults: nothing set, nothing replaced ---

  @Test
  def testDefaultRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder().build()

    assertFalse(cc.replacesNormRuleSet)
    assertFalse(cc.getNormRuleSet.isDefined)

    assertFalse(cc.replacesLogicalOptRuleSet)
    assertFalse(cc.getLogicalOptRuleSet.isDefined)

    assertFalse(cc.replacesPhysicalOptRuleSet)
    assertFalse(cc.getPhysicalOptRuleSet.isDefined)

    assertFalse(cc.replacesDecoRuleSet)
    assertFalse(cc.getDecoRuleSet.isDefined)
  }

  // Mixed usage: add on one phase (flag false), replace on the others (flag true).
  @Test
  def testRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.FILTER_INSTANCE))
      .replaceLogicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .replacePhysicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .replaceDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
      .build()

    assertFalse(cc.replacesNormRuleSet)
    assertTrue(cc.getNormRuleSet.isDefined)

    assertTrue(cc.replacesLogicalOptRuleSet)
    assertTrue(cc.getLogicalOptRuleSet.isDefined)

    assertTrue(cc.replacesPhysicalOptRuleSet)
    assertTrue(cc.getPhysicalOptRuleSet.isDefined)

    assertTrue(cc.replacesDecoRuleSet)
    assertTrue(cc.getDecoRuleSet.isDefined)
  }

  // --- normalization rule sets ---

  @Test
  def testReplaceNormalizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.FILTER_INSTANCE))
      .build()

    assertEquals(true, cc.replacesNormRuleSet)
    assertTrue(cc.getNormRuleSet.isDefined)
    val cSet = cc.getNormRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(ReduceExpressionsRule.FILTER_INSTANCE))
  }

  // replace-then-add: still counts as a replacement, but both rules are kept.
  @Test
  def testReplaceNormalizationAddRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.FILTER_INSTANCE))
      .addNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.PROJECT_INSTANCE))
      .build()

    assertEquals(true, cc.replacesNormRuleSet)
    assertTrue(cc.getNormRuleSet.isDefined)
    val cSet = cc.getNormRuleSet.get.iterator().asScala.toSet
    assertEquals(2, cSet.size)
    assertTrue(cSet.contains(ReduceExpressionsRule.FILTER_INSTANCE))
    assertTrue(cSet.contains(ReduceExpressionsRule.PROJECT_INSTANCE))
  }

  @Test
  def testAddNormalizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.FILTER_INSTANCE))
      .build()

    assertEquals(false, cc.replacesNormRuleSet)
    assertTrue(cc.getNormRuleSet.isDefined)
    val cSet = cc.getNormRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(ReduceExpressionsRule.FILTER_INSTANCE))
  }

  // add-then-add: order of insertion is preserved, hence the list assertions.
  @Test
  def testAddAddNormalizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.FILTER_INSTANCE))
      .addNormRuleSet(RuleSets.ofList(ReduceExpressionsRule.PROJECT_INSTANCE,
        ReduceExpressionsRule.CALC_INSTANCE))
      .build()

    assertEquals(false, cc.replacesNormRuleSet)
    assertTrue(cc.getNormRuleSet.isDefined)
    val cList = cc.getNormRuleSet.get.iterator().asScala.toList
    assertEquals(3, cList.size)
    assertEquals(cList.head, ReduceExpressionsRule.FILTER_INSTANCE)
    assertEquals(cList(1), ReduceExpressionsRule.PROJECT_INSTANCE)
    assertEquals(cList(2), ReduceExpressionsRule.CALC_INSTANCE)
  }

  // --- logical optimization rule sets ---

  @Test
  def testReplaceLogicalOptimizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceLogicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .build()

    assertEquals(true, cc.replacesLogicalOptRuleSet)
    assertTrue(cc.getLogicalOptRuleSet.isDefined)
    val cSet = cc.getLogicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
  }

  @Test
  def testReplaceLogicalOptimizationAddRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceLogicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .addLogicalOptRuleSet(RuleSets.ofList(CalcMergeRule.INSTANCE, CalcSplitRule.INSTANCE))
      .build()

    assertEquals(true, cc.replacesLogicalOptRuleSet)
    assertTrue(cc.getLogicalOptRuleSet.isDefined)
    val cSet = cc.getLogicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(3, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcSplitRule.INSTANCE))
  }

  @Test
  def testAddLogicalOptimizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addLogicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .addLogicalOptRuleSet(RuleSets.ofList(CalcMergeRule.INSTANCE, CalcSplitRule.INSTANCE))
      .build()

    assertEquals(false, cc.replacesLogicalOptRuleSet)
    assertTrue(cc.getLogicalOptRuleSet.isDefined)
    val cSet = cc.getLogicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(3, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcSplitRule.INSTANCE))
  }

  // --- physical optimization rule sets ---

  @Test
  def testReplacePhysicalOptimizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replacePhysicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .build()

    assertEquals(true, cc.replacesPhysicalOptRuleSet)
    assertTrue(cc.getPhysicalOptRuleSet.isDefined)
    val cSet = cc.getPhysicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
  }

  @Test
  def testReplacePhysicalOptimizationAddRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replacePhysicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .addPhysicalOptRuleSet(RuleSets.ofList(CalcMergeRule.INSTANCE, CalcSplitRule.INSTANCE))
      .build()

    assertEquals(true, cc.replacesPhysicalOptRuleSet)
    assertTrue(cc.getPhysicalOptRuleSet.isDefined)
    val cSet = cc.getPhysicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(3, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcSplitRule.INSTANCE))
  }

  @Test
  def testAddPhysicalOptimizationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addPhysicalOptRuleSet(RuleSets.ofList(FilterMergeRule.INSTANCE))
      .addPhysicalOptRuleSet(RuleSets.ofList(CalcMergeRule.INSTANCE, CalcSplitRule.INSTANCE))
      .build()

    assertEquals(false, cc.replacesPhysicalOptRuleSet)
    assertTrue(cc.getPhysicalOptRuleSet.isDefined)
    val cSet = cc.getPhysicalOptRuleSet.get.iterator().asScala.toSet
    assertEquals(3, cSet.size)
    assertTrue(cSet.contains(FilterMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcMergeRule.INSTANCE))
    assertTrue(cSet.contains(CalcSplitRule.INSTANCE))
  }

  // --- decoration (retraction) rule sets ---

  @Test
  def testReplaceDecorationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
      .build()

    assertEquals(true, cc.replacesDecoRuleSet)
    assertTrue(cc.getDecoRuleSet.isDefined)
    val cSet = cc.getDecoRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
  }

  @Test
  def testReplaceDecorationAddRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
      .addDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE))
      .build()

    assertEquals(true, cc.replacesDecoRuleSet)
    assertTrue(cc.getDecoRuleSet.isDefined)
    val cSet = cc.getDecoRuleSet.get.iterator().asScala.toSet
    assertEquals(2, cSet.size)
    assertTrue(cSet.contains(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
    assertTrue(cSet.contains(DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE))
  }

  @Test
  def testAddDecorationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
      .build()

    assertEquals(false, cc.replacesDecoRuleSet)
    assertTrue(cc.getDecoRuleSet.isDefined)
    val cSet = cc.getDecoRuleSet.get.iterator().asScala.toSet
    assertEquals(1, cSet.size)
    assertTrue(cSet.contains(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
  }

  @Test
  def testAddAddDecorationRules(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE))
      .addDecoRuleSet(RuleSets.ofList(DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE,
        DataStreamRetractionRules.ACCMODE_INSTANCE))
      .build()

    assertEquals(false, cc.replacesDecoRuleSet)
    assertTrue(cc.getDecoRuleSet.isDefined)
    val cList = cc.getDecoRuleSet.get.iterator().asScala.toList
    assertEquals(3, cList.size)
    assertEquals(cList.head, DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE)
    assertEquals(cList(1), DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE)
    assertEquals(cList(2), DataStreamRetractionRules.ACCMODE_INSTANCE)
  }

  // --- SQL operator tables ---

  @Test
  def testDefaultOperatorTable(): Unit = {

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .build()

    assertEquals(false, cc.replacesSqlOperatorTable)
    assertFalse(cc.getSqlOperatorTable.isDefined)
  }

  @Test
  def testReplaceOperatorTable(): Unit = {

    val oracleTable = new OracleSqlOperatorTable

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceSqlOperatorTable(oracleTable)
      .build()

    val oracleOps = oracleTable.getOperatorList.asScala

    assertEquals(true, cc.replacesSqlOperatorTable)
    assertTrue(cc.getSqlOperatorTable.isDefined)
    val ops = cc.getSqlOperatorTable.get.getOperatorList
      .asScala.toSet
    assertEquals(oracleOps.size, ops.size)
    for (o <- oracleOps) {
      assertTrue(ops.contains(o))
    }
  }

  @Test
  def testReplaceAddOperatorTable(): Unit = {

    val oracleTable = new OracleSqlOperatorTable
    val stdTable = new SqlStdOperatorTable

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceSqlOperatorTable(oracleTable)
      .addSqlOperatorTable(stdTable)
      .build()

    val oracleOps = oracleTable.getOperatorList.asScala
    val stdOps = stdTable.getOperatorList.asScala

    assertEquals(true, cc.replacesSqlOperatorTable)
    assertTrue(cc.getSqlOperatorTable.isDefined)
    val ops = cc.getSqlOperatorTable.get.getOperatorList
      .asScala.toSet
    assertEquals(oracleOps.size + stdOps.size, ops.size)
    for (o <- oracleOps) {
      assertTrue(ops.contains(o))
    }
    for (o <- stdOps) {
      assertTrue(ops.contains(o))
    }
  }

  @Test
  def testAddOperatorTable(): Unit = {

    val oracleTable = new OracleSqlOperatorTable

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addSqlOperatorTable(oracleTable)
      .build()

    val oracleOps = oracleTable.getOperatorList.asScala

    assertEquals(false, cc.replacesSqlOperatorTable)
    assertTrue(cc.getSqlOperatorTable.isDefined)
    val ops = cc.getSqlOperatorTable.get.getOperatorList
      .asScala.toSet
    assertEquals(oracleOps.size, ops.size)
    for (o <- oracleOps) {
      assertTrue(ops.contains(o))
    }
  }

  @Test
  def testAddAddOperatorTable(): Unit = {

    val oracleTable = new OracleSqlOperatorTable
    val stdTable = new SqlStdOperatorTable

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .addSqlOperatorTable(oracleTable)
      .addSqlOperatorTable(stdTable)
      .build()

    val oracleOps = oracleTable.getOperatorList.asScala
    val stdOps = stdTable.getOperatorList.asScala

    assertEquals(false, cc.replacesSqlOperatorTable)
    assertTrue(cc.getSqlOperatorTable.isDefined)
    val ops = cc.getSqlOperatorTable.get.getOperatorList
      .asScala.toSet
    assertEquals(oracleOps.size + stdOps.size, ops.size)
    for (o <- oracleOps) {
      assertTrue(ops.contains(o))
    }
    for (o <- stdOps) {
      assertTrue(ops.contains(o))
    }
  }

  // --- SqlToRelConverter configuration override ---

  @Test
  def testReplaceSqlToRelConverterConfig(): Unit = {
    val config = SqlToRelConverter.configBuilder()
      .withTrimUnusedFields(false)
      .withConvertTableAccess(false)
      .withInSubQueryThreshold(Integer.MAX_VALUE)
      .build()

    val cc: CalciteConfig = new CalciteConfigBuilder()
      .replaceSqlToRelConverterConfig(config)
      .build()

    assertTrue(cc.getSqlToRelConverterConfig.isDefined)
    assertEquals(Integer.MAX_VALUE, cc.getSqlToRelConverterConfig.get.getInSubQueryThreshold)
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/calcite/CalciteConfigBuilderTest.scala | Scala | apache-2.0 | 14,430 |
import collection.mutable.Stack
import org.scalatest._
import java.net.URI
import java.nio.file.Paths
import akka.actor.{ Actor, ActorRef, Props, ActorSystem }
import akka.testkit.{ ImplicitSender, TestKit, TestActorRef, TestProbe }
import scala.io.Source
import mumbler.transport.Messages.{StatsResponse, Indexed}
import mumbler.StatsCache
/**
 * Spec for [[StatsCache]]: verifies that summarizing keeps only the most
 * recently recorded [[Indexed]] entry per remote, regardless of the entries'
 * numeric values.
 */
class StatsGathererSpec(_system: ActorSystem)
  extends TestKit(_system)
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {

  // Zero-arg constructor required by the test runner; creates a dedicated system.
  def this() = this(ActorSystem("StatsGathererSpec"))

  override def afterAll: Unit = {
    shutdown(system)
  }

  "summarize" should "provide most recent totals" in {
    // Two placeholder remotes; only their identities (toString keys) matter here.
    val actorOne = system.actorOf(Props.empty)
    val actorTwo = system.actorOf(Props.empty)

    // the first arg's value is arbitrary
    val stats = StatsCache(actorOne, Seq[ActorRef](actorOne, actorTwo))

    // Record three stats per remote; only the last of each should survive.
    for(i <- List(Indexed(1000, 10, 159015), Indexed(1200, 20, 165895), Indexed(1100, 15, 161222))) {
      stats.record(actorOne.toString, StatsResponse(Some(i)))
    }
    for(i <- List(Indexed(900, 12, 156017), Indexed(1500, 33, 162201), Indexed(700, 5, 151010))) {
      stats.record(actorTwo.toString, StatsResponse(Some(i)))
    }

    val summary: mumbler.SummaryOfMostRecent = stats.summarizeRecent()
    val flattened = summary.remotes.values.flatten.toList

    // One entry per remote.
    assert(flattened.size == 2)
    // important that the most recently added records are summarized and other values do not determine ordering
    assert(flattened.contains(Indexed(700, 5, 151010)))
    assert(flattened.contains(Indexed(1100, 15, 161222)))
  }
}
| michaeldye/mids-mumbler | mumbler/src/test/scala/TestStatsGatherer.scala | Scala | gpl-3.0 | 1,595 |
/*******************************************************************************
Copyright (c) 2012-2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.tests
import junit.framework.Test
import junit.framework.TestSuite
import junit.framework.TestCase
import junit.framework.Assert._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.{AddressManager, Operator}
import kr.ac.kaist.jsaf.Shell
import kr.ac.kaist.jsaf.ShellParameters
import kr.ac.kaist.jsaf.compiler.Predefined
// Empty class so the Eclipse JUnit runner can discover this test by name;
// all test data and setup live in the companion object below.
class TypingOperatorJUTest
object TypingOperatorJUTest {
Shell.pred = new Predefined(new ShellParameters())
// Fixture table for abstract-domain join (lattice least upper bound).
// Each tuple is: (human-readable description,
//                 left operand values, right operand values,
//                 expected values of the join, flag).
// Operand lists hold concrete values to be lifted into the abstract domain
// (Int/Double as numbers, String, Boolean, and domain constants such as
// UndefTop/NullTop/NaN/PosInf/NegInf used directly).
// NOTE(review): the trailing Boolean presumably means "expected result must
// match exactly" — every '=' description here is true, while the binCases
// table below uses false for its '>=' (over-approximation) entries; confirm
// against the driver that consumes these tuples.
val joinCases:List[(String, List[Any], List[Any], List[Any], Boolean)] = List(
("{} + {} = {}", List(), List(), List(), true),
("{1} + {2} = {1, 2}", List(1), List(2), List(1, 2), true),
("{UndefTop} + {true} = {UndefTop, true}", List(UndefTop), List(true), List(UndefTop, true), true),
("{NullTop} + {} = {NullTop}", List(NullTop), List(), List(NullTop), true),
("{true} + {false} = {true, false}", List(true), List(false), List(true, false), true),
("{false} + {NullTop} = {false, NullTop}", List(false), List(NullTop), List(false, NullTop), true),
("{-1} + {true} = {-1, true}", List(-1), List(true), List(-1, true), true),
("{-3} + {0.2} = {-3, 0.2}", List(-3), List(0.2), List(-3, 0.2), true),
("{1, 2} + {false} = {1, 2, false}", List(1, 2), List(false), List(1, 2, false), true),
("{1, \\"s\\"} + {false} = {1, \\"s\\", false}", List(1, "s"), List(false), List(1, "s", false), true),
("{\\"1\\", \\"0\\"} + {1} = {\\"1\\", \\"0\\", 1}", List("1", "0"), List(1), List("1", "0", 1), true),
("{NaN} + {true} = {NaN, true}", List(NaN), List(true), List(NaN, true), true),
("{PosInf} + {NullTop} = {PosInf, NullTop}", List(PosInf), List(NullTop), List(PosInf, NullTop), true),
("{UndefTop} + {NegInf} = {UndefTop, NegInf}", List(UndefTop), List(NegInf), List(UndefTop, NegInf), true),
("{\\"foo\\"} + {\\"1\\"} = {\\"foo\\", \\"1\\"}", List("foo"), List("1"), List("foo", "1"), true),
("{UndefTop} + {\\"str\\"} = {UndefTop, \\"str\\"}", List(UndefTop), List("str"), List(UndefTop, "str"), true),
("{NaN} + {-1} = {NaN, -1}", List(NaN), List(-1), List(NaN, -1), true),
("{PosInf} + {NegInf} = {PosInf, NegInf}", List(PosInf), List(NegInf), List(PosInf, NegInf), true),
("{NaN} + {NegInf} = {NaN, NegInf}", List(NaN), List(NegInf), List(NaN, NegInf), true),
("{NaN} + {PosInf} = {NaN, PosInf}", List(NaN), List(PosInf), List(NaN, PosInf), true),
("{1, 2} + {-3, 4.3} = {1, 2, -3, 4.3}", List(1, 2), List(-3, 4.3), List(1, 2, -3, 4.3), true),
("{UndefTop, NullTop, true} + {false} = {UndefTop, NullTop, true, false}", List(UndefTop, NullTop, true), List(false), List(UndefTop, NullTop, true, false), true),
("{UndefTop, NullTop, true, false} + {NaN, 1, \\"1\\", \\"str\\"} = {UndefTop, NullTop, true, false, NaN, 1, \\"1\\", \\"str\\"}", List(UndefTop, NullTop, true, false), List(NaN, 1, "1", "str"), List(UndefTop, NullTop, true, false, NaN, 1, "1", "str"), true),
("{\\"-1\\"} + {\\"3.5\\"} = {\\"-1\\", \\"3.5\\"}", List("-1"), List("3.5"), List("-1", "3.5"), true),
("{\\"1\\"} + {} = {\\"1\\"}", List("1"), List(), List("1"), true),
("{\\"-1\\"} + {} = {\\"-1\\"}", List("-1"), List(), List("-1"), true),
("{UndefTop} + {} = {UndefTop}", List(UndefTop), List(), List(UndefTop), true),
("{NullTop} + {} = {NullTop}", List(NullTop), List(), List(NullTop), true),
("{true} + {} = {true}", List(true), List(), List(true), true)
// TODO
)
val binCases:List[TypeOperator] = List(
BinBitOr("{NaN} | {1, 2} = {1, 2}", List(NaN), List(1,2), List(1, 2), true),
BinBitOr("{-1, 3.2} | {NaN} = {-1, 3.2}", List(-1, 3.2), List(NaN), List(-1, 3), true),
BinBitOr("{1} | {2} = {3}", List(1), List(2), List(3), true),
BinBitOr("{1} | {-1} = {-1}", List(1), List(-1), List(-1), true),
BinBitOr("{1} | {1, 2} = {1, 3}", List(1), List(1, 2), List(1, 3), true),
BinBitOr("{1} | {-1, 2.1} = {-1, 3}", List(1), List(-1, 2.1), List(-1, 3), true),
BinBitOr("{-1} | {-2} = {-1}", List(-1), List(-2), List(-1), true),
BinBitOr("{-1} | {1} = {-1}", List(-1), List(1), List(-1), true),
BinBitOr("{-1} | {1, 2} >= {-1, -1}", List(-1), List(1, 2), List(-1, -1), false),
BinBitOr("{-2} | {1, 2} = {-1, -2}", List(-2), List(1, 2), List(-1, -2), true),
BinBitOr("{2.1} | {1, 2} = {3, 2}", List(2.1), List(1, 2), List(3, 2), true),
BinBitOr("{-1} | {-1, 3.1} >= {-1, -1}", List(-1), List(-1, 3.1), List(-1, -1), false),
BinBitOr("{7.1} | {-1, 3.1} = {-1, 7}", List(7.1), List(-1, 3.1), List(-1, 7), true),
BinBitOr("{1, 2} | {5} = {5, 7}", List(1, 2), List(5), List(5, 7), true),
BinBitOr("{1, 2} | {-3} = {-3, -1}", List(1, 2), List(-3), List(-3, -1), true),
BinBitOr("{1, 2} | {5.1} = {5, 7}", List(1, 2), List(5.1), List(5, 7), true),
BinBitOr("{1, 2} | {5, 6} = {5, 7, 7, 6}", List(1, 2), List(5, 6), List(5, 7, 7, 6), true),
BinBitOr("{1, 2} | {-4, 7.5} = {-3, 7, -2, 7}", List(1, 2), List(-4, 7.5), List(-3, 7, -2, 7), true),
BinBitOr("{-3, -2} | {2} >= {-1, -2}", List(-3, -2), List(2), List(-1, -2), false),
BinBitOr("{-3, 2.5} | {2} = {-1, 2}", List(-3, 2.5), List(2), List(-1, 2), true),
BinBitOr("{-3, -2} | {-1, 1} >= {-1, -3, -1, -1}", List(-3, -2), List(-1, 1), List(-1, -3, -1, -1), false),
BinBitOr("{-3, 2.5} | {-1, 1} = {-1, -3, -1, 3}", List(-3, 2.5), List(-1, 1), List(-1, -3, -1, 3), true),
BinBitOr("{PosInf} | {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitOr("{PosInf} | {1} = {1}", List(PosInf), List(1), List(1), true),
BinBitOr("{PosInf} | {3.5} = {3}", List(PosInf), List(3.5), List(3), true),
BinBitOr("{1} | {NegInf} = {1}", List(1), List(NegInf), List(1), true),
BinBitOr("{-1} | {NegInf} = {-1}", List(-1), List(NegInf), List(-1), true),
BinBitOr("{PosInf, NegInf} | {1} = {1}", List(PosInf, NegInf), List(1), List(1), true),
BinBitAnd("{1} & {2} = {0}", List(1), List(2), List(0), true),
BinBitAnd("{1} & {-1} = {1}", List(1), List(-1), List(1), true),
BinBitAnd("{1} & {2, 1} = {0, 1}", List(1), List(2, 1), List(0, 1), true),
BinBitAnd("{1} & {-2, 3} = {0, 1}", List(1), List(-2, 3), List(0, 1), true),
BinBitAnd("{1} & {-2, 3.4} = {0, 1}", List(1), List(-2, 3.4), List(0, 1), true),
BinBitAnd("{3} & {-4, 3.2} = {0, 3}", List(3), List(-4, 3.2), List(0, 3), true),
BinBitAnd("{-2} & {3} = {2}", List(-2), List(3), List(2), true),
BinBitAnd("{-2} & {-3} = {-4}", List(-2), List(-3), List(-4), true),
BinBitAnd("{-2} & {3, 5} = {2, 4}", List(-2), List(3, 5), List(2, 4), true),
BinBitAnd("{-2} & {-3, -6} >= {-4, -6}", List(-2), List(-3, -6), List(-4, -6), false),
BinBitAnd("{-2} & {-3, 6.5} = {-4, 6}", List(-2), List(-3, 6.5), List(-4, 6), true),
BinBitAnd("{-2} & {-3, 3} = {-4, 2}", List(-2), List(-3, 5), List(-4, 2), true),
BinBitAnd("{6, 7} & {1} = {0, 1}", List(6, 7), List(1), List(0, 1), true),
BinBitAnd("{6, 7} & {-5} = {2, 3}", List(6, 7), List(-5), List(2, 3), true),
BinBitAnd("{3.4, -6} & {1} >= {1, 0}", List(3.4, -6), List(1), List(1, 0), false),
BinBitAnd("{3.4, -6} & {-1} = {3, -6}", List(3.4, -6), List(-1), List(3, -6), true),
BinBitAnd("{3.4, -10} & {5.5} >= {1, 4}", List(3.4, -10), List(5.5), List(1, 4), false),
BinBitAnd("{3.4, -6} & {1, 2} >= {1, 2, 0, 2}", List(3.4, -6), List(1, 2), List(1, 2, 0, 2), false),
BinBitAnd("{3.4, -6} & {3.4, -6} = {3, 2, 2, -6}", List(3.4, -6), List(3.4, -6), List(3, 2, 2, -6), true),
BinBitAnd("{PosInf} & {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitAnd("{PosInf} & {1} = {0}", List(PosInf), List(1), List(0), true),
BinBitAnd("{PosInf} & {1, 2} = {0}", List(PosInf), List(1, 2), List(0), true),
BinBitAnd("{1} & {NegInf} = {0}", List(1), List(NegInf), List(0), true),
BinBitAnd("{2.5, -3} & {NegInf} = {0}", List(2.5, -3), List(NegInf), List(0), true),
BinBitAnd("{1} & {PosInf, NegInf} = {0}", List(1), List(PosInf, NegInf), List(0), true),
BinBitAnd("{NaN} & {-3} = {0}", List(NaN), List(-3), List(0), true),
BinBitXor("{3} ^ {2} = {1}", List(3), List(2), List(1), true),
BinBitXor("{3} ^ {1, 2} = {2, 1}", List(3), List(1, 2), List(2, 1), true),
BinBitXor("{3} ^ {-1} = {-4}", List(3), List(-1), List(-4), true),
BinBitXor("{-3} ^ {-3, 3.6} = {0, -2}", List(-3), List(-3, 3.6), List(0, -2), true),
BinBitXor("{-3} ^ {-1} = {2}", List(-3), List(-1), List(2), true),
BinBitXor("{-3} ^ {1, 2} = {-4, -1}", List(-3), List(1, 2), List(-4, -1), true),
BinBitXor("{-3} ^ {-1, 0.4} = {2, -3}", List(-3), List(-1, 0.4), List(2, -3), true),
BinBitXor("{3, 7} ^ {2} = {1, 5}", List(3, 7), List(2), List(1, 5), true),
BinBitXor("{3, 7} ^ {2, 6} = {1, 5, 5, 1}", List(3, 7), List(2, 6), List(1, 5, 5, 1), true),
BinBitXor("{3, 7} ^ {-4} = {-1, -5}", List(3, 7), List(-4), List(-1, -5), true),
BinBitXor("{3, 7} ^ {-6, 1.3} = {-7, 2, -3, 6}", List(3, 7), List(-6, 1.3), List(-7, 2, -3, 6), true),
BinBitXor("{0.2, -4} ^ {3} = {3, -1}", List(0.2, -4), List(3), List(3, -1), true),
BinBitXor("{0.2, -4} ^ {3, 6} = {3, -1, 6, -6}", List(0.2, -4), List(3, 6), List(3, -1, 6, -6), true),
BinBitXor("{0.2, -4} ^ {0.5} = {0, -4}", List(0.2, -4), List(0.5), List(0, -4), true),
BinBitXor("{0.2, -4} ^ {0.5, 3.6} = {0, -4, 3, -1}", List(0.2, -4), List(0.5, 3.6), List(0, -4, 3, -1), true),
BinBitXor("{PosInf} ^ {3} = {3}", List(PosInf), List(3), List(3), true),
BinBitXor("{PosInf} ^ {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitXor("{PosInf} ^ {-3} = {-3}", List(PosInf), List(-3), List(-3), true),
BinBitXor("{NegInf} ^ {3.5} = {3}", List(NegInf), List(3.5), List(3), true),
BinBitXor("{2, 4} ^ {NegInf} = {2, 4}", List(2, 4), List(NegInf), List(2, 4), true),
BinBitXor("{PosInf, NegInf} ^ {3, 2} = {3, 2}", List(PosInf, NegInf), List(3, 2), List(3, 2), true),
BinLShift("{3} << {1} = {6}", List(3), List(1), List(6), true),
BinLShift("{3} << {1.2} = {6}", List(3), List(1.2), List(6), true),
BinLShift("{3} << {-1.2} = {6}", List(3), List(-1.2), List(-2147483648), true),
BinLShift("{-3} << {1} = {-6}", List(-3), List(1), List(-6), true),
BinLShift("{-1} << {-3} = {-536870912}", List(-1), List(-3), List(-536870912), true),
BinLShift("{-1} << {-3, 2} >= {-536870912, -4}", List(-1), List(-3, 2), List(-536870912, -4), false),
BinLShift("{4} << {29} = {-2147483648}", List(4), List(29), List(-2147483648), true),
BinLShift("{-4} << {30} = {0}", List(-4), List(30), List(0), true),
BinLShift("{4} << {29, 28} = {-2147483648, 1073741824}", List(4), List(29, 28), List(-2147483648, 1073741824), true),
BinLShift("{4, 2} << {-2} = {0, -2147483648}", List(4, 2), List(-2), List(0, -2147483648), true),
BinRShift("{256} >> {3} = {32}", List(256), List(3), List(32), true),
BinRShift("{256} >> {-32} = {256}", List(256), List(-32), List(256), true),
BinRShift("{256} >> {-31} = {128}", List(256), List(-31), List(128), true),
BinRShift("{256} >> {-33} = {0}", List(256), List(-33), List(0), true),
BinRShift("{-256} >> {-3} = {-1}", List(-256), List(-3), List(-1), true),
BinRShift("{-256} >> {3} = {-32}", List(-256), List(3), List(-32), true),
BinRShift("{-256} >> {-0.5} = {-256}", List(-256), List(-0.5), List(-256), true),
BinRShift("{-256} >> {17, 31} >= {-1, -1}", List(-256), List(17, 31), List(-1, -1), false),
BinRShift("{343.4} >> {2} = {85}", List(343.4), List(2), List(85), true),
BinRShift("{256, 34} >> {2} = {64, 8}", List(256, 34), List(2), List(64, 8), true),
BinRShift("{34, -34} >> {2} = {64, -9}", List(34, -34), List(2), List(64, -9), true),
BinURShift("{32} >>> {2} = {8}", List(32), List(2), List(8), true),
BinURShift("{-32} >>> {2} = {1073741816}", List(-32), List(2), List(1073741816), true),
BinURShift("{-32} >>> {-1} = {1}", List(-32), List(-1), List(1), true),
BinURShift("{-32} >>> {30} = {3}", List(-32), List(30), List(3), true),
BinURShift("{-32} >>> {-1, 30} >= {1, 3}", List(-32), List(-1, 30), List(1, 3), false),
BinURShift("{564} >>> {30} = {0}", List(564), List(30), List(0), true),
BinURShift("{564} >>> {-30} = {141}", List(564), List(-30), List(141), true),
BinURShift("{-12345} >>> {31} = {1}", List(-12345), List(31), List(1), true),
BinURShift("{564, -32} >>> {30} = {0, 3}", List(564, -32), List(30), List(0, 3), true),
BinURShift("{34, 78} >>> {2} = {8, 19}", List(34, 78), List(2), List(8, 19), true),
BinURShift("{-65, -90} >>> {-2} >= {3, 3}", List(-65, -90), List(-2), List(3, 3), false),
BinURShift("{-65, -90} >>> {4} = {268435451, 268435450}", List(-65, -90), List(4), List(268435451, 268435450), true),
BinPlus("{NaN} + {2} = {NaN}", List(NaN), List(2), List(NaN), true),
BinPlus("{PosInf, NegInf} + {NaN} = {NaN}", List(PosInf, NegInf), List(NaN), List(NaN), true),
BinPlus("{PosInf} + {NegInf} = {NaN}", List(PosInf), List(NegInf), List(NaN), true),
BinPlus("{NegInf} + {PosInf} = {NaN}", List(NegInf), List(PosInf), List(NaN), true),
BinPlus("{NegInf} + {NegInf} = {NegInf}", List(NegInf), List(NegInf), List(NegInf), true),
BinPlus("{PosInf} + {PosInf} = {PosInf}", List(PosInf), List(PosInf), List(PosInf), true),
BinPlus("{PosInf, NegInf} + {PosInf, NegInf} = {NaN, PosInf, NegInf}", List(PosInf, NegInf), List(PosInf, NegInf), List(NaN, PosInf, NegInf), true),
BinPlus("{2} + {PosInf, NegInf} = {PosInf, NegInf}", List(2), List(PosInf, NegInf), List(PosInf, NegInf), true),
BinPlus("{PosInf, NegInf} + {-3.1} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3.1), List(PosInf, NegInf), true),
BinPlus("{PosInf} + {2} = {PosInf}", List(PosInf), List(2), List(PosInf), true),
BinPlus("{-4.3} + {PosInf} = {PosInf}", List(-4.3), List(PosInf), List(PosInf), true),
BinPlus("{NegInf} + {100} = {NegInf}", List(NegInf), List(100), List(NegInf), true),
BinPlus("{3} + {NegInf} = {NegInf}", List(3), List(NegInf), List(NegInf), true),
BinPlus("{1} + {2} = {3}", List(1), List(2), List(3), true),
BinPlus("{1} + {-4} = {-3}", List(1), List(-4), List(-3), true),
BinPlus("{1} + {3, 4} = {4, 5}", List(1), List(3, 4), List(4, 5), true),
BinPlus("{1, 2} + {4} = {5, 6}", List(1, 2), List(4), List(5, 6), true),
BinPlus("{-1} + {-4} = {-5}", List(-1), List(-4), List(-5), true),
BinPlus("{-1} + {3.4, -2} >= {2.4, -3}", List(-1), List(3.4, -2), List(2.4, -3), false),
BinPlus("{-1} + {0, 2} = {-1, 1}", List(-1), List(0, 2), List(-1, 1), true),
BinPlus("{-1} + {3} = {2}", List(-1), List(3), List(2), true),
BinPlus("{3.5} + {0.5} = {4}", List(3.5), List(0.5), List(4), true),
BinPlus("{3.5} + {0.5, 1.5} >= {4, 5}", List(3.5), List(0.5, 1.5), List(4, 5), false),
BinPlus("{3.5, 6.5} + {0.5} >= {3, 6}", List(3.5, 6.5), List(0.5), List(4, 7), false),
BinPlus("{2, 4} + {-5} >= {-3, -1}", List(2, 4), List(-5), List(-3, -1), false),
BinPlus("{1.2, 4} + {-1} = {0.2, 3}", List(1.2, 4), List(-1), List(0.2, 3), true),
BinPlus("{} + {} = {}", List(), List(), List(), true),
BinPlus("{\\"s\\"} + {PosInf} = {\\"sInfinity\\"}", List("s"), List(PosInf), List("sInfinity"), true),
BinPlus("{\\"s\\"} + {NegInf} = {\\"s-Infinity\\"}", List("s"), List(NegInf), List("s-Infinity"), true),
BinPlus("{NaN} + {\\"s\\"} = {\\"NaNs\\"}", List(NaN), List("s"), List("NaNs"), true),
BinPlus("{\\"A\\"} + {\\"B\\"} = {\\"AB\\"}", List("A"), List("B"), List("AB"), true),
BinPlus("{\\"1\\"} + {3} = {\\"13\\"}", List("1"), List(3), List("13"), true),
BinPlus("{\\"0\\"} + {1} = {\\"01\\"}", List("0"), List(1), List("01"), true),
BinPlus("{\\"2\\"} + {\\"\\"} = {\\"2\\"}", List("2"), List(""), List("2"), true),
BinPlus("{\\"2\\"} + {-3} = {\\"2-3\\"}", List("2"), List(-3), List("2-3"), true),
BinPlus("{\\"2\\"} + {1.2} = {\\"21.2\\"}", List("2"), List(1.2), List("21.2"), true),
BinPlus("{\\"-1\\"} + {\\"\\"} = {\\"-1\\"}", List("-1"), List(""), List("-1"), true),
BinPlus("{\\"-1\\"} + {1} = {\\"-11\\"}", List("-1"), List(1), List("-11"), true),
BinPlus("{\\"-1\\"} + {\\"\\", 2} = {\\"-1\\", \\"-12\\"}", List("-1"), List("", 2), List("-1", "-12"), true),
BinPlus("{\\"3.2\\", \\"\\"} + {0} = {\\"3.20\\", \\"0\\"}", List("3.2", ""), List(0), List("3.20", "0"), false),
BinPlus("{\\"3.2\\", \\"\\"} + {-1} = {\\"3.2-1\\", \\"-1\\"}", List("3.2", ""), List(-1), List("3.2-1", "-1"), true),
BinPlus("{\\"a\\", 1} + {\\"b\\", 2} >= {\\"ab\\", \\"a2\\", \\"1b\\", 3}", List("a", 1), List("b", 2), List("ab", "1b", "a2", 3), false),
BinPlus("{true, 1} + {\\"a\\", 2} = {\\"1a\\", \\"truea\\", 3, 3}", List(true, 1), List("a", 2), List("1a", "truea", 3, 3), true),
BinPlus("{false, 1} + {\\"a\\", 2} = {\\"1a\\", \\"falsea\\", 2, 3}", List(false, 1), List("a", 2), List("1a", "falsea", 2, 3), true),
BinPlus("{null, \\"1\\"} + {\\"1\\", \\"\\"} = {\\"null1\\", \\"11\\", \\"null\\", \\"1\\"}", List(NullTop, "1"), List("1", ""), List("null1", "11", "null", "1"), true),
BinPlus("{null, \\"1\\"} + {\\"1\\", \\"2\\"} = {\\"null1\\", \\"11\\", \\"null2\\", \\"12\\"}", List(NullTop, "1"), List("1", "2"), List("null1", "11", "null2", "12"), true),
BinPlus("{Undef, 1} + {\\"str\\", true} = {\\"undefinedstr\\", NaN, \\"1str\\", 2}", List(UndefTop, 1), List("str", true), List("undefinedstr", NaN, "1str", 2), true),
BinPlus("{\\"1\\", -2} + {1, \\"4\\"} = {\\"11\\", \\"14\\", -1, \\"-24\\"}", List("1", -2), List(1, "4"), List("11", "14", -1, "-24"), true),
BinPlus("{\\"\\"} + {1, 2} = {\\"1\\", \\"2\\"}", List(""), List(1, 2), List("1", "2"), true),
BinPlus("{\\"\\"} + {-1, 3.2} = {\\"-1\\", \\"3.2\\"}", List(""), List(-1, 3.2), List("-1", "3.2"), true),
BinPlus("{\\"\\"} + {1, -1} = {\\"1\\", \\"-1\\"}", List(""), List(1, -1), List("1", "-1"), true),
BinMinus("{} - {} = {}", List(), List(), List(), true),
BinMinus("{} - {1} = {}", List(), List(1), List(), true),
BinMinus("{-1} - {} = {}", List(-1), List(), List(), true),
BinMinus("{NaN} - {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMinus("{-2} - {NaN} = {NaN}", List(-2), List(NaN), List(NaN), true),
BinMinus("{PosInf} - {NegInf} = {PosInf}", List(PosInf), List(NegInf), List(PosInf), true),
BinMinus("{NegInf} - {PosInf} = {NegInf}", List(NegInf), List(PosInf), List(NegInf), true),
BinMinus("{PosInf} - {PosInf} = {NaN}", List(PosInf), List(PosInf), List(NaN), true),
BinMinus("{NegInf} - {NegInf} = {NaN}", List(NegInf), List(NegInf), List(NaN), true),
BinMinus("{PosInf} - {3} = {PosInf}", List(PosInf), List(3), List(PosInf), true),
BinMinus("{NegInf} - {-2} = {NegInf}", List(NegInf), List(-2), List(NegInf), true),
BinMinus("{2} - {PosInf} = {NegInf}", List(2), List(PosInf), List(NegInf), true),
BinMinus("{1} - {NegInf} = {PosInf}", List(1), List(NegInf), List(PosInf), true),
BinMinus("{3} - {2} = {1}", List(3), List(2), List(1), true),
BinMinus("{3} - {5} = {-2}", List(3), List(5), List(-2), true),
BinMinus("{3} - {-5} = {8}", List(3), List(-5), List(8), true),
BinMinus("{5} - {3.5} = {1.5}", List(5), List(3.5), List(1.5), true),
BinMinus("{3} - {-5, 3.5} = {8, 0.5}", List(3), List(-5, 3.5), List(8, 0.5), true),
BinMinus("{-2} - {1} = {-3}", List(-2), List(1), List(-3), true),
BinMinus("{-2} - {1, 3} >= {-3, -5}", List(-2), List(1, 3), List(-3, -5), false),
BinMinus("{5.2} - {2} = {3.2}", List(5.2), List(2), List(3.2), true),
BinMinus("{-2} - {-5} = {3}", List(-2), List(-5), List(3), true),
BinMinus("{-2} - {-5, -1} = {3, -1}", List(-2), List(-5, -1), List(3, -1), true),
BinMinus("{-2} - {-2, 1} = {0, -3}", List(-2), List(-2, -1), List(0, -3), true),
BinMinus("{2, 3} - {3} = {-1, 0}", List(2, 3), List(3), List(-1, 0), true),
BinMinus("{2, 3} - {-2} = {4, 5}", List(2, 3), List(-2), List(4, 5), true),
BinMinus("{2, 3} - {-2.5} = {4.5, 5.5}", List(2, 3), List(-2.5), List(4.5, 5.5), true),
BinMinus("{2, 3} - {-1, -3} >= {3, 4, 4, 6}", List(2, 3), List(-1, -3), List(3, 4, 4, 6), false),
BinMinus("{2, 3} - {-2.5, -1} = {4.5, 5.5, 3, 4}", List(2, 3), List(-2.5, -1), List(4.5, 5.5, 3, 4), true),
BinMinus("{2, 3} - {2.5} = {0.5, -0.5}", List(2, 3), List(2.5), List(0.5, -0.5), true),
BinMinus("{2, 3} - {2, 5} = {0, 1, -3, -2}", List(2, 3), List(2, 5), List(0, 1, -3, -2), true),
BinMinus("{-1, 4.2} - {3} = {-4, 1.2}", List(-1, 4.2), List(3), List(-4, 1.2), true),
BinMinus("{3.5, 1.5} - {0.5} >= {3, 1}", List(3.5, 1.5), List(0.5), List(3, 1), false),
BinMinus("{2.1, -4} - {-1} >= {3.1, -3}", List(2.1, -4), List(-1), List(3.1, -3), false),
BinMinus("{-2, 2.5} - {3, 2} = {-5, -0.5, -4, 0.5}", List(-2, 2.5), List(3, 2), List(-5, -0.5, -4, 0.5), true),
BinMinus("{-1, -5} - {-2, -1} = {1, 0, -3, -4}", List(-1, -5), List(-2, -1), List(1, 0, -3, -4), true),
BinMul("{NaN} * {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMul("{2} * {NaN} = {NaN}", List(2), List(NaN), List(NaN), true),
BinMul("{0} * {PosInf} = {NaN}", List(0), List(PosInf), List(NaN), true),
BinMul("{0} * {NegIng} = {NaN}", List(0), List(NegInf), List(NaN), true),
BinMul("{0} * {PosInf, NegInf} = {NaN}", List(0), List(PosInf, NegInf), List(NaN), true),
BinMul("{PosInf} * {PosInf} = {PosInf}", List(PosInf), List(PosInf), List(PosInf), true),
BinMul("{PosIng} * {NegInf} = {NegInf}", List(PosInf), List(NegInf), List(NegInf), true),
BinMul("{NegInf} * {NegInf} = {PosInf}", List(NegInf), List(NegInf), List(PosInf), true),
BinMul("{NegInf} * {PosInf} = {NegInf}", List(NegInf), List(PosInf), List(NegInf), true),
BinMul("{PosInf, NegInf} * {PosInf} = {PosInf, NegInf}", List(PosInf, NegInf), List(PosInf), List(PosInf, NegInf), true),
BinMul("{0, 2} * {PosInf} = {NaN, PosInf}", List(0, 2), List(PosInf), List(NaN, PosInf), true),
BinMul("{PosInf} * {0, 2} = {NaN, PosInf}", List(PosInf), List(0, 2), List(NaN, PosInf), true),
BinMul("{NegInf} * {2, 3} >= {NegInf}", List(NegInf), List(2, 3), List(NegInf), false),
BinMul("{1E9} * {5} = {5E9}", List(1E9), List(5), List(5E9), true),
BinMul("{PosInf} / {-1, 3.5} = {PosInf, NegInf}", List(PosInf), List(-1, 3.5), List(PosInf, NegInf), true),
BinMul("{-1, 3.5} / {PosInf} = {PosInf, NegInf}", List(-1, 3.5), List(PosInf), List(PosInf, NegInf), true),
BinMul("{NegInf} / {-1, 3.5} = {PosInf, NegInf}", List(NegInf), List(-1, 3.5), List(PosInf, NegInf), true),
BinMul("{-1, 3.5} / {NegInf} = {PosInf, NegInf}", List(-1, 3.5), List(NegInf), List(PosInf, NegInf), true),
BinMul("{0} * {-2, 3.4, 1} = {0}", List(0), List(-2, 3.4, 1), List(0), true),
BinMul("{-1, 4} * {0} = {0}", List(-1, 4), List(0), List(0), true),
BinMul("{2} * {3} = {6}", List(2), List(3), List(6), true),
BinMul("{2} * {1, 3} = {2, 6}", List(2), List(1, 3), List(2, 6), true),
BinMul("{2} * {-1} = {-2}", List(2), List(-1), List(-2), true),
BinMul("{2} * {3.2, -4} = {6.4, -6}", List(2), List(3.2, -4), List(6.4, -6), true),
BinMul("{0, 2} * {2} = {0, 4}", List(0, 2), List(2), List(0, 4), true),
BinMul("{0, 2} * {2, 5} = {0, 4, 10}", List(0, 2), List(2, 5), List(0, 4, 10), true),
BinMul("{0, 2} * {-1} = {0, -2}", List(0, 2), List(-1), List(0, -2), true),
BinMul("{0, 2} * {-1, 2.5} = {0, -2, 5}", List(0, 2), List(-1, 2.5), List(0, -2, 5), true),
BinMul("{2.5} * {2} = {5}", List(2.5), List(2), List(5), true),
BinMul("{2.5} * {2, 3} = {5, 7.5}", List(2.5), List(2, 3), List(5, 7.5), true),
BinMul("{-2} * {-4} = {8}", List(-2), List(-4), List(8), true),
BinMul("{-2} * {-4, 2.5} = {8, -5}", List(-2), List(-4, 2.5), List(8, -5), true),
BinMul("{2.5, 1.5} * {2} >= {5, 3}", List(2.5, 1.5), List(2), List(5, 3), false),
BinMul("{2.5 -2} * {1, 2} = {2.5, -2, 5, -4}", List(2.5, -2), List(1, 2), List(2.5, -2, 5, -4), true),
BinMul("{-2, -4} * {-3} >= {6, 12}", List(-2, -4), List(-3), List(6, 12), false),
BinMul("{-2, 2.5} * {-2, -1} = {2, 2.5, -4, 5}", List(-2, 2.5), List(-2, -1), List(2, 2.5, -4, 5), true),
BinDiv("{} / {} = {}", List(), List(), List(), true),
BinDiv("{NaN} / {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinDiv("{3} / {NaN} = {NaN}", List(3), List(NaN), List(NaN), true),
BinDiv("{PosInf} / {NegInf} = {NaN}", List(PosInf), List(NegInf), List(NaN), true),
BinDiv("{NegInf} / {NegInf} = {NaN}", List(NegInf), List(NegInf), List(NaN), true),
BinDiv("{PosInf} / {NegInf, PosInf} = {NaN}", List(PosInf), List(NegInf, PosInf), List(NaN), true),
BinDiv("{PosInf} / {0} = {PosInf}", List(PosInf), List(0), List(PosInf), true),
BinDiv("{NegInf} / {0} = {NegInf}", List(NegInf), List(0), List(NegInf), true),
BinDiv("{PosInf, NegInf} / {0} = {PosInf, NegInf}", List(PosInf, NegInf), List(0), List(PosInf, NegInf), true),
BinDiv("{PosInf} / {2, 3} = {PosInf}", List(PosInf), List(2, 3), List(PosInf), true),
BinDiv("{PosInf} / {-1} = {NegInf}", List(PosInf), List(-1), List(NegInf), true),
BinDiv("{PosInf} / {0.5} = {PosInf}", List(PosInf), List(0.5), List(PosInf), true),
BinDiv("{PosInf} / {-1, 0.5} = {NegInf, PosInf}", List(PosInf), List(-1, 0.5), List(NegInf, PosInf), true),
BinDiv("{NegInf} / {2, 3} = {NegInf}", List(NegInf), List(2, 3), List(NegInf), true),
BinDiv("{NegInf} / {-1} = {PosInf}", List(NegInf), List(-1), List(PosInf), true),
BinDiv("{NegInf} / {0.5} = {NegInf}", List(NegInf), List(0.5), List(NegInf), true),
BinDiv("{NegInf} / {-1, 0.5} = {PosInf, NegInf}", List(NegInf), List(-1, 0.5), List(PosInf, NegInf), true),
BinDiv("{PosInf, NegInf} / {-3} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3), List(PosInf, NegInf), true),
BinDiv("{PosInf, NegInf} / {-3, 0.5} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3, 0.5), List(PosInf, NegInf), true),
BinDiv("{3} / {PosInf} = {0}", List(3), List(PosInf), List(0), true),
BinDiv("{-2, 0.5} / {NegInf} = {0}", List(-2, 0.5), List(NegInf), List(0), true),
BinDiv("{1, 2} / {PosInf, NegInf} = {0}", List(1, 2), List(PosInf, NegInf), List(0), true),
BinDiv("{0} / {0} = {NaN}", List(0), List(0), List(NaN), true),
BinDiv("{0} / {-3} = {0}", List(0), List(-3), List(0), true),
BinDiv("{0} / {PosInf} = {0}", List(0), List(PosInf), List(0), true),
BinDiv("{0} / {-3, 2} = {0}", List(0), List(-3, 2), List(0), true),
BinDiv("{3} / {0} = {PosInf}", List(3), List(0), List(PosInf), true),
BinDiv("{1, 2} / {0} >= {PosInf}", List(1, 2), List(0), List(PosInf), false),
BinDiv("{0, 2} / {0} = {NaN, PosInf}", List(1, 2), List(0), List(NaN, PosInf), true),
BinDiv("{0.5} / {0} = {PosInf}", List(0.5), List(0), List(PosInf), true),
BinDiv("{-1} / {0} = {NegInf}", List(-1), List(0), List(NegInf), true),
BinDiv("{-1, 0.5} / {0} = {PosInf, NegInf}", List(-1, 0.5), List(0), List(PosInf, NegInf), true),
BinDiv("{3} / {2} = {1.5}", List(3), List(2), List(1.5), true),
BinDiv("{4} / {2} = {2}", List(4), List(2), List(2), true),
BinDiv("{3} / {2, 3} = {1.5, 1}", List(3), List(2, 3), List(1.5, 1), true),
BinDiv("{3} / {0, 3} = {PosInf, 1}", List(3), List(0, 3), List(PosInf, 1), true),
BinDiv("{3} / {1.5} = {2}", List(3), List(1.5), List(2), true),
BinDiv("{3} / {-1} = {-3}", List(3), List(-1), List(-3), true),
BinDiv("{3} / {1.5, -1} = {2, -3}", List(3), List(1.5, -1), List(2, -3), true),
BinDiv("{-2} / {2} = {-1}", List(-2), List(2), List(-1), true),
BinDiv("{-2} / {2, 0} = {-1, NegInf}", List(-2), List(2, 0), List(-1, NegInf), true),
BinDiv("{-2} / {-1} = {2}", List(-2), List(-1), List(2), true),
BinDiv("{-4} / {0.8} = {-5}", List(-4), List(0.8), List(-5), true),
BinDiv("{-4} / {-1, 0.8} = {4, -5}", List(-4), List(-1, 0.8), List(4, -5), true),
BinDiv("{-2, -4} / {2} = {-1, -2}", List(-2, -4), List(2), List(-1, -2), true),
BinDiv("{-2, 0.8} / {2, 1} >= {-1, 0.4, 0.8, -2}", List(-2, 0.8), List(2, 1), List(-1, 0.4, 0.8, -2), false),
BinDiv("{-1, 0.4} / {2, 0} = {-0.5, PosInf, NegInf, 0.2}", List(-1, 0.4), List(2, 0), List(-0.5, PosInf, NegInf, 0.2), true),
BinDiv("{-2, 0.4} / {-1} = {2, -0.4}", List(-2, 0.4), List(-1), List(2, -0.4), true),
BinDiv("{-2, 0.8} / {-2, 0.4} = {1, -0.4, 2, -5}", List(-2, 0.8), List(-2, 0.4), List(-2, 0.4, 2, -5), true),
BinMod("{} % {} = {}", List(), List(), List(), true),
BinMod("{NaN} % {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMod("{3} % {NaN} = {NaN}", List(3), List(NaN), List(NaN), true),
BinMod("{PosInf} % {3} = {NaN}", List(PosInf), List(3), List(NaN), true),
BinMod("{NegInf} % {2} = {NaN}", List(NegInf), List(2), List(NaN), true),
BinMod("{PosInf, NegInf} % {2, -3} = {NaN}", List(PosInf, NegInf), List(2, -3), List(NaN), true),
BinMod("{2} % {0} = {NaN}", List(2), List(0), List(NaN), true),
BinMod("{0} % {PosInf} = {0}", List(0), List(PosInf), List(0), true),
BinMod("{0} % {-1} = {0}", List(0), List(-1), List(0), true),
BinMod("{0} % {2, 3} = {0}", List(0), List(2, 3), List(0), true),
BinMod("{3} % {PosInf} = {3}", List(3), List(PosInf), List(3), true),
BinMod("{-1, 0.5} % {NegInf} = {-1, 0.5}", List(-1, 0.5), List(NegInf), List(-1, 0.5), true),
BinMod("{-2} % {PosInf, NegInf} = {-2}", List(-2), List(PosInf, NegInf), List(-2), true),
BinMod("{3} % {2} = {1}", List(3), List(2), List(1), true),
BinMod("{3} % {1, 0} = {1, NaN}", List(3), List(1, 0), List(1, NaN), true),
BinMod("{3} % {-2} = {1}", List(3), List(-2), List(1), true),
BinMod("{3} % {-2, 0.4} = {1, 0.2}", List(3), List(-2, 0.4), List(1, 0.2), true),
BinMod("{1, 0} % {1} >= {0, 0}", List(1, 0), List(1), List(0, 0), false),
BinMod("{-1} % {-2} = {-1}", List(-1), List(-2), List(-1), true),
BinMod("{-2} % {-1} = {0}", List(-2), List(-1), List(0), true),
BinMod("{-1} % {-1, -2} = {0, -1}", List(-1), List(-1, -2), List(0, -1), true),
BinMod("{3.5} % {-0.5} = {0}", List(3.5), List(-0.5), List(0), true),
BinMod("{-2, 2.5} % {3} = {-2, 2.5}", List(-2, 2.5), List(3), List(-2, 2.5), true),
BinMod("{-2, 3.2} % {0.4} >= {0}", List(-2, 3.2), List(0.4), List(0), false),
BinMod("{-2, -4} % {0.8} = {-0.4, 0}", List(-2, -4), List(0.8), List(-0.4, 0), true),
BinEq("{} == {} = {}", List(), List(), List(), true),
BinEq("{UndefTop} == {UndefTop} = {true}", List(UndefTop), List(UndefTop), List(true), true),
BinEq("{NullTop} == {NullTop} = {true}", List(NullTop), List(NullTop), List(true), true),
BinEq("{NaN} == {3, -1} = {false}", List(NaN), List(3, -1), List(false), true),
BinEq("{PosInf} == {NaN} = {false}", List(PosInf), List(NaN), List(false), true),
BinEq("{PosInf} == {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinEq("{NegInf} == {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinEq("{PosInf} == {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinEq("{NegInf} == {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinEq("{PosInf, NegInf} == {PosInf, NegInf} = {true, false}", List(PosInf, NegInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{PosInf, NegInf} == {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinEq("{PosInf, NegInf} == {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinEq("{PosInf} == {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{NegInf} == {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{1} == {1} = {true}", List(1), List(1), List(true), true),
BinEq("{-2} == {-2} = {true}", List(-2), List(-2), List(true), true),
BinEq("{1, 2} == {2} = {false, true}", List(1, 2), List(2), List(false, true), true),
BinEq("{-1, -2} == {-1} = {true, false}", List(-1, -2), List(-1), List(true, false), true),
BinEq("{2} == {1, 2} = {false, true}", List(2), List(1, 2), List(false, true), true),
BinEq("{-1} == {-1, -2} = {true, false}", List(-1), List(-1, -2), List(true, false), true),
BinEq("{-2} == {1, 2} = {false, false}", List(-2), List(1, 2), List(false), true),
BinEq("{1, 2} == {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinEq("{-1, -2} == {-4, -2} = {true, false}", List(-1, -2), List(-4, -2), List(true, false), true),
BinEq("{-1, 3} == {0.5} = {false, false}", List(-1, 3), List(0.5), List(false), false),
BinEq("{\\"1\\"} == {\\"1\\"} = {true}", List("1"), List("1"), List(true), true),
BinEq("{\\"-1\\"} == {\\"-1\\"} = {true}", List("-1"), List("-1"), List(true), true),
BinEq("{\\"1\\"} == {\\"2\\"} = {false}", List("1"), List("2"), List(false), true),
BinEq("{\\"-1\\"} == {\\"-2\\"} = {false}", List("-2"), List("-1"), List(false), true),
BinEq("{\\"1\\"} == {\\"1\\", \\"2\\"} = {true, false}", List("1"), List("1", "2"), List(true, false), true),
BinEq("{\\"-1\\"} == {\\"-2\\", \\"-1\\"} = {true, false}", List("-1"), List("-2", "-1"), List(false, true), true),
BinEq("{\\"1\\", \\"2\\"} == {\\"2\\", \\"3\\"} = {true, false}", List("1", "2"), List("2", "3"), List(true, false), true),
BinEq("{\\"-1\\", \\"-3\\"} == {\\"-3\\", \\"0.4\\"} = {true, false}", List("-1", "-3"), List("-3", "0.4"), List(true, false), true),
BinEq("{true} == {\\"1\\"} = {true}", List(true), List("1"), List(true), true),
BinEq("{false} == {\\"0\\"} = {true}", List(false), List("0"), List(true), true),
BinEq("{\\"true\\", true} == {\\"true\\"} = {true, false}", List("true", true), List("true"), List(true, false), true),
BinEq("{1, true} == {\\"true\\"} = {false}", List(1, true), List("true"), List(false), true),
BinEq("{true} == {ture} = {true}", List(true), List(true), List(true), true),
BinEq("{true} == {false} = {false}", List(true), List(false), List(false), true),
BinEq("{false} == {true} = {false}", List(false), List(true), List(false), true),
BinEq("{false} == {false} = {true}", List(false), List(false), List(true), true),
BinEq("{true, false} == {true} = {true, false}", List(true, false), List(true), List(true, false), true),
BinEq("{true, false} == {false} = {true, false}", List(true, false), List(false), List(true, false), true),
BinEq("{true} == {true, false} = {true, false}", List(true), List(true, false), List(true, false), true),
BinEq("{false} == {true, false} = {true, false}", List(false), List(true, false), List(true, false), true),
BinEq("{UndefTop} == {NullTop} = {true}", List(UndefTop), List(NullTop), List(true), true),
BinEq("{NullTop} == {UndefTop} = {true}", List(NullTop), List(UndefTop), List(true), true),
BinEq("{NaN} == {\\"NaN\\"} = {false}", List(NaN), List("NaN"), List(false), true),
BinEq("{1} == {\\"1\\"} = {true}", List(1), List("1"), List(true), true),
BinEq("{\\"1\\"} == {1} = {true}", List("1"), List(1), List(true), true),
BinEq("{-1} == {\\"-1\\"} = {true}", List(-1), List("-1"), List(true), true),
BinEq("{\\"-1\\"} == {-1} = {true}", List("-1"), List(-1), List(true), true),
BinEq("{-2} == {-1} = {false}", List(-2), List(-1), List(false), true),
BinEq("{1} == {\\"1\\", \\"2\\"} = {true, false}", List(1), List("1", "2"), List(true, false), true),
BinEq("{\\"1\\", \\"2\\"} == {1} = {true, false}", List("1", "2"), List(1), List(true, false), true),
BinEq("{-2} == {\\"1\\"} = {false}", List(-2), List("1"), List(false), true),
BinEq("{\\"1\\"} == {-2} = {false}", List("1"), List(-2), List(false), true),
BinEq("{1, 2} == {\\"1\\", \\"3\\"} = {true, false}", List(1, 2), List("1", "3"), List(true, false), true),
BinEq("{\\"1\\", \\"3\\"} == {1, 2} = {true, false}", List("1", "3"), List(1, 2), List(true, false), true),
BinEq("{-1, -2} == {\\"-1\\", \\"-3\\"} = {true, false}", List(-1, -2), List("-1", "-3"), List(true, false), true),
BinEq("{\\"-1\\", \\"-3\\"} == {-1, -2} = {true, false}", List("-1", "-3"), List(-1, -2), List(true, false), true),
BinEq("{true} == {1} = {true}", List(true), List(1), List(true), true),
BinEq("{false} == {0} = {true}", List(false), List(0), List(true), true),
BinEq("{1} == {true} = {true}", List(1), List(true), List(true), true),
BinEq("{0} == {false} = {true}", List(0), List(false), List(true), true),
BinEq("{false} == {1} = {false}", List(false), List(1), List(false), true),
BinEq("{1} == {false} = {false}", List(1), List(false), List(false), true),
BinEq("{3} == {true} = {false}", List(3), List(true), List(false), true),
BinEq("{true} == {3} = {false}", List(true), List(3), List(false), true),
BinNeq("{} != {} = {}", List(), List(), List(), true),
BinNeq("{UndefTop} != {UndefTop} = {false}", List(UndefTop), List(UndefTop), List(false), true),
BinNeq("{NullTop} != {NullTop} = {false}", List(NullTop), List(NullTop), List(false), true),
BinNeq("{NaN} != {3, -1} = {true}", List(NaN), List(3, -1), List(true), true),
BinNeq("{PosInf} != {NaN} = {true}", List(PosInf), List(NaN), List(true), true),
BinNeq("{PosInf} != {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinNeq("{NegInf} != {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinNeq("{PosInf} != {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinNeq("{NegInf} != {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinNeq("{PosInf, NegInf} != {PosInf, NegInf} = {true, false}", List(PosInf, NegInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{PosInf, NegInf} != {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinNeq("{PosInf, NegInf} != {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinNeq("{PosInf} != {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{NegInf} != {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{1} != {1} = {false}", List(1), List(1), List(false), true),
BinNeq("{-2} != {-2} = {false}", List(-2), List(-2), List(false), true),
BinNeq("{1, 2} != {2} = {true, false}", List(1, 2), List(2), List(true, false), true),
BinNeq("{-1, -2} != {-1} = {false, true}", List(-1, -2), List(-1), List(false, true), true),
BinNeq("{2} != {1, 2} = {true, false}", List(2), List(1, 2), List(true, false), true),
BinNeq("{-1} != {-1, -2} = {false, true}", List(-1), List(-1, -2), List(false, true), true),
BinNeq("{-2} != {1, 2} = {true, true}", List(-2), List(1, 2), List(true), true),
BinNeq("{1, 2} != {2, 3} = {false, true}", List(1, 2), List(2, 3), List(false, true), true),
BinNeq("{-1, -2} != {-4, -2} = {false, true}", List(-1, -2), List(-4, -2), List(false, true), true),
BinNeq("{-1, 3} != {0.5} >= {true, true}", List(-1, 3), List(0.5), List(true), false),
BinNeq("{\\"1\\"} != {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinNeq("{\\"-1\\"} != {\\"-1\\"} = {false}", List("-1"), List("-1"), List(false), true),
BinNeq("{\\"1\\"} != {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinNeq("{\\"-1\\"} != {\\"-2\\"} = {true}", List("-2"), List("-1"), List(true), true),
BinNeq("{\\"1\\"} != {\\"1\\", \\"2\\"} = {false, true}", List("1"), List("1", "2"), List(false, true), true),
BinNeq("{\\"-1\\"} != {\\"-2\\", \\"-1\\"} = {false, true}", List("-1"), List("-2", "-1"), List(true, false), true),
BinNeq("{\\"1\\", \\"2\\"} != {\\"2\\", \\"3\\"} = {false, true}", List("1", "2"), List("2", "3"), List(false, true), true),
BinNeq("{\\"-1\\", \\"-3\\"} != {\\"-3\\", \\"0.4\\"} = {false, true}", List("-1", "-3"), List("-3", "0.4"), List(false, true), true),
BinNeq("{false} != {false} = {false}", List(false), List(false), List(false), true),
BinNeq("{false} != {true} = {true}", List(false), List(true), List(true), true),
BinNeq("{true} != {false} = {true}", List(true), List(false), List(true), true),
BinNeq("{true} != {true} = {false}", List(true), List(true), List(false), true),
BinNeq("{false, true} != {false} = {false, true}", List(false, true), List(false), List(false, true), true),
BinNeq("{false, true} != {true} = {false, true}", List(false, true), List(true), List(false, true), true),
BinNeq("{false} != {false, true} = {false, true}", List(false), List(false, true), List(false, true), true),
BinNeq("{true} != {false, true} = {false, true}", List(true), List(false, true), List(false, true), true),
BinNeq("{UndefTop} != {NullTop} = {false}", List(UndefTop), List(NullTop), List(false), true),
BinNeq("{NullTop} != {UndefTop} = {false}", List(NullTop), List(UndefTop), List(false), true),
BinNeq("{NaN} != {\\"NaN\\"} = {true}", List(NaN), List("NaN"), List(true), true),
BinNeq("{1} != {\\"1\\"} = {false}", List(1), List("1"), List(false), true),
BinNeq("{\\"1\\"} != {1} = {false}", List("1"), List(1), List(false), true),
BinNeq("{-1} != {\\"-1\\"} = {false}", List(-1), List("-1"), List(false), true),
BinNeq("{\\"-1\\"} != {-1} = {false}", List("-1"), List(-1), List(false), true),
BinNeq("{-2} != {-1} = {true}", List(-2), List(-1), List(true), true),
BinNeq("{1} != {\\"1\\", \\"2\\"} = {false, true}", List(1), List("1", "2"), List(false, true), true),
BinNeq("{\\"1\\", \\"2\\"} != {1} = {false, true}", List("1", "2"), List(1), List(false, true), true),
BinNeq("{-2} != {\\"1\\"} = {true}", List(-2), List("1"), List(true), true),
BinNeq("{\\"1\\"} != {-2} = {true}", List("1"), List(-2), List(true), true),
BinNeq("{1, 2} != {\\"1\\", \\"3\\"} = {false, true}", List(1, 2), List("1", "3"), List(false, true), true),
BinNeq("{\\"1\\", \\"3\\"} != {1, 2} = {false, true}", List("1", "3"), List(1, 2), List(true, false), true),
BinNeq("{-1, -2} != {\\"-1\\", \\"-3\\"} = {false, true}", List(-1, -2), List("-1", "-3"), List(false, true), true),
BinNeq("{\\"-1\\", \\"-3\\"} != {-1, -2} = {false, true}", List("-1", "-3"), List(-1, -2), List(false, true), true),
BinNeq("{true} != {1} = {false}", List(true), List(1), List(false), true),
BinNeq("{false} != {0} = {false}", List(false), List(0), List(false), true),
BinNeq("{1} != {true} = {false}", List(1), List(true), List(false), true),
BinNeq("{0} != {false} = {false}", List(0), List(false), List(false), true),
BinNeq("{false} != {1} = {true}", List(false), List(1), List(true), true),
BinNeq("{1} != {false} = {true}", List(1), List(false), List(true), true),
BinNeq("{3} != {true} = {true}", List(3), List(true), List(true), true),
BinNeq("{true} != {3} = {true}", List(true), List(3), List(true), true),
BinSEq("{} === {} = {}", List(), List(), List(), true),
BinSEq("{UndefTop} === {UndefTop} = {true}", List(UndefTop), List(UndefTop), List(true), true),
BinSEq("{NullTop} === {NullTop} = {true}", List(NullTop), List(NullTop), List(true), true),
BinSEq("{} === {1, -1} = {}", List(), List(1, -1), List(), true),
BinSEq("{1, -1} === {} = {}", List(1, -1), List(), List(), true),
BinSEq("{NaN} === {1} = {false}", List(NaN), List(1), List(false), true),
BinSEq("{2} === {NaN} = {false}", List(2), List(NaN), List(false), true),
BinSEq("{PosInf} === {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinSEq("{PosInf, NegInf} === {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinSEq("{1} === {1} = {true}", List(1), List(1), List(true), true),
BinSEq("{-2} === {-2} = {true}", List(-2), List(-2), List(true), true),
BinSEq("{1, 2} === {2} = {false, true}", List(1, 2), List(2), List(false, true), true),
BinSEq("{-1, -2} === {-1} = {true, false}", List(-1, -2), List(-1), List(true, false), true),
BinSEq("{2} === {1, 2} = {false, true}", List(2), List(1, 2), List(false, true), true),
BinSEq("{-1} == {-1, -2} = {true, false}", List(-1), List(-1, -2), List(true, false), true),
BinSEq("{-2} === {1, 2} = {false, false}", List(-2), List(1, 2), List(false), true),
BinSEq("{1, 2} === {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinSEq("{-1, -2} === {-4, -2} = {true, false}", List(-1, -2), List(-4, -2), List(true, false), true),
BinSEq("{-1, 3} === {0.5} >= {false, false}", List(-1, 3), List(0.5), List(false), false),
BinSEq("{1} === {\\"1\\"} = {false}", List(1), List("1"), List(false), true),
BinSEq("{\\"1\\"} === {1} = {false}", List("1"), List(1), List(false), true),
BinSEq("{\\"1\\"} === {\\"1\\"} = {true}", List("1"), List("1"), List(true), true),
BinSEq("{\\"-1\\"} === {\\"-1\\"} = {true}", List("-1"), List("-1"), List(true), true),
BinSEq("{\\"1\\"} === {\\"2\\"} = {false}", List("1"), List("2"), List(false), true),
BinSEq("{\\"-1\\"} === {\\"-2\\"} = {false}", List("-2"), List("-1"), List(false), true),
BinSEq("{\\"1\\"} === {\\"1\\", \\"2\\"} = {true, false}", List("1"), List("1", "2"), List(true, false), true),
BinSEq("{\\"-1\\"} === {\\"-2\\", \\"-1\\"} = {true, false}", List("-1"), List("-2", "-1"), List(false, true), true),
BinSEq("{\\"1\\", \\"2\\"} === {\\"2\\", \\"3\\"} = {true, false}", List("1", "2"), List("2", "3"), List(true, false), true),
BinSEq("{\\"-1\\", \\"-3\\"} === {\\"-3\\", \\"0.4\\"} = {true, false}", List("-1", "-3"), List("-3", "0.4"), List(true, false), true),
BinSEq("{\\"s\\"} === {\\"s\\"} = {true}", List("s"), List("s"), List(true), true),
BinSEq("{\\"Ta\\"} === {\\"ta\\"} = {false}", List("Ta"), List("ta"), List(false), true),
BinSEq("{true} === {\\"1\\"} = {false}", List(true), List("1"), List(false), true),
BinSEq("{false} === {\\"0\\"} = {false}", List(false), List("0"), List(false), true),
BinSEq("{true} === {1} = {false}", List(true), List(1), List(false), true),
BinSEq("{false} === {0} = {false}", List(false), List(0), List(false), true),
BinSEq("{true} === {true} = {true}", List(true), List(true), List(true), true),
BinSNeq("{} !== {} = {}", List(), List(), List(), true),
BinSNeq("{UndefTop} !== {UndefTop} = {false}", List(UndefTop), List(UndefTop), List(false), true),
BinSNeq("{NullTop} !== {NullTop} = {false}", List(NullTop), List(NullTop), List(false), true),
BinSNeq("{} !== {1, -1} = {}", List(), List(1, -1), List(), true),
BinSNeq("{1, -1} !== {} = {}", List(1, -1), List(), List(), true),
BinSNeq("{NaN} !== {1} = {true}", List(NaN), List(1), List(true), true),
BinSNeq("{2} !== {NaN} = {true}", List(2), List(NaN), List(true), true),
BinSNeq("{PosInf} !== {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinSNeq("{PosInf, NegInf} !== {PosInf} = {false, true}", List(PosInf, NegInf), List(PosInf), List(false, true), true),
BinSNeq("{1} !== {1} = {false}", List(1), List(1), List(false), true),
BinSNeq("{-2} !== {-2} = {false}", List(-2), List(-2), List(false), true),
BinSNeq("{1, 2} !== {2} = {true, false}", List(1, 2), List(2), List(true, false), true),
BinSNeq("{-1, -2} !== {-1} = {false, true}", List(-1, -2), List(-1), List(false, true), true),
BinSNeq("{2} !== {1, 2} = {true, false}", List(2), List(1, 2), List(true, false), true),
BinSNeq("{-1} !== {-1, -2} = {false, true}", List(-1), List(-1, -2), List(false, true), true),
BinSNeq("{-2} !== {1, 2} = {true, true}", List(-2), List(1, 2), List(true), true),
BinSNeq("{1, 2} !== {2, 3} = {false, true}", List(1, 2), List(2, 3), List(false, true), true),
BinSNeq("{-1, -2} !== {-4, -2} = {false, true}", List(-1, -2), List(-4, -2), List(false, true), true),
BinSNeq("{-1, 3} !== {0.5} >= {true, true}", List(-1, 3), List(0.5), List(true), false),
BinSNeq("{1} !== {\\"1\\"} = {true}", List(1), List("1"), List(true), true),
BinSNeq("{\\"1\\"} !== {1} = {true}", List("1"), List(1), List(true), true),
BinSNeq("{\\"1\\"} !== {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinSNeq("{\\"-1\\"} !== {\\"-1\\"} = {false}", List("-1"), List("-1"), List(false), true),
BinSNeq("{\\"1\\"} !== {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinSNeq("{\\"-1\\"} !== {\\"-2\\"} = {true}", List("-2"), List("-1"), List(true), true),
BinSNeq("{\\"1\\"} !== {\\"1\\", \\"2\\"} = {false, true}", List("1"), List("1", "2"), List(false, true), true),
BinSNeq("{\\"-1\\"} !== {\\"-2\\", \\"-1\\"} = {false, true}", List("-1"), List("-2", "-1"), List(true, false), true),
BinSNeq("{\\"1\\", \\"2\\"} !== {\\"2\\", \\"3\\"} = {false, true}", List("1", "2"), List("2", "3"), List(false, true), true),
BinSNeq("{\\"-1\\", \\"-3\\"} !== {\\"-3\\", \\"0.4\\"} = {false, true}", List("-1", "-3"), List("-3", "0.4"), List(false, true), true),
BinSNeq("{\\"s\\"} !== {\\"s\\"} = {false}", List("s"), List("s"), List(false), true),
BinSNeq("{\\"Ta\\"} !== {\\"ta\\"} = {true}", List("Ta"), List("ta"), List(true), true),
BinSNeq("{true} !== {\\"1\\"} = {true}", List(true), List("1"), List(true), true),
BinSNeq("{false} !== {\\"0\\"} = {true}", List(false), List("0"), List(true), true),
BinSNeq("{true} !== {1} = {true}", List(true), List(1), List(true), true),
BinSNeq("{false} !== {0} = {true}", List(false), List(0), List(true), true),
BinSNeq("{true} !== {true} = {false}", List(true), List(true), List(false), true),
BinLess("{2, \\"-3\\"} < {NaN, \\"5\\"} = {true, false}", List(2, "-3"), List(NaN, "5"), List(true, false), true),
BinLess("{} < {} = {}", List(), List(), List(), true),
BinLess("{false} < {true} = {true}", List(false), List(true), List(true), true),
BinLess("{true} < {false} = {false}", List(true), List(false), List(false), true),
BinLess("{false} < {true, false} = {true, false}", List(false), List(true, false), List(true, false), true),
BinLess("{true} < {true, false} >= {false}", List(true), List(true, false), List(false), false),
BinLess("{true, false} < {true} = {false, true}", List(true, false), List(true), List(true, false), true),
BinLess("{true, false} < {false} = {false}", List(true, false), List(false), List(false), true),
BinLess("{null} < {true} = {true}", List(NullTop), List(true), List(true), true),
BinLess("{false} < {null} = {false}", List(false), List(NullTop), List(false), true),
BinLess("{NaN} < {3} = {false}", List(NaN), List(3), List(false), true),
BinLess("{2} < {NaN} = {false}", List(2), List(NaN), List(false), true),
BinLess("{PosInf} < {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinLess("{NegInf} < {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinLess("{PosInf, NegInf} < {1} = {true, false}", List(PosInf, NegInf), List(1), List(true, false), true),
BinLess("{3} < {PosInf, NegInf} = {true, false}", List(3), List(PosInf, NegInf), List(true, false), true),
BinLess("{PosInf} < {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinLess("{NegInf} < {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinLess("{1} < {2} = {true}", List(1), List(2), List(true), true),
BinLess("{2} < {1} = {false}", List(2), List(1), List(false), true),
BinLess("{1} < {1.5} = {true}", List(1), List(1.5), List(true), true),
BinLess("{1} < {-1} = {false}", List(1), List(-1), List(false), true),
BinLess("{-3} < {1} = {true}", List(-3), List(1), List(true), true),
BinLess("{3.4} < {1} = {false}", List(3.4), List(1), List(false), true),
BinLess("{-2} < {-1} = {true}", List(-2), List(-1), List(true), true),
BinLess("{-1} < {-2} = {false}", List(-1), List(-2), List(false), true),
BinLess("{1, 2} < {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinLess("{-2, -3} < {-5, -2} = {true, false}", List(-2, -3), List(-5, -2), List(true, false), true),
BinLess("{-2, \\"-5\\"} < {\\"-3\\", -2} = {true, false}", List(-2, "-5"), List("-3", -2), List(true, false), true),
BinLess("{\\"1\\"} < {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinLess("{\\"1\\"} < {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinLess("{\\"1\\"} < {\\"1d\\"} = {true}", List("1"), List("1d"), List(true), true),
BinLess("{\\"1\\"} < {\\"-1\\"} = {false}", List("1"), List("-1"), List(false), true),
BinLess("{\\"\\"} < {\\"1\\"} = {true}", List(""), List("1"), List(true), true),
BinLess("{\\"s\\"} < {\\"1\\"} = {false}", List("s"), List("1"), List(false), true),
BinLess("{\\"s\\"} < {\\"t\\"} = {true}", List("s"), List("t"), List(true), true),
BinLess("{\\"s\\"} < {\\"s1\\"} = {true}", List("s"), List("s1"), List(true), true),
BinLess("{\\"s\\"} < {\\"d\\"} = {false}", List("s"), List("d"), List(false), true),
BinLess("{\\"1\\", \\"2\\"} < {\\"12\\", \\"23\\"} = {true, false}", List("1", "2"), List("12", "23"), List(true, false), true),
BinLess("{\\"s\\", \\"d\\"} < {\\"sd\\"} = {ture, false}", List("s", "d"), List("sd"), List(true, false), true),
BinLess("{\\"-5\\"} < {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLess("{3} < {\\"5\\", NaN} = {true, false}", List(3), List("5", NaN), List(true, false), true),
BinLess("{-2, \\"-5\\"} < {\\"-3\\", NaN} = {false}", List(-2, "-5"), List("-3", NaN), List(false), false),
BinGreater("{} > {} = {}", List(), List(), List(), true),
BinGreater("{2} > {1} = {true}", List(2), List(1), List(true), true),
BinGreater("{2} > {4} = {false}", List(2), List(4), List(false), true),
BinGreater("{2} > {1, 4} = {true, false}", List(2), List(1, 4), List(true, false), true),
BinGreater("{2} > {-1} = {true}", List(2), List(-1), List(true), true),
BinGreater("{2} > {3.5} = {false}", List(2), List(3.5), List(false), true),
BinGreater("{2} > {-1, 3.5} = {true, false}", List(2), List(-1, 3.5), List(true, false), true),
BinGreater("{-3} > {1} = {false}", List(-3), List(1), List(false), true),
BinGreater("{4.3} > {1} = {true}", List(4.3), List(1), List(true), true),
BinGreater("{-3} > {0, 1} = {false}", List(-3), List(0, 1), List(false), true),
BinGreater("{4.5} > {0, 5} = {ture, false}", List(4.5), List(0, 5), List(true, false), true),
BinGreater("{-2} > {4.5} = {false}", List(-2), List(4.5), List(false), true),
BinGreater("{3.2} > {-1} = {true}", List(3.2), List(-1), List(true), true),
BinGreater("{-3} > {-2, -5} = {true, false}", List(-3), List(-2, -5), List(true, false), true),
BinGreater("{1, 3} > {2} = {true, false}", List(1, 3), List(2), List(true, false), true),
BinGreater("{1, 2} > {3.5} >= {false}", List(1, 2), List(3.5), List(false), false),
BinGreater("{1, 2} > {1.5} = {true, false}", List(1, 2), List(1.5), List(true, false), true),
BinGreater("{1, 2} > {-1, 3.5} = {true, false}", List(1, 2), List(-1, 3.5), List(true, false), true),
BinGreater("{-2, -5} > {2} >= {false}", List(-2, -5), List(2), List(false), false),
BinGreater("{-1, 3.5} > {2} = {true, false}", List(-1, 3.5), List(2), List(true, false), true),
BinGreater("{-2, -3} > {-4} >= {true}", List(-2, -3), List(-4), List(true), false),
BinGreater("{-2, -3} > {0, 6} >= {false}", List(-2, -3), List(0, 6), List(false), false),
BinGreater("{-2, 5.5} > {0, 6} = {true, false}", List(-2, 5.5), List(0, 6), List(true, false), true),
BinGreater("{-3, 2.5} > {1.5, -2} = {true, false}", List(-3, 2.5), List(1.5, -2), List(true, false), true),
BinGreater("{-2, \\"-3\\"} > {\\"-5\\", -2} = {true, false}", List(-2, "-3"), List("-5", -2), List(true, false), true),
BinGreater("{false} > {true} = {false}", List(false), List(true), List(false), true),
BinGreater("{true} > {false} = {true}", List(true), List(false), List(true), true),
BinGreater("{false} > {true, false} = {false}", List(false), List(true, false), List(false), true),
BinGreater("{true, false} > {true} >= {false}", List(true, false), List(true), List(false), false),
BinGreater("{true, false} > {false} = {true, false}", List(true, false), List(false), List(true, false), true),
BinGreater("{null} > {true} = {false}", List(NullTop), List(true), List(false), true),
BinGreater("{false} > {null} = {false}", List(false), List(NullTop), List(false), true),
BinGreater("{NaN} > {3} = {false}", List(NaN), List(3), List(false), true),
BinGreater("{2} > {NaN} = {false}", List(2), List(NaN), List(false), true),
BinGreater("{PosInf} > {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinGreater("{NegInf} > {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinGreater("{PosInf, NegInf} > {1} = {true, false}", List(PosInf, NegInf), List(1), List(true, false), true),
BinGreater("{3} > {PosInf, NegInf} = {true, false}", List(3), List(PosInf, NegInf), List(true, false), true),
BinGreater("{PosInf} > {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinGreater("{NegInf} > {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinGreater("{-2, \\"-3\\"} > {-4, \\"-1\\"} >= {true, false}", List(-2, "-3"), List(-4, "-1"), List(true, false), true),
BinGreater("{} > {} = {}", List(), List(), List(), true),
BinGreater("{\\"-5\\"} > {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinGreater("{-2} > {\\"-3\\", NaN} = {true, false}", List(-2), List("-3", NaN), List(true, false), true),
BinGreater("{3} > {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false),
BinLessEq("{} <= {} = {}", List(), List(), List(), true),
BinLessEq("{} <= {1} = {}", List(), List(1), List(), true),
BinLessEq("{1} <= {} = {}", List(1), List(), List(), true),
BinLessEq("{true} <= {true} = {true}", List(true), List(true), List(true), true),
BinLessEq("{false} <= {true, false} = {true}", List(false), List(true, false), List(true), true),
BinLessEq("{null} <= {0, 1} = {true}", List(NullTop), List(0, 1), List(true), true),
BinLessEq("{null} <= {-1, -4} = {true, false}", List(NullTop), List(-1, -4), List(true, false), true),
BinLessEq("{UndefTop} <= {0, 1} = {false}", List(UndefTop), List(0, 1), List(false), true),
BinLessEq("{PosInf} <= {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinLessEq("{NegInf} <= {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinLessEq("{PosInf} <= {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinLessEq("{NegInf} <= {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinLessEq("{NegInf} <= {PosInf, NegInf} = {true}", List(NegInf), List(PosInf, NegInf), List(true), true),
BinLessEq("{PosInf} <= {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinLessEq("{PosInf, NegInf} <= {PosInf} = {true}", List(PosInf, NegInf), List(PosInf), List(true), true),
BinLessEq("{PosInf, NegInf} <= {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinLessEq("{1} <= {1} = {true}", List(1), List(1), List(true), true),
BinLessEq("{1} <= {0, 1} = {true, false}", List(1), List(0, 1), List(true, false), true),
BinLessEq("{1} <= {-1} = {false}", List(1), List(-1), List(false), true),
BinLessEq("{1} <= {2.3} = {true}", List(1), List(2.3), List(true), true),
BinLessEq("{1} <= {-1, 2.3} = {true, false}", List(1), List(-1, 2.3), List(true, false), true),
BinLessEq("{-1} <= {-1} = {true}", List(-1), List(-1), List(true), true),
BinLessEq("{-1} <= {1} = {true}", List(-1), List(1), List(true), true),
BinLessEq("{-1} <= {0, 1} = {true}", List(-1), List(0, 1), List(true), true),
BinLessEq("{-1} <= {-3} = {false}", List(-1), List(-3), List(false), true),
BinLessEq("{-1} <= {2.5, -3} = {true, false}", List(-1), List(2.5, -3), List(true, false), true),
BinLessEq("{2.5} <= {3} = {true}", List(2.5), List(3), List(true), true),
BinLessEq("{2.5} <= {3.5} = {true}", List(2.5), List(3.5), List(true), true),
BinLessEq("{2.5} <= {2, 3} = {true, false}", List(2.5), List(2, 3), List(true, false), true),
BinLessEq("{2.5} <= {-1, 3.5} = {true, false}", List(2.5), List(-1, 3.5), List(true, false), true),
BinLessEq("{0, 1} <= {0} = {true, false}", List(0, 1), List(0), List(true, false), true),
BinLessEq("{0, 1} <= {-1} = {false}", List(0, 1), List(-1), List(false), true),
BinLessEq("{0, 1} <= {0, 3} = {true, false}", List(0, 1), List(0, 3), List(true, false), true),
BinLessEq("{0, 1} <= {-2, 3.5} = {true, false}", List(0, 1), List(-2, 3.5), List(true, false), true),
BinLessEq("{-1, 2.5} <= {1} = {true, false}", List(-1, 2.5), List(1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {-1} = {true, false}", List(-1, 2.5), List(-1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {0, 1} = {true, false}", List(-1, 2.5), List(0, 1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {-3, 2.5} = {true, false}", List(-1, 2.5), List(-3, 2.5), List(true, false), true),
BinLessEq("{-1, \\"2\\"} <= {\\"3\\", 2.5} = {true, false}", List(-1, "2"), List("3", 2.5), List(true, false), true),
BinLessEq("{\\"s\\"} <= {\\"s\\"} = {true}", List("s"), List("s"), List(true), true),
BinLessEq("{\\"s\\"} <= {\\"str\\"} = {true}", List("s"), List("str"), List(true), true),
BinLessEq("{\\"a\\"} <= {\\"b\\"} = {true}", List("a"), List("b"), List(true), true),
BinLessEq("{\\"1\\"} <= {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinLessEq("{\\"0\\"} <= {\\"-3\\"} = {false}", List("0"), List("-3"), List(false), true),
BinLessEq("{\\"0\\", \\"1\\"} <= {\\"0\\", \\"3\\"} = {true, false}", List("0", "1"), List("0", "3"), List(true, false), true),
BinLessEq("{\\"0\\", \\"1\\"} <= {\\"-1\\"} = {false}", List("0", "1"), List("-1"), List(false), false),
BinLessEq("{\\"-1\\", \\"2.5\\"} <= {\\"-1\\"} = {true, false}", List("-1", "2.5"), List("-1"), List(true, false), true),
BinLessEq("{\\"-5\\"} <= {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLessEq("{-2} <= {\\"-3\\", NaN} = {false}", List(-2), List("-3", NaN), List(false), false),
BinLessEq("{3} <= {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false),
BinGreaterEq("{} >= {} = {}", List(), List(), List(), true),
BinGreaterEq("{NaN} >= {2} = {false}", List(NaN), List(2), List(false), true),
BinGreaterEq("{PosInf} >= {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf} >= {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinGreaterEq("{NegInf} >= {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinGreaterEq("{NegInf} >= {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf, NegInf} >= {NegInf} = {true}", List(PosInf, NegInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf} >= {PosInf, NegInf} = {true}", List(PosInf), List(PosInf, NegInf), List(true), true),
BinGreaterEq("{NegInf} >= {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinGreaterEq("{1} >= {1} = {true}", List(1), List(1), List(true), true),
BinGreaterEq("{3} >= {5} = {false}", List(3), List(5), List(false), true),
BinGreaterEq("{1} >= {-1} = {true}", List(1), List(-1), List(true), true),
BinGreaterEq("{1} >= {1, 3} = {true, false}", List(1), List(1, 3), List(true, false), true),
BinGreaterEq("{1} >= {-1, 3.5} = {true, false}", List(1), List(-1, 3.5), List(true, false), true),
BinGreaterEq("{-1} >= {0} = {false}", List(-1), List(0), List(false), true),
BinGreaterEq("{3.5} >= {0} = {true}", List(3.5), List(0), List(true), true),
BinGreaterEq("{-1} >= {-2} = {true}", List(-1), List(-2), List(true), true),
BinGreaterEq("{-5} >= {-2} = {false}", List(-5), List(-2), List(false), true),
BinGreaterEq("{-1} >= {1, 2} = {false}", List(-1), List(1, 2), List(false), true),
BinGreaterEq("{3.5} >= {3, 4} = {true, false}", List(3.5), List(3, 4), List(true, false), true),
BinGreaterEq("{-1} >= {-2, 2.5} = {ture, false}", List(-1), List(-2, 2.5), List(true, false), true),
BinGreaterEq("{0, 1} >= {0} = {true}", List(0, 1), List(0), List(true), true),
BinGreaterEq("{0, 1} >= {-2} = {true}", List(0, 1), List(-2), List(true), true),
BinGreaterEq("{0, 1} >= {0.5} = {true, false}", List(0, 1), List(0.5), List(true, false), true),
BinGreaterEq("{0, 1} >= {0, 3} = {true, false}", List(0, 1), List(0, 3), List(true, false), true),
BinGreaterEq("{0, 1} >= {-2, 0.5} = {true, false}", List(0, 1), List(-2, 0.5), List(true, false), true),
BinGreaterEq("{\\"0\\", 1} >= {\\"-2\\", 0.5} = {true, false}", List("0", 1), List("-2", 0.5), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {1} = {true, false}", List(-2, 3.5), List(1), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {0, 1} = {true, false}", List(-2, 3.5), List(0, 1), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {-2} >= {true}", List(-2, 3.5), List(-2), List(true), false),
BinGreaterEq("{-2, 3.5} >= {-3, 4.2} = {true, false}", List(-2, 3.5), List(-3, 4.2), List(true, false), true),
BinLessEq("{\\"-5\\"} >= {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLessEq("{-2} >= {\\"-3\\", NaN} = {true, false}", List(-2), List("-3", NaN), List(true, false), true),
BinLessEq("{3} >= {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false)
)
// Unary-operator test cases over abstract value sets.
// Each case is (name, input set, expected output set, exact-equality flag);
// the name encodes the check as "op{inputs} = {expected}".
// NOTE(review): element order inside the braces is set-like (e.g. the
// "-{1, 3} = {-3, -1}" case lists data as List(-1, -3)) — presumably the
// comparison is order-insensitive; confirm against the test harness.
val unaCases:List[TypeOperator] = List (
    // `void` always yields undefined, regardless of the operand set.
    UnaVoid("void {1} = {\\"undefined\\"}", List(1), List(UndefTop), true),
    UnaVoid("void {null} = {\\"undefined\\"}", List(NullTop), List(UndefTop), true),
    UnaVoid("void {null, PosInf} = {\\"undefined\\"}", List(NullTop, PosInf), List(UndefTop), true),
    // Unary plus coerces to number (null -> 0, true -> 1).
    UnaPlus("+{null} = {0}", List(NullTop), List(0), true),
    UnaPlus("+{true, 1} = {1}", List(true, 1), List(1), true),
    // Unary minus: numeric negation; NaN is fixed, infinities swap sign,
    // non-numeric operands are coerced first ("str" -> NaN, null -> 0).
    UnaMinus("-{NaN} = {NaN}", List(NaN), List(NaN), true),
    UnaMinus("-{0} = {0}", List(0), List(0), true),
    UnaMinus("-{1} = {-1}", List(1), List(-1), true),
    UnaMinus("-{-3.2} = {3.2}", List(-3.2), List(3.2), true),
    UnaMinus("-{-3} = {3}", List(-3), List(3), true),
    UnaMinus("-{1, 3} = {-3, -1}", List(1,3), List(-1,-3), true),
    UnaMinus("-{-1, 2.1} = {1, -2.1}", List(-1, 2.1), List(1, -2.1), true),
    UnaMinus("-{PosInf} = {NegInf}", List(PosInf), List(NegInf), true),
    UnaMinus("-{NegInf} = {PosInf}", List(NegInf), List(PosInf), true),
    UnaMinus("-{\\"str\\", null} = {NaN, 0}", List("str", NullTop), List(NaN, 0), true),
    // Bitwise NOT: ToInt32 truncation then complement (~x == -(x)-1),
    // e.g. 3.1 -> 3 -> -4 and 0.5 -> 0 -> -1.
    UnaBitNeg("~{32} = {-33}", List(32), List(-33), true),
    UnaBitNeg("~{3.1} = {-4}", List(3.1), List(-4), true),
    UnaBitNeg("~{3, 10} = {-4, -11}", List(3, 10), List(-4, -11), true),
    UnaBitNeg("~{-3, 0.5} = {2, -1}", List(-3, 0.5), List(2, -1), true),
    UnaBitNeg("~{1, -1} = {-2, 0}", List(1, -1), List(-2, 0), true),
    // Logical negation on booleans.
    UnaNeg("!{true} = {false}", List(true), List(false), true),
    UnaNeg("!{false} = {true}", List(false), List(true), true),
    // Fixed malformed test name: closing brace of the input set was
    // misplaced ("!{true, false = {false, true}}").
    UnaNeg("!{true, false} = {false, true}", List(true, false), List(false, true), true)
)
/** Assembles the JUnit suite: one sub-suite each for join, binary-operator and
 * unary-operator cases, dispatching every case description to the test method
 * named after its operator. */
def suite(): Test = {
// AddressManager must be reset before any abstract values are built.
AddressManager.reset()
val root = new TestSuite("Typing Operator Test")
val joinSuite = new TestSuite("Join")
val binSuite = new TestSuite("Binary Operators")
val unaSuite = new TestSuite("Unary Operators")
joinCases.foreach { c =>
joinSuite.addTest(new JoinTest(c._1, c._2, c._3, c._4, c._5, "testJoin"))
}
binCases.foreach { c =>
// Map each case description onto its payload plus the BinTest method to invoke.
val (name, lhs, rhs, expec, equal, func) = c match {
case BinBitOr(n, l, r, e, q) => (n, l, r, e, q, "testBitOr")
case BinBitAnd(n, l, r, e, q) => (n, l, r, e, q, "testBitAnd")
case BinBitXor(n, l, r, e, q) => (n, l, r, e, q, "testBitXor")
case BinLShift(n, l, r, e, q) => (n, l, r, e, q, "testLShift")
case BinRShift(n, l, r, e, q) => (n, l, r, e, q, "testRShift")
case BinURShift(n, l, r, e, q) => (n, l, r, e, q, "testURShift")
case BinPlus(n, l, r, e, q) => (n, l, r, e, q, "testPlus")
case BinMinus(n, l, r, e, q) => (n, l, r, e, q, "testMinus")
case BinMul(n, l, r, e, q) => (n, l, r, e, q, "testMul")
case BinDiv(n, l, r, e, q) => (n, l, r, e, q, "testDiv")
case BinMod(n, l, r, e, q) => (n, l, r, e, q, "testMod")
case BinEq(n, l, r, e, q) => (n, l, r, e, q, "testEq")
case BinNeq(n, l, r, e, q) => (n, l, r, e, q, "testNeq")
case BinSEq(n, l, r, e, q) => (n, l, r, e, q, "testSEq")
case BinSNeq(n, l, r, e, q) => (n, l, r, e, q, "testSNeq")
case BinLess(n, l, r, e, q) => (n, l, r, e, q, "testLess")
case BinGreater(n, l, r, e, q) => (n, l, r, e, q, "testGreater")
case BinLessEq(n, l, r, e, q) => (n, l, r, e, q, "testLessEq")
case BinGreaterEq(n, l, r, e, q) => (n, l, r, e, q, "testGreaterEq")
}
binSuite.addTest(new BinTest(name, lhs, rhs, expec, equal, func))
}
unaCases.foreach { c =>
val (name, oprnd, expec, equal, func) = c match {
case UnaVoid(n, o, e, q) => (n, o, e, q, "testVoid")
case UnaPlus(n, o, e, q) => (n, o, e, q, "testPlus")
case UnaMinus(n, o, e, q) => (n, o, e, q, "testMinus")
case UnaBitNeg(n, o, e, q) => (n, o, e, q, "testBitNeg")
case UnaNeg(n, o, e, q) => (n, o, e, q, "testNeg")
}
unaSuite.addTest(new UnaTest(name, oprnd, expec, equal, func))
}
root.addTest(joinSuite)
root.addTest(binSuite)
root.addTest(unaSuite)
root
}
}
/** Base class for operator tests. Provides the abstraction function that lifts a
 * list of concrete (or already-abstract) values into a single abstract Value. */
class OperatorTestCase(func:String) extends TestCase(func) {
// alpha function : concs -> abs
/** Abstracts every element of `in` and joins the results (lattice join via `+`)
 * starting from ValueBot.
 * NOTE(review): the match is not exhaustive — an element of an unhandled type
 * raises a MatchError at test setup time. */
def toValue(in:List[Any]):Value = {
var v:Value = ValueBot
for(i <-in) {
v = i match {
// abstract undefined-top contributes the undefined abstraction
case u:AbsUndef if u.isTop => v + Value(AbsUndef.alpha)
// already-abstract numbers: only the AbsSingle case guarded below is kept.
// NOTE(review): the negated guard (keep singletons that are NOT defined numbers)
// looks suspicious — confirm this is intentional.
case n:AbsNumber => n.getAbsCase match {
case AbsSingle if !(n.getSingle.isDefined && AbsNumber.isNum(n)) => v + Value(n)
case _ => v
}
case n:Int => v + Value(AbsNumber.alpha(n))
case d:Number => v + Value(AbsNumber.alpha(d.doubleValue))
case s:String => v + Value(AbsString.alpha(s))
case b:Boolean => v + Value(AbsBool.alpha(b))
case n:AbsNull if n.isTop => v + Value(AbsNull.alpha)
}
}
v
}
}
/** Checks that the join (+) of the two abstracted operand sets over-approximates
 * the expected value, and equals it exactly when `equal` is set. */
class JoinTest(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
var _left:Value = ValueBot
var _right:Value = ValueBot
var _expec:Value = ValueBot
def joinTest(): Unit = {}
override def getName = name
override def setUp() = {
// Abstract the concrete operand/expectation lists before each test run.
_left = toValue(lhs)
_right = toValue(rhs)
_expec = toValue(expec)
}
def testJoin = {
val joined = _left + _right
// soundness: the join must subsume the expectation ...
assertTrue(_expec <= joined)
// ... and precision: match it exactly when the case is flagged exact
if (equal) assertTrue(joined <= _expec)
}
}
/** Exercises one binary abstract operator: the operator applied to the abstracted
 * operands must subsume the expected value (soundness), and equal it exactly when
 * `equal` holds (precision). */
class BinTest(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
var leftVal:Value = ValueBot
var rightVal:Value = ValueBot
var expecVal:Value = ValueBot
def binTest(): Unit = {}
override def getName = name
override def setUp = {
// Abstract operands and expectation before each test run.
leftVal = toValue(lhs)
rightVal = toValue(rhs)
expecVal = toValue(expec)
}
// Shared assertion body for every operator below.
private def check(op: (Value, Value) => Value): Unit = {
val actual = op(leftVal, rightVal)
assertTrue(expecVal <= actual)
if (equal) assertTrue(actual <= expecVal)
}
def testBitOr = check(Operator.bopBitOr)
def testBitAnd = check(Operator.bopBitAnd)
def testBitXor = check(Operator.bopBitXor)
def testLShift = check(Operator.bopLShift)
def testRShift = check(Operator.bopRShift)
def testURShift = check(Operator.bopURShift)
def testPlus = check(Operator.bopPlus)
def testMinus = check(Operator.bopMinus)
def testMul = check(Operator.bopMul)
def testDiv = check(Operator.bopDiv)
def testMod = check(Operator.bopMod)
def testEq = check(Operator.bopEq)
def testNeq = check(Operator.bopNeq)
def testSEq = check(Operator.bopSEq)
def testSNeq = check(Operator.bopSNeq)
def testLess = check(Operator.bopLess)
def testGreater = check(Operator.bopGreater)
def testLessEq = check(Operator.bopLessEq)
def testGreaterEq = check(Operator.bopGreaterEq)
}
/** Exercises one unary abstract operator: its result on the abstracted operand
 * must subsume the expected value, and equal it exactly when `equal` holds. */
class UnaTest(name:String, oprnd:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
var oprndVal:Value = ValueBot
var expecVal:Value = ValueBot
def unaTest(): Unit = {}
override def getName = name
override def setUp = {
// Abstract operand and expectation before each test run.
oprndVal = toValue(oprnd)
expecVal = toValue(expec)
}
// Shared assertion body for every operator below.
private def check(op: Value => Value): Unit = {
val actual = op(oprndVal)
assertTrue(expecVal <= actual)
if (equal) assertTrue(actual <= expecVal)
}
def testVoid = check(Operator.uVoid)
def testPlus = check(Operator.uopPlus)
def testMinus = check(Operator.uopMinus)
def testBitNeg = check(Operator.uopBitNeg)
def testNeg = check(Operator.uopNeg)
}
/** Description of one operator test case, dispatched by pattern match in suite().
 * Sealed so the compiler checks those dispatch matches for exhaustiveness when a
 * new case class is added (all subclasses live in this file). */
sealed abstract class TypeOperator
/* Binary cases: name, left operand set, right operand set, expected set, exact-match flag. */
case class BinBitOr(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinBitAnd(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinBitXor(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinLShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinRShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinURShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinPlus(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMinus(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMul(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinDiv(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMod(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
/* Comparison cases expect boolean result sets. */
case class BinEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinNeq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinSEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinSNeq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinLess(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinGreater(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinLessEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinGreaterEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
/* Unary cases: name, operand set, expected set, exact-match flag. */
case class UnaVoid(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
//case class UnaTypeof(name:String, oprn:List[Any], expec:List[String], equal:Boolean) extends TypeOperator
case class UnaPlus(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaMinus(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaBitNeg(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaNeg(name:String, oprn:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/tests/TypingOperatorJUTest.scala | Scala | bsd-3-clause | 82,559 |
package no.skytteren.elasticala.search
/** Implicit conversions that turn a single sortable value, or a tuple of up to six
 * of them, into the corresponding fixed-arity SortN wrapper. Each element is
 * converted via its SortBuilder type-class instance. */
trait SortDSL {
implicit def toSort[S: SortBuilder](s: S): Sort1 = Sort1(implicitly[SortBuilder[S]].toSort(s))
implicit def toSort2[S1: SortBuilder, S2: SortBuilder](in: (S1, S2)): Sort2 =
Sort2(implicitly[SortBuilder[S1]].toSort(in._1), implicitly[SortBuilder[S2]].toSort(in._2))
implicit def toSort3[S1: SortBuilder, S2: SortBuilder, S3: SortBuilder](in: (S1, S2, S3)): Sort3 =
Sort3(implicitly[SortBuilder[S1]].toSort(in._1), implicitly[SortBuilder[S2]].toSort(in._2), implicitly[SortBuilder[S3]].toSort(in._3))
implicit def toSort4[S1: SortBuilder, S2: SortBuilder, S3: SortBuilder, S4: SortBuilder](in: (S1, S2, S3, S4)): Sort4 =
Sort4(implicitly[SortBuilder[S1]].toSort(in._1), implicitly[SortBuilder[S2]].toSort(in._2), implicitly[SortBuilder[S3]].toSort(in._3), implicitly[SortBuilder[S4]].toSort(in._4))
implicit def toSort5[S1: SortBuilder, S2: SortBuilder, S3: SortBuilder, S4: SortBuilder, S5: SortBuilder](in: (S1, S2, S3, S4, S5)): Sort5 =
Sort5(implicitly[SortBuilder[S1]].toSort(in._1), implicitly[SortBuilder[S2]].toSort(in._2), implicitly[SortBuilder[S3]].toSort(in._3), implicitly[SortBuilder[S4]].toSort(in._4), implicitly[SortBuilder[S5]].toSort(in._5))
implicit def toSort6[S1: SortBuilder, S2: SortBuilder, S3: SortBuilder, S4: SortBuilder, S5: SortBuilder, S6: SortBuilder](in: (S1, S2, S3, S4, S5, S6)): Sort6 =
Sort6(implicitly[SortBuilder[S1]].toSort(in._1), implicitly[SortBuilder[S2]].toSort(in._2), implicitly[SortBuilder[S3]].toSort(in._3), implicitly[SortBuilder[S4]].toSort(in._4), implicitly[SortBuilder[S5]].toSort(in._5), implicitly[SortBuilder[S6]].toSort(in._6))
}
/** How a multi-valued field is reduced to a single value for sorting purposes. */
sealed trait SortMode
object SortMode {
// Default lets the search engine pick its own reduction mode.
case object Default extends SortMode
case object Avg extends SortMode
case object Min extends SortMode
case object Max extends SortMode
case object Sum extends SortMode
}
/** Sort direction. `asc`/`desc` can be used bare (the companion objects, which are
 * ExplicitOrder values) or applied with a SortMode, e.g. `asc(SortMode.Min)`. */
sealed trait Order
sealed trait ExplicitOrder extends Order
case object default extends ExplicitOrder
case class asc(mode: SortMode = SortMode.Default) extends Order
object asc extends ExplicitOrder
case class desc(mode: SortMode = SortMode.Default) extends Order
object desc extends ExplicitOrder
/*
* TODO: nested_path, nested_filter, missing_value, unmapped_type, _geo_distance, _script
*
* TODO "track_scores": true ?
*/
/** A single sort criterion: field name plus direction (defaults to `default`). */
case class Sort(field: String, order: Order = default)
/** Marker for a fixed-arity group of sort criteria; see Sort1..Sort6 and the
 * tuple conversions in [[SortDSL]]. */
trait Sorting
case object DefaultSorting extends Sorting
case class Sort1(s1: Sort) extends Sorting
case class Sort2(s1: Sort, s2: Sort) extends Sorting
case class Sort3(s1: Sort, s2: Sort, s3: Sort) extends Sorting
case class Sort4(s1: Sort, s2: Sort, s3: Sort, s4: Sort) extends Sorting
case class Sort5(s1: Sort, s2: Sort, s3: Sort, s4: Sort, s5: Sort) extends Sorting
case class Sort6(s1: Sort, s2: Sort, s3: Sort, s4: Sort, s5: Sort, s6: Sort) extends Sorting
/** Type class converting a user-supplied representation (field name, (field, order)
 * tuple, ...) into a [[Sort]]. */
trait SortBuilder[S] {
def toSort(in: S): Sort
}
object SortBuilder {
// A (field, order) pair sorts by that field in the given direction.
implicit def Tuple2SortBuilder[O <: Order]: SortBuilder[(String, O)] = new SortBuilder[(String, O)] {
override def toSort(in: (String, O)): Sort = Sort(in._1, in._2)
}
// A bare field name sorts by that field with the default order.
implicit object StringSortBuilder extends SortBuilder[String] {
def toSort(in: String): Sort = Sort(in)
}
} | skytteren/elasticala | src/main/scala/no/skytteren/elasticala/search/SortDSL.scala | Scala | apache-2.0 | 3,240 |
object Test {
  // Registered at object initialization: runs when the JVM begins shutdown.
  scala.sys.addShutdownHook {
    Thread.sleep(1000)
    println("Test#shutdown.")
  }

  // Starts a thread that sleeps `delayMs` then prints `message`.
  // A daemon thread does not keep the JVM alive, so a long-sleeping daemon's
  // message is never printed before exit.
  private def spawn(asDaemon: Boolean, delayMs: Long, message: String): Thread = {
    val worker = new Thread {
      override def run(): Unit = {
        Thread.sleep(delayMs)
        println(message)
      }
    }
    worker.setDaemon(asDaemon)
    worker.start()
    worker
  }

  def daemon() = spawn(asDaemon = true, 10000, "Hallelujah!") // should not be printed
  def nonDaemon() = spawn(asDaemon = false, 100, "Fooblitzky!")

  def main(args: Array[String]): Unit = {
    daemon()
    nonDaemon()
    scala.sys.addShutdownHook {
      println("main#shutdown.")
    }
  }
}
| folone/dotty | tests/run/shutdownhooks.scala | Scala | bsd-3-clause | 649 |
package edu.arizona.sista.learning
/**
 * Allows traversal of a dataset's (or ranking dataset's) features and values, together
 * with destructive (in-place) updates of those values. Useful for finding the range of
 * feature values and then rescaling them — analogous to an iterator that also supports
 * destructive updates.
 *
 * @tparam F feature type
 * @tparam V value type
 *
 * Created by dfried on 5/27/14.
 */
trait FeatureUpdater[F, V] extends Traversable[(F, V)] {
/**
 * Destructively modify all feature values using a function of the (feature, value) pair.
 */
def updateAll(fn: ((F, V)) => V): Unit
}
| michaelcapizzi/processors | src/main/scala/edu/arizona/sista/learning/FeatureUpdater.scala | Scala | apache-2.0 | 509 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.spark
import com.azure.cosmos.implementation.Strings
import org.apache.spark.SparkEnv
/** Precondition helpers for the Cosmos Spark connector. The `require*` variants
 * throw IllegalArgumentException; the `assert*` variants throw AssertionError. */
private object CosmosPredicates {
// Name used when validating the `parameterName` argument itself.
private[this] val ParameterName = "parameterName"
/**
* Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
* changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
* SPARK-6716 for more details).
*/
private[this] val DRIVER_IDENTIFIER = "driver"
// Shared "must not be null or empty" message for a given parameter name.
private[this] def argumentMustNotBeNullOrEmptyMessage(parameterName: String): String =
s"Argument '$parameterName' must not be null or empty."
/** Requires `candidate` to be non-null and returns it. Also validates that the
* caller supplied a usable (non-blank) `parameterName`. */
private[spark] def requireNotNull[T](candidate: T, parameterName: String): T = {
requireNotNullOrEmpty(parameterName, ParameterName)
require(candidate != null, s"Argument '$parameterName' must not be null.")
candidate
}
/** Requires `candidate` to be non-null and not whitespace-only; returns it. */
private[spark] def requireNotNullOrEmpty(candidate: String, parameterName: String): String = {
require(
!Strings.isNullOrWhiteSpace(parameterName),
argumentMustNotBeNullOrEmptyMessage(ParameterName))
require(!Strings.isNullOrWhiteSpace(candidate), argumentMustNotBeNullOrEmptyMessage(parameterName))
candidate
}
/** Assertion-based variant of [[requireNotNullOrEmpty]] for internal invariants. */
private[spark] def assertNotNullOrEmpty(candidate: String, parameterName: String): String = {
assert(
!Strings.isNullOrWhiteSpace(parameterName),
argumentMustNotBeNullOrEmptyMessage(ParameterName))
assert(!Strings.isNullOrWhiteSpace(candidate), argumentMustNotBeNullOrEmptyMessage(parameterName))
candidate
}
/** Assertion-based variant of [[requireNotNull]] for internal invariants. */
private[spark] def assertNotNull[T](candidate: T, parameterName: String): T = {
assertNotNullOrEmpty(parameterName, ParameterName)
assert(candidate != null, s"Argument '$parameterName' must not be null.")
candidate
}
/** Requires the array to be non-null and non-empty; returns it. */
private[spark] def requireNotNullOrEmpty[T](candidate: Array[T], parameterName: String): Array[T] = {
require(!Strings.isNullOrWhiteSpace(parameterName), argumentMustNotBeNullOrEmptyMessage(ParameterName))
require(candidate != null && !candidate.isEmpty, argumentMustNotBeNullOrEmptyMessage(parameterName))
candidate
}
/** Assertion-based variant of the array [[requireNotNullOrEmpty]]. */
private[spark] def assertNotNullOrEmpty[T](candidate: Array[T], parameterName: String): Array[T] = {
assert(!Strings.isNullOrWhiteSpace(parameterName), argumentMustNotBeNullOrEmptyMessage(ParameterName))
assert(candidate != null && !candidate.isEmpty, argumentMustNotBeNullOrEmptyMessage(parameterName))
candidate
}
/** True when running in the driver JVM (executor id equals "driver"). */
private[spark] def isOnSparkDriver(): Boolean = {
SparkEnv.get.executorId == DRIVER_IDENTIFIER
}
private[spark] def assertOnSparkDriver(): Unit = {
// assert that we're only accessing it on the driver.
assert(isOnSparkDriver(), "This code should only be executed on the Spark driver.")
}
}
| Azure/azure-sdk-for-java | sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/CosmosPredicates.scala | Scala | mit | 2,868 |
/*
* Copyright (c) 2015 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolxit
import util.Positional
/** TeX works with a token stream; every lexed item is a Token that can render
 * itself back to textual form for a given environment. */
sealed trait Token extends Positional {
/** Renders this token using `env` for environment-dependent characters
 * (e.g. the current escape character). */
def toString(env: TeXEnvironment): String
}
/** A character token read as input, i.e. a character plus its category code.
 * It may be one of the following tokens:
 * - escape character (by default `\\`)
 * - beginning of group (by default `{`)
 * - end of group (by default `}`)
 * - math shift (by default `$`)
 * - alignment tab (by default `&`)
 * - end of line (by default `\\n`)
 * - parameter (by default `#`)
 * - superscript (by default `^`)
 * - subscript (by default `_`)
 * - ignored character (for example `null`)
 * - space (such as ` `)
 * - a letter (by default a UTF-8 encoded character)
 * - active character (by default `~`)
 * - comment character (by default `%`)
 * - invalid character (<delete>)
 * - other character (none of the above)
 *
 * @author Lucas Satabin
 *
 */
case class CharacterToken(value: Char, category: Category) extends Token {
// rendering ignores the environment: the raw character is emitted
def toString(env: TeXEnvironment) = value.toString
}
/** A control sequence token has no category.
 *
 * @author Lucas Satabin
 */
case class ControlSequenceToken(name: String, active: Boolean = false) extends Token {
// rendered as the environment's escape character, the name, and a trailing space
// (NOTE(review): presumably because a space terminates a control word in TeX input)
def toString(env: TeXEnvironment) = f"${env.escapechar}$name "
}
/** A parameter token may only occur in the parameter or replacement text
 * of a control sequence.
 *
 * @author Lucas Satabin
 */
case class ParameterToken(number: Int) extends Token {
// rendered with the default parameter character `#`
def toString(env: TeXEnvironment) = f"#$number"
}
/** A bunch of tokens nested between a token of category BEGINNING_OF_GROUP and
 * a token of category END_OF_GROUP. `inner` is rendered in reverse order (the
 * existing `reverseMap` indicates it is accumulated back-to-front).
 *
 * @author Lucas Satabin
 */
case class GroupToken(open: Token, inner: List[Token], close: Token) extends Token {
// Fix: render all three parts through `toString(env)`. The previous code
// interpolated `open` with its default case-class toString and dropped `close`.
def toString(env: TeXEnvironment) =
f"${open.toString(env)}${inner.reverseMap(_.toString(env)).mkString}${close.toString(env)}"
}
/** Synthetic token marking the end of input. */
case class EOIToken() extends Token {
def toString(env: TeXEnvironment) = "<EOI>"
}
| satabin/toolxit-ng | core/src/main/scala/toolxit/Tokens.scala | Scala | apache-2.0 | 2,508 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ByteArrayOutputStream, CharArrayWriter, DataOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{SparkException, TaskContext}
import org.apache.spark.annotation.{DeveloperApi, Evolving, Experimental, Stable, Unstable}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function._
import org.apache.spark.api.python.{PythonRDD, SerDeUtil}
import org.apache.spark.api.r.RRDD
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.QueryPlanningTracker
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.json.{JacksonGenerator, JSONOptions}
import org.apache.spark.sql.catalyst.optimizer.CombineUnions
import org.apache.spark.sql.catalyst.parser.{ParseException, ParserUtils}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, PartitioningCollection}
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.arrow.{ArrowBatchStreamWriter, ArrowConverters}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, FileTable}
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.execution.stat.StatFunctions
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.DataStreamWriter
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
private[sql] object Dataset {
// Source of globally unique Dataset ids (see the `id` field of the class).
val curId = new java.util.concurrent.atomic.AtomicLong()
// Metadata/tag keys used to record a column's originating Dataset and position
// (used for ambiguous self-join detection, guarded by FAIL_AMBIGUOUS_SELF_JOIN).
val DATASET_ID_KEY = "__dataset_id"
val COL_POS_KEY = "__col_position"
val DATASET_ID_TAG = TreeNodeTag[Long]("dataset_id")
/** Creates a typed Dataset, eagerly binding the encoder (except for Row) so
* schema mismatches surface at creation time rather than at first action. */
def apply[T: Encoder](sparkSession: SparkSession, logicalPlan: LogicalPlan): Dataset[T] = {
val dataset = new Dataset(sparkSession, logicalPlan, implicitly[Encoder[T]])
// Eagerly bind the encoder so we verify that the encoder matches the underlying
// schema. The user will get an error if this is not the case.
// optimization: it is guaranteed that [[InternalRow]] can be converted to [[Row]] so
// do not do this check in that case. this check can be expensive since it requires running
// the whole [[Analyzer]] to resolve the deserializer
if (dataset.exprEnc.clsTag.runtimeClass != classOf[Row]) {
dataset.resolvedEnc
}
dataset
}
/** Creates an untyped DataFrame from a logical plan, asserting analysis succeeds. */
def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
val qe = sparkSession.sessionState.executePlan(logicalPlan)
qe.assertAnalyzed()
new Dataset[Row](sparkSession, qe, RowEncoder(qe.analyzed.schema))
}
/** A variant of ofRows that allows passing in a tracker so we can track query parsing time. */
def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan, tracker: QueryPlanningTracker)
: DataFrame = {
val qe = new QueryExecution(sparkSession, logicalPlan, tracker)
qe.assertAnalyzed()
new Dataset[Row](sparkSession, qe, RowEncoder(qe.analyzed.schema))
}
}
/**
* A Dataset is a strongly typed collection of domain-specific objects that can be transformed
* in parallel using functional or relational operations. Each Dataset also has an untyped view
* called a `DataFrame`, which is a Dataset of [[Row]].
*
* Operations available on Datasets are divided into transformations and actions. Transformations
* are the ones that produce new Datasets, and actions are the ones that trigger computation and
* return results. Example transformations include map, filter, select, and aggregate (`groupBy`).
* Example actions count, show, or writing data out to file systems.
*
* Datasets are "lazy", i.e. computations are only triggered when an action is invoked. Internally,
* a Dataset represents a logical plan that describes the computation required to produce the data.
* When an action is invoked, Spark's query optimizer optimizes the logical plan and generates a
* physical plan for efficient execution in a parallel and distributed manner. To explore the
* logical plan as well as optimized physical plan, use the `explain` function.
*
* To efficiently support domain-specific objects, an [[Encoder]] is required. The encoder maps
* the domain specific type `T` to Spark's internal type system. For example, given a class `Person`
* with two fields, `name` (string) and `age` (int), an encoder is used to tell Spark to generate
* code at runtime to serialize the `Person` object into a binary structure. This binary structure
* often has much lower memory footprint as well as are optimized for efficiency in data processing
* (e.g. in a columnar format). To understand the internal binary representation for data, use the
* `schema` function.
*
* There are typically two ways to create a Dataset. The most common way is by pointing Spark
* to some files on storage systems, using the `read` function available on a `SparkSession`.
* {{{
* val people = spark.read.parquet("...").as[Person] // Scala
* Dataset<Person> people = spark.read().parquet("...").as(Encoders.bean(Person.class)); // Java
* }}}
*
* Datasets can also be created through transformations available on existing Datasets. For example,
* the following creates a new Dataset by applying a filter on the existing one:
* {{{
* val names = people.map(_.name) // in Scala; names is a Dataset[String]
* Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING));
* }}}
*
* Dataset operations can also be untyped, through various domain-specific-language (DSL)
* functions defined in: Dataset (this class), [[Column]], and [[functions]]. These operations
* are very similar to the operations available in the data frame abstraction in R or Python.
*
* To select a column from the Dataset, use `apply` method in Scala and `col` in Java.
* {{{
* val ageCol = people("age") // in Scala
* Column ageCol = people.col("age"); // in Java
* }}}
*
* Note that the [[Column]] type can also be manipulated through its various functions.
* {{{
* // The following creates a new column that increases everybody's age by 10.
* people("age") + 10 // in Scala
* people.col("age").plus(10); // in Java
* }}}
*
* A more concrete example in Scala:
* {{{
* // To create Dataset[Row] using SparkSession
* val people = spark.read.parquet("...")
* val department = spark.read.parquet("...")
*
* people.filter("age > 30")
* .join(department, people("deptId") === department("id"))
* .groupBy(department("name"), people("gender"))
* .agg(avg(people("salary")), max(people("age")))
* }}}
*
* and in Java:
* {{{
* // To create Dataset<Row> using SparkSession
* Dataset<Row> people = spark.read().parquet("...");
* Dataset<Row> department = spark.read().parquet("...");
*
* people.filter(people.col("age").gt(30))
* .join(department, people.col("deptId").equalTo(department.col("id")))
* .groupBy(department.col("name"), people.col("gender"))
* .agg(avg(people.col("salary")), max(people.col("age")));
* }}}
*
* @groupname basic Basic Dataset functions
* @groupname action Actions
* @groupname untypedrel Untyped transformations
* @groupname typedrel Typed transformations
*
* @since 1.6.0
*/
@Stable
class Dataset[T] private[sql](
@transient private val _sparkSession: SparkSession,
@DeveloperApi @Unstable @transient val queryExecution: QueryExecution,
@DeveloperApi @Unstable @transient val encoder: Encoder[T])
extends Serializable {
// Lazily checked so that a Dataset deserialized on an executor (where the
// @transient `_sparkSession` is null) fails with a descriptive error instead of
// a NullPointerException when a transformation/action is attempted there.
@transient lazy val sparkSession: SparkSession = {
if (_sparkSession == null) {
throw new SparkException(
"Dataset transformations and actions can only be invoked by the driver, not inside of" +
" other Dataset transformations; for example, dataset1.map(x => dataset2.values.count()" +
" * x) is invalid because the values transformation and count action cannot be " +
"performed inside of the dataset1.map transformation. For more information," +
" see SPARK-28702.")
}
_sparkSession
}
// A globally unique id of this Dataset.
private val id = Dataset.curId.getAndIncrement()
// Fail fast: analysis errors surface at construction time, not at the first action.
queryExecution.assertAnalyzed()
// Note for Spark contributors: if adding or updating any action in `Dataset`, please make sure
// you wrap it with `withNewExecutionId` if this actions doesn't call other action.
// Convenience constructor: analyzes/plans `logicalPlan` through the session's planner.
def this(sparkSession: SparkSession, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sparkSession, sparkSession.sessionState.executePlan(logicalPlan), encoder)
}
// Legacy SQLContext-based constructor; delegates to the SparkSession variant.
def this(sqlContext: SQLContext, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sqlContext.sparkSession, logicalPlan, encoder)
}
@transient private[sql] val logicalPlan: LogicalPlan = {
// For various commands (like DDL) and queries with side effects, we force query execution
// to happen right away to let these side effects take place eagerly.
// The executed result is captured as a LocalRelation so later references re-use it.
val plan = queryExecution.analyzed match {
case c: Command =>
LocalRelation(c.output, withAction("command", queryExecution)(_.executeCollect()))
case u @ Union(children) if children.forall(_.isInstanceOf[Command]) =>
LocalRelation(u.output, withAction("command", queryExecution)(_.executeCollect()))
case _ =>
queryExecution.analyzed
}
// Tag the plan with this Dataset's id so ambiguous self-joins can be detected.
if (sparkSession.sessionState.conf.getConf(SQLConf.FAIL_AMBIGUOUS_SELF_JOIN)) {
plan.setTagValue(Dataset.DATASET_ID_TAG, id)
}
plan
}
  /**
   * Currently [[ExpressionEncoder]] is the only implementation of [[Encoder]], here we turn the
   * passed in encoder to [[ExpressionEncoder]] explicitly, and mark it implicit so that we can use
   * it when constructing new Dataset objects that have the same object type (that will be
   * possibly resolved to a different schema).
   */
  private[sql] implicit val exprEnc: ExpressionEncoder[T] = encoderFor(encoder)
  // The resolved `ExpressionEncoder` which can be used to turn rows to objects of type T, after
  // collecting rows to the driver side.
  private lazy val resolvedEnc = {
    exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer)
  }

  // ClassTag of T, exposed implicitly so RDD/Dataset factory methods can pick it up.
  private implicit def classTag = exprEnc.clsTag

  // sqlContext must be val because a stable identifier is expected when you import implicits
  @transient lazy val sqlContext: SQLContext = sparkSession.sqlContext
private[sql] def resolve(colName: String): NamedExpression = {
queryExecution.analyzed.resolveQuoted(colName, sparkSession.sessionState.analyzer.resolver)
.getOrElse {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
}
private[sql] def numericColumns: Seq[Expression] = {
schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
queryExecution.analyzed.resolveQuoted(n.name, sparkSession.sessionState.analyzer.resolver).get
}
}
  /**
   * Get rows rendered as sequences of strings, applying the given truncation.
   * The first returned row is the header (field names); at most `numRows + 1` data
   * rows follow so the caller can detect whether more data exists.
   *
   * @param numRows Number of rows to return
   * @param truncate If set to more than 0, truncates strings to `truncate` characters and
   *                 all cells will be aligned right.
   */
  private[sql] def getRows(
      numRows: Int,
      truncate: Int): Seq[Seq[String]] = {
    val newDf = toDF()
    val castCols = newDf.logicalPlan.output.map { col =>
      // Since binary types in top-level schema fields have a specific format to print,
      // so we do not cast them to strings here.
      if (col.dataType == BinaryType) {
        Column(col)
      } else {
        Column(col).cast(StringType)
      }
    }
    // Take one row beyond the request so callers can detect truncated output.
    val data = newDf.select(castCols: _*).take(numRows + 1)
    // For array values, replace Seq and Array with square brackets
    // For cells that are beyond `truncate` characters, replace it with the
    // first `truncate-3` and "..."
    schema.fieldNames.toSeq +: data.map { row =>
      row.toSeq.map { cell =>
        val str = cell match {
          case null => "null"
          // Binary cells are rendered as hex bytes, e.g. [DE AD BE EF].
          case binary: Array[Byte] => binary.map("%02X".format(_)).mkString("[", " ", "]")
          case _ => cell.toString
        }
        if (truncate > 0 && str.length > truncate) {
          // do not show ellipses for strings shorter than 4 characters.
          if (truncate < 4) str.substring(0, truncate)
          else str.substring(0, truncate - 3) + "..."
        } else {
          str
        }
      }: Seq[String]
    }
  }
  /**
   * Compose the string representing rows for output.
   *
   * @param _numRows Number of rows to show
   * @param truncate If set to more than 0, truncates strings to `truncate` characters and
   *                 all cells will be aligned right.
   * @param vertical If set to true, prints output rows vertically (one line per column value).
   */
  private[sql] def showString(
      _numRows: Int,
      truncate: Int = 20,
      vertical: Boolean = false): String = {
    // Clamp the requested count into [0, max safe array length).
    val numRows = _numRows.max(0).min(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH - 1)
    // Get rows represented by Seq[Seq[String]], we may get one more line if it has more data.
    val tmpRows = getRows(numRows, truncate)
    // `tmpRows` includes the header row, hence the `- 1`.
    val hasMoreData = tmpRows.length - 1 > numRows
    val rows = tmpRows.take(numRows + 1)
    val sb = new StringBuilder
    val numCols = schema.fieldNames.length
    // We set a minimum column width at '3'
    val minimumColWidth = 3
    if (!vertical) {
      // Initialise the width of each column to a minimum value
      val colWidths = Array.fill(numCols)(minimumColWidth)
      // Compute the width of each column
      for (row <- rows) {
        for ((cell, i) <- row.zipWithIndex) {
          colWidths(i) = math.max(colWidths(i), Utils.stringHalfWidth(cell))
        }
      }
      // Pad each cell to its column width; right-align when truncating, left-align otherwise.
      // `stringHalfWidth` accounts for full-width (e.g. CJK) characters, hence the
      // `- stringHalfWidth(cell) + cell.length` adjustment to the pad target.
      val paddedRows = rows.map { row =>
        row.zipWithIndex.map { case (cell, i) =>
          if (truncate > 0) {
            StringUtils.leftPad(cell, colWidths(i) - Utils.stringHalfWidth(cell) + cell.length)
          } else {
            StringUtils.rightPad(cell, colWidths(i) - Utils.stringHalfWidth(cell) + cell.length)
          }
        }
      }
      // Create SeparateLine. NOTE: `addString` writes into `sb` and returns it, so this
      // both captures the separator string AND emits it as the table's first line.
      val sep: String = colWidths.map("-" * _).addString(sb, "+", "+", "+\\n").toString()
      // column names
      paddedRows.head.addString(sb, "|", "|", "|\\n")
      sb.append(sep)
      // data
      paddedRows.tail.foreach(_.addString(sb, "|", "|", "|\\n"))
      sb.append(sep)
    } else {
      // Extended display mode enabled
      val fieldNames = rows.head
      val dataRows = rows.tail
      // Compute the width of field name and data columns
      val fieldNameColWidth = fieldNames.foldLeft(minimumColWidth) { case (curMax, fieldName) =>
        math.max(curMax, Utils.stringHalfWidth(fieldName))
      }
      val dataColWidth = dataRows.foldLeft(minimumColWidth) { case (curMax, row) =>
        math.max(curMax, row.map(cell => Utils.stringHalfWidth(cell)).max)
      }
      dataRows.zipWithIndex.foreach { case (row, i) =>
        // "+ 5" in size means a character length except for padded names and data
        val rowHeader = StringUtils.rightPad(
          s"-RECORD $i", fieldNameColWidth + dataColWidth + 5, "-")
        sb.append(rowHeader).append("\\n")
        row.zipWithIndex.map { case (cell, j) =>
          val fieldName = StringUtils.rightPad(fieldNames(j),
            fieldNameColWidth - Utils.stringHalfWidth(fieldNames(j)) + fieldNames(j).length)
          val data = StringUtils.rightPad(cell,
            dataColWidth - Utils.stringHalfWidth(cell) + cell.length)
          s" $fieldName | $data "
        }.addString(sb, "", "\\n", "\\n")
      }
    }
    // Print a footer
    if (vertical && rows.tail.isEmpty) {
      // In a vertical mode, print an empty row set explicitly
      sb.append("(0 rows)\\n")
    } else if (hasMoreData) {
      // For Data that has more than "numRows" records
      val rowsString = if (numRows == 1) "row" else "rows"
      sb.append(s"only showing top $numRows $rowsString\\n")
    }
    sb.toString()
  }
override def toString: String = {
try {
val builder = new StringBuilder
val fields = schema.take(2).map {
case f => s"${f.name}: ${f.dataType.simpleString(2)}"
}
builder.append("[")
builder.append(fields.mkString(", "))
if (schema.length > 2) {
if (schema.length - fields.size == 1) {
builder.append(" ... 1 more field")
} else {
builder.append(" ... " + (schema.length - 2) + " more fields")
}
}
builder.append("]").toString()
} catch {
case NonFatal(e) =>
s"Invalid tree; ${e.getMessage}:\\n$queryExecution"
}
}
  /**
   * Converts this strongly typed collection of data to generic Dataframe. In contrast to the
   * strongly typed objects that Dataset operations work on, a Dataframe returns generic [[Row]]
   * objects that allow fields to be accessed by ordinal or name.
   *
   * @group basic
   * @since 1.6.0
   */
  // This is declared with parentheses to prevent the Scala compiler from treating
  // `ds.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
  def toDF(): DataFrame = new Dataset[Row](sparkSession, queryExecution, RowEncoder(schema))
  /**
   * :: Experimental ::
   * Returns a new Dataset where each record has been mapped on to the specified type. The
   * method used to map columns depend on the type of `U`:
   *  - When `U` is a class, fields for the class will be mapped to columns of the same name
   *    (case sensitivity is determined by `spark.sql.caseSensitive`).
   *  - When `U` is a tuple, the columns will be mapped by ordinal (i.e. the first column will
   *    be assigned to `_1`).
   *  - When `U` is a primitive type (i.e. String, Int, etc), then the first column of the
   *    `DataFrame` will be used.
   *
   * If the schema of the Dataset does not match the desired `U` type, you can use `select`
   * along with `alias` or `as` to rearrange or rename as required.
   *
   * Note that `as[]` only changes the view of the data that is passed into typed operations,
   * such as `map()`, and does not eagerly project away any columns that are not present in
   * the specified class.
   *
   * @group basic
   * @since 1.6.0
   */
  @Experimental
  @Evolving
  def as[U : Encoder]: Dataset[U] = Dataset[U](sparkSession, logicalPlan)
/**
* Converts this strongly typed collection of data to generic `DataFrame` with columns renamed.
* This can be quite convenient in conversion from an RDD of tuples into a `DataFrame` with
* meaningful names. For example:
* {{{
* val rdd: RDD[(Int, String)] = ...
* rdd.toDF() // this implicit conversion creates a DataFrame with column name `_1` and `_2`
* rdd.toDF("id", "name") // this creates a DataFrame with column name "id" and "name"
* }}}
*
* @group basic
* @since 2.0.0
*/
@scala.annotation.varargs
def toDF(colNames: String*): DataFrame = {
require(schema.size == colNames.size,
"The number of columns doesn't match.\\n" +
s"Old column names (${schema.size}): " + schema.fields.map(_.name).mkString(", ") + "\\n" +
s"New column names (${colNames.size}): " + colNames.mkString(", "))
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
select(newCols : _*)
}
  /**
   * Returns the schema of this Dataset.
   *
   * @group basic
   * @since 1.6.0
   */
  def schema: StructType = queryExecution.analyzed.schema
  /**
   * Prints the schema to the console in a nice tree format.
   *
   * @group basic
   * @since 1.6.0
   */
  def printSchema(): Unit = printSchema(Int.MaxValue)

  // scalastyle:off println
  /**
   * Prints the schema up to the given level to the console in a nice tree format.
   *
   * @param level maximum nesting depth of the schema tree to print
   * @group basic
   * @since 3.0.0
   */
  def printSchema(level: Int): Unit = println(schema.treeString(level))
  // scalastyle:on println
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(extended: Boolean): Unit = {
// Because temporary views are resolved during analysis when we create a Dataset, and
// `ExplainCommand` analyzes input query plan and resolves temporary views again. Using
// `ExplainCommand` here will probably output different query plans, compared to the results
// of evaluation of the Dataset. So just output QueryExecution's query plans here.
val qe = ExplainCommandUtil.explainedQueryExecution(sparkSession, logicalPlan, queryExecution)
val outputString =
if (extended) {
qe.toString
} else {
qe.simpleString
}
// scalastyle:off println
println(outputString)
// scalastyle:on println
}
  /**
   * Prints the physical plan to the console for debugging purposes.
   *
   * @group basic
   * @since 1.6.0
   */
  def explain(): Unit = explain(extended = false)
/**
* Returns all column names and their data types as an array.
*
* @group basic
* @since 1.6.0
*/
def dtypes: Array[(String, String)] = schema.fields.map { field =>
(field.name, field.dataType.toString)
}
/**
* Returns all column names as an array.
*
* @group basic
* @since 1.6.0
*/
def columns: Array[String] = schema.fields.map(_.name)
/**
* Returns true if the `collect` and `take` methods can be run locally
* (without any Spark executors).
*
* @group basic
* @since 1.6.0
*/
def isLocal: Boolean = logicalPlan.isInstanceOf[LocalRelation]
  /**
   * Returns true if the `Dataset` is empty.
   *
   * @group basic
   * @since 2.4.0
   */
  // `limit(1)` keeps the probe cheap: a global count over at most one row.
  def isEmpty: Boolean = withAction("isEmpty", limit(1).groupBy().count().queryExecution) { plan =>
    plan.executeCollect().head.getLong(0) == 0
  }
  /**
   * Returns true if this Dataset contains one or more sources that continuously
   * return data as it arrives. A Dataset that reads data from a streaming source
   * must be executed as a `StreamingQuery` using the `start()` method in
   * `DataStreamWriter`. Methods that return a single answer, e.g. `count()` or
   * `collect()`, will throw an [[AnalysisException]] when there is a streaming
   * source present.
   *
   * @group streaming
   * @since 2.0.0
   */
  @Evolving
  def isStreaming: Boolean = logicalPlan.isStreaming
  /**
   * Eagerly checkpoint a Dataset and return the new Dataset. Checkpointing can be used to truncate
   * the logical plan of this Dataset, which is especially useful in iterative algorithms where the
   * plan may grow exponentially. It will be saved to files inside the checkpoint
   * directory set with `SparkContext#setCheckpointDir`.
   *
   * @group basic
   * @since 2.1.0
   */
  @Experimental
  @Evolving
  def checkpoint(): Dataset[T] = checkpoint(eager = true, reliableCheckpoint = true)

  /**
   * Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
   * logical plan of this Dataset, which is especially useful in iterative algorithms where the
   * plan may grow exponentially. It will be saved to files inside the checkpoint
   * directory set with `SparkContext#setCheckpointDir`.
   *
   * @param eager whether to materialize the checkpoint immediately
   * @group basic
   * @since 2.1.0
   */
  @Experimental
  @Evolving
  def checkpoint(eager: Boolean): Dataset[T] = checkpoint(eager = eager, reliableCheckpoint = true)

  /**
   * Eagerly locally checkpoints a Dataset and return the new Dataset. Checkpointing can be
   * used to truncate the logical plan of this Dataset, which is especially useful in iterative
   * algorithms where the plan may grow exponentially. Local checkpoints are written to executor
   * storage and despite potentially faster they are unreliable and may compromise job completion.
   *
   * @group basic
   * @since 2.3.0
   */
  @Experimental
  @Evolving
  def localCheckpoint(): Dataset[T] = checkpoint(eager = true, reliableCheckpoint = false)

  /**
   * Locally checkpoints a Dataset and return the new Dataset. Checkpointing can be used to truncate
   * the logical plan of this Dataset, which is especially useful in iterative algorithms where the
   * plan may grow exponentially. Local checkpoints are written to executor storage and despite
   * potentially faster they are unreliable and may compromise job completion.
   *
   * @param eager whether to materialize the checkpoint immediately
   * @group basic
   * @since 2.3.0
   */
  @Experimental
  @Evolving
  def localCheckpoint(eager: Boolean): Dataset[T] = checkpoint(
    eager = eager,
    reliableCheckpoint = false
  )
  /**
   * Returns a checkpointed version of this Dataset.
   *
   * @param eager Whether to checkpoint this dataframe immediately
   * @param reliableCheckpoint Whether to create a reliable checkpoint saved to files inside the
   *                           checkpoint directory. If false creates a local checkpoint using
   *                           the caching subsystem
   */
  private def checkpoint(eager: Boolean, reliableCheckpoint: Boolean): Dataset[T] = {
    val actionName = if (reliableCheckpoint) "checkpoint" else "localCheckpoint"
    withAction(actionName, queryExecution) { physicalPlan =>
      // Copy each row: checkpointing holds on to rows, and InternalRow objects are reused.
      val internalRdd = physicalPlan.execute().map(_.copy())
      if (reliableCheckpoint) {
        internalRdd.checkpoint()
      } else {
        internalRdd.localCheckpoint()
      }
      if (eager) {
        // Force materialization of the checkpoint now.
        internalRdd.count()
      }
      // Takes the first leaf partitioning whenever we see a `PartitioningCollection`. Otherwise the
      // size of `PartitioningCollection` may grow exponentially for queries involving deep inner
      // joins.
      def firstLeafPartitioning(partitioning: Partitioning): Partitioning = {
        partitioning match {
          case p: PartitioningCollection => firstLeafPartitioning(p.partitionings.head)
          case p => p
        }
      }
      val outputPartitioning = firstLeafPartitioning(physicalPlan.outputPartitioning)
      // Wrap the checkpointed RDD in a fresh logical plan, preserving output attributes,
      // partitioning, and ordering so downstream operators can avoid redundant shuffles/sorts.
      Dataset.ofRows(
        sparkSession,
        LogicalRDD(
          logicalPlan.output,
          internalRdd,
          outputPartitioning,
          physicalPlan.outputOrdering,
          isStreaming
        )(sparkSession)).as[T]
    }
  }
  /**
   * Defines an event time watermark for this [[Dataset]]. A watermark tracks a point in time
   * before which we assume no more late data is going to arrive.
   *
   * Spark will use this watermark for several purposes:
   *  - To know when a given time window aggregation can be finalized and thus can be emitted when
   *    using output modes that do not allow updates.
   *  - To minimize the amount of state that we need to keep for on-going aggregations,
   *    `mapGroupsWithState` and `dropDuplicates` operators.
   *
   * The current watermark is computed by looking at the `MAX(eventTime)` seen across
   * all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
   * of coordinating this value across partitions, the actual watermark used is only guaranteed
   * to be at least `delayThreshold` behind the actual event time. In some cases we may still
   * process records that arrive more than `delayThreshold` late.
   *
   * @param eventTime the name of the column that contains the event time of the row.
   * @param delayThreshold the minimum delay to wait to data to arrive late, relative to the latest
   *                       record that has been processed in the form of an interval
   *                       (e.g. "1 minute" or "5 hours"). NOTE: This should not be negative.
   *
   * @group streaming
   * @since 2.1.0
   */
  @Evolving
  // We only accept an existing column name, not a derived column here as a watermark that is
  // defined on a derived column cannot referenced elsewhere in the plan.
  def withWatermark(eventTime: String, delayThreshold: String): Dataset[T] = withTypedPlan {
    val parsedDelay =
      try {
        CalendarInterval.fromCaseInsensitiveString(delayThreshold)
      } catch {
        case e: IllegalArgumentException =>
          throw new AnalysisException(
            s"Unable to parse time delay '$delayThreshold'",
            cause = Some(e))
      }
    require(parsedDelay.milliseconds >= 0 && parsedDelay.months >= 0,
      s"delay threshold ($delayThreshold) should not be negative.")
    // EliminateEventTimeWatermark drops the node when the child is not streaming.
    EliminateEventTimeWatermark(
      EventTimeWatermark(UnresolvedAttribute(eventTime), parsedDelay, logicalPlan))
  }
  /**
   * Displays the Dataset in a tabular form. Strings more than 20 characters will be truncated,
   * and all cells will be aligned right. For example:
   * {{{
   *   year  month  AVG('Adj Close)  MAX('Adj Close)
   *   1980  12     0.503218         0.595103
   *   1981  01     0.523289         0.570307
   *   1982  02     0.436504         0.475256
   *   1983  03     0.410516         0.442194
   *   1984  04     0.450090         0.483521
   * }}}
   *
   * @param numRows Number of rows to show
   *
   * @group action
   * @since 1.6.0
   */
  def show(numRows: Int): Unit = show(numRows, truncate = true)

  /**
   * Displays the top 20 rows of Dataset in a tabular form. Strings more than 20 characters
   * will be truncated, and all cells will be aligned right.
   *
   * @group action
   * @since 1.6.0
   */
  def show(): Unit = show(20)
  /**
   * Displays the top 20 rows of Dataset in a tabular form.
   *
   * @param truncate Whether truncate long strings. If true, strings more than 20 characters will
   *                 be truncated and all cells will be aligned right
   *
   * @group action
   * @since 1.6.0
   */
  def show(truncate: Boolean): Unit = show(20, truncate)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
* @param numRows Number of rows to show
* @param truncate Whether truncate long strings. If true, strings more than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
// scalastyle:off println
def show(numRows: Int, truncate: Boolean): Unit = if (truncate) {
println(showString(numRows, truncate = 20))
} else {
println(showString(numRows, truncate = 0))
}
  /**
   * Displays the Dataset in a tabular form. For example:
   * {{{
   *   year  month  AVG('Adj Close)  MAX('Adj Close)
   *   1980  12     0.503218         0.595103
   *   1981  01     0.523289         0.570307
   *   1982  02     0.436504         0.475256
   *   1983  03     0.410516         0.442194
   *   1984  04     0.450090         0.483521
   * }}}
   *
   * @param numRows Number of rows to show
   * @param truncate If set to more than 0, truncates strings to `truncate` characters and
   *                 all cells will be aligned right.
   * @group action
   * @since 1.6.0
   */
  def show(numRows: Int, truncate: Int): Unit = show(numRows, truncate, vertical = false)
  /**
   * Displays the Dataset in a tabular form. For example:
   * {{{
   *   year  month  AVG('Adj Close)  MAX('Adj Close)
   *   1980  12     0.503218         0.595103
   *   1981  01     0.523289         0.570307
   *   1982  02     0.436504         0.475256
   *   1983  03     0.410516         0.442194
   *   1984  04     0.450090         0.483521
   * }}}
   *
   * If `vertical` enabled, this command prints output rows vertically (one line per column
   * value) instead:
   *
   * {{{
   * -RECORD 0-------------------
   *  year            | 1980
   *  month           | 12
   *  AVG('Adj Close) | 0.503218
   *  MAX('Adj Close) | 0.595103
   * -RECORD 1-------------------
   *  year            | 1981
   *  month           | 01
   *  AVG('Adj Close) | 0.523289
   *  MAX('Adj Close) | 0.570307
   * -RECORD 2-------------------
   *  year            | 1982
   *  month           | 02
   *  AVG('Adj Close) | 0.436504
   *  MAX('Adj Close) | 0.475256
   * -RECORD 3-------------------
   *  year            | 1983
   *  month           | 03
   *  AVG('Adj Close) | 0.410516
   *  MAX('Adj Close) | 0.442194
   * -RECORD 4-------------------
   *  year            | 1984
   *  month           | 04
   *  AVG('Adj Close) | 0.450090
   *  MAX('Adj Close) | 0.483521
   * }}}
   *
   * @param numRows Number of rows to show
   * @param truncate If set to more than 0, truncates strings to `truncate` characters and
   *                 all cells will be aligned right.
   * @param vertical If set to true, prints output rows vertically (one line per column value).
   * @group action
   * @since 2.3.0
   */
  // scalastyle:off println
  def show(numRows: Int, truncate: Int, vertical: Boolean): Unit =
    println(showString(numRows, truncate, vertical))
  // scalastyle:on println
  /**
   * Returns a [[DataFrameNaFunctions]] for working with missing data.
   * {{{
   *   // Dropping rows containing any null values.
   *   ds.na.drop()
   * }}}
   *
   * @group untypedrel
   * @since 1.6.0
   */
  def na: DataFrameNaFunctions = new DataFrameNaFunctions(toDF())

  /**
   * Returns a [[DataFrameStatFunctions]] for working statistic functions support.
   * {{{
   *   // Finding frequent items in column with name 'a'.
   *   ds.stat.freqItems(Seq("a"))
   * }}}
   *
   * @group untypedrel
   * @since 1.6.0
   */
  def stat: DataFrameStatFunctions = new DataFrameStatFunctions(toDF())
  /**
   * Join with another `DataFrame`.
   *
   * Behaves as an INNER JOIN and requires a subsequent join predicate.
   *
   * @param right Right side of the join operation.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_]): DataFrame = withPlan {
    // No condition yet: callers supply the predicate via a later `where`/`filter`.
    Join(logicalPlan, right.logicalPlan, joinType = Inner, None, JoinHint.NONE)
  }
  /**
   * Inner equi-join with another `DataFrame` using the given column.
   *
   * Different from other join functions, the join column will only appear once in the output,
   * i.e. similar to SQL's `JOIN USING` syntax.
   *
   * {{{
   *   // Joining df1 and df2 using the column "user_id"
   *   df1.join(df2, "user_id")
   * }}}
   *
   * @param right Right side of the join operation.
   * @param usingColumn Name of the column to join on. This column must exist on both sides.
   *
   * @note If you perform a self-join using this function without aliasing the input
   * `DataFrame`s, you will NOT be able to reference any columns after the join, since
   * there is no way to disambiguate which side of the join you would like to reference.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], usingColumn: String): DataFrame = {
    join(right, Seq(usingColumn))
  }
  /**
   * Inner equi-join with another `DataFrame` using the given columns.
   *
   * Different from other join functions, the join columns will only appear once in the output,
   * i.e. similar to SQL's `JOIN USING` syntax.
   *
   * {{{
   *   // Joining df1 and df2 using the columns "user_id" and "user_name"
   *   df1.join(df2, Seq("user_id", "user_name"))
   * }}}
   *
   * @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
   *
   * @note If you perform a self-join using this function without aliasing the input
   * `DataFrame`s, you will NOT be able to reference any columns after the join, since
   * there is no way to disambiguate which side of the join you would like to reference.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], usingColumns: Seq[String]): DataFrame = {
    join(right, usingColumns, "inner")
  }
  /**
   * Equi-join with another `DataFrame` using the given columns. A cross join with a predicate
   * is specified as an inner join. If you would explicitly like to perform a cross join use the
   * `crossJoin` method.
   *
   * Different from other join functions, the join columns will only appear once in the output,
   * i.e. similar to SQL's `JOIN USING` syntax.
   *
   * @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
   * @param joinType Type of join to perform. Default `inner`. Must be one of:
   *                 `inner`, `cross`, `outer`, `full`, `fullouter`, `full_outer`, `left`,
   *                 `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`,
   *                 `semi`, `leftsemi`, `left_semi`, `anti`, `leftanti`, `left_anti`.
   *
   * @note If you perform a self-join using this function without aliasing the input
   * `DataFrame`s, you will NOT be able to reference any columns after the join, since
   * there is no way to disambiguate which side of the join you would like to reference.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], usingColumns: Seq[String], joinType: String): DataFrame = {
    // Analyze the self join. The assumption is that the analyzer will disambiguate left vs right
    // by creating a new instance for one of the branch.
    val joined = sparkSession.sessionState.executePlan(
      Join(logicalPlan, right.logicalPlan, joinType = JoinType(joinType), None, JoinHint.NONE))
      .analyzed.asInstanceOf[Join]

    // Re-issue the join as a USING join over the (possibly de-duplicated) analyzed branches.
    withPlan {
      Join(
        joined.left,
        joined.right,
        UsingJoin(JoinType(joinType), usingColumns),
        None,
        JoinHint.NONE)
    }
  }
  /**
   * Inner join with another `DataFrame`, using the given join expression.
   *
   * {{{
   *   // The following two are equivalent:
   *   df1.join(df2, $"df1Key" === $"df2Key")
   *   df1.join(df2).where($"df1Key" === $"df2Key")
   * }}}
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], joinExprs: Column): DataFrame = join(right, joinExprs, "inner")
  /**
   * Join with another `DataFrame`, using the given join expression. The following performs
   * a full outer join between `df1` and `df2`.
   *
   * {{{
   *   // Scala:
   *   import org.apache.spark.sql.functions._
   *   df1.join(df2, $"df1Key" === $"df2Key", "outer")
   *
   *   // Java:
   *   import static org.apache.spark.sql.functions.*;
   *   df1.join(df2, col("df1Key").equalTo(col("df2Key")), "outer");
   * }}}
   *
   * @param right Right side of the join.
   * @param joinExprs Join expression.
   * @param joinType Type of join to perform. Default `inner`. Must be one of:
   *                 `inner`, `cross`, `outer`, `full`, `fullouter`, `full_outer`, `left`,
   *                 `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`,
   *                 `semi`, `leftsemi`, `left_semi`, `anti`, `leftanti`, `left_anti`.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], joinExprs: Column, joinType: String): DataFrame = {
    // Note that in this function, we introduce a hack in the case of self-join to automatically
    // resolve ambiguous join conditions into ones that might make sense [SPARK-6231].
    // Consider this case: df.join(df, df("key") === df("key"))
    // Since df("key") === df("key") is a trivially true condition, this actually becomes a
    // cartesian join. However, most likely users expect to perform a self join using "key".
    // With that assumption, this hack turns the trivially true condition into equality on join
    // keys that are resolved to both sides.

    // Trigger analysis so in the case of self-join, the analyzer will clone the plan.
    // After the cloning, left and right side will have distinct expression ids.
    val plan = withPlan(
      Join(logicalPlan, right.logicalPlan, JoinType(joinType), Some(joinExprs.expr), JoinHint.NONE))
      .queryExecution.analyzed.asInstanceOf[Join]

    // If auto self join alias is disabled, return the plan.
    if (!sparkSession.sessionState.conf.dataFrameSelfJoinAutoResolveAmbiguity) {
      return withPlan(plan)
    }

    // If left/right have no output set intersection, return the plan.
    val lanalyzed = withPlan(this.logicalPlan).queryExecution.analyzed
    val ranalyzed = withPlan(right.logicalPlan).queryExecution.analyzed
    if (lanalyzed.outputSet.intersect(ranalyzed.outputSet).isEmpty) {
      return withPlan(plan)
    }

    // Otherwise, find the trivially true predicates and automatically resolves them to both sides.
    // By the time we get here, since we have already run analysis, all attributes should've been
    // resolved and become AttributeReference.
    val cond = plan.condition.map { _.transform {
      // `a.sameRef(b)` means both sides reference the very same attribute: a trivially
      // true predicate that is rewritten as equality between the cloned branches.
      case catalyst.expressions.EqualTo(a: AttributeReference, b: AttributeReference)
          if a.sameRef(b) =>
        catalyst.expressions.EqualTo(
          withPlan(plan.left).resolve(a.name),
          withPlan(plan.right).resolve(b.name))
      case catalyst.expressions.EqualNullSafe(a: AttributeReference, b: AttributeReference)
          if a.sameRef(b) =>
        catalyst.expressions.EqualNullSafe(
          withPlan(plan.left).resolve(a.name),
          withPlan(plan.right).resolve(b.name))
    }}

    withPlan {
      plan.copy(condition = cond)
    }
  }
  /**
   * Explicit cartesian join with another `DataFrame`.
   *
   * @param right Right side of the join operation.
   *
   * @note Cartesian joins are very expensive without an extra filter that can be pushed down.
   *
   * @group untypedrel
   * @since 2.1.0
   */
  def crossJoin(right: Dataset[_]): DataFrame = withPlan {
    Join(logicalPlan, right.logicalPlan, joinType = Cross, None, JoinHint.NONE)
  }
  /**
   * :: Experimental ::
   * Joins this Dataset returning a `Tuple2` for each pair where `condition` evaluates to
   * true.
   *
   * This is similar to the relation `join` function with one important difference in the
   * result schema. Since `joinWith` preserves objects present on either side of the join, the
   * result schema is similarly nested into a tuple under the column names `_1` and `_2`.
   *
   * This type of join can be useful both for preserving type-safety with the original object
   * types as well as working with relational data where either side of the join has column
   * names in common.
   *
   * @param other Right side of the join.
   * @param condition Join expression.
   * @param joinType Type of join to perform. Default `inner`. Must be one of:
   *                 `inner`, `cross`, `outer`, `full`, `fullouter`,`full_outer`, `left`,
   *                 `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @Evolving
  def joinWith[U](other: Dataset[U], condition: Column, joinType: String): Dataset[(T, U)] = {
    // Creates a Join node and resolve it first, to get join condition resolved, self-join resolved,
    // etc.
    val joined = sparkSession.sessionState.executePlan(
      Join(
        this.logicalPlan,
        other.logicalPlan,
        JoinType(joinType),
        Some(condition.expr),
        JoinHint.NONE)).analyzed.asInstanceOf[Join]

    // Semi/anti joins only return one side, so there is no pair to build.
    if (joined.joinType == LeftSemi || joined.joinType == LeftAnti) {
      throw new AnalysisException("Invalid join type in joinWith: " + joined.joinType.sql)
    }

    implicit val tuple2Encoder: Encoder[(T, U)] =
      ExpressionEncoder.tuple(this.exprEnc, other.exprEnc)

    // Each side becomes one tuple slot: a flat (non-struct) encoder keeps its single output
    // column; a struct encoder has all its columns bundled into one struct.
    val leftResultExpr = {
      if (!this.exprEnc.isSerializedAsStructForTopLevel) {
        assert(joined.left.output.length == 1)
        Alias(joined.left.output.head, "_1")()
      } else {
        Alias(CreateStruct(joined.left.output), "_1")()
      }
    }

    val rightResultExpr = {
      if (!other.exprEnc.isSerializedAsStructForTopLevel) {
        assert(joined.right.output.length == 1)
        Alias(joined.right.output.head, "_2")()
      } else {
        Alias(CreateStruct(joined.right.output), "_2")()
      }
    }

    if (joined.joinType.isInstanceOf[InnerLike]) {
      // For inner joins, we can directly perform the join and then can project the join
      // results into structs. This ensures that data remains flat during shuffles /
      // exchanges (unlike the outer join path, which nests the data before shuffling).
      withTypedPlan(Project(Seq(leftResultExpr, rightResultExpr), joined))
    } else { // outer joins
      // For both join sides, combine all outputs into a single column and alias it with "_1
      // or "_2", to match the schema for the encoder of the join result.
      // Note that we do this before joining them, to enable the join operator to return null
      // for one side, in cases like outer-join.
      val left = Project(leftResultExpr :: Nil, joined.left)
      val right = Project(rightResultExpr :: Nil, joined.right)

      // Rewrites the join condition to make the attribute point to correct column/field,
      // after we combine the outputs of each join side.
      val conditionExpr = joined.condition.get transformUp {
        case a: Attribute if joined.left.outputSet.contains(a) =>
          if (!this.exprEnc.isSerializedAsStructForTopLevel) {
            left.output.head
          } else {
            val index = joined.left.output.indexWhere(_.exprId == a.exprId)
            GetStructField(left.output.head, index)
          }
        case a: Attribute if joined.right.outputSet.contains(a) =>
          if (!other.exprEnc.isSerializedAsStructForTopLevel) {
            right.output.head
          } else {
            val index = joined.right.output.indexWhere(_.exprId == a.exprId)
            GetStructField(right.output.head, index)
          }
      }

      withTypedPlan(Join(left, right, joined.joinType, Some(conditionExpr), JoinHint.NONE))
    }
  }
/**
 * :: Experimental ::
 * Joins this Dataset with another one using an inner equi-join, returning a `Tuple2`
 * for each pair of rows where `condition` evaluates to true.
 *
 * @param other Right side of the join.
 * @param condition Join expression.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def joinWith[U](other: Dataset[U], condition: Column): Dataset[(T, U)] =
  joinWith(other, condition, "inner")
/**
 * Returns a new Dataset with each partition sorted by the given column names.
 *
 * This is the same operation as "SORT BY" in SQL (Hive QL): the sort is local to
 * each partition, not global.
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): Dataset[T] = {
  val columns = (sortCol +: sortCols).map(name => Column(name))
  sortWithinPartitions(columns : _*)
}
/**
 * Returns a new Dataset with each partition sorted by the given expressions.
 *
 * This is the same operation as "SORT BY" in SQL (Hive QL): each partition is
 * sorted independently; no global ordering is imposed.
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def sortWithinPartitions(sortExprs: Column*): Dataset[T] =
  sortInternal(global = false, sortExprs)
/**
 * Returns a new Dataset globally sorted by the named columns, all ascending.
 * {{{
 *   // The following 3 are equivalent
 *   ds.sort("sortcol")
 *   ds.sort($"sortcol")
 *   ds.sort($"sortcol".asc)
 * }}}
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): Dataset[T] = {
  val columns = (sortCol +: sortCols).map(name => Column(name))
  sort(columns : _*)
}
/**
 * Returns a new Dataset globally sorted by the given expressions. For example:
 * {{{
 *   ds.sort($"col1", $"col2".desc)
 * }}}
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def sort(sortExprs: Column*): Dataset[T] =
  sortInternal(global = true, sortExprs)
/**
 * Returns a new Dataset sorted by the named columns; alias of `sort`.
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def orderBy(sortCol: String, sortCols: String*): Dataset[T] = {
  sort(sortCol, sortCols : _*)
}
/**
 * Returns a new Dataset sorted by the given expressions; alias of `sort`.
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def orderBy(sortExprs: Column*): Dataset[T] = {
  sort(sortExprs : _*)
}
/**
 * Selects a column by name and returns it as a [[Column]]; alias of `col`.
 *
 * @note The column name can also reference a nested column like `a.b`.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def apply(colName: String): Column = {
  col(colName)
}
/**
 * Attaches a hint to the current Dataset. As an example, the following code marks
 * one side of a join as broadcastable:
 *
 * {{{
 *   df1.join(df2.hint("broadcast"))
 * }}}
 *
 * @group basic
 * @since 2.2.0
 */
@scala.annotation.varargs
def hint(name: String, parameters: Any*): Dataset[T] =
  withTypedPlan(UnresolvedHint(name, parameters, logicalPlan))
/**
 * Selects a column by name and returns it as a [[Column]].
 *
 * @note The column name can also reference a nested column like `a.b`. The special
 *       name `"*"` selects every output column.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def col(colName: String): Column = {
  if (colName == "*") {
    // "*" expands to all columns of the analyzed plan.
    Column(ResolvedStar(queryExecution.analyzed.output))
  } else if (sqlContext.conf.supportQuotedRegexColumnName) {
    // Quoted-regex mode: let colRegex decide whether the name is a regex.
    colRegex(colName)
  } else {
    Column(addDataFrameIdToCol(resolve(colName)))
  }
}
// Attach the dataset id and column position to the column reference, so that we can detect
// ambiguous self-join correctly. See the rule `DetectAmbiguousSelfJoin`.
// This must be called before we return a `Column` that contains `AttributeReference`.
// Note that, the metadata added here are only available in the analyzer, as the analyzer rule
// `DetectAmbiguousSelfJoin` will remove it.
private def addDataFrameIdToCol(expr: NamedExpression): NamedExpression = {
  // Read the flag once; tagging is skipped entirely when the check is disabled.
  val failOnAmbiguousSelfJoin =
    sparkSession.sessionState.conf.getConf(SQLConf.FAIL_AMBIGUOUS_SELF_JOIN)
  val tagged = expr transform {
    case ref: AttributeReference if failOnAmbiguousSelfJoin =>
      val taggedMetadata = new MetadataBuilder()
        .withMetadata(ref.metadata)
        .putLong(Dataset.DATASET_ID_KEY, id)
        .putLong(Dataset.COL_POS_KEY, logicalPlan.output.indexWhere(ref.semanticEquals))
        .build()
      ref.withMetadata(taggedMetadata)
  }
  tagged.asInstanceOf[NamedExpression]
}
/**
 * Selects a column whose name is given as a (possibly backquoted) regex and returns
 * it as a [[Column]]. Falls back to plain name resolution when the name is not an
 * escaped identifier.
 *
 * @group untypedrel
 * @since 2.3.0
 */
def colRegex(colName: String): Column = {
  val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
  colName match {
    case ParserUtils.escapedIdentifier(regex) =>
      Column(UnresolvedRegex(regex, None, caseSensitive))
    case ParserUtils.qualifiedEscapedIdentifier(nameParts, regex) =>
      Column(UnresolvedRegex(regex, Some(nameParts), caseSensitive))
    case _ =>
      Column(addDataFrameIdToCol(resolve(colName)))
  }
}
/**
 * Returns a new Dataset with the given alias set.
 *
 * @group typedrel
 * @since 1.6.0
 */
def as(alias: String): Dataset[T] =
  withTypedPlan(SubqueryAlias(alias, logicalPlan))
/**
 * (Scala-specific) Returns a new Dataset with the given alias set.
 *
 * @group typedrel
 * @since 2.0.0
 */
def as(alias: Symbol): Dataset[T] = {
  as(alias.name)
}
/**
 * Returns a new Dataset with the given alias set. Same as `as`.
 *
 * @group typedrel
 * @since 2.0.0
 */
def alias(alias: String): Dataset[T] = {
  as(alias)
}
/**
 * (Scala-specific) Returns a new Dataset with the given alias set. Same as `as`.
 *
 * @group typedrel
 * @since 2.0.0
 */
def alias(alias: Symbol): Dataset[T] = {
  as(alias.name)
}
/**
 * Selects a set of column-based expressions.
 * {{{
 *   ds.select($"colA", $"colB" + 1)
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def select(cols: Column*): DataFrame = {
  val namedExprs = cols.map(_.named)
  withPlan(Project(namedExprs, logicalPlan))
}
/**
 * Selects a set of columns by name. This variant of `select` can only pick
 * existing columns (it cannot construct expressions).
 *
 * {{{
 *   // The following two are equivalent:
 *   ds.select("colA", "colB")
 *   ds.select($"colA", $"colB")
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def select(col: String, cols: String*): DataFrame = {
  val columns = (col +: cols).map(name => Column(name))
  select(columns : _*)
}
/**
 * Selects a set of SQL expressions. This variant of `select` parses each string
 * with the session's SQL parser.
 *
 * {{{
 *   // The following are equivalent:
 *   ds.selectExpr("colA", "colB as newName", "abs(colC)")
 *   ds.select(expr("colA"), expr("colB as newName"), expr("abs(colC)"))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
  val parser = sparkSession.sessionState.sqlParser
  val columns = exprs.map(e => Column(parser.parseExpression(e)))
  select(columns : _*)
}
/**
 * :: Experimental ::
 * Returns a new Dataset by computing the given [[Column]] expression for each element.
 *
 * {{{
 *   val ds = Seq(1, 2, 3).toDS()
 *   val newDS = ds.select(expr("value + 1").as[Int])
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def select[U1](c1: TypedColumn[T, U1]): Dataset[U1] = {
  implicit val encoder = c1.encoder
  val project = Project(c1.withInputType(exprEnc, logicalPlan.output).named :: Nil, logicalPlan)
  if (encoder.isSerializedAsStructForTopLevel) {
    // U1 is serialized as a struct: flatten its inner fields via a Tuple1 wrapper.
    new Dataset[Tuple1[U1]](sparkSession, project, ExpressionEncoder.tuple(encoder)).map(_._1)
  } else {
    new Dataset[U1](sparkSession, project, encoder)
  }
}
/**
 * Internal helper for building typed selects that return tuples. For simplicity and
 * code reuse, this works without the type system's help; the public overloads cast
 * the result to the appropriate tuple type.
 */
protected def selectUntyped(columns: TypedColumn[_, _]*): Dataset[_] = {
  val namedColumns = columns.map(_.withInputType(exprEnc, logicalPlan.output).named)
  val tupleEncoder = ExpressionEncoder.tuple(columns.map(_.encoder))
  val execution = new QueryExecution(sparkSession, Project(namedColumns, logicalPlan))
  new Dataset(sparkSession, execution, tupleEncoder)
}
/**
 * :: Experimental ::
 * Returns a new Dataset by computing the two given [[Column]] expressions for each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def select[U1, U2](c1: TypedColumn[T, U1], c2: TypedColumn[T, U2]): Dataset[(U1, U2)] = {
  selectUntyped(c1, c2).asInstanceOf[Dataset[(U1, U2)]]
}
/**
 * :: Experimental ::
 * Returns a new Dataset by computing the three given [[Column]] expressions for each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def select[U1, U2, U3](
    c1: TypedColumn[T, U1],
    c2: TypedColumn[T, U2],
    c3: TypedColumn[T, U3]): Dataset[(U1, U2, U3)] = {
  selectUntyped(c1, c2, c3).asInstanceOf[Dataset[(U1, U2, U3)]]
}
/**
 * :: Experimental ::
 * Returns a new Dataset by computing the four given [[Column]] expressions for each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def select[U1, U2, U3, U4](
    c1: TypedColumn[T, U1],
    c2: TypedColumn[T, U2],
    c3: TypedColumn[T, U3],
    c4: TypedColumn[T, U4]): Dataset[(U1, U2, U3, U4)] = {
  selectUntyped(c1, c2, c3, c4).asInstanceOf[Dataset[(U1, U2, U3, U4)]]
}
/**
 * :: Experimental ::
 * Returns a new Dataset by computing the five given [[Column]] expressions for each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def select[U1, U2, U3, U4, U5](
    c1: TypedColumn[T, U1],
    c2: TypedColumn[T, U2],
    c3: TypedColumn[T, U3],
    c4: TypedColumn[T, U4],
    c5: TypedColumn[T, U5]): Dataset[(U1, U2, U3, U4, U5)] = {
  selectUntyped(c1, c2, c3, c4, c5).asInstanceOf[Dataset[(U1, U2, U3, U4, U5)]]
}
/**
 * Filters rows using the given condition.
 * {{{
 *   // The following are equivalent:
 *   peopleDs.filter($"age" > 15)
 *   peopleDs.where($"age" > 15)
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
def filter(condition: Column): Dataset[T] =
  withTypedPlan(Filter(condition.expr, logicalPlan))
/**
 * Filters rows using the given SQL expression.
 * {{{
 *   peopleDs.filter("age > 15")
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
def filter(conditionExpr: String): Dataset[T] = {
  val predicate = sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)
  filter(Column(predicate))
}
/**
 * Filters rows using the given condition; alias of `filter`.
 * {{{
 *   // The following are equivalent:
 *   peopleDs.filter($"age" > 15)
 *   peopleDs.where($"age" > 15)
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
def where(condition: Column): Dataset[T] = {
  filter(condition)
}
/**
 * Filters rows using the given SQL expression; alias of `filter`.
 * {{{
 *   peopleDs.where("age > 15")
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
def where(conditionExpr: String): Dataset[T] = {
  // Delegates to the String overload of filter, which parses the expression.
  filter(conditionExpr)
}
/**
 * Groups the Dataset using the specified columns, so we can run aggregation on them.
 * See [[RelationalGroupedDataset]] for all the available aggregate functions.
 *
 * {{{
 *   // Compute the average for all numeric columns grouped by department.
 *   ds.groupBy($"department").avg()
 *
 *   // Compute the max age and average salary, grouped by department and gender.
 *   ds.groupBy($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def groupBy(cols: Column*): RelationalGroupedDataset = {
  val groupingExprs = cols.map(_.expr)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.GroupByType)
}
/**
 * Creates a multi-dimensional rollup for the current Dataset using the specified
 * columns, so we can run aggregation on them.
 * See [[RelationalGroupedDataset]] for all the available aggregate functions.
 *
 * {{{
 *   // Compute the average for all numeric columns rolluped by department and group.
 *   ds.rollup($"department", $"group").avg()
 *
 *   // Compute the max age and average salary, rolluped by department and gender.
 *   ds.rollup($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def rollup(cols: Column*): RelationalGroupedDataset = {
  val groupingExprs = cols.map(_.expr)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.RollupType)
}
/**
 * Creates a multi-dimensional cube for the current Dataset using the specified
 * columns, so we can run aggregation on them.
 * See [[RelationalGroupedDataset]] for all the available aggregate functions.
 *
 * {{{
 *   // Compute the average for all numeric columns cubed by department and group.
 *   ds.cube($"department", $"group").avg()
 *
 *   // Compute the max age and average salary, cubed by department and gender.
 *   ds.cube($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def cube(cols: Column*): RelationalGroupedDataset = {
  val groupingExprs = cols.map(_.expr)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.CubeType)
}
/**
 * Groups the Dataset using the specified column names, so that we can run
 * aggregation on them. See [[RelationalGroupedDataset]] for all the available
 * aggregate functions.
 *
 * This variant of groupBy can only group by existing columns using column names
 * (i.e. cannot construct expressions).
 *
 * {{{
 *   // Compute the average for all numeric columns grouped by department.
 *   ds.groupBy("department").avg()
 *
 *   // Compute the max age and average salary, grouped by department and gender.
 *   ds.groupBy($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def groupBy(col1: String, cols: String*): RelationalGroupedDataset = {
  val groupingExprs = (col1 +: cols).map(resolve)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.GroupByType)
}
/**
 * :: Experimental ::
 * (Scala-specific)
 * Reduces the elements of this Dataset using the specified binary function. The given
 * `func` must be commutative and associative or the result may be non-deterministic.
 *
 * @group action
 * @since 1.6.0
 */
@Experimental
@Evolving
def reduce(func: (T, T) => T): T =
  withNewRDDExecutionId(rdd.reduce(func))
/**
 * :: Experimental ::
 * (Java-specific)
 * Reduces the elements of this Dataset using the specified binary function. The given
 * `func` must be commutative and associative or the result may be non-deterministic.
 *
 * @group action
 * @since 1.6.0
 */
@Experimental
@Evolving
def reduce(func: ReduceFunction[T]): T = {
  reduce((a: T, b: T) => func.call(a, b))
}
/**
 * :: Experimental ::
 * (Scala-specific)
 * Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
 *
 * @group typedrel
 * @since 2.0.0
 */
@Experimental
@Evolving
def groupByKey[K: Encoder](func: T => K): KeyValueGroupedDataset[K, T] = {
  // Append the computed grouping-key columns to the plan, then analyze it.
  val appended = AppendColumns(func, logicalPlan)
  new KeyValueGroupedDataset(
    encoderFor[K],
    encoderFor[T],
    sparkSession.sessionState.executePlan(appended),
    logicalPlan.output,
    appended.newColumns)
}
/**
 * :: Experimental ::
 * (Java-specific)
 * Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
 *
 * @group typedrel
 * @since 2.0.0
 */
@Experimental
@Evolving
def groupByKey[K](func: MapFunction[T, K], encoder: Encoder[K]): KeyValueGroupedDataset[K, T] = {
  groupByKey((t: T) => func.call(t))(encoder)
}
/**
 * Creates a multi-dimensional rollup for the current Dataset using the specified
 * column names, so we can run aggregation on them.
 * See [[RelationalGroupedDataset]] for all the available aggregate functions.
 *
 * This variant of rollup can only group by existing columns using column names
 * (i.e. cannot construct expressions).
 *
 * {{{
 *   // Compute the average for all numeric columns rolluped by department and group.
 *   ds.rollup("department", "group").avg()
 *
 *   // Compute the max age and average salary, rolluped by department and gender.
 *   ds.rollup($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def rollup(col1: String, cols: String*): RelationalGroupedDataset = {
  val groupingExprs = (col1 +: cols).map(resolve)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.RollupType)
}
/**
 * Creates a multi-dimensional cube for the current Dataset using the specified
 * column names, so we can run aggregation on them.
 * See [[RelationalGroupedDataset]] for all the available aggregate functions.
 *
 * This variant of cube can only group by existing columns using column names
 * (i.e. cannot construct expressions).
 *
 * {{{
 *   // Compute the average for all numeric columns cubed by department and group.
 *   ds.cube("department", "group").avg()
 *
 *   // Compute the max age and average salary, cubed by department and gender.
 *   ds.cube($"department", $"gender").agg(Map(
 *     "salary" -> "avg",
 *     "age" -> "max"
 *   ))
 * }}}
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def cube(col1: String, cols: String*): RelationalGroupedDataset = {
  val groupingExprs = (col1 +: cols).map(resolve)
  RelationalGroupedDataset(toDF(), groupingExprs, RelationalGroupedDataset.CubeType)
}
/**
 * (Scala-specific) Aggregates on the entire Dataset without groups.
 * {{{
 *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
 *   ds.agg("age" -> "max", "salary" -> "avg")
 *   ds.groupBy().agg("age" -> "max", "salary" -> "avg")
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame =
  groupBy().agg(aggExpr, aggExprs : _*)
/**
 * (Scala-specific) Aggregates on the entire Dataset without groups.
 * {{{
 *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
 *   ds.agg(Map("age" -> "max", "salary" -> "avg"))
 *   ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
def agg(exprs: Map[String, String]): DataFrame = {
  groupBy().agg(exprs)
}
/**
 * (Java-specific) Aggregates on the entire Dataset without groups.
 * {{{
 *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
 *   ds.agg(Map("age" -> "max", "salary" -> "avg"))
 *   ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
def agg(exprs: java.util.Map[String, String]): DataFrame = {
  groupBy().agg(exprs)
}
/**
 * Aggregates on the entire Dataset without groups.
 * {{{
 *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
 *   ds.agg(max($"age"), avg($"salary"))
 *   ds.groupBy().agg(max($"age"), avg($"salary"))
 * }}}
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def agg(expr: Column, exprs: Column*): DataFrame = {
  groupBy().agg(expr, exprs : _*)
}
/**
 * Returns a new Dataset by taking the first `n` rows. The difference between this
 * function and `head` is that `head` is an action that returns an array (triggering
 * query execution) while `limit` lazily returns a new Dataset.
 *
 * @group typedrel
 * @since 2.0.0
 */
def limit(n: Int): Dataset[T] =
  withTypedPlan(Limit(Literal(n), logicalPlan))
/**
 * Returns a new Dataset containing the union of rows in this Dataset and another Dataset.
 *
 * This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
 * deduplication of elements), use this function followed by a [[distinct]].
 *
 * Also as standard in SQL, this function resolves columns by position (not by name):
 *
 * {{{
 *   val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2")
 *   val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0")
 *   df1.union(df2).show
 *
 *   // output:
 *   // +----+----+----+
 *   // |col0|col1|col2|
 *   // +----+----+----+
 *   // |   1|   2|   3|
 *   // |   4|   5|   6|
 *   // +----+----+----+
 * }}}
 *
 * Notice that the column positions in the schema aren't necessarily matched with the
 * fields in the strongly typed objects in a Dataset. This function resolves columns
 * by their positions in the schema, not the fields in the strongly typed objects. Use
 * [[unionByName]] to resolve columns by field name in the typed objects.
 *
 * @group typedrel
 * @since 2.0.0
 */
def union(other: Dataset[T]): Dataset[T] = withSetOperator {
  // Eagerly flatten nested unions. This breaks caching, but it's usually ok because
  // it addresses a very specific use case: using union to union many files or partitions.
  CombineUnions(Union(logicalPlan, other.logicalPlan))
}
/**
 * Returns a new Dataset containing the union of rows in this Dataset and another
 * Dataset; alias of `union`.
 *
 * This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
 * deduplication of elements), use this function followed by a [[distinct]].
 *
 * Also as standard in SQL, this function resolves columns by position (not by name).
 *
 * @group typedrel
 * @since 2.0.0
 */
def unionAll(other: Dataset[T]): Dataset[T] = {
  union(other)
}
/**
 * Returns a new Dataset containing union of rows in this Dataset and another Dataset.
 *
 * This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
 * union (that does deduplication of elements), use this function followed by a [[distinct]].
 *
 * The difference between this function and [[union]] is that this function
 * resolves columns by name (not by position):
 *
 * {{{
 *   val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2")
 *   val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0")
 *   df1.unionByName(df2).show
 *
 *   // output:
 *   // +----+----+----+
 *   // |col0|col1|col2|
 *   // +----+----+----+
 *   // |   1|   2|   3|
 *   // |   6|   4|   5|
 *   // +----+----+----+
 * }}}
 *
 * @param other the Dataset to union with; must expose a column (by resolver-equal name)
 *              for every column of this Dataset, or an `AnalysisException` is thrown.
 *
 * @group typedrel
 * @since 2.3.0
 */
def unionByName(other: Dataset[T]): Dataset[T] = withSetOperator {
  // Check column name duplication on both sides first, so name-based matching is unambiguous.
  val resolver = sparkSession.sessionState.analyzer.resolver
  val leftOutputAttrs = logicalPlan.output
  val rightOutputAttrs = other.logicalPlan.output
  SchemaUtils.checkColumnNameDuplication(
    leftOutputAttrs.map(_.name),
    "in the left attributes",
    sparkSession.sessionState.conf.caseSensitiveAnalysis)
  SchemaUtils.checkColumnNameDuplication(
    rightOutputAttrs.map(_.name),
    "in the right attributes",
    sparkSession.sessionState.conf.caseSensitiveAnalysis)
  // Builds a project list for `other` based on `logicalPlan` output names, so the right
  // side's columns line up positionally with the left side's.
  val rightProjectList = leftOutputAttrs.map { lattr =>
    rightOutputAttrs.find { rattr => resolver(lattr.name, rattr.name) }.getOrElse {
      throw new AnalysisException(
        s"""Cannot resolve column name "${lattr.name}" among """ +
          s"""(${rightOutputAttrs.map(_.name).mkString(", ")})""")
    }
  }
  // Right-side columns with no left counterpart are appended; delegates failure
  // checks (e.g. schema length mismatch) to `CheckAnalysis`.
  val notFoundAttrs = rightOutputAttrs.diff(rightProjectList)
  val rightChild = Project(rightProjectList ++ notFoundAttrs, other.logicalPlan)
  // This breaks caching, but it's usually ok because it addresses a very specific use case:
  // using union to union many files or partitions.
  CombineUnions(Union(logicalPlan, rightChild))
}
/**
 * Returns a new Dataset containing rows only in both this Dataset and another Dataset.
 * This is equivalent to `INTERSECT` in SQL.
 *
 * @note Equality checking is performed directly on the encoded representation of the data
 * and thus is not affected by a custom `equals` function defined on `T`.
 *
 * @group typedrel
 * @since 1.6.0
 */
def intersect(other: Dataset[T]): Dataset[T] =
  withSetOperator(Intersect(logicalPlan, other.logicalPlan, isAll = false))
/**
 * Returns a new Dataset containing rows only in both this Dataset and another Dataset,
 * preserving duplicates. This is equivalent to `INTERSECT ALL` in SQL.
 *
 * @note Equality checking is performed directly on the encoded representation of the data
 * and thus is not affected by a custom `equals` function defined on `T`. Also as standard
 * in SQL, this function resolves columns by position (not by name).
 *
 * @group typedrel
 * @since 2.4.0
 */
def intersectAll(other: Dataset[T]): Dataset[T] =
  withSetOperator(Intersect(logicalPlan, other.logicalPlan, isAll = true))
/**
 * Returns a new Dataset containing rows in this Dataset but not in another Dataset.
 * This is equivalent to `EXCEPT DISTINCT` in SQL.
 *
 * @note Equality checking is performed directly on the encoded representation of the data
 * and thus is not affected by a custom `equals` function defined on `T`.
 *
 * @group typedrel
 * @since 2.0.0
 */
def except(other: Dataset[T]): Dataset[T] =
  withSetOperator(Except(logicalPlan, other.logicalPlan, isAll = false))
/**
 * Returns a new Dataset containing rows in this Dataset but not in another Dataset,
 * preserving duplicates. This is equivalent to `EXCEPT ALL` in SQL.
 *
 * @note Equality checking is performed directly on the encoded representation of the data
 * and thus is not affected by a custom `equals` function defined on `T`. Also as standard in
 * SQL, this function resolves columns by position (not by name).
 *
 * @group typedrel
 * @since 2.4.0
 */
def exceptAll(other: Dataset[T]): Dataset[T] =
  withSetOperator(Except(logicalPlan, other.logicalPlan, isAll = true))
/**
 * Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
 * using a user-supplied seed.
 *
 * @param fraction Fraction of rows to generate, range [0.0, 1.0].
 * @param seed Seed for sampling.
 *
 * @note This is NOT guaranteed to provide exactly the fraction of the count
 * of the given [[Dataset]].
 *
 * @group typedrel
 * @since 2.3.0
 */
def sample(fraction: Double, seed: Long): Dataset[T] =
  sample(withReplacement = false, fraction, seed)
/**
 * Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
 * using a random seed.
 *
 * @param fraction Fraction of rows to generate, range [0.0, 1.0].
 *
 * @note This is NOT guaranteed to provide exactly the fraction of the count
 * of the given [[Dataset]].
 *
 * @group typedrel
 * @since 2.3.0
 */
def sample(fraction: Double): Dataset[T] =
  sample(withReplacement = false, fraction)
/**
 * Returns a new [[Dataset]] by sampling a fraction of rows, using a user-supplied seed.
 *
 * @param withReplacement Sample with replacement or not.
 * @param fraction Fraction of rows to generate, range [0.0, 1.0].
 * @param seed Seed for sampling.
 *
 * @note This is NOT guaranteed to provide exactly the fraction of the count
 * of the given [[Dataset]].
 *
 * @group typedrel
 * @since 1.6.0
 */
def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] = {
  // Lower bound 0.0 means the sample covers [0.0, fraction) of the keyspace.
  withTypedPlan(Sample(0.0, fraction, withReplacement, seed, logicalPlan))
}
/**
 * Returns a new [[Dataset]] by sampling a fraction of rows, using a random seed.
 *
 * @param withReplacement Sample with replacement or not.
 * @param fraction Fraction of rows to generate, range [0.0, 1.0].
 *
 * @note This is NOT guaranteed to provide exactly the fraction of the total count
 * of the given [[Dataset]].
 *
 * @group typedrel
 * @since 1.6.0
 */
def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
  sample(withReplacement, fraction, Utils.random.nextLong)
}
/**
 * Randomly splits this Dataset with the provided weights.
 *
 * @param weights weights for splits, will be normalized if they don't sum to 1.
 *                Each weight must be nonnegative and their sum must be positive.
 * @param seed Seed for sampling.
 *
 * For Java API, use [[randomSplitAsList]].
 *
 * @group typedrel
 * @since 2.0.0
 */
def randomSplit(weights: Array[Double], seed: Long): Array[Dataset[T]] = {
  require(weights.forall(_ >= 0),
    s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
  require(weights.sum > 0,
    s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
  // It is possible that the underlying dataframe doesn't guarantee the ordering of rows in its
  // constituent partitions each time a split is materialized which could result in
  // overlapping splits. To prevent this, we explicitly sort each input partition to make the
  // ordering deterministic. Note that MapTypes cannot be sorted and are explicitly pruned out
  // from the sort order.
  val sortOrder = logicalPlan.output
    .filter(attr => RowOrdering.isOrderable(attr.dataType))
    .map(SortOrder(_, Ascending))
  val plan = if (sortOrder.nonEmpty) {
    // Per-partition (non-global) sort is enough to pin the ordering deterministically.
    Sort(sortOrder, global = false, logicalPlan)
  } else {
    // SPARK-12662: If sort order is empty, we materialize the dataset to guarantee determinism
    cache()
    logicalPlan
  }
  // Normalize the weights into cumulative split boundaries in [0.0, 1.0]; each adjacent
  // pair (x(0), x(1)) delimits one sampled split over the same deterministic plan.
  val sum = weights.sum
  val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
  normalizedCumWeights.sliding(2).map { x =>
    new Dataset[T](
      sparkSession, Sample(x(0), x(1), withReplacement = false, seed, plan), encoder)
  }.toArray
}
/**
 * Returns a Java list of randomly split Datasets with the provided weights.
 *
 * @param weights weights for splits, will be normalized if they don't sum to 1.
 * @param seed Seed for sampling.
 *
 * @group typedrel
 * @since 2.0.0
 */
def randomSplitAsList(weights: Array[Double], seed: Long): java.util.List[Dataset[T]] = {
  java.util.Arrays.asList(randomSplit(weights, seed) : _*)
}
/**
 * Randomly splits this Dataset with the provided weights, using a random seed.
 *
 * @param weights weights for splits, will be normalized if they don't sum to 1.
 * @group typedrel
 * @since 2.0.0
 */
def randomSplit(weights: Array[Double]): Array[Dataset[T]] =
  randomSplit(weights, Utils.random.nextLong)
/**
 * Randomly splits this Dataset with the provided weights. Provided for the Python Api.
 *
 * @param weights weights for splits, will be normalized if they don't sum to 1.
 * @param seed Seed for sampling.
 */
private[spark] def randomSplit(weights: List[Double], seed: Long): Array[Dataset[T]] =
  randomSplit(weights.toArray, seed)
/**
 * Returns a new Dataset by adding a column or replacing the existing column that has
 * the same name.
 *
 * `column`'s expression must only refer to attributes supplied by this Dataset. It is an
 * error to add a column that refers to some other Dataset.
 *
 * @note this method introduces a projection internally. Therefore, calling it multiple times,
 * for instance, via loops in order to add multiple columns can generate big plans which
 * can cause performance issues and even `StackOverflowException`. To avoid this,
 * use `select` with the multiple columns at once.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def withColumn(colName: String, col: Column): DataFrame = {
  withColumns(colName :: Nil, col :: Nil)
}
/**
 * Returns a new Dataset by adding columns or replacing the existing columns that have
 * the same names.
 *
 * @param colNames names for the new/replaced columns; must be the same length as `cols`
 *                 and contain no duplicates (per the session's case sensitivity).
 * @param cols     column expressions paired positionally with `colNames`.
 */
private[spark] def withColumns(colNames: Seq[String], cols: Seq[Column]): DataFrame = {
  require(colNames.size == cols.size,
    s"The size of column names: ${colNames.size} isn't equal to " +
      s"the size of columns: ${cols.size}")
  SchemaUtils.checkColumnNameDuplication(
    colNames,
    "in given column names",
    sparkSession.sessionState.conf.caseSensitiveAnalysis)
  val resolver = sparkSession.sessionState.analyzer.resolver
  val output = queryExecution.analyzed.output
  val columnMap = colNames.zip(cols).toMap
  // Walk the existing output in order: a field whose name matches one of the given
  // names is replaced by the new column (keeping its position); others pass through.
  val replacedAndExistingColumns = output.map { field =>
    columnMap.find { case (colName, _) =>
      resolver(field.name, colName)
    } match {
      case Some((colName: String, col: Column)) => col.as(colName)
      case _ => Column(field)
    }
  }
  // Names that matched no existing field are appended as brand-new columns.
  val newColumns = columnMap.filter { case (colName, col) =>
    !output.exists(f => resolver(f.name, colName))
  }.map { case (colName, col) => col.as(colName) }
  select(replacedAndExistingColumns ++ newColumns : _*)
}
/**
 * Returns a new Dataset by adding columns, attaching the given metadata to each.
 */
private[spark] def withColumns(
    colNames: Seq[String],
    cols: Seq[Column],
    metadata: Seq[Metadata]): DataFrame = {
  require(colNames.size == metadata.size,
    s"The size of column names: ${colNames.size} isn't equal to " +
      s"the size of metadata elements: ${metadata.size}")
  // Alias each column with its name and metadata, then delegate to the plain overload.
  val annotatedCols = (colNames, cols, metadata).zipped.map { (name, column, meta) =>
    column.as(name, meta)
  }
  withColumns(colNames, annotatedCols)
}
/**
 * Returns a new Dataset by adding a column with the given metadata attached.
 */
private[spark] def withColumn(colName: String, col: Column, metadata: Metadata): DataFrame = {
  withColumns(Seq(colName), Seq(col), Seq(metadata))
}
/**
 * Returns a new Dataset with a column renamed.
 * This is a no-op if the schema doesn't contain `existingName`.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def withColumnRenamed(existingName: String, newName: String): DataFrame = {
  val resolver = sparkSession.sessionState.analyzer.resolver
  val output = queryExecution.analyzed.output
  if (!output.exists(attr => resolver(attr.name, existingName))) {
    // Nothing to rename: return the DataFrame view unchanged.
    toDF()
  } else {
    val projection = output.map {
      case attr if resolver(attr.name, existingName) => Column(attr).as(newName)
      case attr => Column(attr)
    }
    select(projection : _*)
  }
}
/**
 * Returns a new Dataset with a column dropped. This is a no-op if schema doesn't contain
 * column name.
 *
 * This method can only be used to drop top level columns. The colName string is treated
 * literally without further interpretation.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def drop(colName: String): DataFrame = drop(Seq(colName) : _*)
/**
 * Returns a new Dataset with columns dropped.
 * This is a no-op if schema doesn't contain column name(s).
 *
 * This method can only be used to drop top level columns. The colName string is treated
 * literally without further interpretation.
 *
 * @group untypedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def drop(colNames: String*): DataFrame = {
  val resolver = sparkSession.sessionState.analyzer.resolver
  val allColumns = queryExecution.analyzed.output
  // Keep every attribute whose name matches none of the requested drops.
  val kept = allColumns.filterNot { attribute =>
    colNames.exists(name => resolver(attribute.name, name))
  }
  if (kept.size == allColumns.size) {
    // Nothing to drop: avoid an unnecessary projection.
    toDF()
  } else {
    this.select(kept.map(attribute => Column(attribute)) : _*)
  }
}
/**
 * Returns a new Dataset with a column dropped.
 * This version of drop accepts a [[Column]] rather than a name.
 * This is a no-op if the Dataset doesn't have a column
 * with an equivalent expression.
 *
 * @group untypedrel
 * @since 2.0.0
 */
def drop(col: Column): DataFrame = {
  // A bare (unresolved) attribute is resolved against this Dataset's plan first; any other
  // expression is compared as-is.
  val expression = col match {
    case Column(u: UnresolvedAttribute) =>
      queryExecution.analyzed
        .resolveQuoted(u.name, sparkSession.sessionState.analyzer.resolver)
        .getOrElse(u)
    case Column(expr: Expression) => expr
  }
  // Project every attribute that is not semantically equal to the dropped expression.
  val colsAfterDrop = this.logicalPlan.output.collect {
    case attr if !attr.semanticEquals(expression) => Column(attr)
  }
  select(colsAfterDrop : _*)
}
/**
 * Returns a new Dataset that contains only the unique rows from this Dataset.
 * This is an alias for `distinct`.
 *
 * For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
 * will keep all data across triggers as intermediate state to drop duplicates rows. You can use
 * [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
 * the state. In addition, too late data older than watermark will be dropped to avoid any
 * possibility of duplicates.
 *
 * @group typedrel
 * @since 2.0.0
 */
def dropDuplicates(): Dataset[T] = dropDuplicates(this.columns.toSeq)
/**
 * (Scala-specific) Returns a new Dataset with duplicate rows removed, considering only
 * the subset of columns.
 *
 * For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
 * will keep all data across triggers as intermediate state to drop duplicates rows. You can use
 * [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
 * the state. In addition, too late data older than watermark will be dropped to avoid any
 * possibility of duplicates.
 *
 * @throws AnalysisException if any name in `colNames` cannot be resolved.
 * @group typedrel
 * @since 2.0.0
 */
def dropDuplicates(colNames: Seq[String]): Dataset[T] = withTypedPlan {
  val resolver = sparkSession.sessionState.analyzer.resolver
  val allColumns = queryExecution.analyzed.output
  // Use `distinct` rather than `toSet.toSeq`: both remove repeated names, but `distinct`
  // preserves the caller-specified order while a HashSet reorders 5+ names arbitrarily,
  // making the grouping-key order of the Deduplicate node nondeterministic.
  val groupCols = colNames.distinct.flatMap { colName =>
    // It is possibly there are more than one columns with the same name,
    // so we call filter instead of find.
    val cols = allColumns.filter(col => resolver(col.name, colName))
    if (cols.isEmpty) {
      throw new AnalysisException(
        s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
    }
    cols
  }
  Deduplicate(groupCols, logicalPlan)
}
/**
 * Returns a new Dataset with duplicate rows removed, considering only
 * the subset of columns.
 *
 * For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
 * will keep all data across triggers as intermediate state to drop duplicates rows. You can use
 * [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
 * the state. In addition, too late data older than watermark will be dropped to avoid any
 * possibility of duplicates.
 *
 * @group typedrel
 * @since 2.0.0
 */
def dropDuplicates(colNames: Array[String]): Dataset[T] =
  dropDuplicates(colNames.toIndexedSeq)
/**
 * Returns a new [[Dataset]] with duplicate rows removed, considering only
 * the subset of columns.
 *
 * For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
 * will keep all data across triggers as intermediate state to drop duplicates rows. You can use
 * [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
 * the state. In addition, too late data older than watermark will be dropped to avoid any
 * possibility of duplicates.
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def dropDuplicates(col1: String, cols: String*): Dataset[T] =
  dropDuplicates(col1 +: cols)
/**
 * Computes basic statistics for numeric and string columns, including count, mean, stddev, min,
 * and max. If no columns are given, this function computes statistics for all numerical or
 * string columns.
 *
 * This function is meant for exploratory data analysis, as we make no guarantee about the
 * backward compatibility of the schema of the resulting Dataset. If you want to
 * programmatically compute summary statistics, use the `agg` function instead.
 *
 * {{{
 *   ds.describe("age", "height").show()
 *
 *   // output:
 *   // summary age   height
 *   // count   10.0  10.0
 *   // mean    53.3  178.05
 *   // stddev  11.6  15.7
 *   // min     18.0  163.0
 *   // max     92.0  192.0
 * }}}
 *
 * Use [[summary]] for expanded statistics and control over which statistics to compute.
 *
 * @param cols Columns to compute statistics on.
 *
 * @group action
 * @since 1.6.0
 */
@scala.annotation.varargs
def describe(cols: String*): DataFrame = {
  // No columns given means "describe everything"; otherwise narrow to the requested ones.
  val target = cols match {
    case Seq() => this
    case head +: tail => select(head, tail: _*)
  }
  target.summary("count", "mean", "stddev", "min", "max")
}
/**
 * Computes specified statistics for numeric and string columns. Available statistics are:
 *
 *  - count
 *  - mean
 *  - stddev
 *  - min
 *  - max
 *  - arbitrary approximate percentiles specified as a percentage (eg, 75%)
 *
 * If no statistics are given, this function computes count, mean, stddev, min,
 * approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
 *
 * This function is meant for exploratory data analysis, as we make no guarantee about the
 * backward compatibility of the schema of the resulting Dataset. If you want to
 * programmatically compute summary statistics, use the `agg` function instead.
 *
 * {{{
 *   ds.summary().show()
 *
 *   // output:
 *   // summary age   height
 *   // count   10.0  10.0
 *   // mean    53.3  178.05
 *   // stddev  11.6  15.7
 *   // min     18.0  163.0
 *   // 25%     24.0  176.0
 *   // 50%     24.0  176.0
 *   // 75%     32.0  180.0
 *   // max     92.0  192.0
 * }}}
 *
 * {{{
 *   ds.summary("count", "min", "25%", "75%", "max").show()
 *
 *   // output:
 *   // summary age   height
 *   // count   10.0  10.0
 *   // min     18.0  163.0
 *   // 25%     24.0  176.0
 *   // 75%     32.0  180.0
 *   // max     92.0  192.0
 * }}}
 *
 * To do a summary for specific columns first select them:
 *
 * {{{
 *   ds.select("age", "height").summary().show()
 * }}}
 *
 * See also [[describe]] for basic statistics.
 *
 * @param statistics Statistics from above list to be computed.
 *
 * @group action
 * @since 2.3.0
 */
@scala.annotation.varargs
def summary(statistics: String*): DataFrame = {
  val requested = statistics.toSeq
  StatFunctions.summary(this, requested)
}
/**
 * Returns the first `n` rows.
 *
 * @note this method should only be used if the resulting array is expected to be small, as
 * all the data is loaded into the driver's memory.
 *
 * @group action
 * @since 1.6.0
 */
def head(n: Int): Array[T] = {
  // Push the row limit into the plan before collecting so only `n` rows reach the driver.
  val limited = limit(n)
  withAction("head", limited.queryExecution)(collectFromPlan)
}
/**
 * Returns the first row.
 *
 * @throws NoSuchElementException if the Dataset is empty.
 * @group action
 * @since 1.6.0
 */
def head(): T = head(1).head
/**
 * Returns the first row. Alias for head().
 *
 * @throws NoSuchElementException if the Dataset is empty.
 * @group action
 * @since 1.6.0
 */
def first(): T = head()
/**
 * Concise syntax for chaining custom transformations.
 * {{{
 *   def featurize(ds: Dataset[T]): Dataset[U] = ...
 *
 *   ds
 *     .transform(featurize)
 *     .transform(...)
 * }}}
 *
 * @group typedrel
 * @since 1.6.0
 */
def transform[U](t: Dataset[T] => Dataset[U]): Dataset[U] = t.apply(this)
/**
 * :: Experimental ::
 * (Scala-specific)
 * Returns a new Dataset that only contains elements where `func` returns `true`.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def filter(func: T => Boolean): Dataset[T] = {
  val filtered = TypedFilter(func, logicalPlan)
  withTypedPlan(filtered)
}
/**
 * :: Experimental ::
 * (Java-specific)
 * Returns a new Dataset that only contains elements where `func` returns `true`.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def filter(func: FilterFunction[T]): Dataset[T] = {
  val filtered = TypedFilter(func, logicalPlan)
  withTypedPlan(filtered)
}
/**
 * :: Experimental ::
 * (Scala-specific)
 * Returns a new Dataset that contains the result of applying `func` to each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def map[U : Encoder](func: T => U): Dataset[U] = {
  val mapped = MapElements[T, U](func, logicalPlan)
  withTypedPlan(mapped)
}
/**
 * :: Experimental ::
 * (Java-specific)
 * Returns a new Dataset that contains the result of applying `func` to each element.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def map[U](func: MapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
  // Make the caller-supplied encoder available implicitly for the typed-plan builder.
  implicit val resultEncoder: Encoder[U] = encoder
  withTypedPlan(MapElements[T, U](func, logicalPlan))
}
/**
 * :: Experimental ::
 * (Scala-specific)
 * Returns a new Dataset that contains the result of applying `func` to each partition.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
  val mapped = MapPartitions[T, U](func, logicalPlan)
  new Dataset[U](sparkSession, mapped, implicitly[Encoder[U]])
}
/**
 * :: Experimental ::
 * (Java-specific)
 * Returns a new Dataset that contains the result of applying `f` to each partition.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def mapPartitions[U](f: MapPartitionsFunction[T, U], encoder: Encoder[U]): Dataset[U] =
  // Bridge the Java functional interface and Java iterators to their Scala counterparts.
  mapPartitions((it: Iterator[T]) => f.call(it.asJava).asScala)(encoder)
/**
 * Returns a new `DataFrame` that contains the result of applying a serialized R function
 * `func` to each partition.
 *
 * @param func serialized R closure to apply to each partition.
 * @param packageNames serialized names of R packages the closure requires.
 * @param broadcastVars broadcast variables referenced by the closure.
 * @param schema schema of the rows produced by the R function.
 */
private[sql] def mapPartitionsInR(
    func: Array[Byte],
    packageNames: Array[Byte],
    broadcastVars: Array[Broadcast[Object]],
    schema: StructType): DataFrame = {
  // NOTE(review): this cast assumes `this` is a Dataset[Row] (a DataFrame) — confirm callers;
  // a typed Dataset would make the cast invalid.
  val rowEncoder = encoder.asInstanceOf[ExpressionEncoder[Row]]
  Dataset.ofRows(
    sparkSession,
    MapPartitionsInR(func, packageNames, broadcastVars, schema, rowEncoder, logicalPlan))
}
/**
 * Applies a Scalar iterator Pandas UDF to each partition. The user-defined function
 * defines a transformation: `iter(pandas.DataFrame)` -> `iter(pandas.DataFrame)`.
 * Each partition is each iterator consisting of DataFrames as batches.
 *
 * This function uses Apache Arrow as serialization format between Java executors and Python
 * workers.
 */
private[sql] def mapInPandas(func: PythonUDF): DataFrame = {
  // The UDF's declared struct result type determines the output attributes.
  val outputAttrs = func.dataType.asInstanceOf[StructType].toAttributes
  Dataset.ofRows(sparkSession, MapInPandas(func, outputAttrs, logicalPlan))
}
/**
 * :: Experimental ::
 * (Scala-specific)
 * Returns a new Dataset by first applying a function to all elements of this Dataset,
 * and then flattening the results.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
  mapPartitions(iter => iter.flatMap(func))
/**
 * :: Experimental ::
 * (Java-specific)
 * Returns a new Dataset by first applying a function to all elements of this Dataset,
 * and then flattening the results.
 *
 * @group typedrel
 * @since 1.6.0
 */
@Experimental
@Evolving
def flatMap[U](f: FlatMapFunction[T, U], encoder: Encoder[U]): Dataset[U] =
  // Adapt the Java functional interface (returning a Java iterator) to the Scala overload.
  flatMap((t: T) => f.call(t).asScala)(encoder)
/**
 * Applies a function `f` to all rows.
 *
 * @group action
 * @since 1.6.0
 */
def foreach(f: T => Unit): Unit =
  withNewRDDExecutionId(rdd.foreach(f))
/**
 * (Java-specific)
 * Runs `func` on each element of this Dataset.
 *
 * @group action
 * @since 1.6.0
 */
def foreach(func: ForeachFunction[T]): Unit = foreach((t: T) => func.call(t))
/**
 * Applies a function `f` to each partition of this Dataset.
 *
 * @group action
 * @since 1.6.0
 */
def foreachPartition(f: Iterator[T] => Unit): Unit =
  withNewRDDExecutionId(rdd.foreachPartition(f))
/**
 * (Java-specific)
 * Runs `func` on each partition of this Dataset.
 *
 * @group action
 * @since 1.6.0
 */
def foreachPartition(func: ForeachPartitionFunction[T]): Unit = {
  // Hand each partition to the Java callback as a java.util.Iterator.
  val consume: Iterator[T] => Unit = it => func.call(it.asJava)
  foreachPartition(consume)
}
/**
 * Returns the first `n` rows in the Dataset. Alias for [[head]].
 *
 * Running take requires moving data into the application's driver process, and doing so with
 * a very large `n` can crash the driver process with OutOfMemoryError.
 *
 * @group action
 * @since 1.6.0
 */
def take(n: Int): Array[T] = head(n)
/**
 * Returns the first `n` rows in the Dataset as a list.
 *
 * Running take requires moving data into the application's driver process, and doing so with
 * a very large `n` can crash the driver process with OutOfMemoryError.
 *
 * @group action
 * @since 1.6.0
 */
def takeAsList(n: Int): java.util.List[T] = {
  val rows = take(n)
  java.util.Arrays.asList(rows : _*)
}
/**
 * Returns an array that contains all rows in this Dataset.
 *
 * Running collect requires moving all the data into the application's driver process, and
 * doing so on a very large dataset can crash the driver process with OutOfMemoryError.
 *
 * For Java API, use [[collectAsList]].
 *
 * @group action
 * @since 1.6.0
 */
def collect(): Array[T] = withAction("collect", queryExecution) { plan =>
  collectFromPlan(plan)
}
/**
 * Returns a Java list that contains all rows in this Dataset.
 *
 * Running collect requires moving all the data into the application's driver process, and
 * doing so on a very large dataset can crash the driver process with OutOfMemoryError.
 *
 * @group action
 * @since 1.6.0
 */
def collectAsList(): java.util.List[T] = withAction("collectAsList", queryExecution) { plan =>
  java.util.Arrays.asList(collectFromPlan(plan) : _*)
}
/**
 * Returns an iterator that contains all rows in this Dataset.
 *
 * The iterator will consume as much memory as the largest partition in this Dataset.
 *
 * @note this results in multiple Spark jobs, and if the input Dataset is the result
 * of a wide transformation (e.g. join with different partitioners), to avoid
 * recomputing the input Dataset should be cached first.
 *
 * @group action
 * @since 2.0.0
 */
def toLocalIterator(): java.util.Iterator[T] = {
  withAction("toLocalIterator", queryExecution) { plan =>
    // `ExpressionEncoder` is not thread-safe, so each call works on its own copy.
    val localEnc = resolvedEnc.copy()
    plan.executeToIterator().map(localEnc.fromRow).asJava
  }
}
/**
 * Returns the number of rows in the Dataset.
 * @group action
 * @since 1.6.0
 */
def count(): Long = withAction("count", groupBy().count().queryExecution) { plan =>
  // The global aggregation yields exactly one row whose first column is the count.
  val resultRow = plan.executeCollect().head
  resultRow.getLong(0)
}
/**
 * Returns a new Dataset that has exactly `numPartitions` partitions.
 *
 * @group typedrel
 * @since 1.6.0
 */
def repartition(numPartitions: Int): Dataset[T] =
  withTypedPlan(Repartition(numPartitions, shuffle = true, logicalPlan))
/**
 * Returns a new Dataset partitioned by the given partitioning expressions into
 * `numPartitions`. The resulting Dataset is hash partitioned.
 *
 * This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
 *
 * @param numPartitions number of partitions in the result.
 * @param partitionExprs hash-partitioning expressions; must not be sort orders
 *                       (use `repartitionByRange` for those).
 * @throws IllegalArgumentException if any expression is a `SortOrder`.
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def repartition(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
  // The underlying `LogicalPlan` operator special-cases all-`SortOrder` arguments.
  // However, we don't want to complicate the semantics of this API method.
  // Instead, let's give users a friendly error message, pointing them to the new method.
  val sortOrders = partitionExprs.filter(_.expr.isInstanceOf[SortOrder])
  if (sortOrders.nonEmpty) throw new IllegalArgumentException(
    s"""Invalid partitionExprs specified: $sortOrders
|For range partitioning use repartitionByRange(...) instead.
""".stripMargin)
  withTypedPlan {
    RepartitionByExpression(partitionExprs.map(_.expr), logicalPlan, numPartitions)
  }
}
/**
 * Returns a new Dataset partitioned by the given partitioning expressions, using
 * `spark.sql.shuffle.partitions` as number of partitions.
 * The resulting Dataset is hash partitioned.
 *
 * This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
 *
 * @group typedrel
 * @since 2.0.0
 */
@scala.annotation.varargs
def repartition(partitionExprs: Column*): Dataset[T] = {
  // Fall back to the session's configured shuffle parallelism.
  val defaultPartitions = sparkSession.sessionState.conf.numShufflePartitions
  repartition(defaultPartitions, partitionExprs: _*)
}
/**
 * Returns a new Dataset partitioned by the given partitioning expressions into
 * `numPartitions`. The resulting Dataset is range partitioned.
 *
 * At least one partition-by expression must be specified.
 * When no explicit sort order is specified, "ascending nulls first" is assumed.
 * Note, the rows are not sorted in each partition of the resulting Dataset.
 *
 * Note that due to performance reasons this method uses sampling to estimate the ranges.
 * Hence, the output may not be consistent, since sampling can return different values.
 * The sample size can be controlled by the config
 * `spark.sql.execution.rangeExchange.sampleSizePerPartition`.
 *
 * @group typedrel
 * @since 2.3.0
 */
@scala.annotation.varargs
def repartitionByRange(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
  require(partitionExprs.nonEmpty, "At least one partition-by expression must be specified.")
  // Expressions lacking an explicit ordering default to ascending.
  val sortOrder: Seq[SortOrder] = partitionExprs.map { col =>
    col.expr match {
      case order: SortOrder => order
      case expr: Expression => SortOrder(expr, Ascending)
    }
  }
  withTypedPlan(RepartitionByExpression(sortOrder, logicalPlan, numPartitions))
}
/**
 * Returns a new Dataset partitioned by the given partitioning expressions, using
 * `spark.sql.shuffle.partitions` as number of partitions.
 * The resulting Dataset is range partitioned.
 *
 * At least one partition-by expression must be specified.
 * When no explicit sort order is specified, "ascending nulls first" is assumed.
 * Note, the rows are not sorted in each partition of the resulting Dataset.
 *
 * Note that due to performance reasons this method uses sampling to estimate the ranges.
 * Hence, the output may not be consistent, since sampling can return different values.
 * The sample size can be controlled by the config
 * `spark.sql.execution.rangeExchange.sampleSizePerPartition`.
 *
 * @group typedrel
 * @since 2.3.0
 */
@scala.annotation.varargs
def repartitionByRange(partitionExprs: Column*): Dataset[T] = {
  // Fall back to the session's configured shuffle parallelism.
  val defaultPartitions = sparkSession.sessionState.conf.numShufflePartitions
  repartitionByRange(defaultPartitions, partitionExprs: _*)
}
/**
 * Returns a new Dataset that has exactly `numPartitions` partitions, when the fewer partitions
 * are requested. If a larger number of partitions is requested, it will stay at the current
 * number of partitions. Similar to coalesce defined on an `RDD`, this operation results in
 * a narrow dependency, e.g. if you go from 1000 partitions to 100 partitions, there will not
 * be a shuffle, instead each of the 100 new partitions will claim 10 of the current partitions.
 *
 * However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
 * this may result in your computation taking place on fewer nodes than
 * you like (e.g. one node in the case of numPartitions = 1). To avoid this,
 * you can call repartition. This will add a shuffle step, but means the
 * current upstream partitions will be executed in parallel (per whatever
 * the current partitioning is).
 *
 * @group typedrel
 * @since 1.6.0
 */
def coalesce(numPartitions: Int): Dataset[T] =
  withTypedPlan(Repartition(numPartitions, shuffle = false, logicalPlan))
/**
 * Returns a new Dataset that contains only the unique rows from this Dataset.
 * This is an alias for `dropDuplicates`.
 *
 * @note Equality checking is performed directly on the encoded representation of the data
 * and thus is not affected by a custom `equals` function defined on `T`.
 *
 * @group typedrel
 * @since 2.0.0
 */
def distinct(): Dataset[T] = dropDuplicates()
/**
 * Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
 *
 * @group basic
 * @since 1.6.0
 */
def persist(): this.type = {
  // Register this Dataset's query with the shared cache manager.
  val cacheManager = sparkSession.sharedState.cacheManager
  cacheManager.cacheQuery(this)
  this
}
/**
 * Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
 * Alias for [[persist]].
 *
 * @group basic
 * @since 1.6.0
 */
def cache(): this.type = persist()
/**
 * Persist this Dataset with the given storage level.
 * @param newLevel One of: `MEMORY_ONLY`, `MEMORY_AND_DISK`, `MEMORY_ONLY_SER`,
 *                 `MEMORY_AND_DISK_SER`, `DISK_ONLY`, `MEMORY_ONLY_2`,
 *                 `MEMORY_AND_DISK_2`, etc.
 *
 * @group basic
 * @since 1.6.0
 */
def persist(newLevel: StorageLevel): this.type = {
  val cacheManager = sparkSession.sharedState.cacheManager
  cacheManager.cacheQuery(this, None, newLevel)
  this
}
/**
 * Get the Dataset's current storage level, or StorageLevel.NONE if not persisted.
 *
 * @group basic
 * @since 2.1.0
 */
def storageLevel: StorageLevel = {
  val cached = sparkSession.sharedState.cacheManager.lookupCachedData(this)
  cached match {
    case Some(data) => data.cachedRepresentation.cacheBuilder.storageLevel
    case None => StorageLevel.NONE
  }
}
/**
 * Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
 * This will not un-persist any cached data that is built upon this Dataset.
 *
 * @param blocking Whether to block until all blocks are deleted.
 *
 * @group basic
 * @since 1.6.0
 */
def unpersist(blocking: Boolean): this.type = {
  val cacheManager = sparkSession.sharedState.cacheManager
  // cascade = false: only this query's cache entry is dropped, not dependent caches.
  cacheManager.uncacheQuery(sparkSession, logicalPlan, cascade = false, blocking)
  this
}
/**
 * Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
 * This will not un-persist any cached data that is built upon this Dataset.
 * Non-blocking: returns immediately without waiting for block deletion.
 *
 * @group basic
 * @since 1.6.0
 */
def unpersist(): this.type = unpersist(blocking = false)
// Represents the `QueryExecution` used to produce the content of the Dataset as an `RDD`:
// the plan is wrapped so rows are deserialized back into `T` objects.
@transient private lazy val rddQueryExecution: QueryExecution = {
  sparkSession.sessionState.executePlan(CatalystSerde.deserialize[T](logicalPlan))
}
/**
 * Represents the content of the Dataset as an `RDD` of `T`.
 *
 * @group basic
 * @since 1.6.0
 */
lazy val rdd: RDD[T] = {
  // Each internal row holds a single deserialized object of this type at ordinal 0.
  val objectType = exprEnc.deserializer.dataType
  rddQueryExecution.toRdd.mapPartitions { rows =>
    rows.map(row => row.get(0, objectType).asInstanceOf[T])
  }
}
/**
 * Returns the content of the Dataset as a `JavaRDD` of `T`s.
 * @group basic
 * @since 1.6.0
 */
def toJavaRDD: JavaRDD[T] = rdd.toJavaRDD()
/**
 * Returns the content of the Dataset as a `JavaRDD` of `T`s. Alias for [[toJavaRDD]].
 * @group basic
 * @since 1.6.0
 */
def javaRDD: JavaRDD[T] = toJavaRDD
/**
 * Creates a local temporary view using the given name. The lifetime of this
 * temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
 *
 * Local temporary view is session-scoped. Its lifetime is the lifetime of the session that
 * created it, i.e. it will be automatically dropped when the session terminates. It's not
 * tied to any databases, i.e. we can't use `db1.view1` to reference a local temporary view.
 *
 * @throws AnalysisException if the view name is invalid or already exists
 *
 * @group basic
 * @since 2.0.0
 */
@throws[AnalysisException]
def createTempView(viewName: String): Unit =
  withPlan(createTempViewCommand(viewName, replace = false, global = false))
/**
 * Creates a local temporary view using the given name, replacing any existing view with
 * that name. The lifetime of this temporary view is tied to the [[SparkSession]] that was
 * used to create this Dataset.
 *
 * @group basic
 * @since 2.0.0
 */
def createOrReplaceTempView(viewName: String): Unit =
  withPlan(createTempViewCommand(viewName, replace = true, global = false))
/**
 * Creates a global temporary view using the given name. The lifetime of this
 * temporary view is tied to this Spark application.
 *
 * Global temporary view is cross-session. Its lifetime is the lifetime of the Spark application,
 * i.e. it will be automatically dropped when the application terminates. It's tied to a system
 * preserved database `global_temp`, and we must use the qualified name to refer a global temp
 * view, e.g. `SELECT * FROM global_temp.view1`.
 *
 * @throws AnalysisException if the view name is invalid or already exists
 *
 * @group basic
 * @since 2.1.0
 */
@throws[AnalysisException]
def createGlobalTempView(viewName: String): Unit =
  withPlan(createTempViewCommand(viewName, replace = false, global = true))
/**
 * Creates or replaces a global temporary view using the given name. The lifetime of this
 * temporary view is tied to this Spark application.
 *
 * Global temporary view is cross-session. Its lifetime is the lifetime of the Spark application,
 * i.e. it will be automatically dropped when the application terminates. It's tied to a system
 * preserved database `global_temp`, and we must use the qualified name to refer a global temp
 * view, e.g. `SELECT * FROM global_temp.view1`.
 *
 * @group basic
 * @since 2.2.0
 */
def createOrReplaceGlobalTempView(viewName: String): Unit =
  withPlan(createTempViewCommand(viewName, replace = true, global = true))
/**
 * Builds the `CreateViewCommand` backing the `createTempView` family of methods.
 *
 * @param viewName view name; must parse as a table identifier.
 * @param replace whether an existing view with the same name is replaced.
 * @param global whether the view is application-scoped (`global_temp`) or session-scoped.
 */
private def createTempViewCommand(
    viewName: String,
    replace: Boolean,
    global: Boolean): CreateViewCommand = {
  val viewType = if (global) GlobalTempView else LocalTempView
  // Surface an unparsable name as an AnalysisException rather than a ParseException.
  val parsedIdentifier =
    try {
      sparkSession.sessionState.sqlParser.parseTableIdentifier(viewName)
    } catch {
      case _: ParseException => throw new AnalysisException(s"Invalid view name: $viewName")
    }
  CreateViewCommand(
    name = parsedIdentifier,
    userSpecifiedColumns = Nil,
    comment = None,
    properties = Map.empty,
    originalText = None,
    child = logicalPlan,
    allowExisting = false,
    replace = replace,
    viewType = viewType)
}
/**
 * Interface for saving the content of the non-streaming Dataset out into external storage.
 *
 * @throws AnalysisException if called on a streaming Dataset (use `writeStream` instead).
 * @group basic
 * @since 1.6.0
 */
def write: DataFrameWriter[T] = {
  // Batch-only API: streaming Datasets must go through `writeStream`.
  if (isStreaming) {
    logicalPlan.failAnalysis(
      "'write' can not be called on streaming Dataset/DataFrame")
  }
  new DataFrameWriter[T](this)
}
/**
 * Interface for saving the content of the streaming Dataset out into external storage.
 *
 * @throws AnalysisException if called on a non-streaming Dataset (use `write` instead).
 * @group basic
 * @since 2.0.0
 */
@Evolving
def writeStream: DataStreamWriter[T] = {
  // Streaming-only API: batch Datasets must go through `write`.
  if (!isStreaming) {
    logicalPlan.failAnalysis(
      "'writeStream' can be called only on streaming Dataset/DataFrame")
  }
  new DataStreamWriter[T](this)
}
/**
 * Returns the content of the Dataset as a Dataset of JSON strings, one JSON object per row.
 * @since 2.0.0
 */
def toJSON: Dataset[String] = {
  // Capture driver-side values before the closure is shipped to executors.
  val rowSchema = this.schema
  val sessionLocalTimeZone = sparkSession.sessionState.conf.sessionLocalTimeZone
  mapPartitions { iter =>
    // One writer/generator pair per partition, reused across rows.
    val writer = new CharArrayWriter()
    // create the Generator without separator inserted between 2 records
    val gen = new JacksonGenerator(rowSchema, writer,
      new JSONOptions(Map.empty[String, String], sessionLocalTimeZone))
    new Iterator[String] {
      override def hasNext: Boolean = iter.hasNext
      override def next(): String = {
        gen.write(exprEnc.toRow(iter.next()))
        gen.flush()
        val json = writer.toString
        if (hasNext) {
          // More rows to come: clear the buffer for the next record.
          writer.reset()
        } else {
          // Last row in this partition: release the generator's resources.
          gen.close()
        }
        json
      }
    }
  } (Encoders.STRING)
}
/**
 * Returns a best-effort snapshot of the files that compose this Dataset. This method simply
 * asks each constituent BaseRelation for its respective files and takes the union of all results.
 * Depending on the source relations, this may not find all input files. Duplicates are removed.
 * The order of the returned paths is unspecified.
 *
 * @group basic
 * @since 2.0.0
 */
def inputFiles: Array[String] = {
  // Walk the optimized plan and gather file lists from every file-backed relation kind.
  val files: Seq[String] = queryExecution.optimizedPlan.collect {
    case LogicalRelation(fsBasedRelation: FileRelation, _, _, _) =>
      fsBasedRelation.inputFiles
    case fr: FileRelation =>
      fr.inputFiles
    case r: HiveTableRelation =>
      r.tableMeta.storage.locationUri.map(_.toString).toArray
    case DataSourceV2Relation(table: FileTable, _, _) =>
      table.fileIndex.inputFiles
  }.flatten
  // De-duplicate across relations; set conversion makes the result order arbitrary.
  files.toSet.toArray
}
////////////////////////////////////////////////////////////////////////////
// For Python API
////////////////////////////////////////////////////////////////////////////
/**
 * Converts this Dataset's rows to a PythonRDD of pickled Java objects.
 */
private[sql] def javaToPython: JavaRDD[Array[Byte]] = {
  val structType = schema // capture it for closure
  val javaRows = queryExecution.toRdd.map(row => EvaluatePython.toJava(row, structType))
  EvaluatePython.javaToPython(javaRows)
}
/**
 * Collects all rows to the driver, pickles them in batches, and serves them to the
 * Python side over a socket. Returns the connection info for the served iterator.
 */
private[sql] def collectToPython(): Array[Any] = {
  // Picklers must be registered before converting rows to Python-compatible objects.
  EvaluatePython.registerPicklers()
  withAction("collectToPython", queryExecution) { plan =>
    val toJava: (Any) => Any = EvaluatePython.toJava(_, schema)
    val iter: Iterator[Array[Byte]] = new SerDeUtil.AutoBatchedPickler(
      plan.executeCollect().iterator.map(toJava))
    PythonRDD.serveIterator(iter, "serve-DataFrame")
  }
}
/**
 * Renders up to `_numRows` rows as truncated strings (via `getRows`) and serves the
 * pickled result to the Python side. Used to implement DataFrame display in PySpark.
 *
 * @param _numRows requested row count; clamped to [0, MAX_ROUNDED_ARRAY_LENGTH - 1].
 * @param truncate maximum cell width; longer values are truncated by `getRows`.
 */
private[sql] def getRowsToPython(
    _numRows: Int,
    truncate: Int): Array[Any] = {
  EvaluatePython.registerPicklers()
  // Clamp to a safe JVM array size before materializing.
  val numRows = _numRows.max(0).min(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH - 1)
  val rows = getRows(numRows, truncate).map(_.toArray).toArray
  val toJava: (Any) => Any = EvaluatePython.toJava(_, ArrayType(ArrayType(StringType)))
  val iter: Iterator[Array[Byte]] = new SerDeUtil.AutoBatchedPickler(
    rows.iterator.map(toJava))
  PythonRDD.serveIterator(iter, "serve-GetRows")
}
/**
 * Collect a Dataset as Arrow batches and serve stream to SparkR. It sends
 * arrow batches in an ordered manner with buffering. This is inevitable
 * due to missing R API that reads batches from socket directly. See ARROW-4512.
 * Eventually, this code should be deduplicated by `collectAsArrowToPython`.
 */
private[sql] def collectAsArrowToR(): Array[Any] = {
  val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
  withAction("collectAsArrowToR", queryExecution) { plan =>
    RRDD.serveToStream("serve-Arrow") { outputStream =>
      // Batches are staged in `buffer` and only flushed to `out` once all partitions
      // have been written in order.
      val buffer = new ByteArrayOutputStream()
      val out = new DataOutputStream(outputStream)
      val batchWriter = new ArrowBatchStreamWriter(schema, buffer, timeZoneId)
      val arrowBatchRdd = toArrowBatchRdd(plan)
      val numPartitions = arrowBatchRdd.partitions.length
      // Store collection results for worst case of 1 to N-1 partitions
      val results = new Array[Array[Array[Byte]]](numPartitions - 1)
      var lastIndex = -1 // index of last partition written
      // Handler to eagerly write partitions to Python in order
      def handlePartitionBatches(index: Int, arrowBatches: Array[Array[Byte]]): Unit = {
        // If result is from next partition in order
        if (index - 1 == lastIndex) {
          batchWriter.writeBatches(arrowBatches.iterator)
          lastIndex += 1
          // Write stored partitions that come next in order
          while (lastIndex < results.length && results(lastIndex) != null) {
            batchWriter.writeBatches(results(lastIndex).iterator)
            // Free the buffered partition once written.
            results(lastIndex) = null
            lastIndex += 1
          }
          // After last batch, end the stream
          if (lastIndex == results.length) {
            batchWriter.end()
            // Length-prefixed payload so the R side knows how many bytes to read.
            val batches = buffer.toByteArray
            out.writeInt(batches.length)
            out.write(batches)
          }
        } else {
          // Store partitions received out of order
          results(index - 1) = arrowBatches
        }
      }
      // The result handler runs on the driver as each partition's task completes,
      // which may be out of partition order — hence the buffering above.
      sparkSession.sparkContext.runJob(
        arrowBatchRdd,
        (ctx: TaskContext, it: Iterator[Array[Byte]]) => it.toArray,
        0 until numPartitions,
        handlePartitionBatches)
    }
  }
}
/**
 * Collect a Dataset as Arrow batches and serve stream to PySpark. It sends
 * arrow batches in an un-ordered manner without buffering, and then batch order
 * information at the end. The batches should be reordered at Python side.
 */
private[sql] def collectAsArrowToPython: Array[Any] = {
  val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
  withAction("collectAsArrowToPython", queryExecution) { plan =>
    PythonRDD.serveToStream("serve-Arrow") { outputStream =>
      val out = new DataOutputStream(outputStream)
      val batchWriter = new ArrowBatchStreamWriter(schema, out, timeZoneId)
      // Batches ordered by (index of partition, batch index in that partition) tuple
      val batchOrder = ArrayBuffer.empty[(Int, Int)]
      // Handler to eagerly write batches to Python as they arrive, un-ordered
      val handlePartitionBatches = (index: Int, arrowBatches: Array[Array[Byte]]) =>
        if (arrowBatches.nonEmpty) {
          // Write all batches (can be more than 1) in the partition, store the batch order tuple
          batchWriter.writeBatches(arrowBatches.iterator)
          arrowBatches.indices.foreach {
            partitionBatchIndex => batchOrder.append((index, partitionBatchIndex))
          }
        }
      // Ensure the stream is terminated and order info is written even if the job fails.
      Utils.tryWithSafeFinally {
        val arrowBatchRdd = toArrowBatchRdd(plan)
        sparkSession.sparkContext.runJob(
          arrowBatchRdd,
          (it: Iterator[Array[Byte]]) => it.toArray,
          handlePartitionBatches)
      } {
        // After processing all partitions, end the batch stream
        batchWriter.end()
        // Write batch order indices
        out.writeInt(batchOrder.length)
        // Sort by (index of partition, batch index in that partition) tuple to get the
        // overall_batch_index from 0 to N-1 batches, which can be used to put the
        // transferred batches in the correct order
        batchOrder.zipWithIndex.sortBy(_._1).foreach { case (_, overallBatchIndex) =>
          out.writeInt(overallBatchIndex)
        }
      }
    }
  }
}
/**
 * Serve the rows of this Dataset to Python as a local iterator, returning the
 * connection details produced by PythonRDD.toLocalIteratorAndServe.
 */
private[sql] def toPythonIterator(): Array[Any] = {
  withNewExecutionId {
    PythonRDD.toLocalIteratorAndServe(javaToPython.rdd)
  }
}
////////////////////////////////////////////////////////////////////////////
// Private Helpers
////////////////////////////////////////////////////////////////////////////
/**
 * Wrap a Dataset action to track all Spark jobs in the body so that we can connect them with
 * an execution.
 *
 * @tparam U result type of the wrapped action
 * @param body the action to evaluate under a fresh SQL execution id
 */
private def withNewExecutionId[U](body: => U): U = {
  SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body)
}
/**
 * Wrap an action on the Dataset's RDD to track all Spark jobs in the body so that we can
 * connect them with an execution. The metrics of the executed plan are reset before the
 * action runs, so the execution reports only this action's work.
 *
 * @tparam U result type of the wrapped action
 * @param body the action to evaluate under a fresh SQL execution id
 */
private def withNewRDDExecutionId[U](body: => U): U =
  SQLExecution.withNewExecutionId(sparkSession, rddQueryExecution) {
    // Clear any metrics left over from a previous run of this plan.
    rddQueryExecution.executedPlan.foreach(_.resetMetrics())
    body
  }
/**
 * Wrap a Dataset action to track the QueryExecution and time cost, then report to the
 * user-registered callback functions.
 *
 * @param name   name of the action, attached to the execution for reporting
 * @param qe     the query execution whose plan the action runs against
 * @param action the action to apply to the executed plan
 */
private def withAction[U](name: String, qe: QueryExecution)(action: SparkPlan => U) =
  SQLExecution.withNewExecutionId(sparkSession, qe, Some(name)) {
    // Reset per-plan metrics so this action's numbers are not mixed with earlier runs.
    qe.executedPlan.foreach(_.resetMetrics())
    action(qe.executedPlan)
  }
/**
 * Collect all elements from a spark plan.
 *
 * @param plan the physical plan to execute and collect from
 * @return the collected rows decoded into the user-facing type T
 */
private def collectFromPlan(plan: SparkPlan): Array[T] = {
  // `ExpressionEncoder` is not thread-safe, here we create a new encoder.
  val enc = resolvedEnc.copy()
  // Decode each collected internal row back into T.
  plan.executeCollect().map(enc.fromRow)
}
/**
 * Build a sorted Dataset from the given sort expressions. Expressions that are
 * not already SortOrders default to ascending order.
 *
 * @param global    whether to sort across all partitions (true) or within each partition
 * @param sortExprs the columns/expressions to sort by
 */
private def sortInternal(global: Boolean, sortExprs: Seq[Column]): Dataset[T] = {
  val ordering: Seq[SortOrder] = sortExprs.map(_.expr).map {
    case order: SortOrder => order
    case expression: Expression => SortOrder(expression, Ascending)
  }
  withTypedPlan(Sort(ordering, global = global, logicalPlan))
}
/** A convenient function to wrap a logical plan and produce a DataFrame (untyped rows). */
@inline private def withPlan(logicalPlan: LogicalPlan): DataFrame = {
  Dataset.ofRows(sparkSession, logicalPlan)
}
/** A convenient function to wrap a logical plan and produce a Dataset typed by U's Encoder. */
@inline private def withTypedPlan[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] = {
  Dataset(sparkSession, logicalPlan)
}
/** A convenient function to wrap a set based logical plan and produce a Dataset. */
@inline private def withSetOperator[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] = {
  val isRowBased = classTag.runtimeClass.isAssignableFrom(classOf[Row])
  if (!isRowBased) {
    Dataset(sparkSession, logicalPlan)
  } else {
    // Set operators widen types (change the schema), so we cannot reuse the row encoder.
    Dataset.ofRows(sparkSession, logicalPlan).asInstanceOf[Dataset[U]]
  }
}
/** Convert to an RDD of serialized ArrowRecordBatches. */
private[sql] def toArrowBatchRdd(plan: SparkPlan): RDD[Array[Byte]] = {
  // Values read into locals on the driver; presumably this avoids serializing the
  // whole Dataset into the task closure below — confirm.
  val schemaCaptured = this.schema
  val maxRecordsPerBatch = sparkSession.sessionState.conf.arrowMaxRecordsPerBatch
  val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
  plan.execute().mapPartitionsInternal { iter =>
    val context = TaskContext.get()
    // Each partition's internal rows become an iterator of serialized Arrow batches.
    ArrowConverters.toBatchIterator(
      iter, schemaCaptured, maxRecordsPerBatch, timeZoneId, context)
  }
}
// This is only used in tests, for now.
// Convenience overload that converts the fully optimized (executed) plan.
private[sql] def toArrowBatchRdd: RDD[Array[Byte]] = {
  toArrowBatchRdd(queryExecution.executedPlan)
}
}
| techaddict/spark | sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | Scala | apache-2.0 | 123,443 |
package twine.app.climate
import org.mockito.Mockito._
import twine.{UnitTestSupport, StandardSpec}
import scala.concurrent.Future
import twine.app.climate.wbclimate.{WbClimateData, WbClimateClient}
/**
 * Unit spec for the climate service, backed by a mocked [[WbClimateClient]]
 * that returns canned World Bank climate data.
 */
class ClimateServiceSpec extends StandardSpec with UnitTestSupport
{
  val TestFromYear = 1980
  val TestToYear = 1999
  val TestLocation = "FJI"

  /**
   * Create the climateService manually and use a mock WbClimateClient
   * instead of the real one. Note that this is a method so that it can be
   * overridden by the ClimateServiceIntegSpec which extends this class to
   * reuse for integration testing
   */
  def climateService: ClimateService = {
    val client = mock[WbClimateClient]
    when(client.fetchTemperatureStats(TestLocation, TestFromYear, TestToYear)).thenReturn(
      Future(Seq(WbClimateData.dummyTemperatureData(TestFromYear, TestToYear)))
    )
    when(client.fetchPrecipitationStats(TestLocation, TestFromYear, TestToYear)).thenReturn(
      Future(Seq(WbClimateData.dummyPrecipitationData(TestFromYear, TestToYear)))
    )
    new ClimateServiceImpl(client)
  }

  "a query for 20 year average Fiji climate" should {
    "respond with entry containing precipitation and temperature" in {
      whenReady (climateService.query(TestLocation, TestFromYear, TestToYear)) { result =>
        result should have (
          'location (TestLocation),
          'fromYear (TestFromYear),
          'toYear (TestToYear)
        )
        result.temperature.annual should be > 1.0
        result.precipitation.annual should be > 1.0
      }
    }
  }
}
} | ehalpern/sandbox | src/test/scala/twine/app/climate/ClimateServiceSpec.scala | Scala | mit | 1,581 |
package org.jetbrains.plugins.hocon
import com.intellij.FileSetTestCase
import com.intellij.openapi.application.Result
import com.intellij.openapi.command.WriteCommandAction
import com.intellij.openapi.fileTypes.FileTypeManager
import com.intellij.openapi.util.Computable
import com.intellij.psi.PsiFileFactory
import com.intellij.psi.codeStyle.CodeStyleSettingsManager
import com.intellij.testFramework.LightPlatformTestCase.getProject
import com.intellij.testFramework.{EditorTestUtil, LightPlatformTestCase}
import com.intellij.util.LocalTimeCounter
import org.jetbrains.plugins.hocon.lang.HoconLanguage
import org.jetbrains.plugins.hocon.psi.HoconPsiFile
import org.jetbrains.plugins.scala.util.TestUtils
/**
 * Base class for HOCON file-set tests: every *.test file found under the given
 * subpath of the test-data directory supplies one test case.
 *
 * @author ghik
 */
abstract class HoconFileSetTestCase(subpath: String) extends FileSetTestCase(s"${TestUtils.getTestDataPath}/hocon/$subpath") {

  override final def transform(testName: String, data: Array[String]): String =
    transform(data.toSeq.map(_.stripLineEnd))

  /** Test-specific transformation applied to the line-end-stripped sections of a test file. */
  protected def transform(data: Seq[String]): String

  override protected def setUp(): Unit = {
    // Force predictable two-space indentation for HOCON in the test project.
    val hoconSettings = CodeStyleSettingsManager.getInstance(LightPlatformTestCase.getProject)
      .getCurrentSettings
      .getCommonSettings(HoconLanguage)
    val indent = hoconSettings.getIndentOptions
    indent.TAB_SIZE = 2
    indent.INDENT_SIZE = 2
    indent.CONTINUATION_INDENT_SIZE = 2
  }

  override def getName: String = getClass.getName
}
object HoconFileSetTestCase {
  // Restrict the file set to *.test files.
  System.setProperty("fileset.pattern", "(.*)\\\\.test")

  /**
   * Creates an in-memory ("pseudo physical") HOCON PSI file containing `text`.
   * NOTE(review): `project.getBaseDir + "temp.conf"` relies on `any2stringadd` on a
   * VirtualFile and yields a name with no path separator; presumably only the ".conf"
   * extension matters for the file-type lookup — confirm.
   */
  private[hocon] def createPseudoPhysicalHoconFile(text: String): HoconPsiFile = {
    val project = LightPlatformTestCase.getProject
    val tempFile = project.getBaseDir + "temp.conf"
    val fileType = FileTypeManager.getInstance.getFileTypeByFileName(tempFile)
    PsiFileFactory.getInstance(project)
      .createFileFromText(tempFile, fileType, text, LocalTimeCounter.currentTime(), true)
      .asInstanceOf[HoconPsiFile]
  }

  /**
   * Removes the editor caret marker from `fileText`.
   *
   * @return the text without the marker, paired with the caret offset (-1 when absent)
   */
  private[hocon] def extractCaret(fileText: String): (String, Int) = {
    import EditorTestUtil.CARET_TAG
    val caretOffset = fileText.indexOf(CARET_TAG)
    val newFileText =
      if (caretOffset >= 0) fileText.substring(0, caretOffset) + fileText.substring(caretOffset + CARET_TAG.length)
      else fileText
    (newFileText, caretOffset)
  }

  /** Evaluates `body` inside an IntelliJ write command action and returns its result. */
  private[hocon] def inWriteCommandAction[T](body: => T): T = {
    // Wrap the by-name body so it is evaluated inside run(), not here.
    val computable = new Computable[T] {
      override def compute(): T = body
    }
    new WriteCommandAction[T](getProject, "Undefined") {
      protected def run(result: Result[T]): Unit = {
        result.setResult(computable.compute())
      }
    }.execute.getResultObject
  }
}
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/hocon/HoconFileSetTestCase.scala | Scala | apache-2.0 | 2,687 |
package com.avsystem.commons
package rpc
import com.avsystem.commons.rpc.DummyRPC._
import com.avsystem.commons.serialization.{HasGenCodec, optionalParam, transientDefault, whenAbsent}
import scala.annotation.nowarn
/**
 * Interceptor annotation that prepends `prefix` to the raw value when restoring the
 * original encoding and strips it when producing the new raw value.
 */
class prepend(prefix: String) extends EncodingInterceptor[String, String] with DecodingInterceptor[String, String] {
  def toOriginalRaw(newRaw: String): String = s"$prefix$newRaw"
  def toNewRaw(raw: String): String =
    if (raw.startsWith(prefix)) raw.substring(prefix.length) else raw
}
// Simple serializable payload; exercised by TestRPC.takeCC as a case-class RPC parameter.
case class Record(i: Int, fuu: String)
object Record extends HasGenCodec[Record]
/** Nested RPC interface used to test sub-RPC resolution and recursive references. */
trait InnerRPC {
  // Fire-and-forget procedure.
  def proc(): Unit
  // Remote call whose raw argument encoding is wrapped by the "bul:" prefix interceptor.
  def func(@prepend("bul:") arg: Int): Future[String]
  // Returns a deeper InnerRPC (direct recursion on the same interface).
  def moreInner(name: String): InnerRPC
  // Returns the enclosing TestRPC (indirect recursion between interfaces).
  def indirectRecursion(): TestRPC
}
object InnerRPC extends RPCCompanion[InnerRPC]
/** Top-level test RPC interface exercising procedures, calls, overloads and defaults. */
trait TestRPC {
  // Default used via @whenAbsent on the Int overload of doStuff below.
  def defaultNum: Int = 42
  @nowarn("msg=side-effecting nullary methods")
  def handle: Unit
  def handleMore(): Unit
  // Procedure with a default argument and an implicit parameter.
  def doStuff(lol: Int, fuu: String = "pisiont")(implicit cos: Option[Boolean]): Unit
  // Overloads of doStuff are disambiguated with explicit @rpcName values.
  @rpcName("doStuffBoolean")
  def doStuff(@prepend("bul:") yes: Boolean): Future[String]
  @rpcName("doStuffInt")
  def doStuff(@whenAbsent(defaultNum) num: Int): Unit
  @namedArgs
  def doStuffNamed(@transientDefault @rawWhenAbsent("42") int: Int): Unit
  @namedArgs
  def doStuffOptional(@optionalParam thing: Opt[Int]): Unit
  def takeCC(r: Record = Record(-1, "_")): Unit
  def srslyDude(): Unit
  // Sub-RPC getter returning the nested interface.
  def innerRpc(name: String): InnerRPC
  // Generic call whose Tag implicit is marked as an @encodingDependency.
  def generallyDoStuff[T](list: List[T])(implicit @encodingDependency tag: Tag[T]): Future[Option[T]]
}
@nowarn("msg=side-effecting nullary methods")
object TestRPC extends RPCCompanion[TestRPC] {
  // AsRaw.materialize[DummyRPC.RawRPC, TestRPC].showAst
  // AsReal.materialize[DummyRPC.RawRPC, TestRPC].showAst

  /**
   * Creates a TestRPC implementation whose every method reports its raw invocation
   * (method name plus serialized arguments) through `onInvocation`. For calls, the
   * canned result is passed as the second argument and also returned to the caller.
   * Method-name strings must match the trait's @rpcName values exactly.
   */
  def rpcImpl(onInvocation: (RawInvocation, Option[Any]) => Any): TestRPC = new TestRPC { outer =>
    // Procedure: report the invocation with no result.
    private def onProcedure(methodName: String, args: List[String]): Unit =
      onInvocation(RawInvocation(methodName, args), None)
    // Call: report the invocation with the canned result, then complete with it.
    private def onCall[T](methodName: String, args: List[String], result: T): Future[T] = {
      onInvocation(RawInvocation(methodName, args), Some(result))
      Future.successful(result)
    }
    // Getter: report the invocation and return the canned result directly.
    private def onGet[T](methodName: String, args: List[String], result: T): T = {
      onInvocation(RawInvocation(methodName, args), None)
      result
    }
    def handleMore(): Unit =
      onProcedure("handleMore", Nil)
    def doStuff(lol: Int, fuu: String)(implicit cos: Option[Boolean]): Unit =
      onProcedure("doStuff", List(write(lol), write(fuu), write(cos)))
    def doStuff(yes: Boolean): Future[String] =
      onCall("doStuffBoolean", List(write(yes)), "doStuffResult")
    def doStuff(num: Int): Unit =
      onProcedure("doStuffInt", List(write(num)))
    def doStuffNamed(int: Int): Unit =
      onProcedure("doStuffNamed", List(write(int)))
    // Optional param is encoded as at-most-one raw string, not via write().
    def doStuffOptional(@optionalParam thing: Opt[Int]): Unit =
      onProcedure("doStuffOptional", thing.map(_.toString).toList)
    def handle: Unit =
      onProcedure("handle", Nil)
    def takeCC(r: Record): Unit =
      onProcedure("takeCC", List(write(r)))
    def srslyDude(): Unit =
      onProcedure("srslyDude", Nil)
    def innerRpc(name: String): InnerRPC = {
      // Inner RPC methods report under the "innerRpc." name prefix.
      onGet("innerRpc", List(write(name)), new InnerRPC {
        def func(arg: Int): Future[String] =
          onCall("innerRpc.func", List(write(arg)), "innerRpc.funcResult")
        def proc(): Unit =
          onProcedure("innerRpc.proc", Nil)
        def moreInner(name: String): InnerRPC =
          this
        def indirectRecursion(): TestRPC =
          outer
      })
    }
    def generallyDoStuff[T](list: List[T])(implicit tag: Tag[T]): Future[Option[T]] =
      onCall("generallyDoStuff", List(write(tag), write(list)), list.headOption)
  }
}
| AVSystem/scala-commons | commons-core/src/test/scala/com/avsystem/commons/rpc/TestRPC.scala | Scala | mit | 3,793 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.