code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*******************************************************************************
* Copyright (c) Nikolai Koudelia
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Nikolai Koudelia - initial API and implementation
*******************************************************************************/
package easyfit.cells
import easyfit.{Store, StopTestException}
import easyfit.Strings.InvalidColumnName
import easyfit.Strings.UndefinedConverter
import easyfit.IConverter
import easyfit.CellFactory
/**
* Represents a header cell in Row and Query tables.
* @param value initial value in a header cell
*/
class Header(value: String) {

  // Reject null/empty header names up front: the table is unusable without one.
  if (value == null || value.isEmpty) {
    throw new StopTestException(InvalidColumnName)
  }

  private var missing = false
  private var converterLookupDone = false
  private var cachedConverter: IConverter = null

  /** Raw header text as it appears in the table. */
  def Value = value

  /** True once setMissing() has flagged this column as absent from the SUT output. */
  def IsMissing = missing

  /** Marks this column as missing; format() will then report a failure. */
  def setMissing() {
    missing = true
  }

  /** A trailing "?" denotes a column whose SUT input is intentionally empty. */
  def isEmptySutInput: Boolean = value.endsWith("?")

  /**
   * The name sent to the SUT: the header text with any trailing "!" or "?"
   * removed, and, when the header is of the form "converter:name", only the
   * part after the colon.
   */
  def sutInput(): String = {
    val stripped =
      if (value.endsWith("!") || value.endsWith("?")) value.substring(0, value.length - 1)
      else value
    stripped.split(":") match {
      case Array(_, name) => name
      case _              => stripped
    }
  }

  /**
   * CSS-style result for this header cell: "fail: ... (MISSING)" when the
   * column was missing, "ignore" for "!"-suffixed headers, "pass" otherwise.
   */
  def format(): String = {
    if (missing) String.format("fail: %s (MISSING)", value)
    else if (value.endsWith("!")) "ignore"
    else "pass"
  }

  /**
   * Resolves (at most once) the converter named by a "converter:name" header.
   * Returns null when the header names no converter; throws StopTestException
   * when a named converter is not registered in the Store.
   */
  def fetchConverter(): IConverter = {
    if (!converterLookupDone) {
      val (converterName, _) = CellFactory.splitConverterHeader(value)
      if (converterName != null) {
        cachedConverter = Store.getConverter(converterName)
        if (cachedConverter == null) {
          throw new StopTestException(UndefinedConverter + ": " + converterName)
        }
      }
      converterLookupDone = true
    }
    cachedConverter
  }
}
| nikoudel/easyfit | easyFit/src/main/scala/easyfit/cells/Header.scala | Scala | epl-1.0 | 2,274 |
package org.beaucatcher.mongo.jdriver
import org.beaucatcher.bson._
import org.beaucatcher.mongo._
import org.beaucatcher.driver._
import com.mongodb._
import org.bson.types.{ ObjectId => JavaObjectId, _ }
import akka.actor.ActorSystem
/**
* [[org.beaucatcher.jdriver.JavaDriverContext]] is final with a private constructor - there's no way to create one
* directly. Instead you call newContext() on [[org.beaucatcher.jdriver.JavaDriver]].
* The [[org.beaucatcher.jdriver.JavaDriver]] companion object has a singleton `instance`
* you could use for this, `JavaDriver.instance.newContext()`.
*/
final class JavaDriverContext private[jdriver] (override val driver: JavaDriver,
val url: String, val actorSystem: ActorSystem)
extends DriverContext {
// Parsed form of the mongodb:// connection string; lazy so a bad URL only fails on first use.
private lazy val jdriverURI = new MongoURI(url)
// Shared, reference-counted connection acquired on first use and released in close().
private lazy val driverConnection = JavaDriverConnection.acquireConnection(jdriverURI)
// Shortcut to the raw Java-driver Mongo object behind the managed connection.
private def connection = driverConnection.underlying
override type DriverType = JavaDriver
override type DatabaseType = JavaDriverDatabase // we have no jdriver-specific Database stuff
override type UnderlyingConnectionType = Mongo
override type UnderlyingDatabaseType = DB
override type UnderlyingCollectionType = DBCollection
override def underlyingConnection: Mongo = connection
// The database is the one named in the connection URL.
override def underlyingDatabase: DB = connection.getDB(jdriverURI.getDatabase())
// Looks up a collection by name; the Java driver creates collections lazily,
// so the asserts below guard against driver misbehavior, not missing data.
override def underlyingCollection(name: String): DBCollection = {
if (name == null)
throw new IllegalArgumentException("null collection name")
val db: DB = underlyingDatabase
assert(db != null)
val coll: DBCollection = db.getCollection(name)
assert(coll != null)
coll
}
// Single Database wrapper per context; lazy to avoid touching the connection at construction.
override final lazy val database = {
new JavaDriverDatabase(this)
}
// Releases (decrements the refcount of) the shared connection.
override def close(): Unit = {
JavaDriverConnection.releaseConnection(driverConnection)
}
}
| havocp/beaucatcher | jdriver/src/main/scala/org/beaucatcher/mongo/jdriver/JavaDriverContext.scala | Scala | apache-2.0 | 1,935 |
package com.cloudray.scalapress.search.section
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{PathVariable, ModelAttribute, RequestMethod, RequestMapping}
import com.cloudray.scalapress.theme.MarkupDao
import com.cloudray.scalapress.item.controller.admin.MarkupPopulator
import com.cloudray.scalapress.util.SortPopulator
import org.springframework.beans.factory.annotation.Autowired
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
@Controller
@Autowired
@RequestMapping(Array("backoffice/search/section/form/{id}"))
class SearchFormSectionController(val markupDao: MarkupDao,
context: ScalapressContext) extends MarkupPopulator with SortPopulator {
// GET: renders the edit form for the section resolved by the @ModelAttribute method below.
@RequestMapping(method = Array(RequestMethod.GET))
def edit(@ModelAttribute("section") section: SearchFormSection) = "admin/search/section/form.vm"
// POST: persists the data-bound section, then redisplays the same form.
@RequestMapping(method = Array(RequestMethod.POST))
def save(@ModelAttribute("section") section: SearchFormSection) = {
context.sectionDao.save(section)
edit(section)
}
// Resolves the path-variable id to a section before each request handler runs.
// NOTE(review): the cast assumes the id always denotes a SearchFormSection;
// a wrong id type would surface as a ClassCastException — confirm upstream guarantees.
@ModelAttribute("section")
def section(@PathVariable("id") id: Long): SearchFormSection =
context.sectionDao.find(id).asInstanceOf[SearchFormSection]
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/search/section/SearchFormSectionController.scala | Scala | apache-2.0 | 1,274 |
/*
* Copyright (C) 2014 HMPerson1 <hmperson1@gmail.com> and nathanfei123
*
* This file is part of AOCM.
*
* AOCM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.adorableoctocm.ui
import scala.swing.{ Action, Button, GridBagPanel, Label, TextArea }
/**
* A menu in which the user can adjust various settings.
*/
class SettingsMenu(onBack: => Unit) extends GridBagPanel {
// TODO: To be implemented
// TODO: Prettification
// TODO: I18N and L10N
// Single-column layout: the (x, y) tuples become GridBagPanel constraints.
add(new Label("Help"), (0, 0))
add(new TextArea("Buttons'n'things go here."), (0, 1))
// "Back" runs the by-name onBack callback supplied by the parent UI.
add(new Button(Action("Back")(onBack)), (0, 2))
}
| HMPerson1/adorable-octo-computing-machine | src/net/adorableoctocm/ui/SettingsMenu.scala | Scala | gpl-3.0 | 1,201 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.util.concurrent.atomic.AtomicInteger
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.ListBuffer
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.{SparkEnv, TaskContext}
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.sql.{Encoder, SQLContext}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.streaming.{Offset => _, _}
import org.apache.spark.sql.sources.v2.{ContinuousReadSupportProvider, DataSourceOptions}
import org.apache.spark.sql.sources.v2.reader.{InputPartition, ScanConfig, ScanConfigBuilder}
import org.apache.spark.sql.sources.v2.reader.streaming._
import org.apache.spark.util.RpcUtils
/**
* The overall strategy here is:
* * ContinuousMemoryStream maintains a list of records for each partition. addData() will
* distribute records evenly-ish across partitions.
* * RecordEndpoint is set up as an endpoint for executor-side
* ContinuousMemoryStreamInputPartitionReader instances to poll. It returns the record at
* the specified offset within the list, or null if that offset doesn't yet have a record.
*/
class ContinuousMemoryStream[A : Encoder](id: Int, sqlContext: SQLContext, numPartitions: Int = 2)
extends MemoryStreamBase[A](sqlContext)
with ContinuousReadSupportProvider with ContinuousReadSupport {
// json4s support for (de)serializing offsets as JSON maps.
private implicit val formats = Serialization.formats(NoTypeHints)
protected val logicalPlan =
// TODO: don't pass null as table after finish API refactor for continuous stream.
StreamingRelationV2(this, "memory", null, Map(), attributes, None)(sqlContext.sparkSession)
// ContinuousReader implementation
// One record buffer per partition; all access is synchronized on this stream.
@GuardedBy("this")
private val records = Seq.fill(numPartitions)(new ListBuffer[A])
// Driver-side RPC endpoint that executor readers poll for records.
private val recordEndpoint = new ContinuousRecordEndpoint(records, this)
@volatile private var endpointRef: RpcEndpointRef = _
// Appends records round-robin across partitions; returns the offset at which
// everything added so far will have been consumed.
def addData(data: TraversableOnce[A]): Offset = synchronized {
// Distribute data evenly among partition lists.
data.toSeq.zipWithIndex.map {
case (item, index) => records(index % numPartitions) += item
}
// The new target offset is the offset where all records in all partitions have been processed.
ContinuousMemoryStreamOffset((0 until numPartitions).map(i => (i, records(i).size)).toMap)
}
// Initial position: record 0 in every partition.
override def initialOffset(): Offset = {
ContinuousMemoryStreamOffset((0 until numPartitions).map(i => (i, 0)).toMap)
}
// Inverse of ContinuousMemoryStreamOffset.json(): parse partition -> count map.
override def deserializeOffset(json: String): ContinuousMemoryStreamOffset = {
ContinuousMemoryStreamOffset(Serialization.read[Map[Int, Int]](json))
}
// Combines per-partition offsets reported by readers into a single global offset.
override def mergeOffsets(offsets: Array[PartitionOffset]): ContinuousMemoryStreamOffset = {
ContinuousMemoryStreamOffset(
offsets.map {
case ContinuousRecordPartitionOffset(part, num) => (part, num)
}.toMap
)
}
override def newScanConfigBuilder(start: Offset): ScanConfigBuilder = {
new SimpleStreamingScanConfigBuilder(fullSchema(), start)
}
// Registers the record endpoint under a fresh unique name, then produces one
// input partition per entry of the start offset.
override def planInputPartitions(config: ScanConfig): Array[InputPartition] = {
val startOffset = config.asInstanceOf[SimpleStreamingScanConfig]
.start.asInstanceOf[ContinuousMemoryStreamOffset]
synchronized {
val endpointName = s"ContinuousMemoryStreamRecordEndpoint-${java.util.UUID.randomUUID()}-$id"
endpointRef =
recordEndpoint.rpcEnv.setupEndpoint(endpointName, recordEndpoint)
startOffset.partitionNums.map {
case (part, index) => ContinuousMemoryStreamInputPartition(endpointName, part, index)
}.toArray
}
}
override def createContinuousReaderFactory(
config: ScanConfig): ContinuousPartitionReaderFactory = {
ContinuousMemoryStreamReaderFactory
}
// Unregisters the RPC endpoint if planInputPartitions ever set it up.
override def stop(): Unit = {
if (endpointRef != null) recordEndpoint.rpcEnv.stop(endpointRef)
}
// Records live in memory for the stream's lifetime; nothing to release on commit.
override def commit(end: Offset): Unit = {}
// ContinuousReadSupportProvider implementation
// This is necessary because of how StreamTest finds the source for AddDataMemory steps.
override def createContinuousReadSupport(
checkpointLocation: String,
options: DataSourceOptions): ContinuousReadSupport = this
}
object ContinuousMemoryStream {
  // Monotonically increasing id shared by every stream built through this factory.
  protected val memoryStreamId = new AtomicInteger(0)

  // Hands out the next unique stream id.
  private def nextId(): Int = memoryStreamId.getAndIncrement()

  /** Creates a stream with the default partition count. */
  def apply[A : Encoder](implicit sqlContext: SQLContext): ContinuousMemoryStream[A] =
    new ContinuousMemoryStream[A](nextId(), sqlContext)

  /** Creates a stream whose records all live in one partition. */
  def singlePartition[A : Encoder](implicit sqlContext: SQLContext): ContinuousMemoryStream[A] =
    new ContinuousMemoryStream[A](nextId(), sqlContext, 1)
}
/**
* An input partition for continuous memory stream.
*/
case class ContinuousMemoryStreamInputPartition(
// Name of the driver-side RPC endpoint that serves records for this stream.
driverEndpointName: String,
// Index of the partition this split covers.
partition: Int,
// Record index within the partition at which reading starts.
startOffset: Int) extends InputPartition
object ContinuousMemoryStreamReaderFactory extends ContinuousPartitionReaderFactory {
// Narrows the generic partition to the memory-stream variant and builds a reader
// that polls the driver endpoint starting from the partition's start offset.
override def createReader(partition: InputPartition): ContinuousPartitionReader[InternalRow] = {
val p = partition.asInstanceOf[ContinuousMemoryStreamInputPartition]
new ContinuousMemoryStreamPartitionReader(p.driverEndpointName, p.partition, p.startOffset)
}
}
/**
* An input partition reader for continuous memory stream.
*
* Polls the driver endpoint for new records.
*/
class ContinuousMemoryStreamPartitionReader(
driverEndpointName: String,
partition: Int,
startOffset: Int) extends ContinuousPartitionReader[InternalRow] {
// Reference to the driver-side endpoint that holds the per-partition record lists.
private val endpoint = RpcUtils.makeDriverRef(
driverEndpointName,
SparkEnv.get.conf,
SparkEnv.get.rpcEnv)
// Next record index to fetch from this partition.
private var currentOffset = startOffset
// Most recently fetched record, if any.
private var current: Option[InternalRow] = None
// Defense-in-depth against failing to propagate the task context. Since it's not inheritable,
// we have to do a bit of error prone work to get it into every thread used by continuous
// processing. We hope that some unit test will end up instantiating a continuous memory stream
// in such cases.
if (TaskContext.get() == null) {
throw new IllegalStateException("Task context was not set!")
}
// Blocks (polling the driver every 10ms) until the record at currentOffset exists,
// then advances the offset. Always returns true: the stream never ends on its own.
override def next(): Boolean = {
current = getRecord
while (current.isEmpty) {
Thread.sleep(10)
current = getRecord
}
currentOffset += 1
true
}
// Valid only after next() returned; .get is safe because next() loops until Some.
override def get(): InternalRow = current.get
override def close(): Unit = {}
// Offset of the NEXT record to read (next() already advanced past the current one).
override def getOffset: ContinuousRecordPartitionOffset =
ContinuousRecordPartitionOffset(partition, currentOffset)
// Synchronous RPC: None means the driver has no record at this offset yet.
private def getRecord: Option[InternalRow] =
endpoint.askSync[Option[InternalRow]](
GetRecord(ContinuousRecordPartitionOffset(partition, currentOffset)))
}
// Global stream position: partitionNums maps partition index -> number of records consumed.
case class ContinuousMemoryStreamOffset(partitionNums: Map[Int, Int])
extends Offset {
// json4s support for serializing the map; round-trips with deserializeOffset().
private implicit val formats = Serialization.formats(NoTypeHints)
override def json(): String = Serialization.write(partitionNums)
}
| hhbyyh/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ContinuousMemoryStream.scala | Scala | apache-2.0 | 7,772 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.shell.oflow
import org.openflow.protocol._
import kumoi.shell.aaa._
import kumoi.shell.aaa.ops._
import kumoi.shell.aaa.resource._
import kumoi.shell.cache._
import kumoi.shell.or._
import kumoi.core.rmi._
//import java.rmi._
/**
* Openflow Physical ports.
*
* @author Akiyoshi Sugiki
*/
@netporttype
@remote trait OFlowPort extends ORMapper[OFlowPort] {
//def name(implicit auth: AAA): String
// Port number or special designation (flood, controller, ...) -- persistently cached, read-only.
@persistcache @read def number(implicit auth: AAA): OFlowPort.PortType
// Hardware (MAC) address of the port.
@persistcache @read def mac(implicit auth: AAA): Array[Byte]
// Administrative configuration flags (see OFlowPort.Config).
@cache @index def config(implicit auth: AAA): List[OFlowPort.Config]
// Link/STP state flags (see OFlowPort.State).
@cache @index def state(implicit auth: AAA): List[OFlowPort.State]
// Features currently active on this port.
@cache @index def current(implicit auth: AAA): List[OFlowPort.Feature]
// Features this port advertises.
@cache @index def advertised(implicit auth: AAA): List[OFlowPort.Feature]
// Features the hardware supports.
@cache @index def supported(implicit auth: AAA): List[OFlowPort.Feature]
// Features advertised by the peer end of the link.
@cache @index def peer(implicit auth: AAA): List[OFlowPort.Feature]
}
object OFlowPort {
// Administrative port configuration flags (mirrors OpenFlow's ofp_port_config).
sealed abstract class Config
object Config {
case object PortDown extends Config
case object NoStp extends Config
case object NoRecv extends Config
case object NoRecvStp extends Config
case object NoFlood extends Config
case object NoFwd extends Config
case object NoPacketIn extends Config
}
// Link and spanning-tree state flags (mirrors OpenFlow's ofp_port_state).
sealed abstract class State
object State {
case object LinkDown extends State
case object StpListen extends State
case object StpLearn extends State
case object StpForward extends State
case object StpBlock extends State
case object StpMask extends State
}
// Port designation: either a numbered port or one of the reserved virtual ports.
// NOTE(review): the container object is named `Port` while the sealed class is
// `PortType`; consider aligning the names -- confirm no external code depends on `Port`.
sealed abstract class PortType
object Port {
case class Port(no: Int) extends PortType
case object InPort extends PortType
case object Table extends PortType
case object Normal extends PortType
case object Flood extends PortType
case object All extends PortType
case object Controller extends PortType
case object Local extends PortType
case object None extends PortType
}
// Port speed/medium capabilities (mirrors OpenFlow's ofp_port_features).
sealed abstract class Feature
object Feature {
case object Rate10MbHd extends Feature
case object Rate10MbFd extends Feature
case object Rate100MbHd extends Feature
case object Rate100MbFd extends Feature
case object Rate1GbHd extends Feature
case object Rate1GbFd extends Feature
case object Rate10GbFd extends Feature
case object MediumCopper extends Feature
case object MediumFiber extends Feature
case object AutoNeg extends Feature
case object Pause extends Feature
case object PauseAsym extends Feature
}
} | axi-sugiki/kumoi | src/kumoi/shell/oflow/OFlowPort.scala | Scala | apache-2.0 | 3,181 |
/*
*************************************************************************************
* Copyright 2016 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.hooks
import java.io.File
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
import net.liftweb.common.Box
import net.liftweb.common.EmptyBox
import net.liftweb.common.Failure
import net.liftweb.common.Full
import org.slf4j.LoggerFactory
import net.liftweb.common.Logger
import net.liftweb.util.Helpers.tryo
/*
* The goal of that file is to give a simple abstraction to run hooks in
* rudder.
*
* Hooks are stored in a directory. All hooks from a directory are
* run sequentially, so that side effects from one hook can be used
* in the following if the user want so.
* A hook which fails stop the process and error from stderr are used
* for the reason of the failing.
* A failed hook is decided by the return code: 0 mean success, anything
* else is a failure.
*
* Hooks are asynchronously executed by default, in a Future.
*/
/*
* Hooks are group in "set". We run all the hooks
* from the same set with the same set of envVariables.
* The hooks are executed in the order of the list.
*/
// A set of hooks to run: basePath is the directory holding them,
// hooksFile the ordered list of hook file names to execute.
final case class Hooks(basePath: String, hooksFile: List[String])
/**
* Hook env are pairs of environment variable name <=> value
*/
/** One environment variable (name/value pair) handed to a hook. */
final case class HookEnvPair(name: String, value: String) {
  // Rendered as "[name:value]" for log messages.
  def show = "[" + name + ":" + value + "]"
}
/** An ordered collection of environment variables for one hook run. */
final case class HookEnvPairs(values: List[HookEnvPair]) {
  // View the variables as a plain Map[String, String]; on duplicate names the last one wins.
  def toMap = values.map(p => p.name -> p.value).toMap

  // Concatenates both variable lists, this instance's variables first.
  def add(other: HookEnvPairs) = HookEnvPairs(values ++ other.values)

  /**
   * Formatted, space-separated string:
   * [key1:val1] [key2:val2] ...
   */
  def show: String = values.map(p => p.show).mkString(" ")
}
object HookEnvPairs {
  // Turns (name, value) tuples into a list of HookEnvPair, preserving order.
  def toListPairs(values: (String, String)*) = values.toList.map { case (n, v) => HookEnvPair(n, v) }

  // Convenience constructor from (name, value) tuples.
  def build(values: (String, String)*) = HookEnvPairs(toListPairs(values: _*))
}
/**
* Loggger for hooks
*/
object HooksLogger extends Logger {
// Dedicated "hooks" logger so hook execution can be configured/filtered independently.
override protected def _logger = LoggerFactory.getLogger("hooks")
}
object RunHooks {

  /**
   * Runs a list of hooks. Each hook is run sequentially (so that
   * the user can expect one hook's side effects to be usable in the
   * next one), but the whole process is asynchronous.
   * If one hook fails, the whole list fails.
   *
   * The semantic of return codes is:
   * - < 0: success (we should never have a negative returned code, but java int are signed)
   * - 0: success
   * - 1-31: errors. These codes stop the hooks pipeline, and the generation is on error
   * - 32-63: warnings. These codes log a warning message, but DON'T STOP the next hook processing
   * - 64-255: reserved. For now, they will be treated as "error", but that behaviour can change any-time
   *   without notice.
   * - > 255: should not happen, but treated as reserved.
   */
  def asyncRun(hooks: Hooks, hookParameters: HookEnvPairs, envVariables: HookEnvPairs): Future[Box[Unit]] = {
    /*
     * We can not use Future.fold, because it executes all scripts
     * in parallel and then combines their results. Our semantic
     * is to execute scripts one after the other, combining at each
     * step. But we still want the whole operation to be non-blocking.
     */
    hooks.hooksFile.foldLeft(Future(Full(()): Box[Unit])) { case (previousFuture, nextHookName) =>
      val path = hooks.basePath + File.separator + nextHookName
      previousFuture.flatMap {
        case Full(()) =>
          HooksLogger.debug(s"Run hook: '${path}' with environment parameters: ${hookParameters.show}")
          HooksLogger.trace(s"System environment variables: ${envVariables.show}")
          // hook-specific parameters are appended after (and can shadow) system variables
          val env = envVariables.add(hookParameters)
          RunNuCommand.run(Cmd(path, Nil, env.toMap)).map { result =>
            lazy val msg = s"Exit code=${result.code} for hook: '${path}' with environment variables: ${env.show}. \\n Stdout: '${result.stdout}' \\n Stderr: '${result.stderr}'"
            HooksLogger.trace(s" -> results: ${msg}")
            if( result.code <= 0 ) { // success (negative codes shouldn't happen, see scaladoc)
              Full(())
            } else if(result.code >= 1 && result.code <= 31 ) { // error: stop the pipeline
              Failure(msg)
            } else if(result.code >= 32 && result.code <= 63) { // warning: log but continue
              // BUGFIX: the upper bound used to be 64, but 64 belongs to the reserved
              // range (64-255) documented above and must be treated as an error.
              HooksLogger.warn(msg)
              Full(())
            } else { // reserved (64-255 and above) - treated like error for now
              Failure(msg)
            }
          } recover {
            case ex: Exception => Failure(s"Exception when executing '${path}' with environment variables: ${env.show}: ${ex.getMessage}")
          }
        // a previous hook failed: short-circuit, don't run this one
        case eb: EmptyBox => Future(eb)
      }
    }
  }

  /*
   * Run hooks in given directory, synchronously.
   *
   * Only executable files are selected as hooks, all
   * other files will be ignored.
   *
   * The hooks will be run in lexicographic order, so that the
   * "standard" ordering of unix hooks (or init.d) with numbers
   * works as expected:
   *
   * 01-first.hook
   * 20-second.hook
   * 30-third.hook
   * etc
   */
  def syncRun(hooks: Hooks, hookParameters: HookEnvPairs, envVariables: HookEnvPairs): Box[Unit] = {
    try {
      // cmdInfo is just for comments/log. We use "*" to synthetize
      val cmdInfo = s"'${hooks.basePath}' with environment parameters: ${hookParameters.show}"
      HooksLogger.debug(s"Run hooks: ${cmdInfo}")
      HooksLogger.trace(s"Hook environment variables: ${envVariables.show}")
      val time_0 = System.currentTimeMillis
      // Duration.Inf: hooks have no timeout; a hanging hook blocks the caller by design
      val res = Await.result(asyncRun(hooks, hookParameters, envVariables), Duration.Inf)
      HooksLogger.debug(s"Done in ${System.currentTimeMillis - time_0} ms: ${cmdInfo}")
      res
    } catch {
      case NonFatal(ex) => Failure(s"Error when executing hooks in directory '${hooks.basePath}'. Error message is: ${ex.getMessage}")
    }
  }

  /**
   * Get the hooks set for the given directory path.
   * Directories and non-executable files are ignored; the remaining file
   * names are sorted so numeric prefixes (01-..., 20-...) order the run.
   * A non-existing directory yields an empty Hooks, not an error.
   */
  def getHooks(basePath: String): Box[Hooks] = {
    tryo {
      val dir = new File(basePath)
      // Check that dir exists before looking in it
      if (dir.exists) {
        // only keep executable, non-directory files
        val files = dir.listFiles().toList.flatMap {
          case f if f.isDirectory => None
          case f if f.canExecute  => Some(f.getName)
          case f =>
            HooksLogger.debug(s"Ignoring hook '${f.getAbsolutePath}' because it is not executable. Check permission?")
            None
        }.sorted // sort them alphanumericaly
        Hooks(basePath, files)
      } else {
        HooksLogger.debug(s"Ignoring hook directory '${dir.getAbsolutePath}' because path does not exists")
        // return an empty Hook
        Hooks(basePath, List[String]())
      }
    }
  }
}
| armeniaca/rudder | rudder-core/src/main/scala/com/normation/rudder/hooks/RunHooks.scala | Scala | gpl-3.0 | 8,630 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.memory
import spray.json.{JsArray, JsBoolean, JsNumber, JsObject, JsString, JsTrue}
import org.apache.openwhisk.core.database.{ActivationHandler, UnsupportedQueryKeys, UnsupportedView, WhisksHandler}
import org.apache.openwhisk.core.entity.{UserLimits, WhiskQueries}
import org.apache.openwhisk.utils.JsHelpers
/**
* Maps the CouchDB view logic to expressed in javascript to Scala logic so as to enable
* performing queries by {{{MemoryArtifactStore}}}. Also serves as an example of what all query usecases
* are to be supported by any {{{ArtifactStore}}} implementation
*/
trait MemoryViewMapper {
  protected val TOP: String = WhiskQueries.TOP

  /** Decides whether document `d` (with computed fields `c`) matches the view query. */
  def filter(ddoc: String, view: String, startKey: List[Any], endKey: List[Any], d: JsObject, c: JsObject): Boolean

  /** Orders the matched documents the way the given view would. */
  def sort(ddoc: String, view: String, descending: Boolean, s: Seq[JsObject]): Seq[JsObject]

  // Shared sanity checks: both keys non-empty and agreeing on their first component.
  protected def checkKeys(startKey: List[Any], endKey: List[Any]): Unit = {
    require(startKey.nonEmpty)
    require(endKey.nonEmpty)
    require(startKey.head == endKey.head, s"First key should be same => ($startKey) - ($endKey)")
  }

  // True when the (possibly nested) field `name` is a JSON string equal to `value`.
  protected def equal(js: JsObject, name: String, value: String): Boolean =
    JsHelpers.getFieldPath(js, name).exists {
      case JsString(v) => v == value
      case _           => false
    }

  // True when the field is a JSON boolean holding true.
  protected def isTrue(js: JsObject, name: String): Boolean =
    JsHelpers.getFieldPath(js, name).exists {
      case JsBoolean(v) => v
      case _            => false
    }

  // True when the field is a JSON number >= `value` (compared as Long).
  protected def gte(js: JsObject, name: String, value: Number): Boolean =
    JsHelpers.getFieldPath(js, name).exists {
      case JsNumber(n) => n.longValue >= value.longValue
      case _           => false
    }

  // True when the field is a JSON number <= `value` (compared as Long).
  protected def lte(js: JsObject, name: String, value: Number): Boolean =
    JsHelpers.getFieldPath(js, name).exists {
      case JsNumber(n) => n.longValue <= value.longValue
      case _           => false
    }

  // Sorts by the numeric field `name` (missing or non-numeric treated as 0), honoring `descending`.
  protected def numericSort(s: Seq[JsObject], descending: Boolean, name: String): Seq[JsObject] = {
    def keyOf(js: JsObject): Long =
      JsHelpers.getFieldPath(js, name) match {
        case Some(JsNumber(n)) => n.longValue
        case _                 => 0L
      }
    val ordering = if (descending) Ordering.Long.reverse else Ordering.Long
    s.sortBy(keyOf)(ordering)
  }
}
// Emulates the CouchDB activation views. Key layout: first component is the
// namespace (whisks ddocs) or namespace/action-path (whisks-filters ddocs),
// optionally followed by a start time and an end time.
private object ActivationViewMapper extends MemoryViewMapper {
private val NS = "namespace"
private val NS_WITH_PATH = ActivationHandler.NS_PATH
private val START = "start"
override def filter(ddoc: String,
view: String,
startKey: List[Any],
endKey: List[Any],
d: JsObject,
c: JsObject): Boolean = {
checkKeys(startKey, endKey)
val nsValue = startKey.head.asInstanceOf[String]
view match {
//whisks-filters ddoc uses namespace + invoking action path as first key
case "activations" if ddoc.startsWith("whisks-filters") =>
filterActivation(d, equal(c, NS_WITH_PATH, nsValue), startKey, endKey)
//whisks ddoc uses namespace as first key
case "activations" if ddoc.startsWith("whisks") => filterActivation(d, equal(d, NS, nsValue), startKey, endKey)
case _ => throw UnsupportedView(s"$ddoc/$view")
}
}
// Activations are always ordered by their "start" time.
override def sort(ddoc: String, view: String, descending: Boolean, s: Seq[JsObject]): Seq[JsObject] =
view match {
case "activations" if ddoc.startsWith("whisks") => numericSort(s, descending, START)
case _ => throw UnsupportedView(s"$ddoc/$view")
}
// Applies the time-range part of the key: (ns) alone, (ns, since) with an
// open end, or (ns, since)..(ns, upto) for a bounded window. TOP marks an
// unbounded component in the end key.
private def filterActivation(d: JsObject, matchNS: Boolean, startKey: List[Any], endKey: List[Any]): Boolean = {
val filterResult = (startKey, endKey) match {
case (_ :: Nil, _ :: `TOP` :: Nil) =>
matchNS
case (_ :: (since: Number) :: Nil, _ :: `TOP` :: `TOP` :: Nil) =>
matchNS && gte(d, START, since)
case (_ :: (since: Number) :: Nil, _ :: (upto: Number) :: `TOP` :: Nil) =>
matchNS && gte(d, START, since) && lte(d, START, upto)
case _ => throw UnsupportedQueryKeys(s"$startKey, $endKey")
}
filterResult
}
}
// Emulates the CouchDB views over whisk entities (actions, rules, triggers,
// packages). Key layout: namespace (or root namespace) optionally followed by
// an "updated" time range; TOP marks an unbounded end-key component.
private object WhisksViewMapper extends MemoryViewMapper {
private val NS = "namespace"
private val ROOT_NS = WhisksHandler.ROOT_NS
private val TYPE = "entityType"
private val UPDATED = "updated"
private val PUBLISH = "publish"
private val BINDING = "binding"
override def filter(ddoc: String,
view: String,
startKey: List[Any],
endKey: List[Any],
d: JsObject,
c: JsObject): Boolean = {
checkKeys(startKey, endKey)
// The entity type to match is derived from the design doc / view name.
val entityType = WhisksHandler.getEntityTypeForDesignDoc(ddoc, view)
val matchTypeAndView = equal(d, TYPE, entityType) && matchViewConditions(ddoc, view, d)
// The first key may be either a plain namespace or a root namespace (computed field).
val matchNS = equal(d, NS, startKey.head.asInstanceOf[String])
val matchRootNS = equal(c, ROOT_NS, startKey.head.asInstanceOf[String])
//Here ddocs for actions, rules and triggers use
//namespace and namespace/packageName as first key
val filterResult = (startKey, endKey) match {
case (ns :: Nil, _ :: `TOP` :: Nil) =>
(matchTypeAndView && matchNS) || (matchTypeAndView && matchRootNS)
case (ns :: (since: Number) :: Nil, _ :: `TOP` :: `TOP` :: Nil) =>
(matchTypeAndView && matchNS && gte(d, UPDATED, since)) ||
(matchTypeAndView && matchRootNS && gte(d, UPDATED, since))
case (ns :: (since: Number) :: Nil, _ :: (upto: Number) :: `TOP` :: Nil) =>
(matchTypeAndView && matchNS && gte(d, UPDATED, since) && lte(d, UPDATED, upto)) ||
(matchTypeAndView && matchRootNS && gte(d, UPDATED, since) && lte(d, UPDATED, upto))
case _ => throw UnsupportedQueryKeys(s"$ddoc/$view -> ($startKey, $endKey)")
}
filterResult
}
// Extra per-view predicate: the "packages-public" view only lists published
// packages that are not bindings; all other views have no extra condition.
private def matchViewConditions(ddoc: String, view: String, d: JsObject): Boolean = {
view match {
case "packages-public" if ddoc.startsWith("whisks") =>
isTrue(d, PUBLISH) && hasEmptyBinding(d)
case _ => true
}
}
// A package is a binding iff its "binding" field is a non-empty object.
private def hasEmptyBinding(js: JsObject) = {
js.fields.get(BINDING) match {
case Some(x: JsObject) if x.fields.nonEmpty => false
case _ => true
}
}
// All whisk-entity views order results by their "updated" timestamp.
override def sort(ddoc: String, view: String, descending: Boolean, s: Seq[JsObject]): Seq[JsObject] = {
view match {
case "actions" | "rules" | "triggers" | "packages" | "packages-public" if ddoc.startsWith("whisks") =>
numericSort(s, descending, UPDATED)
case _ => throw UnsupportedView(s"$ddoc/$view")
}
}
}
// Emulates the subject/identity and namespace-blacklist views. These are
// point lookups: start and end key must be identical.
private object SubjectViewMapper extends MemoryViewMapper {
private val BLOCKED = "blocked"
private val SUBJECT = "subject"
private val UUID = "uuid"
private val KEY = "key"
private val NS_NAME = "name"
override def filter(ddoc: String,
view: String,
startKey: List[Any],
endKey: List[Any],
d: JsObject,
c: JsObject): Boolean = {
require(startKey == endKey, s"startKey: $startKey and endKey: $endKey must be same for $ddoc/$view")
(ddoc, view) match {
case (s, "identities") if s.startsWith("subjects") =>
filterForMatchingSubjectOrNamespace(ddoc, view, startKey, endKey, d)
case ("namespaceThrottlings", "blockedNamespaces") =>
filterForBlacklistedNamespace(d)
case _ =>
throw UnsupportedView(s"$ddoc/$view")
}
}
// A namespace is blacklisted when its limits doc sets either throttle to 0,
// or when the doc carries blocked=true.
private def filterForBlacklistedNamespace(d: JsObject): Boolean = {
val id = d.fields("_id")
id match {
case JsString(idv) if idv.endsWith("/limits") =>
val limits = UserLimits.serdes.read(d)
limits.concurrentInvocations.contains(0) || limits.invocationsPerMinute.contains(0)
case _ =>
d.getFields(BLOCKED) match {
case Seq(JsTrue) => true
case _ => false
}
}
}
// Identity lookup for non-blocked subjects, keyed either by subject/namespace
// name (1-element key) or by (uuid, key) credentials (2-element key). The
// credentials may match the subject itself or any of its namespaces.
private def filterForMatchingSubjectOrNamespace(ddoc: String,
view: String,
startKey: List[Any],
endKey: List[Any],
d: JsObject) = {
val notBlocked = !isTrue(d, BLOCKED)
startKey match {
case (ns: String) :: Nil => notBlocked && (equal(d, SUBJECT, ns) || matchingNamespace(d, equal(_, NS_NAME, ns)))
case (uuid: String) :: (key: String) :: Nil =>
notBlocked &&
(
(equal(d, UUID, uuid) && equal(d, KEY, key))
|| matchingNamespace(d, js => equal(js, UUID, uuid) && equal(js, KEY, key))
)
case _ => throw UnsupportedQueryKeys(s"$ddoc/$view -> ($startKey, $endKey)")
}
}
override def sort(ddoc: String, view: String, descending: Boolean, s: Seq[JsObject]): Seq[JsObject] = {
s //No sorting to be done
}
// True when any entry of the subject's "namespaces" array satisfies `matcher`.
private def matchingNamespace(js: JsObject, matcher: JsObject => Boolean): Boolean = {
js.fields.get("namespaces") match {
case Some(JsArray(e)) => e.exists(v => matcher(v.asJsObject))
case _ => false
}
}
}
| jasonpet/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/database/memory/MemoryViewMapper.scala | Scala | apache-2.0 | 10,239 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.actions
import com.linkedin.data.DataList
import org.coursera.common.stringkey.StringKey
import org.coursera.courier.templates.DataTemplates.DataConversion
import org.coursera.naptime.courier.CourierFormats
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.RestError
import org.coursera.naptime.NaptimeActionException
import org.coursera.naptime.Errors
import org.coursera.naptime.FacetField
import org.coursera.naptime.FacetFieldValue
import org.coursera.naptime.Fields
import org.coursera.naptime.Ok
import org.coursera.naptime.QueryFields
import org.coursera.naptime.QueryIncludes
import org.coursera.naptime.RequestFields
import org.coursera.naptime.RequestPagination
import org.coursera.naptime.ResourceName
import org.coursera.naptime.actions.util.Validators
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.junit.Test
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.AssertionsForJUnit
import play.api.http.HeaderNames
import play.api.http.Status
import play.api.http.Writeable
import play.api.libs.iteratee.Enumerator
import play.api.libs.json.JsArray
import play.api.libs.json.JsObject
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.libs.json.OFormat
import play.api.mvc.AnyContent
import play.api.mvc.AnyContentAsEmpty
import play.api.mvc.RequestHeader
import play.api.mvc.Result
import play.api.test.FakeRequest
import play.api.test.Helpers
import play.api.test.Helpers.defaultAwaitTimeout
import scala.concurrent.Future
import scala.concurrent.duration._
// Shared fixtures for RestActionCategoryEngine2Test: three small Naptime test
// resources (Play-JSON keyed by Int, Courier keyed by String, and Courier
// keyed by a record id) plus helpers that attach related resources to
// responses and assert on their presence in the serialized output.
object RestActionCategoryEngine2Test {
// Minimal Play-JSON model served by PlayJsonTestResource.
case class Person(name: String, email: String)
object Person {
implicit val jsonFormat: OFormat[Person] = Json.format[Person]
}
/**
* A test resource used for testing the DataMap-centric rest engines. (Uses Play-JSON adapters)
*
* Note: because we're not using the routing components of Naptime, we can get away with multiple
* get's / etc.
*
* In general, it is a very bad idea to have multiple gets, creates, etc, in a single resource.
*/
object PlayJsonTestResource
extends TopLevelCollectionResource[Int, Person] {
import RestActionCategoryEngine2._
override def keyFormat: KeyFormat[Int] = KeyFormat.intKeyFormat
override def resourceName: String = "testResource"
override implicit val resourceFormat: OFormat[Person] = Person.jsonFormat
// Only "name" is serialized by default; the two related resources become
// available when requested via the `includes` query parameter.
implicit val fields = Fields.withDefaultFields("name").withRelated(
"relatedCaseClass" -> RelatedResources.CaseClass.relatedName,
"relatedCourier" -> RelatedResources.Courier.relatedName)
def get1(id: Int) = Nap.get { ctx =>
RelatedResources.addRelated {
Ok(Keyed(id, Person(s"$id", s"$id@coursera.org")))
}
}
// Always fails, to exercise error serialization.
def get2(id: Int) = Nap.get { ctx =>
Errors.NotFound(errorCode = "id", msg = s"Bad id $id")
}
def multiGet(ids: Set[Int]) = Nap.multiGet { ctx =>
RelatedResources.addRelated {
Ok(ids.map(id => Keyed(id, Person(s"$id", s"$id@coursera.org"))).toSeq)
}
}
def create1 = Nap.create { ctx =>
Ok(Keyed(2, Some(Person("newId", "newId@coursera.org"))))
}
// Create that returns a key but no body.
def create2 = Nap.create { ctx =>
Ok(Keyed(3, None))
}
// Create whose handler throws; the `catching` block maps the exception to a
// 400 RestError.
def create3 = Nap.catching {
case e: RuntimeException => RestError(NaptimeActionException(Status.BAD_REQUEST, Some("boom"), None))
}.create { ctx =>
throw new RuntimeException("boooooooom")
}
def delete1(id: Int) = Nap.delete {
Ok(())
}
}
/**
* A test resource used for testing the DataMap-centric rest engines. (Uses Play-JSON adapters)
*
* Note: because we're not using the routing components of Naptime, we can get away with multiple
* get's / etc.
*
* In general, it is a very bad idea to have multiple gets, creates, etc, in a single resource.
*/
object CourierTestResource
extends TopLevelCollectionResource[String, Course] {
import RestActionCategoryEngine2._
override def keyFormat: KeyFormat[String] = KeyFormat.stringKeyFormat
override def resourceName: String = "testResource"
override implicit val resourceFormat: OFormat[Course] =
CourierFormats.recordTemplateFormats[Course]
implicit val fields = Fields.withDefaultFields("name").withRelated(
"relatedCaseClass" -> RelatedResources.CaseClass.relatedName,
"relatedCourier" -> RelatedResources.Courier.relatedName)
// Deterministically builds a Course fixture from an id.
def mk(id: String): Course = Course(s"$id name", s"$id description")
def get1(id: String) = Nap.get { ctx =>
RelatedResources.addRelated {
Ok(Keyed(id, mk(id)))
}
}
def get2(id: String) = Nap.get { ctx =>
Errors.NotFound(errorCode = "id", msg = s"Bad id: $id")
}
def multiGet(ids: Set[String]) = Nap.multiGet { ctx =>
RelatedResources.addRelated {
Ok(ids.map(id => Keyed(id, mk(id))).toSeq)
}
}
def create1 = Nap.create { ctx =>
Ok(Keyed("1", Some(mk("1"))))
}
def create2 = Nap.create { ctx =>
Ok(Keyed("1", None))
}
def create3 = Nap.catching {
case e: RuntimeException =>
RestError(NaptimeActionException(Status.BAD_REQUEST, Some("boom"), None))
}.create { ctx =>
throw new RuntimeException("boooooooom")
}
def delete1(id: String) = Nap.delete {
Ok(())
}
}
/**
* A test resource keyed by a Courier record (EnrollmentId), exercising
* serialization of complex keys via their string form.
*/
object CourierKeyedTestResource
extends TopLevelCollectionResource[EnrollmentId, Course] {
import RestActionCategoryEngine2._
override def resourceName: String = "testResource"
implicit val sessionIdStringKeyFormat = CourierFormats.recordTemplateStringKeyFormat[EnrollmentId]
override implicit def keyFormat =
KeyFormat.idAsStringWithFields(CourierFormats.recordTemplateFormats[EnrollmentId])
override implicit def resourceFormat: OFormat[Course] = CourierFormats.recordTemplateFormats[Course]
implicit val fields = Fields.withDefaultFields("name").withRelated(
"relatedCaseClass" -> RelatedResources.CaseClass.relatedName,
"relatedCourier" -> RelatedResources.Courier.relatedName)
def mk(id: EnrollmentId): Course = Course(s"${StringKey(id).key} name", s"$id description")
// Fixed record keys reused across the tests below.
object EnrollmentIds {
val a = EnrollmentId(userId = 1225, courseId = SessionId(courseId = "abc", iterationId = 2))
val b = EnrollmentId(userId = 2, courseId = SessionId(courseId = "xyz", iterationId = 8))
}
def get1(id: EnrollmentId) = Nap.get { ctx =>
RelatedResources.addRelated {
Ok(Keyed(id, mk(id)))
}
}
def get2(id: EnrollmentId) = Nap.get { ctx =>
Errors.NotFound(errorCode = "id", msg = s"Bad id: $id")
}
def multiGet(ids: Set[EnrollmentId]) = Nap.multiGet { ctx =>
RelatedResources.addRelated {
Ok(ids.map(id => Keyed(id, mk(id))).toSeq)
}
}
def create1 = Nap.create { ctx =>
Ok(Keyed(EnrollmentIds.a, Some(mk(EnrollmentIds.a))))
}
def create2 = Nap.create { ctx =>
Ok(Keyed(EnrollmentIds.b, None))
}
def create3 = Nap.catching {
case e: RuntimeException =>
RestError(NaptimeActionException(Status.BAD_REQUEST, Some("boom"), None))
}.create { ctx =>
throw new RuntimeException("boooooooom")
}
def delete1(id: EnrollmentId) = Nap.delete {
Ok(())
}
}
// Builders and assertions for the "linked" (related resources) section of a
// Naptime response.
object RelatedResources extends AssertionsForJUnit {
object CaseClass {
val relatedName = ResourceName("relatedCaseClass", 2)
implicit val fields = Fields[Person]
val related = Seq(
Keyed(1, Person("related1", "1@related.com"))
)
def addRelated[T](ok: Ok[T]): Ok[T] = {
ok.withRelated(relatedName, related)
}
}
object Courier {
val relatedName = ResourceName("relatedCourier", 3)
implicit val format = CourierFormats.recordTemplateFormats[Course]
implicit val fields = Fields[Course]
val related = Seq(
Keyed(1, Course("relatedCourse1", "All about the first related course!"))
)
def addRelated[T](ok: Ok[T]): Ok[T] = {
ok.withRelated(relatedName, related)
}
}
// Attaches one related entry of each flavor (case class + Courier) to the
// response.
def addRelated[T](ok: Ok[T]): Ok[T] = {
val withCaseClass = CaseClass.addRelated(ok)
val withCourier = Courier.addRelated(withCaseClass)
withCourier
}
// Parses the response body and returns (the "linked" section, the whole
// JSON object), failing if the body is not shaped as expected.
private[this] def checkBasicResponseForRelated(response: Result): (JsObject, JsObject) = {
val bodyContent = Helpers.contentAsJson(Future.successful(response))
assert(bodyContent.isInstanceOf[JsObject])
val json = bodyContent.asInstanceOf[JsObject]
assert(json.value.contains("elements"))
assert(json.value.contains("linked"))
assert((json \ "linked").toOption.isDefined, s"Linked: ${json \ "linked"}")
assert((json \ "linked").validate[JsObject].asOpt.isDefined,
s"Got ${(json \ "linked").validate[JsObject]}. Json: $json")
val linked = (json \ "linked").validate[JsObject].get
(linked, json)
}
// Asserts that both related entries were serialized, each with only its id
// and default "name" field.
def assertRelatedPresent(response: Result): Unit = {
val (linked, json) = checkBasicResponseForRelated(response)
assert(linked.value.size === 2, s"Response: $json")
val expected = Json.obj(
CaseClass.relatedName.identifier -> Json.arr(
Json.obj(
"id" -> 1,
"name" -> "related1")),
Courier.relatedName.identifier -> Json.arr(
Json.obj(
"id" -> 1,
"name" -> "relatedCourse1")))
assert(expected === linked, s"Linked was not what we expected. Got $linked")
}
// Asserts the "linked" section exists but is empty.
def assertRelatedAbsent(response: Result): Unit = {
val (linked, json) = checkBasicResponseForRelated(response)
assert(linked.value.size === 0, s"Response: $json")
}
}
}
// Exercises RestActionCategoryEngine2 end-to-end through fake Play requests:
// response bodies, related-resource inclusion, ETag generation and
// If-None-Match handling, multi-hop includes, and facet serialization.
class RestActionCategoryEngine2Test extends AssertionsForJUnit with ScalaFutures {
import RestActionCategoryEngine2Test._
// Increase timeout a bit.
override def spanScaleFactor: Double = 10
// ---- PlayJsonTestResource (Int-keyed, Play-JSON adapters) ----
@Test
def playJsonGet1(): Unit = {
val response = testEmptyRequestBody(PlayJsonTestResource.get1(1))
RelatedResources.assertRelatedPresent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> 1,
"name" -> "1"))
assert(expected === elements)
}
@Test
def playJsonGet1NoRelated(): Unit = {
val response = testEmptyRequestBody(PlayJsonTestResource.get1(1), FakeRequest())
RelatedResources.assertRelatedAbsent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> 1,
"name" -> "1"))
assert(expected === elements)
}
// ETags must differ when the included related resources differ, and be
// identical (and stable across runs) for identical responses.
@Test
def playJsonGet1Etags(): Unit = {
val response1 = testEmptyRequestBody(PlayJsonTestResource.get1(1))
val responseNoRelated = testEmptyRequestBody(PlayJsonTestResource.get1(1), FakeRequest())
val response2 = testEmptyRequestBody(PlayJsonTestResource.get1(1))
assert(response1.header.status === Status.OK)
assert(response1.header.headers.contains(HeaderNames.ETAG))
assert(responseNoRelated.header.status === Status.OK)
assert(responseNoRelated.header.headers.contains(HeaderNames.ETAG))
assert(response2.header.status === Status.OK)
assert(response2.header.headers.contains(HeaderNames.ETAG))
assert(response1.header.headers.get(HeaderNames.ETAG) != responseNoRelated.header.headers.get(HeaderNames.ETAG))
assert(response1.header.headers.get(HeaderNames.ETAG) === response2.header.headers.get(HeaderNames.ETAG))
// Check for stability in ETag computation.
assert(Some("W/\"-981723117\"") === response1.header.headers.get(HeaderNames.ETAG))
}
// A request replaying the ETag via If-None-Match must get a 304.
@Test
def playJsonGet1IfNoneMatch(): Unit = {
val response1 = testEmptyRequestBody(PlayJsonTestResource.get1(4))
assert(response1.header.headers.contains(HeaderNames.ETAG))
val etag = response1.header.headers(HeaderNames.ETAG)
val request2 = standardFakeRequest.withHeaders(HeaderNames.IF_NONE_MATCH -> etag)
val response2 = testEmptyRequestBody(PlayJsonTestResource.get1(4), request2)
assert(response2.header.status === Status.NOT_MODIFIED)
}
@Test
def playJsonGet2(): Unit = {
testEmptyRequestBody(PlayJsonTestResource.get2(1))
}
@Test
def playJsonMultiGet(): Unit = {
val response = testEmptyRequestBody(PlayJsonTestResource.multiGet(Set(1, 2)))
RelatedResources.assertRelatedPresent(response)
}
@Test
def playJsonMultiGetNoRelated(): Unit = {
val response = testEmptyRequestBody(PlayJsonTestResource.multiGet(Set(1, 2)), FakeRequest())
RelatedResources.assertRelatedAbsent(response)
}
@Test
def playJsonCreate1(): Unit = {
testEmptyRequestBody(PlayJsonTestResource.create1)
}
@Test
def playJsonCreate2(): Unit = {
testEmptyRequestBody(PlayJsonTestResource.create2)
}
@Test
def playJsonCreate3(): Unit = {
testEmptyRequestBody(PlayJsonTestResource.create3)
}
@Test
def playJsonDelete1(): Unit = {
testEmptyRequestBody(PlayJsonTestResource.delete1(1))
}
// ---- CourierTestResource (String-keyed, Courier formats) ----
@Test
def courierGet1(): Unit = {
val response = testEmptyRequestBody(CourierTestResource.get1("test"))
RelatedResources.assertRelatedPresent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> "test",
"name" -> "test name"))
assert(expected === elements)
}
@Test
def courierGet1NoRelated(): Unit = {
val response = testEmptyRequestBody(CourierTestResource.get1("test"), FakeRequest())
RelatedResources.assertRelatedAbsent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> "test",
"name" -> "test name"))
assert(expected === elements)
}
@Test
def courierGet1Etags(): Unit = {
val response1 = testEmptyRequestBody(CourierTestResource.get1("test"))
val response2 = testEmptyRequestBody(CourierTestResource.get1("test"))
val responseNoRelated = testEmptyRequestBody(CourierTestResource.get1("test"), FakeRequest())
assert(response1.header.status === Status.OK)
assert(response2.header.status === Status.OK)
assert(responseNoRelated.header.status === Status.OK)
assert(response1.header.headers.contains(HeaderNames.ETAG))
assert(response2.header.headers.contains(HeaderNames.ETAG))
assert(responseNoRelated.header.headers.contains(HeaderNames.ETAG))
assert(response1.header.headers.get(HeaderNames.ETAG) != responseNoRelated.header.headers.get(HeaderNames.ETAG))
assert(response1.header.headers.get(HeaderNames.ETAG) === response2.header.headers.get(HeaderNames.ETAG))
// Check for stability in ETag computation.
assert(Some("W/\"1468630371\"") === response1.header.headers.get(HeaderNames.ETAG))
}
@Test
def courierGet1IfNoneMatch(): Unit = {
val response1 = testEmptyRequestBody(CourierTestResource.get1("etagTest"))
assert(response1.header.headers.contains(HeaderNames.ETAG))
val etag = response1.header.headers(HeaderNames.ETAG)
val request2 = standardFakeRequest.withHeaders(HeaderNames.IF_NONE_MATCH -> etag)
val response2 = testEmptyRequestBody(CourierTestResource.get1("etagTest"), request2)
assert(response2.header.status === Status.NOT_MODIFIED)
}
@Test
def courierGet2(): Unit = {
testEmptyRequestBody(CourierTestResource.get2("test"))
}
@Test
def courierMultiGet(): Unit = {
val response = testEmptyRequestBody(CourierTestResource.multiGet(Set("test1", "test2")))
RelatedResources.assertRelatedPresent(response)
}
@Test
def courierMultiGetNoRelated(): Unit = {
val response = testEmptyRequestBody(CourierTestResource.multiGet(Set("test1", "test2")), FakeRequest())
RelatedResources.assertRelatedAbsent(response)
}
@Test
def courierCreate1(): Unit = {
testEmptyRequestBody(CourierTestResource.create1)
}
@Test
def courierCreate2(): Unit = {
testEmptyRequestBody(CourierTestResource.create2)
}
@Test
def courierCreate3(): Unit = {
testEmptyRequestBody(CourierTestResource.create3)
}
@Test
def courierDelete1(): Unit = {
testEmptyRequestBody(CourierTestResource.delete1("test"))
}
// ---- CourierKeyedTestResource (record-keyed; keys serialize to strings) ----
@Test
def courierKeyedGet1(): Unit = {
val response = testEmptyRequestBody(CourierKeyedTestResource.get1(CourierKeyedTestResource.EnrollmentIds.a))
RelatedResources.assertRelatedPresent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> "1225~abc!~2",
"courseId" -> Json.obj(
"iterationId" -> 2,
"courseId" -> "abc"),
"userId" -> 1225,
"name" -> "1225~abc!~2 name"))
assert(expected === elements)
}
@Test
def courierKeyedGet1NoRelated(): Unit = {
val response = testEmptyRequestBody(CourierKeyedTestResource.get1(CourierKeyedTestResource.EnrollmentIds.a),
FakeRequest())
RelatedResources.assertRelatedAbsent(response)
val elements = assertElements(response)
val expected = Json.arr(
Json.obj(
"id" -> "1225~abc!~2",
"courseId" -> Json.obj(
"iterationId" -> 2,
"courseId" -> "abc"),
"userId" -> 1225,
"name" -> "1225~abc!~2 name"))
assert(expected === elements)
}
@Test
def courierKeyedGet2(): Unit = {
testEmptyRequestBody(CourierKeyedTestResource.get2(CourierKeyedTestResource.EnrollmentIds.a))
}
@Test
def courierKeyedMultiGet(): Unit = {
val response = testEmptyRequestBody(CourierKeyedTestResource.multiGet(Set(
CourierKeyedTestResource.EnrollmentIds.a, CourierKeyedTestResource.EnrollmentIds.b)))
RelatedResources.assertRelatedPresent(response)
}
@Test
def courierKeyedMultiGetNoRelated(): Unit = {
val response = testEmptyRequestBody(CourierKeyedTestResource.multiGet(Set(
CourierKeyedTestResource.EnrollmentIds.a, CourierKeyedTestResource.EnrollmentIds.b)), FakeRequest())
RelatedResources.assertRelatedAbsent(response)
}
@Test
def courierKeyedCreate1(): Unit = {
testEmptyRequestBody(CourierKeyedTestResource.create1)
}
@Test
def courierKeyedCreate2(): Unit = {
testEmptyRequestBody(CourierKeyedTestResource.create2)
}
@Test
def courierKeyedCreate3(): Unit = {
testEmptyRequestBody(CourierKeyedTestResource.create3)
}
@Test
def courierKeyedDelete1(): Unit = {
testEmptyRequestBody(CourierKeyedTestResource.delete1(CourierKeyedTestResource.EnrollmentIds.b))
}
// serializeCollection must handle Courier models whose underlying DataMaps
// are read-only (both freshly built and copied with SetReadOnly).
@Test
def serializeCollectionCourierModelsTest(): Unit = {
def mkModel(id: String): ExpandedCourse = {
ExpandedCourse(
name = id,
description = s"$id description",
platform = CoursePlatform.NewPlatform,
domains = List(Domain(DomainId(Slug("my-domain")))),
courseQnAs = List(CourseQnA(
question = "How hard?", answer = CmlContentType(dtdId = "myDtd", value = "Very!"))),
instructorIds = List(1L, 2L))
}
val fields = QueryFields(Set("name", "description", "domains"), Map.empty)
val model1 = mkModel("test-course-1")
assert(model1.data().isMadeReadOnly)
assert(model1.domains.data().isMadeReadOnly)
assert(model1.domains.head.data.isMadeReadOnly)
RestActionCategoryEngine2.serializeCollection(
new DataList(),
List(Keyed("test-course-id", model1)),
KeyFormat.stringKeyFormat,
NaptimeSerializer.courierModels,
fields,
Fields(CourierFormats.recordTemplateFormats[ExpandedCourse]))
val model2 = mkModel("test-course-2").copy(model1.data(), DataConversion.SetReadOnly)
RestActionCategoryEngine2.serializeCollection(
new DataList(),
List(Keyed("test-course-id2", model2)),
KeyFormat.stringKeyFormat,
NaptimeSerializer.courierModels,
fields,
Fields(CourierFormats.recordTemplateFormats[ExpandedCourse]))
}
// Walks a course -> instructor -> partner include chain and checks which
// linked resources are serialized for several QueryIncludes shapes,
// including the "_links" debug field.
@Test
def multiHopRelatedIncludes(): Unit = {
val partnersResourceName = ResourceName("partners", 1)
val instructorsResourceName = ResourceName("instructors", 1)
implicit val courseFormat = CourierFormats.recordTemplateFormats[ExpandedCourse]
implicit val instructorFormats = CourierFormats.recordTemplateFormats[Instructor]
implicit val partnerFormats = CourierFormats.recordTemplateFormats[Partner]
implicit val coursesFields = Fields[ExpandedCourse].withRelated("instructorIds" -> instructorsResourceName)
implicit val instructorFields = Fields[Instructor].withRelated("partner" -> partnersResourceName)
implicit val partnerFields = Fields[Partner]
val queryFields = QueryFields(Set("name", "description", "instructorIds"),
Map(instructorsResourceName -> Set("name"), partnersResourceName -> Set("name", "slug")))
val queryIncludes = QueryIncludes(Set("instructorIds"), Map(instructorsResourceName -> Set("partner")))
val course = Keyed("my-course-id", ExpandedCourse(
name = "my best course",
description = "My favorite course",
platform = CoursePlatform.NewPlatform,
domains = List.empty,
courseQnAs = List.empty,
instructorIds = List(3L)))
val instructor = Keyed(3L, Instructor(
name = "Prof Example",
bio = None,
partner = "uuid-abc_123"))
val partner = Keyed("uuid-abc_123", Partner(
name = "School of awesome",
slug = Slug("school-of-awesome")))
val response = Ok(course)
.withRelated(instructorsResourceName, List(instructor))
.withRelated(partnersResourceName, List(partner))
val engine = RestActionCategoryEngine2.getActionCategoryEngine[String, ExpandedCourse]
val wireResponse = engine.mkResult(
request = FakeRequest(),
resourceFields = coursesFields,
requestFields = queryFields,
requestIncludes = queryIncludes,
pagination = RequestPagination(limit = 10, start = None, isDefault = true),
response = response)
val content: JsValue = Helpers.contentAsJson(Future.successful(wireResponse))
val expected = Json.obj(
"elements" -> Json.arr(
Json.obj(
"id" -> "my-course-id",
"name" -> "my best course",
"description" -> "My favorite course",
"instructorIds" -> Json.arr(3L))),
"paging" -> Json.obj(),
"linked" -> Json.obj(
"partners.v1" -> Json.arr(
Json.obj(
"id" -> "uuid-abc_123",
"name" -> "School of awesome",
"slug" -> "school-of-awesome")),
"instructors.v1" -> Json.arr(
Json.obj(
"id" -> 3L,
"name" -> "Prof Example"))))
assert(expected === content)
val wireResponse2 = engine.mkResult(
request = FakeRequest(),
resourceFields = coursesFields,
requestFields = queryFields,
requestIncludes = queryIncludes.copy(resources = Map.empty),
pagination = RequestPagination(limit = 10, start = None, isDefault = true),
response = response)
val content2: JsValue = Helpers.contentAsJson(Future.successful(wireResponse2))
val expected2 = Json.obj(
"elements" -> Json.arr(
Json.obj(
"id" -> "my-course-id",
"name" -> "my best course",
"description" -> "My favorite course",
"instructorIds" -> Json.arr(3L))),
"paging" -> Json.obj(),
"linked" -> Json.obj(
"instructors.v1" -> Json.arr(
Json.obj(
"id" -> 3L,
"name" -> "Prof Example"))))
assert(expected2 === content2)
val wireResponse3 = engine.mkResult(
request = FakeRequest(),
resourceFields = coursesFields,
requestFields = queryFields,
requestIncludes = QueryIncludes(Set.empty, Map.empty),
pagination = RequestPagination(limit = 10, start = None, isDefault = true),
response = response)
val content3: JsValue = Helpers.contentAsJson(Future.successful(wireResponse3))
val expected3 = Json.obj(
"elements" -> Json.arr(
Json.obj(
"id" -> "my-course-id",
"name" -> "my best course",
"description" -> "My favorite course",
"instructorIds" -> Json.arr(3L))),
"paging" -> Json.obj(),
"linked" -> Json.obj())
assert(expected3 === content3)
val wireResponse4 = engine.mkResult(
request = FakeRequest(),
resourceFields = coursesFields,
requestFields = queryFields,
requestIncludes = queryIncludes.copy(fields = queryIncludes.fields + "_links"),
pagination = RequestPagination(limit = 10, start = None, isDefault = true),
response = response)
val content4: JsValue = Helpers.contentAsJson(Future.successful(wireResponse4))
val expected4 = Json.obj(
"elements" -> Json.arr(
Json.obj(
"id" -> "my-course-id",
"name" -> "my best course",
"description" -> "My favorite course",
"instructorIds" -> Json.arr(3L))),
"paging" -> Json.obj(),
"linked" -> Json.obj(
"partners.v1" -> Json.arr(
Json.obj(
"id" -> "uuid-abc_123",
"name" -> "School of awesome",
"slug" -> "school-of-awesome")),
"instructors.v1" -> Json.arr(
Json.obj(
"id" -> 3L,
"name" -> "Prof Example"))),
"links" -> Json.obj(
"elements" -> Json.obj(
"instructorIds" -> "instructors.v1"),
"instructors.v1" -> Json.obj(
"partner" -> "partners.v1"),
"partners.v1" -> Json.obj()))
assert(expected4 === content4)
}
// Pagination facets must serialize under paging.facets with entries and
// fieldCardinality.
@Test
def serializeFacetsCorrectly(): Unit = {
implicit val courseFormat = CourierFormats.recordTemplateFormats[Course]
implicit val coursesFields = Fields[Course]
val engine = RestActionCategoryEngine2.finderActionCategoryEngine[String, Course]
val response = Ok(List(
Keyed("abc", Course("course 101", "101 course description")),
Keyed("zyx", Course("course 999", "999 course description")))).withPagination(
next = None,
total = Some(2L),
facets = Some(Map(
"languages" -> FacetField(
facetEntries = List(
FacetFieldValue("en", Some("English"), 2),
FacetFieldValue("fr", Some("French"), 0)),
fieldCardinality = Some(23)))))
val wireResponse = engine.mkResult(
request = FakeRequest(),
resourceFields = coursesFields,
requestFields = QueryFields(Set("name"), Map.empty),
requestIncludes = QueryIncludes.empty,
pagination = RequestPagination(limit = 10, start = None, isDefault = true),
response = response)
val content: JsValue = Helpers.contentAsJson(Future.successful(wireResponse))
val expected = Json.obj(
"elements" -> Json.arr(
Json.obj(
"id" -> "abc",
"name" -> "course 101"),
Json.obj(
"id" -> "zyx",
"name" -> "course 999")),
"paging" -> Json.obj(
"total" -> 2,
"facets" -> Json.obj(
"languages" -> Json.obj(
"facetEntries" -> Json.arr(
Json.obj(
"id" -> "en",
"name" -> "English",
"count" -> 2),
Json.obj(
"id" -> "fr",
"name" -> "French",
"count" -> 0)),
"fieldCardinality" -> 23))),
"linked" -> Json.obj())
assert(expected === content)
}
// Test helpers here and below.
// `fields` query value requesting the "name" field of both related resources.
private[this] val fieldsQueryParam = s"${RelatedResources.CaseClass.relatedName.identifier}(name)," +
s"${RelatedResources.Courier.relatedName.identifier}(name)"
// Default request: asks for both related resources to be included.
private[this] val standardFakeRequest =
FakeRequest("GET", s"/?includes=relatedCaseClass,relatedCourier&fields=$fieldsQueryParam")
// Runs the action with an empty request body and validates the wire response.
private[this] def testEmptyRequestBody(
actionToTest: RestAction[_, _, AnyContent, _, _, _],
request: FakeRequest[AnyContentAsEmpty.type] = standardFakeRequest,
strictMode: Boolean = false): Result = {
val result = runTestRequest(actionToTest, request)
Validators.assertValidResponse(result, strictMode = strictMode)
result
}
// Feeds the body enumerator into the action's iteratee and awaits the result.
private[this] def runTestRequestInternal[BodyType](
restAction: RestAction[_, _, BodyType, _, _, _],
request: RequestHeader,
body: Enumerator[Array[Byte]] = Enumerator.empty): Result = {
val iteratee = restAction.apply(request)
val resultFut = body.run(iteratee)
resultFut.futureValue
}
// Serializes the fake request body via its Writeable, setting Content-Type
// when the Writeable declares one.
private[this] def runTestRequest[BodyType](restAction: RestAction[_, _, BodyType, _, _, _],
fakeRequest: FakeRequest[BodyType])(
implicit writeable: Writeable[BodyType]): Result = {
val requestWithHeader = writeable.contentType.map { ct =>
fakeRequest.withHeaders(HeaderNames.CONTENT_TYPE -> ct)
}.getOrElse(fakeRequest)
val b = Enumerator(fakeRequest.body).through(writeable.toEnumeratee)
runTestRequestInternal(restAction, requestWithHeader, b)
}
// Overload for requests with no body.
private[this] def runTestRequest(restAction: RestAction[_, _, AnyContent, _, _, _],
fakeRequest: FakeRequest[AnyContentAsEmpty.type]): Result = {
runTestRequestInternal(restAction, fakeRequest)
}
// Extracts and returns the "elements" array from a JSON response body.
private[this] def assertElements(response: Result): JsArray = {
val bodyContent = Helpers.contentAsJson(Future.successful(response))
assert(bodyContent.isInstanceOf[JsObject])
val json = bodyContent.asInstanceOf[JsObject]
val elements = (json \ "elements").validate[JsArray]
assert(elements.isSuccess, s"Elements was not a JsArray: $elements. $bodyContent")
elements.get
}
}
| vkuo-coursera/naptime | naptime/src/test/scala/org/coursera/naptime/actions/RestActionCategoryEngine2Test.scala | Scala | apache-2.0 | 30,094 |
package io.getquill.context.sql.idiom
import io.getquill._
import io.getquill.idiom.StringToken
// Verifies SQL Server-specific SQL rendering: `+` for string concatenation,
// TOP for limits, 1=1/1=0 for boolean literals, and OFFSET/FETCH paging,
// which this dialect only allows together with an ORDER BY clause.
class SQLServerDialectSpec extends Spec {
"emptySetContainsToken" in {
// Membership in an empty set renders as an always-false predicate.
SQLServerDialect.emptySetContainsToken(StringToken("w/e")) mustBe StringToken("1 <> 1")
}
val ctx = new SqlMirrorContext(SQLServerDialect, Literal) with TestEntities
import ctx._
"uses + instead of ||" in {
val q = quote {
qr1.map(t => t.s + t.s)
}
ctx.run(q).string mustEqual
"SELECT t.s + t.s FROM TestEntity t"
}
"top" in {
val q = quote {
qr1.take(15).map(t => t.i)
}
ctx.run(q).string mustEqual
"SELECT TOP 15 t.i FROM TestEntity t"
}
"literal booleans" - {
"uses 1=1 instead of true" in {
ctx.run(qr4.filter(t => true)).string mustEqual
"SELECT t.i FROM TestEntity4 t WHERE 1=1"
}
"uses 1=0 instead of false" in {
ctx.run(qr4.filter(t => false)).string mustEqual
"SELECT t.i FROM TestEntity4 t WHERE 1=0"
}
}
"offset/fetch" - {
val withOrd = quote {
qr1.sortBy(t => t.i)(Ord.desc).map(_.s)
}
// Helpers appending paging operators to an arbitrary quoted query.
def offset[T](q: Quoted[Query[T]]) = quote(q.drop(1))
def offsetFetch[T](q: Quoted[Query[T]]) = quote(q.drop(2).take(3))
"offset" in {
ctx.run(offset(withOrd)).string mustEqual
"SELECT t.s FROM TestEntity t ORDER BY t.i DESC OFFSET 1 ROWS"
}
"offset with fetch " in {
ctx.run(offsetFetch(withOrd)).string mustEqual
"SELECT t.s FROM TestEntity t ORDER BY t.i DESC OFFSET 2 ROWS FETCH FIRST 3 ROWS ONLY"
}
"fail without ordering" in {
// Paging a query that has no ORDER BY must be rejected at query build time.
intercept[IllegalStateException] {
ctx.run(offset(qr1))
}.getMessage mustEqual "SQLServer does not support OFFSET without ORDER BY"
intercept[IllegalStateException] {
ctx.run(offsetFetch(qr1))
}.getMessage mustEqual "SQLServer does not support OFFSET without ORDER BY"
}
}
}
| mentegy/quill | quill-sql/src/test/scala/io/getquill/context/sql/idiom/SQLServerDialectSpec.scala | Scala | apache-2.0 | 1,922 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package typechecker
import scala.collection.mutable.ListBuffer
import scala.reflect.internal.util.FreshNameCreator
import symtab.Flags._
/** Eta-expansion of partial method applications into function literals.
 *
 *  @author Martin Odersky
 */
trait EtaExpansion { self: Analyzer =>
  import global._

  /** Expand partial method application `p.f(es_1)...(es_n)`. Does not support dependent method types (yet).
   *
   *  We expand this to the following block, which evaluates
   *  the target of the application and its supplied arguments if needed (they are not stable),
   *  and then wraps a Function that abstracts over the missing arguments.
   *
   *  {{{
   *    {
   *      private synthetic val eta\$f   = p.f   // if p is not stable
   *      ...
   *      private synthetic val eta\$e_i = e_i    // if e_i is not stable
   *      ...
   *      (ps_1 => ... => ps_m => eta\$f([es_1])...([es_m])(ps_1)...(ps_m))
   *    }
   *  }}}
   *
   *  This is called from typedEtaExpansion, which itself is called from
   *    - instantiateToMethodType (for a naked method reference), or
   *    - typedEta (when type checking a method value, `m _`).
   *
   **/
  def etaExpand(tree: Tree, owner: Symbol)(implicit creator: FreshNameCreator): Tree = {
    val tpe = tree.tpe
    var cnt = 0 // for NoPosition
    // Fresh, positionless name for each lifted-out value definition.
    def freshName() = {
      cnt += 1
      freshTermName("eta$" + (cnt - 1) + "$")
    }
    // Accumulates the synthetic ValDefs that are prepended in a Block around the expansion.
    val defs = new ListBuffer[Tree]

    /* Append to `defs` value definitions for all non-stable
     * subexpressions of the function application `tree`.
     */
    def liftoutPrefix(tree: Tree): Tree = {
      // Lift a single subexpression out into a synthetic val (unless it is already
      // safe to inline) and return the reference that replaces it in the expansion.
      def liftout(tree: Tree, byName: Boolean): Tree =
        if (treeInfo.isExprSafeToInline(tree)) tree
        else {
          val vname: Name = freshName()
          // Problem with ticket #2351 here
          val valSym = owner.newValue(vname.toTermName, tree.pos.focus, SYNTHETIC)
          defs += atPos(tree.pos) {
            val rhs = if (byName) {
              // By-name arguments must not be evaluated at lift-out time, so wrap
              // them in a nullary anonymous function; they are re-evaluated per use.
              val funSym = valSym.newAnonymousFunctionValue(tree.pos.focus)
              val tree1 = tree.changeOwner(owner -> funSym)
              val funType = definitions.functionType(Nil, tree1.tpe)
              funSym.setInfo(funType)
              Function(List(), tree1).setSymbol(funSym).setType(funType)
            } else {
              tree.changeOwner(owner -> valSym)
            }
            valSym.setInfo(rhs.tpe)
            ValDef(valSym, rhs)
          }
          atPos(tree.pos.focus) {
            // A by-name lift-out is referenced by applying its wrapper function.
            if (byName) Apply(Ident(valSym), List()) else Ident(valSym)
          }
        }
      val tree1 = tree match {
        // a partial application using named arguments has the following form:
        // { val qual$1 = qual
        //   val x$1 = arg1
        //   [...]
        //   val x$n = argn
        //   qual$1.fun(x$1, ..)..(.., x$n) }
        // Eta-expansion has to be performed on `fun`
        case Block(stats, fun) =>
          defs ++= stats
          liftoutPrefix(fun)
        case Apply(fn, args) =>
          val byName: Int => Option[Boolean] = fn.tpe.params.map(p => definitions.isByNameParamType(p.tpe)).lift
          val liftedFn = liftoutPrefix(fn) // scala/bug#11465: lift fn before args
          val newArgs = mapWithIndex(args) { (arg, i) =>
            // with repeated params, there might be more or fewer args than params
            liftout(arg, byName(i).getOrElse(false))
          }
          treeCopy.Apply(tree, liftedFn, newArgs).clearType()
        case TypeApply(fn, args) =>
          treeCopy.TypeApply(tree, liftoutPrefix(fn), args).clearType()
        case Select(qual, name) =>
          val name = tree.symbol.name // account for renamed imports, scala/bug#7233
          treeCopy.Select(tree, liftout(qual, byName = false), name).clearType() setSymbol NoSymbol
        case Ident(name) =>
          tree
        case x => throw new MatchError(x)
      }
      if (tree1 ne tree) tree1 setPos tree1.pos.makeTransparent
      tree1
    }

    /* Eta-expand lifted tree: wrap one Function per (non-implicit) parameter list. */
    def expand(tree: Tree, tpe: Type): Tree = tpe match {
      case mt @ MethodType(paramSyms, restpe) if !mt.isImplicit =>
        val params: List[(ValDef, Boolean)] = paramSyms.map {
          sym =>
            val origTpe = sym.tpe
            val isRepeated = definitions.isRepeatedParamType(origTpe)
            // scala/bug#4176 Don't leak A* in eta-expanded function types. See t4176b.scala
            val droppedStarTpe = dropIllegalStarTypes(origTpe)
            val valDef = ValDef(Modifiers(SYNTHETIC | PARAM), sym.name.toTermName, TypeTree(droppedStarTpe), EmptyTree)
            (valDef, isRepeated)
        }
        atPos(tree.pos.makeTransparent) {
          val args = params.map {
            case (valDef, isRepeated) => gen.paramToArg(Ident(valDef.name), isRepeated)
          }
          Function(params.map(_._1), expand(Apply(tree, args), restpe))
        }
      case _ =>
        tree
    }

    val tree1 = liftoutPrefix(tree)
    val expansion = expand(tree1, tpe)
    if (defs.isEmpty) expansion
    else atPos(tree.pos)(Block(defs.toList, expansion))
  }
}
| scala/scala | src/compiler/scala/tools/nsc/typechecker/EtaExpansion.scala | Scala | apache-2.0 | 5,358 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers._
class AlerterSpec extends FunSpec {

  /** Runs `suite` with a recording reporter and asserts that exactly one
   *  AlertProvided event was fired carrying `expectedMessage` and `expectedPayload`.
   *  Extracted to remove the run/assert scaffolding duplicated by both tests.
   */
  private def assertSingleAlert(suite: Suite, expectedMessage: String, expectedPayload: Option[Any]): Unit = {
    val rep = new EventRecordingReporter()
    suite.run(None, Args(rep))
    val alertProvidedEvents = rep.alertProvidedEventsReceived
    assert(alertProvidedEvents.length === 1)
    assert(alertProvidedEvents(0).message === expectedMessage)
    assert(alertProvidedEvents(0).payload === expectedPayload)
  }

  describe("An Alerter") {
    it("should fire AlertProvided event with correct message and None in payload when using apply(message)") {
      class MySuite extends FunSuite {
        alert("alert message")
      }
      assertSingleAlert(new MySuite(), "alert message", None)
    }
    it("should fire AlertProvided event with correct message and payload when using apply(message, payload)") {
      class MySuite extends FunSuite {
        alert("alert message", Some("a payload"))
      }
      assertSingleAlert(new MySuite(), "alert message", Some("a payload"))
    }
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/AlerterSpec.scala | Scala | apache-2.0 | 1,821 |
package blended.itestsupport
import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicInteger
import scala.jdk.CollectionConverters._
import scala.util.control.NonFatal
import com.github.dockerjava.api.model.PortBinding
import com.typesafe.config.Config
object NamedContainerPort {

  /** Highest valid TCP/UDP port number. */
  private[this] val MaxPort: Int = 65535

  // Next candidate for automatically assigned public ports; shared across lookups.
  private[this] val portCount = new AtomicInteger(32768)

  /** Returns the next port >= 32768 that can currently be bound on this host.
   *
   *  Bug fix: the previous version looped unconditionally, so once the counter
   *  passed 65535 every `new ServerSocket(p)` threw IllegalArgumentException
   *  (swallowed by NonFatal), producing an infinite loop. The search is now
   *  bounded and fails loudly when the port range is exhausted.
   *
   *  @throws IllegalStateException if no free port exists below 65536
   */
  private[this] def nextFreePort(): Int = {
    // A port is free if a server socket can be bound (and immediately released) on it.
    def isFree(p: Int): Boolean = {
      try {
        val socket: ServerSocket = new ServerSocket(p)
        socket.close()
        true
      } catch {
        case NonFatal(_) => false
      }
    }
    var result = portCount.getAndIncrement()
    while (result <= MaxPort && !isFree(result)) result = portCount.getAndIncrement()
    if (result > MaxPort)
      throw new IllegalStateException(s"No free port available at or below $MaxPort")
    result
  }

  /** Builds a NamedContainerPort from config; assigns a free public port when `public` is absent. */
  def apply(config: Config): NamedContainerPort = {
    val privatePort = config.getInt("private")
    val publicPort =
      if (config.hasPath("public")) config.getInt("public")
      else nextFreePort()
    NamedContainerPort(config.getString("name"), privatePort, publicPort)
  }
}
/** A named mapping between a container-internal port and a host-side (public) port. */
case class NamedContainerPort(
  name: String,
  privatePort: Int,
  publicPort: Int
) {
  /** Docker binding exposing the container's private port on the host's public port. */
  def binding: PortBinding = PortBinding.parse(List(publicPort, privatePort).mkString(":"))
}
object VolumeConfig {
  /** Builds a volume mapping from a config block with `host` and `container` keys. */
  def apply(config: Config): VolumeConfig = {
    val hostDir = config.getString("host")
    val containerDir = config.getString("container")
    VolumeConfig(hostDir, containerDir)
  }
}
// Host/container directory pair for a docker volume mapping.
case class VolumeConfig(
  hostDirectory : String,
  containerDirectory : String
)
object ContainerLink {
  /** Builds a container link from a config block with `container` and `hostname` keys. */
  def apply(config: Config): ContainerLink = {
    val containerName = config.getString("container")
    val linkedHostname = config.getString("hostname")
    ContainerLink(containerName, linkedHostname)
  }
}
// Pairs a container name with the hostname it is linked as.
case class ContainerLink(
  container : String,
  hostname : String
)
object ContainerUnderTest {

  /** Parses every container definition under `docker.containers`, keyed by container name. */
  def containerMap(config: Config): Map[String, ContainerUnderTest] = config.getConfigList("docker.containers").asScala.map { cfg =>
    ContainerUnderTest(cfg)
  }.toList.map(ct => (ct.ctName, ct)).toMap

  /** Reads an optional list of sub-configs at `path`, mapping each entry with `f`.
   *  Factors out the hasPath/getConfigList pattern previously triplicated in `apply`.
   */
  private def optionalList[A](config: Config, path: String)(f: Config => A): List[A] =
    if (config.hasPath(path))
      config.getConfigList(path).asScala.map(f).toList
    else
      List.empty

  /** Builds a ContainerUnderTest from a single container config block. */
  def apply(config: Config): ContainerUnderTest = {
    val volumes: List[VolumeConfig] = optionalList(config, "volumes")(cfg => VolumeConfig(cfg))
    val ports: List[NamedContainerPort] = optionalList(config, "ports")(cfg => NamedContainerPort(cfg))
    val links: List[ContainerLink] = optionalList(config, "links")(cfg => ContainerLink(cfg))

    val ctName = config.getString("name")

    // Docker container names must be unique, hence the timestamp suffix fallback.
    val dockerName: String =
      if (config.hasPath("dockerName")) config.getString("dockerName")
      else s"${ctName}_${System.currentTimeMillis}"

    val env: Map[String, String] = if (config.hasPath("env")) {
      val envCfg = config.getConfig("env") // resolve once instead of once per entry
      envCfg.entrySet().asScala.map { entry =>
        (entry.getKey(), envCfg.getString(entry.getKey()))
      }.toMap
    } else Map.empty

    ContainerUnderTest(
      ctName = ctName, // reuse instead of re-reading "name" from the config
      imgPattern = config.getString("image"),
      imgId = dockerName,
      dockerName = dockerName,
      volumes = volumes,
      links = links,
      ports = ports.map(p => (p.name, p)).toMap,
      env = env
    )
  }
}
/** Runtime description of a docker container used in integration tests. */
case class ContainerUnderTest(
  ctName : String,
  imgPattern : String,
  imgId : String,
  dockerName : String,
  volumes : List[VolumeConfig] = List.empty,
  links : List[ContainerLink] = List.empty,
  ports : Map[String, NamedContainerPort] = Map.empty,
  env : Map[String, String] = Map.empty
) {

  /** Protocol used by [[url]] when no explicit protocol is passed. */
  val DEFAULT_PROTOCOL = "tcp"

  /** Public (host-side) port of the named mapping; 65000 when the name is unknown. */
  def port(portName: String): Int =
    ports.get(portName).fold(65000)(_.publicPort)

  /** Connection URL for the named port, optionally with user / password credentials. */
  def url(
    portName: String,
    host: String = "127.0.0.1",
    protocol: String = DEFAULT_PROTOCOL,
    user: Option[String] = None,
    pwd: Option[String] = None
  ): String = {
    // A password without a user is ignored, matching the credential rules below.
    val credentials = user match {
      case None    => ""
      case Some(u) => pwd.map(p => s"$u:$p@").getOrElse(s"$u@")
    }
    s"$protocol://$credentials$host:${port(portName)}"
  }
}
| woq-blended/blended | blended.itestsupport/src/main/scala/blended/itestsupport/ContainerUnderTest.scala | Scala | apache-2.0 | 4,199 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.rice.algorithms
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{ Exon, ReferenceRegion, Transcript }
import org.bdgenomics.rice.utils.riceFunSuite
import org.bdgenomics.rice.utils.{ ReadGenerator, TranscriptGenerator }
import scala.collection.Map
import scala.collection.immutable.HashMap
import scala.math.abs
import org.bdgenomics.adam.util.ReferenceFile
import org.bdgenomics.adam.util.{ TwoBitFile }
import org.bdgenomics.utils.io.{ ByteAccess, ByteArrayByteAccess }
// Minimal ReferenceFile stub backed by a fixed sequence; the ByteAccess contents are ignored.
class TestingTwoBitFile(byteAccess: ByteAccess) extends ReferenceFile with Serializable {
  // Fixed test sequence, length 20 (the previous comment claimed 24, which was wrong).
  val testSeq = "CAATCCTTCGCCGCAGTGCA"
  // Returns testSeq[start, end); the region's contig name is ignored.
  override def extract(region: ReferenceRegion): String = {
    testSeq.substring(region.start.toInt, region.end.toInt)
  }
}
class QuantifySuite extends riceFunSuite {

  /** Floating point comparison within tolerance `eps`; logs the delta on mismatch. */
  def fpEquals(a: Double, b: Double, eps: Double = 1e-6): Boolean = {
    val withinTolerance = abs(a - b) <= eps
    if (!withinTolerance) {
      println("|" + a + " - " + b + "| = " + abs(a - b) + "> " + eps)
    }
    withinTolerance
  }
sparkTest("test of mapKmersToClasses") {
val kmerToEquivalenceClass: RDD[(String, Long)] = sc.parallelize(Seq(("a", 2),
("b", 3),
("c", 2),
("d", 1),
("e", 3)))
val kmerCounts: RDD[(String, Long)] = sc.parallelize(Seq(("d", 80), ("a", 25), ("c", 35), ("b", 37), ("e", 38)))
val classCounts: RDD[(Long, Long)] = Quantify.mapKmersToClasses(kmerCounts, kmerToEquivalenceClass)
assert(classCounts.count() === 3)
assert(classCounts.filter((x: (Long, Long)) => x._1 == 1).first() === (1, 80))
assert(classCounts.filter((x: (Long, Long)) => x._1 == 2).first() === (2, 60))
assert(classCounts.filter((x: (Long, Long)) => x._1 == 3).first() === (3, 75))
}
sparkTest("test of initializeEM") {
val equivalenceClassCounts: RDD[(Long, Long)] = sc.parallelize(
Seq((1, 45),
(2, 52),
(3, 49)))
val equivalenceClassToTranscript: RDD[(Long, Iterable[String])] = sc.parallelize(
Seq((2, Seq("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m")),
(3, Seq("a", "b", "c", "d", "e", "f", "g")),
(1, Seq("a", "b", "c", "d", "e"))))
val result: RDD[(Long, Iterable[(String, Double)])] = Quantify.initializeEM(equivalenceClassCounts, equivalenceClassToTranscript)
assert(result.count() === 3)
val ec1p: RDD[(Long, Iterable[(String, Double)])] = result.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 1)
assert(ec1p.count() === 1)
val ec1: (Long, Iterable[(String, Double)]) = ec1p.first()
assert(ec1._1 === 1)
assert(ec1._2.size === 5)
assert(ec1._2.forall((x: (String, Double)) => {
fpEquals(x._2, 9.0)
}))
val ec2p: RDD[(Long, Iterable[(String, Double)])] = result.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 2)
assert(ec2p.count() === 1)
val ec2: (Long, Iterable[(String, Double)]) = ec2p.first()
assert(ec2._1 === 2)
assert(ec2._2.size === 13)
assert(ec2._2.forall((x: (String, Double)) => {
fpEquals(x._2, 4.0)
}))
val ec3p: RDD[(Long, Iterable[(String, Double)])] = result.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 3)
assert(ec3p.count() === 1)
val ec3: (Long, Iterable[(String, Double)]) = ec3p.first()
assert(ec3._1 === 3)
assert(ec3._2.size === 7)
assert(ec3._2.forall((x: (String, Double)) => {
fpEquals(x._2, 7.0)
}))
}
sparkTest("test of e") {
// a, b, c, d are transcript names
// 1, 2, 3, 4, 5, 6, 7 are equivalence class IDs
val transcriptWeights: RDD[(String, Double, Iterable[Long])] = sc.parallelize(Seq(
("a", 2.0, Seq(1, 3, 5, 6)),
("b", 3.0, Seq(2, 4, 5)),
("c", 4.0, Seq(1, 2, 5, 6, 7)),
("d", 5.0, Seq(1, 2, 3))))
val equivalenceClassAssignments: RDD[(Long, Iterable[(String, Double)])] = Quantify.e(transcriptWeights)
assert(equivalenceClassAssignments.count() === 7)
// variables to keep track of whether each transcript name appears exactly once, if it should appear at all
var seen_a: Boolean = true
var seen_b: Boolean = true
var seen_c: Boolean = true
var seen_d: Boolean = true
// tests each record of the equivalenceClassAssignments RDD for correctness
val record1RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 1)
assert(record1RDD.count() === 1)
val record1: (Long, Iterable[(String, Double)]) = record1RDD.first()
assert(record1._1 === 1)
assert(record1._2.toSeq.length === 3)
seen_a = false
seen_c = false
seen_d = false
record1._2.foreach((x: (String, Double)) => {
if (x._1 == "a") {
assert((!seen_a) && equalDouble(x._2, 2.0 / 11))
seen_a = true
} else if (x._1 == "c") {
assert((!seen_c) && equalDouble(x._2, 4.0 / 11))
seen_c = true
} else if (x._1 == "d") {
assert((!seen_d) && equalDouble(x._2, 5.0 / 11))
seen_d = true
} else {
assert(false)
}
})
val record2RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 2)
assert(record2RDD.count() === 1)
val record2: (Long, Iterable[(String, Double)]) = record2RDD.first()
assert(record2._1 === 2)
assert(record2._2.toSeq.length === 3)
seen_b = false
seen_c = false
seen_d = false
record2._2.foreach((x: (String, Double)) => {
if (x._1 == "b") {
assert((!seen_b) && equalDouble(x._2, 0.25))
seen_b = true
} else if (x._1 == "c") {
assert((!seen_c) && equalDouble(x._2, 1.0 / 3))
seen_c = true
} else if (x._1 == "d") {
assert((!seen_d) && equalDouble(x._2, 5.0 / 12))
seen_d = true
} else {
assert(false)
}
})
val record3RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 3)
assert(record3RDD.count() === 1)
val record3: (Long, Iterable[(String, Double)]) = record3RDD.first()
assert(record3._1 === 3)
assert(record3._2.toSeq.length === 2)
seen_a = false
seen_d = false
record3._2.foreach((x: (String, Double)) => {
if (x._1 == "a") {
assert((!seen_a) && equalDouble(x._2, 2.0 / 7))
seen_a = true
} else if (x._1 == "d") {
assert((!seen_d) && equalDouble(x._2, 5.0 / 7))
seen_d = true
} else {
assert(false)
}
})
val record4RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 4)
assert(record4RDD.count() === 1)
val record4: (Long, Iterable[(String, Double)]) = record4RDD.first()
assert(record4._1 === 4)
assert(record4._2.toSeq.length === 1)
assert(record4._2.head._1 === "b")
assert(equalDouble(record4._2.head._2, 1.0))
val record5RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 5)
assert(record5RDD.count() === 1)
val record5: (Long, Iterable[(String, Double)]) = record5RDD.first()
assert(record5._1 === 5)
assert(record5._2.toSeq.length === 3)
seen_a = false
seen_b = false
seen_c = false
record5._2.foreach((x: (String, Double)) => {
if (x._1 == "a") {
assert((!seen_a) && equalDouble(x._2, 2.0 / 9))
seen_a = true
} else if (x._1 == "b") {
assert((!seen_b) && equalDouble(x._2, 1.0 / 3))
seen_b = true
} else if (x._1 == "c") {
assert((!seen_c) && equalDouble(x._2, 4.0 / 9))
seen_c = true
} else {
assert(false)
}
})
val record6RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 6)
assert(record6RDD.count() === 1)
val record6: (Long, Iterable[(String, Double)]) = record6RDD.first()
assert(record6._1 === 6)
assert(record6._2.toSeq.length === 2)
seen_a = false
seen_c = false
record6._2.foreach((x: (String, Double)) => {
if (x._1 == "a") {
assert((!seen_a) && equalDouble(x._2, 1.0 / 3))
seen_a = true
} else if (x._1 == "c") {
assert((!seen_c) && equalDouble(x._2, 2.0 / 3))
seen_c = true
} else {
assert(false)
}
})
val record7RDD: RDD[(Long, Iterable[(String, Double)])] = equivalenceClassAssignments.filter((x: (Long, Iterable[(String, Double)])) => x._1 == 7)
assert(record7RDD.count() === 1)
val record7: (Long, Iterable[(String, Double)]) = record7RDD.first()
assert(record7._1 === 7)
assert(record7._2.toSeq.length === 1)
assert(record7._2.head._1 === "c")
assert(equalDouble(record7._2.head._2, 1.0))
}
sparkTest("test of m") {
val equivalenceClassAssignments: RDD[(Long, Iterable[(String, Double)])] = sc.parallelize(Seq(
(1, Seq(("a", 0.6), ("c", 0.4))),
(2, Seq(("b", 0.1), ("d", 0.5), ("a", 0.4))),
(3, Seq(("a", 1.0))),
(4, Seq(("c", 0.7), ("a", 0.3)))))
val tLen: Map[String, Long] = (new HashMap()).+(
("a", 5),
("b", 6),
("c", 7),
("d", 3))
val relNumKmersInEC: Map[Long, Double] = (new HashMap()).+(
(1, 0.25),
(2, 0.25),
(3, 0.25),
(4, 0.25))
val transcriptWeights: RDD[(String, Double, Iterable[Long])] = Quantify.m(equivalenceClassAssignments, tLen, 3, relNumKmersInEC)
assert(transcriptWeights.count() === 4)
val a_record_RDD: RDD[(String, Double, Iterable[Long])] = transcriptWeights.filter((x: (String, Double, Iterable[Long])) => {
x._1 == "a"
})
assert(a_record_RDD.count() === 1)
val a_record: (String, Double, Iterable[Long]) = a_record_RDD.first()
assert(a_record._1 === "a")
assert(equalDouble(a_record._2, 460.0 / 907))
val a_record_eq_cls: Seq[Long] = a_record._3.toSeq
assert(a_record_eq_cls.length === 4)
assert(a_record_eq_cls.filter((x: Long) => x == 1).length === 1)
assert(a_record_eq_cls.filter((x: Long) => x == 2).length === 1)
assert(a_record_eq_cls.filter((x: Long) => x == 3).length === 1)
assert(a_record_eq_cls.filter((x: Long) => x == 4).length === 1)
val b_record_RDD: RDD[(String, Double, Iterable[Long])] = transcriptWeights.filter((x: (String, Double, Iterable[Long])) => {
x._1 == "b"
})
assert(b_record_RDD.count() === 1)
val b_record: (String, Double, Iterable[Long]) = b_record_RDD.first()
assert(b_record._1 === "b")
assert(equalDouble(b_record._2, 15.0 / 907))
val b_record_eq_cls: Seq[Long] = b_record._3.toSeq
assert(b_record_eq_cls.length === 1)
assert(b_record_eq_cls.filter((x: Long) => x == 1).length === 0)
assert(b_record_eq_cls.filter((x: Long) => x == 2).length === 1)
assert(b_record_eq_cls.filter((x: Long) => x == 3).length === 0)
assert(b_record_eq_cls.filter((x: Long) => x == 4).length === 0)
val c_record_RDD: RDD[(String, Double, Iterable[Long])] = transcriptWeights.filter((x: (String, Double, Iterable[Long])) => {
x._1 == "c"
})
assert(c_record_RDD.count() === 1)
val c_record: (String, Double, Iterable[Long]) = c_record_RDD.first()
assert(c_record._1 === "c")
assert(equalDouble(c_record._2, 132.0 / 907))
val c_record_eq_cls: Seq[Long] = c_record._3.toSeq
assert(c_record_eq_cls.length === 2)
assert(c_record_eq_cls.filter((x: Long) => x == 1).length === 1)
assert(c_record_eq_cls.filter((x: Long) => x == 2).length === 0)
assert(c_record_eq_cls.filter((x: Long) => x == 3).length === 0)
assert(c_record_eq_cls.filter((x: Long) => x == 4).length === 1)
val d_record_RDD: RDD[(String, Double, Iterable[Long])] = transcriptWeights.filter((x: (String, Double, Iterable[Long])) => {
x._1 == "d"
})
assert(d_record_RDD.count() === 1)
val d_record: (String, Double, Iterable[Long]) = d_record_RDD.first()
assert(d_record._1 === "d")
assert(equalDouble(d_record._2, 300.0 / 907))
val d_record_eq_cls: Seq[Long] = d_record._3.toSeq
assert(d_record_eq_cls.length === 1)
assert(d_record_eq_cls.filter((x: Long) => x == 1).length === 0)
assert(d_record_eq_cls.filter((x: Long) => x == 2).length === 1)
assert(d_record_eq_cls.filter((x: Long) => x == 3).length === 0)
assert(d_record_eq_cls.filter((x: Long) => x == 4).length === 0)
}
def equalDouble(a: Double, b: Double): Boolean = {
math.abs(a - b) < 1e-3
}
sparkTest("extract lengths from transcripts") {
val exons1 = Iterable(Exon("e1", "t1", true, ReferenceRegion("1", 0L, 101L)),
Exon("e2", "t1", true, ReferenceRegion("1", 200L, 401L)),
Exon("e3", "t1", true, ReferenceRegion("1", 500L, 576L)))
val exons2 = Iterable(Exon("e1", "t2", false, ReferenceRegion("1", 600L, 651L)),
Exon("e2", "t2", false, ReferenceRegion("1", 200L, 401L)),
Exon("e3", "t2", false, ReferenceRegion("1", 125L, 176L)),
Exon("e4", "t2", false, ReferenceRegion("1", 25L, 76L)))
val transcripts = Seq(Transcript("t1", Seq("t1"), "g1", true, exons1, Iterable(), Iterable()),
Transcript("t2", Seq("t2"), "g1", false, exons2, Iterable(), Iterable()))
val rdd = sc.parallelize(transcripts)
val lengths = Quantify.extractTranscriptLengths(rdd)
assert(lengths.size === 2)
assert(lengths("t1") === 375L)
assert(lengths("t2") === 350L)
}
def dummyTranscript(id: String): Transcript = {
return new Transcript(id,
Seq("test"),
"Gene1",
true,
Iterable(),
Iterable(),
Iterable())
}
test("dummy transcript correctly initialized") {
var t = dummyTranscript("t1")
assert(t.id == "t1")
assert(t.strand == true)
}
sparkTest("transcripts correctly matched with coverage") {
var s1: Double = 1
var s2: Double = 2
var s3: Double = 3
val rdd1 = sc.parallelize(Array(dummyTranscript("t1"),
dummyTranscript("t2"),
dummyTranscript("t3")))
val rdd2 = sc.parallelize(Array(("t1", s1, Iterable[Long]()),
("t2", s2, Iterable[Long]()),
("t3", s3, Iterable[Long]())))
val rdd3 = Quantify.joinTranscripts(rdd1, rdd2)
val compare = rdd3.collect()
for (x <- compare) {
if (x._1.id == "t1") {
assert(x._2 == s1)
}
if (x._1.id == "t2") {
assert(x._2 == s2)
}
if (x._1.id == "t3") {
assert(x._2 == s3)
}
}
}
sparkTest("quantify unique transcripts") {
// generate transcripts
val tLen = Seq(1000, 600, 400, 550, 1275, 1400)
val (transcripts,
names,
kmerMap,
classMap) = TranscriptGenerator.generateIndependentTranscripts(20,
tLen,
Some(1234L))
// generate reads
val reads = ReadGenerator(transcripts, Seq(0.2, 0.1, 0.3, 0.2, 0.1, 0.1), 10000, 75, Some(4321L))
// run quantification
val relativeAbundances = Quantify(sc.parallelize(reads),
sc.parallelize(kmerMap.toSeq),
sc.parallelize(classMap.toSeq),
sc.parallelize(names.zip(tLen).map(p => Transcript(p._1,
Seq(p._1),
p._1,
true,
Iterable(Exon(p._1 + "exon",
p._1,
true,
ReferenceRegion(p._1, 0, p._2.toLong))),
Iterable(),
Iterable()))),
20,
20,
false,
false).collect
.map(kv => (kv._1.id, kv._2))
.toMap
assert(relativeAbundances.size === 6)
assert(fpEquals(relativeAbundances("0"), 0.2, 0.05))
assert(fpEquals(relativeAbundances("1"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("2"), 0.3, 0.05))
assert(fpEquals(relativeAbundances("3"), 0.2, 0.05))
assert(fpEquals(relativeAbundances("4"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("5"), 0.1, 0.05))
}
sparkTest("Test of TestingTwoBitFile") {
val region1 = ReferenceRegion("region1", 0L, 10L)
val tbf = new TestingTwoBitFile(new ByteArrayByteAccess(new Array[Byte](1)))
assert(tbf.extract(region1) == "CAATCCTTCG")
}
sparkTest("Test of Index") {
// Takes a set of transcripts and a twobitfile and a kmer length, then returns a tuple: (kmers -> eq classes, eq class -> iterable of member kmers)
val region1 = ReferenceRegion("region1", 0L, 10L)
val exon1 = Exon("exon1", "transcript1", true, region1)
val transcript1 = Transcript("transcript1", Seq("transcript1"), "gene1", true, Iterable(exon1), Iterable(), Iterable())
val region2 = ReferenceRegion("region2", 11L, 20L)
val exon2 = Exon("exon2", "transcript2", true, region2)
val transcript2 = Transcript("transcript2", Seq("transcript2"), "gene1", true, Iterable(exon2), Iterable(), Iterable())
val transcripts = sc.parallelize(Seq(transcript1, transcript2))
val tbfile = new TestingTwoBitFile(new ByteArrayByteAccess(new Array[Byte](1)))
// List of ( ... (kmer, class id) ... ) AND (... (id, list of kmers) ...)
var (kmersToEq, eqToKmers) = Index.apply(tbfile, transcripts, 5)
val kToEq = kmersToEq.collect()
val eqToK = eqToKmers.collect()
// Tracking 3 particular kmers: "CAATC", "GTGCA", "CTTCG"
// Should be at least 2 eq classes, such that one contains both "CAATC" and "CTTCG", while the ther contains "GTGCA"
val CAATCtoClassArray = kToEq.filter(_._1 == "CAATC")
assert(CAATCtoClassArray.length == 1) // There should only be one instance of each kmer
val CAATCtoClass = CAATCtoClassArray(0)
val GTGCAtoClassArray = kToEq.filter(_._1 == "GTGCA")
assert(GTGCAtoClassArray.length == 1)
val GTGCAtoClass = GTGCAtoClassArray(0)
val CTTCGtoClassArray = kToEq.filter(_._1 == "CTTCG")
assert(CTTCGtoClassArray.length == 1)
val CTTCGtoClass = CTTCGtoClassArray(0)
var class1 = CAATCtoClass._2
var class2 = GTGCAtoClass._2
var class3 = CTTCGtoClass._2
assert(class1 != class2)
assert(class1 == class3)
var class1Kmers = eqToK.filter(_._1 == class1)(0)
assert(class1Kmers._2.toList.contains("CAATC"))
assert(class1Kmers._2.toList.contains("CTTCG"))
assert(!class1Kmers._2.toList.contains("GTGCA"))
var class2Kmers = eqToK.filter(_._1 == class2)(0)
assert(class2Kmers._2.toList.contains("GTGCA"))
assert(!class2Kmers._2.toList.contains("CAATC"))
assert(!class2Kmers._2.toList.contains("CTTCG"))
}
sparkTest("quantify a small set of more realistic but unbiased transcripts") {
// generate transcripts
val classSize = Seq(1000, 500, 700, 400, 400, 200, 100)
val classMultiplicity = Seq(1, 1, 1, 1, 2, 2, 3)
val classMembership = Seq(Set(0),
Set(1, 2),
Set(1, 3),
Set(1, 4),
Set(2, 5),
Set(2, 6),
Set(3, 6),
Set(6))
val (transcripts,
names,
kmerMap,
classMap) = TranscriptGenerator.generateTranscripts(20,
classSize,
classMultiplicity,
classMembership,
Some(1000L))
val tLen = transcripts.map(_.length)
// generate reads
val abundances = Seq(0.05, 0.1, 0.25, 0.1, 0.05, 0.025, 0.025, 0.4)
val reads = ReadGenerator(transcripts,
abundances,
50000,
75,
Some(5000L))
// run quantification
val relativeAbundances = Quantify(sc.parallelize(reads),
sc.parallelize(kmerMap.toSeq),
sc.parallelize(classMap.toSeq),
sc.parallelize(names.zip(tLen).map(p => Transcript(p._1,
Seq(p._1),
p._1,
true,
Iterable(Exon(p._1 + "exon",
p._1,
true,
ReferenceRegion(p._1, 0, p._2.toLong))),
Iterable(),
Iterable()))),
20,
50,
false,
false).collect
.map(kv => (kv._1.id, kv._2))
.toMap
assert(relativeAbundances.size === 8)
assert(fpEquals(relativeAbundances("0"), 0.05, 0.01))
assert(fpEquals(relativeAbundances("1"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("2"), 0.25, 0.05))
assert(fpEquals(relativeAbundances("3"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("4"), 0.05, 0.025))
assert(fpEquals(relativeAbundances("5"), 0.025, 0.0125))
assert(fpEquals(relativeAbundances("6"), 0.025, 0.0125))
assert(fpEquals(relativeAbundances("7"), 0.4, 0.05))
}
sparkTest("quantify unique transcripts without bias removal") {
// generate transcripts
val tLen = Seq(1000, 600, 400, 550, 1275, 1400)
val (transcripts,
names,
kmerMap,
classMap) = TranscriptGenerator.generateIndependentTranscripts(20,
tLen,
Some(1234L))
// generate reads
val reads = ReadGenerator(transcripts, Seq(0.2, 0.1, 0.3, 0.2, 0.1, 0.1), 10000, 75, Some(4321L))
// run quantification
val relativeAbundances = Quantify(sc.parallelize(reads),
sc.parallelize(kmerMap.toSeq),
sc.parallelize(classMap.toSeq),
sc.parallelize(names.zip(tLen).map(p => Transcript(p._1,
Seq(p._1),
p._1,
true,
Iterable(Exon(p._1 + "exon",
p._1,
true,
ReferenceRegion(p._1, 0, p._2.toLong))),
Iterable(),
Iterable()))),
20,
20,
false,
false).collect
.map(kv => (kv._1.id, kv._2))
.toMap
assert(relativeAbundances.size === 6)
assert(fpEquals(relativeAbundances("0"), 0.2, 0.05))
assert(fpEquals(relativeAbundances("1"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("2"), 0.3, 0.05))
assert(fpEquals(relativeAbundances("3"), 0.2, 0.05))
assert(fpEquals(relativeAbundances("4"), 0.1, 0.05))
assert(fpEquals(relativeAbundances("5"), 0.1, 0.05))
}
sparkTest("quantify unique transcripts where length bias is so strong that all variation in transcript abundance is due to length") {
// generate transcripts
val tLen = Seq(1000, 600, 400, 550, 1275, 1400)
val (transcripts,
names,
kmerMap,
classMap) = TranscriptGenerator.generateIndependentTranscripts(20,
tLen,
Some(1234L))
// generate reads
val totLen = tLen.sum.toDouble
val reads = ReadGenerator(transcripts, tLen.map(x => x / totLen), 10000, 75, Some(4321L))
// run quantification
val relativeAbundances = Quantify(sc.parallelize(reads),
sc.parallelize(kmerMap.toSeq),
sc.parallelize(classMap.toSeq),
sc.parallelize(names.zip(tLen).map(p => Transcript(p._1,
Seq(p._1),
p._1,
true,
Iterable(Exon(p._1 + "exon",
p._1,
true,
ReferenceRegion(p._1, 0, p._2.toLong))),
Iterable(),
Iterable()))),
20,
20,
true,
true).collect
.map(kv => (kv._1.id, kv._2))
.toMap
assert(relativeAbundances.size === 6)
assert(fpEquals(relativeAbundances("0"), 1.0 / 6, 0.05))
assert(fpEquals(relativeAbundances("1"), 1.0 / 6, 0.05))
assert(fpEquals(relativeAbundances("2"), 1.0 / 6, 0.05))
assert(fpEquals(relativeAbundances("3"), 1.0 / 6, 0.05))
assert(fpEquals(relativeAbundances("4"), 1.0 / 6, 0.05))
assert(fpEquals(relativeAbundances("5"), 1.0 / 6, 0.05))
}
sparkTest("quantify unique transcripts with a weaker length bias") {
// generate transcripts
val tLen = Seq(1000, 600, 400, 550, 1275, 1400) // average length is 870.83
val (transcripts,
names,
kmerMap,
classMap) = TranscriptGenerator.generateIndependentTranscripts(20,
tLen,
Some(1234L))
// generate reads
val reads = ReadGenerator(transcripts, Seq(0.2, 0.1, 0.05, 0.2, 0.05, 0.4), 10000, 75, Some(4321L))
// run quantification
val relativeAbundances = Quantify(sc.parallelize(reads),
sc.parallelize(kmerMap.toSeq),
sc.parallelize(classMap.toSeq),
sc.parallelize(names.zip(tLen).map(p => Transcript(p._1,
Seq(p._1),
p._1,
true,
Iterable(Exon(p._1 + "exon",
p._1,
true,
ReferenceRegion(p._1, 0, p._2.toLong))),
Iterable(),
Iterable()))),
20,
20,
true,
true).collect
.map(kv => (kv._1.id, kv._2))
.toMap
assert(relativeAbundances.size === 6)
// Transcript 2 is the shortest at 440 bp, and is one of the least abundant
// Part of this low abundance is bias due to short length, so calibration
// should increase its abundance.
assert(relativeAbundances("2") > 0.05)
// Transcript 5 is the longest at 1400 bp. It is the most abundant.
// Part of this high abudnace is bias due to long length.
// Calibration should therefore decrease its abundance.
assert(relativeAbundances("5") < 0.4)
}
}
| bigdatagenomics/RNAdam | rice-core/src/test/scala/org/bdgenomics/rice/algorithms/QuantifySuite.scala | Scala | apache-2.0 | 25,448 |
package org.singlespaced.d3js
import org.scalajs.dom
import d3.Primitive
import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.{JSBracketAccess, JSName}
// Scala.js facade for the global `d3.selection` function object.
@JSName("d3.selection")
@js.native
trait SelectionObject extends js.Object {
  // Mutable prototype of d3.selection, used to extend all selections.
  var prototype: Selection[js.Any] = js.native
}
package selection {
import scala.scalajs.js
// One group of selected DOM nodes, as produced by d3's nested selections.
@js.native
trait Group extends js.Array[dom.EventTarget] {
  // Parent node shared by all elements of this group.
  var parentNode: dom.EventTarget = js.native
}
@js.native
trait BaseSelection[Datum, T <: BaseSelection[Datum,T]] extends BaseDom[Datum,T] {
var length: Double = js.native
@JSBracketAccess
def apply(index: Double): Group = js.native
@JSBracketAccess
def update(index: Double, v: Group): Unit = js.native
def attr(name: String): String = js.native
def classed(name: String): Boolean = js.native
def classed(name: String, value: Boolean): T = js.native
def classed(name: String, value: DatumFunction[Boolean]): T = js.native
def classed(obj: js.Dictionary[SelfOrDatumFunction[Boolean]]): T = js.native
def style(name: String): String = js.native
def style(obj: js.Dictionary[SelfOrDatumFunction[Primitive]], priority: String ): T = js.native
def property(name: String): js.Dynamic = js.native
def property(name: String, value: Any): T = js.native
def property(name: String, value: DatumFunction[Any]): T = js.native
def property(obj: js.Dictionary[SelfOrDatumFunction[Any]]): T = js.native
def text(): String = js.native
def text(value: String): T = js.native
def text(value: DatumFunction[String]): T = js.native
def html(): String = js.native
def html(value: String): T = js.native
def html(value: DatumFunction[String]): T = js.native
def append(name: String): T = js.native
def append(name: DatumFunction[dom.EventTarget]): T = js.native
def insert(name: String, before: String): T = js.native
def insert(name: String, before: DatumFunction[dom.EventTarget]): T = js.native
def insert(name: DatumFunction[dom.EventTarget], before: String): T = js.native
def insert(name: DatumFunction[dom.EventTarget], before: DatumFunction[dom.EventTarget]): T = js.native
def data(): js.Array[Datum] = js.native
def data[NewDatum](data: js.Array[NewDatum]): Update[NewDatum] = js.native
def data[NewDatum](data: js.Array[NewDatum], key: js.ThisFunction2[dom.Node|js.Array[NewDatum],js.UndefOr[NewDatum], Int, String]): Update[NewDatum] = js.native
def data[NewDatum <: Datum](data: js.Array[NewDatum], key: js.Function2[Datum, Int, String]): Update[NewDatum] = js.native
def data[NewDatum](data: DatumFunction[js.Array[NewDatum]]): Update[NewDatum] = js.native
def data[NewDatum](data: DatumFunction[js.Array[NewDatum]], key: js.ThisFunction2[dom.Node|js.Array[NewDatum],js.UndefOr[NewDatum], Int, String]): Update[NewDatum] = js.native
def filter(selector: DatumFunction[Boolean]): T = js.native
def datum(): Datum = js.native
def datum[NewDatum](value: NewDatum): Update[NewDatum] = js.native
def datum[NewDatum](value: js.Array[NewDatum]): Update[NewDatum] = js.native
def datum[NewDatum](value: DatumFunction[NewDatum]): Update[NewDatum] = js.native
def sort(comparator: js.Function2[Datum, Datum, Double] = ???): T = js.native
def order(): T = js.native
def on(`type`: String): DatumFunction[Unit] = js.native
def on(`type`: String, listener: DatumFunction[Unit], capture: Boolean = false): T = js.native
def transition(name: String = ???): Transition[Datum] = js.native
def interrupt(name: String = ???): T = js.native
def select(selector: String): T = js.native
def select(selector: DatumFunction[dom.EventTarget]): T = js.native
def each(func: js.ThisFunction2[T,Datum,Int, Unit]): T = js.native
}
@js.native
trait Update[Datum] extends BaseSelection[Datum,Update[Datum]] {
def selectAll[SelData](selector: String): Update[SelData] = js.native
def selectAll[SelData](selector: js.Function3[Datum, Double, Double, js.Array[dom.EventTarget] | dom.NodeList]): Update[SelData] = js.native
def enter(): Enter[Datum] = js.native
def exit(): Selection[Datum] = js.native
}
@js.native
trait Enter[Datum] extends js.Object {
def append(name: String): Selection[Datum] = js.native
def append(name: js.Function3[Datum, Double, Double, dom.EventTarget]): Selection[Datum] = js.native
def insert(name: String, before: String): Selection[Datum] = js.native
def insert(name: String, before: js.Function3[Datum, Double, Double, dom.EventTarget]): Selection[Datum] = js.native
def insert(name: js.Function3[Datum, Double, Double, dom.EventTarget], before: String): Selection[Datum] = js.native
def insert(name: js.Function3[Datum, Double, Double, dom.EventTarget], before: js.Function3[Datum, Double, Double, dom.EventTarget]): Selection[Datum] = js.native
def select(name: js.Function3[Datum, Double, Double, dom.EventTarget]): Selection[Datum] = js.native
def call(func: js.Function, args: js.Any*): Enter[Datum] = js.native
}
}
/** The standard d3 selection type returned by d3.select / d3.selectAll. */
@js.native
trait Selection[Datum] extends selection.BaseSelection[Datum,Selection[Datum]] {
//def selectAll(selector: String): Selection[js.Any] = js.native
def selectAll[SelData](selector: String): Selection[SelData] = js.native
//def selectAll(selector: js.Function3[Datum, Double, Double, js.Array[dom.EventTarget] | dom.NodeList]): Selection[js.Any] = js.native
def selectAll[SelData](selector: DatumFunction[js.Array[dom.EventTarget] | dom.NodeList]): Selection[SelData] = js.native
}
| spaced/scala-js-d3 | src/main/scala/org/singlespaced/d3js/selection.scala | Scala | bsd-3-clause | 5,497 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{File, FileNotFoundException}
import java.net.URI
import scala.util.Random
import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.execution.streaming.ExistsThrowsExceptionFileSystem._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StructType
/** Tests for FileStreamSource's SeenFilesMap bookkeeping and for the guarantee
  * that getBatch does not re-check file existence.
  */
class FileStreamSourceSuite extends SparkFunSuite with SharedSQLContext {
import FileStreamSource._
test("SeenFilesMap") {
// maxAgeMs = 10: entries become purgeable once 10 ms older than the latest timestamp.
val map = new SeenFilesMap(maxAgeMs = 10)
map.add("a", 5)
assert(map.size == 1)
map.purge()
assert(map.size == 1)
// Add a new entry and purge should be no-op, since the gap is exactly 10 ms.
map.add("b", 15)
assert(map.size == 2)
map.purge()
assert(map.size == 2)
// Add a new entry that's more than 10 ms than the first entry. We should be able to purge now.
map.add("c", 16)
assert(map.size == 3)
map.purge()
assert(map.size == 2)
// Override existing entry shouldn't change the size
map.add("c", 25)
assert(map.size == 2)
// Not a new file because we have seen c before
assert(!map.isNewFile("c", 20))
// Not a new file because timestamp is too old
assert(!map.isNewFile("d", 5))
// Finally a new file: never seen and not too old
assert(map.isNewFile("e", 20))
}
test("SeenFilesMap should only consider a file old if it is earlier than last purge time") {
val map = new SeenFilesMap(maxAgeMs = 10)
map.add("a", 20)
assert(map.size == 1)
// Timestamp 5 should still considered a new file because purge time should be 0
assert(map.isNewFile("b", 9))
assert(map.isNewFile("b", 10))
// Once purge, purge time should be 10 and then b would be a old file if it is less than 10.
map.purge()
assert(!map.isNewFile("b", 9))
assert(map.isNewFile("b", 10))
}
testWithUninterruptibleThread("do not recheck that files exist during getBatch") {
withTempDir { temp =>
// Register the throwing FileSystem for $scheme URIs: any fs.exists call fails the test.
spark.conf.set(
s"fs.$scheme.impl",
classOf[ExistsThrowsExceptionFileSystem].getName)
// add the metadata entries as a pre-req
val dir = new File(temp, "dir") // use non-existent directory to test whether log make the dir
val metadataLog =
new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, dir.getAbsolutePath)
assert(metadataLog.add(0, Array(FileEntry(s"$scheme:///file1", 100L, 0))))
val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
dir.getAbsolutePath, Map.empty)
// this method should throw an exception if `fs.exists` is called during resolveRelation
newSource.getBatch(None, LongOffset(1))
}
}
}
/**
 * Fake FileSystem used to verify that `fs.exists` is never invoked during
 * `DataSource.resolveRelation`.
 */
class ExistsThrowsExceptionFileSystem extends RawLocalFileSystem {

  override def getUri: URI = URI.create(s"$scheme:///")

  // Fails loudly: the code under test must never probe for file existence.
  override def exists(f: Path): Boolean =
    throw new IllegalArgumentException("Exists shouldn't have been called!")

  // Simulates a folder that disappeared between listing and reading.
  override def listStatus(file: Path): Array[FileStatus] =
    throw new FileNotFoundException("Folder was suddenly deleted but this should not make it fail!")
}
object ExistsThrowsExceptionFileSystem {
  // Random suffix keeps the scheme unique across suites sharing a JVM.
  // Random.nextInt(Int.MaxValue) is always non-negative; the previous
  // math.abs(Random.nextInt) returned Int.MinValue (still negative) whenever
  // nextInt produced Int.MinValue, yielding a scheme containing a '-'.
  val scheme = s"FileStreamSourceSuite${Random.nextInt(Int.MaxValue)}fs"
}
| gioenn/xSpark | sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala | Scala | apache-2.0 | 4,318 |
package com.krux.hyperion.cli
import java.io.PrintStream
import com.krux.hyperion.DataPipelineDefGroup
import com.krux.hyperion.workflow.WorkflowGraphRenderer
/** CLI action that renders each pipeline in the group as a GraphViz document,
  * writing one `<output><key>.dot` file per pipeline, or printing to stdout
  * when no output file was requested.
  */
private[hyperion] case object GraphAction extends Action {

  def apply(options: Options, defGroup: DataPipelineDefGroup): Boolean = {
    defGroup.ungroup().foreach { case (key, pipelineDef) =>
      val renderer = WorkflowGraphRenderer(pipelineDef, options.removeLastNameSegment,
        options.label, options.includeResources, options.includeDataNodes, options.includeDatabases)
      options.output match {
        case Some(f) =>
          // The file-backed stream must be closed or the rendered output may be
          // lost in unflushed buffers and the descriptor leaked (it was never
          // closed before).
          val out = new PrintStream(f + key.map(pipelineDef.nameKeySeparator + _).getOrElse("") + ".dot")
          try out.println(renderer.render())
          finally out.close()
        case None =>
          // Never close System.out — it is shared with the rest of the process.
          System.out.println(renderer.render())
      }
    }
    true
  }
}
| hoangelos/hyperion | core/src/main/scala/com/krux/hyperion/cli/GraphAction.scala | Scala | apache-2.0 | 762 |
//
// SchedulingQueue.scala -- A queue interface for work-stealing queues
// Project OrcScala
//
// Created by amp on Jan, 2018.
//
// Copyright (c) 2018 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.util
/** A queue interface for work-stealing schedulers: a single owner thread
 * pushes and pops, while other worker threads may steal.
 */
trait SchedulingQueue[T] {
/** Push an item into the queue.
 *
 * Only call from the owner thread.
 *
 * @return true if the insert was successful, false if it failed due to a full queue.
 */
def push(o: T): Boolean
/** Pop an item from the queue.
 *
 * Only call from the owner thread.
 *
 * @return null if the queue is empty.
 */
def pop(): T
/** Steal an item from the queue.
 *
 * This traditionally pops from the bottom of the queue (work-stealing style).
 *
 * May be called from non-owner threads.
 *
 * @return null if the queue is empty.
 */
def steal(): T
/** Get the size of the queue.
 *
 * This is only an estimate and the queue may never have had the returned length. The
 * actual length must be less than (or equal to) this number and may only decrease
 * unless the owner adds items.
 *
 * This may only be called from the owner thread.
 */
def size: Int
/** Clear the backing storage of the queue.
 *
 * This is useful to allow old items in the queue to be freed.
 *
 * This may only be called from the owner thread when the queue is empty.
 */
def clean(): Unit
} | orc-lang/orc | OrcScala/src/orc/util/SchedulingQueue.scala | Scala | bsd-3-clause | 1,640 |
package org.http4s.util
import java.util.Locale
import org.http4s.Http4sSpec
import org.scalacheck.{Prop, Arbitrary, Gen}
import scalaz.scalacheck.ScalazProperties
/** Property checks for `CaseInsensitiveString`: equality and hashing ignore
  * case, while toString/length/charAt/subSequence delegate to the original
  * string value. Fixes the "orignal" typo in three spec descriptions.
  */
class CaseInsensitiveStringSpec extends Http4sSpec {
  "equals" should {
    "be consistent with equalsIgnoreCase of the values" in {
      prop { s: String =>
        val lc = s.toLowerCase(Locale.ROOT)
        (s.equalsIgnoreCase(lc)) == (s.ci == lc.ci)
      }
    }
  }
  "hashCode" should {
    "be consistent with equality" in {
      prop { s: String =>
        val lc = s.toLowerCase(Locale.ROOT)
        // Equal values must hash equally (Object.hashCode contract).
        (s.ci == lc.ci) ==> (s.ci.## == lc.ci.##)
      }
    }
  }
  "toString" should {
    "return the original as its toString" in {
      prop { s: String => s.ci.toString equals (s) }
    }
  }
  "length" should {
    "be consistent with the original's length" in {
      prop { s: String => s.ci.length equals (s.length) }
    }
  }
  "charAt" should {
    "be consistent with the original's charAt" in {
      // Generate a non-empty string plus a valid index into it.
      def gen = for {
        s <- Arbitrary.arbitrary[String].suchThat(_.nonEmpty)
        i <- Gen.choose(0, s.length - 1)
      } yield (s, i)
      Prop.forAll(gen) { case (s, i) => s.ci.charAt(i) equals (s.charAt(i)) }
    }
  }
  "subSequence" should {
    "be consistent with the original's subSequence" in {
      // Generate a non-empty string plus a valid (i, j) index pair with i <= j.
      def gen = for {
        s <- Arbitrary.arbitrary[String].suchThat(_.nonEmpty)
        i <- Gen.choose(0, s.length - 1)
        j <- Gen.choose(i, s.length - 1)
      } yield (s, i, j)
      Prop.forAll(gen) { case (s, i, j) => s.ci.subSequence(i, j) equals (s.subSequence(i, j).ci) }
    }
  }
}
| m4dc4p/http4s | tests/src/test/scala/org/http4s/util/CaseInsensitiveStringSpec.scala | Scala | apache-2.0 | 1,603 |
package controllers
import play.api.mvc.{Action, SimpleResult, EssentialAction, Controller}
import domain.DB._
import domain.Security._
import scalikejdbc.async.AsyncDBSession
import models.{User, App}
import play.api.data.Form
import play.api.data.Forms._
import fr.njin.playoauth.as.endpoints.Constraints._
import fr.njin.playoauth.as.endpoints.Requests._
import scala.concurrent.Future
import play.api.i18n.Messages
import play.api.libs.iteratee.{Iteratee, Done}
/**
* User: bathily
* Date: 03/10/13
*/
/** Play controller providing CRUD screens for OAuth client applications. */
object Apps extends Controller {
/** Form-backing model for creating or editing an [[models.App]]. */
case class AppForm(name: String,
description: String,
uri: String,
iconUri: Option[String],
redirectUris: Option[Seq[String]],
isWebApp: Boolean,
isNativeApp: Boolean)
object AppForm {
// Pre-fills the form from an existing App (used by the edit screen).
def apply(app: App): AppForm = AppForm(
app.name,
app.description,
app.uri,
app.iconUri,
app.redirectUris,
app.isWebApp,
app.isNativeApp
)
}
// Form definition; the final constraint requires web/native apps to declare
// at least one redirect URI.
val appForm = Form(
mapping (
"name" -> nonEmptyText,
"description" -> nonEmptyText,
"uri" -> nonEmptyText.verifying(uri),
"iconUri" -> optional(text.verifying(uri)),
"redirectUris" -> optional(of[Seq[String]](urisFormatter).verifying(uris)),
"isWebApp" -> boolean,
"isNativeApp" -> boolean
)(AppForm.apply)(AppForm.unapply).verifying("error.redirectUri.required", app => {
!(app.isWebApp || app.isNativeApp) || app.redirectUris.exists(!_.isEmpty)
})
)
// Default handler when the requested app id does not exist (404).
val OnAppNotFound: Long => User => EssentialAction = id => implicit user => EssentialAction { implicit request =>
Done[Array[Byte], SimpleResult](NotFound(views.html.apps.notfound(id)))
}
// Default handler when the app exists but is not owned by the current user (403).
val OnAppForbidden: Long => User => EssentialAction = id => implicit user => EssentialAction { implicit request =>
Done[Array[Byte], SimpleResult](Forbidden(views.html.apps.notfound(id)))
}
/** Runs `action` only if the app exists and its ownerId matches `user`;
 * otherwise delegates to the not-found / forbidden handlers.
 */
def CanAccessApp(id:Long, user:User,
onNotFound: Long => User => EssentialAction = OnAppNotFound,
onForbidden: Long => User =>EssentialAction = OnAppForbidden
)(action: App => AsyncDBSession => EssentialAction)(implicit session: AsyncDBSession): EssentialAction =
EssentialAction { request =>
Iteratee.flatten(
App.find(id).map(_.fold(onNotFound(id)(user)(request))(app => {
if(app.ownerId == user.id)
action(app)(session)(request)
else
onForbidden(id)(user)(request)
}))
)
}
// Composition used by most actions below: open a DB transaction, authenticate
// the user, then check app ownership.
def WithApp(id: Long)(action: User => App => AsyncDBSession => EssentialAction): EssentialAction =
InTx { implicit tx =>
WithUser(tx, dbContext) { user =>
CanAccessApp(id, user)(action(user))
}
}
// Lists all apps owned by the authenticated user.
def list = InTx { implicit tx =>
AuthenticatedAction.async { implicit request =>
App.findForOwner(request.user).map { apps =>
Ok(views.html.apps.list(apps))
}
}
}
// Shows the empty creation form.
def create = InTx { implicit tx =>
AuthenticatedAction.apply { implicit request =>
Ok(views.html.apps.create(appForm))
}
}
// Validates the submitted form and persists a new app, then redirects to it.
def doCreate = InTx { implicit tx =>
AuthenticatedAction.async { implicit request =>
appForm.bindFromRequest.fold(f => Future.successful(BadRequest(views.html.apps.create(f))),
app => App.create(request.user,
name = app.name,
description = app.description,
uri = app.uri,
iconUri = app.iconUri,
redirectUris = app.redirectUris,
isWebApp = app.isWebApp,
isNativeApp = app.isNativeApp
).map { a =>
Redirect(routes.Apps.app(a.pid))
}
)
}
}
// Detail view of a single owned app.
def app(id: Long) = WithApp(id) { implicit user => app => implicit tx =>
Action { implicit request =>
Ok(views.html.apps.app(app))
}
}
// Shows the edit form pre-filled with the app's current values.
def edit(id: Long) = WithApp(id) { implicit user => app => implicit tx =>
Action { implicit request =>
Ok(views.html.apps.edit(app, appForm.fill(AppForm(app))))
}
}
// Validates the submitted form and saves the updated app, then redirects to it.
def doEdit(id: Long) = WithApp(id) { implicit user => app => implicit tx =>
Action.async { implicit request =>
appForm.bindFromRequest.fold(f => Future.successful(BadRequest(views.html.apps.edit(app, f))),
form =>
app.copy(
name = form.name,
description = form.description,
uri = form.uri,
iconUri = form.iconUri,
redirectUris = form.redirectUris,
isWebApp = form.isWebApp,
isNativeApp = form.isNativeApp
).save.map { a =>
Redirect(routes.Apps.app(a.pid))
}
)
}
}
// Deletion confirmation page.
def delete(id: Long) = WithApp(id) { implicit user => app => implicit tx =>
Action { implicit request =>
Ok(views.html.apps.delete(app))
}
}
// Destroys the app and returns to the list with a flash message.
def doDelete(id: Long) = WithApp(id) { implicit user => app => implicit tx =>
Action.async { implicit request =>
app.destroy().map(app =>
Redirect(routes.Apps.list).flashing("success" -> Messages("flash.app.delete.success", app.name))
)
}
}
}
| njin-fr/play-oauth | play-oauth-server/app/controllers/Apps.scala | Scala | apache-2.0 | 5,117 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.utils
import com.intel.analytics.bigdl.dataset.{MiniBatch, LocalDataSet}
import com.intel.analytics.bigdl.models.vgg.{Vgg_16, Vgg_19}
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.models.inception.{Inception_v1, Inception_v2}
import com.intel.analytics.bigdl.nn.ClassNLLCriterion
import com.intel.analytics.bigdl.optim.{Optimizer, LocalOptimizer, Trigger}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Engine
import scopt.OptionParser
import scala.reflect.ClassTag
/** Standalone benchmark for the local (single-JVM) Optimizer: builds a CNN
 * model, feeds it synthetic data and runs a fixed number of iterations.
 */
object LocalOptimizerPerf {
// Command-line spec; each option copies its value into LocalOptimizerPerfParam.
val parser = new OptionParser[LocalOptimizerPerfParam]("BigDL Local Performance Test") {
head("Performance Test of Local Optimizer")
opt[Int]('b', "batchSize")
.text("Batch size of input data")
.action((v, p) => p.copy(batchSize = v))
opt[Int]('c', "coreNumber")
.text("physical cores number of current machine")
.action((v, p) => p.copy(coreNumber = v))
opt[Int]('i', "iteration")
.text("Iteration of perf test. The result will be average of each iteration time cost")
.action((v, p) => p.copy(iteration = v))
opt[String]('m', "model")
.text("Model name. It can be inception_v1 | vgg16 | vgg19 | " +
"inception_v2")
.action((v, p) => p.copy(module = v))
.validate(v =>
if (Set("inception_v1", "inception_v2", "vgg16", "vgg19").
contains(v.toLowerCase())) {
success
} else {
failure("Data type can only be inception_v1 | " +
"vgg16 | vgg19 | inception_v2 now")
}
)
opt[String]('d', "inputdata")
.text("Input data type. One of constant | random")
.action((v, p) => p.copy(inputData = v))
.validate(v =>
if (v.toLowerCase() == "constant" || v.toLowerCase() == "random") {
success
} else {
failure("Input data type must be one of constant and random")
}
)
help("help").text("Prints this usage text")
}
def main(args: Array[String]): Unit = {
// When parsing fails scopt has already printed the error; nothing runs.
parser.parse(args, new LocalOptimizerPerfParam()).map(param => {
performance(param)
})
}
/** Builds the requested model and runs the optimizer for param.iteration steps. */
def performance(param: LocalOptimizerPerfParam): Unit = {
Engine.init(1, param.coreNumber, false)
// NOTE: non-exhaustive match — safe for parser-validated input, but calling
// this directly with an unknown module name would throw a MatchError.
val (_model, input) = param.module match {
case "inception_v1" => (Inception_v1(1000), Tensor(param.batchSize, 3, 224, 224))
case "inception_v2" => (Inception_v2(1000), Tensor(param.batchSize, 3, 224, 224))
case "vgg16" => (Vgg_16(1000), Tensor(param.batchSize, 3, 224, 224))
case "vgg19" => (Vgg_19(1000), Tensor(param.batchSize, 3, 224, 224))
}
param.inputData match {
case "constant" => input.fill(0.01f)
case "random" => input.rand()
}
val model = _model
println(model)
val criterion = ClassNLLCriterion()
val labels = Tensor(param.batchSize).fill(1)
// Infinite data set handing out the same synthetic mini-batch; size() is nominal.
val dummyDataSet = new LocalDataSet[MiniBatch[Float]] {
override def data(train : Boolean): Iterator[MiniBatch[Float]] = {
new Iterator[MiniBatch[Float]] {
override def hasNext: Boolean = true
override def next(): MiniBatch[Float] = {
MiniBatch(input, labels)
}
}
}
override def size(): Long = 100000
override def shuffle(): Unit = {}
}
Engine.setCoreNumber(param.coreNumber)
val optimizer = Optimizer(model, dummyDataSet, criterion)
optimizer.setEndWhen(Trigger.maxIteration(param.iteration)).optimize()
}
}
/** CLI parameters for the local optimizer performance test.
 *
 * @param batchSize  number of samples per mini-batch
 * @param coreNumber physical cores to use (defaults to half the logical processors)
 * @param iteration  number of optimizer iterations to run
 * @param dataType   numeric type label (appears unused by performance() in this file)
 * @param module     model name: inception_v1 | inception_v2 | vgg16 | vgg19
 * @param inputData  input fill mode: "constant" or "random"
 */
case class LocalOptimizerPerfParam(
batchSize: Int = 128,
coreNumber: Int = (Runtime.getRuntime().availableProcessors() / 2),
iteration: Int = 50,
dataType: String = "float",
module: String = "inception_v1",
inputData: String = "random"
)
| SeaOfOcean/BigDL | dl/src/main/scala/com/intel/analytics/bigdl/models/utils/LocalOptimizerPerf.scala | Scala | apache-2.0 | 4,693 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.fusesource.scalate.support.TemplatePackage
import org.fusesource.scalate.{Binding, TemplateSource}
/**
 * Defines the template package of reusable imports, attributes and methods across templates
 */
class ScalatePackage extends TemplatePackage {
// Returns a Scala source snippet that Scalate injects into every generated
// template in this package, so Website members are available without an
// explicit import in each template.
def header(source: TemplateSource, bindings: List[Binding]) =
"""
// common imports go here
import _root_.Website._;
"""
}
| apache/activemq-openwire | openwire-website/ext/ScalatePackage.scala | Scala | apache-2.0 | 1,204 |
package org.apache.carbondata.datamap.bloom
import java.io.{File, PrintWriter}
import java.util.UUID
import scala.util.Random
import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.DataFrame
object BloomCoarseGrainDataMapTestUtil extends QueryTest {

  /**
   * Creates a CSV data file with `line` rows whose ids start at `start`.
   * Each row holds id, name, city, a random age and eight random UUID columns.
   * Does nothing if the file already exists.
   */
  def createFile(fileName: String, line: Int = 10000, start: Int = 0): Unit = {
    if (!new File(fileName).exists()) {
      val write = new PrintWriter(new File(fileName))
      // try/finally guarantees the handle is released even if a write throws,
      // so a failing test cannot leak file descriptors (previously the writer
      // was only closed on the success path).
      try {
        for (i <- start until (start + line)) {
          write.println(
            s"$i,n$i,city_$i,${ Random.nextInt(80) }," +
            s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
            s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
            s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
            s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }")
        }
      } finally {
        write.close()
      }
    }
  }

  /** Deletes the file if it exists; missing files are ignored. */
  def deleteFile(fileName: String): Unit = {
    val file = new File(fileName)
    if (file.exists()) {
      file.delete()
    }
  }

  private def checkSqlHitDataMap(sqlText: String, dataMapName: String, shouldHit: Boolean): DataFrame = {
    // we will not check whether the query will hit the datamap because datamap may be skipped
    // if the former datamap pruned all the blocklets
    sql(sqlText)
  }

  /** Compares point queries and min/max aggregates between the datamap-backed
   * table and the plain table; both must return identical results.
   */
  def checkBasicQuery(dataMapName: String, bloomDMSampleTable: String, normalTable: String, shouldHit: Boolean = true): Unit = {
    checkAnswer(
      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where id = 1", dataMapName, shouldHit),
      sql(s"select * from $normalTable where id = 1"))
    checkAnswer(
      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where id = 999", dataMapName, shouldHit),
      sql(s"select * from $normalTable where id = 999"))
    checkAnswer(
      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where city = 'city_1'", dataMapName, shouldHit),
      sql(s"select * from $normalTable where city = 'city_1'"))
    checkAnswer(
      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where city = 'city_999'", dataMapName, shouldHit),
      sql(s"select * from $normalTable where city = 'city_999'"))
    checkAnswer(
      sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
          s" from $bloomDMSampleTable"),
      sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
          s" from $normalTable"))
  }
}
| manishgupta88/carbondata | integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala | Scala | apache-2.0 | 2,473 |
package pregnaware.naming.entities
/** Request payload for adding a suggested name; `isBoy` marks it as a boy's name. */
case class AddNameRequest(suggestedByUserId: Int, name: String, isBoy: Boolean)
| jds106/pregnaware | service/src/main/scala/pregnaware/naming/entities/AddNameRequest.scala | Scala | mit | 116 |
import scala.io.StdIn
object Solution {

  /**
   * Encrypts `s` with the grid cipher: lay the text out row-wise in a grid of
   * ceil(sqrt(length)) columns, then read it back column-wise with a single
   * space between columns. The final row may be shorter than the others, so
   * each column only takes characters from rows long enough to reach it.
   */
  def encrypt(s: String): String = {
    // `max 1` keeps grouped() legal for the empty string (grouped(0) throws).
    val cols = math.sqrt(s.length.toDouble).ceil.toInt max 1
    val grid = s.grouped(cols).toVector
    (0 until cols)
      .map(c => grid.collect { case row if c < row.length => row(c) }.mkString)
      .mkString(" ")
  }

  def main(args: Array[String]): Unit = {
    // The previous one-liner used Array.transpose, which throws
    // IllegalArgumentException whenever the last row is shorter than the rest
    // (any input length that is not an exact multiple of the column count,
    // e.g. "feedthedog").
    val s = StdIn.readLine()
    println(encrypt(s))
  }
}
| everyevery/programming_study | hackerrank/Algorithms/Implementation/Entryption/Entryption.scala | Scala | mit | 272 |
package better.files
import java.nio.file._
import scala.concurrent.{blocking, ExecutionContext}
import scala.util.Try
import scala.util.control.NonFatal
/** Implementation of File.Monitor backed by a java.nio WatchService.
 *
 * @param root     file or directory to watch
 * @param maxDepth how many directory levels below root to register for events
 */
abstract class FileMonitor(val root: File, maxDepth: Int) extends File.Monitor {
protected[this] val service = root.newWatchService
// Convenience constructor: recursive watches everything below root, otherwise only root itself.
def this(root: File, recursive: Boolean = true) = this(root, if (recursive) Int.MaxValue else 0)
/** If watching non-directory, don't react to siblings
* @param target the file an event was reported for
* @return true when the event should be dispatched to the callbacks
*/
protected[this] def reactTo(target: File) = root.isDirectory || root.isSamePathAs(target)
// Drains and dispatches all pending events of one WatchKey, then re-arms it.
protected[this] def process(key: WatchKey) = {
val path = key.watchable().asInstanceOf[Path]
import scala.collection.JavaConverters._
key.pollEvents().asScala foreach {
// @unchecked: the Path type parameter is erased; context() null-check guards OVERFLOW events.
case event: WatchEvent[Path] @unchecked if (event.context() != null) =>
val target: File = path.resolve(event.context())
if (reactTo(target)) {
if (event.kind() == StandardWatchEventKinds.ENTRY_CREATE) {
val depth = root.relativize(target).getNameCount
watch(target, (maxDepth - depth) max 0) // auto-watch new files in a directory
}
onEvent(event.kind(), target, event.count())
}
case event => if (reactTo(path)) onUnknownEvent(event)
}
key.reset()
}
// Registers file (or the directories below it, up to depth) with the watch service.
// Registration failures are reported through onException rather than thrown.
protected[this] def watch(file: File, depth: Int): Unit = {
def toWatch: Iterator[File] =
if (file.isDirectory) {
file.walk(depth).filter(f => f.isDirectory && f.exists)
} else {
when(file.exists)(file.parent).iterator // There is no way to watch a regular file; so watch its parent instead
}
try {
toWatch.foreach(f => Try[Unit](f.register(service)).recover { case e => onException(e) }.get)
} catch {
case NonFatal(e) => onException(e)
}
}
// Registers the tree, then blocks a thread of the given context on service.take() forever.
override def start()(implicit executionContext: ExecutionContext) = {
watch(root, maxDepth)
executionContext.execute(new Runnable {
override def run() = blocking { Iterator.continually(service.take()).foreach(process) }
})
}
override def close() = service.close()
// Although this class is abstract, we provide noop implementations so user can choose to implement a subset of these
override def onCreate(file: File, count: Int) = {}
override def onModify(file: File, count: Int) = {}
override def onDelete(file: File, count: Int) = {}
override def onUnknownEvent(event: WatchEvent[_]) = {}
override def onException(exception: Throwable) = {}
}
| pathikrit/better-files | core/src/main/scala/better/files/FileMonitor.scala | Scala | mit | 2,591 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util
import org.apache.kafka.common.feature.{Features, FinalizedVersionRange, SupportedVersionRange}
import org.apache.kafka.common.message.ApiMessageType.ListenerType
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.record.{RecordBatch, RecordVersion}
import org.apache.kafka.common.requests.{AbstractResponse, ApiVersionsResponse}
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import scala.jdk.CollectionConverters._
class ApiVersionTest {
// Parsing accepts release versions ("0.8.0"), patch releases ("0.8.0.1") and
// inter-version forms ("-IVn"); each must map onto its canonical ApiVersion.
@Test
def testApply(): Unit = {
assertEquals(KAFKA_0_8_0, ApiVersion("0.8.0"))
assertEquals(KAFKA_0_8_0, ApiVersion("0.8.0.0"))
assertEquals(KAFKA_0_8_0, ApiVersion("0.8.0.1"))
assertEquals(KAFKA_0_8_1, ApiVersion("0.8.1"))
assertEquals(KAFKA_0_8_1, ApiVersion("0.8.1.0"))
assertEquals(KAFKA_0_8_1, ApiVersion("0.8.1.1"))
assertEquals(KAFKA_0_8_2, ApiVersion("0.8.2"))
assertEquals(KAFKA_0_8_2, ApiVersion("0.8.2.0"))
assertEquals(KAFKA_0_8_2, ApiVersion("0.8.2.1"))
assertEquals(KAFKA_0_9_0, ApiVersion("0.9.0"))
assertEquals(KAFKA_0_9_0, ApiVersion("0.9.0.0"))
assertEquals(KAFKA_0_9_0, ApiVersion("0.9.0.1"))
assertEquals(KAFKA_0_10_0_IV0, ApiVersion("0.10.0-IV0"))
assertEquals(KAFKA_0_10_0_IV1, ApiVersion("0.10.0"))
assertEquals(KAFKA_0_10_0_IV1, ApiVersion("0.10.0.0"))
assertEquals(KAFKA_0_10_0_IV1, ApiVersion("0.10.0.0-IV0"))
assertEquals(KAFKA_0_10_0_IV1, ApiVersion("0.10.0.1"))
assertEquals(KAFKA_0_10_1_IV0, ApiVersion("0.10.1-IV0"))
assertEquals(KAFKA_0_10_1_IV1, ApiVersion("0.10.1-IV1"))
assertEquals(KAFKA_0_10_1_IV2, ApiVersion("0.10.1"))
assertEquals(KAFKA_0_10_1_IV2, ApiVersion("0.10.1.0"))
assertEquals(KAFKA_0_10_1_IV2, ApiVersion("0.10.1-IV2"))
assertEquals(KAFKA_0_10_1_IV2, ApiVersion("0.10.1.1"))
assertEquals(KAFKA_0_10_2_IV0, ApiVersion("0.10.2"))
assertEquals(KAFKA_0_10_2_IV0, ApiVersion("0.10.2.0"))
assertEquals(KAFKA_0_10_2_IV0, ApiVersion("0.10.2-IV0"))
assertEquals(KAFKA_0_10_2_IV0, ApiVersion("0.10.2.1"))
assertEquals(KAFKA_0_11_0_IV0, ApiVersion("0.11.0-IV0"))
assertEquals(KAFKA_0_11_0_IV1, ApiVersion("0.11.0-IV1"))
assertEquals(KAFKA_0_11_0_IV2, ApiVersion("0.11.0"))
assertEquals(KAFKA_0_11_0_IV2, ApiVersion("0.11.0.0"))
assertEquals(KAFKA_0_11_0_IV2, ApiVersion("0.11.0-IV2"))
assertEquals(KAFKA_0_11_0_IV2, ApiVersion("0.11.0.1"))
assertEquals(KAFKA_1_0_IV0, ApiVersion("1.0"))
assertEquals(KAFKA_1_0_IV0, ApiVersion("1.0.0"))
assertEquals(KAFKA_1_0_IV0, ApiVersion("1.0.0-IV0"))
assertEquals(KAFKA_1_0_IV0, ApiVersion("1.0.1"))
assertEquals(KAFKA_1_1_IV0, ApiVersion("1.1-IV0"))
assertEquals(KAFKA_2_0_IV1, ApiVersion("2.0"))
assertEquals(KAFKA_2_0_IV0, ApiVersion("2.0-IV0"))
assertEquals(KAFKA_2_0_IV1, ApiVersion("2.0-IV1"))
assertEquals(KAFKA_2_1_IV2, ApiVersion("2.1"))
assertEquals(KAFKA_2_1_IV0, ApiVersion("2.1-IV0"))
assertEquals(KAFKA_2_1_IV1, ApiVersion("2.1-IV1"))
assertEquals(KAFKA_2_1_IV2, ApiVersion("2.1-IV2"))
assertEquals(KAFKA_2_2_IV1, ApiVersion("2.2"))
assertEquals(KAFKA_2_2_IV0, ApiVersion("2.2-IV0"))
assertEquals(KAFKA_2_2_IV1, ApiVersion("2.2-IV1"))
assertEquals(KAFKA_2_3_IV1, ApiVersion("2.3"))
assertEquals(KAFKA_2_3_IV0, ApiVersion("2.3-IV0"))
assertEquals(KAFKA_2_3_IV1, ApiVersion("2.3-IV1"))
assertEquals(KAFKA_2_4_IV1, ApiVersion("2.4"))
assertEquals(KAFKA_2_4_IV0, ApiVersion("2.4-IV0"))
assertEquals(KAFKA_2_4_IV1, ApiVersion("2.4-IV1"))
assertEquals(KAFKA_2_5_IV0, ApiVersion("2.5"))
assertEquals(KAFKA_2_5_IV0, ApiVersion("2.5-IV0"))
assertEquals(KAFKA_2_6_IV0, ApiVersion("2.6"))
assertEquals(KAFKA_2_6_IV0, ApiVersion("2.6-IV0"))
assertEquals(KAFKA_2_7_IV0, ApiVersion("2.7-IV0"))
assertEquals(KAFKA_2_7_IV1, ApiVersion("2.7-IV1"))
assertEquals(KAFKA_2_7_IV2, ApiVersion("2.7-IV2"))
assertEquals(KAFKA_2_8_IV1, ApiVersion("2.8"))
assertEquals(KAFKA_2_8_IV0, ApiVersion("2.8-IV0"))
assertEquals(KAFKA_2_8_IV1, ApiVersion("2.8-IV1"))
}
@Test
def testApiVersionUniqueIds(): Unit = {
val allIds: Seq[Int] = ApiVersion.allVersions.map(apiVersion => {
apiVersion.id
})
val uniqueIds: Set[Int] = allIds.toSet
assertEquals(allIds.size, uniqueIds.size)
}
@Test
def testMinSupportedVersionFor(): Unit = {
assertEquals(KAFKA_0_8_0, ApiVersion.minSupportedFor(RecordVersion.V0))
assertEquals(KAFKA_0_10_0_IV0, ApiVersion.minSupportedFor(RecordVersion.V1))
assertEquals(KAFKA_0_11_0_IV0, ApiVersion.minSupportedFor(RecordVersion.V2))
// Ensure that all record versions have a defined min version so that we remember to update the method
for (recordVersion <- RecordVersion.values)
assertNotNull(ApiVersion.minSupportedFor(recordVersion))
}
@Test
def testShortVersion(): Unit = {
assertEquals("0.8.0", KAFKA_0_8_0.shortVersion)
assertEquals("0.10.0", KAFKA_0_10_0_IV0.shortVersion)
assertEquals("0.10.0", KAFKA_0_10_0_IV1.shortVersion)
assertEquals("0.11.0", KAFKA_0_11_0_IV0.shortVersion)
assertEquals("0.11.0", KAFKA_0_11_0_IV1.shortVersion)
assertEquals("0.11.0", KAFKA_0_11_0_IV2.shortVersion)
assertEquals("1.0", KAFKA_1_0_IV0.shortVersion)
assertEquals("1.1", KAFKA_1_1_IV0.shortVersion)
assertEquals("2.0", KAFKA_2_0_IV0.shortVersion)
assertEquals("2.0", KAFKA_2_0_IV1.shortVersion)
assertEquals("2.1", KAFKA_2_1_IV0.shortVersion)
assertEquals("2.1", KAFKA_2_1_IV1.shortVersion)
assertEquals("2.1", KAFKA_2_1_IV2.shortVersion)
assertEquals("2.2", KAFKA_2_2_IV0.shortVersion)
assertEquals("2.2", KAFKA_2_2_IV1.shortVersion)
assertEquals("2.3", KAFKA_2_3_IV0.shortVersion)
assertEquals("2.3", KAFKA_2_3_IV1.shortVersion)
assertEquals("2.4", KAFKA_2_4_IV0.shortVersion)
assertEquals("2.5", KAFKA_2_5_IV0.shortVersion)
assertEquals("2.6", KAFKA_2_6_IV0.shortVersion)
assertEquals("2.7", KAFKA_2_7_IV2.shortVersion)
assertEquals("2.8", KAFKA_2_8_IV0.shortVersion)
}
@Test
def testApiVersionValidator(): Unit = {
val str = ApiVersionValidator.toString
val apiVersions = str.slice(1, str.length).split(",")
assertEquals(ApiVersion.allVersions.size, apiVersions.length)
}
@Test
def shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue(): Unit = {
val response = ApiVersion.apiVersionsResponse(
10,
RecordVersion.V1,
Features.emptySupportedFeatures,
None,
ListenerType.ZK_BROKER
)
verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1)
assertEquals(10, response.throttleTimeMs)
assertTrue(response.data.supportedFeatures.isEmpty)
assertTrue(response.data.finalizedFeatures.isEmpty)
assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data.finalizedFeaturesEpoch)
}
@Test
def shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle(): Unit = {
val response = ApiVersion.apiVersionsResponse(
10,
RecordVersion.V1,
Features.supportedFeatures(
Utils.mkMap(Utils.mkEntry("feature", new SupportedVersionRange(1.toShort, 4.toShort)))),
Features.finalizedFeatures(
Utils.mkMap(Utils.mkEntry("feature", new FinalizedVersionRange(2.toShort, 3.toShort)))),
10,
None,
ListenerType.ZK_BROKER
)
verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1)
assertEquals(10, response.throttleTimeMs)
assertEquals(1, response.data.supportedFeatures.size)
val sKey = response.data.supportedFeatures.find("feature")
assertNotNull(sKey)
assertEquals(1, sKey.minVersion)
assertEquals(4, sKey.maxVersion)
assertEquals(1, response.data.finalizedFeatures.size)
val fKey = response.data.finalizedFeatures.find("feature")
assertNotNull(fKey)
assertEquals(2, fKey.minVersionLevel)
assertEquals(3, fKey.maxVersionLevel)
assertEquals(10, response.data.finalizedFeaturesEpoch)
}
private def verifyApiKeysForMagic(response: ApiVersionsResponse, maxMagic: Byte): Unit = {
for (version <- response.data.apiKeys.asScala) {
assertTrue(ApiKeys.forId(version.apiKey).minRequiredInterBrokerMagic <= maxMagic)
}
}
@Test
def shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle(): Unit = {
val response = ApiVersion.apiVersionsResponse(
AbstractResponse.DEFAULT_THROTTLE_TIME,
RecordVersion.current(),
Features.emptySupportedFeatures,
None,
ListenerType.ZK_BROKER
)
assertEquals(new util.HashSet[ApiKeys](ApiKeys.zkBrokerApis), apiKeysInResponse(response))
assertEquals(AbstractResponse.DEFAULT_THROTTLE_TIME, response.throttleTimeMs)
assertTrue(response.data.supportedFeatures.isEmpty)
assertTrue(response.data.finalizedFeatures.isEmpty)
assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data.finalizedFeaturesEpoch)
}
@Test
def testMetadataQuorumApisAreDisabled(): Unit = {
val response = ApiVersion.apiVersionsResponse(
AbstractResponse.DEFAULT_THROTTLE_TIME,
RecordVersion.current(),
Features.emptySupportedFeatures,
None,
ListenerType.ZK_BROKER
)
// Ensure that APIs needed for the KRaft mode are not exposed through ApiVersions until we are ready for them
val exposedApis = apiKeysInResponse(response)
assertFalse(exposedApis.contains(ApiKeys.ENVELOPE))
assertFalse(exposedApis.contains(ApiKeys.VOTE))
assertFalse(exposedApis.contains(ApiKeys.BEGIN_QUORUM_EPOCH))
assertFalse(exposedApis.contains(ApiKeys.END_QUORUM_EPOCH))
assertFalse(exposedApis.contains(ApiKeys.DESCRIBE_QUORUM))
}
private def apiKeysInResponse(apiVersions: ApiVersionsResponse) = {
val apiKeys = new util.HashSet[ApiKeys]
for (version <- apiVersions.data.apiKeys.asScala) {
apiKeys.add(ApiKeys.forId(version.apiKey))
}
apiKeys
}
}
| Chasego/kafka | core/src/test/scala/unit/kafka/api/ApiVersionTest.scala | Scala | apache-2.0 | 10,877 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.AccountsPreviousPeriodValidation
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
/**
 * Box AC5052C: "Debtors due after more than one year" (FRS 102 accounts).
 * Optional monetary integer supplied as user input.
 */
case class AC5052C(value: Option[Int]) extends CtBoxIdentifier(name = "Debtors due after more than one year") with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with AccountsPreviousPeriodValidation
  with SelfValidatableBox[Frs102AccountsBoxRetriever, Option[Int]]
  with Validators {

  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
    collectErrors (
      // Input-allowed check is gated on AC205 — presumably the previous-period
      // start date; confirm against AccountsPreviousPeriodValidation.
      validateInputAllowed("AC5052C", boxRetriever.ac205()),
      // Must be a non-negative money amount.
      validateMoney(value, min = 0),
      // Must not exceed the value entered in box AC53.
      validateOptionalIntegerLessOrEqualBox(boxRetriever.ac53())
    )
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC5052C.scala | Scala | apache-2.0 | 1,968 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.stats
import java.util.Map.Entry
import org.apache.accumulo.core.data.{Key, Mutation, Value}
import org.geotools.factory.Hints
import org.locationtech.geomesa.accumulo.data._
import org.locationtech.geomesa.accumulo.index.QueryHints._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeatureType
/**
* Class for capturing query-related stats
*/
// One captured query: who ran what, when, with which hints, and how long the
// planning/scanning phases took.
case class QueryStat(typeName: String,
                     date: Long,     // query time as epoch millis (see StatTransform.dateFormat)
                     user: String,
                     filter: String, // serialized query filter
                     hints: String,  // serialized query hints (see hintsToString)
                     planTime: Long, // planning duration in ms
                     scanTime: Long, // scanning duration in ms
                     hits: Int) extends Stat
/**
* Maps query stats to accumulo
*/
object QueryStatTransform extends StatTransform[QueryStat] {

  // Column qualifiers under which each stat field is persisted.
  private val CQ_USER = "user"
  private val CQ_QUERY_FILTER = "queryFilter"
  private val CQ_QUERY_HINTS = "queryHints"
  private val CQ_PLANTIME = "timePlanning"
  private val CQ_SCANTIME = "timeScanning"
  private val CQ_TIME = "timeTotal"
  private val CQ_HITS = "hits"

  /**
   * Encodes a QueryStat as an accumulo mutation. Durations are stored with an
   * "ms" suffix; timeTotal is derived as planning + scanning time and is not
   * read back in rowToStat.
   */
  override def statToMutation(stat: QueryStat): Mutation = {
    val mutation = createMutation(stat)
    val cf = createRandomColumnFamily
    mutation.put(cf, CQ_USER, stat.user)
    mutation.put(cf, CQ_QUERY_FILTER, stat.filter)
    mutation.put(cf, CQ_QUERY_HINTS, stat.hints)
    mutation.put(cf, CQ_PLANTIME, stat.planTime + "ms")
    mutation.put(cf, CQ_SCANTIME, stat.scanTime + "ms")
    mutation.put(cf, CQ_TIME, (stat.scanTime + stat.planTime) + "ms")
    mutation.put(cf, CQ_HITS, stat.hits.toString)
    mutation
  }

  // Row ids have the form "<featureName>~<date>".
  val ROWID = "(.*)~(.*)".r

  /**
   * Reconstructs a QueryStat from the entries of a single row.
   * Returns null when no entries are supplied — callers must handle this.
   * Missing columns fall back to defaults ("unknown" user, empty strings, 0).
   */
  override def rowToStat(entries: Iterable[Entry[Key, Value]]): QueryStat = {
    if (entries.isEmpty) {
      return null
    }

    // All entries share the row id; parse feature name and date from the head.
    val ROWID(featureName, dateString) = entries.head.getKey.getRow.toString
    val date = StatTransform.dateFormat.parseMillis(dateString)
    val values = collection.mutable.Map.empty[String, Any]

    entries.foreach { e =>
      e.getKey.getColumnQualifier.toString match {
        case CQ_USER => values.put(CQ_USER, e.getValue.toString)
        case CQ_QUERY_FILTER => values.put(CQ_QUERY_FILTER, e.getValue.toString)
        case CQ_QUERY_HINTS => values.put(CQ_QUERY_HINTS, e.getValue.toString)
        case CQ_PLANTIME => values.put(CQ_PLANTIME, e.getValue.toString.stripSuffix("ms").toLong)
        case CQ_SCANTIME => values.put(CQ_SCANTIME, e.getValue.toString.stripSuffix("ms").toLong)
        case CQ_HITS => values.put(CQ_HITS, e.getValue.toString.toInt)
        case CQ_TIME => // time is an aggregate, doesn't need to map back to anything
        case _ => logger.warn(s"Unmapped entry in query stat: ${e.getKey.getColumnQualifier.toString}")
      }
    }

    val user = values.getOrElse(CQ_USER, "unknown").asInstanceOf[String]
    val queryHints = values.getOrElse(CQ_QUERY_HINTS, "").asInstanceOf[String]
    val queryFilter = values.getOrElse(CQ_QUERY_FILTER, "").asInstanceOf[String]
    val planTime = values.getOrElse(CQ_PLANTIME, 0L).asInstanceOf[Long]
    val scanTime = values.getOrElse(CQ_SCANTIME, 0L).asInstanceOf[Long]
    val hits = values.getOrElse(CQ_HITS, 0).asInstanceOf[Int]

    QueryStat(featureName, date, user, queryFilter, queryHints, planTime, scanTime, hits)
  }

  // list of query hints we want to persist
  // NOTE: RETURN_ENCODED is handled in keyToString but intentionally (?) not
  // persisted here — confirm before adding it.
  val QUERY_HINTS = List[Hints.Key](TRANSFORMS,
                                    TRANSFORM_SCHEMA,
                                    DENSITY_BBOX_KEY,
                                    WIDTH_KEY,
                                    HEIGHT_KEY,
                                    BIN_TRACK_KEY,
                                    TEMPORAL_DENSITY_KEY,
                                    TIME_INTERVAL_KEY,
                                    TIME_BUCKETS_KEY)

  /**
   * Converts a query hints object to a string for persisting.
   * Only the hints listed in QUERY_HINTS are captured; output is sorted and
   * comma-separated for a stable representation.
   *
   * @param hints
   * @return
   */
  def hintsToString(hints: Hints): String =
    QUERY_HINTS.flatMap(k => Option(hints.get(k)).map(v => hintToString(k, v))).sorted.mkString(",")

  /**
   * Converts a single hint to a "KEY=value" string.
   */
  private def hintToString(key: Hints.Key, value: AnyRef): String =
    s"${keyToString(key)}=${valueToString(value)}"

  /**
   * Maps a query hint to a string. We need this since the classes themselves don't really have a
   * decent toString representation.
   *
   * @param key
   * @return
   */
  private def keyToString(key: Hints.Key): String =
    key match {
      case TRANSFORMS => "TRANSFORMS"
      case TRANSFORM_SCHEMA => "TRANSFORM_SCHEMA"
      case BIN_TRACK_KEY => "BIN_TRACK_KEY"
      case TEMPORAL_DENSITY_KEY => "TEMPORAL_DENSITY_KEY"
      case TIME_INTERVAL_KEY => "TIME_INTERVAL_KEY"
      case RETURN_ENCODED => "RETURN_ENCODED"
      case TIME_BUCKETS_KEY => "TIME_BUCKETS_KEY"
      case DENSITY_BBOX_KEY => "DENSITY_BBOX_KEY"
      case WIDTH_KEY => "WIDTH_KEY"
      case HEIGHT_KEY => "HEIGHT_KEY"
      case _ => "unknown_hint"
    }

  /**
   * Encodes a value into a decent readable string.
   * SimpleFeatureTypes get a "[typeName]spec" rendering; everything else uses toString.
   */
  private def valueToString(value: AnyRef): String = value match {
    case null => "null"
    case sft: SimpleFeatureType => s"[${sft.getTypeName}]${SimpleFeatureTypes.encodeType(sft)}"
    case v => v.toString
  }
} | vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/stats/QueryStat.scala | Scala | apache-2.0 | 5,853 |
package audit.viewer
import java.util.Date
import akka.actor.{Actor, Props}
import akka.http.scaladsl.model.StatusCodes.{NoContent, OK}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import audit.Api
import audit.viewer.Viewer.{History, HistoryDetails}
import audit.viewer.ViewerActor.{ViewById, ViewLast}
import org.scalatest.WordSpec
/**
 * Route tests for ViewerApi against a stubbed viewer actor.
 *
 * BUG FIX: the original checks used bare `x === y` expressions inside
 * `check { ... }`. ScalaTest's `===` (from TripleEquals) returns a Boolean,
 * which was silently discarded — so none of these expectations could ever
 * fail. They are now wrapped in `assert(...)` so failures are reported.
 */
class ViewerApiSpec extends WordSpec with ScalatestRouteTest with ViewerApi with Api {

  override implicit val executionContext = executor

  // Canned data returned by the stubbed viewer actor.
  val history = List(History("eid-01", "s-01", "msg-01", "desc-01", "state-01", new Date()))
  val historyDetails = HistoryDetails("msg-01", "desc-01", "state-01", new Date(), "user-01")

  // Stub actor: fixed history for ViewLast; None for the unknown "system-02",
  // otherwise the canned details.
  override val viewer = system.actorOf(Props(new Actor {
    override def receive = {
      case ViewLast(_, _) => sender() ! history
      case ViewById("system-02", _) => sender() ! None
      case ViewById(_, _) => sender() ! Option(historyDetails)
    }
  }))

  "ViewerApi" should {
    "provide last events" in {
      Get("/view/user/user-01/last?limit=10") ~> viewerRoute ~> check {
        assert(status === OK)
        assert(responseAs[List[History]] === history)
      }
    }
    "provide specific event by 'system' and 'external id'" in {
      Get("/view/details/system/system-01/id/eid-01") ~> viewerRoute ~> check {
        assert(status === OK)
        assert(responseAs[HistoryDetails] === historyDetails)
      }
    }
    "not provide event if it does not exist" in {
      Get("/view/details/system/system-02/id/eid-01") ~> viewerRoute ~> check {
        assert(status === NoContent)
      }
    }
  }
}
| grzesiekw/audit | src/test/scala/audit/viewer/ViewerApiSpec.scala | Scala | mit | 1,564 |
package monocle.bench
import monocle.bench.BenchModel._
import monocle.{PTraversal, Traversal}
import org.openjdk.jmh.annotations.{Benchmark, Scope, State}
import cats.instances.sortedMap._
import cats.instances.int._
import scala.collection.immutable.SortedMap
// JMH benchmarks comparing Monocle traversals over a case class (Point3) and
// over a SortedMap. Fixtures `p` and `map` come from BenchModel._ .
@State(Scope.Benchmark)
class MonocleTraversalBench {

  // Traversal over the three Int fields of Point3; the setter's last argument
  // (the original Point3) is ignored when rebuilding.
  val point3Traversal = Traversal.apply3[Point3, Int](_.x, _.y, _.z)((x, y, z, _) => Point3(x, y, z))
  // Traversal over all values of a SortedMap[Int, Int] via its Traverse instance.
  val iMapTraversal = PTraversal.fromTraverse[SortedMap[Int, ?], Int, Int]

  @Benchmark def caseClassGetAll() = point3Traversal.getAll(p)
  @Benchmark def caseClassSet() = point3Traversal.set(5)(p)
  @Benchmark def caseClassModify() = point3Traversal.modify(_ + 1)(p)

  @Benchmark def collectionGetAll() = iMapTraversal.getAll(map)
  @Benchmark def collectionSet() = iMapTraversal.set(12)(map)
  @Benchmark def collectionModify() = iMapTraversal.modify(_ + 1)(map)
}
| aoiroaoino/Monocle | bench/src/main/scala/monocle/bench/MonocleTraversalBench.scala | Scala | mit | 924 |
/*
* Copyright 2012 Jonathan Anderson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.footlights.api.support
/**
* Provide no-nonsense foreach, map and flatMap methods to Either.
*
* Rather than (((foo.right map f).right map g).right map h), provide foreach, map and flatMap
* like Option. This is sensible because we always use Either[exceptional case, normal case],
* so map ought to normally operate on the right case.
*
* Also, add the tee method like in {@link Tee}.
*/
class MappableEither[A <: Throwable, B](e: Either[A,B]) {
  /** Returns the right value, or throws the left-hand throwable. */
  def get(): B = e match {
    case Right(value) => value
    case Left(error) => throw error
  }

  /** Returns the right value, or throws the result of mapping the left-hand throwable. */
  def getOrElse[C <: Throwable](f: A => C): B = e match {
    case Right(value) => value
    case Left(error) => throw f(error)
  }

  /** Keeps this Either when it is a Right, otherwise yields the alternative. */
  def orElse(alternative: Either[A,B]) = if (e.isRight) e else alternative

  /** Applies f to the right value, if present. */
  def foreach(f: B => Any): Unit = e.right.foreach(f)

  /** Maps over the right value. */
  def map[C](f: B => C): Either[A,C] = e.right.map(f)

  /** Flat-maps over the right value. */
  def flatMap[C](f: B => Either[A,C]): Either[A,C] = e.right.flatMap(f)

  /** Maps over the left (error) value. */
  def leftMap[C](f: A => C): Either[C,B] = e.left.map(f)

  /** Flat-maps over the left (error) value. */
  def leftFlatMap[C](f: A => Either[C,B]): Either[C,B] = e.left.flatMap(f)

  /** Do a tee: like a foreach, but returns this Either so errors propagate. */
  def tee(f: B => Any): Either[A,B] = {
    e.right.foreach(f)
    e
  }
}
object Either {
  // Implicitly wraps a scala.Either so the no-nonsense foreach/map/flatMap/tee
  // combinators become available on plain Either values.
  implicit def either2mappable[A <: Throwable, B](e: Either[A,B]) = new MappableEither(e)
}
| nasrallahmounir/Footlights | API/src/main/scala/me/footlights/api/support/either.scala | Scala | apache-2.0 | 1,845 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.catalog
import org.apache.flink.table.api._
import org.apache.flink.table.factories._
import org.apache.flink.table.plan.schema._
import org.apache.flink.table.plan.stats.FlinkStatistic
import org.apache.flink.table.sources.TableSource
import org.apache.flink.table.util.JavaScalaConversionUtil.toScala
import org.apache.flink.table.util.Logging
/**
* The utility class is used to convert [[ExternalCatalogTable]].
*
* It uses [[TableFactoryService]] for discovering.
*/
object ExternalTableUtil extends Logging {

  /**
   * Converts an [[ExternalCatalogTable]] instance to a [[TableSourceTable]] instance
   *
   * @param externalTable the [[ExternalCatalogTable]] instance which to convert
   * @return converted [[TableSourceTable]] instance from the input catalog table
   */
  def fromExternalCatalogTable[T](isBatch: Boolean, externalTable: ExternalCatalogTable)
    : Option[TableSourceTable[T]] = {

    val statistics = new FlinkStatistic(toScala(externalTable.getTableStats))
    if (!externalTable.isTableSource) {
      None
    } else {
      Some(createTableSource(isBatch, externalTable, statistics))
    }
  }

  /** Discovers and instantiates the table source behind the given catalog table. */
  private def createTableSource[T](
      isBatch: Boolean,
      externalTable: ExternalCatalogTable,
      statistics: FlinkStatistic)
    : TableSourceTable[T] = {
    // Reject tables whose declared mode (batch/stream) does not match the
    // current environment before attempting discovery.
    if (!isModeCompatibleWithTable(isBatch, externalTable)) {
      throw new ValidationException(
        "External catalog table does not support the current environment for a table source.")
    }
    val source = TableFactoryUtil.findAndCreateTableSource(externalTable)
    new TableSourceTable[T](source.asInstanceOf[TableSource[T]], !isBatch, statistics)
  }

  /** A batch environment requires a batch table; a stream environment a stream table. */
  private def isModeCompatibleWithTable[T](
      isBatch: Boolean,
      externalTable: ExternalCatalogTable)
    : Boolean = {
    if (isBatch) externalTable.isBatchTable else externalTable.isStreamTable
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala | Scala | apache-2.0 | 2,749 |
package org.dele.text.maen.matchers
import org.dele.text.maen.matchers.SubMatchCheckerLib._
import org.dele.text.maen.utils.HelperFuncs
import org.dele.text.maen._
import org.dele.text.maen.{AtomSeqMatch, TAtomMatcher, TInput, TMatchResultPool}
import scala.collection.mutable.ListBuffer
/**
* Created by jiaji on 2016-02-09.
*/
import TMatcher._
// Core matcher contract: produces atom-sequence matches over an input held in
// a TMatchResultPool.
trait TMatcher extends TMatcherDep {
  // Optional identifier; id-less matchers cannot be referenced by id elsewhere.
  val id: Option[MId]
  // True when this matcher carries exactly the given id (false when id-less).
  def idEquals(id2: String) = if (id.isEmpty) false else id.get == id2
  // All matches starting exactly at the given atom index.
  def matchAt(resultPool: TMatchResultPool, index: Int): Set[AtomSeqMatch]
  // All matches whose start index is >= start.
  def matchFrom(resultPool: TMatchResultPool, start: Int): Set[AtomSeqMatch]
  // All matches over the whole input.
  def m(resultPool: TMatchResultPool) = matchFrom(resultPool, 0)
  // Unfiltered/uncached computation hook implemented by concrete matchers.
  protected def _matchFrom(resultPool: TMatchResultPool, start: Int): Set[AtomSeqMatch]
  // Nesting depth: 1 for leaf matchers, max(sub-matcher depths) + 1 for composites.
  val depth: Int
}
object TMatcher {
import org.dele.text.maen.ErrorHandling._
import org.dele.text.maen.AtomSeqMatch._
import collection.mutable
  trait TMatcherDep {
    // Ids of matchers whose results this matcher's results depend on; passed to
    // the result cache alongside cached matches (see MatcherBase).
    def depMatcherIds: Set[MId]
  }
  // Matcher identifier type, plus shared empty defaults used throughout.
  type MId = String
  val EmptyIdSet = Set[MId]()
  val EmptyCheckerIds = List[String]()
  /**
   * Common base for matchers: caches computed matches per result pool, caps
   * result counts via MatchLimit, and filters candidate matches through the
   * configured sub-match checkers.
   */
  abstract class MatcherBase(val id: Option[MId] = None,
                             val subMatchCheckerIds: Iterable[String] = EmptyCheckerIds)
                            (implicit val subMatchCheckerLib: SubMatchCheckerLib) extends TMatcher with MatchLimit {

    // Previously computed matches for this matcher in this pool, if any.
    private def checkCache(resultPool: TMatchResultPool): Option[Set[AtomSeqMatch]] = resultPool.matcherOpCache.cached(this)

    // Returns cached matches, or computes them, caps them at MaxCount
    // (ordered by start index, then end index), caches and returns them.
    protected final def getFromCacheOrCompute(resultPool: TMatchResultPool): Set[AtomSeqMatch] = {
      val cached: Option[Set[AtomSeqMatch]] = checkCache(resultPool)
      if (cached.isEmpty) {
        //println("[%s] cache hit(%d): %s".format(this.id, cache.get.size, cache.get.mkString("\t\t")))
        val results = _matchFrom(resultPool, 0)
        val limitAppliedResults = if (results.size <= MaxCount) results else {
          val ordered = results.toList.sortBy(m => m.range.start -> m.range.end)
          ordered.take(MaxCount).toSet
        }
        resultPool.matcherOpCache.cache(this, this.depMatcherIds, limitAppliedResults)
        limitAppliedResults
      }
      else cached.get
    }

    // matchFrom/matchAt both post-filter the full cached result set.
    final def matchFrom(resultPool: TMatchResultPool, start: Int): Set[AtomSeqMatch] = {
      val c = getFromCacheOrCompute(resultPool)
      c.filter(_.range.start >= start)
    }
    final def matchAt(resultPool: TMatchResultPool, index: Int): Set[AtomSeqMatch] = {
      val matches = getFromCacheOrCompute(resultPool)
      matches.filter(_.range.start == index)
    }

    // Keeps only matches accepted by at least ONE configured checker
    // (note: exists, not forall); no-op when no checker ids are configured.
    protected final def subMatchCheck(resultPool: TMatchResultPool, in: Set[AtomSeqMatch]): Set[AtomSeqMatch] = {
      if (subMatchCheckerIds.nonEmpty) {
        val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
        in.filter(m => submatchCheckers.exists(_.check(m.subMatches, resultPool)))
      }
      else in
    }

    protected def _matchFrom(resultPool: TMatchResultPool, start: Int): Set[AtomSeqMatch]
    /*
    = {
      val uncheckedTmp = mutable.Set[AtomSeqMatch]() //(start to input.atoms.size-1).map(idx => matchAt(input, resultPool, idx))
      (start to resultPool.input.atoms.size-1).foreach{ idx =>
        uncheckedTmp ++= matchAt(resultPool, idx)
      }
      subMatchCheck(resultPool, uncheckedTmp.toSet)
    }
    */

    // Same exists-based filtering as subMatchCheck, applied to a match's
    // sub-matches (Set overload) or to raw sub-match sequences (Seq overload).
    protected def filterBySubMatchCheckers(in: Set[AtomSeqMatch], resultPool: TMatchResultPool): Set[AtomSeqMatch] = {
      if (subMatchCheckerIds.isEmpty) in
      else {
        val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
        in.filter(m => submatchCheckers.exists(_.check(m.subMatches, resultPool)))
      }
    }
    protected def filterBySubMatchCheckers(in: Iterable[Seq[AtomSeqMatch]], resultPool: TMatchResultPool): Iterable[Seq[AtomSeqMatch]] = {
      if (subMatchCheckerIds.isEmpty) in
      else {
        val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
        in.filter(m => submatchCheckers.exists(_.check(m, resultPool)))
      }
    }

    // Union of the dependency ids contributed by the configured checkers.
    def depMatcherIds: Set[MId] = {
      val depIdList = subMatchCheckerIds.map(subMatchCheckerLib.getDepMatcherIds)
      if (depIdList.nonEmpty) depIdList.reduce(_ ++ _)
      else EmptyIdSet
    }
    //def depMatcherIds:Set[MId] = EmptyIdSet
  }
  // Function computing all matches that start at a given atom index.
  private type MatchAtFunc = (TMatchResultPool, Int) => Set[AtomSeqMatch]

  // Mixin implementing _matchFrom by probing matchAtFunc at every index from
  // `start` and filtering the union through the sub-match checkers.
  private trait TMatchAt {
    this: MatcherBase =>
    protected val matchAtFunc: MatchAtFunc
    override def _matchFrom(resultPool: TMatchResultPool, start: Int): Set[AtomSeqMatch] = {
      val uncheckedTmp = mutable.Set[AtomSeqMatch]() //(start to input.atoms.size-1).map(idx => matchAt(input, resultPool, idx))
      (start to resultPool.input.atoms.size-1).foreach { idx =>
        uncheckedTmp ++= matchAtFunc(resultPool, idx)
      }
      subMatchCheck(resultPool, uncheckedTmp.toSet)
    }
  }
/*
private def _matchFromByIndices(resultPool:TMatchResultPool, start:Int, matchAtFunc:MatchAtFunc):Set[AtomSeqMatch] = {
val uncheckedTmp = mutable.Set[AtomSeqMatch]() //(start to input.atoms.size-1).map(idx => matchAt(input, resultPool, idx))
(start to resultPool.input.atoms.size-1).foreach{ idx =>
uncheckedTmp ++= matchAtFunc(resultPool, idx)
}
uncheckedTmp.toSet
}
*/
//def _mergeMatchSetSeq(in:Seq[Set[AtomSeqMatch]]):Set[AtomSeqMatch] = in.foldLeft(Set[AtomSeqMatch]())(_ ++ _)
  // Atom-level matchers get twice the default match-count cap.
  trait AtomMatchLimit extends MatchLimit {
    override val MaxCount = MatcherManager.MaxMatchCount*2
  }
  // Leaf matcher: yields a single-atom match wherever the wrapped TAtomMatcher
  // accepts the atom.
  private[TMatcher] class _FromAtomMatcher(val atomMatcher: TAtomMatcher,
                                           subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                                           id: Option[MId] = None)
                                          (implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) with AtomMatchLimit with TMatchAt {
    override def toString = "[%s:%s]".format(
      if (id.isEmpty) "(NoId)" else id.get,
      atomMatcher
    )
    // One single-atom match at `index` when the atom passes the check, else empty.
    protected val matchAtFunc: MatchAtFunc = (resultPool, index) => {
      if (atomMatcher.check(resultPool.input.atoms(index))) Set(from(resultPool, index, this))
      else EmptyMatchResult
    }
    // Leaf matchers sit at depth 1.
    val depth = 1
  }
  // Factory for the leaf atom matcher.
  def fromAtomMatcher(atomMatcher: TAtomMatcher,
                      subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                      id: Option[MId] = None)
                     (implicit subMatchCheckerLib: SubMatchCheckerLib): TMatcher = new _FromAtomMatcher(atomMatcher, subMatchCheckerIds, id)
import scala.util.control.Breaks._
  // Matches a literal string that may span several consecutive atoms: atoms are
  // consumed while each atom's text is a prefix of the remaining target string
  // (whitespace between consumed pieces is trimmed away).
  private[TMatcher] class _VarLengthStringMatcher(val string2Match: String,
                                                  caseSensitive: Boolean,
                                                  subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                                                  id: Option[MId] = None)
                                                 (implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) with TMatchAt {
    // Normalized target: lower-cased when matching case-insensitively.
    private val matchStr = if (caseSensitive) string2Match else string2Match.toLowerCase
    protected val matchAtFunc: MatchAtFunc = (resultPool, index) => {
      var idx = index
      var lastAtomIndex = -1  // index of the final consumed atom, -1 = no match
      var remStr = matchStr   // part of the target not yet consumed
      val input = resultPool.input
      breakable {
        while (idx < input.atoms.size) {
          var txt = input.atoms(idx).text
          if (!caseSensitive) txt = txt.toLowerCase
          // Abort as soon as the current atom is not a prefix of the remainder.
          if (!remStr.startsWith(txt)) break
          remStr = remStr.substring(txt.length).trim
          if (remStr.isEmpty) {
            // Entire target consumed: record the span's last atom.
            lastAtomIndex = idx
            break
          }
          idx = idx + 1
        }
      }
      if (lastAtomIndex >= 0) Set(from(resultPool, index to lastAtomIndex, this))
      else Set()
    }
    val depth = 1
  }
  // Factory for the multi-atom literal string matcher (case-insensitive by default).
  def varLengthStringMatcher(string2Match: String,
                             subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                             caseSensitive: Boolean = false,
                             id: Option[MId] = None)
                            (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _VarLengthStringMatcher(string2Match, caseSensitive, subMatchCheckerIds, id)
  // Wildcard matcher: matches any run of exactly `count` consecutive atoms,
  // provided the run fits within the input.
  private[TMatcher] class _AnyAtomMatcher(val count: Int,
                                          subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                                          id: Option[MId] = None)
                                         (implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) with AtomMatchLimit with TMatchAt {
    protected val matchAtFunc: MatchAtFunc = (resultPool, index) => {
      val endIndex = index + count - 1
      if (endIndex < resultPool.input.atoms.length) Set(from(resultPool, index to endIndex, this))
      else Set()
    }
    val depth = 1
  }
  // Factory for the fixed-length wildcard matcher.
  def anyAtomMatcher(count: Int,
                     subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                     id: Option[MId] = None)
                    (implicit subMatchCheckerLib: SubMatchCheckerLib): TMatcher = new _AnyAtomMatcher(count, subMatchCheckerIds, id)
  // Base for matchers composed of sub-matchers: adds id-based sub-match lookup,
  // dependency-id aggregation, and depth bookkeeping.
  private[TMatcher] abstract class _CompositeMatcherBase(val subMatchers: Seq[TMatcher],
                                                         subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                                                         id: Option[MId])
                                                        (implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) {
    //todo: query from Match result pool instead
    // Identified sub-matchers are resolved via the pool's id query (their
    // results are expected to be available there); id-less ones are computed.
    def subMatchAt(subMatcher: TMatcher, resultPool: TMatchResultPool, index: Int): Set[AtomSeqMatch] =
      if (subMatcher.id.isEmpty) subMatcher.matchAt(resultPool, index) else resultPool.query(subMatcher.id.get)

    // Checker deps plus, per sub-matcher: its id when present, otherwise its
    // own transitive dependency ids.
    override def depMatcherIds: Set[MId] = {
      //super.depMatcherIds ++ subMatchers.flatMap(_.id).toSet ++ subMatchers.flatMap(_.depMatcherIds)
      val p1 = super.depMatcherIds
      val p2 = subMatchers.flatMap { sm =>
        if (sm.id.nonEmpty) Set(sm.id.get)
        else sm.depMatcherIds
      }
      p1 ++ p2
    }
    //def matchAt(input:TInput, resultPool:TMatchResultPool, index:Int):Set[AtomSeqMatch] = throw NotImplemented
    // Composite depth is one more than the deepest sub-matcher.
    val depth = subMatchers.maxBy(_.depth).depth + 1
  }
  // Alternation: matches wherever ANY sub-matcher matches; each alternative is
  // wrapped as a composite match with a single child.
  private[TMatcher] class _MatchersOR(subMatchers: Seq[TMatcher],
                                      subMatchCheckerIds: Iterable[String] = EmptyCheckerIds,
                                      id: Option[MId])
                                     (implicit subMatchCheckerLib: SubMatchCheckerLib) extends _CompositeMatcherBase(subMatchers, subMatchCheckerIds, id) with TMatchAt {
    private val subMatcherSet = subMatchers.toSet
    protected val matchAtFunc: MatchAtFunc = (resultPool, index) => {
      val alts: Set[AtomSeqMatch] = subMatcherSet.flatMap((m: TMatcher) => subMatchAt(m, resultPool, index))
      alts.map(a => from(resultPool, this, List(a)))
    }
    override def toString = subMatchers.map(sm => "(%s)".format(sm.toString)).mkString(" OR ")
    //override def depMatcherIds:Set[MId] = subMatchers.flatMap(_.id).toSet ++ subMatchers.map(_.depMatcherIds).reduce(_ ++ _)
  }
  // Factories for the OR matcher: with a required id, an optional id, or none.
  def matchersOR(id: String,
                 subMatchers: Seq[TMatcher])
                (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _MatchersOR(subMatchers, EmptyCheckerIds, Option(id))
  def matchersOR(id: Option[String],
                 subMatchers: Seq[TMatcher])
                (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _MatchersOR(subMatchers, EmptyCheckerIds, id)
  def matchersOR(subMatchers: Seq[TMatcher])
                (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _MatchersOR(subMatchers, EmptyCheckerIds, None)
  // Shared empty result for sequence matching helpers below.
  private val EmptyResults: Set[List[AtomSeqMatch]] = Set()
  import org.dele.text.maen.utils.HelperFuncs._
  // All matches of each matcher over the whole input, in matcher order.
  private def allMatches(resultPool: TMatchResultPool, matchers: Seq[TMatcher]): Seq[Set[AtomSeqMatch]] = matchers.map(_.m(resultPool))
  //private type _MatchCheckFunc = (TMatchResultPool, Int) => Set[AtomSeqMatch]
  // Recursively matches `matchers` in order, starting at `start`. The head must
  // match at `start` exactly (or anywhere at/after `start` when matchesAllAfter
  // is true, allowing gaps); each following matcher continues just past the end
  // of the previous match. Returns every complete sequence of sub-matches.
  private def orderedMatchesFrom(input: TInput, resultPool: TMatchResultPool, matchers: Seq[TMatcher], start: Int, prev: List[AtomSeqMatch], matchesAllAfter: Boolean): Set[List[AtomSeqMatch]] = {
    if (matchers.isEmpty) Set(prev)
    else {
      if (start >= input.atoms.size) EmptyResults
      else {
        val (head, tail) = (matchers.head, matchers.tail)
        val headMatches = if (matchesAllAfter) head.matchFrom(resultPool, start) else head.matchAt(resultPool, start)
        if (headMatches.isEmpty) EmptyResults
        else {
          // Branch on every head match and recurse from the atom after its end.
          val resultList = mutable.Set[Set[List[AtomSeqMatch]]]()
          headMatches.foreach { hm =>
            val curr = prev :+ hm
            val currResult = orderedMatchesFrom(input, resultPool, tail, hm.range.end + 1, curr, matchesAllAfter)
            if (currResult.nonEmpty) resultList += currResult
          }
          resultList.toSet.flatten
        }
      }
    }
  }
  // Contiguous ordered matching (matchesAllAfter = false) attempted from every
  // index in [start, input size); results of all starting positions are unioned.
  // NOTE(review): the `prev` parameter is ignored — each attempt starts with an
  // empty prefix; confirm this is intended.
  private def orderedMatchesFromLNG(input: TInput, resultPool: TMatchResultPool, matchers: Seq[TMatcher], start: Int, prev: List[AtomSeqMatch]): Set[List[AtomSeqMatch]] = {
    val all = (start until input.atoms.size).map { idx => orderedMatchesFrom(resultPool.input, resultPool, matchers, idx, List(), false) }
    if (all.nonEmpty) all.flatten.toSet
    else Set[List[AtomSeqMatch]]()
    //orderedMatchesFrom(input, resultPool, matchers, start, prev, false)
  }
  // Negative lookaround filter: implements "Not(A) B" (or "B Not(A)" when
  // flipOrder is set). Keeps members of `matches` that are not "preceded"
  // (resp. "followed") by a notMatch, where:
  //   p1 — matches with no notMatch lying strictly before them (after, when flipped);
  //   p2 — for ordered (notMatch, match) pairs, matches whose pair is rejected
  //        by ALL checkers (forall(!check)), i.e. the notMatch is not linked.
  private def checkNAB(notMatches: Set[AtomSeqMatch], matches: Set[AtomSeqMatch], subMatchCheckerIds: Iterable[String], resultPool: TMatchResultPool, flipOrder: Boolean): Set[AtomSeqMatch] = {
    if (matches.isEmpty) EmptyMatchResult
    else {
      if (notMatches.isEmpty) matches
      else {
        // no check needed
        val p1 = if (!flipOrder) {
          matches.filter(m => notMatches.forall(_.range.end >= m.range.start))
        }
        else {
          matches.filter(m => notMatches.forall(_.range.start <= m.range.end))
        }
        // ordered
        val (matches1, matches2) = if (!flipOrder) (notMatches, matches) else (matches, notMatches)
        val tocheck = for (m1 <- matches1; m2 <- matches2 if m1.range.end < m2.range.start) yield Seq(m1, m2)
        if (tocheck.isEmpty) p1
        else {
          val p2 = if (subMatchCheckerIds.isEmpty) EmptyMatchResult
          else {
            val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
            val checked = tocheck.filter(x => submatchCheckers.forall(!_.check(x, resultPool)))
            //val checked = subMatchChecker(tocheck, subMatchCheckerIds, resultPool)
            // Keep the non-"not" element of each surviving pair.
            if (!flipOrder) checked.map(_.last) else checked.map(_.head)
          }
          p1 ++ p2
        }
      }
    }
  }
  // Positive lookaround filter: for ordered pairs (m1 before m2) drawn from the
  // two sets (swapped when flipOrder), keeps the `ms2`-side match of every pair
  // accepted by at least one checker. Returns empty when no checkers are given.
  private def checkAB(ms1: Set[AtomSeqMatch], ms2: Set[AtomSeqMatch], subMatchCheckerIds: Iterable[String], resultPool: TMatchResultPool, flipOrder: Boolean): Set[AtomSeqMatch] = {
    if (ms1.isEmpty || ms2.isEmpty) EmptyMatchResult
    else {
      // ordered
      val (matches1, matches2) = if (!flipOrder) (ms1, ms2) else (ms2, ms1)
      val tocheck = for (m1 <- matches1; m2 <- matches2 if m1.range.end < m2.range.start) yield Seq(m1, m2)
      if (tocheck.isEmpty) EmptyMatchResult
      else {
        val p2 = if (subMatchCheckerIds.isEmpty) EmptyMatchResult
        else {
          val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
          val checked = tocheck.filter(x => submatchCheckers.exists(_.check(x, resultPool)))
          //val checked = subMatchChecker(tocheck, subMatchCheckerIds, resultPool)
          // Keep the looked-for (non-expected) element of each accepted pair.
          if (!flipOrder) checked.map(_.last) else checked.map(_.head)
        }
        p2
      }
    }
  }
  /// matcher used for specific purpose: (A) B .or. A (B)
  /** Lookaround matcher: reports matches of `matcher` that are accompanied by a
    * match of `expected` on the proper side (before by default, after when
    * `flipOrder` is set), validated via [[checkAB]].
    */
  private[TMatcher] class _MatchersLookaroundAB(
    private val expected:TMatcher,
    private val matcher:TMatcher,
    subMatchCheckerIds:Iterable[String],
    val flipOrder:Boolean, // '(expected) matcher' flipped: matcher (expected)
    id:Option[MId] = None
  )(implicit subMatchCheckerLib: SubMatchCheckerLib) extends _CompositeMatcherBase(Seq(expected, matcher), subMatchCheckerIds, id) with TMatchAt {
    /*
    NOTE(review): dead code kept for reference only; it mentions matcher1/matcher2/check_NAB,
    which do not exist in this class.
    override def matchFrom(input:TInput, resultPool:TMatchResultPool, start:Int):Set[AtomSeqMatch] = {
      //val uncheckedMatches = matchFromUnchecked(input, resultPool, start)
      val matches1 = matcher1.matchFrom(input, resultPool, start)
      val matches2 = matcher2.matchFrom(input, resultPool, start)
      check_NAB(matches1, matches2, subMatchCheckerIds, resultPool, flipOrder)
    }
    */
    protected val matchAtFunc:MatchAtFunc = (resultPool, index) => {
      //todo: optimize
      val matches = matcher.matchAt(resultPool, index)
      if (matches.isEmpty) EmptyMatchResult
      else {
        // The "expected" matches are computed over the whole pool, then paired
        // with the main matches via checkAB.
        val expMatches = expected.m(resultPool)
        checkAB(expMatches, matches, subMatchCheckerIds, resultPool, flipOrder)
      }
    }
    override def toString = if (!flipOrder) s"expected($expected) $matcher" else s"$matcher expected($expected) "
  }
  /** Factory for [[_MatchersLookaroundAB]]: "(expected) matcher", or
    * "matcher (expected)" when `flipOrder` is set. */
  def matchersLookaround(expected:TMatcher, matcher:TMatcher, subMatchCheckerIds:Iterable[String] = EmptyCheckerIds, flipOrder:Boolean = false, id:Option[MId] = None)(implicit subMatchCheckerLib: SubMatchCheckerLib) =
    new _MatchersLookaroundAB(expected, matcher, subMatchCheckerIds, flipOrder, id)
  /// matcher used for specific purpose: Not(A) B .or. A Not(B)
  /** Ordered negative matcher: matches of `matcher` survive per [[checkNAB]]
    * (position filter plus checker-rejected ordered pairs against `notMatcher`). */
  private[TMatcher] class _MatchersOrderedNAB(
    private val notMatcher:TMatcher,
    private val matcher:TMatcher,
    subMatchCheckerIds:Iterable[String],
    val flipOrder:Boolean,
    id:Option[MId] = None
  )(implicit subMatchCheckerLib: SubMatchCheckerLib) extends _CompositeMatcherBase(Seq(notMatcher, matcher), subMatchCheckerIds, id) with TMatchAt {
    /*
    NOTE(review): dead code kept for reference only; matcher1/matcher2/check_NAB
    do not exist in this class.
    override def matchFrom(input:TInput, resultPool:TMatchResultPool, start:Int):Set[AtomSeqMatch] = {
      //val uncheckedMatches = matchFromUnchecked(input, resultPool, start)
      val matches1 = matcher1.matchFrom(input, resultPool, start)
      val matches2 = matcher2.matchFrom(input, resultPool, start)
      check_NAB(matches1, matches2, subMatchCheckerIds, resultPool, flipOrder)
    }
    */
    protected val matchAtFunc:MatchAtFunc = (resultPool, index) => {
      //todo: optimize
      val matches = matcher.matchAt(resultPool, index)
      if (matches.isEmpty) EmptyMatchResult
      else {
        // Negative matches are computed over the whole pool, then used to filter.
        val notMatches = notMatcher.m(resultPool)
        checkNAB(notMatches, matches, subMatchCheckerIds, resultPool, flipOrder)
      }
    }
    override def toString = if (!flipOrder) s"Not($notMatcher) $matcher" else s"$matcher Not($notMatcher) "
  }
  /** Keeps matches of `matcher` whose ranges do not overlap any match of `notMatcher`. */
  private[TMatcher] class _MatchersAB_NotOverlap(
    private val matcher:TMatcher,
    private val notMatcher:TMatcher,
    id:Option[MId] = None
  )(implicit subMatchCheckerLib: SubMatchCheckerLib) extends _CompositeMatcherBase(Seq(matcher, notMatcher), EmptyCheckerIds, id) with TMatchAt {
    /*
    NOTE(review): stale copy of dead code from _MatchersOrderedNAB; matcher1/matcher2/
    check_NAB/subMatchCheckerIds/flipOrder do not exist in this class.
    override def matchFrom(input:TInput, resultPool:TMatchResultPool, start:Int):Set[AtomSeqMatch] = {
      //val uncheckedMatches = matchFromUnchecked(input, resultPool, start)
      val matches1 = matcher1.matchFrom(input, resultPool, start)
      val matches2 = matcher2.matchFrom(input, resultPool, start)
      check_NAB(matches1, matches2, subMatchCheckerIds, resultPool, flipOrder)
    }
    */
    protected val matchAtFunc:MatchAtFunc = (resultPool, index) => {
      //todo: optimize
      val matches = matcher.matchAt(resultPool, index)
      if (matches.isEmpty) EmptyMatchResult
      else {
        val notMatches = notMatcher.m(resultPool)
        // Keep a match only when it overlaps none of the negative matches.
        matches.filter(m => notMatches.forall(!_.isOverlap(m)))
      }
    }
    override def toString = s"($matcher) NotOverlap with ($notMatcher)"
  }
  /** Factory for [[_MatchersAB_NotOverlap]]: matches of `matcher` that do not
    * overlap any match of `notMatcher`. */
  def matchersNonOverlap(matcher:TMatcher, notMatcher:TMatcher, id:Option[MId] = None)(implicit subMatchCheckerLib: SubMatchCheckerLib) =
    new _MatchersAB_NotOverlap(matcher, notMatcher, id)
  /** Factory for [[_MatchersOrderedNAB]]: "Not(A) B", or "A Not(B)" when `flipOrder`. */
  def matchersNAB(notMatcher:TMatcher, matcher:TMatcher, subMatchCheckerIds:Iterable[String] = EmptyCheckerIds, flipOrder:Boolean = false, id:Option[MId] = None)
                 (implicit subMatchCheckerLib: SubMatchCheckerLib) =
    new _MatchersOrderedNAB(notMatcher, matcher, subMatchCheckerIds, flipOrder, id)
//val profCount = mutable.Map[String, Int]()
private[TMatcher] class _MatchersALLOrdered(
subMatchers:Seq[TMatcher],
negMatcherIndices:IndexedSeq[Int],
subMatchCheckerIds:Iterable[String] = EmptyCheckerIds,
id:Option[MId]
)(implicit subMatchCheckerLib: SubMatchCheckerLib)
extends _CompositeMatcherBase(subMatchers, subMatchCheckerIds, id) {
private val _posMatchers:Seq[TMatcher] = subMatchers.indices.filter(!negMatcherIndices.contains(_)).map(subMatchers)
import scala.collection.mutable
private val _negMatchers:Map[Int, TMatcher] = {
val transIndices = negMatcherIndexTransform(negMatcherIndices)
(transIndices zip negMatcherIndices.map(subMatchers)).toMap
//negMatcherIndices.map(subMatchers)
}
import scala.util.control.Breaks._
private def checkNegMatchers(subMatches:Seq[AtomSeqMatch], resultPool: TMatchResultPool):Boolean = {
if (_negMatchers.isEmpty) true
else {
var ranges = allGaps(resultPool.input.atoms.indices, subMatches.map(_.range))
var foundNeg = false
breakable {
_negMatchers.foreach { p =>
val idx = p._1
val matcher = p._2
if (ranges(idx).nonEmpty) {
val r = ranges(idx).get
val matches = matcher.m(resultPool) //todo: room for optimization
val f = matches.exists(_.range.intersect(r).nonEmpty)
if (f) {
foundNeg = true
break
}
}
}
}
!foundNeg
}
}
private def isLNG = subMatchCheckerIds.size == 1 && subMatchCheckerIds.head == ListNGramId
override protected def _matchFrom(resultPool:TMatchResultPool, start:Int):Set[AtomSeqMatch] = {
val checkedMatches:Set[List[AtomSeqMatch]] = if (isLNG) orderedMatchesFromLNG(resultPool.input, resultPool, _posMatchers, start, List())
else {
val uncheckedMatches = orderedMatchesFrom(resultPool.input, resultPool, _posMatchers, start, List(), true) //matchFromUnchecked(resultPool.input, resultPool, start)
val checkedNeg = uncheckedMatches.filter(checkNegMatchers(_, resultPool))
if (subMatchCheckerIds.nonEmpty) {
val submatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
checkedNeg.filter(x => submatchCheckers.exists(_.check(x, resultPool)))
}
else checkedNeg
}
checkedMatches.map(from(resultPool, this, _))
}
override def toString = subMatchers.map(sm => "(%s)".format(sm.toString)).mkString(" ")
}
import SubMatchCheckerLib._
  // Shared constant: "no negative sub-matchers".
  private val EmptyNegMatcherIndexes = IndexedSeq[Int]()
  /** Ordered sequence matcher with no negative sub-matchers. */
  def matchersOrderedAllPositive(subMatchers:Seq[TMatcher], subMatchCheckerIds:Iterable[String] = EmptyCheckerIds, id:Option[MId] = None)
                                (implicit subMatchCheckerLib: SubMatchCheckerLib) =
    new _MatchersALLOrdered(subMatchers, EmptyNegMatcherIndexes, subMatchCheckerIds, id)
  //def matchersOrderedAllPositive(subMatchers:Seq[TMatcher], subMatchCheckerLib: SubMatchCheckerLib, subMatchCheckerId:String = NoCheckId) =
  //  new _MatchersALLOrdered(subMatchers, EmptyNegMatcherIndexes, subMatchCheckerLib, subMatchCheckerId, None)
  /** Ordered sequence matcher; indices in `negMatcherIndexes` mark sub-matchers
    * that must NOT match inside the corresponding gap. */
  def matchersOrdered(subMatchers:Seq[TMatcher], negMatcherIndexes:IndexedSeq[Int], subMatchCheckerIds:Iterable[String] = EmptyCheckerIds, id:Option[MId] = None)
                     (implicit subMatchCheckerLib: SubMatchCheckerLib) =
    new _MatchersALLOrdered(subMatchers, negMatcherIndexes, subMatchCheckerIds, id)
private[TMatcher] class _QueryFromResultPool(val resultIds:Set[String],
subMatchCheckerIds:Iterable[String] = EmptyCheckerIds,
id:Option[MId] = None)
(implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) with TMatchAt {
protected val matchAtFunc:MatchAtFunc = (resultPool, index) => {
val allMatches = m(resultPool)
allMatches.filter(_.range.start == index)
}
override def m(resultPool:TMatchResultPool) = resultIds.flatMap(resultPool.query)
override def toString:String = "ResultPool Query: [%s]".format(resultIds.mkString(","))
override def depMatcherIds = super.depMatcherIds ++ resultIds
val depth = 1
}
  /** Factory for [[_QueryFromResultPool]]. */
  def queryPoolMatcher(resultIds:Set[String],
                       subMatchCheckerIds:Iterable[String] = EmptyCheckerIds,
                       id:Option[MId] = None)
                      (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _QueryFromResultPool(resultIds, subMatchCheckerIds, id)
  /** Convenience overload: a single result id, no checkers, no explicit id. */
  def queryPoolMatcher(resultId:String)
                      (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _QueryFromResultPool(Set(resultId), EmptyCheckerIds, None)
  /** Matcher that pairs every pooled result of `resultIds1` with every pooled
    * result of `resultIds2` (a cross product), capped at MaxMatchCount. */
  private[TMatcher] class _QueryAND(val resultIds1:Set[String],
                                    val resultIds2:Set[String],
                                    subMatchCheckerIds:Iterable[String] = EmptyCheckerIds,
                                    id:Option[MId] = None)
                                   (implicit subMatchCheckerLib: SubMatchCheckerLib) extends MatcherBase(id, subMatchCheckerIds) with TMatchAt {
    private val _q1 = queryPoolMatcher(resultIds1, subMatchCheckerIds)
    private val _q2 = queryPoolMatcher(resultIds2, subMatchCheckerIds)
    // Positioned lookup: pooled pairings starting at the given index.
    protected val matchAtFunc:MatchAtFunc = (resultPool, index) => {
      val allMatches = m(resultPool)
      allMatches.filter(_.range.start == index)
    }
    override def m(resultPool:TMatchResultPool) = {
      // NOTE(review): the full cross product is materialized before `take`, so the
      // cap bounds the result size but not the work done.
      val all = for (m1 <- _q1.m(resultPool); m2 <- _q2.m(resultPool)) yield from(resultPool, this, List(m1, m2))
      all.take(MatcherManager.MaxMatchCount) // break when there are too many matches
    }
    override def toString:String = "ResultPool Query: [%s] AND [%s]".format(resultIds1.mkString(","), resultIds2.mkString(","))
    override def depMatcherIds = super.depMatcherIds ++ resultIds1 ++ resultIds2
    val depth = 1
  }
  /** Factory for [[_QueryAND]]. */
  def queryAnd(resultIds1:Set[String],
               resultIds2:Set[String],
               subMatchCheckerIds:Iterable[String] = EmptyCheckerIds,
               id:Option[MId] = None)
              (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _QueryAND(resultIds1, resultIds2, subMatchCheckerIds, id)
  // Shared empty-result constant (avoids re-allocating an empty Set at every miss).
  private val EmptyMatchResult = Set[AtomSeqMatch]()
  /** True when `container` fully covers `containee` (inclusive on both bounds). */
  private def rangeContain(container:Range, containee:Range):Boolean = container.start <= containee.start && container.end >= containee.end
/// bad idea: cause dead-loops,
/// update: used only for Longest repetition matcher
private def mergeByRange(in:Set[AtomSeqMatch]):Set[AtomSeqMatch] = {
val r = mutable.Set[AtomSeqMatch]()
in.foreach{ m =>
if (!r.exists(x => rangeContain(x.range, m.range))) {
val toRemove = r.filter(x => rangeContain(m.range, x.range))
r --= toRemove
r += m
}
}
r.toSet
}
  /** Caps how many matches a matcher is allowed to accumulate. */
  trait MatchLimit {
    val MaxCount = MatcherManager.MaxMatchCount
    // True while `count` is still within the cap.
    def countCheck(count:Int):Boolean = count <= MaxCount
  }
  /** Matches one or more consecutive occurrences of any of `subMatchers`,
    * connecting adjacent occurrences that at least one sub-match checker accepts.
    * With `onlyKeepLongest` only range-maximal chains are kept; otherwise every
    * valid combination is emitted.
    */
  private[TMatcher] class _RepeatableMatchers(subMatchers:Seq[TMatcher], onlyKeepLongest:Boolean, id:Option[MId], subMatchCheckerIds:Iterable[String] = EmptyCheckerIds)
    (implicit subMatchCheckerLib: SubMatchCheckerLib)
    extends _CompositeMatcherBase(subMatchers, subMatchCheckerIds, id) {
    // NOTE(review): `id` is an Option[MId]; `id + "._int_or_"` goes through
    // any2stringadd and yields e.g. "Some(x)._int_or_" — confirm this is the
    // intended internal id for matchersOR.
    private val _orMatcher = matchersOR(id + "._int_or_", subMatchers)
    // Tries to extend `curr` with each match in `following`; stops at the first
    // candidate rejected by all checkers, or once the match-count cap is hit.
    private def connectNext(curr:AtomSeqMatch, following:List[AtomSeqMatch], resultPool:TMatchResultPool, subMatchCheckers:Iterable[TSubMatchChecker]):Set[AtomSeqMatch] = {
      if (following.isEmpty) EmptyMatchResult
      else {
        val r = mutable.Set[AtomSeqMatch]()
        breakable {
          following.foreach{ fm =>
            val seq = List(curr, fm)
            if (subMatchCheckers.exists(_.check(seq, resultPool))) {
              r += from(resultPool, this, seq)
              if (!countCheck(r.size)) break
            }
            else break // first rejected candidate ends the scan
          }
        }
        r.toSet
      }
    }
    // Work-list expansion of chains, seeded with matches sorted by (start, end).
    private def connectAll(allMatches:Set[AtomSeqMatch], resultPool:TMatchResultPool, subMatchCheckers:Iterable[TSubMatchChecker]):Set[AtomSeqMatch] = {
      val r = mutable.Set[AtomSeqMatch]()
      var toCheck = ListBuffer[AtomSeqMatch]()
      val matches2Check = if (onlyKeepLongest) mergeByRange(allMatches) else allMatches
      toCheck ++= matches2Check.toList.sortBy(a => a.range.start -> a.range.end)
      breakable{
        while (toCheck.nonEmpty && countCheck(r.size)) {
          val c = toCheck.head
          // Candidates that start strictly after the current chain ends.
          val ct = toCheck.tail.filter(_.range.start > c.range.end).toList
          val cn = connectNext(c, ct, resultPool, subMatchCheckers)
          if (onlyKeepLongest) {
            if (cn.isEmpty) r += c // dead end: the chain itself is a result
            val nextIdx = if (cn.isEmpty) c.range.end else cn.maxBy(_.range.end).range.end
            toCheck = toCheck.filter(_.range.start > nextIdx)
          }
          else { // keeps all the combinations
            r += toCheck.remove(0)
          }
          // Extended chains are re-queued for further extension.
          cn.foreach(x => toCheck.insert(0, x))
        }
      }
      r.toSet
    }
    override protected def _matchFrom(resultPool:TMatchResultPool, start:Int):Set[AtomSeqMatch] = {
      val allMatches = _orMatcher.matchFrom(resultPool, start)
      if (allMatches.nonEmpty) {
        // Repetition without checkers is deliberately unsupported.
        if (subMatchCheckerIds.isEmpty) throw NotImplemented("_RepeatableMatchers: matchers without submatch checkers not implemented (allowed)")
        val subMatchCheckers = subMatchCheckerIds.map(resultPool.getSubMatchChecker)
        connectAll(allMatches, resultPool, subMatchCheckers)
      }
      else EmptyMatchResult
    }
    /*
    override def matchAt(resultPool:TMatchResultPool, index:Int):Set[AtomSeqMatch] = {
      val allMatches = _orMatcher.matchAt(resultPool, index)
      if (allMatches.isEmpty) EmptyMatchResult
      else {
        val matchesFrom = matchFrom(resultPool, index)
        matchesFrom.filter(_.range.start == index) // todo: optimize, no need to compute all matches from 'index'
      }
    }
    */
  }
  /** Factory for [[_RepeatableMatchers]]. */
  def repeatMatcher(subMatchers:Seq[TMatcher], onlyKeepLongest:Boolean, id:Option[MId], subMatchCheckerIds:Iterable[String])
                   (implicit subMatchCheckerLib: SubMatchCheckerLib) = new _RepeatableMatchers(subMatchers, onlyKeepLongest, id, subMatchCheckerIds)
  //def ListNGram(atomMatchers:List[TMatcher], subMatchCheckerLib: SubMatchCheckerLib):TMatcher = matchersOrderedAllPositive(atomMatchers, subMatchCheckerLib, ListNGramId)
  /** Ordered matcher validated by the ListNGram sub-match checker. */
  def ListNGram(atomMatchers:List[TMatcher], id:Option[MId] = None)
               (implicit subMatchCheckerLib: SubMatchCheckerLib):TMatcher = matchersOrderedAllPositive(atomMatchers, List(ListNGramId), id)
} | new2scala/text-util | maen/src/main/scala/org/dele/text/maen/matchers/TMatcher.scala | Scala | apache-2.0 | 31,916 |
package com.wavesplatform.actor
import akka.actor.{ActorSystem, AllForOneStrategy, SupervisorStrategy, SupervisorStrategyConfigurator}
import com.typesafe.config.Config
import com.wavesplatform.utils.ScorexLogging
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/** Boots a root [[ActorSystem]], blocks until it terminates, and exits the JVM
  * with code 1 when any root actor escalated a failure, 0 otherwise.
  */
object RootActorSystem extends ScorexLogging {
  // Flipped by the supervisor when a root actor fails; decides the exit code.
  @volatile private var failed = false

  /** Supervisor installed via akka configuration (referenced by class name):
    * records the failure and escalates it so the whole system shuts down. */
  final class EscalatingStrategy extends SupervisorStrategyConfigurator {
    override def create(): SupervisorStrategy = AllForOneStrategy(loggingEnabled = false) {
      case cause: Throwable =>
        failed = true
        log.error("Root actor got exception, escalate", cause)
        SupervisorStrategy.Escalate
    }
  }

  /** Creates the actor system, runs `init` against it, then blocks until the
    * system terminates and exits the process. Never returns normally. */
  def start(id: String, config: Config)(init: ActorSystem => Unit): Unit = {
    val system = ActorSystem(id, config)
    try init(system)
    catch {
      case t: Throwable =>
        log.error(s"Error while initializing actor system $id", t)
        sys.exit(1)
    }
    // Block until the actor system shuts down, then report the outcome.
    Await.result(system.whenTerminated, Duration.Inf)
    sys.exit(if (failed) 1 else 0)
  }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/actor/RootActorSystem.scala | Scala | mit | 1,112 |
package com.github.vooolll.domain.oauth
/** Common abstraction over the two names Facebook uses for an application's
  * identifier ("client id" and "app id"). Universal trait (`extends Any`) so
  * implementations could be value classes. */
trait FacebookApplicationId extends Any {
  // The raw id string supplied by Facebook.
  def value: String
}
/**
  * Facebook client id; Facebook uses "client id" and "app id" interchangeably.
  *
  * @param value facebook application id
  */
final case class FacebookClientId(value: String) extends FacebookApplicationId
/**
  * Facebook app id; Facebook uses "app id" and "client id" interchangeably.
  *
  * @param value facebook application id (fixed-size value of 16 — TODO confirm
  *              this length claim against current Facebook documentation; it is
  *              not enforced here)
  */
final case class FacebookAppId(value: String) extends FacebookApplicationId
| vooolll/facebook4s | src/main/scala/com/github/vooolll/domain/oauth/FacebookClientId.scala | Scala | apache-2.0 | 491 |
package sorm.sql
import sext._, embrace._
import sorm.sql.Sql._
object Optimization {

  /** Recursively optimizes a statement tree: descends into UNION branches and
    * join sub-statements of a SELECT, then applies local SELECT rewrites.
    *
    * NOTE(review): only Union and Select are matched; any other Statement
    * subtype reaching here would throw a MatchError — confirm none can.
    */
  def optimized ( s : Statement ) : Statement
    = s match {
        case Union(l, r) => Union(optimized(l), optimized(r))
        case s : Select =>
          s.copy(
            join = s.join map {
              // Recurse into joins whose source is itself a statement.
              case j @ Join(what : Statement, _, _, _) =>
                j.copy(what $ optimized)
              case j => j
            }
          ) $ groupByToDistinct
      }

  /** Rewrites GROUP BY over exactly the selected expressions (with no HAVING)
    * into SELECT DISTINCT with no grouping. */
  private def groupByToDistinct ( select : Select ) : Select
    = if( select.groupBy.toSet == select.what.toSet && select.having.isEmpty )
        select.copy(groupBy = Nil, distinct = true)
      else
        select

  /**
   * Not finished
   */
  // NOTE(review): unfinished and unused — whereRefs/havingRefs are `???`, so any
  // call throws NotImplementedError. Intended to drop joins whose alias is never
  // referenced by the rest of the select.
  private def dropOrphans ( select : Select ) : Select
    = {
      // All aliases referenced outside the join list.
      val refs
        : Set[String]
        = {
          val whatRefs
            = select.what.view collect { case Column(_, Some(r)) ⇒ r }
          val fromRef
            = select.from.as
          val whereRefs
            = ???
          val groupByRefs
            = select.groupBy collect { case Column(_, Some(r)) ⇒ r }
          val havingRefs
            = ???
          Set() ++ whatRefs ++ fromRef ++ whereRefs ++ groupByRefs ++ havingRefs
        }
      // Repeatedly removes joins whose alias is referenced neither outside the
      // join list nor by another join's ON clause, until a fixed point.
      def f
        ( s : Select )
        : Select
        = {
          val joinRefs
            = s.join.view flatMap {
              _.on collect { case (_, Column(_, Some(r))) ⇒ r }
            }
          val allRefs
            = refs ++ joinRefs
          val filtered
            = s.join filter {
              _.as map { allRefs contains _ } getOrElse false
            }
          if( filtered == s.join )
            s
          else
            f( s copy ( join = filtered ) )
        }
      // Applies dropOrphans to joins whose source is itself a SELECT.
      def withSubSelectsOptimized
        ( s : Select )
        = s.copy(
            join
              = s.join map { j ⇒
                j.what match {
                  case s : Select ⇒ j.copy(s $ dropOrphans)
                  case _ ⇒ j
                }
              }
          )
      withSubSelectsOptimized( f(select) )
    }
}
| sorm/sorm | src/main/scala/sorm/sql/Optimization.scala | Scala | mit | 2,142 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.events
/**
 * Abstract class for the optional formatter objects that must be passed to the <code>Event</code>s reported
 * during a ScalaTest run.
 *
 * <p>
 * This class is sealed; its only subtypes are <code>MotionToSuppress</code> and
 * <code>IndentedText</code>, defined in this file.
 * </p>
 */
sealed abstract class Formatter extends Product with Serializable
/**
 * A <code>Formatter</code> that indicates reporters may wish to suppress reporting of an <code>Event</code>.
 * "Suppress" means that the event won't be reported to the user.
 *
 * <p>
 * An example is that specification-style suites, such as <code>FunSpec</code>, generate output that reads
 * more like a specification. One aspect of this is that generally only a single event should be reported
 * for each test, so that output can appear like this:
 * </p>
 *
 * <pre>
 * A Stack (when newly created)
 * - should be empty
 * - should complain when popped
 * </pre>
 *
 * <p>
 * ScalaTest suites should generate two events per test, a <a href="TestStarting.html"><code>TestStarting</code></a> event and either
 * a <a href="TestSucceeded.html"><code>TestSucceeded</code></a> or a <a href="TestFailed.html"><code>TestFailed</code></a> event. The <a href="../FunSpec.html"><code>FunSpec</code></a> trait does report both events,
 * but passes a <code>MotionToSuppress</code> along with the <code>TestStarting</code> event. As a result,
 * The <code>TestStarting</code> events have no effect on the output. Each <code>TestSucceeded</code> or
 * <code>TestFailed</code> event, which is sent with an <a href="IndentedText.html"><code>IndentedText</code></a> formatter instead of
 * a <code>MotionToSuppress</code>, will generate output, such as "<code>- should be empty</code>".
 * </p>
 *
 * <p>
 * Reporters may choose to ignore a <code>MotionToSuppress</code>. For example, an XML reporter may
 * want to report everything about every event that is fired during a concurrent run, so that the
 * events can be reordered later by reading the complete, but unordered, information from an XML file.
 * In this case, the XML reporter would actually report events that were fired with a <code>MotionToSuppress</code>,
 * including indicating that the report included a motion to suppress.
 * </p>
 *
 * <p>
 * This formatter carries no data, so it is modeled as a case object.
 * </p>
 *
 * @author Bill Venners
 */
final case object MotionToSuppress extends Formatter
/**
 * A <a href="Formatter.html"><code>Formatter</code></a> providing information that enables reporters to create more stylized output.
 *
 * <p>
 * An example is that specification-style suites, such as <a href="../FunSpec.html"><code>FunSpec</code></a>, generate output that reads
 * more like a specification, for instance:
 * </p>
 *
 * <pre>
 * A Stack (when newly created)
 * - should be empty
 * - should complain when popped
 * </pre>
 *
 * <p>
 * This output might be generated by ScalaTest's standard out reporter. Each of these lines would be
 * taken from the <code>IndentedText</code>'s <code>formattedText</code> parameter. Were this same run
 * to be reported in HTML or in a GUI, the output would be based on the <code>rawText</code> and the
 * <code>indentationLevel</code>. Here's what the <code>IndentedText</code> values would be for each event:
 * </p>
 *
 * <ul>
 * <li><a href="InfoProvided.html"><code>InfoProvided</code></a> reported with an:
 * <pre class="stHighlight">
 * IndentedText(
 *   formattedText = "A Stack (when newly created)",
 *   rawText = "A Stack (when newly created)",
 *   indentationLevel = 0
 * )
 * </pre>
 * </li>
 * <li><a href="TestSucceeded.html"><code>TestSucceeded</code></a> reported with an:
 * <pre class="stHighlight">
 * IndentedText(
 *   formattedText = "- should be empty",
 *   rawText = "should be empty",
 *   indentationLevel = 1
 * )
 * </pre>
 * </li>
 * <li><code>TestSucceeded</code> reported with an:
 * <pre class="stHighlight">
 * IndentedText(
 *   formattedText = "- should complain when popped",
 *   rawText = "should complain when popped",
 *   indentationLevel = 1
 * )
 * </pre>
 * </li>
 * </ul>
 *
 * <p>
 * One possible way this information could be presented in HTML, for example, is this:
 * </p>
 *
 * <p>
 * <strong>A Stack (when newly created)</strong>
 * <ul>
 * <li>should be empty</li>
 * <li>should complain when popped</li>
 * </ul>
 * </p>
 *
 * @param formattedText a localized string suitable for presenting to a user by printing it straight to an output stream
 * @param rawText a localized string suitable for presenting to the user after in some way being indented by the
 *        value specified as the <code>indentationLevel</code> parameter
 * @param indentationLevel a zero or positive integer representing an indentation level for the indented text
 *
 * @throws IllegalArgumentException if the specified <code>indentationLevel</code> is less than zero
 */
final case class IndentedText(formattedText: String, rawText: String, indentationLevel: Int) extends Formatter {
  // Enforces the documented contract: a negative indentation level is a caller bug.
  require(indentationLevel >= 0, "indentationLevel was less than zero: " + indentationLevel)
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/events/Formatter.scala | Scala | apache-2.0 | 5,528 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{FunctionAlreadyExistsException, NoSuchDatabaseException, NoSuchFunctionException}
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* A reasonable complete test suite (i.e. behaviors) for a [[ExternalCatalog]].
*
* Implementations of the [[ExternalCatalog]] interface can create test suites by extending this.
*/
abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEach {
  // Implementations supply the catalog-specific fixture factory (newBasicCatalog etc.).
  protected val utils: CatalogTestUtils
  import utils._

  // Hook for subclasses that must reset external state between tests.
  protected def resetState(): Unit = { }

  // Clear all state after each test
  override def afterEach(): Unit = {
    try {
      resetState()
    } finally {
      super.afterEach()
    }
  }
  // --------------------------------------------------------------------------
  // Databases
  // --------------------------------------------------------------------------

  test("basic create and list databases") {
    val catalog = newEmptyCatalog()
    catalog.createDatabase(newDb("default"), ignoreIfExists = true)
    assert(catalog.databaseExists("default"))
    assert(!catalog.databaseExists("testing"))
    assert(!catalog.databaseExists("testing2"))
    catalog.createDatabase(newDb("testing"), ignoreIfExists = false)
    assert(catalog.databaseExists("testing"))
    assert(catalog.listDatabases().toSet == Set("default", "testing"))
    catalog.createDatabase(newDb("testing2"), ignoreIfExists = false)
    assert(catalog.listDatabases().toSet == Set("default", "testing", "testing2"))
    assert(catalog.databaseExists("testing2"))
    assert(!catalog.databaseExists("does_not_exist"))
  }

  test("get database when a database exists") {
    val db1 = newBasicCatalog().getDatabase("db1")
    assert(db1.name == "db1")
    assert(db1.description.contains("db1"))
  }

  test("get database should throw exception when the database does not exist") {
    intercept[AnalysisException] { newBasicCatalog().getDatabase("db_that_does_not_exist") }
  }

  test("list databases without pattern") {
    val catalog = newBasicCatalog()
    assert(catalog.listDatabases().toSet == Set("default", "db1", "db2"))
  }

  test("list databases with pattern") {
    val catalog = newBasicCatalog()
    // A pattern with no wildcard must match the database name exactly.
    assert(catalog.listDatabases("db").toSet == Set.empty)
    assert(catalog.listDatabases("db*").toSet == Set("db1", "db2"))
    assert(catalog.listDatabases("*1").toSet == Set("db1"))
    assert(catalog.listDatabases("db2").toSet == Set("db2"))
  }
  test("drop database") {
    val catalog = newBasicCatalog()
    catalog.dropDatabase("db1", ignoreIfNotExists = false, cascade = false)
    assert(catalog.listDatabases().toSet == Set("default", "db2"))
  }

  test("drop database when the database is not empty") {
    // Throw exception if there are functions left
    val catalog1 = newBasicCatalog()
    catalog1.dropTable("db2", "tbl1", ignoreIfNotExists = false, purge = false)
    catalog1.dropTable("db2", "tbl2", ignoreIfNotExists = false, purge = false)
    intercept[AnalysisException] {
      catalog1.dropDatabase("db2", ignoreIfNotExists = false, cascade = false)
    }
    resetState()

    // Throw exception if there are tables left
    val catalog2 = newBasicCatalog()
    catalog2.dropFunction("db2", "func1")
    intercept[AnalysisException] {
      catalog2.dropDatabase("db2", ignoreIfNotExists = false, cascade = false)
    }
    resetState()

    // When cascade is true, it should drop them
    val catalog3 = newBasicCatalog()
    catalog3.dropDatabase("db2", ignoreIfNotExists = false, cascade = true)
    assert(catalog3.listDatabases().toSet == Set("default", "db1"))
  }

  test("drop database when the database does not exist") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] {
      catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = false, cascade = false)
    }
    // With ignoreIfNotExists = true the same call must be a no-op.
    catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = true, cascade = false)
  }
  test("alter database") {
    val catalog = newBasicCatalog()
    val db1 = catalog.getDatabase("db1")
    // Note: alter properties here because Hive does not support altering other fields
    catalog.alterDatabase(db1.copy(properties = Map("k" -> "v3", "good" -> "true")))
    val newDb1 = catalog.getDatabase("db1")
    assert(db1.properties.isEmpty)
    assert(newDb1.properties.size == 2)
    assert(newDb1.properties.get("k") == Some("v3"))
    assert(newDb1.properties.get("good") == Some("true"))
  }

  test("alter database should throw exception when the database does not exist") {
    intercept[AnalysisException] {
      newBasicCatalog().alterDatabase(newDb("does_not_exist"))
    }
  }
  // --------------------------------------------------------------------------
  // Tables
  // --------------------------------------------------------------------------

  test("the table type of an external table should be EXTERNAL_TABLE") {
    val catalog = newBasicCatalog()
    val table =
      newTable("external_table1", "db2").copy(tableType = CatalogTableType.EXTERNAL)
    catalog.createTable(table, ignoreIfExists = false)
    // The EXTERNAL table type must survive a create/get round trip.
    val actual = catalog.getTable("db2", "external_table1")
    assert(actual.tableType === CatalogTableType.EXTERNAL)
  }

  test("create table when the table already exists") {
    val catalog = newBasicCatalog()
    assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
    val table = newTable("tbl1", "db2")
    intercept[TableAlreadyExistsException] {
      catalog.createTable(table, ignoreIfExists = false)
    }
  }

  test("drop table") {
    val catalog = newBasicCatalog()
    assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
    catalog.dropTable("db2", "tbl1", ignoreIfNotExists = false, purge = false)
    assert(catalog.listTables("db2").toSet == Set("tbl2"))
  }
  test("drop table when database/table does not exist") {
    val catalog = newBasicCatalog()
    // Should always throw exception when the database does not exist
    intercept[AnalysisException] {
      catalog.dropTable("unknown_db", "unknown_table", ignoreIfNotExists = false, purge = false)
    }
    intercept[AnalysisException] {
      catalog.dropTable("unknown_db", "unknown_table", ignoreIfNotExists = true, purge = false)
    }
    // Should throw exception when the table does not exist, if ignoreIfNotExists is false
    intercept[AnalysisException] {
      catalog.dropTable("db2", "unknown_table", ignoreIfNotExists = false, purge = false)
    }
    catalog.dropTable("db2", "unknown_table", ignoreIfNotExists = true, purge = false)
  }

  test("rename table") {
    val catalog = newBasicCatalog()
    assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
    catalog.renameTable("db2", "tbl1", "tblone")
    assert(catalog.listTables("db2").toSet == Set("tblone", "tbl2"))
  }

  test("rename table when database/table does not exist") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] {
      catalog.renameTable("unknown_db", "unknown_table", "unknown_table")
    }
    intercept[AnalysisException] {
      catalog.renameTable("db2", "unknown_table", "unknown_table")
    }
  }

  test("rename table when destination table already exists") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] {
      catalog.renameTable("db2", "tbl1", "tbl2")
    }
  }
  test("alter table") {
    val catalog = newBasicCatalog()
    val tbl1 = catalog.getTable("db2", "tbl1")
    catalog.alterTable(tbl1.copy(properties = Map("toh" -> "frem")))
    val newTbl1 = catalog.getTable("db2", "tbl1")
    // The new property is added on top of the existing ones.
    assert(!tbl1.properties.contains("toh"))
    assert(newTbl1.properties.size == tbl1.properties.size + 1)
    assert(newTbl1.properties.get("toh") == Some("frem"))
  }

  test("alter table when database/table does not exist") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] {
      catalog.alterTable(newTable("tbl1", "unknown_db"))
    }
    intercept[AnalysisException] {
      catalog.alterTable(newTable("unknown_table", "db2"))
    }
  }

  test("alter table schema") {
    val catalog = newBasicCatalog()
    val tbl1 = catalog.getTable("db2", "tbl1")
    val newSchema = StructType(Seq(
      StructField("new_field_1", IntegerType),
      StructField("new_field_2", StringType),
      StructField("a", IntegerType),
      StructField("b", StringType)))
    catalog.alterTableSchema("db2", "tbl1", newSchema)
    val newTbl1 = catalog.getTable("db2", "tbl1")
    assert(newTbl1.schema == newSchema)
  }

  test("get table") {
    assert(newBasicCatalog().getTable("db2", "tbl1").identifier.table == "tbl1")
  }

  test("get table when database/table does not exist") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] {
      catalog.getTable("unknown_db", "unknown_table")
    }
    intercept[AnalysisException] {
      catalog.getTable("db2", "unknown_table")
    }
  }
  test("list tables without pattern") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] { catalog.listTables("unknown_db") }
    assert(catalog.listTables("db1").toSet == Set.empty)
    assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
  }

  test("list tables with pattern") {
    val catalog = newBasicCatalog()
    intercept[AnalysisException] { catalog.listTables("unknown_db", "*") }
    assert(catalog.listTables("db1", "*").toSet == Set.empty)
    assert(catalog.listTables("db2", "*").toSet == Set("tbl1", "tbl2"))
    assert(catalog.listTables("db2", "tbl*").toSet == Set("tbl1", "tbl2"))
    assert(catalog.listTables("db2", "*1").toSet == Set("tbl1"))
  }

  test("column names should be case-preserving and column nullability should be retained") {
    val catalog = newBasicCatalog()
    // Mixed-case column names and an explicit non-nullable column must survive
    // a create/get round trip unchanged.
    val tbl = CatalogTable(
      identifier = TableIdentifier("tbl", Some("db1")),
      tableType = CatalogTableType.MANAGED,
      storage = storageFormat,
      schema = new StructType()
        .add("HelLo", "int", nullable = false)
        .add("WoRLd", "int", nullable = true),
      provider = Some("hive"),
      partitionColumnNames = Seq("WoRLd"),
      bucketSpec = Some(BucketSpec(4, Seq("HelLo"), Nil)))
    catalog.createTable(tbl, ignoreIfExists = false)
    val readBack = catalog.getTable("db1", "tbl")
    assert(readBack.schema == tbl.schema)
    assert(readBack.partitionColumnNames == tbl.partitionColumnNames)
    assert(readBack.bucketSpec == tbl.bucketSpec)
  }
// --------------------------------------------------------------------------
// Partitions
// --------------------------------------------------------------------------
// Partitions created on a fresh table should be listed back verbatim.
test("basic create and list partitions") {
val catalog = newEmptyCatalog()
catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
catalog.createTable(newTable("tbl", "mydb"), ignoreIfExists = false)
catalog.createPartitions("mydb", "tbl", Seq(part1, part2), ignoreIfExists = false)
assert(catalogPartitionsEqual(catalog, "mydb", "tbl", Seq(part1, part2)))
}
// Creating partitions under a missing database or table must fail.
test("create partitions when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.createPartitions("does_not_exist", "tbl1", Seq(), ignoreIfExists = false)
}
intercept[AnalysisException] {
catalog.createPartitions("db2", "does_not_exist", Seq(), ignoreIfExists = false)
}
}
// Duplicate partition creation fails unless ignoreIfExists = true.
test("create partitions that already exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.createPartitions("db2", "tbl2", Seq(part1), ignoreIfExists = false)
}
catalog.createPartitions("db2", "tbl2", Seq(part1), ignoreIfExists = true)
}
// When a partition carries no explicit location, the catalog should derive a
// default Hive-style location under the table directory: .../partCol1=1/partCol2=2
test("create partitions without location") {
val catalog = newBasicCatalog()
val table = CatalogTable(
identifier = TableIdentifier("tbl", Some("db1")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType()
.add("col1", "int")
.add("col2", "string")
.add("partCol1", "int")
.add("partCol2", "string"),
provider = Some("hive"),
partitionColumnNames = Seq("partCol1", "partCol2"))
catalog.createTable(table, ignoreIfExists = false)
val partition = CatalogTablePartition(Map("partCol1" -> "1", "partCol2" -> "2"), storageFormat)
catalog.createPartitions("db1", "tbl", Seq(partition), ignoreIfExists = false)
val partitionLocation = catalog.getPartition(
"db1",
"tbl",
Map("partCol1" -> "1", "partCol2" -> "2")).location
val tableLocation = catalog.getTable("db1", "tbl").location
val defaultPartitionLocation = new Path(new Path(tableLocation, "partCol1=1"), "partCol2=2")
assert(new Path(partitionLocation) == defaultPartitionLocation)
}
// For managed tables with explicit partition locations, the directories must be
// created on partition creation and removed on partition/table drop.
test("create/drop partitions in managed tables with location") {
val catalog = newBasicCatalog()
val table = CatalogTable(
identifier = TableIdentifier("tbl", Some("db1")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType()
.add("col1", "int")
.add("col2", "string")
.add("partCol1", "int")
.add("partCol2", "string"),
provider = Some("hive"),
partitionColumnNames = Seq("partCol1", "partCol2"))
catalog.createTable(table, ignoreIfExists = false)
val newLocationPart1 = newUriForDatabase()
val newLocationPart2 = newUriForDatabase()
val partition1 =
CatalogTablePartition(Map("partCol1" -> "1", "partCol2" -> "2"),
storageFormat.copy(locationUri = Some(newLocationPart1)))
val partition2 =
CatalogTablePartition(Map("partCol1" -> "3", "partCol2" -> "4"),
storageFormat.copy(locationUri = Some(newLocationPart2)))
catalog.createPartitions("db1", "tbl", Seq(partition1), ignoreIfExists = false)
catalog.createPartitions("db1", "tbl", Seq(partition2), ignoreIfExists = false)
assert(exists(newLocationPart1))
assert(exists(newLocationPart2))
// the corresponding directory is dropped.
catalog.dropPartitions("db1", "tbl", Seq(partition1.spec),
ignoreIfNotExists = false, purge = false, retainData = false)
assert(!exists(newLocationPart1))
// all the remaining directories are dropped.
catalog.dropTable("db1", "tbl", ignoreIfNotExists = false, purge = false)
assert(!exists(newLocationPart2))
}
// Partition names must URL-escape special characters in values ("%=" -> "%25%3D")
// and come back in sorted order.
test("list partition names") {
val catalog = newBasicCatalog()
val newPart = CatalogTablePartition(Map("a" -> "1", "b" -> "%="), storageFormat)
catalog.createPartitions("db2", "tbl2", Seq(newPart), ignoreIfExists = false)
val partitionNames = catalog.listPartitionNames("db2", "tbl2")
assert(partitionNames == Seq("a=1/b=%25%3D", "a=1/b=2", "a=3/b=4"))
}
// Partial specs filter partition names; the spec values are matched unescaped.
test("list partition names with partial partition spec") {
val catalog = newBasicCatalog()
val newPart = CatalogTablePartition(Map("a" -> "1", "b" -> "%="), storageFormat)
catalog.createPartitions("db2", "tbl2", Seq(newPart), ignoreIfExists = false)
val partitionNames1 = catalog.listPartitionNames("db2", "tbl2", Some(Map("a" -> "1")))
assert(partitionNames1 == Seq("a=1/b=%25%3D", "a=1/b=2"))
// Partial partition specs including "weird" partition values should use the unescaped values
val partitionNames2 = catalog.listPartitionNames("db2", "tbl2", Some(Map("b" -> "%=")))
assert(partitionNames2 == Seq("a=1/b=%25%3D"))
val partitionNames3 = catalog.listPartitionNames("db2", "tbl2", Some(Map("b" -> "%25%3D")))
assert(partitionNames3.isEmpty)
}
// listPartitions with a partial spec returns only matching partitions.
test("list partitions with partial partition spec") {
val catalog = newBasicCatalog()
val parts = catalog.listPartitions("db2", "tbl2", Some(Map("a" -> "1")))
assert(parts.length == 1)
assert(parts.head.spec == part1.spec)
// if no partition is matched for the given partition spec, an empty list should be returned.
assert(catalog.listPartitions("db2", "tbl2", Some(Map("a" -> "unknown", "b" -> "1"))).isEmpty)
assert(catalog.listPartitions("db2", "tbl2", Some(Map("a" -> "unknown"))).isEmpty)
}
// Dropping one or several partitions removes exactly those partitions.
// resetState() is called between the two scenarios to start from a clean catalog.
test("drop partitions") {
val catalog = newBasicCatalog()
assert(catalogPartitionsEqual(catalog, "db2", "tbl2", Seq(part1, part2)))
catalog.dropPartitions(
"db2", "tbl2", Seq(part1.spec), ignoreIfNotExists = false, purge = false, retainData = false)
assert(catalogPartitionsEqual(catalog, "db2", "tbl2", Seq(part2)))
resetState()
val catalog2 = newBasicCatalog()
assert(catalogPartitionsEqual(catalog2, "db2", "tbl2", Seq(part1, part2)))
catalog2.dropPartitions(
"db2", "tbl2", Seq(part1.spec, part2.spec), ignoreIfNotExists = false, purge = false,
retainData = false)
assert(catalog2.listPartitions("db2", "tbl2").isEmpty)
}
// Dropping partitions under a missing database or table must fail.
test("drop partitions when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.dropPartitions(
"does_not_exist", "tbl1", Seq(), ignoreIfNotExists = false, purge = false,
retainData = false)
}
intercept[AnalysisException] {
catalog.dropPartitions(
"db2", "does_not_exist", Seq(), ignoreIfNotExists = false, purge = false,
retainData = false)
}
}
// Dropping a nonexistent partition fails unless ignoreIfNotExists = true.
test("drop partitions that do not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.dropPartitions(
"db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = false, purge = false,
retainData = false)
}
catalog.dropPartitions(
"db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = true, purge = false, retainData = false)
}
// getPartition returns the partition by exact spec; unknown specs are errors.
test("get partition") {
val catalog = newBasicCatalog()
assert(catalog.getPartition("db2", "tbl2", part1.spec).spec == part1.spec)
assert(catalog.getPartition("db2", "tbl2", part2.spec).spec == part2.spec)
intercept[AnalysisException] {
catalog.getPartition("db2", "tbl1", part3.spec)
}
}
// getPartition under a missing database or table must fail.
test("get partition when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.getPartition("does_not_exist", "tbl1", part1.spec)
}
intercept[AnalysisException] {
catalog.getPartition("db2", "does_not_exist", part1.spec)
}
}
// Renaming partitions replaces old specs with new ones atomically:
// the new specs become resolvable and the old ones disappear.
test("rename partitions") {
val catalog = newBasicCatalog()
val newPart1 = part1.copy(spec = Map("a" -> "100", "b" -> "101"))
val newPart2 = part2.copy(spec = Map("a" -> "200", "b" -> "201"))
val newSpecs = Seq(newPart1.spec, newPart2.spec)
catalog.renamePartitions("db2", "tbl2", Seq(part1.spec, part2.spec), newSpecs)
assert(catalog.getPartition("db2", "tbl2", newPart1.spec).spec === newPart1.spec)
assert(catalog.getPartition("db2", "tbl2", newPart2.spec).spec === newPart2.spec)
// The old partitions should no longer exist
intercept[AnalysisException] { catalog.getPartition("db2", "tbl2", part1.spec) }
intercept[AnalysisException] { catalog.getPartition("db2", "tbl2", part2.spec) }
}
// For MANAGED tables a rename must relocate the partition directory under the
// table location; for EXTERNAL tables the location must stay untouched.
test("rename partitions should update the location for managed table") {
val catalog = newBasicCatalog()
val table = CatalogTable(
identifier = TableIdentifier("tbl", Some("db1")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType()
.add("col1", "int")
.add("col2", "string")
.add("partCol1", "int")
.add("partCol2", "string"),
provider = Some("hive"),
partitionColumnNames = Seq("partCol1", "partCol2"))
catalog.createTable(table, ignoreIfExists = false)
val tableLocation = catalog.getTable("db1", "tbl").location
val mixedCasePart1 = CatalogTablePartition(
Map("partCol1" -> "1", "partCol2" -> "2"), storageFormat)
val mixedCasePart2 = CatalogTablePartition(
Map("partCol1" -> "3", "partCol2" -> "4"), storageFormat)
catalog.createPartitions("db1", "tbl", Seq(mixedCasePart1), ignoreIfExists = false)
assert(
new Path(catalog.getPartition("db1", "tbl", mixedCasePart1.spec).location) ==
new Path(new Path(tableLocation, "partCol1=1"), "partCol2=2"))
catalog.renamePartitions("db1", "tbl", Seq(mixedCasePart1.spec), Seq(mixedCasePart2.spec))
assert(
new Path(catalog.getPartition("db1", "tbl", mixedCasePart2.spec).location) ==
new Path(new Path(tableLocation, "partCol1=3"), "partCol2=4"))
// For external tables, RENAME PARTITION should not update the partition location.
val existingPartLoc = catalog.getPartition("db2", "tbl2", part1.spec).location
catalog.renamePartitions("db2", "tbl2", Seq(part1.spec), Seq(part3.spec))
assert(
new Path(catalog.getPartition("db2", "tbl2", part3.spec).location) ==
new Path(existingPartLoc))
}
// Renaming partitions under a missing database or table must fail.
test("rename partitions when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.renamePartitions("does_not_exist", "tbl1", Seq(part1.spec), Seq(part2.spec))
}
intercept[AnalysisException] {
catalog.renamePartitions("db2", "does_not_exist", Seq(part1.spec), Seq(part2.spec))
}
}
// Renaming onto an existing partition spec must fail rather than overwrite.
test("rename partitions when the new partition already exists") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.renamePartitions("db2", "tbl2", Seq(part1.spec), Seq(part2.spec))
}
}
// alterPartitions updates storage metadata (location, serde, properties) of
// existing partitions; altering to unknown specs must fail.
test("alter partitions") {
val catalog = newBasicCatalog()
try {
val newLocation = newUriForDatabase()
val newSerde = "com.sparkbricks.text.EasySerde"
val newSerdeProps = Map("spark" -> "bricks", "compressed" -> "false")
// alter but keep spec the same
val oldPart1 = catalog.getPartition("db2", "tbl2", part1.spec)
val oldPart2 = catalog.getPartition("db2", "tbl2", part2.spec)
catalog.alterPartitions("db2", "tbl2", Seq(
oldPart1.copy(storage = storageFormat.copy(locationUri = Some(newLocation))),
oldPart2.copy(storage = storageFormat.copy(locationUri = Some(newLocation)))))
val newPart1 = catalog.getPartition("db2", "tbl2", part1.spec)
val newPart2 = catalog.getPartition("db2", "tbl2", part2.spec)
assert(newPart1.storage.locationUri == Some(newLocation))
assert(newPart2.storage.locationUri == Some(newLocation))
assert(oldPart1.storage.locationUri != Some(newLocation))
assert(oldPart2.storage.locationUri != Some(newLocation))
// alter other storage information
catalog.alterPartitions("db2", "tbl2", Seq(
oldPart1.copy(storage = storageFormat.copy(serde = Some(newSerde))),
oldPart2.copy(storage = storageFormat.copy(properties = newSerdeProps))))
val newPart1b = catalog.getPartition("db2", "tbl2", part1.spec)
val newPart2b = catalog.getPartition("db2", "tbl2", part2.spec)
assert(newPart1b.storage.serde == Some(newSerde))
assert(newPart2b.storage.properties == newSerdeProps)
// alter but change spec, should fail because new partition specs do not exist yet
val badPart1 = part1.copy(spec = Map("a" -> "v1", "b" -> "v2"))
val badPart2 = part2.copy(spec = Map("a" -> "v3", "b" -> "v4"))
intercept[AnalysisException] {
catalog.alterPartitions("db2", "tbl2", Seq(badPart1, badPart2))
}
} finally {
// Remember to restore the original current database, which we assume to be "default"
catalog.setCurrentDatabase("default")
}
}
// alterPartitions under a missing database or table must fail.
test("alter partitions when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.alterPartitions("does_not_exist", "tbl1", Seq(part1))
}
intercept[AnalysisException] {
catalog.alterPartitions("db2", "does_not_exist", Seq(part1))
}
}
// --------------------------------------------------------------------------
// Functions
// --------------------------------------------------------------------------
// A function created in a database should be listed back by pattern "*".
test("basic create and list functions") {
val catalog = newEmptyCatalog()
catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
catalog.createFunction("mydb", newFunc("myfunc"))
assert(catalog.listFunctions("mydb", "*").toSet == Set("myfunc"))
}
// Creating a function in a missing database raises NoSuchDatabaseException.
test("create function when database does not exist") {
val catalog = newBasicCatalog()
intercept[NoSuchDatabaseException] {
catalog.createFunction("does_not_exist", newFunc())
}
}
// Re-creating an existing function raises FunctionAlreadyExistsException.
test("create function that already exists") {
val catalog = newBasicCatalog()
intercept[FunctionAlreadyExistsException] {
catalog.createFunction("db2", newFunc("func1"))
}
}
// Dropping a function removes it from the listing.
test("drop function") {
val catalog = newBasicCatalog()
assert(catalog.listFunctions("db2", "*").toSet == Set("func1"))
catalog.dropFunction("db2", "func1")
assert(catalog.listFunctions("db2", "*").isEmpty)
}
// Dropping from a missing database raises NoSuchDatabaseException.
test("drop function when database does not exist") {
val catalog = newBasicCatalog()
intercept[NoSuchDatabaseException] {
catalog.dropFunction("does_not_exist", "something")
}
}
// Dropping a missing function raises NoSuchFunctionException.
test("drop function that does not exist") {
val catalog = newBasicCatalog()
intercept[NoSuchFunctionException] {
catalog.dropFunction("db2", "does_not_exist")
}
}
// getFunction returns the full CatalogFunction metadata; unknown names are errors.
test("get function") {
val catalog = newBasicCatalog()
assert(catalog.getFunction("db2", "func1") ==
CatalogFunction(FunctionIdentifier("func1", Some("db2")), funcClass,
Seq.empty[FunctionResource]))
intercept[NoSuchFunctionException] {
catalog.getFunction("db2", "does_not_exist")
}
}
// getFunction in a missing database raises NoSuchDatabaseException.
test("get function when database does not exist") {
val catalog = newBasicCatalog()
intercept[NoSuchDatabaseException] {
catalog.getFunction("does_not_exist", "func1")
}
}
// Renaming updates the identifier but keeps the class name; the old name is gone.
test("rename function") {
val catalog = newBasicCatalog()
val newName = "funcky"
assert(catalog.getFunction("db2", "func1").className == funcClass)
catalog.renameFunction("db2", "func1", newName)
intercept[NoSuchFunctionException] { catalog.getFunction("db2", "func1") }
assert(catalog.getFunction("db2", newName).identifier.funcName == newName)
assert(catalog.getFunction("db2", newName).className == funcClass)
intercept[NoSuchFunctionException] { catalog.renameFunction("db2", "does_not_exist", "me") }
}
// Renaming in a missing database raises NoSuchDatabaseException.
test("rename function when database does not exist") {
val catalog = newBasicCatalog()
intercept[NoSuchDatabaseException] {
catalog.renameFunction("does_not_exist", "func1", "func5")
}
}
// Renaming onto an existing function name must fail.
test("rename function when new function already exists") {
val catalog = newBasicCatalog()
catalog.createFunction("db2", newFunc("func2", Some("db2")))
intercept[FunctionAlreadyExistsException] {
catalog.renameFunction("db2", "func1", "func2")
}
}
// listFunctions supports glob-style patterns over function names.
test("list functions") {
val catalog = newBasicCatalog()
catalog.createFunction("db2", newFunc("func2"))
catalog.createFunction("db2", newFunc("not_me"))
assert(catalog.listFunctions("db2", "*").toSet == Set("func1", "func2", "not_me"))
assert(catalog.listFunctions("db2", "func*").toSet == Set("func1", "func2"))
}
// --------------------------------------------------------------------------
// File System operations
// --------------------------------------------------------------------------
/**
 * Returns true if the path formed by `uri`, extended with the given child
 * segments joined in order, exists on the file system that `uri` belongs to.
 */
private def exists(uri: String, children: String*): Boolean = {
  val target = children.foldLeft(new Path(uri))(new Path(_, _))
  new Path(uri).getFileSystem(new Configuration()).exists(target)
}
// A database's directory must be created with the database and removed on drop.
test("create/drop database should create/delete the directory") {
val catalog = newBasicCatalog()
val db = newDb("mydb")
catalog.createDatabase(db, ignoreIfExists = false)
assert(exists(db.locationUri))
catalog.dropDatabase("mydb", ignoreIfNotExists = false, cascade = false)
assert(!exists(db.locationUri))
}
// Managed-table DDL must keep the table directory in sync (create/rename/drop);
// external tables must never get a directory under the database location.
test("create/drop/rename table should create/delete/rename the directory") {
val catalog = newBasicCatalog()
val db = catalog.getDatabase("db1")
val table = CatalogTable(
identifier = TableIdentifier("my_table", Some("db1")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", "int").add("b", "string"),
provider = Some("hive")
)
catalog.createTable(table, ignoreIfExists = false)
assert(exists(db.locationUri, "my_table"))
catalog.renameTable("db1", "my_table", "your_table")
assert(!exists(db.locationUri, "my_table"))
assert(exists(db.locationUri, "your_table"))
catalog.dropTable("db1", "your_table", ignoreIfNotExists = false, purge = false)
assert(!exists(db.locationUri, "your_table"))
val externalTable = CatalogTable(
identifier = TableIdentifier("external_table", Some("db1")),
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat(
Some(Utils.createTempDir().getAbsolutePath),
None, None, None, false, Map.empty),
schema = new StructType().add("a", "int").add("b", "string"),
provider = Some("hive")
)
catalog.createTable(externalTable, ignoreIfExists = false)
assert(!exists(db.locationUri, "external_table"))
}
// Partition DDL on a managed table must keep the Hive-style partition
// directories (partCol=value/...) in sync, and must tolerate both existing and
// missing directories for partitions with explicit locations.
test("create/drop/rename partitions should create/delete/rename the directory") {
val catalog = newBasicCatalog()
val table = CatalogTable(
identifier = TableIdentifier("tbl", Some("db1")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType()
.add("col1", "int")
.add("col2", "string")
.add("partCol1", "int")
.add("partCol2", "string"),
provider = Some("hive"),
partitionColumnNames = Seq("partCol1", "partCol2"))
catalog.createTable(table, ignoreIfExists = false)
val tableLocation = catalog.getTable("db1", "tbl").location
val part1 = CatalogTablePartition(Map("partCol1" -> "1", "partCol2" -> "2"), storageFormat)
val part2 = CatalogTablePartition(Map("partCol1" -> "3", "partCol2" -> "4"), storageFormat)
val part3 = CatalogTablePartition(Map("partCol1" -> "5", "partCol2" -> "6"), storageFormat)
catalog.createPartitions("db1", "tbl", Seq(part1, part2), ignoreIfExists = false)
assert(exists(tableLocation, "partCol1=1", "partCol2=2"))
assert(exists(tableLocation, "partCol1=3", "partCol2=4"))
catalog.renamePartitions("db1", "tbl", Seq(part1.spec), Seq(part3.spec))
assert(!exists(tableLocation, "partCol1=1", "partCol2=2"))
assert(exists(tableLocation, "partCol1=5", "partCol2=6"))
catalog.dropPartitions("db1", "tbl", Seq(part2.spec, part3.spec), ignoreIfNotExists = false,
purge = false, retainData = false)
assert(!exists(tableLocation, "partCol1=3", "partCol2=4"))
assert(!exists(tableLocation, "partCol1=5", "partCol2=6"))
val tempPath = Utils.createTempDir()
// create partition with existing directory is OK.
val partWithExistingDir = CatalogTablePartition(
Map("partCol1" -> "7", "partCol2" -> "8"),
CatalogStorageFormat(
Some(tempPath.getAbsolutePath),
None, None, None, false, Map.empty))
catalog.createPartitions("db1", "tbl", Seq(partWithExistingDir), ignoreIfExists = false)
tempPath.delete()
// create partition with non-existing directory will create that directory.
val partWithNonExistingDir = CatalogTablePartition(
Map("partCol1" -> "9", "partCol2" -> "10"),
CatalogStorageFormat(
Some(tempPath.getAbsolutePath),
None, None, None, false, Map.empty))
catalog.createPartitions("db1", "tbl", Seq(partWithNonExistingDir), ignoreIfExists = false)
assert(tempPath.exists())
}
// Data belonging to an EXTERNAL table is owned by the user: dropping a
// partition must leave its directory in place.
test("drop partition from external table should not delete the directory") {
val catalog = newBasicCatalog()
catalog.createPartitions("db2", "tbl1", Seq(part1), ignoreIfExists = false)
val partPath = new Path(catalog.getPartition("db2", "tbl1", part1.spec).location)
val fs = partPath.getFileSystem(new Configuration)
assert(fs.exists(partPath))
catalog.dropPartitions(
"db2", "tbl1", Seq(part1.spec), ignoreIfNotExists = false, purge = false, retainData = false)
assert(fs.exists(partPath))
}
}
/**
* A collection of utility fields and methods for tests related to the [[ExternalCatalog]].
*/
abstract class CatalogTestUtils {
// Unimplemented methods
// Concrete suites plug in their catalog implementation and storage formats here.
val tableInputFormat: String
val tableOutputFormat: String
def newEmptyCatalog(): ExternalCatalog
// These fields must be lazy because they rely on fields that are not implemented yet
lazy val storageFormat = CatalogStorageFormat(
locationUri = None,
inputFormat = Some(tableInputFormat),
outputFormat = Some(tableOutputFormat),
serde = None,
compressed = false,
properties = Map.empty)
// Canonical partitions used throughout the suite (columns "a" and "b").
lazy val part1 = CatalogTablePartition(Map("a" -> "1", "b" -> "2"), storageFormat)
lazy val part2 = CatalogTablePartition(Map("a" -> "3", "b" -> "4"), storageFormat)
lazy val part3 = CatalogTablePartition(Map("a" -> "5", "b" -> "6"), storageFormat)
// Deliberately malformed/edge-case partitions for negative tests.
lazy val partWithMixedOrder = CatalogTablePartition(Map("b" -> "6", "a" -> "6"), storageFormat)
lazy val partWithLessColumns = CatalogTablePartition(Map("a" -> "1"), storageFormat)
lazy val partWithMoreColumns =
CatalogTablePartition(Map("a" -> "5", "b" -> "6", "c" -> "7"), storageFormat)
lazy val partWithUnknownColumns =
CatalogTablePartition(Map("a" -> "5", "unknown" -> "6"), storageFormat)
lazy val partWithEmptyValue =
CatalogTablePartition(Map("a" -> "3", "b" -> ""), storageFormat)
lazy val funcClass = "org.apache.spark.myFunc"
/**
* Creates a basic catalog, with the following structure:
*
* default
* db1
* db2
* - tbl1
* - tbl2
* - part1
* - part2
* - func1
*/
def newBasicCatalog(): ExternalCatalog = {
val catalog = newEmptyCatalog()
// When testing against a real catalog, the default database may already exist
catalog.createDatabase(newDb("default"), ignoreIfExists = true)
catalog.createDatabase(newDb("db1"), ignoreIfExists = false)
catalog.createDatabase(newDb("db2"), ignoreIfExists = false)
catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = false)
catalog.createTable(newTable("tbl2", "db2"), ignoreIfExists = false)
catalog.createPartitions("db2", "tbl2", Seq(part1, part2), ignoreIfExists = false)
catalog.createFunction("db2", newFunc("func1", Some("db2")))
catalog
}
def newFunc(): CatalogFunction = newFunc("funcName")
// Fresh temp-dir URI without a trailing slash, usable as a database/table location.
def newUriForDatabase(): String = Utils.createTempDir().toURI.toString.stripSuffix("/")
def newDb(name: String): CatalogDatabase = {
CatalogDatabase(name, name + " description", newUriForDatabase(), Map.empty)
}
def newTable(name: String, db: String): CatalogTable = newTable(name, Some(db))
// External table with two data columns, partition columns (a, b) and a bucket spec.
def newTable(name: String, database: Option[String] = None): CatalogTable = {
CatalogTable(
identifier = TableIdentifier(name, database),
tableType = CatalogTableType.EXTERNAL,
storage = storageFormat.copy(locationUri = Some(Utils.createTempDir().getAbsolutePath)),
schema = new StructType()
.add("col1", "int")
.add("col2", "string")
.add("a", "int")
.add("b", "string"),
provider = Some("hive"),
partitionColumnNames = Seq("a", "b"),
bucketSpec = Some(BucketSpec(4, Seq("col1"), Nil)))
}
def newFunc(name: String, database: Option[String] = None): CatalogFunction = {
CatalogFunction(FunctionIdentifier(name, database), funcClass, Seq.empty[FunctionResource])
}
/**
* Whether the catalog's table partitions equal the ones given.
* Note: Hive sets some random serde things, so we just compare the specs here.
*/
def catalogPartitionsEqual(
catalog: ExternalCatalog,
db: String,
table: String,
parts: Seq[CatalogTablePartition]): Boolean = {
catalog.listPartitions(db, table).map(_.spec).toSet == parts.map(_.spec).toSet
}
}
| spark0001/spark2.1.1 | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala | Scala | apache-2.0 | 37,265 |
/*
* @author Philip Stutz
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect.util
import org.scalacheck.Gen
import org.scalacheck.Gen._
import org.scalacheck.Arbitrary._
import org.scalatest.FlatSpec
import org.scalatest.ShouldMatchers
import org.scalatest.prop.Checkers
import java.io.DataOutputStream
import java.io.ByteArrayOutputStream
import org.scalacheck.Arbitrary
import scala.util.Random
/**
 * Minimal concrete [[SplayIntSet]] used as the test fixture.
 * Fix: the explicit `val` modifiers were removed — case-class parameters are
 * public vals by default, so the keyword was redundant.
 */
case class SimpleSplayIntSet(
  overheadFraction: Float,
  maxNodeIntSetSize: Int) extends SplayIntSet
/**
 * Tests for [[SplayIntSet]] insertion semantics.
 *
 * Bug fixed: every test body used to be wrapped in
 * `try { ... } catch { case t: Throwable => t.printStackTrace }`. ScalaTest
 * signals failure by throwing, so those catch-all handlers swallowed assertion
 * failures and the tests could never fail. The wrappers are removed so
 * failures propagate to the runner; `var`s that were never reassigned are now
 * `val`s, and dead timing/diagnostic code has been dropped.
 */
class SplayIntSetSpec extends FlatSpec with ShouldMatchers with Checkers with TestAnnouncements {

  // Draw ints uniformly from the full Int range instead of ScalaCheck's default distribution.
  implicit lazy val arbInt = Arbitrary(Gen.chooseNum(Int.MinValue, Int.MaxValue))

  "SplayIntSet" should "handle duplicate inserts correctly" in {
    val splaySet = new SimpleSplayIntSet(0.05f, 3)
    val insert = List(4, 2, 1, 4, 3, 1, 3)
    for (i <- insert) {
      splaySet.insert(i)
      assert(splaySet.contains(i))
    }
    // Only the 4 distinct values should be counted.
    assert(splaySet.size == 4)
  }

  it should "support 100000 inserts" in {
    val splaySet = new SimpleSplayIntSet(0.05f, 10000)
    val randomInts = (0 to 100000).map(x => Random.nextInt)
    for (i <- randomInts) {
      splaySet.insert(i)
    }
    // The splay set must contain exactly the distinct values that were inserted.
    assert(splaySet.toSet == randomInts.toSet)
  }

  it should "support 1 million inserts with split size 10 and 5% overhead" in {
    val splaySet = new SimpleSplayIntSet(0.05f, 10)
    var standardSet = Set.empty[Int]
    var i = 0
    while (i < 1000000) {
      val insertValue = Random.nextInt.abs
      splaySet.insert(insertValue)
      standardSet += insertValue
      // Compare against a reference Set after every insert to localize a failure.
      assert(splaySet.size == standardSet.size, s"size mismatch after inserting $insertValue")
      i += 1
    }
  }

  it should "store sets of Ints with various overheads and split factors" in {
    check(
      (ints: Array[Int], splitSize: Int, overhead: Float) => {
        // Clamp the generated parameters into the ranges SplayIntSet supports.
        val compact = new SplayIntSet {
          def overheadFraction = math.min(math.max(overhead, 0.01f), 1.0f)
          def maxNodeIntSetSize = math.max(splitSize, 3)
        }
        for (i <- ints) {
          compact.insert(i)
        }
        compact.toSet == ints.toSet
      },
      minSuccessful(10000))
  }

  it should "support 1 million inserts with split size 100 and 1% overhead" in {
    val splaySet = new SimpleSplayIntSet(0.01f, 100)
    var standardSet = Set.empty[Int]
    var i = 0
    while (i < 1000000) {
      // Restrict the value range so duplicate inserts are exercised as well.
      val insertValue = Random.nextInt.abs % 2000000
      splaySet.insert(insertValue)
      standardSet += insertValue
      assert(splaySet.size == standardSet.size, s"size mismatch after inserting $insertValue")
      i += 1
    }
  }

  it should "store sets of Ints" in {
    check(
      (ints: Array[Int]) => {
        val splaySet = new SimpleSplayIntSet(0.01f, 3)
        // Map into a small domain to force many duplicates and node splits.
        val mappedInts = ints.map(x => (x & Int.MaxValue) % 50)
        var intSet = Set.empty[Int]
        var containsAgree = true
        for (i <- mappedInts) {
          splaySet.insert(i)
          intSet += i
          if (splaySet.contains(i) != intSet.contains(i)) {
            containsAgree = false
          }
        }
        containsAgree && splaySet.toSet == intSet
      },
      minSuccessful(10000))
  }
}
| danihegglin/DynDCO | src/test/scala/com/signalcollect/util/SplayIntSetSpec.scala | Scala | apache-2.0 | 5,937 |
// Databricks notebook source
// MAGIC %md
// MAGIC
// MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/)
// COMMAND ----------
// MAGIC %md
// MAGIC Archived YouTube video of this live unedited lab-lecture:
// MAGIC
// MAGIC [](https://www.youtube.com/embed/jpRpd8VlMYs?start=0&end=1713&autoplay=1)
// COMMAND ----------
// MAGIC %md
// MAGIC # Network anomaly detection
// MAGIC
// MAGIC ## Student Project
// MAGIC
// MAGIC by [Victor Ingman](https://www.linkedin.com/in/ingman/) and [Kasper Ramström](https://www.linkedin.com/in/kramstrom/)
// MAGIC
// MAGIC This project set out to build an automatic anomaly detection system for networks. Network threats are a major and growing concern for enterprises and private consumers all over the world. On average it takes 191 days for a company to detect a threat and another 66 days to contain the threat ([Enhancing Threat Detection with Big Data and AI](https://www.youtube.com/watch?v=i8___3GdxlQ)). In addition to taking a long time to detect and contain threats, they also involve a ton of manual labour that requires security experts. Thus, it should be a big priority for businesses to find solutions that not only prevent malicious intrusions but also find these malicious activities in a fast and automated way, so that they can be dealt with swiftly.
// MAGIC
// MAGIC An example of the threats we're facing today is the [WannaCry ransomware](https://www.symantec.com/blogs/threat-intelligence/wannacry-ransomware-attack) which spread rapidly throughout the world during 2017 and caused major havoc for companies and private consumers throughout, including [Akademiska Sjukhuset](https://www.svt.se/nyheter/lokalt/uppsala/sjukhusledningen-i-forstarkningslage-efter-virusangreppet) here in Uppsala.
// MAGIC
// MAGIC 
// MAGIC
// MAGIC With better security systems and automated ways of detecting malicious behaviour, many of these attacks could be prevented.
// MAGIC
// MAGIC To gain inspiration for our project and find out how others have developed similar systems we've used the book [Advanced Analytics with Spark](http://shop.oreilly.com/product/0636920056591.do) which uses [k-means](https://en.wikipedia.org/wiki/K-means_clustering) clustering.
// MAGIC
// MAGIC 
// MAGIC
// MAGIC In the book, the authors cluster different kinds of network events with the hopes of separating abnormal behaviour in clusters different from other events. The data used in the book is the publicly available [KDD Cup 1999 Data](https://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html), which is both quite dated and different from the data we've used, but it works well as a proof of concept for our project. The code accompanying the above mentioned book can be found at https://github.com/sryza/aas and for our project we've used a similar approach for clustering the data using k-means.
// MAGIC
// MAGIC Below, we present the code for our project alongside with explanations for what we've done and how we've done it. This includes data collection, data visualization, clustering of data and possible improvements and future work.
// COMMAND ----------
//This allows easy embedding of publicly available information into any other notebook
//Example usage:
// displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
/**
 * Builds the HTML for a sandboxed iframe embedding the given URL.
 *
 * @param u URL to embed (must be publicly reachable; no escaping is applied,
 *          so callers should pass trusted URLs only)
 * @param h iframe height in pixels; width is fixed at 95%
 * @return the iframe markup as a string, ready for displayHTML
 */
// Idiom fix: the original concatenated four triple-quoted fragments with `+`
// just to splice in `u` and `h`; a single s-interpolated string is clearer.
def frameIt( u:String, h:Int ) : String = {
  s"""<iframe
 src="$u"
 width="95%" height="$h"
 sandbox>
  <p>
    <a href="https://en.wikipedia.org/wiki/Anomaly_detection">
      Fallback link for browsers that, unlikely, don't support frames
    </a>
  </p>
 </iframe>"""
}
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/Anomaly_detection",500))
// COMMAND ----------
// MAGIC %md
// MAGIC # Data Collection
// MAGIC
// MAGIC To get data for our network security project we decided to generate it ourselves from our own networks and perform malicious activity as well.
// MAGIC
// MAGIC Our basic idea for the data collection involved having one victim device, which would perform normal internet activity, including streaming to different media devices, transferring files and web surfing. During this, another device (the attacker) would perform malicious activity such as port scans and fingerprinting of the victim. Our hopes were that the malicious activities would stand out from the other traffic and would hopefully be detectable for our anomaly detection models.
// MAGIC
// MAGIC From the book [Network Security Through Analysis](http://shop.oreilly.com/product/0636920028444.do) we read about the tools [Wireshark](https://www.wireshark.org/) and [Nmap](https://nmap.org). For our project, we used Wireshark for collecting network data on the victim's computer and Nmap for performing malicious activity.
// MAGIC
// MAGIC 
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # Data anonymization
// MAGIC
// MAGIC As we collected data on our own private network and publish this notebook along with the data publicly, we decided to anonymize our network data for privacy reasons. To do this, we followed the Databricks guide: https://databricks.com/blog/2017/02/13/anonymizing-datasets-at-scale-leveraging-databricks-interoperability.html
// MAGIC
// MAGIC By using the package [Faker](https://faker.readthedocs.io/en/latest/index.html) we generated fake source IP's and destination IP's for our network traffic data and used this data for the remainder of the project. Since we didn't parse the packet details for our network traffic and since it can potentially include sensitive information about our connections, we decided to remove that data from the public dataset.
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/Data_anonymization",500))
// COMMAND ----------
// MAGIC %sh
// MAGIC
// MAGIC pip install unicodecsv Faker
// COMMAND ----------
// MAGIC %py
// MAGIC
// MAGIC import unicodecsv as csv
// MAGIC from collections import defaultdict
// MAGIC from faker import Factory
// MAGIC
// MAGIC def anonymize_rows(rows):
// MAGIC """
// MAGIC Rows is an iterable of dictionaries that contain name and
// MAGIC email fields that need to be anonymized.
// MAGIC """
// MAGIC # Load faker
// MAGIC faker = Factory.create()
// MAGIC
// MAGIC # Create mappings of names, emails, social security numbers, and phone numbers to faked names & emails.
// MAGIC sources = defaultdict(faker.ipv4)
// MAGIC destinations = defaultdict(faker.ipv4)
// MAGIC
// MAGIC # Iterate over the rows from the file and yield anonymized rows.
// MAGIC for row in rows:
// MAGIC # Replace name and email fields with faked fields.
// MAGIC row["Source"] = sources[row["Source"]]
// MAGIC row["Destination"] = destinations[row["Destination"]]
// MAGIC
// MAGIC # Yield the row back to the caller
// MAGIC yield row
// MAGIC
// MAGIC def anonymize(source, target):
// MAGIC """
// MAGIC The source argument is a path to a CSV file containing data to anonymize,
// MAGIC while target is a path to write the anonymized CSV data to.
// MAGIC """
// MAGIC with open(source, 'rU') as f:
// MAGIC with open(target, 'w') as o:
// MAGIC # Use the DictReader to easily extract fields
// MAGIC reader = csv.DictReader(f)
// MAGIC writer = csv.DictWriter(o, reader.fieldnames)
// MAGIC
// MAGIC # Read and anonymize data, writing to target file.
// MAGIC for row in anonymize_rows(reader):
// MAGIC writer.writerow(row)
// MAGIC
// MAGIC # anonymize("path-to-dataset-to-be-anonymized", "path-to-output-file")
// COMMAND ----------
// MAGIC %md
// MAGIC # Wireshark and Nmap
// MAGIC
// MAGIC What is it?
// MAGIC https://www.wireshark.org/
// MAGIC
// MAGIC Wireshark is a free and open source packet analyzer. It is used for network troubleshooting, analysis, software and communications protocol development, and education.
// MAGIC
// MAGIC Our setup consisted of two computers, one as victim and one as attacker.
// MAGIC
// MAGIC ## Step by step
// MAGIC - Opened up Wireshark on the victims computer as well as logging activity on the network
// MAGIC - For a guide on how to log network info with wireshark, see the following:
// MAGIC https://www.wireshark.org/docs/wsug_html_chunked/ChCapCapturingSection.html
// MAGIC - Started a lot of transfers and streams on the victims computer
// MAGIC - Started a Chromecast stream of a workout video on Youtube to a TV on the network
// MAGIC - Streaming music to speakers on the network via Spotify Connect
// MAGIC - Sending large files via Apple Airdrop
// MAGIC - The attacker started Nmap and started a port scan against the victim
// MAGIC - The attacker did a thorough fingerprint of the victim, such as OS detection and software detection at the open ports, also with Nmap
// MAGIC - We exported the victims wireshark log as CSV by doing the following:
// MAGIC 
// MAGIC
// MAGIC The following image visualizes the network environment
// MAGIC 
// MAGIC
// MAGIC The dotted lines shows network communications
// MAGIC Filled lines shows local execution or communication between nodes
// MAGIC Lines with arrows shows directed communication
// MAGIC
// MAGIC After that was done, about 30 minutes later, we exported the data to CSV-format. The CSV was formatted as follows:
// MAGIC
// MAGIC No | Time | Source | Destination | Protocol | Length | Info
// MAGIC --- | --- | --- | --- | --- | --- | ---
// MAGIC 1 | 0.001237 | 10.0.0.66 | 10.0.0.1 | DNS | 54 | [Redacted]
// MAGIC ⫶ | ⫶ | ⫶ | ⫶ | ⫶ | ⫶ | ⫶
// MAGIC
// MAGIC ## Description of collected data
// MAGIC - **No** = The id of the packet captured, starts from 0.
// MAGIC - **Time** = Number of seconds elapsed since the capture started
// MAGIC - **Source** = The IP address of the sender of the packet
// MAGIC - **Destination** = The IP address of the receiver of the packet
// MAGIC - **Protocol** = The protocol of the packet
// MAGIC - **Length** = Length of the packet
// MAGIC - **Info** = Data that is sent with the packet, redacted for privacy and anonymity
// MAGIC
// MAGIC That way we are able to visualize the data collected in the form of a directed graph network and use the number of times a packet is sent identified by unique (source, destination, protocol).
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # Download the network data
// MAGIC
// MAGIC The data dump we collected is available for download at the following url
// MAGIC
// MAGIC http://sunlabs.se/assets/sds/anon_data.csv
// COMMAND ----------
// MAGIC %sh
// MAGIC
// MAGIC wget "http://sunlabs.se/assets/sds/anon_data.csv"
// COMMAND ----------
// MAGIC %sh
// MAGIC
// MAGIC pwd
// MAGIC ls
// COMMAND ----------
// Path to the anonymized capture downloaded onto the driver in the %sh cell above.
val dataPath = "file:/databricks/driver/anon_data.csv"
// Load the CSV (header row present, column types inferred) and register it as
// a temporary view so later cells can query it with SQL.
spark.read.format("csv")
.option("header","true")
.option("inferSchema", "true")
.load(dataPath)
.createOrReplaceTempView("anonymized_data_raw")
// COMMAND ----------
// MAGIC %md
// MAGIC # Data visualization
// MAGIC
// MAGIC To better understand our network data, analyze it and verify its correctness, we decided to represent the data in a graph network. A graph is made up of vertices and edges and can be either directed or undirected. A visualization of an example graph can be seen in the picture below:
// MAGIC
// MAGIC <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Directed_graph%2C_cyclic.svg/2000px-Directed_graph%2C_cyclic.svg.png" alt="Example Graph" style="width: 500px;"/>
// MAGIC
// MAGIC And more information about graph theory can be found at https://en.wikipedia.org/wiki/Graph_theory.
// MAGIC
// MAGIC In our context of network traffic, each connected device can be seen as a vertex in the graph and each packet sent between two devices is an edge. For our data a packet is always sent from one source node (vertex) to another destination node (vertex). Thus each edge is directed from source to destination, and the whole graph is directed.
// MAGIC
// MAGIC To use this graph representation for our network data we used the Spark package GraphFrames.
// MAGIC
// MAGIC GraphFrames is a package for Apache Spark which provides DataFrame-based Graphs. It provides high-level APIs in Scala, Java, and Python. It aims to provide both the functionality of GraphX and extended functionality taking advantage of Spark DataFrames. This extended functionality includes motif finding, DataFrame-based serialization, and highly expressive graph queries.
// MAGIC
// MAGIC The GraphFrames package is available from [Spark Packages](http://spark-packages.org/package/graphframes/graphframes).
// MAGIC
// MAGIC This notebook demonstrates examples from the [GraphFrames User Guide](http://graphframes.github.io/user-guide.html).
// MAGIC
// MAGIC (Above GraphFrames explanation taken from Raazesh Sainudiin's course [Scalable Data Science](https://lamastex.github.io/scalable-data-science/))
// MAGIC
// MAGIC Using GraphFrames we can also see the the relationship between vertices using motifs, filter graphs and find the in- and outdegrees of vertices.
// MAGIC
// MAGIC To visualize our graph network we decided to use the package JavaScript visualization package [D3](https://d3js.org/) which allows for complex visualizations of graph networks and tons of other applications.
// COMMAND ----------
displayHTML(frameIt("https://d3js.org",500))
// COMMAND ----------
displayHTML(frameIt("http://graphframes.github.io/user-guide.html",500))
// COMMAND ----------
// Pull the raw anonymized capture back out of the temp view as a DataFrame.
val sqlDF = spark.sql("SELECT * FROM anonymized_data_raw")
// COMMAND ----------
display(sqlDF)
// COMMAND ----------
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
// Bucket the capture in time: round($"Time", 2) keeps two decimal places,
// i.e. hundredths of a second (the "millisecond" wording in the markdown is
// approximate). Then aggregate per (time bucket, Source, Destination, Protocol):
//  - len:             average packet length within the bucket
//  - local_anomalies: avg length / max length (1.0 when all packets in the
//                     bucket have the same length; smaller when lengths vary)
//  - count:           number of packets in the bucket
// NOTE(review): assumes the CSV's first column is named "n" — confirm header.
val truncData = sqlDF
.select($"n", $"Source", $"Destination", round($"Time", 2).as("ts"), $"Protocol", $"Length")
.groupBy($"ts", $"Source", $"Destination", $"Protocol")
.agg(avg($"Length").as("len"), (avg("Length") / max($"Length")).as("local_anomalies"), count("*").as("count"))
.sort($"ts")
truncData.show(5)
// Register for the later Python (pandas) preprocessing cells.
truncData.createOrReplaceTempView("anonymized_data")
// COMMAND ----------
import org.graphframes._
// Vertices: one row per source address appearing in a busy bucket (count > 10).
// NOTE(review): only sources become vertices and ids are not de-duplicated, so
// destination-only hosts are missing from the vertex set — confirm this is
// intended before relying on the degree computations below.
val v = truncData.select($"Source".as("id"), $"Source".as("src")).where("count > 10")
v.show()
// Edges: one row per (source, destination, protocol) bucket with count > 10.
val e = truncData.select($"Source".as("src"), $"Destination".as("dst"), $"Protocol", $"count").where("count > 10")
e.show()
val g = GraphFrame(v, e)
// Rename "dst" to "dest" to match the d3.Edge case class used for display.
val gE= g.edges.select($"src", $"dst".as("dest"), $"count")
display(gE)
// COMMAND ----------
package d3
// We use a package object so that we can define top level classes like Edge that need to be used in other cells
// This was modified by Ivan Sadikov to make sure it is compatible the latest databricks notebook
import org.apache.spark.sql._
import com.databricks.backend.daemon.driver.EnhancedRDDFunctions.displayHTML
// A directed, weighted edge of the network graph (count = packet-flow weight).
case class Edge(src: String, dest: String, count: Long)
// Shapes serialized to JSON for the embedded d3 force-layout script below.
case class Node(name: String)
case class Link(source: Int, target: Int, value: Long)
case class Graph(nodes: Seq[Node], links: Seq[Link])
object graphs {
// val sqlContext = SQLContext.getOrCreate(org.apache.spark.SparkContext.getOrCreate()) /// fix
val sqlContext = SparkSession.builder().getOrCreate().sqlContext
import sqlContext.implicits._
// Renders a force-directed graph of the given edges. Collects the dataset to
// the driver, so it is only suitable for small graphs.
def force(clicks: Dataset[Edge], height: Int = 100, width: Int = 960): Unit = {
val data = clicks.collect()
// Node set: every distinct src/dest label, with underscores shown as spaces.
val nodes = (data.map(_.src) ++ data.map(_.dest)).map(_.replaceAll("_", " ")).toSet.toSeq.map(Node)
// Links reference nodes by index; counts are scaled (count / 20 + 1) so that
// link widths stay readable.
val links = data.map { t =>
Link(nodes.indexWhere(_.name == t.src.replaceAll("_", " ")), nodes.indexWhere(_.name == t.dest.replaceAll("_", " ")), t.count / 20 + 1)
}
showGraph(height, width, Seq(Graph(nodes, links)).toDF().toJSON.first())
}
/**
* Displays a force directed graph using d3
* input: {"nodes": [{"name": "..."}], "links": [{"source": 1, "target": 2, "value": 0}]}
*/
def showGraph(height: Int, width: Int, graph: String): Unit = {
displayHTML(s"""
<style>
.node_circle {
stroke: #777;
stroke-width: 1.3px;
}
.node_label {
pointer-events: none;
}
.link {
stroke: #777;
stroke-opacity: .2;
}
.node_count {
stroke: #777;
stroke-width: 1.0px;
fill: #999;
}
text.legend {
font-family: Verdana;
font-size: 13px;
fill: #000;
}
.node text {
font-family: "Helvetica Neue","Helvetica","Arial",sans-serif;
font-size: 17px;
font-weight: 200;
}
</style>
<div id="clicks-graph">
<script src="//d3js.org/d3.v3.min.js"></script>
<script>
var graph = $graph;
var width = $width,
height = $height;
var color = d3.scale.category20();
var force = d3.layout.force()
.charge(-700)
.linkDistance(180)
.size([width, height]);
var svg = d3.select("#clicks-graph").append("svg")
.attr("width", width)
.attr("height", height);
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value); });
var node = svg.selectAll(".node")
.data(graph.nodes)
.enter().append("g")
.attr("class", "node")
.call(force.drag);
node.append("circle")
.attr("r", 10)
.style("fill", function (d) {
if (d.name.startsWith("other")) { return color(1); } else { return color(2); };
})
node.append("text")
.attr("dx", 10)
.attr("dy", ".35em")
.text(function(d) { return d.name });
//Now we are giving the SVGs co-ordinates - the force layout is generating the co-ordinates which this code is using to update the attributes of the SVG elements
force.on("tick", function () {
link.attr("x1", function (d) {
return d.source.x;
})
.attr("y1", function (d) {
return d.source.y;
})
.attr("x2", function (d) {
return d.target.x;
})
.attr("y2", function (d) {
return d.target.y;
});
d3.selectAll("circle").attr("cx", function (d) {
return d.x;
})
.attr("cy", function (d) {
return d.y;
});
d3.selectAll("text").attr("x", function (d) {
return d.x;
})
.attr("y", function (d) {
return d.y;
});
});
</script>
</div>
""")
}
// Prints usage instructions for graphs.force into the notebook output.
def help() = {
displayHTML("""
<p>
Produces a force-directed graph given a collection of edges of the following form:</br>
<tt><font color="#a71d5d">case class</font> <font color="#795da3">Edge</font>(<font color="#ed6a43">src</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">dest</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">count</font>: <font color="#a71d5d">Long</font>)</tt>
</p>
<p>Usage:<br/>
<tt><font color="#a71d5d">import</font> <font color="#ed6a43">d3._</font></tt><br/>
<tt><font color="#795da3">graphs.force</font>(</br>
<font color="#ed6a43">height</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">width</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">clicks</font>: <font color="#795da3">Dataset</font>[<font color="#795da3">Edge</font>])</tt>
</p>""")
}
}
// COMMAND ----------
// Draw the force-directed network graph for the filtered edge set (count > 10).
d3.graphs.force(
height = 1680,
width = 1280,
clicks = gE.as[d3.Edge])
// COMMAND ----------
// Vertices ranked by number of incoming packet-flow edges.
display(g.inDegrees.orderBy($"inDegree".desc))
// COMMAND ----------
// Vertices ranked by number of outgoing packet-flow edges.
display(g.outDegrees.orderBy($"outDegree".desc))
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # Clustering
// MAGIC ## Pre-processing of data
// MAGIC
// MAGIC We preprocessed the data logged from wireshark doing the following:
// MAGIC
// MAGIC - Rounding timestamps by milliseconds, that would be four significant decimals.
// MAGIC - Group the data by (timestamp, source, destination, protocol) with a count of how many times these kind of packets was sent/received during a millisecond.
// MAGIC - One-hot encoded the protocol values
// MAGIC - If you don't what that means, check this article out
// MAGIC https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f
// MAGIC - Standardized features for count and length of packets
// MAGIC
// MAGIC
// MAGIC ## Setting up k-means clustering
// MAGIC - 23 features
// MAGIC - Filtering out features that are not numeric, example is destination and source
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/K-means_clustering",500))
// COMMAND ----------
// MAGIC %python
// MAGIC import pandas as pd
// MAGIC
// MAGIC sampled = sqlContext.sql("SELECT * FROM anonymized_data").toPandas()
// COMMAND ----------
// MAGIC %python
// MAGIC # standardize features
// MAGIC from sklearn.preprocessing import StandardScaler
// MAGIC scaler = StandardScaler()
// COMMAND ----------
// MAGIC %python
// MAGIC
// MAGIC sample = sampled['len']
// MAGIC sample = sample.reshape(-1, 1) # one feature
// MAGIC scaler.fit(sample)
// MAGIC
// MAGIC sampled['len'] = scaler.transform(sample)
// COMMAND ----------
// MAGIC %python
// MAGIC
// MAGIC sample = sampled['count']
// MAGIC sample = sample.reshape(-1, 1) # one feature
// MAGIC scaler.fit(sample)
// MAGIC
// MAGIC sampled['count'] = scaler.transform(sample)
// COMMAND ----------
// MAGIC %python
// MAGIC
// MAGIC df_count = sampled['count']
// MAGIC df_length = sampled['len']
// MAGIC df_proto = pd.get_dummies(sampled['Protocol'])
// MAGIC df_source = sampled['Source']
// MAGIC df_dest = sampled['Destination']
// MAGIC df_ts = sampled['ts']
// MAGIC
// MAGIC onehot = pd.concat([df_proto, df_source, df_length, df_dest, df_ts, df_count], axis=1)
// MAGIC onehotDF = sqlContext.createDataFrame(onehot)
// MAGIC
// MAGIC sqlContext.sql("DROP TABLE IF EXISTS anonymized_data_onehot")
// MAGIC onehotDF.write.saveAsTable('anonymized_data_onehot')
// COMMAND ----------
// One aggregated traffic record from the one-hot table: 29 one-hot protocol
// indicator columns, followed by Source, average length, Destination, the
// time bucket and packet count. Field order must match the column order
// produced by the Python one-hot-encoding cell above.
case class Packet(AJP13: Double, ALLJOYN_NS: Double, ARP: Double, DHCP: Double, DNS: Double, HTTP: Double, HTTP_XML: Double, ICMP: Double, ICMPv6: Double, IGMPv1: Double, IGMPv2: Double, IGMPv3: Double, MDNS: Double, NBNS: Double, NTP: Double, OCSP: Double, QUIC: Double, RTCP: Double, SIP: Double, SNMP: Double, SSDP: Double, STP: Double, STUN: Double, TCP: Double, TFTP: Double, TLSv1: Double, TLSv1_2: Double, UDP: Double, XMPP_XML: Double, Source: String, len: Double, Destination: String, ts: Double,
count: Long)
/**
 * Converts one raw SQL Row from the one-hot table into a typed Packet.
 * Non-parseable numeric cells fall back to 0 instead of failing the job.
 */
def parseRow(row: org.apache.spark.sql.Row): Packet = {
  // Best-effort double parse: any malformed cell becomes 0.0.
  def numeric(cell: Any): Double =
    try cell.toString.toDouble
    catch { case _: Exception => 0.0 }
  // Best-effort long parse: any malformed cell becomes 0L.
  def integral(cell: Any): Long =
    try cell.toString.toLong
    catch { case _: Exception => 0L }
  // Columns 0-28 are the one-hot encoded protocol indicator columns.
  val p = Array.tabulate(29)(i => numeric(row(i)))
  Packet(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), p(9), p(10),
    p(11), p(12), p(13), p(14), p(15), p(16), p(17), p(18), p(19), p(20),
    p(21), p(22), p(23), p(24), p(25), p(26), p(27), p(28),
    row(29).toString, numeric(row(30)), row(31).toString, numeric(row(32)),
    integral(row(33)))
}
// Map every row of the one-hot table into a typed Packet and expose it as a view.
val df = table("anonymized_data_onehot").map(parseRow).toDF
df.createOrReplaceTempView("packetsView")
// COMMAND ----------
import org.apache.spark.ml.feature.VectorAssembler
// String identifier columns that must be excluded before assembling features.
// NOTE: the previous code matched substrings of "Source, Destination", which
// would also have dropped any column whose name happened to be a substring of
// that text; exact set membership is used instead.
val list = Set("Source", "Destination")
val cols = df.columns
val filtered = cols.filterNot(list.contains)
// Assemble all remaining numeric columns into a single "features" vector
// column, as required by Spark ML estimators such as KMeans.
val trainingData = new VectorAssembler()
  .setInputCols(filtered)
  .setOutputCol("features")
  .transform(table("packetsView"))
// COMMAND ----------
import org.apache.spark.ml.clustering.KMeans
// Cluster the feature vectors into k = 23 groups. No seed is set, so results
// can differ between runs; k appears to mirror the feature count mentioned in
// the markdown above rather than being tuned (e.g. via an elbow analysis).
val model = new KMeans().setK(23).fit(trainingData)
// Adds a "prediction" column holding each record's cluster assignment.
val modelTransformed = model.transform(trainingData)
// COMMAND ----------
// MAGIC %md
// MAGIC # Improvements and future work
// MAGIC
// MAGIC In this section we present possible improvements that could have been done for our project and future work to further build on the project, increase its usability and value.
// MAGIC ## Dimensionality improvements
// MAGIC
// MAGIC
// MAGIC We used k-means for clustering our network data which uses euclidean distance. Models using euclidean distance are susceptible to the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality). With the 23 features we got after using one-hot encoding for the protocol column in the original dataset we are likely suffering from this high dimensionality. To improve the clustering one could use an algorithm that doesn't use euclidean distance (or other distance measures that don't work well for high dimensionality). Another possible solution could be to use [dimensionality reduction](https://en.wikipedia.org/wiki/Dimensionality_reduction) and try to retain as much information as possible with fewer features. This could be done using techniques such as [PCA](https://en.wikipedia.org/wiki/Principal_component_analysis) or [LDA](https://en.wikipedia.org/wiki/Linear_discriminant_analysis).
// MAGIC
// MAGIC ## Parse packet contents
// MAGIC
// MAGIC We didn't parse the packet information other than IP addresses, packet lengths and protocol. To gain further insights one could parse the additional packet contents and look for sensitive items, including usernames, passwords etc.
// MAGIC
// MAGIC ## Graph Analysis
// MAGIC
// MAGIC One could continue to analyze the graph representation of the data. Examples of this could include looking for complex relationships in the graph using GraphFrames motifs.
// MAGIC
// MAGIC ## Real time network analysis using Spark streaming
// MAGIC
// MAGIC To make the project even more useful in a real environment, one could use [Spark Streaming k-means](https://databricks.com/blog/2015/01/28/introducing-streaming-k-means-in-spark-1-2.html) to cluster network traffic in real time and then perform anomaly detection in real time as well. An example approach of this can be seen in the following video: https://www.youtube.com/watch?v=i8___3GdxlQ
// MAGIC
// MAGIC Additional continuations of this could include giving suggestions for actions to perform when detecting malicious activity.
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/Dimensionality_reduction",500))
// COMMAND ----------
displayHTML(frameIt("https://databricks.com/blog/2015/01/28/introducing-streaming-k-means-in-spark-1-2.html",500))
// COMMAND ----------
| lamastex/scalable-data-science | db/2/2/999_01_StudentProject_NetworkAnomalyDetection.scala | Scala | unlicense | 27,311 |
package com.twitter.finagle.http2.transport
import com.twitter.concurrent.AsyncQueue
import com.twitter.conversions.time._
import com.twitter.finagle.Stack
import com.twitter.finagle.http2.RefTransport
import com.twitter.finagle.transport.QueueTransport
import com.twitter.util.{Promise, Future, Await}
import io.netty.handler.codec.http.HttpClientUpgradeHandler.UpgradeEvent
import io.netty.handler.codec.http._
import org.scalatest.FunSuite
/**
 * Tests for Http2UpgradingTransport: verifies that the upgrade promise `p`
 * completes with Some(...) on UPGRADE_SUCCESSFUL and None on UPGRADE_REJECTED,
 * and that a successful upgrade is deferred until a chunked request has been
 * fully written.
 */
class Http2UpgradingTransportTest extends FunSuite {
// Fresh queues, transport and upgrade promise for each test.
class Ctx {
val (writeq, readq) = (new AsyncQueue[Any](), new AsyncQueue[Any]())
val transport = new QueueTransport[Any, Any](writeq, readq)
val ref = new RefTransport(transport)
// Completed with Some(multiplexer) when the upgrade succeeds, None otherwise.
val p = Promise[Option[MultiplexedTransporter]]()
val upgradingTransport = new Http2UpgradingTransport(
transport,
ref,
p,
Stack.Params.empty
)
}
// A complete request, written in one shot.
val fullRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "twitter.com")
// A chunked request: headers first, the last-content chunk written separately.
val partialRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "twitter.com")
val fullResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
// All test futures must resolve within 5 seconds.
def await[A](f: Future[A]): A = Await.result(f, 5.seconds)
test("Http2UpgradingTransport upgrades properly") {
val ctx = new Ctx
import ctx._
val writeF = upgradingTransport.write(fullRequest)
assert(await(writeq.poll) == fullRequest)
val readF = upgradingTransport.read()
assert(!readF.isDefined)
// Simulate the server accepting the h2c upgrade.
assert(readq.offer(UpgradeEvent.UPGRADE_SUCCESSFUL))
assert(await(p).nonEmpty)
// Post-upgrade responses arrive wrapped in HTTP/2 stream messages.
assert(readq.offer(Http2ClientDowngrader.Message(fullResponse, 1)))
assert(await(readF) == fullResponse)
}
test("Http2UpgradingTransport can reject an upgrade") {
val ctx = new Ctx
import ctx._
val writeF = upgradingTransport.write(fullRequest)
assert(await(writeq.poll) == fullRequest)
val readF = upgradingTransport.read()
assert(!readF.isDefined)
// Simulate the server declining the upgrade: the promise yields None...
assert(readq.offer(UpgradeEvent.UPGRADE_REJECTED))
assert(await(p).isEmpty)
// ...and responses keep flowing as plain HTTP/1.1 messages.
assert(readq.offer(fullResponse))
assert(await(readF) == fullResponse)
}
test("Http2UpgradingTransport delays the upgrade until the write finishes when successful") {
val ctx = new Ctx
import ctx._
val partialF = upgradingTransport.write(partialRequest)
assert(await(writeq.poll) == partialRequest)
val readF = upgradingTransport.read()
assert(!readF.isDefined)
assert(readq.offer(UpgradeEvent.UPGRADE_SUCCESSFUL))
assert(readq.offer(Http2ClientDowngrader.Message(fullResponse, 1)))
// The read must not complete while the request body is still outstanding.
assert(!readF.isDefined)
val lastF = upgradingTransport.write(LastHttpContent.EMPTY_LAST_CONTENT)
assert(await(writeq.poll) == LastHttpContent.EMPTY_LAST_CONTENT)
// Only after the final chunk is written does the upgrade commit and the read complete.
assert(await(p).nonEmpty)
assert(await(readF) == fullResponse)
}
test("Http2UpgradingTransport doesn't delay the upgrade until the write finishes when rejected") {
val ctx = new Ctx
import ctx._
val partialF = upgradingTransport.write(partialRequest)
assert(await(writeq.poll) == partialRequest)
val readF = upgradingTransport.read()
assert(!readF.isDefined)
// A rejected upgrade needs no deferral: the response is readable immediately,
// even though the request body has not been fully written yet.
assert(readq.offer(UpgradeEvent.UPGRADE_REJECTED))
assert(readq.offer(fullResponse))
assert(await(readF) == fullResponse)
}
}
| koshelev/finagle | finagle-http2/src/test/scala/com/twitter/finagle/http2/transport/Http2UpgradingTransportTest.scala | Scala | apache-2.0 | 3,302 |
package org.bjean.sample.feature
import java.nio.file.{Files, Path}
import org.bjean.sample.support.TemporaryFolder
import org.bjean.sample.wordcount.input.{DocumentGenerator, DocumentWriter}
import org.scalatest.{FlatSpec, Matchers}
/**
 * End-to-end test: DocumentGenerator writes a randomly generated document to
 * the path given on its command line. Paragraph boundaries are encoded as tab
 * characters, so the assertions count "\t" matches to verify paragraph count.
 */
class canGenerateAText extends FlatSpec with Matchers with TemporaryFolder{
"A Document Generator" should "write a random text to a file" in {
val path: Path = testFolder.toPath().resolve("data")
// No -n flag: the generator's default paragraph count (2) applies.
DocumentGenerator.main(s"${path.toFile.getAbsolutePath}")
val text: String = scala.io.Source.fromFile(path.toFile).mkString
text shouldBe a[String]
"\\t".r.findAllIn(text).length shouldBe 2
}
"A Document Generate with -n 3" should "write a random text with 3 paragraph" in {
val path: Path = testFolder.toPath().resolve("data")
// -n 3 requests exactly three paragraphs.
DocumentGenerator.main("-n","3",s"${path.toFile.getAbsolutePath}")
val text: String = scala.io.Source.fromFile(path.toFile).mkString
text shouldBe a[String]
"\\t".r.findAllIn(text).length shouldBe 3
}
}
| bjet007/word-count-spark-aws | input-generator/src/test/scala/org/bjean/sample/feature/canGenerateAText.scala | Scala | apache-2.0 | 1,015 |
package com.github.j5ik2o.chatwork.infrastructure.api.room
import com.github.j5ik2o.chatwork.infrastructure.api.ClientFactory
import com.github.j5ik2o.chatwork.infrastructure.api.me.MeApiService
import org.specs2.mutable.Specification
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
/**
 * Integration spec for the ChatWork API clients.
 * NOTE(review): hits the live api.chatwork.com endpoint, so it needs network
 * access and valid credentials; `memberApi` is constructed but not yet
 * exercised by the assertion below.
 */
class MemberApiServiceImplSpec extends Specification {
val client = ClientFactory.create("api.chatwork.com")
val meApi = MeApiService(client)
val roomApi = RoomApiService(client)
val memberApi = MemberApiService(client)
"s" should {
"v" in {
// Fetch the authenticated user and their room list, then print both.
val f = for {
me <- meApi.get
rooms <- roomApi.list
} yield {
println("result = " +(me, rooms))
}
// Block until both live calls finish (no timeout).
Await.result(f, Duration.Inf)
true must beTrue
}
}
}
| j5ik2o/chatwork-client | src/test/scala/com/github/j5ik2o/chatwork/infrastructure/api/room/MemberApiServiceImplSpec.scala | Scala | apache-2.0 | 840 |
/*
* Copyright (c) 2016 Markus Mulkahainen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*/
import java.io.{PrintWriter, InputStreamReader, BufferedReader}
import java.net.{Socket, ServerSocket}
import rx.lang.scala.Observable
import rx.lang.scala.subjects.PublishSubject
/**
* Created by Markus Mulkahainen on 21.1.2016.
*/
/**
 * A minimal SMTP server listening on the given port.
 *
 * Accepted client sockets are published through an Rx [[PublishSubject]];
 * the subscriber handles each client on its own thread, speaking a small
 * subset of SMTP (HELO, MAIL FROM, RCPT TO, DATA, QUIT).
 *
 * NOTE(review): ClientIO, EmailAddress, Email and InboxManager are
 * project-local types defined elsewhere in this codebase.
 */
class SmtpServer(val port: Int) {
// Stream of freshly accepted client sockets.
val subject = PublishSubject[Socket]()
val serverSocket = new ServerSocket(port)
// One handler thread per client: runs the SMTP command loop until QUIT closes the socket.
subject.subscribe(client => new Thread(new Runnable() {
def run() {
println(client.getLocalAddress().toString() + ":" + client.getLocalPort() + " connected")
val protocol = new SMTPProtocol()
val io = new ClientIO(client)
// SMTP greeting; the client must wait for this before sending commands.
io.send(s"220 welcome")
while (!client.isClosed) {
val msg = protocol.parse(io receive)
msg.messageType match {
case protocol.HELO =>
io.send(s"250 Hello ${msg.value.replaceAll("\\r\\n", "")}, I am glad to meet you")
case protocol.MAILFROM =>
if (protocol.mailFrom(EmailAddress.fromString(msg.value))) io.send(s"250 Ok") else io.send("500 Syntax error, command unrecognized")
case protocol.RCPTTO =>
if (protocol.rcptTo(EmailAddress.fromString(msg.value))) io.send(s"250 Ok") else io.send("500 Syntax error, command unrecognized")
case protocol.DATA =>
if (protocol.data(msg.value)) io.send(s"354 End data with <CR><LF>.<CR><LF>") else io.send("500 Syntax error, command unrecognized")
case protocol.QUIT =>
io.send(s"221 Bye")
client.close()
case protocol.DEFAULT =>
// Any unrecognized line is treated as message body content (DATA state).
val res = protocol.appendData(msg.value)
if (!res._1) {
io.send("500 Syntax error, command unrecognized")
} else {
if (res._2 == protocol.DataStatus.FINISHED) {
io.send("250 Ok")
InboxManager.put(res._3.get)
}
}
}
}
}
}).start(),
x => { //onError
System.exit(1)
})
println("SMTP server waiting for requests on port " + port)
// Accept loop: publishes each new connection while the subject has observers.
// serverSocket.accept() blocks until a client connects.
val t = new Thread(new Runnable() {
def run() {
while (subject.hasObservers) {
subject.onNext(serverSocket.accept())
}
}
})
t.start()
/**
 * State machine for a single SMTP session.
 * Expected command order: HELO -> MAIL FROM -> RCPT TO (1..n) -> DATA -> QUIT.
 */
class SMTPProtocol {
// Each command type records the separator after which its payload starts.
sealed abstract class MessageType(val dataSeparator: String)
case object HELO extends MessageType(" ")
case object MAILFROM extends MessageType(":")
case object RCPTTO extends MessageType(":")
case object DATA extends MessageType("\\n")
case object QUIT extends MessageType(" ")
case object DEFAULT extends MessageType("")
object DataStatus extends Enumeration {
type DataStatus = Value
val FINISHED, NOT_FINISHED = Value
}
import DataStatus._
// CR LF, and the CRLF "." CRLF byte sequence that terminates the DATA section.
val CRLF = Array[Byte](13.toByte, 10.toByte)
val CRLF_DOT_CRLF = (CRLF :+ 46.toByte) ++ CRLF
class SMTPMessage(val messageType: MessageType,val value: String = "")
// Current session state: the command type expected next.
var status: MessageType = HELO
// Accumulates the e-mail being built up over the session.
private object EMAIL {
var mailfrom: EmailAddress = _
var mailTo: List[EmailAddress] = List()
var data: String = ""
}
val stringToType: scala.collection.immutable.Map[String, MessageType] = Map(
"HELO" -> HELO,
"MAIL FROM:" -> MAILFROM,
"RCPT TO:" -> RCPTTO,
"DATA" -> DATA,
"QUIT" -> QUIT
)
// Classifies a raw input line and extracts its payload (the text after the
// command's separator); unrecognized lines become DEFAULT messages.
def parse(value: String): SMTPMessage = {
val messageType = stringToType.filterKeys(x => value.toUpperCase().startsWith(x)).values.headOption.getOrElse(DEFAULT)
return new SMTPMessage(messageType, value.substring(value.indexOf(messageType.dataSeparator) + messageType.dataSeparator.length()))
}
// Records the sender; only legal while still in the initial HELO state.
def mailFrom(email: EmailAddress): Boolean = {
if (status != HELO) { return false }
EMAIL.mailfrom = email
status = RCPTTO
return true
}
// Adds a recipient; legal any number of times while in the RCPTTO state.
def rcptTo(email: EmailAddress): Boolean = {
if (status != RCPTTO) { return false }
EMAIL.mailTo = EMAIL.mailTo :+ email
return true
}
// Switches the session into DATA state; legal only after MAIL FROM.
def data(value: String): Boolean = {
if (status != RCPTTO) { return false }
status = DATA
return true
}
// Appends one line to the message body. Returns (accepted, finished?, email);
// the e-mail is complete once the body ends with CRLF "." CRLF.
def appendData(value: String): (Boolean, DataStatus, Option[Email]) = {
if (status != DATA) { return (false, DataStatus.NOT_FINISHED, None) }
EMAIL.data += value
if (EMAIL.data.getBytes().endsWith(CRLF_DOT_CRLF)) {
status = QUIT
return (true, DataStatus.FINISHED, Some(new Email(EMAIL.mailfrom, EMAIL.mailTo.toList, EMAIL.data)))
}
return (true, DataStatus.NOT_FINISHED, None)
}
}
}
| Klyyssi/ties323 | mail-protocols/smtp-server/src/main/scala/smtp-server.scala | Scala | mit | 5,122 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.template.similarproduct
import org.apache.predictionio.controller.IEngineFactory
import org.apache.predictionio.controller.Engine
/**
 * Query sent to the similar-product engine.
 *
 * @param items item entity ids used as the basis for the similarity search
 * @param num maximum number of results to return
 * @param categories optional category filter applied to the results
 * @param whiteList optional set of item ids results are restricted to
 * @param blackList optional set of item ids excluded from the results
 */
case class Query(
  items: List[String],
  num: Int,
  categories: Option[Set[String]],
  whiteList: Option[Set[String]],
  blackList: Option[Set[String]]
)
/** Engine response: the scored items predicted for a [[Query]]. */
case class PredictedResult(
  itemScores: Array[ItemScore]
) {
  // Arrays print as identity hashes by default; render the scores instead.
  override def toString = itemScores.mkString(",")
}
/** A single result item together with its predicted score. */
case class ItemScore(
  item: String,
  score: Double
)
/**
 * Engine factory wiring the data source, preparator, the available
 * algorithms, and the serving layer into a PredictionIO engine.
 */
object SimilarProductEngine extends IEngineFactory {
  def apply() = {
    new Engine(
      classOf[DataSource],
      classOf[Preparator],
      // Two algorithms, selectable by name ("als" / "likealgo") in the engine config.
      Map("als" -> classOf[ALSAlgorithm],
        "likealgo" -> classOf[LikeAlgorithm]), // ADDED
      classOf[Serving])
  }
}
| pferrel/PredictionIO | examples/scala-parallel-similarproduct/multi/src/main/scala/Engine.scala | Scala | apache-2.0 | 1,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.stream
import org.apache.flink.table.api.scala.stream.utils.StreamITCase
import org.apache.flink.table.api.scala._
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.utils.{CommonTestData, TestFilterableTableSource}
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
/** Integration tests for registering and querying stream table sources. */
class TableSourceITCase extends StreamingMultipleProgramsTestBase {
  // Registers the shared CSV table source and queries it via SQL with a filter.
  @Test
  def testCsvTableSourceSQL(): Unit = {
    val csvTable = CommonTestData.getCsvTableSource
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    // StreamITCase collects sink output into a shared mutable list; reset it per test.
    StreamITCase.testResults = mutable.MutableList()
    tEnv.registerTableSource("persons", csvTable)
    tEnv.sql(
      "SELECT id, `first`, `last`, score FROM persons WHERE id < 4 ")
      .toDataStream[Row]
      .addSink(new StreamITCase.StringSink)
    env.execute()
    val expected = mutable.MutableList(
      "1,Mike,Smith,12.3",
      "2,Bob,Taylor,45.6",
      "3,Sam,Miller,7.89")
    // Sort both sides: streaming sinks give no ordering guarantee.
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // Same CSV source exercised through the Table API instead of SQL.
  @Test
  def testCsvTableSourceTableAPI(): Unit = {
    val csvTable = CommonTestData.getCsvTableSource
    StreamITCase.testResults = mutable.MutableList()
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    tEnv.registerTableSource("csvTable", csvTable)
    tEnv.scan("csvTable")
      .where('id > 4)
      .select('last, 'score * 2)
      .toDataStream[Row]
      .addSink(new StreamITCase.StringSink)
    env.execute()
    val expected = mutable.MutableList(
      "Williams,69.0",
      "Miller,13.56",
      "Smith,180.2",
      "Williams,4.68")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // Verifies predicate push-down into a filterable table source.
  @Test
  def testCsvTableSourceWithFilterable(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val tableName = "MyTable"
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    tEnv.registerTableSource(tableName, new TestFilterableTableSource)
    tEnv.scan(tableName)
      .where("amount > 4 && price < 9")
      .select("id, name")
      .addSink(new StreamITCase.StringSink)
    env.execute()
    val expected = mutable.MutableList(
      "5,Record_5", "6,Record_6", "7,Record_7", "8,Record_8")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
}
| DieBauer/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/stream/TableSourceITCase.scala | Scala | apache-2.0 | 3,525 |
package org.nkvoll.javabin.routing
import akka.util.Timeout
import org.nkvoll.javabin.functionality.AdminFunctionality
import org.nkvoll.javabin.json.AdminProtocol
import org.nkvoll.javabin.models.User
import org.nkvoll.javabin.routing.directives.PermissionDirectives
import org.nkvoll.javabin.routing.helpers.JavabinMarshallingSupport
import scala.concurrent.ExecutionContext
import spray.routing._
/**
 * Admin HTTP routes: shutdown endpoints plus permission-gated access to the
 * Elasticsearch and cluster sub-routes.
 */
trait AdminRouting extends HttpService with PermissionDirectives
  with ElasticsearchRouting with ClusterRouting
  with JavabinMarshallingSupport with AdminProtocol
  with AdminFunctionality {
  // format: OFF
  // Builds the admin route tree for the given user; every branch requires an
  // explicit permission check against that user.
  def adminRoute(currentUser: User)(implicit t: Timeout, ec: ExecutionContext): Route = {
    pathPrefix("shutdown") {
      requirePermission("shutdown", currentUser) {
        post {
          // POST /shutdown/_local?delay=N — shut down this node after N seconds (default 2).
          path("_local") {
            anyParam('delay.as[Int] ? 2) {
              delay =>
                complete(shutdownLocal(delay))
            }
          } ~
          // POST /shutdown/_kill_service?delay=N — kill the whole service after N seconds.
          path("_kill_service") {
            anyParam('delay.as[Int] ? 2) {
              delay =>
                complete(killService(delay))
            }
          }
        }
      }
    } ~
    pathPrefix("_elasticsearch") {
      requirePermission("elasticsearch", currentUser) {
        elasticsearchRoute
      }
    } ~
    pathPrefix("cluster") {
      requirePermission("cluster", currentUser) {
        clusterRoute
      }
    }
  }
  // format: ON
}
package com.karasiq.scalajsbundler.compilers
import java.io.{StringReader, StringWriter}
import com.googlecode.htmlcompressor.compressor.YuiJavaScriptCompressor
import com.karasiq.scalajsbundler.ScalaJSBundler.PageTypedContent
import com.yahoo.platform.yui.compressor.JavaScriptCompressor
/** Asset compiler that concatenates JS assets and minifies them with the YUI compressor. */
class JsYuiCompiler extends AssetCompiler {
  /**
   * Concatenates `contents` (via [[ConcatCompiler]]) and runs the result
   * through the YUI JavaScript compressor.
   *
   * @param contents page contents to compile
   * @return the minified JavaScript as a single string
   */
  override def compile(contents: Seq[PageTypedContent]): String = {
    val concatenated = ConcatCompiler.compile(contents)
    val source = new StringReader(concatenated)
    val output = new StringWriter(1024)
    // compress(writer, linebreak = -1 (none), munge = true, verbose/semicolons/optimizations off)
    new JavaScriptCompressor(source, new YuiJavaScriptCompressor.DefaultErrorReporter)
      .compress(output, -1, true, false, false, false)
    output.toString
  }
}
| Karasiq/sbt-scalajs-bundler | src/main/scala/com/karasiq/scalajsbundler/compilers/JsYuiCompiler.scala | Scala | mit | 705 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.memory
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ContentType, Uri}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import pureconfig.loadConfigOrThrow
import spray.json.{DefaultJsonProtocol, DeserializationException, JsObject, JsString, RootJsonFormat}
import org.apache.openwhisk.common.{Logging, LoggingMarkers, TransactionId}
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.StoreUtils._
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.entity.Attachments.Attached
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.size._
import org.apache.openwhisk.http.Messages
import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
/** Provider that builds [[MemoryArtifactStore]] instances for the supported entity types. */
object MemoryArtifactStoreProvider extends ArtifactStoreProvider {
  // Standard entry point; `useBatching` is ignored because the in-memory store
  // has no batching layer. Attachments go to an in-memory attachment store.
  override def makeStore[D <: DocumentSerializer: ClassTag](useBatching: Boolean)(
    implicit jsonFormat: RootJsonFormat[D],
    docReader: DocumentReader,
    actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): ArtifactStore[D] = {
    makeArtifactStore(MemoryAttachmentStoreProvider.makeStore())
  }
  // Builds a store wired to the given attachment store; the db name, document
  // handler and view mapper are selected from the entity's runtime class.
  def makeArtifactStore[D <: DocumentSerializer: ClassTag](attachmentStore: AttachmentStore)(
    implicit jsonFormat: RootJsonFormat[D],
    docReader: DocumentReader,
    actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): ArtifactStore[D] = {
    val classTag = implicitly[ClassTag[D]]
    val (dbName, handler, viewMapper) = handlerAndMapper(classTag)
    val inliningConfig = loadConfigOrThrow[InliningConfig](ConfigKeys.db)
    new MemoryArtifactStore(dbName, handler, viewMapper, inliningConfig, attachmentStore)
  }
  // Maps an entity class to its (db name, handler, view mapper) triple.
  // Intentionally non-exhaustive: an unsupported type is a programmer error
  // and fails fast with MatchError.
  private def handlerAndMapper[D](entityType: ClassTag[D])(
    implicit actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): (String, DocumentHandler, MemoryViewMapper) = {
    entityType.runtimeClass match {
      case x if x == classOf[WhiskEntity] =>
        ("whisks", WhisksHandler, WhisksViewMapper)
      case x if x == classOf[WhiskActivation] =>
        ("activations", ActivationHandler, ActivationViewMapper)
      case x if x == classOf[WhiskAuth] =>
        ("subjects", SubjectHandler, SubjectViewMapper)
    }
  }
}
/**
* In-memory ArtifactStore implementation to enable test setups without requiring a running CouchDB instance
 * It also serves as a canonical example of how an ArtifactStore can be implemented with all the support for CRUD
* operations and Queries etc
*/
class MemoryArtifactStore[DocumentAbstraction <: DocumentSerializer](dbName: String,
                                                                     documentHandler: DocumentHandler,
                                                                     viewMapper: MemoryViewMapper,
                                                                     val inliningConfig: InliningConfig,
                                                                     val attachmentStore: AttachmentStore)(
  implicit system: ActorSystem,
  val logging: Logging,
  jsonFormat: RootJsonFormat[DocumentAbstraction],
  val materializer: ActorMaterializer,
  docReader: DocumentReader)
    extends ArtifactStore[DocumentAbstraction]
    with DefaultJsonProtocol
    with DocumentProvider
    with AttachmentSupport[DocumentAbstraction] {

  override protected[core] implicit val executionContext: ExecutionContext = system.dispatcher

  // Backing store: document id -> latest Artifact. TrieMap gives the lock-free
  // atomic putIfAbsent/replace/remove used for optimistic concurrency below.
  private val artifacts = new TrieMap[String, Artifact]

  private val _id = "_id"
  private val _rev = "_rev"

  val attachmentScheme: String = attachmentStore.scheme

  // Inserts a new document (rev == 0) or atomically replaces an existing one.
  // The caller's revision must match the stored revision; otherwise the future
  // fails with DocumentConflictException (CouchDB-style optimistic locking).
  override protected[database] def put(d: DocumentAbstraction)(implicit transid: TransactionId): Future[DocInfo] = {
    val asJson = d.toDocumentRecord

    val id = asJson.fields(_id).convertTo[String].trim
    require(!id.isEmpty, "document id must be defined")

    val rev: Int = getRevision(asJson)
    val docinfoStr = s"id: $id, rev: $rev"
    val start = transid.started(this, LoggingMarkers.DATABASE_SAVE, s"[PUT] '$dbName' saving document: '$docinfoStr'")

    // `existing` carries the caller's (id, rev) used for the CAS; `updated` is
    // the same document with the revision bumped.
    val existing = Artifact(id, rev, asJson)
    val updated = existing.incrementRev()
    val t = Try[DocInfo] {
      if (rev == 0) {
        // First insert: putIfAbsent ensures only one concurrent creator wins.
        artifacts.putIfAbsent(id, updated) match {
          case Some(_) => throw DocumentConflictException("conflict on 'put'")
          case None => updated.docInfo
        }
      } else if (artifacts.replace(id, existing, updated)) {
        updated.docInfo
      } else {
        // Document missing or stored revision differs from the caller's.
        throw DocumentConflictException("conflict on 'put'")
      }
    }

    val f = Future.fromTry(t)

    f.onComplete {
      case Success(_) => transid.finished(this, start, s"[PUT] '$dbName' completed document: '$docinfoStr'")
      case Failure(_: DocumentConflictException) =>
        transid.finished(this, start, s"[PUT] '$dbName', document: '$docinfoStr'; conflict.")
      case Failure(_) =>
    }

    reportFailure(f, start, failure => s"[PUT] '$dbName' internal error, failure: '${failure.getMessage}'")
  }

  // Deletes a document if (id, rev) match exactly; distinguishes a revision
  // conflict from a missing document for CouchDB-compatible error semantics.
  override protected[database] def del(doc: DocInfo)(implicit transid: TransactionId): Future[Boolean] = {
    checkDocHasRevision(doc)
    val start = transid.started(this, LoggingMarkers.DATABASE_DELETE, s"[DEL] '$dbName' deleting document: '$doc'")

    val t = Try[Boolean] {
      // Artifact equality is (id, rev) only, so this remove is a CAS on the revision.
      if (artifacts.remove(doc.id.id, Artifact(doc))) {
        transid.finished(this, start, s"[DEL] '$dbName' completed document: '$doc'")
        true
      } else if (artifacts.contains(doc.id.id)) {
        //Indicates that document exist but revision does not match
        transid.finished(this, start, s"[DEL] '$dbName', document: '$doc'; conflict.")
        throw DocumentConflictException("conflict on 'delete'")
      } else {
        transid.finished(this, start, s"[DEL] '$dbName', document: '$doc'; not found.")
        // for compatibility
        throw NoDocumentException("not found on 'delete'")
      }
    }

    val f = Future.fromTry(t)

    reportFailure(f, start, failure => s"[DEL] '$dbName' internal error, doc: '$doc', failure: '${failure.getMessage}'")
  }

  // Fetches and deserializes a document by id; fails with NoDocumentException
  // when absent, and maps deserialization failures to DocumentUnreadable.
  override protected[database] def get[A <: DocumentAbstraction](doc: DocInfo,
                                                                 attachmentHandler: Option[(A, Attached) => A] = None)(
    implicit transid: TransactionId,
    ma: Manifest[A]): Future[A] = {

    val start = transid.started(this, LoggingMarkers.DATABASE_GET, s"[GET] '$dbName' finding document: '$doc'")

    require(doc != null, "doc undefined")
    val t = Try[A] {
      artifacts.get(doc.id.id) match {
        case Some(a) =>
          //Revision matching is enforced in deserilization logic
          transid.finished(this, start, s"[GET] '$dbName' completed: found document '$doc'")
          deserialize[A, DocumentAbstraction](doc, a.doc)
        case _ =>
          transid.finished(this, start, s"[GET] '$dbName', document: '$doc'; not found.")
          // for compatibility
          throw NoDocumentException("not found on 'get'")
      }
    }

    val f = Future.fromTry(t).recoverWith {
      case _: DeserializationException => throw DocumentUnreadable(Messages.corruptedEntity)
    }

    reportFailure(f, start, failure => s"[GET] '$dbName' internal error, doc: '$doc', failure: '${failure.getMessage}'")
  }

  // Runs a view query ("ddoc/view") over all stored documents: filter via the
  // view mapper, sort, page with skip/limit, then transform each row through
  // the document handler. `reduce` is not supported.
  override protected[core] def query(table: String,
                                     startKey: List[Any],
                                     endKey: List[Any],
                                     skip: Int,
                                     limit: Int,
                                     includeDocs: Boolean,
                                     descending: Boolean,
                                     reduce: Boolean,
                                     stale: StaleParameter)(implicit transid: TransactionId): Future[List[JsObject]] = {
    require(!(reduce && includeDocs), "reduce and includeDocs cannot both be true")
    require(!reduce, "Reduce scenario not supported") //TODO Investigate reduce
    require(skip >= 0, "skip should be non negative")
    require(limit >= 0, "limit should be non negative")
    documentHandler.checkIfTableSupported(table)

    val Array(ddoc, viewName) = table.split("/")

    val start = transid.started(this, LoggingMarkers.DATABASE_QUERY, s"[QUERY] '$dbName' searching '$table")

    val s = artifacts.toStream
      .map(_._2)
      .filter(a => viewMapper.filter(ddoc, viewName, startKey, endKey, a.doc, a.computed))
      .map(_.doc)
      .toList

    val sorted = viewMapper.sort(ddoc, viewName, descending, s)

    // limit == 0 means "no limit" (used e.g. by count).
    val out = if (limit > 0) sorted.slice(skip, skip + limit) else sorted.drop(skip)

    val realIncludeDocs = includeDocs | documentHandler.shouldAlwaysIncludeDocs(ddoc, viewName)

    val r = out.map { js =>
      documentHandler.transformViewResult(
        ddoc,
        viewName,
        startKey,
        endKey,
        realIncludeDocs,
        js,
        MemoryArtifactStore.this)
    }.toList

    val f = Future.sequence(r).map(_.flatten)
    f.foreach(_ => transid.finished(this, start, s"[QUERY] '$dbName' completed: matched ${out.size}"))

    reportFailure(f, start, failure => s"[QUERY] '$dbName' internal error, failure: '${failure.getMessage}'")
  }

  // Counts matching rows by issuing the same query with no limit and no docs.
  override protected[core] def count(table: String,
                                     startKey: List[Any],
                                     endKey: List[Any],
                                     skip: Int,
                                     stale: StaleParameter)(implicit transid: TransactionId): Future[Long] = {
    val f =
      query(table, startKey, endKey, skip, limit = 0, includeDocs = false, descending = true, reduce = false, stale)
    f.map(_.size)
  }

  // Streams an attachment into `sink`: either from the inlined data encoded in
  // the attachment name's URI, or from the external attachment store.
  override protected[core] def readAttachment[T](doc: DocInfo, attached: Attached, sink: Sink[ByteString, Future[T]])(
    implicit transid: TransactionId): Future[T] = {
    val name = attached.attachmentName
    val start = transid.started(
      this,
      LoggingMarkers.DATABASE_ATT_GET,
      s"[ATT_GET] '$dbName' finding attachment '$name' of document '$doc'")
    val attachmentUri = Uri(name)
    if (isInlined(attachmentUri)) {
      memorySource(attachmentUri).runWith(sink)
    } else {
      val storedName = attachmentUri.path.toString()
      val f = attachmentStore.readAttachment(doc.id, storedName, sink)
      f.foreach(_ =>
        transid.finished(this, start, s"[ATT_GET] '$dbName' completed: found attachment '$name' of document '$doc'"))
      f
    }
  }

  // Delegates bulk attachment deletion to the attachment store.
  override protected[core] def deleteAttachments[T](doc: DocInfo)(implicit transid: TransactionId): Future[Boolean] = {
    attachmentStore.deleteAttachments(doc.id)
  }

  // Stores the attachment externally (or inlines small ones) and persists the
  // updated document; logic lives in AttachmentSupport.attachToExternalStore.
  override protected[database] def putAndAttach[A <: DocumentAbstraction](
    d: A,
    update: (A, Attached) => A,
    contentType: ContentType,
    docStream: Source[ByteString, _],
    oldAttachment: Option[Attached])(implicit transid: TransactionId): Future[(DocInfo, Attached)] = {
    attachToExternalStore(d, update, contentType, docStream, oldAttachment, attachmentStore)
  }

  // Drops all in-memory documents and shuts down the attachment store.
  override def shutdown(): Unit = {
    artifacts.clear()
    attachmentStore.shutdown()
  }

  // Raw JSON lookup by id (no revision check, no deserialization); None when absent.
  override protected[database] def get(id: DocId)(implicit transid: TransactionId): Future[Option[JsObject]] = {
    val start = transid.started(this, LoggingMarkers.DATABASE_GET, s"[GET] '$dbName' finding document: '$id'")
    val t = Try {
      artifacts.get(id.id) match {
        case Some(a) =>
          transid.finished(this, start, s"[GET] '$dbName' completed: found document '$id'")
          Some(a.doc)
        case _ =>
          transid.finished(this, start, s"[GET] '$dbName', document: '$id'; not found.")
          None
      }
    }
    val f = Future.fromTry(t)
    reportFailure(f, start, failure => s"[GET] '$dbName' internal error, doc: '$id', failure: '${failure.getMessage}'")
  }

  // Reads the integer revision from the document's "_rev" field; 0 when absent,
  // which marks a first-time insert in `put`.
  private def getRevision(asJson: JsObject) = {
    asJson.fields.get(_rev) match {
      case Some(JsString(r)) => r.toInt
      case _ => 0
    }
  }

  //Use curried case class to allow equals support only for id and rev
  //This allows us to implement atomic replace and remove which check
  //for id,rev equality only
  private case class Artifact(id: String, rev: Int)(val doc: JsObject, val computed: JsObject) {

    def incrementRev(): Artifact = {
      val (newRev, updatedDoc) = incrementAndGet()
      copy(rev = newRev)(updatedDoc, computed) //With Couch attachments are lost post update
    }

    def docInfo = DocInfo(DocId(id), DocRevision(rev.toString))

    // Bumps the revision and rewrites the "_rev" field inside the document JSON.
    private def incrementAndGet() = {
      val newRev = rev + 1
      val updatedDoc = JsObject(doc.fields + (_rev -> JsString(newRev.toString)))
      (newRev, updatedDoc)
    }
  }

  private object Artifact {
    def apply(id: String, rev: Int, doc: JsObject): Artifact = {
      Artifact(id, rev)(doc, documentHandler.computedFields(doc))
    }

    // Comparison-only artifact (empty doc); valid because equality ignores the
    // curried (doc, computed) parameters.
    def apply(info: DocInfo): Artifact = {
      Artifact(info.id.id, info.rev.rev.toInt)(JsObject.empty, JsObject.empty)
    }
  }
}
| starpit/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/database/memory/MemoryArtifactStore.scala | Scala | apache-2.0 | 14,072 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Linked}
/** Computation box CP246 ("Allowances"); an optional integer amount. */
case class CP246(value: Option[Int]) extends CtBoxIdentifier(name = "Allowances") with CtOptionalInteger

object CP246 extends Linked[CP93, CP246] {
  // CP246 is a linked box: its value is copied directly from CP93.
  override def apply(source: CP93): CP246 = CP246(source.value)
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP246.scala | Scala | apache-2.0 | 930 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package exapmles.todolist
package peristence
import examples.todolist.persistence._
import freestyle.tagless.module
/**
* Module containing all the algebras declared in this layer.
*/
// Freestyle @module aggregating the persistence-layer algebras so they can be
// injected together as a single dependency on the effect type F.
@module
trait Persistence[F[_]] {
  val appRepository: AppRepository[F]
  val todoItemRepository: TodoItemRepository[F]
  val todoListRepository: TodoListRepository[F]
  val tagRepository: TagRepository[F]
}
| frees-io/freestyle | modules/examples/todolist-http-http4s/src/main/scala/todo/peristence/Persistence.scala | Scala | apache-2.0 | 1,025 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package controllers
import features.{ApplicationFeatures, KMPreferredReplicaElectionFeature, KMScheduleLeaderElectionFeature}
import kafka.manager.ApiError
import kafka.manager.features.ClusterFeatures
import models.FollowLink
import models.form.{PreferredReplicaElectionOperation, RunElection, UnknownPREO}
import models.navigation.Menus
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.validation.{Constraint, Invalid, Valid}
import play.api.i18n.I18nSupport
import play.api.libs.json.{JsObject, Json}
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}
import scalaz.-\\/
/**
* @author hiral
*/
/**
 * Play controller for running and scheduling preferred replica elections on a
 * Kafka cluster, with HTML and JSON (API) variants of each action.
 */
class PreferredReplicaElection (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
                               (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
  private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
  private[this] implicit val cf: ClusterFeatures = ClusterFeatures.default

  // Form-level guard: the only accepted operation value is "run".
  val validateOperation : Constraint[String] = Constraint("validate operation value") {
    case "run" => Valid
    case any: Any => Invalid(s"Invalid operation value: $any")
  }

  val preferredReplicaElectionForm = Form(
    mapping(
      "operation" -> nonEmptyText.verifying(validateOperation)
    )(PreferredReplicaElectionOperation.apply)(PreferredReplicaElectionOperation.unapply)
  )

  // GET: renders the election status page for cluster `c`.
  def preferredReplicaElection(c: String) = Action.async { implicit request: RequestHeader =>
    kafkaManager.getPreferredLeaderElection(c).map { errorOrStatus =>
      Ok(views.html.preferredReplicaElection(c,errorOrStatus,preferredReplicaElectionForm)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
    }
  }

  // POST: runs a preferred replica election across all topics of cluster `c`.
  def handleRunElection(c: String) = Action.async { implicit request: Request[AnyContent] =>
    featureGate(KMPreferredReplicaElectionFeature) {
      preferredReplicaElectionForm.bindFromRequest.fold(
        formWithErrors => Future.successful(BadRequest(views.html.preferredReplicaElection(c, -\\/(ApiError("Unknown operation!")), formWithErrors))),
        op => op match {
          case RunElection =>
            // Fetch the full topic list first, then trigger the election on all of them.
            val errorOrSuccessFuture = kafkaManager.getTopicList(c).flatMap { errorOrTopicList =>
              errorOrTopicList.fold({ e =>
                Future.successful(-\\/(e))
              }, { topicList =>
                kafkaManager.runPreferredLeaderElection(c, topicList.list.toSet)
              })
            }
            errorOrSuccessFuture.map { errorOrSuccess =>
              Ok(views.html.common.resultOfCommand(
                views.html.navigation.clusterMenu(c, "Preferred Replica Election", "", menus.clusterMenus(c)),
                models.navigation.BreadCrumbs.withViewAndCluster("Run Election", c),
                errorOrSuccess,
                "Run Election",
                FollowLink("Go to preferred replica election.", routes.PreferredReplicaElection.preferredReplicaElection(c).toString()),
                FollowLink("Try again.", routes.PreferredReplicaElection.preferredReplicaElection(c).toString())
              )).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
            }
          case UnknownPREO(opString) =>
            Future.successful(Ok(views.html.common.resultOfCommand(
              views.html.navigation.clusterMenu(c, "Preferred Replica Election", "", menus.clusterMenus(c)),
              models.navigation.BreadCrumbs.withNamedViewAndCluster("Preferred Replica Election", c, "Unknown Operation"),
              -\\/(ApiError(s"Unknown operation $opString")),
              "Unknown Preferred Replica Election Operation",
              FollowLink("Back to preferred replica election.", routes.PreferredReplicaElection.preferredReplicaElection(c).toString()),
              FollowLink("Back to preferred replica election.", routes.PreferredReplicaElection.preferredReplicaElection(c).toString())
            )).withHeaders("X-Frame-Options" -> "SAMEORIGIN"))
        }
      )
    }
  }

  // JSON API: returns the currently scheduled election interval (0 when none).
  // NOTE(review): pleCancellable maps cluster -> (cancellable, interval); the
  // `_2` component is read as the interval throughout — confirm against KafkaManager.
  def handleScheduledIntervalAPI(cluster: String): Action[AnyContent] = Action.async { implicit request =>
    featureGate(KMScheduleLeaderElectionFeature) {
      val interval = kafkaManager.pleCancellable.get(cluster).map(_._2).getOrElse(0)
      Future(Ok(Json.obj("scheduledInterval" -> interval))
        .withHeaders("X-Frame-Options" -> "SAMEORIGIN"))
    }
  }

  // GET: renders the scheduler page with the current interval and running state.
  def scheduleRunElection(c: String) = Action.async { implicit request =>
    def getOrZero : (Int, String) = if(kafkaManager.pleCancellable.contains(c)){
      (kafkaManager.pleCancellable(c)._2, "Scheduler is running")
    }
    else {
      (0, "Scheduler is not running")
    }
    val (timePeriod, status_string) = getOrZero
    kafkaManager.getTopicList(c).map { errorOrStatus =>
      Ok(views.html.scheduleLeaderElection(c,errorOrStatus, status_string, timePeriod)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
    }
  }

  // POST (form): starts a periodic election schedule if none exists; the period
  // (minutes) comes from the "timePeriod" form field.
  def handleScheduleRunElection(c: String) = Action.async { implicit request =>
    def setOrExtract : (Int, String) = if(!kafkaManager.pleCancellable.contains(c)){
      kafkaManager.getTopicList(c).flatMap { errorOrTopicList =>
        errorOrTopicList.fold({ e =>
          Future.successful(-\\/(e))
        }, { topicList =>
          kafkaManager.schedulePreferredLeaderElection(c, topicList.list.toSet, request.body.asFormUrlEncoded.get("timePeriod")(0).toInt)
        })
      }
      (request.body.asFormUrlEncoded.get("timePeriod")(0).toInt, "Scheduler started")
    }
    else{
      (kafkaManager.pleCancellable(c)._2, "Scheduler already scheduled")
    }
    val (timeIntervalMinutes, status_string) = setOrExtract
    kafkaManager.getTopicList(c).map { errorOrStatus =>
      Ok(views.html.scheduleLeaderElection(c, errorOrStatus, status_string, timeIntervalMinutes)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
    }
  }

  // POST (form): cancels a running election schedule, if any.
  def cancelScheduleRunElection(c: String) = Action.async { implicit request =>
    val status_string: String = if(kafkaManager.pleCancellable.contains(c)){
      kafkaManager.cancelPreferredLeaderElection(c)
      "Scheduler stopped"
    }
    else "Scheduler already not running"
    kafkaManager.getTopicList(c).map { errorOrStatus =>
      Ok(views.html.scheduleLeaderElection(c,errorOrStatus,status_string, 0)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
    }
  }

  // JSON API variant of handleScheduleRunElection; the interval is taken from
  // the first value of the JSON request body.
  def handleScheduleRunElectionAPI(c: String) = Action.async { implicit request =>
    // ToDo: Refactor out common part from handleScheduleRunElection
    featureGate(KMScheduleLeaderElectionFeature) {
      def setOrExtract : (Int, String) = if(!kafkaManager.pleCancellable.contains(c)){
        kafkaManager.getTopicList(c).flatMap { errorOrTopicList =>
          errorOrTopicList.fold({ e =>
            Future.successful(-\\/(e))
          }, { topicList =>
            kafkaManager.schedulePreferredLeaderElection(c, topicList.list.toSet, request.body.asInstanceOf[AnyContentAsJson].json.asInstanceOf[JsObject].values.toList(0).toString().toInt)
          })
        }
        (request.body.asInstanceOf[AnyContentAsJson].json.asInstanceOf[JsObject].values.toList(0).toString().toInt, "Scheduler started")
      }
      else{
        (kafkaManager.pleCancellable(c)._2, "Scheduler already scheduled")
      }
      val (timePeriod, status_string) = setOrExtract
      Future(
        Ok(Json.obj(
          "scheduledInterval" -> timePeriod, "message" -> status_string
        )).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
      )
    }
  }

  // JSON API variant of cancelScheduleRunElection.
  def cancelScheduleRunElectionAPI(c: String) = Action.async { implicit request =>
    // ToDo: Refactor out common part from cancelScheduleRunElection
    featureGate(KMScheduleLeaderElectionFeature) {
      val status_string: String = if(kafkaManager.pleCancellable.contains(c)){
        kafkaManager.cancelPreferredLeaderElection(c)
        "Scheduler stopped"
      }
      else "Scheduler already not running"
      Future(Ok(Json.obj("scheduledInterval" -> 0, "message" -> status_string)).withHeaders("X-Frame-Options" -> "SAMEORIGIN"))
    }
  }
}
| yahoo/kafka-manager | app/controllers/PreferredReplicaElection.scala | Scala | apache-2.0 | 8,167 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package devsmodel
import java.time.Duration
import akka.actor._
import dmfmessages.DMFSimMessages._
import simutils._
import akka.event.{Logging, LoggingAdapter}
import akka.serialization.Serialization
import simutils.UniqueNames
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Put
import akka.event.slf4j.SLF4JLogging
import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
/**
 * This object is a faithful representation of the model coordinator as described by Chow
* and Ziegler in <a href="http://dl.acm.org/citation.cfm?id=194336">Parallel DEVS: a parallel, hierarchical, modular, modeling formalism</a>
* A PostScript version of the paper is available <a href="http://www.cs.mcgill.ca/~hv/articles/DiscreteEvent/DEVS/rev-devs.ps.gz">here</a>.
* A ModelCoordinator coordinates the actions of multiple subordinate [[ModelSimulator]]s or other model coordinators.
* To a parent ModelCoordinator, the behavior of a [[ModelSimulator]] and ModelCoordinator are identical. This
* enables hierarchical and modular construction of DEVS simulations.
*
* @param initialTime The initial start time of the simulation, usually 0
* @param randomActor The random actor to set random number seeds
* @param simLogger The logger for the simulation
*/
abstract class ModelCoordinator(val initialTime: Duration, var randomActor: ActorRef, var simLogger: ActorRef, val registerCluster: Boolean = false)
  extends Actor with ActorLogging with UniqueNames with MessageConverter {

  // Optionally register this coordinator with the cluster-wide pub-sub mediator so it
  // can be addressed by path from other nodes.
  if (registerCluster) {
    val mediator = DistributedPubSub(context.system).mediator
    // register to the path
    mediator ! Put(self)
  }

  // Escalate initialization failures to the parent supervisor instead of restarting locally.
  override val supervisorStrategy =
    OneForOneStrategy() {
      case e: ActorInitializationException => {
        SupervisorStrategy.Escalate
      }
    }

  /**
   * A map containing all of the subordinate models and the scheduled time of their next internal state transition
   */
  protected val nextMap: mutable.HashMap[ActorRef, Duration] = new mutable.HashMap[ActorRef, Duration]()

  /**
   * Convenience method to get a list of subordinate [[ModelSimulator]]s or [[ModelCoordinator]]s
   *
   * @return A set of subordinate models
   */
  def subordinates() = nextMap.keySet

  /**
   * The current simulation time
   */
  protected var currentTime = initialTime

  /**
   * The parent coordinator for this coordinator. It can be different from the akka parent,
   * which manages actor supervision.
   */
  protected var parentCoordinator: ActorRef = _

  /**
   * Bag of external event messages to be executed
   */
  protected var externalEvents: List[ExternalEvent[_ <: java.io.Serializable]] = List()

  /**
   * An index incremented each time a unique event index is generated by a call to [[nextEventIndex]]
   */
  protected var runningEventIndex: Long = 0

  /**
   * Call this method from the [[handleExternalEvent()]] method in order to get a unique index to assign to an [[dmfmessages.DMFSimMessages.EventMessage]]
   *
   * @return A unique event index to be assigned to an event
   */
  def nextEventIndex: Long = {
    runningEventIndex = runningEventIndex + 1
    log.debug("Assigning event index " + (runningEventIndex - 1))
    runningEventIndex - 1
  }

  /**
   * Event messages cannot be sent directly to subordinate [[ModelSimulator]]s. This utility method must be
   * used so that the [[awaitingBagEvents]] and [[influences]] lists are updated properly.
   *
   * @param externalEvent The event to send
   * @param receiveActor The subordinate [[ModelCoordinator]] or [[ModelSimulator]] to receive the event
   */
  def sendEventMessage(externalEvent: ExternalEvent[_ <: java.io.Serializable], receiveActor: ActorRef): Unit = {
    val nextIndex = nextEventIndex
    // Protobuf payloads are wrapped in a serialized EventMessage; plain Serializable
    // payloads are sent as a local EventMessageCase.
    val eventMessage = externalEvent.eventData match {
      case g: com.google.protobuf.Message =>
        val devsEventData = ModelSimulator.buildDEVSEventData(DEVSEventData.EventType.EXTERNAL, externalEvent.executionTime, g)
        ModelSimulator.buildEventMessage(devsEventData, externalEvent.executionTime, nextIndex)
      case s: java.io.Serializable =>
        EventMessageCase(externalEvent, externalEvent.executionTime, nextIndex)
    }
    // Track the recipient so we later know to await its BagEventDone and include it in
    // the synchronize set for the next round of state transitions.
    awaitingBagEvents = AwaitingEventBagging(receiveActor, nextIndex) :: awaitingBagEvents
    influences = receiveActor :: influences
    val t = externalEvent.executionTime
    log.debug(t + "Sending message to " + receiveActor + ": " + eventMessage)
    log.debug(t + "Added message to awaitingBagEvents which now has size " + awaitingBagEvents.size)
    log.debug(t + "Adding " + receiveActor + " to influences")
    receiveActor ! eventMessage
  }

  /**
   * A utility class used by the [[getImminentSet]] method to store the list of imminents and the minimum internal state transition time among them
   *
   * @param imminents A list of imminent subordinate models
   * @param nextTime The minimum internal state transition time from the subordinate models
   */
  case class Imminents(imminents: List[ActorRef], nextTime: Duration)

  /**
   * A utility class used by the [[awaitingBagEvents]] list to store subordinate models to which an [[EventMessage]] has
   * been sent and a unique index assigned to each external event
   *
   * @param actor Subordinate model to which [[EventMessage]] has been sent
   * @param eventIndex A unique index for this coordination
   */
  case class AwaitingEventBagging(actor: ActorRef, eventIndex: Long)

  /**
   * The list of subordinate models scheduled at the same time for the next internal state transition
   */
  protected var imminents: List[ActorRef] = List()

  /**
   * A list of subordinate models to which an [[EventMessage]] has been sent
   */
  protected var influences: List[ActorRef] = List()

  /**
   * A list of subordinate models for which we await completion of generation output
   */
  protected var awaitingOutputDone: List[ActorRef] = List()

  /**
   * A list of subordinate models for which we await completion of bagging an [[EventMessage]]
   */
  protected var awaitingBagEvents: List[AwaitingEventBagging] = List()

  /**
   * A list of subordinate models to which a [[GetNextTime]] message is sent during initialization
   */
  protected var awaitingNextTime: List[ActorRef] = List()

  /**
   * A list of subordinate models to which [[ProcessEventMessages]] has been sent in the [[processingOutput]] phase
   */
  protected var awaitingMessageProcessing: List[ActorRef] = List()

  /**
   * A list of subordinate models to which a [[Terminate]] message has been sent during simulation termination
   */
  protected var awaitingTermination: List[ActorRef] = List()

  /**
   * A list of subordinate models to which an [[ExecuteTransition]] message is sent in the [[checkDonePassingMessages]]
   * method before going to the [[processingTransitions]] phase. This set is composed of a combination of
   * models in the [[imminents]] list and the [[influences]] list. This set is the set of subordinate models
   * executing simultaneous and parallel state transitions.
   */
  protected var synchronizeSet: List[ActorRef] = List()

  /**
   * Utility function to determine the time of the next scheduled internal state transition from the subordinate models
   *
   * @return The time of the next scheduled internal state transition
   */
  def getNextTime = nextMap.values.min

  /**
   * Utility function to determine the time of the next scheduled internal state transition from the subordinate models
   * and the list of models scheduled to make a transition at that time
   *
   * @return [[Imminents]] data structure containing the list of imminent models and their scheduled transition time
   */
  def getImminentSet: Imminents = {
    val t = getNextTime
    // All subordinates whose next internal transition is exactly at the minimum time t.
    Imminents(nextMap.filter { case(actor, time) =>
      time.compareTo(t) == 0
    }.keySet.toList, t)
  }

  /**
   * This function can be overridden by subclasses in order to perform shutdown, logging, and data collection at the
   * end of the simulation.
   */
  def preTerminate() = {}

  /**
   * Function called after receiving an [[OutputDone]] or [[BagEventDone]] message. If both the [[awaitingOutputDone]] and
   * [[awaitingBagEvents]] lists are empty, this method will send an [[OutputDone]] message to the parent coordinator
   * and transition this coordinator from the [[processingOutput]] state to the [[passingMessages]] state
   *
   * @param t The time of the check
   */
  private def checkDoneProcessingOutput(t: Duration): Unit = {
    if(awaitingOutputDone.isEmpty && awaitingBagEvents.isEmpty) {
      parentCoordinator ! ModelSimulator.buildOutputDone(t)
      log.debug(t + " Done processing putput.")
    } else {
      log.debug(t + " awaitingOutputDone still has " + awaitingOutputDone.size + " members.")
      log.debug(t + " awaitingBagEvents still has " + awaitingBagEvents.size + " members.")
    }
  }

  /**
   * If all of the [[BagEventDone]] responses have been received in the [[passingMessages]] state, the [[awaitingBagEvents]]
   * list will be empty, and it is time to execute a state transition for all subordinate models in the synchronize set,
   * which is the union of the [[imminents]] and [[influences]] list without duplicates. Send an [[ExecuteTransition]]
   * message to each model in the synchronizeSet and transition to the [[processingTransitions]] state.
   */
  protected def checkDonePassingMessages(t: Duration): Unit = {
    if (awaitingBagEvents.isEmpty) {
      // The synchronize set is the de-duplicated union of influenced and imminent models.
      synchronizeSet = (influences ::: imminents).distinct
      if (synchronizeSet.isEmpty) {
        parentCoordinator ! ModelSimulator.buildStateTransitionDone(t, getNextTime)
        log.debug("Become: Synchronize set is empty, transitioning from passingMessages to processingOutput")
        context.become(processingOutput)
      } else {
        log.debug(t + "sending ExeucteTranisiton to synchronize set: " + synchronizeSet)
        synchronizeSet.foreach(s => s ! ModelSimulator.buildExecuteTransition(currentTime))
        imminents = List()
        influences = List()
        log.debug("Become: awaitingBagEvents is empty. Transitioning from passingMessages to processingTransitions phase.")
        context.become(processingTransitions)
      }
    }
    else {
      log.debug("awaitingBagEvents still has " + awaitingBagEvents.size + " members.")
    }
  }

  /**
   * This is a very important abstract method that must be overridden by any subclasses of ModelCoordinator. This function
   * will route output messages from subordinate models to the appropriate destinations, whether they be other subordinate models
   * or to the parent coordinator, transitioning the outputs through a translation function as necessary. In the words of Ziegler
   * and Chow's paper, output is received from a specific subordinate model, i. For all j models in the influence set Ii,
   * first send the output through an i to j translation Zij before sending the message to j.
   *
   * If an [[EventMessage]] is sent to a model, send it using the [[sendEventMessage()]] method.
   *
   * Furthermore, if self is also in the influence set Ii, the message must also be transmitted upward to the parent
   * coordinator after going through an i to self translation Zi,self.
   *
   * @param eventSender The subordinate model from which the output is received
   * @param output The output message received
   */
  def handleOutputEvent(eventSender: ActorRef, output: OutputEvent[_ <: java.io.Serializable])

  /**
   * This method may be overridden by subclasses of ModelCoordinator to enable more complex routing. By default, it invokes [[handleOutputEvent]].
   *
   *
   * @param eventSender The subordinate model from which the output is received
   * @param output The output message received
   */
  def routeOutputEvent(eventSender: ActorRef, output: OutputEvent[_ <: java.io.Serializable]) {
    handleOutputEvent( eventSender, output )
  }

  /**
   * This is another important abstract method that must be overridden by subclasses of ModelCoordinator. This function
   * will route any external event messages to the appropriate subordinate models. It will process those events
   * through a translation function as necessary.
   *
   * If an [[EventMessage]] is sent to a model, send it using the [[sendEventMessage()]] method.
   *
   * @param externalEvent The external event to be handled
   */
  def handleExternalEvent(externalEvent: ExternalEvent[_ <: java.io.Serializable])

  /**
   * This method may be overridden by subclasses of ModelCoordinator to enable more complex routing. By default, it invokes [[handleExternalEvent]].
   *
   *
   * @param externalEvent The external event to be routed
   */
  def routeExternalEvent(externalEvent: ExternalEvent[_ <: java.io.Serializable]) {
    handleExternalEvent( externalEvent )
  }

  /**
   * The ModelCoordinator is in this state only during initialization. Upon receipt of a [[GetNextTime]] message, send a
   * [[GetNextTime]] message to all subordinate models and await a [[NextTime]] result from each. Upon receipt of a
   * [[NextTime]] message, store the result in the [[nextMap]] and remove the sender from the [[awaitingNextTime]] list.
   * Once all results are received, send a [[NextTime]] message to the parent coordinator with the result of [[getNextTime]].
   * Then transition to the [[processingOutput]] state.
   *
   * @return
   */
  def receive = {
    case gnt: GetNextTime =>
      // The sender of the first GetNextTime is recorded as this coordinator's parent.
      parentCoordinator = sender()
      randomActor = context.system.asInstanceOf[ExtendedActorSystem].provider.resolveActorRef(gnt.getSerializedRandomActor)
      simLogger = context.system.asInstanceOf[ExtendedActorSystem].provider.resolveActorRef(gnt.getSerializedSimLogger)
      awaitingNextTime = nextMap.keySet.toList
      nextMap.keySet.foreach(_ ! gnt)
      log.debug("Initializing by getting next time from subordinate models")
    case nt: NextTime =>
      val t: Duration = Duration.parse(nt.getTimeString)
      nextMap.put(sender(), t)
      awaitingNextTime = awaitingNextTime.filterNot(_ == sender())
      log.debug(t + " Received NextTime from " + sender().path.name + ". Awaiting " + awaitingNextTime.size + " more.")
      if (awaitingNextTime.isEmpty) {
        parentCoordinator ! ModelSimulator.buildNextTime(getNextTime)
        log.debug("Become: " + t + " Received all NextTime messages. Transitioning from receive to processingOutput.")
        context.become(processingOutput)
      }
    case m: Any =>
      handleOtherMessages(m)
  }

  /**
   * Handles a [[GenerateOutput]] message: records the imminent set, advances [[currentTime]], and
   * forwards a [[GenerateOutput]] request to every imminent subordinate. Throws a
   * [[SynchronizationException]] if the requested time does not equal the minimum scheduled
   * internal-transition time of the subordinates.
   */
  def generateOutput(g: GenerateOutput): Unit = {
    val t = Duration.parse(g.getTimeString)
    val imminentSet = getImminentSet
    if (t.compareTo(imminentSet.nextTime) == 0) {
      currentTime = t
      imminents = imminentSet.imminents
      awaitingOutputDone = imminents
      log.debug(t + "Getting output from imminent set:")
      imminents.foreach { i =>
        log.debug(i.path.name + " is in imminent set.")
        i ! ModelSimulator.buildGenerateOutput(t)
      }
    }
    else {
      throw new SynchronizationException(t + " in processing output, time in GenerateOutput " + t + " does not match next time: " + getNextTime)
    }
  }

  /**
   * This is the behavior of the ModelCoordinator when it is processing output from subordinate models.
   *
   * Upon receipt of a [[GenerateOutput]] message, initialize the [[awaitingOutputDone]] list with the imminent set
   * and send a [[GenerateOutput]] message to each imminent subordinate model.
   *
   * Upon receipt of an [[OutputDone]] message, remove the sender from the [[awaitingOutputDone]] list and call
   * [[checkDoneProcessingOutput()]] to see if all output processing is done.
   *
   * Upon receipt of an [[OutputMessage]], pass the message and sender to the [[handleOutputEvent()]] method in order
   * to route the message to the appropriate models in the set of models influenced by that sender.
   *
   * Upon receipt of a [[BagEventDone]] message, remove the corresponding entry from the [[awaitingBagEvents]] list and
   * call [[checkDoneProcessingOutput()]] to see if all output processing is done.
   *
   * Upon receipt of an [[EventMessage]], add the message to the [[externalEvents]] list and return a [[BagEventDone]] message
   *
   */
  def processingOutput: Receive = {
    case g: GenerateOutput =>
      generateOutput(g)
    case od: OutputDone =>
      val t: Duration = Duration.parse(od.getTimeString)
      if (t.compareTo(currentTime) == 0) {
        awaitingOutputDone = awaitingOutputDone.filterNot(i => i == sender())
        log.debug(t + " Received output done from " + sender().path.name)
        checkDoneProcessingOutput(t)
      }
      else {
        throw new SynchronizationException(t + " in processing output, time in OutputDone " + t + " does not match current time: " + currentTime)
      }
    case om: OutputMessage =>
      // Serialized (protobuf-carried) output from a subordinate model.
      val outputTime = Duration.parse(om.getTimeString)
      if (outputTime.compareTo(currentTime) == 0) {
        val outputData = convertMessage(om.getOutput, om.getJavaClass) match {case s: java.io.Serializable => s}
        log.debug(outputTime + " Handling output event " + outputData + " from " + sender().path.name)
        routeOutputEvent(sender(), OutputEvent(outputTime, outputData))
      }
      else {
        throw new SynchronizationException(outputTime + " in processing output, time in OutputMessage " + outputTime + " does not match current time: " + currentTime)
      }
    case OutputMessageCase(output: java.io.Serializable, t) =>
      // Local (non-serialized) output from a subordinate model.
      if (t.compareTo(currentTime) == 0) {
        log.debug(t + " Handling output event " + output + " from " + sender().path.name)
        routeOutputEvent(sender(), OutputEvent(t, output))
      }
      else {
        throw new SynchronizationException(t + " in processing output, time in OutputMessage " + t + " does not match current time: " + currentTime)
      }
    case bed: BagEventDone =>
      val t = Duration.parse(bed.getTimeString)
      val eventIndex = bed.getEventIndex
      // Remove only the entry matching both the acknowledging actor and the event index.
      awaitingBagEvents = awaitingBagEvents.filterNot(a => a.actor == sender() && a.eventIndex == eventIndex)
      log.debug(t + "Received BagEventDone with index " + eventIndex + " from " + sender().path.name)
      checkDoneProcessingOutput(t)
    case em: EventMessage =>
      // Bag a serialized external event and acknowledge it with BagEventDone.
      val t: Duration = Duration.parse(em.getTimeString)
      val executionTime: Duration = Duration.parse(em.getEvent.getExecutionTimeString)
      val externalEvent = ExternalEvent(executionTime, convertMessage(em.getEvent.getEventData, em.getEvent.getJavaClass) match {case s: java.io.Serializable => s})
      externalEvents = externalEvent :: externalEvents
      log.debug(t + " Bagging external event " + externalEvent + " with index " + em.getEventIndex + " from " + sender().path.name)
      sender() ! ModelSimulator.buildBagEventDone(t, em.getEventIndex)
    case EventMessageCase(event, t, eventIndex) =>
      // Bag a local (non-serialized) external event and acknowledge it.
      val externalEvent = event
      externalEvents = externalEvent :: externalEvents
      log.debug(t + " Bagging external event " + externalEvent + " with index " + eventIndex + " from " + sender().path.name)
      sender() ! ModelSimulator.buildBagEventDone(t, eventIndex)
    case p: ProcessEventMessages =>
      // Fan the request out to all subordinates and await a ReadyToProcessMessages from each.
      subordinates().foreach(_ ! p)
      awaitingMessageProcessing = subordinates().toList
      log.debug(p.getTimeString + " Received ProcessEventMessages from " + sender + " Sending to " + awaitingMessageProcessing.size + " subordinates.")
      if(awaitingMessageProcessing.isEmpty) {
        throw new SynchronizationException("Illegal ModelCoordinator with no subordinates.")
      }
    case rpm: ReadyToProcessMessages =>
      val t: Duration = Duration.parse(rpm.getTimeString)
      awaitingMessageProcessing = awaitingMessageProcessing.filterNot(_ == sender)
      log.debug(t + " Received ReadyToProcessMessages from " + sender + ". awaitingMessageProcessing has " + awaitingMessageProcessing.size + " members.")
      if (awaitingMessageProcessing.isEmpty) {
        parentCoordinator ! rpm
        log.debug(t + "Done processing messages.")
        // Only enter passingMessages if there is actually work to pass on.
        if (imminents.nonEmpty || externalEvents.nonEmpty) {
          log.debug("Become: Transitioning from processingOutput to passingMessages.")
          context.become(passingMessages)
        } else {
          log.debug("Become: NOT Transitioning from processingOutput")
        }
      }
    case t: Terminate =>
      // Give subclasses a shutdown hook before propagating termination downward.
      preTerminate()
      awaitingTermination = subordinates().toList
      subordinates().foreach(_ ! t)
    case td: TerminateDone =>
      log.debug(sender().path.name + " completed termination")
      awaitingTermination = awaitingTermination.filterNot(_ == sender())
      if (awaitingTermination.isEmpty) {
        log.debug("All subordinates completed termination.")
        parentCoordinator ! td
      }
    case et: ExecuteTransition =>
      // An ExecuteTransition in this state is only legal when external events are pending;
      // re-deliver it to self after switching to the passingMessages behavior.
      if( externalEvents.nonEmpty ) {
        log.debug("Become: Transitioning from processingOutput to passingMessages due to external events.")
        context.become(passingMessages)
        self ! et
      } else {
        throw new SynchronizationException("In processingOutput, got ExecuteTransition with no external events.")
      }
    case m: Any =>
      handleOtherMessages(m)
  }

  /**
   * In this state, the coordinator empties its message bag, translates those messages as required and sends them to
   * subordinate models.
   *
   * Upon receipt of an [[ExecuteTransition]] message, empty the [[externalEvents]] bag and call [[handleExternalEvent()]]
   * for each event.
   *
   * Upon receipt of an [[EventMessage]], add the message to the [[externalEvents]] list and return a [[BagEventDone]] message
   *
   * Upon receipt of a [[BagEventDone]] message, remove the sending actor from the [[awaitingBagEvents]] list and
   * call [[checkDonePassingMessages]]
   */
  def passingMessages: Receive = {
    case et: ExecuteTransition =>
      val t: Duration = Duration.parse(et.getTimeString)
      if (t.compareTo(currentTime) >= 0 && t.compareTo(getNextTime) <= 0) {
        log.debug(t + " Executing external transitions.")
        // reverse takes O(n) time, but events should be delivered in FIFO
        // order (LIFO is surprising to user. A Queue does not save time
        // as it effectively performs a reverse when elements are
        // removed, which is only performed once)
        externalEvents.reverse.foreach {e =>
          log.debug(t + " Handling external event: " + e)
          routeExternalEvent(e)
        }
        externalEvents = List()
        currentTime = t
        checkDonePassingMessages(t)
      }
      else {
        throw new SynchronizationException(t + " in passing messages, time in ExecuteTransition message " + t +
          " is not between current time: " + currentTime + " and next scheduled transition " + getNextTime)
      }
    case em: EventMessage =>
      // Bag a serialized external event arriving while messages are being passed.
      val t: Duration = Duration.parse(em.getTimeString)
      val executionTime: Duration = Duration.parse(em.getEvent.getExecutionTimeString)
      val externalEvent = new ExternalEvent(executionTime, convertMessage(em.getEvent.getEventData, em.getEvent.getJavaClass) match {case s: java.io.Serializable => s})
      externalEvents = externalEvent :: externalEvents
      log.debug(t + " Bagging external event " + externalEvent + " with index " + em.getEventIndex + " from " + sender().path.name)
      sender() ! ModelSimulator.buildBagEventDone(t, em.getEventIndex)
    case EventMessageCase(event, t, eventIndex) =>
      //val externalEvent = ExternalEvent(t, event)
      val externalEvent = event
      externalEvents = externalEvent :: externalEvents
      log.debug(t + " Bagging external event " + externalEvent + " with index " + eventIndex + " from " + sender().path.name)
      sender() ! ModelSimulator.buildBagEventDone(t, eventIndex)
    case bed: BagEventDone =>
      val t: Duration = Duration.parse(bed.getTimeString)
      val eventIndex = bed.getEventIndex
      awaitingBagEvents = awaitingBagEvents.filterNot(a => a.actor == sender() && a.eventIndex == eventIndex)
      log.debug(t + " Received BagEventDone with index " + eventIndex + " from " + sender().path.name)
      checkDonePassingMessages(t)
    case m: Any =>
      handleOtherMessages(m)
  }

  /**
   * In this state, the ModelCoordinator is awaiting the completion of event transitions from all subordinate models.
   *
   * Upon receipt of a [[TransitionDone]] message, remove the sender from the [[synchronizeSet]] and record the next
   * state transition of the sender in the [[nextMap]]. If the [[synchronizeSet]] is empty, all subordinate models have
   * completed state transition. Set the current time to [[TransitionDone]] time and send a [[TransitionDone]] message
   * to the parent coordinator with the [[currentTime]] and [[getNextTime]] as arguments. Transition to the
   * [[processingOutput]] state.
   */
  def processingTransitions: Receive = {
    case td: StateTransitionDone =>
      val t: Duration = Duration.parse(td.getTimeString)
      val nextTime: Duration = Duration.parse(td.getNextTimeString)
      if (t.compareTo(currentTime) >= 0 && t.compareTo(nextTime) <= 0) {
        synchronizeSet = synchronizeSet.filterNot(_ == sender())
        log.debug(t + " " + sender.path.name + " completed transition with its next scheduled transition at " + nextTime)
        nextMap.put(sender(), nextTime)
        if (synchronizeSet.isEmpty) {
          currentTime = t
          parentCoordinator ! ModelSimulator.buildStateTransitionDone(currentTime, getNextTime)
          log.debug("Become: " + t + " All transitions complete. Transitioning from processingTransitions to processingOutput")
          context.become(processingOutput)
        } else {
          log.debug(t + " Still have " + synchronizeSet.size + " subordinates executing transition: " + synchronizeSet)
        }
      }
      else {
        throw new SynchronizationException(t + " in processing transitions, time in TransitionDone message " + t +
          " is not between current time: " + currentTime + " and next scheduled transition " + nextTime)
      }
    case m: Any =>
      handleOtherMessages(m)
  }

  // Fallback for unhandled messages in every behavior.
  // NOTE(review): the error text mentions only the processingTransitions state, but this
  // handler is installed from all behaviors — confirm whether the message should be generalized.
  def handleOtherMessages: Receive = {
    case m: Any=>
      throw new SynchronizationException("Received message in processingTransitions state with no handler: [" + m.getClass + "] " + m)
  }
}
| rkewley/devsdmf | src/main/scala/devsmodel/ModelCoordinator.scala | Scala | apache-2.0 | 27,290 |
package at.logic.gapt.expr.fol
import at.logic.gapt.expr._
import at.logic.gapt.expr.hol.{ toNNF, simplify }
import at.logic.gapt.proofs.FOLClause
import scala.annotation.tailrec
import scala.collection.mutable
object TseitinCNF {
  /**
   * Generates from a formula f a List of clauses in CNF by using Tseitin's Transformation
   * @param f formula which should be transformed
   * @return CNF satisfiability-equivalent to f
   */
  def apply( f: FOLFormula ): List[FOLClause] = {
    val tseitin = new TseitinCNF()
    // NOTE(review): this match assumes And.nAry extracts a conjunct list from any
    // simplified NNF formula (a single conjunct for non-conjunctions) — confirm the
    // extractor's behavior; otherwise a non-conjunction would throw a MatchError.
    simplify( toNNF( f ) ) match {
      case And.nAry( conjuncts ) => conjuncts.flatMap( tseitin.apply )
    }
  }
}
class TseitinCNF {
  // Maps each already-abbreviated subformula to the auxiliary atom representing it,
  // so shared subformulas are only given one auxiliary atom.
  val subformulaMap = mutable.Map[FOLFormula, FOLFormula]()

  // Prefix used for freshly generated auxiliary atom symbols.
  val hc = "x"

  // Atom symbols occurring in the input formula; fresh auxiliary symbols must avoid these.
  var fsyms = Set[String]()
  // Auxiliary symbols introduced so far, in creation order.
  var auxsyms = mutable.MutableList[String]()

  /**
   * Get a list of all Atoms symbols used in f
   * @param f formula
   * @return List of all atom symbols used in f
   */
  def getAtomSymbols( f: FOLFormula ): List[String] = f match {
    case FOLAtom( h, args ) => List( h )
    case Top() | Bottom()   => List()
    case Neg( f2 )          => getAtomSymbols( f2 )
    case And( f1, f2 )      => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
    case Or( f1, f2 )       => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
    case Imp( f1, f2 )      => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
    case Ex( _, f2 )        => getAtomSymbols( f2 )
    case All( _, f2 )       => getAtomSymbols( f2 )
    case _                  => throw new IllegalArgumentException( "unknown head of formula: " + f.toString )
  }

  /**
   * Transforms f into a satisfiability-equivalent list of CNF clauses, with the clause
   * asserting the top-level representative atom appended last.
   */
  def apply( f: FOLFormula ): List[FOLClause] = {
    // Explicit method call instead of the deprecated postfix-operator syntax.
    fsyms = getAtomSymbols( f ).toSet

    // process the formula via the Tseitin transformation and assert the top-level atom
    val ( topAtom, clauses ) = processFormula( f )
    clauses :+ FOLClause( List(), List( topAtom ) )
  }

  // Counter used to generate fresh auxiliary symbol names ("x1", "x2", ...).
  private var auxCounter: Int = 0

  /**
   * Adds a FOLFormula to the subFormulas HashMap if it does not already map to an existing atom.
   * The representing atom is returned.
   * In case f is an atom itself, nothing will be added to the subformulas HashMap and the atom itself is returned.
   * @param f subformula to possibly be added to subformulas HashMap
   * @return an atom either representing the subformula or f if f is already an atom
   */
  @tailrec
  private def addIfNotExists( f: FOLFormula ): FOLFormula = f match {
    case FOLAtom( h, args ) => f
    case _ =>
      if ( subformulaMap.isDefinedAt( f ) ) {
        subformulaMap( f )
      } else {
        auxCounter += 1
        // val instead of var: the candidate symbol is never reassigned.
        val auxsym = s"$hc$auxCounter"
        if ( fsyms.contains( auxsym ) ) {
          // Clash with a symbol of the input formula: retry with the next counter value.
          addIfNotExists( f )
        } else {
          auxsyms += auxsym
          val auxAtom = FOLAtom( auxsym )
          subformulaMap( f ) = auxAtom
          auxAtom
        }
      }
  }

  /**
   * Takes a propositional FOLFormula and processes it s.t. every subformula gets
   * assigned a freshly introduced Atom which is from there on used instead of the formula
   * @param f The formula to be processed.
   * @return a pair, where 1st is the prop. variable representing f and 2nd is a clause list
   *         containing all the equivalences required for the representation of f by 1st.
   */
  def processFormula( f: FOLFormula ): ( FOLFormula, List[FOLClause] ) = f match {
    case FOLAtom( _, _ ) => ( f, List() )
    case Top() =>
      val x = addIfNotExists( f )
      ( x, List( FOLClause( List(), List( x ) ) ) )
    case Bottom() =>
      val x = addIfNotExists( f )
      ( x, List( FOLClause( List( x ), List() ) ) )
    case Neg( f2 ) =>
      // Recurse first so auxiliary atoms are numbered in the same order as before.
      val ( x1, cs ) = processFormula( f2 )
      val x = addIfNotExists( f )
      // x <-> -x1, encoded as (-x \/ -x1) /\ (x \/ x1)
      val c1 = FOLClause( List( x, x1 ), List() )
      val c2 = FOLClause( List(), List( x, x1 ) )
      ( x, cs ++ List( c1, c2 ) )
    case And( f1, f2 ) =>
      val ( x1, cs1 ) = processFormula( f1 )
      val ( x2, cs2 ) = processFormula( f2 )
      val x = addIfNotExists( f )
      // x <-> (x1 /\ x2), encoded as (-x \/ x1) /\ (-x \/ x2) /\ (-x1 \/ -x2 \/ x)
      val c1 = FOLClause( List( x ), List( x1 ) )
      val c2 = FOLClause( List( x ), List( x2 ) )
      val c3 = FOLClause( List( x1, x2 ), List( x ) )
      ( x, cs1 ++ cs2 ++ List( c1, c2, c3 ) )
    case Or( f1, f2 ) =>
      val ( x1, cs1 ) = processFormula( f1 )
      val ( x2, cs2 ) = processFormula( f2 )
      val x = addIfNotExists( f )
      // x <-> (x1 \/ x2), encoded as (-x1 \/ x) /\ (-x2 \/ x) /\ (-x \/ x1 \/ x2)
      val c1 = FOLClause( List( x1 ), List( x ) )
      val c2 = FOLClause( List( x2 ), List( x ) )
      val c3 = FOLClause( List( x ), List( x1, x2 ) )
      ( x, cs1 ++ cs2 ++ List( c1, c2, c3 ) )
    case Imp( f1, f2 ) =>
      val ( x1, cs1 ) = processFormula( f1 )
      val ( x2, cs2 ) = processFormula( f2 )
      val x = addIfNotExists( f )
      // x <-> (x1 -> x2), encoded as (x \/ x1) /\ (-x2 \/ x) /\ (-x \/ -x1 \/ x2)
      val c1 = FOLClause( List(), List( x, x1 ) )
      val c2 = FOLClause( List( x2 ), List( x ) )
      val c3 = FOLClause( List( x, x1 ), List( x2 ) )
      ( x, cs1 ++ cs2 ++ List( c1, c2, c3 ) )
    case _ => throw new IllegalArgumentException( "Formula not supported in Tseitin transformation: " + f.toString )
  }
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/expr/fol/TseitinCNF.scala | Scala | gpl-3.0 | 5,110 |
package org.scalaide.core
package semantichighlighting.classifier
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolTypes._
import org.junit._
class CaseObjectTest extends AbstractSymbolClassifierTest {
@Test
def basic_case_object(): Unit = {
  // Both the definition site and a reference to a case object should classify as CaseObject.
  // Note: the marker string must align column-by-column with the code string.
  checkSymbolClassification("""
      case object CaseObject { CaseObject }
      """, """
      case object $CASEOBJ $ { $CASEOBJ $ }
      """,
    Map("CASEOBJ" -> CaseObject))
}
@Test
def pattern_match(): Unit = {
  // In a pattern match, Some(...) should classify as CaseClass and None as CaseObject.
  checkSymbolClassification("""
      object X {
        Option(42) match {
          case Some(x) => 42
          case None => 24
        }
      }""", """
      object X {
        Option(42) match {
          case $CC$(x) => 42
          case $CO$ => 24
        }
      }""",
    Map(
      "CO" -> CaseObject,
      "CC" -> CaseClass))
}
@Test
def import_case_object(): Unit = {
  // A case object referenced in an import clause should also classify as CaseObject.
  // Only the import line carries a marker; the class body line is unchecked.
  checkSymbolClassification("""
      import scala.None
      class A { None }
      """, """
      import scala.$CO$
      """,
    Map("CO" -> CaseObject))
}
} | stephenh/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/semantichighlighting/classifier/CaseObjectTest.scala | Scala | bsd-3-clause | 1,081 |
package cgta.oscala
package util
import scala.concurrent.ExecutionContext
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman
// All Rights Reserved
// please contact ben@jackman.biz
// for licensing inquiries
// Created by bjackman @ 11/11/14 8:05 AM
//////////////////////////////////////////////////////////////
trait ConcHelpPlat extends ConcHelp {
  // Scala.js platform implementation: there are no threads in JS, so the default
  // execution context schedules work on the JS event loop via the queue context.
  final override lazy val defaultExecutionContext: ExecutionContext = scalajs.concurrent.JSExecutionContext.queue
} | cgta/open | oscala/js/src/main/scala/cgta/oscala/util/ConcHelpPlat.scala | Scala | mit | 517 |
package spire
package algebra
/** Field type class. While algebra already provides one, we provide one in Spire
* that integrates with the commutative ring hierarchy, in particular `GCDRing`
* and `EuclideanRing`.
*
* On a field, all nonzero elements are invertible, so the remainder of the
* division is always 0. The Euclidean function can take an arbitrary value on
* nonzero elements (it is undefined for zero); for compatibility with the degree
* of polynomials, we use the constant 0.
*
* The GCD and LCM are defined up to a unit; on a field, it means that either the GCD or LCM
* can be fixed arbitrarily. Some conventions with consistent defaults are provided in the
* spire.algebra.Field companion object.
*/
trait Field[@sp(Int, Long, Float, Double) A] extends Any with AlgebraField[A] with EuclideanRing[A] {
  // The Euclidean function is the constant 0 on nonzero elements (undefined at zero),
  // for compatibility with the degree of constant polynomials.
  def euclideanFunction(a: A): BigInt = BigInt(0)
  // Euclidean quotient is exact division: every nonzero element of a field is invertible.
  def equot(a: A, b: A): A = div(a, b)
  // Euclidean remainder is always zero on a field.
  def emod(a: A, b: A): A = zero
  override def equotmod(a: A, b: A): (A, A) = (div(a, b), zero)
}
object Field extends _root_.algebra.ring.FieldFunctions[Field] with EuclideanRingFunctions[Field] {
  /** Summons the implicit `Field[A]` instance. */
  @inline def apply[A](implicit ev: Field[A]): Field[A] = ev

  /** Field with simple default GCD/LCM implementations:
   * gcd(a, b) = 1 (except gcd(0, 0) = 0) while lcm(a, b) = a * b. */
  trait WithDefaultGCD[@sp(Int, Long, Float, Double) A] extends Any with Field[A] {
    override def gcd(a: A, b: A)(implicit eqA: Eq[A]): A =
      if (isZero(a) && isZero(b)) zero else one
    override def lcm(a: A, b: A)(implicit eqA: Eq[A]): A = times(a, b)
  }

  /** Field defined as a field of fractions with a default implementation of GCD/LCM such that
   * - gcd(a/b, c/d) = gcd(a, c) / lcm(b, d)
   * - lcm(a/b, c/d) = lcm(a, c) / gcd(b, d)
   * which corresponds to the convention of the GCD domains of SageMath; on rational numbers, it
   * "yields the unique extension of gcd from integers to rationals presuming the natural extension
   * of the divisibility relation from integers to rationals", see http://math.stackexchange.com/a/151431
   */
  trait FieldOfFractionsGCD[A, R] extends Any with Field[A] {
    // GCD ring of numerators/denominators underlying the fraction field.
    implicit def ringR: GCDRing[R]
    implicit def eqR: Eq[R]
    def numerator(a: A): R
    def denominator(a: A): R
    // Constructs the fraction num/den back in the field.
    def fraction(num: R, den: R): A
    override def gcd(x: A, y: A)(implicit ev: Eq[A]): A = {
      // gcd(a/b, c/d) = gcd(a, c) / lcm(b, d)
      val num = ringR.gcd(numerator(x), numerator(y))
      val den = ringR.lcm(denominator(x), denominator(y))
      fraction(num, den)
    }
    override def lcm(x: A, y: A)(implicit ev: Eq[A]): A = {
      // lcm(a/b, c/d) = lcm(a, c) / gcd(b, d)
      val num = ringR.lcm(numerator(x), numerator(y))
      val den = ringR.gcd(denominator(x), denominator(y))
      fraction(num, den)
    }
  }
}
| adampingel/spire | core/src/main/scala/spire/algebra/Field.scala | Scala | mit | 2,723 |
package sbt
package input.aggregation
import java.nio.file.Paths
import sbt.Keys._
import sbt.internal.DynamicInput
import sbt.nio.Keys._
/**
* This test is for internal logic so it must be in the sbt package because it uses package
* private apis.
*/
object Build {
// Input task: writes its second argument to the file named by its first argument.
val setStringValue = inputKey[Unit]("set a global string to a value")
// Input task: asserts that the file named by its first argument contains its second argument.
val checkStringValue = inputKey[Unit]("check the value of a global")
val checkTriggers = taskKey[Unit]("Check that the triggers are correctly aggregated.")
val checkGlobs = taskKey[Unit](
  "Check that the globs are correctly aggregated and that the globs are the union of the inputs and the triggers"
)
def setStringValueImpl: Def.Initialize[InputTask[Unit]] = Def.inputTask {
val Seq(stringFile, string) = Def.spaceDelimited().parsed.map(_.trim)
IO.write(file(stringFile), string)
}
def checkStringValueImpl: Def.Initialize[InputTask[Unit]] = Def.inputTask {
val Seq(stringFile, string) = Def.spaceDelimited().parsed
assert(IO.read(file(stringFile)) == string)
}
def triggers(t: Seq[DynamicInput]): Seq[Glob] = t.collect {
// This is a hack to exclude the default compile and resource file inputs
case i if !i.glob.toString.contains("*") => i.glob
}
lazy val foo = project
.settings(
setStringValue := {
val _ = (fileInputs in (bar, setStringValue)).value
setStringValueImpl.evaluated
},
checkStringValue := checkStringValueImpl.evaluated,
watchOnFileInputEvent := { (_, _) =>
Watch.CancelWatch
},
Compile / compile / watchOnIteration := { (_, _, _) =>
Watch.CancelWatch
},
checkTriggers := {
val actual = triggers((Compile / compile / transitiveDynamicInputs).value).toSet
val base = baseDirectory.value.getParentFile.toGlob
// This checks that since foo depends on bar there is a transitive trigger generated
// for the "bar.txt" trigger added to bar / Compile / unmanagedResources (which is a
// transitive dependency of
val expected: Set[Glob] = Set(base / "baz.txt", base / "bar" / "bar.txt")
assert(actual == expected)
},
Test / test / watchTriggers += baseDirectory.value.toGlob / "test.txt",
Test / checkTriggers := {
val testTriggers = triggers((Test / test / transitiveDynamicInputs).value).toSet
// This validates that since the "test.txt" trigger is only added to the Test / test task,
// that the Test / compile does not pick it up. Both of them pick up the the triggers that
// are found in the test above for the compile configuration because of the transitive
// classpath dependency that is added in Defaults.internalDependencies.
val compileTriggers = triggers((Test / compile / transitiveDynamicInputs).value).toSet
val base = baseDirectory.value.getParentFile.toGlob
val expected: Set[Glob] =
Set(base / "baz.txt", base / "bar" / "bar.txt", base / "foo" / "test.txt")
assert(testTriggers == expected)
assert((testTriggers - (base / "foo" / "test.txt")) == compileTriggers)
},
)
.dependsOn(bar)
lazy val bar = project.settings(
fileInputs in setStringValue += baseDirectory.value.toGlob / "foo.txt",
setStringValue / watchTriggers += baseDirectory.value.toGlob / "bar.txt",
// This trigger should transitively propagate to foo / compile and foo / Test / compile
Compile / unmanagedResources / watchTriggers += baseDirectory.value.toGlob / "bar.txt",
checkTriggers := {
val base = baseDirectory.value.getParentFile.toGlob
val actual = triggers((Compile / compile / transitiveDynamicInputs).value).toSet
val expected: Set[Glob] = Set(base / "bar" / "bar.txt", base / "baz.txt")
assert(actual == expected)
},
// This trigger should not transitively propagate to any foo task
Test / unmanagedResources / watchTriggers += baseDirectory.value.toGlob / "bar-test.txt",
Test / checkTriggers := {
val testTriggers = triggers((Test / test / transitiveDynamicInputs).value).toSet
val compileTriggers = triggers((Test / compile / transitiveDynamicInputs).value).toSet
val base = baseDirectory.value.getParentFile.toGlob
val expected: Set[Glob] =
Set(base / "baz.txt", base / "bar" / "bar.txt", base / "bar" / "bar-test.txt")
assert(testTriggers == expected)
assert(testTriggers == compileTriggers)
},
)
lazy val root = (project in file("."))
.aggregate(foo, bar)
.settings(
watchOnFileInputEvent := { (_, _) =>
Watch.CancelWatch
},
checkTriggers := {
val actual = triggers((Compile / compile / transitiveDynamicInputs).value)
val expected: Seq[Glob] = baseDirectory.value.toGlob / "baz.txt" :: Nil
assert(actual == expected)
},
)
}
| xuwei-k/xsbt | sbt-app/src/sbt-test/watch/file-input-aggregation/project/Build.scala | Scala | apache-2.0 | 4,879 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.bitmap.gif
import android.graphics.Bitmap
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.log.LogSE._
import scala.concurrent.duration.Duration
/**
 * Decodes gif animation frames one at a time, tracking the current frame index and loop count.
 *
 * TODO: consider keeping pixels data in native buffer (especially for swap pixels)
 * TODO: maybe image decoder could save pixels directly to current image (line bye line), would not need pixels buffer
 * @param gif the parsed gif whose frames are decoded
 */
class AnimGifDecoder(gif: Gif) extends DerivedLogTag {
  val framesCount = gif.frames.length
  var frameIndex = -1
  var loopCounter = 0
  // Set when a frame has been decoded but not yet rendered via getCurrentFrame.
  var frameDirty = false
  var currentImage: Bitmap = _
  val decoder = new LzwDecoder(gif)

  /**
   * Returns a delay to wait before displaying next frame.
   * @return finite duration if there is next frame to show (or looping) or Duration.Inf if this is the last frame and loopCount is finished
   */
  def getFrameDelay: Duration = frameIndex match {
    case i if i < 0 => Duration.Zero
    case i if gif.loop.shouldAnimate(loopCounter) => gif.frames(i).delay
    case _ => Duration.Inf
  }

  // Moves to the next frame index (wrapping at the end and counting loops);
  // returns false once the configured loop count is exhausted.
  private def advance(): Boolean = {
    val onLastFrame = frameIndex == framesCount - 1
    if (onLastFrame) loopCounter += 1
    val keepAnimating = gif.loop.shouldAnimate(loopCounter)
    if (keepAnimating) {
      frameIndex = if (onLastFrame) 0 else frameIndex + 1
    }
    keepAnimating
  }

  /**
   * Advances animation.
   * Will decode next frame pixels, but not modify current frame image yet.
   */
  def advanceNextFrame(): Unit = {
    if (frameDirty) warn(l"should call getCurrentFrame before advancing to next frame")
    if (!frameDirty && advance()) {
      // Restart the decoder state whenever the animation wraps back to the first frame.
      if (frameIndex == 0) decoder.clear()
      decoder.decode(gif.frames(frameIndex))
      frameDirty = true
    }
  }

  /**
   * Returns current frame image, rendering the most recently decoded pixels if needed.
   * @return Bitmap representation of frame
   */
  def getCurrentFrame: Bitmap = {
    if (frameDirty) {
      currentImage = decoder.updateImage(gif.frames(frameIndex))
      frameDirty = false
    }
    currentImage
  }

  def destroy() = decoder.destroy()
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/bitmap/gif/AnimGifDecoder.scala | Scala | gpl-3.0 | 2,737 |
import org.apache.spark._
import org.scalatest._
/** Checks that Spark accepts "scalania" as a master URL backed by an external cluster manager. */
class ScalaniaClusterManagerSpec extends FlatSpec with Matchers {
  "Spark" should "load scalania cluster manager" in {
    val conf =
      new SparkConf().setMaster("scalania").setAppName("scalania External Cluster Manager")
    val context = SparkContext.getOrCreate(conf)
    println(s"Custom external cluster manager: ${context.master}")
    context.stop
  }
}
| jaceklaskowski/spark-workshop | solutions/spark-external-cluster-manager/src/test/scala/ScalaniaClusterManagerSpec.scala | Scala | apache-2.0 | 410 |
package prstack
/** A binary instruction symbol: exactly one of the two values I0 or I1. */
sealed trait Instr

/** The zero instruction; renders as "0". */
case object I0 extends Instr {
  override def toString: String = "0"
}

/** The one instruction; renders as "1". */
case object I1 extends Instr {
  override def toString: String = "1"
}
| JerrySwan/PRStack | src/prstack/Instructions.scala | Scala | bsd-3-clause | 184 |
package skinny.orm.feature
import scalikejdbc._
import org.joda.time.DateTime
import skinny.orm.Alias
/**
 * Soft delete with timestamp value.
 *
 * @tparam Entity entity
 */
trait SoftDeleteWithTimestampFeature[Entity] extends SoftDeleteWithTimestampFeatureWithId[Long, Entity]

trait SoftDeleteWithTimestampFeatureWithId[Id, Entity] extends CRUDFeatureWithId[Id, Entity] {

  /** Name of the timestamp field that marks an entity as soft-deleted. */
  def deletedAtFieldName: String = "deletedAt"

  // Restrict update operations to rows whose deleted-at column is still NULL,
  // composed with any scope inherited from the parent trait.
  override def defaultScopeForUpdateOperations: Option[SQLSyntax] = {
    val notDeleted = sqls.isNull(defaultAlias.support.column.field(deletedAtFieldName))
    super.defaultScopeForUpdateOperations match {
      case Some(parentScope) => Some(parentScope.and.append(notDeleted))
      case None => Some(notDeleted)
    }
  }

  // Restrict queries to rows whose deleted-at column is still NULL.
  override def defaultScope(alias: Alias[Entity]): Option[SQLSyntax] = {
    val notDeleted = sqls.isNull(alias.field(deletedAtFieldName))
    super.defaultScope(alias) match {
      case Some(parentScope) => Some(parentScope.and.append(notDeleted))
      case None => Some(notDeleted)
    }
  }

  // Soft delete: instead of issuing a DELETE, stamp the deleted-at column with the current time.
  override def deleteBy(where: SQLSyntax)(implicit s: DBSession = autoSession): Int =
    updateBy(where).withNamedValues(column.field(deletedAtFieldName) -> DateTime.now)
}
| seratch/skinny-framework | orm/src/main/scala/skinny/orm/feature/SoftDeleteWithTimestampFeature.scala | Scala | mit | 1,137 |
package scala.pickling.test.roomlist.objectarray
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
/** A named room (case-class parameters are vals by default, so the explicit `val` is dropped). */
case class Room(name: String)

/** An ordered collection of rooms backed by an object array. */
case class RoomList(rooms: Array[Room])
/** Round-trips an object-array payload through the JSON pickler and checks the exact wire format. */
class RoomListObjectArrayTest extends FunSuite {
  test("main") {
    val roomList = RoomList(Array(Room("foo"), Room("biz"), Room("bang")))
    val pickled = roomList.pickle
    // Expected JSON rendering, including the fully-qualified $type tags.
    val expectedJson = """JSONPickle({
| "$type": "scala.pickling.test.roomlist.objectarray.RoomList",
| "rooms": {
| "elems": [
| {
| "$type": "scala.pickling.test.roomlist.objectarray.Room",
| "name": "foo"
| },
| {
| "$type": "scala.pickling.test.roomlist.objectarray.Room",
| "name": "biz"
| },
| {
| "$type": "scala.pickling.test.roomlist.objectarray.Room",
| "name": "bang"
| }
| ]
| }
|})""".stripMargin.trim
    assert(pickled.toString === expectedJson)
    val unpickled = pickled.unpickle[RoomList]
    // Array equality must be element-wise, not reference equality.
    assert(unpickled.rooms.sameElements(roomList.rooms))
  }
}
| scala/pickling | core/src/test/scala/scala/pickling/generation/RoomListObjectArrayTest.scala | Scala | bsd-3-clause | 1,104 |
package org.json4s
package jackson
/** Runs the shared `SerializationSpec` suite against the Jackson-backed `Serialization`,
 * using its default formats with no type hints.
 */
class JacksonSerializationSpec
extends SerializationSpec(
serialization = org.json4s.jackson.Serialization,
baseFormats = org.json4s.jackson.Serialization.formats(NoTypeHints)
)
| json4s/json4s | tests/src/test/scala/org/json4s/jackson/JacksonSerializationSpec.scala | Scala | apache-2.0 | 226 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
import java.io.{ File, PrintStream }
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.BeforeAndAfterAll
import sbt.internal.util.Terminal.SimpleTerminal
import scala.io.Source
class ProgressStateSpec extends AnyFlatSpec with BeforeAndAfterAll {
  // Scratch file that the progress state writes into during the test.
  private lazy val fileIn = new File("/tmp/tmp.txt")
  // Reader over the same scratch file; derived from fileIn so the two paths can never diverge
  // (previously the path string was duplicated).
  private lazy val fileOut = Source.fromFile(fileIn)
  override def afterAll(): Unit = {
    // Close the reader before deleting the file: deleting a file that is still open
    // fails on some platforms (previously the delete ran first).
    fileOut.close()
    fileIn.delete()
    super.afterAll()
  }
  "test" should "not clear after carriage return (\\\\r) " in {
    val ps = new ProgressState(1, 8)
    val in = "Hello\\r\\nWorld".getBytes()
    ps.write(SimpleTerminal, in, new PrintStream(fileIn), hasProgress = true)
    val clearScreenBytes = ConsoleAppender.ClearScreenAfterCursor.getBytes("UTF-8")
    // Only the second line should carry the clear-screen escape sequence.
    val check = fileOut.getLines().toList.map { line =>
      line.getBytes("UTF-8").endsWith(clearScreenBytes)
    }
    assert(check === List(false, true))
  }
}
| xuwei-k/xsbt | internal/util-logging/src/test/scala/sbt/internal/util/ProgressStateSpec.scala | Scala | apache-2.0 | 1,122 |
/*
framian.scala
Test of "framian"
*/
import java.io.{File,PrintWriter}
import framian.{Index,Cols}
import framian.csv.{Csv,CsvFormat}
object FramianTest {
  /**
   * Loads the cars93 CSV with framian, filters and transforms it, and writes the
   * result to out.csv.
   */
  def main(args: Array[String]): Unit = {
    println("Hello")
    val df = Csv.parseFile(new File("../r/cars93.csv")).labeled.toFrame
    println("" + df.rows + " " + df.cols)
    // Keep only rows with an engine size of at most 4.0.
    val df2 = df.filter(Cols("EngineSize").as[Double])(_ <= 4.0)
    println("" + df2.rows + " " + df2.cols)
    // Derive a metric weight column (pounds -> kilograms).
    val df3 = df2.map(Cols("Weight").as[Int], "WeightKG")(r => r.toDouble * 0.453592)
    println("" + df3.rows + " " + df3.cols)
    println(df3.colIndex)
    val csv = Csv.fromFrame(new CsvFormat(",", header = true))(df3)
    // try/finally guarantees the writer is closed even if write throws
    // (the previous anonymous-subclass idiom leaked the handle on failure).
    val out = new PrintWriter("out.csv")
    try out.write(csv.toString) finally out.close()
    println("Done")
  }
}
| darrenjw/blog | scala-dataframes/framian/framian.scala | Scala | apache-2.0 | 730 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.broker.security
import java.io.File
import java.io.FileInputStream
import java.io.IOException
import java.security.Principal
import java.util.Properties
import javax.security.auth.Subject
import javax.security.auth.callback.Callback
import javax.security.auth.callback.CallbackHandler
import javax.security.auth.callback.NameCallback
import javax.security.auth.callback.PasswordCallback
import javax.security.auth.callback.UnsupportedCallbackException
import javax.security.auth.login.FailedLoginException
import javax.security.auth.login.LoginException
import javax.security.auth.spi.LoginModule
import org.apache.activemq.jaas.UserPrincipal
import java.{util => ju}
import scala.util.control.NonFatal
import org.apache.activemq.apollo.util.{FileCache, Log, FileSupport}
import FileSupport._
object FileUserLoginModule {
  /** JAAS system property that points at the login configuration file. */
  val LOGIN_CONFIG = "java.security.auth.login.config"
  /** Option key naming the user/password properties file. */
  val FILE_OPTION = "file"
  val DEFAULT_LOG = Log(getClass)

  /**
   * Loads and decrypts a user/password properties file.
   * @return Some(properties) on success; None (with a warning logged) when the file
   *         cannot be read or parsed.
   */
  def load_properties(file: File): Option[Properties] = {
    try {
      val rc = new Properties()
      using(new FileInputStream(file)) { in =>
        rc.load(in)
      }
      EncryptionSupport.decrypt(rc)
      Some(rc)
    } catch {
      // Only swallow recoverable failures; fatal errors (OOM, etc.) must propagate
      // (previously `case e: Throwable` caught everything).
      case NonFatal(e) =>
        DEFAULT_LOG.warn(e, "Unable to load properties file: " + file)
        None
    }
  }

  /** Cache of parsed property files keyed by file. */
  val file_cache = new FileCache[Properties](load_properties)
}
/**
* <p>
* Uses a userid=password property file to control who can
* login.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class FileUserLoginModule extends LoginModule {
  import FileUserLoginModule._

  // Prefer the broker's configured logger; fall back to this module's default.
  val log = JaasAuthenticator.broker_log.getOrElse(DEFAULT_LOG)
  import log._

  // JAAS lifecycle state, populated by initialize().
  private var subject: Subject = _
  private var callback_handler: CallbackHandler = _
  // The user/password properties file resolved in initialize().
  private var file: File = _
  // Principals established by login(); added to the subject in commit().
  private val principals = new ju.HashSet[Principal]()

  def initialize(subject: Subject, callback_handler: CallbackHandler, shared_state: ju.Map[String, _], options: ju.Map[String, _]): Unit = {
    this.subject = subject
    this.callback_handler = callback_handler
    // Resolve the properties file relative to the JAAS login config's directory,
    // or the current working directory when the config property is unset.
    val base_dir = if (System.getProperty(LOGIN_CONFIG) != null) {
      new File(System.getProperty(LOGIN_CONFIG)).getParentFile()
    } else {
      new File(".")
    }
    file = new File(base_dir, options.get(FILE_OPTION).asInstanceOf[String])
    debug("Initialized file=%s", file)
  }

  // Prompts for user id and password via the callback handler and validates them
  // against the (cached) properties file. Returns false to abstain when the file
  // is unreadable or callbacks are unsupported; throws on bad credentials.
  def login: Boolean = {
    val users = file_cache.get(file) match {
      case None => return false
      case Some(x) => x
    }
    val callbacks = new Array[Callback](2)
    callbacks(0) = new NameCallback("Username: ")
    callbacks(1) = new PasswordCallback("Password: ", false)
    try {
      callback_handler.handle(callbacks)
    } catch {
      case ioe: IOException =>
        throw new LoginException(ioe.getMessage())
      case uce: UnsupportedCallbackException =>
        return false;
    }
    val user = callbacks(0).asInstanceOf[NameCallback].getName()
    if( user == null ) {
      throw new FailedLoginException("User id not provided")
    }
    var tmpPassword = callbacks(1).asInstanceOf[PasswordCallback].getPassword()
    if (tmpPassword == null) {
      tmpPassword = new Array[Char](0)
    }
    // Compare the supplied password against the one on file for this user.
    val password = users.getProperty(user)
    if (password == null || !password.equals(new String(tmpPassword))) {
      throw new FailedLoginException("Invalid user id or password for user: "+user)
    }
    principals.add(new UserPrincipal(user))
    debug("login %s", user)
    true
  }

  // Commits the authenticated principals into the subject.
  def commit: Boolean = {
    subject.getPrincipals().addAll(principals)
    debug("commit")
    return true
  }

  // Discards any principals gathered by a failed login attempt.
  def abort: Boolean = {
    principals.clear
    debug("abort")
    return true
  }

  // Removes this module's principals from the subject on logout.
  def logout: Boolean = {
    subject.getPrincipals().removeAll(principals)
    principals.clear
    debug("logout")
    return true
  }
}
| chirino/activemq-apollo | apollo-broker/src/main/scala/org/apache/activemq/apollo/broker/security/FileUserLoginModule.scala | Scala | apache-2.0 | 4,632 |
package com.ajlopez.scala.ajlisp
/** A Lisp symbolic expression that can be evaluated. */
trait SymbolicExpression {
  /** Evaluates this expression within the given context and returns the result. */
  def evaluate(context: Context): Any
}
| ajlopez/AjLispScala | src/main/scala/com/ajlopez/scala/ajlisp/SymbolicExpression.scala | Scala | mit | 112 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.sql.Date
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.api.java.function.FlatMapGroupsWithStateFunction
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.plans.logical.FlatMapGroupsWithState
import org.apache.spark.sql.catalyst.plans.physical.UnknownPartitioning
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.execution.RDDScanExec
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.state.{FlatMapGroupsWithStateExecHelper, MemoryStateStore, StateStore, StateStoreId, StateStoreMetrics, UnsafeRowPair}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.types.{DataType, IntegerType}
import org.apache.spark.util.Utils
/** Class to check custom state types */
// State type holding a single running count per key.
case class RunningCount(count: Long)
// Output row pairing a key with a count.
case class Result(key: Long, count: Int)
class FlatMapGroupsWithStateSuite extends StateStoreMetricsTest {
import testImplicits._
import GroupStateImpl._
import GroupStateTimeout._
import FlatMapGroupsWithStateSuite._
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
test("GroupState - get, exists, update, remove") {
var state: GroupStateImpl[String] = null
def testState(
expectedData: Option[String],
shouldBeUpdated: Boolean = false,
shouldBeRemoved: Boolean = false): Unit = {
if (expectedData.isDefined) {
assert(state.exists)
assert(state.get === expectedData.get)
} else {
assert(!state.exists)
intercept[NoSuchElementException] {
state.get
}
}
assert(state.getOption === expectedData)
assert(state.hasUpdated === shouldBeUpdated)
assert(state.hasRemoved === shouldBeRemoved)
}
// === Tests for state in streaming queries ===
// Updating empty state
state = GroupStateImpl.createForStreaming(
None, 1, 1, NoTimeout, hasTimedOut = false, watermarkPresent = false)
testState(None)
state.update("")
testState(Some(""), shouldBeUpdated = true)
// Updating exiting state
state = GroupStateImpl.createForStreaming(
Some("2"), 1, 1, NoTimeout, hasTimedOut = false, watermarkPresent = false)
testState(Some("2"))
state.update("3")
testState(Some("3"), shouldBeUpdated = true)
// Removing state
state.remove()
testState(None, shouldBeRemoved = true, shouldBeUpdated = false)
state.remove() // should be still callable
state.update("4")
testState(Some("4"), shouldBeRemoved = false, shouldBeUpdated = true)
// Updating by null throw exception
intercept[IllegalArgumentException] {
state.update(null)
}
}
test("GroupState - setTimeout - with NoTimeout") {
for (initValue <- Seq(None, Some(5))) {
val states = Seq(
GroupStateImpl.createForStreaming(
initValue, 1000, 1000, NoTimeout, hasTimedOut = false, watermarkPresent = false),
GroupStateImpl.createForBatch(NoTimeout, watermarkPresent = false)
)
for (state <- states) {
// for streaming queries
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
// for batch queries
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
}
}
}
test("GroupState - setTimeout - with ProcessingTimeTimeout") {
// for streaming queries
var state: GroupStateImpl[Int] = GroupStateImpl.createForStreaming(
None, 1000, 1000, ProcessingTimeTimeout, hasTimedOut = false, watermarkPresent = false)
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
state.setTimeoutDuration("-1 month 31 days 1 second")
assert(state.getTimeoutTimestamp === 2000)
state.setTimeoutDuration(500)
assert(state.getTimeoutTimestamp === 1500) // can be set without initializing state
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.update(5)
assert(state.getTimeoutTimestamp === 1500) // does not change
state.setTimeoutDuration(1000)
assert(state.getTimeoutTimestamp === 2000)
state.setTimeoutDuration("2 second")
assert(state.getTimeoutTimestamp === 3000)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.remove()
assert(state.getTimeoutTimestamp === 3000) // does not change
state.setTimeoutDuration(500) // can still be set
assert(state.getTimeoutTimestamp === 1500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
// for batch queries
state = GroupStateImpl.createForBatch(
ProcessingTimeTimeout, watermarkPresent = false).asInstanceOf[GroupStateImpl[Int]]
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
state.setTimeoutDuration(500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.update(5)
state.setTimeoutDuration(1000)
state.setTimeoutDuration("2 second")
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.remove()
state.setTimeoutDuration(500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
}
test("GroupState - setTimeout - with EventTimeTimeout") {
var state: GroupStateImpl[Int] = GroupStateImpl.createForStreaming(
None, 1000, 1000, EventTimeTimeout, false, watermarkPresent = true)
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.setTimeoutTimestamp(5000)
assert(state.getTimeoutTimestamp === 5000) // can be set without initializing state
state.update(5)
assert(state.getTimeoutTimestamp === 5000) // does not change
state.setTimeoutTimestamp(10000)
assert(state.getTimeoutTimestamp === 10000)
state.setTimeoutTimestamp(new Date(20000))
assert(state.getTimeoutTimestamp === 20000)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.remove()
assert(state.getTimeoutTimestamp === 20000)
state.setTimeoutTimestamp(5000)
assert(state.getTimeoutTimestamp === 5000) // can be set after removing state
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
// for batch queries
state = GroupStateImpl.createForBatch(EventTimeTimeout, watermarkPresent = false)
.asInstanceOf[GroupStateImpl[Int]]
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.setTimeoutTimestamp(5000)
state.update(5)
state.setTimeoutTimestamp(10000)
state.setTimeoutTimestamp(new Date(20000))
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.remove()
state.setTimeoutTimestamp(5000)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
}
test("GroupState - illegal params to setTimeout") {
var state: GroupStateImpl[Int] = null
// Test setTimeout****() with illegal values
def testIllegalTimeout(body: => Unit): Unit = {
intercept[IllegalArgumentException] {
body
}
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
}
state = GroupStateImpl.createForStreaming(
Some(5), 1000, 1000, ProcessingTimeTimeout, hasTimedOut = false, watermarkPresent = false)
testIllegalTimeout {
state.setTimeoutDuration(-1000)
}
testIllegalTimeout {
state.setTimeoutDuration(0)
}
testIllegalTimeout {
state.setTimeoutDuration("-2 second")
}
testIllegalTimeout {
state.setTimeoutDuration("-1 month")
}
testIllegalTimeout {
state.setTimeoutDuration("1 month -31 day")
}
state = GroupStateImpl.createForStreaming(
Some(5), 1000, 1000, EventTimeTimeout, hasTimedOut = false, watermarkPresent = false)
testIllegalTimeout {
state.setTimeoutTimestamp(-10000)
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "-3 second")
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "-1 month")
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "1 month -32 day")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000))
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "-3 second")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "-1 month")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "1 month -32 day")
}
}
test("GroupState - hasTimedOut") {
for (timeoutConf <- Seq(NoTimeout, ProcessingTimeTimeout, EventTimeTimeout)) {
// for streaming queries
for (initState <- Seq(None, Some(5))) {
val state1 = GroupStateImpl.createForStreaming(
initState, 1000, 1000, timeoutConf, hasTimedOut = false, watermarkPresent = false)
assert(state1.hasTimedOut === false)
val state2 = GroupStateImpl.createForStreaming(
initState, 1000, 1000, timeoutConf, hasTimedOut = true, watermarkPresent = false)
assert(state2.hasTimedOut)
}
// for batch queries
assert(
GroupStateImpl.createForBatch(timeoutConf, watermarkPresent = false).hasTimedOut === false)
}
}
test("GroupState - getCurrentWatermarkMs") {
def streamingState(timeoutConf: GroupStateTimeout, watermark: Option[Long]): GroupState[Int] = {
GroupStateImpl.createForStreaming(
None, 1000, watermark.getOrElse(-1), timeoutConf,
hasTimedOut = false, watermark.nonEmpty)
}
def batchState(timeoutConf: GroupStateTimeout, watermarkPresent: Boolean): GroupState[Any] = {
GroupStateImpl.createForBatch(timeoutConf, watermarkPresent)
}
def assertWrongTimeoutError(test: => Unit): Unit = {
val e = intercept[UnsupportedOperationException] { test }
assert(e.getMessage.contains(
"Cannot get event time watermark timestamp without setting watermark"))
}
for (timeoutConf <- Seq(NoTimeout, EventTimeTimeout, ProcessingTimeTimeout)) {
// Tests for getCurrentWatermarkMs in streaming queries
assertWrongTimeoutError { streamingState(timeoutConf, None).getCurrentWatermarkMs() }
assert(streamingState(timeoutConf, Some(1000)).getCurrentWatermarkMs() === 1000)
assert(streamingState(timeoutConf, Some(2000)).getCurrentWatermarkMs() === 2000)
// Tests for getCurrentWatermarkMs in batch queries
assertWrongTimeoutError {
batchState(timeoutConf, watermarkPresent = false).getCurrentWatermarkMs()
}
assert(batchState(timeoutConf, watermarkPresent = true).getCurrentWatermarkMs() === -1)
}
}
test("GroupState - getCurrentProcessingTimeMs") {
def streamingState(
timeoutConf: GroupStateTimeout,
procTime: Long,
watermarkPresent: Boolean): GroupState[Int] = {
GroupStateImpl.createForStreaming(
None, procTime, -1, timeoutConf, hasTimedOut = false, watermarkPresent = false)
}
def batchState(timeoutConf: GroupStateTimeout, watermarkPresent: Boolean): GroupState[Any] = {
GroupStateImpl.createForBatch(timeoutConf, watermarkPresent)
}
for (timeoutConf <- Seq(NoTimeout, EventTimeTimeout, ProcessingTimeTimeout)) {
for (watermarkPresent <- Seq(false, true)) {
// Tests for getCurrentProcessingTimeMs in streaming queries
assert(streamingState(timeoutConf, NO_TIMESTAMP, watermarkPresent)
.getCurrentProcessingTimeMs() === -1)
assert(streamingState(timeoutConf, 1000, watermarkPresent)
.getCurrentProcessingTimeMs() === 1000)
assert(streamingState(timeoutConf, 2000, watermarkPresent)
.getCurrentProcessingTimeMs() === 2000)
// Tests for getCurrentProcessingTimeMs in batch queries
val currentTime = System.currentTimeMillis()
assert(batchState(timeoutConf, watermarkPresent).getCurrentProcessingTimeMs >= currentTime)
}
}
}
test("GroupState - primitive type") {
var intState = GroupStateImpl.createForStreaming[Int](
None, 1000, 1000, NoTimeout, hasTimedOut = false, watermarkPresent = false)
intercept[NoSuchElementException] {
intState.get
}
assert(intState.getOption === None)
intState = GroupStateImpl.createForStreaming[Int](
Some(10), 1000, 1000, NoTimeout, hasTimedOut = false, watermarkPresent = false)
assert(intState.get == 10)
intState.update(0)
assert(intState.get == 0)
intState.remove()
intercept[NoSuchElementException] {
intState.get
}
}
// Values used for testing InputProcessor
val currentBatchTimestamp = 1000
val currentBatchWatermark = 1000
val beforeTimeoutThreshold = 999
val afterTimeoutThreshold = 1001
// Tests for InputProcessor.processNewData() when timeout = NoTimeout
for (priorState <- Seq(None, Some(0))) {
val priorStateStr = if (priorState.nonEmpty) "prior state set" else "no prior state"
val testName = s"NoTimeout - $priorStateStr - "
testStateUpdateWithData(
testName + "no update",
stateUpdates = state => {
assert(state.getCurrentProcessingTimeMs() === currentBatchTimestamp)
intercept[Exception] { state.getCurrentWatermarkMs() } // watermark not specified
/* no updates */
},
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = priorState) // should not change
testStateUpdateWithData(
testName + "state updated",
stateUpdates = state => { state.update(5) },
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = Some(5)) // should change
testStateUpdateWithData(
testName + "state removed",
stateUpdates = state => { state.remove() },
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = None) // should be removed
}
// Tests for InputProcessor.processNewData() when timeout != NoTimeout.
// (The tests registered below are named "InputProcessor - process new data - ...",
// see testStateUpdateWithData; processTimedOutState() tests follow further down.)
// Every combination of {no prior state, prior state} x {no prior timeout, prior timeout}
// is exercised for both ProcessingTimeTimeout and EventTimeTimeout.
for (priorState <- Seq(None, Some(0))) {
for (priorTimeoutTimestamp <- Seq(NO_TIMESTAMP, 1000)) {
var testName = ""
if (priorState.nonEmpty) {
testName += "prior state set, "
if (priorTimeoutTimestamp == 1000) {
testName += "prior timeout set"
} else {
testName += "no prior timeout"
}
} else {
testName += "no prior state"
}
// Behavior common to both timeout confs: no update, update, and removal.
for (timeoutConf <- Seq(ProcessingTimeTimeout, EventTimeTimeout)) {
testStateUpdateWithData(
s"$timeoutConf - $testName - no update",
stateUpdates = state => {
assert(state.getCurrentProcessingTimeMs() === currentBatchTimestamp)
intercept[Exception] { state.getCurrentWatermarkMs() } // watermark not specified
/* no updates */
},
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = priorState, // state should not change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithData(
s"$timeoutConf - $testName - state updated",
stateUpdates = state => { state.update(5) },
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithData(
s"$timeoutConf - $testName - state removed",
stateUpdates = state => { state.remove() },
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None) // state should be removed (timeout timestamp defaults to NO_TIMESTAMP)
}
// Tests with ProcessingTimeTimeout
if (priorState == None) {
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - timeout updated without initializing state",
stateUpdates = state => { state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = None,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None,
expectedTimeoutTimestamp = currentBatchTimestamp + 5000)
}
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - state and timeout duration updated",
stateUpdates =
(state: GroupState[Int]) => { state.update(5); state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = currentBatchTimestamp + 5000) // timestamp should change
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - timeout updated after state removed",
stateUpdates = state => { state.remove(); state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None,
expectedTimeoutTimestamp = currentBatchTimestamp + 5000)
// Tests with EventTimeTimeout
if (priorState == None) {
testStateUpdateWithData(
s"EventTimeTimeout - $testName - setting timeout without init state not allowed",
stateUpdates = state => {
state.setTimeoutTimestamp(10000)
},
timeoutConf = EventTimeTimeout,
priorState = None,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None,
expectedTimeoutTimestamp = 10000)
}
testStateUpdateWithData(
s"EventTimeTimeout - $testName - state and timeout timestamp updated",
stateUpdates =
(state: GroupState[Int]) => { state.update(5); state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = 5000) // timestamp should change
testStateUpdateWithData(
s"EventTimeTimeout - $testName - timeout timestamp updated to before watermark",
stateUpdates =
(state: GroupState[Int]) => {
state.update(5)
intercept[IllegalArgumentException] {
state.setTimeoutTimestamp(currentBatchWatermark - 1) // try to set to < watermark
}
},
timeoutConf = EventTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should not update
testStateUpdateWithData(
s"EventTimeTimeout - $testName - setting timeout with state removal not allowed",
stateUpdates = state => {
state.remove(); state.setTimeoutTimestamp(10000)
},
timeoutConf = EventTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None,
expectedTimeoutTimestamp = 10000)
}
}
// Tests for InputProcessor.processTimedOutState()
// preTimeoutState is the state value seeded into the store before each timeout test
// (also referenced by testStateUpdateWithTimeout below).
val preTimeoutState = Some(5)
for (timeoutConf <- Seq(ProcessingTimeTimeout, EventTimeTimeout)) {
// A timeout timestamp strictly after the threshold must NOT fire the user function.
testStateUpdateWithTimeout(
s"$timeoutConf - should not timeout",
stateUpdates = state => { assert(false, "function called without timeout") },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = afterTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = afterTimeoutThreshold) // timestamp should not change
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - no update/remove",
stateUpdates = state => {
assert(state.getCurrentProcessingTimeMs() === currentBatchTimestamp)
intercept[Exception] { state.getCurrentWatermarkMs() } // watermark not specified
/* no updates */
},
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - update state",
stateUpdates = state => { state.update(5) },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - remove state",
stateUpdates = state => { state.remove() },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = None, // state should be removed
expectedTimeoutTimestamp = NO_TIMESTAMP)
}
// ProcessingTimeTimeout-specific: the user function may re-arm the timeout after firing.
testStateUpdateWithTimeout(
"ProcessingTimeTimeout - should timeout - timeout duration updated",
stateUpdates = state => { state.setTimeoutDuration(2000) },
timeoutConf = ProcessingTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = currentBatchTimestamp + 2000) // timestamp should change
testStateUpdateWithTimeout(
"ProcessingTimeTimeout - should timeout - timeout duration and state updated",
stateUpdates = state => { state.update(5); state.setTimeoutDuration(2000) },
timeoutConf = ProcessingTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = currentBatchTimestamp + 2000) // timestamp should change
// EventTimeTimeout-specific: the re-armed timeout is an absolute event-time timestamp.
testStateUpdateWithTimeout(
"EventTimeTimeout - should timeout - timeout timestamp updated",
stateUpdates = state => { state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = 5000) // timestamp should change
testStateUpdateWithTimeout(
"EventTimeTimeout - should timeout - timeout and state updated",
stateUpdates = state => { state.update(5); state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = 5000) // timestamp should change
// End-to-end streaming test, run once per supported state format version.
testWithAllStateVersions("flatMapGroupsWithState - streaming") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count if state is defined, otherwise does not return anything
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() >= 0 }
assertCannotGetWatermark { state.getCurrentWatermarkMs() }
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
Iterator.empty
} else {
state.update(RunningCount(count))
Iterator((key, count.toString))
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc)
// Each AddData/CheckNewAnswer pair below is order-dependent: state row counts
// reflect the cumulative effect of all earlier batches.
testStream(result, Update)(
AddData(inputData, "a"),
CheckNewAnswer(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "a", "b"),
CheckNewAnswer(("a", "2"), ("b", "1")),
assertNumStateRows(total = 2, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and not return anything for a
CheckNewAnswer(("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1 and
CheckNewAnswer(("a", "1"), ("c", "1")),
assertNumStateRows(total = 3, updated = 2)
)
}
test("flatMapGroupsWithState - streaming + func returns iterator that updates state lazily") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count if state is defined, otherwise does not return anything
// Additionally, it updates state lazily as the returned iterator get consumed
// (state.update/remove happen inside flatMap, i.e. only when the sink pulls elements).
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
values.flatMap { _ =>
val count = state.getOption.map(_.count).getOrElse(0L) + 1
if (count == 3) {
state.remove()
None
} else {
state.update(RunningCount(count))
Some((key, count.toString))
}
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc)
testStream(result, Update)(
AddData(inputData, "a", "a", "b"),
CheckNewAnswer(("a", "1"), ("a", "2"), ("b", "1")),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and not return anything for a
CheckNewAnswer(("b", "2")),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1 and
CheckNewAnswer(("a", "1"), ("c", "1"))
)
}
// Verifies that flatMapGroupsWithState (Append) composes with a downstream aggregation.
testWithAllStateVersions("flatMapGroupsWithState - streaming + aggregation") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
Iterator(key -> "-1")
} else {
state.update(RunningCount(count))
Iterator(key -> count.toString)
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Append, GroupStateTimeout.NoTimeout)(stateFunc)
.groupByKey(_._1)
.count()
// Complete mode: every CheckNewAnswer lists the full aggregated result so far.
testStream(result, Complete)(
AddData(inputData, "a"),
CheckNewAnswer(("a", 1)),
AddData(inputData, "a", "b"),
// mapGroups generates ("a", "2"), ("b", "1"); so increases counts of a and b by 1
CheckNewAnswer(("a", 2), ("b", 1)),
StopStream,
StartStream(),
AddData(inputData, "a", "b"),
// mapGroups should remove state for "a" and generate ("a", "-1"), ("b", "2") ;
// so increment a and b by 1
CheckNewAnswer(("a", 3), ("b", 2)),
StopStream,
StartStream(),
AddData(inputData, "a", "c"),
// mapGroups should recreate state for "a" and generate ("a", "1"), ("c", "1") ;
// so increment a and c by 1
CheckNewAnswer(("a", 4), ("b", 2), ("c", 1))
)
}
// In batch execution there is never prior state, so state.exists must be false.
test("flatMapGroupsWithState - batch") {
// Function that returns running count only if its even, otherwise does not return
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() > 0 }
assertCannotGetWatermark { state.getCurrentWatermarkMs() }
if (state.exists) throw new IllegalArgumentException("state.exists should be false")
Iterator((key, values.size))
}
val df = Seq("a", "a", "b").toDS
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc).toDF
checkAnswer(df, Seq(("a", 2), ("b", 1)).toDF)
}
// NOTE(review): test is disabled via ignore(...) — presumably flaky or unsupported
// in this fork; confirm before re-enabling.
ignore("flatMapGroupsWithState - streaming with processing time timeout") {
// Function to maintain the count as state and set the proc. time timeout delay of 10 seconds.
// It returns the count if changed, or -1 if the state was removed by timeout.
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() >= 0 }
assertCannotGetWatermark { state.getCurrentWatermarkMs() }
if (state.hasTimedOut) {
state.remove()
Iterator((key, "-1"))
} else {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
state.setTimeoutDuration("10 seconds")
Iterator((key, count.toString))
}
}
// Manual clock so timeout expiry can be driven deterministically.
val clock = new StreamManualClock
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, ProcessingTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, "a"),
AdvanceManualClock(1 * 1000),
CheckNewAnswer(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "b"),
AdvanceManualClock(1 * 1000),
CheckNewAnswer(("b", "1")),
assertNumStateRows(total = 2, updated = 1),
AddData(inputData, "b"),
AdvanceManualClock(10 * 1000),
CheckNewAnswer(("a", "-1"), ("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, "c"),
AdvanceManualClock(11 * 1000),
CheckNewAnswer(("b", "-1"), ("c", "1")),
assertNumStateRows(total = 1, updated = 2),
AdvanceManualClock(12 * 1000),
AssertOnQuery { _ => clock.getTimeMillis() == 35000 },
// Wait (bounded by streamingTimeout) for the no-data batch at t=35s to commit
// before checking the timeout-only output below.
Execute { q =>
failAfter(streamingTimeout) {
while (q.lastProgress.timestamp != "1970-01-01T00:00:35.000Z") {
Thread.sleep(1)
}
}
},
CheckNewAnswer(("c", "-1")),
assertNumStateRows(total = 0, updated = 1)
)
}
// NOTE(review): disabled via ignore(...) — confirm reason before re-enabling.
ignore("flatMapGroupsWithState - streaming w/ event time timeout + watermark") {
// Function to maintain the max event time as state and set the timeout timestamp based on the
// current max event time seen. It returns the max event time in the state, or -1 if the state
// was removed by timeout.
val stateFunc = (key: String, values: Iterator[(String, Long)], state: GroupState[Long]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() >= 0 }
assertCanGetWatermark { state.getCurrentWatermarkMs() >= -1 }
val timeoutDelaySec = 5
if (state.hasTimedOut) {
state.remove()
Iterator((key, -1))
} else {
val valuesSeq = values.toSeq
val maxEventTimeSec = math.max(valuesSeq.map(_._2).max, state.getOption.getOrElse(0L))
val timeoutTimestampSec = maxEventTimeSec + timeoutDelaySec
state.update(maxEventTimeSec)
state.setTimeoutTimestamp(timeoutTimestampSec * 1000)
Iterator((key, maxEventTimeSec.toInt))
}
}
val inputData = MemoryStream[(String, Int)]
val result =
inputData.toDS
.select($"_1".as("key"), $"_2".cast("timestamp").as("eventTime"))
.withWatermark("eventTime", "10 seconds")
.as[(String, Long)]
.groupByKey(_._1)
.flatMapGroupsWithState(Update, EventTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(),
AddData(inputData, ("a", 11), ("a", 13), ("a", 15)),
// Max event time = 15. Timeout timestamp for "a" = 15 + 5 = 20. Watermark = 15 - 10 = 5.
CheckNewAnswer(("a", 15)), // Output = max event time of a
AddData(inputData, ("a", 4)), // Add data older than watermark for "a"
CheckNewAnswer(), // No output as data should get filtered by watermark
AddData(inputData, ("a", 10)), // Add data newer than watermark for "a"
CheckNewAnswer(("a", 15)), // Max event time is still the same
// Timeout timestamp for "a" is still 20 as max event time for "a" is still 15.
// Watermark is still 5 as max event time for all data is still 15.
AddData(inputData, ("b", 31)), // Add data newer than watermark for "b", not "a"
// Watermark = 31 - 10 = 21, so "a" should be timed out as timeout timestamp for "a" is 20.
CheckNewAnswer(("a", -1), ("b", 31)) // State for "a" should timeout and emit -1
)
}
// Verifies the default state format version (2) by inspecting the executed physical plan.
test("flatMapGroupsWithState - uses state format version 2 by default") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
Iterator((key, count.toString))
}
val inputData = MemoryStream[String]
val result = inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc)
testStream(result, Update)(
AddData(inputData, "a"),
CheckNewAnswer(("a", "1")),
Execute { query =>
// Verify state format = 2
val f = query.lastExecution.executedPlan.collect { case f: FlatMapGroupsWithStateExec => f }
assert(f.size == 1)
assert(f.head.stateFormatVersion == 2)
}
)
}
// Recovery test: a checkpoint written by Spark 2.3.1 (state format 1) must keep using
// format 1 even when the conf requests format 2. Disabled via ignore(...) in this fork.
ignore("flatMapGroupsWithState - recovery from checkpoint uses state format version 1") {
// Function to maintain the max event time as state and set the timeout timestamp based on the
// current max event time seen. It returns the max event time in the state, or -1 if the state
// was removed by timeout.
val stateFunc = (key: String, values: Iterator[(String, Long)], state: GroupState[Long]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() >= 0 }
assertCanGetWatermark { state.getCurrentWatermarkMs() >= -1 }
val timeoutDelaySec = 5
if (state.hasTimedOut) {
state.remove()
Iterator((key, -1))
} else {
val valuesSeq = values.toSeq
val maxEventTimeSec = math.max(valuesSeq.map(_._2).max, state.getOption.getOrElse(0L))
val timeoutTimestampSec = maxEventTimeSec + timeoutDelaySec
state.update(maxEventTimeSec)
state.setTimeoutTimestamp(timeoutTimestampSec * 1000)
Iterator((key, maxEventTimeSec.toInt))
}
}
val inputData = MemoryStream[(String, Int)]
val result =
inputData.toDS
.select($"_1".as("key"), $"_2".cast("timestamp").as("eventTime"))
.withWatermark("eventTime", "10 seconds")
.as[(String, Long)]
.groupByKey(_._1)
.flatMapGroupsWithState(Update, EventTimeTimeout)(stateFunc)
val resourceUri = this.getClass.getResource(
"/structured-streaming/checkpoint-version-2.3.1-flatMapGroupsWithState-state-format-1/").toURI
val checkpointDir = Utils.createTempDir().getCanonicalFile
// Copy the checkpoint to a temp dir to prevent changes to the original.
// Not doing this will lead to the test passing on the first run, but fail subsequent runs.
FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
// Pre-load the data the checkpoint was generated with, so the recovered offsets line up.
inputData.addData(("a", 11), ("a", 13), ("a", 15))
inputData.addData(("a", 4))
testStream(result, Update)(
StartStream(
checkpointLocation = checkpointDir.getAbsolutePath,
additionalConfs = Map(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION.key -> "2")),
/*
Note: The checkpoint was generated using the following input in Spark version 2.3.1
AddData(inputData, ("a", 11), ("a", 13), ("a", 15)),
// Max event time = 15. Timeout timestamp for "a" = 15 + 5 = 20. Watermark = 15 - 10 = 5.
CheckNewAnswer(("a", 15)), // Output = max event time of a
AddData(inputData, ("a", 4)), // Add data older than watermark for "a"
CheckNewAnswer(), // No output as data should get filtered by watermark
*/
AddData(inputData, ("a", 10)), // Add data newer than watermark for "a"
CheckNewAnswer(("a", 15)), // Max event time is still the same
// Timeout timestamp for "a" is still 20 as max event time for "a" is still 15.
// Watermark is still 5 as max event time for all data is still 15.
Execute { query =>
// Verify state format = 1
val f = query.lastExecution.executedPlan.collect { case f: FlatMapGroupsWithStateExec => f }
assert(f.size == 1)
assert(f.head.stateFormatVersion == 1)
},
AddData(inputData, ("b", 31)), // Add data newer than watermark for "b", not "a"
// Watermark = 31 - 10 = 21, so "a" should be timed out as timeout timestamp for "a" is 20.
CheckNewAnswer(("a", -1), ("b", 31)) // State for "a" should timeout and emit -1
)
}
test("mapGroupsWithState - streaming") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() >= 0 }
assertCannotGetWatermark { state.getCurrentWatermarkMs() }
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
(key, "-1")
} else {
state.update(RunningCount(count))
(key, count.toString)
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.mapGroupsWithState(stateFunc) // Types = State: RunningCount, Out: (String, String)
testStream(result, Update)(
AddData(inputData, "a"),
CheckNewAnswer(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "a", "b"),
CheckNewAnswer(("a", "2"), ("b", "1")),
assertNumStateRows(total = 2, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and return count as -1
CheckNewAnswer(("a", "-1"), ("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1
CheckNewAnswer(("a", "1"), ("c", "1")),
assertNumStateRows(total = 3, updated = 2)
)
}
test("mapGroupsWithState - batch") {
// Test the following
// - no initial state
// - timeouts operations work, does not throw any error [SPARK-20792]
// - works with primitive state type
// - can get processing time
val stateFunc = (key: String, values: Iterator[String], state: GroupState[Int]) => {
assertCanGetProcessingTime { state.getCurrentProcessingTimeMs() > 0 }
assertCannotGetWatermark { state.getCurrentWatermarkMs() }
if (state.exists) throw new IllegalArgumentException("state.exists should be false")
// In batch mode these state/timeout mutations are no-ops but must not throw.
state.setTimeoutTimestamp(0, "1 hour")
state.update(10)
(key, values.size)
}
checkAnswer(
spark.createDataset(Seq("a", "a", "b"))
.groupByKey(x => x)
.mapGroupsWithState(EventTimeTimeout)(stateFunc)
.toDF,
spark.createDataset(Seq(("a", 2), ("b", 1))).toDF)
}
// Verifies that a task failure aborts (rather than commits) the state store update:
// the count must not advance across the injected failure.
testQuietly("StateStore.abort on task failure handling") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
// failInTask is a companion-object flag toggled below via setFailInTask.
if (FlatMapGroupsWithStateSuite.failInTask) throw new Exception("expected failure")
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
(key, count)
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.mapGroupsWithState(stateFunc) // Types = State: RunningCount, Out: (String, Long)
def setFailInTask(value: Boolean): AssertOnQuery = AssertOnQuery { q =>
FlatMapGroupsWithStateSuite.failInTask = value
true
}
testStream(result, Update)(
setFailInTask(false),
AddData(inputData, "a"),
CheckNewAnswer(("a", 1L)),
AddData(inputData, "a"),
CheckNewAnswer(("a", 2L)),
setFailInTask(true),
AddData(inputData, "a"),
ExpectFailure[SparkException](), // task should fail but should not increment count
setFailInTask(false),
StartStream(),
CheckNewAnswer(("a", 3L)) // task should not fail, and should show correct count
)
}
// mapGroupsWithState must not claim any output partitioning to downstream operators.
test("output partitioning is unknown") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => key
val inputData = MemoryStream[String]
val result = inputData.toDS.groupByKey(x => x).mapGroupsWithState(stateFunc)
testStream(result, Update)(
AddData(inputData, "a"),
CheckNewAnswer("a"),
AssertOnQuery(_.lastExecution.executedPlan.outputPartitioning === UnknownPartitioning(0))
)
}
// Complete output mode must be rejected at analysis time for both the Scala and
// the Java flatMapGroupsWithState APIs.
test("disallow complete mode") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[Int]) => {
Iterator[String]()
}
var e = intercept[IllegalArgumentException] {
MemoryStream[String].toDS().groupByKey(x => x).flatMapGroupsWithState(
OutputMode.Complete, GroupStateTimeout.NoTimeout)(stateFunc)
}
assert(e.getMessage === "The output mode of function should be append or update")
val javaStateFunc = new FlatMapGroupsWithStateFunction[String, String, Int, String] {
import java.util.{Iterator => JIterator}
override def call(
key: String,
values: JIterator[String],
state: GroupState[Int]): JIterator[String] = { null }
}
e = intercept[IllegalArgumentException] {
MemoryStream[String].toDS().groupByKey(x => x).flatMapGroupsWithState(
javaStateFunc, OutputMode.Complete,
implicitly[Encoder[Int]], implicitly[Encoder[String]], GroupStateTimeout.NoTimeout)
}
assert(e.getMessage === "The output mode of function should be append or update")
}
/**
 * Registers a (currently ignored) regression test for SPARK-20714 verifying that a
 * watermark on the input does not fail a query using the given timeout conf.
 * NOTE(review): the body always uses ProcessingTimeTimeout regardless of `timeoutConf`;
 * the parameter only varies the test name — confirm whether that is intentional.
 */
def testWithTimeout(timeoutConf: GroupStateTimeout): Unit = {
ignore("SPARK-20714: watermark does not fail query when timeout = " + timeoutConf) {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc =
(key: String, values: Iterator[(String, Long)], state: GroupState[RunningCount]) => {
if (state.hasTimedOut) {
state.remove()
Iterator((key, "-1"))
} else {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
state.setTimeoutDuration("10 seconds")
Iterator((key, count.toString))
}
}
val clock = new StreamManualClock
val inputData = MemoryStream[(String, Long)]
val result =
inputData.toDF().toDF("key", "time")
.selectExpr("key", "cast(time as timestamp) as timestamp")
.withWatermark("timestamp", "10 second")
.as[(String, Long)]
.groupByKey(x => x._1)
.flatMapGroupsWithState(Update, ProcessingTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, ("a", 1L)),
AdvanceManualClock(1 * 1000),
CheckNewAnswer(("a", "1"))
)
}
}
// Register the SPARK-20714 test for each timeout conf of interest.
testWithTimeout(NoTimeout)
testWithTimeout(ProcessingTimeTimeout)
/**
 * Registers a test that feeds one key of new data through InputProcessor.processNewData()
 * and verifies the resulting state and timeout timestamp in the state store.
 * Silently registers nothing for the impossible combination of a prior timeout
 * timestamp without prior state.
 */
def testStateUpdateWithData(
testName: String,
stateUpdates: GroupState[Int] => Unit,
timeoutConf: GroupStateTimeout,
priorState: Option[Int],
priorTimeoutTimestamp: Long = NO_TIMESTAMP,
expectedState: Option[Int] = None,
expectedTimeoutTimestamp: Long = NO_TIMESTAMP,
expectedException: Class[_ <: Exception] = null): Unit = {
// There can be no prior timeout timestamp when there is no prior state.
val impossibleCombination = priorState.isEmpty && priorTimeoutTimestamp != NO_TIMESTAMP
if (!impossibleCombination) {
test(s"InputProcessor - process new data - $testName") {
val mapGroupsFunc = (key: Int, values: Iterator[Int], state: GroupState[Int]) => {
// New-data processing must never present a timed-out state, and always has values.
assert(!state.hasTimedOut, "hasTimedOut not false")
assert(values.nonEmpty, "Some value is expected")
stateUpdates(state)
Iterator.empty
}
testStateUpdate(
testTimeoutUpdates = false, mapGroupsFunc, timeoutConf,
priorState, priorTimeoutTimestamp,
expectedState, expectedTimeoutTimestamp, expectedException)
}
}
}
/**
 * Registers a test that drives InputProcessor.processTimedOutState() against a store
 * pre-seeded with `preTimeoutState` and the given timeout timestamp, then verifies
 * the final state and timeout timestamp.
 */
def testStateUpdateWithTimeout(
testName: String,
stateUpdates: GroupState[Int] => Unit,
timeoutConf: GroupStateTimeout,
priorTimeoutTimestamp: Long,
expectedState: Option[Int],
expectedTimeoutTimestamp: Long = NO_TIMESTAMP): Unit = {
test(s"InputProcessor - process timed out state - $testName") {
// The function is only invoked for keys whose state timed out, so no values arrive.
val timedOutFunc = (key: Int, values: Iterator[Int], state: GroupState[Int]) => {
assert(state.hasTimedOut, "hasTimedOut not true")
assert(values.isEmpty, "values not empty")
stateUpdates(state)
Iterator.empty
}
testStateUpdate(
testTimeoutUpdates = true, timedOutFunc, timeoutConf = timeoutConf,
preTimeoutState, priorTimeoutTimestamp, expectedState, expectedTimeoutTimestamp, null)
}
}
/**
 * Core harness: builds a FlatMapGroupsWithStateExec plan around `mapGroupsFunc`,
 * seeds a fresh in-memory state store with the prior state/timestamp, runs either
 * processTimedOutState() or processNewData() for a single key (0), and verifies the
 * resulting state object, timeout timestamp, or thrown exception.
 */
def testStateUpdate(
testTimeoutUpdates: Boolean,
mapGroupsFunc: (Int, Iterator[Int], GroupState[Int]) => Iterator[Int],
timeoutConf: GroupStateTimeout,
priorState: Option[Int],
priorTimeoutTimestamp: Long,
expectedState: Option[Int],
expectedTimeoutTimestamp: Long,
expectedException: Class[_ <: Exception]): Unit = {
val store = newStateStore()
val mapGroupsSparkPlan = newFlatMapGroupsWithStateExec(
mapGroupsFunc, timeoutConf, currentBatchTimestamp)
val inputProcessor = new mapGroupsSparkPlan.InputProcessor(store)
val stateManager = mapGroupsSparkPlan.stateManager
val key = intToRow(0)
// Prepare store with prior state configs
if (priorState.nonEmpty || priorTimeoutTimestamp != NO_TIMESTAMP) {
stateManager.putState(store, key, priorState.orNull, priorTimeoutTimestamp)
}
// Call updating function to update state store
def callFunction() = {
val returnedIter = if (testTimeoutUpdates) {
inputProcessor.processTimedOutState()
} else {
inputProcessor.processNewData(Iterator(key))
}
returnedIter.size // consume the iterator to force state updates
}
if (expectedException != null) {
// Call function and verify the exception type
val e = intercept[Exception] { callFunction() }
assert(e.getClass === expectedException, "Exception thrown but of the wrong type")
} else {
// Call function to update and verify updated state in store
callFunction()
val updatedState = stateManager.getState(store, key)
// stateObj is null when the state was removed; Option(...) maps that to None.
assert(Option(updatedState.stateObj).map(_.toString.toInt) === expectedState,
"final state not as expected")
assert(updatedState.timeoutTimestamp === expectedTimeoutTimestamp,
"final timeout timestamp not as expected")
}
}
/**
 * Builds a FlatMapGroupsWithStateExec physical node for the given user function by
 * constructing a throwaway streaming Dataset, extracting its logical
 * FlatMapGroupsWithState node, and rewrapping the pieces over an empty RDD scan.
 * NOTE: the positional pattern on FlatMapGroupsWithState must track the case class
 * field order exactly; keep in sync with the logical-plan definition.
 */
def newFlatMapGroupsWithStateExec(
func: (Int, Iterator[Int], GroupState[Int]) => Iterator[Int],
timeoutType: GroupStateTimeout = GroupStateTimeout.NoTimeout,
batchTimestampMs: Long = NO_TIMESTAMP): FlatMapGroupsWithStateExec = {
val stateFormatVersion = spark.conf.get(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION)
val emptyRdd = spark.sparkContext.emptyRDD[InternalRow]
MemoryStream[Int]
.toDS
.groupByKey(x => x)
.flatMapGroupsWithState[Int, Int](Append, timeoutConf = timeoutType)(func)
.logicalPlan.collectFirst {
case FlatMapGroupsWithState(f, k, v, g, d, o, s, m, _, t, _) =>
FlatMapGroupsWithStateExec(
f, k, v, g, d, o, None, s, stateFormatVersion, m, t,
Some(currentBatchTimestamp), Some(currentBatchWatermark),
RDDScanExec(g, emptyRdd, "rdd"))
}.get
}
/**
 * Asserts that both setTimeoutDuration overloads throw T on the given state,
 * and that the timeout timestamp is left untouched by the failed calls.
 */
def testTimeoutDurationNotAllowed[T <: Exception: Manifest](state: GroupStateImpl[_]): Unit = {
  val before = state.getTimeoutTimestamp
  def verifyRejected(call: => Unit): Unit = {
    intercept[T] { call }
    assert(state.getTimeoutTimestamp === before)
  }
  verifyRejected { state.setTimeoutDuration(1000) }
  verifyRejected { state.setTimeoutDuration("2 second") }
}
/**
 * Asserts that all four setTimeoutTimestamp overloads throw T on the given state,
 * and that the timeout timestamp is left untouched by the failed calls.
 */
def testTimeoutTimestampNotAllowed[T <: Exception: Manifest](state: GroupStateImpl[_]): Unit = {
  val before = state.getTimeoutTimestamp
  def verifyRejected(call: => Unit): Unit = {
    intercept[T] { call }
    assert(state.getTimeoutTimestamp === before)
  }
  verifyRejected { state.setTimeoutTimestamp(2000) }
  verifyRejected { state.setTimeoutTimestamp(2000, "1 second") }
  verifyRejected { state.setTimeoutTimestamp(new Date(2000)) }
  verifyRejected { state.setTimeoutTimestamp(new Date(2000), "1 second") }
}
// Fresh in-memory state store per test, so tests do not share state.
def newStateStore(): StateStore = new MemoryStateStore()
// Projection used to encode a single Int key as an UnsafeRow.
val intProj = UnsafeProjection.create(Array[DataType](IntegerType))
// Encodes an Int as a copied UnsafeRow key (copy() detaches it from the shared projection buffer).
def intToRow(i: Int): UnsafeRow = {
intProj.apply(new GenericInternalRow(Array[Any](i))).copy()
}
// Decodes the single Int column of a key/value row.
def rowToInt(row: UnsafeRow): Int = row.getInt(0)
/**
 * Registers `func` once per supported flatMapGroupsWithState state format version,
 * with the version pinned through SQLConf for the duration of the test body.
 */
def testWithAllStateVersions(name: String)(func: => Unit): Unit = {
  FlatMapGroupsWithStateExecHelper.supportedVersions.foreach { version =>
    test(s"$name - state format version $version") {
      withSQLConf(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION.key -> version.toString) {
        func
      }
    }
  }
}
}
object FlatMapGroupsWithStateSuite {
  // Toggled by the "StateStore.abort on task failure handling" test to inject a
  // failure inside the state-updating task.
  var failInTask = true

  /** Fails the enclosing test when the predicate reading the processing time is false. */
  def assertCanGetProcessingTime(predicate: => Boolean): Unit = {
    if (!predicate) throw new TestFailedException("Could not get processing time", 20)
  }

  /** Fails the enclosing test when the predicate reading the watermark is false. */
  def assertCanGetWatermark(predicate: => Boolean): Unit = {
    // Bug fix: message previously said "Could not get processing time" (copy-paste
    // from assertCanGetProcessingTime), which made failures here misleading.
    if (!predicate) throw new TestFailedException("Could not get watermark", 20)
  }

  /**
   * Asserts that evaluating `func` (which is expected to read the watermark) throws
   * UnsupportedOperationException. Any other exception, or no exception at all,
   * fails the enclosing test.
   */
  def assertCannotGetWatermark(func: => Unit): Unit = {
    try {
      func
    } catch {
      case u: UnsupportedOperationException =>
        return
      // Deliberately broad: any other throwable means the watermark guard is wrong.
      case _: Throwable =>
        throw new TestFailedException("Unexpected exception when trying to get watermark", 20)
    }
    throw new TestFailedException("Could get watermark when not expected", 20)
  }
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala | Scala | apache-2.0 | 53,966 |
package auctionsniper.ui
import javax.swing.SwingUtilities
import auctionsniper.{SniperListener, SniperSnapshot}
/**
 * Decorator that re-dispatches sniper notifications onto the Swing Event Dispatch
 * Thread before forwarding them to the wrapped listener, so the delegate may safely
 * touch UI components.
 */
class SwingThreadSniperListener(delegate: SniperListener) extends SniperListener {
  def sniperStateChanged(snapshot: SniperSnapshot) {
    val forwardToDelegate = new Runnable {
      def run() {
        delegate.sniperStateChanged(snapshot)
      }
    }
    SwingUtilities.invokeLater(forwardToDelegate)
  }
}
| sptz45/goos-scala | src/auctionsniper/ui/SwingThreadSniperListener.scala | Scala | apache-2.0 | 384 |
package org.jetbrains.plugins.scala.codeInspection.methodSignature
import com.intellij.codeInspection._
import org.jetbrains.plugins.scala.codeInspection.methodSignature.quickfix.AddEmptyParentheses
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
/**
* Pavel Fatin
*/
// Inspection: flags parameterless methods whose result type is Unit and that do not
// override anything — by Scala convention a side-effecting (Unit) method should be
// declared with empty parentheses. The quick fix adds them.
class UnitMethodIsParameterlessInspection extends AbstractMethodSignatureInspection(
"ScalaUnitMethodIsParameterless", "Method with Unit result type is parameterless") {
// Partial function inspecting each ScFunction; the superMethods.isEmpty guard avoids
// flagging overrides, whose signature is fixed by the parent.
def actionFor(holder: ProblemsHolder) = {
case f: ScFunction if f.isParameterless && f.hasUnitResultType && f.superMethods.isEmpty =>
holder.registerProblem(f.nameId, getDisplayName, new AddEmptyParentheses(f))
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/methodSignature/UnitMethodIsParameterlessInspection.scala | Scala | apache-2.0 | 696
package japgolly.scalajs.react.component
import japgolly.scalajs.react.CtorType
import japgolly.scalajs.react.internal.{Box, Lens}
import japgolly.scalajs.react.util.DefaultEffects.{Async => DefaultA, Sync => DefaultS}
import japgolly.scalajs.react.util.Effect.{Async, Dispatch, Id, UnsafeSync}
import scala.scalajs.js
object Scala {
  // A Scala component is a JS component whose props are boxed and whose
  // unmounted/mounted forms carry the backend type B alongside P and S.
  type Component[P, S, B, CT[-p, +u] <: CtorType[p, u]] =
    Js.ComponentWithRoot[
      P, CT, Unmounted[P, S, B],
      Box[P], CT, JsUnmounted[P, S, B]]
  type Unmounted [P, S, B] = Js.UnmountedWithRoot[P, MountedImpure[P, S, B], Box[P], JsMounted[P, S, B]]
  // Mounted forms parameterised by the sync effect F and async effect A.
  type Mounted[F[_], A[_], P, S, B] = MountedRoot[F, A, P, S, B]
  type MountedImpure [P, S, B] = Mounted[Id, DefaultA, P, S, B]
  type MountedPure [P, S, B] = Mounted[DefaultS, DefaultA, P, S, B]
  type BackendScope [P, S] = Generic.MountedRoot[DefaultS, DefaultA, P, S]
  // Underlying JS-facade forms; props and state are Box-wrapped, and the raw
  // mounted JS object is extended with the Vars facade below.
  type JsComponent[P, S, B, CT[-p, +u] <: CtorType[p, u]] = Js.ComponentWithFacade[Box[P], Box[S], Vars[P, S, B], CT]
  type JsUnmounted[P, S, B] = Js.UnmountedWithFacade[Box[P], Box[S], Vars[P, S, B]]
  type JsMounted [P, S, B] = Js.MountedWithFacade [Box[P], Box[S], Vars[P, S, B]]
  type RawMounted[P, S, B] = Js.RawMounted[Box[P], Box[S]] with Vars[P, S, B]
  // Mutable fields stored on the raw JS component instance so the Scala-side
  // wrappers and backend can be recovered from the JS object.
  @js.native
  trait Vars[P, S, B] extends js.Object {
    var mountedImpure: MountedImpure[P, S, B]
    var mountedPure : MountedPure[P, S, B]
    var backend : B
  }
  // private[this] def sanityCheckCU[P, S, B](c: Component[P, S, B, CtorType.Void]): Unmounted[P, S, B] = c.ctor()
  // private[this] def sanityCheckUM[P, S, B](u: Unmounted[P, S, B]): Mounted[P, S, B] = u.renderIntoDOM(null)
  // ===================================================================================================================
  sealed trait MountedSimple[F[_], A[_], P, S, B] extends Generic.MountedSimple[F, A, P, S] {
    override type WithEffect[F2[_]] <: MountedSimple[F2, A, P, S, B]
    override type WithAsyncEffect[A2[_]] <: MountedSimple[F, A2, P, S, B]
    override type WithMappedProps[P2] <: MountedSimple[F, A, P2, S, B]
    override type WithMappedState[S2] <: MountedSimple[F, A, P, S2, B]
    // B instead of F[B] because
    // 1. Builder takes a MountedPure but needs immediate access to this.
    // 2. It never changes once initialised.
    // Note: Keep this is def instead of val because the builder sets it after creation.
    def backend: B
  }
  // P1/S1 are the (possibly mapped) visible props/state; P0/S0 are the root's.
  sealed trait MountedWithRoot[F[_], A[_], P1, S1, B, P0, S0]
    extends MountedSimple[F, A, P1, S1, B] with Generic.MountedWithRoot[F, A, P1, S1, P0, S0] {
    override final type Root = MountedRoot[F, A, P0, S0, B]
    override final type WithEffect[F2[_]] = MountedWithRoot[F2, A, P1, S1, B, P0, S0]
    override final type WithAsyncEffect[A2[_]] = MountedWithRoot[F, A2, P1, S1, B, P0, S0]
    override final type WithMappedProps[P2] = MountedWithRoot[F, A, P2, S1, B, P0, S0]
    override final type WithMappedState[S2] = MountedWithRoot[F, A, P1, S2, B, P0, S0]
    override final type Raw = RawMounted[P0, S0, B]
    val js: JsMounted[P0, S0, B]
    override final def displayName = js.displayName
    override final def backend = js.raw.backend
  }
  type MountedRoot[F[_], A[_], P, S, B] = MountedWithRoot[F, A, P, S, B, P, S]
  /** Wraps a JS-mounted instance as an impure (Id-effect) Scala mounted root,
    * unboxing props/state and delegating state transitions to the JS side. */
  def mountedRoot[P, S, B](x: JsMounted[P, S, B]): MountedRoot[Id, DefaultA, P, S, B] =
    new Template.MountedWithRoot[Id, DefaultA, P, S]()(UnsafeSync.id, DefaultA)
      with MountedRoot[Id, DefaultA, P, S, B] {
      override implicit def F = UnsafeSync.id
      override implicit def A = DefaultA
      override def root = this
      override val js = x
      override val raw = x.raw
      override def props = x.props.unbox
      override def propsChildren = x.propsChildren
      override def state = x.state.unbox
      override def getDOMNode = x.getDOMNode
      override def setState[G[_]](newState: S, callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.setState(Box(newState), callback)
      override def modState[G[_]](mod: S => S, callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.modState(s => Box(mod(s.unbox)), callback)
      override def modState[G[_]](mod: (S, P) => S, callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.modState((s, p) => Box(mod(s.unbox, p.unbox)), callback)
      override def setStateOption[G[_]](o: Option[S], callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.setStateOption(o.map(Box.apply), callback)
      override def modStateOption[G[_]](mod: S => Option[S], callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.modStateOption(s => mod(s.unbox).map(Box.apply), callback)
      override def modStateOption[G[_]](mod: (S, P) => Option[S], callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.modStateOption((s, p) => mod(s.unbox, p.unbox).map(Box.apply), callback)
      override def forceUpdate[G[_]](callback: => G[Unit])(implicit G: Dispatch[G]) =
        x.forceUpdate(callback)
      override type Mapped[F1[_], A1[_], P1, S1] = MountedWithRoot[F1, A1, P1, S1, B, P, S]
      override def mapped[F1[_], A1[_], P1, S1](mp: P => P1, ls: Lens[S, S1])
                                               (implicit ft: UnsafeSync[F1], at: Async[A1]) =
        mappedM(this)(mp, ls)
    }
  // Produces a view of `from` with props mapped by mp and state focused by ls.
  // Note: subsequent .mapped calls re-map from `from` (not from this view).
  private def mappedM[F[_], A[_], P2, S2, P1, S1, B, P0, S0]
      (from: MountedWithRoot[Id, DefaultA, P1, S1, B, P0, S0])
      (mp: P1 => P2, ls: Lens[S1, S2])
      (implicit ft: UnsafeSync[F], at: Async[A]): MountedWithRoot[F, A, P2, S2, B, P0, S0] =
    new Template.MountedMapped[F, A, P2, S2, P1, S1, P0, S0](from)(mp, ls) with MountedWithRoot[F, A, P2, S2, B, P0, S0] {
      override def root = from.root.withEffect(ft).withAsyncEffect(at)
      override val js = from.js
      override val raw = from.raw
      override type Mapped[F3[_], A3[_], P3, S3] = MountedWithRoot[F3, A3, P3, S3, B, P0, S0]
      override def mapped[F3[_], A3[_], P3, S3](mp: P1 => P3, ls: Lens[S1, S3])
                                               (implicit ft: UnsafeSync[F3], at: Async[A3]) =
        mappedM(from)(mp, ls)(ft, at)
    }
  /** Wraps a raw mounted JS object into the impure Scala mounted form. */
  def mountRaw[P, S, B](x: RawMounted[P, S, B]): MountedImpure[P, S, B] =
    mountedRoot(Js.mountedRoot(x))
}
| japgolly/scalajs-react | coreGeneric/src/main/scala/japgolly/scalajs/react/component/Scala.scala | Scala | apache-2.0 | 6,347 |
package hexico.meeple.game
import scala.language.implicitConversions
/** Implicit conveniences for declaring tilesets concisely. */
object TilesetHelpers {
  /** A grass tile; a new TileGrass instance is produced on each access. */
  def Grass: TileGrass = TileGrass()
  /** Lets a bare Feature be used where a TileFeature is expected. */
  implicit def toTileFeature(f: Feature): TileFeature = TileFeature(f)
  /** Enables the `n of tile` syntax for repeating a tile. */
  implicit class RichInt(i: Int) {
    /** A vector containing `i` references to `t` (the same instance repeated). */
    def of(t: Tile): Vector[Tile] = Vector.fill(i)(t)
  }
  /** Lets a single enumeration value be used where a set of values is expected. */
  implicit def autoSingleton[T <: Enumeration](v: T#Value): Set[T#Value] = Set(v)
}
| alanbriolat/meeple | src/main/scala/hexico/meeple/game/TilesetHelpers.scala | Scala | gpl-3.0 | 383 |
package me.heaton.concurrency
import scala.concurrent.duration._
import scala.concurrent.{Promise, Await, Future, ExecutionContext}
import scala.util.{Failure, Success}
/** Demo of Future composition: callbacks, recovery, for-comprehensions,
  * fallbacks, andThen, and Promise. Output order of the println callbacks
  * is nondeterministic since they run on the global execution context. */
object Futures extends App{
  implicit val ec = ExecutionContext.global
  Future(println("Do something later")) onComplete {
    case Success(_) => println("Do something success")
    case Failure(t) => println(s"Do something failed ${t.getMessage}")
  }
  // NOTE(review): onFailure/onSuccess are deprecated since Scala 2.12; onComplete
  // or foreach would be the modern equivalents.
  Future {
    2 / 0
  } onFailure {
    case npe: NullPointerException =>
      println("I'd be amazed if this printed out.")
    case _ => println("Oh NO!")
  }
  // recover turns the ArithmeticException into 0.
  // NOTE(review): Await with Duration.Zero gives the future no time to complete
  // and may throw a TimeoutException if it has not already finished — confirm.
  val v = Await.result(Future {
    2 / 0
  } recover {
    case _ => 0
  }, Duration.Zero)
  println(s"v is $v")
  val usd = Future { 100 }
  val cnyRate = Future { 6.4 }
  // Both futures are created before the for-comprehension, so they run in parallel.
  val purchase = for {
    u <- usd
    r <- cnyRate
  } yield u * r
  purchase onSuccess {
    case amount => println("Purchased " + amount + " CNY")
  }
  // fallbackTo yields g's result because f fails.
  val f = Future { sys.error("failed") }
  val g = Future { 5 }
  val h = f fallbackTo g
  val v2 = Await.result(h, Duration.Zero)
  println(s"v2 is $v2")
  // andThen runs its callback for side effects only and keeps the original value,
  // so the `1 :: l` result is discarded; map then yields List(2).
  val f2 = Future(List()) andThen {
    case Success(l) => 1 :: l
  } map {
    l => 2 :: l
  }
  val r = Await.result(f2, 0 millis)
  println(r)
  // A Promise completed from another future.
  val f3 = Future { 1 }
  val p = Promise[Int]()
  p completeWith f3
  p.future onSuccess {
    case x => println(x)
  }
  /**
   * Returns the result of whichever future succeeds first (via trySuccess).
   * If neither f nor g succeeds, then first(f, g) never completes
   */
  def first[T](f: Future[T], g: Future[T]): Future[T] = {
    val p = Promise[T]()
    f onSuccess {
      case x => p trySuccess x
    }
    g onSuccess {
      case x => p trySuccess x
    }
    p.future
  }
}
| heaton/hello-scala | src/main/scala/me/heaton/concurrency/Futures.scala | Scala | mit | 1,641 |
package com.roundeights.shnappy
import scala.concurrent._
/**
 * Read-only data access scoped to a single site
 */
trait SiteData {

    /** Returns the SiteInfo object */
    def siteInfo: SiteInfo

    /** Returns a page */
    def getPage ( slug: String ): Future[Option[Page]]

    /** Returns the index */
    def getIndex: Future[Option[Page]]

    /** Returns the list of navigation links */
    protected def getRawNavLinks: Future[Seq[NavLink]]

    /** Returns the navigation links, rewriting the index page's own link to "/" */
    def getNavLinks( implicit ctx: ExecutionContext ): Future[Seq[NavLink]] = {
        // Kick off the index lookup eagerly so it runs alongside the raw link fetch
        val indexLink = getIndex.map( _.flatMap( _.navLink ) )
        getRawNavLinks.flatMap( links => indexLink.map {
            case Some(home) => links.map(
                link => if (link == home) link.withURL("/") else link
            )
            case None => links
        })
    }
}
/**
 * Caches the results of a wrapped SiteData instance.
 *
 * Results are memoized for the lifetime of this object; there is no
 * invalidation, so a new instance is needed to pick up changed data.
 */
class SiteDataCache
    ( private val inner: SiteData )
    ( implicit val ctx: ExecutionContext )
extends SiteData {

    /** A cache of page data by slug */
    private val pages = new LazyMap[String, Option[Page]]

    /** The index */
    private val index = new LazyRef[Option[Page]]

    /** Nav links */
    private val navLinks = new LazyRef[Seq[NavLink]]

    /** {@inheritDoc} */
    override def siteInfo: SiteInfo = inner.siteInfo

    /** {@inheritDoc} */
    override def getPage ( slug: String )
        = pages.get( slug, () => inner.getPage(slug) )

    /** {@inheritDoc} */
    override def getIndex = index( inner.getIndex )

    /** {@inheritDoc} */
    override protected def getRawNavLinks = navLinks( inner.getNavLinks )
}
| Nycto/Shnappy | src/main/scala/SiteData.scala | Scala | mit | 1,698 |
package com.ing.baker.runtime.akka.actor.serialization
import akka.serialization.{Serializer, SerializerWithStringManifest}
import com.google.protobuf.ByteString
import com.ing.baker.runtime.akka.actor.protobuf
import com.ing.baker.runtime.serialization.ProtoMap.versioned
import com.ing.baker.runtime.serialization.ProtoMap
/** ProtoMap mapping between arbitrary AnyRef values and the SerializedData
  * protobuf message, delegating to Akka's serializer registry and applying
  * the provider's encryption to the serialized bytes. */
object SerializedDataProto {

  implicit def akkaAnyRefMapping(implicit provider: AkkaSerializerProvider): ProtoMap[AnyRef, protobuf.SerializedData] = {
    import scala.util.{Failure, Success, Try}
    new ProtoMap[AnyRef, protobuf.SerializedData] {

      val companion = protobuf.SerializedData

      // Serialize with the Akka serializer chosen for obj, then encrypt the
      // bytes; record the serializer id and manifest so fromProto can reverse it.
      override def toProto(obj: AnyRef): protobuf.SerializedData = {
        val serializer: Serializer = provider.getSerializerFor(obj)
        val bytes = provider.encryption.encrypt(serializer.toBinary(obj))
        val manifest = serializer match {
          case s: SerializerWithStringManifest => s.manifest(obj)
          case _ => if (obj != null) obj.getClass.getName else ""
        }
        protobuf.SerializedData(
          serializerId = Some(serializer.identifier),
          manifest = Some(manifest),
          data = Some(ByteString.copyFrom(bytes))
        )
      }

      // Mirror of toProto: look up the serializer by id, decrypt, then
      // deserialize using the recorded manifest (as string or as a Class).
      override def fromProto(message: protobuf.SerializedData): Try[AnyRef] =
        for {
          serializerId <- versioned(message.serializerId, "serializerId")
          manifest <- versioned(message.manifest, "manifest")
          bytes <- versioned(message.data, "data")
          serializer <- provider.serializerByIdentity(serializerId) match {
            case Some(serializer) => Success(serializer)
            case None => Failure(new IllegalStateException(s"No serializer found with id $serializerId"))
          }
          decryptedBytes = provider.encryption.decrypt(bytes.toByteArray)
        } yield
          serializer match {
            case s: SerializerWithStringManifest => s.fromBinary(decryptedBytes, manifest)
            case _ =>
              // Older-style serializers take an optional Class; an unknown
              // manifest class name degrades to None rather than failing here.
              val optionalClass = Try {
                Class.forName(manifest)
              }.toOption
              serializer.fromBinary(decryptedBytes, optionalClass)
          }
    }
  }
}
| ing-bank/baker | core/akka-runtime/src/main/scala/com/ing/baker/runtime/akka/actor/serialization/SerializedDataProto.scala | Scala | mit | 2,174 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.tutorial
import cc.factorie.directed.{Discrete, _}
import cc.factorie.infer.Maximize
import cc.factorie.variable.{DenseProportions1, DiscreteDomain, DiscreteVariable, ProportionsVariable}
/** A simple example: draw 1000 die rolls from a known distribution, then
  * re-estimate the die's parameters from those samples via Maximize. */
object MultinomialDemo {
  val numSides = 6
  object RollDomain extends DiscreteDomain(numSides)
  class Roll extends DiscreteVariable { def domain = RollDomain }
  implicit val model = DirectedModel() // ItemizedDirectedModel
  def main(args:Array[String]) : Unit = {
    implicit val random = new scala.util.Random(0)
    // The true (to-be-recovered) distribution over the six faces
    val weights = new ProportionsVariable(new DenseProportions1(Array(.1, .2, .3, .2, .15, .05)))
    val rolls = (1 to 1000).map { _ => new Roll :~ Discrete(weights) }
    Maximize(weights)
  }
}
| melisabok/factorie | src/main/scala/cc/factorie/tutorial/MultinomialDemo.scala | Scala | apache-2.0 | 1,632 |
package edison.search.tree
import edison.search.{ Sample, Samples, Value }
import scala.util.Random
/** UCT/MCTS Search Tree (base trait) */
trait Tree {
  /** Samples accumulated in this subtree. */
  def samples: Samples
  /** Child subtrees; empty for a leaf. */
  def children: List[Tree]

  /** @return may return unchanged tree if split was not possible */
  def split: Tree

  /** Generates a value for the given position; callers pass a number in
    * [0, 1) (see [[generateSample]]). */
  def generateSampleAt(range: Double): Value
  /** Whether `value` belongs to this subtree's domain. */
  def contains(value: Value): Boolean

  /** Copy of this tree with the given samples. */
  def updated(samples: Samples): Tree
  /** Copy of this tree with the given children. */
  def withChildren(children: List[Tree]): Tree

  /** Copy of this tree with `sample` recorded; the sample's value must lie
    * within this subtree (asserted). */
  def addSample(sample: Sample): Tree = {
    assert(contains(sample.value), s"$this cannot contain $sample")
    updated(samples = samples.add(sample))
  }

  /** A uniformly random sample from this subtree's domain. */
  def generateSample: Value = generateSampleAt(Random.nextFloat())
}
/**
 * Search domain subspace.
 *
 * It's used only for pattern matching — see the [[Node]], [[Leaf]] and
 * [[Tree]] extractor objects below.
 */
trait Subspace
/** A Tree with at least one child */
object Node {
  /** Extracts the subspace and the (non-empty) child list of a tree. */
  def unapplySeq(tree: Tree): Option[(Subspace, List[Tree])] =
    PartialFunction.condOpt(tree) {
      // `_ :: _` matches exactly the non-empty-children case
      case IntegerTree(range, children @ _ :: _, _) => (IRange(range), children)
    }
}
/** A Tree leaf */
object Leaf {
  /** Extracts the subspace of a tree that has no children. */
  def unapply(tree: Tree): Option[Subspace] =
    PartialFunction.condOpt(tree) {
      case IntegerTree(range, Nil, _) => IRange(range)
    }
}
/** A Node or a Leaf */
object Tree {
  /** Extracts the subspace and (possibly empty) children of a tree.
    *
    * Fixes: returns None for trees matching neither [[Node]] nor [[Leaf]]
    * (previously a MatchError), keeping the extractor total for future Tree
    * implementations, and makes the deprecated auto-tupled `Some(a, b)`
    * explicit.
    */
  def unapplySeq(tree: Tree): Option[(Subspace, Seq[Tree])] = {
    tree match {
      case Node(subspace, children @ _*) => Some((subspace, children))
      case Leaf(subspace) => Some((subspace, List.empty))
      case _ => None
    }
  }
}
| pawel-wiejacha/edison | core/src/main/scala/edison/search/tree/Tree.scala | Scala | mit | 1,502 |
package objektwerks.cats.effect
import cats.effect.{IO, Resource}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import scala.io.Source
class ResourceTest extends AnyFunSuite with Matchers {
  test("auto closeable") {
    // Resource.fromAutoCloseable guarantees the Source is closed after `use`,
    // even if the assertion inside fails; unsafeRunSync actually executes the IO.
    val file = IO { Source.fromFile("build.sbt") }
    Resource
      .fromAutoCloseable(file)
      .use(source => IO( source.mkString.nonEmpty shouldBe true) )
      .unsafeRunSync()
  }
test("bracket") {
IO( Source.fromFile("build.sbt") ).bracket {
file => IO( file.mkString.nonEmpty shouldBe true )
} {
file => IO( file.close() )
}
}
} | objektwerks/typelevel | src/test/scala/objektwerks/cats/effect/ResourceTest.scala | Scala | apache-2.0 | 631 |
package org.jetbrains.plugins.scala.codeInsight.hints
import java.util
import com.intellij.codeInsight.hints.ImmediateConfigurable
import com.intellij.codeInsight.hints.settings.InlayProviderSettingsModel
import com.intellij.lang.Language
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.progress.DumbProgressIndicator
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiFile
import javax.swing.JComponent
import kotlin.Unit.{INSTANCE => kUnit}
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.codeInsight.implicits.{ImplicitHints, ImplicitHintsPass}
import org.jetbrains.plugins.scala.codeInsight.{ScalaCodeInsightBundle, ScalaCodeInsightSettings, hints}
import org.jetbrains.plugins.scala.extensions.StringExt
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import scala.jdk.CollectionConverters._
//noinspection UnstableApiUsage
/** Settings model backing the "type hints" page of the IntelliJ inlay-hints
  * settings UI: exposes the three toggles, renders the live preview, and
  * commits/rolls back changes against the global ScalaCodeInsightSettings. */
class ScalaTypeHintsSettingsModel(project: Project) extends InlayProviderSettingsModel(
  true,
  "Scala.ScalaTypeHintsSettingsModel",
  ScalaLanguage.INSTANCE
) {
  // have a temporary version of the settings, so apply/cancel mechanism works
  object settings {
    private val global = ScalaCodeInsightSettings.getInstance()
    var showMethodResultType: Boolean = _
    var showMemberVariableType: Boolean = _
    var showLocalVariableType: Boolean = _

    reset()

    // Copy the global settings into this temporary holder (cancel/initial state).
    def reset(): Unit = {
      setEnabled(global.showTypeHints)
      showMethodResultType = global.showFunctionReturnType
      showMemberVariableType = global.showPropertyType
      showLocalVariableType = global.showLocalVariableType
    }

    // Write the temporary values back to the global settings (OK/Apply).
    def apply(): Unit = {
      global.showTypeHints = isEnabled
      global.showFunctionReturnType = showMethodResultType
      global.showPropertyType = showMemberVariableType
      global.showLocalVariableType = showLocalVariableType
    }

    def isModified: Boolean =
      global.showTypeHints != isEnabled ||
        global.showFunctionReturnType != showMethodResultType ||
        global.showPropertyType != showMemberVariableType ||
        global.showLocalVariableType != showLocalVariableType
  }

  // One checkbox per hint kind; each writes straight into the temporary settings.
  override def getCases: util.List[ImmediateConfigurable.Case] = Seq(
    new ImmediateConfigurable.Case(
      ScalaCodeInsightBundle.message("member.variables"),
      "Scala.ScalaTypeHintsSettingsModel.showMemberVariableType",
      () => settings.showMemberVariableType,
      b => {
        settings.showMemberVariableType = b
        kUnit
      },
      null),
    new ImmediateConfigurable.Case(
      ScalaCodeInsightBundle.message("method.results"),
      "Scala.ScalaTypeHintsSettingsModel.showMethodResultType",
      () => settings.showMethodResultType,
      b => {
        settings.showMethodResultType = b
        kUnit
      },
      null),
    new ImmediateConfigurable.Case(
      ScalaCodeInsightBundle.message("local.variables"),
      "Scala.ScalaTypeHintsSettingsModel.showLocalVariableType",
      () => settings.showLocalVariableType,
      b => {
        settings.showLocalVariableType = b
        kUnit
      },
      null)
  ).asJava

  // No custom settings component beyond the generated checkboxes.
  override def getComponent: JComponent = null

  override def getMainCheckBoxLabel: String = ScalaCodeInsightBundle.message("show.type.hints.for")

  override def getName: String = ScalaCodeInsightBundle.message("type.hints")

  // Sample code rendered in the settings preview; null for the default project
  // (no editor context to render hints in).
  override def getPreviewText: String = {
    if (project.isDefault)
      return null

    """
      |class Person {
      |  val birthYear = 5 + 5
      |
      |  def ageInYear(year: Int) = {
      |    val diff = year - birthYear
      |    math.max(0, diff)
      |  }
      |}
      |""".stripMargin.withNormalizedSeparator
  }

  override def apply(): Unit = {
    settings.apply()
    ImplicitHints.updateInAllEditors()
  }

  // Renders hints into the preview editor using the TEMPORARY settings, so the
  // preview reflects unapplied checkbox changes.
  override def collectAndApply(editor: Editor, psiFile: PsiFile): Unit = {
    val previewPass = new ImplicitHintsPass(editor, psiFile.asInstanceOf[ScalaFile], new hints.ScalaHintsSettings.Defaults {
      override def showMethodResultType: Boolean = settings.showMethodResultType
      override def showMemberVariableType: Boolean = settings.showMemberVariableType
      override def showLocalVariableType: Boolean = settings.showLocalVariableType
      override def showMethodChainInlayHints: Boolean = false
      override def showObviousType: Boolean = true // always show obvious types in the preview
    })
    previewPass.doCollectInformation(DumbProgressIndicator.INSTANCE)
    previewPass.doApplyInformationToEditor()
  }

  override def isModified: Boolean = settings.isModified

  override def reset(): Unit = {
    settings.reset()
  }

  override def getDescription: String = null

  override def getCaseDescription(aCase: ImmediateConfigurable.Case): String = null

  override def getCasePreview(aCase: ImmediateConfigurable.Case): String = null

  override def getCasePreviewLanguage(aCase: ImmediateConfigurable.Case): Language = ScalaLanguage.INSTANCE
}
| JetBrains/intellij-scala | scala/codeInsight/src/org/jetbrains/plugins/scala/codeInsight/hints/ScalaTypeHintsSettingsModel.scala | Scala | apache-2.0 | 4,942 |
package com.oni.udash.styles
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.language.postfixOps
import com.oni.udash.styles.utils.MediaQueries
import com.oni.udash.styles.utils.StyleUtils
import scalacss.DevDefaults.StyleSheet
import scalacss.DevDefaults.cssComposition
import scalacss.DevDefaults.cssRegister
import scalacss.internal.Compose
/** Standalone scalacss stylesheet: rules are attached to raw CSS selectors
  * (ids/classes) rather than exposed as Scala values. Styles a 3D "cube"
  * demo (#box_cs) and its six face panels. */
object CsStyles2 extends StyleSheet.Standalone {
  import dsl._

  "#sqd" - (
    padding(1 rem),
    minHeight(1 rem),
    minWidth(10 rem),
    &.hover(
      backgroundColor(brown),
      border( 1 px, solid, gray)
    )
  )

  "#box_cs figure" - (
    display.block,
    position.absolute,
    //top( -30 px ),
    padding( 2 rem ),
    border( 2 px, solid, white), // black;
    lineHeight( 22 px),
    fontSize(17 px),
    textAlign.left,
    //fontWeight.bold,
    transition := "transform 2s",
    color(white)
  )

  ".CsStyles-middleSq li" - (
    fontSize(15 px),
    &.hover(
      backgroundColor:=!"#aa0000"
    )
  )

  // "#box_cs.panels-backface-invisible figure" - (
  //   backfaceVisibility.hidden
  // )

  // Cube dimensions: width, height, depth
  val xw = 300 px
  val yw = 300 px
  val zw = 300 px

  "#box_cs" - (
    width( 100%%),
    height(100%%),
    position.absolute,
    transformStyle.preserve3D,
    //transformStyle("preserve-3d"),
    transition := "transform 2s"
  )

  // Face sizes follow from the cube dimensions above.
  "#box_cs .CsStyles-frontSide, #box_cs .CsStyles-backSide" - (
    width(xw),
    height(yw)
  )
  "#box_cs .CsStyles-rightSide, #box_cs .CsStyles-leftSide" - (
    width(zw),
    height(yw)
    //left(100 px)
  )
  "#box_cs .CsStyles-topSide, #box_cs .CsStyles-bottomSide" - (
    width(xw),
    height(zw),
    //top(50 px),
    lineHeight(20 px)
  )

  // Disabled per-face "show" rotations, kept for reference:
  // "#box_cs.show-frontSide" - (
  //   transform:= "translateZ( -50px )"
  // )
  // "#box_cs.show-backSide" - (
  //   transform:= "translateZ( -50px ) rotateY( 10deg )"
  // )
  // "#box_cs.show-rightSide" - (
  //   transform:= "translateZ( -50px ) rotateY( 20deg )"
  // )
  // "#box_cs.show-leftSide" - (
  //   transform:= "translateZ( -50px ) rotateY( 30deg )"
  // )
  // "#box_cs.show-topSide" - (
  //   transform:= "translateZ( -50px ) rotateY( 40deg )"
  // )
  // "#box_cs.show-bottomSide" - (
  //   transform:= "translateZ( -50px ) rotateY( 50deg )"
  // )
}
/** Inline scalacss stylesheet: each `style(...)` is exposed as a Scala value
  * whose generated class name is referenced from the UI code. */
object CsStyles extends StyleSheet.Inline {
  import dsl._

  // Link with an animated underline that grows in on hover.
  val underlineLink = style(
    position.relative,
    display.block,
    color.white,
    &.after(
      StyleUtils.transition(transform, new FiniteDuration(250, TimeUnit.MILLISECONDS)),
      position.absolute,
      top(100 %%),
      left(`0`),
      content := "\" \"",
      width(100 %%),
      borderBottomColor.white,
      borderBottomWidth(1 px),
      borderBottomStyle.solid,
      transform := "scaleX(0)",
      transformOrigin := "100% 50%"
    ),
    &.hover(
      color.white,
      cursor.pointer,
      textDecoration := "none",
      &.after (
        transformOrigin := "0 50%",
        transform := "scaleX(1)"
      )
    )
  )

  // Black variant of underlineLink; Compose.trust silences the
  // conflicting-property warnings from the style composition.
  val underlineLinkBlack = style(
    underlineLink,
    display.inlineBlock,
    color.black,
    &.after(
      borderBottomColor.black
    ),
    &.hover (
      color.black
    )
  )(Compose.trust)

  val linkHoverAnimation = keyframes(
    (0 %%) -> keyframe(color.black),
    (50 %%) -> keyframe(color.red),
    (100 %%) -> keyframe(color.black))

  // Z-axis positions for the perspective demo.
  val far = style(
    transform := "translateZ( -1000px )")
  val near = style(
    top:=! "-160px !important",
    transform := "translateZ( 10px ) !important")

  // Three equally sized floated squares.
  val leftSq = style(
    float.left,
    //border( 1 px, solid, yellow ),
    width( 300 px ),
    height( 200 px )
  )
  val middleSq = style(
    float.left,
    //border( 1 px, solid, yellow ),
    paddingLeft(10 px),
    width( 300 px ),
    height( 200 px )
  )
  val rightSq = style(
    float.left,
    //border( 1 px, solid, yellow ),
    paddingLeft(10 px),
    width( 300 px ),
    height( 200 px )
  )

  val newSqLink = style(
    fontSize(45 px),
    backgroundColor(yellow),
    width( 300 px ),
    height( 200 px ),
    color(black),
    &.visited(
      color(blue)
    )
  )

  val sqContainer = style(
    width( 100 %% )
    //height( 400 px )
  )

  // Container establishing the 3D perspective for the cube faces below.
  val cubeContainer = style(
    paddingTop( 120 px ),
    width( 300 px ),
    height( 300 px ),
    position.relative,
    margin( 0 px, auto, 20 px),
    //border( 1 px, solid, yellow ), // , color.yellow),
    //-webkit-perspective: 1200px;
    // -moz-perspective: 1200px;
    // -o-perspective: 1200px;
    perspective(1200 px)
  )
  val cubebox_cs = style(
    width( 100%%),
    height(100%%),
    position.absolute,
    transformStyle.preserve3D,
    //transformStyle("preserve-3d"),
    transition := "transform 1s"
  )

  // One distinct RGB color per cube face, all at the same intensity.
  val intensity = 80
  val frontColor = rgb( 0, 0, intensity )
  val backColor = rgb( 0, intensity, 0 )
  val leftColor = rgb( intensity, 0, 0 )
  val rightColor = rgb( 0, intensity, intensity )
  val topColor = rgb( intensity, 0, intensity )
  val bottomColor = rgb( intensity, intensity, 0 )

  // Shared Z translation applied to every face; faces differ only in color
  // and horizontal offset.
  val zxlate = -1300

  val frontSide = style(
    background := frontColor, // := hsla( 0, 100 %%, 50 %%, 0.7 ),
    left(20 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
  val backSide = style(
    background := backColor, // hsla( 160, 100 %%, 50 %%, 0.7 ),
    left(40 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
  val rightSide = style(
    background := rightColor, // hsla( 120, 100 %%, 50%%, 0.7 ),
    left(60 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
  val leftSide = style(
    background := leftColor, // hsla( 180, 100%%, 50%%, 0.7 ),
    left(80 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
  val topSide = style(
    background := topColor, // hsla( 240, 100%%, 50%%, 0.7 ),
    left(100 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
  val bottomSide = style(
    background := bottomColor, // hsla( 300, 100%%, 50%%, 0.7 ),
    left(120 px),
    top(0 px),
    transform := s"translate3d( 30px, 200px, ${zxlate}px ) rotateX(80deg)")
}
| ObjectNirvana/oni-web | frontend/src/main/scala/com/oni/udash/styles/CsStyles.scala | Scala | epl-1.0 | 6,463 |
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.tuplejump.calliope.cql3
import java.nio.ByteBuffer
import com.tuplejump.calliope.{Cql3CasBuilder, CasBuilder}
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import com.tuplejump.calliope.Types.{CQLRowMap, CQLRowKeyMap}
import scala.annotation.implicitNotFound
/** Enrichment over SparkContext providing cql3Cassandra overloads that build
  * RDDs from CQL3 column families, with (un)marshalling supplied implicitly. */
class Cql3CassandraSparkContext(self: SparkContext) {

  /**
   *
   * Create a RDD[T] from data fetched from the mentioned Cassandra keyspace and column family accessible
   * at mentioned host and port
   *
   * @param host Host for the initial cassandra connection
   * @param port Port for the initial cassandra connection
   * @param keyspace Keyspace to read from
   * @param columnFamily Column Family to read from
   * @param unmarshaller The transformer to use
   * @tparam T The type of RDD to return
   * @return RDD[T]
   */
  @implicitNotFound(
    "No transformer found for (CQLRowKeyMap, CQLRowMap) => ${T}. You must have an implicit method defined of type (CQLRowKeyMap, CQLRowMap) => ${T}"
  )
  def cql3Cassandra[T](host: String, port: String, keyspace: String, columnFamily: String)
                      (implicit unmarshaller: (CQLRowKeyMap, CQLRowMap) => T,
                       tm: Manifest[T]): RDD[T] = {
    val cas = CasBuilder.cql3.withColumnFamily(keyspace, columnFamily).onHost(host).onPort(port)
    this.cql3Cassandra[T](cas)
  }

  /**
   *
   * Create a RDD[T] from data fetched from the mentioned Cassandra keyspace and column family accessible
   * at localhost:9160
   *
   * @param keyspace Keyspace to read from
   * @param columnFamily Column Family to read from
   * @param unmarshaller The transformer to use
   * @tparam T The type of RDD to return
   * @return RDD[T]
   */
  @implicitNotFound(
    "No transformer found for (CQLRowKeyMap, CQLRowMap) => ${T}. You must have an implicit method defined of type (CQLRowKeyMap, CQLRowMap) => ${T}"
  )
  def cql3Cassandra[T](keyspace: String, columnFamily: String)
                      (implicit unmarshaller: (CQLRowKeyMap, CQLRowMap) => T,
                       tm: Manifest[T]): RDD[T] = {
    val cas = CasBuilder.cql3.withColumnFamily(keyspace, columnFamily)
    this.cql3Cassandra[T](cas)
  }

  /**
   *
   * Create a RDD[K, V] from data fetched from the mentioned Cassandra keyspace and column family accessible
   * at localhost:9160
   *
   * @param keyspace Keyspace to read from
   * @param columnFamily Column Family to read from
   * @param keyUnmarshaller Transformer to get the key from the Cassandra data
   * @param rowUnmarshaller Transformer to get the value from the Cassandra data
   * @tparam K Type of the Key
   * @tparam V Type of the Value
   * @return RDD[K, V]
   */
  @implicitNotFound(
    "No transformer found for CQLRowKeyMap => ${K} or CQLRowMap => ${V}. You must have implicit methods for each of these."
  )
  def cql3Cassandra[K, V](keyspace: String, columnFamily: String)
                         (implicit keyUnmarshaller: CQLRowKeyMap => K,
                          rowUnmarshaller: CQLRowMap => V,
                          km: Manifest[K], kv: Manifest[V]): RDD[(K, V)] = {
    val cas = CasBuilder.cql3.withColumnFamily(keyspace, columnFamily)
    this.cql3Cassandra[K, V](cas)
  }

  /**
   *
   * Create a RDD[K, V] from data fetched from the mentioned Cassandra keyspace and column family accessible
   * at mentioned host and port
   *
   * @param host Cassandra node for initial connection
   * @param port Port for the initial cassandra connection
   * @param keyspace Keyspace to read from
   * @param columnFamily Column Family to read from
   * @param keyUnmarshaller Transformer to get the key from the Cassandra data
   * @param rowUnmarshaller Transformer to get the value from the Cassandra data
   * @tparam K Type of the Key
   * @tparam V Type of the Value
   * @return RDD[K, V]
   */
  @implicitNotFound(
    "No transformer found for CQLRowKeyMap => ${K} or CQLRowMap => ${V}. You must have implicit methods for each of these."
  )
  def cql3Cassandra[K, V](host: String, port: String, keyspace: String, columnFamily: String)
                         (implicit keyUnmarshaller: CQLRowKeyMap => K,
                          rowUnmarshaller: CQLRowMap => V,
                          km: Manifest[K], kv: Manifest[V]): RDD[(K, V)] = {
    val cas = CasBuilder.cql3.withColumnFamily(keyspace, columnFamily).onHost(host).onPort(port)
    this.cql3Cassandra[K, V](cas)
  }

  /**
   * Create a RDD[T] from data fetched from the configured Cassandra keyspace and column family accessible
   * at configured host and port.
   *
   * @param cas The configuration to use with Cassandra
   * @param unmarshaller The transformer to use
   * @tparam T The type of RDD to return
   * @return RDD[T]
   */
  @implicitNotFound(
    "No transformer found for (CQLRowKeyMap, CQLRowMap) => ${T}. You must have an implicit method defined of type (CQLRowKeyMap, CQLRowMap) => ${T}"
  )
  def cql3Cassandra[T](cas: Cql3CasBuilder)
                      (implicit unmarshaller: (CQLRowKeyMap, CQLRowMap) => T,
                       tm: Manifest[T]): RDD[T] = {
    new Cql3CassandraRDD[T](self, cas, unmarshaller)
  }

  /**
   * Create a RDD[K, V] from data fetched from the configured Cassandra keyspace and column family accessible
   * at configured host and port.
   *
   * @param cas The configuration to use with Cassandra
   * @param keyUnmarshaller Transformer to get the key from the Cassandra data
   * @param rowUnmarshaller Transformer to get the value from the Cassandra data
   * @tparam K Type of the Key
   * @tparam V Type of the Value
   * @return RDD[K, V]
   */
  @implicitNotFound(
    "No transformer found for CQLRowKeyMap => ${K} or CQLRowMap => ${V}. You must have implicit methods for each of these."
  )
  def cql3Cassandra[K, V](cas: Cql3CasBuilder)
                         (implicit keyUnmarshaller: CQLRowKeyMap => K,
                          rowUnmarshaller: CQLRowMap => V,
                          km: Manifest[K], kv: Manifest[V]): RDD[(K, V)] = {
    // Fuse the key and row unmarshallers into the single-function form the
    // RDD-of-T overload expects.
    implicit def xmer = Cql3CasHelper.kvTransformer(keyUnmarshaller, rowUnmarshaller)
    this.cql3Cassandra[(K, V)](cas)
  }
}
private object Cql3CasHelper {
/** Fuses a key unmarshaller and a row unmarshaller into one (key, row) => (K, V) function. */
def kvTransformer[K, V](keyUnmarshaller: CQLRowKeyMap => K,
rowUnmarshaller: CQLRowMap => V) = {
  (key: CQLRowKeyMap, row: CQLRowMap) => (keyUnmarshaller(key), rowUnmarshaller(row))
}
} | brenttheisen/calliope | src/main/scala/com/tuplejump/calliope/cql3/Cql3CassandraSparkContext.scala | Scala | apache-2.0 | 7,246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.parser
import scala.collection.mutable
import scala.util.control.NonFatal

import org.antlr.v4.runtime.tree.TerminalNode

import org.apache.spark.sql.{CarbonSession, SparkSession}
import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, SqlBaseParser}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkSqlAstBuilder
import org.apache.spark.sql.execution.command.PartitionerField
import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.util.CarbonException
import org.apache.spark.util.CarbonReflectionUtils

import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.spark.util.CarbonScalaUtil
/**
 * Concrete parser for Spark SQL statements and carbon specific
 * statements.
 */
class CarbonSparkSqlParser(conf: SQLConf, sparkSession: SparkSession) extends AbstractSqlParser {

  val parser = new CarbonSpark2SqlParser
  val astBuilder = CarbonReflectionUtils.getAstBuilder(conf, parser, sparkSession)

  private val substitutor = new VariableSubstitution(conf)

  /**
   * Parses `sqlText` with the Spark parser first; if that fails with a non-fatal
   * error, retries with the carbon-specific parser. When both fail, both error
   * messages are reported together. Parser thread-locals are cleaned on the
   * success path and on the carbon-command error path.
   */
  override def parsePlan(sqlText: String): LogicalPlan = {
    CarbonSession.updateSessionInfoToCurrentThread(sparkSession)
    try {
      val parsedPlan = super.parsePlan(sqlText)
      CarbonScalaUtil.cleanParserThreadLocals
      parsedPlan
    } catch {
      case ce: MalformedCarbonCommandException =>
        CarbonScalaUtil.cleanParserThreadLocals
        throw ce
      // Catch only non-fatal throwables: fatal VM errors (OutOfMemoryError,
      // StackOverflowError, ...) must propagate instead of being swallowed by
      // the fallback parse attempt.
      case NonFatal(ex) =>
        try {
          parser.parse(sqlText)
        } catch {
          case mce: MalformedCarbonCommandException =>
            throw mce
          case NonFatal(e) =>
            CarbonException.analysisException(
              s"""== Parse1 ==
                 |${ex.getMessage}
                 |== Parse2 ==
                 |${e.getMessage}
               """.stripMargin.trim)
        }
    }
  }

  /** Applies variable substitution to the command before delegating to the base parser. */
  protected override def parse[T](command: String)(toResult: SqlBaseParser => T): T = {
    super.parse(substitutor.substitute(command))(toResult)
  }
}
/**
 * AST builder extending Spark's [[SparkSqlAstBuilder]] with helpers for building
 * carbon CREATE TABLE plans out of the ANTLR parse contexts.
 */
class CarbonHelperSqlAstBuilder(conf: SQLConf,
parser: CarbonSpark2SqlParser,
sparkSession: SparkSession)
extends SparkSqlAstBuilder(conf) {
/**
* Parse a key-value map from a [[TablePropertyListContext]], assuming all values are specified.
*/
def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String] = {
val props = visitTablePropertyList(ctx)
CarbonSparkSqlParserUtil.visitPropertyKeyValues(ctx, props)
}
// Null-safe variant: returns an empty map when no TBLPROPERTIES clause is present.
def getPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String]
= {
Option(ctx).map(visitPropertyKeyValues)
.getOrElse(Map.empty)
}
/**
 * Builds the logical plan for a carbon CREATE TABLE statement from the tuple of
 * parse contexts (header, skew/bucket specs, partition and column lists, properties,
 * location, comment, CTAS marker, query and provider). Validation of columns and
 * partition fields is delegated to CarbonSparkSqlParserUtil.
 */
def createCarbonTable(createTableTuple: (CreateTableHeaderContext, SkewSpecContext,
BucketSpecContext, ColTypeListContext, ColTypeListContext, TablePropertyListContext,
LocationSpecContext, Option[String], TerminalNode, QueryContext, String)): LogicalPlan = {
// val parser = new CarbonSpark2SqlParser
val (tableHeader, skewSpecContext,
bucketSpecContext,
partitionColumns,
columns,
tablePropertyList,
locationSpecContext,
tableComment,
ctas,
query,
provider) = createTableTuple
val (tableIdentifier, temp, ifNotExists, external) = visitCreateTableHeader(tableHeader)
val cols: Seq[StructField] = Option(columns).toSeq.flatMap(visitColTypeList)
val colNames: Seq[String] = CarbonSparkSqlParserUtil
.validateCreateTableReqAndGetColumns(tableHeader,
skewSpecContext,
bucketSpecContext,
columns,
cols,
tableIdentifier,
temp)
// Explicit LOCATION clause, if any.
val tablePath: Option[String] = if (locationSpecContext != null) {
Some(visitLocationSpec(locationSpecContext))
} else {
None
}
// Mutable copy of the properties: downstream validation mutates this map.
val tableProperties = mutable.Map[String, String]()
val properties: Map[String, String] = getPropertyKeyValues(tablePropertyList)
properties.foreach{property => tableProperties.put(property._1, property._2)}
// validate partition clause
val partitionByStructFields = Option(partitionColumns).toSeq.flatMap(visitColTypeList)
val partitionFields = CarbonSparkSqlParserUtil.
validatePartitionFields(partitionColumns, colNames, tableProperties,
partitionByStructFields)
// validate for create table as select
val selectQuery = Option(query).map(plan)
val extraTableTuple = (cols, external, tableIdentifier, ifNotExists, colNames, tablePath,
tableProperties, properties, partitionByStructFields, partitionFields,
parser, sparkSession, selectQuery)
CarbonSparkSqlParserUtil
.createCarbonTable(createTableTuple, extraTableTuple)
}
}
/** Mixin for AST builders that must resolve the file storage format of a CREATE statement. */
trait CarbonAstTrait {
  /** Returns the storage format string declared by the given file-format clause. */
  def getFileStorage(createFileFormat: CreateFileFormatContext): String
}
| jatin9896/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala | Scala | apache-2.0 | 5,704 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.reflect
package runtime
import java.lang.Thread._
// Per-thread storage keyed by the Thread object itself; the WeakHashMap keys let
// entries disappear once a thread is no longer strongly reachable.
private[reflect] trait ThreadLocalStorage {
self: SymbolTable =>
// see a discussion at scala-internals for more information:
// http://groups.google.com/group/scala-internals/browse_thread/thread/337ce68aa5e51f79
trait ThreadLocalStorage[T] { def get: T; def set(newValue: T): Unit }
private class MyThreadLocalStorage[T](initialValue: => T) extends ThreadLocalStorage[T] {
// TODO: how do we use org.cliffc.high_scale_lib.NonBlockingHashMap here?
// (we would need a version that uses weak keys)
private[this] val values = java.util.Collections.synchronizedMap(new java.util.WeakHashMap[Thread, T]())
// Lazily initializes this thread's slot: `initialValue` is by-name and is
// evaluated at most once per thread, on first access.
def get: T = {
if (values containsKey currentThread) values.get(currentThread)
else {
val value = initialValue
// since the key is currentThread, and `values` is private, it
// would be impossible for a value to have been set after the
// above containsKey check. `putIfAbsent` is not necessary.
values.put(currentThread, value)
value
}
}
def set(newValue: T): Unit = {
values.put(currentThread, newValue)
}
}
// Factory for the storage; `x` is evaluated lazily, per thread.
@inline final def mkThreadLocalStorage[T](x: => T): ThreadLocalStorage[T] = new MyThreadLocalStorage(x)
}
| martijnhoekstra/scala | src/reflect/scala/reflect/runtime/ThreadLocalStorage.scala | Scala | apache-2.0 | 1,596 |
package com.featurefm.riversong.metrics.reporting
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
/**
 * Scheduled reporter that periodically dumps all registered metrics to an SLF4J
 * logger named by the `logger` key of the supplied configuration.
 */
class Slf4jReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter {

  // Built lazily so that configuration and the metric registry are ready first.
  lazy val reporter = getReporter

  /**
   * This is the method that gets called so that the metrics
   * reporting can occur.
   */
  def report(): Unit = {
    val registry = metrics.metricRegistry
    reporter.report(
      registry.getGauges,
      registry.getCounters,
      registry.getHistograms,
      registry.getMeters,
      registry.getTimers)
  }

  /** Assembles the underlying codahale SLF4J reporter (seconds for rates, millis for durations). */
  private[reporting] def getReporter: com.codahale.metrics.Slf4jReporter = {
    val target = LoggerFactory.getLogger(config.getString("logger"))
    com.codahale.metrics.Slf4jReporter
      .forRegistry(metrics.metricRegistry)
      .outputTo(target)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build
  }
}
| ListnPlay/RiverSong | src/main/scala/com/featurefm/riversong/metrics/reporting/Slf4jReporter.scala | Scala | mit | 1,003 |
package com.bwsw.cloudstack.pulse.validators
/**
* Created by Ivan Kudryavtsev on 28.07.17.
*/
/** Base contract for request-parameter validators. */
abstract class Validator {
  /** Builds the error message for the given parameters. */
  protected def onError(params: Map[String, String]): String
  /** Runs the validation against the given parameters. */
  def validate(params: Map[String, String]): Either[String, String]
}

/**
 * Minimal validator bound to a single field name. It performs no actual checks:
 * `onError` always yields the empty string and `validate` always yields `Left("")`.
 */
class PrimitiveValidator(field: String) extends Validator {
  /** Name of the field this validator is attached to. */
  def fieldName: String = field
  override protected def onError(params: Map[String, String]): String = ""
  override def validate(params: Map[String, String]): Either[String, String] = Left("")
}
| bwsw/cs-pulse-server | src/main/scala-2.12/com/bwsw/cloudstack/pulse/validators/PrimitiveValidator.scala | Scala | apache-2.0 | 511 |
package chee.metadata
import chee.FileLoan
import chee.properties.{ FormatPatterns, LazyMap, MapGet, TrueCondition }
import org.scalatest.{ FlatSpec, Matchers }
import scala.util.{ Failure, Success, Try }
/**
 * Specs for [[MetadataFile]]: writing records, updating tags/comments in place,
 * de-duplication by id, and query behaviour on missing files/properties.
 */
class MetadataFileTest extends FlatSpec with Matchers with FileLoan {
"write" should "create a new file" in withNewFile { f =>
val map = MapGet.seq(
mapget.setTags(Tag("car"), Tag("bus")),
mapget.setComment("a comment so fine"),
mapget.setId("e53")).toMap.result(LazyMap())
val mf = MetadataFile(f).write(Seq(map))
val res = mf.find(TrueCondition)
res should have size (1)
FormatPatterns.lisp.result(res.head) should be (FormatPatterns.lisp.result(map))
}
it should "change tags" in withTags(Tag("car"), Tag("bus")) { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.setTags(Tag("bike")).toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("tag:bus") should be (0)
nmf.querySize("tag:bike") should be (1)
}
it should "change comment" in withComment("brown fox") { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.setComment("blue fox").toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("comment:*brown*") should be (0)
nmf.querySize("comment:*blue*") should be (1)
}
it should "remove tags" in withMetadata("blue fox", Tag("car"), Tag("bus")) { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.removeAllTags.toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("comment:*blue*") should be (1)
nmf.querySize("tag?") should be (0)
}
it should "add new tags" in withMetadata("blue fox", Tag("car"), Tag("bus")) { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.addTags(Tag("moto")).toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("tag:moto") should be (1)
nmf.querySize("tag:bus") should be (1)
nmf.querySize("tag:car") should be (1)
}
it should "add remove tags" in withMetadata("blue fox", Tag("car"), Tag("bus")) { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.removeTags(Tag("car")).toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("tag:bus") should be (1)
nmf.querySize("tag:car") should be (0)
}
it should "remove comment" in withMetadata("blue fox", Tag("car"), Tag("bus")) { mf =>
val map = mf.find(TrueCondition).toList(0)
val next = mapget.setComment("").toMap.result(map)
val nmf = mf.write(Seq(next))
nmf.querySize("comment?") should be (0)
nmf.querySize("tag:car") should be (1)
}
// Two records sharing the same id collapse into one on write.
it should "not add duplicates" in withNewFile { f =>
val map0 = MapGet.seq(
mapget.setTags(Tag("car"), Tag("bus")),
mapget.setComment("a comment so fine"),
mapget.setId("e53")).toMap.result(LazyMap())
val map1 = MapGet.seq(
mapget.setTags(Tag("cat"), Tag("eagle")),
mapget.setComment("a movement so great"),
mapget.setId("e53")).toMap.result(LazyMap())
val maps = Seq(map0, map1)
val mf = MetadataFile(f).write(maps)
val res = mf.find(TrueCondition)
res should have size (1)
//first wins, not a hard requirement, but interesting if it would change
FormatPatterns.lisp.result(res.head) should be (FormatPatterns.lisp.result(map0))
}
it should "change multiple records" in withNewFile { f =>
val map0 = MapGet.seq(
mapget.setTags(Tag("car"), Tag("bus")),
mapget.setComment("a comment so fine"),
mapget.setId("e53")).toMap.result(LazyMap())
val map1 = MapGet.seq(
mapget.setTags(Tag("car"), Tag("bus")),
mapget.setComment("a comment so fine"),
mapget.setId("e52")).toMap.result(LazyMap())
val maps = Seq(map0, map1)
val mf = MetadataFile(f).write(maps)
val res = mf.find(TrueCondition)
res should have size (2)
mf.querySize("comment:*fine*") should be (2)
val maps2 = MapGet.filter(maps, mapget.setComment("new comment!").map(_ => true))
val mf2 = mf.write(maps2)
mf2.find(TrueCondition) should have size (2)
mf2.querySize("comment:*fine*") should be (0)
mf2.querySize("comment:*new*") should be (2)
}
"find" should "find nothing for non existing file" in withNewFile { f =>
f.exists should be (false)
val mf = MetadataFile(f)
mf.find(TrueCondition) should be ('empty)
}
it should "find single tags" in withTags(Tag("swim")) { mf =>
mf.querySize("tag:swim") should be (1)
mf.querySize("tag:sw*") should be (1)
mf.querySize("tag:abc") should be (0)
}
it should "find tags in many" in withTags(Tag("swim"), Tag("bold")) { mf =>
mf.querySize("tag?") should be (1)
mf.querySize("tag:bold") should be (1)
mf.querySize("tag:swim") should be (1)
mf.querySize("tag:abc") should be (0)
mf.querySize("(| tag:swim tag:abc)") should be (1)
mf.querySize("(| tag:swim tag:bold)") should be (1)
}
it should "search comments" in withComment("a blue brown fox") { mf =>
mf.querySize("comment:*blue*") should be (1)
mf.querySize("comment:*fox") should be (1)
mf.querySize("comment:brown") should be (0)
}
it should "return empty for non existing properties" in {
withTags(Tag("bus")) { mf =>
mf.querySize("comment?") should be (0)
mf.querySize("comment:*b*") should be (0)
}
withComment("blue brown fox") { mf =>
mf.querySize("tag:bus") should be (0)
mf.querySize("tag?") should be (0)
}
}
// --- fixtures -----------------------------------------------------------
// Creates a fresh metadata file holding one record with the given comment/tags,
// runs the test body and dumps the file contents on failure for diagnosis.
def withMetadata(comment: String, tags: Tag*)(code: MetadataFile => Any): Unit = withNewFile { f =>
f.exists should be (false)
val mf = MetadataFile(f).write(Seq(MapGet.seq(
mapget.setTags(tags),
mapget.setComment(comment),
mapget.setId("e53")).toMap.result(LazyMap())))
Try(code(mf)) match {
case Success(_) =>
case Failure(ex) =>
println(f.contentAsString)
throw ex
}
}
def withTags(tags: Tag*)(code: MetadataFile => Any): Unit =
withMetadata("", tags: _*)(code)
def withComment(comment: String)(code: MetadataFile => Any): Unit =
withMetadata(comment)(code)
// Convenience: number of records matching a query string; fails the test on a
// query parse error.
implicit class MetadataTestOps(mf: MetadataFile) {
def querySize(q: String): Int =
mf.query(q) match {
case Right(rs) => rs.size
case Left(msg) => sys.error(msg)
}
}
}
| eikek/chee | src/test/scala/chee/metadata/MetadataFileTest.scala | Scala | gpl-3.0 | 6,356 |
package chrome.tabs.bindings
import chrome.windows.bindings.Window
import scala.scalajs.js
// Scala.js facade for the native object describing a tab removal; the member
// names mirror the browser API and must not be renamed.
@js.native
trait RemoveInfo extends js.Object {
// Id of the window the tab belonged to.
def windowId: Window.Id = js.native
// True when the tab was removed because its window is closing.
def isWindowClosing: Boolean = js.native
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/tabs/bindings/RemoveInfo.scala | Scala | mit | 228 |
package fivelangs.roc
import org.scalatest.FunSuite
class RocTest extends FunSuite {
import RocTestItems._
// Placeholder spec — add assertions once the behaviour under test is implemented.
test("basic functionality") {
}
} | julianmichael/5langs | src/test/scala/fivelangs/roc/RocTest.scala | Scala | mit | 148 |
package com.twitter.finagle.server
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.{ListeningServer, NullServer, Stack}
import java.net.SocketAddress
/**
 * Listeners provide a method, `listen`, to expose a server on
 * the given SocketAddress. `serveTransport` is called for each new
 * connection. It is furnished with a typed `Transport` representing
 * this connection.
 *
 * The returned `ListeningServer` is used to inspect the server, and
 * is also used to shut it down.
 */
trait Listener[In, Out] {
def listen(addr: SocketAddress)(serveTransport: Transport[In, Out] => Unit): ListeningServer
}
/**
 * An empty Listener that can be used as a placeholder; the returned server is
 * [[NullServer]] and `serveTransport` is never invoked.
 */
object NullListener extends Listener[Any, Any] {
  // Explicit result type: public API members should not rely on inference.
  def listen(addr: SocketAddress)(serveTransport: Transport[Any, Any] => Unit): ListeningServer = NullServer
}
/**
 * A collection of [[com.twitter.finagle.Stack.Param Stack.Params]] useful for configuring
 * a [[com.twitter.finagle.server.Listener]].
 */
object Listener {
  /**
   * A [[com.twitter.finagle.Stack.Param]] used to configure
   * the `Listener` backlog.
   *
   * @param value An option indicating the backlog size. If None,
   * the implementation default is used.
   */
  case class Backlog(value: Option[Int]) {
    def mk(): (Backlog, Stack.Param[Backlog]) =
      (this, Backlog.param)
  }

  object Backlog {
    // Explicit type on the implicit value: implicit definitions should always be
    // annotated so inference stays stable across compiler versions.
    implicit val param: Stack.Param[Backlog] = Stack.Param(Backlog(None))
  }
}
| travisbrown/finagle | finagle-core/src/main/scala/com/twitter/finagle/server/Listener.scala | Scala | apache-2.0 | 1,424 |
package kmeans
package fun
import scala.collection.GenSeq
// Strategies for picking the initial k-means centroids.
abstract sealed trait InitialSelectionStrategy
case object RandomSampling extends InitialSelectionStrategy
case object UniformSampling extends InitialSelectionStrategy
case object UniformChoice extends InitialSelectionStrategy
// Criteria deciding when the k-means iteration stops.
abstract sealed trait ConvergenceStrategy
case class ConvergedWhenSNRAbove(x: Double) extends ConvergenceStrategy
case class ConvergedAfterNSteps(n: Int) extends ConvergenceStrategy
case class ConvergedAfterMeansAreStill(eta: Double) extends ConvergenceStrategy
// Reduces an image to `colorCount` colors by clustering its pixels with k-means
// (RGB triples as 3D points), then re-rendering each pixel with its cluster mean.
class IndexedColorFilter(initialImage: Img,
colorCount: Int,
initStrategy: InitialSelectionStrategy,
convStrategy: ConvergenceStrategy) extends KMeans {
// Incremented by `converged` on every k-means iteration.
private var steps = 0
// What could we do here to speed up the computation?
val points = imageToPoints(initialImage).par
val means = initializeIndex(colorCount, points).par
/* The work is done here: */
private val newMeans = kMeans(points, means, 0.01)
/* And these are the results exposed */
def getStatus() = s"Converged after $steps steps."
def getResult() = indexedImage(initialImage, newMeans)
// Converts every pixel to a 3D point (r, g, b); iteration over columns is parallel.
private def imageToPoints(img: Img): GenSeq[Point] =
for (x <- (0 until img.width).par; y <- 0 until img.height) yield {
val rgba = img(x, y)
new Point(red(rgba), green(rgba), blue(rgba))
}
// Renders the quantized image: each pixel is replaced by its closest mean color
// (alpha forced to 1). `pts` collects the distinct means actually used.
private def indexedImage(img: Img, means: GenSeq[Point]) = {
val dst = new Img(img.width, img.height)
val pts = collection.mutable.Set[Point]()
for (x <- (0 until img.width).par; y <- 0 until img.height) yield {
val v = img(x, y)
var point = new Point(red(v), green(v), blue(v))
// NOTE(review): `pts` and `dst` are mutated from a parallel loop; each (x, y)
// writes a distinct pixel, but the shared Set update is not synchronized — confirm.
point = findClosest(point, means)
pts += point
dst(x, y) = rgba(point.x, point.y, point.z, 1d)
}
dst
}
// Produces the initial `numColors` centroids according to the configured strategy,
// then thins the candidate list down to exactly `numColors` evenly spaced picks.
private def initializeIndex(numColors: Int, points: GenSeq[Point]): GenSeq[Point] = {
val initialPoints: GenSeq[Point] =
initStrategy match {
// Evenly strided sample of the pixel sequence itself.
case RandomSampling =>
val d: Int = points.size / numColors
(0 until numColors) map (idx => points(d * idx))
// Samples proportionally to pixel density within fixed RGB sub-cubes of side `sep`.
case UniformSampling =>
val sep: Int = 32
(for (r <- 0 until 255 by sep; g <- 0 until 255 by sep; b <- 0 until 255 by sep) yield {
def inside(p: Point): Boolean =
(p.x >= (r.toDouble / 255)) &&
(p.x <= ((r.toDouble + sep) / 255)) &&
(p.y >= (g.toDouble / 255)) &&
(p.y <= ((g.toDouble + sep) / 255)) &&
(p.z >= (b.toDouble / 255)) &&
(p.z <= ((b.toDouble + sep) / 255))
val pts = points.filter(inside(_))
val cnt = pts.size * 3 * numColors / points.size
if (cnt >= 1) {
val d = pts.size / cnt
(0 until cnt) map (idx => pts(d * idx))
} else
Seq()
}).flatten
// Ignores the image: a regular lattice over the RGB cube.
case UniformChoice =>
val d: Int = math.max(1, (256 / math.cbrt(numColors.toDouble).ceil).toInt)
for (r <- 0 until 256 by d; g <- 0 until 256 by d; b <- 0 until 256 by d) yield
new Point(r.toDouble / 256,g.toDouble / 256, b.toDouble / 256)
}
val d2 = initialPoints.size.toDouble / numColors
(0 until numColors) map (idx => initialPoints((idx * d2).toInt))
}
// Signal-to-noise ratio of the quantization: sum of point magnitudes divided by
// the sum of point-to-closest-mean distances. Sequential accumulation on purpose
// (deterministic floating-point order).
private def computeSNR(points: GenSeq[Point], means: GenSeq[Point]): Double = {
var sound = 0.0
var noise = 0.0
for (point <- points) {
import math.{pow, sqrt}
val closest = findClosest(point, means)
sound += sqrt(pow(point.x, 2) + pow(point.y, 2) + pow(point.z, 2))
noise += sqrt(pow(point.x - closest.x, 2) + pow(point.y - closest.y, 2) + pow(point.z - closest.z, 2))
}
sound/noise
}
// Convergence test driven by the configured strategy; also counts iterations
// (side effect on `steps`) since it is invoked once per k-means round.
override def converged(eta: Double)(oldMeans: GenSeq[Point], newMeans: GenSeq[Point]): Boolean = {
steps += 1
convStrategy match {
case ConvergedAfterNSteps(n) =>
steps >= n
case ConvergedAfterMeansAreStill(eta) =>
super.converged(eta)(oldMeans, newMeans)
case ConvergedWhenSNRAbove(snr_desired) =>
val snr_computed = computeSNR(points, newMeans)
snr_computed >= snr_desired
}
}
} | adihubba/progfun1 | parprog1-kmeans/src/main/scala/kmeans/fun/IndexedColors.scala | Scala | mit | 4,225 |
package org.template.similarproduct
import io.prediction.controller.LServing
// Serving layer: combines the predictions of all algorithms into one response.
class Serving
extends LServing[Query, PredictedResult] {
override def serve(query: Query,
predictedResults: Seq[PredictedResult]): PredictedResult = {
// Returns the first algorithm's result only. NOTE(review): `.head` throws on an
// empty Seq — assumes the engine always configures at least one algorithm; confirm.
predictedResults.head
}
}
| wangmiao1981/PredictionIO | examples/scala-parallel-similarproduct/no-set-user/src/main/scala/Serving.scala | Scala | apache-2.0 | 271 |
package me.eax.examples.rabbitmq.pubsub
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * Message protocol for a pub/sub actor plus an ask-based [[PubSubClient]] adapter.
 */
object PubSubClientActor {
case class Publish(topic: String, msg: String)
case class Subscribe(topic: String, ref: ActorRef)
// NOTE(review): "Unsubsribe" is a typo, but the class name is part of the actor
// protocol — renaming would break existing senders/receivers.
case class Unsubsribe(topic: String, ref: ActorRef)
// Adapts the actor to the PubSubClient interface via ask (5s timeout per call).
case class AskExt(ref: ActorRef) extends PubSubClient {
implicit private val timeout = Timeout(5.seconds)
def publish(topic: String, msg: String): Future[Unit] = (ref ? Publish(topic, msg)).mapTo[Unit]
def subscribe(topic: String, r: ActorRef): Future[Unit] = (ref ? Subscribe(topic, r)).mapTo[Unit]
def unsubscribe(topic: String, r: ActorRef): Future[Unit] = (ref ? Unsubsribe(topic, r)).mapTo[Unit]
}
}
| afiskon/scala-rabbitmq-example | src/main/scala/me/eax/examples/rabbitmq/pubsub/PubSubClientActor.scala | Scala | mit | 796 |
package skinny.routing.implicits
import scala.language.implicitConversions
import skinny.controller.feature.SkinnyControllerCommonBase
import skinny.engine.Handler
import skinny.engine.constant.{ Get, HttpMethod }
import skinny.engine.routing.Route
import skinny.routing.RichRoute
/**
 * Importable object exposing the implicit conversions defined by the
 * [[RoutesAsImplicits]] trait (`import RoutesAsImplicits._`).
 */
object RoutesAsImplicits extends RoutesAsImplicits
/**
 * Enriches [[Route]] with [[RichRoute]] so routes can call `#as(Symbol)`.
 */
trait RoutesAsImplicits {

  implicit def convertRouteToRichRoute(route: Route)(implicit controller: SkinnyControllerCommonBase): RichRoute = {
    // The HTTP method is cached in the route metadata at registration time;
    // fall back to GET when the cache entry is absent.
    val cachedMethod = route.metadata
      .get(Handler.RouteMetadataHttpMethodCacheKey)
      .map(_.asInstanceOf[HttpMethod])
      .getOrElse(Get)
    new RichRoute(route, cachedMethod, controller)
  }

}
| holycattle/skinny-framework | framework/src/main/scala/skinny/routing/implicits/RoutesAsImplicits.scala | Scala | mit | 777 |
/*
* Copyright 2015 Michel Vollebregt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.flameframework.treemarker
import freemarker.template.Template
/**
 * Abstraction over a source of FreeMarker templates.
 */
trait TemplateSource {
/** Names of all templates available from this source. */
def listTemplates : Iterable[String]
/** Loads the template with the given name. */
def getTemplate(template: String) : Template
}
| flameframework/treemarker | src/main/scala/com/github/flameframework/treemarker/TemplateSource.scala | Scala | apache-2.0 | 868 |
import scala.reflect.runtime.universe._
// Fixture: methods covering every combination of (monomorphic/polymorphic) ×
// (no list / empty list / one list / two lists). Bodies are never invoked —
// only the method symbols are reflected on.
class C {
def x1: Int = ???
def x2(): Int = ???
def x3(x: Int): Int = ???
def x4(x: Int)(y: Int): Int = ???
def y1[T]: Int = ???
def y2[T](): Int = ???
def y3[T](x: Int): Int = ???
def y4[T](x: Int)(y: Int): Int = ???
}
object Test extends dotty.runtime.LegacyApp {
// Print the type parameters of each method symbol of C, in the same order as
// the original explicit call list (x1..x4 then y1..y4).
for (name <- Seq("x1", "x2", "x3", "x4", "y1", "y2", "y3", "y4"))
  println(typeOf[C].member(TermName(name)).asMethod.typeParams)
| yusuke2255/dotty | tests/pending/run/reflection-methodsymbol-typeparams.scala | Scala | bsd-3-clause | 839 |
package gcli
import org.scalatest._
/**
 * Specs for the `paddedYear` and `validateYearInput` helpers.
 */
class UtilitiesSpec extends FlatSpec with Matchers {
// paddedYear()
// ---
" A padded year" should "return a year of this century if the 2 digits are less than the current year" in {
paddedYear(10) should be (2010)
}
it should "return a year of the last century if the 2 digits are greater than the current year" in {
paddedYear(90) should be (1990)
}
it should "0-pad an integer lesser than 10" in {
paddedYear(5) should be (2005)
}
it should "echo back an integer with more than two digits" in {
paddedYear(123) should be (123)
paddedYear(1234) should be (1234)
}
// validateYearInput()
// ---
"A year passed on the CL" should "validate if the integer contains 4 digits" in {
validateYearInput(2015) should be (true)
}
it should "validate if the integer contains 2 digits" in {
validateYearInput(13) should be (true)
}
it should "validate if the integer contains 1 digits" in {
validateYearInput(9) should be (true)
}
it should "return false if integer has not 1, 2 or 4 digits" in {
validateYearInput(123) should be (false)
validateYearInput(12345) should be (false)
}
}
| sthzg/gcli-scala | src/test/scala/gcli/UtilitiesSpec.scala | Scala | mit | 1,214 |
package com.softwaremill.session.javadsl
/**
 * Scala traits with implemented methods cannot be extended directly from Java code,
 * hence this abstract-class wrapper around com.softwaremill.session.InMemoryRefreshTokenStorage.
 * See http://stackoverflow.com/questions/7637752/using-scala-traits-with-implemented-methods-in-java
 */
abstract class InMemoryRefreshTokenStorage[T]() extends com.softwaremill.session.InMemoryRefreshTokenStorage[T] | softwaremill/akka-http-session | core/src/main/java/com/softwaremill/session/javadsl/InMemoryRefreshTokenStorage.scala | Scala | apache-2.0 | 368 |
package polybench
import benchmarks.GESUMMV
import ir.ArrayTypeWSWC
import ir.ast._
import lift.arithmetic.SizeVar
import opencl.executor._
import opencl.ir._
import opencl.ir.pattern._
import org.junit.Assert._
import org.junit.Assume.assumeFalse
import org.junit.Test
// Hooks this suite into the shared executor lifecycle (TestWithExecutor presumably
// initializes/shuts down the OpenCL executor — confirm against test infrastructure).
object HighLevel extends TestWithExecutor
// High-level Lift expressions for the PolyBench linear-algebra kernels; the vals
// below are DSL terms compiled and run by the @Test methods further down.
class HighLevel {
// Symbolic sizes used in the array types.
val N = SizeVar("N")
val M = SizeVar("M")
val K = SizeVar("K")
// Matrix-matrix multiply: C[n][m] = sum_k A[n][k] * B[k][m].
val mm = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, K), N),
ArrayTypeWSWC(ArrayTypeWSWC(Float, M), K),
(A, B) => {
MapGlb(fun( aRow =>
MapSeq(fun( bCol =>
toGlobal(MapSeq(id)) o
ReduceSeq(fun((acc, y) =>
multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
), 0.0f) $ Zip(aRow, bCol)
)) o Transpose() $ B
)) $ A
})
// Matrix-vector multiply: y[n] = sum_k M[n][k] * v[k].
val mv = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, K), N),
ArrayTypeWSWC(Float, K),
(matrix, vector) =>
MapGlb(fun(row => toGlobal(MapSeq(id)) o
ReduceSeq(fun((acc, y) =>
multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
), 0.0f) $ Zip(row, vector)
)) $ matrix
)
// Scaled matrix-vector multiply: y[n] = alpha * sum_k M[n][k] * v[k].
val mvAlpha = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, K), N),
ArrayTypeWSWC(Float, K),
Float,
(matrix, vector, alpha) =>
MapGlb(fun(row => toGlobal(MapSeq(id)) o
MapSeq(fun(x => mult(x, alpha))) o
ReduceSeq(fun((acc, y) =>
multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
), 0.0f) $ Zip(row, vector)
)) $ matrix
)
// GEMV: w[n] = alpha * (M . x)[n] + beta * y[n].
val gemvKernel = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
ArrayTypeWSWC(Float, M),
ArrayTypeWSWC(Float,N),
Float,
Float,
(matrix, vectorX, vectorY, alpha, beta) => {
MapGlb(fun( t =>
MapSeq(fun( x => multAndSumUp(x, Get(t, 1), beta))) o
MapSeq(fun(x => mult(alpha, x))) o
toGlobal(MapSeq(id)) o
ReduceSeq(fun((acc, y) =>
multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
), 0.0f) $ Zip(vectorX, Get(t, 0))
)) $ Zip(matrix, vectorY)
})
// Same as gemvKernel but multiplies by the transpose of the matrix.
val gemvTransposed = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, N), M),
ArrayTypeWSWC(Float, M),
ArrayTypeWSWC(Float,N),
Float,
Float,
(matrix, vectorX, vectorY, alpha, beta) => {
MapGlb(fun( t =>
MapSeq(fun( x => multAndSumUp(x, Get(t, 1), beta))) o
MapSeq(fun(x => mult(alpha, x))) o
toGlobal(MapSeq(id)) o
ReduceSeq(fun((acc, y) =>
multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
), 0.0f) $ Zip(vectorX, Get(t, 0))
)) $ Zip(Transpose() $ matrix, vectorY)
})
// Elementwise vector addition.
val vecAdd = fun(
ArrayTypeWSWC(Float, N),
ArrayTypeWSWC(Float, N),
(a,b) => MapGlb(add) $ Zip(a, b)
)
// Elementwise matrix addition.
val matrixAdd = fun(
ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
(A, B) =>
MapGlb(fun(x => MapSeq(add) $ Zip(Get(x, 0), Get(x, 1)))) $ Zip(A, B)
)
// PolyBench 2MM: D = A.B; E = C.D, checked against a sequential gold computation.
@Test
def twoMM(): Unit = {
// D=A.B; E=C.D
// polybench actually implements D := alpha*A*B*C + beta*D, polybench gpu does the other
// array sizes messed up in polybench, doesn't show up because of square matrices
val i = 128
val j = 16
val k = 32
val l = 64
val A = Array.fill(i, k)(util.Random.nextInt(5).toFloat)
val B = Array.fill(k, j)(util.Random.nextInt(5).toFloat)
val C = Array.fill(l, i)(util.Random.nextInt(5).toFloat)
val dGold = Utils.matrixMatrixMultiply(A, B)
val eGold = Utils.matrixMatrixMultiply(C, dGold)
val D = Execute(i)[Array[Float]](mm, A, B)._1.grouped(j).toArray
val E = Execute(i)[Array[Float]](mm, C, D)._1
assertArrayEquals(dGold.flatten, D.flatten, 0.0f)
assertArrayEquals(eGold.flatten, E, 0.0f)
}
// PolyBench 3MM: three chained matrix products.
@Test
def threeMM(): Unit = {
// E=A.B; F=C.D; G=E.F
val i = 128
val j = 256
val k = 32
val l = 64
val m = 16
val A = Array.fill(i, k)(util.Random.nextInt(5).toFloat)
val B = Array.fill(k, j)(util.Random.nextInt(5).toFloat)
val C = Array.fill(j, m)(util.Random.nextInt(5).toFloat)
val D = Array.fill(m, l)(util.Random.nextInt(5).toFloat)
val eGold = Utils.matrixMatrixMultiply(A, B)
val fGold = Utils.matrixMatrixMultiply(C, D)
val gGold = Utils.matrixMatrixMultiply(eGold, fGold)
val E = Execute(i)[Array[Float]](mm, A, B)._1.grouped(j).toArray
val F = Execute(j)[Array[Float]](mm, C, D)._1.grouped(l).toArray
val G = Execute(i)[Array[Float]](mm, E, F)._1
assertArrayEquals(eGold.flatten, E.flatten, 0.0f)
assertArrayEquals(fGold.flatten, F.flatten, 0.0f)
assertArrayEquals(gGold.flatten, G, 0.0f)
}
// GESUMMV composed from three separate kernel launches (two mvAlpha, one vecAdd).
@Test
def gesummv(): Unit = {
// y = A . x * alpha + B . x * beta
val n = 128
val alpha = 2.0f
val beta = 1.5f
val x = Array.fill(n)(util.Random.nextInt(5).toFloat)
val A = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val B = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val tmp1Gold = Utils.matrixVector(A, x, alpha)
val tmp2Gold = Utils.matrixVector(B, x, beta)
val yGold = (tmp1Gold, tmp2Gold).zipped.map(_+_)
val tmp1 = Execute(n)[Array[Float]](mvAlpha, A, x, alpha)._1
val tmp2 = Execute(n)[Array[Float]](mvAlpha, B, x, beta)._1
val y = Execute(n)[Array[Float]](vecAdd, tmp1, tmp2)._1
assertArrayEquals(tmp1Gold, tmp1, 0.001f)
assertArrayEquals(tmp2Gold, tmp2, 0.001f)
assertArrayEquals(yGold, y, 0.001f)
}
// GESUMMV as a single fused kernel (benchmarks.GESUMMV.fused).
@Test
def gesummv2(): Unit = {
// y = A . x * alpha + B . x * beta
val n = 128
val alpha = 2.0f
val beta = 1.5f
val x = Array.fill(n)(util.Random.nextInt(5).toFloat)
val A = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val B = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val tmp1Gold = Utils.matrixVector(A, x, alpha)
val tmp2Gold = Utils.matrixVector(B, x, beta)
val yGold = (tmp1Gold, tmp2Gold).zipped.map(_+_)
val gesummv = GESUMMV.fused
val y = Execute(n)[Array[Float]](gesummv, A, B, x, alpha, beta)._1
assertArrayEquals(yGold, y, 0.001f)
}
// GESUMMV via the simple user-function variant.
@Test
def gesummv3(): Unit = {
// y = A . x * alpha + B . x * beta
val n = 128
val alpha = 2.0f
val beta = 1.5f
val x = Array.fill(n)(util.Random.nextInt(5).toFloat)
val A = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val B = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val tmp1Gold = Utils.matrixVector(A, x, alpha)
val tmp2Gold = Utils.matrixVector(B, x, beta)
val yGold = (tmp1Gold, tmp2Gold).zipped.map(_+_)
val gesummv = GESUMMV.simpleUserFun
val y = Execute(n)[Array[Float]](gesummv, A, B, x, alpha, beta)._1
assertArrayEquals(yGold, y, 0.001f)
}
// Optimised GESUMMV with an explicit local/global size; skipped on Apple's CPU driver.
@Test
def gesummvKepler(): Unit = {
assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
// y = A . x * alpha + B . x * beta
val n = 1024
val alpha = 2.0f
val beta = 1.5f
val x = Array.fill(n)(util.Random.nextInt(5).toFloat)
val A = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val B = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
val tmp1Gold = Utils.matrixVector(A, x, alpha)
val tmp2Gold = Utils.matrixVector(B, x, beta)
val yGold = (tmp1Gold, tmp2Gold).zipped.map(_+_)
val stride = 128
val gesummv = GESUMMV.fusedOptimised
val y = Execute(stride, stride*n, (true, true))[Array[Float]](gesummv, A, B, x, alpha, beta)._1
assertArrayEquals(yGold, y, 0.001f)
}
  // this is missing in polybench-gpu
  @Test
  def gemver(): Unit = {
    // GEMVER composite kernel, computed in several dependent launches:
    // A = A + u1.v1^T + u2v2^T
    // x = beta*A^T.y + z
    // w = alpha*A.x
    val n = 128
    val A = Array.fill(n, n)(util.Random.nextInt(5).toFloat)
    val y = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val z = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val u1 = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val u2 = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val v1 = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val v2 = Array.fill(n)(util.Random.nextInt(5).toFloat)
    val alpha = 1.5f
    val beta = 2.5f
    // Host-side reference for each stage: the two outer products, the
    // rank-2 update of A, then the two matrix-vector products.
    val u1v1gold = u1.map(x => v1.map(_*x))
    val u2v2gold = u2.map(x => v2.map(_*x))
    val aGold = Utils.add(Utils.add(u1v1gold, u2v2gold), A)
    val xGold = Utils.matrixVector(aGold.transpose, y, z, beta, 1.0f)
    val wGold = Utils.matrixVector(aGold, xGold, alpha)
    // Outer product a.b^T: one global thread per element of a, each
    // sequentially scaling the whole of b.
    val outerProduct = fun(
      ArrayTypeWSWC(Float, N),
      ArrayTypeWSWC(Float, N),
      (a, b) => MapGlb(fun(x => MapSeq(fun(y => mult(x, y))) $ b)) $ a
    )
    // Flat kernel outputs are re-grouped into n x n matrices between launches.
    val (u1v1, _) = Execute(n)[Array[Float]](outerProduct, u1, v1)
    val (u2v2, _) = Execute(n)[Array[Float]](outerProduct, u2, v2)
    val (partialSum, _) = Execute(n)[Array[Float]](matrixAdd, u1v1.grouped(n).toArray, u2v2.grouped(n).toArray)
    val (newA, _) = Execute(n)[Array[Float]](matrixAdd, A, partialSum.grouped(n).toArray)
    val (x, _) = Execute(n)[Array[Float]](gemvTransposed, newA.grouped(n).toArray, y, z, beta, 1.0f)
    val (w, _) = Execute(n)[Array[Float]](mvAlpha, newA.grouped(n).toArray, x, alpha)
    // Check every intermediate stage against the host reference.
    assertArrayEquals(u1v1gold.flatten, u1v1, 0.001f)
    assertArrayEquals(u2v2gold.flatten, u2v2, 0.001f)
    assertArrayEquals(aGold.flatten, newA, 0.001f)
    assertArrayEquals(xGold, x, 0.001f)
    assertArrayEquals(wGold, w, 0.001f)
  }
@Test
def gemv(): Unit = {
val n = 256
val m = 128
val alpha = 1.5f
val beta = 2.5f
val vectorX = Array.fill(m)(util.Random.nextInt(5).toFloat)
val vectorY = Array.fill(n)(util.Random.nextInt(5).toFloat)
val matrix = Array.fill(n, m)(util.Random.nextInt(5).toFloat)
val gold = Utils.matrixVector(matrix, vectorX, vectorY, alpha, beta)
val (result, _) = Execute(n)[Array[Float]](gemvKernel, matrix, vectorX, vectorY, alpha, beta)
assertArrayEquals(gold, result, 0.001f)
}
  @Test
  def gemm(): Unit = {
    // GEMM: C = alpha.A.B + beta.C, with rectangular n x k, k x m, n x m inputs.
    val n = 128
    val m = 256
    val k = 64
    val A = Array.fill(n, k)(util.Random.nextInt(5).toFloat)
    val B = Array.fill(k, m)(util.Random.nextInt(5).toFloat)
    val C = Array.fill(n, m)(util.Random.nextInt(5).toFloat)
    val alpha = 1.5f
    val beta = 0.5f
    // Host-side reference: element-wise alpha*(A.B) + beta*C.
    val AB = Utils.matrixMatrixMultiply(A, B)
    val gold = (AB, C).zipped.map((x, y) => (x, y).zipped.map((x, y) => x * alpha + y * beta))
    val N = SizeVar("N")
    val M = SizeVar("M")
    val K = SizeVar("K")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, K), N),
      ArrayTypeWSWC(ArrayTypeWSWC(Float, M), K),
      ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
      Float,
      Float,
      (A, B, C, alpha, beta) => {
        // One global thread per row of A (paired with the matching row of C).
        MapGlb(fun( aRow =>
          Join() o MapSeq(fun( bCol =>
            toGlobal(MapSeq(id)) o
            // ... then add beta * C(i,j) to the scaled dot product.
            toPrivate(MapSeq(fun(x => multAndSumUp(x, beta, Get(bCol, 1))))) o
            // Scale the dot product by alpha ...
            toPrivate(MapSeq(fun(x => mult(x, alpha)))) o
            // Dot product of this A row with the current column of B.
            ReduceSeq(fun((acc, y) =>
              multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
            ), 0.0f) $ Zip(Get(aRow, 0), Get(bCol, 0))
          )) $ Zip(Transpose() $ B, Get(aRow, 1))
        )) $ Zip(A, C)
      })
    val (res, _) = Execute(n)[Array[Float]](f, A, B, C, alpha, beta)
    assertArrayEquals(gold.flatten, res, 0.001f)
  }
  @Test
  def atax(): Unit = {
    // ATAX: y = A^T.(A.x), computed as two kernel launches (A.x, then A^T times that).
    val n = 128
    val m = 256
    val x = Array.fill(m)(util.Random.nextInt(5).toFloat)
    val A = Array.fill(n, m)(util.Random.nextInt(5).toFloat)
    // Host-side reference for both stages.
    val axGold = Utils.matrixVector(A, x)
    val gold = Utils.matrixVector(A.transpose, axGold)
    // Transposed matrix-vector product: each global thread reduces one
    // column of the input matrix against the vector.
    // NOTE(review): N and K here are SizeVars declared elsewhere in this
    // file (not the local n/m) — confirm they match the intended shapes.
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, N), K),
      ArrayTypeWSWC(Float, K),
      (matrix, vector) =>
        MapGlb(fun(row => toGlobal(MapSeq(id)) o
          ReduceSeq(fun((acc, y) =>
            multAndSumUp.apply(acc, Get(y, 0), Get(y, 1))
          ), 0.0f) $ Zip(row, vector)
        )) o Transpose() $ matrix
    )
    val (ax, _) = Execute(n)[Array[Float]](mv, A, x)
    val (atax, _) = Execute(n)[Array[Float]](f, A, ax)
    assertArrayEquals(gold, atax, 0.001f)
  }
}