code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2011, Patrick Boe
* ===========================
* This program is distributed under the terms of the GNU General Public License.
*
* This file is part of Thimblus.
*
* Thimblus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Thimblus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Thimblus. If not, see <http://www.gnu.org/licenses/>.
*/
package org.thimblus.model
import java.util.Date
import scala.swing._
import scala.swing.event.Event
import org.thimblus.data._
import akka.event.EventHandler
/** Mutable store for a user's home data: the current plan plus a
  * free-form metadata string describing it. Implementations decide
  * how assignment is observed (see [[HomeSource]]). */
trait HomeStore {
  // The user's current plan (abstract; may be null before initialization).
  var plan: Plan
  // Free-form metadata accompanying the plan.
  var metadata: String
}
/** A [[HomeStore]] that broadcasts a [[PlanUpdate]] event to its
  * subscribers every time the plan is replaced. */
trait HomeSource extends HomeStore with Publisher {

  // Backing field for the plan property; null until first assignment.
  private[this] var currentPlan: Plan = null

  var metadata: String = null

  /** The most recently assigned plan (null if none has been set yet). */
  def plan: Plan = currentPlan

  /** Stores the new plan, then notifies subscribers with a [[PlanUpdate]]. */
  def plan_=(x: Plan): Unit = {
    currentPlan = x
    publish(PlanUpdate(x))
  }
}
/** Message asking for information; `info` describes what is requested. */
case class Request(info: String)

/** Swing event fired by [[HomeSource]] after its plan has been replaced. */
case class PlanUpdate(revised: Plan) extends Event

/** Signals that a plan could not be obtained in time. */
class PlanTimeoutException extends RuntimeException

/** Signals that a received payload could not be understood; the payload's
  * string form becomes the exception message. */
class GibberishException(message: Any) extends RuntimeException(message.toString)
// vim: sw=2:softtabstop=2:et:
| patrickboe/thimblus | src/main/scala/org/thimblus/model/model.scala | Scala | gpl-3.0 | 1,509 |
package spark.streaming.examples
import spark.streaming.{Seconds, StreamingContext}
import spark.storage.StorageLevel
import com.twitter.algebird._
import spark.streaming.StreamingContext._
import spark.SparkContext._
/**
* Illustrates the use of the Count-Min Sketch, from Twitter's Algebird library, to compute
* windowed and global Top-K estimates of user IDs occurring in a Twitter stream.
* <br>
* <strong>Note</strong> that since Algebird's implementation currently only supports Long inputs,
* the example operates on Long IDs. Once the implementation supports other inputs (such as String),
* the same approach could be used for computing popular topics for example.
* <p>
* <p>
* <a href="http://highlyscalable.wordpress.com/2012/05/01/probabilistic-structures-web-analytics-data-mining/">
* This blog post</a> has a good overview of the Count-Min Sketch (CMS). The CMS is a datastructure
* for approximate frequency estimation in data streams (e.g. Top-K elements, frequency of any given element, etc),
* that uses space sub-linear in the number of elements in the stream. Once elements are added to the CMS, the
* estimated count of an element can be computed, as well as "heavy-hitters" that occur more than a threshold
* percentage of the overall total count.
* <p><p>
* Algebird's implementation is a monoid, so we can succinctly merge two CMS instances in the reduce operation.
*/
object TwitterAlgebirdCMS {
  /**
   * Entry point. Expects `<master> <twitter_username> <twitter_password>` plus
   * optional tweet filters, then prints approximate (Count-Min Sketch) and
   * exact Top-K user-id frequencies per 10-second batch and cumulatively.
   */
  def main(args: Array[String]) {
    if (args.length < 3) {
      System.err.println("Usage: TwitterAlgebirdCMS <master> <twitter_username> <twitter_password>" +
        " [filter1] [filter2] ... [filter n]")
      System.exit(1)
    }

    // CMS parameters: DELTA/EPS bound the estimation error, SEED fixes the
    // hash functions, PERC is the heavy-hitter threshold (fraction of total).
    val DELTA = 1E-3
    val EPS = 0.01
    val SEED = 1
    val PERC = 0.001
    // K highest frequency elements to take
    val TOPK = 10

    val Array(master, username, password) = args.slice(0, 3)
    val filters = args.slice(3, args.length)

    val ssc = new StreamingContext(master, "TwitterAlgebirdCMS", Seconds(10),
      System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
    val stream = ssc.twitterStream(username, password, filters, StorageLevel.MEMORY_ONLY_SER)

    // One Long user id per status; both pipelines below consume this stream.
    val users = stream.map(status => status.getUser.getId)

    val cms = new CountMinSketchMonoid(DELTA, EPS, SEED, PERC)
    // Driver-side accumulators, merged across batches inside the foreach
    // blocks below; this relies on foreach running on the driver.
    var globalCMS = cms.zero
    val mm = new MapMonoid[Long, Int]()
    var globalExact = Map[Long, Int]()

    // Approximate pipeline: one single-element sketch per id, monoid-merged
    // (++) into a single CMS per batch.
    val approxTopUsers = users.mapPartitions(ids => {
      ids.map(id => cms.create(id))
    }).reduce(_ ++ _)

    // Exact pipeline: plain word-count over user ids, for comparison.
    val exactTopUsers = users.map(id => (id, 1))
      .reduceByKey((a, b) => a + b)

    approxTopUsers.foreach(rdd => {
      if (rdd.count() != 0) {
        // The batch was reduced to a single CMS, so first() is the whole result.
        val partial = rdd.first()
        val partialTopK = partial.heavyHitters.map(id =>
          (id, partial.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
        // Fold this batch into the running global sketch (monoid merge).
        globalCMS ++= partial
        val globalTopK = globalCMS.heavyHitters.map(id =>
          (id, globalCMS.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
        println("Approx heavy hitters at %2.2f%% threshold this batch: %s".format(PERC,
          partialTopK.mkString("[", ",", "]")))
        println("Approx heavy hitters at %2.2f%% threshold overall: %s".format(PERC,
          globalTopK.mkString("[", ",", "]")))
      }
    })

    exactTopUsers.foreach(rdd => {
      if (rdd.count() != 0) {
        val partialMap = rdd.collect().toMap
        // Swap to (count, id) so sortByKey orders by frequency, descending.
        val partialTopK = rdd.map(
          {case (id, count) => (count, id)})
          .sortByKey(ascending = false).take(TOPK)
        // Merge the batch counts into the running exact totals.
        globalExact = mm.plus(globalExact.toMap, partialMap)
        val globalTopK = globalExact.toSeq.sortBy(_._2).reverse.slice(0, TOPK)
        println("Exact heavy hitters this batch: %s".format(partialTopK.mkString("[", ",", "]")))
        println("Exact heavy hitters overall: %s".format(globalTopK.mkString("[", ",", "]")))
      }
    })

    ssc.start()
  }
}
| hobinyoon/spark-0.7.0 | examples/src/main/scala/spark/streaming/examples/TwitterAlgebirdCMS.scala | Scala | bsd-3-clause | 3,974 |
package play.api.db
import scala.language.reflectiveCalls
import play.api._
import play.api.libs._
import play.core._
import java.sql._
import javax.sql._
import com.jolbox.bonecp._
import com.jolbox.bonecp.hooks._
import scala.util.control.{ NonFatal, ControlThrowable }
/**
 * The Play Database API manages several connection pools.
 */
trait DBApi {

  /** The managed data sources, each paired with its configuration name. */
  val datasources: List[(DataSource, String)]

  /**
   * Shuts down the pool backing the given data source.
   */
  def shutdownPool(ds: DataSource)

  /**
   * Retrieves a JDBC data source.
   *
   * @param name the data source name
   * @return the data source
   * @throws an error if the required data source is not registered
   */
  def getDataSource(name: String): DataSource

  /**
   * Retrieves the JDBC connection URL for a particular data source.
   *
   * The connection borrowed to read the metadata is always returned to the
   * pool, even when reading the metadata fails.
   *
   * @param name the data source name
   * @return The JDBC URL connection string, i.e. `jdbc:...`
   * @throws an error if the required data source is not registered
   */
  def getDataSourceURL(name: String): String = {
    val connection = getDataSource(name).getConnection
    try {
      connection.getMetaData.getURL
    } finally {
      // Fix: previously the connection leaked if getMetaData/getURL threw.
      connection.close()
    }
  }

  /**
   * Retrieves a JDBC connection.
   *
   * Don't forget to release the connection at some point by calling close().
   *
   * @param name the data source name
   * @param autocommit when `true`, sets this connection to auto-commit
   * @return a JDBC connection
   * @throws an error if the required data source is not registered
   */
  def getConnection(name: String, autocommit: Boolean = true): Connection = {
    val connection = getDataSource(name).getConnection
    connection.setAutoCommit(autocommit)
    connection
  }

  /**
   * Execute a block of code, providing a JDBC connection. The connection and
   * all created statements are automatically released.
   *
   * @param name The datasource name.
   * @param block Code block to execute.
   */
  def withConnection[A](name: String)(block: Connection => A): A = {
    val connection = new AutoCleanConnection(getConnection(name))
    try {
      block(connection)
    } finally {
      connection.close()
    }
  }

  /**
   * Execute a block of code, in the scope of a JDBC transaction.
   * The connection and all created statements are automatically released.
   * The transaction is automatically committed, unless an exception occurs.
   *
   * @param name The datasource name.
   * @param block Code block to execute.
   */
  def withTransaction[A](name: String)(block: Connection => A): A = {
    withConnection(name) { connection =>
      try {
        connection.setAutoCommit(false)
        val r = block(connection)
        connection.commit()
        r
      } catch {
        // A ControlThrowable (e.g. a non-local return) is not a failure:
        // commit the work done so far, then let it propagate.
        case e: ControlThrowable => connection.commit(); throw e
        case NonFatal(e) => connection.rollback(); throw e
      }
    }
  }
}
/**
 * Provides a high-level API for getting JDBC connections.
 *
 * For example:
 * {{{
 * val conn = DB.getConnection("customers")
 * }}}
 */
object DB {

  /** The exception we are throwing. */
  private def error = throw new Exception("DB plugin is not registered.")

  /** Resolves the DBApi of the registered DB plugin, failing if none exists. */
  private def dbApi(implicit app: Application): DBApi = app.plugin[DBPlugin] match {
    case Some(plugin) => plugin.api
    case None         => error
  }

  /**
   * Retrieves a JDBC connection.
   *
   * @param name data source name
   * @param autocommit when `true`, sets this connection to auto-commit
   * @return a JDBC connection
   * @throws an error if the required data source is not registered
   */
  def getConnection(name: String = "default", autocommit: Boolean = true)(implicit app: Application): Connection =
    dbApi.getConnection(name, autocommit)

  /**
   * Retrieves a JDBC data source (connections it hands out are auto-commit).
   *
   * @param name data source name
   * @return a JDBC data source
   * @throws an error if the required data source is not registered
   */
  def getDataSource(name: String = "default")(implicit app: Application): DataSource =
    dbApi.getDataSource(name)

  /**
   * Execute a block of code, providing a JDBC connection. The connection is
   * automatically released.
   *
   * @param name The datasource name.
   * @param block Code block to execute.
   */
  def withConnection[A](name: String)(block: Connection => A)(implicit app: Application): A =
    dbApi.withConnection(name)(block)

  /**
   * Execute a block of code against the default data source, providing a JDBC
   * connection. The connection and all created statements are automatically
   * released.
   *
   * @param block Code block to execute.
   */
  def withConnection[A](block: Connection => A)(implicit app: Application): A =
    dbApi.withConnection("default")(block)

  /**
   * Execute a block of code, in the scope of a JDBC transaction.
   * The connection and all created statements are automatically released.
   * The transaction is automatically committed, unless an exception occurs.
   *
   * @param name The datasource name.
   * @param block Code block to execute.
   */
  def withTransaction[A](name: String = "default")(block: Connection => A)(implicit app: Application): A =
    dbApi.withTransaction(name)(block)

  /**
   * Execute a block of code against the default data source, in the scope of
   * a JDBC transaction. The transaction is automatically committed, unless an
   * exception occurs.
   *
   * @param block Code block to execute.
   */
  def withTransaction[A](block: Connection => A)(implicit app: Application): A =
    dbApi.withTransaction("default")(block)
}
/**
 * Generic DBPlugin interface
 */
trait DBPlugin extends Plugin {
  // The DBApi instance this plugin manages.
  def api: DBApi
}
/**
 * A DBPlugin implementation that provides a DBApi
 *
 * @param app the application that is registering the plugin
 */
class BoneCPPlugin(app: Application) extends DBPlugin {

  private def error = throw new Exception("db keys are missing from application.conf")

  /** The `db` subtree of the application configuration (empty if absent). */
  lazy val dbConfig = app.configuration.getConfig("db").getOrElse(Configuration.empty)

  /** Reads the JDBC URL from a borrowed connection, always returning it to the pool. */
  private def dbURL(conn: Connection): String = {
    try {
      conn.getMetaData.getURL
    } finally {
      // Fix: the connection leaked if reading the metadata failed.
      conn.close()
    }
  }

  // should be accessed in onStart first
  private lazy val dbApi: DBApi = new BoneCPApi(dbConfig, app.classloader)

  /**
   * plugin is disabled if either configuration is missing or the plugin is explicitly disabled
   */
  private lazy val isDisabled = {
    app.configuration.getString("dbplugin").filter(_ == "disabled").isDefined || dbConfig.subKeys.isEmpty
  }

  /**
   * Is this plugin enabled.
   *
   * {{{
   * dbplugin=disabled
   * }}}
   */
  override def enabled = isDisabled == false

  /**
   * Retrieves the underlying `DBApi` managing the data sources.
   */
  def api: DBApi = dbApi

  /**
   * Reads the configuration and connects to every data source.
   */
  override def onStart() {
    // Try to connect to each, this should be the first access to dbApi
    dbApi.datasources.map { ds =>
      try {
        ds._1.getConnection.close()
        app.mode match {
          case Mode.Test =>
          case mode => Play.logger.info("database [" + ds._2 + "] connected at " + dbURL(ds._1.getConnection))
        }
      } catch {
        case NonFatal(e) => {
          // Fix: e.getCause may be null; Option(...) avoids wrapping null in
          // Some, and we fall back to the exception itself so the cause of the
          // failure is never silently lost.
          throw dbConfig.reportError(ds._2 + ".url", "Cannot connect to database [" + ds._2 + "]", Option(e.getCause).orElse(Some(e)))
        }
      }
    }
  }

  /**
   * Closes all data sources.
   */
  override def onStop() {
    dbApi.datasources.foreach {
      case (ds, _) => try {
        dbApi.shutdownPool(ds)
      } catch { case NonFatal(_) => }
    }
    // Deregister JDBC drivers so the application classloader can be reclaimed
    // on application reload.
    val drivers = DriverManager.getDrivers()
    while (drivers.hasMoreElements) {
      val driver = drivers.nextElement
      DriverManager.deregisterDriver(driver)
    }
  }
}
/** DBApi backed by BoneCP connection pools, one per configured database. */
private[db] class BoneCPApi(configuration: Configuration, classloader: ClassLoader) extends DBApi {

  private def error(db: String, message: String = "") = throw configuration.reportError(db, message)

  // One data source per top-level key under `db`.
  private val dbNames = configuration.subKeys

  /** Registers the JDBC driver through a proxy so it can later be deregistered. */
  private def register(driver: String, c: Configuration) {
    try {
      DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
    } catch {
      case NonFatal(e) => throw c.reportError("driver", "Driver not found: [" + driver + "]", Some(e))
    }
  }

  /** Builds and configures a BoneCP data source from one `db.<name>` subtree. */
  private def createDataSource(dbName: String, url: String, driver: String, conf: Configuration): DataSource = {

    val datasource = new BoneCPDataSource

    // Try to load the driver
    conf.getString("driver").map { driver =>
      try {
        DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
      } catch {
        case NonFatal(e) => throw conf.reportError("driver", "Driver not found: [" + driver + "]", Some(e))
      }
    }

    val autocommit = conf.getBoolean("autocommit").getOrElse(true)
    val isolation = conf.getString("isolation").map {
      case "NONE" => Connection.TRANSACTION_NONE
      case "READ_COMMITTED" => Connection.TRANSACTION_READ_COMMITTED
      // Fix: these two literals previously contained a trailing space, so the
      // documented values "READ_UNCOMMITTED" and "REPEATABLE_READ" always fell
      // through to the unknown-level error case.
      case "READ_UNCOMMITTED" => Connection.TRANSACTION_READ_UNCOMMITTED
      case "REPEATABLE_READ" => Connection.TRANSACTION_REPEATABLE_READ
      case "SERIALIZABLE" => Connection.TRANSACTION_SERIALIZABLE
      case unknown => throw conf.reportError("isolation", "Unknown isolation level [" + unknown + "]")
    }
    val catalog = conf.getString("defaultCatalog")
    val readOnly = conf.getBoolean("readOnly").getOrElse(false)

    datasource.setClassLoader(classloader)

    val logger = Logger("com.jolbox.bonecp")

    // Re-apply per connection config @ checkout
    datasource.setConnectionHook(new AbstractConnectionHook {
      override def onCheckIn(connection: ConnectionHandle) {
        if (logger.isTraceEnabled) {
          logger.trace("Check in connection [%s leased]".format(datasource.getTotalLeased))
        }
      }
      override def onCheckOut(connection: ConnectionHandle) {
        connection.setAutoCommit(autocommit)
        isolation.map(connection.setTransactionIsolation(_))
        connection.setReadOnly(readOnly)
        catalog.map(connection.setCatalog(_))
        if (logger.isTraceEnabled) {
          logger.trace("Check out connection [%s leased]".format(datasource.getTotalLeased))
        }
      }
    })

    // Recognized URL shapes. Heroku-style postgres:// and mysql:// URLs are
    // rewritten to plain JDBC URLs with credentials extracted.
    // Fix: the regex literals were over-escaped ("\\\\s" matched a literal
    // backslash or 's' instead of whitespace, "\\\\?" an optional backslash
    // instead of a literal '?'), so valid URLs failed to match.
    val PostgresFullUrl = "^postgres://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
    val MysqlFullUrl = "^mysql://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
    val MysqlCustomProperties = ".*\\?(.*)".r
    val H2DefaultUrl = "^jdbc:h2:mem:.+".r

    conf.getString("url") match {
      case Some(PostgresFullUrl(username, password, host, dbname)) =>
        datasource.setJdbcUrl("jdbc:postgresql://%s/%s".format(host, dbname))
        datasource.setUsername(username)
        datasource.setPassword(password)
      case Some(url @ MysqlFullUrl(username, password, host, dbname)) =>
        val defaultProperties = """?useUnicode=yes&characterEncoding=UTF-8&connectionCollation=utf8_general_ci"""
        val addDefaultPropertiesIfNeeded = MysqlCustomProperties.findFirstMatchIn(url).map(_ => "").getOrElse(defaultProperties)
        datasource.setJdbcUrl("jdbc:mysql://%s/%s".format(host, dbname + addDefaultPropertiesIfNeeded))
        datasource.setUsername(username)
        datasource.setPassword(password)
      case Some(url @ H2DefaultUrl()) if !url.contains("DB_CLOSE_DELAY") =>
        // Keep in-memory H2 databases alive between connections in dev mode.
        if (Play.maybeApplication.exists(_.mode == Mode.Dev)) {
          datasource.setJdbcUrl(url + ";DB_CLOSE_DELAY=-1")
        } else {
          datasource.setJdbcUrl(url)
        }
      case Some(s: String) =>
        datasource.setJdbcUrl(s)
      case _ =>
        throw conf.globalError("Missing url configuration for database [%s]".format(conf))
    }

    conf.getString("user").map(datasource.setUsername(_))
    conf.getString("pass").map(datasource.setPassword(_))
    conf.getString("password").map(datasource.setPassword(_))

    // Pool configuration
    datasource.setPartitionCount(conf.getInt("partitionCount").getOrElse(1))
    datasource.setMaxConnectionsPerPartition(conf.getInt("maxConnectionsPerPartition").getOrElse(30))
    datasource.setMinConnectionsPerPartition(conf.getInt("minConnectionsPerPartition").getOrElse(5))
    datasource.setAcquireIncrement(conf.getInt("acquireIncrement").getOrElse(1))
    datasource.setAcquireRetryAttempts(conf.getInt("acquireRetryAttempts").getOrElse(10))
    datasource.setAcquireRetryDelayInMs(conf.getMilliseconds("acquireRetryDelay").getOrElse(1000))
    datasource.setConnectionTimeoutInMs(conf.getMilliseconds("connectionTimeout").getOrElse(1000))
    datasource.setIdleMaxAge(conf.getMilliseconds("idleMaxAge").getOrElse(1000 * 60 * 10), java.util.concurrent.TimeUnit.MILLISECONDS)
    datasource.setMaxConnectionAge(conf.getMilliseconds("maxConnectionAge").getOrElse(1000 * 60 * 60), java.util.concurrent.TimeUnit.MILLISECONDS)
    datasource.setDisableJMX(conf.getBoolean("disableJMX").getOrElse(true))
    datasource.setStatisticsEnabled(conf.getBoolean("statisticsEnabled").getOrElse(false))
    datasource.setIdleConnectionTestPeriod(conf.getMilliseconds("idleConnectionTestPeriod").getOrElse(1000 * 60), java.util.concurrent.TimeUnit.MILLISECONDS)
    conf.getString("initSQL").map(datasource.setInitSQL(_))
    conf.getBoolean("logStatements").map(datasource.setLogStatementsEnabled(_))
    conf.getString("connectionTestStatement").map(datasource.setConnectionTestStatement(_))

    // Bind in JNDI
    conf.getString("jndiName").map { name =>
      JNDI.initialContext.rebind(name, datasource)
      Play.logger.info("datasource [" + conf.getString("url").get + "] bound to JNDI as " + name)
    }

    datasource
  }

  /** All configured data sources, created eagerly, paired with their names. */
  val datasources: List[Tuple2[DataSource, String]] = dbNames.map { dbName =>
    val url = configuration.getString(dbName + ".url").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".url]"))
    val driver = configuration.getString(dbName + ".driver").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".driver]"))
    val extraConfig = configuration.getConfig(dbName).getOrElse(error(dbName, "Missing configuration [db." + dbName + "]"))
    register(driver, extraConfig)
    createDataSource(dbName, url, driver, extraConfig) -> dbName
  }.toList

  /** Closes the pool if it is a BoneCP pool; fails loudly otherwise. */
  def shutdownPool(ds: DataSource) = {
    ds match {
      case ds: BoneCPDataSource => ds.close()
      case _ => error(" - could not recognize DataSource, therefore unable to shutdown this pool")
    }
  }

  /**
   * Retrieves the data source registered under the given name.
   *
   * @param name the data source name
   * @return the matching data source
   * @throws an error if the required data source is not registered
   */
  def getDataSource(name: String): DataSource = {
    // find(...) replaces the former filter(...).headOption chain: same result,
    // no intermediate list.
    datasources.find(_._2 == name).map(_._1).getOrElse(error(" - could not find datasource for " + name))
  }
}
/**
 * Provides an interface for retrieving the jdbc driver's implementation of java.sql.Connection
 * from a "decorated" Connection (such as the Connection that DB.withConnection provides). Upcasting
 * to this trait should be used with caution since exposing the internal jdbc connection can violate the
 * guarantees Play otherwise makes (like automatically closing jdbc statements created from the connection)
 */
trait HasInternalConnection {
  // Returns the driver-level connection backing this (possibly wrapped) one.
  def getInternalConnection(): Connection
}
/**
 * A connection that automatically releases statements on close
 */
private class AutoCleanConnection(connection: Connection) extends Connection with HasInternalConnection {

  // Every statement created through this wrapper; closed again in close().
  private val openStatements = scala.collection.mutable.ListBuffer.empty[Statement]

  // Remembers a freshly created statement before handing it to the caller.
  private def tracked[T <: Statement](create: => T): T = {
    val stmt = create
    openStatements += stmt
    stmt
  }

  // Closes and forgets every statement created through this wrapper.
  private def closeOpenStatements(): Unit = {
    openStatements.foreach(_.close())
    openStatements.clear()
  }

  /** Unwraps a BoneCP handle to the driver connection; otherwise returns as-is. */
  override def getInternalConnection(): Connection = connection match {
    case handle: com.jolbox.bonecp.ConnectionHandle => handle.getInternalConnection()
    case other => other
  }

  // Statement factories: delegate, but track the result for auto-close.
  def createStatement() = tracked(connection.createStatement())
  def createStatement(resultSetType: Int, resultSetConcurrency: Int) = tracked(connection.createStatement(resultSetType, resultSetConcurrency))
  def createStatement(resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = tracked(connection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability))
  def prepareStatement(sql: String) = tracked(connection.prepareStatement(sql))
  def prepareStatement(sql: String, autoGeneratedKeys: Int) = tracked(connection.prepareStatement(sql, autoGeneratedKeys))
  def prepareStatement(sql: String, columnIndexes: scala.Array[Int]) = tracked(connection.prepareStatement(sql, columnIndexes))
  def prepareStatement(sql: String, resultSetType: Int, resultSetConcurrency: Int) = tracked(connection.prepareStatement(sql, resultSetType, resultSetConcurrency))
  def prepareStatement(sql: String, resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = tracked(connection.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability))
  def prepareStatement(sql: String, columnNames: scala.Array[String]) = tracked(connection.prepareStatement(sql, columnNames))
  def prepareCall(sql: String) = tracked(connection.prepareCall(sql))
  def prepareCall(sql: String, resultSetType: Int, resultSetConcurrency: Int) = tracked(connection.prepareCall(sql, resultSetType, resultSetConcurrency))
  def prepareCall(sql: String, resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = tracked(connection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability))

  /** Closes all tracked statements, then the underlying connection. */
  def close(): Unit = {
    closeOpenStatements()
    connection.close()
  }

  // Plain pass-through delegation below.
  def clearWarnings(): Unit = connection.clearWarnings()
  def commit(): Unit = connection.commit()
  def createArrayOf(typeName: String, elements: scala.Array[AnyRef]) = connection.createArrayOf(typeName, elements)
  def createBlob() = connection.createBlob()
  def createClob() = connection.createClob()
  def createNClob() = connection.createNClob()
  def createSQLXML() = connection.createSQLXML()
  def createStruct(typeName: String, attributes: scala.Array[AnyRef]) = connection.createStruct(typeName, attributes)
  def getAutoCommit() = connection.getAutoCommit()
  def getCatalog() = connection.getCatalog()
  def getClientInfo() = connection.getClientInfo()
  def getClientInfo(name: String) = connection.getClientInfo(name)
  def getHoldability() = connection.getHoldability()
  def getMetaData() = connection.getMetaData()
  def getTransactionIsolation() = connection.getTransactionIsolation()
  def getTypeMap() = connection.getTypeMap()
  def getWarnings() = connection.getWarnings()
  def isClosed() = connection.isClosed()
  def isReadOnly() = connection.isReadOnly()
  def isValid(timeout: Int) = connection.isValid(timeout)
  def nativeSQL(sql: String) = connection.nativeSQL(sql)
  def releaseSavepoint(savepoint: Savepoint): Unit = connection.releaseSavepoint(savepoint)
  def rollback(): Unit = connection.rollback()
  def rollback(savepoint: Savepoint): Unit = connection.rollback(savepoint)
  def setAutoCommit(autoCommit: Boolean): Unit = connection.setAutoCommit(autoCommit)
  def setCatalog(catalog: String): Unit = connection.setCatalog(catalog)
  def setClientInfo(properties: java.util.Properties): Unit = connection.setClientInfo(properties)
  def setClientInfo(name: String, value: String): Unit = connection.setClientInfo(name, value)
  def setHoldability(holdability: Int): Unit = connection.setHoldability(holdability)
  def setReadOnly(readOnly: Boolean): Unit = connection.setReadOnly(readOnly)
  def setSavepoint() = connection.setSavepoint()
  def setSavepoint(name: String) = connection.setSavepoint(name)
  def setTransactionIsolation(level: Int): Unit = connection.setTransactionIsolation(level)
  def setTypeMap(map: java.util.Map[String, Class[_]]): Unit = connection.setTypeMap(map)
  def isWrapperFor(iface: Class[_]) = connection.isWrapperFor(iface)
  def unwrap[T](iface: Class[T]) = connection.unwrap(iface)

  // JDBC 4.1 — forwarded via structural types so this source still compiles
  // against pre-4.1 java.sql.Connection interfaces.
  def getSchema(): String =
    connection.asInstanceOf[{ def getSchema(): String }].getSchema()
  def setSchema(schema: String): Unit =
    connection.asInstanceOf[{ def setSchema(schema: String): Unit }].setSchema(schema)
  def getNetworkTimeout(): Int =
    connection.asInstanceOf[{ def getNetworkTimeout(): Int }].getNetworkTimeout()
  def setNetworkTimeout(executor: java.util.concurrent.Executor, milliseconds: Int): Unit =
    connection.asInstanceOf[{ def setNetworkTimeout(executor: java.util.concurrent.Executor, milliseconds: Int): Unit }].setNetworkTimeout(executor, milliseconds)
  def abort(executor: java.util.concurrent.Executor): Unit =
    connection.asInstanceOf[{ def abort(executor: java.util.concurrent.Executor): Unit }].abort(executor)
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play-jdbc/src/main/scala/play/api/db/DB.scala | Scala | mit | 21,351 |
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eleflow.uberdata.core.data.json
/**
 * Created by dirceu on 28/12/15.
 */
/** Record of an executor joining the cluster, carrying its host placement,
  * core count, log URLs and memory figures. */
case class ExecutorAdded(initTimestamp: Long,
                         executorId: String,
                         time: Long,
                         masterHost: String,
                         executorHost: String,
                         totalCores: Int,
                         logUrlMap: Map[String, String],
                         cacheMemory: Long,
                         remainingMemory: Long,
                         executorMemory: Long)
/** Record of an executor leaving the cluster, and the reason given. */
case class ExecutorRemoved(initTimestamp: Long, executorId: String, time: Long, reason: String)
/** Per-task metrics snapshot reported by an executor.
  *
  * The metric fields are optional because any single update only carries the
  * counters relevant to that task's read/write/shuffle activity.
  */
case class ExecutorMetricsUpdated(executorId: String,
                                  time: Long,
                                  taskId: Long,
                                  stageId: Int,
                                  bytesRead: Option[Long] = None,
                                  recordsRead: Option[Long] = None,
                                  // Fix: defaulted to None for consistency with every
                                  // other optional field (it previously had no default
                                  // and therefore always had to be passed explicitly).
                                  writeMethod: Option[String] = None,
                                  bytesWritten: Option[Long] = None,
                                  recordsWritten: Option[Long] = None,
                                  remoteBlocksFetched: Option[Long] = None,
                                  localBlocksFetched: Option[Long] = None,
                                  fetchWaitTime: Option[Long] = None,
                                  remoteBytesRead: Option[Long] = None,
                                  localBytesRead: Option[Long] = None,
                                  totalBytesRead: Option[Long] = None,
                                  totalBlocksFetched: Option[Long] = None,
                                  shuffleRecordsRead: Option[Long] = None,
                                  shuffleBytesWritten: Option[Long] = None,
                                  shuffleWriteTime: Option[Long] = None,
                                  shuffleRecordsWritten: Option[Long] = None)
//(taskId, stageId, stageAttemptId, accumUpdates)
//accumUpdates: Seq[(Long, Int, Int, Seq[AccumulableInfo])])
//case class SparkListenerExecutorMetricsUpdate(
//  execId: String,
//  accumUpdates: Seq[(Long, Int, Int, Seq[String])])
/** Accumulator updates reported by an executor; per the comment above, each
  * tuple is (taskId, stageId, stageAttemptId, serialized accumulator infos). */
case class AccumulatorInfoUpdateEvent(executorId: String,
                                      accumUpdates: Seq[(Long, Int, Int, Seq[String])]
                                      /*taskId: Long,
                                      stageId: Int,
                                      stageAttemptId: Int,
                                      idAccumulableInfo: Long,
                                      nameAccumulableInfo: Option[String],
                                      updateAccumulableInfo: Option[Any],
                                      valueAccumulableInfo: Option[Any]*/
                                      )
/** Snapshot of a worker node: its address, core and memory usage,
  * lifecycle state and last heartbeat time. */
case class Workers(id: String,
                   host: String,
                   port: Int,
                   webuiaddress: String,
                   cores: Int,
                   coresused: Int,
                   coresfree: Int,
                   memory: Long,
                   memoryused: Long,
                   memoryfree: Long,
                   state: String,
                   lastheartbeat: Long)
| eleflow/uberdata | iuberdata_core/src/main/scala/eleflow/uberdata/core/data/json/Executor.scala | Scala | apache-2.0 | 3,845 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package htsjdk.samtools
import htsjdk.samtools.util.BlockCompressedFilePointerUtil
import java.io.File
import java.io.FileNotFoundException
import org.apache.hadoop.fs.{ FileSystem, Path }
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.mapreduce.JobContext
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.io.BAMFilteredRecordReader
import org.seqdoop.hadoop_bam.BAMInputFormat
import org.seqdoop.hadoop_bam.FileVirtualSplit
import org.seqdoop.hadoop_bam.SAMRecordWritable
import scala.collection.mutable
object IndexedBamInputFormat {

  // Static configuration consumed by getSplits/createRecordReader;
  // populated via setVars before the job runs.
  private var optFilePath: Option[Path] = None
  private var optIndexFilePath: Option[Path] = None
  private var optViewRegion: Option[ReferenceRegion] = None
  private var optDict: Option[SAMSequenceDictionary] = None

  /**
   * Stores the BAM file, its index, the region of interest and the sequence
   * dictionary for later use by the input format.
   * NOTE(review): mutable static state — concurrent jobs with different
   * settings would race; assumes one configuration at a time — confirm.
   */
  def setVars(filePath: Path, indexFilePath: Path, viewRegion: ReferenceRegion, dict: SAMSequenceDictionary) {
    optFilePath = Some(filePath)
    optIndexFilePath = Some(indexFilePath)
    optViewRegion = Some(viewRegion)
    optDict = Some(dict)
  }
}
/** BAMInputFormat that restricts both splits and records to the region
  * configured via [[IndexedBamInputFormat.setVars]]. */
class IndexedBamInputFormat extends BAMInputFormat {

  /** Creates a record reader that filters records to the configured region. */
  override def createRecordReader(split: InputSplit, ctx: TaskAttemptContext): RecordReader[LongWritable, SAMRecordWritable] = {
    val rr: RecordReader[LongWritable, SAMRecordWritable] = new BAMFilteredRecordReader()
    assert(IndexedBamInputFormat.optViewRegion.isDefined)
    IndexedBamInputFormat.optViewRegion.foreach { (refReg) => BAMFilteredRecordReader.setRegion(refReg) }
    rr.initialize(split, ctx)
    rr
  }

  /**
   * Computes input splits from the BAM index: only the index chunks that
   * overlap the configured region become splits.
   *
   * @throws FileNotFoundException if the configured index file does not exist
   */
  override def getSplits(job: JobContext): java.util.List[InputSplit] = {
    assert(IndexedBamInputFormat.optIndexFilePath.isDefined &&
      IndexedBamInputFormat.optFilePath.isDefined &&
      IndexedBamInputFormat.optViewRegion.isDefined &&
      IndexedBamInputFormat.optDict.isDefined)
    val indexFilePath = IndexedBamInputFormat.optIndexFilePath.get
    val idxFile: File = new File(indexFilePath.toString)
    if (!idxFile.exists()) {
      throw new java.io.FileNotFoundException("Bam index file not provided")
    } else {
      // Use index to get the chunks for a specific region, then use them to create InputSplits
      val filePath = IndexedBamInputFormat.optFilePath.get
      val viewRegion = IndexedBamInputFormat.optViewRegion.get
      val refName = viewRegion.referenceName
      val dict = IndexedBamInputFormat.optDict.get
      val start = viewRegion.start.toInt
      val end = viewRegion.end.toInt
      val dbbfi: DiskBasedBAMFileIndex = new DiskBasedBAMFileIndex(idxFile, dict)
      val referenceIndex: Int = dict.getSequenceIndex(refName)
      // Get the chunks in the region we want (chunks give start and end file pointers into a BAM file)
      // Fix: `regions` and `splits` were declared as `var` but never reassigned — now `val`.
      val regions: List[Chunk] = dbbfi.getSpanOverlapping(referenceIndex, start, end).getChunks
      val splits = new mutable.ListBuffer[FileVirtualSplit]()
      for (chunk <- regions) {
        // Create InputSplits from chunks in a given region
        val start: Long = chunk.getChunkStart()
        val end: Long = chunk.getChunkEnd()
        val locs = Array[String]()
        val newSplit = new FileVirtualSplit(filePath, start, end, locs)
        splits += newSplit
      }
      splits.toList
    }
  }
}
| rnpandya/adam | adam-core/src/main/java/org/bdgenomics/adam/io/IndexedBamInputFormat.scala | Scala | apache-2.0 | 4,280 |
package com.arcusys.valamis.hook
import com.liferay.portal.DuplicateGroupException
import com.liferay.portal.kernel.dao.orm.{RestrictionsFactoryUtil, QueryUtil}
import com.liferay.portal.kernel.events.SimpleAction
import com.liferay.portal.kernel.log.{Log, LogFactoryUtil}
import com.liferay.portal.model._
import com.liferay.portal.service._
import com.liferay.portal.service.permission.PortletPermissionUtil
import com.liferay.portal.util.PortalUtil
import scala.collection.JavaConverters._
import scala.collection.JavaConversions._
/**
 * Liferay startup action that creates a "Valamis" site with a dashboard page,
 * applies the Valamis theme, and adds every existing user to the site as a
 * site member. If the site already exists the action is a no-op.
 */
class CreateDashboardAction extends SimpleAction {
  private val _log: Log = LogFactoryUtil.getLog(classOf[CreateDashboardAction])
  override def run(companyIds: Array[String]): Unit = {
    _log.info("CREATE VALAMIS SITE WITH DASHBOARD")
    val defaultCompanyId = PortalUtil.getDefaultCompanyId
    val defaultUserId = UserLocalServiceUtil.getDefaultUserId(defaultCompanyId)
    try {
      // Order matters: site first, then theme, then the dashboard page on it.
      val siteGroup = addSite(defaultUserId)
      val siteGroupId = siteGroup.getGroupId
      setupTheme(siteGroupId)
      setupDashboardPage(siteGroupId, defaultUserId)
      // Grant the "Site Member" role on the new site to every existing user.
      val allUsers = UserLocalServiceUtil.getUsers(QueryUtil.ALL_POS, QueryUtil.ALL_POS).asScala
      val roles = RoleLocalServiceUtil.getTypeRoles(RoleConstants.TYPE_SITE).asScala
      val memberRole = roles.filter(role => role.getName.equals(RoleConstants.SITE_MEMBER)).head
      val userIds = allUsers.map(user => user.getUserId).toArray
      UserGroupRoleLocalServiceUtil.addUserGroupRoles(userIds, siteGroupId, memberRole.getRoleId)
    } catch {
      // Thrown by addSite when the site was created on a previous startup.
      case ex: DuplicateGroupException => _log.info("Valamis site already exists")
      // NOTE(review): getStackTraceString is deprecated and this swallows the
      // failure after logging — confirm that startup should continue here.
      case ex: Exception => _log.error(ex.getStackTraceString)
    }
  }
  /**
   * Applies the Valamis theme to the site: links any "Valamis Theme" layout-set
   * prototypes and sets the look-and-feel for both public and private layout sets.
   */
  def setupTheme(siteGroupId: Long): LayoutSet = {
    val valamisThemeName = "Valamis Theme"
    val valamisThemeId = "valamistheme_WAR_valamistheme"
    val layoutSetPrototypes = LayoutSetPrototypeLocalServiceUtil.getLayoutSetPrototypes(QueryUtil.ALL_POS, QueryUtil.ALL_POS).asScala
      .filter(c => c.getName(c.getDefaultLanguageId).contains(valamisThemeName))
    for (layoutSetPrototype <- layoutSetPrototypes) {
      LayoutSetLocalServiceUtil
        .updateLayoutSetPrototypeLinkEnabled(siteGroupId, false, true, layoutSetPrototype.getUuid)
      // /* DO NOT REMOVE. Is used to tell liferay to REALLY UPDATE Layouts */
      // val query = LayoutLocalServiceUtil.dynamicQuery()
      // .add(RestrictionsFactoryUtil.eq("groupId", siteGroupId))
      // .add(RestrictionsFactoryUtil.eq("privateLayout", false))
      // .add(RestrictionsFactoryUtil.eq("parentLayoutId", LayoutConstants.DEFAULT_PARENT_LAYOUT_ID))
      // LayoutLocalServiceUtil.dynamicQuery(query)
      // NOTE(review): the result below is unused; presumably the getLayouts call
      // is kept for its side effect (forcing Liferay to refresh layouts, cf. the
      // retained commented-out block above) — confirm before removing.
      val newLayouts = LayoutLocalServiceUtil
        .getLayouts(siteGroupId, false, LayoutConstants.DEFAULT_PARENT_LAYOUT_ID)
    }
    LayoutSetLocalServiceUtil
      .updateLookAndFeel(siteGroupId, false, valamisThemeId, "", "", false)
    // The private-layout call is last, so its LayoutSet is the returned value.
    LayoutSetLocalServiceUtil
      .updateLookAndFeel(siteGroupId, true, valamisThemeId, "", "", false)
  }
  /**
   * Creates the open "Valamis" site at /valamis.
   *
   * @throws DuplicateGroupException if a group with this name already exists
   */
  private def addSite(userId: Long): Group = {
    val groupType = GroupConstants.TYPE_SITE_OPEN
    val parentGroupId = GroupConstants.DEFAULT_PARENT_GROUP_ID
    val liveGroupId = GroupConstants.DEFAULT_LIVE_GROUP_ID
    val membershipRestriction = GroupConstants.DEFAULT_MEMBERSHIP_RESTRICTION
    val siteTitle = "Valamis"
    val description = ""
    val manualMembership = true
    val siteUrl = "/valamis"
    val isSite = true
    val isActive = true
    val serviceContext: ServiceContext = new ServiceContext
    serviceContext.setAddGuestPermissions(true)
    val group = GroupLocalServiceUtil.addGroup(
      userId,
      parentGroupId,
      classOf[Group].getName,
      0, //classPK
      liveGroupId,
      siteTitle,
      description,
      groupType,
      manualMembership,
      membershipRestriction,
      siteUrl,
      isSite,
      isActive,
      serviceContext)
    group
  }
  /**
   * Adds the "Dashboard" page, removes the default /home page if present,
   * applies the student-dashboard layout template and places the Valamis
   * portlets into its columns.
   */
  private def setupDashboardPage(groupId: Long, userId: Long) {
    _log.info("Create dashboard page")
    val dashboardLayout = addLayout(groupId, userId, "Dashboard")
    try{
      removeLayout(groupId, false, "/home")
    }catch {
      // Best-effort: /home may not exist; its absence is not an error.
      case e:Throwable =>
    }
    val layoutTypePortlet = dashboardLayout.getLayoutType.asInstanceOf[LayoutTypePortlet]
    layoutTypePortlet.setLayoutTemplateId(userId, "valamisStudentDashboard")
    updateLayout(dashboardLayout)
    // Second argument is the template column each portlet is placed into.
    addPortletId(dashboardLayout, "ValamisStudySummary_WAR_learnportlet", "valamisStudySummary")
    addPortletId(dashboardLayout, "LearningPaths_WAR_learnportlet", "learningPaths")
    addPortletId(dashboardLayout, "MyLessons_WAR_learnportlet", "lessons")
    addPortletId(dashboardLayout, "RecentLessons_WAR_learnportlet", "recent")
    addPortletId(dashboardLayout, "AchievedCertificates_WAR_learnportlet", "achievedCertificates")
    addPortletId(dashboardLayout, "ValamisActivities_WAR_learnportlet", "valamisActivities")
  }
  /** Deletes the layout found at the given friendly URL. */
  private def removeLayout(groupId: Long, isPrivate: Boolean, friendlyUrl: String) = {
    val homeLayout = LayoutLocalServiceUtil.getFriendlyURLLayout(groupId, isPrivate, friendlyUrl)
    LayoutLocalServiceUtil.deleteLayout(homeLayout)
  }
  /** Creates a public, top-level portlet page named `name` at /dashboard. */
  private def addLayout(groupId: Long, userId: Long, name: String): Layout = {
    val serviceContext: ServiceContext = new ServiceContext
    serviceContext.setAddGuestPermissions(true)
    val parentLayout = LayoutConstants.DEFAULT_PARENT_LAYOUT_ID
    val title = ""
    val description = ""
    val layoutType = LayoutConstants.TYPE_PORTLET
    val friendlyURL = "/dashboard"
    val isPrivate = false
    LayoutLocalServiceUtil.addLayout(
      userId,
      groupId,
      isPrivate,
      parentLayout,
      name,
      title,
      description,
      layoutType,
      false, //hidden
      friendlyURL,
      serviceContext
    )
  }
  /**
   * Adds a portlet instance to `columnId` of the layout, sets up its resource
   * permissions and persists the layout. Returns the new portlet instance id.
   */
  protected def addPortletId(layout: Layout, portletId: String, columnId: String) = {
    val layoutTypePortlet = layout.getLayoutType.asInstanceOf[LayoutTypePortlet]
    val newPortletId = layoutTypePortlet.addPortletId(0, portletId, columnId, -1, false)
    addResources(layout, newPortletId)
    updateLayout(layout)
    newPortletId
  }
  /** Registers default resource permissions for a portlet placed on the layout. */
  protected def addResources(layout: Layout, portletId: String) {
    val rootPortletId = PortletConstants.getRootPortletId(portletId)
    val portletPrimaryKey = PortletPermissionUtil.getPrimaryKey(layout.getPlid, portletId)
    ResourceLocalServiceUtil.addResources(
      layout.getCompanyId,
      layout.getGroupId,
      0, //userId
      rootPortletId, portletPrimaryKey, true, true, true)
  }
  /** Persists the layout's current type settings. */
  protected def updateLayout(layout: Layout) {
    LayoutLocalServiceUtil.updateLayout(layout.getGroupId, layout.isPrivateLayout, layout.getLayoutId, layout.getTypeSettings)
  }
}
package com.rocketfuel.sdbc.sqlserver.jdbc
/**
* Output of this test:
* int: int
* bit: bit
* tinyint: tinyint
* smallint: smallint
* bigint: bigint
* decimal: decimal
* float: float
* real: real
* time: nvarchar
* date: nvarchar
* smalldatetime: smalldatetime
* datetime: datetime
* datetime2: nvarchar
* datetimeoffset: nvarchar
* binary: binary
* varbinary: image
* image: image
* char: char
* nchar: nchar
* varchar: varchar
* nvarchar: nvarchar
* text: text
* uniqueidentifier: uniqueidentifier
* hierarchy: varbinary
* money: money
* smallmoney: smallmoney
* xml: ntext
*/
/**
 * Exploratory spec that prints the JDBC type name SQL Server reports for each
 * column type. Deliberately ignored; the observed output is recorded in the
 * comment block above.
 */
class EnumerateTypesSpec extends SqlServerSuite {
  ignore("list type map") { implicit connection =>
    // Create one column of every type we care about.
    Update(
      """CREATE TABLE tbl (
        | i int,
        | bo bit,
        | ti tinyint,
        | si smallint,
        | bi bigint,
        | de decimal(3,1),
        | fl float,
        | re real,
        | t time,
        | da date,
        | sts smalldatetime,
        | ts datetime,
        | ts2 datetime2,
        | ts3 datetimeoffset,
        | bin binary(1),
        | varb varbinary(max),
        | im image,
        | c char(1),
        | nc nchar(1),
        | varc varchar(1),
        | nvarc nvarchar(1),
        | te text,
        | u uniqueidentifier,
        | h hierarchyid,
        | mo money,
        | smm smallmoney,
        | x xml
        |)
      """.stripMargin
    ).update()
    // Read back the result-set metadata and dump column name -> reported type.
    val resultSet = connection.prepareStatement("SELECT * FROM tbl").executeQuery()
    val meta = resultSet.getMetaData
    println("Map(")
    (1 to meta.getColumnCount).foreach { column =>
      println(s"${meta.getColumnName(column)} -> ${meta.getColumnTypeName(column)},")
    }
    println(")")
  }
}
| wdacom/sdbc | sqlserver/src/test/scala/com/rocketfuel/sdbc/sqlserver/jdbc/EnumerateTypesSpec.scala | Scala | bsd-3-clause | 1,717 |
/* *\
** \ \ / _) \ \ / \ | **
** \ \ / | __ \ _ \ __| \ \ / |\/ | **
** \ \ / | | | __/ | \ \ / | | **
** \_/ _| .__/ \___| _| \_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\* */
package org.vipervm.platform.host
import org.vipervm.platform.{HostBuffer,HostMemoryNode}
import com.sun.jna.Memory
/**
 * Default host memory node backed by the system allocator (via JNA buffers).
 *
 * Specialized implementations could replace this one, e.g. for NUMA
 * architectures or to use a different malloc.
 */
class DefaultHostMemoryNode extends HostMemoryNode {

  type BufferType = DefaultHostBuffer

  /**
   * Allocates a buffer of `size` bytes in host memory and registers it with
   * this node.
   *
   * @param size size of the buffer, in bytes
   */
  def allocate(size: Long): DefaultHostBuffer = {
    val allocated = new DefaultHostBuffer(size, this)
    buffers += allocated
    allocated
  }

  /** Releases a previously allocated buffer. */
  def free(buffer: DefaultHostBuffer): Unit = {
    //TODO: free memory effectively (call real C free)
  }

  /** Free physical memory reported by the JVM's OS management bean, in bytes. */
  def availableMemory: Long =
    java.lang.management.ManagementFactory.getOperatingSystemMXBean
      .asInstanceOf[com.sun.management.OperatingSystemMXBean]
      .getFreePhysicalMemorySize
}
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/platform/host/DefaultHostMemoryNode.scala | Scala | gpl-3.0 | 1,721 |
/*
* Copyright 2012 The Clustermeister Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nethad.clustermeister.integration.sc07
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.dispatch.Await
import akka.japi.Creator
import akka.util.Timeout
import akka.util.duration._
import com.github.nethad.clustermeister.api.Clustermeister
import com.github.nethad.clustermeister.api.impl.ClustermeisterFactory
import collection.JavaConversions._
import akka.pattern.ask
import com.typesafe.config.Config
import java.util.concurrent.Callable
import com.signalcollect.nodeprovisioning.torque._
import com.signalcollect.configuration.AkkaConfig
import com.signalcollect.implementations.messaging.AkkaProxy
import com.signalcollect.nodeprovisioning._
// Integration scenario: starts a local NodeProvisioner actor system, asks each
// Clustermeister node to load the signal-collect Request class remotely, then
// collects the resulting node actors. Clustermeister is always shut down at the end.
object LoadRequestScenario extends App {
  // NOTE(review): cm is assigned Some(...) unconditionally, so the isDefined
  // checks below can never be false (and if create() throws, the try/finally
  // has not been entered yet) — confirm the intended error handling.
  var cm = Some(ClustermeisterFactory.create)
  try {
    println("Start Clustermeister")
    if (cm.isDefined) {
      val numberOfNodes = cm.get.getAllNodes.size
      val system: ActorSystem = ActorSystem("NodeProvisioner", AkkaConfig.get)
      val nodeProvisionerCreator = NodeProvisionerCreator(numberOfNodes)
      val nodeProvisioner = system.actorOf(Props().withCreator(nodeProvisionerCreator.create), name = "NodeProvisioner")
      // NOTE(review): the remote address is computed but never used below.
      val nodeProvisionerAddress = AkkaHelper.getRemoteAddress(nodeProvisioner, system)
      implicit val timeout = new Timeout(1800 seconds)
      // Trigger the class-loading callable on every node; .get blocks per node.
      for (node <- cm.get.getAllNodes) {
        val actorNameFuture = node.execute(new LoadRequestCallable())
        println("Started node controller: " + actorNameFuture.get)
      }
      // Block until the provisioner has seen all expected nodes.
      val nodesFuture = nodeProvisioner ? "GetNodes"
      val result = Await.result(nodesFuture, timeout.duration)
      // Wrap each node ActorRef in a typed AkkaProxy; this is also the value
      // of the if-expression (otherwise discarded).
      val nodes: List[Node] = result.asInstanceOf[List[ActorRef]] map (AkkaProxy.newInstance[Node](_))
      nodes
    } else {
      throw new Exception("Clustermeister could not be initialized.")
    }
  } finally {
    // Always release Clustermeister resources, even on failure.
    if (cm.isDefined) {
      cm.get.shutdown
    }
  }
}
/**
 * Callable shipped to a remote node to verify that the signal-collect
 * Request class can be loaded there; returns the loaded class's name.
 */
case class LoadRequestCallable() extends Callable[String] {
  def call: String = {
    println("Before loading Request.")
    val className = "com.signalcollect.implementations.messaging.Request"
    val loaded = Class.forName(className)
    println("After loading Request.")
    loaded.getName
  }
}
| nethad/clustermeister | integration-tests/src/main/scala/com/github/nethad/clustermeister/integration/sc07/LoadRequestScenario.scala | Scala | apache-2.0 | 2,796 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.spark.testsuite.filterexpr
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.CarbonHiveContext._
import org.apache.spark.sql.common.util.QueryTest
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.scalatest.BeforeAndAfterAll
// Verifies IS NULL / IS NOT NULL filters on a nullable measure column (salary)
// loaded from a CSV containing null measure values.
class NullMeasureValueTestCaseFilter extends QueryTest with BeforeAndAfterAll {
  override def beforeAll {
    sql("drop table if exists t3")
    sql(
      "CREATE TABLE t3 (ID bigInt, date Timestamp, country String, name String, " +
      "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata.format'"
    )
    // Match the timestamp format used in the test CSV before loading.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/mm/dd")
    sql("LOAD DATA LOCAL INPATH './src/test/resources/datawithnullmeasure.csv' into table t3");
  }
  // Rows 1 and 4 of the fixture have non-null salaries.
  test("select ID from t3 where salary is not null") {
    checkAnswer(
      sql("select ID from t3 where salary is not null"),
      Seq(Row(1),Row(4)))
  }
  // Rows 2 and 3 of the fixture have null salaries.
  test("select ID from t3 where salary is null") {
    checkAnswer(
      sql("select ID from t3 where salary is null"),
      Seq(Row(2),Row(3)))
  }
  override def afterAll {
    sql("drop table t3")
    // Restore the default timestamp format changed in beforeAll.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }
}
| Zhangshunyu/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/NullMeasureValueTestCaseFilter.scala | Scala | apache-2.0 | 2,213 |
/*
* Copyright 2017 Benedikt Ritter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.britter.reactive.chirps
import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
object ReactiveChirps {
  // Canonical actor name for the application's root actor.
  final val Name = "reactive-chirps"
  // Props factory; keeps actor construction details out of calling code.
  def props: Props = Props(new ReactiveChirps)
}
/**
 * Root actor: creates and watches the chirps actor and the HTTP service,
 * and shuts the whole system down if either of them terminates.
 */
class ReactiveChirps extends Actor with ActorLogging with ActorSettings {

  // Stop (rather than restart) failing children; death is then handled via watch.
  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  // Creation order matters: the HTTP service needs the chirps actor reference.
  private val chirpsActor = context.watch(createChirps())
  private val httpServiceActor = context.watch(createHttpService())

  log.info("Up and running")

  override def receive = {
    case Terminated(actor) => escalateShutdown(actor)
  }

  protected def createChirps(): ActorRef = context.actorOf(Chirps.props, Chirps.Name)

  protected def createHttpService(): ActorRef = {
    import settings.httpService._
    context.actorOf(HttpService.props(interface, port, timeout, chirpsActor), HttpService.Name)
  }

  // Any watched child's termination brings the whole system down.
  private def escalateShutdown(actor: ActorRef) = {
    log.error("Terminating the system because {} terminated!", actor)
    context.system.terminate()
  }
}
}
| britter/reactive-chirps | src/main/scala/com/github/britter/reactive/chirps/ReactiveChirps.scala | Scala | apache-2.0 | 1,672 |
package example1
// A tweet: its author, extracted entities, and — when this tweet is a
// retweet — the original status.
case class Tweet(
  user: User,
  entities: Entities,
  retweeted_status: Option[Tweet])
// Entities extracted from a tweet's text: mentioned users and hashtags.
case class Entities(
  user_mentions: Set[User],
  hashtags: Set[Hashtag])
/** A Twitter user; renders as "@screen_name". */
case class User(screen_name: String) {
  override def toString = "@" + screen_name
}
/** A hashtag; renders as "#text". */
case class Hashtag(text: String) {
  override def toString = "#" + text
}
| socialmetrix/spark-javaconf | src/main/scala/example1/Tweet.scala | Scala | mit | 342 |
/** Minimal entry point used to run arbitrary experimental code. */
object Main {
  def main(args: Array[String]): Unit = {
    val greeting = "hello world"
    println(greeting)
  }
}
| papauschek/cointape | tools/src/main/scala/Main.scala | Scala | mit | 135 |
/*
* Copyright (c) 2019 Georgios Andreadakis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.tap.accepttest.importdoc.api
import org.tap.application.idgeneration.IdGenerator
import org.tap.application.importdoc.{DocImporter, DocumentParser}
import org.tap.domain.{Document, DocumentRepository, DocumentSource}
import org.tap.framework.idgeneration.UuidBasedIdGeneration
import org.tap.framework.parser.tika.DocumentParserTika
import org.tap.framework.persistence.elastic.DocumentRepositoryForElastic
/**
 * Test wiring for the document import API: binds the Tika parser, the
 * Elasticsearch-backed repository and UUID-based id generation to the importer.
 *
 * @author Georgios Andreadakis (georgios@andreadakis-consulting.de)
 */
case class DocImportTestContext(source: DocumentSource) {

  private val tikaParser: DocumentParser = new DocumentParserTika
  private val repository: DocumentRepository = new DocumentRepositoryForElastic
  private val idGen: IdGenerator = new UuidBasedIdGeneration

  /** Runs a full import of `source` through a freshly wired importer. */
  def importFile(): Document =
    new DocImporter(repository, tikaParser, idGen).importFile(source)

  /** All documents currently held by the repository. */
  def allDocs: Either[Exception, List[Document]] = repository.allDocs
}
| GeorgiosAndreadakis/TextAnalyzerPlatform | test/org/tap/accepttest/importdoc/api/DocImportTestContext.scala | Scala | apache-2.0 | 1,621 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo
import java.io._
import java.lang.{Double => jDouble, Integer => jInt}
import java.util.zip.{ZipEntry, ZipOutputStream}
import java.util.{Date, Iterator => jIterator}
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom._
import org.apache.commons.csv.{CSVFormat, CSVRecord}
import org.apache.commons.io.FilenameUtils
import org.geotools.data.DefaultTransaction
import org.geotools.data.shapefile.{ShapefileDataStore, ShapefileDataStoreFactory}
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureStore}
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.locationtech.geomesa.accumulo.csv.CSVParser._
import org.locationtech.geomesa.utils.geotools.{SftBuilder, SimpleFeatureTypes}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
import scala.io.Source
// Inferred feature type: its name, the SimpleFeatureType spec string, and the
// optional names of the latitude/longitude fields to combine into a point.
case class TypeSchema(name: String, schema: String, latLonFields: Option[(String, String)])
/**
 * CSV ingestion helpers: type inference from sample records, CSV-to-feature
 * conversion, and shapefile/zip export.
 */
package object csv extends LazyLogging {

  /**
   * Infers a [[TypeSchema]] from a CSV file; the type name is the file's base
   * name. The reader is closed even when inference fails (the previous
   * version leaked it on exception).
   */
  def guessTypes(csvFile: File, hasHeader: Boolean): TypeSchema = {
    val typename = FilenameUtils.getBaseName(csvFile.getName)
    val reader = Source.fromFile(csvFile).bufferedReader()
    try {
      guessTypes(typename, reader, hasHeader)
    } finally {
      reader.close()
    }
  }

  /** Parses a CSV file into a feature collection using the given type schema. */
  def csvToFeatures(csvFile: File,
                    hasHeader: Boolean,
                    typeSchema: TypeSchema): SimpleFeatureCollection = {
    val sft = SimpleFeatureTypes.createType(typeSchema.name, typeSchema.schema)
    buildFeatureCollection(csvFile, hasHeader, sft, typeSchema.latLonFields)
  }

  // For each raw value, picks the first parser that succeeds; falls back to StringParser.
  protected[csv] def tryParsers(rawData: TraversableOnce[String]): Seq[CSVParser[_]] = {
    def tryAllParsers(datum: String) =
      CSVParser.parsers.find(_.parse(datum).isSuccess).getOrElse(StringParser)
    rawData.map(tryAllParsers).toSeq
  }

  // Header names from the first record, or synthesized C0..Cn when there is no header row.
  protected[csv] def guessHeaders(record: CSVRecord, hasHeader: Boolean = true): Seq[String] =
    if (hasHeader) record.toSeq else Seq.tabulate(record.size())(n => s"C$n")

  /**
   * Infers a schema from up to `numSamples` sampled records. A column keeps an
   * inferred type only if every sampled record agrees on it; otherwise it
   * degrades to String. The first geometry-typed column becomes the default geometry.
   *
   * NOTE(review): the first record is always excluded from sampling, even when
   * hasHeader is false (it is then only used to derive column names) — confirm
   * this is intended.
   */
  protected[csv] def guessTypes(name: String,
                                csvReader: Reader,
                                hasHeader: Boolean = true,
                                format: CSVFormat = CSVFormat.DEFAULT,
                                numSamples: Int = 5): TypeSchema = {
    assert(numSamples > 0)
    val records = format.parse(csvReader).iterator.take(numSamples + 1).toSeq
    assert(records.size > 1 || (!hasHeader && records.size > 0))
    val headers = guessHeaders(records(0), hasHeader)
    val sample = records.drop(1)
    // make sure type chars are valid for first few records
    val parsers = sample.map(tryParsers(_)).reduceLeft { (pc1, pc2) =>
      pc1.zip(pc2).map { case (p1, p2) => if (p1 == p2) p1 else StringParser }
    }
    val sftb = new SftBuilder
    var defaultGeomSet = false
    headers.zip(parsers).foreach { case (field, parser) =>
      if (!defaultGeomSet && parser.isGeom) {
        parser.buildSpec(sftb, field, true)
        defaultGeomSet = true
      } else {
        parser.buildSpec(sftb, field)
      }
    }
    TypeSchema(name, sftb.getSpec, None)
  }

  // Maps an attribute binding class to the parser able to read it; unknown types parse as strings.
  protected[csv] def getParser[A](clas: Class[A]) = clas match {
    case c if c.isAssignableFrom(classOf[jInt])            => IntParser
    case c if c.isAssignableFrom(classOf[jDouble])         => DoubleParser
    case c if c.isAssignableFrom(classOf[Date])            => TimeParser
    case c if c.isAssignableFrom(classOf[Point])           => PointParser
    case c if c.isAssignableFrom(classOf[LineString])      => LineStringParser
    case c if c.isAssignableFrom(classOf[Polygon])         => PolygonParser
    case c if c.isAssignableFrom(classOf[MultiPoint])      => MultiPointParser
    case c if c.isAssignableFrom(classOf[MultiLineString]) => MultiLineStringParser
    case c if c.isAssignableFrom(classOf[MultiPolygon])    => MultiPolygonParser
    case c if c.isAssignableFrom(classOf[Geometry])        => GeometryParser
    case c if c.isAssignableFrom(classOf[String])          => StringParser
    case _                                                 => StringParser
  }

  // Shared factory for building lat/lon points.
  val gf = new GeometryFactory

  /** File-based variant of [[buildFeatureCollection]]; always closes the reader. */
  protected[csv] def buildFeatureCollection(csvFile: File,
                                            hasHeader: Boolean,
                                            sft: SimpleFeatureType,
                                            latlonFields: Option[(String, String)]): SimpleFeatureCollection = {
    val reader = Source.fromFile(csvFile).bufferedReader()
    try {
      buildFeatureCollection(reader, hasHeader, sft, latlonFields)
    } finally {
      reader.close()
    }
  }

  /**
   * Builds one feature from a CSV record, optionally deriving a point geometry
   * from the lat/lon column indexes. Returns None (and logs) on parse failure.
   */
  def buildFeature(record: CSVRecord,
                   fb: SimpleFeatureBuilder,
                   parsers: Seq[CSVParser[_<:AnyRef]],
                   lli: Option[(Int, Int)]): Option[SimpleFeature] =
    try {
      val fieldVals = record.iterator.toIterable.zip(parsers).map { case (v, p) => p.parse(v).get }.toArray
      fb.addAll(fieldVals)
      for ((lati, loni) <- lli) {
        val lat = fieldVals(lati).asInstanceOf[jDouble] // should be Doubles, as verified
        val lon = fieldVals(loni).asInstanceOf[jDouble] // when determining latlonIdx
        fb.add(gf.createPoint(new Coordinate(lon, lat)))
      }
      Some(fb.buildFeature(null))
    } catch {
      case ex: Exception => logger.info(s"Failed to parse CSV record:\\n$record"); None
    }

  // if the types in sft do not match the data in the reader, the resulting FeatureCollection will be empty.
  protected[csv] def buildFeatureCollection(reader: Reader,
                                            hasHeader: Boolean,
                                            sft: SimpleFeatureType,
                                            latlonFields: Option[(String, String)]): SimpleFeatureCollection = {
    // Resolves a named lat/lon field to its index, requiring a Double binding.
    def idxOfField(fname: String): Int = {
      sft.getType(fname)
      val idx = sft.indexOf(fname)
      if (idx > -1) {
        val t = sft.getType(idx)
        if (t.getBinding == classOf[java.lang.Double]) idx
        else throw new IllegalArgumentException(s"field $fname is not a Double field")
      } else throw new IllegalArgumentException(s"could not find field $fname")
    }

    val latlonIdx = latlonFields.map { case (latf, lonf) => (idxOfField(latf), idxOfField(lonf)) }
    val fb = new SimpleFeatureBuilder(sft)
    val parsers = sft.getTypes.map(t => getParser(t.getBinding))
    val fc = new DefaultFeatureCollection
    val records = CSVFormat.DEFAULT.parse(reader).iterator()
    if (hasHeader) { records.next } // burn off header rather than try (and fail) to parse it.
    for {
      record <- records
      // logs and discards lines that fail to parse but keeps processing
      feature <- buildFeature(record, fb, parsers, latlonIdx)
    } {
      fc.add(feature)
    }
    fc
  }

  private val dsFactory = new ShapefileDataStoreFactory

  // Creates a new shapefile datastore for the given file and registers the schema.
  private def shpDataStore(shpFile: File, sft: SimpleFeatureType): ShapefileDataStore = {
    val params =
      Map("url" -> shpFile.toURI.toURL,
          "create spatial index" -> java.lang.Boolean.FALSE)
    val shpDS = dsFactory.createNewDataStore(params).asInstanceOf[ShapefileDataStore]
    shpDS.createSchema(sft)
    shpDS
  }

  // Writes the features inside a transaction; rolls back on any failure.
  private def writeFeatures(fc: SimpleFeatureCollection, shpFS: SimpleFeatureStore) {
    val transaction = new DefaultTransaction("create")
    shpFS.setTransaction(transaction)
    try {
      shpFS.addFeatures(fc)
      transaction.commit()
    } catch {
      case ex: Throwable =>
        transaction.rollback()
        throw ex
    } finally {
      transaction.close()
    }
  }

  /**
   * Zips the shapefile sidecar files (.dbf/.fix/.prj/.shp/.shx) next to
   * `shpFile`, deletes the originals, and returns the zip file. Streams in
   * fixed-size chunks (the previous version materialized each whole file in
   * memory as a List[Int]).
   */
  private def writeZipFile(shpFile: File): File = {
    val dir = shpFile.getParent
    val rootName = FilenameUtils.getBaseName(shpFile.getName)
    val extensions = Seq("dbf", "fix", "prj", "shp", "shx")
    val files = extensions.map(ext => new File(dir, s"$rootName.$ext"))
    val zipFile = new File(dir, s"$rootName.zip")
    val zip = new ZipOutputStream(new FileOutputStream(zipFile))
    try {
      for (file <- files if file.exists) {
        zip.putNextEntry(new ZipEntry(file.getName))
        val in = new FileInputStream(file.getCanonicalFile)
        try {
          val buffer = new Array[Byte](8192)
          var read = in.read(buffer)
          while (read > -1) {
            zip.write(buffer, 0, read)
            read = in.read(buffer)
          }
        } finally {
          in.close()
        }
        zip.closeEntry()
      }
    } finally {
      zip.close()
    }
    for (file <- files) file.delete()
    zipFile
  }

  /**
   * End-to-end ingest: parses the CSV with the given schema, writes the
   * features to a shapefile next to the CSV, and returns a zip of the result.
   */
  def ingestCSV(csvFile: File,
                hasHeader: Boolean,
                name: String,
                schema: String,
                latlonFields: Option[(String, String)] = None): File = {
    val sft = SimpleFeatureTypes.createType(name, schema)
    val fc = buildFeatureCollection(csvFile, hasHeader, sft, latlonFields)
    val shpFile = new File(csvFile.getParentFile, s"${FilenameUtils.getBaseName(csvFile.getName)}.shp")
    val shpDS = shpDataStore(shpFile, sft)
    val shpFS = shpDS.getFeatureSource(name).asInstanceOf[SimpleFeatureStore]
    writeFeatures(fc, shpFS)
    writeZipFile(shpFile)
  }
}
| vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/csv/package.scala | Scala | apache-2.0 | 9,622 |
package com.seanshubin.cron.format.domain
import scala.collection.mutable.ArrayBuffer
// A month value. Each instance registers itself in the companion's buffer as a
// constructor side effect, which is how MonthEnum.values gets populated.
sealed abstract case class MonthEnum(ordinal: Int, name: String, shortName: String) {
  MonthEnum.valuesBuffer += this
}
object MonthEnum {
  // Populated by the MonthEnum constructor; order of the vals below determines
  // the order of `values`.
  private val valuesBuffer = new ArrayBuffer[MonthEnum]
  // Lazy so it is only snapshotted after all twelve constants have initialized;
  // forcing it earlier (during object initialization) would capture fewer values.
  lazy val values = valuesBuffer.toSeq
  val January = new MonthEnum(1, "January", "jan") {}
  val February = new MonthEnum(2, "February", "feb") {}
  val March = new MonthEnum(3, "March", "mar") {}
  val April = new MonthEnum(4, "April", "apr") {}
  val May = new MonthEnum(5, "May", "may") {}
  val June = new MonthEnum(6, "June", "jun") {}
  val July = new MonthEnum(7, "July", "jul") {}
  val August = new MonthEnum(8, "August", "aug") {}
  val September = new MonthEnum(9, "September", "sep") {}
  val October = new MonthEnum(10, "October", "oct") {}
  val November = new MonthEnum(11, "November", "nov") {}
  val December = new MonthEnum(12, "December", "dec") {}
}
| SeanShubin/cron-format | domain/src/main/scala/com/seanshubin/cron/format/domain/MonthEnum.scala | Scala | unlicense | 957 |
package au.com.dius.pact.provider
import java.net.URI
package object scalatest {

  /** Filter deciding which consumer pacts are verified against the provider. */
  trait Consumer {
    val filter: ConsumerInfo => Boolean
  }

  /**
   * Matching consumer pacts will be allowed to run against the provider.
   *
   * Note: implicit definitions should carry an explicit result type; the
   * previous version relied on inference.
   *
   * @param consumer exact consumer name to match
   * @return a [[Consumer]] matching only consumers with the given name
   */
  implicit def strToConsumer(consumer: String): Consumer = new Consumer {
    override val filter = (consumerInfo: ConsumerInfo) => consumerInfo.getName == consumer
  }

  /**
   * @param provider which provider pact should be tested
   * @param consumer which consumer pact should be tested
   * @param uri where is the pact
   */
  case class Pact(provider: String, consumer: Consumer, uri: URI)

  case class ServerConfig(serverStarter: ServerStarter, restartServer: Boolean = false)

  case class VerificationConfig(pact: Pact, serverConfig: ServerConfig)
}
| Fitzoh/pact-jvm | pact-jvm-provider-scalatest/src/main/scala/au/com/dius/pact/provider/scalatest/package.scala | Scala | apache-2.0 | 839 |
package play.api.data
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import play.api.data.format.Formats._
import org.specs2.mutable.Specification
import org.joda.time.{DateTime, LocalDate}
/**
 * Specs for [[play.api.data.Form]]: binding, filling, validation constraints
 * and error reporting, using the fixture forms defined in [[ScalaForms]].
 */
object FormSpec extends Specification {

  "A form" should {

    "have an error due to a malformed email" in {
      val f5 = ScalaForms.emailForm.fillAndValidate("john@", "John")
      f5.errors.size must equalTo (1)
      f5.errors.find(_.message == "error.email") must beSome

      val f6 = ScalaForms.emailForm.fillAndValidate("john@zen.....com", "John")
      f6.errors.size must equalTo (1)
      f6.errors.find(_.message == "error.email") must beSome
    }

    "be valid with a well-formed email" in {
      val f7 = ScalaForms.emailForm.fillAndValidate("john@zen.com", "John")
      f7.errors.size must equalTo (0)

      val f8 = ScalaForms.emailForm.fillAndValidate("john@zen.museum", "John")
      f8.errors.size must equalTo (0)

      val f9 = ScalaForms.emailForm.fillAndValidate("john@mail.zen.com", "John")
      f9.errors.size must equalTo(0)

      // Apostrophes are legal in the local part of an address.
      ScalaForms.emailForm.fillAndValidate("o'flynn@example.com", "O'Flynn").errors must beEmpty
    }

    "apply constraints on wrapped mappings" in {

      "when it binds data" in {
        val f1 = ScalaForms.form.bind(Map("foo"->"0"))
        f1.errors.size must equalTo (1)
        f1.errors.find(_.message == "first.digit") must beSome

        val f2 = ScalaForms.form.bind(Map("foo"->"3"))
        f2.errors.size must equalTo (0)

        val f3 = ScalaForms.form.bind(Map("foo"->"50"))
        f3.errors.size must equalTo (1) // Only one error because "number.42" can’t be applied since wrapped bind failed
        f3.errors.find(_.message == "first.digit") must beSome

        val f4 = ScalaForms.form.bind(Map("foo"->"333"))
        f4.errors.size must equalTo (1)
        f4.errors.find(_.message == "number.42") must beSome
      }

      "when it is filled with data" in {
        val f1 = ScalaForms.form.fillAndValidate(0)
        f1.errors.size must equalTo (1)
        f1.errors.find(_.message == "first.digit") must beSome

        val f2 = ScalaForms.form.fillAndValidate(3)
        f2.errors.size must equalTo (0)

        // fillAndValidate skips binding, so both constraints fire here.
        val f3 = ScalaForms.form.fillAndValidate(50)
        f3.errors.size must equalTo (2)
        f3.errors.find(_.message == "first.digit") must beSome
        f3.errors.find(_.message == "number.42") must beSome

        val f4 = ScalaForms.form.fillAndValidate(333)
        f4.errors.size must equalTo (1)
        f4.errors.find(_.message == "number.42") must beSome
      }
    }

    "apply constraints on longNumber fields" in {
      val f1 = ScalaForms.longNumberForm.fillAndValidate(0)
      f1.errors.size must equalTo(1)
      f1.errors.find(_.message == "error.min") must beSome

      val f2 = ScalaForms.longNumberForm.fillAndValidate(9000)
      f2.errors.size must equalTo(1)
      f2.errors.find(_.message == "error.max") must beSome

      val f3 = ScalaForms.longNumberForm.fillAndValidate(10)
      f3.errors.size must equalTo(0)

      // Bug fix: this previously re-asserted f3, leaving the inclusive upper
      // boundary (42) unchecked.
      val f4 = ScalaForms.longNumberForm.fillAndValidate(42)
      f4.errors.size must equalTo(0)
    }
  }

  "render form using field[Type] syntax" in {
    val anyData = Map("email" -> "bob@gmail.com", "password" -> "123")
    ScalaForms.loginForm.bind(anyData).get.toString must equalTo("(bob@gmail.com,123)")
  }

  "support default values" in {
    ScalaForms.defaultValuesForm.bindFromRequest( Map() ).get must equalTo(42, "default text")
    ScalaForms.defaultValuesForm.bindFromRequest( Map("name" -> Seq("another text") ) ).get must equalTo(42, "another text")
    ScalaForms.defaultValuesForm.bindFromRequest( Map("pos" -> Seq("123")) ).get must equalTo(123, "default text")
    ScalaForms.defaultValuesForm.bindFromRequest( Map("pos" -> Seq("123"), "name" -> Seq("another text")) ).get must equalTo(123, "another text")

    val f1 = ScalaForms.defaultValuesForm.bindFromRequest( Map("pos" -> Seq("abc")) )
    f1.errors.size must equalTo (1)
  }

  "support repeated values" in {
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki")) ).get must equalTo(("Kiki", Seq()))
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki"), "emails[0]" -> Seq("kiki@gmail.com")) ).get must equalTo(("Kiki", Seq("kiki@gmail.com")))
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki"), "emails[0]" -> Seq("kiki@gmail.com"), "emails[1]" -> Seq("kiki@zen.com")) ).get must equalTo(("Kiki", Seq("kiki@gmail.com", "kiki@zen.com")))
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki"), "emails[0]" -> Seq(), "emails[1]" -> Seq("kiki@zen.com")) ).hasErrors must equalTo(true)
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki"), "emails[]" -> Seq("kiki@gmail.com")) ).get must equalTo(("Kiki", Seq("kiki@gmail.com")))
    ScalaForms.repeatedForm.bindFromRequest( Map("name" -> Seq("Kiki"), "emails[]" -> Seq("kiki@gmail.com", "kiki@zen.com")) ).get must equalTo(("Kiki", Seq("kiki@gmail.com", "kiki@zen.com")))
  }

  "render a form with max 18 fields" in {
    ScalaForms.helloForm.bind(Map("name" -> "foo", "repeat" -> "1")).get.toString must equalTo("(foo,1,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None)")
  }

  "render form using jodaDate" in {
    val dateForm = Form(("date" -> jodaDate))
    val data = Map("date" -> "2012-01-01")
    dateForm.bind(data).get mustEqual(new DateTime(2012,1,1,0,0))
  }

  "render form using jodaDate with format(30/1/2012)" in {
    val dateForm = Form(("date" -> jodaDate("dd/MM/yyyy")))
    val data = Map("date" -> "30/1/2012")
    dateForm.bind(data).get mustEqual(new DateTime(2012,1,30,0,0))
  }

  "render form using jodaLocalDate with format(30/1/2012)" in {
    val dateForm = Form(("date" -> jodaLocalDate("dd/MM/yyyy")))
    val data = Map("date" -> "30/1/2012")
    dateForm.bind(data).get mustEqual(new LocalDate(2012,1,30))
  }

  "reject input if it contains global errors" in {
    Form( "value" -> nonEmptyText ).withGlobalError("some.error")
      .bind( Map("value" -> "some value"))
      .errors.headOption must beSome.like {
        case error => error.message must equalTo("some.error")
      }
  }
}
// Fixture forms and mappings exercised by the FormSpec examples.
object ScalaForms {
  // Simple bean bound by userForm.
  case class User(name: String, age: Int)
  // Mapping with per-field constraints: non-empty name, age in [0, 100].
  val userForm = Form(
    mapping(
      "name" -> of[String].verifying(nonEmpty),
      "age" -> of[Int].verifying(min(0), max(100))
    )(User.apply)(User.unapply)
  )
  // Tuple form; note "password" is deliberately an Int for the toString spec.
  val loginForm = Form(
    tuple(
      "email" -> of[String],
      "password" -> of[Int]
    )
  )
  // Fields that fall back to defaults when absent from the request.
  val defaultValuesForm = Form(
    tuple(
      "pos" -> default(number, 42),
      "name" -> default(text, "default text")
    )
  )
  // 18-field tuple: exercises the maximum tuple arity supported by the API.
  val helloForm = Form(
    tuple(
      "name" -> nonEmptyText,
      "repeat" -> number(min = 1, max = 100),
      "color" -> optional(text),
      "still works" -> optional(text),
      "1" -> optional(text),
      "2" -> optional(text),
      "3" -> optional(text),
      "4" -> optional(text),
      "5" -> optional(text),
      "6" -> optional(text),
      "7" -> optional(text),
      "8" -> optional(text),
      "9" -> optional(text),
      "10" -> optional(text),
      "11" -> optional(text),
      "12" -> optional(text),
      "13" -> optional(text),
      "14" -> optional(text)
    )
  )
  // Repeated field ("emails[0]", "emails[1]", or "emails[]") bound to a list.
  val repeatedForm = Form(
    tuple(
      "name" -> nonEmptyText,
      "emails" -> list(nonEmptyText)
    )
  )
  // Wrapped mapping: "first.digit" applies to the raw string, then the value
  // is transformed to Int and "number.42" applies to the result.
  val form = Form(
    "foo" -> Forms.text.verifying("first.digit", s => (s.headOption map {_ == '3'}) getOrElse false)
      .transform[Int](Integer.parseInt _, _.toString).verifying("number.42", _ < 42)
  )
  // Email field plus a free-form name.
  val emailForm = Form(
    tuple(
      "email" -> email,
      "name" -> of[String]
    )
  )
  // Long field constrained to the inclusive range [10, 42].
  val longNumberForm = Form("longNumber" -> longNumber(10, 42))
} | michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play/src/test/scala/play/api/data/FormSpec.scala | Scala | mit | 7,830 |
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.mllib.linalg.{Vectors, Matrices, Matrix}
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by ameyapandilwar on 11/26/15.
*/
object MLDataMapper {

  /**
   * Reads timbre rows from the HBase table `timbre_sample` within a fixed
   * row-key range and prints each row as "trackId,artistId,year;v1 v2 ... v90".
   *
   * NOTE(review): `print` inside `foreach` executes on executors; with a
   * non-local master the output would land in executor logs rather than the
   * driver console. This job pins master to "local", where both coincide.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("ML Data Mapper").setMaster("local")
    val sc = new SparkContext(sparkConf)
    try {
      val conf = HBaseConfiguration.create()
      val tableName = "timbre_sample"
      conf.set(TableInputFormat.INPUT_TABLE, tableName)
      conf.set(TableInputFormat.SCAN_ROW_START, "TRAAAAW128F429D538")
      conf.set(TableInputFormat.SCAN_ROW_STOP, "TRAABYN12903CFD305")
      val hBaseRows = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
      println("--- number of rows :- " + hBaseRows.count())
      hBaseRows.foreach(keyVal => {
        // Join the 90 timbre columns (qualifiers "1".."90") with single spaces.
        // Assumes all 90 columns exist — TODO confirm; a missing cell renders as "null".
        val timbreStr = (1 to 90)
          .map(i => Bytes.toString(keyVal._2.getValue(Bytes.toBytes("cf"), Bytes.toBytes(i.toString))))
          .mkString(" ")
        val trackId = new String(keyVal._2.getRow())
        val artistId = Bytes.toString(keyVal._2.getValue(Bytes.toBytes("cf"), Bytes.toBytes("ArtistId")))
        val year = Bytes.toString(keyVal._2.getValue(Bytes.toBytes("cf"), Bytes.toBytes("year")))
        // Bug fix: the previous driver-side `var result` was captured into the
        // serialized closure, so executor-side assignments could never update
        // the driver's copy; a closure-local val makes the intent explicit.
        // NOTE(review): "\\n" as written prints a literal backslash-n — confirm
        // whether a real newline was intended.
        val result = trackId + "," + artistId + "," + year + ";" + timbreStr + "\\n"
        print(result)
        // Utils.writeToFile("/Users/ameyapandilwar/CS6240/FINAL_PROJECT/MLPrediction/data/mllib/timbre_sample.txt", result)
      })
    } finally {
      // Release Spark resources even if the HBase scan fails.
      sc.stop()
    }
  }
}
| Arulselvanmadhavan/Artist_Recognition_from_Audio_Features | MLPrediction/src/main/scala/MLDataMapper.scala | Scala | apache-2.0 | 1,853 |
package org.finra.datagenerator.scaffolding.messaging.response
/**
* Created by dkopel on 7/7/16.
*/
/**
 * A response payload paired with a priority.
 *
 * @tparam R type of the carried response value (covariant)
 */
trait Response[+R] {
  // The carried response value.
  val response: R
  // Relative priority of this response; the factory default is 0.
  // NOTE(review): whether higher or lower values win is not shown here —
  // confirm the ordering convention at call sites.
  val priority: Long
}
object Response {
  /** Builds a [[Response]] wrapping `data` with priority `prty` (default 0). */
  def apply[R](data: R, prty: Long=0): Response[R] = new Response[R] {
    override val response=data
    override val priority=prty
  }
} | FINRAOS/DataGenerator | rubber-scaffolding/rubber-commons/src/main/scala/org/finra/datagenerator/scaffolding/messaging/response/Response.scala | Scala | apache-2.0 | 340 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{File, IOException}
import java.lang.reflect.{InvocationTargetException, Modifier}
import java.net.{URI, URL}
import java.security.PrivilegedExceptionAction
import java.util.concurrent.{TimeoutException, TimeUnit}
import scala.collection.mutable.HashMap
import scala.concurrent.Promise
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
import org.apache.commons.lang3.{StringUtils => ComStrUtils}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException
import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.HistoryServer
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.deploy.yarn.security.YARNHadoopDelegationTokenManager
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.rpc._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, YarnSchedulerBackend}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.util._
/**
* Common application master functionality for Spark on Yarn.
*/
private[spark] class ApplicationMaster(args: ApplicationMasterArguments) extends Logging {
// TODO: Currently, task to container is computed once (TaskSetManager) - which need not be
// optimal as more containers are available. Might need to handle this better.
// True only in yarn-cluster mode, where a user class is supplied to run
// inside the AM process.
private val isClusterMode = args.userClass != null
private val sparkConf = new SparkConf()
if (args.propertiesFile != null) {
  Utils.getPropertiesFromFile(args.propertiesFile).foreach { case (k, v) =>
    sparkConf.set(k, v)
  }
}
private val securityMgr = new SecurityManager(sparkConf)
private var metricsSystem: Option[MetricsSystem] = None
// Set system properties for each config entry. This covers two use cases:
// - The default configuration stored by the SparkHadoopUtil class
// - The user application creating a new SparkConf in cluster mode
//
// Both cases create a new SparkConf object which reads these configs from system properties.
sparkConf.getAll.foreach { case (k, v) =>
  sys.props(k) = v
}
private val yarnConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
// Class loader exposing the user application's jars; in cluster mode it
// honours the user-classpath-first setting for the driver.
private val userClassLoader = {
  val classpath = Client.getUserClasspath(sparkConf)
  val urls = classpath.map { entry =>
    new URL("file:" + new File(entry.getPath()).getAbsolutePath())
  }
  if (isClusterMode) {
    if (Client.isUserClassPathFirst(sparkConf, isDriver = true)) {
      new ChildFirstURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
    } else {
      new MutableURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
    }
  } else {
    new MutableURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
  }
}
// Delegation-token manager, present only when a keytab is configured.
private val tokenManager: Option[YARNHadoopDelegationTokenManager] = {
  sparkConf.get(KEYTAB).map { _ =>
    new YARNHadoopDelegationTokenManager(sparkConf, yarnConf)
  }
}
// UGI under which all AM work runs: supplied by the token manager when a
// keytab is in use, otherwise the plain Spark user.
private val ugi = tokenManager match {
  case Some(tm) =>
    // Set the context class loader so that the token renewer has access to jars distributed
    // by the user.
    Utils.withContextClassLoader(userClassLoader) {
      tm.start()
    }
  case _ =>
    SparkHadoopUtil.get.createSparkUser()
}
private val client = doAsUser { new YarnRMClient() }
// Default to twice the number of executors (twice the maximum number of executors if dynamic
// allocation is enabled), with a minimum of 3.
private val maxNumExecutorFailures = {
  val effectiveNumExecutors =
    if (Utils.isDynamicAllocationEnabled(sparkConf)) {
      sparkConf.get(DYN_ALLOCATION_MAX_EXECUTORS)
    } else {
      sparkConf.get(EXECUTOR_INSTANCES).getOrElse(0)
    }
  // By default, effectiveNumExecutors is Int.MaxValue if dynamic allocation is enabled. We need
  // avoid the integer overflow here.
  val defaultMaxNumExecutorFailures = math.max(3,
    if (effectiveNumExecutors > Int.MaxValue / 2) Int.MaxValue else (2 * effectiveNumExecutors))
  sparkConf.get(MAX_EXECUTOR_FAILURES).getOrElse(defaultMaxNumExecutorFailures)
}
// Mutable AM lifecycle state, shared across the reporter, user-class and
// shutdown-hook threads; all fields are volatile for cross-thread visibility.
@volatile private var exitCode = 0
@volatile private var unregistered = false
@volatile private var finished = false
@volatile private var finalStatus = getDefaultFinalStatus
@volatile private var finalMsg: String = ""
@volatile private var userClassThread: Thread = _
@volatile private var reporterThread: Thread = _
@volatile private var allocator: YarnAllocator = _
// A flag to check whether user has initialized spark context
@volatile private var registered = false
// Lock for controlling the allocator (heartbeat) thread.
private val allocatorLock = new Object()
// Steady state heartbeat interval. We want to be reasonably responsive without causing too many
// requests to RM.
private val heartbeatInterval = {
  // Ensure that progress is sent before YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS elapses.
  val expiryInterval = yarnConf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 120000)
  math.max(0, math.min(expiryInterval / 2, sparkConf.get(RM_HEARTBEAT_INTERVAL)))
}
// Initial wait interval before allocator poll, to allow for quicker ramp up when executors are
// being requested.
private val initialAllocationInterval = math.min(heartbeatInterval,
  sparkConf.get(INITIAL_HEARTBEAT_INTERVAL))
// Next wait interval before allocator poll.
private var nextAllocationInterval = initialAllocationInterval
private var rpcEnv: RpcEnv = null
// In cluster mode, used to tell the AM when the user's SparkContext has been initialized.
private val sparkContextPromise = Promise[SparkContext]()
// Load the list of localized files set by the client. This is used when launching executors,
// and is loaded here so that these configs don't pollute the Web UI's environment page in
// cluster mode.
private val localResources = doAsUser {
  logInfo("Preparing Local resources")
  val resources = HashMap[String, LocalResource]()
  // Registers one distributed-cache entry under its localized name.
  def setupDistributedCache(
      file: String,
      rtype: LocalResourceType,
      timestamp: String,
      size: String,
      vis: String): Unit = {
    val uri = new URI(file)
    val amJarRsrc = Records.newRecord(classOf[LocalResource])
    amJarRsrc.setType(rtype)
    amJarRsrc.setVisibility(LocalResourceVisibility.valueOf(vis))
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromURI(uri))
    amJarRsrc.setTimestamp(timestamp.toLong)
    amJarRsrc.setSize(size.toLong)
    // Localized name defaults to the URI fragment, falling back to the file name.
    val fileName = Option(uri.getFragment()).getOrElse(new Path(uri).getName())
    resources(fileName) = amJarRsrc
  }
  // The five CACHED_* config lists are parallel arrays, one entry per file.
  val distFiles = sparkConf.get(CACHED_FILES)
  val fileSizes = sparkConf.get(CACHED_FILES_SIZES)
  val timeStamps = sparkConf.get(CACHED_FILES_TIMESTAMPS)
  val visibilities = sparkConf.get(CACHED_FILES_VISIBILITIES)
  val resTypes = sparkConf.get(CACHED_FILES_TYPES)
  for (i <- 0 to distFiles.size - 1) {
    val resType = LocalResourceType.valueOf(resTypes(i))
    setupDistributedCache(distFiles(i), resType, timeStamps(i).toString, fileSizes(i).toString,
      visibilities(i))
  }
  // Distribute the conf archive to executors.
  sparkConf.get(CACHED_CONF_ARCHIVE).foreach { path =>
    val uri = new URI(path)
    val fs = FileSystem.get(uri, yarnConf)
    val status = fs.getFileStatus(new Path(uri))
    // SPARK-16080: Make sure to use the correct name for the destination when distributing the
    // conf archive to executors.
    val destUri = new URI(uri.getScheme(), uri.getRawSchemeSpecificPart(),
      Client.LOCALIZED_CONF_DIR)
    setupDistributedCache(destUri.toString(), LocalResourceType.ARCHIVE,
      status.getModificationTime().toString, status.getLen.toString,
      LocalResourceVisibility.PRIVATE.name())
  }
  // Clean up the configuration so it doesn't show up in the Web UI (since it's really noisy).
  CACHE_CONFIGS.foreach { e =>
    sparkConf.remove(e)
    sys.props.remove(e.key)
  }
  resources.toMap
}
/** YARN attempt id of this application master, as reported by the RM client. */
def getAttemptId(): ApplicationAttemptId = client.getAttemptId()
/** Entry point: runs the AM body as the configured user, then reports the exit code. */
final def run(): Int = {
  doAsUser(runImpl())
  exitCode
}
// Core AM logic: sets cluster-mode system properties, installs the shutdown
// hook, then runs either the user driver (cluster mode) or the executor
// launcher (client mode). Any uncaught exception becomes a FAILED final status.
private def runImpl(): Unit = {
  try {
    val appAttemptId = client.getAttemptId()
    var attemptID: Option[String] = None
    if (isClusterMode) {
      // Set the web ui port to be ephemeral for yarn so we don't conflict with
      // other spark processes running on the same box
      System.setProperty("spark.ui.port", "0")
      // Set the master and deploy mode property to match the requested mode.
      System.setProperty("spark.master", "yarn")
      System.setProperty("spark.submit.deployMode", "cluster")
      // Set this internal configuration if it is running on cluster mode, this
      // configuration will be checked in SparkContext to avoid misuse of yarn cluster mode.
      System.setProperty("spark.yarn.app.id", appAttemptId.getApplicationId().toString())
      attemptID = Option(appAttemptId.getAttemptId.toString)
    }
    new CallerContext(
      "APPMASTER", sparkConf.get(APP_CALLER_CONTEXT),
      Option(appAttemptId.getApplicationId.toString), attemptID).setCurrentContext()
    logInfo("ApplicationAttemptId: " + appAttemptId)
    // This shutdown hook should run *after* the SparkContext is shut down.
    val priority = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY - 1
    ShutdownHookManager.addShutdownHook(priority) { () =>
      val maxAppAttempts = client.getMaxRegAttempts(sparkConf, yarnConf)
      val isLastAttempt = client.getAttemptId().getAttemptId() >= maxAppAttempts
      if (!finished) {
        // The default state of ApplicationMaster is failed if it is invoked by shut down hook.
        // This behavior is different compared to 1.x version.
        // If user application is exited ahead of time by calling System.exit(N), here mark
        // this application as failed with EXIT_EARLY. For a good shutdown, user shouldn't call
        // System.exit(0) to terminate the application.
        finish(finalStatus,
          ApplicationMaster.EXIT_EARLY,
          "Shutdown hook called before final status was reported.")
      }
      if (!unregistered) {
        // we only want to unregister if we don't want the RM to retry
        if (finalStatus == FinalApplicationStatus.SUCCEEDED || isLastAttempt) {
          unregister(finalStatus, finalMsg)
          cleanupStagingDir()
        }
      }
    }
    if (isClusterMode) {
      runDriver()
    } else {
      runExecutorLauncher()
    }
  } catch {
    case e: Exception =>
      // catch everything else if not specifically handled
      logError("Uncaught exception: ", e)
      finish(FinalApplicationStatus.FAILED,
        ApplicationMaster.EXIT_UNCAUGHT_EXCEPTION,
        "Uncaught exception: " + StringUtils.stringifyException(e))
  } finally {
    try {
      // Always flush and stop the metrics system, even on failure paths.
      metricsSystem.foreach { ms =>
        ms.report()
        ms.stop()
      }
    } catch {
      case e: Exception =>
        logWarning("Exception during stopping of the metric system: ", e)
    }
  }
}
/**
 * Default final application status when none has been set explicitly:
 * FAILED in cluster mode (so an unexpected exit of the user class is reported
 * as a failure), UNDEFINED in client mode so that YARN HA can properly retry
 * the application if it restarts.
 * (The previous scaladoc claimed SUCCEEDED for cluster mode, contradicting
 * the code; fixed here.)
 */
final def getDefaultFinalStatus(): FinalApplicationStatus =
  if (isClusterMode) FinalApplicationStatus.FAILED else FinalApplicationStatus.UNDEFINED
/**
 * unregister is used to completely unregister the application from the ResourceManager.
 * This means the ResourceManager will not retry the application attempt on your behalf if
 * a failure occurred. Idempotent: only the first call after registration takes effect.
 */
final def unregister(status: FinalApplicationStatus, diagnostics: String = null): Unit = synchronized {
  if (registered && !unregistered) {
    val diagSuffix = Option(diagnostics).map(msg => s" (diag message: $msg)").getOrElse("")
    logInfo(s"Unregistering ApplicationMaster with $status" + diagSuffix)
    unregistered = true
    client.unregister(status, Option(diagnostics).getOrElse(""))
  }
}
// Record the final status/exit code exactly once and interrupt helper threads
// so the AM can exit. Safe to call from any thread; synchronized so only the
// first caller wins.
final def finish(status: FinalApplicationStatus, code: Int, msg: String = null): Unit = {
  synchronized {
    if (!finished) {
      val inShutdown = ShutdownHookManager.inShutdown()
      if (registered || !isClusterMode) {
        exitCode = code
        finalStatus = status
      } else {
        // Cluster-mode AM that never registered with the RM: report the
        // SparkContext-not-initialized failure instead of the caller's status.
        finalStatus = FinalApplicationStatus.FAILED
        exitCode = ApplicationMaster.EXIT_SC_NOT_INITED
      }
      logInfo(s"Final app status: $finalStatus, exitCode: $exitCode" +
        Option(msg).map(msg => s", (reason: $msg)").getOrElse(""))
      // Diagnostics are truncated to the configured limit before reporting to YARN.
      finalMsg = ComStrUtils.abbreviate(msg, sparkConf.get(AM_FINAL_MSG_LIMIT).toInt)
      finished = true
      // Never self-interrupt, and skip interrupts entirely during JVM shutdown.
      if (!inShutdown && Thread.currentThread() != reporterThread && reporterThread != null) {
        logDebug("shutting down reporter thread")
        reporterThread.interrupt()
      }
      if (!inShutdown && Thread.currentThread() != userClassThread && userClassThread != null) {
        logDebug("shutting down user thread")
        userClassThread.interrupt()
      }
      if (!inShutdown) {
        tokenManager.foreach(_.stop())
      }
    }
  }
}
// Called from the user-class thread once its SparkContext exists. Completes
// the promise that runDriver() awaits, then parks (wait) until runDriver()
// finishes AM initialization and calls resumeDriver().
private def sparkContextInitialized(sc: SparkContext) = {
  sparkContextPromise.synchronized {
    // Notify runDriver function that SparkContext is available
    sparkContextPromise.success(sc)
    // Pause the user class thread in order to make proper initialization in runDriver function.
    sparkContextPromise.wait()
  }
}
// Wakes the user-class thread parked in sparkContextInitialized() once
// runDriver() has completed AM registration and allocator setup.
private def resumeDriver(): Unit = {
  // When initialization in runDriver happened the user class thread has to be resumed.
  sparkContextPromise.synchronized {
    sparkContextPromise.notify()
  }
}
/**
 * Registers this AM with the YARN ResourceManager (including the history
 * server address for this attempt) and records that registration succeeded.
 */
private def registerAM(
    host: String,
    port: Int,
    _sparkConf: SparkConf,
    uiAddress: Option[String]): Unit = {
  val attempt = client.getAttemptId()
  val historyAddress = ApplicationMaster.getHistoryServerAddress(
    _sparkConf, yarnConf, attempt.getApplicationId().toString(), attempt.getAttemptId().toString())
  client.register(host, port, yarnConf, _sparkConf, uiAddress, historyAddress)
  registered = true
}
// Creates the YARN allocator, wires up the AM RPC endpoint, performs the
// first resource allocation, and starts the metrics system plus the reporter
// (heartbeat) thread.
private def createAllocator(driverRef: RpcEndpointRef, _sparkConf: SparkConf): Unit = {
  val appId = client.getAttemptId().getApplicationId().toString()
  val driverUrl = RpcEndpointAddress(driverRef.address.host, driverRef.address.port,
    CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
  // Before we initialize the allocator, let's log the information about how executors will
  // be run up front, to avoid printing this out for every single executor being launched.
  // Use placeholders for information that changes such as executor IDs.
  logInfo {
    val executorMemory = _sparkConf.get(EXECUTOR_MEMORY).toInt
    val executorCores = _sparkConf.get(EXECUTOR_CORES)
    val dummyRunner = new ExecutorRunnable(None, yarnConf, _sparkConf, driverUrl, "<executorId>",
      "<hostname>", executorMemory, executorCores, appId, securityMgr, localResources)
    dummyRunner.launchContextDebugInfo()
  }
  allocator = client.createAllocator(
    yarnConf,
    _sparkConf,
    driverUrl,
    driverRef,
    securityMgr,
    localResources)
  tokenManager.foreach(_.setDriverRef(driverRef))
  // Initialize the AM endpoint *after* the allocator has been initialized. This ensures
  // that when the driver sends an initial executor request (e.g. after an AM restart),
  // the allocator is ready to service requests.
  rpcEnv.setupEndpoint("YarnAM", new AMEndpoint(rpcEnv, driverRef))
  allocator.allocateResources()
  val ms = MetricsSystem.createMetricsSystem("applicationMaster", sparkConf, securityMgr)
  val prefix = _sparkConf.get(YARN_METRICS_NAMESPACE).getOrElse(appId)
  ms.registerSource(new ApplicationMasterSource(prefix, allocator))
  ms.start()
  metricsSystem = Some(ms)
  reporterThread = launchReporterThread()
}
// Cluster-mode path: launches the user class in its own thread, waits (up to
// AM_MAX_WAIT_TIME) for the SparkContext it creates, registers the AM and
// builds the allocator, then joins the user thread until the app finishes.
private def runDriver(): Unit = {
  addAmIpFilter(None)
  userClassThread = startUserApplication()
  // This a bit hacky, but we need to wait until the spark.driver.port property has
  // been set by the Thread executing the user class.
  logInfo("Waiting for spark context initialization...")
  val totalWaitTime = sparkConf.get(AM_MAX_WAIT_TIME)
  try {
    val sc = ThreadUtils.awaitResult(sparkContextPromise.future,
      Duration(totalWaitTime, TimeUnit.MILLISECONDS))
    if (sc != null) {
      rpcEnv = sc.env.rpcEnv
      val userConf = sc.getConf
      val host = userConf.get("spark.driver.host")
      val port = userConf.get("spark.driver.port").toInt
      registerAM(host, port, userConf, sc.ui.map(_.webUrl))
      val driverRef = rpcEnv.setupEndpointRef(
        RpcAddress(host, port),
        YarnSchedulerBackend.ENDPOINT_NAME)
      createAllocator(driverRef, userConf)
    } else {
      // Sanity check; should never happen in normal operation, since sc should only be null
      // if the user app did not create a SparkContext.
      throw new IllegalStateException("User did not initialize spark context!")
    }
    resumeDriver()
    userClassThread.join()
  } catch {
    case e: SparkException if e.getCause().isInstanceOf[TimeoutException] =>
      logError(
        s"SparkContext did not initialize after waiting for $totalWaitTime ms. " +
         "Please check earlier log output for errors. Failing the application.")
      finish(FinalApplicationStatus.FAILED,
        ApplicationMaster.EXIT_SC_NOT_INITED,
        "Timed out waiting for SparkContext.")
  } finally {
    // Always unblock the parked user thread, even on failure, so it can exit.
    resumeDriver()
  }
}
// Client-mode path: creates the AM RPC env, registers with the RM, connects
// back to the already-running driver (host:port from the first user arg),
// then blocks on the reporter thread for the lifetime of the app.
private def runExecutorLauncher(): Unit = {
  val hostname = Utils.localHostName
  val amCores = sparkConf.get(AM_CORES)
  rpcEnv = RpcEnv.create("sparkYarnAM", hostname, hostname, -1, sparkConf, securityMgr,
    amCores, true)
  // The client-mode AM doesn't listen for incoming connections, so report an invalid port.
  registerAM(hostname, -1, sparkConf, sparkConf.getOption("spark.driver.appUIAddress"))
  // The driver should be up and listening, so unlike cluster mode, just try to connect to it
  // with no waiting or retrying.
  val (driverHost, driverPort) = Utils.parseHostPort(args.userArgs(0))
  val driverRef = rpcEnv.setupEndpointRef(
    RpcAddress(driverHost, driverPort),
    YarnSchedulerBackend.ENDPOINT_NAME)
  addAmIpFilter(Some(driverRef))
  createAllocator(driverRef, sparkConf)
  // In client mode the actor will stop the reporter thread.
  reporterThread.join()
}
// Starts the daemon "Reporter" thread that heartbeats to the RM via
// allocateResources(), fails the application after too many executor failures
// or repeated reporter errors, and adaptively paces its polling interval.
private def launchReporterThread(): Thread = {
  // The number of failures in a row until Reporter thread give up
  val reporterMaxFailures = sparkConf.get(MAX_REPORTER_THREAD_FAILURES)
  val t = new Thread {
    override def run() {
      var failureCount = 0
      while (!finished) {
        try {
          if (allocator.getNumExecutorsFailed >= maxNumExecutorFailures) {
            finish(FinalApplicationStatus.FAILED,
              ApplicationMaster.EXIT_MAX_EXECUTOR_FAILURES,
              s"Max number of executor failures ($maxNumExecutorFailures) reached")
          } else if (allocator.isAllNodeBlacklisted) {
            finish(FinalApplicationStatus.FAILED,
              ApplicationMaster.EXIT_MAX_EXECUTOR_FAILURES,
              "Due to executor failures all available nodes are blacklisted")
          } else {
            logDebug("Sending progress")
            allocator.allocateResources()
          }
          failureCount = 0
        } catch {
          case i: InterruptedException => // do nothing
          case e: ApplicationAttemptNotFoundException =>
            // The RM no longer knows this attempt: fail immediately, no retry.
            failureCount += 1
            logError("Exception from Reporter thread.", e)
            finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_REPORTER_FAILURE,
              e.getMessage)
          case e: Throwable =>
            failureCount += 1
            // Fatal errors fail immediately; non-fatal ones only after
            // reporterMaxFailures consecutive failures.
            if (!NonFatal(e) || failureCount >= reporterMaxFailures) {
              finish(FinalApplicationStatus.FAILED,
                ApplicationMaster.EXIT_REPORTER_FAILURE, "Exception was thrown " +
                  s"$failureCount time(s) from Reporter thread.")
            } else {
              logWarning(s"Reporter thread fails $failureCount time(s) in a row.", e)
            }
        }
        try {
          val numPendingAllocate = allocator.getPendingAllocate.size
          var sleepStart = 0L
          var sleepInterval = 200L // ms
          allocatorLock.synchronized {
            // Poll faster (exponential back-off up to heartbeatInterval) while
            // allocation or loss-reason requests are outstanding.
            sleepInterval =
              if (numPendingAllocate > 0 || allocator.getNumPendingLossReasonRequests > 0) {
                val currentAllocationInterval =
                  math.min(heartbeatInterval, nextAllocationInterval)
                nextAllocationInterval = currentAllocationInterval * 2 // avoid overflow
                currentAllocationInterval
              } else {
                nextAllocationInterval = initialAllocationInterval
                heartbeatInterval
              }
            sleepStart = System.currentTimeMillis()
            allocatorLock.wait(sleepInterval)
          }
          val sleepDuration = System.currentTimeMillis() - sleepStart
          if (sleepDuration < sleepInterval) {
            // log when sleep is interrupted
            logDebug(s"Number of pending allocations is $numPendingAllocate. " +
              s"Slept for $sleepDuration/$sleepInterval ms.")
            // if sleep was less than the minimum interval, sleep for the rest of it
            val toSleep = math.max(0, initialAllocationInterval - sleepDuration)
            if (toSleep > 0) {
              logDebug(s"Going back to sleep for $toSleep ms")
              // use Thread.sleep instead of allocatorLock.wait. there is no need to be woken up
              // by the methods that signal allocatorLock because this is just finishing the min
              // sleep interval, which should happen even if this is signalled again.
              Thread.sleep(toSleep)
            }
          } else {
            logDebug(s"Number of pending allocations is $numPendingAllocate. " +
              s"Slept for $sleepDuration/$sleepInterval.")
          }
        } catch {
          case e: InterruptedException =>
        }
      }
    }
  }
  // setting to daemon status, though this is usually not a good idea.
  t.setDaemon(true)
  t.setName("Reporter")
  t.start()
  logInfo(s"Started progress reporter thread with (heartbeat : $heartbeatInterval, " +
    s"initial allocation : $initialAllocationInterval) intervals")
  t
}
/**
 * Clean up the staging directory (from SPARK_YARN_STAGING_DIR), unless
 * staging-file preservation is configured via PRESERVE_STAGING_FILES.
 * IOExceptions are logged rather than rethrown since this runs from the
 * shutdown hook.
 */
private def cleanupStagingDir(): Unit = {
  // Declared outside the try so the catch block can include it in the log message.
  var stagingDirPath: Path = null
  try {
    val preserveFiles = sparkConf.get(PRESERVE_STAGING_FILES)
    if (!preserveFiles) {
      stagingDirPath = new Path(System.getenv("SPARK_YARN_STAGING_DIR"))
      logInfo("Deleting staging directory " + stagingDirPath)
      val fs = stagingDirPath.getFileSystem(yarnConf)
      fs.delete(stagingDirPath, true)
    }
  } catch {
    case ioe: IOException =>
      logError("Failed to cleanup staging dir " + stagingDirPath, ioe)
  }
}
/** Add the Yarn IP filter that is required for properly securing the UI. */
private def addAmIpFilter(driver: Option[RpcEndpointRef]) = {
  val proxyBase = System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV)
  val amFilter = "org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter"
  val params = client.getAmIpFilterParams(yarnConf, proxyBase)
  driver match {
    case Some(d) =>
      // Client mode: the driver owns the UI, so ask it to install the filter.
      d.send(AddWebUIFilter(amFilter, params.toMap, proxyBase))
    case None =>
      // Cluster mode: the UI lives in this process; configure via system properties.
      System.setProperty("spark.ui.filters", amFilter)
      params.foreach { case (k, v) => System.setProperty(s"spark.$amFilter.param.$k", v) }
  }
}
/**
* Start the user class, which contains the spark driver, in a separate Thread.
* If the main routine exits cleanly or exits with System.exit(N) for any N
* we assume it was successful, for all other cases we assume failure.
*
* Returns the user thread that was started.
*/
  private def startUserApplication(): Thread = {
    logInfo("Starting the user application in a separate Thread")
    var userArgs = args.userArgs
    if (args.primaryPyFile != null && args.primaryPyFile.endsWith(".py")) {
      // When running pyspark, the app is run using PythonRunner. The second argument is the list
      // of files to add to PYTHONPATH, which Client.scala already handles, so it's empty.
      userArgs = Seq(args.primaryPyFile, "") ++ userArgs
    }
    if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
      // TODO(davies): add R dependencies here
    }
    // Resolve the user's main(Array[String]) reflectively via the dedicated class loader.
    val mainMethod = userClassLoader.loadClass(args.userClass)
      .getMethod("main", classOf[Array[String]])
    val userThread = new Thread {
      override def run() {
        try {
          // main must be static (i.e. defined on a Scala object / Java static method).
          if (!Modifier.isStatic(mainMethod.getModifiers)) {
            logError(s"Could not find static main method in object ${args.userClass}")
            finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_EXCEPTION_USER_CLASS)
          } else {
            mainMethod.invoke(null, userArgs.toArray)
            // If main returns (or exits via System.exit) we treat the app as successful.
            finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
            logDebug("Done running user class")
          }
        } catch {
          // Reflection wraps any exception thrown by main in InvocationTargetException.
          case e: InvocationTargetException =>
            e.getCause match {
              case _: InterruptedException =>
                // Reporter thread can interrupt to stop user class
              case SparkUserAppException(exitCode) =>
                val msg = s"User application exited with status $exitCode"
                logError(msg)
                finish(FinalApplicationStatus.FAILED, exitCode, msg)
              case cause: Throwable =>
                logError("User class threw exception: " + cause, cause)
                finish(FinalApplicationStatus.FAILED,
                  ApplicationMaster.EXIT_EXCEPTION_USER_CLASS,
                  "User class threw exception: " + StringUtils.stringifyException(cause))
            }
            // Propagate the failure to whoever is awaiting the SparkContext.
            sparkContextPromise.tryFailure(e.getCause())
        } finally {
          // Notify the thread waiting for the SparkContext, in case the application did not
          // instantiate one. This will do nothing when the user code instantiates a SparkContext
          // (with the correct master), or when the user code throws an exception (due to the
          // tryFailure above).
          sparkContextPromise.trySuccess(null)
        }
      }
    }
    userThread.setContextClassLoader(userClassLoader)
    userThread.setName("Driver")
    userThread.start()
    userThread
  }
  /**
   * Reset the allocation heartbeat back to its initial (short) interval and wake up
   * the reporter thread, so new allocation requests are serviced promptly.
   */
  private def resetAllocatorInterval(): Unit = allocatorLock.synchronized {
    nextAllocationInterval = initialAllocationInterval
    allocatorLock.notifyAll()
  }
/**
* An [[RpcEndpoint]] that communicates with the driver's scheduler backend.
*/
  private class AMEndpoint(override val rpcEnv: RpcEnv, driver: RpcEndpointRef)
    extends RpcEndpoint with Logging {
    override def onStart(): Unit = {
      // Announce ourselves to the driver's scheduler backend as the cluster manager.
      driver.send(RegisterClusterManager(self))
    }
    override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
      // Driver asks for a new total number of executors (with locality preferences).
      case r: RequestExecutors =>
        Option(allocator) match {
          case Some(a) =>
            if (a.requestTotalExecutorsWithPreferredLocalities(r.requestedTotal,
              r.localityAwareTasks, r.hostToLocalTaskCount, r.nodeBlacklist)) {
              // The target changed: speed the reporter thread back up.
              resetAllocatorInterval()
            }
            context.reply(true)
          case None =>
            // Allocator may not exist yet during AM startup; tell the driver we declined.
            logWarning("Container allocator is not ready to request executors yet.")
            context.reply(false)
        }
      case KillExecutors(executorIds) =>
        logInfo(s"Driver requested to kill executor(s) ${executorIds.mkString(", ")}.")
        Option(allocator) match {
          case Some(a) => executorIds.foreach(a.killExecutor)
          case None => logWarning("Container allocator is not ready to kill executors yet.")
        }
        // Always acknowledge; killing is best effort.
        context.reply(true)
      case GetExecutorLossReason(eid) =>
        Option(allocator) match {
          case Some(a) =>
            // Reply is deferred: the allocator answers the context once it learns the reason.
            a.enqueueGetLossReasonRequest(eid, context)
            resetAllocatorInterval()
          case None =>
            logWarning("Container allocator is not ready to find executor loss reasons yet.")
        }
    }
    override def onDisconnected(remoteAddress: RpcAddress): Unit = {
      // In cluster mode, do not rely on the disassociated event to exit
      // This avoids potentially reporting incorrect exit codes if the driver fails
      if (!isClusterMode) {
        logInfo(s"Driver terminated or disconnected! Shutting down. $remoteAddress")
        finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
      }
    }
  }
  /** Run `fn` with the privileges of the application's UserGroupInformation (Hadoop security). */
  private def doAsUser[T](fn: => T): T = {
    ugi.doAs(new PrivilegedExceptionAction[T]() {
      override def run: T = fn
    })
  }
}
object ApplicationMaster extends Logging {
  // exit codes for different causes, no reason behind the values
  private val EXIT_SUCCESS = 0
  private val EXIT_UNCAUGHT_EXCEPTION = 10
  private val EXIT_MAX_EXECUTOR_FAILURES = 11
  private val EXIT_REPORTER_FAILURE = 12
  private val EXIT_SC_NOT_INITED = 13
  private val EXIT_SECURITY = 14
  private val EXIT_EXCEPTION_USER_CLASS = 15
  private val EXIT_EARLY = 16
  // Singleton instance, set once in main(); the static helpers below delegate to it.
  private var master: ApplicationMaster = _
  def main(args: Array[String]): Unit = {
    SignalUtils.registerLogger(log)
    val amArgs = new ApplicationMasterArguments(args)
    master = new ApplicationMaster(amArgs)
    // run() blocks until the AM finishes; its return value becomes the process exit code.
    System.exit(master.run())
  }
  private[spark] def sparkContextInitialized(sc: SparkContext): Unit = {
    master.sparkContextInitialized(sc)
  }
  private[spark] def getAttemptId(): ApplicationAttemptId = {
    master.getAttemptId
  }
  /**
   * Build the history server URL for this application attempt, substituting any Hadoop
   * variables in the configured address. Returns "" when no history server is configured.
   */
  private[spark] def getHistoryServerAddress(
      sparkConf: SparkConf,
      yarnConf: YarnConfiguration,
      appId: String,
      attemptId: String): String = {
    sparkConf.get(HISTORY_SERVER_ADDRESS)
      .map { text => SparkHadoopUtil.get.substituteHadoopVariables(text, yarnConf) }
      .map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}/${attemptId}" }
      .getOrElse("")
  }
}
/**
* This object does not provide any special functionality. It exists so that it's easy to tell
* apart the client-mode AM from the cluster-mode AM when using tools such as ps or jps.
*/
object ExecutorLauncher {
  def main(args: Array[String]): Unit = {
    // Pure delegation: only the process name differs (see the scaladoc above).
    ApplicationMaster.main(args)
  }
}
| ahnqirage/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala | Scala | apache-2.0 | 32,624 |
/*
* Copyright 1998-2018 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.search
import java.nio.file.Files
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.TcpClient
import org.mockito.Mockito
import org.specs2.mutable.SpecificationWithJUnit
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.annotation._
import org.springframework.stereotype.{Repository, Service}
import org.springframework.test.context.{ContextConfiguration, TestContextManager}
import play.api.libs.ws.StandaloneWSClient
import ru.org.linux.auth.FloodProtector
import ru.org.linux.search.ElasticsearchIndexService.MessageIndex
@ContextConfiguration(classes = Array(classOf[SearchIntegrationTestConfiguration]))
class ElasticsearchIndexServiceIntegrationSpec extends SpecificationWithJUnit {
  // Trigger Spring dependency injection on this specs2 instance (fills the @Autowired fields).
  new TestContextManager(this.getClass).prepareTestInstance(this)
  @Autowired
  var indexService: ElasticsearchIndexService = _
  @Autowired
  var elastic: TcpClient = _
  "ElasticsearchIndexService" should {
    "create index" in {
      indexService.createIndexIfNeeded()
      // Verify directly against the (embedded) cluster that the index now exists.
      val exists = elastic execute { indexExists(MessageIndex) } await
      exists.isExists must beTrue
    }
  }
}
@Configuration
@ImportResource(Array("classpath:common.xml", "classpath:database.xml"))
@ComponentScan(
  basePackages = Array("ru.org.linux"),
  lazyInit = true,
  useDefaultFilters = false,
  includeFilters = Array(
    new ComponentScan.Filter(
      `type` = FilterType.ANNOTATION,
      value = Array(classOf[Service], classOf[Repository])))
)
class SearchIntegrationTestConfiguration {
  // Wraps an embedded Elasticsearch node whose data lives in a temp directory,
  // so the integration test needs no external cluster.
  class LocalNodeProvider {
    val node = ElasticsearchConfiguration.createEmbedded("test-elastic", Files.createTempDirectory("test-elastic").toFile.getAbsolutePath)
    def close(): Unit = node.stop(true)
  }
  // destroyMethod ensures the embedded node is stopped when the Spring context closes.
  @Bean(destroyMethod="close")
  def elasticNode: LocalNodeProvider = new LocalNodeProvider()
  @Bean
  def elasticClient(node: LocalNodeProvider): TcpClient = {
    // shutdownNodeOnClose = false: node lifetime is managed by the bean above, not the client.
    node.node.tcp(shutdownNodeOnClose = false)
  }
  @Bean
  def floodProtector: FloodProtector = Mockito.mock(classOf[FloodProtector])
  @Bean
  def httpClient: StandaloneWSClient = Mockito.mock(classOf[StandaloneWSClient])
} | hizel/lorsource | src/test/scala/ru/org/linux/search/ElasticsearchIndexServiceIntegrationSpec.scala | Scala | apache-2.0 | 2,802 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package synthesis
import purescala.Definitions._
import purescala.Expressions._
import purescala.Constructors._
import purescala.Common._
import evaluators.{TrackingEvaluator, DefaultEvaluator}
import leon.utils.ASCIIHelpers._
/** Sets of valid and invalid examples */
case class ExamplesBank(valids: Seq[Example], invalids: Seq[Example]) {
  // All examples, valid ones first.
  def examples = valids ++ invalids
  // Minimize tests of a function so that tests that are invalid because of a
  // recursive call are eliminated
  def minimizeInvalids(fd: FunDef, ctx: LeonContext, program: Program): ExamplesBank = {
    val evaluator = new TrackingEvaluator(ctx, program)
    // Evaluate every invalid example so the evaluator records the full call
    // graph and the pass/fail status of each invocation made along the way.
    invalids foreach { ts =>
      evaluator.eval(functionInvocation(fd, ts.ins))
    }
    // Remember expected outputs (when known) so we can reattach them below.
    val outInfo = invalids.collect {
      case InOutExample(ins, outs) => ins -> outs
    }.toMap
    val callGraph = evaluator.fullCallGraph
    // A call is "failing" if the evaluator flagged it and it targets fd itself.
    def isFailing(fi: (FunDef, Seq[Expr])) = !evaluator.fiStatus(fi) && (fi._1 == fd)
    // Keep only failing calls whose callees all succeed: these fail on their
    // own, not because of a failing recursive call.
    val failing = callGraph filter { case (from, to) =>
      isFailing(from) && (to forall (!isFailing(_)) )
    }
    val newInvalids = failing.keySet map {
      case (_, args) =>
        outInfo.get(args) match {
          case Some(outs) =>
            InOutExample(args, outs)
          case None =>
            InExample(args)
        }
    }
    ExamplesBank(valids, newInvalids.toSeq)
  }
  /** Merges two banks, deduplicating examples by their inputs. */
  def union(that: ExamplesBank) = {
    ExamplesBank(
      distinctIns(this.valids union that.valids),
      distinctIns(this.invalids union that.invalids)
    )
  }
  // Deduplicates by input; when an input appears both with and without an
  // expected output, the variant with the output is preferred.
  private def distinctIns(s: Seq[Example]): Seq[Example] = {
    val insOuts = s.collect {
      case InOutExample(ins, outs) => ins -> outs
    }.toMap
    s.map(_.ins).distinct.map {
      case ins =>
        insOuts.get(ins) match {
          case Some(outs) => InOutExample(ins, outs)
          case _ => InExample(ins)
        }
    }
  }
  /** Expands each example (valid and invalid) through the function f. */
  def flatMap(f: Example => List[Example]) = {
    ExamplesBank(valids.flatMap(f), invalids.flatMap(f))
  }
  /** Expands each input example through the function f */
  def flatMapIns(f: Seq[Expr] => List[Seq[Expr]]) = {
    flatMap {
      case InExample(in) =>
        f(in).map(InExample)
      case InOutExample(in, out) =>
        f(in).map(InOutExample(_, out))
    }
  }
  /** Expands each output example through the function f */
  def flatMapOuts(f: Seq[Expr] => List[Seq[Expr]]) = {
    flatMap {
      case InOutExample(in, out) =>
        f(out).map(InOutExample(in, _))
      case e =>
        List(e)
    }
  }
  /** Drops the expected outputs, keeping only inputs. */
  def stripOuts = {
    flatMap {
      case InOutExample(in, out) =>
        List(InExample(in))
      case e =>
        List(e)
    }
  }
  /** Renders the bank as an ASCII table with "Valid tests" / "Invalid tests" sections. */
  def asString(title: String)(implicit ctx: LeonContext): String = {
    // NOTE(review): `tt += Row(...)` below presumably reassigns (Table being
    // immutable), which is why tt is a var — confirm against ASCIIHelpers.Table.
    var tt = new Table(title)
    if (examples.nonEmpty) {
      // ow/iw: column counts for outputs and inputs, used to size spanning cells.
      val ow = examples.map {
        case InOutExample(_, out) => out.size
        case _ => 1
      }.max
      val iw = examples.map(_.ins.size).max
      // Appends a titled section of rows (one per example) to the table.
      def testsRows(section: String, ts: Seq[Example]) {
        if (tt.rows.nonEmpty) {
          tt += Row(Seq(
            Cell(" ", iw + ow + 1)
          ))
        }
        tt += Row(Seq(
          Cell(Console.BOLD+section+Console.RESET+":", iw + ow + 1)
        ))
        tt += Separator
        for (t <- ts) {
          val os = t match {
            case InOutExample(_, outs) =>
              outs.map(o => Cell(o.asString))
            case _ =>
              // Unknown outputs are shown as a single "?" spanning the output columns.
              Seq(Cell("?", ow))
          }
          tt += Row(
            t.ins.map(i => Cell(i.asString)) ++ Seq(Cell("->")) ++ os
          )
        }
      }
      if (valids.nonEmpty) {
        testsRows("Valid tests", valids)
      }
      if (invalids.nonEmpty) {
        testsRows("Invalid tests", invalids)
      }
      tt.render
    } else {
      "No tests."
    }
  }
}
object ExamplesBank {
  /** An examples bank with no valid and no invalid examples. */
  def empty = ExamplesBank(Nil, Nil)
}
/** Same as an ExamplesBank, but with identifiers corresponding to values. This
* allows us to evaluate expressions. */
case class QualifiedExamplesBank(as: List[Identifier], xs: List[Identifier], eb: ExamplesBank)(implicit hctx: SearchContext) {
  // TODO: This might be slightly conservative. We might want something closer to a partial evaluator,
  // to conserve things like (e: A).isInstanceOf[A] even when evaluation of e leads to choose
  private lazy val evaluator = new DefaultEvaluator(hctx, hctx.program).setEvaluationFailOnChoose(true)
  /** Drops the given output identifiers, projecting every example's outputs onto the kept ones. */
  def removeOuts(toRemove: Set[Identifier]): QualifiedExamplesBank = {
    val nxs = xs.filterNot(toRemove)
    // Indices (in xs) of the outputs we keep, used to project each example.
    val toKeep = xs.zipWithIndex.filterNot(x => toRemove(x._1)).map(_._2)
    QualifiedExamplesBank(as, nxs, eb flatMapOuts { out => List(toKeep.map(out)) })
  }
  /** Drops the given input identifiers, projecting every example's inputs onto the kept ones. */
  def removeIns(toRemove: Set[Identifier]) = {
    val nas = as.filterNot(toRemove)
    val toKeep: List[Int] = as.zipWithIndex.filterNot(a => toRemove(a._1)).map(_._2)
    QualifiedExamplesBank(nas, xs, eb flatMapIns { (in: Seq[Expr]) => List(toKeep.map(in)) })
  }
  // Re-evaluates each input assignment; examples whose evaluation leaves some
  // input identifier unbound (NoSuchElementException) are discarded.
  def evalAndDiscardIns: QualifiedExamplesBank = copy( eb = flatMapIns { mapping =>
    val evalAs = evaluator.evalEnv(mapping)
    try {
      List(as map evalAs)
    } catch {
      case _: NoSuchElementException =>
        Nil
    }
  })
  /** Filter inputs through expr which is an expression evaluating to a boolean */
  def filterIns(expr: Expr): QualifiedExamplesBank = {
    filterIns(m => evaluator.eval(expr, m).result.contains(BooleanLiteral(true)))
  }
  /** Filters inputs through the predicate pred, with an assignment of input variables to expressions. */
  def filterIns(pred: Map[Identifier, Expr] => Boolean): QualifiedExamplesBank = {
    QualifiedExamplesBank(as, xs,
      eb flatMapIns { in =>
        val m = (as zip in).toMap
        if(pred(m)) {
          List(in)
        } else {
          Nil
        }
      }
    )
  }
  /** Maps inputs through the function f
    *
    * @return A new ExampleBank */
  def flatMapIns(f: Seq[(Identifier, Expr)] => List[Seq[Expr]]): ExamplesBank = {
    eb flatMap {
      case InExample(in) =>
        f(as zip in).map(InExample)
      case InOutExample(in, out) =>
        f(as zip in).map(InOutExample(_, out))
    }
  }
  /** Encodes the valid in/out examples as one conjunction of implications (ins => outs). */
  def asConstraint: Expr = {
    andJoin(eb.valids.map {
      case InOutExample(ins, outs) =>
        val in  = andJoin(as.map(_.toVariable).zip(ins) .map { case (a,b) => equality(a,b) })
        val out = andJoin(xs.map(_.toVariable).zip(outs).map { case (a,b) => equality(a,b) })
        implies(in, out)
      case _ =>
        // Input-only examples contribute no constraint.
        BooleanLiteral(true)
    })
  }
}
import scala.language.implicitConversions
object QualifiedExamplesBank {
  // Allows a QualifiedExamplesBank to be used wherever a plain ExamplesBank is expected.
  implicit def qebToEb(qeb: QualifiedExamplesBank): ExamplesBank = qeb.eb
}
| regb/leon | src/main/scala/leon/synthesis/ExamplesBank.scala | Scala | gpl-3.0 | 6,720 |
package breeze.linalg
import breeze.generic.UFunc
import breeze.macros.expand
import breeze.util.{ArrayUtil, ReflectionUtil}
import breeze.macros._
import scala.reflect.ClassTag
/**
* deduplicates the array
*
* @author stucchio
*/
object unique extends UFunc {
  implicit def impl[S]: Impl[DenseVector[S], DenseVector[S]] = new Impl[DenseVector[S], DenseVector[S]] {
    def apply(v: DenseVector[S]): DenseVector[S] = {
      implicit val ct: ClassTag[S] = ReflectionUtil.elemClassTagFromArray(v.data)
      if (v.size == 0) {
        // Empty input: return a fresh empty vector.
        DenseVector(new Array[S](0))
      } else {
        // toArray yields a copy, so sorting in place does not mutate the input vector.
        val data = v.toArray
        ArrayUtil.sort(data)
        // First pass: count distinct elements by counting boundaries between
        // adjacent runs of equal values. data(0) is always the first distinct
        // element, so scanning starts at index 1 (index 0 can never differ
        // from lastElement).
        var elementCount = 1
        var lastElement = data(0)
        cforRange(1 until data.length) { i =>
          val di = data(i)
          if (di != lastElement) {
            elementCount += 1
            lastElement = di
          }
        }
        // Second pass: copy the first element of each run into the result,
        // which therefore comes out sorted.
        val result = new Array[S](elementCount)
        result(0) = data(0)
        lastElement = data(0)
        var idx = 1
        cforRange(1 until data.length) { i =>
          val di = data(i)
          if (di != lastElement) {
            result(idx) = di
            lastElement = di
            idx += 1
          }
        }
        DenseVector(result)
      }
    }
  }
}
| scalanlp/breeze | math/src/main/scala/breeze/linalg/functions/unique.scala | Scala | apache-2.0 | 1,285 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Fri Aug 19 17:10:23 EDT 2011
* @see LICENSE (MIT style license file).
*/
package apps.event
import scalation.model.Modelable
import scalation.event.{CausalLink, Entity, Event, EventNode, Model}
import scalation.random.{Exponential, Variate}
import scalation.stat.Statistic
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `CallCenter2` object defines a particular scenario under which to execute
* the Call Center model. It is the same as `CallCenter`, except that causal links
* are added to enable the model to be animated as an Event Graph.
* @see scalation.event.ModelTest for another example of test code.
* > run-main apps.event.CallCenter2
*/
object CallCenter2 extends App with Modelable
{
    val stream    = 1                                  // random number stream (0 to 99)
    val lambda    = 6.0                                // call arrival rate (per hour)
    val mu        = 7.5                                // call service rate (per hour)
    val maxCalls  = 10                                 // stopping rule: at maxCalls
    // Rates are per hour, so mean times are HOUR / rate.
    val iArrivalRV = Exponential (HOUR/lambda, stream) // inter-arrival time random var
    val serviceRV  = Exponential (HOUR/mu, stream)     // service time random variate

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Run the simulation of `CallCenterModel2`.
     *  @param startTime  the start time for the simulation
     */
    def simulate (startTime: Double)
    {
        // Constructing the model starts the simulation (see CallCenterModel2).
        new CallCenterModel2 ("CallCenter2", maxCalls, iArrivalRV, serviceRV)
    } // simulate

    simulate (0.0)

} // CallCenter2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `CallCenterModel2` class defines a simple Event Graph model of a
* Call Center where service is provided by one tele-service representative and
* models an M/M/1/1 queue.
* @param name the name of the simulation model
* @param nArrivals the number of arrivals to generate (stopping condition)
* @param iArrivalRV the inter-arrival time distribution (Random Variate)
* @param serviceRV the service time distribution (Random Variate)
*/
class CallCenterModel2 (name: String, nArrivals: Int, iArrivalRV: Variate,
                        serviceRV: Variate)
      extends Model (name, true)                   // true => turn on animation
{
    val t_a_stat = new Statistic ("t_a")           // time between Arrivals statistics
    val t_s_stat = new Statistic ("t_s")           // time in Service statistics
    val aLoc     = Array (150.0, 200.0, 50.0, 50.0)   // Arrival event node location
    val dLoc     = Array (450.0, 200.0, 50.0, 50.0)   // Departure event node location
    val aProto   = new EventNode (this, aLoc)      // prototype for all Arrival events
    val dProto   = new EventNode (this, dLoc)      // prototype for all Departure events

    // Causal links for the Event Graph: an Arrival may schedule the next Arrival
    // (while more calls remain) and a Departure (when the line is free).
    val aLink = Array (CausalLink ("l_A2A", this, () => nArr < nArrivals-1, aProto),
                       CausalLink ("l_A2D", this, () => nIn == 0, dProto))

    var nArr   = 0.0                               // number of calls that have arrived
    var nIn    = 0.0                               // number of calls in progress
    var nOut   = 0.0                               // number of calls that finished and hung up
    var nLost  = 0.0                               // number of calls dropped

    aProto.displayLinks (aLink)

    addStats (t_a_stat, t_s_stat)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** `Arrival` is a subclass of `Event` for handling arrival events 'MakeCall'.
     *  @param call   the entity that arrives, in this case a phone call
     *  @param delay  the time delay for this event's occurrence
     */
    case class Arrival (call: Entity, delay: Double)
         extends Event (call, this, delay, t_a_stat, aProto)
    {
        override def occur ()
        {
            // While more arrivals remain, schedule the next one.
            if (aLink(0).condition ()) {
                val toArrive = Entity (iArrivalRV.gen, serviceRV.gen, CallCenterModel2.this)
                schedule (Arrival (toArrive, toArrive.iArrivalT))
            } // if
            // If the representative is free, this call goes straight into service.
            if (aLink(1).condition ()) {
                schedule (Departure (call, call.serviceT))
            } // if
            nArr += 1                                        // update the current state
            // M/M/1/1: no waiting room — a call arriving while one is in progress is dropped.
            if (nIn == 1) nLost += 1 else nIn = 1
        } // occur

    } // Arrival class

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** `Departure is a subclass of `Event` for handling departure events 'HangUp'.
     *  @param call   the entity that arrives, in this case a phone call
     *  @param delay  the time delay for this event's occurrence
     */
    case class Departure (call: Entity, delay: Double)
         extends Event (call, this, delay, t_s_stat, dProto)
    {
        override def occur ()
        {
            leave (call)                                     // collects time in sYstem statistics
            nIn   = 0                                        // update the current state
            nOut += 1
        } // occur

    } // Departure class

    //:: start the simulation after scheduling the first priming event

    val firstArrival = Entity (iArrivalRV.gen, serviceRV.gen, this)
    schedule (Arrival (firstArrival, firstArrival.iArrivalT))     // first priming event
    simulate ()                                                   // start simulating

    // NOTE(review): fixed 20s sleep to let the animation play out before reporting.
    Thread.sleep (20000)                                          // wait on animation trace
    report (("nArr", nArr), ("nIn", nIn), ("nLost", nLost), ("nOut", nOut))
    reportStats

} // CallCenterModel2 class
| scalation/fda | scalation_1.3/scalation_models/src/main/scala/apps/event/CallCenter2.scala | Scala | mit | 5,905 |
package com.github.hexx.gaeds
import java.lang.reflect.{ Field, Method }
import scala.reflect.ClassTag
import com.google.appengine.api.datastore.{ Entity, FetchOptions, Transaction }
import org.json4s._
import org.json4s.native.JsonMethods._
abstract class Mapper[T <: Mapper[T]: ClassTag] extends DatastoreDelegate[T] {
  self: T =>

  // Bind each Property field to the name of its declaring method, so entity
  // properties are stored under the field names of the concrete mapper class.
  assignPropertyName()

  // Datastore key; None until the mapper has been persisted (or loaded).
  var key: Option[Key[T]] = None

  def kind = concreteClass.getName // override to customize
  def concreteClass = implicitly[ClassTag[T]].runtimeClass

  def put() = Datastore.put(this)
  def put(txn: Transaction) = Datastore.put(txn, this)
  def putAsync() = Datastore.putAsync(this)
  def putAsync(txn: Transaction) = Datastore.putAsync(txn, this)

  def query() = Datastore.query(this)
  def query[U <: Mapper[U]](ancestorKey: Key[U]) = Datastore.query(this, ancestorKey)
  def query(fetchOptions: FetchOptions) = Datastore.query(this, fetchOptions)
  def query[U <: Mapper[U]](ancestorKey: Key[U], fetchOptions: FetchOptions) =
    Datastore.query(this, ancestorKey, fetchOptions)
  def query(txn: Transaction) = Datastore.query(txn, this)
  def query[U <: Mapper[U]](txn: Transaction, ancestorKey: Key[U]) = Datastore.query(txn, this, ancestorKey)
  def query(txn: Transaction, fetchOptions: FetchOptions) = Datastore.query(txn, this, fetchOptions)
  def query[U <: Mapper[U]](txn: Transaction, ancestorKey: Key[U], fetchOptions: FetchOptions) =
    Datastore.query(txn, this, ancestorKey, fetchOptions)

  // All Property fields of the concrete class, discovered reflectively.
  def properties: Seq[BaseProperty[_]] = zipPropertyAndMethod.map(_._1)
  def findProperty(name: String) = properties.find(_.__nameOfProperty == name)

  def fromEntity(entity: Entity): T = Datastore.createMapper(entity)
  /** Converts this mapper to a datastore Entity, copying every property into it. */
  def toEntity = {
    assignPropertyName()
    // Reuse the existing key when present; otherwise create a fresh entity of this kind.
    val entity = key match {
      case Some(k) => new Entity(k.key)
      case None => new Entity(kind)
    }
    assert(properties.size != 0, "define fields with Property[T]")
    properties foreach (_.__setToEntity(entity))
    entity
  }
  /** JSON representation: optional web-safe "key" field plus one field per property. */
  def toJObject = {
    assignPropertyName()
    val keyField = key.map(k => JField("key", JString(k.toWebSafeString)))
    JObject((keyField ++ properties.map(_.__jfieldOfProperty)).toList)
  }
  def toJson = compact(render(toJObject))
  def fromJObject(jobject: JObject) = Datastore.createMapperFromJObject(jobject)
  def fromJson(json: String) = fromJObject(parse(json).asInstanceOf[JObject])

  // Equality is by key and property values, not by identity.
  override def equals(that: Any) = that match {
    case that: Mapper[_] => that.key == key && that.properties == properties
    case _ => false
  }

  override val mapperClassTag = implicitly[ClassTag[T]]

  // Pairs each BaseProperty value with the zero-arg method that returns it.
  // Synthetic methods are skipped so only user-declared fields are picked up.
  private def zipPropertyAndMethod: Seq[(BaseProperty[_], Method)] = {
    def isGetter(m: Method) = !m.isSynthetic && classOf[BaseProperty[_]].isAssignableFrom(m.getReturnType)
    for {
      m <- concreteClass.getDeclaredMethods
      if isGetter(m)
      p = m.invoke(this).asInstanceOf[BaseProperty[_]]
    } yield (p, m)
  }

  private def assignPropertyName() {
    for ((p, m) <- zipPropertyAndMethod) {
      p.__nameOfProperty = m.getName
    }
  }
}
| hexx/gaeds | src/main/scala/Mapper.scala | Scala | mit | 3,054 |
package pl.touk.nussknacker.engine.util
import scala.util.Try
object convert {

  /**
   * Base class for extractor objects that parse a String into a value of type `T`.
   * `unapply` yields `Some(value)` on success and `None` when the string is not
   * parsable, so subclasses can be used directly in pattern matches, e.g.
   * `str match { case IntValue(i) => ... }`.
   *
   * @param tryToConvert conversion that may throw on malformed input
   */
  abstract class StringToNumberConverter[T](tryToConvert: String => T) {
    def unapply(str: String): Option[T] =
      Try(tryToConvert(str)).map(Some(_)).recover {
        // Catch IllegalArgumentException rather than NumberFormatException:
        // NumberFormatException is its subclass, and String.toBoolean signals
        // failure with a plain IllegalArgumentException. Matching only on
        // NumberFormatException made BooleanValue.unapply throw on unparsable
        // input instead of returning None.
        case _: IllegalArgumentException => None
      }.get
  }

  object IntValue extends StringToNumberConverter[Int](_.toInt)

  object LongValue extends StringToNumberConverter[Long](_.toLong)

  object DoubleValue extends StringToNumberConverter[Double](_.toDouble)

  // NOTE(review): parses with toFloat but the extracted value is widened to
  // Double; kept as-is for backward compatibility with existing matches.
  object FloatValue extends StringToNumberConverter[Double](_.toFloat)

  object BooleanValue extends StringToNumberConverter[Boolean](_.toBoolean)

  object BigDecimalValue extends StringToNumberConverter[BigDecimal](BigDecimal(_))
}
| TouK/nussknacker | utils/utils/src/main/scala/pl/touk/nussknacker/engine/util/convert.scala | Scala | apache-2.0 | 755 |
package outwatch
import outwatch.definitions._
// Single entry point for the OutWatch DSL: `import outwatch.dsl._` brings all
// attributes, tags and styles into scope; the nested objects below offer the
// same definitions as qualified namespaces for disambiguation.
object dsl extends Attributes with Tags with Styles {
  // Tag namespaces: common HTML tags plus extra and SVG variants.
  object tags extends Tags {
    object extra extends TagsExtra
    object svg extends TagsSvg
  }
  // Attribute namespaces, including SVG-specific attributes.
  object attributes extends Attributes {
    object svg extends SvgAttrs
  }
  // Combined SVG namespace (both attributes and tags).
  object svg extends SvgAttrs with TagsSvg
  object styles extends Styles {
    object extra extends StylesExtra
  }
  // Global event sources scoped to window and document.
  object events {
    object window extends WindowEvents
    object document extends DocumentEvents
  }
}
| OutWatch/outwatch | outwatch/src/main/scala/outwatch/dsl.scala | Scala | apache-2.0 | 504 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils
import org.junit.{After, Before, Test}
import org.junit.Assert._
import java.io.File
import org.scalatest.Assertions.intercept
import org.apache.zookeeper.KeeperException.NodeExistsException
class ServerGenerateBrokerIdTest extends ZooKeeperTestHarness {
  var props1: Properties = null
  var config1: KafkaConfig = null
  var props2: Properties = null
  var config2: KafkaConfig = null
  val brokerMetaPropsFile = "meta.properties"
  var servers: Seq[KafkaServer] = Seq()
  @Before
  override def setUp() {
    super.setUp()
    // broker.id = -1 requests automatic broker id generation.
    props1 = TestUtils.createBrokerConfig(-1, zkConnect)
    config1 = KafkaConfig.fromProps(props1)
    // broker.id = 0 is an explicit, user-assigned id.
    props2 = TestUtils.createBrokerConfig(0, zkConnect)
    config2 = KafkaConfig.fromProps(props2)
  }
  @After
  override def tearDown() {
    // Shut down any servers left running by a test before tearing down ZK.
    TestUtils.shutdownServers(servers)
    super.tearDown()
  }
  // Auto-generated ids start above the reserved range, hence the expected 1001,
  // and must survive a restart via the meta.properties checkpoint.
  @Test
  def testAutoGenerateBrokerId() {
    var server1 = new KafkaServer(config1, threadNamePrefix = Option(this.getClass.getName))
    server1.startup()
    server1.shutdown()
    assertTrue(verifyBrokerMetadata(config1.logDirs, 1001))
    // restart the server check to see if it uses the brokerId generated previously
    server1 = TestUtils.createServer(config1)
    servers = Seq(server1)
    assertEquals(server1.config.brokerId, 1001)
    server1.shutdown()
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  // Mixing auto-generated and user-configured ids: generated ids (1001, 1002)
  // must not collide with the user-specified id (0).
  @Test
  def testUserConfigAndGeneratedBrokerId() {
    // start the server with broker.id as part of config
    val server1 = new KafkaServer(config1, threadNamePrefix = Option(this.getClass.getName))
    val server2 = new KafkaServer(config2, threadNamePrefix = Option(this.getClass.getName))
    val props3 = TestUtils.createBrokerConfig(-1, zkConnect)
    val server3 = new KafkaServer(KafkaConfig.fromProps(props3))
    server1.startup()
    assertEquals(server1.config.brokerId, 1001)
    server2.startup()
    assertEquals(server2.config.brokerId, 0)
    server3.startup()
    assertEquals(server3.config.brokerId, 1002)
    servers = Seq(server1, server2, server3)
    servers.foreach(_.shutdown())
    assertTrue(verifyBrokerMetadata(server1.config.logDirs, 1001))
    assertTrue(verifyBrokerMetadata(server2.config.logDirs, 0))
    assertTrue(verifyBrokerMetadata(server3.config.logDirs, 1002))
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  // With generation disabled, the configured id is used verbatim even if it
  // falls inside the (shrunken) reserved range.
  @Test
  def testDisableGeneratedBrokerId() {
    val props3 = TestUtils.createBrokerConfig(3, zkConnect)
    props3.put(KafkaConfig.BrokerIdGenerationEnableProp, "false")
    // Set reserve broker ids to cause collision and ensure disabling broker id generation ignores the setting
    props3.put(KafkaConfig.MaxReservedBrokerIdProp, "0")
    val config3 = KafkaConfig.fromProps(props3)
    val server3 = TestUtils.createServer(config3)
    servers = Seq(server3)
    assertEquals(server3.config.brokerId, 3)
    server3.shutdown()
    assertTrue(verifyBrokerMetadata(server3.config.logDirs, 3))
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  @Test
  def testMultipleLogDirsMetaProps() {
    // add multiple logDirs and check if the generate brokerId is stored in all of them
    val logDirs = props1.getProperty("log.dir")+ "," + TestUtils.tempDir().getAbsolutePath +
    "," + TestUtils.tempDir().getAbsolutePath
    props1.setProperty("log.dir", logDirs)
    config1 = KafkaConfig.fromProps(props1)
    var server1 = new KafkaServer(config1, threadNamePrefix = Option(this.getClass.getName))
    server1.startup()
    servers = Seq(server1)
    server1.shutdown()
    assertTrue(verifyBrokerMetadata(config1.logDirs, 1001))
    // addition to log.dirs after generation of a broker.id from zk should be copied over
    val newLogDirs = props1.getProperty("log.dir") + "," + TestUtils.tempDir().getAbsolutePath
    props1.setProperty("log.dir", newLogDirs)
    config1 = KafkaConfig.fromProps(props1)
    server1 = new KafkaServer(config1, threadNamePrefix = Option(this.getClass.getName))
    server1.startup()
    servers = Seq(server1)
    server1.shutdown()
    assertTrue(verifyBrokerMetadata(config1.logDirs, 1001))
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  @Test
  def testConsistentBrokerIdFromUserConfigAndMetaProps() {
    // check if configured brokerId and stored brokerId are equal or throw InconsistentBrokerException
    var server1 = new KafkaServer(config1, threadNamePrefix = Option(this.getClass.getName)) //auto generate broker Id
    server1.startup()
    servers = Seq(server1)
    server1.shutdown()
    // Restart with a conflicting user-specified id against the same log dirs.
    server1 = new KafkaServer(config2, threadNamePrefix = Option(this.getClass.getName)) // user specified broker id
    try {
      server1.startup()
    } catch {
      case _: kafka.common.InconsistentBrokerIdException => //success
    }
    server1.shutdown()
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  // A broker that fails registration due to an id collision must not leave a
  // meta.properties checkpoint behind, and must be able to start after the id
  // is corrected.
  @Test
  def testBrokerMetadataOnIdCollision() {
    // Start a good server
    val propsA = TestUtils.createBrokerConfig(1, zkConnect)
    val configA = KafkaConfig.fromProps(propsA)
    val serverA = TestUtils.createServer(configA)
    // Start a server that collides on the broker id
    val propsB = TestUtils.createBrokerConfig(1, zkConnect)
    val configB = KafkaConfig.fromProps(propsB)
    val serverB = new KafkaServer(configB)
    intercept[NodeExistsException] {
      serverB.startup()
    }
    servers = Seq(serverA)
    // verify no broker metadata was written
    serverB.config.logDirs.foreach { logDir =>
      val brokerMetaFile = new File(logDir + File.separator + brokerMetaPropsFile)
      assertFalse(brokerMetaFile.exists())
    }
    // adjust the broker config and start again
    propsB.setProperty(KafkaConfig.BrokerIdProp, "2")
    val newConfigB = KafkaConfig.fromProps(propsB)
    val newServerB = TestUtils.createServer(newConfigB)
    servers = Seq(serverA, newServerB)
    serverA.shutdown()
    newServerB.shutdown()
    // verify correct broker metadata was written
    assertTrue(verifyBrokerMetadata(serverA.config.logDirs, 1))
    assertTrue(verifyBrokerMetadata(newServerB.config.logDirs, 2))
    TestUtils.verifyNonDaemonThreadsStatus(this.getClass.getName)
  }
  // Returns true iff every log dir contains a meta.properties checkpoint
  // recording exactly the given broker id.
  def verifyBrokerMetadata(logDirs: Seq[String], brokerId: Int): Boolean = {
    for (logDir <- logDirs) {
      val brokerMetadataOpt = new BrokerMetadataCheckpoint(
        new File(logDir + File.separator + brokerMetaPropsFile)).read()
      brokerMetadataOpt match {
        case Some(brokerMetadata) =>
          if (brokerMetadata.brokerId != brokerId) return false
        case _ => return false
      }
    }
    true
  }
}
| KevinLiLu/kafka | core/src/test/scala/unit/kafka/server/ServerGenerateBrokerIdTest.scala | Scala | apache-2.0 | 7,549 |
/*
* Copyright 2013 - 2015, Daniel Krzywicki <daniel.krzywicki@agh.edu.pl>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package pl.edu.agh.intob.network
import net.liftweb.json._
import pl.edu.agh.scalamas.emas.EmasSolutions
/**
* Created by rafal on 21/11/16.
*/
/**
 * Deserializes a JSON fitness message into an (id, solution) pair.
 *
 * @param message JSON payload expected to match the [[SolutionWrapper]] shape
 *                (fields `id` and `solution`)
 */
class FitnessDeserializer(message: String) {

  /** Parses the message and returns the wrapped id together with its solution.
    * Lift-json raises a MappingException when the payload does not match
    * [[SolutionWrapper]].
    */
  def deserialize(): (Int, EmasSolutions#SolutionsType) = {
    // DefaultFormats supplies the configuration lift-json needs for `extract`.
    implicit val formats = DefaultFormats
    val parsedData = parse(message).extract[SolutionWrapper]
    (parsedData.id, parsedData.solution)
  }
}
} | ros3n/IntOb | int-ob/src/main/scala/pl/edu/agh/intob/network/FitnessDeserializer.scala | Scala | mit | 1,586 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBigDecimal, Input}
// CT600E (v3) box E165: overseas investments held at the end of the period, taken from accounts figures.
case class E165(value: Option[BigDecimal]) extends CtBoxIdentifier("Held at the end of the period (use accounts figures): Overseas investments") with CtOptionalBigDecimal with Input
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E165.scala | Scala | apache-2.0 | 895 |
/*
* Copyright (c) 2012 Michael Rose
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.xorlev.simon.request
import java.lang.String
import com.xorlev.simon.handlers.ErrorHandler
import util.matching.Regex
/**
* 2012-12-02
* @author Michael Rose <elementation@gmail.com>
*/
trait RequestMapper {
  /** Resolves the handler responsible for the given request path. */
  def getHandler(path: String): RequestHandler
}
/**
 * Request mapper that resolves handlers by literal path prefix.
 *
 * NOTE(review): registration is synchronized but lookups read `ctx` without a
 * lock; a lookup may briefly observe the map as it was before a concurrent
 * registration completed.
 */
object StaticRequestMapper extends RequestMapper {
  var ctx: Map[String, RequestHandler] = Map()

  /** Registers `handler` for every path starting with `path`. */
  def registerHandler(path: String, handler: RequestHandler) {
    synchronized {
      ctx += path -> handler
    }
  }

  /** Returns a handler whose registered prefix matches `path`, or a fresh
    * [[ErrorHandler]] when no prefix matches. When several prefixes match,
    * which one wins depends on map iteration order.
    */
  def getHandler(path: String): RequestHandler =
    ctx.find { case (prefix, _) => path.startsWith(prefix) }
      .map(_._2)
      .getOrElse(new ErrorHandler)
}
/**
 * Request mapper that matches paths against regexes compiled from route
 * templates: a segment of the form `:name` becomes a capturing group, so a
 * template like `/users/:id` matches `/users/42` and captures `42`.
 */
object RegexRequestMapper extends RequestMapper {
  var ctx: Map[Regex, RequestHandler] = Map()

  /** Compiles the `:param` placeholders in `path` into capturing groups and
    * registers the handler under the resulting regex.
    */
  def registerHandler(path: String, handler: RequestHandler) {
    synchronized {
      // Bug fix: the previous replacement "([\\^\\/]+)" produced the regex
      // ([\^\/]+) - a character class matching only literal '^' and '/'
      // characters. A route parameter should capture a whole path segment,
      // i.e. any run of characters other than '/'.
      ctx += path.replaceAll(":([a-zA-Z0-9]+)", "([^/]+)").r -> handler
    }
  }

  /** Finds a handler whose route regex matches `path` and stores the first
    * captured segment under the `"helloparam"` request parameter; falls back
    * to a fresh [[ErrorHandler]] when nothing matches.
    */
  def getHandler(path: String): RequestHandler = {
    // Build each matcher once and reuse it. The old code matched twice (once
    // in `find`, once on a fresh matcher), which also made its
    // `requestParams.remove()` branch unreachable.
    ctx.view
      .map { case (route, handler) => (route.pattern.matcher(path), handler) }
      .find { case (matcher, _) => matcher.find() }
      .map { case (matcher, handler) =>
        handler.requestParams.set(Map(
          "helloparam" -> matcher.group(1)
        ))
        handler
      }
      .getOrElse(new ErrorHandler)
  }
}
} | Xorlev/Simon | simon-http-server/src/main/scala/com/xorlev/simon/request/RequestMapper.scala | Scala | apache-2.0 | 1,984 |
package debop4s.core.utils
import org.scalactic.TolerantNumerics._
import org.scalactic.TripleEquals._
import org.slf4j.LoggerFactory
import scala.annotation.switch
import scala.util.control.NonFatal
/**
* 무게에 대한 단위를 표현합니다.
* @author sunghyouk.bae@gmail.com
*/
/**
 * 무게에 대한 단위를 표현합니다.
 *
 * A weight quantity stored internally in grams, with conversions to common
 * units, arithmetic, ordering, and tolerant equality (values within 0.001 g
 * compare equal).
 *
 * @param gram the weight in grams
 * @author sunghyouk.bae@gmail.com
 */
class WeightUnit(val gram: Double) extends Ordered[WeightUnit] {

  // Tolerant equality used by `equals`: differences below 0.001 g are ignored.
  private[this] implicit val dblEquality = tolerantDoubleEquality(0.001)

  def inMilligram: Double = gram * 1000
  def inGram: Double = gram
  def inKilogram: Double = gram / 1000
  def inTon: Double = gram / 1000000
  def inGrain: Double = gram * 15.432
  def inOnce: Double = gram / 28.3495
  def inFound: Double = gram / 453.592

  def +(that: WeightUnit): WeightUnit = new WeightUnit(this.gram + that.gram)
  def -(that: WeightUnit): WeightUnit = new WeightUnit(this.gram - that.gram)
  def *(scala: Double): WeightUnit = new WeightUnit(this.gram * scala)
  def /(scala: Double): WeightUnit = new WeightUnit(this.gram / scala)

  override def equals(obj: Any): Boolean = obj match {
    case other: WeightUnit => gram === other.gram
    case _ => false
  }

  override def hashCode: Int = gram.hashCode()

  override def toString: String = gram + ".gram"

  override def compare(that: WeightUnit): Int =
    if (gram < that.gram) -1
    else if (gram > that.gram) 1
    else 0

  /**
   * 수치에 따라 사람이 볼 때 쉽게 볼 수 있도록 합니다.
   *
   * Renders the weight in a human-friendly unit (mg, g, kg or ton).
   *
   * Bug fix: the ton branch was previously unreachable because the kg test
   * (`> 1000`) was evaluated before the ton test (`> 1000000`); the ton
   * threshold is now checked first.
   */
  def toHuman: String = {
    val magnitude = gram.abs
    val sign = gram.signum
    if (magnitude < 1.0) "%.1f %sg".format(magnitude * 1000 * sign, "m")
    else if (magnitude > 1000000) "%.1f ton".format(magnitude / 1000000 * sign)
    else if (magnitude > 1000) "%.1f %sg".format(magnitude / 1000 * sign, "k")
    else "%.1f %sg".format(magnitude * sign, "")
  }
}
object WeightUnit {

  private[this] val log = LoggerFactory.getLogger(getClass)

  lazy val positiveInfinite = new WeightUnit(Double.PositiveInfinity)
  lazy val negativeInfinite = new WeightUnit(Double.NegativeInfinity)

  /**
   * Parses a string of the form `"<value>.<unit>"` (e.g. `"12.gram"`,
   * `"3.kilograms"`) into a [[WeightUnit]].
   *
   * @throws NumberFormatException if the value or the unit cannot be parsed
   */
  def parse(str: String): WeightUnit = {
    try {
      // Split on the LAST '.' so the unit suffix is separated from the value.
      // NOTE(review): a fractional value such as "1.5.gram" splits at the last
      // dot, leaving "1.5" as the value part - confirm this format is intended.
      var (v, u) = str.splitAt(str.lastIndexOf("."))
      if (Strings.isEmpty(v)) v = ""
      if (u startsWith ".") u = u drop 1
      log.debug(s"parsing weight. v=$v, u=$u")
      val vv = v.toDouble
      val uu = factor(u)
      new WeightUnit(vv * uu)
    } catch {
      // Any parse failure (bad number, unknown unit) is reported uniformly.
      case NonFatal(e) =>
        throw new NumberFormatException(s"알 수 없는 무게 표시 문자열입니다. str=$str")
    }
  }

  // Conversion factor (to grams) for a unit name; a trailing 's' (plural form)
  // is stripped before matching.
  private def factor(unit: String): Double = {
    var lower = unit.toLowerCase
    if (lower endsWith "s")
      lower = lower dropRight 1
    lower match {
      case "milligram" => 0.001
      case "gram" => 1
      case "kilogram" => 1000
      case badUnit => throw new NumberFormatException(s"알 수 없는 무게 표시 단위입니다. unit=$unit")
    }
  }
}
| debop/debop4s | debop4s-core/src/main/scala/debop4s/core/utils/WeightUnit.scala | Scala | apache-2.0 | 2,981 |
package com.aristocrat.mandrill.requests.Metadata
import com.aristocrat.mandrill.requests.MandrillRequest
// Request payload for the Mandrill Metadata "add" call (see enclosing package `requests.Metadata`).
case class Add(key: String, name: String, viewTemplate: String) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Metadata/Add.scala | Scala | mit | 196 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef.None
import quasar.frontend.logicalplan.LogicalPlan
import quasar.sql.Sql
import matryoshka.data.Fix
import pathy.Path.rootDir
import eu.timepit.refined.auto._
import scalaz.Scalaz._
object Fixture {
  // Test helper: compiles a SQL AST (plus optional variables) to a LogicalPlan,
  // aborting via sys.error when compilation reports an error - hence "unsafe".
  def unsafeToLP(q: Fix[Sql], vars: Variables = Variables.empty): Fix[LogicalPlan] =
    quasar.queryPlan(q, vars, rootDir, 0L, None).run.value.valueOr(
      err => scala.sys.error("Unexpected error compiling sql to LogicalPlan: " + err.shows))
}
package newts
import cats.Id
import cats.kernel.laws.discipline.{CommutativeMonoidTests, CommutativeSemigroupTests, OrderTests}
import cats.laws.discipline.{CommutativeMonadTests, DistributiveTests, TraverseTests}
class MinTest extends NewtsSuite {
  // Law checks for the Min newtype: its semigroup/monoid combine by taking the
  // smaller value, plus ordering and standard type-class instances.
  checkAll("Min[Int]", CommutativeSemigroupTests[Min[Int]].commutativeSemigroup)
  checkAll("Min[Int]", CommutativeMonoidTests[Min[Int]].commutativeMonoid)
  checkAll("Min[Int]", OrderTests[Min[Int]].order)
  checkAll("Min[Int]", CommutativeMonadTests[Min].commutativeMonad[Int, Int, Int])
  checkAll("Min[Int]", TraverseTests[Min].traverse[Int, Int, Int, Int, Option, Option])
  checkAll("Min[Int]", DistributiveTests[Min].distributive[Int, Int, Int, Option, Id])
  // |+| keeps the minimum regardless of operand order.
  test("combine"){
    5.asMin |+| 1.asMin shouldEqual Min(1)
    1.asMin |+| 5.asMin shouldEqual Min(1)
  }
  test("show") {
    Min(5).show shouldEqual "Min(5)"
    Min("1").show shouldEqual "Min(1)"
  }
}
| julien-truffaut/newts | test/shared/src/test/scala/newts/MinTest.scala | Scala | apache-2.0 | 927 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2006-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package util.parsing.input
/** An interface for streams of values that have positions.
*
* @author Martin Odersky
* @author Adriaan Moors
*/
/** An interface for streams of values that have positions.
 *
 *  @author Martin Odersky
 *  @author Adriaan Moors
 */
abstract class Reader[+T] {

  /** If this is a reader over character sequences, the underlying char sequence.
   *  If not, throws a `NoSuchMethodError` exception.
   *
   *  @throws [[java.lang.NoSuchMethodError]] if this not a char sequence reader.
   */
  def source: java.lang.CharSequence =
    throw new NoSuchMethodError("not a char sequence reader")

  /** Offset into `source`; only meaningful for char sequence readers. */
  def offset: Int =
    throw new NoSuchMethodError("not a char sequence reader")

  /** The first element of the reader. */
  def first: T

  /** A reader over all elements except the first.
   *
   *  @return if `atEnd` is `true`, the result will be `this';
   *          otherwise a `Reader` containing more elements.
   */
  def rest: Reader[T]

  /** A reader over all elements except the first `n`.
   *  For non-positive `n` the reader itself is returned.
   */
  def drop(n: Int): Reader[T] = {
    @scala.annotation.tailrec
    def advance(reader: Reader[T], remaining: Int): Reader[T] =
      if (remaining <= 0) reader
      else advance(reader.rest, remaining - 1)
    advance(this, n)
  }

  /** The position of the first element in the reader. */
  def pos: Position

  /** `true` iff there are no more elements in this reader. */
  def atEnd: Boolean
}
| l15k4/scala-parser-combinators | src/main/scala/scala/util/parsing/input/Reader.scala | Scala | bsd-3-clause | 1,853 |
/* sbt -- Simple Build Tool
* Copyright 2010 Jason Zaugg
*/
package xsbt
import scala.tools.nsc.{CompilerCommand, Settings}
object Command
{
	/**
	* Construct a CompilerCommand using reflection, to be compatible with Scalac before and after
	* <a href="https://lampsvn.epfl.ch/trac/scala/changeset/21274">r21274</a>
	*/
	def apply(arguments: List[String], settings: Settings): CompilerCommand = {
		// Resolve CompilerCommand constructors reflectively so one binary works
		// against both constructor shapes.
		def constr(params: Class[_]*) = classOf[CompilerCommand].getConstructor(params: _*)
		try {
			constr(classOf[List[_]], classOf[Settings]).newInstance(arguments, settings)
		} catch {
			case e: NoSuchMethodException =>
				// Older scalac: the constructor takes two extra arguments (a
				// Function1 callback and a boolean flag).
				constr(classOf[List[_]], classOf[Settings], classOf[Function1[_, _]], classOf[Boolean]).newInstance(arguments, settings, error _, false.asInstanceOf[AnyRef])
		}
	}
	// Reads the Xwarnfatal setting. The local implicit conversion plus the
	// SettingsCompat shim keep this compiling (evaluating to false) against
	// Settings versions that lack the flag - presumably Scala 2.7, per the name.
	def getWarnFatal(settings: Settings): Boolean =
	{
		implicit def compat27(settings: Settings): SettingsCompat = new SettingsCompat
		class SettingsCompat { def Xwarnfatal = this; def value = false }
		settings.Xwarnfatal.value
	}
}
package net.tomasherman.specus.server.grid
import akka.actor.Actor
import net.tomasherman.specus.common.api.logging.Logging
import net.tomasherman.specus.common.api.grid.messages.{WriteRequest, Register, PacketMessage}
import net.tomasherman.specus.server.api.di.DependencyConfigRepository
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
/** Akka actor that connects remote Specus nodes to this server: it registers
  * nodes, forwards packet messages via `balancedWrite` (strategy provided by
  * SpecusNodeManager), and writes requested packets to local sessions.
  */
class RemoteServerConnector extends Actor with SpecusNodeManager with Logging{
  protected def receive = {
    // Packet from the grid: hand it to a node chosen by balancedWrite.
    case m @ PacketMessage(_,_) => balancedWrite(m)
    // A remote node announces itself; record it under the given name.
    case Register(name) => {
      debug("Register request from {}", self.sender.get)
      registerNode(AkkaNode(self.sender.get),name)
    }
    // Write packet `p` to the session identified by `i`.
    case m @ WriteRequest(i,p) => {
      debug("Writing to session: {}", m)
      DependencyConfigRepository().sessionManager.writeTo(i,p)
    }
  }
}
package ghpages.examples
import ghpages.GhPagesMacros
import japgolly.scalajs.react._, vdom.prefix_<^._
import org.scalajs.dom.window
import ghpages.examples.util.SideBySide
import Addons.ReactCssTransitionGroup
object AnimationExample {
  def content = SideBySide.Content(jsSource, source, main())
  lazy val main = addIntro(TodoList, _(scalaPortOfPage("docs/animation.html")))
  // Original JavaScript from the React animation docs, displayed verbatim
  // alongside the Scala port below.
  val jsSource =
    """
      |var ReactCSSTransitionGroup = React.addons.CSSTransitionGroup;
      |
      |var TodoList = React.createClass({
      |  getInitialState: function() {
      |    return {items: ['hello', 'world', 'click', 'me']};
      |  },
      |  handleAdd: function() {
      |    var newItems =
      |      this.state.items.concat([prompt('Enter some text')]);
      |    this.setState({items: newItems});
      |  },
      |  handleRemove: function(i) {
      |    var newItems = this.state.items;
      |    newItems.splice(i, 1);
      |    this.setState({items: newItems});
      |  },
      |  render: function() {
      |    var items = this.state.items.map(function(item, i) {
      |      return (
      |        <div key={item} onClick={this.handleRemove.bind(this, i)}>
      |          {item}
      |        </div>
      |      );
      |    }.bind(this));
      |    return (
      |      <div>
      |        <button onClick={this.handleAdd}>Add Item</button>
      |        <ReactCSSTransitionGroup transitionName="example">
      |          {items}
      |        </ReactCSSTransitionGroup>
      |      </div>
      |    );
      |  }
      |});
    """.stripMargin
  // The Scala source shown on the page is captured between the
  // EXAMPLE:START/END markers by GhPagesMacros, so no comments are added there.
  val source = GhPagesMacros.exampleSource
  // EXAMPLE:START
  class Backend($: BackendScope[_, Vector[String]]) {
    def handleAdd(): Unit =
      $.modState(_ :+ window.prompt("Enter some text"))
    def handleRemove(i: Int): Unit =
      $.modState(_.zipWithIndex.filterNot(_._2 == i).map(_._1))
  }
  val TodoList = ReactComponentB[Unit]("TodoList")
    .initialState(Vector("hello", "world", "click", "me"))
    .backend(new Backend(_))
    .render((_,S,B) =>
      <.div(
        <.button(^.onClick --> B.handleAdd())("Add Item"),
        ReactCssTransitionGroup("example", component = "h1")(
          S.zipWithIndex.map{case (s,i) =>
            <.div(^.key := s, ^.onClick --> B.handleRemove(i))(s)
          }: _*
        )
      )
    ).buildU
  // EXAMPLE:END
}
| vcarrera/scalajs-react | gh-pages/src/main/scala/ghpages/examples/AnimationExample.scala | Scala | apache-2.0 | 2,370 |
/*
* Original implementation (C) 2010-2015 Nathan Hamblen and contributors
* Adapted and extended in 2016 by foundweekends project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package giter8
import java.io.File
class Giter8 extends Runner with xsbti.AppMain {
  /** The launched conscript entry point */
  def run(config: xsbti.AppConfiguration): Giter8.Exit = {
    new Giter8.Exit(run(config.arguments))
  }
  // Runs against the current working directory as the base.
  def run(args: Array[String]): Int =
    run(args, new File(".").getAbsoluteFile)
  // Delegates to the three-argument run (presumably from Runner) with the
  // template-rendering AppProcessor plugged in.
  def run(args: Array[String], baseDirectory: File): Int =
    run(args, baseDirectory, new AppProcessor)
}
object Giter8 extends Giter8 {
  /** Main-class runner just for testing from sbt */
  def main(args: Array[String]): Unit = {
    System.exit(run(args))
  }
  // Wraps the process exit code for the sbt launcher protocol.
  class Exit(val code: Int) extends xsbti.Exit
}
/** The processor that is responsible for rendering.
*/
class AppProcessor extends Processor {
  /** Renders the template found in `templateDirectory`, delegating to
    * [[G8TemplateRenderer]].
    *
    * @param outputDirectory target directory; when None the renderer decides
    *                        (presumably relative to `workingDirectory`)
    * @return the renderer's Either result (message on either side)
    */
  def process(
    templateDirectory: File,
    workingDirectory: File,
    arguments: Seq[String],
    forceOverwrite: Boolean,
    outputDirectory: Option[File]
  ): Either[String, String] = {
    val templateRenderer = G8TemplateRenderer
    templateRenderer.render(templateDirectory, workingDirectory, arguments, forceOverwrite, outputDirectory)
  }
}
| foundweekends/giter8 | app/src/main/scala/giter8.scala | Scala | apache-2.0 | 1,795 |
package ooyala.common.akka
import akka.actor.Actor
import org.slf4j.LoggerFactory
/**
* Trait that adds Logback/SLF4J logging to actors. It adds the following members:
*
* * logger
*
* It also prints a message upon actor initialization.
* Also, it fills the akkaSource MDC variable with the current actor's path, making for easier
* log tracing of a single actor's messages.
*/
trait Slf4jLogging extends ActorStack {
  val logger = LoggerFactory.getLogger(getClass)
  private[this] val myPath = self.path.toString

  // Log actor startup with the akkaSource MDC already populated.
  withAkkaSourceLogging {
    logger.info("Starting actor " + getClass.getName)
  }

  // Wrap every message dispatch so log lines carry this actor's path.
  override def receive: Receive = {
    case x =>
      withAkkaSourceLogging {
        super.receive(x)
      }
  }

  private def withAkkaSourceLogging(fn: => Unit) {
    // Because each actor receive invocation could happen in a different thread, and MDC is thread-based,
    // we kind of have to set the MDC anew for each receive invocation. :(
    // NOTE(review): the key is removed afterwards rather than restored, so any
    // pre-existing akkaSource value on this thread is lost.
    try {
      org.slf4j.MDC.put("akkaSource", myPath)
      fn
    } finally {
      org.slf4j.MDC.remove("akkaSource")
    }
  }
}
| nachiketa-shukla/spark-jobserver | akka-app/src/ooyala.common.akka/Slf4jLogging.scala | Scala | apache-2.0 | 1,097 |
package org.http4s
package server
import cats.implicits._
import cats.effect._
import cats.effect.concurrent.Ref
import fs2._
import fs2.concurrent.{Signal, SignallingRef}
import java.net.{InetAddress, InetSocketAddress}
import javax.net.ssl.SSLContext
import org.http4s.internal.BackendBuilder
import org.http4s.server.SSLKeyStoreSupport.StoreInfo
import scala.collection.immutable
trait ServerBuilder[F[_]] extends BackendBuilder[F, Server[F]] {
  // Concrete builder type, so chained setters return the specific builder.
  type Self <: ServerBuilder[F]

  protected implicit def F: Concurrent[F]

  /** Binds the server to the given (possibly unresolved) socket address. */
  def bindSocketAddress(socketAddress: InetSocketAddress): Self

  /** Binds to `host:port`; the address is left unresolved here. */
  final def bindHttp(port: Int = defaults.HttpPort, host: String = defaults.Host): Self =
    bindSocketAddress(InetSocketAddress.createUnresolved(host, port))

  /** Binds the given port on the default host. */
  final def bindLocal(port: Int): Self = bindHttp(port, defaults.Host)

  /** Binds an ephemeral port (port 0) on the given host. */
  final def bindAny(host: String = defaults.Host): Self = bindHttp(0, host)

  /** Sets the handler for errors thrown invoking the service. Is not
    * guaranteed to be invoked on errors on the server backend, such as
    * parsing a request or handling a context timeout.
    */
  def withServiceErrorHandler(serviceErrorHandler: ServiceErrorHandler[F]): Self

  /** Returns a Server resource. The resource is not acquired until the
    * server is started and ready to accept requests.
    */
  def resource: Resource[F, Server[F]]

  /**
   * Runs the server as a process that never emits. Useful for a server
   * that runs for the rest of the JVM's life.
   */
  final def serve: Stream[F, ExitCode] =
    for {
      signal <- Stream.eval(SignallingRef[F, Boolean](false))
      exitCode <- Stream.eval(Ref[F].of(ExitCode.Success))
      serve <- serveWhile(signal, exitCode)
    } yield serve

  /**
   * Runs the server as a Stream that emits only when the terminated signal becomes true.
   * Useful for servers with associated lifetime behaviors.
   */
  final def serveWhile(
      terminateWhenTrue: Signal[F, Boolean],
      exitWith: Ref[F, ExitCode]): Stream[F, ExitCode] =
    Stream.resource(resource) *> (terminateWhenTrue.discrete
      .takeWhile(_ === false)
      .drain ++ Stream.eval(exitWith.get))

  /** Set the banner to display when the server starts up */
  def withBanner(banner: immutable.Seq[String]): Self

  /** Disable the banner when the server starts up */
  final def withoutBanner: Self = withBanner(immutable.Seq.empty)
}
// Deprecated constants kept for source compatibility; use org.http4s.server.defaults.
object ServerBuilder {
  @deprecated("Use InetAddress.getLoopbackAddress.getHostAddress", "0.20.0-M2")
  val LoopbackAddress = InetAddress.getLoopbackAddress.getHostAddress
  @deprecated("Use org.http4s.server.defaults.Host", "0.20.0-M2")
  val DefaultHost = defaults.Host
  @deprecated("Use org.http4s.server.defaults.HttpPort", "0.20.0-M2")
  val DefaultHttpPort = defaults.HttpPort
  @deprecated("Use org.http4s.server.defaults.SocketAddress", "0.20.0-M2")
  val DefaultSocketAddress = defaults.SocketAddress
  @deprecated("Use org.http4s.server.defaults.Banner", "0.20.0-M2")
  val DefaultBanner = defaults.Banner
}
// Deprecated shim objects; the defaults now live in org.http4s.server.defaults.
object IdleTimeoutSupport {
  @deprecated("Moved to org.http4s.server.defaults.IdleTimeout", "0.20.0-M2")
  val DefaultIdleTimeout = defaults.IdleTimeout
}
object AsyncTimeoutSupport {
  @deprecated("Moved to org.http4s.server.defaults.AsyncTimeout", "0.20.0-M2")
  val DefaultAsyncTimeout = defaults.AsyncTimeout
}
// SSL configuration ADT: either raw key-store settings or a pre-built SSLContext.
sealed trait SSLConfig
final case class KeyStoreBits(
    keyStore: StoreInfo,
    keyManagerPassword: String,
    protocol: String,
    trustStore: Option[StoreInfo],
    clientAuth: SSLClientAuthMode)
    extends SSLConfig
final case class SSLContextBits(sslContext: SSLContext, clientAuth: SSLClientAuthMode)
    extends SSLConfig
object SSLKeyStoreSupport {
  // Location and password of a JKS-style key/trust store.
  final case class StoreInfo(path: String, password: String)
}
| aeons/http4s | server/src/main/scala/org/http4s/server/ServerBuilder.scala | Scala | apache-2.0 | 3,764 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.datastream
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.TableScan
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.datastream.StreamTableSourceScan
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalTableSourceScan
import org.apache.flink.table.plan.schema.TableSourceTable
import org.apache.flink.table.sources.StreamTableSource
// Converter rule turning a logical table-source scan into its DataStream
// physical counterpart when the underlying table source is a streaming one.
class StreamTableSourceScanRule
  extends ConverterRule(
    classOf[FlinkLogicalTableSourceScan],
    FlinkConventions.LOGICAL,
    FlinkConventions.DATASTREAM,
    "StreamTableSourceScanRule")
{
  /** Rule must only match if TableScan targets a [[StreamTableSource]] */
  override def matches(call: RelOptRuleCall): Boolean = {
    val scan: TableScan = call.rel(0).asInstanceOf[TableScan]
    val sourceTable = scan.getTable.unwrap(classOf[TableSourceTable[_]])
    sourceTable != null && sourceTable.isStreamingMode
  }

  /** Rewrites the logical scan into a [[StreamTableSourceScan]] carrying the
    * DATASTREAM convention trait.
    */
  def convert(rel: RelNode): RelNode = {
    val scan: FlinkLogicalTableSourceScan = rel.asInstanceOf[FlinkLogicalTableSourceScan]
    val traitSet: RelTraitSet = rel.getTraitSet.replace(FlinkConventions.DATASTREAM)

    new StreamTableSourceScan(
      rel.getCluster,
      traitSet,
      scan.getTable,
      scan.tableSource.asInstanceOf[StreamTableSource[_]],
      scan.selectedFields
    )
  }
}

object StreamTableSourceScanRule {
  val INSTANCE: RelOptRule = new StreamTableSourceScanRule
}
| fhueske/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/datastream/StreamTableSourceScanRule.scala | Scala | apache-2.0 | 2,422 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bobby.output
import java.io.File
import java.nio.file.Files
import sbt.ConsoleLogger
import uk.gov.hmrc.bobby.domain.BobbyValidationResult
trait BobbyWriter {
  val logger = ConsoleLogger()
  /** Emits the validation result somewhere (console, file, ...) in the given view format. */
  def write(bobbyValidationResult: BobbyValidationResult, viewType: ViewType): Unit
  /** Renders the validation result to the text that `write` emits. */
  def renderText(bobbyValidationResult: BobbyValidationResult, viewType: ViewType): String
}
trait FileWriter extends BobbyWriter {

  // Destination path of the rendered report; parent directories are created on demand.
  val filepath: String

  /** Renders the validation result and writes it to [[filepath]], creating any
    * missing parent directories first.
    *
    * Fix: the report is now encoded as UTF-8 explicitly instead of relying on
    * the platform default charset.
    */
  def write(bobbyValidationResult: BobbyValidationResult, viewType: ViewType): Unit = {
    val file: File = new File(filepath)
    file.getParentFile.mkdirs()
    logger.info(s"[bobby] ${getClass.getSimpleName} - Writing Bobby report to: " + file.getAbsolutePath)
    val bytes = renderText(bobbyValidationResult, viewType).getBytes(java.nio.charset.StandardCharsets.UTF_8)
    Files.write(file.toPath, bytes)
  }
}
| hmrc/sbt-bobby | src/main/scala/uk/gov/hmrc/bobby/output/BobbyWriter.scala | Scala | apache-2.0 | 1,411 |
//package com.sksamuel.elastic4s.streams
//
//import java.util.concurrent.Executors
//
//import akka.actor.ActorSystem
//import com.sksamuel.elastic4s.Indexes
//import com.sksamuel.elastic4s.bulk.BulkCompatibleDefinition
//import com.sksamuel.elastic4s.http.ElasticDsl
//import com.sksamuel.elastic4s.jackson.ElasticJackson
//import com.sksamuel.elastic4s.testkit.DockerTests
//import org.reactivestreams.{Publisher, Subscriber, Subscription}
//import org.scalatest.{Matchers, WordSpec}
//
//import scala.concurrent.duration._
//import scala.util.Try
//
//class SubscriberFlushAfterTest extends WordSpec with Matchers with DockerTests {
//
// import ElasticJackson.Implicits._
// import ReactiveElastic._
//
// implicit val system: ActorSystem = ActorSystem()
//
// implicit object SpaceshipRequestBuilder extends RequestBuilder[Spaceship] {
// override def request(ship: Spaceship): BulkCompatibleDefinition = {
// indexInto("subscriberflushaftertest" / "ships").source(ship)
// }
// }
//
// def deleteIndx(name: String): Unit = Try {
// http.execute {
// ElasticDsl.deleteIndex(name)
// }.await
// }
//
// def freshIndex(index: String): Unit = {
// deleteIndx(index)
// ensureIndexExists(index)
// blockUntilEmpty(index)
// }
//
// def blockUntil(explain: String)(predicate: () => Boolean): Unit = {
//
// var backoff = 0
// var done = false
//
// while (backoff <= 16 && !done) {
// if (backoff > 0) Thread.sleep(200 * backoff)
// backoff = backoff + 1
// try {
// done = predicate()
// } catch {
// case e: Throwable =>
// logger.warn("problem while testing predicate", e)
// }
// }
//
// require(done, s"Failed waiting on: $explain")
// }
//
// def blockUntilExactCount(expected: Long, index: String, types: String*): Unit = {
// blockUntil(s"Expected count of $expected") { () =>
// expected == http.execute {
// search(index / types).size(0)
// }.await.right.get.result.totalHits
// }
// }
//
// def blockUntilEmpty(index: String): Unit = {
// blockUntil(s"Expected empty index $index") { () =>
// http.execute {
// search(Indexes(index)).size(0)
// }.await.right.get.result.totalHits == 0
// }
// }
//
// def ensureIndexExists(index: String): Unit = {
// Try {
// http.execute {
// createIndex(index)
// }.await
// }
// }
//
// deleteIndx("subscriberflushaftertest")
// ensureIndexExists("subscriberflushaftertest")
//
// "Reactive streams subscriber" should {
// "send request if no document received within flush after duration" in {
// freshIndex("subscriberflushaftertest1")
//
// // batch size is 100, but we've made our publisher send only 1 every 3 seconds, so that we'll
// // aways have insufficient to complete the batch before the flushAfter kicks in
//
// val duration = 1.second
// blockUntilExactCount(0, "subscriberflushaftertest1")
//
// val config = SubscriberConfig[Spaceship](flushAfter = Some(duration), batchSize = 100)
// val subscriber = http.subscriber[Spaceship](config)
// new SpaceshipSlowPublisher(3.seconds).subscribe(subscriber)
//
// // after the flushAfter duration, the flush after should have sent the request
// blockUntilExactCount(1, "subscriberflushaftertest1")
// // after another flushAfter duration, the next flush after should have sent the next request
// blockUntilExactCount(2, "subscriberflushaftertest1")
//
// subscriber.close()
// }
// "reset flush after interval each time a document is received" in {
// freshIndex("subscriberflushaftertest2")
//
// // batch size is 100, but we only have 5. Since the flushAfter interval is longer than the
// // publication interval, it should only take effect once no more documents are published, this proving
// // that each time a document is received, the flushAfter timer is reset.
//
// val duration = 3.seconds
// blockUntilExactCount(0, "subscriberflushaftertest2")
//
// val config = SubscriberConfig[Spaceship](flushAfter = Some(duration), batchSize = 100)
// val subscriber = http.subscriber[Spaceship](config)
// new SpaceshipSlowPublisher(1.second).subscribe(subscriber)
//
// // after 6 seconds, we should not have any documents yet as the flush after should still not have kicked in,
// // even though all were published by now
// Thread.sleep(6000)
// blockUntilExactCount(0, "subscriberflushaftertest2")
//
// // then shortly after, the flushAfter should kick in
// blockUntilExactCount(5, "subscriberflushaftertest2")
// }
// }
//}
//
//
//class SpaceshipSlowPublisher(duration: FiniteDuration) extends Publisher[Spaceship] {
//
// override def subscribe(s: Subscriber[_ >: Spaceship]): Unit = {
// val sub = new Subscription {
// private val executor = Executors.newSingleThreadExecutor()
// private var remaining = Spaceship.ships
// override def cancel(): Unit = executor.shutdownNow()
// override def request(ignored: Long): Unit = {
// executor.submit(new Runnable {
// override def run(): Unit = {
// while (remaining.nonEmpty) {
// remaining.take(1).foreach(t => s.onNext(t))
// remaining = remaining.drop(1)
// Thread.sleep(duration.toMillis)
// }
// }
// })
// }
// }
// s.onSubscribe(sub)
// }
//}
//
//
//case class Spaceship(name: String)
//
//
//object Spaceship {
//
// val ships = List(
// Spaceship("clipper"),
// Spaceship("anaconda"),
// Spaceship("courier"),
// Spaceship("python"),
// Spaceship("viper")
// )
//}
| stringbean/elastic4s | elastic4s-http-streams/src/test/scala/com/sksamuel/elastic4s/streams/SubscriberFlushAfterTest.scala | Scala | apache-2.0 | 5,728 |
package yang.iterator
/**
* Created by y28yang on 3/1/2016.
*/
/**
 * Mixin that records when an object was last used and reports whether it has
 * been idle longer than a given limit.
 */
trait TimeoutCheckable {

  // Wall-clock time (ms) of the most recent touch; volatile so readers on
  // other threads observe the latest write.
  @volatile
  var lastTimeStamp = System.currentTimeMillis()

  /** Records "now" as the latest use. */
  def touch(): Unit = {
    lastTimeStamp = System.currentTimeMillis()
  }

  /** True when more than `maxUnusedTime` milliseconds have elapsed since the last touch. */
  def isTimeOut(maxUnusedTime: Long): Boolean =
    System.currentTimeMillis() - lastTimeStamp > maxUnusedTime
}
| wjingyao2008/firsttry | NextGenAct/src/main/scala/yang/iterator/TimeoutCheckable.scala | Scala | apache-2.0 | 412 |
// scalac regression fixture (neg test): exercises implicit search with
// undetermined type parameters. The code below is intentionally expected to
// FAIL compilation at the marked spots - do not "fix" it.
trait Covariant[+A]
trait Invariant[A] extends Covariant[A @annotation.unchecked.uncheckedVariance]

trait Combinable[G] {
  def combined = 0
}

trait CanBuildFrom[+C]

object C {
  implicit def convert1[G, TRAVONCE[+e] <: Covariant[e]]
    (xs: TRAVONCE[G]): Combinable[G] = ???

  implicit def convert2[G, SET[e] <: Invariant[e]]
    (xs: SET[_ <: G])
    (implicit cbf: CanBuildFrom[SET[G]]): Combinable[G] = ???

  implicit def cbf[A]: CanBuildFrom[Invariant[A]] = ???
}

// always failed
class Test1 {
  import C.{cbf, convert1, convert2}
  val s: Invariant[Nothing] = ???
  s.combined // fail
}
// didn't fail, now correctly fails
class Test2 {
  import C.{cbf, convert2, convert1}
  val s: Invariant[Nothing] = ???
  // Non-uniformity with Test1 was due to order of typechecking implicit candidates:
  // the last candidate typechecked was the only one that could contribute undetermined type parameters
  // to the enclosing context, due to mutation of `Context#undetparam` in `doTypedApply`.
  s.combined // was okay!
}

class TestExplicit {
  import C.{cbf, convert2}
  val s: Invariant[Nothing] = ???
  // Now the implicit Test fail uniformly as per this explicit conversion
  convert2(s).combined
  // Breaking this expression down doesn't make it work.
  {val c1 = convert2(s); c1.combined}
}

// These ones work before and after; inferring G=Null doesn't need to contribute an undetermined type param.
class Test3 {
  import C.{cbf, convert1, convert2}
  val s: Invariant[Null] = ???
  s.combined // okay
}

class Test4 {
  import C.{cbf, convert2, convert1}
  val s: Invariant[Null] = ???
  s.combined // okay
}
| felixmulder/scala | test/files/neg/t8431.scala | Scala | bsd-3-clause | 1,643 |
package sclib.serialization
import org.scalatest.{FunSuite, Matchers}
import sclib.ops.either._
import sclib.serialization.simple._
// Tests for sclib's simple length-prefixed serialization format: every value is
// encoded as "<LENGTH>:<CONTENT>", and containers concatenate the encodings of
// their elements (see the expected strings below for concrete examples).
class SerializationSuite extends FunSuite with Matchers {
  //
  // serialize
  //
  test("serialize string") {
    Serialize("44:one string") should be("13:44:one string")
  }
  test("serialize char") {
    Serialize('x') should be("1:x")
  }
  test("serialize int") {
    Serialize(12345) should be("5:12345")
  }
  test("serialize boolean") {
    Serialize(true) should be("4:true")
  }
  test("serialize list of int") {
    Serialize(List(1, 5, 2)) should be("9:1:11:51:2")
  }
  test("serialize vector of int") {
    Serialize(Vector(1, 5, 2)) should be("9:1:11:51:2")
  }
  test("serialize option") {
    // None is encoded as the single marker character 'N'; Some wraps the payload.
    Serialize(Option.empty[Int]) should be("1:N")
    Serialize(Option("N")) should be("3:1:N")
  }
  test("serialize either") {
    // Left/Right are tagged with an 'L'/'R' prefix before the payload.
    Serialize("left".left[Int]) should be("7:L4:left")
    Serialize("right".right[Int]) should be("8:R5:right")
  }
  //
  // deserialize
  //
  test("deserialize invalid input") {
    Deserialize[String]("BOOM") should be(
      "unable to deserialize invalid string - expected: '<LENGTH>:<CONTENT>...', actual: 'BOOM'".left)
    Deserialize[String]("") should be(
      "unable to deserialize empty string - expected: '<LENGTH>:<CONTENT>...'".left)
  }
  test("deserialize string") {
    Deserialize[String]("4:abcd") should be("abcd".right)
  }
  test("deserialize char") {
    Deserialize[Char]("1:x") should be('x'.right)
    Deserialize[Char]("2:xx") should be("java.lang.Exception: 'xx' contains more than a char".left)
    Deserialize[Char]("0:") should be(
      "java.lang.Exception: empty string doesn't contain any char".left)
  }
  test("deserialize int") {
    Deserialize[Int]("6:123456") should be(123456.right)
  }
  test("deserialize boolean") {
    Deserialize[Boolean]("5:false") should be(false.right)
  }
  test("deserialize list of int") {
    Deserialize[List[Int]]("9:1:11:51:2") should be(List(1, 5, 2).right)
  }
  test("deserialize vector of int") {
    Deserialize[Vector[Int]]("9:1:11:51:2") should be(Vector(1, 5, 2).right)
  }
  test("deserialize option") {
    Deserialize[Option[String]]("1:N") should be(None.right)
    Deserialize[Option[String]]("3:1:N") should be(Some("N").right)
  }
  test("deserialize either") {
    // 'xxx.right' is not usable here because we are already working on an 'Either',
    // which has a '.right' method returning the 'RightProjection'.
    Deserialize[Either[String, Int]]("7:L4:left") should be(
      Right("left".left[Int]))
    Deserialize[Either[Int, String]]("8:R5:right") should be(
      Right("right".right[Int]))
    Deserialize[Either[Int, Int]]("4:X1:1") should be(
      "unable to deserialize Either: expected 'L' or 'R' prefix, found: 'X' - in: 'X1:1'".left)
  }
  //
  // serialize / deserialize case class
  //
  case class C(a: Int, b: String, c: List[Long], d: (Int, String), e: Boolean)
  val cs = List(
    C(43, "a string", List(433L, 6534L, 1243444L), 123 -> "abc", false),
    C(34, "b string", List(3L), 321 -> "cba", true)
  )
  // Expected encoding of `cs`: the two C instances' field encodings concatenated.
  val csStr = "89:2:438:a string20:3:4334:65347:12434443:1233:abc5:false2:348:b string3:1:33:3213:cba4:true"
  // Custom instance: a C serializes as the concatenation of its fields' encodings.
  implicit val cSer = new Serialize[C] {
    override def apply(a: C): String = a match {
      case C(a, b, c, d, e) => Serialize(a) + Serialize(b) + Serialize(c) + Serialize(d) + Serialize(e)
    }
  }
  // Custom instance: fields are consumed back in the same order they were written.
  implicit val cDes = new Deserialize[C] {
    override def apply: DeserializeState[C] = for {
      a <- Deserialize[Int]
      b <- Deserialize[String]
      c <- Deserialize[List[Long]]
      d <- Deserialize[(Int, String)]
      e <- Deserialize[Boolean]
    } yield C(a, b, c, d, e)
  }
  test("serialize case class") {
    Serialize(cs) should be(csStr)
  }
  test("deserialize case class") {
    Deserialize[List[C]](csStr) should be(cs.right)
  }
}
| j-keck/sclib | src/test/scala/sclib/serialization/SerializationSuite.scala | Scala | mit | 3,886 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
import org.neo4j.graphdb.Node
import org.scalatest.prop.TableDrivenPropertyChecks
import scala.util.matching.Regex
/**
 * Acceptance tests asserting how many Eager operators the Cypher planner
 * inserts for a given query. Each test EXPLAINs its query and counts "Eager"
 * occurrences in the rendered plan via [[assertNumberOfEagerness]].
 */
class EagerizationAcceptanceTest extends ExecutionEngineFunSuite
  with TableDrivenPropertyChecks
  with QueryStatisticsTestSupport {
  // Negative lookahead: match "Eager" only when NOT followed by 'A', so operator
  // names that merely start with "EagerA..." are not counted.
  val EagerRegEx: Regex = "Eager(?!A)".r
  test("should plan eagerness for delete on paths") {
    val node0 = createLabeledNode("L")
    val node1 = createLabeledNode("L")
    relate(node0, node1)
    val query = "MATCH p=(:L)-[*]-() DELETE p"
    assertNumberOfEagerness(query, 1)
  }
  test("should plan eagerness for detach delete on paths") {
    val node0 = createLabeledNode("L")
    val node1 = createLabeledNode("L")
    relate(node0, node1)
    val query = "MATCH p=(:L)-[*]-() DETACH DELETE p"
    assertNumberOfEagerness(query, 1)
  }
  test("github issue ##5653") {
    assertNumberOfEagerness(
      "MATCH (p1:Person {name:'Michal'})-[r:FRIEND_OF {since:2007}]->(p2:Person {name:'Daniela'}) DELETE r, p1, p2", 1)
  }
  test("should introduce eagerness between DELETE and MERGE for node") {
    val query =
      """
        |MATCH (b:B)
        |DELETE b
        |MERGE (b2:B { value: 1 })
        |RETURN b2
      """.stripMargin
    assertNumberOfEagerness(query, 2)
  }
  test("should introduce eagerness between DELETE and MERGE for relationship") {
    val query =
      """
        |MATCH (a)-[t:T]->(b)
        |DELETE t
        |MERGE (a)-[t2:T]->(b)
        |RETURN t2
      """.stripMargin
    assertNumberOfEagerness(query, 2)
  }
  test("should not introduce eagerness for MATCH nodes and CREATE relationships") {
    val query = "MATCH a, b CREATE (a)-[:KNOWS]->(b)"
    assertNumberOfEagerness(query, 0)
  }
  test("should introduce eagerness when doing first matching and then creating nodes") {
    val query = "MATCH a CREATE (b)"
    assertNumberOfEagerness(query, 1)
  }
  test("should not introduce eagerness for MATCH nodes and CREATE UNIQUE relationships") {
    val query = "MATCH a, b CREATE UNIQUE (a)-[r:KNOWS]->(b)"
    assertNumberOfEagerness(query, 0)
  }
  test("should not introduce eagerness for MATCH nodes and MERGE relationships") {
    val query = "MATCH a, b MERGE (a)-[r:KNOWS]->(b)"
    assertNumberOfEagerness(query, 0)
  }
  test("should not add eagerness when not writing to nodes") {
    val query = "MATCH a, b CREATE (a)-[r:KNOWS]->(b) SET r = { key: 42 }"
    assertNumberOfEagerness(query, 0)
  }
  test("should not introduce eagerness when the ON MATCH includes writing to a non-matched property") {
    val query = "MATCH (a:Foo), (b:Bar) MERGE (a)-[r:KNOWS]->(b) ON MATCH SET a.prop = 42"
    assertNumberOfEagerness(query, 0)
  }
  test("should introduce eagerness when the ON MATCH includes writing to a matched label") {
    val query = "MATCH (a:Foo), (b:Bar) MERGE (a)-[r:KNOWS]->(b) ON MATCH SET b:Foo"
    assertNumberOfEagerness(query, 1)
  }
  test("should understand symbols introduced by FOREACH") {
    val query =
      """MATCH (a:Label)
        |WITH collect(a) as nodes
        |MATCH (b:Label2)
        |FOREACH(n in nodes |
        |  CREATE UNIQUE (n)-[:SELF]->(b))""".stripMargin
    assertNumberOfEagerness(query, 0)
  }
  test("LOAD CSV FROM 'file:///something' AS line MERGE (b:B {p:line[0]}) RETURN b") {
    val query = "LOAD CSV FROM 'file:///something' AS line MERGE (b:B {p:line[0]}) RETURN b"
    assertNumberOfEagerness(query, 0)
  }
  test("MATCH (a:Person),(m:Movie) OPTIONAL MATCH (a)-[r1]-(), (m)-[r2]-() DELETE a,r1,m,r2") {
    val query = "MATCH (a:Person),(m:Movie) OPTIONAL MATCH (a)-[r1]-(), (m)-[r2]-() DELETE a,r1,m,r2"
    assertNumberOfEagerness(query, 1)
  }
  test("MATCH (a:Person),(m:Movie) CREATE (a)-[:T]->(m) WITH a OPTIONAL MATCH (a) RETURN *") {
    val query = "MATCH (a:Person),(m:Movie) CREATE (a)-[:T]->(m) WITH a OPTIONAL MATCH (a) RETURN *"
    assertNumberOfEagerness(query, 0)
  }
  test("should not add eagerness when reading and merging nodes and relationships when matching different label") {
    val query = "MATCH (a:A) MERGE (a)-[:BAR]->(b:B) WITH a MATCH (a) WHERE (a)-[:FOO]->() RETURN a"
    assertNumberOfEagerness(query, 0)
  }
  test("should add eagerness when reading and merging nodes and relationships on matching same label") {
    val query = "MATCH (a:A) MERGE (a)-[:BAR]->(b:A) WITH a MATCH (a) WHERE (a)-[:FOO]->() RETURN a"
    assertNumberOfEagerness(query, 1)
  }
  test("should not add eagerness when reading nodes and merging relationships") {
    val query = "MATCH (a:A), (b:B) MERGE (a)-[:BAR]->(b) WITH a MATCH (a) WHERE (a)-[:FOO]->() RETURN a"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property and writing different property should not be eager") {
    val query = "MATCH (n:Node {prop:5}) SET n.value = 10"
    assertNumberOfEagerness(query, 0)
  }
  test("matching label and writing different label should not be eager") {
    val query = "MATCH (n:Node) SET n:Lol"
    assertNumberOfEagerness(query, 0)
  }
  test("matching label and writing same label should be eager") {
    val query = "MATCH (n:Lol) SET n:Lol"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property and writing label should not be eager") {
    val query = "MATCH (n {name : 'thing'}) SET n:Lol"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property and also matching label, and then writing label should be eager") {
    val query = "MATCH (a:Lol) MATCH (n {name : 'thing'}) SET n:Lol"
    assertNumberOfEagerness(query, 1)
  }
  test("matching label and writing property should not be eager") {
    val query = "MATCH (n:Lol) SET n.name = 'thing'"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property and writing property should be eager") {
    val query = "MATCH (n:Node {prop:5}) SET n.prop = 10"
    assertNumberOfEagerness(query, 1)
  }
  test("writing property without matching should not be eager") {
    val query = "MATCH n SET n.prop = 5"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property via index and writing same property should be eager") {
    execute("CREATE CONSTRAINT ON (book:Book) ASSERT book.isbn IS UNIQUE")
    execute("CREATE (b:Book {isbn : '123'})")
    val query = "MATCH (b :Book {isbn : '123'}) SET b.isbn = '456'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using AND and writing to same property should be eager") {
    val query = "MATCH n WHERE n.prop1 = 10 AND n.prop2 = 10 SET n.prop1 = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using AND and writing to different property should not be eager") {
    val query = "MATCH n WHERE n.prop1 = 10 AND n.prop2 = 10 SET n.prop3 = 5"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using OR and writing to same property should be eager") {
    val query = "MATCH n WHERE n.prop1 = 10 OR n.prop2 = 10 SET n.prop1 = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using OR and writing to different property should not be eager") {
    val query = "MATCH n WHERE n.prop1 = 10 OR n.prop2 = 10 SET n.prop3 = 5"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using XOR and writing to same property should be eager") {
    val query = "MATCH n WHERE n.prop1 XOR n.prop2 SET n.prop1 = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using XOR and writing to different property should not be eager") {
    val query = "MATCH n WHERE n.prop1 XOR n.prop2 SET n.prop3 = 5"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using NOT and writing to same property should be eager") {
    val query = "MATCH n WHERE NOT(n.prop1 = 42) SET n.prop1 = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using NOT and writing to different property should not be eager") {
    val query = "MATCH n WHERE NOT(n.prop1 = 42) SET n.prop3 = 5"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using COALESCE and writing should be eager") {
    val query = "MATCH n WHERE COALESCE(n.prop, 2) = 1 SET n.prop = 3"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using COALESCE and not writing should not be eager") {
    val query = "MATCH n WHERE COALESCE(n.prop, 2) = 1 RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using IN and writing should be eager") {
    val query = "MATCH n WHERE n.prop IN [1] SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using IN and not writing should not be eager") {
    val query = "MATCH n WHERE n.prop IN [1] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using Collection and writing should be eager") {
    val query = "MATCH n WHERE [n.prop] = [1] SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using Collection and not writing should not be eager") {
    val query = "MATCH n WHERE [n.prop] = [1] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using CollectionIndex and writing should be eager") {
    val query = "MATCH n WHERE [n.prop][0] = 1 SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using CollectionIndex and not writing should not be eager") {
    val query = "MATCH n WHERE [n.prop][0] = 1 RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using CollectionSlice and writing should be eager") {
    val query = "MATCH n WHERE [n.prop1, n.prop2][0..1] = [1, 1] SET n.prop1 = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using CollectionSlice and not writing should not be eager") {
    val query = "MATCH n WHERE [n.prop1, n.prop2][0..1] = [1, 1] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using EXTRACT and writing should be eager") {
    val query = "MATCH path=(n)-->(m) WHERE extract(x IN nodes(path) | x.prop) = [] SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using EXTRACT and not writing should not be eager") {
    val query = "MATCH path=(n)-->(m) WHERE extract(x IN nodes(path) | x.prop) = [] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using REDUCE and writing should be eager") {
    val query = "MATCH path=(n)-->(m) WHERE reduce(s = 0, x IN nodes(path) | s + x.prop) = 99 SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using REDUCE and not writing should not be eager") {
    val query = "MATCH path=(n)-->(m) WHERE reduce(s = 0, x IN nodes(path) | s + x.prop) = 99 RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using FILTER and writing should be eager") {
    val query = "MATCH path=(n)-->(m) WHERE filter(x IN nodes(path) WHERE x.prop = 4) = [] SET n.prop = 10"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using FILTER and not writing should not be eager") {
    val query = "MATCH path=(n)-->(m) WHERE filter(x IN nodes(path) WHERE x.prop = 4) = [] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using KEYS and writing should be eager") {
    val query = "MATCH n WHERE keys(n) = [] SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using KEYS and not writing should not be eager") {
    val query = "MATCH n WHERE keys(n) = [] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using HAS and writing should be eager") {
    val query = "MATCH n WHERE has(n.prop) SET n.prop = 5"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using HAS and not writing should not be eager") {
    val query = "MATCH n WHERE has(n.prop) RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using RegEx and writing should be eager") {
    val query = "MATCH n WHERE n.prop =~ 'Foo*' SET n.prop = 'bar'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using RegEx and not writing should not be eager") {
    val query = "MATCH n WHERE n.prop =~ 'Foo*' RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching all nodes using LABELS and writing should not be eager") {
    val query = "MATCH n WHERE labels(n) = [] SET n:Lol"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using LABELS and not writing should not be eager") {
    val query = "MATCH n WHERE labels(n) = [] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using REPLACE and writing should be eager") {
    val query = "MATCH n WHERE replace(n.prop, 'foo', 'bar') = 'baz' SET n.prop = 'qux'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using REPLACE and not writing should not be eager") {
    val query = "MATCH n WHERE replace(n.prop, 'foo', 'bar') = 'baz' RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using SUBSTRING and writing should be eager") {
    val query = "MATCH n WHERE substring(n.prop, 3, 5) = 'foo' SET n.prop = 'bar'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using SUBSTRING and not writing should not be eager") {
    val query = "MATCH n WHERE substring(n.prop, 3, 5) = 'foo' RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using LEFT and writing should be eager") {
    val query = "MATCH n WHERE left(n.prop, 5) = 'foo' SET n.prop = 'bar'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using LEFT and not writing should not be eager") {
    val query = "MATCH n WHERE left(n.prop, 5) = 'foo' RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using RIGHT and writing should be eager") {
    val query = "MATCH n WHERE right(n.prop, 5) = 'foo' SET n.prop = 'bar'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using RIGHT and not writing should not be eager") {
    val query = "MATCH n WHERE right(n.prop, 5) = 'foo' RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching property using SPLIT and writing should be eager") {
    val query = "MATCH n WHERE split(n.prop, ',') = ['foo', 'bar'] SET n.prop = 'baz,qux'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching property using SPLIT and not writing should not be eager") {
    val query = "MATCH n WHERE split(n.prop, ',') = ['foo', 'bar'] RETURN n"
    assertNumberOfEagerness(query, 0)
  }
  test("matching using a pattern predicate and creating relationship should be eager") {
    val query = "MATCH n WHERE n-->() CREATE n-[:T]->()"
    assertNumberOfEagerness(query, 1)
  }
  private val MathFunctions = Table(
    "function name",
    "abs",
    "sqrt",
    "round",
    "sign",
    "sin",
    "cos",
    "cot",
    "tan",
    "atan",
    "acos",
    "asin",
    "haversin",
    "ceil",
    "floor",
    "log",
    "log10",
    "exp"
  )
  forAll(MathFunctions) {
    function =>
      test(s"matching property using ${function.toUpperCase} and writing should be eager") {
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = 0 SET n.prop = 42", 1)
      }
      test(s"matching property using ${function.toUpperCase} and not writing should not be eager") {
        // Fixed: this test previously duplicated the *writing* query above
        // (SET ..., expecting 1), contradicting its own name. A read-only query
        // must not be eager, mirroring the other operator tables below.
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = 0 RETURN n", 0)
      }
  }
  private val MathOperators = Table(
    "operator",
    "+",
    "-",
    "/",
    "*",
    "%",
    "^"
  )
  forAll(MathOperators) {
    operator =>
      test(s"matching using $operator should insert eagerness for writing on properties") {
        assertNumberOfEagerness(s"MATCH n WHERE n.prop $operator 3 = 0 SET n.prop = 42", 1)
      }
      test(s"matching using $operator should not insert eagerness when no writing is performed") {
        assertNumberOfEagerness(s"MATCH n WHERE n.prop $operator 3 = 0 RETURN n", 0)
      }
  }
  private val SingleArgStringFunctions = Table(
    "function name",
    "toString",
    "lower",
    "upper",
    "trim",
    "ltrim",
    "rtrim"
  )
  forAll(SingleArgStringFunctions) {
    function =>
      test(s"matching using ${function.toUpperCase} should insert eagerness for writing on properties") {
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = 'foo' SET n.prop = 'bar'", 1)
      }
      test(s"matching using ${function.toUpperCase} should not insert eagerness when no writing is performed") {
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = 'foo' RETURN n", 0)
      }
  }
  private val ConversionFunctions = Table(
    ("function name", "initial value", "new value"),
    ("toFloat", "1.11", "2.22"),
    ("toInt", "5", "10"),
    ("toString", "'foo'", "'bar'")
  )
  forAll(ConversionFunctions) {
    (function, initialValue, newValue) =>
      test(s"matching property using $function and writing should be eager") {
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = $initialValue SET n.prop = $newValue", 1)
      }
      test(s"matching property using $function and not writing should not be eager") {
        assertNumberOfEagerness(s"MATCH n WHERE $function(n.prop) = $initialValue RETURN n", 0)
      }
  }
  private val ComparisonOperators = Table(
    "operator",
    "=",
    "<>",
    "<",
    ">",
    "<=",
    ">="
  )
  forAll(ComparisonOperators) {
    operator =>
      test(s"matching property using '$operator' and writing to same property should be eager") {
        assertNumberOfEagerness(s"MATCH n WHERE n.prop $operator 10 SET n.prop = 5", 1)
      }
      test(s"matching property using '$operator' and writing to different property should not be eager") {
        assertNumberOfEagerness(s"MATCH n WHERE n.prop1 $operator 10 SET n.prop2 = 5", 0)
      }
  }
  // tests for relationship properties
  test("matching node property, writing relationship property should not be eager") {
    val query = "MATCH (n {prop : 5})-[r]-() SET r.prop = 6"
    assertNumberOfEagerness(query, 0)
  }
  test("matching relationship property, writing same relationship property should be eager") {
    val query = "MATCH ()-[r {prop : 3}]-() SET r.prop = 6"
    assertNumberOfEagerness(query, 1)
  }
  test("matching relationship property, writing node property should not be eager") {
    val query = "MATCH (n)-[r {prop : 3}]-() SET n.prop = 6"
    assertNumberOfEagerness(query, 0)
  }
  test("matching relationship property, writing different relationship property should not be eager") {
    val query = "MATCH ()-[r {prop1 : 3}]-() SET r.prop2 = 6"
    assertNumberOfEagerness(query, 0)
  }
  test("matching on relationship property existence, writing same property should be eager") {
    val query = "MATCH ()-[r]-() WHERE has(r.prop) SET r.prop = 'foo'"
    assertNumberOfEagerness(query, 1)
  }
  test("matching on relationship property existence, writing different property should not be eager") {
    val query = "MATCH ()-[r]-() WHERE has(r.prop1) SET r.prop2 = 'foo'"
    assertNumberOfEagerness(query, 0)
  }
  test("should not be eager when merging on two different labels") {
    val query = "MERGE(:L1) MERGE(p:L2) ON CREATE SET p.name = 'Blaine'"
    assertNumberOfEagerness(query, 0)
  }
  test("should be eager when merging on the same label") {
    val query = "MERGE(:L1) MERGE(p:L1) ON CREATE SET p.name = 'Blaine'"
    assertNumberOfEagerness(query, 1)
  }
  test("should be eager when only one merge has labels") {
    val query = "MERGE() MERGE(p: Person) ON CREATE SET p.name = 'Blaine'"
    assertNumberOfEagerness(query, 1)
  }
  test("should be eager when no merge has labels") {
    val query = "MERGE() MERGE(p) ON CREATE SET p.name = 'Blaine'"
    assertNumberOfEagerness(query, 1)
  }
  test("should not be eager when merging on already bound identifiers") {
    val query = "MERGE (city:City) MERGE (country:Country) MERGE (city)-[:IN]->(country)"
    assertNumberOfEagerness(query, 0)
  }
  ignore("should not be eager when creating single node after matching on pattern with relationship") {
    val query = "MATCH ()--() CREATE ()"
    assertNumberOfEagerness(query, 0)
  }
  ignore("should not be eager when creating single node after matching on pattern with relationship and also matching on label") {
    val query = "MATCH (:L) MATCH ()--() CREATE ()"
    assertNumberOfEagerness(query, 0)
  }
  test("should be eager when creating single node after matching on empty node") {
    val query = "MATCH () CREATE ()"
    assertNumberOfEagerness(query, 1)
  }
  test("should always be eager after deleted relationships if there are any subsequent expands that might load them") {
    val device = createLabeledNode("Device")
    val cookies = (0 until 2).foldLeft(Map.empty[String, Node]) { (nodes, index) =>
      val name = s"c$index"
      val cookie = createLabeledNode(Map("name" -> name), "Cookie")
      relate(device, cookie)
      relate(cookie, createNode())
      nodes + (name -> cookie)
    }
    val query =
      """
        |MATCH (c:Cookie {name: {cookie}})<-[r2]-(d:Device)
        |WITH c, d
        |MATCH (c)-[r]-()
        |DELETE c, r
        |WITH d
        |MATCH (d)-->(c2:Cookie)
        |RETURN d, c2""".stripMargin
    cookies.foreach { case (name, node) =>
      val result = execute(query, ("cookie" -> name))
      assertStats(result, nodesDeleted = 1, relationshipsDeleted = 2)
    }
    assertNumberOfEagerness(query, 2)
  }
  test("should always be eager after deleted nodes if there are any subsequent matches that might load them") {
    val cookies = (0 until 2).foldLeft(Map.empty[String, Node]) { (nodes, index) =>
      val name = s"c$index"
      val cookie = createLabeledNode(Map("name" -> name), "Cookie")
      nodes + (name -> cookie)
    }
    val query = "MATCH (c:Cookie) DELETE c WITH 1 as t MATCH (x:Cookie) RETURN count(*) as count"
    val result = execute(query)
    result.columnAs[Int]("count").next should equal(0)
    assertStats(result, nodesDeleted = 2)
    assertNumberOfEagerness(query, 2)
  }
  test("should always be eager after deleted paths if there are any subsequent matches that might load them") {
    val cookies = (0 until 2).foldLeft(Map.empty[String, Node]) { (nodes, index) =>
      val name = s"c$index"
      val cookie = createLabeledNode(Map("name" -> name), "Cookie")
      nodes + (name -> cookie)
    }
    val query = "MATCH p=(:Cookie) DELETE p WITH 1 as t MATCH (x:Cookie) RETURN count(*) as count"
    val result = execute(query)
    result.columnAs[Int]("count").next should equal(0)
    assertStats(result, nodesDeleted = 2)
    assertNumberOfEagerness(query, 2)
  }
  /**
   * EXPLAINs `query` (prepending EXPLAIN unless already present) and asserts
   * that the rendered plan contains exactly `expectedEagerCount` operators
   * matching [[EagerRegEx]]. On failure the full plan is printed.
   */
  private def assertNumberOfEagerness(query: String, expectedEagerCount: Int) {
    val q = if (query.contains("EXPLAIN")) query else "EXPLAIN " + query
    val result = execute(q)
    val plan = result.executionPlanDescription().toString
    result.close()
    val length = EagerRegEx.findAllIn(plan).length
    assert(length == expectedEagerCount, plan)
  }
}
| HuangLS/neo4j | community/cypher/cypher/src/test/scala/org/neo4j/cypher/EagerizationAcceptanceTest.scala | Scala | apache-2.0 | 23,894 |
class i0(val i1: Double) extends AnyVal {
def i2 = new i0.i3
}
abstract class i5 {
def i6(i7: i0): i1 = i3
class i8(i9: i4) {
def i10 = new i6 {
def i7 = i3(, )
}
}
0
private def i11: Unit = {
val i10 = i1(2, )
def i12 = i13
val i13 = i2()
val
def >>(i10: i1) = i1
def i14: i0 => String = ???
trait i18 extends i2
abstract class i17 {
override def i1 = List("i5=i15 i11> i15"
i123 else ??? : i17
}
trait i13 extends i2
object i10 {
new i0().i5() } | som-snytt/dotty | tests/fuzzy/c8f351c9685739fcf49bd7ce406e431b2a6b7cf7.scala | Scala | apache-2.0 | 447 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.mvc
import org.specs2.mutable.Specification
import play.api.http.HttpConfiguration
import play.api.libs.typedmap.TypedMap
import play.api.mvc.{ Headers, RequestHeader }
import play.api.mvc.request.{ DefaultRequestFactory, RemoteConnection, RequestTarget }
import play.mvc.Http.HeaderNames
import scala.compat.java8.OptionConverters._
import scala.collection.JavaConverters._
// Specs2 suite for the Java-facing Http.Headers / RequestHeader API: header
// lookup is case-insensitive, headers are immutable (add/remove return copies),
// and hasBody is derived from Content-Length / Transfer-Encoding.
class RequestHeaderSpec extends Specification {
  // Builds a minimal Scala RequestHeader (GET /) carrying only the given headers.
  private def requestHeader(headers: (String, String)*): RequestHeader = {
    new DefaultRequestFactory(HttpConfiguration()).createRequestHeader(
      connection = RemoteConnection("", secure = false, None),
      method = "GET",
      target = RequestTarget("/", "", Map.empty),
      version = "",
      headers = Headers(headers: _*),
      attrs = TypedMap.empty
    )
  }
  // Java Http.Headers fixture pre-populated with a -> [b1, b2] and c -> [d1, d2],
  // optionally merged with extra entries.
  def headers(additionalHeaders: Map[String, java.util.List[String]] = Map.empty) = {
    val headers = (Map("a" -> List("b1", "b2").asJava, "c" -> List("d1", "d2").asJava) ++ additionalHeaders).asJava
    new Http.Headers(headers)
  }
  "RequestHeader" should {
    "headers" in {
      "check if the header exists" in {
        headers().contains("a") must beTrue
        headers().contains("non-existend") must beFalse
      }
      "get a single header value" in {
        // get returns the first value when a header has several.
        toScala(headers().get("a")) must beSome("b1")
        toScala(headers().get("c")) must beSome("d1")
      }
      "get all header values" in {
        headers().getAll("a").asScala must containTheSameElementsAs(Seq("b1", "b2"))
        headers().getAll("c").asScala must containTheSameElementsAs(Seq("d1", "d2"))
      }
      "handle header names case insensitively" in {
        "when getting the header" in {
          toScala(headers().get("a")) must beSome("b1")
          toScala(headers().get("c")) must beSome("d1")
          toScala(headers().get("A")) must beSome("b1")
          toScala(headers().get("C")) must beSome("d1")
        }
        "when checking if the header exists" in {
          headers().contains("a") must beTrue
          headers().contains("A") must beTrue
        }
      }
      "can add new headers" in {
        val h = headers().addHeader("new", "value")
        h.contains("new") must beTrue
        toScala(h.get("new")) must beSome("value")
      }
      "can add new headers with a list of values" in {
        val h = headers().addHeader("new", List("v1", "v2", "v3").asJava)
        h.getAll("new").asScala must containTheSameElementsAs(Seq("v1", "v2", "v3"))
      }
      "remove a header" in {
        // remove returns a copy without the header; the original is unchanged.
        val h = headers().addHeader("to-be-removed", "value")
        h.contains("to-be-removed") must beTrue
        h.remove("to-be-removed").contains("to-be-removed") must beFalse
      }
    }
    "has body" in {
      "when there is a content-length greater than zero" in {
        requestHeader(HeaderNames.CONTENT_LENGTH -> "10").asJava.hasBody must beTrue
      }
      "when there is a transfer-encoding header" in {
        requestHeader(HeaderNames.TRANSFER_ENCODING -> "gzip").asJava.hasBody must beTrue
      }
    }
    "has no body" in {
      "when there is not a content-length greater than zero" in {
        requestHeader(HeaderNames.CONTENT_LENGTH -> "0").asJava.hasBody must beFalse
      }
      "when there is not a transfer-encoding header" in {
        requestHeader().asJava.hasBody must beFalse
      }
    }
  }
}
| Shruti9520/playframework | framework/src/play/src/test/scala/play/mvc/RequestHeaderSpec.scala | Scala | apache-2.0 | 3,473 |
/*
* CordImpl.scala
* (Cord)
*
* Copyright (c) 2015-2020 Hanns Holger Rutz.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.cord
package impl
import de.sciss.cord.view.{CordView, PatcherView, View}
/**
 * A cord (patch cable) connecting an `Outlet` to an `Inlet`.
 *
 * On construction it verifies that both endpoints belong to the same patcher
 * and registers itself with each endpoint; `dispose()` undoes the registration.
 */
class CordImpl(val source: Outlet, val sink: Inlet) extends Cord {
  init()

  /** The cord's type is taken from its source outlet. */
  def tpe: Type = source.tpe

  /** The patcher hosting both endpoints (validated in `init`). */
  def parent: Patcher = source.node.parent

  def view(parentView: PatcherView): View = CordView(parentView, this)

  /** Unregisters this cord from both of its endpoints. */
  def dispose(): Unit = {
    sink.removeCord(this)
    source.removeCord(this)
  }

  // Validates the endpoints and wires this cord into them. Runs once, from the
  // constructor, before the instance is handed out.
  private def init(): Unit = {
    val sourcePatcher = source.node.parent
    val sinkPatcher   = sink.node.parent
    if (sourcePatcher != sinkPatcher)
      throw new Exception(s"Source ($source) and sink ($sink) do not live in the same patcher")
    source.addCord(this)
    sink.addCord(this)
  }
}
| Sciss/Cord | src/main/scala/de/sciss/cord/impl/CordImpl.scala | Scala | lgpl-2.1 | 897 |
/*
* Copyright (c) 2017 Aaron Levin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package ca.aaronlevin
import cats.{ Functor, Id, Monad }
import cats.implicits._
import scala.language.higherKinds
package object parsec {

  /** Name of the input source (file name etc.), used only in error reporting. */
  type SourceName = String
  type Line = Int
  type Column = Int

  /** A position inside the input; line and column are 1-based. */
  case class SourcePosition(sourceName: SourceName, line: Line, column: Column)

  /** A parse failure: where it happened plus free-form messages. */
  case class ParseError(sourcePos: SourcePosition, messages: List[String])

  /** Full parser state: remaining input, current position and user state. */
  case class State[S, U](input: S, pos: SourcePosition, user: U)

  /** Marks whether a reply was produced after consuming input or not. */
  sealed trait IConsumed[A]
  case class Consumed[A](consumed: A) extends IConsumed[A]
  case class Empty[A](empty: A) extends IConsumed[A]

  /** Outcome of a parse: success with value + new state, or failure. */
  sealed trait Reply[+S, +U, +A]
  case class Ok[S, U, A](a: A, state: State[S, U], error: ParseError) extends Reply[S, U, A]
  case class Error(error: ParseError) extends Reply[Nothing, Nothing, Nothing]

  // NOTE(review): pe2 is discarded; full parsec merges the message lists of
  // errors at the same position. Kept as-is to preserve existing behavior.
  def mergeError(pe1: ParseError, pe2: ParseError): ParseError = pe1

  /** An error with no messages at the given position. */
  def newErrorUnknown(pos: SourcePosition): ParseError = ParseError(pos, Nil)

  def unknownError[S, U](state: State[S, U]): ParseError = newErrorUnknown(state.pos)

  /** Starting position: line 1, column 1. */
  def initialPos(name: SourceName): SourcePosition = SourcePosition(name, 1, 1)

  /** Type class describing how to pull one token off an input stream `S` inside `M`. */
  trait ParsecStream[S, M[_], Token] {
    implicit def M: Monad[M]
    def uncons(stream: S): M[Option[(Token, S)]]
  }

  /** `List[T]` is a token stream for any monad `M`. An explicit return type is
    * given so the implicit definition has a stable signature. */
  implicit def listStream[T, M[_]](implicit monad: Monad[M]): ParsecStream[List[T], M, T] =
    new ParsecStream[List[T], M, T] {
      implicit def M = monad
      def uncons(stream: List[T]): M[Option[(T, List[T])]] = stream match {
        case Nil     => monad.pure(None)
        case t :: ts => monad.pure(Some((t, ts)))
      }
    }

  /** CPS-encoded parser, mirroring Haskell parsec's `ParsecT`: running a
    * parser means supplying one continuation per (consumed|empty) x (ok|err)
    * outcome, and the parser invokes exactly one of them.
    */
  abstract class ParsecT[S, U, M[_], A] {
    def apply[B](
        state: State[S, U],
        consumedOk: (A, State[S, U], ParseError) => M[B],
        consumedErr: ParseError => M[B],
        emptyOk: (A, State[S, U], ParseError) => M[B],
        emptyErr: ParseError => M[B]
    ): M[B]
    def map[B](f: A => B): ParsecT[S, U, M, B] = parsecFunctor.map(this)(f)
  }

  implicit def parsecFunctor[S, U, M[_]]: Functor[ParsecT[S, U, M, ?]] =
    new Functor[ParsecT[S, U, M, ?]] {
      def map[A, B](fa: ParsecT[S, U, M, A])(f: A => B): ParsecT[S, U, M, B] =
        new ParsecT[S, U, M, B] {
          def apply[C](state: State[S, U],
                       consumedOk: (B, State[S, U], ParseError) => M[C],
                       consumedErr: ParseError => M[C],
                       emptyOk: (B, State[S, U], ParseError) => M[C],
                       emptyErr: ParseError => M[C]): M[C] =
            // Post-compose f onto both success continuations; errors pass through.
            fa.apply(
              state,
              (a, s, e) => consumedOk(f(a), s, e),
              consumedErr,
              (a, s, e) => emptyOk(f(a), s, e),
              emptyErr
            )
        }
    }

  implicit def parsecMonad[S, U, M[_]]: Monad[ParsecT[S, U, M, ?]] =
    new Monad[ParsecT[S, U, M, ?]] {

      def flatMap[A, B](
          fa: ParsecT[S, U, M, A]
      )(f: A => ParsecT[S, U, M, B]): ParsecT[S, U, M, B] =
        new ParsecT[S, U, M, B] {
          def apply[C](state: State[S, U],
                       consumedOk: (B, State[S, U], ParseError) => M[C],
                       consumedErr: ParseError => M[C],
                       emptyOk: (B, State[S, U], ParseError) => M[C],
                       emptyErr: ParseError => M[C]): M[C] = {
            // fa succeeded after consuming input: whatever f's parser does
            // "empty" still counts as consumed overall, with errors merged.
            val mcOk: (A, State[S, U], ParseError) => M[C] = { (a, s, e) =>
              val peok: (B, State[S, U], ParseError) => M[C] = { (pa, ps, pe) =>
                consumedOk(pa, ps, mergeError(e, pe))
              }
              val peer: ParseError => M[C] = { pe =>
                consumedErr(mergeError(e, pe))
              }
              f(a).apply[C](s, consumedOk, consumedErr, peok, peer)
            }
            // fa succeeded without consuming input: f's parser's own
            // classification stands, with errors merged likewise.
            val meok: (A, State[S, U], ParseError) => M[C] = { (a, s, e) =>
              val peok: (B, State[S, U], ParseError) => M[C] = { (pa, ps, pe) =>
                emptyOk(pa, ps, mergeError(e, pe))
              }
              val peer: ParseError => M[C] = { pe =>
                emptyErr(mergeError(e, pe))
              }
              f(a).apply[C](s, consumedOk, consumedErr, peok, peer)
            }
            fa.apply[C](state, mcOk, consumedErr, meok, emptyErr)
          }
        }

      /** Succeeds with `a` without consuming any input. */
      def pure[A](a: A): ParsecT[S, U, M, A] = new ParsecT[S, U, M, A] {
        def apply[B](state: State[S, U],
                     consumedOk: (A, State[S, U], ParseError) => M[B],
                     consumedErr: ParseError => M[B],
                     emptyOk: (A, State[S, U], ParseError) => M[B],
                     emptyErr: ParseError => M[B]): M[B] =
          emptyOk(a, state, unknownError(state))
      }

      /** Monadic iteration. NOT stack safe (see `stackTest` below).
        *
        * FIX: the previous hand-rolled CPS version recursed with the
        * *original* `state` in its consumed-ok continuation, discarding the
        * input consumed by earlier iterations (and their error info).
        * Defining it through `flatMap`/`pure` threads the state correctly
        * by construction, consistent with the monad laws.
        */
      def tailRecM[A, B](a: A)(f: A => ParsecT[S, U, M, Either[A, B]]): ParsecT[S, U, M, B] =
        flatMap(f(a)) {
          case Left(next) => tailRecM(next)(f)
          case Right(b)   => pure(b)
        }
    }

  /** Runs a parser by reifying the four continuations into `IConsumed`/`Reply` values. */
  def runParsecT[S, U, M[_], A](parser: ParsecT[S, U, M, A], state: State[S, U])(
      implicit monad: Monad[M]
  ): M[IConsumed[M[Reply[S, U, A]]]] = {
    val cok: (A, State[S, U], ParseError) => M[IConsumed[M[Reply[S, U, A]]]] = { (a, s, e) =>
      monad.pure(Consumed(monad.pure(Ok(a, s, e))))
    }
    val cerr: ParseError => M[IConsumed[M[Reply[S, U, A]]]] = { e =>
      monad.pure(Consumed(monad.pure(Error(e))))
    }
    val eok: (A, State[S, U], ParseError) => M[IConsumed[M[Reply[S, U, A]]]] = { (a, s, e) =>
      monad.pure(Empty(monad.pure(Ok(a, s, e))))
    }
    val eerr: ParseError => M[IConsumed[M[Reply[S, U, A]]]] = { e =>
      monad.pure(Empty(monad.pure(Error(e))))
    }
    parser.apply(state, cok, cerr, eok, eerr)
  }

  /** Runs a parser from the initial position and collapses the reply to `Either`. */
  def runPT[S, M[_], T, U, A](parser: ParsecT[S, U, M, A], user: U, sourceName: SourceName, s: S)(
      implicit monad: Monad[M]
  ): M[Either[ParseError, A]] = {
    val state = State(s, initialPos(sourceName), user)
    monad.flatMap(runParsecT(parser, state)) { consumedRes =>
      val result = consumedRes match {
        case Empty(e)    => e
        case Consumed(c) => c
      }
      monad.map(result) {
        case Ok(x, _, _) => Right(x)
        case Error(err)  => Left(err)
      }
    }
  }

  /** Convenience wrapper for pure (`Id`-monad) parsers with no user state. */
  def parse[S, T, A](parser: ParsecT[S, Unit, Id, A], source: SourceName, s: S)(
      implicit stream: ParsecStream[S, Id, T]
  ): Either[ParseError, A] =
    // Pass the unit VALUE () — previously the `Unit` companion object was
    // passed here, which only compiled via deprecated value discarding.
    runPT[S, Id, T, Unit, A](parser, (), source, s)

  // this will blow the stack at n=2490 on my machine
  def stackTest(n: Int) = {
    val monad = parsecMonad[String, Unit, Id]
    val r = monad.pure(333)
    val rr: ParsecT[String, Unit, Id, Int] = (1 to n).foldLeft(r)((r, _) => r.map(_ + 1))
    runPT[String, Id, Char, Unit, Int](rr, (), "aaron", "cool")
  }
}
| aaronlevin/scala-parsec | src/main/scala/ca/aaronlevin/parsec/package.scala | Scala | mit | 8,870 |
package org.workcraft.plugins.cpog.scala.tools
import org.workcraft.dom.visual.ColorisableGraphicalContent
import org.workcraft.gui.graph.tools.GraphEditorToolUtil
import org.workcraft.gui.graph.tools.Colorisation
import org.workcraft.gui.graph.tools.AbstractTool
import org.workcraft.gui.graph.tools.GraphEditorTool.Button
import org.workcraft.dom.visual.GraphicalContent
import org.workcraft.gui.graph.Viewport
import org.workcraft.plugins.cpog.CustomToolsProvider
import org.workcraft.gui.graph.tools.selection.GenericSelectionTool
import java.awt.Color
import java.awt.BasicStroke
import org.workcraft.dom.visual.BoundedColorisableGraphicalContent
import java.awt.geom.Point2D
import org.workcraft.gui.graph.tools.selection.MoveDragHandler
import org.workcraft.plugins.cpog.scala.nodes._
import pcollections.HashTreePSet
import pcollections.PSet
import org.workcraft.scala.Util._
import org.workcraft.scala.Scalaz._
import org.workcraft.scala.Expressions._
import org.workcraft.plugins.cpog.scala.VisualArc._
import scala.collection.JavaConversions.{ collectionAsScalaIterable, asJavaCollection }
import org.workcraft.util.Maybe.Util.just
import org.workcraft.plugins.cpog.gui.TouchableProvider.bbToTouchable
import org.workcraft.graphics.Graphics._
import java.awt.geom.Path2D
import java.awt.geom.Ellipse2D
import org.workcraft.dom.visual.Touchable
import org.workcraft.plugins.cpog.scala.ControlPoint
import org.workcraft.plugins.cpog.scala.VisualArc
import org.workcraft.dom.visual.connections.RelativePoint
import org.workcraft.dependencymanager.advanced.core.{ Expressions => JExpressions }
import org.workcraft.dependencymanager.advanced.user.Setter
import org.workcraft.util.Maybe
import org.workcraft.gui.graph.tools.GraphEditorMouseListener
import org.workcraft.graphics.GraphicsHelper
import org.workcraft.scala.grapheditor.tools._
/** Bundles a mouse listener with a user-space painter so the pair can be
  * exposed as a graph editor tool.
  */
class ControlPointsTool (val mouseListener: GraphEditorMouseListener,
    userSpaceGraphics: (Viewport, Expression[java.lang.Boolean]) => Expression[GraphicalContent]) {

  /** Builds the editor tool, layering this tool's user-space graphics on top
    * of the supplied model graphics.
    */
  def asGraphEditorTool (modelGraphics : Expression[GraphicalContent]) = {
    def layered (viewport: Viewport, hasFocus: Expression[java.lang.Boolean]) = {
      val toolGraphics = userSpaceGraphics(viewport, hasFocus)
      (modelGraphics <**> toolGraphics) (compose(_, _))
    }
    ToolHelper.asGraphEditorTool(Some(mouseListener), None, Some(layered), None, None, ControlPointsTool.button)
  }
}
/** Companion: visual constants and the factory for the control-point editing tool. */
object ControlPointsTool {
  import org.workcraft.scala.Scalaz._
  // Radius (in user-space units) of a rendered control point.
  val controlPointSize = 0.15
  // Colorisation applied to highlighted (selected) control points.
  val highlightedColorisation = new Colorisation {
    override def getColorisation = new Color(99, 130, 191).brighter();
    override def getBackground = null
  }
  // Toolbar button descriptor; activated with the Q hot key.
  val button = new Button {
    override def getLabel = "Control point editor"
    override def getIcon = null
    override def getHotKeyCode = java.awt.event.KeyEvent.VK_Q
  }
  // A plain (polyline) control point: a blue circle at `position`.
  def controlPointGraphics(position: Point2D.Double) =
    circle(controlPointSize, None, Some(Color.BLUE)) translate position
  // A Bezier control point: the circle plus a gray guide line back to the vertex.
  def bezierControlPointGraphics(position: Point2D.Double, vertexPosition: Point2D.Double) =
  {
    val p = new Path2D.Double
    p.moveTo(vertexPosition.getX, vertexPosition.getY)
    p.lineTo(position.getX, position.getY)
    val cpg = controlPointGraphics(position)
    // Touchable area stays that of the circle only; the guide line is not clickable.
    cpg over (path(p, new BasicStroke(0.02f), Color.GRAY.brighter, 0), cpg.touchable)
  }
  /** Creates the tool for editing the control points of the given selected arcs.
    * `snap` maps a dragged position onto the editor grid.
    */
  def create(selectedArcs: Expression[List[(Point2D.Double, Point2D.Double, VisualArc)]], snap: Point2D.Double => Point2D.Double) = {
    // Mutable set of currently selected control points.
    val selectedControlPoints = org.workcraft.dependencymanager.advanced.user.Variable.create[PSet[ControlPoint]](HashTreePSet.empty())
    // Extracts the editable control points of one arc, with their graphics.
    def getControlPoints(arc: (Point2D.Double, Point2D.Double, VisualArc)): List[ControlPoint] =
    {
      val (firstPos, secondPos, visual) = arc
      visual match {
        case Polyline(cps) => cps.map(x => new ControlPoint(x, for (x <- x) yield controlPointGraphics(x)))
        case Bezier(cp1, cp2) => {
          // Bezier points are stored relative to the arc's end points; expose
          // them as absolute (space) coordinates for dragging.
          def convertCp(cp: ModifiableExpression[RelativePoint]) = ModifiableExpression(
            for (cp <- cp) yield cp.toSpace(firstPos, secondPos),
            (v: Point2D.Double) => Maybe.Util.doIfJust(RelativePoint.fromSpace(firstPos, secondPos, v), cp.setValue)
          )
          val cp1_ = convertCp(cp1)
          val cp2_ = convertCp(cp2)
          new ControlPoint(cp1_, for (cp1 <- cp1_) yield bezierControlPointGraphics(cp1, firstPos)) ::
            new ControlPoint(cp2_, for (cp2 <- cp2_) yield bezierControlPointGraphics(cp2, secondPos)) :: Nil
        }
      }
    }
    // FIXME: eliminate the need for this
    val visibleControlPoints: Expression[List[ControlPoint]] =
      for (
        (selectedArcs) <- selectedArcs;
        val nodeLists = selectedArcs.map(getControlPoints).toList
      ) yield nodeLists.flatten: List[ControlPoint]
    // Java-collection view required by the hit tester below.
    val visibleControlPointsJ: Expression[java.util.Collection[_ <: ControlPoint]] =
      for (vcp <- visibleControlPoints) yield asJavaCollection[ControlPoint](vcp)
    // Every control point is movable: expose its position expression.
    def controlPointMovableController(cp: ControlPoint) = just(cp.position)
    val cpDragHandler = new MoveDragHandler[ControlPoint](selectedControlPoints, asFunctionObject(((x : Maybe[ModifiableExpression[Point2D.Double]]) => asMaybe(for(x <- x) yield x.jexpr)) compose controlPointMovableController), asFunctionObject(snap))
    val cpHitTester = CustomToolsProvider.createHitTester[ControlPoint](visibleControlPointsJ.jexpr, asFunctionObject((cp: ControlPoint) => for (g <- cp.graphics) yield g.touchable))
    def cpPainter(cp: ControlPoint) = for (g <- cp.graphics) yield g.graphics
    // Generic selection tool wired with our hit testing and dragging.
    val gcpet = new GenericSelectionTool[ControlPoint](selectedControlPoints, cpHitTester, cpDragHandler)
    // Paint all visible control points, highlighting the selected ones.
    val controlPointGC =
      GraphicsHelper.paintWithHighlights[ControlPoint](cpPainter, visibleControlPoints) (highlightedColorisation, selectedControlPoints)
    new ControlPointsTool (gcpet.getMouseListener, (viewport, _) => (controlPointGC <**> gcpet.userSpaceContent(viewport)) (compose(_,_)))
  }
}
| tuura/workcraft-2.2 | CpogsPlugin/src/main/scala/org/workcraft/plugins/cpog/scala/tools/ControlPointsTool.scala | Scala | gpl-3.0 | 6,013 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.samples.bookstore.model
/**
 * Immutable value object describing a single book in the store.
 *
 * @param id     unique identifier of the book
 * @param title  human-readable title
 * @param author the author's display name
 *
 * @version $Revision: 1.1 $
 */
case class Book(id: String, title: String, author: String) | arashi01/scalate-samples | scalate-bookstore/src/main/scala/org/fusesource/scalate/samples/bookstore/model/Book.scala | Scala | apache-2.0 | 890 |
/*
* Copyright (C) 2014-2015 by Nokia.
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package wookie.collector.utils
import scalaz.{Id, Kleisli, Reader}
object DI {
  // Lifts a plain `Conf => A` reader function into a `Kleisli` arrow over the
  // identity monad, so configuration-reading functions can be composed with
  // scalaz's Kleisli combinators (Reader-pattern dependency injection).
  implicit def funToKleisli[Conf, A](r: Conf => A): Kleisli[Id.Id, Conf, A] = Kleisli[Id.Id, Conf, A](Reader(r).run)
}
| elyast/wookie | collector-api/src/main/scala/wookie/collector/utils/DI.scala | Scala | apache-2.0 | 918 |
package fr.acinq.eclair.channel
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.bitcoin.DeterministicWallet._
import fr.acinq.bitcoin._
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain.TxConfirmedAt
import fr.acinq.eclair.blockchain.fee.FeeratePerKw
import fr.acinq.eclair.crypto.Generators
import fr.acinq.eclair.crypto.Sphinx.PacketAndSecrets
import fr.acinq.eclair.payment.IncomingPaymentPacket
import fr.acinq.eclair.transactions.Transactions._
import fr.acinq.eclair.transactions.{CommitmentSpec, Transactions}
import fr.acinq.eclair.wire.PaymentOnion.FinalPayload
import fr.acinq.eclair.wire._
import immortan.crypto.Tools
import immortan.{LNParams, RemoteNodeInfo}
import scodec.bits.ByteVector
// Reasons why an outgoing HTLC could not be forwarded through a local channel;
// each carries the HTLC that was rejected.
sealed trait LocalReject {
  val localAdd: UpdateAddHtlc
}
case class ChannelOffline(localAdd: UpdateAddHtlc) extends LocalReject
case class ChannelNotAbleToSend(localAdd: UpdateAddHtlc) extends LocalReject
case class InPrincipleNotSendable(localAdd: UpdateAddHtlc) extends LocalReject
// Inputs that start channel establishment: we accept a remote open (fundee)
// or initiate one ourselves (funder).
case class INPUT_INIT_FUNDEE(remoteInfo: RemoteNodeInfo, localParams: LocalParams, remoteInit: Init, channelFeatures: ChannelFeatures, theirOpen: OpenChannel)
case class INPUT_INIT_FUNDER(remoteInfo: RemoteNodeInfo, temporaryChannelId: ByteVector32, fundingAmount: Satoshi, pushAmount: MilliSatoshi, fundingFeeratePerKw: FeeratePerKw,
                             initialFeeratePerKw: FeeratePerKw, localParams: LocalParams, remoteInit: Init, channelFlags: Byte, channelFeatures: ChannelFeatures)
// On-chain events the channel state machine reacts to.
sealed trait BitcoinEvent
case class BITCOIN_PARENT_TX_CONFIRMED(childTx: Transaction) extends BitcoinEvent
case class BITCOIN_TX_CONFIRMED(tx: Transaction) extends BitcoinEvent
case object BITCOIN_FUNDING_DEPTHOK extends BitcoinEvent
case object BITCOIN_FUNDING_SPENT extends BitcoinEvent
case object BITCOIN_OUTPUT_SPENT extends BitcoinEvent
// Result of inspecting an incoming HTLC: either immediately final (see
// FinalResolution below) or "reasonable" and subject to further processing.
sealed trait IncomingResolution
sealed trait ReasonableResolution extends IncomingResolution {
  val fullTag: FullPaymentTag // Payment type and grouping data (paymentHash x paymentSecret x type)
  val secret: PrivateKey // Node secret whose pubKey is seen by peer (might be peer-specific or invoice-specific)
  val add: UpdateAddHtlc
}
// An HTLC to be relayed onward as a trampoline payment.
case class ReasonableTrampoline(packet: IncomingPaymentPacket.NodeRelayPacket, secret: PrivateKey) extends ReasonableResolution {
  val fullTag: FullPaymentTag = FullPaymentTag(packet.add.paymentHash, packet.outerPayload.paymentSecret, PaymentTagTlv.TRAMPLOINE_ROUTED)
  val add: UpdateAddHtlc = packet.add
}
// An HTLC whose final destination is this node.
case class ReasonableLocal(packet: IncomingPaymentPacket.FinalPacket, secret: PrivateKey) extends ReasonableResolution {
  val fullTag: FullPaymentTag = FullPaymentTag(packet.add.paymentHash, packet.payload.paymentSecret, PaymentTagTlv.FINAL_INCOMING)
  val add: UpdateAddHtlc = packet.add
}
// Commands accepted by the channel state machine.
sealed trait Command
// A final verdict on an incoming HTLC: settle (fulfill) or fail it.
sealed trait FinalResolution extends IncomingResolution with Command {
  val theirAdd: UpdateAddHtlc
}
case class CMD_FULFILL_HTLC(preimage: ByteVector32, theirAdd: UpdateAddHtlc) extends FinalResolution
case class CMD_FAIL_MALFORMED_HTLC(onionHash: ByteVector32, failureCode: Int, theirAdd: UpdateAddHtlc) extends FinalResolution
case class CMD_FAIL_HTLC(reason: Either[ByteVector, FailureMessage], nodeSecret: PrivateKey, theirAdd: UpdateAddHtlc) extends FinalResolution
// Request to add an outgoing HTLC; channelId/id in `incompleteAdd` are
// placeholders to be filled in by the channel that actually sends it.
case class CMD_ADD_HTLC(fullTag: FullPaymentTag, firstAmount: MilliSatoshi, cltvExpiry: CltvExpiry, packetAndSecrets: PacketAndSecrets, payload: FinalPayload) extends Command {
  val incompleteAdd: UpdateAddHtlc = UpdateAddHtlc(channelId = ByteVector32.Zeroes, id = 0L, firstAmount, fullTag.paymentHash, cltvExpiry, packetAndSecrets.packet, encryptedTag)
  // Payment tag encoded and ChaCha-encrypted with our node key and a fresh
  // 12-byte nonce, carried in the HTLC's TLV stream.
  lazy val encryptedTag: PaymentTagTlv.EncryptedSecretStream = {
    val shortTag = ShortPaymentTag(fullTag.paymentSecret, fullTag.tag)
    val plainBytes = PaymentTagTlv.shortPaymentTagCodec.encode(shortTag).require.toByteVector
    val cipherbytes = Tools.chaChaEncrypt(LNParams.secret.keys.ourNodePrivateKey.value, randomBytes(12), plainBytes)
    TlvStream(EncryptedPaymentSecret(cipherbytes) :: Nil)
  }
}
// Machine-readable reasons a CMD_CLOSE may be rejected or deferred.
object CMD_CLOSE {
  final val AWAITING_REMOTE_FORCE_CLOSE = "awaiting-remote-force-close"
  final val INVALID_CLOSING_PUBKEY = "invalid-closing-pubkey"
  final val ALREADY_IN_PROGRESS = "already-in-progress"
  final val CHANNEL_BUSY = "channel-busy"
}
// `force = true` requests a unilateral (force) close instead of a mutual one.
case class CMD_CLOSE(scriptPubKey: Option[ByteVector], force: Boolean) extends Command
case class CMD_HOSTED_STATE_OVERRIDE(so: StateOverride) extends Command
case class HC_CMD_RESIZE(delta: Satoshi) extends Command
case object CMD_SOCKET_OFFLINE extends Command
case object CMD_SOCKET_ONLINE extends Command
case object CMD_CHECK_FEERATE extends Command
case object CMD_SIGN extends Command
// Base type of all channel states; `ourBalance` defaults to zero for states
// where no balance is known yet.
trait ChannelData {
  def ourBalance: MilliSatoshi = 0L.msat
}
// States that survive restarts and are keyed by channel id.
trait PersistentChannelData extends ChannelData {
  def channelId: ByteVector32
}
// Persistent states backed by standard (non-hosted) commitments.
sealed trait HasNormalCommitments extends PersistentChannelData {
  override def ourBalance: MilliSatoshi = commitments.latestReducedRemoteSpec.toRemote
  override def channelId: ByteVector32 = commitments.channelId
  def withNewCommits(cs: NormalCommits): HasNormalCommitments
  def commitments: NormalCommits
}
case class ClosingTxProposed(unsignedTx: Transaction, localClosingSigned: ClosingSigned)
// Common shape of a published force-close: the commit tx plus bookkeeping of
// which outputs have been irrevocably spent on-chain.
sealed trait ForceCloseCommitPublished {
  def isIrrevocablySpent(tx: Transaction): Boolean = irrevocablySpent.values.exists(_.tx.txid == tx.txid)
  lazy val isCommitConfirmed: Boolean = isIrrevocablySpent(commitTx)
  val irrevocablySpent: Map[OutPoint, TxConfirmedAt]
  val delayedRefundsLeft: Seq[Transaction]
  val commitTx: Transaction
}
// Our own commit tx was published (local force close).
case class LocalCommitPublished(commitTx: Transaction, claimMainDelayedOutputTx: Option[Transaction],
                                htlcSuccessTxs: List[Transaction], htlcTimeoutTxs: List[Transaction], claimHtlcDelayedTxs: List[Transaction],
                                irrevocablySpent: Map[OutPoint, TxConfirmedAt] = Map.empty) extends ForceCloseCommitPublished {
  lazy val delayedRefundsLeft: Seq[Transaction] = (claimMainDelayedOutputTx.toList ++ claimHtlcDelayedTxs).filterNot(isIrrevocablySpent)
}
// The peer's commit tx was published (remote force close).
case class RemoteCommitPublished(commitTx: Transaction, claimMainOutputTx: Option[Transaction],
                                 claimHtlcSuccessTxs: List[Transaction], claimHtlcTimeoutTxs: List[Transaction],
                                 irrevocablySpent: Map[OutPoint, TxConfirmedAt] = Map.empty) extends ForceCloseCommitPublished {
  lazy val delayedRefundsLeft: Seq[Transaction] = claimHtlcTimeoutTxs.filterNot(isIrrevocablySpent)
}
// The peer published a revoked commit tx; we respond with penalty txs.
case class RevokedCommitPublished(commitTx: Transaction, claimMainOutputTx: Option[Transaction], mainPenaltyTx: Option[Transaction], htlcPenaltyTxs: List[Transaction],
                                  claimHtlcDelayedPenaltyTxs: List[Transaction], irrevocablySpent: Map[OutPoint, TxConfirmedAt] = Map.empty) extends ForceCloseCommitPublished {
  lazy val delayedRefundsLeft: Seq[Transaction] = claimHtlcDelayedPenaltyTxs.filterNot(isIrrevocablySpent)
  lazy val penaltyTxs: Seq[Transaction] = claimMainOutputTx.toList ++ mainPenaltyTx.toList ++ htlcPenaltyTxs ++ claimHtlcDelayedPenaltyTxs
}
// Channel establishment states, in protocol order.
final case class DATA_WAIT_FOR_OPEN_CHANNEL(initFundee: INPUT_INIT_FUNDEE) extends ChannelData
final case class DATA_WAIT_FOR_ACCEPT_CHANNEL(initFunder: INPUT_INIT_FUNDER, lastSent: OpenChannel) extends ChannelData
final case class DATA_WAIT_FOR_FUNDING_INTERNAL(initFunder: INPUT_INIT_FUNDER, remoteParams: RemoteParams, remoteFirstPerCommitmentPoint: PublicKey, lastSent: OpenChannel) extends ChannelData
final case class DATA_WAIT_FOR_FUNDING_CREATED(initFundee: INPUT_INIT_FUNDEE, remoteParams: RemoteParams, lastSent: AcceptChannel) extends ChannelData
final case class DATA_WAIT_FOR_FUNDING_SIGNED(remoteInfo: RemoteNodeInfo, channelId: ByteVector32, localParams: LocalParams, remoteParams: RemoteParams, fundingTx: Transaction,
                                              fundingTxFee: Satoshi, localSpec: CommitmentSpec, localCommitTx: CommitTx, remoteCommit: RemoteCommit, channelFlags: Byte,
                                              channelFeatures: ChannelFeatures, lastSent: FundingCreated) extends ChannelData
final case class DATA_WAIT_FOR_FUNDING_CONFIRMED(commitments: NormalCommits, fundingTx: Option[Transaction], waitingSince: Long, lastSent: Either[FundingCreated, FundingSigned],
                                                 deferred: Option[FundingLocked] = None) extends ChannelData with HasNormalCommitments {
  // Remote peer may send a tx which is unrelated to our agreed upon channel funding, that is, we won't be able to spend our commit tx, check this right away!
  def checkSpend(tx: Transaction): Unit = Transaction.correctlySpends(commitments.localCommit.publishableTxs.commitTx.tx, Seq(tx), ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
final case class DATA_WAIT_FOR_FUNDING_LOCKED(commitments: NormalCommits, shortChannelId: Long, lastSent: FundingLocked) extends ChannelData with HasNormalCommitments {
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
// Fully operational channel; shutdown fields become set once closing starts.
final case class DATA_NORMAL(commitments: NormalCommits, shortChannelId: Long, feeUpdateRequired: Boolean = false, extParams: List[ByteVector] = Nil,
                             localShutdown: Option[Shutdown] = None, remoteShutdown: Option[Shutdown] = None) extends ChannelData with HasNormalCommitments {
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
object DATA_NEGOTIATING {
  type ClosingProposed = List[ClosingTxProposed]
}
// Mutual-close fee negotiation in progress.
final case class DATA_NEGOTIATING(commitments: NormalCommits, localShutdown: Shutdown,
                                  remoteShutdown: Shutdown, closingTxProposed: List[DATA_NEGOTIATING.ClosingProposed] = List(Nil),
                                  bestUnpublishedClosingTxOpt: Option[Transaction] = None) extends ChannelData with HasNormalCommitments {
  def toClosed: DATA_CLOSING = DATA_CLOSING(commitments, System.currentTimeMillis, closingTxProposed.flatten.map(_.unsignedTx), bestUnpublishedClosingTxOpt.toList)
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
// Channel is closing; tracks every possible close scenario at once.
final case class DATA_CLOSING(commitments: NormalCommits, waitingSince: Long, mutualCloseProposed: List[Transaction] = Nil, mutualClosePublished: List[Transaction] = Nil,
                              localCommitPublished: Option[LocalCommitPublished] = None, remoteCommitPublished: Option[RemoteCommitPublished] = None, nextRemoteCommitPublished: Option[RemoteCommitPublished] = None,
                              futureRemoteCommitPublished: Option[RemoteCommitPublished] = None, revokedCommitPublished: List[RevokedCommitPublished] = Nil) extends ChannelData with HasNormalCommitments {
  lazy val balanceRefunds: Seq[Transaction] =
    // Txs which are not related to HTLC UTXOs but only involved in getting our balance back
    remoteCommitPublished.toList.flatMap(rcp => rcp.commitTx +: rcp.claimMainOutputTx.toList) ++
      nextRemoteCommitPublished.toList.flatMap(rcp => rcp.commitTx +: rcp.claimMainOutputTx.toList) ++
      futureRemoteCommitPublished.toList.flatMap(rcp => rcp.commitTx +: rcp.claimMainOutputTx.toList) ++
      localCommitPublished.toList.flatMap(lcp => lcp.commitTx +: lcp.claimMainDelayedOutputTx.toList) ++
      mutualCloseProposed ++ mutualClosePublished
  lazy val paymentLeftoverRefunds: Seq[Transaction] =
    // Txs which are involved in getting our success/timeout HTLC UTXOs back
    remoteCommitPublished.toList.flatMap(rcp => rcp.claimHtlcSuccessTxs ++ rcp.claimHtlcTimeoutTxs) ++
      nextRemoteCommitPublished.toList.flatMap(rcp => rcp.claimHtlcSuccessTxs ++ rcp.claimHtlcTimeoutTxs) ++
      futureRemoteCommitPublished.toList.flatMap(rcp => rcp.claimHtlcSuccessTxs ++ rcp.claimHtlcTimeoutTxs) ++
      localCommitPublished.toList.flatMap(lcp => lcp.claimHtlcDelayedTxs ++ lcp.htlcSuccessTxs ++ lcp.htlcTimeoutTxs)
  lazy val forceCloseCommitPublished: Option[ForceCloseCommitPublished] = {
    // We must select a single candidate here because its delayed refunds will be displayed to user, so we can't show a total sum of all possible refunds
    val candidates = localCommitPublished ++ remoteCommitPublished ++ nextRemoteCommitPublished ++ futureRemoteCommitPublished ++ revokedCommitPublished
    candidates.find(_.isCommitConfirmed).orElse(candidates.headOption)
  }
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
// We lost data and asked the peer to publish their (future) commitment.
final case class DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT(commitments: NormalCommits, remoteChannelReestablish: ChannelReestablish) extends ChannelData with HasNormalCommitments {
  override def withNewCommits(cs: NormalCommits): HasNormalCommitments = copy(commitments = cs)
}
object ChannelKeys {
  // Derives the full set of per-channel keys from the wallet master key and a
  // channel key path; each purpose gets its own hardened child (indices 0-5).
  def fromPath(master: ExtendedPrivateKey, path: KeyPath): ChannelKeys = {
    val fundingKey = derivePrivateKey(chain = path.path :+ hardened(0L), parent = master)
    val revocationKey = derivePrivateKey(chain = path.path :+ hardened(1L), parent = master)
    val paymentKey = derivePrivateKey(chain = path.path :+ hardened(2L), parent = master)
    val delayedKey = derivePrivateKey(chain = path.path :+ hardened(3L), parent = master)
    val htlcKey = derivePrivateKey(chain = path.path :+ hardened(4L), parent = master)
    val shaBase = derivePrivateKey(chain = path.path :+ hardened(5L), parent = master)
    // Per-commitment secret seed: SHA-256 of the sha-base key with a 0x01 byte appended.
    val shaSeed = Crypto.sha256(shaBase.privateKey.value :+ 1.toByte)
    ChannelKeys(path, shaSeed, fundingKey, revocationKey, paymentKey, delayedKey, htlcKey)
  }
  // Generates a fresh random key path; the last hop encodes the funder flag.
  // Note: `nextHop` is a def, so every hop below gets a fresh random value.
  def newKeyPath(isFunder: Boolean): KeyPath = {
    def nextHop: Long = secureRandom.nextInt & 0xFFFFFFFFL
    val lastHop = if (isFunder) hardened(1) else hardened(0)
    val path = Seq(nextHop, nextHop, nextHop, nextHop, nextHop, nextHop, nextHop, nextHop, lastHop)
    KeyPath(path)
  }
}
// Holder for all channel-scoped keys plus signing and commitment-secret helpers.
case class ChannelKeys(path: KeyPath, shaSeed: ByteVector32, fundingKey: ExtendedPrivateKey, revocationKey: ExtendedPrivateKey, paymentKey: ExtendedPrivateKey, delayedPaymentKey: ExtendedPrivateKey, htlcKey: ExtendedPrivateKey) {
  def sign(tx: TransactionWithInputInfo, key: PrivateKey, remoteSecret: PrivateKey, txOwner: TxOwner, format: CommitmentFormat): ByteVector64 = Transactions.sign(tx, Generators.revocationPrivKey(key, remoteSecret), txOwner, format)
  def sign(tx: TransactionWithInputInfo, key: PrivateKey, remotePoint: PublicKey, txOwner: TxOwner, format: CommitmentFormat): ByteVector64 = Transactions.sign(tx, Generators.derivePrivKey(key, remotePoint), txOwner, format)
  def commitmentSecret(index: Long): PrivateKey = Generators.perCommitSecret(shaSeed, index)
  def commitmentPoint(index: Long): PublicKey = Generators.perCommitPoint(shaSeed, index)
}
// Static channel parameters negotiated at open time, our side and the peer's.
final case class LocalParams(keys: ChannelKeys, dustLimit: Satoshi, maxHtlcValueInFlightMsat: UInt64, channelReserve: Satoshi,
                             htlcMinimum: MilliSatoshi, toSelfDelay: CltvExpiryDelta, maxAcceptedHtlcs: Int, isFunder: Boolean,
                             defaultFinalScriptPubKey: ByteVector, walletStaticPaymentBasepoint: PublicKey)
final case class RemoteParams(dustLimit: Satoshi, maxHtlcValueInFlightMsat: UInt64, channelReserve: Satoshi, htlcMinimum: MilliSatoshi,
                              toSelfDelay: CltvExpiryDelta, maxAcceptedHtlcs: Int, fundingPubKey: PublicKey, revocationBasepoint: PublicKey,
                              paymentBasepoint: PublicKey, delayedPaymentBasepoint: PublicKey, htlcBasepoint: PublicKey, shutdownScript: Option[ByteVector] = None)
// Channel exceptions. Each toString begins with the exception's own class name
// so log lines identify the failure; two copy-pasted names are fixed below.
case class FeerateTooSmall(channelId: ByteVector32, remoteFeeratePerKw: FeeratePerKw) extends RuntimeException {
  override def toString: String = s"FeerateTooSmall, remoteFeeratePerKw=$remoteFeeratePerKw"
}
case class DustLimitTooSmall(channelId: ByteVector32, dustLimit: Satoshi, min: Satoshi) extends RuntimeException {
  override def toString: String = s"DustLimitTooSmall, dustLimit=$dustLimit, min=$min"
}
case class DustLimitTooLarge(channelId: ByteVector32, dustLimit: Satoshi, max: Satoshi) extends RuntimeException {
  override def toString: String = s"DustLimitTooLarge, dustLimit=$dustLimit, max=$max"
}
case class InvalidMaxAcceptedHtlcs(channelId: ByteVector32, maxAcceptedHtlcs: Int, max: Int) extends RuntimeException {
  override def toString: String = s"InvalidMaxAcceptedHtlcs, maxAcceptedHtlcs=$maxAcceptedHtlcs, max=$max"
}
case class InvalidMinAcceptedHtlcs(channelId: ByteVector32, minAcceptedHtlcs: Int, min: Int) extends RuntimeException {
  override def toString: String = s"InvalidMinAcceptedHtlcs, minAcceptedHtlcs=$minAcceptedHtlcs, min=$min"
}
case class InvalidChainHash(channelId: ByteVector32, local: ByteVector32, remote: ByteVector32) extends RuntimeException {
  override def toString: String = s"InvalidChainHash, local=$local, remote=$remote"
}
case class InvalidPushAmount(channelId: ByteVector32, pushAmount: MilliSatoshi, max: MilliSatoshi) extends RuntimeException {
  override def toString: String = s"InvalidPushAmount, pushAmount=$pushAmount, max=$max"
}
case class ToSelfDelayTooHigh(channelId: ByteVector32, toSelfDelay: CltvExpiryDelta, max: CltvExpiryDelta) extends RuntimeException {
  override def toString: String = s"ToSelfDelayTooHigh, toSelfDelay=$toSelfDelay, max=$max"
}
case class InvalidFundingAmount(channelId: ByteVector32, fundingAmount: Satoshi, min: Satoshi, max: Satoshi) extends RuntimeException {
  override def toString: String = s"InvalidFundingAmount, fundingAmount=$fundingAmount, min=$min, max=$max"
}
case class DustLimitAboveOurChannelReserve(channelId: ByteVector32, dustLimit: Satoshi, channelReserve: Satoshi) extends RuntimeException {
  override def toString: String = s"DustLimitAboveOurChannelReserve, dustLimit=$dustLimit, channelReserve=$channelReserve"
}
case class ChannelReserveBelowOurDustLimit(channelId: ByteVector32, channelReserve: Satoshi, dustLimit: Satoshi) extends RuntimeException {
  override def toString: String = s"ChannelReserveBelowOurDustLimit, channelReserve=$channelReserve, dustLimit=$dustLimit"
}
case class ChannelReserveNotMet(channelId: ByteVector32, toLocal: MilliSatoshi, toRemote: MilliSatoshi, reserve: Satoshi) extends RuntimeException {
  override def toString: String = s"ChannelReserveNotMet, toLocal=$toLocal, toRemote=$toRemote, reserve=$reserve"
}
case class FeerateTooDifferent(channelId: ByteVector32, localFeeratePerKw: FeeratePerKw, remoteFeeratePerKw: FeeratePerKw) extends RuntimeException {
  override def toString: String = s"FeerateTooDifferent, localFeeratePerKw=$localFeeratePerKw, remoteFeeratePerKw=$remoteFeeratePerKw"
}
case class ChannelReserveTooHigh(channelId: ByteVector32, reserveToFundingRatio: Double, maxReserveToFundingRatio: Double) extends RuntimeException {
  // FIX: message previously said "DustLimitTooSmall" (copy-paste error).
  override def toString: String = s"ChannelReserveTooHigh, reserveToFundingRatio=$reserveToFundingRatio, maxReserveToFundingRatio=$maxReserveToFundingRatio"
}
case class ExpiredHtlcInNormalChannel(channelId: ByteVector32, sentExpiredRouted: Boolean, expiredReceivedRevealed: Boolean) extends RuntimeException {
  // FIX: message previously said "ChannelTransitionFail" (copy-paste error).
  override def toString: String = s"ExpiredHtlcInNormalChannel, sentExpiredRouted=$sentExpiredRouted, expiredReceivedRevealed=$expiredReceivedRevealed"
}
case class ChannelTransitionFail(channelId: ByteVector32, message: LightningMessage) extends RuntimeException {
  override def toString: String = s"ChannelTransitionFail, related message=$message"
}
case class RemoteErrorException(details: String) extends RuntimeException {
  override def toString: String = s"RemoteErrorException, details=$details"
}
case class CMDException(reason: String, cmd: Command) extends RuntimeException
| btcontract/wallet | app/src/main/java/fr/acinq/eclair/channel/ChannelTypes.scala | Scala | apache-2.0 | 19,846 |
package io.plasmap.geo.mappings.impl
import com.typesafe.config.ConfigFactory
import io.plasmap.geo.mappings.{OsmWayMapping, OsmRelationMapping, OsmNodeMapping, MappingService}
import io.plasmap.model.OsmId
import org.joda.time.DateTime
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.api.commands.WriteResult
import reactivemongo.api.{MongoConnection, DefaultDB, MongoDriver}
import reactivemongo.bson._
import reactivemongo.core.nodeset.Authenticate
import scala.concurrent.{Future, ExecutionContext}
/**
* Created by janschulte on 09/12/15.
*/
/**
 * Companion holding the shared MongoDB connection plus the BSON (de)serializers
 * for the three OSM mapping types. All three mapping documents share the same
 * layout: `_id` (OSM id), `hash` (geo hash) and `ts` (last-update timestamp).
 *
 * Created by dkopel on 09/12/15 (originally janschulte).
 */
object MongoMappingService {

  // Connection settings are read once from the application configuration.
  private val settings = ConfigFactory.load()

  private val mongoHost = settings.getString("plasmap.db.mongo.host")
  private val dbName = settings.getString("plasmap.db.mongo.database")
  private val dbUser = settings.getString("plasmap.db.mongo.user")
  private val dbPassword = settings.getString("plasmap.db.mongo.password")

  private val credentials = List(Authenticate(dbName, dbUser, dbPassword))

  private val driver = new MongoDriver
  private val connection: MongoConnection = driver.connection(List(mongoHost), authentications = credentials)

  // BSON field names shared by all mapping collections.
  private val FIELD_ID = "_id"
  private val FIELD_HASH = "hash"
  private val FIELD_TS = "ts"

  // Field accessors; documents written by this service always carry these fields,
  // hence the unconditional `.get`.
  private def getTimestamp(doc: BSONDocument): Long = doc.getAs[BSONDateTime](FIELD_TS).get.value

  private def getHash(doc: BSONDocument): Long = doc.getAs[BSONLong](FIELD_HASH).get.value

  private def getOsmId(doc: BSONDocument): Long = doc.getAs[BSONLong](FIELD_ID).get.value

  // All three writers produce the same document shape.
  private def mappingDocument(osmId: OsmId, hash: Long, updated: DateTime): BSONDocument =
    BSONDocument(
      FIELD_ID -> BSONLong(osmId.value),
      FIELD_HASH -> BSONLong(hash),
      FIELD_TS -> BSONDateTime(updated.getMillis))

  implicit object OsmNodeMappingBSONReader extends BSONDocumentReader[OsmNodeMapping] {
    override def read(document: BSONDocument): OsmNodeMapping =
      OsmNodeMapping(getHash(document), OsmId(getOsmId(document)), new DateTime(getTimestamp(document)))
  }

  implicit object OsmNodeMappingBSONWriter extends BSONDocumentWriter[OsmNodeMapping] {
    override def write(mapping: OsmNodeMapping) =
      mappingDocument(mapping.osmId, mapping.hash, mapping.updated)
  }

  implicit object OsmWayMappingBSONReader extends BSONDocumentReader[OsmWayMapping] {
    override def read(document: BSONDocument): OsmWayMapping =
      OsmWayMapping(getHash(document), OsmId(getOsmId(document)), new DateTime(getTimestamp(document)))
  }

  implicit object OsmWayMappingBSONWriter extends BSONDocumentWriter[OsmWayMapping] {
    override def write(mapping: OsmWayMapping) =
      mappingDocument(mapping.osmId, mapping.hash, mapping.updated)
  }

  implicit object OsmRelationMappingBSONReader extends BSONDocumentReader[OsmRelationMapping] {
    override def read(document: BSONDocument): OsmRelationMapping =
      OsmRelationMapping(getHash(document), OsmId(getOsmId(document)), new DateTime(getTimestamp(document)))
  }

  implicit object OsmRelationMappingBSONWriter extends BSONDocumentWriter[OsmRelationMapping] {
    override def write(mapping: OsmRelationMapping) =
      mappingDocument(mapping.osmId, mapping.hash, mapping.updated)
  }

  /** Creates a service instance bound to the shared connection. */
  def apply(ec: ExecutionContext) = new MongoMappingService(connection, ec)
}
/**
 * MongoDB-backed [[MappingService]]: stores and retrieves the hash mappings of
 * OSM nodes, ways and relations in three dedicated collections.
 *
 * The six public CRUD methods only differ in target collection and mapping type,
 * so they delegate to generic private helpers (previously six copy-pasted bodies).
 */
class MongoMappingService(conn: MongoConnection, ec: ExecutionContext) extends MappingService {

  // NOTE(review): the database name is hardcoded to "plasmap" here, while the
  // companion object authenticates against the configured
  // `plasmap.db.mongo.database` — confirm these are intentionally the same.
  val db: DefaultDB = conn("plasmap")(ec)

  val nodeMappings: BSONCollection = db("node_mappings")
  val wayMappings: BSONCollection = db("way_mappings")
  val relationMappings: BSONCollection = db("relation_mappings")

  import MongoMappingService._

  /** Turns a write result into Some(mapping) on success, None on failure. */
  private def successOption[T](mapping: T)(result: WriteResult): Option[T] =
    if (result.ok) Some(mapping) else None

  /** Looks up the mapping stored under `osmId` in the given collection. */
  private def findIn[T](coll: BSONCollection, osmId: OsmId)(implicit ec: ExecutionContext, reader: BSONDocumentReader[T]): Future[Option[T]] =
    coll.find(BSONDocument(FIELD_ID -> osmId.value)).one[T]

  /** Inserts `mapping` into the given collection. */
  private def insertInto[T](coll: BSONCollection, mapping: T)(implicit ec: ExecutionContext, writer: BSONDocumentWriter[T]): Future[Option[T]] =
    coll.insert(mapping).map(successOption(mapping))

  /** Removes the document keyed by `osmId` from the given collection. */
  private def removeFrom[T](coll: BSONCollection, mapping: T, osmId: OsmId)(implicit ec: ExecutionContext): Future[Option[T]] =
    coll.remove(BSONDocument(FIELD_ID -> BSONLong(osmId.value))).map(successOption(mapping))

  override def findNodeMapping(osmId: OsmId)(implicit ec: ExecutionContext): Future[Option[OsmNodeMapping]] =
    findIn[OsmNodeMapping](nodeMappings, osmId)

  private[impl] def deleteNodeMapping(mapping: OsmNodeMapping)(implicit ec: ExecutionContext): Future[Option[OsmNodeMapping]] =
    removeFrom(nodeMappings, mapping, mapping.osmId)

  override def insertNodeMapping(mapping: OsmNodeMapping)(implicit ec: ExecutionContext): Future[Option[OsmNodeMapping]] =
    insertInto(nodeMappings, mapping)

  override def findWayMapping(osmId: OsmId)(implicit ec: ExecutionContext): Future[Option[OsmWayMapping]] =
    findIn[OsmWayMapping](wayMappings, osmId)

  private[impl] def deleteWayMapping(mapping: OsmWayMapping)(implicit ec: ExecutionContext): Future[Option[OsmWayMapping]] =
    removeFrom(wayMappings, mapping, mapping.osmId)

  override def insertWayMapping(mapping: OsmWayMapping)(implicit ec: ExecutionContext): Future[Option[OsmWayMapping]] =
    insertInto(wayMappings, mapping)

  override def insertRelationMapping(mapping: OsmRelationMapping)(implicit ec: ExecutionContext): Future[Option[OsmRelationMapping]] =
    insertInto(relationMappings, mapping)

  override def findRelationMapping(osmId: OsmId)(implicit ec: ExecutionContext): Future[Option[OsmRelationMapping]] =
    findIn[OsmRelationMapping](relationMappings, osmId)

  private[impl] def deleteRelationMapping(mapping: OsmRelationMapping)(implicit ec: ExecutionContext): Future[Option[OsmRelationMapping]] =
    removeFrom(relationMappings, mapping, mapping.osmId)
} | plasmap/plasmap | dal/src/main/scala/io/plasmap/geo/mappings/impl/MongoMappingService.scala | Scala | apache-2.0 | 6,445 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.scalacheck.Gen
import org.scalactic.TripleEqualsSupport.Spread
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.optimizer.SimpleTestOptimizer
import org.apache.spark.sql.catalyst.plans.logical.{OneRowRelation, Project}
import org.apache.spark.sql.catalyst.util.MapData
import org.apache.spark.sql.types.DataType
import org.apache.spark.util.Utils
/**
* A few helper functions for expression evaluation testing. Mixin this trait to use them.
*/
trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks {
  self: SparkFunSuite =>

  /** Builds an [[InternalRow]] from external Scala values, converting each to its Catalyst form. */
  protected def create_row(values: Any*): InternalRow = {
    InternalRow.fromSeq(values.map(CatalystTypeConverters.convertToCatalyst))
  }

  /**
   * Checks that `expression` evaluates to `expected` under every evaluation path:
   * interpreted, generated mutable projection, unsafe projection (only when the
   * result type supports it), and after running the optimizer.
   */
  protected def checkEvaluation(
      expression: => Expression, expected: Any, inputRow: InternalRow = EmptyRow): Unit = {
    val catalystValue = CatalystTypeConverters.convertToCatalyst(expected)
    checkEvaluationWithoutCodegen(expression, catalystValue, inputRow)
    checkEvaluationWithGeneratedMutableProjection(expression, catalystValue, inputRow)
    if (GenerateUnsafeProjection.canSupport(expression.dataType)) {
      // Method name typo ("Evalution") is preserved: it is part of the protected API.
      checkEvalutionWithUnsafeProjection(expression, catalystValue, inputRow)
    }
    checkEvaluationWithOptimization(expression, catalystValue, inputRow)
  }

  /**
   * Check the equality between result of expression and expected value, it will handle
   * Array[Byte], Spread[Double], and MapData.
   */
  protected def checkResult(result: Any, expected: Any): Boolean = {
    (result, expected) match {
      // Arrays use reference equality by default, so compare contents explicitly.
      case (result: Array[Byte], expected: Array[Byte]) =>
        java.util.Arrays.equals(result, expected)
      // A Spread means "within tolerance" rather than exact equality.
      case (result: Double, expected: Spread[Double @unchecked]) =>
        expected.asInstanceOf[Spread[Double]].isWithin(result)
      case (result: MapData, expected: MapData) =>
        result.keyArray() == expected.keyArray() && result.valueArray() == expected.valueArray()
      // NaN != NaN under IEEE semantics, so treat two NaNs as equal here.
      case (result: Double, expected: Double) =>
        if (expected.isNaN) result.isNaN else expected == result
      case (result: Float, expected: Float) =>
        if (expected.isNaN) result.isNaN else expected == result
      case _ =>
        result == expected
    }
  }

  /** Evaluates `expression` in interpreted mode, initializing nondeterministic expressions first. */
  protected def evaluate(expression: Expression, inputRow: InternalRow = EmptyRow): Any = {
    expression.foreach {
      case n: Nondeterministic => n.setInitialValues()
      case _ =>
    }
    expression.eval(inputRow)
  }

  /** Forces code generation of `generator`, failing the test with the full stack on any error. */
  protected def generateProject(
      generator: => Projection,
      expression: Expression): Projection = {
    try {
      generator
    } catch {
      case e: Throwable =>
        fail(
          s"""
            |Code generation of $expression failed:
            |$e
            |${Utils.exceptionString(e)}
          """.stripMargin)
    }
  }

  /** Asserts that interpreted evaluation of `expression` yields `expected`. */
  protected def checkEvaluationWithoutCodegen(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val actual = try evaluate(expression, inputRow) catch {
      case e: Exception => fail(s"Exception evaluating $expression", e)
    }
    if (!checkResult(actual, expected)) {
      val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
      fail(s"Incorrect evaluation (codegen off): $expression, " +
        s"actual: $actual, " +
        s"expected: $expected$input")
    }
  }

  /** Asserts that a generated mutable projection of `expression` yields `expected`. */
  protected def checkEvaluationWithGeneratedMutableProjection(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val plan = generateProject(
      GenerateMutableProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)

    val actual = plan(inputRow).get(0, expression.dataType)
    if (!checkResult(actual, expected)) {
      val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
      fail(s"Incorrect evaluation: $expression, actual: $actual, expected: $expected$input")
    }
  }

  /** Asserts that an unsafe projection of `expression` yields `expected`. */
  protected def checkEvalutionWithUnsafeProjection(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    // SPARK-16489 Explicitly doing code generation twice so code gen will fail if
    // some expression is reusing variable names across different instances.
    // This behavior is tested in ExpressionEvalHelperSuite.
    val plan = generateProject(
      UnsafeProjection.create(
        Alias(expression, s"Optimized($expression)1")() ::
          Alias(expression, s"Optimized($expression)2")() :: Nil),
      expression)

    val unsafeRow = plan(inputRow)
    val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"

    if (expected == null) {
      // Null results must materialize as a null slot in the unsafe row.
      if (!unsafeRow.isNullAt(0)) {
        val expectedRow = InternalRow(expected, expected)
        fail("Incorrect evaluation in unsafe mode: " +
          s"$expression, actual: $unsafeRow, expected: $expectedRow$input")
      }
    } else {
      // Compare whole unsafe rows (expression is projected twice, hence two columns).
      val lit = InternalRow(expected, expected)
      val expectedRow =
        UnsafeProjection.create(Array(expression.dataType, expression.dataType)).apply(lit)
      if (unsafeRow != expectedRow) {
        fail("Incorrect evaluation in unsafe mode: " +
          s"$expression, actual: $unsafeRow, expected: $expectedRow$input")
      }
    }
  }

  /** Asserts that `expression` still yields `expected` after running the optimizer over it. */
  protected def checkEvaluationWithOptimization(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation)
    val optimizedPlan = SimpleTestOptimizer.execute(plan)
    checkEvaluationWithoutCodegen(optimizedPlan.expressions.head, expected, inputRow)
  }

  /**
   * Checks a Double-valued expression against a tolerance interval across all
   * evaluation paths.
   * NOTE(review): `inputRow` is not forwarded to the first three checks below —
   * confirm this is intended (they evaluate against EmptyRow).
   */
  protected def checkDoubleEvaluation(
      expression: => Expression,
      expected: Spread[Double],
      inputRow: InternalRow = EmptyRow): Unit = {
    checkEvaluationWithoutCodegen(expression, expected)
    checkEvaluationWithGeneratedMutableProjection(expression, expected)
    checkEvaluationWithOptimization(expression, expected)

    var plan = generateProject(
      GenerateMutableProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)
    var actual = plan(inputRow).get(0, expression.dataType)
    assert(checkResult(actual, expected))

    plan = generateProject(
      GenerateUnsafeProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)
    actual = FromUnsafeProjection(expression.dataType :: Nil)(
      plan(inputRow)).get(0, expression.dataType)
    assert(checkResult(actual, expected))
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against unary expressions by feeding them arbitrary literals of `dataType`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: Expression => Expression,
      dataType: DataType): Unit = {
    forAll (LiteralGenerator.randomGen(dataType)) { (l: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against binary expressions by feeding them arbitrary literals of `dataType1`
   * and `dataType2`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: (Expression, Expression) => Expression,
      dataType1: DataType,
      dataType2: DataType): Unit = {
    forAll (
      LiteralGenerator.randomGen(dataType1),
      LiteralGenerator.randomGen(dataType2)
    ) { (l1: Literal, l2: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l1, l2))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against ternary expressions by feeding them arbitrary literals of `dataType1`,
   * `dataType2` and `dataType3`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: (Expression, Expression, Expression) => Expression,
      dataType1: DataType,
      dataType2: DataType,
      dataType3: DataType): Unit = {
    forAll (
      LiteralGenerator.randomGen(dataType1),
      LiteralGenerator.randomGen(dataType2),
      LiteralGenerator.randomGen(dataType3)
    ) { (l1: Literal, l2: Literal, l3: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l1, l2, l3))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against expressions take Seq[Expression] as input by feeding them
   * arbitrary length Seq of arbitrary literal of `dataType`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: Seq[Expression] => Expression,
      dataType: DataType,
      minNumElements: Int = 0): Unit = {
    forAll (Gen.listOf(LiteralGenerator.randomGen(dataType))) { (literals: Seq[Literal]) =>
      whenever(literals.size >= minNumElements) {
        cmpInterpretWithCodegen(EmptyRow, c(literals))
      }
    }
  }

  /** Fails the test if interpreted and codegen evaluation of `expr` disagree. */
  private def cmpInterpretWithCodegen(inputRow: InternalRow, expr: Expression): Unit = {
    val interpret = try {
      evaluate(expr, inputRow)
    } catch {
      case e: Exception => fail(s"Exception evaluating $expr", e)
    }

    val plan = generateProject(
      GenerateMutableProjection.generate(Alias(expr, s"Optimized($expr)")() :: Nil),
      expr)
    val codegen = plan(inputRow).get(0, expr.dataType)

    if (!compareResults(interpret, codegen)) {
      fail(s"Incorrect evaluation: $expr, interpret: $interpret, codegen: $codegen")
    }
  }

  /**
   * Check the equality between result of expression and expected value, it will handle
   * Array[Byte] and Spread[Double].
   */
  private[this] def compareResults(result: Any, expected: Any): Boolean = {
    (result, expected) match {
      case (result: Array[Byte], expected: Array[Byte]) =>
        java.util.Arrays.equals(result, expected)
      case (result: Double, expected: Spread[Double @unchecked]) =>
        expected.asInstanceOf[Spread[Double]].isWithin(result)
      // Unlike checkResult, NaN handling is guarded here so non-NaN pairs fall through to ==.
      case (result: Double, expected: Double) if result.isNaN && expected.isNaN =>
        true
      case (result: Float, expected: Float) if result.isNaN && expected.isNaN =>
        true
      case _ => result == expected
    }
  }
}
| MrCodeYu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala | Scala | apache-2.0 | 11,540 |
package org.finra.datagenerator.scaffolding.config
/**
* Created by dkopel on 12/13/16.
*/
/**
 * A [[Configuration]] overlay: the local configs `nConfs` take precedence over
 * same-named configs of the enclosing configuration `oConf`; everything else is
 * inherited from `oConf`.
 *
 * Created by dkopel on 12/13/16.
 */
case class LocalConfig(nConfs: Seq[Config[_]])(implicit oConf: Configuration) extends Configuration(oConf.provider) {
    implicit val self: Configuration = this

    // Local configs first, then outer configs whose name is not shadowed locally.
    // The shadow-name list is computed once instead of per filtered element.
    def allConfs = {
        val localNames = nConfs.map(_.conf.name)
        nConfs.toList ++ oConf.confs().filter(c => !localNames.contains(c.conf.name))
    }

    // No requested name matches everything; otherwise filter by exact name.
    // (Replaces an if/else-if chain whose final `else false` branch was unreachable,
    // since isEmpty/isDefined are exhaustive on Option.)
    override def confs(name: Option[ConfigName]): List[Config[_]] =
        allConfs.filter(c => name.forall(n => c.conf.name.equals(n)))

    /** Runs `block` with this overlay as the implicit configuration. */
    def apply[A](block: =>A)(implicit conf: Configuration=self) = block
}
| FINRAOS/DataGenerator | rubber-scaffolding/rubber-commons/src/main/scala/org/finra/datagenerator/scaffolding/config/LocalConfig.scala | Scala | apache-2.0 | 724 |
/**
* Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.peelframework.hadoop.beans.system
import java.util.regex.Pattern
import com.samskivert.mustache.Mustache
import org.peelframework.core.beans.system.Lifespan.Lifespan
import org.peelframework.core.beans.system.{LogCollection, SetUpTimeoutException, System}
import org.peelframework.core.config.{Model, SystemConfig}
import org.peelframework.core.util.shell
import scala.collection.JavaConverters._
import scala.util.matching.Regex
/** Wrapper class for Yarn.
*
* Implements Yarn as a Peel `System` and provides setup and teardown methods.
*
* @param version Version of the system (e.g. "7.1")
* @param configKey The system configuration resides under `system.\\${configKey}`
* @param lifespan `Lifespan` of the system
* @param dependencies Set of dependencies that this system needs
* @param mc The moustache compiler to compile the templates that are used to generate property files for the system
*/
class Yarn(
  version      : String,
  configKey    : String,
  lifespan     : Lifespan,
  dependencies : Set[System] = Set(),
  mc           : Mustache.Compiler) extends System("yarn", version, configKey, lifespan, dependencies, mc)
                                       with LogCollection {

  // ---------------------------------------------------
  // LogCollection.
  // ---------------------------------------------------

  /** The patterns of the log files to watch.
    *
    * One ResourceManager log pattern per configured slave host; host names and
    * the user name are regex-quoted because they may contain special characters.
    */
  override protected def logFilePatterns(): Seq[Regex] = {
    // TODO: rework based on http://hortonworks.com/blog/simplifying-user-logs-management-and-access-in-yarn/
    val user = Pattern.quote(config.getString(s"system.$configKey.user"))
    config.getStringList(s"system.$configKey.config.slaves").asScala.map(Pattern.quote).map(slave =>
      s"yarn-$user-resourcemanager-$slave\\\\.log".r)
  }

  // ---------------------------------------------------
  // System.
  // ---------------------------------------------------

  /** Materializes slaves, hadoop-env.sh, core-site.xml and yarn-site.xml from templates. */
  override def configuration() = SystemConfig(config, {
    val conf = config.getString(s"system.$configKey.path.config")
    List(
      SystemConfig.Entry[Model.Hosts](s"system.$configKey.config.slaves", s"$conf/slaves", templatePath("conf/hosts"), mc),
      SystemConfig.Entry[Model.Env](s"system.$configKey.config.env", s"$conf/hadoop-env.sh", templatePath("conf/hadoop-env.sh"), mc),
      SystemConfig.Entry[Model.Site](s"system.$configKey.config.core", s"$conf/core-site.xml", templatePath("conf/site.xml"), mc),
      SystemConfig.Entry[Model.Site](s"system.$configKey.config.yarn", s"$conf/yarn-site.xml", templatePath("conf/site.xml"), mc)
    )
  })

  /** Starts the ResourceManager and NodeManagers, then polls the ResourceManager log
    * until all slaves have registered. Retries (after a stop) up to
    * `startup.max.attempts` times before giving up.
    */
  override def start(): Unit = {
    val user = config.getString(s"system.$configKey.user")
    val logDir = config.getString(s"system.$configKey.path.log")

    var failedStartUpAttempts = 0
    while(!isUp) {
      try {
        val total = config.getStringList(s"system.$configKey.config.slaves").size()
        // yarn does not reset the resourcemanagers log at startup
        // so registrations are counted relative to the pre-start count `init`.
        val init = Integer.parseInt((shell !! s"""cat $logDir/yarn-$user-resourcemanager-*.log | grep 'registered with capability:' | wc -l""").trim())

        shell ! s"${config.getString(s"system.$configKey.path.home")}/sbin/yarn-daemon.sh start resourcemanager"
        shell ! s"${config.getString(s"system.$configKey.path.home")}/sbin/yarn-daemon.sh start nodemanager"
        logger.info(s"Waiting for nodes to connect")

        var curr = init
        var cntr = config.getInt(s"system.$configKey.startup.polling.counter")
        while (curr - init < total) {
          logger.info(s"Connected ${curr - init} from $total nodes")
          // wait a bit
          Thread.sleep(config.getInt(s"system.$configKey.startup.polling.interval"))
          // get new values
          curr = Integer.parseInt((shell !! s"""cat $logDir/yarn-$user-resourcemanager-*.log | grep 'registered with capability:' | wc -l""").trim())
          // timeout if counter goes below zero
          cntr = cntr - 1
          if (cntr < 0) throw new SetUpTimeoutException(s"Cannot start system '$toString'; node connection timeout at system ")
        }
        logger.info(s"Connected ${curr - init} from $total nodes")
        isUp = true
      } catch {
        case e: SetUpTimeoutException =>
          failedStartUpAttempts = failedStartUpAttempts + 1
          if (failedStartUpAttempts < config.getInt(s"system.$configKey.startup.max.attempts")) {
            stop()
            logger.info(s"Could not bring system '$toString' up in time, trying again...")
          } else {
            throw e
          }
      }
    }
  }

  /** Stops ResourceManager and NodeManager daemons and marks the system as down. */
  override def stop(): Unit = {
    shell ! s"${config.getString(s"system.$configKey.path.home")}/sbin/yarn-daemon.sh stop resourcemanager"
    shell ! s"${config.getString(s"system.$configKey.path.home")}/sbin/yarn-daemon.sh stop nodemanager"
    isUp = false
  }

  // True if either the ResourceManager or a NodeManager pid file points to a live process.
  def isRunning = {
    (shell ! s""" ps -p `cat ${config.getString(s"system.$configKey.config.env.YARN_PID_DIR")}/yarn-*-resourcemanager.pid` """) == 0 ||
    (shell ! s""" ps -p `cat ${config.getString(s"system.$configKey.config.env.YARN_PID_DIR")}/yarn-*-nodemanager.pid` """) == 0
  }
} | carabolic/peel | peel-extensions/src/main/scala/org/peelframework/hadoop/beans/system/Yarn.scala | Scala | apache-2.0 | 5,783 |
package BIDMach.models
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,HMat,GDMat,GMat,GIMat,GSDMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.datasources._
/**
* An Abstract class with shared code for Factor Models
*/
/**
 * Shared skeleton for factor models: initializes the model matrix from corpus
 * statistics and drives the per-minibatch user/model update cycle. Subclasses
 * supply the concrete `uupdate`/`mupdate`/`evalfun` steps.
 */
abstract class FactorModel(override val opts:FactorModel.Opts) extends Model(opts) {

  def init() = {
    val data0 = mats(0)
    val m = size(data0, 1)
    val d = opts.dim
    // Smoothed per-feature counts (+1 avoids zeros) and their normalized distribution.
    val sdat = (sum(data0,2).t + 1.0f).asInstanceOf[FMat]
    val sp = sdat / sum(sdat)
    println("corpus perplexity=%f" format  math.exp(- (sp ddot ln(sp))) )

    if (refresh) {
      // Random init scaled by feature frequency, then row-normalized.
      val modelmat = rand(d,m);
      modelmat ~ modelmat *@ sdat;
      val msum = sum(modelmat, 2);
      modelmat ~ modelmat / msum;
      setmodelmats(Array[Mat](1));
      modelmats(0) = convertMat(modelmat);
    }

    if (mats.size > 1) {
      // Reset the per-document user factors to a uniform 1/d over one data pass.
      while (datasource.hasNext) {
        mats = datasource.next
        val dmat = mats(1)
        dmat.set(1.0f/d)
        datasource.putBack(mats,1)
      }
    }
  }

  // Subclass hooks: update user factors / model factors for one minibatch.
  def uupdate(data:Mat, user:Mat, ipass:Int, pos:Long)

  def mupdate(data:Mat, user:Mat, ipass:Int, pos:Long)

  def mupdate2(data:Mat, user:Mat, ipass:Int) = {}

  // Subclass hooks: score one minibatch (optionally writing predictions).
  def evalfun(data:Mat, user:Mat, ipass:Int, pos:Long):FMat

  def evalfun(data:Mat, user:Mat, preds:Mat, ipass:Int, pos:Long):FMat = {zeros(0,0)}

  /** One training step: refresh user factors, then update the model. */
  def dobatch(gmats:Array[Mat], ipass:Int, i:Long) = {
    val sdata = gmats(0)
    // Reuse the provided user matrix if present, otherwise allocate a fresh one.
    val user = if (gmats.length > 1) gmats(1) else FactorModel.reuseuser(gmats(0), opts.dim, opts.initUval)
    uupdate(sdata, user, ipass, i)
    mupdate(sdata, user, ipass, i)
  }

  /** One evaluation step: refresh user factors, then score (with predictions if supplied). */
  def evalbatch(mats:Array[Mat], ipass:Int, here:Long):FMat = {
    val sdata = gmats(0)
    val user = if (gmats.length > 1) gmats(1) else FactorModel.reuseuser(gmats(0), opts.dim, opts.initUval);
    uupdate(sdata, user, ipass, here);
    if (gmats.length > 2) {
      evalfun(sdata, user, gmats(2), ipass, here);
    } else {
      evalfun(sdata, user, ipass, here);
    }
  }
}
object FactorModel {
  // Tunables shared by all factor models.
  trait Opts extends Model.Opts {
    var uiter = 5          // user-factor update iterations per minibatch
    var weps = 1e-10f      // small epsilon used in weight updates
    var minuser = 1e-8f    // lower clamp for user factor values
    var initUval = 1f      // initial value for freshly allocated user matrices
  }

  /**
   * Returns a cached dim x a.ncols matrix (matching a's device/precision family)
   * filled with `ival`; the GUID-keyed cache avoids reallocating per minibatch.
   */
  def reuseuser(a:Mat, dim:Int, ival:Float):Mat = {
    val out = a match {
      case aa:SMat => FMat.newOrCheckFMat(dim, a.ncols, null, a.GUID, "SMat.reuseuser".##)
      case aa:FMat => FMat.newOrCheckFMat(dim, a.ncols, null, a.GUID, "FMat.reuseuser".##)
      case aa:GSMat => GMat.newOrCheckGMat(dim, a.ncols, null, a.GUID, "GSMat.reuseuser".##)
      case aa:GMat => GMat.newOrCheckGMat(dim, a.ncols, null, a.GUID, "GMat.reuseuser".##)
      case aa:GDMat => GDMat.newOrCheckGDMat(dim, a.ncols, null, a.GUID, "GDMat.reuseuser".##)
      case aa:GSDMat => GDMat.newOrCheckGDMat(dim, a.ncols, null, a.GUID, "GSDMat.reuseuser".##)
    }
    out.set(ival)
    out
  }

  class Options extends Opts {}
}
| uhjish/BIDMach | src/main/scala/BIDMach/models/FactorModel.scala | Scala | bsd-3-clause | 2,974 |
package scala.scalanative
package nir
package parser
import fastparse.all._
/** FastParse grammar for NIR instructions; one sub-parser per instruction form. */
object Inst extends Base[nir.Inst] {

  import Base.IgnoreWhitespace._

  val None = P("none".! map (_ => nir.Inst.None))
  // `name(params...):` — parameter list is optional and defaults to empty.
  val Label =
    P(Local.parser ~ ("(" ~ Val.Local.rep(sep = ",") ~ ")").? ~ ":" map {
      case (name, params) => nir.Inst.Label(name, params getOrElse Seq())
    })
  // `%name = op`
  val Let =
    P(Local.parser ~ "=" ~ Op.parser map {
      case (name, op) => nir.Inst.Let(name, op)
    })
  val Unreachable = P("unreachable".! map (_ => nir.Inst.Unreachable))
  // `ret [value]` — a missing value is represented as Val.None.
  val Ret =
    P("ret" ~ Val.parser.? map (v => nir.Inst.Ret(v.getOrElse(nir.Val.None))))
  val Jump = P("jump" ~ Next.parser map (nir.Inst.Jump(_)))
  val If =
    P("if" ~ Val.parser ~ "then" ~ Next.parser ~ "else" ~ Next.parser map {
      case (cond, thenp, elsep) => nir.Inst.If(cond, thenp, elsep)
    })
  val Switch =
    P("switch" ~ Val.parser ~ "{" ~ Next.parser.rep ~ "default:" ~ Next.parser ~ "}" map {
      case (scrut, cases, default) => nir.Inst.Switch(scrut, default, cases)
    })
  // `invoke[ty] ptr(args...) to succ unwind fail`
  val Invoke =
    P(
      "invoke[" ~ Type.parser ~ "]" ~ Val.parser ~ "(" ~ Val.parser.rep(sep =
        ",") ~ ")" ~ "to" ~ Next.parser ~ "unwind" ~ Next.parser map {
        case (ty, ptr, args, succ, fail) =>
          nir.Inst.Invoke(ty, ptr, args, succ, fail)
      })
  val Throw = P("throw" ~ Val.parser map (nir.Inst.Throw(_)))
  val Try =
    P("try" ~ Next.parser ~ "catch" ~ Next.parser map {
      case (normal, exc) => nir.Inst.Try(normal, exc)
    })

  // Alternatives are tried in order; keyword-prefixed forms come after None/Label/Let.
  override val parser: P[nir.Inst] =
    None | Label | Let | Unreachable | Ret | Jump | If | Switch | Invoke | Throw | Try
}
| cedricviaccoz/scala-native | tools/src/main/scala/scala/scalanative/nir/parser/Inst.scala | Scala | bsd-3-clause | 1,646 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3
import org.neo4j.cypher.internal.frontend.v2_3.{CypherException, InputPosition, SyntaxException}
/**
 * Builds [[SyntaxException]]s for a given query, shifting each reported position
 * by the offset the pre-parser consumed before the actual query text started.
 */
class SyntaxExceptionCreator(queryText: String, preParserOffset: Option[InputPosition]) extends ((String, InputPosition) => CypherException) {
  override def apply(message: String, position: InputPosition): CypherException = {
    val adjusted = position.withOffset(preParserOffset)
    val fullMessage = message + " (" + adjusted + ")"
    new SyntaxException(fullMessage, queryText, adjusted.offset)
  }
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/SyntaxExceptionCreator.scala | Scala | apache-2.0 | 1,333 |
// Copyright 2011 Kiel Hodges
package sample
case class Request(widgetId: Long)
| greenbar/replicant | scala/src/test/scala/sample/Request.scala | Scala | mit | 81 |
package com.tuplejump.cobalt.query
import org.apache.cassandra.thrift.{IndexOperator, IndexExpression}
import java.nio.ByteBuffer
/**
* Created with IntelliJ IDEA.
* User: rohit
* Date: 3/1/13
* Time: 7:01 PM
* To change this template use File | Settings | File Templates.
*/
/**
 * Base of the query-builder DSL: accumulates Cassandra secondary-index
 * expressions. New expressions are prepended, so the list is in reverse
 * insertion order until FinalQuery reverses it.
 */
trait Query {
  protected[query] var expressions = List[IndexExpression]()
}
// Entry state of the builder: no expressions yet; `where` starts the first predicate.
class EmptyQuery() extends Query {
  // Captured implicitly by FirstColumn so its operators can append to this query.
  implicit val query = this

  def where(colName: ByteBuffer) = {
    new FirstColumn(colName)
  }
}
// Builder state after the first predicate; `and` chains further predicates.
class InitializedQuery(q: Query) extends Query {
  // Captured implicitly by Column so its operators can append to this query.
  implicit val query = this

  // Carries over the expressions accumulated by the previous builder state.
  this.expressions = q.expressions

  def and(colName: ByteBuffer) = {
    new Column(colName)
  }
}
// Terminal wrapper exposing the finished expression list.
class FinalQuery(q: InitializedQuery) {
  // Expressions were prepended during building; reversing restores insertion order.
  def getExpressions() = {
    q.expressions.reverse
  }
}
object FinalQuery {
  // Allows treating a chained InitializedQuery as a FinalQuery implicitly.
  implicit def Query2BuiltQuery(q: InitializedQuery) = new FinalQuery(q)
}
// NOTE(review): this singleton extends EmptyQuery, so predicates built directly via
// `Query.where(...)` accumulate in shared mutable state across call sites — confirm
// callers use `Query.apply` (a fresh EmptyQuery) when isolation is required.
object Query extends EmptyQuery {
  def apply = new EmptyQuery()
}
/**
 * First predicate column: `isEq` records the expression on the implicit query
 * and advances the builder to the chained state.
 * Fix: removed a leftover debug `println` of the accumulated expressions.
 */
class FirstColumn(colName: ByteBuffer)(implicit query: Query) {
  def isEq(colValue: ByteBuffer) = {
    query.expressions ::= new IndexExpression(colName, IndexOperator.EQ, colValue)
    new InitializedQuery(query)
  }
}
/**
 * Subsequent predicate column: each comparison operator records an index
 * expression on the implicit query and returns it for further chaining.
 * The five operators previously had identical copy-pasted bodies; they now
 * share a single private helper.
 */
class Column(colName: ByteBuffer)(implicit query: InitializedQuery) {

  // Common shape of every operator: prepend the expression, return the query.
  private def add(op: IndexOperator, colValue: ByteBuffer): InitializedQuery = {
    query.expressions ::= new IndexExpression(colName, op, colValue)
    query
  }

  def isEq(colValue: ByteBuffer) = add(IndexOperator.EQ, colValue)

  def isGt(colValue: ByteBuffer) = add(IndexOperator.GT, colValue)

  def isGte(colValue: ByteBuffer) = add(IndexOperator.GTE, colValue)

  def isLt(colValue: ByteBuffer) = add(IndexOperator.LT, colValue)

  def isLte(colValue: ByteBuffer) = add(IndexOperator.LTE, colValue)
}
| tuplejump/calliope-old | src/main/scala/com/tuplejump/calliope/query/Query.scala | Scala | apache-2.0 | 1,946 |
/*
* Copyright 2014 The Guardian
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lib
import org.kohsuke.github.{GHOrganization, GHUser}
import scala.util.{Success, Try}
import Implicits._
/** Registry of all account requirements, also indexed by their GitHub issue label. */
object AccountRequirements {
  val All = Seq(FullNameRequirement, TwoFactorAuthRequirement, SponsorRequirement)
  val RequirementsByLabel = (for (requirement <- All) yield requirement.issueLabel -> requirement).toMap
}
/** A single rule that organisation members' accounts must satisfy. */
trait AccountRequirement {

  // Evaluates this requirement against individual users.
  trait UserEvaluator {
    // Back-reference to the requirement this evaluator belongs to.
    val requirement = AccountRequirement.this
    def isSatisfiedBy(user: GHUser): Boolean
    // Whether the requirement applies to this user at all (e.g. bots may be exempt).
    def appliesTo(user: GHUser): Boolean
  }

  // GitHub issue label identifying violations of this requirement.
  val issueLabel: String
  // Human-readable instructions telling the user how to comply.
  def fixSummary(implicit org: GHOrganization): String
  // May fail (Try) when building the evaluator requires remote data.
  def userEvaluatorFor(orgSnapshot: OrgSnapshot): Try[UserEvaluator]
}
/** Requires members to have a full name set on their GitHub profile. */
object FullNameRequirement extends AccountRequirement {
  override val issueLabel = "FullName"

  override def fixSummary(implicit org: GHOrganization) =
    "Enter a full name in your [GitHub profile](https://github.com/settings/profile)."

  def userEvaluatorFor(orgSnapshot: OrgSnapshot) = Success(new UserEvaluator {
    def appliesTo(user: GHUser) = true
    // A name counts only if present (null-safe via Option) and longer than one character.
    def isSatisfiedBy(user: GHUser) = Option(user.getName()).exists(_.length > 1)
  })
}
// requires a 'users.txt' file in the people repo
object SponsorRequirement extends AccountRequirement {
  override val issueLabel = "Sponsor"

  override def fixSummary(implicit org: GHOrganization) =
    "Get a pull request opened to add your username to our " +
      s"[users.txt](https://github.com/${org.getLogin}/people/blob/master/users.txt) file " +
      s"_- ideally, a Tech Lead or Dev Manager at ${org.displayName} should open this request for you_."

  // A user counts as sponsored once their login appears (case-insensitively)
  // in the people repo's users.txt file.
  def userEvaluatorFor(orgSnapshot: OrgSnapshot) = Success(new UserEvaluator {
    def appliesTo(user: GHUser) = true
    def isSatisfiedBy(user: GHUser) = {
      val login = user.getLogin.toLowerCase
      orgSnapshot.sponsoredUserLoginsLowerCase.contains(login)
    }
  })
}
// requires Owner permissions
object TwoFactorAuthRequirement extends AccountRequirement {
  override val issueLabel = "TwoFactorAuth"
  override def fixSummary(implicit org: GHOrganization) =
    "Enable [two-factor authentication](https://help.github.com/articles/about-two-factor-authentication) " +
      "in your [GitHub Account Security settings](https://github.com/settings/security)."
  // The set of 2FA-disabled logins comes from the snapshot as a Try (it needs
  // Owner permissions, per the note above), so the evaluator is built inside
  // a for-comprehension over that Try.
  def userEvaluatorFor(orgSnapshot: OrgSnapshot) = for (tfaDisabledUsers <- orgSnapshot.twoFactorAuthDisabledUserLogins) yield
    new UserEvaluator {
      def isSatisfiedBy(user: GHUser) = !tfaDisabledUsers.contains(user)
      // Bot accounts are exempt from the 2FA requirement.
      def appliesTo(user: GHUser) = !orgSnapshot.botUsers.contains(user)
    }
}
| guardian/gu-who | app/lib/AccountRequirement.scala | Scala | apache-2.0 | 3,112 |
/*
* Copyright (C) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.time.format.DateTimeFormatter
import java.time.{ZoneId, ZonedDateTime}
import controllers.{ProfileJs, RegisterJs}
import models.db.DatabaseSessionHolder
import models.users._
import play.api.db.Database
import play.api.db.evolutions.{ClassLoaderEvolutionsReader, Evolutions}
import play.api.libs.json._
import services.PortalConfig
import uk.gov.hmrc.emailaddress.EmailAddress
import utils.PasswordHashing
/**
* Test Spec for [[User]] and [[UserDAO]]
*/
class UserDAOSpec extends WithDefaultTestFullAppAndDatabase {
  // Shared, injector-provided DB plumbing for every case in this spec.
  val sessionHolder: DatabaseSessionHolder = app.injector.instanceOf[DatabaseSessionHolder]
  val database: Database = sessionHolder.db
  // Re-apply the H2 test schema evolutions before each test case.
  before {
    Evolutions.applyEvolutions(database, ClassLoaderEvolutionsReader.forPrefix("testh2db/"))
  }
  "UserDAO" can {
    // End-to-end CRUD exercise of UserDAO against the in-memory database.
    "handle Users with DB" in {
      // Evolutions.applyEvolutions(database, ClassLoaderEvolutionsReader.forPrefix("testh2db/"))
      // val sessionHolder = new SessionHolder(database)
      sessionHolder.viaConnection { implicit connection =>
        val portalConfig = new PortalConfig(app.configuration)
        val passwordHashing = new PasswordHashing(portalConfig)
        val regLinkId = java.util.UUID.randomUUID().toString
        val testPass = "testpass123"
        val testPassUpd = "testpass12345"
        val testTime = ZonedDateTime.now.withZoneSameInstant(ZoneId.systemDefault())
        val cryptPass = passwordHashing.createHash(testPass)
        val cryptPassUpd = passwordHashing.createHash(testPassUpd)
        // Fixture users: one freshly REGISTERED, one fully ACTIVE.
        val testUser1 = User(EmailAddress("test@blubb.com"),
          "local:test@blubb.com",
          "Hans",
          "Wurst",
          cryptPass,
          s"${StatusToken.REGISTERED}:$regLinkId",
          testTime)
        val testUser2 = User(EmailAddress("test2@blubb.com"),
          "local:test2@blubb.com",
          "Hans",
          "Wurst",
          cryptPass,
          s"${StatusToken.ACTIVE}:REGCONFIRMED",
          testTime)
        // create
        UserDAO.createUser(testUser1) mustEqual Some(testUser1)
        // createUser
        UserDAO.createUser(testUser2) mustEqual Some(testUser2)
        UserDAO.getAllUsers.size mustEqual 2
        // findByAccountSubject
        UserDAO.findByAccountSubject("local:test@blubb.com") mustEqual Some(testUser1)
        // findUserByEmailAsString
        passwordHashing.validatePassword(testPass, UserDAO.findUserByEmailAsString(testUser2.email).get.password) mustBe true
        // findUserByEmailAsString
        UserDAO.findUserByEmailAsString("test2@blubb.com") mustEqual Some(testUser2)
        // findUsersByToken
        UserDAO.findUsersByToken(StatusToken.ACTIVE, "%").size mustEqual 1
        UserDAO.findUsersByToken(StatusToken.REGISTERED, "%").size mustEqual 1
        // findRegisteredUsersByRegLink
        UserDAO.findRegisteredUsersWithRegLink(regLinkId).size mustEqual 1
        // findRegisteredOnlyUsers
        val regUsers = UserDAO.findRegisteredOnlyUsers
        regUsers.size mustBe 1
        regUsers.headOption.get.accountSubject mustEqual "local:test@blubb.com"
        // findActiveUsers
        val activeUsers = UserDAO.findActiveUsers
        activeUsers.size mustBe 1
        activeUsers.headOption.get.accountSubject mustEqual "local:test2@blubb.com"
        activeUsers.headOption.get.email mustEqual EmailAddress("test2@blubb.com")
        // updateNoPass
        val emailLinkId = java.util.UUID.randomUUID().toString()
        val testUser2_1 = User(EmailAddress("test2@blubb.com"),
          "local:test2@blubb.com",
          "Hans",
          "Wurst-Ebermann",
          cryptPassUpd,
          s"${StatusToken.EMAILVALIDATION}:$emailLinkId",
          testTime)
        // updateNoPass must change profile fields but leave the stored
        // password hash untouched (old password still validates below).
        UserDAO.updateNoPass(testUser2_1) mustEqual Some(testUser2_1)
        UserDAO.findByAccountSubject("local:test2@blubb.com").get.lastname mustEqual "Wurst-Ebermann"
        passwordHashing.validatePassword(testPass, UserDAO.findUserByEmailAddress(EmailAddress("test2@blubb.com")).get.password) mustBe true
        passwordHashing.validatePassword(testPassUpd, UserDAO.findUserByEmailAddress(EmailAddress("test2@blubb.com")).get.password) mustBe false
        UserDAO.findEmailValidationRequiredUsersWithRegLink(emailLinkId).size mustEqual 1
        val resetLink = java.util.UUID.randomUUID().toString
        val testUser2_2 = testUser2_1.copy(laststatustoken = s"${StatusToken.PASSWORDRESET}:$resetLink")
        UserDAO.updateNoPass(testUser2_2) mustEqual Some(testUser2_2)
        UserDAO.findUsersByPassResetLink(resetLink).size mustEqual 1
        // updatePassword
        // updatePassword swaps in the new hash: now only the new password
        // validates.
        UserDAO.updatePassword(testUser2_1) mustEqual Some(testUser2_1)
        UserDAO.findUserByEmailAddress(EmailAddress("test2@blubb.com")).get.lastname mustEqual "Wurst-Ebermann"
        UserDAO.findUserByEmailAsString("test2@blubb.com").get.lastname mustEqual "Wurst-Ebermann"
        passwordHashing.validatePassword(testPassUpd, UserDAO.findUserByEmailAddress(EmailAddress("test2@blubb.com")).get.password) mustBe true
        passwordHashing.validatePassword(testPass, UserDAO.findUserByEmailAddress(EmailAddress("test2@blubb.com")).get.password) mustBe false
        // deleteUser
        // Deletion works both by email string and by User object.
        UserDAO.deleteUser("test2@blubb.com") mustEqual true
        UserDAO.getAllUsers.size mustBe 1
        UserDAO.deleteUser(testUser1) mustEqual true
        UserDAO.getAllUsers.size mustBe 0
      }
    }
  }
  "User" can {
    // Round-trips the various JSON Reads/Writes used by the controllers.
    "encode Users required Json" in {
      val portalConfig = new PortalConfig(app.configuration)
      val passwordHashing = new PasswordHashing(portalConfig)
      val testPass = "testpass123"
      val testPassUpd = "testpass123upd"
      val cryptPass = passwordHashing.createHash(testPass)
      val testTime = ZonedDateTime.now.withZoneSameInstant(ZoneId.systemDefault())
      val testUser2 = User(EmailAddress("test2@blubb.com"),
        "local:test2@blubb.com",
        "Hans",
        "Wurst",
        cryptPass,
        s"${StatusToken.ACTIVE}:REGCONFIRMED",
        testTime)
      // User Reads and Writes
      Json.toJson(testUser2)(controllers.userWrites) mustEqual Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"password":"$cryptPass",
           |"laststatustoken":"${StatusToken.ACTIVE}:REGCONFIRMED",
           |"laststatuschange":"${testTime.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)}"
           |}""".stripMargin)
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"password":"$cryptPass",
           |"laststatustoken":"${StatusToken.ACTIVE}:REGCONFIRMED",
           |"laststatuschange":"${testTime.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)}"
           |}""".stripMargin).validate[User](controllers.userReads).get mustEqual testUser2
      // ProfileJs Reads and Writes
      // ProfileJs is the password-less public view of a User.
      testUser2.asProfileJs mustEqual ProfileJs(testUser2.email, testUser2.accountSubject, testUser2.firstname, testUser2.lastname, Some(testUser2.laststatustoken), Some(testUser2.laststatuschange))
      Json.toJson(testUser2.asProfileJs)(controllers.profileJsWrites) mustEqual Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"laststatustoken":"ACTIVE:REGCONFIRMED",
           |"laststatuschange":"${testUser2.laststatuschange.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)}"
           |}""".stripMargin)
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"laststatustoken":"ACTIVE:REGCONFIRMED",
           |"laststatuschange":"${testUser2.laststatuschange.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)}"
           |}""".stripMargin).validate[ProfileJs].get mustEqual testUser2.asProfileJs
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst"
           |}""".stripMargin).validate[ProfileJs].get.firstname mustEqual "Hans"
      // RegisterJs Reads
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"password": "$testPass"
           |}""".stripMargin).validate[RegisterJs].get mustEqual
        RegisterJs(testUser2.email, testUser2.accountSubject, testUser2.firstname, testUser2.lastname, testPass)
      // Too-short passwords must be rejected by the Reads validation.
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"accountSubject":"local:test2@blubb.com",
           |"firstname":"Hans",
           |"lastname":"Wurst",
           |"password": "short"
           |}""".stripMargin).validate[RegisterJs].isError mustBe true
      // LoginCredentials Reads
      import controllers.LoginCredentialsFromJsonReads
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"password": "$testPass"
           |}""".stripMargin).validate[LoginCredentials].get mustEqual
        LoginCredentials(testUser2.email, testPass)
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"password": "short"
           |}""".stripMargin).validate[LoginCredentials].isError mustBe true
      // PasswordUpdateCredentials Reads {"email":"alex","oldpassword":"testpass123", "newpassword":"testpass123"}
      import controllers.passwordUpdateCredentialsJsReads
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"oldpassword": "$testPass",
           |"newpassword": "$testPassUpd"
           |}""".stripMargin).validate[PasswordUpdateCredentials].get mustEqual
        PasswordUpdateCredentials(testUser2.email, testPass, testPassUpd)
      // Identical old/new passwords are rejected, as are too-short ones.
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"oldpassword": "$testPass",
           |"newpassword": "$testPass"
           |}""".stripMargin).validate[PasswordUpdateCredentials].isError mustBe true
      Json.parse(
        s"""{
           |"email":"test2@blubb.com",
           |"oldpassword": "$testPass",
           |"newpassword": "short"
           |}""".stripMargin).validate[PasswordUpdateCredentials].isError mustBe true
      // GAuthCredentials Reads authcode accesstype
      // accesstype accepts only the literals "LOGIN" and "REGISTER".
      import controllers.GAuthCredentialsFromJsonReads
      Json.parse(
        s"""{
           |"authcode": "$testPass",
           |"accesstype": "LOGIN"
           |}""".stripMargin).validate[GAuthCredentials].get mustEqual
        GAuthCredentials(testPass, "LOGIN")
      Json.parse(
        s"""{
           |"authcode": "$testPass",
           |"accesstype": "REGISTER"
           |}""".stripMargin).validate[GAuthCredentials].get mustEqual
        GAuthCredentials(testPass, "REGISTER")
      Json.parse(
        s"""{
           |"authcode": "$testPass",
           |"accesstype": "NOTVALID"
           |}""".stripMargin).validate[GAuthCredentials].isError mustBe true
    }
  }
}
| ZGIS/smart-portal-backend | test/UserDAOSpec.scala | Scala | apache-2.0 | 12,183 |
package com.gx.valueobject
import org.scalatest.{FlatSpec, Matchers}
/**
* Copyright 2017 josephguan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
class ValueObjectSpec extends FlatSpec with Matchers {
  // Value objects compare by value, not identity: two Points built from the
  // same coordinates must be equal. (Point presumably provides structural
  // equality, e.g. as a case class — defined elsewhere in this project.)
  it should "be equal for two value objects with same value" in {
    Point(1, 2) should be(Point(1, 2))
  }
}
| josephguan/scala-design-patterns | creational/value-object/src/test/scala/com/gx/valueobject/ValueObjectSpec.scala | Scala | apache-2.0 | 847 |
//
// Copyright (c) 2014 Ole Krause-Sparmann
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.postwall.db
import com.typesafe.config.ConfigFactory
import org.slf4j.{ Logger, LoggerFactory }
object Configs {
  // Application configuration loaded from the default Typesafe Config sources
  // (application.conf / reference.conf / system properties).
  val c = ConfigFactory.load()
  // Logger named after this object's class.
  val log: Logger = LoggerFactory.getLogger(this.getClass)
}
package sangria.execution.deferred
import sangria.util.Cache
trait FetcherCache {
  // Derive the cache key for a single entity id.
  def cacheKey(id: Any): Any
  // Derive the cache key for a (relation, relation-id) pair.
  def cacheKeyRel(rel: Any, relId: Any): Any
  // Whether the given entity / relation result should be cached at all.
  def cacheable(id: Any): Boolean
  def cacheableRel(rel: Any, relId: Any): Boolean
  def get(id: Any): Option[Any]
  def getRel(rel: Any, relId: Any): Option[Seq[Any]]
  def update(id: Any, value: Any): Unit
  // `idFn` extracts each value's entity id so relation members can also be
  // cached individually (see SimpleFetcherCache.updateRel).
  def updateRel[T](rel: Any, relId: Any, idFn: T => Any, values: Seq[T]): Unit
  def clear(): Unit
  def clearId(id: Any): Unit
  def clearRel(rel: Any): Unit
  def clearRelId(rel: Any, relId: Any): Unit
}
object FetcherCache {
  // Default in-memory implementation that caches everything.
  def simple = new SimpleFetcherCache
}
/** Straightforward in-memory cache: everything is cacheable, keys are used as-is. */
class SimpleFetcherCache extends FetcherCache {
  // Backing stores: one for single entities by id, one for relation results.
  private val byId = Cache.empty[Any, Any]
  private val byRel = Cache.empty[Any, Seq[Any]]

  def cacheKey(id: Any) = id
  def cacheKeyRel(rel: Any, relId: Any) = (rel, relId)

  def cacheable(id: Any) = true
  def cacheableRel(rel: Any, relId: Any) = true

  def get(id: Any) = byId.get(cacheKey(id))
  def getRel(rel: Any, relId: Any) = byRel.get(cacheKeyRel(rel, relId))

  def update(id: Any, value: Any) =
    if (cacheable(id)) byId.update(cacheKey(id), value)

  // Stores the relation result as a whole and every contained entity by id.
  def updateRel[T](rel: Any, relId: Any, idFn: T => Any, values: Seq[T]) =
    if (cacheableRel(rel, relId)) {
      for (v <- values) update(idFn(v), v)
      byRel.update(cacheKeyRel(rel, relId), values)
    }

  def clear() = {
    byId.clear()
    byRel.clear()
  }

  override def clearId(id: Any) = byId.remove(cacheKey(id))

  // Drops every cached result belonging to the given relation, for any relId.
  override def clearRel(rel: Any) =
    byRel.removeKeys {
      case (r, _) => r == rel
      case _ => false
    }

  override def clearRelId(rel: Any, relId: Any) = byRel.remove(cacheKeyRel(rel, relId))
}
| OlegIlyenko/sangria | modules/core/src/main/scala/sangria/execution/deferred/FetcherCache.scala | Scala | apache-2.0 | 1,770 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.annotation.{Evolving, Stable}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.parseColumnPath
////////////////////////////////////////////////////////////////////////////////////////////////////
// This file defines all the filters that we can push down to the data sources.
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* A filter predicate for data sources. Mapping between Spark SQL types and filter value
* types follow the convention for return type of [[org.apache.spark.sql.Row#get(int)]].
*
* @since 1.3.0
*/
@Stable
sealed abstract class Filter {
  /**
   * List of columns that are referenced by this filter.
   *
   * Note that, each element in `references` represents a column; `dots` are used as separators
   * for nested columns. If any part of the names contains `dots`, it is quoted to avoid confusion.
   *
   * @since 2.1.0
   */
  def references: Array[String]
  // Helper for filters whose `value` operand may itself be a Filter: returns
  // that filter's references, or nothing for a plain literal value.
  protected def findReferences(value: Any): Array[String] = value match {
    case f: Filter => f.references
    case _ => Array.empty
  }
  /**
   * List of columns that are referenced by this filter.
   *
   * @return each element is a column name as an array of string multi-identifier
   * @since 3.0.0
   */
  def v2references: Array[Array[String]] = {
    this.references.map(parseColumnPath(_).toArray)
  }
  /**
   * If any of the references of this filter contains nested column
   */
  private[sql] def containsNestedColumn: Boolean = {
    // A parsed path longer than 1 means a nested (dotted) column reference.
    this.v2references.exists(_.length > 1)
  }
}
/**
* A filter that evaluates to `true` iff the column evaluates to a value
* equal to `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class EqualTo(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* Performs equality comparison, similar to [[EqualTo]]. However, this differs from [[EqualTo]]
* in that it returns `true` (rather than NULL) if both inputs are NULL, and `false`
* (rather than NULL) if one of the input is NULL and the other is not NULL.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.5.0
*/
@Stable
case class EqualNullSafe(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to a value
* greater than `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class GreaterThan(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to a value
* greater than or equal to `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class GreaterThanOrEqual(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to a value
* less than `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class LessThan(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to a value
* less than or equal to `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class LessThanOrEqual(attribute: String, value: Any) extends Filter {
  // `value` may itself be a Filter, in which case its references count too.
  override def references: Array[String] = Array(attribute) ++ findReferences(value)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to one of the values in the array.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class In(attribute: String, values: Array[Any]) extends Filter {
  // Arrays don't get structural equals/hashCode from the case class machinery,
  // so both are computed element-wise here (order-sensitive, null-tolerant).
  override def hashCode(): Int =
    values.foldLeft(attribute.hashCode) { (h, v) =>
      h * 41 + (if (v == null) 0 else v.hashCode())
    }

  override def equals(o: Any): Boolean = o match {
    case In(a, vs) =>
      a == attribute &&
        vs.length == values.length &&
        vs.zip(values).forall { case (x, y) => x == y }
    case _ => false
  }

  override def toString: String = {
    s"In($attribute, [${values.mkString(",")}])"
  }

  override def references: Array[String] = Array(attribute) ++ values.flatMap(findReferences)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to null.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class IsNull(attribute: String) extends Filter {
  // Only the tested column itself is referenced.
  override def references: Array[String] = Array(attribute)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to a non-null value.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.0
*/
@Stable
case class IsNotNull(attribute: String) extends Filter {
  // Only the tested column itself is referenced.
  override def references: Array[String] = Array(attribute)
}
/**
* A filter that evaluates to `true` iff both `left` or `right` evaluate to `true`.
*
* @since 1.3.0
*/
@Stable
case class And(left: Filter, right: Filter) extends Filter {
  // Concatenation of both children's references (duplicates are kept).
  override def references: Array[String] = left.references ++ right.references
}
/**
* A filter that evaluates to `true` iff at least one of `left` or `right` evaluates to `true`.
*
* @since 1.3.0
*/
@Stable
case class Or(left: Filter, right: Filter) extends Filter {
  // Concatenation of both children's references (duplicates are kept).
  override def references: Array[String] = left.references ++ right.references
}
/**
* A filter that evaluates to `true` iff `child` is evaluated to `false`.
*
* @since 1.3.0
*/
@Stable
case class Not(child: Filter) extends Filter {
  // A negation references exactly what its child references.
  override def references: Array[String] = child.references
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to
* a string that starts with `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.1
*/
@Stable
case class StringStartsWith(attribute: String, value: String) extends Filter {
  // `value` is a plain string literal here, so only the column is referenced.
  override def references: Array[String] = Array(attribute)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to
* a string that ends with `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.1
*/
@Stable
case class StringEndsWith(attribute: String, value: String) extends Filter {
  // `value` is a plain string literal here, so only the column is referenced.
  override def references: Array[String] = Array(attribute)
}
/**
* A filter that evaluates to `true` iff the attribute evaluates to
* a string that contains the string `value`.
*
* @param attribute of the column to be evaluated; `dots` are used as separators
* for nested columns. If any part of the names contains `dots`,
* it is quoted to avoid confusion.
* @since 1.3.1
*/
@Stable
case class StringContains(attribute: String, value: String) extends Filter {
  // `value` is a plain string literal here, so only the column is referenced.
  override def references: Array[String] = Array(attribute)
}
/**
* A filter that always evaluates to `true`.
*
* @since 3.0.0
*/
@Evolving
case class AlwaysTrue() extends Filter {
  // A constant predicate references no columns.
  override def references: Array[String] = Array.empty
}
@Evolving
object AlwaysTrue extends AlwaysTrue {
  // Singleton instance of the always-true filter.
}
/**
* A filter that always evaluates to `false`.
*
* @since 3.0.0
*/
@Evolving
case class AlwaysFalse() extends Filter {
  // A constant predicate references no columns.
  override def references: Array[String] = Array.empty
}
@Evolving
object AlwaysFalse extends AlwaysFalse {
  // Singleton instance of the always-false filter.
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/sources/filters.scala | Scala | apache-2.0 | 10,259 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.util
import scala.collection.JavaConverters._
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.streaming.continuous.RateStreamContinuousStream
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader.{Scan, ScanBuilder}
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousStream, MicroBatchStream}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* A source that generates increment long values with timestamps. Each generated row has two
* columns: a timestamp column for the generated time and an auto increment long column starting
* with 0L.
*
* This source supports the following options:
* - `rowsPerSecond` (e.g. 100, default: 1): How many rows should be generated per second.
* - `rampUpTime` (e.g. 5s, default: 0s): How long to ramp up before the generating speed
* becomes `rowsPerSecond`. Using finer granularities than seconds will be truncated to integer
* seconds.
* - `numPartitions` (e.g. 10, default: Spark's default parallelism): The partition number for the
* generated rows. The source will try its best to reach `rowsPerSecond`, but the query may
* be resource constrained, and `numPartitions` can be tweaked to help reach the desired speed.
*/
class RateStreamProvider extends TableProvider with DataSourceRegister {
  import RateStreamProvider._
  // Validates the user-supplied options and materialises the rate table.
  override def getTable(options: CaseInsensitiveStringMap): Table = {
    // rowsPerSecond defaults to 1 and must be strictly positive.
    val rowsPerSecond = options.getLong(ROWS_PER_SECOND, 1)
    if (rowsPerSecond <= 0) {
      throw new IllegalArgumentException(
        s"Invalid value '$rowsPerSecond'. The option 'rowsPerSecond' must be positive")
    }
    // rampUpTime accepts duration strings (e.g. "5s") and defaults to 0 seconds.
    val rampUpTimeSeconds = Option(options.get(RAMP_UP_TIME))
      .map(JavaUtils.timeStringAsSec)
      .getOrElse(0L)
    if (rampUpTimeSeconds < 0) {
      throw new IllegalArgumentException(
        s"Invalid value '$rampUpTimeSeconds'. The option 'rampUpTime' must not be negative")
    }
    // numPartitions defaults to the active session's default parallelism.
    val numPartitions = options.getInt(
      NUM_PARTITIONS, SparkSession.active.sparkContext.defaultParallelism)
    if (numPartitions <= 0) {
      throw new IllegalArgumentException(
        s"Invalid value '$numPartitions'. The option 'numPartitions' must be positive")
    }
    new RateStreamTable(rowsPerSecond, rampUpTimeSeconds, numPartitions)
  }
  // Name used in DataStreamReader.format("rate").
  override def shortName(): String = "rate"
}
class RateStreamTable(
    rowsPerSecond: Long,
    rampUpTimeSeconds: Long,
    numPartitions: Int)
  extends Table with SupportsRead {
  // Descriptive name including all configured options.
  override def name(): String = {
    s"RateStream(rowsPerSecond=$rowsPerSecond, rampUpTimeSeconds=$rampUpTimeSeconds, " +
      s"numPartitions=$numPartitions)"
  }
  override def schema(): StructType = RateStreamProvider.SCHEMA
  // The source is readable in both micro-batch and continuous execution modes.
  override def capabilities(): util.Set[TableCapability] = {
    Set(TableCapability.MICRO_BATCH_READ, TableCapability.CONTINUOUS_READ).asJava
  }
  override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = () => new Scan {
    override def readSchema(): StructType = RateStreamProvider.SCHEMA
    override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream =
      new RateStreamMicroBatchStream(
        rowsPerSecond, rampUpTimeSeconds, numPartitions, options, checkpointLocation)
    // Note: the continuous stream does not use rampUpTimeSeconds.
    override def toContinuousStream(checkpointLocation: String): ContinuousStream =
      new RateStreamContinuousStream(rowsPerSecond, numPartitions)
  }
}
object RateStreamProvider {
  // Fixed output schema: event timestamp plus an auto-incrementing long value.
  val SCHEMA =
    StructType(StructField("timestamp", TimestampType) :: StructField("value", LongType) :: Nil)
  val VERSION = 1
  // Option keys accepted by this source (see the provider's class-level docs).
  val NUM_PARTITIONS = "numPartitions"
  val ROWS_PER_SECOND = "rowsPerSecond"
  val RAMP_UP_TIME = "rampUpTime"
  /** Calculate the end value we will emit at the time `seconds`. */
  def valueAtSecond(seconds: Long, rowsPerSecond: Long, rampUpTimeSeconds: Long): Long = {
    // E.g., rampUpTimeSeconds = 4, rowsPerSecond = 10
    // Then speedDeltaPerSecond = 2
    //
    // seconds   = 0 1 2  3  4  5  6
    // speed     = 0 2 4  6  8 10 10 (speedDeltaPerSecond * seconds)
    // end value = 0 2 6 12 20 30 40 (0 + speedDeltaPerSecond * seconds) * (seconds + 1) / 2
    val speedDeltaPerSecond = rowsPerSecond / (rampUpTimeSeconds + 1)
    if (seconds <= rampUpTimeSeconds) {
      // Calculate "(0 + speedDeltaPerSecond * seconds) * (seconds + 1) / 2" in a special way to
      // avoid overflow: divide the even factor (one of `seconds` / `seconds + 1`
      // is always even) by 2 first, so the division is exact and the
      // intermediate product stays as small as possible.
      if (seconds % 2 == 1) {
        (seconds + 1) / 2 * speedDeltaPerSecond * seconds
      } else {
        seconds / 2 * speedDeltaPerSecond * (seconds + 1)
      }
    } else {
      // rampUpPart is just a special case of the above formula: rampUpTimeSeconds == seconds
      // After ramp-up, rows accrue linearly at the full rowsPerSecond rate.
      val rampUpPart = valueAtSecond(rampUpTimeSeconds, rowsPerSecond, rampUpTimeSeconds)
      rampUpPart + (seconds - rampUpTimeSeconds) * rowsPerSecond
    }
  }
}
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RateStreamProvider.scala | Scala | apache-2.0 | 5,860 |
package latis.time
import latis.util.LatisProperties
object TimeScaleType extends Enumeration {
type TimeScaleType = Value
val NATIVE = Value("NATIVE")
val UTC = Value("UTC")
val TAI = Value("TAI")
//Note, using def instead of lazy val to support tests.
def default: TimeScaleType = LatisProperties.get("time.scale.type") match {
case Some(s) => TimeScaleType.withName(s) //TODO: handle error
case None => NATIVE
}
} | dlindhol/LaTiS | src/main/scala/latis/time/TimeScaleType.scala | Scala | epl-1.0 | 451 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xml
import cats.syntax.option._
import org.orbeon.dom.QName
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.util.StaticXPath
import org.orbeon.oxf.util.StaticXPath.{GlobalConfiguration, SaxonConfiguration, ValueRepresentationType}
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.saxon.expr.parser.ExpressionTool
import org.orbeon.saxon.expr.sort.{CodepointCollator, GenericAtomicComparer}
import org.orbeon.saxon.expr.{EarlyEvaluationContext, Expression}
import org.orbeon.saxon.functions.DeepEqual
import org.orbeon.saxon.ma.arrays.ImmutableArrayItem
import org.orbeon.saxon.ma.map.HashTrieMap
import org.orbeon.saxon.ma.parray.ImmList
import org.orbeon.saxon.model.{AtomicType, BuiltInType, Converter, Type}
import org.orbeon.saxon.om
import org.orbeon.saxon.om._
import org.orbeon.saxon.pattern.{NameTest, NodeKindTest}
import org.orbeon.saxon.tree.iter.{EmptyIterator, ListIterator, SingletonIterator}
import org.orbeon.saxon.utils.Configuration
import org.orbeon.saxon.value._
import org.orbeon.scaxon.Implicits
import org.w3c.dom.Node._
import java.io.PrintStream
import java.net.URI
import scala.jdk.CollectionConverters._
import scala.util.Try
import scala.util.control.Breaks.{break, breakable}
object SaxonUtils {
// Version of `StringValue` which supports `equals()` (universal equality).
// Saxon throws on `equals()` to make a point that a collation should be used for `StringValue` comparison.
// Here, we don't really care about equality, but we want to implement `equals()` as e.g. Jetty calls `equals()` on
// objects stored into the session. See:
// http://forge.ow2.org/tracker/index.php?func=detail&aid=315528&group_id=168&atid=350207
  // Wraps a string value so that universal equality works: Saxon's own `StringValue.equals()`
  // throws, but containers such as Jetty's session store call `equals()` on stored objects.
  class StringValueWithEquals(value: CharSequence) extends StringValue(value) {
    override def equals(other: Any): Boolean = {
      // Compare the CharSequence
      // NOTE(review): `==` on two arbitrary `CharSequence`s delegates to `equals`, which for some
      // CharSequence implementations is identity-based — presumably both sides are the same
      // concrete type here; confirm. Also `hashCode` uses `value` while this compares
      // `getStringValueCS` — verify the two always agree for equal values.
      other.isInstanceOf[StringValue] && getStringValueCS == other.asInstanceOf[StringValue].getStringValueCS
    }
    override def hashCode(): Int = value.hashCode()
  }
  // Replace a plain `StringValue` with a `StringValueWithEquals` so it supports `equals()`;
  // any other item is returned unchanged.
  def fixStringValue[V <: Item](item: V): V =
    item match {
      case v: StringValue => new StringValueWithEquals(v.getStringValueCS).asInstanceOf[V] // we know it's ok...
      case v => v
    }
  // Effective boolean value of the iterator
  def effectiveBooleanValue(iterator: SequenceIterator): Boolean =
    ExpressionTool.effectiveBooleanValue(iterator)
  // Pre-order traversal of the expression tree: the expression itself, then all sub-expressions.
  def iterateExpressionTree(e: Expression): Iterator[Expression] =
    Iterator(e) ++
      (e.operands.iterator.asScala flatMap (o => iterateExpressionTree(o.getChildExpression)))
  // Local names of all `OrbeonVariableReference`s anywhere in the expression tree.
  def iterateExternalVariableReferences(expr: Expression): Iterator[String] = {
    SaxonUtils.iterateExpressionTree(expr) collect {
      case vr: OrbeonVariableReference =>
        vr.name.getLocalPart
    }
  }
// Parse the given qualified name and return the separated prefix and local name
def parseQName(lexicalQName: String): (String, String) = {
val checker = NameChecker
val parts = checker.getQNameParts(lexicalQName)
(parts(0), parts(1))
}
// Make an NCName out of a non-blank string
// Any characters that do not belong in an NCName are converted to `_`.
// If `keepFirstIfPossible == true`, prepend `_` if first character is allowed within NCName and keep first character.
//@XPathFunction
def makeNCName(name: String, keepFirstIfPossible: Boolean): String = {
require(name.nonAllBlank, "name must not be blank or empty")
val name10Checker = NameChecker
if (name10Checker.isValidNCName(name)) {
name
} else {
val sb = new StringBuilder
val start = name.charAt(0)
if (name10Checker.isNCNameStartChar(start))
sb.append(start)
else if (keepFirstIfPossible && name10Checker.isNCNameChar(start)) {
sb.append('_')
sb.append(start)
} else
sb.append('_')
for (i <- 1 until name.length) {
val ch = name.charAt(i)
sb.append(if (name10Checker.isNCNameChar(ch)) ch else '_')
}
sb.toString
}
}
  // 2020-12-05:
  //
  // - Called only from `XFormsVariableControl`
  // - With Saxon 10, this will be one of :
  //   - `SequenceExtent` reduced to `AtomicValue`, `NodeInfo`, or `Function` (unsupported yet below)
  //   - `SequenceExtent` with more than one item
  //   - `EmptySequence`
  // - Also, if a sequence, it's already reduced.
  //
  // Structural equality of two grounded values: empty vs. empty, item vs. item, or
  // element-wise comparison of two sequence extents. Mixed kinds compare as unequal.
  def compareValueRepresentations(valueRepr1: GroundedValue, valueRepr2: GroundedValue): Boolean =
    (
      // Ideally we wouldn't support `null` here but `XFormsVariableControl` can pass `null`
      if (valueRepr1 ne null) valueRepr1 else EmptySequence,
      if (valueRepr2 ne null) valueRepr2 else EmptySequence
    ) match {
      case (EmptySequence, EmptySequence) => true // NOTE: `EmptySequence` is not an item.
      case (EmptySequence, _) => false
      case (_, EmptySequence) => false
      case (v1: Item, v2: Item) => compareItems(v1, v2)
      case ( _: Item, _) => false
      case (_, _ : Item) => false
      case (v1: SequenceExtent, v2: SequenceExtent) => compareSequenceExtents(v1, v2)
      case (_ : SequenceExtent, _) => false
      case (_, _: SequenceExtent) => false
      case _ => throw new IllegalStateException
    }
  // True iff both iterables have the same size and items compare equal pairwise.
  def compareItemSeqs(nodeset1: Iterable[Item], nodeset2: Iterable[Item]): Boolean =
    nodeset1.size == nodeset2.size &&
      (nodeset1.iterator.zip(nodeset2.iterator) forall (compareItems _).tupled)
  // Pairwise comparison of two sequence extents of identical length.
  private def compareSequenceExtents(v1: SequenceExtent, v2: SequenceExtent): Boolean =
    v1.getLength == v2.getLength &&
      (v1.iterator.asScala.zip(v2.iterator.asScala) forall (compareItems _).tupled)
  // Equality of two individual items. String values are compared by codepoint (Saxon's own
  // `StringValue.equals()` throws), atomic values and nodes by their `equals`, and function
  // items are not supported. Items of different kinds compare as unequal.
  def compareItems(item1: Item, item2: Item): Boolean =
    (
      if (item1 ne null) item1 else EmptySequence, // TODO: I don't think any caller passes `null`. If so, get rid of this once we test.
      if (item2 ne null) item2 else EmptySequence
    ) match {
      case (EmptySequence, EmptySequence) => true
      case (EmptySequence, _) => false
      case (_, EmptySequence) => false
      // `StringValue.equals()` throws (Saxon equality requires a collation)
      case (v1: StringValue, v2: StringValue) => v1.codepointEquals(v2)
      case ( _: StringValue, _ ) => false
      case ( _, _: StringValue) => false
      case (v1: AtomicValue, v2: AtomicValue) => v1 == v2
      case ( _: AtomicValue, _) => false
      case (_, _ : AtomicValue) => false
      case (v1: NodeInfo, v2: NodeInfo) => v1 == v2
      case (_ : NodeInfo, _) => false
      case (_, _ : NodeInfo) => false
      case ( _: Function, _ : Function) => throw new UnsupportedOperationException
      case ( _: Function, _) => throw new UnsupportedOperationException
      case (_, _ : Function) => throw new UnsupportedOperationException
      case _ => throw new IllegalStateException
    }
def buildNodePath(node: NodeInfo): List[String] = {
def findNodePosition(node: NodeInfo): Int = {
val nodeTestForSameNode =
node.getNodeKind match {
case Type.ELEMENT | Type.ATTRIBUTE | Type.PROCESSING_INSTRUCTION | Type.NAMESPACE =>
new NameTest(node.getNodeKind, node.getURI, node.getLocalPart, node.getConfiguration.getNamePool)
case _ =>
NodeKindTest.makeNodeKindTest(node.getNodeKind)
}
val precedingAxis =
node.iterateAxis(AxisInfo.PRECEDING_SIBLING, nodeTestForSameNode)
var i = 1
while (precedingAxis.next() ne null)
i += 1
i
}
def buildOne(node: NodeInfo): List[String] = {
def buildNameTest(node: NodeInfo) =
if (node.getURI == "")
node.getLocalPart
else
s"*:${node.getLocalPart}[namespace-uri() = '${node.getURI}']"
if (node ne null) {
val parent = node.getParent
node.getNodeKind match {
case Type.DOCUMENT =>
Nil
case Type.ELEMENT =>
if (parent eq null) {
List(buildNameTest(node))
} else {
val pre = buildOne(parent)
if (pre == Nil) {
buildNameTest(node) :: pre
} else {
(buildNameTest(node) + '[' + findNodePosition(node) + ']') :: pre
}
}
case Type.ATTRIBUTE =>
("@" + buildNameTest(node)) :: buildOne(parent)
case Type.TEXT =>
("text()[" + findNodePosition(node) + ']') :: buildOne(parent)
case Type.COMMENT =>
"comment()[" + findNodePosition(node) + ']' :: buildOne(parent)
case Type.PROCESSING_INSTRUCTION =>
("processing-instruction()[" + findNodePosition(node) + ']') :: buildOne(parent)
case Type.NAMESPACE =>
var test = node.getLocalPart
if (test.isEmpty) {
test = "*[not(local-name()]"
}
("namespace::" + test) :: buildOne(parent)
case _ =>
throw new IllegalArgumentException
}
} else {
throw new IllegalArgumentException
}
}
buildOne(node).reverse
}
  // Wrap a plain Java/Scala value into the corresponding Saxon `GroundedValue`.
  // Values that are already `GroundedValue`s pass through unchanged; unsupported
  // types raise an `OXFException`.
  def convertJavaObjectToSaxonObject(o: Any): GroundedValue =
    o match {
      case v: GroundedValue => v
      case v: String => new StringValue(v)
      case v: java.lang.Boolean => BooleanValue.get(v)
      case v: java.lang.Integer => new Int64Value(v.toLong)
      case v: java.lang.Float => new FloatValue(v)
      case v: java.lang.Double => new DoubleValue(v)
      case v: URI => new AnyURIValue(v.toString)
      case _ => throw new OXFException(s"Invalid variable type: ${o.getClass}")
    }
// Return `true` iif `potentialAncestor` is an ancestor of `potentialDescendant`
def isFirstNodeAncestorOfSecondNode(
potentialAncestor : NodeInfo,
potentialDescendant : NodeInfo,
includeSelf : Boolean
): Boolean = {
var parent = if (includeSelf) potentialDescendant else potentialDescendant.getParent
while (parent ne null) {
if (parent.isSameNodeInfo(potentialAncestor))
return true
parent = parent.getParent
}
false
}
  // Deep equality of two item sequences via Saxon's `DeepEqual`, comparing prefixes,
  // comments, processing instructions, and string values. When requested, whitespace-only
  // text nodes are excluded both at the top level (filtered here) and within subtrees
  // (via the `EXCLUDE_WHITESPACE_TEXT_NODES` flag).
  def deepCompare(
    config                     : Configuration,
    it1                        : Iterator[om.Item],
    it2                        : Iterator[om.Item],
    excludeWhitespaceTextNodes : Boolean
  ): Boolean = {

    // Do our own filtering of top-level items as Saxon's `DeepEqual` doesn't
    def filterWhitespaceNodes(item: om.Item): Boolean = item match {
      case n: om.NodeInfo => ! Whitespace.isWhite(n.getStringValueCS)
      case _ => true
    }

    DeepEqual.deepEqual(
      Implicits.asSequenceIterator(if (excludeWhitespaceTextNodes) it1 filter filterWhitespaceNodes else it1),
      Implicits.asSequenceIterator(if (excludeWhitespaceTextNodes) it2 filter filterWhitespaceNodes else it2),
      new GenericAtomicComparer(CodepointCollator.getInstance, config.getConversionContext),
      new EarlyEvaluationContext(config),
      DeepEqual.INCLUDE_PREFIXES |
        DeepEqual.INCLUDE_COMMENTS |
        DeepEqual.COMPARE_STRING_VALUES |
        DeepEqual.INCLUDE_PROCESSING_INSTRUCTIONS |
        (if (excludeWhitespaceTextNodes) DeepEqual.EXCLUDE_WHITESPACE_TEXT_NODES else 0)
    )
  }
  // These are here to abstract some differences between Saxon 9 and 10
  def getStructuredQNameLocalPart(qName: om.StructuredQName): String = qName.getLocalPart
  def getStructuredQNameURI      (qName: om.StructuredQName): String = qName.getURI
  def itemIterator(i: om.Item): SequenceIterator = SingletonIterator.makeIterator(i)
  def listIterator(s: Seq[om.Item]): SequenceIterator = new ListIterator(s.asJava)
  def emptyIterator: SequenceIterator = EmptyIterator.getInstance
  def valueAsIterator(v: ValueRepresentationType): SequenceIterator = if (v eq null) emptyIterator else v.iterate()
  // Resolve an `xml:id`/DTD id within the node's tree; `getParent = false` returns the
  // identified element itself.
  def selectID(node: NodeInfo, id: String): Option[NodeInfo] =
    Option(node.getTreeInfo.selectID(id, getParent = false))
  // Build a Saxon XDM map item from a Scala map.
  def newMapItem(map: Map[AtomicValue, ValueRepresentationType]): Item = {
    val m = new HashTrieMap
    map foreach { case (k, v) => m.initialPut(k, v)}
    m
  }
  // Build a Saxon XDM array item from a Scala sequence.
  def newArrayItem(v: Seq[GroundedValue]): Item =
    new ImmutableArrayItem(ImmList.fromList(v.asJava))
  // True when Saxon has a language-specific numberer for `lang` (as opposed to a fallback).
  def hasXPathNumberer(lang: String): Boolean =
    GlobalConfiguration.makeNumberer(lang, null).getClass.getName.endsWith("Numberer_" + lang)
  def isValidNCName(name: String): Boolean =
    NameChecker.isValidNCName(name)
  def isValidNmtoken(name: String): Boolean =
    NameChecker.isValidNmtoken(name)
  val ChildAxisInfo: Int = AxisInfo.CHILD
  val AttributeAxisInfo: Int = AxisInfo.ATTRIBUTE
  val NamespaceType: Short = Type.NAMESPACE
  // Not implemented on this platform (JS build); always throws.
  def getInternalPathForDisplayPath(namespaces: Map[String, String], path: String): String =
    throw new NotImplementedError("getInternalPathForDisplayPath")
  // True when the bound node has an attribute matching `att`'s name and string value.
  def attCompare(boundNodeOpt: Option[om.NodeInfo], att: om.NodeInfo): Boolean =
    boundNodeOpt exists (_.getAttributeValue(att.getURI, att.getLocalPart) == att.getStringValue)
  // Read the element's `xsi:type` attribute, if any, and resolve its prefix against the
  // element's in-scope namespaces. Returns `None` when the attribute is absent or the
  // prefix cannot be resolved.
  def xsiType(elem: om.NodeInfo): Option[QName] = {
    val fp = om.StandardNames.XSI_TYPE
    val typeQName = elem.getAttributeValue(om.StandardNames.getURI(fp), om.StandardNames.getLocalName(fp))
    if (typeQName ne null) {
      val parts = NameChecker.getQNameParts(typeQName)
      // No prefix
      if (parts(0).isEmpty)
        return QName(parts(1)).some
      // There is a prefix, resolve it
      val namespaceNodes = elem.iterateAxis(StaticXPath.NamespaceAxisType)
      breakable {
        while (true) {
          val currentNamespaceNode = namespaceNodes.next()
          if (currentNamespaceNode eq null)
            break()
          val prefix = currentNamespaceNode.getLocalPart
          if (prefix == parts(0))
            return QName(parts(1), "", currentNamespaceNode.getStringValue).some
        }
      }
    }
    None
  }
  // Convert a string value to the built-in atomic type named by `newTypeLocalName`.
  // The outer `Try` fails when the type name is unknown (the cast to `AtomicType` throws);
  // the inner `Try(...).toOption` maps a conversion failure to `Success(None)`.
  def convertType(
    value            : StringValue,
    newTypeLocalName : String,
    config           : SaxonConfiguration
  ): Try[Option[AtomicValue]] = Try {
    val targetType = BuiltInType.getSchemaTypeByLocalName(newTypeLocalName).asInstanceOf[AtomicType]
    Try(Converter.convert(value, targetType, config.getConversionRules)).toOption
  }
  // Create a fingerprinted path of the form: `3142/1425/@1232` from a node.
  // Each path element is the name-pool fingerprint of an element (or `@`-prefixed attribute)
  // on the ancestor-or-self chain, excluding the document node and the root element.
  def createFingerprintedPath(node: om.NodeInfo): String = {

    // Create an immutable list with ancestor-or-self nodes up to but not including the document node
    var ancestorOrSelf: List[om.NodeInfo] = Nil
    var currentNode = node
    while (currentNode != null && currentNode.getNodeKind != DOCUMENT_NODE) {
      ancestorOrSelf = currentNode :: ancestorOrSelf
      currentNode = currentNode.getParent
    }

    // Fingerprint representation of the element and attribute nodes
    val pathElements =
      if (ancestorOrSelf.size > 1) { // first is the root element, which we skip as that corresponds to instance('...')
        val namePool = node.getConfiguration.getNamePool
        ancestorOrSelf.tail map { node =>

          val codePrefix =
            node.getNodeKind match {
              case ELEMENT_NODE   => ""
              case ATTRIBUTE_NODE => "@"
            }

          // NOTE: Our `NodeWrapper` no longer supports fingerprints, so we must support them here,
          // unless we find a better path encoding.
          codePrefix + (
            if (node.hasFingerprint)
              node.getFingerprint
            else
              namePool.allocateFingerprint(node.getURI, node.getLocalPart)
          )
        }
      } else
        Nil

    pathElements mkString "/"
  }
}
| orbeon/orbeon-forms | core-cross-platform/js/src/main/scala/org/orbeon/oxf/xml/SaxonUtils.scala | Scala | lgpl-2.1 | 17,116 |
import scala.util.parsing.combinator._
import scala.util.parsing.input.Reader
import scala.util.parsing.input.CharArrayReader.EofCh
import scala.collection.mutable.{Map, HashMap}
import scala.pickling.testing.PicklingBenchmark
import scala.io.Source
import scala.util.Random
// for Java Serialization:
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectOutputStream, ObjectInputStream}
import scala.pickling._
import scala.pickling.Defaults._
import scala.pickling.binary._
// for invalid characters in source files
import java.nio.charset.CodingErrorAction
import scala.io.Codec
/**
 * A labelled vertex of the benchmark graph, holding a mutable adjacency list.
 */
final class Vertex(val label: String, var neighbors: List[Vertex]) extends Serializable {
  //var graph: Graph = null

  /** Prepends `v` to this vertex's neighbor list. */
  def connectTo(v: Vertex) {
    neighbors = v :: neighbors
  }

  /**
   * Structural equality against a *distinct* vertex: same label, same number of
   * neighbors, and pairwise-equal neighbor labels. A vertex is never `sameAs` itself.
   */
  def sameAs(other: Vertex): Boolean =
    (this ne other) &&
      label == other.label &&
      neighbors.length == other.neighbors.length &&
      neighbors.corresponds(other.neighbors)((a, b) => a.label == b.label)

  override def toString = s"Vertex($label)"
}
/**
 * Mutable container of vertices for the benchmark graph.
 */
final class Graph extends Serializable {

  var vertices: Vector[Vertex] = Vector()

  /** Prepends `v` to the vertex list and returns it, so call sites can chain. */
  def addVertex(v: Vertex): Vertex = {
    //v.graph = this
    vertices = v +: vertices
    v
  }

  /**
   * Structural equality against a *distinct* graph: same vertex count and
   * pairwise `sameAs` vertices, in order.
   */
  def sameAs(other: Graph): Boolean =
    (this ne other) &&
      vertices.length == other.vertices.length &&
      vertices.corresponds(other.vertices)((a, b) => a.sameAs(b))
}
// Parses the wikipedia link file into a `Graph`. Each input line is
// `<source>: <target> <target> ...`; vertices are interned in the shared
// `vertices` map so repeated labels map to the same `Vertex` instance.
// NOTE(review): `vertices` is shared mutable state across `readChunk` calls — not thread-safe.
object GraphReader extends RegexParsers {
  override def skipWhitespace = false

  lazy val token: Parser[String] =
    """\\S+""".r

  lazy val edgeline: Parser[List[String]] =
    repsep(token, whiteSpace)

  // Interning table: label -> vertex, shared across chunks
  val vertices: Map[String, Vertex] = new HashMap[String, Vertex]

  def tokenize(line: String): List[String] =
    tokenize(line, x => throw new Exception(x))

  // Split a line into whitespace-separated tokens; on parse failure, invoke
  // `onError` and return the empty list.
  def tokenize(line: String, onError: String => Unit): List[String] =
    parse(edgeline, line.trim) match {
      case Success(args, _) => args
      case NoSuccess(msg, rest) => onError(msg); List()
    }

  // Consume lines until the graph holds more than `size` vertices (or input ends).
  // The first token of a line (with its trailing ':' stripped) is the source vertex;
  // the remaining tokens are its neighbors.
  def readChunk(lines: Iterator[String], names: Map[String, String], size: Int): Graph = {
    val graph = new Graph

    for (line <- lines) {
      val labels = tokenize(line)
      //println("read labels " + labels)
      val firstLabel = labels.head.substring(0, labels.head.length - 1)
      val firstVertexOpt = vertices.get(firstLabel)
      val firstVertex =
        if (firstVertexOpt.isEmpty) graph.addVertex(new Vertex(names(firstLabel), List()))
        else firstVertexOpt.get
      vertices.put(firstLabel, firstVertex)

      val targetVertices = for (targetLabel <- labels.tail) yield {
        val vertexOpt = vertices.get(targetLabel)
        if (vertexOpt.isEmpty) {
          val newVertex = graph.addVertex(new Vertex(names(targetLabel), List()))
          vertices.put(targetLabel, newVertex)
          newVertex
        } else {
          vertexOpt.get
        }
      }
      firstVertex.neighbors = targetVertices

      if (graph.vertices.length > size) return graph
    }

    graph
  }

  // Debug helper: dump the adjacency lists to stdout.
  def printGraph(g: Graph): Unit = {
    for (v <- g.vertices) {
      print(v.label + ":")
      for (to <- v.neighbors) {
        print(" " + to.label)
      }
      println()
    }
  }
}
// Loads the wikipedia titles/links benchmark data. NOTE: all the file I/O below runs
// eagerly at object initialization; `lines` is a single shared iterator, so successive
// `readChunk` calls consume successive portions of the links file.
object WikiGraph {
  val titlesPath = "benchmark/data/titles-sorted.txt"
  val linksPath = "benchmark/data/links-sorted.txt"

  // Replace (rather than fail on) malformed/unmappable bytes in the data files
  implicit val codec = Codec("UTF-8")
  codec.onMalformedInput(CodingErrorAction.REPLACE)
  codec.onUnmappableCharacter(CodingErrorAction.REPLACE)

  // Index (as string) -> page title, with a fallback for unknown indices
  val names: Map[String, String] = new HashMap[String, String] {
    override def default(label: String) = {
      "no_title[" + label + "]"
    }
  }
  // println("Building page title map...")
  val titles = Source.fromFile(titlesPath).getLines()
  for ((title, i) <- titles.zipWithIndex)
    names.put("" + i, title)

  // println("Reading wikipedia graph from file... " + linksPath)
  val lines: Iterator[String] = Source.fromFile(linksPath).getLines()

  def readChunk(size: Int): Graph = GraphReader.readChunk(lines, names, size)
  //GraphReader.printGraph(wikigraph)
  // println("#vertices: " + wikigraph.vertices.size)
}
// Shared fixture for the serialization benchmarks below: loads a graph chunk once.
// NOTE(review): `size` presumably comes from `PicklingBenchmark` — confirm.
trait WikiGraphBenchmark extends PicklingBenchmark {
  val data = {
    // println(size)
    val result = WikiGraph.readChunk(size)
    // println("#vertices: " + result.vertices.size)
    result
  }
}
// Benchmarks scala-pickling on the graph. The explicit `FastTypeTag`s and
// pickler/unpickler instances below pin implicit resolution; the `"boom!"` local
// shadowing trick forces the `implicitly` calls to derive new instances rather
// than recursively selecting the lazy val being defined.
object WikiGraphPicklingBench extends WikiGraphBenchmark {
  implicit val VertexTag = FastTypeTag.materializeFastTypeTag[Vertex]
  implicit val GraphTag = FastTypeTag.materializeFastTypeTag[Graph]
  implicit val StringTag = FastTypeTag.materializeFastTypeTag[String]
  implicit val ColonColonVertexTag = FastTypeTag.materializeFastTypeTag[::[Vertex]]
  import scala.reflect.runtime.{universe => ru}
  implicit val myLittlePony: ru.Mirror = ru.runtimeMirror(getClass.getClassLoader)
  implicit val VectorVertexTag = FastTypeTag.materializeFastTypeTag[Vector[Vertex]]
  implicit val ListVertexTag = FastTypeTag.materializeFastTypeTag[List[Vertex]]
  implicit val NilTag = FastTypeTag.materializeFastTypeTag[Nil.type]
  // TODO - why does this no longer compile?
  implicit val picklerNil = DPickler.genDPickler[Nil.type]
  implicit val unpicklerNil = implicitly[Unpickler[Nil.type]]
  implicit lazy val picklerVertex: Pickler[Vertex] = {
    val picklerVertex = "boom!"
    implicitly[Pickler[Vertex]]
  }
  implicit lazy val unpicklerVertex: Unpickler[Vertex] = {
    val unpicklerVertex = "boom!"
    implicitly[Unpickler[Vertex]]
  }
  // NOTE: doesn't work well either
  // implicit object PicklerUnpicklerColonColonVertex extends scala.pickling.Pickler[::[Vertex]] with scala.pickling.Unpickler[::[Vertex]] {
  //   import scala.reflect.runtime.universe._
  //   import scala.pickling._
  //   import scala.pickling.`package`.PickleOps
  //   val format = implicitly[BinaryPickleFormat]
  //   def pickle(picklee: ::[Vertex], builder: PBuilder): Unit = {
  //     builder.hintTag(ColonColonVertexTag)
  //     builder.beginEntry(picklee)
  //     val arr = picklee.toArray
  //     val length = arr.length
  //     builder.beginCollection(arr.length)
  //     var i = 0
  //     while (i < arr.length) {
  //       builder putElement { b =>
  //         b.hintTag(VertexTag)
  //         b.hintStaticallyElidedType()
  //         arr(i).pickleInto(b)
  //       }
  //       i += 1
  //     }
  //     builder.endCollection(i)
  //     builder.endEntry()
  //   }
  //   def unpickle(tag: => scala.pickling.FastTypeTag[_], reader: PReader): Any = {
  //     val arrReader = reader.beginCollection()
  //     val length = arrReader.readLength()
  //     if (length == 1) {
  //       List(arrReader.readElement().unpickle[Vertex])
  //     } else {
  //       var buffer = scala.collection.mutable.ListBuffer[Vertex]()
  //       var i = 0
  //       while (i < length) {
  //         val r = arrReader.readElement()
  //         val elem = r.unpickle[Vertex]
  //         buffer += elem
  //         i += 1
  //       }
  //       arrReader.endCollection()
  //       buffer.toList
  //     }
  //   }
  // }
  implicit lazy val picklerUnpicklerColonColonVertex: Pickler[::[Vertex]] with Unpickler[::[Vertex]] = implicitly
  implicit lazy val picklerUnpicklerVectorVertex: Pickler[Vector[Vertex]] with Unpickler[Vector[Vertex]] = Defaults.vectorPickler[Vertex]
  implicit val picklerGraph = implicitly[Pickler[Graph]]
  implicit val unpicklerGraph = implicitly[Unpickler[Graph]]

  // Measured operation: pickle the graph and unpickle it back.
  override def run(): Unit = {
    val pickle = data.pickle
    val res = pickle.unpickle[Graph]
  }
}
/**
 * Baseline benchmark: round-trips the graph through plain Java serialization.
 */
object WikiGraphJavaBench extends WikiGraphBenchmark {
  override def run(): Unit = {
    // Serialize the graph into an in-memory byte array
    val byteStream = new ByteArrayOutputStream()
    val objectOut  = new ObjectOutputStream(byteStream)
    objectOut.writeObject(data)
    val bytes = byteStream.toByteArray()
    // println("Bytes: " + bytes.length)
    // Deserialize it back
    val objectIn = new ObjectInputStream(new ByteArrayInputStream(bytes))
    val res = objectIn.readObject.asInstanceOf[Graph]
  }
}
// Benchmarks Kryo serialization of the graph. A fresh serializer and a large,
// randomly-sized scratch buffer are allocated per run (the random size defeats
// buffer-reuse effects between iterations).
object WikiGraphKryoBench extends WikiGraphBenchmark {
  var ser: KryoSerializer = _

  override def tearDown() {
    ser = null
  }

  override def run() {
    val rnd: Int = Random.nextInt(10)
    val arr = Array.ofDim[Byte](32 * 2048 * 2048 + rnd)
    ser = new KryoSerializer
    val pickled = ser.toBytes(data, arr)
    // println("Size: "+pickled.length)
    // TODO: uncrash this
    // val res = ser.fromBytes[Graph](pickled)
  }
}
| phaller/pickling | benchmark/WikiGraph.scala | Scala | bsd-3-clause | 8,469 |
package cromwell.engine.backend.runtimeattributes
import AttributeMap.EnhancedBackendType
import cromwell.engine.backend.BackendType
object AttributeMap {
  // Enriches `BackendType` with the set of runtime keys it supports.
  implicit class EnhancedBackendType(val backendType: BackendType) extends AnyVal {
    def supportedKeys: Set[RuntimeKey] = for {
      key <- RuntimeKey.values().toSet
      if key.supports(backendType)
    } yield key
  }
}
/**
 * Runtime attributes keyed by attribute name, each holding one or more raw values.
 */
case class AttributeMap(attrs: Map[String, Seq[String]]) {

  /** First value for `key`, if the key is present and non-empty. */
  def get(key: RuntimeKey): Option[String] = getSeq(key).flatMap(_.headOption)

  /** All values for `key`, if present. */
  def getSeq(key: RuntimeKey): Option[Seq[String]] = attrs.get(key.key)

  /**
   * Either an empty sequence (all keys supported by `backendType`) or a single
   * human-readable message listing the unsupported keys, sorted.
   */
  def unsupportedKeys(backendType: BackendType): Seq[String] = {
    val unsupported = attrs.keySet -- (backendType.supportedKeys map (_.key))
    if (unsupported.isEmpty)
      Vector.empty
    else
      Vector(s"Found unsupported keys for backend '$backendType': " + unsupported.toSeq.sorted.mkString(", "))
  }
}
| dgtester/cromwell | src/main/scala/cromwell/engine/backend/runtimeattributes/AttributeMap.scala | Scala | bsd-3-clause | 961 |
package org.jetbrains.plugins.scala
package lang
package refactoring
package namesSuggester
import com.intellij.openapi.util.text.StringUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScLiteral, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.refactoring.ScalaNamesValidator.isIdentifier
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.genericTypes.{GenericTypeNamesProvider, TypePluralNamesProvider}
import org.jetbrains.plugins.scala.lang.refactoring.util.{ScalaTypeValidator, ScalaValidator, ScalaVariableValidator}
import scala.collection.mutable
/**
* @author Alexander Podkhalyuzin
* @since 26.06.2008
*/
object NameSuggester {
  // Fallback name used when no candidate survives filtering.
  private val DefaultName = "value"

  // Suggest variable names for an expression, validated/uniquified by `validator`.
  def suggestNames(expression: ScExpression)
                  (implicit validator: ScalaVariableValidator = ScalaVariableValidator.empty): Seq[String] =
    collectNames(namesByExpression(expression))

  // Names derived from the expression's type(s). Unit-typed candidates are sorted
  // to the front and the whole list reversed, so Unit ends up last.
  private[this] def namesByType(expression: ScExpression): Seq[String] = {
    def collectTypes: Seq[ScType] = {
      val types = expression.`type`().toOption ++
        expression.getTypeWithoutImplicits().toOption ++
        expression.getTypeIgnoreBaseType.toOption

      types.toSeq.sortWith {
        case (_, t) if t.isUnit => true
        case _ => false
      }.reverse
    }

    collectTypes.flatMap(namesByType(_))
  }
  // Deduplicate (preserving first-seen order), replace the reserved word "class" with
  // "clazz", keep only valid identifiers, reverse so later candidates come first, and
  // validate each name. Falls back to `DefaultName` when nothing survives.
  private[this] def collectNames(names: Seq[String])
                                (implicit validator: ScalaValidator): Seq[String] = {
    // Redundant with the file-level `import scala.collection.mutable` but harmless.
    import scala.collection.mutable

    val filteredNames = mutable.LinkedHashSet(names: _*).map {
      case "class" => "clazz"
      case name => name
    }.filter(isIdentifier)

    val collected = filteredNames.toSeq match {
      case Seq() => Seq(DefaultName)
      case seq => seq.reverse
    }

    mutable.LinkedHashSet(collected: _*)
      .map(validator.validateName)
      .toSeq
  }
  // Suggest variable names for a type, validated/uniquified by `validator`.
  def suggestNamesByType(`type`: ScType)
                        (implicit validator: ScalaTypeValidator = ScalaTypeValidator.empty): Seq[String] =
    collectNames(namesByType(`type`))

  // Produces a distinct name per call: the first suggestion for a type, suffixed with
  // an increasing counter ("x", "x1", "x2", ...) on repeated use of the same base name.
  class UniqueNameSuggester(defaultName: String = DefaultName) extends (ScType => String) {

    // Base name -> highest suffix issued so far (-1 means "not used yet")
    private val counter = mutable.Map.empty[String, Int].withDefaultValue(-1)

    override def apply(`type`: ScType): String =
      this (suggestNamesByType(`type`))

    def apply(names: Traversable[String]): String = {
      val name = names.headOption.getOrElse(defaultName)
      counter(name) += 1

      name + (counter(name) match {
        case 0 => "" // first occurrence keeps the bare name
        case i => i
      })
    }
  }
  // Name candidates for a type: abbreviated primitives ("i" for Int, "bool" for Boolean, ...),
  // camel-case splits of class/type-parameter names, pluralized element names for arrays,
  // and provider-generated names for parameterized types.
  private[namesSuggester] def namesByType(`type`: ScType, withPlurals: Boolean = true, shortVersion: Boolean = true): Seq[String] = {
    // Lower-case the name, truncated to `length` chars when `shortVersion` is set
    def toLowerCase(name: String, length: Int): String = {
      val lowerCased = name.toLowerCase
      if (shortVersion) lowerCased.substring(0, length) else lowerCased
    }

    def byName(name: String): Seq[String] = name match {
      case "String" => Seq(toLowerCase(name, 3)) // "str"
      case _ => camelCaseNames(name)
    }

    val stdTypes = `type`.projectContext.stdTypes
    import stdTypes._

    // Abbreviation length per primitive: "i", "fl", "bool", etc.
    def valTypeName(`type`: ValType): String = {
      val typeName = `type`.name

      val length = `type` match {
        case Char | Byte | Int | Long | Double => 1
        case Short | Float => 2
        case Boolean => 4
        case _ => typeName.length
      }

      toLowerCase(typeName, length)
    }

    `type` match {
      case valType: ValType => Seq(valTypeName(valType))
      case ScDesignatorType(e) => byName(e.name)
      case parameterType: TypeParameterType => byName(parameterType.name)
      case ScProjectionType(_, e) => byName(e.name)
      case ScCompoundType(Seq(head, _*), _, _) => namesByType(head, withPlurals)
      case JavaArrayType(argument) =>
        TypePluralNamesProvider.pluralizeNames(argument)
      case genericType: ScParameterizedType =>
        GenericTypeNamesProvider.providers
          .flatMap(_.names(genericType))
      case _ => Seq.empty
    }
  }
  // Name candidates for an expression, by its syntactic shape: fixed names for
  // this/super references, camel-case splits for plain references, parameter-aware
  // names for constructor/method calls, literal content for string literals, and
  // context-derived names (assignment target or matched parameter) plus type-based
  // names for everything else.
  private[this] def namesByExpression: ScExpression => Seq[String] = {
    case _: ScThisReference => Seq("thisInstance")
    case _: ScSuperReference => Seq("superInstance")
    case reference: ScReferenceElement if reference.refName != null =>
      camelCaseNames(reference.refName)
    case definition: ScNewTemplateDefinition =>
      val parameters = definition.constructor.toSeq
        .flatMap(_.matchedParameters)

      enhancedNames(definition, parameters)
    case invocation: MethodInvocation =>
      enhancedNames(invocation, invocation.matchedParameters)
    case literal: ScLiteral if literal.isString =>
      // Only use the literal's content if it is itself a valid identifier
      Option(literal.getValue).collect {
        case string: String if isIdentifier(string.toLowerCase) => string
      }.flatMap(string => camelCaseNames(string).headOption).toSeq
    case expression =>
      val maybeName = expression.getContext match {
        case x: ScAssignStmt => x.assignName
        case x: ScArgumentExprList => x.matchedParameters.collectFirst {
          case (matchedExpression, parameter) if matchedExpression == expression => parameter
        }.map(_.name)
        case _ => None
      }

      maybeName.toSeq ++ namesByType(expression)
  }
  // Combine type-based names with names taken from any argument bound to a parameter
  // literally called "name" (e.g. `new File(name = "config")`), plus their compounds.
  private[this] def enhancedNames(typeable: ScExpression, parameters: Seq[(ScExpression, Parameter)]): Seq[String] = {
    val namesByParameters = parameters.collect {
      case (expression, parameter) if parameter.name == "name" => expression
    }.flatMap(namesByExpression)

    val names = namesByType(typeable)

    names ++ compoundNames(namesByParameters, names) ++ namesByParameters
  }
private[namesSuggester] def compoundNames(firstNames: Seq[String],
lastNames: Seq[String],
separator: String = ""): Seq[String] =
for {
firstName <- firstNames
lastName <- lastNames
} yield s"$firstName$separator${lastName.capitalize}"
  // Split an identifier on its camel-case humps into candidate names, longest first
  // is NOT guaranteed — one candidate per hump, each starting lower-cased at that hump
  // (e.g. "getUserName" -> Seq("userName", "name")). Getter/setter/is prefixes are
  // stripped; all-uppercase names are lower-cased whole; non-letters are scrubbed.
  private[this] def camelCaseNames(name: String): Seq[String] = {
    val actualName = name match {
      case _ if StringUtil.isEmpty(name) =>
        return Seq.empty
      case _ if name.toUpperCase == name =>
        // Acronym-style name: use it whole, lower-cased, letters only
        return Seq(name.toLowerCase)
          .map(_.replaceAll(isNotLetter, ""))
      case _ =>
        val beginIndex = name match {
          case _ if name.startsWith("get") => 3
          case _ if name.startsWith("set") => 3
          case _ if name.startsWith("is") => 2
          case _ => 0
        }
        name.substring(beginIndex)
    }

    val names = actualName.zipWithIndex.collect {
      case (char, index) if index == 0 || char.isLetter && char.isUpper =>
        Character.toLowerCase(char) + actualName.substring(index + 1)
    }

    // Drop any trailing non-letter (e.g. a trailing digit or '_')
    names.map(_.replaceFirst(isNotLetter + "$", ""))
  }

  // Regex matching a single non-alphabetic character
  private[this] val isNotLetter = "[^\\\\p{IsAlphabetic}]"
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/namesSuggester/NameSuggester.scala | Scala | apache-2.0 | 7,233 |
// Compiler benchmark: exercises `.size` on a large tuple (arity well beyond Scala 2's
// 22 limit — relies on Dotty's generic tuples). The repeated `tup.size` calls are
// intentional: each one forces the tuple-size computation again.
class Foo {
  val tup: (Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int) = ???
  tup.size
  tup.size
  tup.size
  tup.size
  tup.size
  tup.size
}
| dotty-staging/dotty | tests/bench/tuple22-size.scala | Scala | apache-2.0 | 267 |
package org.tensorframes
import org.apache.spark.sql.types.{BinaryType, DataType, NumericType}
import org.tensorflow.framework.TensorShapeProto
import scala.collection.JavaConverters._
import org.tensorframes.Shape.DimType
import org.tensorframes.impl.ScalarType
import org.{tensorflow => tf}
/**
* The shape of a tensor.
* @param ds
*/
// Immutable tensor shape. A dimension equal to `Shape.UNKNOWN` (-1) means "unknown size".
class Shape private (private val ds: Array[DimType]) extends Serializable {
  final val dims: IndexedSeq[DimType] = ds
  final def numDims = ds.length

  // True when any dimension is unknown (-1)
  def hasUnknown: Boolean = ds.contains(Shape.UNKNOWN)

  /**
   * The number of elements contained in this shape.
   *
   * If this shape has unknowns, returns None.
   */
  def numElements: Option[Long] = if (hasUnknown) None else Option(ds.product)

  override def toString: String =
    ds.map(x => if (x == Shape.UNKNOWN) { "?" } else {x.toString}).mkString("[",",","]")

  /**
   * Return a shape with an extra leading dimension.
   * @param x the dimension to add as the new head.
   */
  def prepend(x: DimType): Shape = Shape(x +: ds)

  def prepend(x: Int): Shape = Shape(x.toLong +: ds)

  /**
   * Drops the most inner dimension of the shape.
   */
  def dropInner: Shape = Shape(ds.dropRight(1))

  /**
   * A shape with the first dimension dropped.
   */
  def tail: Shape = Shape(ds.tail)

  /**
   * Checks that this shape could be used as a more precise description of the other shape.
   */
  def checkMorePreciseThan(other: Shape): Boolean = {
    if (dims.size != other.dims.size) {
      return false
    }
    // Every known dimension in `other` must match; unknowns in `other` accept anything
    dims.zip(other.dims).forall { case (a, b) => b == Shape.UNKNOWN || b == a }
  }

  override def equals(that: Any): Boolean =
    that match {
      case that: Shape => that.ds.sameElements(ds)
      case _ => false
    }

  // Order-sensitive fold over the dimensions; consistent with `equals` since equal
  // shapes have identical dimension arrays.
  override def hashCode: Int = {
    var res: Long = 1
    ds.foreach(x => res += res * 31 + x)
    res.toInt
  }

  // Convert to the TensorFlow protobuf representation.
  private[tensorframes] def toProto: TensorShapeProto = {
    val b = TensorShapeProto.newBuilder()
    dims.foreach { d =>
      b.addDimBuilder().setSize(d).build()
    }
    b.build()
  }

  // Convert to a TensorFlow Java API shape.
  // NOTE(review): `tf.Shape.make(ds.head, ...)` — presumably never called on an empty
  // shape; `ds.head` would throw otherwise. Confirm callers.
  private[tensorframes] def toTFShape: tf.Shape = {
    tf.Shape.make(ds.head, ds.tail: _*)
  }
}
object Shape {
  type DimType = Long
  // Internal sentinel for an unknown dimension
  private val UNKNOWN: DimType = -1L
  // Public sentinel (Int view) for an unknown dimension
  val Unknown: Int = -1

  def empty: Shape = Shape()

  private[tensorframes] def apply(s: Array[Long]): Shape = {
    // Dimensions must be >= -1 (-1 meaning "unknown"); `toArray` makes a defensive copy
    s.foreach(x => require(x >= -1, s"$s should not contain values <= -2"))
    new Shape(s.toArray)
  }

  def apply(i: Int): Shape = Shape(Array(i.toLong))

  def apply(is: Int*): Shape = Shape(is.map(_.toLong).toArray)

  private[tensorframes] def from(shape: TensorShapeProto): Shape = {
    Shape(shape.getDimList.asScala.map(_.getSize).toArray)
  }

  private[tensorframes] def from(shape: tf.Shape): Shape = {
    apply((0 until shape.numDimensions()).map { shape.size }.toArray)
  }
}
/**
* SparkTF information. This is the information generally required to work on a tensor.
* @param shape the shape of the column (including the number of rows). May contain some unknowns.
* @param dataType the datatype of the scalar. Note that it is either NumericType or BinaryType.
*/
// TODO(tjh) the types supported by TF are much richer (uint8, etc.) but it is not clear
// if they all map to a Catalyst memory representation
// TODO(tjh) support later basic structures for sparse types?
/** Pairing of a column's shape and its scalar datatype (see the comments above). */
case class SparkTFColInfo(shape: Shape, dataType: ScalarType) extends Serializable
/**
 * Exception thrown when the user requests tensors of high order (rank > 1).
 * @param s the offending shape
 */
case class HighDimException(s: Shape)
  extends Exception(s"Shape $s is too high - tensorframes only supports dimensions <= 1 (vectors)")
| databricks/tensorframes | src/main/scala/org/tensorframes/Shape.scala | Scala | apache-2.0 | 3,665 |
/*
* Copyright 2013 Norman Maurer
*
* Norman Maurer, licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.codec
import io.netty.buffer.{CompositeByteBuf, UnpooledByteBufAllocator, ByteBuf, ByteBufAllocator}
import java.nio.ByteOrder
object LittleEndianByteBufAllocator {
  /** Singleton instance of the little-endian allocator (the class holds no per-use state). */
  val INSTANCE = new LittleEndianByteBufAllocator
}
/**
* Allocates ByteBuf which have LITTLE_ENDIAN order.
*/
class LittleEndianByteBufAllocator extends ByteBufAllocator {
  // Delegate performing the actual allocation; buffers are re-ordered to LITTLE_ENDIAN
  // on the way out. The `false` argument is UnpooledByteBufAllocator's preferDirect flag.
  private val allocator = new UnpooledByteBufAllocator(false)

  def isDirectBufferPooled: Boolean = false

  // Plain, I/O, heap and direct buffers: delegate, then wrap in a little-endian view.
  def buffer() = littleEndian(allocator.buffer())
  def buffer(initialCapacity: Int) = littleEndian(allocator.buffer(initialCapacity))
  def buffer(initialCapacity: Int, maxCapacity: Int) = littleEndian(allocator.buffer(initialCapacity, maxCapacity))
  def ioBuffer() = littleEndian(allocator.ioBuffer())
  def ioBuffer(initialCapacity: Int) = littleEndian(allocator.ioBuffer(initialCapacity))
  def ioBuffer(initialCapacity: Int, maxCapacity: Int) = littleEndian(allocator.ioBuffer(initialCapacity, maxCapacity))
  def heapBuffer() = littleEndian(allocator.heapBuffer())
  def heapBuffer(initialCapacity: Int) = littleEndian(allocator.heapBuffer(initialCapacity))
  def heapBuffer(initialCapacity: Int, maxCapacity: Int) = littleEndian(allocator.heapBuffer(initialCapacity, maxCapacity))
  def directBuffer() = littleEndian(allocator.directBuffer())
  def directBuffer(initialCapacity: Int) = littleEndian(allocator.directBuffer(initialCapacity))
  def directBuffer(initialCapacity: Int, maxCapacity: Int): ByteBuf = littleEndian(allocator.directBuffer(initialCapacity, maxCapacity))

  // NOTE(review): composite buffers are returned WITHOUT the little-endian re-ordering —
  // confirm callers never depend on byte order for composite buffers.
  def compositeBuffer() = allocator.compositeBuffer()
  def compositeBuffer(maxNumComponents: Int) = allocator.compositeBuffer(maxNumComponents)
  def compositeHeapBuffer() = allocator.compositeHeapBuffer()
  def compositeHeapBuffer(maxNumComponents: Int) = allocator.compositeHeapBuffer(maxNumComponents)
  def compositeDirectBuffer() = allocator.compositeDirectBuffer()
  def compositeDirectBuffer(maxNumComponents: Int): CompositeByteBuf = allocator.compositeDirectBuffer(maxNumComponents)

  // Little-endian view over the same underlying memory (no copy).
  private def littleEndian(b: ByteBuf) = b.order(ByteOrder.LITTLE_ENDIAN)
}
| carlosFattor/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/codec/LittleEndianByteBufAllocator.scala | Scala | apache-2.0 | 2,795 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.source
/*
* <p>
* A <code>Documenter</code> is essentially
* used to wrap a <code>Reporter</code> and provide easy ways to send markup text
* to that <code>Reporter</code> via a <code>MarkupProvided</code> event.
* <code>Documenter</code> contains an <code>apply</code> method that takes a string.
* The <code>Documenter</code> will forward the passed string to the <code>Reporter</code> as the <code>text</code>
* parameter of an <code>MarkupProvided</code> event.
* </p>
*
* <p>
* Here's an example of using an <code>Documenter</code> in a <code>Suite</code>
* subclass:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest._
*
* class MySuite extends Suite {
* def testAddition(markup: Documenter) {
* assert(1 + 1 === 2)
* markup("Addition *seems* to work")
* }
* }
* </pre>
*
* <p>
* As of 2.0, the only built-in reporter that presents markup text is the HTML reporter.
* If you run this <code>Suite</code> and specify the HTML reporter, you will see the message
* included in the HTML report:
* </p>
*
* <pre class="stREPL">
* scala> (new MySuite).execute()
* <span class="stGreen">- testAddition(Informer)
* + Addition <em>seems</em> to work</span>
* </pre>
*
* <p>
* Traits <code>FunSuite</code>, <code>Spec</code>, <code>FlatSpec</code>, <code>WordSpec</code>, <code>FeatureSpec</code>, and
* their sister traits in <code>org.scalatest.fixture</code> package declare an implicit <code>markup</code> method that returns
* an <code>Documenter</code>.
* Here's an example of a <code>Spec</code> that uses <code>markup</code>:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.refspec.RefSpec
* import scala.collection.mutable.Stack
*
* class StackSpec extends RefSpec {
*
* markup("""
*
* Stack Specification
* ===================
*
* A `Stack` is a data structure that allows you to store and retrieve objects in
* a last-in-first-out (LIFO) fashion. `Stack`s (both this class and its immutable
* cousin) are not commonly used in Scala, because a `List` gives you
* the same basic functionality. Pushing an object onto a `Stack` maps to consing
* a new element onto the front of a `List`. Peaking at the top of the `Stack` maps to
* to a `head`. Popping an object off of a `Stack` maps to a `head` followed by a `tail`.
* Nevertheless, using a `Stack` instead of a `List` can clarify your intent
* to readers of your code.
*
* """)
*
* describe("A Stack") {
*
* it("should pop values in last-in-first-out order") {
* val stack = new Stack[Int]
* stack.push(1)
* stack.push(2)
* assert(stack.pop() === 2)
* assert(stack.pop() === 1)
* }
*
* it("should throw NoSuchElementException if an empty stack is popped") {
* val emptyStack = new Stack[String]
* assertThrows[NoSuchElementException] {
* emptyStack.pop()
* }
* }
* }
* }
* </pre>
*
* <p>
 * Were you to run a suite using <code>markup</code> in the interpreter, you would see output such as the following (this sample comes from a <code>FeatureSpec</code> example):
* </p>
*
* <pre class="stREPL">
* scala> (new ArithmeticFeatureSpec).run()
* <span class="stGreen">Feature: Integer arithmetic
* Scenario: addition
* Given two integers
* When they are added
* Then the result is the sum of the two numbers
* Scenario: subtraction
* Given two integers
* When one is subtracted from the other
* Then the result is the difference of the two numbers</span>
* </pre>
*
* @author Bill Venners
*/
/**
* Trait to which markup text tests can be reported.
*
* <p>
* Note: <code>Documenter</code> will be described in more detail in a future 2.0 milestone release. As of this release
* you can't see its effects yet.
* </p>
*
* @author Bill Venners
*/
trait Documenter {
  /**
   * Provide documentation to the <code>Reporter</code>.
   *
   * @param text a string of markup text that will be forwarded to the wrapped <code>Reporter</code>
   * via a <code>MarkupProvided</code> event.
   * @param pos the source position of the call site
   *
   * @throws NullArgumentException if the <code>text</code> reference is <code>null</code>
   */
  def apply(text: String)(implicit pos: source.Position): Unit
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/Documenter.scala | Scala | apache-2.0 | 4,987 |
package slamdata.engine
import slamdata.engine.analysis._
import slamdata.engine.std.Library
import scalaz._
import scalaz.std.map._
import scalaz.std.string._
import scalaz.std.list._
import scalaz.std.option._
import scalaz.std.set._
import scalaz.syntax.apply._
import scalaz.syntax.traverse._
trait SemanticAnalysis {
import slamdata.engine.sql._
import SemanticError._
// Accumulated semantic errors for a phase; NonEmptyList guarantees at least one error on failure.
type Failure = NonEmptyList[SemanticError]

// Shorthands for single-error failures and plain successes in ValidationNel form.
private def fail[A](e: SemanticError) = Validation.failure[NonEmptyList[SemanticError], A](NonEmptyList(e))
private def succeed[A](s: A) = Validation.success[NonEmptyList[SemanticError], A](s)

// Wraps an AST node into an annotated tree (unit annotations), using Node.children for structure.
def tree(root: Node): AnnotatedTree[Node, Unit] = AnnotatedTree.unit(root, n => n.children)
/**
* This analyzer looks for function invocations (including operators),
* and binds them to their associated function definitions in the
* provided library. If a function definition cannot be found,
* produces an error with details on the failure.
*/
def FunctionBind[A](library: Library) = {
  // Lookup is case-insensitive on the function name.
  def findFunction(name: String) = {
    val lcase = name.toLowerCase
    library.functions.find(f => f.name.toLowerCase == lcase).map(f => Validation.success(Some(f))).getOrElse(
      fail(FunctionNotFound(name))
    )
  }
  // Operators (unary and binary) resolve through the same library by their operator name;
  // non-invocation nodes get no binding (None).
  Analysis.annotate[Node, A, Option[Func], Failure] {
    case (InvokeFunction(name, args)) => findFunction(name)
    case (Unop(expr, op)) => findFunction(op.name)
    case (Binop(left, right, op)) => findFunction(op.name)
    case _ => Validation.success(None)
  }
}
// The set of named relations in scope at a node, keyed by relation name or alias.
final case class TableScope(scope: Map[String, SqlRelation])

implicit val ShowTableScope = new Show[TableScope] {
  // Rendered as a map of name -> AST node (SqlRelation shown via the Node instance).
  override def show(v: TableScope) = Show[Map[String, Node]].show(v.scope)
}
/**
* Inserts synthectic fields into the projections of each `select` stmt to hold
* the values that will be used in sorting. The compiler will generate a step to
* remove these synthetic fields after the sort operation.
*/
// NOTE(review): `transform` is applied only to the root node — ORDER BY keys of nested
// selects are not rewritten here; confirm that is intentional.
def TransformSelect[A]: Analysis[Node, A, Unit, Failure] = { tree1 =>
  def transform(node: Node): Node =
    node match {
      case sel @ SelectStmt(projections, _, _, _, Some(sql.OrderBy(keys)), _, _) => {
        // True when the sort key is already present in (or covered by) a projection.
        def matches(key: Expr, proj: Proj): Boolean = (key, proj) match {
          case (Ident(keyName), Proj(Ident(projName), None)) => keyName == projName
          case (Ident(keyName), Proj(_, Some(alias))) => keyName == alias
          case (Ident(keyName), Proj(Wildcard, _)) => true
          case _ => false
        }
        // Note: order of the keys has to be preserved, so this complex fold seems
        // to be the best way.
        type Target = (List[Proj], List[(Expr, OrderType)], Int)
        val (projs2, keys2, _) = keys.foldRight[Target]((Nil, Nil, 0)) {
          case (key @ (expr, orderType), (projs, keys, index)) =>
            if (!projections.exists(matches(expr, _))) {
              val name = "__sd__" + index.toString() // Note: this prefix has to match what the compiler looks for
              val proj2 = Proj(expr, Some(name))
              val key2 = Ident(name) -> orderType
              (proj2 :: projs, key2 :: keys, index + 1)
            } else (projs, key :: keys, index)
        }
        // Synthetic projections are appended; keys are rewritten to refer to them by name.
        sel.copy(projections = projections ++ projs2,
          orderBy = Some(sql.OrderBy(keys2)))
      }
      case _ => node
    }
  Validation.success(tree(transform(tree1.root)))
}
/**
* This analysis identifies all the named tables within scope at each node in
* the tree. If two tables are given the same name within the same scope, then
* because this leads to an ambiguity, an error is produced containing details
* on the duplicate name.
*/
def ScopeTables[A] = Analysis.readTree[Node, A, TableScope, Failure] { tree =>
  import Validation.{success, failure}
  Analysis.fork[Node, A, TableScope, Failure]((scopeOf, node) => {
    // The scope inherited from the parent node (empty at the root).
    def parentScope(node: Node) = tree.parent(node).map(scopeOf).getOrElse(TableScope(Map()))
    node match {
      case SelectStmt(projections, relations, filter, groupBy, orderBy, limit, offset) =>
        val parentMap = parentScope(node).scope
        (relations.foldLeft[Validation[Failure, Map[String, SqlRelation]]](success(Map.empty[String, SqlRelation])) {
          case (v, relation) =>
            // First-wins semigroup when merging sibling maps; true duplicates are
            // reported explicitly below as DuplicateRelationName.
            implicit val sg = Semigroup.firstSemigroup[SqlRelation]
            v +++ tree.subtree(relation).foldDown[Validation[Failure, Map[String, SqlRelation]]](success(Map.empty[String, SqlRelation])) {
              case (v, relation : SqlRelation) =>
                v.fold(
                  failure,
                  acc => {
                    // Joins/cross-products introduce no name of their own; tables and
                    // subqueries register under their alias (or the table name).
                    val name = relation match {
                      case r @ TableRelationAST(name, aliasOpt) => Some(aliasOpt.getOrElse(name))
                      case r @ SubqueryRelationAST(subquery, alias) => Some(alias)
                      case r @ JoinRelation(left, right, join, clause) => None
                      case r @ CrossRelation(left, right) => None
                    }
                    (name.map { name =>
                      (acc.get(name).map{ relation2 =>
                        failure(NonEmptyList(DuplicateRelationName(name, relation2)))
                      }).getOrElse(success(acc + (name -> relation)))
                    }).getOrElse(success(acc))
                  }
                )
              case (v, _) => v // We're only interested in relations
            }
        }).map(map => TableScope(parentMap ++ map))
      // Non-select nodes simply inherit the scope of their parent.
      case _ => success(parentScope(node))
    }
  })
}
sealed trait Provenance {
  import Provenance._
  // Combinators: & = value needs both provenances, | = value may come from either.
  def & (that: Provenance): Provenance = Both(this, that)
  def | (that: Provenance): Provenance = Either(this, that)
  // Removes Empty leaves and re-normalizes nested combinators of the same kind.
  def simplify: Provenance = this match {
    case x : Either => anyOf(x.flatten.map(_.simplify).filterNot(_ == Empty))
    case x : Both => allOf(x.flatten.map(_.simplify).filterNot(_ == Empty))
    case _ => this
  }
  def namedRelations: Map[String, List[NamedRelation]] = Foldable[List].foldMap(relations)(_.namedRelations)
  // All relations mentioned anywhere in this provenance tree (duplicates preserved).
  def relations: List[SqlRelation] = this match {
    case Empty => Nil
    case Value => Nil
    case Relation(value) => value :: Nil
    case Either(v1, v2) => v1.relations ++ v2.relations
    case Both(v1, v2) => v1.relations ++ v2.relations
  }
  def flatten: Set[Provenance] = Set(this)
  // Equality is up to associativity/commutativity of the combinators: Either/Both
  // compare as the SET of their simplified leaves. Overridden in the case classes'
  // presence, so the trait defines both equals and a consistent hashCode below.
  override def equals(that: Any): Boolean = (this, that) match {
    case (x, y) if (x.eq(y.asInstanceOf[AnyRef])) => true
    case (Relation(v1), Relation(v2)) => v1 == v2
    case (Either(_, _), that @ Either(_, _)) => this.simplify.flatten == that.simplify.flatten
    case (Both(_, _), that @ Both(_, _)) => this.simplify.flatten == that.simplify.flatten
    case (_, _) => false
  }
  // Must agree with the set-based equals for the combinator cases.
  override def hashCode = this match {
    case Either(_, _) => this.simplify.flatten.hashCode
    case Both(_, _) => this.simplify.flatten.hashCode
    case _ => super.hashCode
  }
}
trait ProvenanceInstances {
  import Provenance._
  // Renders a provenance tree: singleton leaves by name, relations via the Node Show
  // instance, and the combinators infix with " | " / " & " inside parentheses.
  implicit val ProvenanceShow: Show[Provenance] = new Show[Provenance] {
    override def show(v: Provenance): Cord = v match {
      case Empty            => Cord("Empty")
      case Value            => Cord("Value")
      case Relation(rel)    => Show[Node].show(rel)
      case Either(lhs, rhs) => Cord("(") ++ show(lhs) ++ Cord(" | ") ++ show(rhs) ++ Cord(")")
      case Both(lhs, rhs)   => Cord("(") ++ show(lhs) ++ Cord(" & ") ++ show(rhs) ++ Cord(")")
    }
  }
}
object Provenance extends ProvenanceInstances {
  /** Provenance of nothing at all (identity for both combinators after simplify). */
  case object Empty extends Provenance
  /** Provenance of literal/constant values. */
  case object Value extends Provenance
  /** Provenance rooted in a single relation of the query. */
  case class Relation(value: SqlRelation) extends Provenance

  case class Either(left: Provenance, right: Provenance) extends Provenance {
    // Flattens nested Eithers into the set of their leaves (used by equals/hashCode).
    override def flatten: Set[Provenance] = {
      def flatten0(x: Provenance): Set[Provenance] = x match {
        case Either(left, right) => flatten0(left) ++ flatten0(right)
        case _ => Set(x)
      }
      flatten0(this)
    }
  }

  case class Both(left: Provenance, right: Provenance) extends Provenance {
    // Flattens nested Boths into the set of their leaves (used by equals/hashCode).
    override def flatten: Set[Provenance] = {
      def flatten0(x: Provenance): Set[Provenance] = x match {
        case Both(left, right) => flatten0(left) ++ flatten0(right)
        case _ => Set(x)
      }
      flatten0(this)
    }
  }

  // The original special-cased sizes 0 and 1, calling `xs.size` (a full traversal for
  // non-indexed Iterables) up to twice. `reduceOption` has identical semantics —
  // Empty for no elements, the sole element for one, a left fold otherwise — in one pass.

  /** Conjunction of all the given provenances; Empty for an empty collection. */
  def allOf(xs: Iterable[Provenance]): Provenance =
    xs.reduceOption(_ & _).getOrElse(Empty)

  /** Disjunction of all the given provenances; Empty for an empty collection. */
  def anyOf(xs: Iterable[Provenance]): Provenance =
    xs.reduceOption(_ | _).getOrElse(Empty)
}
/**
* This phase infers the provenance of every expression, issuing errors
* if identifiers are used with unknown provenance. The phase requires
* TableScope annotations on the tree.
*/
def ProvenanceInfer = Analysis.readTree[Node, TableScope, Provenance, Failure] { tree =>
  Analysis.join[Node, TableScope, Provenance, Failure]((provOf, node) => {
    import Validation.{success, failure}
    def propagate(child: Node) = success(provOf(child))
    def NA = success(Provenance.Empty)
    (node match {
      case SelectStmt(projections, relations, filter, groupBy, orderBy, limit, offset) =>
        success(Provenance.allOf(projections.map(provOf)))
      case Proj(expr, alias) => propagate(expr)
      case Subselect(select) => propagate(select)
      case SetLiteral(values) => success(Provenance.Value)
      case Wildcard => NA // FIXME
      case Binop(left, right, op) =>
        success(provOf(left) & provOf(right))
      case Unop(expr, op) => success(provOf(expr))
      case ident @ Ident(name) =>
        // Identifiers resolve against the table scope: a direct hit pins the relation;
        // otherwise the identifier may come from any relation in scope, and an empty
        // scope is an error (NoTableDefined).
        val tableScope = tree.attr(node).scope
        (tableScope.get(name).map((Provenance.Relation.apply _) andThen success)).getOrElse {
          Provenance.anyOf(tableScope.values.map(Provenance.Relation.apply)) match {
            case Provenance.Empty => failure(NonEmptyList(NoTableDefined(ident)))
            case x => success(x)
          }
        }
      case InvokeFunction(name, args) => success(Provenance.allOf(args.map(provOf)))
      case Case(cond, expr) => propagate(expr)
      // NOTE(review): `.reduce` throws on an empty cases list — confirm the parser
      // guarantees at least one case for Match/Switch.
      case Match(expr, cases, default) => success(cases.map(provOf).reduce(_ & _))
      case Switch(cases, default) => success(cases.map(provOf).reduce(_ & _))
      case IntLiteral(value) => success(Provenance.Value)
      case FloatLiteral(value) => success(Provenance.Value)
      case StringLiteral(value) => success(Provenance.Value)
      case NullLiteral() => success(Provenance.Value)
      case r @ TableRelationAST(name, alias) => success(Provenance.Relation(r))
      case r @ SubqueryRelationAST(subquery, alias) => success(Provenance.Relation(r))
      case r @ JoinRelation(left, right, tpe, clause) => success(Provenance.Relation(r))
      case r @ CrossRelation(left, right) => success(Provenance.Relation(r))
      case GroupBy(keys, having) => success(Provenance.allOf(keys.map(provOf)))
      case OrderBy(keys) => success(Provenance.allOf(keys.map(_._1).toList.map(provOf)))
      case _ : BinaryOperator => NA
      case _ : UnaryOperator => NA
    }).map(_.simplify)
  })
}
// Result of top-down type inference: either a specific type was pushed down to the
// node, or nothing is known about it.
sealed trait InferredType
object InferredType {
  case object Unknown extends InferredType
  case class Specific(value: Type) extends InferredType

  implicit val ShowInferredType: Show[InferredType] = new Show[InferredType] {
    // Unknown renders as "?", a known type delegates to the Type Show instance.
    override def show(v: InferredType): Cord = v match {
      case Specific(t) => Show[Type].show(t)
      case Unknown     => Cord("?")
    }
  }
}
/**
* This phase works top-down to push out known types to terms with unknowable
* types (such as columns and wildcards). The annotation is the type of the node,
* which defaults to Type.Top in cases where it is not known.
*/
// Two chained passes: the first annotates each node with a map from child -> type pushed
// down to that child; the second turns the parent's map entry into this node's InferredType.
def TypeInfer = {
  Analysis.readTree[Node, Option[Func], Map[Node, Type], Failure] { tree =>
    import Validation.{success, failure}
    Analysis.fork[Node, Option[Func], Map[Node, Type], Failure]((mapOf, node) => {
      /**
       * Retrieves the inferred type of the current node being annotated
       * (looked up in the parent's child->type map).
       */
      def inferredType = for {
        parent <- tree.parent(node)
        selfType <- mapOf(parent).get(node)
      } yield selfType
      /**
       * Propagates the inferred type of this node to its sole child node.
       */
      def propagate(child: Node) = propagateAll(child :: Nil)
      /**
       * Propagates the inferred type of this node to its identically-typed
       * children nodes.
       */
      def propagateAll(children: Seq[Node]) = success(inferredType.map(t => Map(children.map(_ -> t): _*)).getOrElse(Map()))
      // Pushes the bound function's domain types (refined by the inferred type if any)
      // onto the argument nodes; fails if no function was bound by FunctionBind.
      def annotateFunction(args: List[Node]) =
        (tree.attr(node).map { func =>
          val typesV = inferredType.map(func.unapply).getOrElse(success(func.domain))
          typesV map (types => (args zip types).toMap)
        }).getOrElse(fail(FunctionNotBound(node)))
      /**
       * Indicates no information content for the children of this node.
       */
      def NA = success(Map.empty[Node, Type])
      node match {
        case SelectStmt(projections, relations, filter, groupBy, orderBy, limit, offset) =>
          inferredType match {
            // TODO: If there's enough type information in the inferred type to do so, push it
            // down to the projections.
            case _ => NA
          }
        case Proj(expr, alias) => propagate(expr)
        case Subselect(select) => propagate(select)
        case SetLiteral(values) =>
          inferredType match {
            // Push the set type down to the children:
            case Some(Type.Set(tpe)) => success(values.map(_ -> tpe).toMap)
            case _ => NA
          }
        case Wildcard => NA
        case Binop(left, right, _) => annotateFunction(left :: right :: Nil)
        case Unop(expr, _) => annotateFunction(expr :: Nil)
        case Ident(name) => NA
        case InvokeFunction(_, args) => annotateFunction(args)
        case Case(cond, expr) => propagate(expr)
        case Match(expr, cases, default) => propagateAll(cases ++ default)
        case Switch(cases, default) => propagateAll(cases ++ default)
        case IntLiteral(value) => NA
        case FloatLiteral(value) => NA
        case StringLiteral(value) => NA
        case NullLiteral() => NA
        case TableRelationAST(name, alias) => NA
        case SubqueryRelationAST(subquery, alias) => propagate(subquery)
        case JoinRelation(left, right, tpe, clause) => NA
        case CrossRelation(left, right) => NA
        case GroupBy(keys, having) => NA
        case OrderBy(keys) => NA
        case _ : BinaryOperator => NA
        case _ : UnaryOperator => NA
      }
    })
  } >>> Analysis.readTree[Node, Map[Node, Type], InferredType, Failure] { tree =>
    Analysis.fork[Node, Map[Node, Type], InferredType, Failure]((typeOf, node) => {
      // Read the inferred type of this node from the parent node's attribute:
      succeed((for {
        parent <- tree.parent(node)
        selfType <- tree.attr(parent).get(node)
      } yield selfType).map(InferredType.Specific.apply).getOrElse(InferredType.Unknown))
    })
  }
}
/**
* This phase works bottom-up to check the type of all expressions.
* In the event of a type error, an error will be produced containing
* details on the expected versus actual type.
*/
def TypeCheck = {
  Analysis.readTree[Node, (Option[Func], InferredType), Type, Failure] { tree =>
    Analysis.join[Node, (Option[Func], InferredType), Type, Failure]((typeOf, node) => {
      // The function bound to this node by FunctionBind, or a FunctionNotBound error.
      def func(node: Node): ValidationNel[SemanticError, Func] = {
        tree.attr(node)._1.map(Validation.success).getOrElse(Validation.failure(NonEmptyList(FunctionNotBound(node))))
      }
      // The inferred (pushed-down) type of this node, or `default` when unknown.
      def inferType(default: Type): ValidationNel[SemanticError, Type] = succeed(tree.attr(node)._2 match {
        case InferredType.Unknown => default
        case InferredType.Specific(v) => v
      })
      // Checks arity and pairwise type compatibility of actual argument types against
      // the function's declared domain.
      // NOTE(review): `_ => Unit` maps to the Unit COMPANION object, not the value `()`
      // — the conventional spelling is `_ => (())`; verify against the scalaz version in use.
      def typecheckArgs(func: Func, actual: List[Type]): ValidationNel[SemanticError, Unit] = {
        val expected = func.domain
        if (expected.length != actual.length) {
          fail[Unit](WrongArgumentCount(func, expected.length, actual.length))
        } else {
          (expected.zip(actual).map {
            case (expected, actual) => Type.typecheck(expected, actual)
          }).sequenceU.map(_ => Unit)
        }
      }
      // Full function application: check the arguments, then compute the codomain.
      def typecheckFunc(args: List[Expr]) = {
        func(node).fold(
          Validation.failure,
          func => {
            val argTypes = args.map(typeOf)
            typecheckArgs(func, argTypes).fold(
              Validation.failure,
              _ => func.apply(argTypes)
            )
          }
        )
      }
      def NA = succeed(Type.Bottom)
      def propagate(n: Node) = succeed(typeOf(n))
      node match {
        case s @ SelectStmt(projections, relations, filter, groupBy, orderBy, limit, offset) =>
          // A select produces an object of its named projections' types.
          succeed(Type.makeObject(s.namedProjections.map(t => (t._1, typeOf(t._2)))))
        case Proj(expr, alias) => propagate(expr)
        case Subselect(select) => propagate(select)
        case SetLiteral(values) => succeed(Type.makeArray(values.map(typeOf)))
        case Wildcard => inferType(Type.Top)
        case Binop(left, right, op) => typecheckFunc(left :: right :: Nil)
        case Unop(expr, op) => typecheckFunc(expr :: Nil)
        case Ident(name) => inferType(Type.Top)
        case InvokeFunction(name, args) => typecheckFunc(args.toList)
        case Case(cond, expr) => succeed(typeOf(expr))
        case Match(expr, cases, default) =>
          succeed(cases.map(typeOf).foldLeft[Type](Type.Top)(_ | _).lub)
        case Switch(cases, default) =>
          succeed(cases.map(typeOf).foldLeft[Type](Type.Top)(_ | _).lub)
        // Literals get constant types carrying their value.
        case IntLiteral(value) => succeed(Type.Const(Data.Int(value)))
        case FloatLiteral(value) => succeed(Type.Const(Data.Dec(value)))
        case StringLiteral(value) => succeed(Type.Const(Data.Str(value)))
        case NullLiteral() => succeed(Type.Const(Data.Null))
        case TableRelationAST(name, alias) => NA
        case SubqueryRelationAST(subquery, alias) => propagate(subquery)
        case JoinRelation(left, right, tpe, clause) => succeed(Type.Bool)
        case CrossRelation(left, right) => succeed(typeOf(left) & typeOf(right))
        case GroupBy(keys, having) =>
          // Not necessary but might be useful:
          succeed(Type.makeArray(keys.map(typeOf)))
        case OrderBy(keys) => NA
        case _ : BinaryOperator => NA
        case _ : UnaryOperator => NA
      }
    })
  }
}
// The full pipeline of semantic phases. The dupN/first/second plumbing duplicates and
// threads each phase's annotation through so the final tree carries all annotations
// as nested pairs.
val AllPhases = (TransformSelect[Unit] >>>
  ScopeTables[Unit] >>>
  ProvenanceInfer).dup2 >>>
  FunctionBind[Provenance](std.StdLib).dup3.first >>>
  TypeInfer.second.first.first >>>
  TypeCheck.first.first
}
object SemanticAnalysis extends SemanticAnalysis
| sellout/slamengine-old | src/main/scala/slamdata/engine/semantics.scala | Scala | agpl-3.0 | 19,729 |
package vultura.util
/**
 * Mixin for memoizing hash values of case classes (more generally products).
 *
 * The hash is computed eagerly, once, during trait initialization from all product
 * elements. NOTE(review): trait initializers run before the subclass body, so only
 * constructor parameters (as in case classes) are safely visible here; fields defined
 * in the subclass body would be observed uninitialized.
 * @see scala.Product
 */
trait HashMemo {self: Product =>
  // `val` (not `def`) caches the result; the product's elements are hashed as a sequence.
  override final val hashCode: Int = self.productIterator.toSeq.hashCode()
}
| ziggystar/vultura-factor | util/src/main/scala/vultura/util/HashMemo.scala | Scala | mit | 240 |
package spatial.transform
import argon.core._
import argon.nodes._
import argon.transform.ForwardTransformer
import spatial.analysis.SpatialTraversal
import spatial.aliases._
import spatial.metadata._
import spatial.nodes._
import spatial.utils._
import scala.collection.mutable.ArrayBuffer
/**
* Inserts UnitPipe wrappers for primitive nodes in outer control nodes, along with registers for communication
*/
case class UnitPipeTransformer(var IR: State) extends ForwardTransformer with SpatialTraversal {
override val name = "Unit Pipe Transformer"
//override val allowPretransform = true

// Enable signal of the controller currently being transformed, if any; saved and
// restored around child traversal by withEnable.
var enable: Option[Exp[Bit]] = None
/**
 * Runs `blk` with `en` installed as the current enable signal, restoring the previous
 * enable afterwards.
 *
 * Fixes: the saved value was a `var` that was never reassigned (now a `val`), and the
 * restore did not run if `blk` threw — a failure mid-transform would leak the modified
 * enable into unrelated nodes. The restore is now in a `finally`.
 */
def withEnable[T](en: Exp[Bit])(blk: => T)(implicit ctx: SrcCtx): T = {
  val prevEnable = enable
  dbgs(s"Enable was $enable")
  enable = Some(en) //Some(enable.map(bool_and(_,en)).getOrElse(en) ) TODO: Should this use ANDs?
  dbgs(s"Enable is now $enable")
  try blk
  finally enable = prevEnable
}
// One scheduling bucket built while scanning a block's statements: either a single
// control node (isControl = true) or a run of primitive nodes that will be wrapped
// into one unit pipe.
private class PipeStage(val isControl: Boolean) {
  val allocs = ArrayBuffer[Stm]()    // allocations attached to this stage
  val nodes = ArrayBuffer[Stm]()     // the stage's control/primitive statements
  val regReads = ArrayBuffer[Stm]()  // stateless register reads (may be replayed)
  // Allocations whose inputs are dynamic (must be staged after register reads).
  def dynamicAllocs = allocs.filter{case TP(s,d) => isDynamicAllocation(s) }
  // Allocations that can be staged before the inserted unit pipe.
  def staticAllocs = allocs.filter{case TP(s,d) => !isDynamicAllocation(s) }
  def allocDeps = allocs.flatMap{case TP(s,d) => d.inputs }.toSet
  def deps = allocDeps ++ nodes.flatMap{case TP(s,d) => d.inputs }.toSet
  // Debug dump of this stage's contents under index i.
  def dump(i: Int): Unit = {
    if (isControl) dbgs(s"$i. Control Stage") else dbgs(s"$i. Primitive Stage")
    dbgs("Allocations: ")
    allocs.foreach{case TP(s,d) => dbgs(c" $s = $d [dynamic: ${isDynamicAllocation(d)}]")}
    dbgs("Nodes: ")
    nodes.foreach{case TP(s,d) => dbgs(c" $s = $d")}
    dbgs("Register reads: ")
    regReads.foreach{case TP(s,d) => dbgs(c" $s = $d")}
  }
}
// Factory shorthand for an empty stage of the given kind.
private object PipeStage { def empty(isControl: Boolean) = new PipeStage(isControl) }
// Allocates a Reg[T] initialized to T's zero value; requires Bits evidence on T's type,
// otherwise the type has no defined zero and an UndefinedZeroException is thrown.
private def regFromSym[T](s: Exp[T])(implicit ctx: SrcCtx): Exp[Reg[T]] = s.tp match {
  case Bits(bits) =>
    val init = unwrap(bits.zero)(s.tp)
    implicit val mT: Type[T] = s.tp
    implicit val bT: Bits[T] = bits.asInstanceOf[Bits[T]]
    Reg.alloc[T](init)
  case _ => throw new spatial.UndefinedZeroException(s, s.tp)
}
// Unconditional (enable = constant true) register write of s into reg.
// NOTE(review): the failure case reuses UndefinedZeroException although no zero is
// needed here — it signals missing Bits evidence; confirm that is the intent.
private def regWrite[T](reg: Exp[Reg[T]], s: Exp[T])(implicit ctx: SrcCtx): Exp[MUnit] = s.tp match {
  case Bits(bits) =>
    implicit val mT: Type[T] = s.tp
    implicit val bT: Bits[T] = bits.asInstanceOf[Bits[T]]
    Reg.write(reg, s, Bit.const(true))
  case _ => throw new spatial.UndefinedZeroException(s, s.tp)
}
// Reads back the value of a register created by regFromSym.
private def regRead[T](reg: Exp[Reg[T]])(implicit ctx: SrcCtx): Exp[T] = reg.tp.typeArguments.head match {
  case tp@Bits(bits) =>
    implicit val mT: Type[T] = mtyp(tp)
    implicit val bT: Bits[T] = mbits(bits)
    Reg.read(reg)
  case _ => throw new spatial.UndefinedZeroException(reg, reg.tp.typeArguments.head)
}
// VarReg variants for escaping values whose types have no Bits evidence.
private def varFromSym[T](s: Exp[T])(implicit ctx: SrcCtx): Exp[VarReg[T]] = {
  implicit val mT: Type[T] = s.tp
  VarReg.alloc[T](s.tp)
}
private def varWrite[T](varr: Exp[VarReg[T]], s: Exp[T])(implicit ctx: SrcCtx): Exp[MUnit] = {
  implicit val mT: Type[T] = s.tp
  VarReg.write(varr, s, Bit.const(true))
}
private def varRead[T](varr: Exp[VarReg[T]])(implicit ctx: SrcCtx): Exp[T] = {
  implicit val tp: Type[T] = varr.tp.typeArguments.head.asInstanceOf[Type[T]]
  VarReg.read(varr)
}
// Partitions a block's statements into alternating control/primitive stages, wraps each
// primitive stage in a unit pipe, and routes values escaping a primitive stage through
// registers (Bits types) or VarRegs (other types) so later stages can read them.
private def wrapBlock[T:Type](block: Block[T])(implicit ctx: SrcCtx): Exp[T] = inlineBlockWith(block, {stms =>
  dbgs(s"Wrapping block with type ${typ[T]}")
  val stages = ArrayBuffer[PipeStage]()
  def curStage = stages.last
  stages += PipeStage.empty(true)
  // Pass 1: bucket every statement into stages by kind.
  stms foreach {case stm@TP(s,d) =>
    dbgs(c"$s = $d [primitive:${isPrimitiveNode(s) || isInnerSwitch(s)}, regRead:${isRegisterRead(s)}, alloc:${isAllocation(s)}, primAlloc:${isPrimitiveAllocation(s)}]")
    // Consider inner switches to be primitive nodes
    if (isPrimitiveNode(s) || isInnerSwitch(s)) {
      if (curStage.isControl) stages += PipeStage.empty(false)
      curStage.nodes += stm
    }
    else if (isStateless(s) && !isAllocation(s)) {
      // Stateless reads are recorded on the stage AND (inside primitive stages) mirrored
      // into its nodes so they get staged inside the unit pipe as well.
      if (!curStage.isControl) curStage.nodes += stm
      curStage.regReads += stm
    }
    else if (isStateless(s) || isAllocation(s) || isGlobal(s)) {
      if (isPrimitiveAllocation(s) && !curStage.isControl) curStage.nodes += stm
      else curStage.allocs += stm
    }
    else {
      stages += PipeStage.empty(true)
      curStage.nodes += stm
    }
  }
  val deps = stages.toList.map(_.deps)
  stages.zipWithIndex.foreach{case (stage,i) => stage.dump(i) }
  dbgs("")
  // Pass 2: emit each stage; primitive stages become unit pipes.
  stages.zipWithIndex.foreach{
    case (stage,i) if !stage.isControl =>
      val calculated = stage.nodes.map{case TP(s,d) => s}
      val innerDeps = calculated ++ deps.take(i).flatten // Things in this Unit Pipe
      // A symbol "escapes" if it is the block result or used outside this stage.
      val escaping = calculated.filter{sym => (sym == block.result || (sym.dependents diff innerDeps).nonEmpty) && !isRegisterRead(sym) }
      val (escapingUnits, escapingValues) = escaping.partition{_.tp == UnitType}
      val (escapingBits, escapingVars) = escapingValues.partition{sym => Bits.unapply(sym.tp).isDefined }
      dbgs(c"Stage #$i: ")
      dbgs(c" Escaping symbols: ")
      escapingValues.foreach{e => dbgs(c" ${str(e)}: ${e.dependents diff innerDeps}")}
      // Create registers for escaping primitive values
      val regs = escapingBits.map{sym => regFromSym(sym) }
      val vars = escapingVars.map{sym => varFromSym(sym) }
      stage.staticAllocs.foreach(visitStm)
      val pipe = Pipe.op_unit_pipe(enable.toList, () => {
        isolateSubstScope { // We shouldn't be able to see any substitutions in here from the outside by default
          stage.nodes.foreach(visitStm)
          escapingBits.zip(regs).foreach { case (sym, reg) => regWrite(reg, f(sym)) }
          escapingVars.zip(vars).foreach { case (sym, varr) => varWrite(varr, f(sym)) }
          unit
        }
      })
      levelOf(pipe) = InnerControl
      styleOf(pipe) = SeqPipe
      // Outside inserted pipe, replace original escaping values with register reads
      escapingBits.zip(regs).foreach{case (sym,reg) => register(sym, regRead(reg)) }
      escapingVars.zip(vars).foreach{case (sym,varr) => register(sym, varRead(varr)) }
      // Add (possibly redundant/unused) register reads
      stage.regReads.foreach(visitStm)
      // Add allocations which are known not to be used in the primitive logic in the inserted unit pipe
      stage.dynamicAllocs.foreach(visitStm)
      dbgs(c" Created registers: $regs")
    case (stage, i) if stage.isControl =>
      stage.nodes.foreach(visitStm) // Zero or one control nodes
      stage.staticAllocs.foreach(visitStm) // Allocations which cannot rely on reg reads (and occur AFTER nodes)
      stage.regReads.foreach(visitStm) // Register reads
      stage.dynamicAllocs.foreach(visitStm) // Allocations which can rely on reg reads
  }
  // Unit-typed blocks return unit; otherwise mirror the (possibly substituted) result.
  val result = typ[T] match {
    case UnitType => unit
    case _ => f(block.result)
  }
  result.asInstanceOf[Exp[T]]
})
// Per-op wrapping state: one boolean per nested block of the op being transformed,
// consumed head-first by inlineBlock below.
var wrapBlocks: List[Boolean] = Nil
var ctx: Option[SrcCtx] = None
var inAccel = false
var controlStyle: Option[ControlStyle] = None
var controlLevel: Option[ControlLevel] = None

// Saves/restores the current controller's style/level metadata around `block`.
// NOTE(review): no try/finally here — an exception in `block` leaves the saved values
// unrestored; confirm transformer failures abort the whole pass anyway.
def inControl[T](lhs: Exp[_])(block: => T): T = {
  val prevStyle = controlStyle
  val prevLevel = controlLevel
  controlStyle = styleOf.get(lhs)
  controlLevel = levelOf.get(lhs)
  val result = block
  controlStyle = prevStyle
  controlLevel = prevLevel
  result
}

// Runs x with the given wrap flags and source context installed, restoring the
// previous values afterwards (same caveat on exceptions as inControl).
def withWrap[A](wrap: List[Boolean], srcCtx: SrcCtx)(x: => A) = {
  val prevWrap = wrapBlocks
  val prevCtx = ctx
  wrapBlocks = wrap
  ctx = Some(srcCtx)
  val result = x
  wrapBlocks = prevWrap
  ctx = prevCtx
  result
}

// Consumes the head wrap flag for each inlined block: wrap the block in unit pipes when
// flagged, otherwise fall back to the default inlining.
override protected def inlineBlock[T](b: Block[T]): Exp[T] = {
  val doWrap = wrapBlocks.headOption.getOrElse(false)
  if (wrapBlocks.nonEmpty) wrapBlocks = wrapBlocks.drop(1)
  dbgs(c"Transforming Block $b [$wrapBlocks]")
  if (doWrap) {
    wrapBlock(b)(mtyp(b.tp),ctx.get)
  }
  else super.inlineBlock(b)
}
  /**
    * Re-stages an outer switch case whose body mixes multiple controllers/primitives,
    * wrapping the body in a sequential unit pipe. If the case produces a Bits-typed
    * value, that value is routed through a register (written inside an inner unit pipe,
    * read outside) so it can escape the inserted pipe. Metadata from `lhs` is transferred
    * to the newly staged case.
    */
  def wrapSwitchCase[T:Type](lhs: Exp[T], body: Block[T])(implicit ctx: SrcCtx): Exp[T] = transferMetadataIfNew(lhs){
    Switches.op_case { () =>
      // Only Bits-typed results need a register; other results have no hardware value to carry out.
      val reg: Option[Exp[Reg[T]]] = typ[T] match {
        case Bits(bT) => Some(regFromSym(body.result))
        case _ => None // Nothing needed for escaping values
      }
      val pipe = Pipe.op_unit_pipe(enable.toList, () => {
        wrapBlock(body)
        reg match {
          case Some(r) =>
            // Inner sequential pipe performs the register write of the (mirrored) case result.
            val writePipe = Pipe.op_unit_pipe(enable.toList, () => { regWrite(r, f(body.result)) })
            levelOf(writePipe) = InnerControl
            styleOf(writePipe) = SeqPipe
            unit
          case None => unit
        }
      })
      // The wrapping pipe contains child controllers, so it is an outer sequential controller.
      levelOf(pipe) = OuterControl
      styleOf(pipe) = SeqPipe
      reg match {
        case Some(r) => regRead(r)
        case _ => unit.asInstanceOf[Exp[T]]
      }
    }
  }._1
  /**
    * Main dispatch: mirrors each node, selecting per-block wrap flags so that unit pipes
    * are inserted only where outer controllers contain bare primitive logic.
    * The wrap-flag list passed to withWrap lines up positionally with the node's blocks.
    */
  override def transform[T:Type](lhs: Sym[T], rhs: Op[T])(implicit ctx: SrcCtx): Exp[T] = rhs match {
    // Only insert Unit Pipes into bodies of switch cases in outer scope contexts
    case Hwblock(body,isForever) => inControl(lhs) {
      // Track that we are inside the Accel scope while the body is mirrored.
      inAccel = true
      val wrapEnables = if (isOuterControl(lhs)) List(true) else Nil
      val lhs2 = withWrap(wrapEnables, ctx) { super.transform(lhs, rhs) }
      inAccel = false
      lhs2
    }

    // Add enables to unit pipes inserted inside of switches
    case op@Switch(body,selects,cases) if isOuterControl(lhs) => inControl(lhs) {
      val selects2 = f(selects)
      val body2 = stageHotBlock {
        // Each case is re-staged with its select signal pushed as an enable,
        // so any unit pipes inserted within are conditioned on that select.
        selects2.zip(cases).foreach {
          case (en, s: Sym[_]) => withEnable(en){ visitStm(stmOf(s)) }
          case (en, c) => f(c)
        }
        f(body.result)
      }
      val cases2 = f(cases)
      val lhs2 = Switches.op_switch(body2, selects2, cases2)
      transferMetadata(lhs, lhs2)
      lhs2
    }

    // Insert unit pipes in outer switch cases with multiple controllers
    case op@SwitchCase(body) if isOuterControl(lhs) => inControl(lhs) {
      val controllers = getControlNodes(body)
      val primitives = getPrimitiveNodes(body)
      // A case mixing a controller with primitives (or several controllers) needs full wrapping;
      // otherwise a simple per-block wrap flag suffices.
      if (controllers.length > 1 || (primitives.nonEmpty && controllers.nonEmpty)) {
        wrapSwitchCase(lhs, body)(mtyp(op.mT),ctx)
      }
      else {
        withWrap(List(true), ctx){ super.transform(lhs, rhs) }
      }
    }

    // Only insert unit pipes in if-then-else statements if in Accel and in an outer controller
    /*case op @ IfThenElse(cond,thenp,elsep) if inAccel && controlLevel.contains(OuterControl) =>
      withWrap(List(true,true), ctx) { super.transform(lhs, rhs) }*/

    case _:StateMachine[_] if isOuterControl(lhs) => inControl(lhs) {
      withWrap(List(false, true, false), ctx) { super.transform(lhs, rhs) } // Wrap the second block only
    }

    case _ if isOuterControl(lhs) => inControl(lhs) {
      withWrap(List(true), ctx) { super.transform(lhs, rhs) } // Mirror with wrapping enabled for the first block
    }

    case _ if isControlNode(lhs) => inControl(lhs) {
      withWrap(Nil, ctx){ super.transform(lhs, rhs) }
    }

    case _ =>
      withWrap(Nil, ctx){ super.transform(lhs, rhs) } // Disable wrapping at this level
  }
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/transform/UnitPipeTransformer.scala | Scala | mit | 11,498 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack.Continue
import monix.reactive.Observable
import monix.execution.exceptions.DummyException
import monix.reactive.observers.Subscriber
import scala.concurrent.duration._
/** Exercises `Observable.delayOnNext`, which shifts each emitted element forward in time. */
object DelayByTimespanSuite extends BaseOperatorSuite {
  def createObservable(cnt: Int) = Some {
    // NOTE(review): the `cnt` argument is ignored; a fixed count of 20 is used — confirm intended.
    val sourceCount = 20
    val source = Observable.range(0L, sourceCount.toLong)
    val o = source.delayOnNext(1.second)
    val c = sourceCount
    // Sum of 0..(c-1) is c*(c-1)/2; first emission and spacing are both 1 second.
    Sample(o, c, (c * (c - 1) / 2).toLong, 1.second, 1.second)
  }

  def observableInError(sourceCount: Int, ex: Throwable) = Some {
    val source = createObservableEndingInError(Observable.range(0L, sourceCount.toLong), ex)
    val o = source.delayOnNext(1.second)
    val c = sourceCount
    // The last element is replaced by the injected error, so only c - 1 elements are counted/summed.
    Sample(o, c - 1, (c - 1) * (c - 2) / 2, 1.second, 1.second)
  }

  // delayOnNext takes no user-supplied function, so there is no broken-user-code scenario.
  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) =
    None

  override def cancelableObservables() = {
    val o = Observable.now(1L).delayOnNext(1.second)
    Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
  }

  test("works for empty observables triggering onComplete") { implicit s =>
    val source: Observable[Long] = Observable.empty.delayOnNext(1.second)
    var wasCompleted = 0

    source.unsafeSubscribeFn(new Subscriber[Long] {
      val scheduler = s

      def onNext(elem: Long) = {
        // `if (1 == 1)` guards the unconditional fail so the trailing Continue is not flagged as dead code.
        if (1 == 1) fail("onNext should not happen")
        Continue
      }

      def onError(ex: Throwable): Unit =
        fail("onError should not happen")

      def onComplete(): Unit = wasCompleted += 1
    })

    assertEquals(wasCompleted, 1)
  }

  test("works for empty observables triggering onError") { implicit s =>
    val dummy = DummyException("dummy")
    val source: Observable[Long] = Observable.raiseError(dummy).delayOnNext(1.second)
    var errorThrown: Throwable = null

    source.unsafeSubscribeFn(new Subscriber[Long] {
      val scheduler = s

      def onNext(elem: Long) = {
        // Same dead-code guard as above.
        if (1 == 1) fail("onNext should not happen")
        Continue
      }

      def onError(ex: Throwable): Unit =
        errorThrown = ex

      def onComplete(): Unit =
        fail("onComplete should not happen")
    })

    assertEquals(errorThrown, dummy)
  }
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DelayByTimespanSuite.scala | Scala | apache-2.0 | 2,908 |
// Negative compilation test for the Dotty compiler: each annotated line is expected
// to be rejected (duplicate definitions, missing type parameters, illegal returns,
// cyclic references, nonsensical catch patterns). Do not "fix" the flagged lines —
// the annotations are markers consumed by the test framework.
object typers {

  class A(x: Int) {
    val x: String = "a" // error: double def

    { val y: String = ""
      val y: Int = 0 // error: double def
      y
    }
  }

  class B { self => // error: double def
    def self: Int = 0
    def self(x: Int): Int = x
  }

  class C {
    val x: Int
    val x: String // error: double def
    val y: Int
    def y: String // error: double def
    val z: Int
    def z(): String // error: double def

    def f(x: Any) = () // OK!
    def f(x: AnyRef): AnyRef

    def g(x: Object): Unit
    def g[T](x: T): T = x // OK!
  }

  type L[X] = scala.collection.immutable.List[X]
  type M[X, Y] <: scala.collection.immutable.Map[X, Y] // error: only classes can have declared but undefined members

  object hk {
    def f(x: L) // error: missing type parameter
      : M = // error: missing type parameter
      ??? : M // error: missing type parameter
  }

  object returns {

    def foo(x: Int) = { // error: has return; needs result type
      return 3
    }

    return 4 // error: return outside method definition
  }

  object cyclic {
    def factorial(acc: Int, n: Int) =
      if (n == 0) acc
      else factorial(acc * n, n - 1) // error: cyclic reference

    def foo(x: Int) = x // error: cyclic reference
    def foo() = foo(1)
  }

  object tries {

    val x = try {
      "abc"
    } catch {
      case ex: String => // does not work yet. We should detect that the test is non-sensical, but don't.
        123
    }
  }
}
| reactormonk/dotty | tests/neg/typers.scala | Scala | bsd-3-clause | 1,552 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.matfast
import org.apache.spark.sql.matfast.execution.MatfastPlanner
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, FileSourceStrategy}
import org.apache.spark.sql.{execution => sparkexecution, _}
import org.apache.spark.sql.internal.SessionState
import org.apache.spark.sql.matfast.plans.{MatfastOptimizer}
import scala.collection.immutable
/**
 * Session state for a [[MatfastSession]]: holds the MatFast-specific configuration,
 * optimizer and planner alongside the inherited Spark SQL session state.
 *
 * Configuration keys prefixed with "matfast." are routed to [[MatfastConf]];
 * all other keys go to the regular Spark SQL conf.
 */
private[matfast] class MatfastSessionState (matfastSession: MatfastSession)
  extends SessionState(matfastSession) {
  self =>

  protected[matfast] lazy val matfastConf = new MatfastConf

  protected[matfast] def getSQLOptimizer = optimizer

  protected[matfast] lazy val matfastOptimizer: MatfastOptimizer = new MatfastOptimizer

  /**
   * Planner that takes into account matrix opt strategies.
   */
  protected[matfast] val matfastPlanner: sparkexecution.SparkPlanner = {
    new MatfastPlanner(matfastSession, conf, experimentalMethods.extraStrategies)
  }

  override def executePlan(plan: LogicalPlan) =
    new execution.QueryExecution(matfastSession, plan)

  /** Sets a configuration value, routing "matfast."-prefixed keys to the MatFast conf. */
  def setConf(key: String, value: String): Unit = {
    if (key.startsWith("matfast.")) matfastConf.setConfString(key, value)
    else conf.setConfString(key, value)
  }

  /** Gets a configuration value, routing "matfast."-prefixed keys to the MatFast conf. */
  def getConf(key: String): String = {
    if (key.startsWith("matfast.")) matfastConf.getConfString(key)
    else conf.getConfString(key)
  }

  /**
   * Gets a configuration value with a fallback default.
   * Fix: "matfast."-prefixed keys previously queried the SQL conf here, inconsistent
   * with setConf/getConf(key); they are now routed to matfastConf like the other accessors.
   */
  def getConf(key: String, defaultValue: String): String = {
    if (key.startsWith("matfast.")) matfastConf.getConfString(key, defaultValue)
    else conf.getConfString(key, defaultValue)
  }

  /** All configuration entries from both the SQL conf and the MatFast conf. */
  def getAllConfs: immutable.Map[String, String] = {
    conf.getAllConfs ++ matfastConf.getAllConfs
  }
}
| yuyongyang800/SparkDistributedMatrix | src/main/scala/org/apache/spark/sql/matfast/MatfastSessionState.scala | Scala | apache-2.0 | 2,546 |
import scala.quoted._
import scala.quoted.staging._
import scala.quoted.autolift
/** Macro definitions used by the staged program in `Test` below (Dotty quote/splice demo). */
object Macros {

  // Inline assert that reports the failing expression's source text in the error message.
  inline def assert(expr: => Boolean): Unit =
    ${ assertImpl('expr) }

  def assertImpl(expr: Expr[Boolean])(using QuoteContext) =
    '{ if !($expr) then throw new AssertionError(s"failed assertion: ${${showExpr(expr)}}") }

  // Relies on `quoted.autolift` (imported at file top) to lift the String into an Expr[String].
  def showExpr[T](expr: Expr[T])(using QuoteContext): Expr[String] = expr.toString

  // Compile-time exponentiation: `n` must be an inline (statically known) Int.
  inline def power(inline n: Int, x: Double) = ${ powerCode('n, 'x) }

  // Unlifts the Int expression (reporting an error if it is not a constant) and delegates.
  def powerCode(n: Expr[Int], x: Expr[Double]) (using QuoteContext): Expr[Double] =
    powerCode(n.unliftOrError, x)

  // Generates code for x^n by repeated squaring.
  def powerCode(n: Int, x: Expr[Double])(using QuoteContext): Expr[Double] =
    if (n == 0) '{1.0}
    else if (n == 1) x
    else if (n % 2 == 0) '{ { val y = $x * $x; ${ powerCode(n / 2, 'y) } } }
    else '{ $x * ${ powerCode(n - 1, x) } }
}
/** Runs a staged program at class-construction time using the Macros defined above. */
class Test {

  given Toolbox = Toolbox.make(getClass.getClassLoader)

  // `run` compiles and executes the quoted program; its result is discarded here —
  // the point of the test is that staging/splicing succeeds.
  run {
    val program = '{
      import Macros._

      val x = 1
      assert(x != 0)

      ${ assertImpl('{x != 0}) }

      val y = math.sqrt(2.0)

      power(3, y)

      ${ powerCode(3, '{math.sqrt(2.0)}) }
    }

    program
  }
}
| som-snytt/dotty | tests/pos-staging/quote-0.scala | Scala | apache-2.0 | 1,163 |
package net.akmorrow13.endive.featurizers
import net.akmorrow13.endive.EndiveFunSuite
/** Tests parsing of position weight matrices (PWMs) from a YAML motif model file. */
class MotifSuite extends EndiveFunSuite {

  // Path to the YAML test fixture containing the motif models.
  var motifPath = resourcePath("models.yaml")

  test("should read pwms from yaml file") {
    val motifs = Motif.parseYamlMotifs(motifPath)
    // Fixture is expected to contain exactly 3 motif models.
    assert(motifs.length == 3)
    // Spot-check two entries of the first motif's PWM against known fixture values.
    val first = motifs.head.pwm
    assert(first(0) == 0.19882676005363464 && first(4) == 0.1623602658510208)
  }
}
| akmorrow13/endive | src/test/scala/net/akmorrow13/endive/featurizers/MotifSuite.scala | Scala | apache-2.0 | 460 |
package stream
import stream.Fns._
object PureStreams {

  /**
   * A pull-based stream of Ints. Filtering is modeled with a validity flag
   * rather than by skipping elements: consumers must check `currentValid`
   * before using `current`.
   */
  trait IntStream {
    def empty : Boolean
    def current : Int
    def next : IntStream
    def currentValid : Boolean
  }

  /** View marking elements that fail `pred` (or are already invalid upstream) as invalid. */
  class IntFilterStream(stream : IntStream, pred : IntBoolFn) extends IntStream {
    def next = new IntFilterStream(stream.next, pred)
    def current = stream.current
    def currentValid = pred(current) && stream.currentValid
    def empty = stream.empty
  }

  /** View applying `fn` to each element; validity passes through unchanged. */
  class IntMapStream(stream : IntStream, fn : IntIntFn) extends IntStream {
    def next = new IntMapStream(stream.next, fn)
    def current = fn(stream.current)
    def currentValid = stream.currentValid
    def empty = stream.empty
  }

  /** Stream over a slice of an array, from `index` (inclusive) to `endIndex` (exclusive). */
  class IntArrayStream(a : Array[Int], var index : Int, endIndex : Int) extends IntStream {
    def next = new IntArrayStream(a, index + 1, endIndex)
    def current = a(index)
    def currentValid = true
    def empty = index >= endIndex
  }

  /** Left fold over the valid elements of the stream, starting from `v`. */
  def fold(s : IntStream, fn : IntIntIntFn, v : Int) = {
    var acc    = v
    var cursor = s
    while (!cursor.empty) {
      if (cursor.currentValid) acc = fn(acc, cursor.current)
      cursor = cursor.next
    }
    acc
  }

  /** Sum of all valid elements in the stream. */
  def sum(s : IntStream) : Int = {
    val add = new IntIntIntFn { def apply(a1 : Int, a2 : Int) = a1 + a2 }
    fold(s, add, 0)
  }

  /** Sum of all elements of the array. */
  def sum(a : Array[Int]) : Int = {
    sum(new IntArrayStream(a, 0, a.length))
  }

  /** Sum of array elements divisible by 37. */
  def filterSum(a : Array[Int]) = {
    val divisibleBy37 = new IntBoolFn { def apply(a : Int) = (a % 37) == 0 }
    val filtered = new IntFilterStream(new IntArrayStream(a, 0, a.length), divisibleBy37)
    sum(filtered)
  }

  /** Sum of (x * 3 + 7) over array elements x divisible by 37. */
  def mapFilterSum(a : Array[Int]) = {
    val divisibleBy37 = new IntBoolFn { def apply(a : Int) = (a % 37) == 0 }
    val triplePlus7   = new IntIntFn  { def apply(a : Int) = a * 3 + 7 }
    val pipeline = new IntMapStream(new IntFilterStream(new IntArrayStream(a, 0, a.length), divisibleBy37), triplePlus7)
    sum(pipeline)
  }
}
| svn2github/metascala | src/test/stream/PureIntStream.scala | Scala | bsd-3-clause | 1,758 |
package uk.org.nbn.nbnv.importer.ingestion
import org.mockito.Mockito._
import org.mockito.Matchers._
import javax.persistence.EntityManager
import uk.org.nbn.nbnv.jpa.nbncore.{Dataset, TaxonDataset}
import uk.org.nbn.nbnv.importer.metadata.Metadata
import uk.org.nbn.nbnv.importer.testing.BaseFunSuite
import uk.org.nbn.nbnv.importer.data.{Database, Repository, KeyGenerator}
import org.mockito.Mockito
import org.apache.log4j.Logger
/** Tests for DatasetIngester.upsertDataset using Mockito mocks (both currently disabled). */
class DatasetIngesterSuite extends BaseFunSuite {

  // NOTE(review): test is `ignore`d — reason not recorded here; confirm whether it is obsolete.
  ignore("an existing dataset should be updated") {
    // arrange
    val key = "existing-dataset-key"
    val metadata = mock[Metadata]
    when(metadata.datasetKey) thenReturn key
    val dataset = mock[Dataset]
    val taxonDataset = mock[TaxonDataset]
    when(taxonDataset.getDataset).thenReturn(dataset)
    val em = mock[EntityManager]
    // EntityManager finds an existing TaxonDataset, so the upsert should take the update path.
    when(em.find(classOf[TaxonDataset], key)).thenReturn(taxonDataset)
    val keyGenerator = mock[KeyGenerator]
    val db = mock[Database]

    // act
    val ingester = new DatasetIngester(mock[Logger], db, keyGenerator)
    val result = ingester.upsertDataset(metadata)

    // assert - that the entity manager was not called with the retrieved dataset
    verify(em, never()).persist(dataset)

    // assert - that a property was set
    result.getDatasetKey should be (metadata.datasetKey)
  }

  // NOTE(review): test is `ignore`d and its assertions are partly TODO comments below.
  ignore("a new dataset should be inserted") {
    // arrange
    val key = "new-dataset-key"
    val metadata = mock[Metadata]
    when(metadata.datasetKey) thenReturn key
    val em = mock[EntityManager]
    // EntityManager finds nothing, so the upsert should take the insert path.
    when(em.find(classOf[TaxonDataset], key)).thenReturn(null)
    val dataset = mock[Dataset]
    when(em.merge(any(classOf[Dataset]))).thenReturn(dataset)
    val keyGenerator = mock[KeyGenerator]
    val db = mock[Database]

    // act
    val ingester = new DatasetIngester(mock[Logger], db, keyGenerator)
    val taxonDataset = ingester.upsertDataset(metadata)

    //verify that setDataset is called against the new taxondataset enity with a dataset
    // check that the taxondataset has got a dataset on it
    //verify that em.persist is called with a new taxon dataset.

    // assert - that the entity manager was called with a dataset
    verify(em).persist(any(classOf[TaxonDataset])) // would be better to verify that it's called with some dataset with key=key
  }
}
| JNCC-dev-team/nbn-importer | importer/src/test/scala/uk/org/nbn/nbnv/importer/ingestion/DatasetIngesterSuite.scala | Scala | apache-2.0 | 2,400 |
import leon.annotation.inline
import leon.lang._
/** Leon verification benchmark: checks array length/content properties via `holds`/`ensuring`. */
object Array8 {

  // Verified property: the array produced by foo() always has length 5.
  def bar(): Boolean = {
    val a = foo()
    a.length == 5
  }.holds

  // Inlined at verification time; postcondition asserts the first element is 0.
  @inline
  def foo(): Array[Int] = {
    Array.fill(5)(0)
  } ensuring { res => res(0) == 0 }
}
| epfl-lara/leon | src/test/resources/regression/verification/xlang/valid/Array8.scala | Scala | gpl-3.0 | 237 |
/*
* Copyright 2016 Combined Conditional Access Development, LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ccadllc.cedi.dtrace
package logging
import cats.effect.{ IO, Sync }
import io.circe._
import io.circe.syntax._
import org.scalacheck.Arbitrary
import org.scalatest.Suite
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import shapeless.Lazy
/** Shared fixtures and JSON-encoding test helpers for the dtrace logging test suites. */
trait TestSupport extends AnyWordSpecLike with Matchers with ScalaCheckPropertyChecks with TraceGenerators with TestData {
  self: Suite =>

  // Use the log-based emitter for all trace systems built in these tests.
  override def testEmitter[F[_]: Sync]: F[TraceSystem.Emitter[F]] = Sync[F].pure(LogEmitter.apply)

  val salesManagementSystem = TraceSystem(testSystemData, testEmitter[IO].unsafeRunSync, TraceSystem.realTimeTimer[IO])

  val calculateQuarterlySalesTraceContext = TraceContext(quarterlySalesCalculationSpan, true, salesManagementSystem)

  // Registers a property test that arbitrary values of A encode to JSON without failing.
  // NOTE(review): `noSpaces` is a String, so `should not be (None)` is always true — confirm intent.
  def encodeGeneratedJson[A: Arbitrary](implicit encoder: Lazy[Encoder[A]]): Unit = {
    implicit val e = encoder.value
    "encode arbitrary instances to JSON" in {
      forAll { (msg: A) => msg.asJson.noSpaces should not be (None) }
    }
  }

  // Registers a test that a specific value encodes to exactly the expected JSON.
  def encodeSpecificJson[A](a: A, json: Json)(implicit encoder: Lazy[Encoder[A]]): Unit = {
    implicit val e = encoder.value
    "encode specific instance to JSON and ensure it matches expected" in { a.asJson shouldBe json }
  }
}
| ccadllc/cedi-dtrace | logging/jvm/src/test/scala/com/ccadllc/cedi/dtrace/logging/TestSupport.scala | Scala | apache-2.0 | 1,952 |
/*
* Copyright 2012 Alexander Bertram
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fhwedel.antscout
package snippet
import net.liftweb.http.NamedCometActorSnippet
/**
* Fügt einen UserInterfaceCometActor zum System hinzu.
*/
/**
 * Adds a UserInterface comet actor to the system.
 */
class AddUserInterfaceCometActor extends NamedCometActorSnippet {

  /**
   * Comet class.
   *
   * @return comet class name
   */
  def cometClass = "UserInterface"

  /**
   * Actor name.
   *
   * @return actor name
   */
  def name = "userInterface"
}
| abertram/AntScout | src/main/scala/de.fhwedel.antscout/snippet/AddUserInterfaceCometActor.scala | Scala | apache-2.0 | 1,010 |
package edu.gemini.spModel.ags
import java.util.Comparator
/**
*
*/
/**
 * Identifies an AGS (automatic guide star) strategy. Each concrete key supplies a
 * stable `id`; the human-readable name is the id with underscores shown as spaces
 * (overridable by individual keys).
 */
sealed trait AgsStrategyKey {
  def id: String
  def displayName: String = id.replace('_', ' ')
}
/** Companion: one case object per guiding strategy, plus id-based lookup helpers. */
object AgsStrategyKey {
  case object AltairAowfsKey extends AgsStrategyKey {
    val id = "ALTAIR"
    override def displayName = "Altair"
  }

  case object Flamingos2OiwfsKey extends AgsStrategyKey {
    val id = "F2_OIWFS"
  }

  case object GemsKey extends AgsStrategyKey {
    val id = "GEMS" // TODO: was GemsAgs in GemsAgsStrategy. Does this matter?
    override def displayName = "GeMS AGS"
  }

  case object GmosNorthOiwfsKey extends AgsStrategyKey {
    val id = "GMOS-N_OIWFS"
  }

  case object GmosSouthOiwfsKey extends AgsStrategyKey {
    val id = "GMOS-S_OIWFS"
  }

  case object GnirsOiwfsKey extends AgsStrategyKey {
    val id = "GNIRS_OIWFS"
  }

  case object GpiOiwfsKey extends AgsStrategyKey {
    val id = "GPI_OIWFS"
  }

  case object NiciOiwfsKey extends AgsStrategyKey {
    val id = "NICI_OIWFS"
  }

  case object NifsOiwfsKey extends AgsStrategyKey {
    val id = "NIFS_OIWFS"
  }

  case object NiriOiwfsKey extends AgsStrategyKey {
    val id = "NIRI_OIWFS"
  }

  case object Pwfs1NorthKey extends AgsStrategyKey {
    val id = "GN_PWFS1"
  }

  case object Pwfs2NorthKey extends AgsStrategyKey {
    val id = "GN_PWFS2"
  }

  case object Pwfs1SouthKey extends AgsStrategyKey {
    val id = "GS_PWFS1"
  }

  case object Pwfs2SouthKey extends AgsStrategyKey {
    val id = "GS_PWFS2"
  }

  // All known keys. NOTE(review): GpiOiwfsKey is defined above but absent from this
  // list (and hence from AllMap lookups) — confirm whether that is intentional.
  val All = List(
    AltairAowfsKey,
    Flamingos2OiwfsKey,
    GemsKey,
    GmosNorthOiwfsKey,
    GmosSouthOiwfsKey,
    GnirsOiwfsKey,
    NiciOiwfsKey,
    NifsOiwfsKey,
    NiriOiwfsKey,
    Pwfs1NorthKey,
    Pwfs2NorthKey,
    Pwfs1SouthKey,
    Pwfs2SouthKey
  )

  // Lookup table from id string to key.
  val AllMap = All.map(k => k.id -> k).toMap

  def fromString(s: String): Option[AgsStrategyKey] = AllMap.get(s)
  def fromStringOrNull(s: String): AgsStrategyKey = fromString(s).orNull

  // For use from Java.
  object DisplayNameComparator extends Comparator[AgsStrategyKey] {
    override def compare(k1: AgsStrategyKey, k2: AgsStrategyKey): Int =
      k1.displayName.compareTo(k2.displayName)
  }
}
| arturog8m/ocs | bundle/edu.gemini.pot/src/main/scala/edu/gemini/spModel/ags/AgsStrategyKey.scala | Scala | bsd-3-clause | 2,184 |
package sbt
import java.io.PrintWriter
/** Factory methods for sbt's standard logger setup (console + file-backed + extras). */
object MainLogging
{
  /**
   * Combines the console, backing and extra loggers from `config` into a single
   * MultiLogger, applying the configured levels and trace settings to each.
   */
  def multiLogger(config: MultiLoggerConfig): Logger =
  {
    import config._
    val multi = new MultiLogger(console :: backed :: extra)
      // sets multi to the most verbose for clients that inspect the current level
    multi setLevel Level.unionAll(backingLevel :: screenLevel :: extra.map(_.getLevel))
      // set the specific levels
    console setLevel screenLevel
    backed setLevel backingLevel
    console setTrace screenTrace
    backed setTrace backingTrace
    multi: Logger
  }

  def globalDefault(writer: PrintWriter, backing: GlobalLogBacking): GlobalLogging =
    globalDefault(writer, backing, ConsoleLogger.systemOut)

  /** Builds the default global logging: a file-backed logger plus a console logger. */
  def globalDefault(writer: PrintWriter, backing: GlobalLogBacking, console: ConsoleOut): GlobalLogging =
  {
    val backed = defaultBacked()(writer)
    val full = multiLogger(defaultMultiConfig(console, backed ) )
    GlobalLogging(full, backed, backing)
  }

  @deprecated("Explicitly specify the console output.", "0.13.0")
  def defaultMultiConfig(backing: AbstractLogger): MultiLoggerConfig =
    defaultMultiConfig(ConsoleLogger.systemOut, backing)

  // Defaults: screen at Info, backing file at Debug; -1 screen trace means "use default depth".
  def defaultMultiConfig(console: ConsoleOut, backing: AbstractLogger): MultiLoggerConfig =
    new MultiLoggerConfig(defaultScreen(console, ConsoleLogger.noSuppressedMessage), backing, Nil, Level.Info, Level.Debug, -1, Int.MaxValue)

  @deprecated("Explicitly specify the console output.", "0.13.0")
  def defaultScreen(): AbstractLogger = ConsoleLogger()

  @deprecated("Explicitly specify the console output.", "0.13.0")
  def defaultScreen(suppressedMessage: SuppressedTraceContext => Option[String]): AbstractLogger = ConsoleLogger(suppressedMessage = suppressedMessage)

  def defaultScreen(console: ConsoleOut): AbstractLogger = ConsoleLogger(console)

  def defaultScreen(console: ConsoleOut, suppressedMessage: SuppressedTraceContext => Option[String]): AbstractLogger =
    ConsoleLogger(console, suppressedMessage = suppressedMessage)

  /** Logger writing to the given PrintWriter, e.g. the backing log file. */
  def defaultBacked(useColor: Boolean = ConsoleLogger.formatEnabled): PrintWriter => ConsoleLogger =
    to => ConsoleLogger(ConsoleLogger.printWriterOut(to), useColor = useColor)
}
/** Configuration bundle for MainLogging.multiLogger: the component loggers plus per-target levels and trace depths. */
final case class MultiLoggerConfig(console: AbstractLogger, backed: AbstractLogger, extra: List[AbstractLogger],
  screenLevel: Level.Value, backingLevel: Level.Value, screenTrace: Int, backingTrace: Int)
object A {
def x(i: Int) = 3
}
| xeno-by/old-scalameta-sbt | sbt/src/sbt-test/actions/compile/A.scala | Scala | bsd-3-clause | 32 |
package ch.ethz.dalab.dissolve.optimization
import java.io.File
import java.io.PrintWriter
import breeze.linalg._
import breeze.linalg.DenseVector
import breeze.linalg.Vector
import breeze.linalg.csvwrite
import breeze.numerics._
import ch.ethz.dalab.dissolve.classification.StructSVMModel
import ch.ethz.dalab.dissolve.regression.LabeledObject
/**
* Train a structured SVM using standard Stochastic (Sub)Gradient Descent (SGD).
* The implementation here is single machine, not distributed.
*
* Input:
* Each data point (x_i, y_i) is composed of:
* x_i, the data example
* y_i, the label
*
* @param <X> type for the data examples
* @param <Y> type for the labels of each example
*/
class SSGSolver[X, Y](
    val data: Seq[LabeledObject[X, Y]],
    val dissolveFunctions: DissolveFunctions[X, Y],
    val solverOptions: SolverOptions[X, Y]) {

  // Number of passes over the data and regularization strength.
  val roundLimit = solverOptions.roundLimit
  val lambda = solverOptions.lambda
  val debugOn: Boolean = solverOptions.debug
  // Base factor of the 1/(gamma0*(k+1)) step-size schedule.
  val gamma0 = solverOptions.ssg_gamma0

  // Eta-expanded references to the problem-specific functions.
  val maxOracle = dissolveFunctions.oracleFn _
  val phi = dissolveFunctions.featureFn _
  val lossFn = dissolveFunctions.lossFn _
  val cWeight = dissolveFunctions.classWeights _

  // Number of dimensions of \\phi(x, y)
  val ndims: Int = phi(data(0).pattern, data(0).label).size

  // Filenames
  val lossWriterFileName = "data/debug/ssg-loss.csv"

  /**
   * SSG optimizer: runs `roundLimit` sequential passes of stochastic subgradient
   * descent over `data` and returns the trained model.
   * NOTE(review): `lossWriter` is never closed/flushed, and `k: Integer` boxes
   * on every increment — candidates for cleanup.
   */
  def optimize(): StructSVMModel[X, Y] = {

    var k: Integer = 0
    val n: Int = data.length
    val d: Int = phi(data(0).pattern, data(0).label).size
    // Use first example to determine dimension of w
    val model: StructSVMModel[X, Y] = new StructSVMModel(DenseVector.zeros(phi(data(0).pattern, data(0).label).size),
      0.0,
      DenseVector.zeros(ndims),
      dissolveFunctions)

    // Initialization in case of Weighted Averaging
    var wAvg: DenseVector[Double] =
      if (solverOptions.doWeightedAveraging)
        DenseVector.zeros(d)
      else null

    // debugMultiplier == 0 means "debug once per pass"; otherwise debug points grow geometrically.
    var debugIter = if (solverOptions.debugMultiplier == 0) {
      solverOptions.debugMultiplier = 100
      n
    } else {
      1
    }
    val debugModel: StructSVMModel[X, Y] = new StructSVMModel(DenseVector.zeros(d), 0.0, DenseVector.zeros(ndims), dissolveFunctions)

    val lossWriter = if (solverOptions.debug) new PrintWriter(new File(lossWriterFileName)) else null
    if (solverOptions.debug) {
      if (solverOptions.testData != null)
        lossWriter.write("pass_num,iter,primal,dual,duality_gap,train_error,test_error\\n")
      else
        lossWriter.write("pass_num,iter,primal,dual,duality_gap,train_error\\n")
    }

    if (debugOn) {
      println("Beginning training of %d data points in %d passes with lambda=%f".format(n, roundLimit, lambda))
    }

    for (passNum <- 0 until roundLimit) {

      if (debugOn)
        println("Starting pass #%d".format(passNum))

      for (dummy <- 0 until n) {
        // 1) Pick example (deterministic in-order sweep, not random sampling)
        val i: Int = dummy
        val pattern: X = data(i).pattern
        val label: Y = data(i).label

        // 2) Solve loss-augmented inference for point i
        val ystar_i: Y = maxOracle(model, pattern, label)

        // 3) Get the subgradient (class-weighted feature difference, scaled by 1/(n*lambda))
        val psi_i: Vector[Double] = (phi(pattern, label) - phi(pattern, ystar_i))*cWeight(label)
        val w_s: Vector[Double] = psi_i :* (1 / (n * lambda))

        if (debugOn && dummy == (n - 1))
          csvwrite(new File("data/debug/scala-w-%d.csv".format(passNum + 1)), w_s.toDenseVector.toDenseMatrix)

        // 4) Step size gamma
        val gamma: Double = 1.0 / (gamma0*(k + 1.0))

        // 5) Update the weights of the model
        val newWeights: Vector[Double] = (model.getWeights() :* (1 - gamma)) + (w_s :* (gamma * n))
        model.setWeights(newWeights)

        k = k + 1

        // Optional weighted (Polyak-style) averaging of the iterates.
        if (solverOptions.doWeightedAveraging) {
          val rho: Double = 2.0 / (k + 2.0)
          wAvg = wAvg * (1.0 - rho) + model.getWeights() * rho
        }

        // Periodic debug evaluation: primal objective plus train (and optional test) error.
        if (debugOn && k >= debugIter) {
          if (solverOptions.doWeightedAveraging) {
            debugModel.setWeights(wAvg)
          } else {
            debugModel.setWeights(model.getWeights)
          }

          val primal = SolverUtils.primalObjective(data, dissolveFunctions, debugModel, lambda)
          val trainError = SolverUtils.averageLoss(data, dissolveFunctions, debugModel)._1
          if (solverOptions.testData != null) {
            val testError =
              if (solverOptions.testData.isDefined)
                SolverUtils.averageLoss(solverOptions.testData.get, dissolveFunctions, debugModel)._1
              else
                0.00
            println("Pass %d Iteration %d, SVM primal = %f, Train error = %f, Test error = %f"
              .format(passNum + 1, k, primal, trainError, testError))
            if (solverOptions.debug)
              lossWriter.write("%d,%d,%f,%f,%f\\n".format(passNum + 1, k, primal, trainError, testError))
          } else {
            println("Pass %d Iteration %d, SVM primal = %f, Train error = %f"
              .format(passNum + 1, k, primal, trainError))
            if (solverOptions.debug)
              lossWriter.write("%d,%d,%f,%f,\\n".format(passNum + 1, k, primal, trainError))
          }

          debugIter = min(debugIter + n, ceil(debugIter * (1 + solverOptions.debugMultiplier / 100)))
        }

      }
      if (debugOn)
        println("Completed pass #%d".format(passNum))

    }

    return model
  }
}
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.