code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.twitter.finagle.memcached.unit
import com.twitter.finagle.memcached.MockClient
import com.twitter.finagle.memcached.protocol.ClientError
import com.twitter.util.Await
import org.specs.SpecificationWithJUnit
// Unit tests for MockClient, an in-memory fake of the memcached client.
// Every example starts from a fresh client seeded with a small map and
// drives one memcached command through the string-typed view (withStrings).
class MockClientSpec extends SpecificationWithJUnit {
  "MockClient" should {
    // get: returns the stored value; a missing key yields None.
    "get" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.get("key")) must beSome("value")
      Await.result(memcache.get("unknown")) must be_==(None)
    }
    // set: unconditionally stores, both creating and overwriting entries.
    "set" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.set("key", "new value")) must be_==(())
      Await.result(memcache.get("key")) must beSome("new value")
      Await.result(memcache.set("key2", "value2")) must be_==(())
      Await.result(memcache.get("key2")) must beSome("value2")
      Await.result(memcache.set("key2", "value3")) must be_==(())
      Await.result(memcache.get("key2")) must beSome("value3")
    }
    // add: stores only when the key is absent; an existing value is untouched.
    "add" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.add("key", "new value")) must beFalse
      Await.result(memcache.get("key")) must beSome("value")
      Await.result(memcache.add("key2", "value2")) must beTrue
      Await.result(memcache.get("key2")) must beSome("value2")
      Await.result(memcache.add("key2", "value3")) must beFalse
      Await.result(memcache.get("key2")) must beSome("value2")
    }
    // append: concatenates after an existing value; fails on a missing key.
    "append" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.append("key", "More")) must beTrue
      Await.result(memcache.get("key")) must beSome("valueMore")
      Await.result(memcache.append("unknown", "value")) must beFalse
      Await.result(memcache.get("unknown")) must beNone
    }
    // prepend: concatenates before an existing value; fails on a missing key.
    "prepend" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.prepend("key", "More")) must beTrue
      Await.result(memcache.get("key")) must beSome("Morevalue")
      Await.result(memcache.prepend("unknown", "value")) must beFalse
      Await.result(memcache.get("unknown")) must beNone
    }
    // replace: overwrites only existing keys; never creates new ones.
    "replace" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.replace("key", "new value")) must beTrue
      Await.result(memcache.get("key")) must beSome("new value")
      Await.result(memcache.replace("unknown", "value")) must beFalse
      Await.result(memcache.get("unknown")) must beNone
    }
    // delete: removes existing keys; deleting a missing key reports false.
    "delete" in {
      val memcache = new MockClient(Map("key" -> "value")).withStrings
      Await.result(memcache.delete("key")) must beTrue
      Await.result(memcache.get("key")) must beNone
      Await.result(memcache.delete("unknown")) must beFalse
      Await.result(memcache.get("unknown")) must beNone
    }
    // incr: non-numeric values raise ClientError, numeric values are
    // incremented in place, and a missing key yields None without creating it.
    "incr" in {
      val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings
      Await.result(memcache.incr("key")) must throwA[ClientError]
      Await.result(memcache.get("key")) must beSome("value")
      Await.result(memcache.incr("count")) must beSome(2)
      Await.result(memcache.get("count")) must beSome("2")
      Await.result(memcache.incr("unknown")) must beNone
      Await.result(memcache.get("unknown")) must beNone
    }
    // decr: mirrors incr, and (per memcached semantics shown here) the
    // counter saturates at 0 instead of going negative.
    "decr" in {
      val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings
      Await.result(memcache.decr("key")) must throwA[ClientError]
      Await.result(memcache.get("key")) must beSome("value")
      Await.result(memcache.decr("count")) must beSome(0)
      Await.result(memcache.get("count")) must beSome("0")
      Await.result(memcache.decr("count")) must beSome(0)
      Await.result(memcache.get("count")) must beSome("0")
      Await.result(memcache.decr("unknown")) must beNone
      Await.result(memcache.get("unknown")) must beNone
    }
  }
}
| firebase/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/MockClientSpec.scala | Scala | apache-2.0 | 4,443 |
package net.yefremov.sleipnirsample.wireformat
import com.linkedin.data.DataMap
import com.linkedin.data.template.{DataTemplateUtil, JacksonDataTemplateCodec, RecordTemplate}
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.iteratee.{Done, Iteratee, Traversable}
import play.api.mvc.Results._
import play.api.mvc.{BodyParser, BodyParsers, RequestHeader, Result}
import scala.reflect.ClassTag
/**
* A body parser for instances of [[RecordTemplate]].
*/
/**
 * A Play [[BodyParser]] that reads at most `maxBodySize` bytes of the request
 * body, decodes them as JSON with a Pegasus codec and wraps the resulting
 * [[DataMap]] into a [[RecordTemplate]] of type `A`. Bodies exceeding the
 * limit are answered with `413 Entity Too Large` (the `Left` branch).
 *
 * @param maxBodySize maximum accepted body size in bytes
 * @tparam A concrete record type, recovered at runtime via its [[ClassTag]]
 */
class RecordTemplateParser[A <: RecordTemplate : ClassTag](maxBodySize: Int = BodyParsers.parse.DEFAULT_MAX_TEXT_LENGTH) extends BodyParser[A] {

  /** Wraps a raw Pegasus [[DataMap]] into the concrete record type `A`. */
  private def toRecord(dataMap: DataMap): A = {
    val clazz = implicitly[ClassTag[A]].runtimeClass.asInstanceOf[Class[A]]
    DataTemplateUtil.wrap(dataMap, clazz)
  }

  def apply(requestHeader: RequestHeader): Iteratee[Array[Byte], Either[Result, A]] = {
    Traversable.takeUpTo[Array[Byte]](maxBodySize)
      .transform(Iteratee.consume[Array[Byte]]().map(RecordTemplateParser.codec.bytesToMap _ andThen toRecord))
      .flatMap(Iteratee.eofOrElse(EntityTooLarge))
      // The previous code pattern-matched Left/Right separately only to
      // re-wrap the identical value in Done; both branches were the same,
      // so the match is collapsed into a single re-wrap.
      .flatMap(data => Done(data))
  }
}
object RecordTemplateParser {
  // Shared Jackson-backed codec used to deserialize request bodies into DataMaps.
  private val codec = new JacksonDataTemplateCodec()
}
| dmitriy-yefremov/sleipnir-sample | app/net/yefremov/sleipnirsample/wireformat/RecordTemplateParser.scala | Scala | apache-2.0 | 1,303 |
package scalapb.proptest
import scala.tools.nsc._
import java.io.File
object CompilerInterface {

  /**
   * Compiles the given Scala sources with the embedded scalac compiler.
   *
   * @param scalaFiles source files to compile
   * @param classPath  classpath entries made visible to the compiler
   * @param outDir     directory that receives the generated class files
   * @throws RuntimeException when the compiler reports an error (see the
   *                          error handler installed on [[Settings]])
   */
  def compile(scalaFiles: Seq[File], classPath: Seq[String], outDir: File): Unit = {
    val s = new Settings(error => throw new RuntimeException(error))
    val breakCycles: Seq[String] = Seq("-Ybreak-cycles")
    // Join the classpath with the platform-specific separator so this also
    // works on Windows; the previous hard-coded ":" was Unix-only.
    s.processArgumentString(
      s"""-cp "${classPath.mkString(File.pathSeparator)}" ${breakCycles.mkString(" ")} -d "$outDir""""
    )
    val g = new Global(s)
    val run = new g.Run
    run.compile(scalaFiles.map(_.toString).toList)
  }
}
| trueaccord/ScalaPB | proptest/src/test/scala-2/scalapb/proptest/CompilerInterface.scala | Scala | apache-2.0 | 570 |
package akka.ainterface.test.arbitrary
import akka.io.Tcp.{Aborted, Closed, ConfirmedClosed, ConnectionClosed, ErrorClosed, PeerClosed}
import org.scalacheck.{Arbitrary, Gen}
import scodec.bits.BitVector
// ScalaCheck generators shared by ainterface test suites.
trait BitVectorArbitrary {
  // Arbitrary BitVector: random bytes truncated to a random positive bit
  // length (take operates on bits, so the vector need not be byte-aligned).
  implicit val arbBitVector: Arbitrary[BitVector] = Arbitrary {
    for {
      bytes <- Arbitrary.arbitrary[Array[Byte]]
      len <- Gen.posNum[Int]
    } yield BitVector(bytes).take(len)
  }
  // Arbitrary Akka Tcp ConnectionClosed event covering every close variant.
  implicit val arbConnectionClosed: Arbitrary[ConnectionClosed] = Arbitrary {
    Gen.oneOf(Closed, Aborted, ConfirmedClosed, PeerClosed, ErrorClosed("error"))
  }
}
| ainterface/ainterface | ainterface/src/test/scala/akka/ainterface/test/arbitrary/BitVectorArbitrary.scala | Scala | apache-2.0 | 594 |
package ohnosequences.nispero.worker
import ohnosequences.nispero._
import ohnosequences.awstools.sqs.Message
import ohnosequences.awstools.sns.Topic
import ohnosequences.awstools.sqs.Queue
import org.clapper.avsl.Logger
import java.io.File
import ohnosequences.nispero.utils.{JSON, Utils}
import scala.concurrent.Future
/**
 * Worker loop: pulls tasks from the input SQS queue, runs the configured
 * [[Instructions]] on each one, and publishes the outcome to the output SNS
 * topic (success) or the error topic (failure). On a fatal error or task
 * timeout the current EC2 instance terminates itself.
 */
class InstructionsExecutor(config: Config, instructions: Instructions, val awsClients: AWSClients) {

  // Poll interval (milliseconds) while the input queue is empty.
  val MESSAGE_TIMEOUT = 5000

  import awsClients._

  val logger = Logger(this.getClass)
  val instance = ec2.getCurrentInstance

  // Flipped by terminate() to stop the main loop in run(); volatile because
  // it may be written from a different thread than the one polling it.
  @volatile var stopped = false

  /** Blocks until a message is available, tagging the instance IDLE while waiting. */
  def waitForTask(queue: Queue): Message = {
    var message: Option[Message] = queue.receiveMessage
    while(message.isEmpty) {
      logger.info("InstructionsExecutor wait for task")
      instance.foreach(_.createTag(InstanceTags.IDLE))
      Thread.sleep(MESSAGE_TIMEOUT)
      message = queue.receiveMessage
    }
    message.get
  }

  /**
   * Polls `futureResult` until it completes or `config.taskProcessTimeout`
   * seconds elapse, periodically extending the SQS message visibility timeout
   * so the task is not redelivered mid-processing. On timeout the instance is
   * terminated and a Failure result is returned.
   *
   * @return the task result paired with the seconds spent waiting
   */
  def waitForResult(futureResult: Future[TaskResult], message: Message): (TaskResult, Int) = {
    val startTime = System.currentTimeMillis()
    val step = 500
    def timeSpent(): Int = {
      val currentTime = System.currentTimeMillis()
      ((currentTime - startTime) / 1000).toInt
    }
    var stopWaiting = false
    var it = 1
    var taskResult: TaskResult = Failure("internal error during waiting for task result")
    while(!stopWaiting) {
      it += 1;
      if(timeSpent() > config.taskProcessTimeout) {
        stopWaiting = true
        taskResult = Failure("timeout: " + timeSpent + " > visibilityTimeoutLimit")
        terminate()
      } else {
        futureResult.value match {
          case None => {
            logger.info("solving task: " + Utils.printInterval(timeSpent()))
            // Roughly every 30s of polling, push the visibility timeout out
            // so SQS does not hand the message to another worker.
            if(it % 60==0) {
              message.changeVisibilityTimeout((step / 1000) * 2)
            }
            Thread.sleep(step)
          }
          case Some(scala.util.Success(r)) => stopWaiting = true; taskResult = r
          case Some(scala.util.Failure(t)) => stopWaiting = true; taskResult = Failure("future error: " + t.getMessage)
        }
      }
    }
    (taskResult, timeSpent())
  }

  /** Stops the main loop and terminates this EC2 instance. */
  def terminate() {
    stopped = true
    instance.foreach(_.createTag(InstanceTags.FINISHING))
    logger.info("terminating")
    instance.foreach(_.terminate())
  }

  /** Main worker loop; runs until terminate() is called. */
  def run() {
    logger.info("InstructionsExecutor started at " + instance.map(_.getInstanceId))
    val inputQueue = sqs.getQueueByName(config.resources.inputQueue).get
    val outputTopic = sns.createTopic(config.resources.outputTopic)
    val errorTopic = sns.createTopic(config.resources.errorTopic)
    while(!stopped) {
      var taskId = ""
      var lastTimeSpent = 0
      try {
        val message = waitForTask(inputQueue)
        instance.foreach(_.createTag(InstanceTags.PROCESSING))
        logger.info("InstructionsExecutor: received message " + message)
        val task = JSON.parse[Task](message.body)
        taskId = task.id
        logger.info("InstructionsExecutor processing message")
        // BUG FIX: instructions.execute(...) was previously also invoked
        // synchronously right here, before the asynchronous call below —
        // every task ran twice, and the blocking call bypassed the timeout
        // handling in waitForResult. Only the asynchronous run is kept.
        import scala.concurrent.ExecutionContext.Implicits._
        val futureResult = scala.concurrent.future {
          instructions.execute(s3, task, new File(config.workersDir))
        }
        val (taskResult, timeSpent) = waitForResult(futureResult, message)
        lastTimeSpent = timeSpent
        logger.info("task result: " + taskResult)
        val taskResultDescription = TaskResultDescription(
          id = task.id,
          message = taskResult.message,
          instanceId = instance.map(_.getInstanceId()),
          time = timeSpent
        )
        logger.info("publishing result to topic")
        taskResult match {
          case Success(msg) => {
            outputTopic.publish(JSON.toJson(taskResultDescription.copy(message = msg)))
            // Delete only on success so failed tasks become visible again.
            logger.info("InstructionsExecutor deleting message with from input queue")
            inputQueue.deleteMessage(message)
          }
          case Failure(msg) => {
            errorTopic.publish(JSON.toJson(taskResultDescription.copy(message = msg)))
          }
        }
      } catch {
        case e: Throwable => {
          // Any unexpected error is reported to the error topic and the
          // instance shuts itself down rather than looping on a broken state.
          logger.error("fatal error instance will terminated")
          e.printStackTrace()
          val taskResultDescription = TaskResultDescription(
            id = taskId,
            message = e.getMessage,
            instanceId = instance.map(_.getInstanceId()),
            time = lastTimeSpent
          )
          errorTopic.publish(JSON.toJson(taskResultDescription))
          terminate()
        }
      }
    }
  }
}
| ohnosequences/nispero | nispero-abstract/src/main/scala/ohnosequences/nispero/worker/InstructionsExecutor.scala | Scala | agpl-3.0 | 4,670 |
/*
* Copyright (c) 2014-2019 Israel Herraiz <isra@herraiz.org>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// ---------------------
// Code for example 3.15
// ---------------------
package chap03
object Ex15 {
  /** Flattens a list of lists into a single list (exercise 3.15). */
  def concatenate[A](ls: List[List[A]]): List[A] =
    // foldRight would work equally well here; only the fold direction differs.
    Ex10.foldLeft(ls, Nil: List[A])((acc, next) => Ex14.append(acc, next))
}
| iht/fpinscala | src/main/scala/chap03/ex15.scala | Scala | mit | 1,458 |
package models.elasticsearch
import models.analysis.ActorStamp
import no.uio.musit.formatters.WithDateTimeFormatters
import no.uio.musit.models.ActorId
import org.joda.time.DateTime
import play.api.libs.json.{Json, Writes}
/**
 * Search-document representations of actors: each pairs an [[ActorId]] with a
 * display name resolved through an ActorNames lookup, ready for JSON
 * serialization into Elasticsearch documents.
 */
object Actors extends WithDateTimeFormatters {
  // An (actor, date) stamp enriched with the actor's display name, if known.
  case class ActorSearchStamp(
    id: ActorId,
    date: DateTime,
    name: Option[String]
  )
  object ActorSearchStamp {
    implicit val writes: Writes[ActorSearchStamp] = Json.writes[ActorSearchStamp]
    // Builds a stamp only when both the id and the date are present.
    def apply(
      idOpt: Option[ActorId],
      dateOpt: Option[DateTime],
      actorNames: ActorNames
    ): Option[ActorSearchStamp] =
      for {
        id <- idOpt
        date <- dateOpt
      } yield ActorSearchStamp(id, date, actorNames.nameFor(id))
    // Converts an analysis ActorStamp, resolving the user's display name.
    def apply(as: ActorStamp, actorNames: ActorNames): ActorSearchStamp =
      ActorSearchStamp(as.user, as.date, actorNames.nameFor(as.user))
  }
  // An actor id plus its resolved name, without a timestamp.
  case class ActorSearch(
    id: ActorId,
    name: Option[String]
  )
  object ActorSearch {
    implicit val writes: Writes[ActorSearch] = Json.writes[ActorSearch]
  }
}
| MUSIT-Norway/musit | service_backend/app/models/elasticsearch/Actors.scala | Scala | gpl-2.0 | 1,086 |
package com.datawizards.sparklocal.rdd
import com.datawizards.sparklocal.SparkLocalBaseTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AggregateTest extends SparkLocalBaseTest {
  // Shared fixture: three (label, value) pairs whose values multiply to 6.
  val data = Seq(("a",1),("b",2),("c",3))
  // aggregate with zero element 1, seqOp multiplying by each pair's value and
  // combOp multiplying partial products: expected 1*1*2*3 = 6.
  test("Aggregate result") {
    assert(RDDAPI(data).aggregate(1)({case (a,p) => a * p._2}, _ * _) == 6)
  }
  // The local (Scala collection) and Spark-backed RDD wrappers must agree.
  test("Aggregate equal") {
    assertRDDOperationReturnsSameResult(data){
      rdd => rdd.aggregate(1)({case (a,p) => a * p._2}, _ * _)
    }
  }
} | piotr-kalanski/spark-local | src/test/scala/com/datawizards/sparklocal/rdd/AggregateTest.scala | Scala | apache-2.0 | 551 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.connector.mongodb
import java.util.regex.Pattern
import com.mongodb.casbah.Imports._
import com.mongodb.DBObject
import com.mongodb.QueryBuilder
import com.stratio.datasource.Config
import com.stratio.datasource.mongodb.MongodbConfig
import com.stratio.datasource.mongodb.schema.MongodbRowConverter._
import com.stratio.datasource.mongodb.MongodbRelation._
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{Limit => LogicalLimit, LogicalPlan}
import org.apache.spark.sql.sources.CatalystToCrossdataAdapter.{BaseLogicalPlan, FilterReport, SimpleLogicalPlan}
import org.apache.spark.sql.sources.CatalystToCrossdataAdapter
import org.apache.spark.sql.sources.{Filter => SourceFilter}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.Row
import org.apache.spark.sql.sources
object MongoQueryProcessor {

  // Maximum number of documents fetched when the plan carries no LIMIT.
  val DefaultLimit = 10000
  type ColumnName = String
  type Limit = Option[Int]

  // A logical plan validated as natively executable by MongoDB, together with
  // an optional row limit.
  case class MongoPlan(basePlan: BaseLogicalPlan, limit: Limit){
    def projects: Seq[NamedExpression] = basePlan.projects
    def filters: Array[SourceFilter] = basePlan.filters
  }

  def apply(logicalPlan: LogicalPlan, config: Config, schemaProvided: Option[StructType] = None) = new MongoQueryProcessor(logicalPlan, config, schemaProvided)

  /**
   * Translates required columns and pushed-down filters into the pair of
   * DBObjects expected by MongoDB's `find`: (query document, projection).
   */
  def buildNativeQuery(
      requiredColums: Seq[ColumnName],
      filters: Array[SourceFilter],
      config: Config,
      name2randomAccess: Map[String, GetArrayItem] = Map.empty
  ): (DBObject, DBObject) = {
    (filtersToDBObject(filters, name2randomAccess)(config), selectFields(requiredColums))
  }

  /**
   * Recursively converts Spark source filters into a MongoDB query document.
   *
   * @param parentFilterIsNot true while translating the child of a `Not`
   *                          filter; it wraps the produced condition in $not
   *                          and disables nested And/Or/string matchers.
   */
  def filtersToDBObject(
      sFilters: Array[SourceFilter],
      name2randomAccess: Map[String, GetArrayItem],
      parentFilterIsNot: Boolean = false
  )(implicit config: Config): DBObject = {
    // Rewrites an attribute such as `arr[3]` into Mongo dot notation `arr.3`
    // for random accesses into array columns.
    def attstr2left(att: String): String =
      name2randomAccess.get(att).map {
        case GetArrayItem(att: AttributeReference, ordinal) =>
          s"${att.name}.${ordinal.toString}"
      } getOrElse att
    val queryBuilder: QueryBuilder = QueryBuilder.start
    if (parentFilterIsNot) queryBuilder.not()
    sFilters.foreach {
      case sources.EqualTo(attribute, value) =>
        queryBuilder.put(attstr2left(attribute)).is(correctIdValue(attribute, value))
      case sources.GreaterThan(attribute, value) =>
        queryBuilder.put(attstr2left(attribute)).greaterThan(correctIdValue(attribute, value))
      case sources.GreaterThanOrEqual(attribute, value) =>
        queryBuilder.put(attstr2left(attribute)).greaterThanEquals(correctIdValue(attribute, value))
      case sources.In(attribute, values) =>
        queryBuilder.put(attstr2left(attribute)).in(values.map(value => correctIdValue(attribute, value)))
      case sources.LessThan(attribute, value) =>
        queryBuilder.put(attstr2left(attribute)).lessThan(correctIdValue(attribute, value))
      case sources.LessThanOrEqual(attribute, value) =>
        queryBuilder.put(attstr2left(attribute)).lessThanEquals(correctIdValue(attribute, value))
      case sources.IsNull(attribute) =>
        queryBuilder.put(attstr2left(attribute)).is(null)
      case sources.IsNotNull(attribute) =>
        queryBuilder.put(attstr2left(attribute)).notEquals(null)
      case sources.And(leftFilter, rightFilter) if !parentFilterIsNot =>
        queryBuilder.and(filtersToDBObject(Array(leftFilter), name2randomAccess),
          filtersToDBObject(Array(rightFilter),name2randomAccess))
      case sources.Or(leftFilter, rightFilter) if !parentFilterIsNot =>
        queryBuilder.or(filtersToDBObject(Array(leftFilter),name2randomAccess),
          filtersToDBObject(Array(rightFilter), name2randomAccess))
      case sources.StringStartsWith(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attstr2left(attribute)).regex(Pattern.compile("^" + value + ".*$"))
      case sources.StringEndsWith(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attstr2left(attribute)).regex(Pattern.compile("^.*" + value + "$"))
      case sources.StringContains(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attstr2left(attribute)).regex(Pattern.compile(".*" + value + ".*"))
      case sources.Not(filter) =>
        filtersToDBObject(Array(filter), name2randomAccess, true)
    }
    queryBuilder.get
  }

  /**
   * Check if the field is "_id" and if the user wants to filter by this field as an ObjectId
   *
   * @param attribute Name of the file
   * @param value Value for the attribute
   * @return The value in the correct data type
   */
  private def correctIdValue(attribute: String, value: Any)(implicit config: Config) : Any = {
    val idAsObjectId: Boolean = config.getOrElse[String](MongodbConfig.IdAsObjectId, MongodbConfig.DefaultIdAsObjectId).equalsIgnoreCase("true")
    attribute match {
      case "_id" if idAsObjectId => new ObjectId(value.toString)
      case _ => value
    }
  }

  /**
   * Prepared DBObject used to specify required fields in mongodb 'find'
   * @param fields Required fields
   * @return A mongodb object that represents required fields.
   */
  private def selectFields(fields: Seq[ColumnName]): DBObject =
  {
    // "_id" is returned by MongoDB unless explicitly excluded (0), so it is
    // included (1) only when requested.
    MongoDBObject(
      fields.toList.filterNot(_ == "_id").map(_ -> 1) ::: {
        List("_id" -> fields.find(_ == "_id").fold(0)(_ => 1))
      })
    /*
    For random accesses to array columns elements, a performance improvement is doable
    by querying MongoDB in a way that would only select a size-1 slice of the accessed array thanks to
    the "$slice" operator. However this operator can only be used once for each column in a projection
    which implies that several accesses (e.g: SELECT arraystring[0] as first, arraystring[3] as fourth FROM MONGO_T)
    would require to implement an smart "$slice" use selecting the minimum slice containing all requested elements.
    That requires way too much effort when the performance boost is taken into consideration.
    */
  }
}
// TODO logs, doc, tests
class MongoQueryProcessor(logicalPlan: LogicalPlan, config: Config, schemaProvided: Option[StructType] = None) extends Logging {

  import MongoQueryProcessor._

  /**
   * Executes the logical plan natively against MongoDB when possible.
   *
   * @return Some(rows) when the plan was executed natively; None when no
   *         schema was provided, the plan is not natively executable, or
   *         execution failed (the failure is logged and the caller is
   *         expected to fall back to the regular Spark path).
   */
  def execute(): Option[Array[Row]] = {
    // TODO convert to Spark result using an iterator with batches instead of an array
    if (schemaProvided.isEmpty) {
      None
    } else {
      try {
        validatedNativePlan.map { case MongoPlan(bs: SimpleLogicalPlan, limit) =>
          // LIMIT 0 short-circuits: no query is issued at all.
          if (limit.exists(_ == 0)) {
            Array.empty[Row]
          } else {
            // Map attribute names with random array accesses (e.g. "a[2]")
            // to their GetArrayItem expressions for query translation.
            val name2randomAccess = bs.collectionRandomAccesses.map {
              case (k, v) => s"${k.name}[${v.right}]" -> v
            }
            val (mongoFilters, mongoRequiredColumns) = buildNativeQuery(
              bs.projects.map(_.name), bs.filters,
              config,
              name2randomAccess
            )
            val resultSet = MongodbConnection.withCollectionDo(config) { collection =>
              logDebug(s"Executing native query: filters => $mongoFilters projects => $mongoRequiredColumns")
              val cursor = collection.find(mongoFilters, mongoRequiredColumns)
              val result = cursor.limit(limit.getOrElse(DefaultLimit)).toArray[DBObject]
              cursor.close()
              result
            }
            sparkResultFromMongodb(bs.projects, bs.collectionRandomAccesses, schemaProvided.get, resultSet)
          }
        }
      } catch {
        case exc: Exception =>
          log.warn(s"Exception executing the native query $logicalPlan", exc.getMessage); None
      }
    }
  }

  // Returns the plan wrapped in MongoPlan when every filter can be pushed
  // down to MongoDB (see checkNativeFilters); None otherwise.
  def validatedNativePlan: Option[_] = {// TODO
    // Outermost LIMIT literal, if any; lazy because it is only needed when a
    // base plan is actually found.
    lazy val limit: Option[Int] = logicalPlan.collectFirst { case LogicalLimit(Literal(num: Int, _), _) => num }
    def findBasePlan(lplan: LogicalPlan): Option[BaseLogicalPlan] = lplan match {
      case LogicalLimit(_, child) =>
        findBasePlan(child)
      case PhysicalOperation(projectList, filterList, _) =>
        CatalystToCrossdataAdapter.getConnectorLogicalPlan(logicalPlan, projectList, filterList) match {
          case (_, FilterReport(filtersIgnored, _)) if filtersIgnored.nonEmpty => None
          case (basePlan: SimpleLogicalPlan, _) =>
            Some(basePlan)
          case _ => ??? // TODO
        }
    }
    findBasePlan(logicalPlan).collect{ case bp if checkNativeFilters(bp.filters) => MongoPlan(bp, limit) }
  }

  // True when every filter (recursively, through And/Or/Not) has a MongoDB
  // translation in MongoQueryProcessor.filtersToDBObject.
  private[this] def checkNativeFilters(filters: Seq[SourceFilter]): Boolean = filters.forall {
    case _: sources.EqualTo => true
    case _: sources.In => true
    case _: sources.LessThan => true
    case _: sources.GreaterThan => true
    case _: sources.LessThanOrEqual => true
    case _: sources.GreaterThanOrEqual => true
    case _: sources.IsNull => true
    case _: sources.IsNotNull => true
    case _: sources.StringStartsWith => true
    case _: sources.StringEndsWith => true
    case _: sources.StringContains => true
    case sources.And(left, right) => checkNativeFilters(Array(left, right))
    case sources.Or(left, right) => checkNativeFilters(Array(left, right))
    case sources.Not(filter) => checkNativeFilters(Array(filter))
    // TODO add more filters
    case _ => false
  }

  // Converts the fetched DBObjects into Spark Rows, pruning the schema down
  // to the requested columns (and requested array element indices).
  private[this] def sparkResultFromMongodb(
      requiredColumns: Seq[Attribute],
      indexAccesses: Map[Attribute, GetArrayItem],
      schema: StructType,
      resultSet: Array[DBObject]
  ): Array[Row] = {
    asRow(
      pruneSchema(
        schema,
        requiredColumns.map(r => r.name -> indexAccesses.get(r).map(_.right.toString().toInt)).toArray
      ),
      resultSet
    )
  }
}
| luismcl/crossdata | mongodb/src/main/scala/com/stratio/crossdata/connector/mongodb/MongoQueryProcessor.scala | Scala | apache-2.0 | 10,798 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.twofishes.indexer.scalding
import com.twitter.scalding._
import com.twitter.scalding.typed.TypedSink
import io.fsq.twofishes.gen.IntermediateDataContainer
import io.fsq.twofishes.indexer.util.SpindleSequenceFileSource
import io.fsq.twofishes.util._
import org.apache.hadoop.io.LongWritable
/**
 * Scalding job template that imports (from, to) feature-id relations from
 * tab- or pipe-delimited text lines, groups the target ids by source id, and
 * writes them out as IntermediateDataContainer sequence files keyed by the
 * source feature's long id.
 *
 * @param fromColumnIndex column holding the source feature id
 * @param toColumnIndex   column holding the target feature id
 * @param lineAcceptor    predicate deciding whether a split line is imported
 */
class BaseRelationsImporterJob(
  name: String,
  fromColumnIndex: Int,
  toColumnIndex: Int,
  lineAcceptor: (Array[String]) => Boolean,
  inputSpec: TwofishesImporterInputSpec,
  args: Args
) extends TwofishesImporterJob(name, inputSpec, args) {
  lines
    // Skip comment lines.
    .filterNot(_.startsWith("#"))
    .flatMap(line => {
      // Columns are separated by tabs or pipes; both ids are parsed in the
      // geonames namespace.
      val parts = line.split("[\\t|]")
      val fromOpt = StoredFeatureId.fromHumanReadableString(parts(fromColumnIndex), Some(GeonamesNamespace))
      val toOpt = StoredFeatureId.fromHumanReadableString(parts(toColumnIndex), Some(GeonamesNamespace))
      if (lineAcceptor(parts)) {
        (fromOpt, toOpt) match {
          case (Some(fromId), Some(toId)) => {
            Some(new LongWritable(fromId.longId), toId.longId)
          }
          case _ => {
            // Unparseable id pairs are silently dropped.
            // logger.error("%s: couldn't parse StoredFeatureId pair".format(line))
            None
          }
        }
      } else {
        None
      }
    })
    .group
    .toList
    // Pack all target ids for a source into a single container record.
    .mapValues({ ids: List[Long] =>
      IntermediateDataContainer.newBuilder.longList(ids).result
    })
    .write(
      TypedSink[(LongWritable, IntermediateDataContainer)](
        SpindleSequenceFileSource[LongWritable, IntermediateDataContainer](outputPath)
      )
    )
}
| foursquare/fsqio | src/jvm/io/fsq/twofishes/indexer/scalding/BaseRelationsImporterJob.scala | Scala | apache-2.0 | 1,615 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.io.data
import org.platanios.tensorflow.api.implicits.helpers.OutputToTensor
import org.platanios.tensorflow.api.ops.{Basic, Function, Op, Output}
/** Dataset that wraps the application of the `map` op.
*
* $OpDocDatasetMap
*
* @param inputDataset Input dataset.
* @param function Mapping function.
* @param name Name for this dataset.
* @tparam T Tensor type (i.e., nested structure of tensors).
* @tparam O Output type (i.e., nested structure of symbolic tensors).
* @tparam D Data type of the outputs (i.e., nested structure of TensorFlow data types).
* @tparam S Shape type of the outputs (i.e., nested structure of TensorFlow shapes).
*
* @author Emmanouil Antonios Platanios
*/
case class MapDataset[T, O, D, S, RT, RO, RD, RS](
    inputDataset: Dataset[T, O, D, S],
    function: (O) => RO,
    override val name: String = "MapDataset"
)(implicit
    evOToT: OutputToTensor.Aux[O, T] = inputDataset.evOToT,
    evData: Data.Aux[T, O, D, S] = inputDataset.evData,
    evFunctionInput: Function.ArgType[O] = inputDataset.evFunctionInput,
    evROToRT: OutputToTensor.Aux[RO, RT],
    evRData: Data.Aux[RT, RO, RD, RS],
    evFunctionOutput: Function.ArgType[RO]
) extends Dataset[RT, RO, RD, RS](name)(evROToRT, evRData, evFunctionOutput) {
  // TensorFlow function instantiated from `function`, specialized to the
  // input dataset's flattened data types and shapes; lazy so it is built
  // once, at graph-construction time.
  private[this] lazy val instantiatedFunction = {
    Function(s"$name/Function", function).instantiate(
      inputDataset.flattenedOutputDataTypes, inputDataset.flattenedOutputShapes,
      appendHashToName = true)
  }
  override def createHandle(): Output = {
    // Builds the "MapDataset" op from the input dataset handle plus any
    // inputs captured by the instantiated mapping function.
    Op.Builder(opType = "MapDataset", name = name)
        .addInput(Op.createWithNameScope(name)(inputDataset.createHandle()))
        .addInputList(instantiatedFunction.extraInputs)
        .setAttribute("f", instantiatedFunction)
        .setAttribute("output_types", flattenedOutputDataTypes.toArray)
        .setAttribute("output_shapes", flattenedOutputShapes.toArray)
        .build().outputs(0)
  }
  // Output data types/shapes are derived from the instantiated function's
  // flattened outputs and unflattened back into the structured RD/RS types.
  private[this] lazy val (_outputDataTypes, _outputShapes): (RD, RS) = {
    val dataTypes = evRData.dataTypesFromO(instantiatedFunction.dummyOutputs)
    (evRData.unflattenDataTypes(dataTypes, instantiatedFunction.outputDataTypes),
        evRData.unflattenShapes(dataTypes, instantiatedFunction.outputShapes))
  }
  override def outputDataTypes: RD = _outputDataTypes
  override def outputShapes: RS = _outputShapes
}
/** Dataset that wraps the application of the `parallelMap` op.
*
* $OpDocDatasetMap
*
* @param inputDataset Input dataset.
* @param function Mapping function.
* @param numParallelCalls Number of concurrent invocations of `function` that process elements from `inputDataset` in
* parallel.
* @param name Name for this dataset.
* @tparam T Tensor type (i.e., nested structure of tensors).
* @tparam O Output type (i.e., nested structure of symbolic tensors).
* @tparam D Data type of the outputs (i.e., nested structure of TensorFlow data types).
* @tparam S Shape type of the outputs (i.e., nested structure of TensorFlow shapes).
*
* @author Emmanouil Antonios Platanios
*/
case class ParallelMapDataset[T, O, D, S, RT, RO, RD, RS](
    inputDataset: Dataset[T, O, D, S],
    function: (O) => RO,
    numParallelCalls: Int,
    override val name: String = "ParallelMapDataset"
)(implicit
    evOToT: OutputToTensor.Aux[O, T] = inputDataset.evOToT,
    evData: Data.Aux[T, O, D, S] = inputDataset.evData,
    evFunctionInput: Function.ArgType[O] = inputDataset.evFunctionInput,
    evROToRT: OutputToTensor.Aux[RO, RT],
    evRData: Data.Aux[RT, RO, RD, RS],
    evFunctionOutput: Function.ArgType[RO]
) extends Dataset[RT, RO, RD, RS](name)(evROToRT, evRData, evFunctionOutput) {
  // TensorFlow function instantiated from `function`, specialized to the
  // input dataset's flattened data types and shapes; built lazily, once.
  private[this] lazy val instantiatedFunction = {
    Function(s"$name/Function", function).instantiate(
      inputDataset.flattenedOutputDataTypes, inputDataset.flattenedOutputShapes,
      appendHashToName = true)
  }
  override def createHandle(): Output = {
    // Same wiring as MapDataset, with an extra scalar input carrying the
    // number of parallel invocations of the mapping function.
    Op.Builder(opType = "ParallelMapDataset", name = name)
        .addInput(Op.createWithNameScope(name)(inputDataset.createHandle()))
        .addInputList(instantiatedFunction.extraInputs)
        .addInput(Op.createWithNameScope(name)(Basic.constant(numParallelCalls, name = "NumParallelCalls")))
        .setAttribute("f", instantiatedFunction)
        .setAttribute("output_types", flattenedOutputDataTypes.toArray)
        .setAttribute("output_shapes", flattenedOutputShapes.toArray)
        .build().outputs(0)
  }
  // Output data types/shapes are derived from the instantiated function's
  // flattened outputs and unflattened back into the structured RD/RS types.
  private[this] lazy val (_outputDataTypes, _outputShapes): (RD, RS) = {
    val dataTypes = evRData.dataTypesFromO(instantiatedFunction.dummyOutputs)
    (evRData.unflattenDataTypes(dataTypes, instantiatedFunction.outputDataTypes),
        evRData.unflattenShapes(dataTypes, instantiatedFunction.outputShapes))
  }
  override def outputDataTypes: RD = _outputDataTypes
  override def outputShapes: RS = _outputShapes
}
object MapDataset {
  // Ops mixed into Dataset to expose the `map` transformation.
  case class MapDatasetOps[T, O, D, S](dataset: Dataset[T, O, D, S]) {
    /** $OpDocDatasetMap
      *
      * @param  function         Mapping function.
      * @param  numParallelCalls Number elements to process in parallel. If not specified, elements will be processed
      *                          sequentially.
      * @param  name             Name for the created dataset.
      * @return Created dataset.
      */
    def map[RT, RO, RD, RS](
        function: (O) => RO,
        numParallelCalls: Int = 1,
        name: String = "Map"
    )(implicit
        evROToRT: OutputToTensor.Aux[RO, RT],
        evRData: Data.Aux[RT, RO, RD, RS],
        evFunctionOutput: Function.ArgType[RO]
    ): Dataset[RT, RO, RD, RS] = {
      Op.createWithNameScope(dataset.name) {
        // More than one concurrent call requires the "ParallelMapDataset"
        // op; otherwise the plain sequential "MapDataset" op is used.
        if (numParallelCalls > 1)
          ParallelMapDataset(dataset, function, numParallelCalls, name)
        else
          MapDataset(dataset, function, name)
      }
    }
  }
  /** @define OpDocDatasetMap
    *   The dataset `map` op creates a new dataset by a function across all elements of another dataset.
    *
    *   The op has similar semantics to the built-in Scala collections `map` function.
    */
  private[data] trait Documentation
}
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/io/data/MapDataset.scala | Scala | apache-2.0 | 6,996 |
package org.jetbrains.plugins.scala.failed.annotator
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
/**
* @author Roman.Shein
* @since 28.03.2016.
*/
// Known-failing highlighting regressions around method overriding/overloading.
class OverridingAnnotatorTest2 extends ScalaLightCodeInsightFixtureTestAdapter {
  // shouldPass = false marks every test in this suite as expected-to-fail
  // until the corresponding issues are fixed.
  override protected def shouldPass: Boolean = false
  //TODO: the issue does not reproduce when test is performed using OverridingAnnotatorTest
  def testSCL3807(): Unit = {
    checkTextHasNoErrors(
      """
        |trait A {
        | def foo(f: (=> A) => Int) = {_: A => 42}
        |}
        |
        |object A extends A{
        | def foo(f: (A) => Int) = null
        |}
      """.stripMargin)
  }
  // Overloads differing only in by-name vs by-value parameter.
  def testScl9034(): Unit = {
    checkTextHasNoErrors(
      """
        | def apply(list: Iterable[Int]): ErrorHighlighting = { this ("int") }
        | def apply(list: => Iterable[String]): ErrorHighlighting = { this ("string") }
      """.stripMargin)
  }
  // Overload resolution with a SAM-like functional trait argument.
  def testScl12605(): Unit = {
    checkTextHasNoErrors(
      """
        |class Bug {
        | def main(args: Array[String]): Unit = {
        | val bug = new Bug()
        | bug.buggy(bug, (x, y) => x + y)
        | }
        |
        | def buggy(y: Bug): Bug = ???
        |
        | def buggy(y: Bug, function: DDFunction): Bug = ???
        |}
        |
        |trait DDFunction {
        | def apply(x: Double, y: Double): Double
        |}
      """.stripMargin)
  }
  // Overriding a generic method inside an anonymous subclass.
  def testScl13039(): Unit = {
    checkTextHasNoErrors(
      """
        |trait Test[T] {
        | def foo[S](x : T) : Unit = {
        | val t = new Test[S] {
        | override def foo[U](x: S): Unit = { }
        | }
        | }
        |}
      """.stripMargin)
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/failed/annotator/OverridingAnnotatorTest2.scala | Scala | apache-2.0 | 1,730 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.expressions.{CheckOverflow, CreateNamedStruct, Expression, IsNull, UnsafeArrayData}
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.util.{DateTimeUtils, GenericArrayData, IntervalUtils}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
 * Helpers for building catalyst serializer expressions, i.e. expressions that
 * convert external JVM objects into Spark SQL's internal representation.
 *
 * Each `createSerializerForX` method returns an [[Expression]] which, when
 * evaluated against `inputObject`, produces the internal value for type `X`.
 */
object SerializerBuildHelper {
  // Decimal overflow behavior follows the ANSI flag: under ANSI mode overflow
  // raises an error; otherwise the overflowing value becomes null.
  private def nullOnOverflow: Boolean = !SQLConf.get.ansiEnabled
  // Primitive boxed types: unbox via the corresponding java.lang accessor method.
  def createSerializerForBoolean(inputObject: Expression): Expression = {
    Invoke(inputObject, "booleanValue", BooleanType)
  }
  def createSerializerForByte(inputObject: Expression): Expression = {
    Invoke(inputObject, "byteValue", ByteType)
  }
  def createSerializerForShort(inputObject: Expression): Expression = {
    Invoke(inputObject, "shortValue", ShortType)
  }
  def createSerializerForInteger(inputObject: Expression): Expression = {
    Invoke(inputObject, "intValue", IntegerType)
  }
  def createSerializerForLong(inputObject: Expression): Expression = {
    Invoke(inputObject, "longValue", LongType)
  }
  def createSerializerForFloat(inputObject: Expression): Expression = {
    Invoke(inputObject, "floatValue", FloatType)
  }
  def createSerializerForDouble(inputObject: Expression): Expression = {
    Invoke(inputObject, "doubleValue", DoubleType)
  }
  // Strings are stored internally as UTF8String.
  def createSerializerForString(inputObject: Expression): Expression = {
    StaticInvoke(
      classOf[UTF8String],
      StringType,
      "fromString",
      inputObject :: Nil,
      returnNullable = false)
  }
  // java.time.Instant -> microseconds since the epoch (internal TimestampType).
  def createSerializerForJavaInstant(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      TimestampType,
      "instantToMicros",
      inputObject :: Nil,
      returnNullable = false)
  }
  // Java enums are serialized by their constant name, as a UTF8String.
  def createSerializerForJavaEnum(inputObject: Expression): Expression =
    createSerializerForString(Invoke(inputObject, "name", ObjectType(classOf[String])))
  def createSerializerForSqlTimestamp(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      TimestampType,
      "fromJavaTimestamp",
      inputObject :: Nil,
      returnNullable = false)
  }
  // Accepts either timestamp representation; DateTimeUtils.anyToMicros dispatches.
  def createSerializerForAnyTimestamp(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      TimestampType,
      "anyToMicros",
      inputObject :: Nil,
      returnNullable = false)
  }
  // java.time.LocalDateTime -> timestamp-without-timezone micros.
  def createSerializerForLocalDateTime(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      TimestampNTZType,
      "localDateTimeToMicros",
      inputObject :: Nil,
      returnNullable = false)
  }
  // java.time.LocalDate -> days since the epoch (internal DateType).
  def createSerializerForJavaLocalDate(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      DateType,
      "localDateToDays",
      inputObject :: Nil,
      returnNullable = false)
  }
  def createSerializerForSqlDate(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      DateType,
      "fromJavaDate",
      inputObject :: Nil,
      returnNullable = false)
  }
  // Accepts either date representation; DateTimeUtils.anyToDays dispatches.
  def createSerializerForAnyDate(inputObject: Expression): Expression = {
    StaticInvoke(
      DateTimeUtils.getClass,
      DateType,
      "anyToDays",
      inputObject :: Nil,
      returnNullable = false)
  }
  // java.time.Duration -> day-time interval micros.
  def createSerializerForJavaDuration(inputObject: Expression): Expression = {
    StaticInvoke(
      IntervalUtils.getClass,
      DayTimeIntervalType(),
      "durationToMicros",
      inputObject :: Nil,
      returnNullable = false)
  }
  // java.time.Period -> year-month interval months.
  def createSerializerForJavaPeriod(inputObject: Expression): Expression = {
    StaticInvoke(
      IntervalUtils.getClass,
      YearMonthIntervalType(),
      "periodToMonths",
      inputObject :: Nil,
      returnNullable = false)
  }
  // Decimal conversions wrap the result in CheckOverflow so values exceeding the
  // target precision/scale either error (ANSI) or become null (see nullOnOverflow).
  def createSerializerForJavaBigDecimal(inputObject: Expression): Expression = {
    CheckOverflow(StaticInvoke(
      Decimal.getClass,
      DecimalType.SYSTEM_DEFAULT,
      "apply",
      inputObject :: Nil,
      returnNullable = false), DecimalType.SYSTEM_DEFAULT, nullOnOverflow)
  }
  // scala.BigDecimal shares the java.math.BigDecimal code path.
  def createSerializerForScalaBigDecimal(inputObject: Expression): Expression = {
    createSerializerForJavaBigDecimal(inputObject)
  }
  def createSerializerForJavaBigInteger(inputObject: Expression): Expression = {
    CheckOverflow(StaticInvoke(
      Decimal.getClass,
      DecimalType.BigIntDecimal,
      "apply",
      inputObject :: Nil,
      returnNullable = false), DecimalType.BigIntDecimal, nullOnOverflow)
  }
  def createSerializerForScalaBigInt(inputObject: Expression): Expression = {
    createSerializerForJavaBigInteger(inputObject)
  }
  // Arrays of primitives can be copied wholesale into UnsafeArrayData
  // (element type is non-nullable by construction).
  def createSerializerForPrimitiveArray(
      inputObject: Expression,
      dataType: DataType): Expression = {
    StaticInvoke(
      classOf[UnsafeArrayData],
      ArrayType(dataType, false),
      "fromPrimitiveArray",
      inputObject :: Nil,
      returnNullable = false)
  }
  // Object arrays are wrapped in GenericArrayData without per-element conversion.
  def createSerializerForGenericArray(
      inputObject: Expression,
      dataType: DataType,
      nullable: Boolean): Expression = {
    NewInstance(
      classOf[GenericArrayData],
      inputObject :: Nil,
      dataType = ArrayType(dataType, nullable))
  }
  // Collections whose elements need conversion: funcForNewExpr builds the
  // per-element serializer from a loop-variable expression.
  def createSerializerForMapObjects(
      inputObject: Expression,
      dataType: ObjectType,
      funcForNewExpr: Expression => Expression): Expression = {
    MapObjects(funcForNewExpr, inputObject, dataType)
  }
  /** Describes how to serialize one side (key or value) of an external map. */
  case class MapElementInformation(
      dataType: DataType,
      nullable: Boolean,
      funcForNewExpr: Expression => Expression)
  def createSerializerForMap(
      inputObject: Expression,
      keyInformation: MapElementInformation,
      valueInformation: MapElementInformation): Expression = {
    ExternalMapToCatalyst(
      inputObject,
      keyInformation.dataType,
      keyInformation.funcForNewExpr,
      keyNullable = keyInformation.nullable,
      valueInformation.dataType,
      valueInformation.funcForNewExpr,
      valueNullable = valueInformation.nullable
    )
  }
  // CreateNamedStruct expects alternating (name literal, value expression) args.
  private def argumentsForFieldSerializer(
      fieldName: String,
      serializerForFieldValue: Expression): Seq[Expression] = {
    expressions.Literal(fieldName) :: serializerForFieldValue :: Nil
  }
  /** Builds a struct serializer from (field name, field serializer) pairs.
    * When the input object itself may be null, the whole struct becomes null. */
  def createSerializerForObject(
      inputObject: Expression,
      fields: Seq[(String, Expression)]): Expression = {
    val nonNullOutput = CreateNamedStruct(fields.flatMap { case(fieldName, fieldExpr) =>
      argumentsForFieldSerializer(fieldName, fieldExpr)
    })
    if (inputObject.nullable) {
      val nullOutput = expressions.Literal.create(null, nonNullOutput.dataType)
      expressions.If(IsNull(inputObject), nullOutput, nonNullOutput)
    } else {
      nonNullOutput
    }
  }
  /** Serializes via a user-defined type: instantiates `udtClass` and delegates
    * to its `serialize` method. */
  def createSerializerForUserDefinedType(
      inputObject: Expression,
      udt: UserDefinedType[_],
      udtClass: Class[_]): Expression = {
    val obj = NewInstance(udtClass, Nil, dataType = ObjectType(udtClass))
    Invoke(obj, "serialize", udt, inputObject :: Nil)
  }
}
| gengliangwang/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SerializerBuildHelper.scala | Scala | apache-2.0 | 7,907 |
package com.googlecode.sarasvati.util.doc
import scala.io.Source;
/** Rewrites DocBook XInclude references so that `href="name.xml"` becomes
  * `href="name-html.xml"`, echoing each input file line-by-line to stdout. */
object ReferenceFixer
{
  // Matches lines like:
  //   <xi:include href="what-is-workflow.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
  // Group 1: text up to and including `href="`; group 2: the base file name;
  // group 3: the `.xml"` suffix and the remainder of the line.
  // NOTE: `Source.getLines` strips line terminators, so the pattern must not
  // require a trailing `\n` — the previous pattern did and therefore never matched.
  private val Include = """(.*href=")(.*)(\.xml".*)""".r

  /** Transforms a single line: inserts `-html` before `.xml` in an XInclude
    * `href`; returns lines without such a reference unchanged. */
  def transformLine(line: String): String =
    line match {
      case Include(prefix, href, suffix) => "%s%s-html%s".format(prefix, href, suffix)
      case _ => line
    }

  /** Processes every file named on the command line, printing each
    * (possibly transformed) line to standard output. */
  def main(args: Array[String]): Unit = {
    for (arg <- args) {
      val source = Source.fromFile(arg)
      // println restores the terminator that getLines stripped; the original
      // used print and produced a single unbroken line of output.
      try source.getLines().foreach(line => println(transformLine(line)))
      finally source.close() // the original never closed the underlying reader
    }
  }
}
| plorenz/sarasvati | doc/reference/util/src/com/googlecode/sarasvati/util/doc/ReferenceFixer.scala | Scala | lgpl-3.0 | 588 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable
import org.scalatest.BeforeAndAfter
import org.apache.spark.SparkException
import org.apache.spark.sql.ForeachWriter
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.streaming.{OutputMode, StreamingQueryException, StreamTest}
import org.apache.spark.sql.test.SharedSQLContext
/**
 * Tests for the streaming `foreach` sink: verifies the Open/Process/Close
 * lifecycle that [[TestForeachWriter]] records into the [[ForeachSinkSuite]]
 * companion object, across output modes, failures, watermarks, and metrics.
 */
class ForeachSinkSuite extends StreamTest with SharedSQLContext with BeforeAndAfter {
  import testImplicits._
  // Stop any query left running by a test so suites do not interfere.
  after {
    sqlContext.streams.active.foreach(_.stop())
  }
  test("foreach() with `append` output mode") {
    withTempDir { checkpointDir =>
      val input = MemoryStream[Int]
      val query = input.toDS().repartition(2).writeStream
        .option("checkpointLocation", checkpointDir.getCanonicalPath)
        .outputMode(OutputMode.Append)
        .foreach(new TestForeachWriter())
        .start()
      // Checks that each of the 2 partitions reported Open(expectedVersion),
      // that the Process events across partitions cover expectedData, and that
      // every partition ended with a successful Close(None).
      def verifyOutput(expectedVersion: Int, expectedData: Seq[Int]): Unit = {
        import ForeachSinkSuite._
        val events = ForeachSinkSuite.allEvents()
        assert(events.size === 2) // one seq of events for each of the 2 partitions
        // Verify both seq of events have an Open event as the first event
        assert(events.map(_.head).toSet === Set(0, 1).map(p => Open(p, expectedVersion)))
        // Verify all the Process event correspond to the expected data
        val allProcessEvents = events.flatMap(_.filter(_.isInstanceOf[Process[_]]))
        assert(allProcessEvents.toSet === expectedData.map { data => Process(data) }.toSet)
        // Verify both seq of events have a Close event as the last event
        assert(events.map(_.last).toSet === Set(Close(None), Close(None)))
      }
      // -- batch 0 ---------------------------------------
      ForeachSinkSuite.clear()
      input.addData(1, 2, 3, 4)
      query.processAllAvailable()
      verifyOutput(expectedVersion = 0, expectedData = 1 to 4)
      // -- batch 1 ---------------------------------------
      ForeachSinkSuite.clear()
      input.addData(5, 6, 7, 8)
      query.processAllAvailable()
      verifyOutput(expectedVersion = 1, expectedData = 5 to 8)
      query.stop()
    }
  }
  test("foreach() with `complete` output mode") {
    withTempDir { checkpointDir =>
      val input = MemoryStream[Int]
      // Complete mode re-emits the full (single-row) aggregate every batch.
      val query = input.toDS()
        .groupBy().count().as[Long].map(_.toInt)
        .writeStream
        .option("checkpointLocation", checkpointDir.getCanonicalPath)
        .outputMode(OutputMode.Complete)
        .foreach(new TestForeachWriter())
        .start()
      // -- batch 0 ---------------------------------------
      input.addData(1, 2, 3, 4)
      query.processAllAvailable()
      var allEvents = ForeachSinkSuite.allEvents()
      assert(allEvents.size === 1)
      var expectedEvents = Seq(
        ForeachSinkSuite.Open(partition = 0, version = 0),
        ForeachSinkSuite.Process(value = 4),
        ForeachSinkSuite.Close(None)
      )
      assert(allEvents === Seq(expectedEvents))
      ForeachSinkSuite.clear()
      // -- batch 1 ---------------------------------------
      input.addData(5, 6, 7, 8)
      query.processAllAvailable()
      allEvents = ForeachSinkSuite.allEvents()
      assert(allEvents.size === 1)
      expectedEvents = Seq(
        ForeachSinkSuite.Open(partition = 0, version = 1),
        ForeachSinkSuite.Process(value = 8),
        ForeachSinkSuite.Close(None)
      )
      assert(allEvents === Seq(expectedEvents))
      query.stop()
    }
  }
  testQuietly("foreach with error") {
    withTempDir { checkpointDir =>
      val input = MemoryStream[Int]
      val query = input.toDS().repartition(1).writeStream
        .option("checkpointLocation", checkpointDir.getCanonicalPath)
        .foreach(new TestForeachWriter() {
          override def process(value: Int): Unit = {
            super.process(value)
            throw new RuntimeException("error")
          }
        }).start()
      input.addData(1, 2, 3, 4)
      // Error in `process` should fail the Spark job
      val e = intercept[StreamingQueryException] {
        query.processAllAvailable()
      }
      assert(e.getCause.isInstanceOf[SparkException])
      assert(e.getCause.getCause.getMessage === "error")
      assert(query.isActive === false)
      val allEvents = ForeachSinkSuite.allEvents()
      assert(allEvents.size === 1)
      assert(allEvents(0)(0) === ForeachSinkSuite.Open(partition = 0, version = 0))
      assert(allEvents(0)(1) === ForeachSinkSuite.Process(value = 1))
      // `close` should be called with the error
      val errorEvent = allEvents(0)(2).asInstanceOf[ForeachSinkSuite.Close]
      assert(errorEvent.error.get.isInstanceOf[RuntimeException])
      assert(errorEvent.error.get.getMessage === "error")
    }
  }
  test("foreach with watermark: complete") {
    val inputData = MemoryStream[Int]
    // Windowed count with a 10-second watermark over event time.
    val windowedAggregation = inputData.toDF()
      .withColumn("eventTime", $"value".cast("timestamp"))
      .withWatermark("eventTime", "10 seconds")
      .groupBy(window($"eventTime", "5 seconds") as 'window)
      .agg(count("*") as 'count)
      .select($"count".as[Long])
      .map(_.toInt)
      .repartition(1)
    val query = windowedAggregation
      .writeStream
      .outputMode(OutputMode.Complete)
      .foreach(new TestForeachWriter())
      .start()
    try {
      inputData.addData(10, 11, 12)
      query.processAllAvailable()
      val allEvents = ForeachSinkSuite.allEvents()
      assert(allEvents.size === 1)
      val expectedEvents = Seq(
        ForeachSinkSuite.Open(partition = 0, version = 0),
        ForeachSinkSuite.Process(value = 3),
        ForeachSinkSuite.Close(None)
      )
      assert(allEvents === Seq(expectedEvents))
    } finally {
      query.stop()
    }
  }
  test("foreach with watermark: append") {
    val inputData = MemoryStream[Int]
    val windowedAggregation = inputData.toDF()
      .withColumn("eventTime", $"value".cast("timestamp"))
      .withWatermark("eventTime", "10 seconds")
      .groupBy(window($"eventTime", "5 seconds") as 'window)
      .agg(count("*") as 'count)
      .select($"count".as[Long])
      .map(_.toInt)
      .repartition(1)
    val query = windowedAggregation
      .writeStream
      .outputMode(OutputMode.Append)
      .foreach(new TestForeachWriter())
      .start()
    try {
      inputData.addData(10, 11, 12)
      query.processAllAvailable()
      inputData.addData(25) // Advance watermark to 15 seconds
      query.processAllAvailable()
      inputData.addData(25) // Evict items less than previous watermark
      query.processAllAvailable()
      // There should be 3 batches and only does the last batch contain a value.
      val allEvents = ForeachSinkSuite.allEvents()
      assert(allEvents.size === 3)
      val expectedEvents = Seq(
        Seq(
          ForeachSinkSuite.Open(partition = 0, version = 0),
          ForeachSinkSuite.Close(None)
        ),
        Seq(
          ForeachSinkSuite.Open(partition = 0, version = 1),
          ForeachSinkSuite.Close(None)
        ),
        Seq(
          ForeachSinkSuite.Open(partition = 0, version = 2),
          ForeachSinkSuite.Process(value = 3),
          ForeachSinkSuite.Close(None)
        )
      )
      assert(allEvents === expectedEvents)
    } finally {
      query.stop()
    }
  }
  test("foreach sink should support metrics") {
    val inputData = MemoryStream[Int]
    val query = inputData.toDS()
      .writeStream
      .foreach(new TestForeachWriter())
      .start()
    try {
      inputData.addData(10, 11, 12)
      query.processAllAvailable()
      // The sink should report input row counts through the progress API.
      val recentProgress = query.recentProgress.filter(_.numInputRows != 0).headOption
      assert(recentProgress.isDefined && recentProgress.get.numInputRows === 3,
        s"recentProgress[${query.recentProgress.toList}] doesn't contain correct metrics")
    } finally {
      query.stop()
    }
  }
}
/** Global, thread-safe collector for the lifecycle events that
  * [[TestForeachWriter]] instances report from executor threads. */
object ForeachSinkSuite {
  trait Event
  case class Open(partition: Long, version: Long) extends Event
  case class Process[T](value: T) extends Event
  case class Close(error: Option[Throwable]) extends Event
  // Concurrent queue: writers for different partitions may report in parallel.
  private val recordedEvents = new ConcurrentLinkedQueue[Seq[Event]]()
  /** Appends one writer's event sequence to the global record. */
  def addEvents(events: Seq[Event]): Unit = {
    recordedEvents.add(events)
    ()
  }
  /** Snapshot of everything recorded so far, in insertion order. */
  def allEvents(): Seq[Seq[Event]] = {
    val builder = Seq.newBuilder[Seq[Event]]
    val iter = recordedEvents.iterator()
    while (iter.hasNext) {
      builder += iter.next()
    }
    builder.result()
  }
  /** Discards all recorded events. */
  def clear(): Unit = {
    recordedEvents.clear()
  }
}
/** A [[ForeachWriter]] that records its open/process/close callbacks and
  * publishes them to [[ForeachSinkSuite]] when the partition closes. */
class TestForeachWriter extends ForeachWriter[Int] {
  // Reset the global collector whenever a fresh writer is constructed.
  ForeachSinkSuite.clear()

  // Events observed by this writer instance, in callback order.
  private val recorded = mutable.ArrayBuffer.empty[ForeachSinkSuite.Event]

  override def open(partitionId: Long, version: Long): Boolean = {
    recorded += ForeachSinkSuite.Open(partition = partitionId, version = version)
    true
  }

  override def process(value: Int): Unit = {
    recorded += ForeachSinkSuite.Process(value)
  }

  override def close(errorOrNull: Throwable): Unit = {
    recorded += ForeachSinkSuite.Close(error = Option(errorOrNull))
    // Publish the full sequence only once the partition is finished.
    ForeachSinkSuite.addEvents(recorded)
  }
}
| ioana-delaney/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ForeachSinkSuite.scala | Scala | apache-2.0 | 10,037 |
package com.twitter.finagle.postgresql.transport
import com.twitter.finagle.postgresql.BackendMessage
import com.twitter.finagle.postgresql.PgSqlClientError
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationCleartextPassword
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationGSS
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationGSSContinue
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationKerberosV5
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationMD5Password
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationMessage
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationOk
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSASL
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSASLContinue
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSASLFinal
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSCMCredential
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSSPI
import com.twitter.finagle.postgresql.BackendMessage.BackendKeyData
import com.twitter.finagle.postgresql.BackendMessage.BindComplete
import com.twitter.finagle.postgresql.BackendMessage.CloseComplete
import com.twitter.finagle.postgresql.BackendMessage.CommandComplete
import com.twitter.finagle.postgresql.BackendMessage.CommandTag
import com.twitter.finagle.postgresql.BackendMessage.CopyData
import com.twitter.finagle.postgresql.BackendMessage.CopyDone
import com.twitter.finagle.postgresql.BackendMessage.CopyInResponse
import com.twitter.finagle.postgresql.BackendMessage.CopyOutResponse
import com.twitter.finagle.postgresql.BackendMessage.DataRow
import com.twitter.finagle.postgresql.BackendMessage.EmptyQueryResponse
import com.twitter.finagle.postgresql.BackendMessage.ErrorResponse
import com.twitter.finagle.postgresql.BackendMessage.FailedTx
import com.twitter.finagle.postgresql.BackendMessage.Field
import com.twitter.finagle.postgresql.BackendMessage.InTx
import com.twitter.finagle.postgresql.BackendMessage.NoData
import com.twitter.finagle.postgresql.BackendMessage.NoTx
import com.twitter.finagle.postgresql.BackendMessage.NoticeResponse
import com.twitter.finagle.postgresql.BackendMessage.Parameter
import com.twitter.finagle.postgresql.BackendMessage.ParameterDescription
import com.twitter.finagle.postgresql.BackendMessage.ParameterStatus
import com.twitter.finagle.postgresql.BackendMessage.ParseComplete
import com.twitter.finagle.postgresql.BackendMessage.PortalSuspended
import com.twitter.finagle.postgresql.BackendMessage.ReadyForQuery
import com.twitter.finagle.postgresql.BackendMessage.RowDescription
import com.twitter.finagle.postgresql.Types.AttributeId
import com.twitter.finagle.postgresql.Types.FieldDescription
import com.twitter.finagle.postgresql.Types.Format
import com.twitter.finagle.postgresql.Types.Oid
import com.twitter.io.Buf
import scala.annotation.tailrec
/**
 * A typeclass for decoding [[BackendMessage]] from a [[Packet]].
 *
 * Implementations consume exactly one complete message from the reader, which
 * is positioned just past the one-byte message tag and four-byte length prefix
 * (see [[MessageDecoder.fromBuf]]).
 *
 * @see [[MessageEncoder]]
 * @see [[PgBuf.Reader]]
 */
trait MessageDecoder[M <: BackendMessage] {
  def decode(b: PgBuf.Reader): M
}
object MessageDecoder {
  /** Decodes a message of type `M` using the implicitly available decoder. */
  def decode[M <: BackendMessage](reader: PgBuf.Reader)(implicit decoder: MessageDecoder[M]): M =
    decoder.decode(reader)
  /** Decodes one complete backend message from `buf`: reads the one-byte tag,
    * skips the length prefix, dispatches to the tag's decoder, and verifies the
    * decoder consumed the whole packet. */
  def fromBuf(buf: Buf): BackendMessage = {
    val reader = PgBuf.reader(buf)
    val cmd = reader.byte()
    if (reader.remaining >= 4) {
      // skip the 4 byte packet length
      reader.skip(4)
    }
    // Dispatch on the protocol's one-character message tag.
    val ret = cmd match {
      case '1' => ParseComplete
      case '2' => BindComplete
      case '3' => CloseComplete
      case 'c' => CopyDone
      case 'C' => decode[CommandComplete](reader)
      case 'd' => decode[CopyData](reader)
      case 'D' => decode[DataRow](reader)
      case 'E' => decode[ErrorResponse](reader)
      case 'G' => decode[CopyInResponse](reader)
      case 'H' => decode[CopyOutResponse](reader)
      case 'I' => EmptyQueryResponse
      case 'K' => decode[BackendKeyData](reader)
      case 'n' => NoData
      case 'N' => decode[NoticeResponse](reader)
      case 'R' => decode[AuthenticationMessage](reader)
      case 's' => PortalSuspended
      case 'S' => decode[ParameterStatus](reader)
      case 't' => decode[ParameterDescription](reader)
      case 'T' => decode[RowDescription](reader)
      case 'Z' => decode[ReadyForQuery](reader)
      case byte => throw new PgSqlClientError(s"unimplemented message '${byte.toChar}'")
    }
    // A correct decoder leaves nothing behind; leftover bytes indicate a bug.
    if (reader.remaining != 0) {
      throw new PgSqlClientError("message decoding did not consume the entire packet")
    }
    ret
  }
  /** Convenience factory to build a decoder from a function. */
  def apply[M <: BackendMessage](f: PgBuf.Reader => M): MessageDecoder[M] = reader => f(reader)
  /** Reads the (field-tag, c-string value) pairs used by error and notice
    * messages; the list is terminated by a NUL tag byte. */
  def readFields(reader: PgBuf.Reader): Map[Field, String] = {
    import Field._
    def nextField: Option[Field] =
      reader.byte().toChar match {
        // NUL byte terminates the field list.
        case 0 => None
        case 'S' => Some(LocalizedSeverity)
        case 'V' => Some(Severity)
        case 'C' => Some(Code)
        case 'M' => Some(Message)
        case 'D' => Some(Detail)
        case 'H' => Some(Hint)
        case 'P' => Some(Position)
        case 'p' => Some(InternalPosition)
        case 'q' => Some(InternalQuery)
        case 'W' => Some(Where)
        case 's' => Some(Schema)
        case 't' => Some(Table)
        case 'c' => Some(Column)
        case 'd' => Some(DataType)
        case 'n' => Some(Constraint)
        case 'F' => Some(File)
        case 'L' => Some(Line)
        case 'R' => Some(Routine)
        // Unknown tags are preserved rather than dropped.
        case unk => Some(Unknown(unk))
      }
    @tailrec
    def loop(fields: Map[Field, String]): Map[Field, String] =
      nextField match {
        case None => fields
        case Some(field) =>
          val value = reader.cstring()
          loop(fields + (field -> value))
      }
    loop(Map.empty)
  }
  implicit lazy val errorResponseDecoder: MessageDecoder[ErrorResponse] = MessageDecoder { reader =>
    ErrorResponse(readFields(reader))
  }
  implicit lazy val noticeResponseDecoder: MessageDecoder[NoticeResponse] = MessageDecoder {
    reader =>
      NoticeResponse(readFields(reader))
  }
  implicit lazy val backendKeyDataDecoder: MessageDecoder[BackendKeyData] = MessageDecoder {
    reader =>
      BackendKeyData(reader.int(), reader.int())
  }
  /** Parses a command-completion tag string such as "INSERT 0 5" or "SELECT 3"
    * into a structured [[CommandTag]]; unrecognized tags become
    * [[CommandTag.Other]]. */
  def commandTag(value: String): CommandTag =
    value.split(" ", 3).toList match {
      // INSERT reports "INSERT <oid> <rows>"; the oid component is discarded.
      case "INSERT" :: _ :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Insert, rows.toInt)
      case "DELETE" :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Delete, rows.toInt)
      case "UPDATE" :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Update, rows.toInt)
      case "SELECT" :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Select, rows.toInt)
      case "MOVE" :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Move, rows.toInt)
      case "FETCH" :: rows :: Nil => CommandTag.AffectedRows(CommandTag.Fetch, rows.toInt)
      case _ => CommandTag.Other(value)
    }
  implicit lazy val commandCompleteDecoder: MessageDecoder[CommandComplete] = MessageDecoder {
    reader =>
      CommandComplete(commandTag(reader.cstring()))
  }
  // Dispatches on the integer authentication-request code.
  // NOTE(review): no default case — an unrecognized code (e.g. 4, or a future
  // addition) raises MatchError instead of PgSqlClientError; confirm intended.
  implicit lazy val authenticationMessageDecoder: MessageDecoder[AuthenticationMessage] =
    MessageDecoder { reader =>
      reader.int() match {
        case 0 => AuthenticationOk
        case 2 => AuthenticationKerberosV5
        case 3 => AuthenticationCleartextPassword
        case 5 => AuthenticationMD5Password(reader.buf(4))
        case 6 => AuthenticationSCMCredential
        case 7 => AuthenticationGSS
        case 8 => AuthenticationGSSContinue(reader.remainingBuf())
        case 9 => AuthenticationSSPI
        case 10 => AuthenticationSASL(reader.cstring())
        case 11 => AuthenticationSASLContinue(reader.remainingBuf())
        case 12 => AuthenticationSASLFinal(reader.remainingBuf())
      }
    }
  implicit lazy val parameterStatusDecoder: MessageDecoder[ParameterStatus] = MessageDecoder {
    reader =>
      // Well-known parameter names map to typed constants; others are kept verbatim.
      val parameter = reader.cstring() match {
        case "server_version" => Parameter.ServerVersion
        case "server_encoding" => Parameter.ServerEncoding
        case "client_encoding" => Parameter.ClientEncoding
        case "application_name" => Parameter.ApplicationName
        case "is_superuser" => Parameter.IsSuperUser
        case "session_authorization" => Parameter.SessionAuthorization
        case "DateStyle" => Parameter.DateStyle
        case "IntervalStyle" => Parameter.IntervalStyle
        case "TimeZone" => Parameter.TimeZone
        case "integer_datetimes" => Parameter.IntegerDateTimes
        case "standard_conforming_strings" => Parameter.StandardConformingStrings
        case other => Parameter.Other(other)
      }
      ParameterStatus(parameter, reader.cstring())
  }
  // NOTE(review): an unexpected status byte raises MatchError; confirm intended.
  implicit lazy val readyForQueryDecoder: MessageDecoder[ReadyForQuery] = MessageDecoder { reader =>
    val state = reader.byte().toChar match {
      case 'I' => NoTx
      case 'T' => InTx
      case 'F' => FailedTx
    }
    ReadyForQuery(state)
  }
  implicit lazy val rowDescriptionDecoder: MessageDecoder[RowDescription] = MessageDecoder {
    reader =>
      RowDescription(
        reader.collect { r =>
          FieldDescription(
            name = r.cstring(),
            // Zero oid / attribute id means "not a table column".
            tableOid = r.unsignedInt() match {
              case 0 => None
              case oid => Some(Oid(oid))
            },
            tableAttributeId = r.short() match {
              case 0 => None
              case attrId => Some(AttributeId(attrId))
            },
            dataType = Oid(r.unsignedInt()),
            dataTypeSize = r.short(),
            typeModifier = r.int(),
            format = r.format()
          )
        }
      )
  }
  implicit lazy val dataRowDecoder: MessageDecoder[DataRow] = MessageDecoder { reader =>
    DataRow(
      reader.collect(_.value())
    )
  }
  implicit lazy val parameterDescriptionDecoder: MessageDecoder[ParameterDescription] =
    MessageDecoder { reader =>
      ParameterDescription(
        reader.collect(r => Oid(r.unsignedInt()))
      )
    }
  implicit lazy val copyInResponseDecoder: MessageDecoder[CopyInResponse] = MessageDecoder {
    reader =>
      CopyInResponse(
        overallFormat = reader.byte() match {
          case 0 => Format.Text
          case 1 => Format.Binary
        },
        columnsFormat = reader.collect(_.format()),
      )
  }
  implicit lazy val copyOutResponseDecoder: MessageDecoder[CopyOutResponse] = MessageDecoder {
    reader =>
      CopyOutResponse(
        overallFormat = reader.byte() match {
          case 0 => Format.Text
          case 1 => Format.Binary
        },
        columnsFormat = reader.collect(_.format()),
      )
  }
  implicit lazy val copyDataDecoder: MessageDecoder[CopyData] = MessageDecoder { reader =>
    CopyData(reader.remainingBuf())
  }
}
| twitter/finagle | finagle-postgresql/src/main/scala/com/twitter/finagle/postgresql/transport/MessageDecoder.scala | Scala | apache-2.0 | 10,916 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.LabelPrinterFunction
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 20/10/17.
*/
/**
 * Label Printer Function Mapping
 *
 * Quill schema mapping for the `AD_LabelPrinterFunction` dictionary table:
 * maps each [[LabelPrinterFunction]] field to its ADempiere column name.
 */
trait LabelPrinterFunctionMapping {
  val queryLabelPrinterFunction = quote {
    querySchema[LabelPrinterFunction]("AD_LabelPrinterFunction",
      _.labelPrinterFunctionId-> "AD_LabelPrinterFunction_ID",
      _.tenantId-> "AD_Client_ID",
      _.organizationId-> "AD_Org_ID",
      _.isActive-> "IsActive",
      _.created-> "Created",
      _.createdBy-> "CreatedBy",
      _.updated-> "Updated",
      _.updatedBy-> "UpdatedBy",
      _.name-> "Name",
      _.description-> "Description",
      _.labelPrinterId-> "AD_LabelPrinter_ID",
      _.functionPrefix-> "FunctionPrefix",
      _.functionSuffix-> "FunctionSuffix",
      _.isXYPosition-> "IsXYPosition",
      _.xySeparator-> "XYSeparator",
      _.uuid-> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/LabelPrinterFunctionMapping.scala | Scala | gpl-3.0 | 1,927 |
package ls
import sbt._
import sbt.Def.Initialize
import sbt.Keys._
import sbt.{ ModuleID => SbtModuleID }
import scala.language.postfixOps
import scala.util.control.NonFatal
import scala.collection.JavaConversions._
import java.io.File
import java.net.URL
import dispatch._
import dispatch.Defaults._
object Plugin extends sbt.Plugin
with Requesting with JsonParsing {
import LsKeys.{ ls => lskey, _ }
  /** All sbt keys contributed by the ls plugin. Names are prefixed with `ls-`
    * (via `key`), except the top-level `ls` and `lsync` keys. */
  object LsKeys {
    // general
    val colors = SettingKey[Boolean](key("colors"), "Colorize logging")
    val host = SettingKey[String](key("host"), "Host ls server")
    val usage = TaskKey[Unit]("usage", "Displays usage information for tasks")
    // github
    val ghUser = SettingKey[Option[String]](key("gh-user"), "Github user name")
    val ghRepo = SettingKey[Option[String]](key("gh-repo"), "Github repository name")
    val ghBranch = SettingKey[Option[String]](key("gh-branch"), "Github branch name")
    // syncing
    val lint = TaskKey[Boolean](key("lint"), "Verifies the structure of serialized version info")
    val cat = TaskKey[Unit](key("cat"),
      "Prints the contents of the current serialized version file to the console")
    val versionInfo = TaskKey[VersionInfo](key("version-info"), "Information about a version of a project")
    val versionFile = SettingKey[File](key("version-file"), "File storing version descriptor file")
    val writeVersion = TaskKey[Unit](key("write-version"), "Writes version data to descriptor file")
    val lsync = TaskKey[Unit]("lsync", "Synchronizes github project info with ls server")
    val dependencyFilter = SettingKey[SbtModuleID => Boolean]("dependency-filter",
      "Filters library dependencies included in version-info")
    // optional attributes
    val tags = SettingKey[Seq[String]](key("tags"), "List of taxonomy tags for the library")
    val docsUrl = SettingKey[Option[URL]](key("doc-url"), "Url for library documentation")
    val optionals = SettingKey[Optionals](key("optionals"), "Optional project info")
    val skipWrite = SettingKey[Boolean](key("skip-write"), "Skip this module in write-version")
    // discovery
    val ls = InputKey[Unit]("ls", "Search for remote libraries")
    val lsDocs = InputKey[Unit](key("docs"), "Launch library documentation")
    // All plugin-specific keys share the `ls-` prefix.
    private def key(name: String) = "ls-%s" format name
  }
  /** Server used when `LsKeys.host` is not overridden. */
  val DefaultLsHost = "http://ls.implicit.ly"
  /** Branch used when `LsKeys.ghBranch` is not set. */
  val DefaultBranch = "master"
  /** Prints the serialized version descriptor for the current version to the
    * console, or warns when it has not been written yet (see `write-version`). */
  private def catTask: Def.Initialize[Task[Unit]] =
    (streams, versionFile, versionInfo) map {
      (out, vfile, vinfo) =>
        if (vfile.exists) {
          out.log.info("version %s @ %s" format(vinfo.name, vinfo.version))
          println(IO.read(vfile))
        } else {
          out.log.warn(
            "Version %s @ %s did not exist. Create one with `ls-write`" format(
              vinfo.name, vinfo.version))
        }
    }
  /** Validates the serialized version descriptor by parsing it as a `Library`.
    * Returns true when parsing succeeds (a missing file also passes); logs an
    * error and returns false otherwise. */
  private def lintTask: Def.Initialize[Task[Boolean]] =
    (streams, versionFile, versionInfo) map {
      (out, vfile, vinfo) => try {
        if (vfile.exists) {
          val json = IO.read(vfile)
          parseJson[Library](json)
        }
        out.log.debug("Valid version info")
        // Snapshots pass lint with a warning; lsync refuses them outright.
        if (snapshot(vinfo.version)) out.log.warn(SnapshotWarning)
        true
      } catch {
        case NonFatal(e) =>
          out.log.error("Invalid version-info %s. %s"
            .format(e.getMessage,
              if (vfile.exists) "\n" + IO.read(vfile) else ""))
          false
      }
    }
// Pushes the project's library info to the ls server. Requires a resolvable
// GitHub remote (user/repo). Snapshot versions are warned about and NOT
// synchronized; an invalid descriptor aborts with an error.
private def lsyncTask: Def.Initialize[Task[Unit]] =
  (streams, ghUser in lsync, ghRepo in lsync, ghBranch in lsync, version in lsync,
   host in lsync, versionFile, lint) map {
    (out, maybeUser, maybeRepo, branch, vers, host, vfile, lint) =>
      (maybeUser, maybeRepo) match {
        case (Some(user), Some(repo)) =>
          if (lint) {
            if (snapshot(vers)) out.log.warn(SnapshotWarning)
            else {
              out.log.info("lsyncing project %s/%s@%s..." format(user, repo, vers))
              // Synchronous HTTP call (dispatch future applied in place);
              // network errors are logged as warnings, not rethrown.
              http(Client(host).lsync(user, repo, branch.getOrElse(DefaultBranch), vers) OK as.String)
                .either.map(_.fold({
                  case NonFatal(e) =>
                    out.log.warn("Error synchronizing project libraries %s" format e.getMessage)
                }, { _ =>
                  out.log.info("Project was synchronized")
                }))
                .apply()
            }
          } else sys.error("Your version descriptor was invalid. %s"
            .format(IO.read(vfile)))
        case _ => sys.error("Could not resolve a Github git remote")
      }
  }
// True when the version string ends with "snapshot", compared case-insensitively.
private def snapshot(vstr: String) = vstr.toLowerCase.endsWith("snapshot")
private val SnapshotWarning = "ls only supports release versions not likely to change. This excludes snapshot versions."
// Writes the version descriptor JSON to `versionFile`. A fresh file is
// written silently; an existing file triggers an interactive overwrite
// prompt (empty answer or "y"/"yes" overwrites, "n"/"no" skips).
private def writeVersionTask: Def.Initialize[Task[Unit]] =
  (streams, versionFile, versionInfo, skipWrite) map {
    (out, f, info, skip) =>
      def write() {
        out.log.debug("version info: %s" format(info.json))
        IO.write(f, info.json)
        out.log.info("Wrote %s" format(f))
      }
      if (skip) out.log.info("Skipping %s".format(f))
      else if (!f.exists) {
        // ensure the parent directory chain exists before first write
        f.getParentFile().mkdirs()
        write()
      } else Prompt.ask(
        "Overwrite existing version info for %s@%s? [Y/n] "
          .format(info.name, info.version)) { r =>
          val a = r.trim.toLowerCase
          if (Prompt.Yes.contains(a) || a.trim.isEmpty) {
            write()
          }
          else if (Prompt.No contains a) out.log.info("Skipped.")
          else sys.error("Unexpected answer %s" format a)
        }
  }
object LibraryParser {
  trait LibraryParserResult
  case object Fail extends LibraryParserResult
  case class Pass(user: Option[String], repo: Option[String],
                  library: String, version: Option[String],
                  config: Option[String]) extends LibraryParserResult

  val Pat = """^((\S+)/(\S+)/)?(\S+(?=@)|\S+(?!@))(?:@(\S+(?=[:])|\S+(?![:])))?(?:[:](\S+))?$""".r

  /**
   * Parses the abbreviated dependency format:
   *
   *   [user/repository/]library[@version][:config1->config2]
   *
   * Only `library` is required. `@version` and `:config` (an ivy config)
   * are optional, as is the `user/repository/` namespace prefix (most
   * libraries share their project's name, so the prefix disambiguates).
   */
  def apply(raw: String): LibraryParserResult =
    raw match {
      case Pat(_, user, repo, library, version, config) =>
        // Option(...) maps unmatched (null) groups to None
        Pass(
          user = Option(user),
          repo = Option(repo),
          library = library,
          version = Option(version),
          config = Option(config))
      case _ =>
        Fail
    }
}
/**
 * Shortcut for adding abbreviated library dependencies.
 * use ls-try for testing out transient library dependencies
 * or ls-install to persist the library dependency to your project
 * configuration
 *
 * examples:
 *  ls-try unfiltered/unfiltered/unfiltered-netty@0.4.2
 *  ls-try unfiltered-netty
 */
private def depend(persistently: Boolean)(state: State, dep: String) =
  LibraryParser(dep) match {
    case LibraryParser.Pass(user, repo, lib, version, config) =>
      // Resolve the library on the default ls host, then hand off to
      // Depends which applies (or persists) the dependency in the session.
      http(Client(DefaultLsHost).lib(lib, version)(user)(repo) OK as.String)
        .either.map(_.fold({
          case dispatch.StatusCode(404) =>
            sys.error("Library not found")
          case Conflicts.Conflict(_, _, _, msg) =>
            sys.error(msg)
        }, { str =>
          val ls = parseJson[List[LibraryVersions]](str)
          Depends(state, ls, version, config, persistently)
        }))
        .apply()
    case LibraryParser.Fail => sys.error(
      "Unexpected library format %s. Try something like %s" format(
        dep, "[user/repository/]library[@version][:config1->config2]"
      )
    )
  }
// `ls-try` adds a dependency for the current session only; `ls-install` persists it.
def lsTry = Command.single("ls-try")(depend(false))
def lsInstall = Command.single("ls-install")(depend(true))
// Settings for the search side of the plugin: the `ls` input task (keyword
// or by-name search), `ls-docs` (launch library documentation) and the
// ls-try / ls-install commands.
def lsSearchSettings: Seq[Setting[_]] = Seq(
  host in lskey := DefaultLsHost,
  colors in lskey := true,
  usage in lskey <<= (streams) map {
    (out) =>
      out.log.info("""
      |Usage: ls [-n] [terms...]
      |
      |Examples
      | # find a library named unfiltered
      | ls -n unfiltered
      | # find a library named unfiltered at version of 0.5.1
      | ls -n unfiltered@0.5.1
      | # find libraries taged with terms web or http
      | ls web http
      """.stripMargin)
  },
  // `ls-docs <name>[@<version>]` — fetch library info then open its docs.
  // NOTE(review): the match below only handles exactly one argument; other
  // arities raise MatchError rather than a friendly usage message — confirm
  // whether that is intended.
  lsDocs := {
    val args = Def.spaceDelimited("<args>").parsed
    val color = (colors in lskey).value // NOTE(review): unused in this task
    val log = streams.value.log
    args match {
      case Seq(name) =>
        val cli = Client((host in lskey).value)
        // "name@version" -> partially applied client lookup
        def named(name: String) = name.split("@") match {
          case Array(name) => cli.lib(name)_
          case Array(name, version) => cli.lib(name, version = Some(version))_
        }
        log.info("Fetching library docs for %s" format name)
        // extract the optional "@version" suffix
        def version(name: String) = name.split("@") match {
          case Array(_) => None
          case Array(_, version) => Some(version)
        }
        http(named(name)(None)(None) OK as.String)
          .either.map(_.fold({
            case StatusCode(404) =>
              log.info("`%s` library not found" format name)
            case _ => sys.error(
              "Please provide a name and optionally a version of the library you want docs for in the form ls-docs <name> or ls-docs <name>@<version>")
          }, { str =>
            docsFor(
              parseJson[List[LibraryVersions]](str),
              version(name), log)
          }))
          .apply()
    }
  },
  // `ls -n <name>[@version]` for an exact lookup, otherwise a keyword search.
  lskey := {
    val args = Def.spaceDelimited("<args>").parsed
    val log = streams.value.log
    val color = (colors in lskey).value
    val currentState = state.value
    args match {
      case Seq() => sys.error(
        "Please provide at least one or more search keywords or -n <name of library>"
      )
      case Seq("-n", name) =>
        val cli = Client((host in lskey).value)
        def named(name: String) = name.split("@") match {
          case Array(name) => cli.lib(name)_
          case Array(name, version) => cli.lib(name, version = Some(version))_
        }
        log.info("Fetching library info for %s" format name)
        http(named(name)(None)(None) OK as.String)
          .either.map(_.fold({
            case StatusCode(404) =>
              log.info("`%s` library not found" format name)
          },{ str =>
            // -1 == print all versions for an exact lookup
            libraries(
              parseJson[List[LibraryVersions]](str),
              log, color, -1,
              name.split("@"):_*)
          }))
          .apply()
      case kwords =>
        log.info("Fetching library info matching %s" format kwords.mkString(", "))
        http(Client((host in lskey).value).search(kwords.toSeq) OK as.String)
          .either.map(_.fold({
            case StatusCode(404) =>
              log.info("Library not found for keywords %s" format kwords.mkString(", "))
          }, { str =>
            // keyword search truncates to 3 versions per library
            libraries(
              parseJson[List[LibraryVersions]](str),
              log, color, 3,
              args:_*
            )
          }))
          .apply()
    }
  },
  (aggregate in lskey) := false,
  (aggregate in lsDocs) := false,
  commands ++= Seq(lsTry, lsInstall)
)
/** Assume the default resolver is the Sonatype OSS releases repo */
lazy val DefaultResolvers = Seq(Opts.resolver.sonatypeReleases)
// Settings for the publishing side: building the version descriptor
// (versionInfo), writing it (write-version), validating it (lint) and
// synchronizing it with the ls server (lsync).
def lsPublishSettings: Seq[Setting[_]] = Seq(
  host in lsync := DefaultLsHost,
  colors in lsync := true,
  // strip "-SNAPSHOT" so descriptors always carry a release-style version
  version in lsync <<= (version in Runtime)(_.replace("-SNAPSHOT","")),
  sourceDirectory in lsync <<= (sourceDirectory in Compile)( _ / "ls"),
  // descriptor lives at src/main/ls/<version>.json
  versionFile <<= (sourceDirectory in lsync, version in lsync)(_ / "%s.json".format(_)),
  // exclude scala lib and test dependencies by default
  dependencyFilter := { m =>
    !(scalaLib(m) || testDependency(m))
  },
  docsUrl in lsync := None,
  tags in lsync := Nil,
  description in lsync <<= (description in Runtime),
  homepage in lsync <<= (homepage in Runtime),
  optionals <<= (description in lsync, homepage in lsync, tags in lsync,
    docsUrl in lsync, licenses in lsync)(
    (desc, homepage, tags, docs, lics) =>
      Optionals(desc, homepage, tags, docs, lics.map {
        case (name, url) => License(name, url.toString)
      }
    )
  ),
  skipWrite := false,
  externalResolvers in lsync := DefaultResolvers,
  licenses in lsync <<= licenses in Runtime,
  versionInfo <<=
    (organization,
     name,
     version,
     optionals,
     externalResolvers in lsync,
     libraryDependencies,
     dependencyFilter,
     sbtPlugin,
     crossScalaVersions) map {
      // NOTE(review): the names (csv, pi) do NOT line up with the key order
      // above — `csv` receives sbtPlugin and `pi` receives crossScalaVersions.
      // The VersionInfo(..., pi, csv) call swaps them back, so this appears
      // correct but is misleadingly named; verify against VersionInfo's
      // constructor signature.
      (o, n, v, opts, rsvrs, ldeps, dfilter, csv, pi) =>
        VersionInfo(o, n, v, opts, rsvrs, ldeps.filter(dfilter), pi, csv)
    },
  lint <<= lintTask,
  cat <<= catTask,
  writeVersion <<= writeVersionTask,
  // derive GitHub coordinates from the local git remote
  ghUser in lsync := (Git.ghRepo match {
    case Some((user, _)) => Some(user)
    case _ => None
  }),
  ghRepo in lsync := (Git.ghRepo match {
    case Some((_, repo)) => Some(repo)
    case _ => None
  }),
  ghBranch in lsync := Git.branch.orElse(Some("master")),
  lsync <<= lsyncTask,
  (aggregate in lsync) := false
)
def lsSettings: Seq[Setting[_]] = lsSearchSettings ++ lsPublishSettings
/** Performs a `best-attempt` at retrieving a uri for library documentation before
 *  attempting to launch it. Falls back from the version's docs url, to the
 *  library's site, to its GitHub page. Requires exactly one matching library. */
private def docsFor(libs: Seq[LibraryVersions], targetVersion: Option[String], log: sbt.Logger) =
  libs match {
    case Seq() => log.info("Library not found")
    case Seq(lib) =>
      // pick the requested version, or the first listed one
      (targetVersion match {
        case Some(v) => lib.versions.find(_.version == v)
        case _ => lib.versions.headOption
      }) match {
        case Some(vers) =>
          // doc-url precedence: version docs > library site > github page
          (vers.docs match {
            case s if (!s.isEmpty) => Some(s)
            case _ => lib.site match {
              case s if (!s.isEmpty) => Some(s)
              case _ => ((lib.ghuser, lib.ghrepo)) match {
                case (Some(u), Some(r)) => Some("https://github.com/%s/%s/" format(
                  u, r
                ))
                case _ => None
              }
            }
          }) match {
            case Some(d) => launch(d) match {
              case Some(err) => log.warn("Unable to launch docs %s" format d)
              case _ => ()
            }
            case _ => log.info("No docs available for %s@%s" format(
              lib.name, vers.version
            ))
          }
        case _ => log.info("No docs available for %s" format lib.name)
      }
    case _ => log.info("More than one library found, try to narrow your search")
  }
/** Attempts to launch the provided uri in the system browser.
 *  Returns None on success (or for an empty uri), Some(error) on failure.
 *  java.awt.Desktop is invoked reflectively — presumably to avoid a
 *  hard AWT dependency at class-load time (e.g. headless sbt) — TODO confirm. */
private def launch(uri: String) =
  uri match {
    case u if (!u.isEmpty) =>
      try {
        import java.net.URI
        val dsk = Class.forName("java.awt.Desktop")
        dsk.getMethod("browse", classOf[URI]).invoke(
          dsk.getMethod("getDesktop").invoke(null), new URI(u)
        )
        None
      } catch { case NonFatal(e) => Some(e) }
    case empty => None
  }
// Pretty-prints matching libraries as " - name (versions) # description",
// optionally highlighting the search terms with ANSI colors.
//
// Fixes over the previous revision:
//  * empty `libs` used to crash (`.sortWith(_>_).head` on an empty list was
//    evaluated before the isEmpty check) — the guard now comes first
//  * a library with exactly `maxVersions` versions was truncated and given a
//    spurious "..." suffix; the comparison is now `<=` so "..." only appears
//    when versions were actually omitted
private def libraries(libs: Seq[LibraryVersions], log: sbt.Logger, colors: Boolean, maxVersions: Int, terms: String*) = {
  if (libs.isEmpty) log.info("(no projects matching the terms %s)" format terms.mkString(" "))
  else {
    // version list, truncated to maxVersions (-1 means "all")
    def versions(l: LibraryVersions) =
      if (maxVersions == -1 || l.versions.size <= maxVersions) l.versions.map(_.version)
      else l.versions.take(maxVersions).map(_.version) :+ "..."
    val tups = libs.map(l => (l.name, versions(l).mkString(", "), l.description))
    // column width: widest "name (versions)" cell, at least 10 (max replaces
    // the former sort-then-head, which did the same in O(n log n))
    val len = math.max(tups.map { case (n, vs, _) => "%s (%s)".format(n, vs).size }.max, 10)
    val fmt = "%s %-" + len + "s # %s"
    val lterms: Seq[String] = terms.toList
    // Wraps each search term occurrence in a color from the wheel; recurses
    // over the terms, rotating the wheel per term. No-op when colors is off.
    def hl(txt: String, terms: Seq[String],
           cw: Wheel[String] = Wheels.default): String =
      if (colors) terms match {
        case head :: Nil =>
          txt.replaceAll("""(?i)(\?\S+)?(""" + head + """)(\?\S+)?""", cw.get + "$0\033[0m").trim
        case head :: tail =>
          hl(txt.replaceAll("""(?i)(\?\S+)?(""" + head + """)(\\S+)?""", cw.get + "$0\033[0m").trim,
             tail, cw.turn)
        case Nil => txt
      }
      else txt
    val clrs = Wheels.shuffle
    tups foreach { case (n, v, d) =>
      println(
        hl(fmt format(
          " -",
          "%s (%s)".format(n, v),
          d
        ), lterms, Wheels.colorWheel(clrs))
      )
    }
  }
}
/* https://github.com/harrah/xsbt/wiki/Configurations */
// True when the module is scoped to a test configuration
// (its ivy configuration string starts with "test").
private def testDependency(m: sbt.ModuleID) =
  m.configurations.exists(_.trim.toLowerCase.startsWith("test"))
// True when the module belongs to the scala standard distribution.
private def scalaLib(m: sbt.ModuleID) = {
  val org = m.organization.trim.toLowerCase
  org == "org.scala-lang"
}
}
| softprops/ls | plugin/src/main/scala/ls.scala | Scala | mit | 17,429 |
package com.amichalo.smooolelo.providers
import java.io.File
class BasicWorkingDirectoryProvider extends WorkingDirectoryProvider {
  // Canonical path of the process working directory; resolved lazily on the
  // first call to apply() and cached for every call after that.
  private lazy val workingDirectory: String = new File(".").getCanonicalPath

  override def apply(): String = workingDirectory
}
| amichalo/smooolelo | src/main/scala/com/amichalo/smooolelo/providers/BasicWorkingDirectoryProvider.scala | Scala | apache-2.0 | 292 |
package com.sfxcode.sapphire.core.showcase.controller
import com.sfxcode.sapphire.core.fxml.FxmlLocation
import com.sfxcode.sapphire.core.showcase.bean.SimpleBeanController
import com.sfxcode.sapphire.core.showcase.controller.control.{
TableCellController,
TableValueController,
TreeTableValueController
}
import com.sfxcode.sapphire.core.showcase.{ ShowcaseController, ShowcaseItem }
import com.typesafe.scalalogging.LazyLogging
// Root controller of the showcase app: registers the demo pages as
// ShowcaseItems and opens the welcome page on first display.
class ShowcaseViewController extends ShowcaseController with LazyLogging {
  private val welcomeItem = ShowcaseItem("Welcome", "Welcome", () => getController[WelcomeController]())
  private val controlTableValueItem =
    ShowcaseItem("Control", "Table Value Factory", () => getController[TableValueController]())
  private val controlTableCellItem =
    ShowcaseItem("Control", "Table Cell Factory", () => getController[TableCellController]())
  private val controlTreeTableValueItem =
    ShowcaseItem("Control", "TreeTable", () => getController[TreeTableValueController]())
  private val beanBindingsCellItem =
    ShowcaseItem("FXBean", "Bindings", () => getController[SimpleBeanController]())
  private val items =
    List(welcomeItem, controlTableValueItem, controlTableCellItem, controlTreeTableValueItem, beanBindingsCellItem)

  // Runs once, the first time the view becomes visible: populates the item
  // list and selects the welcome page.
  override def didGainVisibilityFirstTime(): Unit = {
    super.didGainVisibilityFirstTime()
    //infoLabel.setText(Application.title)
    updateShowcaseItems(items)
    changeShowcaseItem(welcomeItem)
  }
}
| sfxcode/sapphire-core | demos/showcase/src/main/scala/com/sfxcode/sapphire/core/showcase/controller/ShowcaseViewController.scala | Scala | apache-2.0 | 1,490 |
package loom.models
/**
*
* @author chaosky
*/
// Application-wide constants: JSON response keys/status codes, paging
// defaults, model names and session keys.
object Model {
  val semicolon = ";"
  // JSON response field names
  val Ret = "ret"
  val Msg = "msg"
  // status codes carried in the "ret" field
  val Error = -1
  val Success = 0
  // paging defaults
  val PageNo = 1
  val PageSize = 5
  //models
  val M_Accounts = "accounts"
  val M_Roles = "roles"
  val M_Permissions = "permissions"
  val M_Users = "users"
  val M_App = "app"
  //session key
  val user_loginname = "username"
  val admin_loginname = "email"
}
object UModel {
val M_Member = "member"
val M_Permission = "permission"
} | chaosky/loom | app/loom/models/Model.scala | Scala | mit | 507 |
package LearningScalaSnippets
import LearningScalaSnippets.Implicits.{ImplicitToolbox, shared}
import scala.language.implicitConversions
object Implicits {
  // Two competing Int => String conversions. Importing exactly one of them
  // works; importing both makes any Int-to-String conversion ambiguous
  // (demonstrated by the commented-out Test3 below).
  object ImplicitToolbox {
    implicit def thisIsMyFirstImplicitButItsNameDoesntReallyMatter(z: Int): String = {
      s"Hello, ${z.toString}"
    }
    implicit def thisIsMySecondImplicitAndItsNameDoesntMatterAsWell(z: Int): String = {
      s"Bye, ${z.toString}"
    }
  }

  // Demo target: takes a String, so passing an Int forces an implicit conversion.
  object shared {
    def f(x: String): Option[String] = {
      Some(x)
    }
  }
}
// With only the first conversion imported, the Ints below are converted
// via the "Hello" conversion before reaching shared.f.
object Test1 extends App {
  import ImplicitToolbox.thisIsMyFirstImplicitButItsNameDoesntReallyMatter
  val blah: Int = 100
  val blah2: Int = 200
  println(shared.f(blah))
  println(shared.f(blah2))
}

// Same as Test1, but with the "Bye" conversion in scope instead.
object Test2 extends App {
  import ImplicitToolbox.thisIsMySecondImplicitAndItsNameDoesntMatterAsWell
  val blah: Int = 100
  val blah2: Int = 200
  println(shared.f(blah))
  println(shared.f(blah2))
}
/**
 * This won't compile!
 * The compiler doesn't know whether to use `thisIsMyFirstImplicitButItsNameDoesntReallyMatter` or
 * `thisIsMySecondImplicitAndItsNameDoesntMatterAsWell` to turn the Int into a String!
 */
//object Test3 extends App {
// import ImplicitToolbox._
// val blah: Int = 100
// val blah2: Int = 200
// println(shared.f(blah))
// println(shared.f(blah2))
//}
| mr-uuid/snippets | scala/src/main/scala/LearningScalaSnippets/UnderstandingImplicits.scala | Scala | mit | 1,329 |
package slick.jdbc
import java.sql.ResultSet
/** Represents a result set type. */
/** Represents a result set type. */
sealed abstract class ResultSetType(val intValue: Int) { self =>
  // `intValue` holds the corresponding java.sql.ResultSet TYPE_* constant.

  /** Run a block of code on top of a JDBC session with this result set type */
  @deprecated("Use the new Action-based API instead", "3.0")
  def apply[T](base: JdbcBackend#Session)(f: JdbcBackend#Session => T): T = f(base.forParameters(rsType = self))

  /** Run a block of code on top of the dynamic, thread-local JDBC session with this result set type */
  @deprecated("Use the new Action-based API instead", "3.0")
  def apply[T](f: => T)(implicit base: JdbcBackend#Session): T = apply(base)(_.asDynamicSession(f))

  /** Return this `ResultSetType`, unless it is `Auto` in which case
    * the specified result set type is returned instead. */
  def withDefault(r: ResultSetType) = this
}
object ResultSetType {
  /** The current result set type of the JDBC driver.
    * Note: only Auto overrides withDefault, deferring to the caller-supplied default. */
  case object Auto extends ResultSetType(ResultSet.TYPE_FORWARD_ONLY) {
    override def withDefault(r: ResultSetType) = r
  }

  /** Represents a result set type that only allows result sets to be read sequentially
    * (i.e. the cursor may only move forward). */
  case object ForwardOnly extends ResultSetType(ResultSet.TYPE_FORWARD_ONLY)

  /** Represents a result set type that allows result sets to be navigated in a
    * non-linear way while keeping the original data in the result set intact. */
  case object ScrollInsensitive extends ResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)

  /** Represents a result set type that allows result sets to be navigated in a
    * non-linear way, and changes in the underlying data to be observed. */
  case object ScrollSensitive extends ResultSetType(ResultSet.TYPE_SCROLL_SENSITIVE)
}
| adamkozuch/slick | slick/src/main/scala/slick/jdbc/ResultSetType.scala | Scala | bsd-2-clause | 1,763 |
/*
*
* Copyright 2015 David Hall
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* /
*/
package breeze.linalg
import breeze.generic.UFunc
import breeze.linalg.support.CanMapValues
import breeze.math.Complex
import scala.reflect.ClassTag
import scala.{specialized => spec}
/**
* UFunc for being able to map the keys and values in a value collection
* to new values.
*
* @author dramage
* @author dlwh
*/
object mapValues extends UFunc with mapValuesLowPrio {
  // Per-primitive implicit instances of the low-priority canMapSelf fallback.
  // Spelling them out per type makes them higher priority than the generic
  // version in mapValuesLowPrio and avoids boxing-driven ambiguity.
  implicit def canMapSelfDouble[V2]: Impl2[Double, Double => V2, V2] = canMapSelf[Double, V2]
  implicit def canMapSelfInt[V2]: Impl2[Int, Int => V2, V2] = canMapSelf[Int, V2]
  implicit def canMapSelfFloat[V2]: Impl2[Float, Float => V2, V2] = canMapSelf[Float, V2]
  implicit def canMapSelfLong[V2]: Impl2[Long, Long => V2, V2] = canMapSelf[Long, V2]
  implicit def canMapSelfShort[V2]: Impl2[Short, Short => V2, V2] = canMapSelf[Short, V2]
  implicit def canMapSelfByte[V2]: Impl2[Byte, Byte => V2, V2] = canMapSelf[Byte, V2]
  implicit def canMapSelfChar[V2]: Impl2[Char, Char => V2, V2] = canMapSelf[Char, V2]

  //
  // Arrays
  //
  // Element-wise map over arrays; @spec generates unboxed variants for the
  // listed primitive element types.
  class OpArray[@spec(Double, Int, Float, Long) A, @spec(Double, Int, Float, Long) B: ClassTag]
    extends Impl2[Array[A], A => B, Array[B]] {

    /**Maps all values from the given collection. */
    def apply(from: Array[A], fn: (A) => B): Array[B] = {
      val arr = new Array[B](from.length)
      for(i <- 0 until from.length) {
        arr(i) = fn(from(i))
      }
      arr
    }
  }

  implicit def opArray[@spec A, @spec B: ClassTag]: OpArray[A, B] =
    new OpArray[A, B]
  // Concrete singletons for the common primitive combinations; these win
  // implicit resolution over the generic opArray and avoid allocation.
  implicit object OpArrayII extends OpArray[Int, Int]
  implicit object OpArraySS extends OpArray[Short, Short]
  implicit object OpArrayLL extends OpArray[Long, Long]
  implicit object OpArrayFF extends OpArray[Float, Float]
  implicit object OpArrayDD extends OpArray[Double, Double]
  implicit object OpArrayCC extends OpArray[Complex, Complex]
  implicit object OpArrayID extends OpArray[Int, Double]
  implicit object OpArraySD extends OpArray[Short, Double]
  implicit object OpArrayLD extends OpArray[Long, Double]
  implicit object OpArrayFD extends OpArray[Float, Double]
}
sealed trait mapValuesLowPrio { this: mapValues.type =>
  // Fallback instance treating a bare value as its own container: mapping is
  // just function application. Deliberately NOT implicit here — the typed
  // wrappers in mapValues expose it per primitive type instead.
  /*implicit*/ def canMapSelf[V, V2]: Impl2[V, V => V2, V2] = {
    new Impl2[V, V => V2, V2] {
      def apply(from: V, fn: (V) => V2) = fn(from)
      // NOTE(review): mapActive is a structural extra, not part of Impl2 —
      // appears unused through this interface.
      def mapActive(from: V, fn: (V) => V2) = fn(from)
    }
  }
}
object mapActiveValues extends UFunc {
implicit def implFromCanMapValues[T, V, V2, R](implicit cmv: CanMapValues[T, V, V2, R]): Impl2[T, V=>V2, R] = new Impl2[T, V=>V2, R] {
override def apply(v: T, v2: (V) => V2): R = cmv(v, v2)
}
} | chen0031/breeze | math/src/main/scala/breeze/linalg/functions/mapValues.scala | Scala | apache-2.0 | 3,225 |
// Check that calling `asInstanceOf[Nothing]` throws a ClassCastException.
// In particular, the compiler needs access to the right method to throw
// the exception, and identifying the method uses some explicit nulls related
// logic (see ClassCastExceptionClass in Definitions.scala).
object Test {
  // Exercises asInstanceOf[Nothing] on a non-null and a null receiver;
  // each cast must raise ClassCastException at runtime.
  def main(args: Array[String]): Unit = {
    val x: String = "hello"
    expectClassCast(x.asInstanceOf[Nothing])
    val n: Null = null
    expectClassCast(n.asInstanceOf[Nothing])
  }

  // Evaluates the by-name cast and asserts it throws ClassCastException.
  private def expectClassCast(cast: => Nothing): Unit =
    try {
      cast
      assert(false)
    } catch {
      case _: ClassCastException => () // expected
    }
}
| som-snytt/dotty | tests/explicit-nulls/run/instanceof-nothing.scala | Scala | apache-2.0 | 695 |
// Fixture types for the escaping-reference tests below: nested inner
// classes, an abstract type member and a simple generic class.
class Outer {
  class Inner {
    class Inner2
  }
}
class HasA { type A }
class Foo[A]
object Test {
  // Each block yields a value whose precise type mentions a block-local
  // symbol (o, h, Bar); the ascribed types show how those references can be
  // widened (projections, refinements, wildcards) so nothing escapes scope.
  def test = {
    // path-dependent o.Inner widens to the projection Outer#Inner
    val a: Outer#Inner = {
      val o = new Outer
      new o.Inner
    }
    // two levels of nesting widen the same way
    val b: Outer#Inner#Inner2 = {
      val o = new Outer
      val i = new o.Inner
      new i.Inner2
    }
    // the refinement { type A = Int } survives via the intermediate ascription
    val c: HasA { type A = Int } = {
      val h = new HasA {
        type A = Int
      }
      val x: HasA { type A = h.A } = h
      x
    }
    // local subclass Bar is widened to its visible parent Foo[Int]
    val d: Foo[Int] = {
      class Bar[B] extends Foo[B]
      new Bar[Int]
    }
    // ascribing an existential (Bar[_ <: Int]) lets the wildcard Foo[_] escape
    val e: Foo[_] = {
      class Bar[B] extends Foo[B]
      new Bar[Int]: Bar[_ <: Int]
    }
  }
}
| densh/dotty | tests/pos/escapingRefs.scala | Scala | bsd-3-clause | 637 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
// sbt build definition for the Flowershop Scalatra web app.
object FlowershopBuild extends Build {
  val Organization = "com.example"
  val Name = "Flowershop"
  val Version = "0.1.0-SNAPSHOT"
  val ScalaVersion = "2.11.5"
  val ScalatraVersion = "2.4.0.M2"

  lazy val project = Project (
    "flowershop",
    file("."),
    // Scalatra + JRebel + Scalate template compilation on top of sbt defaults
    settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ scalateSettings ++ Seq(
      organization := Organization,
      name := Name,
      version := Version,
      scalaVersion := ScalaVersion,
      resolvers += "Sonatype OSS Snapshots" at "http://oss.sonatype.org/content/repositories/snapshots/",
      libraryDependencies ++= Seq(
        "org.json4s"   %% "json4s-native" % "3.2.9",
        "org.scalatra" %% "scalatra-swagger"  % ScalatraVersion,
        "org.scalatra" %% "scalatra" % ScalatraVersion,
        "org.scalatra" %% "scalatra-scalate" % ScalatraVersion,
        "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
        "ch.qos.logback" % "logback-classic" % "1.1.2" % "runtime",
        "org.eclipse.jetty" % "jetty-webapp" % "9.1.5.v20140505" % "container",
        "org.eclipse.jetty" % "jetty-plus" % "9.1.5.v20140505" % "container",
        "javax.servlet" % "javax.servlet-api" % "3.1.0" % "container;provided;test"
      ),
      // Compile Scalate templates found under webapp/WEB-INF/templates
      scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
        Seq(
          TemplateConfig(
            base / "webapp" / "WEB-INF" / "templates",
            Seq.empty,  /* default imports should be added here */
            Seq.empty,  /* add extra bindings here */
            Some("templates")
          )
        )
      }
    )
  )
}
| jason-womack/flowershop | project/build.scala | Scala | mit | 1,778 |
package week3
import java.util.NoSuchElementException
/**
* Created by keid on 25/09/2016.
*/
// Cons-list ADT: a list is either a Cons cell or the empty Nil.
trait List[T] {
  def isEmpty: Boolean
  def head: T
  def tail: List[T]
}

// `val` parameters are evaluated once at construction and become fields that
// implement (override) the trait's abstract `head` and `tail`.
class Cons[T](val head: T, val tail: List[T]) extends List[T]{
  def isEmpty = false
}

// Empty list: accessing head or tail throws.
class Nil[T] extends List[T]{
  def isEmpty: Boolean = true
  def head: Nothing = throw new NoSuchElementException("Nil.head")
  def tail: Nothing = throw new NoSuchElementException("Nil.tail")
} | kevllino/scala-specialization | 00-ProgFun/worksheets/src/main/scala/week3/List.scala | Scala | mit | 560 |
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.model.domain
/** Author credit; `type` is backtick-quoted because it is a Scala keyword. */
case class Author(`type`: String, name: String)
| NDLANO/learningpath-api | src/main/scala/no/ndla/learningpathapi/model/domain/Author.scala | Scala | gpl-3.0 | 184 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import psi.ScalaPsiElementImpl
import com.intellij.lang.ASTNode
import api.expr._
import types.Unit
import types.result.{Success, TypingContext}
import com.intellij.psi.{PsiField, ResolveState, PsiElementVisitor}
import api.ScalaElementVisitor
import resolve.{StdKinds, ScalaResolveResult}
import api.statements.{ScFunction, ScVariable}
import resolve.processor.MethodResolveProcessor
import psi.types.Compatibility.Expression
import api.statements.params.ScClassParameter
/**
* @author Alexander Podkhalyuzin
*/
class ScAssignStmtImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScAssignStmt {
  override def toString: String = "AssignStatement"

  // Type of the assignment expression. A plain assignment types as Unit,
  // but when the left side resolves to a `foo_=` setter the type is taken
  // from the synthetic mirror method call (see mirrorMethodCall below).
  protected override def innerType(ctx: TypingContext) = {
    getLExpression match {
      case call: ScMethodCall => call.getType(ctx)
      case _ =>
        resolveAssignment match {
          case Some(resolveResult) =>
            mirrorMethodCall match {
              case Some(call) => call.getType(TypingContext.empty)
              case None => Success(Unit, Some(this))
            }
          case _ => Success(Unit, Some(this))
        }
    }
  }

  override def accept(visitor: PsiElementVisitor) {
    visitor match {
      case visitor: ScalaElementVisitor => super.accept(visitor)
      case _ => super.accept(visitor)
    }
  }
  // --- caches keyed on the PSI modification count -------------------------
  // NOTE(review): each cache is two separate @volatile fields (value +
  // mod-count), so a concurrent reader can observe a fresh value with a
  // stale count or vice versa; worst case appears to be a redundant
  // recomputation — confirm that is acceptable here.
  @volatile
  private var assignment: Option[ScalaResolveResult] = null
  @volatile
  private var assignmentModCount: Long = 0L

  // Cached result of resolving the left side as a `foo_=` setter (full resolve).
  def resolveAssignment: Option[ScalaResolveResult] = {
    val count = getManager.getModificationTracker.getModificationCount
    var res = assignment
    if (res != null && count == assignmentModCount) return assignment
    res = resolveAssignmentInner(shapeResolve = false)
    assignmentModCount = count
    assignment = res
    res
  }

  @volatile
  private var shapeAssignment: Option[ScalaResolveResult] = null
  @volatile
  private var shapeAssignmentModCount: Long = 0L

  // Same as resolveAssignment but using shape resolve (argument shapes only).
  def shapeResolveAssignment: Option[ScalaResolveResult] = {
    val count = getManager.getModificationTracker.getModificationCount
    var res = shapeAssignment
    if (res != null && count == shapeAssignmentModCount) return shapeAssignment
    res = resolveAssignmentInner(shapeResolve = true)
    shapeAssignmentModCount = count
    shapeAssignment = res
    res
  }

  @volatile
  private var methodCall: Option[ScMethodCall] = null
  @volatile
  private var methodCallModCount: Long = 0L

  // Builds (and caches) the synthetic call `name_=(rhs)` that mirrors this
  // assignment, wiring its reference to resolve via the cached results above.
  def mirrorMethodCall: Option[ScMethodCall] = {
    def impl(): Option[ScMethodCall] = {
      getLExpression match {
        case ref: ScReferenceExpression =>
          val text = s"${ref.refName}_=(${getRExpression.map(_.getText).getOrElse("")})"
          val mirrorExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(text, getContext, this)
          mirrorExpr match {
            case call: ScMethodCall =>
              call.getInvokedExpr.asInstanceOf[ScReferenceExpression].setupResolveFunctions(
                () => resolveAssignment.toArray, () => shapeResolveAssignment.toArray
              )
              Some(call)
            case _ => None
          }
        case _ => None
      }
    }
    val count = getManager.getModificationTracker.getModificationCount
    var res = methodCall
    if (res != null && count == methodCallModCount) return methodCall
    res = impl()
    methodCallModCount = count
    methodCall = res
    res
  }

  // Resolves the left-hand reference; vars/fields need no setter (None),
  // while an eligible function is re-resolved as `name_=` with the
  // right-hand expression as the argument. Only an unambiguous single
  // candidate is returned.
  private def resolveAssignmentInner(shapeResolve: Boolean): Option[ScalaResolveResult] = {
    getLExpression match {
      case ref: ScReferenceExpression =>
        ref.bind() match {
          case Some(r: ScalaResolveResult) =>
            ScalaPsiUtil.nameContext(r.element) match {
              case v: ScVariable => None
              case c: ScClassParameter if c.isVar => None
              case f: PsiField => None
              case fun: ScFunction if ScalaPsiUtil.isViableForAssignmentFunction(fun) =>
                val processor = new MethodResolveProcessor(ref, fun.name + "_=",
                  getRExpression.map(expr => List(Seq(new Expression(expr)))).getOrElse(Nil), Nil, ref.getPrevTypeInfoParams,
                  isShapeResolve = shapeResolve, kinds = StdKinds.methodsOnly)
                r.fromType match {
                  case Some(tp) => processor.processType(tp, ref)
                  case None =>
                    fun.getContext match {
                      case d: ScDeclarationSequenceHolder =>
                        d.processDeclarations(processor, ResolveState.initial(), fun, ref)
                      case _ =>
                    }
                }
                val candidates = processor.candidatesS
                if (candidates.size == 1) Some(candidates.toArray.apply(0))
                else None
              case _ => None
            }
          case _ => None
        }
      case _ => None
    }
  }
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScAssignStmtImpl.scala | Scala | apache-2.0 | 4,948 |
package it.chalmers.recieveit
import org.scalatra._
class MainServlet extends ScalatraServlet {
  // GET / — fixed greeting
  get("/") {
    Ok("Hello, World!")
  }

  // GET /:name — greeting personalized with the path parameter
  get("/:name") {
    val name = params("name")
    Ok(s"Hello, $name!")
  }
}
| Dr-Horv/recieveIT | src/main/scala/it/chalmers/recieveit/MainServlet.scala | Scala | mit | 221 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
/**
* A <code>Reporter</code> that prints test status information to
* the standard output stream.
*
* @author Bill Venners
*/
// Thin subclass binding PrintReporter to Console.out; every presentation
// flag is forwarded verbatim to the superclass constructor.
private[scalatest] class StandardOutReporter(
  presentAllDurations: Boolean,
  presentInColor: Boolean,
  presentShortStackTraces: Boolean,
  presentFullStackTraces: Boolean,
  presentUnformatted: Boolean,
  presentReminder: Boolean,
  presentReminderWithShortStackTraces: Boolean,
  presentReminderWithFullStackTraces: Boolean,
  presentReminderWithoutCanceledTests: Boolean,
  presentFilePathname: Boolean,
  presentJson: Boolean
) extends PrintReporter(
  Console.out,
  presentAllDurations,
  presentInColor,
  presentShortStackTraces,
  presentFullStackTraces,
  presentUnformatted,
  presentReminder,
  presentReminderWithShortStackTraces,
  presentReminderWithFullStackTraces,
  presentReminderWithoutCanceledTests,
  presentFilePathname,
  presentJson
) {

  /**
   * Does nothing, because don't want to dispose the standard output stream.
   */
  override def dispose(): Unit = {
  }
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/tools/StandardOutReporter.scala | Scala | apache-2.0 | 1,658 |
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.validation
import no.ndla.learningpathapi.model.api.ValidationMessage
import no.ndla.mapping.ISO639.get6391CodeFor6392CodeMappings
// Cake-pattern component validating ISO 639-1 language codes.
trait LanguageValidator {
  val languageValidator: LanguageValidator

  class LanguageValidator {

    // True when `languageCode` is a known ISO 639-1 code, or "unknown" when
    // unknown languages are allowed.
    private def languageCodeSupported6391(languageCode: String, allowUnknownLanguage: Boolean): Boolean = {
      val languageCodes = get6391CodeFor6392CodeMappings.values.toSeq ++ (if (allowUnknownLanguage)
                                                                            Seq("unknown")
                                                                          else
                                                                            Seq.empty)
      languageCodes.contains(languageCode)
    }

    /** Returns None for a valid, non-empty code; otherwise a ValidationMessage
      * for `fieldPath`. (Replaces the former `match { case true/false }` on a
      * Boolean with a plain if/else — behavior unchanged.) */
    def validate(fieldPath: String, languageCode: String, allowUnknownLanguage: Boolean): Option[ValidationMessage] = {
      if (languageCode.nonEmpty && languageCodeSupported6391(languageCode, allowUnknownLanguage))
        None
      else
        Some(ValidationMessage(fieldPath, s"Language '$languageCode' is not a supported value."))
    }
  }
}
| NDLANO/learningpath-api | src/main/scala/no/ndla/learningpathapi/validation/LanguageValidator.scala | Scala | gpl-3.0 | 1,258 |
package com.github.mdr.ascii.layout.layering
// Counts edge crossings between two adjacent layers of a layered graph
// drawing; `edges` run from layer1 to layer2.
class CrossingCalculator(layer1: Layer, layer2: Layer, edges: List[Edge]) {

  /**
   * @param u, v, two vertices in layer2 such that u is v or u is ordered before v in the layer
   */
  // Number of crossings forced by the pair (u, v): edge pairs (w->u, z->v)
  // cross exactly when z precedes w in layer1 while u precedes v in layer2.
  def crossingNumber(u: Vertex, v: Vertex): Int =
    if (u == v)
      0
    else {
      var count = 0
      for {
        Edge(w, u2) ← edges if u2 == u
        Edge(z, v2) ← edges if v2 == v
        if layer1.positionOf(z) < layer1.positionOf(w)
      } count += 1
      count
    }

  /**
   * The number of crossings between two layers
   */
  // Sums crossingNumber over all ordered vertex pairs of layer2.
  // NOTE(review): this is quadratic in vertices times edge-pair scans —
  // fine for small layers, worth revisiting for large graphs.
  def numberOfCrossings: Int =
    (for {
      u ← layer2.vertices
      v ← layer2.vertices
      if layer2.positionOf(u) < layer2.positionOf(v)
    } yield crossingNumber(u, v)).sum
} | jlmauduy/ascii-graphs | src/main/scala/com/github/mdr/ascii/layout/layering/CrossingCalculator.scala | Scala | mit | 774 |
package com.gilt.aws.lambda
// Minimal success/failure result type used to report operation outcomes.
sealed trait Result[+T]
// Carries the value produced by a successful operation.
case class Success[T](result: T) extends Result[T]
// Carries the exception raised by a failed operation.
case class Failure(exception: Throwable) extends Result[Nothing]

// Type-safe wrappers around the raw configuration strings, so the different
// identifiers (bucket, key, lambda name/ARN, handler, role) cannot be mixed up.
case class S3BucketId(value: String)
case class S3Key(value: String)
case class LambdaName(value: String)
case class LambdaARN(value: String)
case class HandlerName(value: String)
case class RoleARN(value: String)

// Names of the environment variables read for plugin configuration.
object EnvironmentVariables {
  val bucketId = "AWS_LAMBDA_BUCKET_ID"
  val lambdaName = "AWS_LAMBDA_NAME"
  val handlerName = "AWS_LAMBDA_HANDLER_NAME"
  val roleArn = "AWS_LAMBDA_IAM_ROLE_ARN"
}
| Jimdo/sbt-aws-lambda | src/main/scala/com/gilt/aws/lambda/DomainModels.scala | Scala | apache-2.0 | 582 |
package actor
import org.apache.http.client.HttpClient
import akka.actor.{ActorRef, Actor}
import org.apache.http.client.methods.HttpGet
import play.api.Logger
import org.apache.http.HttpStatus
import org.apache.http.util.EntityUtils
import javax.imageio.ImageIO
import java.io.ByteArrayInputStream
import model.Article
import scala.util.control.Breaks._
/**
 * The Class ImageFetcher.
 *
 * Actor that, for each incoming [[Article]], downloads the article's
 * candidate images, keeps the largest one found (by width + height),
 * and forwards the article (with the chosen feature image) to `persistent`.
 *
 * @author Nguyen Duc Dung
 * @since 2/23/14 3:42 PM
 *
 */
class ImageFetcher(httpClient: HttpClient, persistent: ActorRef) extends Actor {

  // youtube thumb image size
  val best_size = 500
  val min_size = 150

  override def receive = {
    case article: Article =>
      if (article.featureImage.isEmpty) {
        // Track the best candidate seen so far (size = width + height).
        var bestImage: Option[String] = None
        var bestSize: Int = 0
        breakable {
          article.potentialImages.foreach(url => {
            // foreach (not map): the Option result is used only for its side effect.
            fetch(url).foreach(tuple => {
              if (bestSize < tuple._2) {
                bestImage = Some(tuple._1)
                bestSize = tuple._2
                // Good enough — stop downloading further candidates.
                if (bestSize >= best_size) {
                  break()
                }
              }
            })
          })
        }
        persistent ! article.copy(featureImage = bestImage)
      } else {
        // Feature image already set: pass the article through unchanged.
        persistent ! article
      }
  }

  /**
   * Downloads the image at `url` and returns it together with its size.
   *
   * @param url the image URL to download
   * @return Some((url, width + height)) when a decodable image of at least
   *         `min_size` was fetched; None on HTTP errors, undecodable content,
   *         too-small images, or any exception (which is logged).
   */
  def fetch(url: String): Option[(String, Int)] = {
    try {
      val response = httpClient.execute(new HttpGet(url))
      try {
        Logger.info(s"Download ${response.getStatusLine.getStatusCode} : $url")
        if (response.getStatusLine.getStatusCode == HttpStatus.SC_OK) {
          val content = EntityUtils.toByteArray(response.getEntity)
          if (content != null && content.length > 0) {
            val input = new ByteArrayInputStream(content)
            try {
              // ImageIO.read returns null for content it cannot decode.
              Option(ImageIO.read(input)) collect {
                case image if image.getWidth + image.getHeight >= min_size =>
                  (url, image.getWidth + image.getHeight)
              }
            } finally {
              input.close()
            }
          } else None
        } else None
      } finally {
        // Always release the connection, even when processing above throws
        // (the original skipped consume() on exception, leaking the connection).
        EntityUtils.consume(response.getEntity)
      }
    } catch {
      case ex: Exception =>
        Logger.error("Error: " + url, ex)
        None
    }
  }
}
| SunriseSoftVN/hayhayblog | app/actor/ImageFetcher.scala | Scala | gpl-2.0 | 2,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic
import org.apache.toree.plugins.dependencies.Dependency
import org.apache.toree.plugins._
import org.mockito.Mockito._
import org.mockito.Matchers.{eq => mockEq, _}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpec, Matchers, OneInstancePerTest}
import test.utils
import MagicManagerSpec._
import scala.runtime.BoxedUnit
// Shared fixture output produced by SomeCellMagic below.
object MagicManagerSpec {
  val TestCellMagicOutput = CellMagicOutput("test" -> "value")
}

// No-op line magic, used to exercise line-magic detection.
class SomeLineMagic extends LineMagic {
  override def execute(code: String): Unit = {}
}

// Cell magic returning the fixture output above.
class SomeCellMagic extends CellMagic {
  override def execute(code: String): CellMagicOutput = TestCellMagicOutput
}

// A plugin that is not a magic at all (lookup should reject it).
private class SomePlugin extends Plugin

// A magic that is neither line nor cell flavored; execute is never called.
private class SomeMagic extends Magic {
  override def execute(code: String): Any = ???
}

// Marker exception and a line magic that always throws it.
class LineMagicException extends Exception
private class ExceptionLineMagic extends LineMagic {
  override def execute(code: String): Unit = throw new LineMagicException
}

// Marker exception and a cell magic that always throws it.
class CellMagicException extends Exception
private class ExceptionCellMagic extends CellMagic {
  override def execute(code: String): CellMagicOutput = throw new CellMagicException
}
// Unit tests for MagicManager: magic-type detection (#isLineMagic / #isCellMagic),
// magic lookup by name (#findMagic), and dynamic invocation (#applyDynamic)
// against a mocked PluginManager.
class MagicManagerSpec
  extends FunSpec with Matchers with MockitoSugar with OneInstancePerTest
{
  private val TestPluginName = "SomePlugin"
  private val TestMagicName = "SomeMagic"

  private val mockPluginManager = mock[PluginManager]
  // Spy wraps a real MagicManager so its logic runs while the plugin manager is mocked.
  private val magicManager = spy(new MagicManager(mockPluginManager))

  describe("MagicManager") {
    describe("#isLineMagic") {
      it("should return true if the magic extends the line magic interface") {
        val expected = true

        val mockLineMagic = mock[LineMagic]

        val actual = magicManager.isLineMagic(mockLineMagic)

        actual should be (expected)
      }

      it("should return false if the magic does not extend the line magic interface") {
        val expected = false

        val mockMagic = mock[Magic]

        val actual = magicManager.isLineMagic(mockMagic)

        actual should be (expected)
      }

      it("should throw an exception if provided null") {
        intercept[NullPointerException] {
          magicManager.isLineMagic(null)
        }
      }
    }

    describe("#isCellMagic") {
      it("should return true if the magic extends the cell magic interface") {
        val expected = true

        val mockCellMagic = mock[CellMagic]

        val actual = magicManager.isCellMagic(mockCellMagic)

        actual should be (expected)
      }

      it("should return false if the magic does not extend the cell magic interface") {
        val expected = false

        val mockMagic = mock[Magic]

        val actual = magicManager.isCellMagic(mockMagic)

        actual should be (expected)
      }

      it("should throw an exception if provided null") {
        intercept[NullPointerException] {
          magicManager.isCellMagic(null)
        }
      }
    }

    // Lookup of magics by (case-insensitive) name among the loaded plugins.
    describe("#findMagic") {
      it("should throw a MagicNotFoundException if no magic matches the name") {
        intercept[MagicNotFoundException] {
          doReturn(Seq(new Plugin {}).toIterable).when(mockPluginManager).plugins
          magicManager.findMagic(TestMagicName)
        }
      }

      it("should throw a MagicNotFoundException if there are no loaded plugins") {
        intercept[MagicNotFoundException] {
          doReturn(Nil).when(mockPluginManager).plugins
          magicManager.findMagic(TestMagicName)
        }
      }

      it("should throw a MagicNotFoundException if a plugin matches but is not a magic") {
        intercept[MagicNotFoundException] {
          doReturn(Seq(new SomePlugin).toIterable).when(mockPluginManager).plugins
          magicManager.findMagic(TestPluginName)
        }
      }

      it("should return the magic if exactly one is found") {
        val expected = new SomeMagic
        doReturn(Seq(expected).toIterable).when(mockPluginManager).plugins

        val actual = magicManager.findMagic(TestMagicName)

        actual should be (expected)
      }

      it("should return a magic whose name matches even if casing is different") {
        val expected = new SomeMagic
        doReturn(Seq(expected).toIterable).when(mockPluginManager).plugins

        val actual = magicManager.findMagic(TestMagicName.toUpperCase())

        actual should be (expected)
      }

      it("should return the first match if more than one magic matches the name") {
        val expected = new SomeMagic
        doReturn(Seq(expected, new utils.SomeMagic).toIterable)
          .when(mockPluginManager).plugins

        val actual = magicManager.findMagic(TestMagicName)

        actual should be (expected)
      }
    }

    // Dynamic invocation: magics are fired as plugin events and the plugin
    // method result is mapped to CellMagicOutput / LineMagicOutput.
    describe("#applyDynamic") {
      it("should return CellMagicOutput if the invocation of a magic throws an exception") {
        doReturn(Some(FailurePluginMethodResult(
          mock[PluginMethod],
          new LineMagicException()
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic("TEST")()
        result.asMap.get("text/plain") should not be(empty)
      }

      it("should fire an event with the lowercase of the magic name") {
        val arg: java.lang.String = "some arg"
        val pluginName = "TEST"
        val expected = Dependency.fromValueWithName("input", arg)

        doReturn(Some(FailurePluginMethodResult(
          mock[PluginMethod],
          new LineMagicException()
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        magicManager.applyDynamic(pluginName)(arg :: Nil: _*)

        verify(mockPluginManager).fireEventFirstResult(mockEq(pluginName.toLowerCase), any())
      }

      it("should take the first argument and convert it to a string to pass to the magic") {
        val arg: java.lang.String = "some arg"
        val pluginName = "TEST"
        val expected = Dependency.fromValueWithName("input", arg)

        doReturn(Some(FailurePluginMethodResult(
          mock[PluginMethod],
          new LineMagicException()
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        magicManager.applyDynamic(pluginName)(arg :: Nil: _*)

        verify(mockPluginManager).fireEventFirstResult(anyString(), mockEq(Seq(expected)): _*)
      }

      it("should pass an empty string to the line magic if no arguments are provided") {
        val arg: java.lang.String = ""
        val pluginName = "TEST"
        val expected = Dependency.fromValueWithName("input", arg)

        doReturn(Some(FailurePluginMethodResult(
          mock[PluginMethod],
          new LineMagicException()
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        magicManager.applyDynamic(pluginName)(Nil: _*)

        verify(mockPluginManager).fireEventFirstResult(anyString(), mockEq(Seq(expected)): _*)
      }

      it("should return a Right[LineMagicOutput] if line magic execution is successful and returns null") {
        val pluginName = "TEST"
        val expected = LineMagicOutput
        doReturn(Some(SuccessPluginMethodResult(
          mock[PluginMethod],
          null
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic(pluginName)(Nil: _*)
        result should be(expected)
      }

      it("should return a Right[LineMagicOutput] if line magic execution is successful and returns BoxedUnit") {
        val pluginName = "TEST"
        val expected = LineMagicOutput
        doReturn(Some(SuccessPluginMethodResult(
          mock[PluginMethod],
          BoxedUnit.UNIT
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic(pluginName)(Nil: _*)
        result should be(expected)
      }

      it("should return a Left[CellMagicOutput] if cell magic execution is successful") {
        val pluginName = "TEST"
        val cellMagicOutput = CellMagicOutput("our/type" -> "TEST CONTENT")
        doReturn(Some(SuccessPluginMethodResult(
          mock[PluginMethod],
          cellMagicOutput
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic(pluginName)(Nil: _*)
        result should be(cellMagicOutput)
      }

      it("should return a Left[CellMagicOutput] if is a magic but not a line or cell") {
        val pluginName = "TEST"
        doReturn(Some(SuccessPluginMethodResult(
          mock[PluginMethod],
          new AnyRef
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic(pluginName)(Nil: _*)
        result.asMap.get("text/plain") should not be (empty)
      }

      it("should return a Left[CellMagicOutput] if magic fails") {
        val pluginName = "TEST"
        doReturn(Some(FailurePluginMethodResult(
          mock[PluginMethod],
          new Throwable
        ))).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )

        val result = magicManager.applyDynamic(pluginName)(Nil: _*)
        result.asMap.get("text/plain") should not be (empty)
      }

      it("should throw a MagicNotFoundException when a magic cannot be found") {
        val pluginName = "THISMAGICDOESN'TEXIST"
        doReturn(None).when(mockPluginManager).fireEventFirstResult(
          anyString(), any(classOf[Dependency[_ <: AnyRef]])
        )
        intercept[MagicNotFoundException] {
          magicManager.applyDynamic(pluginName)(Nil: _*)
        }
      }
    }
  }
}
| Myllyenko/incubator-toree | kernel-api/src/test/scala/org/apache/toree/magic/MagicManagerSpec.scala | Scala | apache-2.0 | 10,887 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.registration.kickout
import iht.views.ViewTestHelper
import iht.views.html.registration.kickout.kickout_template
class KickoutTemplateViewTest extends ViewTestHelper {

  val summaryMessage = "kickout summary message"
  val returnLinkUrl = iht.controllers.registration.deceased.routes.DeceasedDateOfDeathController.onPageLoad
  val seqOfContents = Seq("lineOne", "lineTwo")

  lazy val kickoutTemplate: kickout_template = app.injector.instanceOf[kickout_template]

  // Renders the kickout template with the fixture data above and parses the
  // resulting HTML into a DOM document for assertions. Defined with () because
  // rendering is a side effect; all call sites below use () consistently
  // (the original invoked it without parentheses in four of the five tests).
  def kickOutTemplateView() = {
    implicit val request = createFakeRequest()
    val view = kickoutTemplate(summaryMessage,
      returnLinkUrl)(seqOfContents).toString()
    asDocument(view)
  }

  "KickoutTemplateView View" must {

    "have the correct title and summary message" in {
      val view = kickOutTemplateView()
      titleShouldBeCorrect(view.toString, messagesApi("iht.notPossibleToUseService"))
      browserTitleShouldBeCorrect(view.toString, messagesApi("iht.notPossibleToUseService"))
      messagesShouldBePresent(view.toString, summaryMessage)
    }

    "have 'Next steps' heading" in {
      val view = kickOutTemplateView()
      val headers = view.getElementsByTag("h2")
      headers.first.text() mustBe messagesApi("iht.nextSteps")
    }

    "have the sequence of contents" in {
      val view = kickOutTemplateView()
      for (content <- seqOfContents) view.toString must include(content)
    }

    "have details are correct button " in {
      val view = kickOutTemplateView()
      val detailsAreCorrectButton = view.getElementById("finish")
      detailsAreCorrectButton.attr("value") mustBe messagesApi("site.button.details.correct.exitToGovK")
    }

    "have return link with correct text" in {
      val view = kickOutTemplateView()
      val detailsAreCorrectButton = view.getElementById("return-button")
      detailsAreCorrectButton.attr("href") mustBe returnLinkUrl.url
      detailsAreCorrectButton.text mustBe messagesApi("iht.registration.kickout.returnToTheLastPageVisited")
    }
  }
}
| hmrc/iht-frontend | test/iht/views/registration/kickout/KickoutTemplateViewTest.scala | Scala | apache-2.0 | 2,643 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
import java.io.FileNotFoundException
import java.net.URLEncoder
import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.{ScheduledFuture, TimeUnit}
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.language.postfixOps
import scala.util.Random
import org.apache.hadoop.fs.Path
import org.apache.spark.rpc._
import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.{ApplicationDescription, DriverDescription,
ExecutorState, SparkHadoopUtil}
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.deploy.history.HistoryServer
import org.apache.spark.deploy.master.DriverState.DriverState
import org.apache.spark.deploy.master.MasterMessages._
import org.apache.spark.deploy.master.ui.MasterWebUI
import org.apache.spark.deploy.rest.StandaloneRestServer
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.scheduler.{EventLoggingListener, ReplayListenerBus}
import org.apache.spark.serializer.{JavaSerializer, Serializer}
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{ThreadUtils, SignalLogger, Utils}
private[deploy] class Master(
override val rpcEnv: RpcEnv,
address: RpcAddress,
webUiPort: Int,
val securityMgr: SecurityManager,
val conf: SparkConf)
extends ThreadSafeRpcEndpoint with Logging with LeaderElectable {
private val forwardMessageThread =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("master-forward-message-thread")
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private def createDateFormat = new SimpleDateFormat("yyyyMMddHHmmss") // For application IDs
private val WORKER_TIMEOUT_MS = conf.getLong("spark.worker.timeout", 60) * 1000
private val RETAINED_APPLICATIONS = conf.getInt("spark.deploy.retainedApplications", 200)
private val RETAINED_DRIVERS = conf.getInt("spark.deploy.retainedDrivers", 200)
private val REAPER_ITERATIONS = conf.getInt("spark.dead.worker.persistence", 15)
private val RECOVERY_MODE = conf.get("spark.deploy.recoveryMode", "NONE")
val workers = new HashSet[WorkerInfo]
val idToApp = new HashMap[String, ApplicationInfo]
val waitingApps = new ArrayBuffer[ApplicationInfo]
val apps = new HashSet[ApplicationInfo]
private val idToWorker = new HashMap[String, WorkerInfo]
private val addressToWorker = new HashMap[RpcAddress, WorkerInfo]
private val endpointToApp = new HashMap[RpcEndpointRef, ApplicationInfo]
private val addressToApp = new HashMap[RpcAddress, ApplicationInfo]
private val completedApps = new ArrayBuffer[ApplicationInfo]
private var nextAppNumber = 0
private val appIdToUI = new HashMap[String, SparkUI]
private val drivers = new HashSet[DriverInfo]
private val completedDrivers = new ArrayBuffer[DriverInfo]
// Drivers currently spooled for scheduling
private val waitingDrivers = new ArrayBuffer[DriverInfo]
private var nextDriverNumber = 0
Utils.checkHost(address.host, "Expected hostname")
private val masterMetricsSystem = MetricsSystem.createMetricsSystem("master", conf, securityMgr)
private val applicationMetricsSystem = MetricsSystem.createMetricsSystem("applications", conf,
securityMgr)
private val masterSource = new MasterSource(this)
// After onStart, webUi will be set
private var webUi: MasterWebUI = null
private val masterPublicAddress = {
val envVar = conf.getenv("SPARK_PUBLIC_DNS")
if (envVar != null) envVar else address.host
}
private val masterUrl = address.toSparkURL
private var masterWebUiUrl: String = _
private var state = RecoveryState.STANDBY
private var persistenceEngine: PersistenceEngine = _
private var leaderElectionAgent: LeaderElectionAgent = _
private var recoveryCompletionTask: ScheduledFuture[_] = _
private var checkForWorkerTimeOutTask: ScheduledFuture[_] = _
// As a temporary workaround before better ways of configuring memory, we allow users to set
// a flag that will perform round-robin scheduling across the nodes (spreading out each app
// among all the nodes) instead of trying to consolidate each app onto a small # of nodes.
private val spreadOutApps = conf.getBoolean("spark.deploy.spreadOut", true)
// Default maxCores for applications that don't specify it (i.e. pass Int.MaxValue)
private val defaultCores = conf.getInt("spark.deploy.defaultCores", Int.MaxValue)
if (defaultCores < 1) {
throw new SparkException("spark.deploy.defaultCores must be positive")
}
// Alternative application submission gateway that is stable across Spark versions
private val restServerEnabled = conf.getBoolean("spark.master.rest.enabled", true)
private var restServer: Option[StandaloneRestServer] = None
private var restServerBoundPort: Option[Int] = None
override def onStart(): Unit = {
logInfo("Starting Spark master at " + masterUrl)
logInfo(s"Running Spark version ${org.apache.spark.SPARK_VERSION}")
webUi = new MasterWebUI(this, webUiPort)
webUi.bind()
masterWebUiUrl = "http://" + masterPublicAddress + ":" + webUi.boundPort
checkForWorkerTimeOutTask = forwardMessageThread.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
self.send(CheckForWorkerTimeOut)
}
}, 0, WORKER_TIMEOUT_MS, TimeUnit.MILLISECONDS)
if (restServerEnabled) {
val port = conf.getInt("spark.master.rest.port", 6066)
restServer = Some(new StandaloneRestServer(address.host, port, conf, self, masterUrl))
}
restServerBoundPort = restServer.map(_.start())
masterMetricsSystem.registerSource(masterSource)
masterMetricsSystem.start()
applicationMetricsSystem.start()
// Attach the master and app metrics servlet handler to the web ui after the metrics systems are
// started.
masterMetricsSystem.getServletHandlers.foreach(webUi.attachHandler)
applicationMetricsSystem.getServletHandlers.foreach(webUi.attachHandler)
val serializer = new JavaSerializer(conf)
val (persistenceEngine_, leaderElectionAgent_) = RECOVERY_MODE match {
case "ZOOKEEPER" =>
logInfo("Persisting recovery state to ZooKeeper")
val zkFactory =
new ZooKeeperRecoveryModeFactory(conf, serializer)
(zkFactory.createPersistenceEngine(), zkFactory.createLeaderElectionAgent(this))
case "FILESYSTEM" =>
val fsFactory =
new FileSystemRecoveryModeFactory(conf, serializer)
(fsFactory.createPersistenceEngine(), fsFactory.createLeaderElectionAgent(this))
case "CUSTOM" =>
val clazz = Utils.classForName(conf.get("spark.deploy.recoveryMode.factory"))
val factory = clazz.getConstructor(classOf[SparkConf], classOf[Serializer])
.newInstance(conf, serializer)
.asInstanceOf[StandaloneRecoveryModeFactory]
(factory.createPersistenceEngine(), factory.createLeaderElectionAgent(this))
case _ =>
(new BlackHolePersistenceEngine(), new MonarchyLeaderAgent(this))
}
persistenceEngine = persistenceEngine_
leaderElectionAgent = leaderElectionAgent_
}
override def onStop() {
masterMetricsSystem.report()
applicationMetricsSystem.report()
// prevent the CompleteRecovery message sending to restarted master
if (recoveryCompletionTask != null) {
recoveryCompletionTask.cancel(true)
}
if (checkForWorkerTimeOutTask != null) {
checkForWorkerTimeOutTask.cancel(true)
}
forwardMessageThread.shutdownNow()
webUi.stop()
restServer.foreach(_.stop())
masterMetricsSystem.stop()
applicationMetricsSystem.stop()
persistenceEngine.close()
leaderElectionAgent.stop()
}
override def electedLeader() {
self.send(ElectedLeader)
}
override def revokedLeadership() {
self.send(RevokedLeadership)
}
override def receive: PartialFunction[Any, Unit] = {
case ElectedLeader => {
val (storedApps, storedDrivers, storedWorkers) = persistenceEngine.readPersistedData(rpcEnv)
state = if (storedApps.isEmpty && storedDrivers.isEmpty && storedWorkers.isEmpty) {
RecoveryState.ALIVE
} else {
RecoveryState.RECOVERING
}
logInfo("I have been elected leader! New state: " + state)
if (state == RecoveryState.RECOVERING) {
beginRecovery(storedApps, storedDrivers, storedWorkers)
recoveryCompletionTask = forwardMessageThread.schedule(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
self.send(CompleteRecovery)
}
}, WORKER_TIMEOUT_MS, TimeUnit.MILLISECONDS)
}
}
case CompleteRecovery => completeRecovery()
case RevokedLeadership => {
logError("Leadership has been revoked -- master shutting down.")
System.exit(0)
}
case RegisterApplication(description, driver) => {
// TODO Prevent repeated registrations from some driver
if (state == RecoveryState.STANDBY) {
// ignore, don't send response
} else {
logInfo("Registering app " + description.name)
val app = createApplication(description, driver)
registerApplication(app)
logInfo("Registered app " + description.name + " with ID " + app.id)
persistenceEngine.addApplication(app)
driver.send(RegisteredApplication(app.id, self))
schedule()
}
}
case ExecutorStateChanged(appId, execId, state, message, exitStatus) => {
val execOption = idToApp.get(appId).flatMap(app => app.executors.get(execId))
execOption match {
case Some(exec) => {
val appInfo = idToApp(appId)
exec.state = state
if (state == ExecutorState.RUNNING) { appInfo.resetRetryCount() }
exec.application.driver.send(ExecutorUpdated(execId, state, message, exitStatus))
if (ExecutorState.isFinished(state)) {
// Remove this executor from the worker and app
logInfo(s"Removing executor ${exec.fullId} because it is $state")
// If an application has already finished, preserve its
// state to display its information properly on the UI
if (!appInfo.isFinished) {
appInfo.removeExecutor(exec)
}
exec.worker.removeExecutor(exec)
val normalExit = exitStatus == Some(0)
// Only retry certain number of times so we don't go into an infinite loop.
if (!normalExit) {
if (appInfo.incrementRetryCount() < ApplicationState.MAX_NUM_RETRY) {
schedule()
} else {
val execs = appInfo.executors.values
if (!execs.exists(_.state == ExecutorState.RUNNING)) {
logError(s"Application ${appInfo.desc.name} with ID ${appInfo.id} failed " +
s"${appInfo.retryCount} times; removing it")
removeApplication(appInfo, ApplicationState.FAILED)
}
}
}
}
}
case None =>
logWarning(s"Got status update for unknown executor $appId/$execId")
}
}
case DriverStateChanged(driverId, state, exception) => {
state match {
case DriverState.ERROR | DriverState.FINISHED | DriverState.KILLED | DriverState.FAILED =>
removeDriver(driverId, state, exception)
case _ =>
throw new Exception(s"Received unexpected state update for driver $driverId: $state")
}
}
case Heartbeat(workerId, worker) => {
idToWorker.get(workerId) match {
case Some(workerInfo) =>
workerInfo.lastHeartbeat = System.currentTimeMillis()
case None =>
if (workers.map(_.id).contains(workerId)) {
logWarning(s"Got heartbeat from unregistered worker $workerId." +
" Asking it to re-register.")
worker.send(ReconnectWorker(masterUrl))
} else {
logWarning(s"Got heartbeat from unregistered worker $workerId." +
" This worker was never registered, so ignoring the heartbeat.")
}
}
}
case MasterChangeAcknowledged(appId) => {
idToApp.get(appId) match {
case Some(app) =>
logInfo("Application has been re-registered: " + appId)
app.state = ApplicationState.WAITING
case None =>
logWarning("Master change ack from unknown app: " + appId)
}
if (canCompleteRecovery) { completeRecovery() }
}
case WorkerSchedulerStateResponse(workerId, executors, driverIds) => {
idToWorker.get(workerId) match {
case Some(worker) =>
logInfo("Worker has been re-registered: " + workerId)
worker.state = WorkerState.ALIVE
val validExecutors = executors.filter(exec => idToApp.get(exec.appId).isDefined)
for (exec <- validExecutors) {
val app = idToApp.get(exec.appId).get
val execInfo = app.addExecutor(worker, exec.cores, Some(exec.execId))
worker.addExecutor(execInfo)
execInfo.copyState(exec)
}
for (driverId <- driverIds) {
drivers.find(_.id == driverId).foreach { driver =>
driver.worker = Some(worker)
driver.state = DriverState.RUNNING
worker.drivers(driverId) = driver
}
}
case None =>
logWarning("Scheduler state from unknown worker: " + workerId)
}
if (canCompleteRecovery) { completeRecovery() }
}
case UnregisterApplication(applicationId) =>
logInfo(s"Received unregister request from application $applicationId")
idToApp.get(applicationId).foreach(finishApplication)
case CheckForWorkerTimeOut => {
timeOutDeadWorkers()
}
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RegisterWorker(
id, workerHost, workerPort, workerRef, cores, memory, workerUiPort, publicAddress) => {
logInfo("Registering worker %s:%d with %d cores, %s RAM".format(
workerHost, workerPort, cores, Utils.megabytesToString(memory)))
if (state == RecoveryState.STANDBY) {
context.reply(MasterInStandby)
} else if (idToWorker.contains(id)) {
context.reply(RegisterWorkerFailed("Duplicate worker ID"))
} else {
val worker = new WorkerInfo(id, workerHost, workerPort, cores, memory,
workerRef, workerUiPort, publicAddress)
if (registerWorker(worker)) {
persistenceEngine.addWorker(worker)
context.reply(RegisteredWorker(self, masterWebUiUrl))
schedule()
} else {
val workerAddress = worker.endpoint.address
logWarning("Worker registration failed. Attempted to re-register worker at same " +
"address: " + workerAddress)
context.reply(RegisterWorkerFailed("Attempted to re-register worker at same address: "
+ workerAddress))
}
}
}
case RequestSubmitDriver(description) => {
if (state != RecoveryState.ALIVE) {
val msg = s"${Utils.BACKUP_STANDALONE_MASTER_PREFIX}: $state. " +
"Can only accept driver submissions in ALIVE state."
context.reply(SubmitDriverResponse(self, false, None, msg))
} else {
logInfo("Driver submitted " + description.command.mainClass)
val driver = createDriver(description)
persistenceEngine.addDriver(driver)
waitingDrivers += driver
drivers.add(driver)
schedule()
// TODO: It might be good to instead have the submission client poll the master to determine
// the current status of the driver. For now it's simply "fire and forget".
context.reply(SubmitDriverResponse(self, true, Some(driver.id),
s"Driver successfully submitted as ${driver.id}"))
}
}
case RequestKillDriver(driverId) => {
if (state != RecoveryState.ALIVE) {
val msg = s"${Utils.BACKUP_STANDALONE_MASTER_PREFIX}: $state. " +
s"Can only kill drivers in ALIVE state."
context.reply(KillDriverResponse(self, driverId, success = false, msg))
} else {
logInfo("Asked to kill driver " + driverId)
val driver = drivers.find(_.id == driverId)
driver match {
case Some(d) =>
if (waitingDrivers.contains(d)) {
waitingDrivers -= d
self.send(DriverStateChanged(driverId, DriverState.KILLED, None))
} else {
// We just notify the worker to kill the driver here. The final bookkeeping occurs
// on the return path when the worker submits a state change back to the master
// to notify it that the driver was successfully killed.
d.worker.foreach { w =>
w.endpoint.send(KillDriver(driverId))
}
}
// TODO: It would be nice for this to be a synchronous response
val msg = s"Kill request for $driverId submitted"
logInfo(msg)
context.reply(KillDriverResponse(self, driverId, success = true, msg))
case None =>
val msg = s"Driver $driverId has already finished or does not exist"
logWarning(msg)
context.reply(KillDriverResponse(self, driverId, success = false, msg))
}
}
}
case RequestDriverStatus(driverId) => {
if (state != RecoveryState.ALIVE) {
val msg = s"${Utils.BACKUP_STANDALONE_MASTER_PREFIX}: $state. " +
"Can only request driver status in ALIVE state."
context.reply(
DriverStatusResponse(found = false, None, None, None, Some(new Exception(msg))))
} else {
(drivers ++ completedDrivers).find(_.id == driverId) match {
case Some(driver) =>
context.reply(DriverStatusResponse(found = true, Some(driver.state),
driver.worker.map(_.id), driver.worker.map(_.hostPort), driver.exception))
case None =>
context.reply(DriverStatusResponse(found = false, None, None, None, None))
}
}
}
case RequestMasterState => {
context.reply(MasterStateResponse(
address.host, address.port, restServerBoundPort,
workers.toArray, apps.toArray, completedApps.toArray,
drivers.toArray, completedDrivers.toArray, state))
}
case BoundPortsRequest => {
context.reply(BoundPortsResponse(address.port, webUi.boundPort, restServerBoundPort))
}
case RequestExecutors(appId, requestedTotal) =>
context.reply(handleRequestExecutors(appId, requestedTotal))
case KillExecutors(appId, executorIds) =>
val formattedExecutorIds = formatExecutorIds(executorIds)
context.reply(handleKillExecutors(appId, formattedExecutorIds))
}
override def onDisconnected(address: RpcAddress): Unit = {
// The disconnected client could've been either a worker or an app; remove whichever it was
logInfo(s"$address got disassociated, removing it.")
addressToWorker.get(address).foreach(removeWorker)
addressToApp.get(address).foreach(finishApplication)
if (state == RecoveryState.RECOVERING && canCompleteRecovery) { completeRecovery() }
}
private def canCompleteRecovery =
workers.count(_.state == WorkerState.UNKNOWN) == 0 &&
apps.count(_.state == ApplicationState.UNKNOWN) == 0
  /**
   * Rebuild master state from persisted apps, drivers, and workers after a restart or
   * fail-over. Recovered apps and workers are marked UNKNOWN until they re-register in
   * response to the MasterChanged message; drivers are simply re-added (see below).
   */
  private def beginRecovery(storedApps: Seq[ApplicationInfo], storedDrivers: Seq[DriverInfo],
      storedWorkers: Seq[WorkerInfo]) {
    for (app <- storedApps) {
      logInfo("Trying to recover app: " + app.id)
      try {
        registerApplication(app)
        app.state = ApplicationState.UNKNOWN
        app.driver.send(MasterChanged(self, masterWebUiUrl))
      } catch {
        // Best-effort: a dead driver just stays UNKNOWN and is culled in completeRecovery().
        case e: Exception => logInfo("App " + app.id + " had exception on reconnect")
      }
    }
    for (driver <- storedDrivers) {
      // Here we just read in the list of drivers. Any drivers associated with now-lost workers
      // will be re-launched when we detect that the worker is missing.
      drivers += driver
    }
    for (worker <- storedWorkers) {
      logInfo("Trying to recover worker: " + worker.id)
      try {
        registerWorker(worker)
        worker.state = WorkerState.UNKNOWN
        worker.endpoint.send(MasterChanged(self, masterWebUiUrl))
      } catch {
        // Best-effort: an unreachable worker stays UNKNOWN and is removed in completeRecovery().
        case e: Exception => logInfo("Worker " + worker.id + " had exception on reconnect")
      }
    }
  }
  /**
   * Finish the recovery started by beginRecovery(): cull endpoints that never re-registered,
   * re-dispatch orphaned drivers, and transition this master to ALIVE.
   */
  private def completeRecovery() {
    // Ensure "only-once" recovery semantics using a short synchronization period.
    if (state != RecoveryState.RECOVERING) { return }
    state = RecoveryState.COMPLETING_RECOVERY

    // Kill off any workers and apps that didn't respond to us.
    workers.filter(_.state == WorkerState.UNKNOWN).foreach(removeWorker)
    apps.filter(_.state == ApplicationState.UNKNOWN).foreach(finishApplication)

    // Reschedule drivers which were not claimed by any workers
    drivers.filter(_.worker.isEmpty).foreach { d =>
      logWarning(s"Driver ${d.id} was not found after master recovery")
      if (d.desc.supervise) {
        logWarning(s"Re-launching ${d.id}")
        relaunchDriver(d)
      } else {
        removeDriver(d.id, DriverState.ERROR, None)
        logWarning(s"Did not re-launch ${d.id} because it was not supervised")
      }
    }

    // Only once ALIVE may schedule() actually dispatch work (it no-ops in other states).
    state = RecoveryState.ALIVE
    schedule()
    logInfo("Recovery complete - resuming operations!")
  }
/**
* Schedule executors to be launched on the workers.
* Returns an array containing number of cores assigned to each worker.
*
* There are two modes of launching executors. The first attempts to spread out an application's
* executors on as many workers as possible, while the second does the opposite (i.e. launch them
* on as few workers as possible). The former is usually better for data locality purposes and is
* the default.
*
* The number of cores assigned to each executor is configurable. When this is explicitly set,
* multiple executors from the same application may be launched on the same worker if the worker
* has enough cores and memory. Otherwise, each executor grabs all the cores available on the
* worker by default, in which case only one executor may be launched on each worker.
*
* It is important to allocate coresPerExecutor on each worker at a time (instead of 1 core
* at a time). Consider the following example: cluster has 4 workers with 16 cores each.
* User requests 3 executors (spark.cores.max = 48, spark.executor.cores = 16). If 1 core is
* allocated at a time, 12 cores from each worker would be assigned to each executor.
* Since 12 < 16, no executors would launch [SPARK-8881].
*/
  private def scheduleExecutorsOnWorkers(
      app: ApplicationInfo,
      usableWorkers: Array[WorkerInfo],
      spreadOutApps: Boolean): Array[Int] = {
    val coresPerExecutor = app.desc.coresPerExecutor
    val minCoresPerExecutor = coresPerExecutor.getOrElse(1)
    // When no explicit executor size is configured, a single executor per worker soaks up
    // every core we assign there; otherwise cores are granted in executor-sized chunks.
    val oneExecutorPerWorker = coresPerExecutor.isEmpty
    val memoryPerExecutor = app.desc.memoryPerExecutorMB
    val numUsable = usableWorkers.length
    val assignedCores = new Array[Int](numUsable) // Number of cores to give to each worker
    val assignedExecutors = new Array[Int](numUsable) // Number of new executors on each worker
    var coresToAssign = math.min(app.coresLeft, usableWorkers.map(_.coresFree).sum)

    /** Return whether the specified worker can launch an executor for this app. */
    def canLaunchExecutor(pos: Int): Boolean = {
      val keepScheduling = coresToAssign >= minCoresPerExecutor
      val enoughCores = usableWorkers(pos).coresFree - assignedCores(pos) >= minCoresPerExecutor
      // If we allow multiple executors per worker, then we can always launch new executors.
      // Otherwise, if there is already an executor on this worker, just give it more cores.
      val launchingNewExecutor = !oneExecutorPerWorker || assignedExecutors(pos) == 0
      if (launchingNewExecutor) {
        val assignedMemory = assignedExecutors(pos) * memoryPerExecutor
        val enoughMemory = usableWorkers(pos).memoryFree - assignedMemory >= memoryPerExecutor
        val underLimit = assignedExecutors.sum + app.executors.size < app.executorLimit
        keepScheduling && enoughCores && enoughMemory && underLimit
      } else {
        // We're adding cores to an existing executor, so no need
        // to check memory and executor limits
        keepScheduling && enoughCores
      }
    }

    // Keep launching executors until no more workers can accommodate any
    // more executors, or if we have reached this application's limits
    var freeWorkers = (0 until numUsable).filter(canLaunchExecutor)
    while (freeWorkers.nonEmpty) {
      freeWorkers.foreach { pos =>
        var keepScheduling = true
        while (keepScheduling && canLaunchExecutor(pos)) {
          // Allocate one executor-chunk of cores (or one core when sizing is implicit).
          coresToAssign -= minCoresPerExecutor
          assignedCores(pos) += minCoresPerExecutor
          // If we are launching one executor per worker, then every iteration assigns 1 core
          // to the executor. Otherwise, every iteration assigns cores to a new executor.
          if (oneExecutorPerWorker) {
            assignedExecutors(pos) = 1
          } else {
            assignedExecutors(pos) += 1
          }
          // Spreading out an application means spreading out its executors across as
          // many workers as possible. If we are not spreading out, then we should keep
          // scheduling executors on this worker until we use all of its resources.
          // Otherwise, just move on to the next worker.
          if (spreadOutApps) {
            keepScheduling = false
          }
        }
      }
      // Re-filter: workers that can no longer host an executor drop out of the round-robin.
      freeWorkers = freeWorkers.filter(canLaunchExecutor)
    }
    assignedCores
  }
/**
* Schedule and launch executors on workers
*/
  private def startExecutorsOnWorkers(): Unit = {
    // Right now this is a very simple FIFO scheduler. We keep trying to fit in the first app
    // in the queue, then the second app, etc.
    for (app <- waitingApps if app.coresLeft > 0) {
      val coresPerExecutor: Option[Int] = app.desc.coresPerExecutor
      // Filter out workers that don't have enough resources to launch an executor.
      // Sorted descending by free cores so the most capable workers are considered first.
      val usableWorkers = workers.toArray.filter(_.state == WorkerState.ALIVE)
        .filter(worker => worker.memoryFree >= app.desc.memoryPerExecutorMB &&
          worker.coresFree >= coresPerExecutor.getOrElse(1))
        .sortBy(_.coresFree).reverse
      // Compute a per-worker core allocation without side effects...
      val assignedCores = scheduleExecutorsOnWorkers(app, usableWorkers, spreadOutApps)
      // ...then actually launch executors for each worker that received cores.
      for (pos <- 0 until usableWorkers.length if assignedCores(pos) > 0) {
        allocateWorkerResourceToExecutors(
          app, assignedCores(pos), coresPerExecutor, usableWorkers(pos))
      }
    }
  }
  /**
   * Allocate a worker's resources to one or more executors.
   * @param app the info of the application which the executors belong to
   * @param assignedCores number of cores on this worker for this application
   * @param coresPerExecutor number of cores per executor
   * @param worker the worker info
   */
  private def allocateWorkerResourceToExecutors(
      app: ApplicationInfo,
      assignedCores: Int,
      coresPerExecutor: Option[Int],
      worker: WorkerInfo): Unit = {
    // If the number of cores per executor is specified, we divide the cores assigned
    // to this worker evenly among the executors with no remainder.
    // Otherwise, we launch a single executor that grabs all the assignedCores on this worker.
    // Note: integer division means any remainder cores are simply left unassigned here.
    val numExecutors = coresPerExecutor.map { assignedCores / _ }.getOrElse(1)
    val coresToAssign = coresPerExecutor.getOrElse(assignedCores)
    for (i <- 1 to numExecutors) {
      val exec = app.addExecutor(worker, coresToAssign)
      launchExecutor(worker, exec)
      // Idempotent: re-set each iteration; the app is RUNNING once any executor launches.
      app.state = ApplicationState.RUNNING
    }
  }
/**
* Schedule the currently available resources among waiting apps. This method will be called
* every time a new app joins or resource availability changes.
*/
private def schedule(): Unit = {
if (state != RecoveryState.ALIVE) { return }
// Drivers take strict precedence over executors
val shuffledWorkers = Random.shuffle(workers) // Randomization helps balance drivers
for (worker <- shuffledWorkers if worker.state == WorkerState.ALIVE) {
for (driver <- waitingDrivers) {
if (worker.memoryFree >= driver.desc.mem && worker.coresFree >= driver.desc.cores) {
launchDriver(worker, driver)
waitingDrivers -= driver
}
}
}
startExecutorsOnWorkers()
}
private def launchExecutor(worker: WorkerInfo, exec: ExecutorDesc): Unit = {
logInfo("Launching executor " + exec.fullId + " on worker " + worker.id)
worker.addExecutor(exec)
worker.endpoint.send(LaunchExecutor(masterUrl,
exec.application.id, exec.id, exec.application.desc, exec.cores, exec.memory))
exec.application.driver.send(ExecutorAdded(
exec.id, worker.id, worker.hostPort, exec.cores, exec.memory))
}
  /**
   * Register a (possibly re-registering) worker.
   * @return true if the worker was accepted, false if another live worker already holds
   *         the same RPC address.
   */
  private def registerWorker(worker: WorkerInfo): Boolean = {
    // There may be one or more refs to dead workers on this same node (w/ different ID's),
    // remove them. (filter materializes a new collection, so removing while iterating is safe.)
    workers.filter { w =>
      (w.host == worker.host && w.port == worker.port) && (w.state == WorkerState.DEAD)
    }.foreach { w =>
      workers -= w
    }
    val workerAddress = worker.endpoint.address
    if (addressToWorker.contains(workerAddress)) {
      val oldWorker = addressToWorker(workerAddress)
      if (oldWorker.state == WorkerState.UNKNOWN) {
        // A worker registering from UNKNOWN implies that the worker was restarted during recovery.
        // The old worker must thus be dead, so we will remove it and accept the new worker.
        removeWorker(oldWorker)
      } else {
        logInfo("Attempted to re-register worker at same address: " + workerAddress)
        return false
      }
    }
    workers += worker
    idToWorker(worker.id) = worker
    addressToWorker(workerAddress) = worker
    true
  }
  /**
   * Remove a worker from all bookkeeping, notify affected applications that its executors
   * are lost, and either relaunch or fail its drivers depending on supervision.
   */
  private def removeWorker(worker: WorkerInfo) {
    logInfo("Removing worker " + worker.id + " on " + worker.host + ":" + worker.port)
    // The worker stays in `workers` as DEAD (for UI history) until timeOutDeadWorkers culls it.
    worker.setState(WorkerState.DEAD)
    idToWorker -= worker.id
    addressToWorker -= worker.endpoint.address
    for (exec <- worker.executors.values) {
      logInfo("Telling app of lost executor: " + exec.id)
      exec.application.driver.send(ExecutorUpdated(
        exec.id, ExecutorState.LOST, Some("worker lost"), None))
      exec.application.removeExecutor(exec)
    }
    for (driver <- worker.drivers.values) {
      if (driver.desc.supervise) {
        logInfo(s"Re-launching ${driver.id}")
        relaunchDriver(driver)
      } else {
        logInfo(s"Not re-launching ${driver.id} because it was not supervised")
        removeDriver(driver.id, DriverState.ERROR, None)
      }
    }
    persistenceEngine.removeWorker(worker)
  }
  /** Detach a driver from its (lost) worker and queue it for rescheduling. */
  private def relaunchDriver(driver: DriverInfo) {
    driver.worker = None
    driver.state = DriverState.RELAUNCHING
    waitingDrivers += driver
    schedule()
  }
private def createApplication(desc: ApplicationDescription, driver: RpcEndpointRef):
ApplicationInfo = {
val now = System.currentTimeMillis()
val date = new Date(now)
new ApplicationInfo(now, newApplicationId(date), desc, date, driver, defaultCores)
}
  /**
   * Add a newly submitted application to all master bookkeeping structures and the
   * waiting queue. A duplicate registration from the same driver address is ignored.
   */
  private def registerApplication(app: ApplicationInfo): Unit = {
    val appAddress = app.driver.address
    if (addressToApp.contains(appAddress)) {
      logInfo("Attempted to re-register application at same address: " + appAddress)
      return
    }
    applicationMetricsSystem.registerSource(app.appSource)
    apps += app
    idToApp(app.id) = app
    endpointToApp(app.driver) = app
    addressToApp(appAddress) = app
    waitingApps += app
  }
  /** Remove an application that terminated normally, marking it FINISHED. */
  private def finishApplication(app: ApplicationInfo) {
    removeApplication(app, ApplicationState.FINISHED)
  }
  /**
   * Remove an application from the master: drop all bookkeeping, archive it in the
   * completed-apps history (trimming old entries), kill its executors, notify the driver
   * (unless it finished cleanly) and every worker, then reschedule freed resources.
   */
  def removeApplication(app: ApplicationInfo, state: ApplicationState.Value) {
    if (apps.contains(app)) {
      logInfo("Removing app " + app.id)
      apps -= app
      idToApp -= app.id
      endpointToApp -= app.driver
      addressToApp -= app.driver.address
      // Cap history size: drop the oldest ~10% of completed apps and their UIs/metrics.
      if (completedApps.size >= RETAINED_APPLICATIONS) {
        val toRemove = math.max(RETAINED_APPLICATIONS / 10, 1)
        completedApps.take(toRemove).foreach( a => {
          appIdToUI.remove(a.id).foreach { ui => webUi.detachSparkUI(ui) }
          applicationMetricsSystem.removeSource(a.appSource)
        })
        completedApps.trimStart(toRemove)
      }
      completedApps += app // Remember it in our history
      waitingApps -= app

      // If application events are logged, use them to rebuild the UI
      rebuildSparkUI(app)

      for (exec <- app.executors.values) {
        killExecutor(exec)
      }
      app.markFinished(state)
      if (state != ApplicationState.FINISHED) {
        app.driver.send(ApplicationRemoved(state.toString))
      }
      persistenceEngine.removeApplication(app)
      schedule()

      // Tell all workers that the application has finished, so they can clean up any app state.
      workers.foreach { w =>
        w.endpoint.send(ApplicationFinished(app.id))
      }
    }
  }
/**
* Handle a request to set the target number of executors for this application.
*
* If the executor limit is adjusted upwards, new executors will be launched provided
* that there are workers with sufficient resources. If it is adjusted downwards, however,
* we do not kill existing executors until we explicitly receive a kill request.
*
* @return whether the application has previously registered with this Master.
*/
private def handleRequestExecutors(appId: String, requestedTotal: Int): Boolean = {
idToApp.get(appId) match {
case Some(appInfo) =>
logInfo(s"Application $appId requested to set total executors to $requestedTotal.")
appInfo.executorLimit = requestedTotal
schedule()
true
case None =>
logWarning(s"Unknown application $appId requested $requestedTotal total executors.")
false
}
}
/**
* Handle a kill request from the given application.
*
* This method assumes the executor limit has already been adjusted downwards through
* a separate [[RequestExecutors]] message, such that we do not launch new executors
* immediately after the old ones are removed.
*
* @return whether the application has previously registered with this Master.
*/
private def handleKillExecutors(appId: String, executorIds: Seq[Int]): Boolean = {
idToApp.get(appId) match {
case Some(appInfo) =>
logInfo(s"Application $appId requests to kill executors: " + executorIds.mkString(", "))
val (known, unknown) = executorIds.partition(appInfo.executors.contains)
known.foreach { executorId =>
val desc = appInfo.executors(executorId)
appInfo.removeExecutor(desc)
killExecutor(desc)
}
if (unknown.nonEmpty) {
logWarning(s"Application $appId attempted to kill non-existent executors: "
+ unknown.mkString(", "))
}
schedule()
true
case None =>
logWarning(s"Unregistered application $appId requested us to kill executors!")
false
}
}
/**
* Cast the given executor IDs to integers and filter out the ones that fail.
*
* All executors IDs should be integers since we launched these executors. However,
* the kill interface on the driver side accepts arbitrary strings, so we need to
* handle non-integer executor IDs just to be safe.
*/
private def formatExecutorIds(executorIds: Seq[String]): Seq[Int] = {
executorIds.flatMap { executorId =>
try {
Some(executorId.toInt)
} catch {
case e: NumberFormatException =>
logError(s"Encountered executor with a non-integer ID: $executorId. Ignoring")
None
}
}
}
/**
* Ask the worker on which the specified executor is launched to kill the executor.
*/
  private def killExecutor(exec: ExecutorDesc): Unit = {
    // Free the executor's resources on the worker, ask the worker to kill the process,
    // and mark the executor KILLED in master state.
    exec.worker.removeExecutor(exec)
    exec.worker.endpoint.send(KillExecutor(masterUrl, exec.application.id, exec.id))
    exec.state = ExecutorState.KILLED
  }
/**
* Rebuild a new SparkUI from the given application's event logs.
* Return the UI if successful, else None
*/
  private[master] def rebuildSparkUI(app: ApplicationInfo): Option[SparkUI] = {
    val appName = app.desc.name
    val notFoundBasePath = HistoryServer.UI_PATH_PREFIX + "/not-found"
    try {
      val eventLogDir = app.desc.eventLogDir
        .getOrElse {
          // Event logging is not enabled for this application.
          // Note: this is a non-local `return` from inside the by-name getOrElse block; the
          // NonLocalReturnControl it throws is a ControlThrowable, so the `case e: Exception`
          // handler below does NOT intercept it.
          app.desc.appUiUrl = notFoundBasePath
          return None
        }
      val eventLogFilePrefix = EventLoggingListener.getLogPath(
        eventLogDir, app.id, app.desc.eventLogCodec)
      val fs = Utils.getHadoopFileSystem(eventLogDir, hadoopConf)
      val inProgressExists = fs.exists(new Path(eventLogFilePrefix +
        EventLoggingListener.IN_PROGRESS))
      if (inProgressExists) {
        // Event logging is enabled for this application, but the application is still in progress
        logWarning(s"Application $appName is still in progress, it may be terminated abnormally.")
      }
      val (eventLogFile, status) = if (inProgressExists) {
        (eventLogFilePrefix + EventLoggingListener.IN_PROGRESS, " (in progress)")
      } else {
        (eventLogFilePrefix, " (completed)")
      }
      val logInput = EventLoggingListener.openEventLog(new Path(eventLogFile), fs)
      val replayBus = new ReplayListenerBus()
      // NOTE(review): a fresh SparkConf is passed to the history UI while the master's `conf`
      // is used for the SecurityManager — presumably intentional, but worth confirming.
      val ui = SparkUI.createHistoryUI(new SparkConf, replayBus, new SecurityManager(conf),
        appName, HistoryServer.UI_PATH_PREFIX + s"/${app.id}", app.startTime)
      val maybeTruncated = eventLogFile.endsWith(EventLoggingListener.IN_PROGRESS)
      try {
        replayBus.replay(logInput, eventLogFile, maybeTruncated)
      } finally {
        logInput.close()
      }
      appIdToUI(app.id) = ui
      webUi.attachSparkUI(ui)
      // Application UI is successfully rebuilt, so link the Master UI to it
      app.desc.appUiUrl = ui.basePath
      Some(ui)
    } catch {
      case fnf: FileNotFoundException =>
        // Event logging is enabled for this application, but no event logs are found
        val title = s"Application history not found (${app.id})"
        var msg = s"No event logs found for application $appName in ${app.desc.eventLogDir.get}."
        logWarning(msg)
        msg += " Did you specify the correct logging directory?"
        msg = URLEncoder.encode(msg, "UTF-8")
        app.desc.appUiUrl = notFoundBasePath + s"?msg=$msg&title=$title"
        None
      case e: Exception =>
        // Relay exception message to application UI page
        val title = s"Application history load error (${app.id})"
        val exception = URLEncoder.encode(Utils.exceptionString(e), "UTF-8")
        var msg = s"Exception in replaying log for application $appName!"
        logError(msg, e)
        msg = URLEncoder.encode(msg, "UTF-8")
        app.desc.appUiUrl = notFoundBasePath + s"?msg=$msg&exception=$exception&title=$title"
        None
    }
  }
/** Generate a new app ID given a app's submission date */
private def newApplicationId(submitDate: Date): String = {
val appId = "app-%s-%04d".format(createDateFormat.format(submitDate), nextAppNumber)
nextAppNumber += 1
appId
}
/** Check for, and remove, any timed-out workers */
  private def timeOutDeadWorkers() {
    // Copy the workers into an array so we don't modify the hashset while iterating through it
    val currentTime = System.currentTimeMillis()
    val toRemove = workers.filter(_.lastHeartbeat < currentTime - WORKER_TIMEOUT_MS).toArray
    for (worker <- toRemove) {
      if (worker.state != WorkerState.DEAD) {
        // First strike: the worker missed its heartbeat window; mark it DEAD via removeWorker.
        logWarning("Removing %s because we got no heartbeat in %d seconds".format(
          worker.id, WORKER_TIMEOUT_MS / 1000))
        removeWorker(worker)
      } else {
        // Already DEAD: keep it around for the UI for REAPER_ITERATIONS more timeout
        // periods, then drop it from `workers` entirely.
        if (worker.lastHeartbeat < currentTime - ((REAPER_ITERATIONS + 1) * WORKER_TIMEOUT_MS)) {
          workers -= worker // we've seen this DEAD worker in the UI, etc. for long enough; cull it
        }
      }
    }
  }
private def newDriverId(submitDate: Date): String = {
val appId = "driver-%s-%04d".format(createDateFormat.format(submitDate), nextDriverNumber)
nextDriverNumber += 1
appId
}
private def createDriver(desc: DriverDescription): DriverInfo = {
val now = System.currentTimeMillis()
val date = new Date(now)
new DriverInfo(now, newDriverId(date), desc, date)
}
  /** Assign a driver to a worker, tell the worker to start it, and mark it RUNNING. */
  private def launchDriver(worker: WorkerInfo, driver: DriverInfo) {
    logInfo("Launching driver " + driver.id + " on worker " + worker.id)
    worker.addDriver(driver)
    driver.worker = Some(worker)
    worker.endpoint.send(LaunchDriver(driver.id, driver.desc))
    driver.state = DriverState.RUNNING
  }
  /**
   * Move a driver from the active set into the completed-drivers history (trimming old
   * entries), record its final state/exception, free its worker, and reschedule.
   */
  private def removeDriver(
      driverId: String,
      finalState: DriverState,
      exception: Option[Exception]) {
    drivers.find(d => d.id == driverId) match {
      case Some(driver) =>
        logInfo(s"Removing driver: $driverId")
        drivers -= driver
        // Cap history size: drop the oldest ~10% of completed drivers.
        if (completedDrivers.size >= RETAINED_DRIVERS) {
          val toRemove = math.max(RETAINED_DRIVERS / 10, 1)
          completedDrivers.trimStart(toRemove)
        }
        completedDrivers += driver
        persistenceEngine.removeDriver(driver)
        driver.state = finalState
        driver.exception = exception
        driver.worker.foreach(w => w.removeDriver(driver))
        schedule()
      case None =>
        logWarning(s"Asked to remove unknown driver: $driverId")
    }
  }
}
private[deploy] object Master extends Logging {
  val SYSTEM_NAME = "sparkMaster"
  val ENDPOINT_NAME = "Master"

  /** Entry point: parse arguments, start the Master endpoint, and block until shutdown. */
  def main(argStrings: Array[String]): Unit = {
    SignalLogger.register(log)
    val conf = new SparkConf
    val args = new MasterArguments(argStrings, conf)
    val (rpcEnv, _, _) = startRpcEnvAndEndpoint(args.host, args.port, args.webUiPort, conf)
    rpcEnv.awaitTermination()
  }

  /**
   * Start the Master and return a three tuple of:
   *   (1) The Master RpcEnv
   *   (2) The web UI bound port
   *   (3) The REST server bound port, if any
   */
  def startRpcEnvAndEndpoint(
      host: String,
      port: Int,
      webUiPort: Int,
      conf: SparkConf): (RpcEnv, Int, Option[Int]) = {
    val securityManager = new SecurityManager(conf)
    val rpcEnv = RpcEnv.create(SYSTEM_NAME, host, port, conf, securityManager)
    val masterEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME,
      new Master(rpcEnv, rpcEnv.address, webUiPort, securityManager, conf))
    // Ask the freshly started endpoint which ports it actually bound to.
    val boundPorts = masterEndpoint.askWithRetry[BoundPortsResponse](BoundPortsRequest)
    (rpcEnv, boundPorts.webUIPort, boundPorts.restPort)
  }
}
| pronix/spark | core/src/main/scala/org/apache/spark/deploy/master/Master.scala | Scala | apache-2.0 | 44,083 |
package fpinscala.parallelism
import java.util.concurrent._
object Par {
  type Par[A] = ExecutorService => Future[A]

  /** Kick off a parallel computation by handing it an ExecutorService. */
  def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)

  /**
   * Promote a constant into Par. The resulting computation ignores the ExecutorService
   * entirely: the value is wrapped in an already-completed future, so no thread is used.
   */
  def unit[A](a: A): Par[A] = _ => UnitFuture(a)

  /**
   * A Future that is complete from the moment it is constructed. It wraps a plain value,
   * can never be cancelled, and its timed `get` ignores the timeout since nothing blocks.
   */
  private case class UnitFuture[A](get: A) extends Future[A] {
    def isDone = true
    def get(timeout: Long, units: TimeUnit) = get
    def isCancelled = false
    def cancel(evenIfRunning: Boolean): Boolean = false
  }

  /**
   * Combine two parallel computations with `f`. Per the design, `f` itself is not run on a
   * separate thread (use `fork(map2(a, b)(f))` for that). Timeouts are NOT respected: we
   * block on each underlying future in turn and wrap the combined result in a UnitFuture.
   */
  def map2[A, B, C](a: Par[A], b: Par[B])(f: (A, B) => C): Par[C] =
    es => {
      val fa = a(es)
      val fb = b(es)
      UnitFuture(f(fa.get, fb.get))
    }

  /**
   * Mark a computation for evaluation on a separate thread. The outer Callable blocks on
   * the inner task, so this occupies two threads where one would suffice — a known
   * limitation of this representation.
   */
  def fork[A](a: => Par[A]): Par[A] =
    es => es.submit(new Callable[A] {
      def call: A = a(es).get
    })

  /** Transform the result of a parallel computation. */
  def map[A, B](pa: Par[A])(f: A => B): Par[B] =
    map2(pa, unit(()))((a, _) => f(a))

  /** Sort the list produced by a parallel computation. */
  def sortPar(parList: Par[List[Int]]) = map(parList)(_.sorted)

  /** Two computations are equal when they produce equal values on the given executor. */
  def equal[A](e: ExecutorService)(p: Par[A], p2: Par[A]): Boolean =
    p(e).get == p2(e).get

  /** Defer instantiating the computation until it is actually run. */
  def delay[A](fa: => Par[A]): Par[A] =
    es => fa(es)

  /** Run `t` or `f` depending on `cond`. Blocks on the result of `cond`. */
  def choice[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
    es => {
      val branch = if (run(es)(cond).get) t else f
      branch(es)
    }

  /* Gives us infix syntax for `Par`. */
  implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)

  class ParOps[A](p: Par[A]) {
  }
}
object Examples {
import Par._
def sum(ints: IndexedSeq[Int]): Int = // `IndexedSeq` is a superclass of random-access sequences like `Vector` in the standard library. Unlike lists, these sequences provide an efficient `splitAt` method for dividing them into two parts at a particular index.
if (ints.size <= 1)
ints.headOption getOrElse 0 // `headOption` is a method defined on all collections in Scala. We saw this function in chapter 3.
else {
val (l,r) = ints.splitAt(ints.length/2) // Divide the sequence in half using the `splitAt` function.
sum(l) + sum(r) // Recursively sum both halves and add the results together.
}
} | markvrensburg/fpinscala | exercises/src/main/scala/fpinscala/parallelism/Par.scala | Scala | mit | 3,641 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.data
import com.beust.jcommander.{Parameter, Parameters}
import org.apache.hadoop.util.ToolRunner
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.index.AccumuloFeatureIndex
import org.locationtech.geomesa.accumulo.tools.data.AddIndexCommand.AddIndexParameters
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.jobs.accumulo.AccumuloJobUtils
import org.locationtech.geomesa.jobs.accumulo.index.{WriteIndexArgs, WriteIndexJob}
import org.locationtech.geomesa.tools._
import org.locationtech.geomesa.tools.utils.Prompt
import org.locationtech.geomesa.utils.index.IndexMode
import scala.util.control.NonFatal
/**
*
* 1. Add the new index in write-only mode
* 2. Pause and indicate that the user should bounce live ingestion to pick up the changes -
* after this it will be writing to both the new and old index
* 3. Migrate data through a m/r job, with an optional CQL filter for what gets migrated
* 4. Turn old index off, put new index in read/write mode
* 5. Pause and indicate that the user should bounce live ingestion again
*/
/** CLI command that adds or upgrades indices on an existing GeoMesa feature type. */
class AddIndexCommand extends AccumuloDataStoreCommand {
  override val name = "add-index"
  override val params = new AddIndexParameters
  override def execute(): Unit = {
    // We instantiate the class at runtime to avoid classpath dependencies from commands that are not being used.
    new AddIndexCommandExecutor(params).run()
  }
}
object AddIndexCommand {
  @Parameters(commandDescription = "Add or update indices for an existing GeoMesa feature type")
  class AddIndexParameters extends AccumuloDataStoreParams with RequiredTypeNameParam with OptionalCqlFilterParam {
    // Accepts either repeated --index flags or a single comma-separated value.
    @Parameter(names = Array("--index"), description = "Name of index(es) to add - comma-separate or use multiple flags", required = true)
    var indexNames: java.util.List[String] = null
    // When set, skips the map/reduce back-fill of existing data into the new index.
    @Parameter(names = Array("--no-back-fill"), description = "Do not copy any existing data into the new index")
    var noBackFill: java.lang.Boolean = null
  }
}
/**
 * Performs the add-index workflow described above: add the new index write-only, prompt the
 * operator to bounce ingestion, optionally back-fill existing data via map/reduce, then flip
 * the new index to read-write and disable the indices it replaces.
 */
class AddIndexCommandExecutor(override val params: AddIndexParameters) extends Runnable with AccumuloDataStoreCommand {

  import org.locationtech.geomesa.index.metadata.GeoMesaMetadata.ATTRIBUTES_KEY
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  override val name = ""
  override def execute(): Unit = {}

  // Fix: the original wrapped this call in a bare `try { ... }` with no catch or finally,
  // which is a no-op (equivalent to a plain block) and only obscured intent. Let any error
  // propagate to the caller as before.
  override def run(): Unit = withDataStore(addIndex)

  /**
   * Validate the requested indices against the schema, then run the interactive
   * add/back-fill/activate sequence against the given data store.
   */
  def addIndex(ds: AccumuloDataStore): Unit = {

    import scala.collection.JavaConversions._

    val sft = ds.getSchema(params.featureName)
    require(sft != null, s"Schema '${params.featureName}' does not exist in the specified data store")

    // Resolve requested index names against the current index implementations.
    val indices = params.indexNames.map { name =>
      AccumuloFeatureIndex.CurrentIndices.find(_.name == name).getOrElse {
        throw new IllegalArgumentException(s"Invalid index '$name'. Valid values are " +
            s"${AccumuloFeatureIndex.CurrentIndices.map(_.name).mkString(", ")}")
      }
    }

    val existing = AccumuloFeatureIndex.indices(sft, IndexMode.Any)

    require(indices.forall(i => !existing.contains(i)),
      s"Requested indices already exist: ${existing.map(_.identifier).mkString("[", "][", "]")}")
    require(indices.forall(_.supports(sft)), "Requested indices are not compatible with the simple feature type")

    // Pair each new index with any older index version it supersedes.
    val toDisable = indices.flatMap(i => AccumuloFeatureIndex.replaces(i, existing).map(r => (i, r)))

    if (toDisable.nonEmpty) {
      if (!Prompt.confirm("The following index versions will be replaced: " +
          s"${toDisable.map { case (n, o) => s"[${o.identifier}] by [${n.identifier}]" }.mkString(", ")} " +
          "Continue? (y/n): ")) {
        return
      }
    }

    if (!Prompt.confirm("If you are ingesting streaming data, you will be required to restart " +
        "the streaming ingestion when prompted. Continue? (y/n): ")) {
      return
    }

    // write a backup meta-data entry in case the process fails part-way
    val backupKey = s"$ATTRIBUTES_KEY.bak"
    ds.metadata.insert(sft.getTypeName, backupKey, ds.metadata.readRequired(sft.getTypeName, ATTRIBUTES_KEY))

    // Indices that are kept as-is (everything except the versions being replaced).
    val toKeep = sft.getIndices.filter { case (n, v, _) =>
      !toDisable.map(_._2).contains(AccumuloFeatureIndex.lookup(n, v))
    }

    if (params.noBackFill != null && params.noBackFill) {
      Command.user.info("Adding new indices and disabling old ones")
      sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.ReadWrite)) ++ toKeep)
      ds.updateSchema(sft.getTypeName, sft)
    } else {
      Command.user.info("Adding new indices in write-only mode")
      // add new index in write-only mode
      sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.Write)) ++ sft.getIndices)
      ds.updateSchema(sft.getTypeName, sft)

      // wait for the user to bounce ingestion
      Prompt.acknowledge("Indices have been added in write-only mode. To pick up the changes, " +
          "please bounce any streaming ingestion. Once ingestion has resumed, press 'enter' to continue.")

      // run migration job
      Command.user.info("Running index back-fill job")
      val args = new WriteIndexArgs(Array.empty)
      args.inZookeepers = params.zookeepers
      args.inInstanceId = params.instance
      args.inUser       = params.user
      args.inPassword   = params.password
      args.inTableName  = params.catalog
      args.inFeature    = params.featureName
      args.inCql        = params.cqlFilter
      args.indexNames.addAll(indices.map(_.identifier))

      val libjars = Some(AccumuloJobUtils.defaultLibJars, AccumuloJobUtils.defaultSearchPath)
      val result = try { ToolRunner.run(new WriteIndexJob(libjars), args.unparse()) } catch {
        case NonFatal(e) => Command.user.error("Error running back-fill job:", e); -1
      }

      // Flip the new indices to read-write and drop the superseded versions.
      def setReadWrite(): Unit = {
        Command.user.info("Setting index to read-write mode and disabling old indices")
        // set new indices to read-write and turn off disabled indices
        sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.ReadWrite)) ++ toKeep)
        Command.user.info(sft.getIndices.toString)
        ds.updateSchema(sft.getTypeName, sft)
      }

      if (result == 0) {
        setReadWrite()
      } else {
        var response: String = null
        do {
          response = Prompt.read("Index back-fill job failed. You may:\\n" +
              " 1. Switch the indices to read-write mode without existing data (you may manually back-fill later)\\n" +
              " 2. Roll-back index creation\\n" +
              "Select an option: ")
        } while (response != "1" && response != "2")
        response match {
          case "1" => setReadWrite()
          case "2" =>
            // Restore the attribute metadata we backed up before making any changes.
            val bak = ds.metadata.readRequired(sft.getTypeName, backupKey)
            ds.metadata.insert(sft.getTypeName, ATTRIBUTES_KEY, bak)
        }
      }
    }

    // final bounce
    Command.user.info("Operation complete. Please bounce any streaming ingestion to pick up the changes.")
  }
}
| ronq/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/data/AddIndexCommand.scala | Scala | apache-2.0 | 7,653 |
package com.twitter.util
import com.fasterxml.jackson.databind.{ObjectMapper => JacksonObjectMapper}
import com.fasterxml.jackson.module.scala.{ScalaObjectMapper => JacksonScalaObjectMapper}
package object jackson {
  // Convenience alias for a Jackson ObjectMapper that also mixes in the Scala module's
  // ScalaObjectMapper, exposing Scala-aware read/write helpers under one type.
  type JacksonScalaObjectMapperType = JacksonObjectMapper with JacksonScalaObjectMapper
}
| twitter/util | util-jackson/src/main/scala/com/twitter/util/jackson/package.scala | Scala | apache-2.0 | 308 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic
import org.scalatest.{Matchers, FunSpec}
import org.scalatestplus.mockito.MockitoSugar
class InternalClassLoaderSpec extends FunSpec with Matchers with MockitoSugar {
  // Convenience stub: a parentless ClassLoader whose loadClass is a no-op.
  abstract class MockClassLoader extends ClassLoader(null) {
    override def loadClass(name: String): Class[_] = null
  }

  describe("InternalClassLoader") {
    describe("#loadClass") {
      it("should invoke super loadClass with loader's package prepended") {
        val stubClass = classOf[Class[_]]
        val loaderPackage = "org.apache.toree.magic"
        val simpleName = "SomeClass"
        var parentSawQualifiedName = false

        // Intercept parent delegation and record the name/resolve pair it received.
        val classLoader = new InternalClassLoader(null) {
          override private[magic] def parentLoadClass(name: String, resolve: Boolean): Class[_] = {
            parentSawQualifiedName = name == s"$loaderPackage.$simpleName" && resolve
            stubClass
          }
        }

        classLoader.loadClass(simpleName, true) shouldBe stubClass
        parentSawQualifiedName shouldBe true
      }

      it("should use loader's package instead of provided package first") {
        val stubClass = classOf[Class[_]]
        val loaderPackage = "org.apache.toree.magic"
        val otherPackage = "some.other.package"
        val simpleName = "SomeClass"
        var parentSawQualifiedName = false

        // The loader should rewrite "some.other.package.SomeClass" to its own package.
        val classLoader = new InternalClassLoader(null) {
          override private[magic] def parentLoadClass(name: String, resolve: Boolean): Class[_] = {
            parentSawQualifiedName = name == s"$loaderPackage.$simpleName" && resolve
            stubClass
          }
        }

        classLoader.loadClass(s"$otherPackage.$simpleName", true) shouldBe stubClass
        parentSawQualifiedName shouldBe true
      }

      it("should invoke super loadClass with given package if internal missing") {
        val stubClass = classOf[Class[_]]
        val otherPackage = "some.other.package"
        val simpleName = "SomeClass"
        var parentSawQualifiedName = false
        var firstAttemptMade = false

        // First parent lookup (internal package) fails; the retry must fall
        // back to the originally requested package.
        val classLoader = new InternalClassLoader(null) {
          override private[magic] def parentLoadClass(name: String, resolve: Boolean): Class[_] = {
            if (!firstAttemptMade) {
              firstAttemptMade = true
              throw new ClassNotFoundException()
            }
            parentSawQualifiedName = name == s"$otherPackage.$simpleName" && resolve
            stubClass
          }
        }

        classLoader.loadClass(s"$otherPackage.$simpleName", true) shouldBe stubClass
        parentSawQualifiedName shouldBe true
      }
    }
  }
}
| apache/incubator-toree | kernel-api/src/test/scala/org/apache/toree/magic/InternalClassLoaderSpec.scala | Scala | apache-2.0 | 3,636 |
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common
* Development and Distribution License("CDDL") (collectively, the
* "License"). You may not use this file except in compliance with the
* License. You can obtain a copy of the License at
* http://www.netbeans.org/cddl-gplv2.html
* or nbbuild/licenses/CDDL-GPL-2-CP. See the License for the
* specific language governing permissions and limitations under the
* License. When distributing the software, include this License Header
* Notice in each file and include the License file at
* nbbuild/licenses/CDDL-GPL-2-CP. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the
* License Header, with the fields enclosed by brackets [] replaced by
* your own identifying information:
* "Portions Copyrighted [year] [name of copyright owner]"
*
* If you wish your version of this file to be governed by only the CDDL
* or only the GPL Version 2, indicate your decision by adding
* "[Contributor] elects to include this software in this distribution
* under the [CDDL or GPL Version 2] license." If you do not indicate a
* single choice of license, a recipient has the option to distribute
* your version of this file under either the CDDL, the GPL Version 2 or
* to extend the choice of license to its licensees as provided above.
* However, if you add GPL Version 2 code and therefore, elected the GPL
* Version 2 license, then the option applies only if the new code is
* made subject to such option by the copyright holder.
*
* Contributor(s):
*
* Portions Copyrighted 2009 Sun Microsystems, Inc.
*/
package org.netbeans.modules.scala.editor
import javax.swing.text.{BadLocationException, Document}
import org.netbeans.api.lexer.{Token, TokenId}
import org.netbeans.editor.{BaseDocument, Utilities}
import org.netbeans.modules.csl.api.Formatter
import org.netbeans.modules.csl.spi.{GsfUtilities, ParserResult}
import org.netbeans.modules.editor.indent.spi.Context
import org.openide.filesystems.FileUtil
import org.openide.loaders.DataObject
import org.openide.util.Exceptions
import org.netbeans.modules.scala.core.lexer.{ScalaLexUtil, ScalaTokenId}
import org.netbeans.modules.scala.editor.options.{CodeStyle}
import scala.collection.mutable.{ArrayBuffer, Stack}
/**
* Formatting and indentation.
*
*
* @author Caoyuan Deng
*/
object ScalaFormatter {
  /**
   * Opening token -> set of tokens that may close it, used while balancing
   * braces/keywords during indentation computation.
   *
   * Note the asymmetric entries: a `Case` may be "closed" either by the next
   * `Case` or by the enclosing `RBrace`, and an XML open tag (`XmlLt`) by a
   * self-closing `/>` (`XmlSlashGt`) or a closing `</` (`XmlLtSlash`).
   * Tokens such as `if`/`else`/`for` are deliberately absent; they are handled
   * separately in `ScalaFormatter.computeLineIndent`.
   */
  val BRACE_MATCH_MAP: Map[TokenId, Set[TokenId]] =
    Map(ScalaTokenId.LParen -> Set(ScalaTokenId.RParen),
        ScalaTokenId.LBrace -> Set(ScalaTokenId.RBrace),
        ScalaTokenId.LBracket -> Set(ScalaTokenId.RBracket),
        ScalaTokenId.Case -> Set(ScalaTokenId.Case,
                                 ScalaTokenId.RBrace),
        ScalaTokenId.DocCommentStart -> Set(ScalaTokenId.DocCommentEnd),
        ScalaTokenId.BlockCommentStart -> Set(ScalaTokenId.BlockCommentEnd),
        ScalaTokenId.XmlLt -> Set(ScalaTokenId.XmlSlashGt,
                                  ScalaTokenId.XmlLtSlash)
    )
}
/**
 * Formatting and indentation for Scala sources, driven purely by the lexer
 * token stream (no parse tree required).
 *
 * @param acodeStyle          code style to use; falls back to `CodeStyle.getDefault(null)` when null
 * @param rightMarginOverride right margin to use instead of the style's; -1 means "no override"
 *
 * @author Caoyuan Deng
 */
class ScalaFormatter(acodeStyle: CodeStyle, rightMarginOverride: Int) extends Formatter {
  import ScalaFormatter._
  import org.netbeans.modules.csl.api.OffsetRange

  def this() = this(null, -1)

  // Fixed at construction time and never reassigned, so a val is sufficient.
  private val codeStyle = if (acodeStyle != null) acodeStyle else CodeStyle.getDefault(null)

  def needsParserResult: Boolean = {
    false
  }

  /** Indent-only entry point (e.g. after the user presses <enter>). */
  override def reindent(context: Context): Unit = {
    reindent(context, context.document, context.startOffset, context.endOffset, null, true)
  }

  /** Full reformat entry point. */
  override def reformat(context: Context, info: ParserResult): Unit = {
    reindent(context, context.document, context.startOffset, context.endOffset, info, false)
  }

  def indentSize: Int = {
    codeStyle.getIndentSize
  }

  def hangingIndentSize: Int = {
    codeStyle.getContinuationIndentSize
  }

  /**
   * Compute a "format-stable" starting offset before `offset`: the offset of
   * an outermost `class`/`object`/`trait` definition, which we assume is
   * properly indented and brace-balanced. Returns 0 when no such anchor exists.
   */
  private def getFormatStableStart(doc: BaseDocument, offset: Int): Int = {
    val ts = ScalaLexUtil.getTokenSequence(doc, offset).getOrElse(return 0)
    ts.move(offset)
    if (!ts.movePrevious) {
      return 0
    }

    // Look backwards to find a suitable context - a class, module or method definition
    // which we will assume is properly indented and balanced
    do {
      val token = ts.token
      token.id match {
        case ScalaTokenId.Object | ScalaTokenId.Trait | ScalaTokenId.Class =>
          // * is this `class`/`object`/`trait` enclosed in an outer `class`/`object`/`trait`?
          ScalaLexUtil.findBwd(ts, ScalaTokenId.LBrace, ScalaTokenId.RBrace) match {
            case OffsetRange.NONE => return ts.offset
            case range => // go on for outer `class`/`object`/`trait`
          }
        case _ =>
      }
    } while (ts.movePrevious)

    ts.offset
  }

  /**
   * Get the first token on the line containing `offset` (line begin is
   * computed from the offset itself), or None when the line is all whitespace.
   *
   * NOTE(review): currently unused within this class; kept for symmetry with
   * sibling formatters — confirm before removing.
   */
  @throws(classOf[BadLocationException])
  private def getFirstTokenOnLine(doc: BaseDocument, offset: Int): Option[Token[_]] = {
    val lineBegin = Utilities.getRowFirstNonWhite(doc, offset)
    if (lineBegin != -1) {
      return ScalaLexUtil.getToken(doc, lineBegin)
    }

    None
  }

  /**
   * Shared implementation behind `reindent` and `reformat`.
   *
   * @param context      indent context; may be null when invoked directly
   * @param document     target document (must be a BaseDocument)
   * @param astartOffset start of the region to (re)indent
   * @param aendOffset   end of the region to (re)indent
   * @param info         parser result; unused by the computation and may be null
   * @param indentOnly   true when only adjusting indent after a newline insert
   */
  def reindent(context: Context, document: Document, astartOffset: Int, aendOffset: Int, info: ParserResult, indentOnly: Boolean): Unit = {
    var startOffset = astartOffset
    var endOffset = aendOffset
    try {
      val doc = document.asInstanceOf[BaseDocument]
      syncOptions(doc, codeStyle)

      if (endOffset > doc.getLength) {
        endOffset = doc.getLength
      }

      startOffset = Utilities.getRowStart(doc, startOffset)
      val lineStart = startOffset
      var initialOffset = 0
      var initialIndent = 0
      if (startOffset > 0) {
        val prevOffset = Utilities.getRowStart(doc, startOffset - 1)
        initialOffset = getFormatStableStart(doc, prevOffset)
        initialIndent = GsfUtilities.getLineIndent(doc, initialOffset)
      }

      // When we're formatting sections, include whitespace on empty lines; this
      // is used during live code template insertions for example. However, when
      // wholesale formatting a whole document, leave these lines alone.
      val indentEmptyLines = startOffset != 0 || endOffset != doc.getLength

      // In case of indentOnly (user pressed <enter>), endOffset is the position
      // of the inserted newline; to compute the new line's indent we need to
      // include the end line itself.
      val includeEnd = endOffset == doc.getLength || indentOnly

      // Build up a set of offsets and indents for lines where I know I need
      // to adjust the offset. I will then go back over the document and adjust
      // lines that are different from the intended indent. By doing piecemeal
      // replacements in the document rather than replacing the whole thing,
      // a lot of things will work better: breakpoints and other line annotations
      // will be left in place, semantic coloring info will not be temporarily
      // damaged, and the caret will stay roughly where it belongs.
      val (offsets, indents) = computeIndents(doc, initialIndent, initialOffset, endOffset, info, indentEmptyLines, includeEnd)

      doc.runAtomic(new Runnable {
        def run {
          try {
            // Iterate in reverse order such that offsets are not affected by our edits
            assert(indents.size == offsets.size)
            val editorFormatter = doc.getFormatter
            var break = false
            var i = indents.size - 1
            while (i >= 0 && !break) {
              val lineBegin = offsets(i)
              if (lineBegin < lineStart) {
                // We're now outside the region that the user wanted reformatting;
                // these offsets were computed to get the correct continuation
                // context etc. for the formatter
                break = true
              } else {
                var indent = indents(i)
                if (lineBegin == lineStart && i > 0) {
                  // Look at the previous line, and see how it's indented
                  // in the buffer. If it differs from the computed position,
                  // offset my computed position (thus, I'm only going to adjust
                  // the new line position relative to the existing editing).
                  // This avoids the situation where you're inserting a newline
                  // in the middle of "incorrectly" indented code (e.g. different
                  // size than the IDE is using) and the newline position ending
                  // up "out of sync"
                  val prevOffset = offsets(i - 1)
                  val prevIndent = indents(i - 1)
                  val actualPrevIndent = GsfUtilities.getLineIndent(doc, prevOffset)
                  if (actualPrevIndent != prevIndent) {
                    // For blank lines, indentation may be 0, so don't adjust in that case
                    if (!(Utilities.isRowEmpty(doc, prevOffset) || Utilities.isRowWhite(doc, prevOffset))) {
                      indent = actualPrevIndent + (indent - prevIndent)
                    }
                  }
                }

                if (indent >= 0) { // @todo why? #150319
                  // Adjust the indent at the given line (specified by offset) to the given indent
                  val currentIndent = GsfUtilities.getLineIndent(doc, lineBegin)
                  if (currentIndent != indent) {
                    if (context != null) {
                      context.modifyIndent(lineBegin, indent)
                    } else {
                      editorFormatter.changeRowIndent(doc, lineBegin, indent)
                    }
                  }
                }
              }
              i -= 1
            }

            if (!indentOnly /* && codeStyle.reformatComments */ ) {
              // reformatComments(doc, startOffset, endOffset);
            }
          } catch { case ble: BadLocationException => Exceptions.printStackTrace(ble) }
        }
      })
    } catch { case ble: BadLocationException => Exceptions.printStackTrace(ble) }
  }

  /** Book-keeping record for one opening token while balancing braces across lines. */
  protected class Brace {
    var token: Token[TokenId] = _
    var lineIdx: Int = _ // idx into the `offsets` and `indents` arrays
    var offsetOnline: Int = _ // offset of this token on its line, after indent
    var isLatestOnLine: Boolean = _ // last one on this line?
    var onProcessingLine: Boolean = _ // on the line currently being processed?
    var lasestTokenOnLine: Token[TokenId] = _ // latest non-white token on this line

    override def toString = {
      token.text.toString
    }
  }

  /**
   * Computes target indents for every line between `startOffset` and
   * `endOffset` (inclusive when `includeEnd`).
   *
   * @return a pair of parallel arrays: line-start offsets and their computed indents
   */
  def computeIndents(doc: BaseDocument, initialIndent: Int, startOffset: Int, endOffset: Int, info: ParserResult,
                     indentEmptyLines: Boolean, includeEnd: Boolean): (Array[Int], Array[Int]) = {
    val offsets = new ArrayBuffer[Int]
    val indents = new ArrayBuffer[Int]
    try {
      // Algorithm:
      // Iterate over the range, accumulating a token balance: {, (, [ and
      // keywords like `case` increase the balance; }, ), ] decrease it.
      // If a line starts with an end marker, indent it to the level AFTER the
      // token, otherwise to the level BEFORE the token (level = balance *
      // indentSize). If the previous line is not "done" a hanging-indent
      // modifier is added. The result is a set of line offsets and indents
      // usable either to reformat the buffer or to indent a new line.
      var offset = Utilities.getRowStart(doc, startOffset) // The current line's offset
      val end = endOffset

      var indent = 0 // The indentation to be used for the current line
      var prevIndent = 0
      var nextIndent = 0
      var continueIndent = -1

      val openingBraces = new Stack[Brace]
      val specialTokens = new Stack[Brace]

      var idx = 0
      while (!includeEnd && offset < end || includeEnd && offset <= end) {
        val lineBegin = Utilities.getRowFirstNonWhite(doc, offset)
        val lineEnd = Utilities.getRowEnd(doc, offset)
        if (lineBegin != -1) {
          val results = computeLineIndent(indent, prevIndent, continueIndent,
                                          openingBraces, specialTokens,
                                          offsets, indents,
                                          doc, lineBegin, lineEnd, idx)
          indent = results(0)
          nextIndent = results(1)
          continueIndent = results(2)
        }

        if (indent == -1) {
          // Skip this line - leave formatting as it was prior to reformatting
          indent = GsfUtilities.getLineIndent(doc, offset)
        }

        if (indent < 0) {
          indent = 0
        }

        // Insert whitespace on empty lines too -- needed for abbreviations expansion
        if (lineBegin != -1 || indentEmptyLines) {
          indents += indent
          offsets += offset
          idx += 1
        }

        // Shift to next line
        offset = lineEnd + 1
        prevIndent = indent
        indent = nextIndent
      }
    } catch { case ble: BadLocationException => Exceptions.printStackTrace(ble) }

    (offsets.toArray, indents.toArray)
  }

  /**
   * Compute the indent for the next line, adjusting this line's indent if necessary.
   *
   * @return Array(adjusted indent of this line, indent for next line, continue indent)
   */
  private def computeLineIndent(aindent: Int, prevIndent: Int, acontinueIndent: Int,
                                openingBraces: Stack[Brace], specialBraces: Stack[Brace],
                                offsets: ArrayBuffer[Int], indents: ArrayBuffer[Int],
                                doc: BaseDocument, lineBegin: Int, lineEnd: Int, lineIdx: Int): Array[Int] = {

    val ts = ScalaLexUtil.getTokenSequence(doc, lineBegin).getOrElse(return Array(aindent, aindent, -1))

    // * a new line begins: reset per-line flags on all tracked braces
    openingBraces foreach { _.onProcessingLine = false }
    specialBraces foreach { _.onProcessingLine = false }

    // --- Compute new balance and adjust indent (computed by previous `computeLineIndent`) of this line

    var indent = aindent
    var continueIndent = acontinueIndent

    val rawStart = Utilities.getRowStart(doc, lineBegin) // lineBegin is the RowFirstNonWhite

    // * token index on this line (counting only non-white tokens);
    // * noWSIdx == 0 means the first non-white token on this line
    var noWSIdx = -1
    var latestNoWSToken: Token[TokenId] = null
    var latestNoWSTokenOffset: Int = -1

    try {
      ts.move(lineBegin)

      do {
        val token = ts.token
        if (token != null) {
          val offset = ts.offset
          val id = token.id

          if (!ScalaLexUtil.isWsComment(id)) {
            noWSIdx += 1
            latestNoWSToken = token
            latestNoWSTokenOffset = offset
          }

          // * match/add brace
          id.primaryCategory match {
            case "keyword" | "s_keyword" | "separator" | "operator" | "xml" | "comment" =>
              var justClosedBrace: Brace = null

              if (!openingBraces.isEmpty) {
                val brace = openingBraces.top
                val braceId = brace.token.id
                // * `if`, `else`, `for` etc are not in BRACE_MATCH_MAP; they are processed later
                val matchingIds = BRACE_MATCH_MAP.get(braceId)
                if (matchingIds.isDefined && matchingIds.get.contains(id)) { // matched

                  var numClosed = 1 // default

                  // we may need to look ahead 2 steps for some cases:
                  if (braceId == ScalaTokenId.Case) {
                    val backup = openingBraces.pop

                    if (!openingBraces.isEmpty) {
                      // if resolved is "}", the `case` and its enclosing brace both close:
                      if (id == ScalaTokenId.RBrace) {
                        numClosed = 2
                      }
                    }

                    openingBraces push backup
                  }

                  for (i <- 0 until numClosed) {
                    justClosedBrace = openingBraces.pop
                  }

                  if (noWSIdx == 0) {
                    // * this token is at the beginning of this line, adjust this line's indent if necessary
                    indent = id match {
                      case ScalaTokenId.Case | ScalaTokenId.RParen | ScalaTokenId.RBracket | ScalaTokenId.RBrace =>
                        openingBraces.size * indentSize
                      case _ => justClosedBrace.offsetOnline
                    }
                  }
                }
              }

              // * cases when this is the first raw token of the line:
              if (offset == rawStart) {
                id match {
                  case ScalaTokenId.LineComment =>
                    indent = -1 // leave line-comment-only lines untouched
                  case _ =>
                }
              }

              // * cases when this is the first non-white/comment token of the line:
              if (noWSIdx == 0) {
                id match {
                  case ScalaTokenId.With =>
                    specialBraces.find { _.token.id == ScalaTokenId.Extends } match {
                      case Some(x) =>
                        indent = x.offsetOnline + 3 // right-align `with` under `extends`
                      case _ =>
                    }
                  case _ =>
                }
              }

              // * add new special brace
              id match {
                case ScalaTokenId.Extends =>
                  val newBrace = new Brace
                  newBrace.token = token
                  newBrace.lineIdx = lineIdx
                  // this line's final indent is added to offsetOnline later
                  newBrace.offsetOnline = offset - lineBegin
                  newBrace.onProcessingLine = true
                  specialBraces push newBrace
                case _ =>
              }

              // * add new opening brace
              if (BRACE_MATCH_MAP.contains(id)) {
                var ignore = false
                // is it a case object or case class? if so, do not indent
                if (id == ScalaTokenId.Case) {
                  if (ts.moveNext) {
                    val next = ScalaLexUtil.findNextNoWs(ts).get
                    next.id match {
                      case ScalaTokenId.Object | ScalaTokenId.Class =>
                        ignore = true
                      case _ =>
                    }
                    ts.movePrevious
                  }
                }

                if (!ignore) {
                  val newBrace = new Brace
                  newBrace.token = token
                  // this line's final indent is added to offsetOnline later
                  newBrace.offsetOnline = offset - lineBegin
                  newBrace.onProcessingLine = true
                  openingBraces push newBrace
                }
              }
            case _ if id == ScalaTokenId.XmlCDData || (id == ScalaTokenId.StringLiteral && offset < lineBegin) =>
              // * A multi-line literal string is a single token; on its second
              // * and following lines it has offset < lineBegin
              if (noWSIdx == 0 || noWSIdx == -1) {
                // * No indentation for literal strings from the 2nd line on.
                indent = -1
              }
            case _ =>
          }
        }
      } while (ts.moveNext && ts.offset < lineEnd)

      // --- all tokens of this line processed; handle special cases

      if (!openingBraces.isEmpty) {
        openingBraces.top.token.id match {
          case ScalaTokenId.Eq | ScalaTokenId.Else | ScalaTokenId.If | ScalaTokenId.For | ScalaTokenId.Yield | ScalaTokenId.While =>
            // * close these pseudo-braces now; their indentation effect was
            // * already applied in the previous computation
            openingBraces pop
          case _ =>
        }
      }

      // * special case for next-line indent when `=` ends this line, or an
      // * unfinished `if`/`else`/`for`; the line is fully processed, so `ts`
      // * may be traversed freely now
      if (latestNoWSToken != null) {
        // * move `ts` to `latestNoWSToken`:
        ts.move(latestNoWSTokenOffset); ts.moveNext
        // * is the next token an LBrace? if so, don't open any pseudo-brace
        ts.moveNext
        ScalaLexUtil.findNextNoWsNoComment(ts) match {
          case Some(x) if x.id == ScalaTokenId.LBrace =>
          case _ =>
            // * go back to latestNoWSToken
            ts.move(latestNoWSTokenOffset); ts.moveNext
            latestNoWSToken.id match {
              case ScalaTokenId.Eq | ScalaTokenId.Else | ScalaTokenId.Yield =>
                val offset = ts.offset
                val newBrace = new Brace
                newBrace.token = latestNoWSToken
                // this line's final indent is added to offsetOnline later
                newBrace.offsetOnline = offset - lineBegin
                newBrace.onProcessingLine = true
                openingBraces push newBrace
              case ScalaTokenId.RParen =>
                ScalaLexUtil.skipPair(ts, true, ScalaTokenId.LParen, ScalaTokenId.RParen)
                // * check for an `if`/`while`/`for` directly before the matched `LParen`
                ScalaLexUtil.findPreviousNoWsNoComment(ts) match {
                  case Some(x) => x.id match {
                      case ScalaTokenId.If =>
                        val offset = ts.offset
                        val newBrace = new Brace
                        newBrace.token = x
                        newBrace.offsetOnline = offset - lineBegin
                        newBrace.onProcessingLine = true
                        openingBraces push newBrace
                        ts.movePrevious
                        ScalaLexUtil.findPreviousNoWsNoComment(ts) match {
                          case Some(y) if y.id == ScalaTokenId.Else =>
                            // * "else if": anchor offset at the `else` for later hanging
                            newBrace.offsetOnline = ts.offset - lineBegin
                          case _ =>
                        }
                      case ScalaTokenId.While =>
                        val offset = ts.offset
                        ts.movePrevious
                        ScalaLexUtil.findPreviousNoWsNoComment(ts) match {
                          case Some(x) if x.id == ScalaTokenId.RBrace =>
                            // NOTE: this inner `x` (the RBrace) deliberately shadows the
                            // outer `x` (the While) below, as in the original logic.
                            ScalaLexUtil.skipPair(ts, true, ScalaTokenId.LBrace, ScalaTokenId.RBrace)
                            ScalaLexUtil.findPreviousNoWsNoComment(ts) match {
                              case Some(y) if y.id != ScalaTokenId.Do =>
                                val newBrace = new Brace
                                newBrace.token = x
                                newBrace.offsetOnline = offset - lineBegin
                                newBrace.onProcessingLine = true
                                openingBraces push newBrace
                              case _ => // `do { ... } while` — no hanging indent
                            }
                          case _ =>
                            val newBrace = new Brace
                            newBrace.token = x
                            newBrace.offsetOnline = offset - lineBegin
                            newBrace.onProcessingLine = true
                            openingBraces push newBrace
                        }
                      case ScalaTokenId.For =>
                        val offset = ts.offset
                        val newBrace = new Brace
                        newBrace.token = x
                        newBrace.offsetOnline = offset - lineBegin
                        newBrace.onProcessingLine = true
                        openingBraces push newBrace
                      case _ =>
                    }
                  case _ =>
                }
              case _ =>
            }
        }
      }
    } catch {
      case e: AssertionError =>
        doc.getProperty(Document.StreamDescriptionProperty).asInstanceOf[DataObject] match {
          case null =>
          case dobj => Exceptions.attachMessage(e, FileUtil.getFileDisplayName(dobj.getPrimaryFile))
        }
        throw e
    }

    // * Now that this line's final indent is known, adjust the recorded offset
    // * of braces newly opened on this line
    for (brace <- openingBraces if brace.onProcessingLine) {
      brace.offsetOnline += indent
      if (brace.token == latestNoWSToken) {
        brace.isLatestOnLine = true
      }
      brace.lasestTokenOnLine = latestNoWSToken
    }

    for (brace <- specialBraces if brace.onProcessingLine) {
      brace.offsetOnline += indent
      if (brace.token == latestNoWSToken) {
        brace.isLatestOnLine = true
      }
      brace.lasestTokenOnLine = latestNoWSToken
    }

    // --- Compute indent for next line

    var nextIndent = 0
    val latestOpenBrace = if (!openingBraces.isEmpty) {
      openingBraces.top
    } else null

    // * decide if the next line is a new line or a continued line
    val isContinueLine = if (latestNoWSToken == null) {
      // * empty line or comment line
      false
    } else {
      if (latestOpenBrace != null && latestOpenBrace.isLatestOnLine) {
        // * we have a special case
        (latestOpenBrace.token.id, latestNoWSToken.id) match {
          //case (ScalaTokenId.LParen | ScalaTokenId.LBracket | ScalaTokenId.LBrace, ScalaTokenId.Comma) => true
          case _ => false
        }
      } else false
    }

    if (isContinueLine) {
      // Compute a new continue indent, or keep the current one
      if (continueIndent == -1) {
        continueIndent = indent + hangingIndentSize
      }
      nextIndent = continueIndent
    } else {
      // Reset continueIndent
      continueIndent = -1

      if (latestOpenBrace == null) {
        // All braces resolved
        nextIndent = 0
      } else {
        nextIndent = latestOpenBrace.token.id match {
          case ScalaTokenId.RArrow =>
            var nearestHangableBrace: Brace = null
            var depth = 0
            var break = false
            val braces = openingBraces.toList
            // FIX: the original `size - 1 to 0` (no `by -1`) is an empty range
            // whenever more than one brace is open, so this search never ran
            // and the hanging indent always fell through to the default below.
            // NOTE(review): assumes `braces(size - 1)` walks outward through the
            // stack — confirm against mutable.Stack's (top-first) toList order.
            for (i <- openingBraces.size - 1 to 0 by -1 if !break) {
              val brace = braces(i)
              depth += 1
              if (brace.token.id != ScalaTokenId.RArrow) {
                nearestHangableBrace = brace
                break = true
              }
            }
            if (nearestHangableBrace != null) {
              // * Hang it from this brace
              nearestHangableBrace.offsetOnline + depth * indentSize
            } else {
              openingBraces.size * indentSize
            }
          case ScalaTokenId.LParen | ScalaTokenId.LBracket | ScalaTokenId.LBrace
            if !latestOpenBrace.isLatestOnLine && (latestOpenBrace.lasestTokenOnLine == null ||
              latestOpenBrace.lasestTokenOnLine.id != ScalaTokenId.RArrow) =>
            latestOpenBrace.offsetOnline + latestOpenBrace.token.text.length
          case ScalaTokenId.BlockCommentStart | ScalaTokenId.DocCommentStart =>
            latestOpenBrace.offsetOnline + 1
          case ScalaTokenId.Eq | ScalaTokenId.Else | ScalaTokenId.If | ScalaTokenId.For | ScalaTokenId.Yield | ScalaTokenId.While =>
            indent + indentSize
          case _ => openingBraces.size * indentSize // default
        }
      }
    }

    Array(indent, nextIndent, continueIndent)
  }

  /**
   * Ensure that the editor settings for tabs match our code style, since the
   * primitive "doc.getFormatter.changeRowIndent" calls will use those settings.
   */
  private def syncOptions(doc: BaseDocument, style: CodeStyle) {
    val formatter = doc.getFormatter
    if (formatter.getSpacesPerTab != style.getIndentSize) {
      formatter.setSpacesPerTab(style.getIndentSize)
    }
  }
}
| richardfontana/fontana2007-t | ScalaEditorLite/src/org/netbeans/modules/scala/editor/ScalaFormatter.scala | Scala | gpl-3.0 | 31,861 |
/* *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\* */
package squants.energy
import org.scalatest.{ Matchers, FlatSpec }
import scala.language.postfixOps
import squants.{ QuantityParseException, MetricSystem }
import squants.time.Hours
/**
* @author garyKeorkunian
* @since 0.1
*
*/
class PowerSpec extends FlatSpec with Matchers {

  behavior of "Power and its Units of Measure"

  it should "create values using UOM factories" in {
    Watts(1).toWatts shouldBe 1
    Milliwatts(1).toMilliwatts shouldBe 1
    Kilowatts(1).toKilowatts shouldBe 1
    Megawatts(1).toMegawatts shouldBe 1
    Gigawatts(1).toGigawatts shouldBe 1
    BtusPerHour(1).toBtusPerHour shouldBe 1
  }

  it should "create values from properly formatted Strings" in {
    Power("10.22 mW").get shouldBe Milliwatts(10.22)
    Power("10.22 W").get shouldBe Watts(10.22)
    Power("10.22 kW").get shouldBe Kilowatts(10.22)
    Power("10.22 MW").get shouldBe Megawatts(10.22)
    Power("10.22 GW").get shouldBe Gigawatts(10.22)
    Power("10.22 Btu/hr").get shouldBe BtusPerHour(10.22)
    // Malformed input surfaces as a QuantityParseException in the failed projection.
    Power("10.22 zz").failed.get shouldBe QuantityParseException("Unable to parse Power", "10.22 zz")
    Power("ZZ W").failed.get shouldBe QuantityParseException("Unable to parse Power", "ZZ W")
  }

  it should "properly convert to all supported Units of Measure" in {
    val oneWatt = Watts(1)
    oneWatt.toWatts shouldBe 1
    oneWatt.toMilliwatts shouldBe 1 / MetricSystem.Milli
    oneWatt.toKilowatts shouldBe 1 / MetricSystem.Kilo
    oneWatt.toMegawatts shouldBe 1 / MetricSystem.Mega
    oneWatt.toGigawatts shouldBe 1 / MetricSystem.Giga
    oneWatt.toBtusPerHour shouldBe 1 / EnergyConversions.btuMultiplier
  }

  it should "return properly formatted strings for all supported Units of Measure" in {
    Watts(1).toString(Watts) shouldBe "1.0 W"
    Milliwatts(1).toString(Milliwatts) shouldBe "1.0 mW"
    Kilowatts(1).toString(Kilowatts) shouldBe "1.0 kW"
    Megawatts(1).toString(Megawatts) shouldBe "1.0 MW"
    Gigawatts(1).toString(Gigawatts) shouldBe "1.0 GW"
    BtusPerHour(1).toString(BtusPerHour) shouldBe "1.0 Btu/hr"
  }

  it should "return Energy when multiplied by Time" in {
    Watts(1) * Hours(1) shouldBe WattHours(1)
  }

  behavior of "PowerConversions"

  it should "provide aliases for single unit values" in {
    import PowerConversions._

    milliwatt shouldBe Milliwatts(1)
    mW shouldBe Milliwatts(1)
    watt shouldBe Watts(1)
    W shouldBe Watts(1)
    kilowatt shouldBe Kilowatts(1)
    kW shouldBe Kilowatts(1)
    megawatt shouldBe Megawatts(1)
    MW shouldBe Megawatts(1)
    gigawatt shouldBe Gigawatts(1)
    GW shouldBe Gigawatts(1)
  }

  it should "provide implicit conversion from Double" in {
    import PowerConversions._

    val magnitude = 10d
    magnitude.mW shouldBe Milliwatts(magnitude)
    magnitude.W shouldBe Watts(magnitude)
    magnitude.kW shouldBe Kilowatts(magnitude)
    magnitude.MW shouldBe Megawatts(magnitude)
    magnitude.GW shouldBe Gigawatts(magnitude)
    magnitude.milliwatts shouldBe Milliwatts(magnitude)
    magnitude.watts shouldBe Watts(magnitude)
    magnitude.kilowatts shouldBe Kilowatts(magnitude)
    magnitude.megawatts shouldBe Megawatts(magnitude)
    magnitude.gigawatts shouldBe Gigawatts(magnitude)
    magnitude.BTUph shouldBe BtusPerHour(magnitude)
  }

  it should "provide implicit conversions from String" in {
    import PowerConversions._

    "10.22 mW".toPower.get shouldBe Milliwatts(10.22)
    "10.22 W".toPower.get shouldBe Watts(10.22)
    "10.22 kW".toPower.get shouldBe Kilowatts(10.22)
    "10.22 MW".toPower.get shouldBe Megawatts(10.22)
    "10.22 GW".toPower.get shouldBe Gigawatts(10.22)
    "10.22 Btu/hr".toPower.get shouldBe BtusPerHour(10.22)
    "10.22 zz".toPower.failed.get shouldBe QuantityParseException("Unable to parse Power", "10.22 zz")
    "ZZ W".toPower.failed.get shouldBe QuantityParseException("Unable to parse Power", "ZZ W")
  }

  it should "provide Numeric support in" in {
    import PowerConversions.PowerNumeric

    // Mixed-unit values should sum via the Numeric instance.
    val powers = List(Watts(1000), Kilowatts(10), Megawatts(.1))
    powers.sum shouldBe Kilowatts(111)
  }
}
| rmihael/squants | shared/src/test/scala/squants/energy/PowerSpec.scala | Scala | apache-2.0 | 4,493 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.sync.client
import java.io.{BufferedOutputStream, File, FileOutputStream, InputStream}
import java.security.{DigestOutputStream, MessageDigest}
import com.waz.api.impl.ErrorResponse
import com.waz.api.impl.ProgressIndicator.{Callback, ProgressData}
import com.waz.cache.{CacheEntry, CacheService, Expiration, LocalData}
import com.waz.model.{Mime, _}
import com.waz.utils.crypto.AESUtils
import com.waz.utils.{IoUtils, JsonDecoder, JsonEncoder}
import com.waz.znet2.http.HttpClient.AutoDerivationOld._
import com.waz.znet2.http.HttpClient.dsl._
import com.waz.znet2.http.HttpClient.{Progress, ProgressCallback}
import com.waz.znet2.http.MultipartBodyMixed.Part
import com.waz.znet2.http.Request.UrlCreator
import com.waz.znet2.http._
import org.json.JSONObject
import org.threeten.bp.Instant
import scala.concurrent.duration._
/**
 * Client interface for downloading and uploading assets (e.g. images, files)
 * against the backend's asset endpoints.
 */
trait AssetClient {
  import com.waz.sync.client.AssetClient._
  //TODO Request should be constructed inside "*Client" classes
  /**
   * Downloads an asset into the local cache.
   *
   * @param req      pre-built HTTP request for the asset (see TODO above)
   * @param key      optional AES key; when present the downloaded content is stored encrypted-at-rest
   *                 via the cache (see `cacheEntryBodyDeserializer` in the implementation)
   * @param sha      optional SHA-256 digest used to verify the downloaded bytes
   * @param callback progress callback invoked as bytes arrive
   * @return the cache entry holding the downloaded asset, or an ErrorResponse
   */
  def loadAsset[T: RequestSerializer](
      req: Request[T],
      key: Option[AESKey] = None,
      sha: Option[Sha256] = None,
      callback: Callback
  ): ErrorOrResponse[CacheEntry]
  //TODO Add callback parameter. https://github.com/wireapp/wire-android-sync-engine/issues/378
  /**
   * Uploads asset data as a multipart request to the v3 assets endpoint.
   *
   * @param metadata public/retention settings sent as the first multipart part
   * @param data     the asset bytes
   * @param mime     content type of the data
   */
  def uploadAsset(metadata: Metadata, data: LocalData, mime: Mime): ErrorOrResponse[UploadResponse]
}
/**
 * Default [[AssetClient]] implementation backed by the znet2 HttpClient.
 * Downloads are streamed into cache entries (optionally AES-encrypted) while a
 * SHA-256 digest is computed on the fly for integrity verification.
 */
class AssetClientImpl(cacheService: CacheService)
                     (implicit
                      urlCreator: UrlCreator,
                      client: HttpClient,
                      authRequestInterceptor: RequestInterceptor = RequestInterceptor.identity)
  extends AssetClient {
  import AssetClient._
  // Streams a response body into a temp file while computing its SHA-256 digest.
  // NOTE(review): the DigestOutputStream is not visibly closed here — presumably
  // IoUtils.copy closes both streams; confirm against IoUtils.
  private implicit def fileWithShaBodyDeserializer: RawBodyDeserializer[FileWithSha] =
    RawBodyDeserializer.create { body =>
      val tempFile = File.createTempFile("http_client_download", null)
      val out = new DigestOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)),
        MessageDigest.getInstance("SHA-256"))
      IoUtils.copy(body.data(), out)
      FileWithSha(tempFile, Sha256(out.getMessageDigest.digest()))
    }
  // Streams a response body into a managed cache file (encrypted when `key` is set)
  // and fails fast if the computed SHA-256 does not match the expected digest.
  private def cacheEntryBodyDeserializer(key: Option[AESKey], sha: Option[Sha256]): RawBodyDeserializer[CacheEntry] =
    RawBodyDeserializer.create { body =>
      val entry = cacheService.createManagedFile(key)
      val out = new DigestOutputStream(new BufferedOutputStream(new FileOutputStream(entry.cacheFile)),
        MessageDigest.getInstance("SHA-256"))
      IoUtils.copy(body.data(), out)
      // Only verify when an expected digest was provided.
      if (sha.exists(_ != Sha256(out.getMessageDigest.digest()))) {
        throw new IllegalArgumentException(
          s"SHA256 not match. \\nExpected: $sha \\nCurrent: ${Sha256(out.getMessageDigest.digest())}"
        )
      }
      entry
    }
  // Wraps LocalData as a streamable request body with the given mime type.
  private def localDataRawBodySerializer(mime: Mime): RawBodySerializer[LocalData] =
    RawBodySerializer.create { data =>
      RawBody(mediaType = Some(mime.str), () => data.inputStream, dataLength = Some(data.length))
    }
  /** Downloads an asset, reporting progress and verifying/decrypting per `key`/`sha`. */
  override def loadAsset[T: RequestSerializer](request: Request[T],
                                               key: Option[AESKey] = None,
                                               sha: Option[Sha256] = None,
                                               callback: Callback): ErrorOrResponse[CacheEntry] = {
    // Adapt the domain Callback to the http-layer ProgressCallback.
    val progressCallback: ProgressCallback = new ProgressCallback {
      override def updated(progress: Long, total: Option[Long]): Unit = callback(ProgressData(progress, total))
    }
    implicit val bodyDeserializer: RawBodyDeserializer[CacheEntry] = cacheEntryBodyDeserializer(key, sha)
    request
      .withDownloadCallback(progressCallback)
      .withResultType[CacheEntry]
      .withErrorType[ErrorResponse]
      .executeSafe
  }
  /** Uploads the data as a two-part multipart body: JSON metadata + raw content with its MD5. */
  override def uploadAsset(metadata: Metadata, data: LocalData, mime: Mime): ErrorOrResponse[UploadResponse] = {
    implicit val rawBodySerializer: RawBodySerializer[LocalData] = localDataRawBodySerializer(mime)
    Request
      .Post(
        relativePath = AssetsV3Path,
        // Content-MD5 lets the server verify the uploaded payload.
        body = MultipartBodyMixed(Part(metadata), Part(data, Headers("Content-MD5" -> md5(data))))
      )
      .withResultType[UploadResponse]
      .withErrorType[ErrorResponse]
      .executeSafe
  }
  // Serializer for RawAsset payloads; kept implicit so derived request serializers can pick it up.
  private implicit def RawAssetRawBodySerializer: RawBodySerializer[RawAsset] =
    RawBodySerializer.create { asset =>
      RawBody(mediaType = Some(asset.mime.str), asset.data, dataLength = asset.dataLength)
    }
}
/** Companion: wire formats, paths and helpers shared by asset clients. */
object AssetClient {
  // A downloaded file together with the SHA-256 digest of its contents.
  case class FileWithSha(file: File, sha256: Sha256)
  // Streamable asset payload: mime type, a factory for the input stream, and an optional length.
  case class RawAsset(mime: Mime, data: () => InputStream, dataLength: Option[Long])
  // Default cache expiration for downloaded assets.
  implicit val DefaultExpiryTime: Expiration = 1.hour
  val AssetsV3Path = "/assets/v3"
  /** Server-side retention policy for an uploaded asset; `value` is the wire string. */
  sealed abstract class Retention(val value: String)
  object Retention {
    case object Eternal extends Retention("eternal") //Only used for profile pics currently
    case object EternalInfrequentAccess extends Retention("eternal-infrequent_access")
    case object Persistent extends Retention("persistent")
    case object Expiring extends Retention("expiring")
    case object Volatile extends Retention("volatile")
  }
  /** Upload metadata sent as the first multipart part of an asset upload. */
  case class Metadata(public: Boolean = false, retention: Retention = Retention.Persistent)
  object Metadata {
    implicit val jsonEncoder: JsonEncoder[Metadata] = JsonEncoder.build[Metadata] { metadata => o =>
      o.put("public", metadata.public)
      o.put("retention", metadata.retention.value)
    }
  }
  /** Server response to an asset upload: remote id, optional expiry and access token. */
  case class UploadResponse(rId: RAssetId, expires: Option[Instant], token: Option[AssetToken])
  case object UploadResponse {
    implicit val jsonDecoder: JsonDecoder[UploadResponse] = new JsonDecoder[UploadResponse] {
      import JsonDecoder._
      override def apply(implicit js: JSONObject): UploadResponse =
        UploadResponse(RAssetId('key), decodeOptISOInstant('expires), decodeOptString('token).map(AssetToken))
    }
  }
  /**
   * Resolves the download path for an asset.
   * v3 assets (no conversation) use /assets/v3; conversation assets use the
   * otr path when an encryption key is present, the plain path otherwise.
   */
  def getAssetPath(rId: RAssetId, otrKey: Option[AESKey], conv: Option[RConvId]): String =
    (conv, otrKey) match {
      case (None, _)          => s"/assets/v3/${rId.str}"
      case (Some(c), None)    => s"/conversations/${c.str}/assets/${rId.str}"
      case (Some(c), Some(_)) => s"/conversations/${c.str}/otr/assets/${rId.str}"
    }
  /**
   * Computes base64 encoded md5 sum of image data.
   */
  def md5(data: LocalData): String = md5(data.inputStream)
  def md5(is: InputStream): String = AESUtils.base64(IoUtils.md5(is))
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/sync/client/AssetClient.scala | Scala | gpl-3.0 | 7,238 |
package slick.test.stream
import org.testng.annotations.{AfterClass, BeforeClass}
import slick.jdbc.{JdbcProfile, H2Profile}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
/**
 * Runs the RelationalPublisherTest suite against an in-memory H2 database
 * through the generic JDBC profile.
 */
class JdbcPublisherTest extends RelationalPublisherTest[JdbcProfile](H2Profile, 1000L) {
  import profile.api._

  /**
   * Creates (and warms up) the in-memory H2 database used by the suite.
   * `keepAliveConnection` pins one connection open so the in-memory DB
   * survives between test queries.
   */
  def createDB = {
    val db = Database.forURL("jdbc:h2:mem:DatabasePublisherTest", driver = "org.h2.Driver", keepAliveConnection = true)
    // Wait until the database has been initialized and can process queries.
    // Failures are deliberately swallowed: this is a best-effort warm-up and any
    // genuine connectivity problem will surface in the actual test queries.
    // (Unused `ex` binding replaced with `_` to avoid an unused-variable warning.)
    try Await.result(db.run(sql"select 1".as[Int]), Duration.Inf)
    catch { case NonFatal(_) => }
    db
  }
}
| AtkinsChang/slick | reactive-streams-tests/src/test/scala/slick/test/stream/JdbcPublisherTest.scala | Scala | bsd-2-clause | 678 |
package edu.duke.oit.vw.models
import edu.duke.oit.vw.utils._
import edu.duke.oit.vw.solr.Vivo
/**
 * A Vivo address attribute: a typed, labeled URI plus any remaining
 * key/value attributes not mapped to named fields.
 *
 * @param uri        the address resource URI
 * @param vivoType   the RDF type of the resource
 * @param label      human-readable label
 * @param attributes leftover attributes keyed by name (see Address.build)
 */
case class Address(uri:String,
                   vivoType: String,
                   label: String,
                   attributes:Option[Map[String, String]])
     extends VivoAttributes(uri, vivoType, label, attributes) with AddToJson
{
  // Include this address's own URI in the list of URIs contributed by the hierarchy.
  override def uris():List[String] = {
    uri :: super.uris
  }
}
/** Companion: builds [[Address]] instances from Vivo SPARQL query results. */
object Address extends AttributeParams {

  /**
   * Runs the addresses SPARQL template against Vivo and builds an Address per
   * non-empty result row.
   *
   * @param vivo         the Vivo SPARQL client
   * @param uriContext   template bindings (e.g. the subject URI)
   * @param templatePath SSP template to execute; defaults to the addresses query
   */
  def fromUri(vivo: Vivo, uriContext:Map[String, Any], templatePath: String="sparql/addresses.ssp") = {
    val data = vivo.selectFromTemplate(templatePath, uriContext)
    // Drop empty rows before building model objects (idiomatic filterNot instead
    // of filter with a negated predicate).
    val existingData = data.filterNot(_.isEmpty)
    existingData.map(build).asInstanceOf[List[Address]]
  }

  /**
   * Builds one Address from a SPARQL result row. The uri/type/label bindings
   * become named fields; everything else is preserved in `attributes`.
   */
  def build(address:Map[Symbol,String]) = {
    new Address(uri = address('uri).stripBrackets(),
                vivoType = address('type).stripBrackets(),
                label = address('label),
                attributes = parseAttributes(address, List('uri,'type,'label)))
  }
}
| OIT-ADS-Web/vivo_widgets | src/main/scala/models/Address.scala | Scala | bsd-3-clause | 1,046 |
package de.leanovate.dose.cart.model
/**
 * An option/variant of a product in the cart.
 *
 * @param name        option name
 * @param description optional free-text description
 * @param priceInCent price in euro cents (integer to avoid floating-point money errors)
 */
case class ProductOption(
                          name: String,
                          description: Option[String],
                          priceInCent: Int
                          )
| leanovate/microzon-cart | src/main/scala/de/leanovate/dose/cart/model/ProductOption.scala | Scala | mit | 230 |
package cromwell.engine.workflow
import akka.actor.{Actor, ActorRef}
import akka.testkit.{TestActorRef, TestFSMRef, TestProbe}
import com.typesafe.config.{Config, ConfigFactory}
import cromwell._
import cromwell.backend.{AllBackendInitializationData, JobExecutionMap}
import cromwell.core._
import cromwell.core.path.DefaultPathBuilder
import cromwell.engine.EngineWorkflowDescriptor
import cromwell.engine.backend.BackendSingletonCollection
import cromwell.engine.workflow.WorkflowActor._
import cromwell.engine.workflow.lifecycle.EngineLifecycleActorAbortCommand
import cromwell.engine.workflow.lifecycle.execution.WorkflowExecutionActor.{WorkflowExecutionAbortedResponse, WorkflowExecutionFailedResponse, WorkflowExecutionSucceededResponse}
import cromwell.engine.workflow.lifecycle.finalization.CopyWorkflowLogsActor
import cromwell.engine.workflow.lifecycle.finalization.WorkflowFinalizationActor.{StartFinalizationCommand, WorkflowFinalizationSucceededResponse}
import cromwell.engine.workflow.lifecycle.initialization.WorkflowInitializationActor.{WorkflowInitializationAbortedResponse, WorkflowInitializationFailedResponse}
import cromwell.engine.workflow.lifecycle.materialization.MaterializeWorkflowDescriptorActor.MaterializeWorkflowDescriptorFailureResponse
import cromwell.engine.workflow.workflowstore.{StartableState, Submitted}
import cromwell.util.SampleWdl.ThreeStep
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._
/**
 * FSM tests for WorkflowActor: verifies that the finalization phase runs (or is
 * skipped) correctly for every terminal transition — init failure/abort,
 * execution failure/abort/success — and that workflow logs are copied when
 * materialization fails. Probes stand in for the lifecycle, finalization and
 * log-copy actors so each transition can be asserted in isolation.
 */
class WorkflowActorSpec extends CromwellTestKitWordSpec with WorkflowDescriptorBuilder with BeforeAndAfter with Eventually {
  override implicit val actorSystem = system
  // Service registry stub that ignores every message.
  val mockServiceRegistryActor = TestActorRef(new Actor {
    override def receive = {
      case _ => // No action
    }
  })
  val mockDir = DefaultPathBuilder.get("/where/to/copy/wf/logs")
  // Workflow options pointing final logs at mockDir, so log-copy messages can be asserted.
  val mockWorkflowOptions = s"""{ "final_workflow_log_dir" : "$mockDir" }"""
  // Reset per test in `before` so each test gets a fresh id.
  var currentWorkflowId: WorkflowId = _
  val currentLifecycleActor = TestProbe()
  val workflowSources = ThreeStep.asWorkflowSources(workflowOptions = mockWorkflowOptions)
  val descriptor = createMaterializedEngineWorkflowDescriptor(WorkflowId.randomId(), workflowSources = workflowSources)
  val supervisorProbe = TestProbe()
  val deathwatch = TestProbe()
  val finalizationProbe = TestProbe()
  var copyWorkflowLogsProbe: TestProbe = _
  // Short window used to assert that a message is NOT sent.
  val AwaitAlmostNothing = 100.milliseconds
  before {
    currentWorkflowId = WorkflowId.randomId()
    copyWorkflowLogsProbe = TestProbe()
  }
  // Builds a MockWorkflowActor already advanced into `state`, with probes wired
  // in for finalization, log copying and the current lifecycle actor.
  private def createWorkflowActor(state: WorkflowActorState) = {
    val actor = TestFSMRef(
      factory = new MockWorkflowActor(
        finalizationProbe = finalizationProbe,
        workflowId = currentWorkflowId,
        startState = Submitted,
        workflowSources = workflowSources,
        conf = ConfigFactory.load,
        ioActor = system.actorOf(SimpleIoActor.props),
        serviceRegistryActor = mockServiceRegistryActor,
        workflowLogCopyRouter = copyWorkflowLogsProbe.ref,
        jobStoreActor = system.actorOf(AlwaysHappyJobStoreActor.props),
        subWorkflowStoreActor = system.actorOf(AlwaysHappySubWorkflowStoreActor.props),
        callCacheReadActor = system.actorOf(EmptyCallCacheReadActor.props),
        callCacheWriteActor = system.actorOf(EmptyCallCacheWriteActor.props),
        dockerHashActor = system.actorOf(EmptyDockerHashActor.props),
        jobTokenDispenserActor = TestProbe().ref
      ),
      supervisor = supervisorProbe.ref)
    actor.setState(stateName = state, stateData = WorkflowActorData(Option(currentLifecycleActor.ref), Option(descriptor),
      AllBackendInitializationData.empty, StateCheckpoint(InitializingWorkflowState), Submitted))
    actor
  }
  implicit val TimeoutDuration = CromwellTestKitSpec.TimeoutDuration
  "WorkflowActor" should {
    "run Finalization actor if Initialization fails" in {
      val actor = createWorkflowActor(InitializingWorkflowState)
      deathwatch watch actor
      actor ! WorkflowInitializationFailedResponse(Seq(new Exception("Materialization Failed")))
      finalizationProbe.expectMsg(StartFinalizationCommand)
      actor.stateName should be(FinalizingWorkflowState)
      actor ! WorkflowFinalizationSucceededResponse
      // A failed workflow must be reported to the supervisor with the right id.
      supervisorProbe.expectMsgPF(TimeoutDuration) { case x: WorkflowFailedResponse => x.workflowId should be(currentWorkflowId) }
      deathwatch.expectTerminated(actor)
    }
    "run Finalization actor if Initialization is aborted" in {
      val actor = createWorkflowActor(InitializingWorkflowState)
      deathwatch watch actor
      actor ! AbortWorkflowCommand
      eventually { actor.stateName should be(WorkflowAbortingState) }
      // The abort must be forwarded to the lifecycle actor before finalization runs.
      currentLifecycleActor.expectMsgPF(TimeoutDuration) {
        case EngineLifecycleActorAbortCommand => actor ! WorkflowInitializationAbortedResponse
      }
      finalizationProbe.expectMsg(StartFinalizationCommand)
      actor.stateName should be(FinalizingWorkflowState)
      actor ! WorkflowFinalizationSucceededResponse
      // Aborts are not reported as failures to the supervisor.
      supervisorProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
    "run Finalization if Execution fails" in {
      val actor = createWorkflowActor(ExecutingWorkflowState)
      deathwatch watch actor
      actor ! WorkflowExecutionFailedResponse(Map.empty, new Exception("Execution Failed"))
      finalizationProbe.expectMsg(StartFinalizationCommand)
      actor.stateName should be(FinalizingWorkflowState)
      actor ! WorkflowFinalizationSucceededResponse
      supervisorProbe.expectMsgPF(TimeoutDuration) { case x: WorkflowFailedResponse => x.workflowId should be(currentWorkflowId) }
      deathwatch.expectTerminated(actor)
    }
    "run Finalization actor if Execution is aborted" in {
      val actor = createWorkflowActor(ExecutingWorkflowState)
      deathwatch watch actor
      actor ! AbortWorkflowCommand
      eventually { actor.stateName should be(WorkflowAbortingState) }
      currentLifecycleActor.expectMsgPF(CromwellTestKitSpec.TimeoutDuration) {
        case EngineLifecycleActorAbortCommand =>
          actor ! WorkflowExecutionAbortedResponse(Map.empty)
      }
      finalizationProbe.expectMsg(StartFinalizationCommand)
      actor.stateName should be(FinalizingWorkflowState)
      actor ! WorkflowFinalizationSucceededResponse
      supervisorProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
    "run Finalization actor if Execution succeeds" in {
      val actor = createWorkflowActor(ExecutingWorkflowState)
      deathwatch watch actor
      actor ! WorkflowExecutionSucceededResponse(Map.empty, CallOutputs.empty)
      finalizationProbe.expectMsg(StartFinalizationCommand)
      actor.stateName should be(FinalizingWorkflowState)
      actor ! WorkflowFinalizationSucceededResponse
      supervisorProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
    // Before initialization there is nothing to finalize: the actor just stops.
    "not run Finalization actor if aborted when in WorkflowUnstartedState" in {
      val actor = createWorkflowActor(WorkflowUnstartedState)
      deathwatch watch actor
      actor ! AbortWorkflowCommand
      finalizationProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
    "not run Finalization actor if aborted when in MaterializingWorkflowDescriptorState" in {
      val actor = createWorkflowActor(MaterializingWorkflowDescriptorState)
      deathwatch watch actor
      actor ! AbortWorkflowCommand
      finalizationProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
    "copy workflow logs in the event of MaterializeWorkflowDescriptorFailureResponse" in {
      val actor = createWorkflowActor(MaterializingWorkflowDescriptorState)
      deathwatch watch actor
      copyWorkflowLogsProbe.expectNoMsg(AwaitAlmostNothing)
      actor ! MaterializeWorkflowDescriptorFailureResponse(new Exception("Intentionally failing workflow materialization to test log copying"))
      // Logs must be copied to the configured final_workflow_log_dir even on materialization failure.
      copyWorkflowLogsProbe.expectMsg(CopyWorkflowLogsActor.Copy(currentWorkflowId, mockDir))
      finalizationProbe.expectNoMsg(AwaitAlmostNothing)
      deathwatch.expectTerminated(actor)
    }
  }
}
/**
 * Test double for WorkflowActor that substitutes a probe for the finalization
 * actor (via the overridden factory method) while delegating everything else
 * to the real implementation. Runs with an empty backend singleton collection
 * and serverMode = true.
 *
 * NOTE(review): the override parameter `worfklowOutputs` is a typo of
 * "workflowOutputs"; it is unused here, and renaming it could break callers
 * using named arguments, so it is left as-is.
 */
class MockWorkflowActor(val finalizationProbe: TestProbe,
                        workflowId: WorkflowId,
                        startState: StartableState,
                        workflowSources: WorkflowSourceFilesCollection,
                        conf: Config,
                        ioActor: ActorRef,
                        serviceRegistryActor: ActorRef,
                        workflowLogCopyRouter: ActorRef,
                        jobStoreActor: ActorRef,
                        subWorkflowStoreActor: ActorRef,
                        callCacheReadActor: ActorRef,
                        callCacheWriteActor: ActorRef,
                        dockerHashActor: ActorRef,
                        jobTokenDispenserActor: ActorRef) extends WorkflowActor(workflowId, startState, workflowSources, conf, ioActor, serviceRegistryActor, workflowLogCopyRouter, jobStoreActor, subWorkflowStoreActor, callCacheReadActor, callCacheWriteActor, dockerHashActor, jobTokenDispenserActor, BackendSingletonCollection(Map.empty), serverMode = true) {

  // Always hand back the probe so tests can intercept finalization messages.
  override def makeFinalizationActor(workflowDescriptor: EngineWorkflowDescriptor, jobExecutionMap: JobExecutionMap, worfklowOutputs: CallOutputs) = finalizationProbe.ref
}
| ohsu-comp-bio/cromwell | engine/src/test/scala/cromwell/engine/workflow/WorkflowActorSpec.scala | Scala | bsd-3-clause | 9,417 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.test.behavior
import java.io.ByteArrayOutputStream
import java.util.Base64
import akka.http.scaladsl.model.{ContentTypes, Uri}
import akka.stream.IOResult
import akka.stream.scaladsl.{Sink, StreamConverters}
import akka.util.{ByteString, ByteStringBuilder}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{AttachmentSupport, CacheChangeNotification, NoDocumentException}
import org.apache.openwhisk.core.entity.Attachments.{Attached, Attachment, Inline}
import org.apache.openwhisk.core.entity.test.ExecHelpers
import org.apache.openwhisk.core.entity.{CodeExec, DocInfo, EntityName, WhiskAction}
import scala.concurrent.duration.DurationInt
/**
 * Shared behavior suite exercising attachment handling of an ArtifactStore
 * implementation: naming on update, inlining of small attachments, SHA
 * verification, large payloads, and cleanup on delete. Each test is wrapped in
 * `retry(...)` because remote stores can be flaky; `afterEach()` is called at
 * the top of each retry attempt to reset state from a failed previous attempt.
 */
trait ArtifactStoreAttachmentBehaviors extends ArtifactStoreBehaviorBase with ExecHelpers {
  behavior of s"${storeType}ArtifactStore attachments"
  private val namespace = newNS()
  private val attachmentHandler = Some(WhiskAction.attachmentHandler _)
  private implicit val cacheUpdateNotifier: Option[CacheChangeNotification] = None
  // Flaky-test mitigation: retry each behavior up to 5 times, 1s apart.
  private val retriesOnTestFailures = 5
  private val waitBeforeRetry = 1.second
  it should "generate different attachment name on update" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          implicit val tid: TransactionId = transid()
          val exec = javaDefault(nonInlinedCode(entityStore), Some("hello"))
          val javaAction =
            WhiskAction(namespace, EntityName("attachment_unique-" + System.currentTimeMillis()), exec)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          //Change attachment to inline one otherwise WhiskAction would not go for putAndAttach
          val action2Updated = action2.copy(exec = exec).revision[WhiskAction](i1.rev)
          val i2 = WhiskAction.put(entityStore, action2Updated, old = Some(action2)).futureValue
          val action3 = entityStore.get[WhiskAction](i2, attachmentHandler).futureValue
          docsToDelete += ((entityStore, i2))
          // Each update must get a fresh attachment name.
          attached(action2).attachmentName should not be attached(action3).attachmentName
          //Check that attachment name is actually a uri
          val attachmentUri = Uri(attached(action2).attachmentName)
          attachmentUri.isAbsolute shouldBe true
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should generate different attachment name on update not successful, retrying.."))
  }
  /**
   * This test asserts that old attachments are deleted and cannot be read again
   */
  it should "fail on reading with old non inlined attachment" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          implicit val tid: TransactionId = transid()
          val code1 = nonInlinedCode(entityStore)
          val exec = javaDefault(code1, Some("hello"))
          val javaAction =
            WhiskAction(namespace, EntityName("attachment_update_2-" + System.currentTimeMillis()), exec)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          val code2 = nonInlinedCode(entityStore)
          val exec2 = javaDefault(code2, Some("hello"))
          val action2Updated = action2.copy(exec = exec2).revision[WhiskAction](i1.rev)
          val i2 = WhiskAction.put(entityStore, action2Updated, old = Some(action2)).futureValue
          val action3 = entityStore.get[WhiskAction](i2, attachmentHandler).futureValue
          docsToDelete += ((entityStore, i2))
          // New attachment readable, old one gone.
          getAttachmentBytes(i2, attached(action3)).futureValue.result() shouldBe decode(code2)
          getAttachmentBytes(i1, attached(action2)).failed.futureValue shouldBe a[NoDocumentException]
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should fail on reading with old non inlined attachment not successful, retrying.."))
  }
  /**
   * Variant of previous test where read with old attachment should still work
   * if attachment is inlined
   */
  it should "work on reading with old inlined attachment" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          assumeAttachmentInliningEnabled(entityStore)
          implicit val tid: TransactionId = transid()
          val code1 = encodedRandomBytes(inlinedAttachmentSize(entityStore))
          val exec = javaDefault(code1, Some("hello"))
          val javaAction =
            WhiskAction(namespace, EntityName("attachment_update_2-" + System.currentTimeMillis()), exec)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          val code2 = nonInlinedCode(entityStore)
          val exec2 = javaDefault(code2, Some("hello"))
          val action2Updated = action2.copy(exec = exec2).revision[WhiskAction](i1.rev)
          val i2 = WhiskAction.put(entityStore, action2Updated, old = Some(action2)).futureValue
          val action3 = entityStore.get[WhiskAction](i2, attachmentHandler).futureValue
          docsToDelete += ((entityStore, i2))
          // Inlined attachments are embedded in the document, so the old one stays readable.
          getAttachmentBytes(i2, attached(action3)).futureValue.result() shouldBe decode(code2)
          getAttachmentBytes(i2, attached(action2)).futureValue.result() shouldBe decode(code1)
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should work on reading with old inlined attachment not successful, retrying.."))
  }
  it should "put and read large attachment" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          implicit val tid: TransactionId = transid()
          val size = Math.max(nonInlinedAttachmentSize(entityStore), getAttachmentSizeForTest(entityStore))
          val base64 = encodedRandomBytes(size)
          val exec = javaDefault(base64, Some("hello"))
          val javaAction =
            WhiskAction(namespace, EntityName("attachment_large-" + System.currentTimeMillis()), exec)
          //Have more patience as reading large attachments take time specially for remote
          //storage like Cosmos
          implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 1.minute)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          val action3 = WhiskAction.get(entityStore, i1.id, i1.rev).futureValue
          docsToDelete += ((entityStore, i1))
          // Large attachments are stored externally as octet-stream with known length and digest.
          attached(action2).attachmentType shouldBe ContentTypes.`application/octet-stream`
          attached(action2).length shouldBe Some(size)
          attached(action2).digest should not be empty
          action3.exec shouldBe exec
          inlined(action3).value shouldBe base64
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should put and read large attachment not successful, retrying.."))
  }
  it should "inline small attachments" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          assumeAttachmentInliningEnabled(entityStore)
          implicit val tid: TransactionId = transid()
          // One byte below the inlining threshold, so the attachment must be inlined.
          val attachmentSize = inlinedAttachmentSize(entityStore) - 1
          val base64 = encodedRandomBytes(attachmentSize)
          val exec = javaDefault(base64, Some("hello"))
          val javaAction = WhiskAction(namespace, EntityName("attachment_inline" + System.currentTimeMillis()), exec)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          val action3 = WhiskAction.get(entityStore, i1.id, i1.rev).futureValue
          docsToDelete += ((entityStore, i1))
          action3.exec shouldBe exec
          inlined(action3).value shouldBe base64
          // Inlined attachments use the in-memory URI scheme instead of external storage.
          val a = attached(action2)
          val attachmentUri = Uri(a.attachmentName)
          attachmentUri.scheme shouldBe AttachmentSupport.MemScheme
          a.length shouldBe Some(attachmentSize)
          a.digest should not be empty
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should inline small attachments not successful, retrying.."))
  }
  it should "throw NoDocumentException for non existing attachment" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          implicit val tid: TransactionId = transid()
          val attachmentName = "foo-" + System.currentTimeMillis()
          // Prefix with the attachment store's scheme when one is configured.
          val attachmentId =
            getAttachmentStore(entityStore).map(s => s"${s.scheme}:$attachmentName").getOrElse(attachmentName)
          val sink = StreamConverters.fromOutputStream(() => new ByteArrayOutputStream())
          entityStore
            .readAttachment[IOResult](
              DocInfo ! ("non-existing-doc", "42"),
              Attached(attachmentId, ContentTypes.`application/octet-stream`),
              sink)
            .failed
            .futureValue shouldBe a[NoDocumentException]
        },
        retriesOnTestFailures,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should throw NoDocumentException for non existing attachment not successful, retrying.."))
  }
  it should "delete attachment on document delete" in {
    org.apache.openwhisk.utils
      .retry(
        {
          afterEach()
          val attachmentStore = getAttachmentStore(entityStore)
          assume(attachmentStore.isDefined, "ArtifactStore does not have attachmentStore configured")
          implicit val tid: TransactionId = transid()
          val size = nonInlinedAttachmentSize(entityStore)
          val base64 = encodedRandomBytes(size)
          val exec = javaDefault(base64, Some("hello"))
          val javaAction =
            WhiskAction(namespace, EntityName("attachment_unique-" + System.currentTimeMillis(), exec)
          val i1 = WhiskAction.put(entityStore, javaAction, old = None).futureValue
          val action2 = entityStore.get[WhiskAction](i1, attachmentHandler).futureValue
          WhiskAction.del(entityStore, i1).futureValue shouldBe true
          // Deleting the document must also delete its externally-stored attachment.
          val attachmentName = Uri(attached(action2).attachmentName).path.toString
          attachmentStore.get
            .readAttachment(i1.id, attachmentName, byteStringSink())
            .failed
            .futureValue shouldBe a[NoDocumentException]
        },
        // Only retry when an attachment store is actually configured; otherwise the
        // assume() short-circuits and a single attempt suffices.
        if (getAttachmentStore(entityStore).isDefined) retriesOnTestFailures else 1,
        Some(waitBeforeRetry),
        Some(
          s"${this.getClass.getName} > ${storeType}ArtifactStore attachments should delete attachment on document delete not successful, retrying.."))
  }
  // Extracts the exec code as an external (Attached) reference.
  private def attached(a: WhiskAction): Attached =
    a.exec.asInstanceOf[CodeExec[Attachment[Nothing]]].code.asInstanceOf[Attached]
  // Extracts the exec code as an inlined string.
  private def inlined(a: WhiskAction): Inline[String] =
    a.exec.asInstanceOf[CodeExec[Attachment[String]]].code.asInstanceOf[Inline[String]]
  // Reads an attachment's raw bytes into a ByteStringBuilder.
  private def getAttachmentBytes(docInfo: DocInfo, attached: Attached) = {
    implicit val tid: TransactionId = transid()
    entityStore.readAttachment(docInfo, attached, byteStringSink())
  }
  // Sink that accumulates streamed chunks into a single ByteStringBuilder.
  private def byteStringSink() = {
    Sink.fold[ByteStringBuilder, ByteString](new ByteStringBuilder)((builder, b) => builder ++= b)
  }
  // Decodes a base64 code string into its raw bytes for comparison.
  private def decode(s: String): ByteString = ByteString(Base64.getDecoder.decode(s))
}
| RSulzmann/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/test/behavior/ArtifactStoreAttachmentBehaviors.scala | Scala | apache-2.0 | 12,889 |
package spire
package example
import spire.algebra._
import spire.implicits._
import scala.util.Random.nextInt
import CrossValidation._
import spire.scalacompat.parallelSeq
/**
* An example of constructing Random Forests for both regression and
* classification. This example shows off the utility of vector spaces (in this
* case `CoordinateSpace`), fields, and orders to create random forests.
*/
object RandomForestExample extends App {
  // The Iris data set uses `Vector[Rational]`.
  testClassification(DataSet.Iris, RandomForestOptions())
  // The Yeast data set uses `Array[Double]`.
  testClassification(DataSet.Yeast, RandomForestOptions(
    numAxesSample = Some(2), numPointsSample = Some(200),
    numTrees = Some(200), minSplitSize = Some(3)))
  // The MPG data set uses `Array[Double]`.
  testRegression[Array[Double], Double](DataSet.MPG, RandomForestOptions(numPointsSample = Some(200), numTrees = Some(50)))

  /**
   * Cross-validates a random-forest classifier on `dataset` and prints its
   * accuracy as a percentage. Works for any vector type V, scalar F and
   * label type K for which the required type-class instances exist.
   */
  def testClassification[V, @sp(Double) F, K](dataset: DataSet[V, F, K], opts: RandomForestOptions)(implicit order: Order[F], classTagV: ClassTag[V], classTagK: ClassTag[K], real: IsReal[F]): Unit = {
    println(s"\\n${dataset.describe}\\n")
    println(s"Cross-validating ${dataset.name} with random forest classification...")
    val accuracy = crossValidateClassification(dataset) { implicit space => data =>
      RandomForest.classification(data, opts)
    }
    println("... accuracy of %.2f%%\\n" format (real.toDouble(accuracy) * 100))
  }

  /**
   * Cross-validates a random-forest regressor on `dataset` and prints the
   * resulting R^2 (coefficient of determination).
   */
  def testRegression[V, @sp(Double) F](dataset: DataSet[V, F, F], opts: RandomForestOptions)(implicit order: Order[F], classTagV: ClassTag[V], classTagF: ClassTag[F], real: IsReal[F]): Unit = {
    println(s"\\n${dataset.describe}\\n")
    println(s"Cross-validating ${dataset.name} with random forest regression...")
    val rSquared = crossValidateRegression(dataset) { implicit space => data =>
      RandomForest.regression(data, opts)
    }
    println("... R^2 of %.3f" format real.toDouble(rSquared))
  }
}
/**
* Random forests have a lot of knobs, so they are all stored in this class
* for ease-of-use.
*/
/**
 * Tunable knobs for random-forest construction. A `None` leaves the choice to
 * the forest builder (presumably a data-derived default — confirm against the
 * builder's option-resolution code, which converts these to FixedOptions).
 */
case class RandomForestOptions(
  numAxesSample: Option[Int] = None, // # of variables sampled each split.
  numPointsSample: Option[Int] = None, // # of points sampled per tree.
  numTrees: Option[Int] = None, // # of trees created.
  minSplitSize: Option[Int] = None, // Min. node size required for split.
  parallel: Boolean = true) // Build trees in parallel.
/**
* The common bits between regression and classification random forests. The
* only real difference is how we determine the "disparity" or "error" in a
* region of the tree. So, our outputs all belong to some type we don't really
* care about. We then have a way of determining the error of some subset of
* these outputs using the `Region`.
*/
trait RandomForest[V, @sp(Double) F, @sp(Double) K] {

  // The coordinate space of the input vectors and the field of its scalars.
  implicit def V: CoordinateSpace[V, F]
  implicit def F: Field[F] = V.scalar
  implicit def order: Order[F]
  implicit def vectorClassTag: ClassTag[V]

  // We need to be able to incrementally update the disparity. This is because,
  // for performance reasons, we want to do a linear sweep of some axis in a
  // region, maintaining the disparity of the region before the sweep line and
  // the region after the sweep line. We do this by updating the disparity as
  // the sweep line passes over a point, removing it from one region and adding
  // it to the other.

  /** A region of output values whose disparity can be updated incrementally. */
  protected trait RegionLike {
    /** Returns this region with output `k` added. */
    def +(k: K): Region
    /** Returns this region with one occurrence of `k` removed. */
    def -(k: K): Region
    /** The disparity ("error") of this region. */
    def error: F
    /** The representative prediction for this region (e.g. mean or mode). */
    def value: K
  }

  /** Factory for the concrete region representation. */
  protected trait RegionCompanion {
    def empty: Region
  }

  // Concrete subclasses fix the region representation and supply its factory.
  protected type Region <: RegionLike
  protected def Region: RegionCompanion

  // A forest is just a bunch of trees.
  protected case class Forest(trees: List[DecisionTree[V, F, K]])

  // A version `RandomForestOptions` that doesn't have any unknown values.
  protected case class FixedOptions(
    numAxesSample: Int,
    numPointsSample: Int,
    numTrees: Int,
    minSplitSize: Int,
    parallel: Boolean)

  /**
   * Construct a random forest.
   */
  protected def randomForest(data: Array[V], outputs: Array[K], opts: FixedOptions): Forest = {
    require(opts.numAxesSample <= V.dimensions, "Cannot sample more dimension than exist in V.")
    require(data.length == outputs.length, "Number of dependent and independent variables must match.")

    // Selects a set of `m` predictors to use as coordinate indices. The
    // sampling is done using a variant of Knuth's shuffle.
    def predictors(): Array[Int] = {
      val indices = new Array[Int](opts.numAxesSample)
      cfor(0)(_ < indices.length, _ + 1) { i => indices(i) = i }
      // Sweep the remaining axes; each has a chance to displace one of the
      // initially-selected indices, yielding a uniform sample without
      // materializing the full permutation.
      cfor(V.dimensions - 1)(_ >= indices.length, _ - 1) { i =>
        val j = nextInt(i + 1)
        if (j < indices.length)
          indices(j) = i
      }
      indices
    }

    // Randomly samples `n` points with replacement from `data`. Note that our
    // sample is actually an array of indices.
    def sample(): Array[Int] = {
      val sample = new Array[Int](opts.numPointsSample)
      cfor(0)(_ < sample.length, _ + 1) { i =>
        sample(i) = nextInt(data.length)
      }
      sample
    }

    // Convenience method to quickly create a full region from a set of
    // members.
    def region(members: Array[Int]): Region = {
      var d = Region.empty
      cfor(0)(_ < members.length, _ + 1) { i =>
        d += outputs(members(i))
      }
      d
    }

    // Grows a decision tree from a single region. The tree will keep growing
    // until we hit the minimum region size.
    // NOTE: `members` is sorted in place (qsortBy mutates the array), which is
    // safe because each call owns its own slice produced by take/drop below.
    def growTree(members: Array[Int]): DecisionTree[V, F, K] = {
      if (members.length < opts.minSplitSize) {
        Leaf(region(members).value)
      } else {
        val region0 = region(members)
        val vars = predictors()
        var minError = region0.error
        var minVar = -1
        var minIdx = -1
        cfor(0)(_ < vars.length, _ + 1) { i =>
          var axis = vars(i)
          var leftRegion = Region.empty
          var rightRegion = region0
          // To determine the optimal split point along an axis, we first sort
          // all the members along this axis. This let's us use a sweep-line to
          // update the left/right regions in O(1) time, so our total time to
          // check is dominated by sorting in O(n log n).
          members.qsortBy(data(_).coord(axis))
          cfor(0)(_ < (members.length - 1), _ + 1) { j =>
            // We move point j from the right region to the left and see if our
            // error is reduced.
            leftRegion += outputs(members(j))
            rightRegion -= outputs(members(j))
            // Weighted average of the two regions' errors.
            val error = (leftRegion.error * (j + 1) +
              rightRegion.error * (members.length - j - 1)) / members.length
            if (error < minError) {
              minError = error
              minVar = axis
              minIdx = j
            }
          }
        }
        // If we can never do better than our initial region, then split the
        // middle of some random axis -- we can probably do better here. It
        // would actually be nice try splitting again with a new set of
        // predictors, but we'd need a way to bound the number of retries.
        if (minIdx < 0) {
          minVar = vars(vars.length - 1)
          minIdx = members.length / 2
        }
        // We could do this in a single linear scan, but this is an example.
        // `members` is currently sorted by the last axis examined, so a
        // re-sort is only needed when the best split used a different axis.
        if (minVar != vars(vars.length - 1)) { // Try to avoid a sort if we can.
          members.qsortBy(data(_).coord(minVar))
        }
        // We split the region directly between the left's furthest right point
        // and the right's furthest left point.
        val boundary = (data(members(minIdx)).coord(minVar) +
          data(members(minIdx + 1)).coord(minVar)) / 2
        val left = members take (minIdx + 1)
        val right = members drop (minIdx + 1)
        Split(minVar, boundary, growTree(left), growTree(right))
      }
    }

    // Random forests are embarassingly parallel. Except for very small
    // datasets, there is no reason not to parallelize the algorithm.
    if (opts.parallel) {
      Forest(parallelSeq((1 to opts.numTrees).toList).map({ _ =>
        growTree(sample())
      }).toList)
    } else {
      Forest(List.fill(opts.numTrees)(growTree(sample())))
    }
  }

  /** Turns a trained forest into a prediction function. */
  protected def fromForest(forest: Forest): V => K

  /** Sensible defaults for a data set of the given size. */
  protected def defaultOptions(size: Int): FixedOptions

  // Fills any unspecified options with the implementation's defaults.
  private def fixOptions(size: Int, options: RandomForestOptions): FixedOptions = {
    val defaults = defaultOptions(size)
    FixedOptions(
      options.numAxesSample getOrElse defaults.numAxesSample,
      options.numPointsSample getOrElse defaults.numPointsSample,
      options.numTrees getOrElse defaults.numTrees,
      options.minSplitSize getOrElse defaults.minSplitSize,
      options.parallel)
  }

  /** Trains a forest on `data`/`out` and returns the prediction function. */
  def apply(data: Array[V], out: Array[K], options: RandomForestOptions) = {
    fromForest(randomForest(data, out, fixOptions(data.length, options)))
  }
}
/**
* A `RandomForest` implementation for regression. In regression, the output
* type is assumed to lie in the same field as the input vectors scalars. The
* final predicted output is the average of the individual tress output (which
* itself is just the mean of all outputs in the region the point lands in.
*/
class RandomForestRegression[V, @sp(Double) F](implicit val V: CoordinateSpace[V, F],
    val order: Order[F], val vectorClassTag: ClassTag[V]) extends RandomForest[V, F, F] {

  // Our "disparity" measure is just the squared error of the region.
  // We could be more careful here and use a "stable" incremental mean and
  // variance, like that described in [1], but this is simpler for now.
  // [1]: http://nfs-uxsup.csx.cam.ac.uk/~fanf2/hermes/doc/antiforgery/stats.pdf

  /** Tracks a running sum, sum of squares and count for a region. */
  protected final class SquaredError(sum: F, sumSq: F, count: Int) extends RegionLike {
    def +(k: F) = new SquaredError(sum + k, sumSq + (k * k), count + 1)
    def -(k: F) = new SquaredError(sum - k, sumSq - (k * k), count - 1)
    // Variance of the region's outputs: E[X^2] - E[X]^2.
    def error: F = sumSq / count - (sum / count) ** 2
    // Prediction for the region is the mean output.
    def value: F = sum / count
  }

  protected type Region = SquaredError

  object Region extends RegionCompanion {
    def empty = new SquaredError(F.zero, F.zero, 0)
  }

  protected def defaultOptions(size: Int): FixedOptions = {
    // Use roughly a third of the axes (at least 2 when available) and two
    // thirds of the points per tree.
    val sampledAxes = math.max(V.dimensions / 3, math.min(V.dimensions, 2))
    val sampledPoints = math.max(size * 2 / 3, 1)
    FixedOptions(sampledAxes, sampledPoints, size, 5, true)
  }

  // The forest's prediction is the mean of the individual trees' predictions.
  protected def fromForest(forest: Forest): V => F =
    input => forest.trees.map(tree => tree(input)).qmean
}
/**
* A `RandomForest` implementation for classification. In this case, the
* outputs (dependent variable) belongs to some type `K`. This type needs to be
* a well behaved Java object as its `equals` and `hashCode` will be used to
* determine equality of classes. This implementation uses a majority vote
* method to determine classification. Each region in a tree is associated with
* the most popular class in that region. Ties are broken randomly (not really).
* Within a forest, each tree casts its vote for classification of a point and
* the majority wins. Again, ties are broken randomly (again, not really).
*/
class RandomForestClassification[V, @sp(Double) F, K](implicit val V: CoordinateSpace[V, F],
    val order: Order[F], val vectorClassTag: ClassTag[V]) extends RandomForest[V, F, K] {

  // Our "disparity" measure here is the Gini index. It basically measures how
  // homogeneous our region is, giving regions of high variability higher
  // scores.

  /** Region tracking per-class counts, scored by Gini impurity. */
  protected final class GiniIndex(m: Map[K, Int]) extends RegionLike {
    def +(k: K) = new GiniIndex(m + (k -> (m.getOrElse(k, 0) + 1)))
    def -(k: K) = new GiniIndex(m + (k -> (m.getOrElse(k, 0) - 1)))
    // Gini impurity: sum_i p_i * (1 - p_i), equivalently 1 - sum_i p_i^2.
    // BUG FIX: the previous version summed the bare probabilities p_i, which
    // always totals 1, so every candidate split scored the same as its parent
    // region and splits were never chosen on merit.
    def error: F = {
      val n = F.fromInt(m.foldLeft(0)(_ + _._2))
      m.foldLeft(F.zero) { case (idx, (k, cnt)) =>
        val p = F.fromInt(cnt) / n
        idx + p * (F.one - p)
      }
    }
    // Majority vote; ties resolved by maxBy's traversal order.
    def value: K = m.maxBy(_._2)._1
  }

  protected type Region = GiniIndex

  object Region extends RegionCompanion {
    def empty = new GiniIndex(Map.empty)
  }

  protected def defaultOptions(size: Int): FixedOptions = {
    // Use roughly sqrt(dimensions) axes (at least 2 when available) and two
    // thirds of the points per tree.
    val axes = math.max(math.sqrt(V.dimensions.toDouble).toInt, math.min(V.dimensions, 2))
    val sampleSize = math.max(size * 2 / 3, 1)
    FixedOptions(axes, sampleSize, size, 5, true)
  }

  // Each tree votes for a class; the most-voted class wins.
  protected def fromForest(forest: Forest): V => K = { v =>
    forest.trees.foldLeft(Map.empty[K, Int]) { (acc, classify) =>
      val k = classify(v)
      acc + (k -> (acc.getOrElse(k, 0) + 1))
    }.maxBy(_._2)._1
  }
}
object RandomForest {

  /** Trains a regression forest and returns its prediction function. */
  def regression[V, @sp(Double) F](data: Array[V], out: Array[F], options: RandomForestOptions)(implicit
      V: CoordinateSpace[V, F], order: Order[F], ev: ClassTag[V]): V => F =
    new RandomForestRegression[V, F].apply(data, out, options)

  /** Convenience overload taking any iterable collections. */
  def regression[V, @sp(Double) F](data: Iterable[V], out: Iterable[F],
      options: RandomForestOptions)(implicit V: CoordinateSpace[V, F], order: Order[F],
      classTagV: ClassTag[V], classTagF: ClassTag[F]): V => F =
    regression(data.toArray, out.toArray, options)

  /** Convenience overload taking (input, output) pairs. */
  def regression[V, @sp(Double) F](data: Iterable[(V, F)], options: RandomForestOptions)(implicit
      V: CoordinateSpace[V, F], order: Order[F],
      classTagV: ClassTag[V], classTagF: ClassTag[F]): V => F = {
    val (inputs, outputs) = data.unzip
    regression(inputs.toArray, outputs.toArray, options)
  }

  /** Trains a classification forest and returns its prediction function. */
  def classification[V, @sp(Double) F, K](data: Array[V], out: Array[K], options: RandomForestOptions)(implicit
      V: CoordinateSpace[V, F], order: Order[F], ev: ClassTag[V]): V => K =
    new RandomForestClassification[V, F, K].apply(data, out, options)

  /** Convenience overload taking any iterable collections. */
  def classification[V, @sp(Double) F, K](data: Iterable[V], out: Iterable[K],
      options: RandomForestOptions)(implicit V: CoordinateSpace[V, F],
      order: Order[F], classTagV: ClassTag[V], classTagK: ClassTag[K]): V => K =
    classification(data.toArray, out.toArray, options)

  /** Convenience overload taking (input, label) pairs. */
  def classification[V, @sp(Double) F, K](data: Iterable[(V, K)], options: RandomForestOptions)(implicit
      V: CoordinateSpace[V, F], order: Order[F],
      classTagV: ClassTag[V], classTagK: ClassTag[K]): V => K = {
    val (inputs, labels) = data.unzip
    classification(inputs.toArray, labels.toArray, options)
  }
}
/**
* A simple decision tree. Each internal node is assigned an axis aligned
* boundary which divides the space in 2 (left and right). To determine the
* value of an input point, we simple determine which side of the boundary line
* the input lies on, then recurse on that side. When we reach a leaf node, we
* output its value.
*/
/** Evaluates the tree on a point by walking from the root to a leaf. */
sealed trait DecisionTree[V, F, K] {
  def apply(v: V)(implicit V: CoordinateSpace[V, F], F: Order[F]): K = {
    // Iteratively descend: at each internal node pick the side of the
    // axis-aligned boundary that `v` falls on; stop at a leaf.
    @tailrec def descend(node: DecisionTree[V, F, K]): K = node match {
      case Leaf(k) =>
        k
      case Split(axis, boundary, left, right) =>
        descend(if (v.coord(axis) <= boundary) left else right)
    }
    descend(this)
  }
}
// Internal node: points whose coordinate `variable` is <= `boundary` descend
// into `left`; all others descend into `right`.
case class Split[V, F, K](variable: Int, boundary: F,
  left: DecisionTree[V, F, K], right: DecisionTree[V, F, K]) extends DecisionTree[V, F, K]

// Terminal node holding the predicted value for its region.
case class Leaf[V, F, K](value: K) extends DecisionTree[V, F, K]
| non/spire | examples/src/main/scala/spire/example/randomforest.scala | Scala | mit | 15,355 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.optimizer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.CastExpressionOptimization
import org.apache.spark.sql.CarbonBoundReference
import org.apache.spark.sql.CastExpr
import org.apache.spark.sql.SparkUnknownExpression
import org.apache.spark.sql.sources
import org.apache.spark.sql.types._
import org.apache.spark.sql.CarbonContainsWith
import org.apache.spark.sql.CarbonEndsWith
import org.apache.carbondata.core.metadata.datatype.DataType
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn
import org.apache.carbondata.core.scan.expression.{ColumnExpression => CarbonColumnExpression, Expression => CarbonExpression, LiteralExpression => CarbonLiteralExpression}
import org.apache.carbondata.core.scan.expression.conditional._
import org.apache.carbondata.core.scan.expression.logical.{AndExpression, FalseExpression, OrExpression}
import org.apache.carbondata.spark.CarbonAliasDecoderRelation
import org.apache.carbondata.spark.util.CarbonScalaUtil
/**
* All filter conversions are done here.
*/
object CarbonFilters {
/**
* Converts data sources filters to carbon filter predicates.
*/
/**
 * Converts a single Spark data-source filter into a Carbon filter expression.
 * Returns None for predicates that cannot be pushed down to Carbon.
 */
def createCarbonFilter(schema: StructType,
    predicate: sources.Filter): Option[CarbonExpression] = {
  // Column name -> Spark data type, used to build typed Carbon expressions.
  val dataTypeOf = schema.map(f => f.name -> f.dataType).toMap

  def createFilter(predicate: sources.Filter): Option[CarbonExpression] = {
    predicate match {
      case sources.EqualTo(name, value) =>
        Some(new EqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.Not(sources.EqualTo(name, value)) =>
        Some(new NotEqualsExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      // Null-safe equality is mapped to plain (not-)equals on the Carbon side.
      case sources.EqualNullSafe(name, value) =>
        Some(new EqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.Not(sources.EqualNullSafe(name, value)) =>
        Some(new NotEqualsExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.GreaterThan(name, value) =>
        Some(new GreaterThanExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.LessThan(name, value) =>
        Some(new LessThanExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.GreaterThanOrEqual(name, value) =>
        Some(new GreaterThanEqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.LessThanOrEqual(name, value) =>
        Some(new LessThanEqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value)))
      case sources.In(name, values) =>
        Some(new InExpression(getCarbonExpression(name),
          new ListExpression(
            convertToJavaList(values.map(f => getCarbonLiteralExpression(name, f)).toList))))
      case sources.Not(sources.In(name, values)) =>
        Some(new NotInExpression(getCarbonExpression(name),
          new ListExpression(
            convertToJavaList(values.map(f => getCarbonLiteralExpression(name, f)).toList))))
      // IsNull/IsNotNull are expressed as (not-)equals against a null literal
      // with the "null check" flag set.
      case sources.IsNull(name) =>
        Some(new EqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, null), true))
      case sources.IsNotNull(name) =>
        Some(new NotEqualsExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, null), true))
      // And: push down whichever side converted; Or: only push down when both
      // sides converted (a partial Or would be incorrect).
      case sources.And(lhs, rhs) =>
        (createFilter(lhs) ++ createFilter(rhs)).reduceOption(new AndExpression(_, _))
      case sources.Or(lhs, rhs) =>
        for {
          lhsFilter <- createFilter(lhs)
          rhsFilter <- createFilter(rhs)
        } yield {
          new OrExpression(lhsFilter, rhsFilter)
        }
      // StartsWith(prefix) becomes the range [prefix, prefix-with-last-char+1),
      // which selects exactly the strings sharing that prefix.
      case sources.StringStartsWith(name, value) if value.length > 0 =>
        val l = new GreaterThanEqualToExpression(getCarbonExpression(name),
          getCarbonLiteralExpression(name, value))
        val maxValueLimit = value.substring(0, value.length - 1) +
          (value.charAt(value.length - 1).toInt + 1).toChar
        val r = new LessThanExpression(
          getCarbonExpression(name), getCarbonLiteralExpression(name, maxValueLimit))
        Some(new AndExpression(l, r))
      // EndsWith/Contains cannot be expressed as ranges; fall back to
      // evaluating the Spark expression row-by-row on the Carbon side.
      case CarbonEndsWith(expr: Expression) =>
        Some(new SparkUnknownExpression(expr.transform {
          case AttributeReference(name, dataType, _, _) =>
            CarbonBoundReference(new CarbonColumnExpression(name.toString,
              CarbonScalaUtil.convertSparkToCarbonDataType(dataType)), dataType, expr.nullable)
        }))
      case CarbonContainsWith(expr: Expression) =>
        Some(new SparkUnknownExpression(expr.transform {
          case AttributeReference(name, dataType, _, _) =>
            CarbonBoundReference(new CarbonColumnExpression(name.toString,
              CarbonScalaUtil.convertSparkToCarbonDataType(dataType)), dataType, expr.nullable)
        }))
      case CastExpr(expr: Expression) =>
        Some(transformExpression(expr))
      case _ => None
    }
  }

  // Builds a typed Carbon column reference for `name`.
  def getCarbonExpression(name: String) = {
    new CarbonColumnExpression(name,
      CarbonScalaUtil.convertSparkToCarbonDataType(dataTypeOf(name)))
  }

  // Builds a Carbon literal; a Double compared against a STRING column is
  // kept as DOUBLE so the comparison is numeric rather than lexical.
  def getCarbonLiteralExpression(name: String, value: Any): CarbonExpression = {
    val dataTypeOfAttribute = CarbonScalaUtil.convertSparkToCarbonDataType(dataTypeOf(name))
    val dataType = if (Option(value).isDefined
      && dataTypeOfAttribute == DataType.STRING
      && value.isInstanceOf[Double]) {
      DataType.DOUBLE
    } else {
      dataTypeOfAttribute
    }
    new CarbonLiteralExpression(value, dataType)
  }

  createFilter(predicate)
}
// Check out which filters can be pushed down to carbon, remaining can be handled in spark layer.
// Mostly dimension filters are only pushed down since it is faster in carbon.
// TODO - The Filters are first converted Intermediate sources filters expression and then these
// expressions are again converted back to CarbonExpression. Instead of two step process of
// evaluating the filters it can be merged into a single one.
/**
 * Classifies which catalyst filters can be pushed down to Carbon. Filters (or
 * sub-expressions) that cannot be pushed down have their attribute references
 * accumulated into `attrList` (via the `aliasMap`) so Spark can evaluate them.
 * NOTE(review): the method returns Unit; the array built by the final
 * `flatMap(...).toArray` is discarded and only the side effect on `attrList`
 * matters — confirm before relying on the translated filters here.
 */
def selectFilters(filters: Seq[Expression],
    attrList: java.util.HashSet[AttributeReferenceWrapper],
    aliasMap: CarbonAliasDecoderRelation): Unit = {
  // `or` tells us we are underneath an Or: there, BOTH sides must translate
  // or neither can be pushed down.
  def translate(expr: Expression, or: Boolean = false): Option[sources.Filter] = {
    expr match {
      // CAUTION: the binder `or@` shadows the boolean parameter `or` inside
      // this case; `or.collect` below walks the Or *expression* tree, while
      // the named argument `or = true` still refers to the parameter.
      case or@Or(left, right) =>
        val leftFilter = translate(left, or = true)
        val rightFilter = translate(right, or = true)
        if (leftFilter.isDefined && rightFilter.isDefined) {
          Some(sources.Or(leftFilter.get, rightFilter.get))
        } else {
          // Untranslatable Or: record every attribute it touches for Spark.
          or.collect {
            case attr: AttributeReference =>
              attrList.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
          }
          None
        }
      case And(left, right) =>
        val leftFilter = translate(left, or)
        val rightFilter = translate(right, or)
        if (or) {
          // Under an Or, an And is only pushable if both conjuncts are.
          if (leftFilter.isDefined && rightFilter.isDefined) {
            (leftFilter ++ rightFilter).reduceOption(sources.And)
          } else {
            None
          }
        } else {
          (leftFilter ++ rightFilter).reduceOption(sources.And)
        }
      case EqualTo(a: Attribute, Literal(v, t)) =>
        Some(sources.EqualTo(a.name, v))
      case EqualTo(l@Literal(v, t), a: Attribute) =>
        Some(sources.EqualTo(a.name, v))
      // Cast-wrapped comparisons: delegate to the cast optimizer, which may
      // drop the cast or wrap the whole predicate as a CastExpr.
      case c@EqualTo(Cast(a: Attribute, _), Literal(v, t)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@EqualTo(Literal(v, t), Cast(a: Attribute, _)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case Not(EqualTo(a: Attribute, Literal(v, t))) =>
        Some(sources.Not(sources.EqualTo(a.name, v)))
      case Not(EqualTo(Literal(v, t), a: Attribute)) =>
        Some(sources.Not(sources.EqualTo(a.name, v)))
      case c@Not(EqualTo(Cast(a: Attribute, _), Literal(v, t))) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@Not(EqualTo(Literal(v, t), Cast(a: Attribute, _))) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case IsNotNull(a: Attribute) =>
        Some(sources.IsNotNull(a.name))
      case IsNull(a: Attribute) =>
        Some(sources.IsNull(a.name))
      // In/Not(In) are pushable only when every element is a literal.
      case Not(In(a: Attribute, list)) if !list.exists(!_.isInstanceOf[Literal]) =>
        val hSet = list.map(e => e.eval(EmptyRow))
        Some(sources.Not(sources.In(a.name, hSet.toArray)))
      case In(a: Attribute, list) if !list.exists(!_.isInstanceOf[Literal]) =>
        val hSet = list.map(e => e.eval(EmptyRow))
        Some(sources.In(a.name, hSet.toArray))
      case c@Not(In(Cast(a: Attribute, _), list)) if !list.exists(!_.isInstanceOf[Literal]) =>
        Some(CastExpr(c))
      case c@In(Cast(a: Attribute, _), list) if !list.exists(!_.isInstanceOf[Literal]) =>
        Some(CastExpr(c))
      case InSet(a: Attribute, set) =>
        Some(sources.In(a.name, set.toArray))
      case Not(InSet(a: Attribute, set)) =>
        Some(sources.Not(sources.In(a.name, set.toArray)))
      // Comparisons with the literal on the left are flipped.
      case GreaterThan(a: Attribute, Literal(v, t)) =>
        Some(sources.GreaterThan(a.name, v))
      case GreaterThan(Literal(v, t), a: Attribute) =>
        Some(sources.LessThan(a.name, v))
      case c@GreaterThan(Cast(a: Attribute, _), Literal(v, t)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@GreaterThan(Literal(v, t), Cast(a: Attribute, _)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case LessThan(a: Attribute, Literal(v, t)) =>
        Some(sources.LessThan(a.name, v))
      case LessThan(Literal(v, t), a: Attribute) =>
        Some(sources.GreaterThan(a.name, v))
      case c@LessThan(Cast(a: Attribute, _), Literal(v, t)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@LessThan(Literal(v, t), Cast(a: Attribute, _)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case GreaterThanOrEqual(a: Attribute, Literal(v, t)) =>
        Some(sources.GreaterThanOrEqual(a.name, v))
      case GreaterThanOrEqual(Literal(v, t), a: Attribute) =>
        Some(sources.LessThanOrEqual(a.name, v))
      case c@GreaterThanOrEqual(Cast(a: Attribute, _), Literal(v, t)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@GreaterThanOrEqual(Literal(v, t), Cast(a: Attribute, _)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case LessThanOrEqual(a: Attribute, Literal(v, t)) =>
        Some(sources.LessThanOrEqual(a.name, v))
      case LessThanOrEqual(Literal(v, t), a: Attribute) =>
        Some(sources.GreaterThanOrEqual(a.name, v))
      case c@LessThanOrEqual(Cast(a: Attribute, _), Literal(v, t)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case c@LessThanOrEqual(Literal(v, t), Cast(a: Attribute, _)) =>
        CastExpressionOptimization.checkIfCastCanBeRemove(c)
      case StartsWith(a: Attribute, Literal(v, t)) =>
        Some(sources.StringStartsWith(a.name, v.toString))
      case c@EndsWith(a: Attribute, Literal(v, t)) =>
        Some(CarbonEndsWith(c))
      case c@Contains(a: Attribute, Literal(v, t)) =>
        Some(CarbonContainsWith(c))
      case c@Cast(a: Attribute, _) =>
        Some(CastExpr(c))
      case others =>
        // Anything else stays in Spark; record its attributes (unless we are
        // under an Or, where the Or case already recorded them).
        if (!or) {
          others.collect {
            case attr: AttributeReference =>
              attrList.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
          }
        }
        None
    }
  }
  filters.flatMap(translate(_, false)).toArray
}
/**
 * Recursively converts a catalyst expression tree into the equivalent Carbon
 * expression tree. Sub-expressions whose data types Carbon cannot evaluate
 * fall through to a SparkUnknownExpression, which is evaluated row-by-row.
 */
def transformExpression(expr: Expression): CarbonExpression = {
  expr match {
    case Or(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new OrExpression(transformExpression(left), transformExpression(right))
    case And(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new AndExpression(transformExpression(left), transformExpression(right))
    case EqualTo(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new EqualToExpression(transformExpression(left), transformExpression(right))
    case Not(EqualTo(left, right))
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new NotEqualsExpression(transformExpression(left), transformExpression(right))
    // Null checks become (not-)equals against a null literal with the
    // "null check" flag set.
    case IsNotNull(child)
      if (isCarbonSupportedDataTypes(child)) =>
      new NotEqualsExpression(transformExpression(child), transformExpression(Literal(null)), true)
    case IsNull(child)
      if (isCarbonSupportedDataTypes(child)) =>
      new EqualToExpression(transformExpression(child), transformExpression(Literal(null)), true)
    // NOT IN with a null in the list matches no rows (SQL three-valued logic),
    // hence the FalseExpression.
    case Not(In(left, right)) if (isCarbonSupportedDataTypes(left)) =>
      if (right.contains(null)) {
        new FalseExpression(transformExpression(left))
      }
      else {
        new NotInExpression(transformExpression(left),
          new ListExpression(convertToJavaList(right.map(transformExpression)))
        )
      }
    // IN silently drops null and null-literal elements, which can never match.
    case In(left, right) if (isCarbonSupportedDataTypes(left)) =>
      new InExpression(transformExpression(left),
        new ListExpression(convertToJavaList(right.filter(_ != null).filter(!isNullLiteral(_))
          .map(transformExpression))))
    // InSet holds raw values, not expressions: wrap each as a string literal.
    case InSet(left, right) if (isCarbonSupportedDataTypes(left)) =>
      val validData = right.filter(_ != null).map { x =>
        val e = Literal(x.toString)
        transformExpression(e)
      }.toList
      new InExpression(transformExpression(left),
        new ListExpression(convertToJavaList(validData)))
    case Not(InSet(left, right)) if (isCarbonSupportedDataTypes(left)) =>
      if (right.contains(null)) {
        new FalseExpression(transformExpression(left))
      }
      else {
        val r = right.map { x =>
          val strVal = if (null == x) {
            x
          } else {
            x.toString
          }
          val e = Literal(strVal)
          transformExpression(e)
        }.toList
        new NotInExpression(transformExpression(left), new ListExpression(convertToJavaList(r)))
      }
    case GreaterThan(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new GreaterThanExpression(transformExpression(left), transformExpression(right))
    case GreaterThanOrEqual(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new GreaterThanEqualToExpression(transformExpression(left), transformExpression(right))
    case LessThan(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new LessThanExpression(transformExpression(left), transformExpression(right))
    case LessThanOrEqual(left, right)
      if (isCarbonSupportedDataTypes(left) && isCarbonSupportedDataTypes(right)) =>
      new LessThanEqualToExpression(transformExpression(left), transformExpression(right))
    case AttributeReference(name, dataType, _, _) =>
      new CarbonColumnExpression(name.toString,
        CarbonScalaUtil.convertSparkToCarbonDataType(dataType))
    case Literal(name, dataType) =>
      new CarbonLiteralExpression(name, CarbonScalaUtil.convertSparkToCarbonDataType(dataType))
    // StartsWith(prefix) becomes the half-open range
    // [prefix, prefix-with-last-char-incremented).
    case StartsWith(left, right@Literal(pattern, dataType)) if pattern.toString.size > 0 &&
      isCarbonSupportedDataTypes(left) &&
      isCarbonSupportedDataTypes
      (right) =>
      val l = new GreaterThanEqualToExpression(transformExpression(left),
        transformExpression(right))
      val maxValueLimit = pattern.toString.substring(0, pattern.toString.length - 1) +
        (pattern.toString.charAt(pattern.toString.length - 1).toInt + 1)
          .toChar
      val r = new LessThanExpression(
        transformExpression(left),
        new CarbonLiteralExpression(maxValueLimit,
          CarbonScalaUtil.convertSparkToCarbonDataType(dataType)))
      new AndExpression(l, r)
    case StringTrim(child) => transformExpression(child)
    case _ =>
      // Unsupported shape: evaluate the Spark expression itself, with its
      // attribute references bound to Carbon columns.
      new SparkUnknownExpression(expr.transform {
        case AttributeReference(name, dataType, _, _) =>
          CarbonBoundReference(new CarbonColumnExpression(name.toString,
            CarbonScalaUtil.convertSparkToCarbonDataType(dataType)), dataType, expr.nullable)
      }
      )
  }
}
/**
 * Returns true when `exp` is a Literal that represents SQL NULL, either by
 * having NullType or by holding a null value.
 *
 * BUG FIX: the previous version relied on && / || precedence in a way that
 * evaluated `exp.asInstanceOf[Literal].value` even when `exp` was null or not
 * a Literal, throwing NullPointerException / ClassCastException. A pattern
 * match makes the non-Literal and null cases return false safely.
 */
private def isNullLiteral(exp: Expression): Boolean = {
  exp match {
    case literal: Literal =>
      literal.dataType == org.apache.spark.sql.types.DataTypes.NullType ||
        literal.value == null
    case _ => false
  }
}
/** Returns true when the expression's data type can be evaluated by Carbon. */
def isCarbonSupportedDataTypes(expr: Expression): Boolean =
  expr.dataType match {
    // Primitive types Carbon evaluates natively.
    case StringType | IntegerType | LongType | DoubleType |
         FloatType | BooleanType | TimestampType =>
      true
    // Supported complex and decimal types.
    case ArrayType(_, _) | StructType(_) | DecimalType() =>
      true
    case _ =>
      false
  }
// Resolves `column` against the table's dimensions first, then its measures,
// and converts the Carbon data type back to the Spark type.
// NOTE(review): if the column is neither a dimension nor a measure,
// `carbonColumn` stays null and `carbonColumn.getDataType` throws NPE —
// confirm callers only pass known columns.
private def getActualCarbonDataType(column: String, carbonTable: CarbonTable) = {
  var carbonColumn: CarbonColumn =
    carbonTable.getDimensionByName(carbonTable.getFactTableName, column)
  val dataType = if (carbonColumn != null) {
    carbonColumn.getDataType
  } else {
    carbonColumn = carbonTable.getMeasureByName(carbonTable.getFactTableName, column)
    // Measures of any other type are widened to DOUBLE.
    carbonColumn.getDataType match {
      case DataType.INT => DataType.INT
      case DataType.SHORT => DataType.SHORT
      case DataType.LONG => DataType.LONG
      case DataType.DECIMAL => DataType.DECIMAL
      case _ => DataType.DOUBLE
    }
  }
  CarbonScalaUtil.convertCarbonToSparkDataType(dataType)
}
// Convert scala list to java list, Cannot use scalaList.asJava as while
// deserializing it is not able find the classes inside scala list and gives
// ClassNotFoundException.
private def convertToJavaList(
    scalaList: Seq[CarbonExpression]): java.util.List[CarbonExpression] = {
  // Pre-size the ArrayList and copy element by element.
  val result = new java.util.ArrayList[CarbonExpression](scalaList.size)
  for (expression <- scalaList) {
    result.add(expression)
  }
  result
}
/**
 * Reduces a list of predicate expressions to at most one expression by
 * repeatedly And-ing the first two elements together.
 *
 * Cleanup: the former `case List(left, right)` branch was unreachable — the
 * cons pattern `left :: right :: rest` already matches two-element lists
 * (with `rest == Nil`) and recursing on `List(And(left, right))` produces the
 * same result. Also marked tail-recursive so the compiler enforces it.
 */
@scala.annotation.tailrec
def preProcessExpressions(expressions: Seq[Expression]): Seq[Expression] = {
  expressions match {
    // Fold the leading pair into one And and keep reducing.
    case left :: right :: rest => preProcessExpressions(And(left, right) :: rest)
    // Zero or one expression (or a non-List Seq): nothing left to combine.
    case _ => expressions
  }
}
}
| aniketadnaik/carbondataStreamIngest | integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala | Scala | apache-2.0 | 20,490 |
/****************************************************************************
* Copyright (C) 2015 Łukasz Szpakowski. *
* *
* This software is licensed under the GNU General Public License *
* v3 or later. See the LICENSE file for the full licensing terms. *
****************************************************************************/
package pl.luckboy.issuenotifier
import java.text.SimpleDateFormat
import java.util.Date
import java.util.Calendar
import java.util.GregorianCalendar
object TextUtils
{
  // SimpleDateFormat is mutable and not thread-safe; the previous shared
  // `val` instances could be corrupted under concurrent use, so a fresh
  // formatter is created per call instead.
  private def dateSimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
  private def timeSimpleDateFormat = new SimpleDateFormat("HH:mm:ss")
  private def longSimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

  /** Formats a repository as "user/name". */
  def textFromRepository(repos: Repository) = repos.userName + "/" + repos.name

  /** Formats `date` as time-of-day when it falls on the same calendar day as
    * `currentDate`, otherwise as a plain date. */
  def textFromDate(date: Date, currentDate: Date) = {
    val calendar = new GregorianCalendar()
    calendar.setTime(date)
    val currentCalendar = new GregorianCalendar()
    currentCalendar.setTime(currentDate)
    if(calendar.get(Calendar.YEAR) == currentCalendar.get(Calendar.YEAR) &&
      calendar.get(Calendar.DAY_OF_YEAR) == currentCalendar.get(Calendar.DAY_OF_YEAR))
      timeSimpleDateFormat.format(date)
    else
      dateSimpleDateFormat.format(date)
  }

  /** Formats a date with both its date and time components. */
  def textFromDateForLongFormat(date: Date) = longSimpleDateFormat.format(date)
}
| luckboy/IssueNotifier | src/main/scala/pl/luckboy/issuenotifier/TextUtils.scala | Scala | gpl-3.0 | 1,513 |
package com.datastax.spark.connector
import com.datastax.spark.connector.cql._
import org.apache.spark.SparkContext
import org.apache.spark.sql.DataFrame
/** Provides Cassandra-specific methods on [[org.apache.spark.sql.DataFrame]] */
/** Provides Cassandra-specific methods on [[org.apache.spark.sql.DataFrame]] */
class DataFrameFunctions(dataFrame: DataFrame) extends Serializable {

  val sparkContext: SparkContext = dataFrame.sqlContext.sparkContext

  /**
   * Creates a C* table based on the DataFrame Struct provided. Optionally
   * takes in a list of partition columns or clustering columns names. When absent
   * the first column will be used as the partition key and there will be no clustering
   * keys.
   */
  def createCassandraTable(
    keyspaceName: String,
    tableName: String,
    partitionKeyColumns: Option[Seq[String]] = None,
    clusteringKeyColumns: Option[Seq[String]] = None)(
    implicit
    connector: CassandraConnector = CassandraConnector(sparkContext)): Unit = {

    // The protocol version influences how Spark types map to C* types.
    val protocolVersion = connector.
      withClusterDo(_.getConfiguration.getProtocolOptions.getProtocolVersion)
    // Derive an initial table definition straight from the DataFrame schema.
    val rawTable = TableDef.fromDataFrame(dataFrame, keyspaceName, tableName, protocolVersion)
    val columnMapping = rawTable.columnByName
    val columnNames = columnMapping.keys.toSet
    // Fall back to the schema-derived partition key (first column) and to no
    // clustering columns when the caller did not specify them.
    val partitionKeyNames = partitionKeyColumns.getOrElse(rawTable.partitionKey.map(_.columnName))
    val clusteringKeyNames = clusteringKeyColumns.getOrElse(Seq.empty)
    // Whatever is neither partition key nor clustering key is a regular column.
    val regularColumnNames = (columnNames -- (partitionKeyNames ++ clusteringKeyNames)).toSeq

    // Error helper for a requested key column that is not in the DataFrame.
    def missingColumnException(columnName: String, columnType: String) = {
      new IllegalArgumentException(
        s""""$columnName" not Found. Unable to make specified column $columnName a $columnType.
           |Available Columns: $columnNames""".stripMargin)
    }

    // Re-assign each column's role (partition key / clustering / regular)
    // according to the caller's choices; clustering columns keep their order.
    val table = rawTable.copy (
      partitionKey = partitionKeyNames
        .map(partitionKeyName =>
          columnMapping.getOrElse(partitionKeyName,
            throw missingColumnException(partitionKeyName, "Partition Key Column")))
        .map(_.copy(columnRole = PartitionKeyColumn))
      ,
      clusteringColumns = clusteringKeyNames
        .map(clusteringKeyName =>
          columnMapping.getOrElse(clusteringKeyName,
            throw missingColumnException(clusteringKeyName, "Clustering Column")))
        .zipWithIndex
        .map { case (col, index) => col.copy(columnRole = ClusteringColumn(index))}
      ,
      regularColumns = regularColumnNames
        .map(regularColumnName =>
          columnMapping.getOrElse(regularColumnName,
            throw missingColumnException(regularColumnName, "Regular Column")))
        .map(_.copy(columnRole = RegularColumn))
    )

    // Execute the generated CREATE TABLE statement.
    connector.withSessionDo(session => session.execute(table.cql))
  }
}
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/DataFrameFunctions.scala | Scala | apache-2.0 | 2,779 |
package at.logic.gapt.utils
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
object glob {
  // Splits a pattern into a literal directory prefix (no glob metacharacters,
  // ending in '/') and the glob remainder.
  private val dirAndRest = """([^*?\\[]*/)(.*)""".r

  /**
   * Returns all paths under the pattern's literal directory prefix that match
   * the glob remainder.
   * NOTE(review): the matcher built from `rest` is applied to walked paths
   * that still include the `dir` prefix (e.g. rest "*.scala" vs path
   * "dir/x.scala", where '*' does not cross '/') — looks like callers must
   * use '**'-style patterns for this to match; confirm intended usage.
   */
  def paths( pattern: String ): Seq[Path] = {
    val ( dir, rest ) = pattern match {
      case dirAndRest( d, r ) => ( d, r )
      case _ => ( ".", pattern )
    }
    val pathMatcher = FileSystems.getDefault.getPathMatcher( s"glob:$rest" )
    val res = Seq.newBuilder[Path]
    // Recursively walk the directory, collecting every matching file.
    Files.walkFileTree( Paths get dir, new SimpleFileVisitor[Path] {
      override def visitFile( file: Path, attrs: BasicFileAttributes ): FileVisitResult = {
        if ( pathMatcher matches file )
          res += file
        FileVisitResult.CONTINUE
      }
    } )
    res.result()
  }

  /** Like [[paths]] but returns normalized path strings. */
  def apply( pattern: String ): Seq[String] = paths( pattern ).map( _.normalize.toString )
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/utils/glob.scala | Scala | gpl-3.0 | 854 |
package com.bostontechnologies.amqp
import ChannelModel.{QueueBinding, Queue, Exchange}
/**
 * Small fluent DSL for declaring queue bindings:
 * `from(exchange) to (queue) usingKey "routing.key"` yields a QueueBinding.
 */
trait BindingDSL {
  def from(exchange: Exchange): To = {
    new To {
      // NOTE(review): `this.type#UsingKey` projects UsingKey off the
      // anonymous To subclass's singleton type; `UsingKey` alone would be the
      // conventional spelling — confirm before simplifying.
      def to(q: Queue): this.type#UsingKey = new this.type#UsingKey {
        def usingKey(key: String): QueueBinding = QueueBinding(q.name, exchange.name, key)
      }
    }
  }

  /** Second step of the DSL: choose the destination queue. */
  trait To {
    def to(q: Queue): UsingKey
    /** Final step: supply the routing key and build the binding. */
    trait UsingKey {
      def usingKey(key: String): QueueBinding
    }
  }
}
object BindingDSL extends BindingDSL | Forexware/scala-amqp | src/main/scala/com/bostontechnologies/amqp/BindingDSL.scala | Scala | apache-2.0 | 507 |
package org.jetbrains.plugins.scala
package lang.resolve.processor
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScTypeAliasDeclaration, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility.Expression
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.types.recursiveUpdate.ScSubstitutor
import org.jetbrains.plugins.scala.lang.resolve.{ResolveTargets, ScalaResolveResult}
import scala.collection.Set
/**
* User: Alexander Podkhalyuzin
* Date: 30.04.2010
*/
/**
 * Resolves a reference used in constructor position: for a class it produces one
 * result per (accessibility-filtered) constructor; for a type alias definition it
 * follows the aliased type to the underlying class's constructors; otherwise it
 * falls back to the named element itself.
 */
class ConstructorResolveProcessor(constr: PsiElement, refName: String, args: List[Seq[Expression]],
                                  typeArgs: Seq[ScTypeElement], kinds: Set[ResolveTargets.Value],
                                  shapeResolve: Boolean, allConstructors: Boolean)
        extends MethodResolveProcessor(constr, refName, args, typeArgs, Seq.empty, kinds,
          isShapeResolve = shapeResolve, enableTupling = true) {

  override def execute(element: PsiElement, state: ResolveState): Boolean = {
    val named = element.asInstanceOf[PsiNamedElement]
    val fromType = getFromType(state)
    val initialSubstitutor = getSubst(state)
    // Fold the receiver type (when present) into the substitutor.
    val defaultSubstitutor = fromType map {
      initialSubstitutor.followUpdateThisType
    } getOrElse initialSubstitutor

    if (nameAndKindMatch(named, state)) {
      val accessible = isAccessible(named, ref)
      // When the `accessibility` flag is set, skip inaccessible elements entirely.
      if (accessibility && !accessible) return true

      def constructorIsAccessible(constructor: PsiMethod) =
        isAccessible(constructor, ref)

      // All constructors of `clazz` (filtered by accessibility when required),
      // each paired with the substitutor and `named` as the parent element.
      def constructors(clazz: PsiClass, substitutor: ScSubstitutor) = clazz.constructors filter {
        case constructor if accessibility =>
          constructorIsAccessible(constructor)
        case _ => true
      } map {
        (_, substitutor, Some(named))
      }

      // Fall back to resolving to the named element itself when no constructor
      // candidates were produced.
      def orDefault(tuples: Seq[(PsiNamedElement, ScSubstitutor, Option[PsiNamedElement])] = Seq.empty) =
        tuples match {
          case Seq() => Seq((named, defaultSubstitutor, None))
          case seq => seq
        }

      val tuples: Seq[(PsiNamedElement, ScSubstitutor, Option[PsiNamedElement])] = named match {
        case clazz: PsiClass =>
          orDefault(constructors(clazz, defaultSubstitutor))
        case _: ScTypeAliasDeclaration =>
          // A bare declaration has no right-hand side, hence no constructors.
          orDefault()
        case definition: ScTypeAliasDefinition =>
          // Follow the alias to the underlying class and take its constructors.
          val result = definition.aliasedType.toOption.toSeq flatMap {
            _.extractClassType
          } flatMap {
            case (clazz, substitutor) =>
              constructors(clazz, defaultSubstitutor.followed(substitutor))
          }
          orDefault(result)
        case _ => Seq()
      }

      addResults(tuples map {
        case (namedElement, substitutor, parentElement) =>
          val elementIsAccessible = namedElement match {
            case _: ScTypeAliasDefinition => true
            case constructor: PsiMethod if accessible =>
              constructorIsAccessible(constructor)
            case _ => accessible
          }
          new ScalaResolveResult(namedElement,
            substitutor,
            getImports(state),
            Option(state.get(ResolverEnv.nameKey)),
            parentElement = parentElement,
            boundClass = getBoundClass(state),
            fromType = fromType,
            isAccessible = elementIsAccessible)
      })
    }
    true
  }

  override def candidatesS: Set[ScalaResolveResult] = {
    // Re-wraps a result around its actual element, dropping the
    // constructor/parent information.
    def updateResult(result: ScalaResolveResult) =
      new ScalaResolveResult(result.getActualElement,
        result.substitutor,
        result.importsUsed,
        boundClass = result.boundClass,
        fromType = result.fromType,
        isAccessible = result.isAccessible)

    val candidates = super.candidatesS
    candidates.toSeq match {
      case _ if allConstructors => candidates
      case Seq() => Set.empty
      case Seq(result) => Set(result)
      case _ => candidates.map(updateResult)
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/resolve/processor/ConstructorResolveProcessor.scala | Scala | apache-2.0 | 4,124 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.{FlinkRelBuilder, FlinkRelFactories}
import org.apache.flink.table.planner.plan.utils.{AggregateUtil, ExpandUtil}
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.logical.LogicalAggregate
import org.apache.calcite.rex.{RexBuilder, RexNode}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.ImmutableBitSet
import scala.collection.JavaConversions._
/**
* This rule rewrites an aggregation query with grouping sets into
 * a regular aggregation query with expand.
*
 * This rule duplicates the input data two or more times (# number of groupSets +
 * an optional non-distinct group). This will put quite a bit of memory pressure on the used
 * aggregate and exchange operators.
*
* This rule will be used for the plan with grouping sets or the plan with distinct aggregations
* after [[FlinkAggregateExpandDistinctAggregatesRule]] applied.
*
* `FlinkAggregateExpandDistinctAggregatesRule` rewrites an aggregate query with
* distinct aggregations into an expanded double aggregation. The first aggregate has
* grouping sets in which the regular aggregation expressions and every distinct clause
* are aggregated in a separate group. The results are then combined in a second aggregate.
*
* Examples:
*
* MyTable: a: INT, b: BIGINT, c: VARCHAR(32), d: VARCHAR(32)
*
* Original records:
* +-----+-----+-----+-----+
* | a | b | c | d |
* +-----+-----+-----+-----+
* | 1 | 1 | c1 | d1 |
* +-----+-----+-----+-----+
* | 1 | 2 | c1 | d2 |
* +-----+-----+-----+-----+
* | 2 | 1 | c1 | d1 |
* +-----+-----+-----+-----+
*
* Example1 (expand for DISTINCT aggregates):
*
* SQL:
* SELECT a, SUM(DISTINCT b) as t1, COUNT(DISTINCT c) as t2, COUNT(d) as t3 FROM MyTable GROUP BY a
*
* Logical plan:
* {{{
* LogicalAggregate(group=[{0}], t1=[SUM(DISTINCT $1)], t2=[COUNT(DISTINCT $2)], t3=[COUNT($3)])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after `FlinkAggregateExpandDistinctAggregatesRule` applied:
* {{{
* LogicalProject(a=[$0], t1=[$1], t2=[$2], t3=[CAST($3):BIGINT NOT NULL])
* LogicalProject(a=[$0], t1=[$1], t2=[$2], $f3=[CASE(IS NOT NULL($3), $3, 0)])
* LogicalAggregate(group=[{0}], t1=[SUM($1) FILTER $4], t2=[COUNT($2) FILTER $5],
* t3=[MIN($3) FILTER $6])
* LogicalProject(a=[$0], b=[$1], c=[$2], t3=[$3], $g_1=[=($4, 1)], $g_2=[=($4, 2)],
* $g_3=[=($4, 3)])
* LogicalAggregate(group=[{0, 1, 2}], groups=[[{0, 1}, {0, 2}, {0}]], t3=[COUNT($3)],
* $g=[GROUPING($0, $1, $2)])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after this rule applied:
* {{{
* LogicalCalc(expr#0..3=[{inputs}], expr#4=[IS NOT NULL($t3)], ...)
* LogicalAggregate(group=[{0}], t1=[SUM($1) FILTER $4], t2=[COUNT($2) FILTER $5],
* t3=[MIN($3) FILTER $6])
* LogicalCalc(expr#0..4=[{inputs}], ... expr#10=[CASE($t6, $t5, $t8, $t7, $t9)],
* expr#11=[1], expr#12=[=($t10, $t11)], ... $g_1=[$t12], ...)
* LogicalAggregate(group=[{0, 1, 2, 4}], groups=[[]], t3=[COUNT($3)])
* LogicalExpand(projects=[{a=[$0], b=[$1], c=[null], d=[$3], $e=[1]},
* {a=[$0], b=[null], c=[$2], d=[$3], $e=[2]}, {a=[$0], b=[null], c=[null], d=[$3], $e=[3]}])
* LogicalTableSourceScan(table=[[builtin, default, MyTable]], fields=[a, b, c, d])
* }}}
*
* '$e = 1' is equivalent to 'group by a, b'
* '$e = 2' is equivalent to 'group by a, c'
* '$e = 3' is equivalent to 'group by a'
*
* Expanded records:
* +-----+-----+-----+-----+-----+
* | a | b | c | d | $e |
* +-----+-----+-----+-----+-----+ ---+---
* | 1 | 1 | null| d1 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 1 | null| c1 | d1 | 2 | records expanded by record1
* +-----+-----+-----+-----+-----+ |
* | 1 | null| null| d1 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
* | 1 | 2 | null| d2 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 1 | null| c1 | d2 | 2 | records expanded by record2
* +-----+-----+-----+-----+-----+ |
* | 1 | null| null| d2 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
* | 2 | 1 | null| d1 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 2 | null| c1 | d1 | 2 | records expanded by record3
* +-----+-----+-----+-----+-----+ |
* | 2 | null| null| d1 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
*
* Example2 (Some fields are both in DISTINCT aggregates and non-DISTINCT aggregates):
*
* SQL:
* SELECT MAX(a) as t1, COUNT(DISTINCT a) as t2, count(DISTINCT d) as t3 FROM MyTable
*
* Field `a` is both in DISTINCT aggregate and `MAX` aggregate,
* so, `a` should be outputted as two individual fields, one is for `MAX` aggregate,
* another is for DISTINCT aggregate.
*
* Expanded records:
* +-----+-----+-----+-----+
* | a | d | $e | a_0 |
* +-----+-----+-----+-----+ ---+---
* | 1 | null| 1 | 1 | |
* +-----+-----+-----+-----+ |
* | null| d1 | 2 | 1 | records expanded by record1
* +-----+-----+-----+-----+ |
* | null| null| 3 | 1 | |
* +-----+-----+-----+-----+ ---+---
* | 1 | null| 1 | 1 | |
* +-----+-----+-----+-----+ |
* | null| d2 | 2 | 1 | records expanded by record2
* +-----+-----+-----+-----+ |
* | null| null| 3 | 1 | |
* +-----+-----+-----+-----+ ---+---
* | 2 | null| 1 | 2 | |
* +-----+-----+-----+-----+ |
* | null| d1 | 2 | 2 | records expanded by record3
* +-----+-----+-----+-----+ |
* | null| null| 3 | 2 | |
* +-----+-----+-----+-----+ ---+---
*
* Example3 (expand for CUBE/ROLLUP/GROUPING SETS):
*
* SQL:
* SELECT a, c, SUM(b) as b FROM MyTable GROUP BY GROUPING SETS (a, c)
*
* Logical plan:
* {{{
* LogicalAggregate(group=[{0, 1}], groups=[[{0}, {1}]], b=[SUM($2)])
* LogicalProject(a=[$0], c=[$2], b=[$1])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after this rule applied:
* {{{
* LogicalCalc(expr#0..3=[{inputs}], proj#0..1=[{exprs}], b=[$t3])
* LogicalAggregate(group=[{0, 2, 3}], groups=[[]], b=[SUM($1)])
* LogicalExpand(projects=[{a=[$0], b=[$1], c=[null], $e=[1]},
* {a=[null], b=[$1], c=[$2], $e=[2]}])
* LogicalNativeTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* '$e = 1' is equivalent to 'group by a'
* '$e = 2' is equivalent to 'group by c'
*
* Expanded records:
* +-----+-----+-----+-----+
* | a | b | c | $e |
* +-----+-----+-----+-----+ ---+---
* | 1 | 1 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record1
* | null| 1 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
* | 1 | 2 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record2
* | null| 2 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
* | 2 | 1 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record3
* | null| 1 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
*/
class DecomposeGroupingSetsRule extends RelOptRule(
  operand(classOf[LogicalAggregate], any),
  FlinkRelFactories.FLINK_REL_BUILDER,
  "DecomposeGroupingSetsRule") {

  override def matches(call: RelOptRuleCall): Boolean = {
    val agg: LogicalAggregate = call.rel(0)
    val groupIdExprs = AggregateUtil.getGroupIdExprIndexes(agg.getAggCallList)
    // Fire only when real grouping sets exist or the aggregate contains
    // GROUPING/GROUPING_ID/GROUP_ID calls that must be lowered to literals.
    agg.getGroupSets.size() > 1 || groupIdExprs.nonEmpty
  }

  override def onMatch(call: RelOptRuleCall): Unit = {
    val agg: LogicalAggregate = call.rel(0)
    // Long data type is used to store groupValue in FlinkAggregateExpandDistinctAggregatesRule,
    // and the result of grouping function is a positive value,
    // so the max groupCount must be less than 64.
    if (agg.getGroupCount >= 64) {
      throw new TableException("group count must be less than 64.")
    }

    val aggInput = agg.getInput
    // Indexes of GROUPING/GROUP_ID-style calls; these are not real aggregations.
    val groupIdExprs = AggregateUtil.getGroupIdExprIndexes(agg.getAggCallList)

    val aggCallsWithIndexes = agg.getAggCallList.zipWithIndex

    val cluster = agg.getCluster
    val rexBuilder = cluster.getRexBuilder
    val needExpand = agg.getGroupSets.size() > 1

    val relBuilder = call.builder().asInstanceOf[FlinkRelBuilder]
    relBuilder.push(aggInput)

    val (newGroupSet, duplicateFieldMap) = if (needExpand) {
      val (duplicateFieldMap, expandIdIdxInExpand) = ExpandUtil.buildExpandNode(
        cluster, relBuilder, agg.getAggCallList, agg.getGroupSet, agg.getGroupSets)

      // new groupSet contains original groupSet and expand_id('$e') field
      val newGroupSet = agg.getGroupSet.union(ImmutableBitSet.of(expandIdIdxInExpand))
      (newGroupSet, duplicateFieldMap)
    } else {
      // no need add expand node, only need care about group functions
      (agg.getGroupSet, Map.empty[Integer, Integer])
    }

    val newGroupCount = newGroupSet.cardinality()
    // Rebuild the real aggregate calls against the (possibly expanded) input;
    // GROUPING-style calls are dropped here and replaced by literals/CASE below.
    val newAggCalls = aggCallsWithIndexes.collect {
      case (aggCall, idx) if !groupIdExprs.contains(idx) =>
        val newArgList = aggCall.getArgList.map(a => duplicateFieldMap.getOrElse(a, a)).toList
        aggCall.adaptTo(
          relBuilder.peek(), newArgList, aggCall.filterArg, agg.getGroupCount, newGroupCount)
    }

    // create simple aggregate
    relBuilder.aggregate(
      relBuilder.groupKey(newGroupSet, ImmutableList.of[ImmutableBitSet](newGroupSet)),
      newAggCalls)
    val newAgg = relBuilder.peek()

    // create a project to mapping original aggregate's output
    // get names of original grouping fields
    val groupingFieldsName = Seq.range(0, agg.getGroupCount)
      .map(x => agg.getRowType.getFieldNames.get(x))
    // create field access for all original grouping fields
    val groupingFields = agg.getGroupSet.toList.zipWithIndex.map {
      case (_, idx) => rexBuilder.makeInputRef(newAgg, idx)
    }.toArray[RexNode]

    val groupSetsWithIndexes = agg.getGroupSets.zipWithIndex
    // output aggregate calls including `normal` agg call and grouping agg call
    var aggCnt = 0
    val aggFields = aggCallsWithIndexes.map {
      case (aggCall, idx) if groupIdExprs.contains(idx) =>
        if (needExpand) {
          // reference to expand_id('$e') field in new aggregate
          val expandIdIdxInNewAgg = newGroupCount - 1
          val expandIdField = rexBuilder.makeInputRef(newAgg, expandIdIdxInNewAgg)
          // create case when for group expression
          // (one WHEN/THEN pair per group set, last one becomes the ELSE arm)
          val whenThenElse = groupSetsWithIndexes.flatMap {
            case (subGroupSet, i) =>
              val groupExpr = lowerGroupExpr(rexBuilder, aggCall, groupSetsWithIndexes, i)
              if (i < agg.getGroupSets.size() - 1) {
                // WHEN/THEN
                val expandIdVal = ExpandUtil.genExpandId(agg.getGroupSet, subGroupSet)
                val expandIdType = newAgg.getRowType.getFieldList.get(expandIdIdxInNewAgg).getType
                val expandIdLit = rexBuilder.makeLiteral(expandIdVal, expandIdType, false)
                Seq(
                  // when $e = $e_value
                  rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, expandIdField, expandIdLit),
                  // then return group expression literal value
                  groupExpr
                )
              } else {
                // ELSE
                Seq(
                  // else return group expression literal value
                  groupExpr
                )
              }
          }
          rexBuilder.makeCall(SqlStdOperatorTable.CASE, whenThenElse)
        } else {
          // create literal for group expression
          lowerGroupExpr(rexBuilder, aggCall, groupSetsWithIndexes, 0)
        }
      case _ =>
        // create access to aggregation result
        val aggResult = rexBuilder.makeInputRef(newAgg, newGroupCount + aggCnt)
        aggCnt += 1
        aggResult
    }

    // add a projection to establish the result schema and set the values of the group expressions.
    relBuilder.project(
      groupingFields.toSeq ++ aggFields,
      groupingFieldsName ++ agg.getAggCallList.map(_.name))
    relBuilder.convert(agg.getRowType, true)

    call.transformTo(relBuilder.build())
  }

  /**
   * Returns a literal for a given group expression.
   *
   * @param builder              rex builder used to create the literal
   * @param call                 the GROUP_ID / GROUPING / GROUPING_ID aggregate call
   * @param groupSetsWithIndexes all group sets zipped with their positions
   * @param indexInGroupSets     position of the group set the literal is computed for
   */
  private def lowerGroupExpr(
      builder: RexBuilder,
      call: AggregateCall,
      groupSetsWithIndexes: Seq[(ImmutableBitSet, Int)],
      indexInGroupSets: Int): RexNode = {

    val groupSet = groupSetsWithIndexes(indexInGroupSets)._1
    val groups = groupSet.asSet()
    call.getAggregation.getKind match {
      case SqlKind.GROUP_ID =>
        // https://issues.apache.org/jira/browse/CALCITE-1824
        // GROUP_ID is not in the SQL standard. It is implemented only by Oracle.
        // GROUP_ID is useful only if you have duplicate grouping sets,
        // If grouping sets are distinct, GROUP_ID() will always return zero;
        // Else return the index in the duplicate grouping sets.
        // e.g. SELECT deptno, GROUP_ID() AS g FROM Emp GROUP BY GROUPING SETS (deptno, (), ())
        // As you can see, the grouping set () occurs twice.
        // So there is one row in the result for each occurrence:
        // the first occurrence has g = 0; the second has g = 1.
        val duplicateGroupSetsIndices = groupSetsWithIndexes.filter {
          case (gs, _) => gs.compareTo(groupSet) == 0
        }.map(_._2).toArray[Int]
        require(duplicateGroupSetsIndices.nonEmpty)
        val id: Long = duplicateGroupSetsIndices.indexOf(indexInGroupSets)
        builder.makeLiteral(id, call.getType, false)
      case SqlKind.GROUPING | SqlKind.GROUPING_ID =>
        // GROUPING function is defined in the SQL standard,
        // but the definition of GROUPING is different from in Oracle and in SQL standard:
        // https://docs.oracle.com/cd/B28359_01/server.111/b28286/functions064.htm#SQLRF00647
        //
        // GROUPING_ID function is not defined in the SQL standard, and has the same
        // functionality with GROUPING function in Calcite.
        // our implementation is consistent with Oracle about GROUPING_ID function.
        //
        // NOTES:
        // In Calcite, the java-document of SqlGroupingFunction is not consistent with agg.iq.
        val res: Long = call.getArgList.foldLeft(0L)((res, arg) =>
          (res << 1L) + (if (groups.contains(arg)) 0L else 1L)
        )
        builder.makeLiteral(res, call.getType, false)
      case _ => builder.constantNull()
    }
  }
}
object DecomposeGroupingSetsRule {
  /** Shared rule instance for registration in planner rule sets. */
  val INSTANCE: RelOptRule = new DecomposeGroupingSetsRule
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/DecomposeGroupingSetsRule.scala | Scala | apache-2.0 | 16,159 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box
trait NotInPdf
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/box/NotInPdf.scala | Scala | apache-2.0 | 649 |
package util.implicits
import org.joda.time.{DateTime, LocalDate}
import scala.language.implicitConversions
object ConversionImplicits {
  // Implicitly narrows a Joda-Time DateTime to its date component via
  // DateTime#toLocalDate (drops the time-of-day).
  // NOTE(review): implicit conversions can hide behavior at call sites;
  // keep this import scoped to where the conversion is actually wanted.
  implicit def dateTimeToLocalDate(dateTime: DateTime): LocalDate = dateTime.toLocalDate
}
| PriscH/Foosball | app/util/implicits/ConversionImplicits.scala | Scala | gpl-3.0 | 232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.util
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable.Map
import org.apache.spark.sql.execution.command.{ColumnProperty, Field}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.lcm.status.SegmentStatusManager
import org.apache.carbondata.processing.model.CarbonLoadModel
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
object CommonUtil {

  /**
   * Validates a single column group definition against the table schema.
   *
   * Every column in `colGroup` must be a dictionary-encoded, non-complex,
   * non-timestamp dimension that is not already a member of another column group.
   *
   * @param colGroup         comma separated columns forming one group
   * @param noDictionaryDims dimensions configured without dictionary encoding
   * @param msrs             measure fields of the table
   * @param retrievedColGrps column groups that were already parsed
   * @param dims             dimension fields of the table
   * @throws MalformedCarbonCommandException if any column violates the rules above
   */
  def validateColumnGroup(colGroup: String, noDictionaryDims: Seq[String],
      msrs: Seq[Field], retrievedColGrps: Seq[String], dims: Seq[Field]): Unit = {
    // true if the column already belongs to one of the previously parsed groups
    def isInExistingColumnGroup(colName: String): Boolean =
      retrievedColGrps.exists(_.split(",").contains(colName))

    val colGrpCols = colGroup.split(',').map(_.trim)
    colGrpCols.foreach { x =>
      if (noDictionaryDims.contains(x)) {
        // column group members must be dictionary encoded
        throw new MalformedCarbonCommandException(
          "Column group is not supported for no dictionary columns:" + x)
      } else if (msrs.exists(msr => msr.column.equals(x))) {
        // measures cannot participate in a column group
        throw new MalformedCarbonCommandException("Column group is not supported for measures:" + x)
      } else if (isInExistingColumnGroup(x)) {
        throw new MalformedCarbonCommandException("Column is available in other column group:" + x)
      } else if (isComplex(x, dims)) {
        throw new MalformedCarbonCommandException(
          "Column group doesn't support Complex column:" + x)
      } else if (isTimeStampColumn(x, dims)) {
        throw new MalformedCarbonCommandException(
          "Column group doesn't support Timestamp datatype:" + x)
      } else if (!dims.exists(dim => dim.column.equalsIgnoreCase(x))) {
        // the column does not exist in the schema at all
        throw new MalformedCarbonCommandException(
          "column in column group is not a valid column: " + x
        )
      }
    }
  }

  /** Returns true if `colName` names a dimension declared with the timestamp data type. */
  def isTimeStampColumn(colName: String, dims: Seq[Field]): Boolean =
    dims.exists { dim =>
      dim.column.equalsIgnoreCase(colName) &&
        dim.dataType.exists(dataType => null != dataType && "timestamp".equalsIgnoreCase(dataType))
    }

  /**
   * Returns true if `colName` refers to a complex dimension: either the parent
   * column itself (exact, case-sensitive match) or one of its children via
   * "parent.child" (case-insensitive match).
   */
  def isComplex(colName: String, dims: Seq[Field]): Boolean =
    dims.exists { field =>
      val children = field.children
      children.isDefined && null != children.get && children.get.nonEmpty && (
        field.column.equals(colName) ||
          children.get.exists { child =>
            (field.column + "." + child.column).equalsIgnoreCase(colName)
          }
        )
    }

  /**
   * Collects the column properties configured for `column`, i.e. table
   * properties whose key starts with `<COLUMN_PROPERTIES>.<column>.`.
   *
   * @return Some(list) of matching properties (key stripped of the prefix),
   *         or None when the column has no properties
   */
  def getColumnProperties(column: String,
      tableProperties: Map[String, String]): Option[util.List[ColumnProperty]] = {
    val fieldProps = new util.ArrayList[ColumnProperty]()
    val columnPropertiesStartKey = CarbonCommonConstants.COLUMN_PROPERTIES + "." + column + "."
    tableProperties.foreach {
      case (key, value) =>
        if (key.startsWith(columnPropertiesStartKey)) {
          fieldProps.add(ColumnProperty(key.substring(columnPropertiesStartKey.length()), value))
        }
    }
    if (fieldProps.isEmpty) {
      None
    } else {
      Some(fieldProps)
    }
  }

  /**
   * Validates that every table property key refers to an existing field.
   *
   * @return true when all property keys are valid
   * @throws MalformedCarbonCommandException on the first invalid property key
   */
  def validateTblProperties(tableProperties: Map[String, String], fields: Seq[Field]): Boolean = {
    // Note: an invalid key always throws, so this method either returns true
    // or does not return at all (the former false-flag accumulator was dead code).
    tableProperties.keys.foreach { key =>
      if (!validateFields(key, fields)) {
        throw new MalformedCarbonCommandException(s"Invalid table properties ${ key }")
      }
    }
    true
  }

  /**
   * Returns true if `key` is valid for at least one of the given fields,
   * checking complex children as "parent.child" (the synthetic "val" child
   * maps back to the parent column name).
   */
  def validateFields(key: String, fields: Seq[Field]): Boolean =
    fields.exists { field =>
      if (field.children.isDefined && field.children.get != null) {
        field.children.get.exists { complexField =>
          // the "val" child represents the parent column itself
          val column = if ("val" == complexField.column) {
            field.column
          } else {
            field.column + "." + complexField.column
          }
          validateColumnProperty(key, column)
        }
      } else {
        validateColumnProperty(key, field.column)
      }
    }

  /**
   * Returns true if `key` is not a column-property key at all, or if it is a
   * column-property key belonging to the given `column`.
   */
  def validateColumnProperty(key: String, column: String): Boolean = {
    val columnPropertyKey = CarbonCommonConstants.COLUMN_PROPERTIES + "." + column + "."
    !key.startsWith(CarbonCommonConstants.COLUMN_PROPERTIES) || key.startsWith(columnPropertyKey)
  }

  /**
   * @param colGrps column groups (comma separated columns each)
   * @param dims    dimensions in schema order
   * @return columns of column groups in schema order (ordered by the schema
   *         index of each group's first column)
   */
  def arrangeColGrpsInSchemaOrder(colGrps: Seq[String], dims: Seq[Field]): Seq[String] =
    colGrps.sortBy(colGrp => getDimIndex(colGrp.split(",")(0), dims))

  /**
   * @param colName column to look up (case-insensitive)
   * @param dims    dimension fields
   * @return schema index of `colName` in `dims`, or -1 when absent; when
   *         duplicates exist the last occurrence wins, preserving the original
   *         scan-through behaviour
   */
  def getDimIndex(colName: String, dims: Seq[Field]): Int =
    dims.lastIndexWhere(_.column.equalsIgnoreCase(colName))

  /**
   * This method will validate the table block size specified by the user and,
   * when present, write the normalized (unit-less) value back into
   * `tableProperties`.
   *
   * @param tableProperties table properties, possibly containing TABLE_BLOCKSIZE
   * @throws MalformedCarbonCommandException if the value is not an integer
   *                                         within [BLOCK_SIZE_MIN_VAL, BLOCK_SIZE_MAX_VAL]
   */
  def validateTableBlockSize(tableProperties: Map[String, String]): Unit = {
    if (tableProperties.get(CarbonCommonConstants.TABLE_BLOCKSIZE).isDefined) {
      val blockSizeStr: String =
        parsePropertyValueStringInMB(tableProperties(CarbonCommonConstants.TABLE_BLOCKSIZE))
      val tableBlockSize =
        try {
          Integer.parseInt(blockSizeStr)
        } catch {
          case _: NumberFormatException =>
            throw new MalformedCarbonCommandException("Invalid table_blocksize value found: " +
                                                      s"$blockSizeStr, only int value from 1 MB to " +
                                                      s"2048 MB is supported.")
        }
      if (tableBlockSize < CarbonCommonConstants.BLOCK_SIZE_MIN_VAL ||
          tableBlockSize > CarbonCommonConstants.BLOCK_SIZE_MAX_VAL) {
        throw new MalformedCarbonCommandException("Invalid table_blocksize value found: " +
                                                  s"$blockSizeStr, only int value from 1 MB to " +
                                                  s"2048 MB is supported.")
      }
      // store the normalized value back so downstream readers see a plain integer
      tableProperties.put(CarbonCommonConstants.TABLE_BLOCKSIZE, blockSizeStr)
    }
  }

  /**
   * This method will parse the configure string from 'XX MB/M' to 'XX',
   * e.g. "512 MB" -> "512". A value without a recognized suffix is returned
   * unchanged.
   *
   * @param propertyValueString raw property value, possibly carrying an "m"/"mb" unit
   */
  def parsePropertyValueStringInMB(propertyValueString: String): String = {
    val normalized = propertyValueString.trim.toLowerCase
    if (normalized.endsWith("mb")) {
      normalized.substring(0, normalized.lastIndexOf("mb")).trim
    } else if (normalized.endsWith("m")) {
      normalized.substring(0, normalized.lastIndexOf("m")).trim
    } else {
      propertyValueString
    }
  }

  /**
   * Loads the segment load metadata of the model's table and attaches it to the
   * load model.
   *
   * @param model     load model to populate
   * @param storePath unused; kept for source compatibility with existing callers
   */
  def readLoadMetadataDetails(model: CarbonLoadModel, storePath: String): Unit = {
    val metadataPath = model.getCarbonDataLoadSchema.getCarbonTable.getMetaDataFilepath
    val details = SegmentStatusManager.readLoadMetadata(metadataPath)
    model.setLoadMetadataDetails(details.toList.asJava)
  }
}
| ashokblend/incubator-carbondata | integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala | Scala | apache-2.0 | 9,222 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit.submitsteps
import io.fabric8.kubernetes.api.model.ContainerBuilder
import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.KubernetesFileUtils
private[spark] class PythonStep(
    primaryPyFile: String,
    otherPyFiles: Seq[String],
    filesDownloadPath: String) extends DriverConfigurationStep {

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    // "null" is the sentinel value exported when no extra Python files are shipped.
    val pySparkFilesEnvValue =
      if (otherPyFiles.nonEmpty) {
        KubernetesFileUtils.resolveFilePaths(otherPyFiles, filesDownloadPath).mkString(",")
      } else {
        "null"
      }
    val primaryFileEnvValue = KubernetesFileUtils.resolveFilePath(primaryPyFile, filesDownloadPath)
    // Expose the resolved primary file and file list to the driver container via env vars.
    val updatedContainer = new ContainerBuilder(driverSpec.driverContainer)
      .addNewEnv()
        .withName(ENV_PYSPARK_PRIMARY)
        .withValue(primaryFileEnvValue)
        .endEnv()
      .addNewEnv()
        .withName(ENV_PYSPARK_FILES)
        .withValue(pySparkFilesEnvValue)
        .endEnv()
      .build()
    driverSpec.copy(driverContainer = updatedContainer)
  }
}
| kimoonkim/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/kubernetes/submit/submitsteps/PythonStep.scala | Scala | apache-2.0 | 1,941 |
package rere.ql.options
import rere.ql.queries.values
trait EmergencyRepairOptions {

  // Values for the "emergency_repair" query option; each case renders its
  // option key/value pair through `view`.
  sealed trait EmergencyRepairOptions extends ComposableOptions

  // Emits emergency_repair: "unsafe_rollback".
  case object UnsafeRollback extends EmergencyRepairOptions with NonDefaultOption {
    def view = "emergency_repair" -> values.expr("unsafe_rollback") :: Nil
  }

  // Emits emergency_repair: "unsafe_rollback_or_erase".
  case object UnsafeRollbackOrErase extends EmergencyRepairOptions with NonDefaultOption {
    def view = "emergency_repair" -> values.expr("unsafe_rollback_or_erase") :: Nil
  }
}
package org.scalajs.openui5.sap.m
import org.scalajs.openui5.sap.ui.core.{Control, VerticalAlign}
import org.scalajs.openui5.util.{Settings, SettingsMap, noSettings}
import scala.scalajs.js
import scala.scalajs.js.annotation.{JSName, ScalaJSDefined}
// Settings bag for sap.m.ColumnListItem; adds nothing beyond ListItemBase settings.
@ScalaJSDefined
trait ColumnListItemSettings extends ListItemBaseSettings
object ColumnListItemSettings extends ColumnListItemSettingsBuilder(noSettings)
// Builder accumulating ColumnListItem settings into `dict`; each setter returns
// a new builder (see Settings) so construction is chainable.
class ColumnListItemSettingsBuilder(val dict: SettingsMap)
  extends Settings[ColumnListItemSettings, ColumnListItemSettingsBuilder](new ColumnListItemSettingsBuilder(_))
    with ColumnListItemSetters[ColumnListItemSettings, ColumnListItemSettingsBuilder]
// ColumnListItem-specific setters, layered on top of the ListItemBase setters.
trait ColumnListItemSetters[T <: js.Object, B <: Settings[T,_]]
  extends ListItemBaseSetters[T, B] {

  // Sets the "vAlign" entry (vertical alignment of cell content).
  def vAlign(v: VerticalAlign) = setting("vAlign", v)
  // Sets the "cells" entry (controls rendered in this row's cells).
  def cells(v: js.Array[_ <: Control]) = setting("cells", v)
}
// Scala.js facade for the native sap.m.ColumnListItem control.
@JSName("sap.m.ColumnListItem")
@js.native
class ColumnListItem(id: js.UndefOr[String] = js.native,
                     settings: js.UndefOr[ColumnListItemSettings] = js.native)
  extends ListItemBase {

  // Convenience constructors: id-only and settings-only.
  def this(id: String) = this(id, js.undefined)
  def this(settings: ColumnListItemSettings) = this(js.undefined, settings)
}
| lastsys/scalajs-openui5 | src/main/scala/org/scalajs/openui5/sap/m/ColumnListItem.scala | Scala | mit | 1,219 |
/*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.drelephant.spark.data
import java.util.Date
import scala.collection.JavaConverters
import com.linkedin.drelephant.spark.fetchers.statusapiv1.{ApplicationAttemptInfoImpl, ApplicationInfoImpl}
import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate
import org.scalatest.{FunSpec, Matchers}
/** Verifies that [[SparkApplicationData]] exposes the Spark properties gathered
  * from the event-log derived data through its `getConf` view. */
class SparkApplicationDataTest extends FunSpec with Matchers {
  import SparkApplicationDataTest._
  import JavaConverters._

  describe("SparkApplicationData") {
    val appId = "application_1"
    val attemptId = Some("1")

    // A single finished attempt spanning the last `duration` milliseconds.
    val applicationAttemptInfo = {
      val now = System.currentTimeMillis
      val duration = 8000000L
      newFakeApplicationAttemptInfo(attemptId, startTime = new Date(now - duration), endTime = new Date(now))
    }

    // REST-derived data with empty job/stage/executor collections; only the
    // application info itself matters for this test.
    val restDerivedData = SparkRestDerivedData(
      new ApplicationInfoImpl(appId, "app", Seq(applicationAttemptInfo)),
      jobDatas = Seq.empty,
      stageDatas = Seq.empty,
      executorSummaries = Seq.empty
    )

    // Spark properties expected to round-trip through getConf unchanged.
    val configurationProperties = Map(
      "spark.serializer" -> "org.apache.spark.serializer.KryoSerializer",
      "spark.storage.memoryFraction" -> "0.3",
      "spark.driver.memory" -> "2G",
      "spark.executor.instances" -> "900",
      "spark.executor.memory" -> "1g",
      "spark.shuffle.memoryFraction" -> "0.5"
    )

    val logDerivedData = SparkLogDerivedData(
      SparkListenerEnvironmentUpdate(Map("Spark Properties" -> configurationProperties.toSeq))
    )

    describe(".getConf") {
      it("returns the Spark properties") {
        val data = SparkApplicationData(appId, restDerivedData, Some(logDerivedData))
        data.getConf.asScala should contain theSameElementsAs(configurationProperties)
      }
    }
  }
}
/** Test fixtures for [[SparkApplicationDataTest]]. */
object SparkApplicationDataTest {
  /** Builds a completed attempt with a fixed fake user "foo". */
  def newFakeApplicationAttemptInfo(
    attemptId: Option[String],
    startTime: Date,
    endTime: Date
  ): ApplicationAttemptInfoImpl = new ApplicationAttemptInfoImpl(
    attemptId,
    startTime,
    endTime,
    sparkUser = "foo",
    completed = true
  )
}
| shankar37/dr-elephant | test/com/linkedin/drelephant/spark/data/SparkApplicationDataTest.scala | Scala | apache-2.0 | 2,633 |
/*******************************************************************************
Copyright (c) 2012-2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing
import scala.collection.immutable.{ TreeMap, TreeSet, HashSet, HashMap, Stack => IStack }
import scala.collection.mutable.{ HashMap => MHashMap, HashSet => MHashSet, Stack => MStack }
import kr.ac.kaist.jsaf.analysis.cfg._
import kr.ac.kaist.jsaf.analysis.lib.graph.DGraph
import kr.ac.kaist.jsaf.analysis.lib.WorkTreeSet
import kr.ac.kaist.jsaf.Shell
import kr.ac.kaist.jsaf.scala_src.useful.WorkTrait
/**
* Worklist manager
*/
/** Worklist manager for the typing analysis.
  *
  * Concrete subclasses supply the ordering policy (default/FIFO/LIFO/count) via
  * `insertWork`/`removeHead`; this base class tracks the node-order maps, the
  * sparse-analysis back-edge information, and the caller control-point stacks
  * associated with each pending work item. All mutating entry points are
  * synchronized on `this` because work may be pushed from worker threads.
  */
abstract class Worklist {
  ////////////////////////////////////////////////////////////////////////////////
  // Variables
  ////////////////////////////////////////////////////////////////////////////////
  private var cfg: CFG = null
  // Node -> iteration-order index used to prioritize work items.
  private var order: OrderMap = null
  // Order indices for loop-head nodes (sparse mode only).
  private var headorder: OrderMap = TreeMap[Node, Int]()
  // Loop head -> set of nodes whose edge into the head is a back-edge (sparse mode only).
  private var backedges: HashMap[Node, HashSet[Node]] = HashMap()
  private var quiet: Boolean = false

  ////////////////////////////////////////////////////////////////////////////////
  // Abstract Functions (ordering policy supplied by subclasses)
  ////////////////////////////////////////////////////////////////////////////////
  def head: ControlPoint
  def isEmpty: Boolean
  def getSize: Int
  def getWorkList: WorkTreeSet
  def toString: String
  protected def insertWork(work: OrderEntry): Unit
  protected def removeHead: ControlPoint

  ////////////////////////////////////////////////////////////////////////////////
  // Initialization
  ////////////////////////////////////////////////////////////////////////////////
  // Dense init
  def init(cfg: CFG, order: OrderMap, quiet: Boolean): Unit = {
    this.cfg = cfg
    this.order = order
    this.quiet = quiet
  }

  // Sparse init: additionally installs the loop-head orders and back-edge map.
  def init(cfg: CFG, order: OrderMap, quiet: Boolean, headorder: OrderMap, backedges: HashMap[Node, HashSet[Node]]): Unit = {
    init(cfg, order, quiet)
    this.headorder = headorder
    this.backedges = backedges
  }

  ////////////////////////////////////////////////////////////////////////////////
  // Add a Work
  ////////////////////////////////////////////////////////////////////////////////
  /** Enqueues `cp` using the dense order map (order 0 when no map was provided,
    * e.g. for an empty block) and records its caller stacks. */
  def add(cp: ControlPoint, callerCPSetOpt: Option[CPStackSet], increaseRefCount: Boolean): Unit = this.synchronized {
    val ov = if(order == null) 0 else order.getOrElse(cp._1, 0) // 0 => case for an empty block
    insertWork((ov, cp))
    addCallerCPSet(cp, callerCPSetOpt, increaseRefCount)
    if(useWorkManager) Shell.workManager.pushWork(workTrait)
  }

  /** Enqueues `cp` reached from `origin`; when the edge origin->cp is a known
    * back-edge, the loop-head order is used instead of the plain order. */
  def add(origin: Node, cp: ControlPoint, callerCPSetOpt: Option[CPStackSet], increaseRefCount: Boolean): Unit = this.synchronized {
    backedges.get(cp._1) match {
      case Some(backnodes) if backnodes.contains(origin) =>
        insertWork((headorder(cp._1), cp))
        addCallerCPSet(cp, callerCPSetOpt, increaseRefCount)
        if(useWorkManager) Shell.workManager.pushWork(workTrait)
      case Some(backnodes) => add(cp, callerCPSetOpt, increaseRefCount)
      case _ => add(cp, callerCPSetOpt, increaseRefCount)
    }
  }

  /** Enqueues `cp` for an inter-procedural edge from `cp_pred`, maintaining the
    * caller-stack sets: a Call->Entry edge pushes `cp_pred` onto every caller
    * stack, while Exit/ExitExc->Aftercall pops the matching call site. The
    * state-table update callback runs only when work is actually added.
    *
    * @return true iff a work item was enqueued
    */
  def add(cp_pred: ControlPoint, cp: ControlPoint, cfg: CFG, callerCPSetOpt: Option[CPStackSet], increaseRefCount: Boolean, updateTable: Unit => Unit): Boolean = this.synchronized {
    var isWorkAdded = false
    callerCPSetOpt match {
      case Some(callerCPSet) =>
        cp match {
          // Call -> Entry
          case ((_, LEntry), _) =>
            val newCallerCPStackSet = new CPStackSet
            for(callerCPStack <- callerCPSet) newCallerCPStackSet.add(callerCPStack.push(cp_pred))
            updateTable()
            add(cp, Some(newCallerCPStackSet), increaseRefCount)
            isWorkAdded = true
          // Exit or ExitExc -> Aftercall
          case _ =>
            // Only propagate to the aftercall/aftercatch node that matches the
            // call site on top of at least one caller stack.
            var doesExist = false
            val newCallerCPStackSet = new CPStackSet
            for(callerCPStack <- callerCPSet) {
              val topCP = callerCPStack.top
              if(cp._1 == cfg.getAftercallFromCallMap.getOrElse(topCP._1, null) ||
                 cp._1 == cfg.getAftercatchFromCallMap.getOrElse(topCP._1, null)) {
                doesExist = true
                // Keep the popped stack only when callers remain below the top.
                if(callerCPStack.size > 1) newCallerCPStackSet.add(callerCPStack.pop)
              }
            }
            if(doesExist) {
              updateTable()
              if(newCallerCPStackSet.size == 0) add(cp, None, increaseRefCount)
              else add(cp, Some(newCallerCPStackSet), increaseRefCount)
              isWorkAdded = true
            }
        }
      case None =>
        updateTable()
        cp match {
          // Call -> Entry: start a fresh single-frame caller stack.
          case ((_, LEntry), _) => add(cp, Some(MHashSet(IStack(cp_pred))), increaseRefCount)
          // Exit or ExitExc -> Aftercall
          case _ => add(cp, None, increaseRefCount)
        }
        isWorkAdded = true
    }
    isWorkAdded
  }

  ////////////////////////////////////////////////////////////////////////////////
  // etc.
  ////////////////////////////////////////////////////////////////////////////////
  def getOrder(): OrderMap = order

  /** Removes and returns the highest-priority work item together with its
    * caller stacks (None for context-insensitive items). When the last
    * reference is consumed, the ref object is recycled into the pool. */
  def getHead(): (ControlPoint, Option[CPStackSet]) = this.synchronized {
    // NOTE(review): the inner synchronized is redundant (same reentrant
    // monitor as the enclosing block) but harmless.
    val cp: ControlPoint = this.synchronized { removeHead }
    val callerCPStackSet = callerCPMap.get(cp) match {
      case Some(callerCPStackSetRef) =>
        if(callerCPStackSetRef.refCount == 1) {
          callerCPMap.remove(cp)
          // Recycle the ref wrapper; its cpStackSet is read below before the
          // pool can hand the wrapper out again (we still hold the lock).
          cpStackSetRefPool.push(callerCPStackSetRef)
        }
        else callerCPStackSetRef.refCount -= 1
        if(callerCPStackSetRef.cpStackSet != null) Some(callerCPStackSetRef.cpStackSet) else None
      case None =>
        // NOTE(review): remove on an absent key is a no-op; kept for parity.
        callerCPMap.remove(cp)
        None
    }
    (cp, callerCPStackSet)
  }

  def dump() = if (!quiet) System.out.print("next: " + head + " ")

  ////////////////////////////////////////////////////////////////////////////////
  // Caller ControlPoint Stack Set
  ////////////////////////////////////////////////////////////////////////////////
  /** Reference-counted holder for a caller stack set (null set means
    * context-insensitive). Instances are pooled to reduce allocation. */
  class CPStackSetRef {
    var refCount = 1
    var cpStackSet: CPStackSet = null
  }
  private val callerCPMap = new MHashMap[ControlPoint, CPStackSetRef]
  private val cpStackSetRefPool = new MStack[CPStackSetRef]

  /** Takes a wrapper from the pool (or allocates one) and resets it. */
  private def getNewCPStackSetRef(_cpStackSet: CPStackSet = null): CPStackSetRef = {
    val cpStackSetRef = if(cpStackSetRefPool.isEmpty) new CPStackSetRef else cpStackSetRefPool.pop()
    cpStackSetRef.refCount = 1
    cpStackSetRef.cpStackSet = _cpStackSet
    cpStackSetRef
  }

  /** Merges `callerCPSetOpt` into the stacks already recorded for `cp`,
    * bumping the ref count when the same item is enqueued again. */
  private def addCallerCPSet(cp: ControlPoint, callerCPSetOpt: Option[CPStackSet], increaseRefCount: Boolean): Unit = {
    callerCPSetOpt match {
      case Some(callerCPSet) =>
        callerCPMap.get(cp) match {
          case Some(prevCallerCPSetRef) =>
            if(increaseRefCount) prevCallerCPSetRef.refCount += 1
            if(prevCallerCPSetRef.cpStackSet == null) prevCallerCPSetRef.cpStackSet = callerCPSet
            else prevCallerCPSetRef.cpStackSet ++= callerCPSet
          case None => callerCPMap.put(cp, getNewCPStackSetRef(callerCPSet))
        }
      case None => callerCPMap.put(cp, getNewCPStackSetRef())
    }
  }

  ////////////////////////////////////////////////////////////////////////////////
  // For WorkManager (Thread library)
  ////////////////////////////////////////////////////////////////////////////////
  private var useWorkManager = false
  private var workTrait: WorkTrait = null
  def setUseWorkManager(_useWorkManager: Boolean, _workTrait: WorkTrait): Unit = {
    useWorkManager = _useWorkManager
    workTrait = _workTrait
  }
}
/** Factory for [[Worklist]] instances: computes the node iteration orders for
  * the dense analysis, and the SCC-based orders plus back-edge map for the
  * sparse analysis. */
object Worklist {
  ////////////////////////////////////////////////////////////////////////////////
  // Worklist Order Types
  ////////////////////////////////////////////////////////////////////////////////
  final val WORKLIST_ORDER_DEFAULT: Int = 0
  final val WORKLIST_ORDER_FIFO: Int = 1
  final val WORKLIST_ORDER_LIFO: Int = 2
  final val WORKLIST_ORDER_COUNT: Int = 3

  ////////////////////////////////////////////////////////////////////////////////
  // Worklist Computes
  ////////////////////////////////////////////////////////////////////////////////
  def computes(cfg: CFG) : Worklist = computes(Shell.params.opt_WorklistOrder, cfg, false)
  def computes(cfg: CFG, quiet: Boolean) : Worklist = computes(Shell.params.opt_WorklistOrder, cfg, quiet)

  /** Builds a worklist for the requested order type. Only the DEFAULT order
    * needs a precomputed node->index map (node enumeration order). */
  def computes(orderType: Int, cfg: CFG, quiet: Boolean) : Worklist = {
    val startTime = System.nanoTime
    var worklist: Worklist = null
    orderType match {
      case WORKLIST_ORDER_DEFAULT =>
        val empty = TreeMap[Node, Int]()
        // Assign consecutive indices in getNodes order.
        val (map, _) = cfg.getNodes.foldLeft((empty, 0))((m, n) => (m._1 + (n -> m._2), m._2 + 1))
        worklist = new WorklistDefault
        worklist.init(cfg, map, quiet)
      case WORKLIST_ORDER_FIFO =>
        worklist = new WorklistFIFO
        worklist.init(cfg, null, quiet)
      case WORKLIST_ORDER_LIFO =>
        worklist = new WorklistLIFO
        worklist.init(cfg, null, quiet)
      case WORKLIST_ORDER_COUNT =>
        worklist = new WorklistCount
        worklist.init(cfg, null, quiet)
    }
    if (!quiet) {
      val elapsedTime = (System.nanoTime - startTime) / 1000000000.0
      System.out.format("# Time for worklist order computation(s): %.2f\\n", new java.lang.Double(elapsedTime))
    }
    worklist
  }

  /** Builds a sparse-analysis worklist ordered by the topological order of the
    * SCC condensation of the data-dependency graph. Non-trivial SCCs (loops)
    * are recursively re-ordered after removing the in-edges of a chosen loop
    * head; those removed edges become the back-edge map. */
  def computesSparse(interDDG: DGraph[Node], quiet: Boolean): Worklist = {
    val s = System.nanoTime
    var map = TreeMap[Node, Int]()
    var headmap = TreeMap[Node, Int]()
    var backedges = HashMap[Node, HashSet[Node]]()
    var order_i = 0

    // Picks the node with the most in-edges as the loop head of a cycle.
    def findLoophead(g: DGraph[Node]): Node = {
      // check back-edges to find a loop head.
      var refs: HashMap[Node, Int] = g.getNodes.foldLeft(HashMap[Node,Int]())((M, n) => M + (n -> 0))
      g.succs.foreach(kv => {
        kv._2.foreach(n => refs += (n -> (refs(n) + 1)))
      })
      val (entry, _) = refs.foldLeft(refs.head)((h, ref) => if (ref._2 > h._2) ref else h)
      entry
    }

    // Assigns order indices by recursing over the SCC condensation of `g`.
    def makeSCCGraph(g: DGraph[Node]): Unit = {
      val nodes = g.sccs
      def getNode(n: Node) = nodes.filter(ns => ns.contains(n)).head
      // constructs abstract graph for a given graph.
      val entry = getNode(g.entry)
      val (max, ntoi) = nodes.foldLeft((0, HashMap[HashSet[Node], Int]()))((pair, n) => (pair._1 + 1, pair._2 + (n -> pair._1)))
      val nodes_i = (0 to (max-1)).foldLeft(HashSet[Int]())((S, i) => S + i)
      val entry_i = ntoi(entry)
      val iton = new Array[HashSet[Node]](max)
      ntoi.foreach(kv => iton(kv._2) = kv._1)
      val edges_i =
        g.getNodes.foldLeft(HashSet[(Int, Int)]())((S, n) => {
          val succs = g.getSuccs(n)
          val src = getNode(n)
          succs.foldLeft(S)((S_, n2) => {
            val dst = getNode(n2)
            S_ + ((ntoi(src), ntoi(dst)))
          })
        })
      val agraph = DGraph.fromEdges[Int](nodes_i, entry_i, edges_i)
      // computes topological order for the abstract graph.
      agraph.topologicalOrder.foreach(n => {
        val sets: HashSet[Node] = iton(n)
        if (sets.size > 1) {
          // traverse each non-trivial SCC of the concrete graph: break the
          // cycle at its loop head, remember the removed back-edges, recurse.
          val subgraph = DGraph.pruning(g,sets)
          val loophead = findLoophead(subgraph)
          val backnodes = subgraph.removeInedges(loophead)
          backedges += (loophead -> backnodes)
          subgraph.entry = loophead
          makeSCCGraph(subgraph)
        } else {
          map += (sets.head -> order_i)
          order_i += 1
        }
      })
      // The (sub)graph entry gets a loop-head order after its body.
      headmap += (g.entry -> order_i)
      order_i += 1
    }

    makeSCCGraph(interDDG.prunedGraph)
    val wl = new WorklistDefault
    wl.init(null, map, quiet, headmap, backedges)
    val elapsedTime = (System.nanoTime - s) / 1000000000.0
    if (!quiet)
      System.out.format("# Time for worklist order computation(s): %.2f\\n", new java.lang.Double(elapsedTime))
    wl
  }
}
}
| daejunpark/jsaf | src/kr/ac/kaist/jsaf/analysis/typing/Worklist.scala | Scala | bsd-3-clause | 12,088 |
package monocle.std
import monocle.TestUtil._
import monocle.function._
import monocle.law.{OptionalLaws, PrismLaws}
import org.specs2.scalaz.Spec
/** Law checks for the Byte optics: bit-indexing and the Byte<->Boolean prism. */
class ByteSpec extends Spec {
  checkAll("Byte index bit", OptionalLaws(index[Byte, Int, Boolean](0)))
  checkAll("Byte to Boolean", PrismLaws(byteToBoolean))
}
| CapeSepias/Monocle | test/src/test/scala/monocle/std/ByteSpec.scala | Scala | mit | 314 |
package capitulo06
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
/** Base type for actions that can be reverted and re-applied.
  *
  * @param description human-readable label for the action
  */
abstract class UndoableAction(val description: String) {
  /** Reverts the effect of this action. */
  def undo(): Unit

  /** Re-applies this action after an undo. */
  def redo(): Unit
}

/** Shared singleton action whose undo and redo are both no-ops. */
object DoNothingAction extends UndoableAction("Do nothing") {
  override def undo(): Unit = ()
  override def redo(): Unit = ()
}
/** Demonstrates that an `object` extending a class is a shared singleton. */
@RunWith(classOf[JUnitRunner])
class ObjetoExtendendoClasseOuTrait extends FunSuite {
  test("O objeto DoNothingAction pode ser compartilhado em vários lugares") {
    // The map previously went unasserted; verify both entries are the very
    // same singleton instance (reference equality), then check its state.
    val actions = Map("open" -> DoNothingAction, "save" -> DoNothingAction)
    assert(actions("open") eq actions("save"))
    assert("Do nothing" == DoNothingAction.description)
  }
}
import sbt._
import Keys._
import AndroidKeys._
/** Default values for the sbt-android plugin settings: tool executable names,
  * file/directory conventions, dx options, and modules already shipped with
  * the Android platform (excluded from packaging). */
object AndroidDefaults {
  val DefaultAaaptName = "aapt"
  val DefaultAadbName = "adb"
  val DefaultAaidlName = "aidl"
  val DefaultDxName = "dx"
  val DefaultAndroidManifestName = "AndroidManifest.xml"
  val DefaultAndroidJarName = "android.jar"
  val DefaultAssetsDirectoryName = "assets"
  val DefaultResDirectoryName = "res"
  val DefaultClassesMinJarName = "classes.min.jar"
  val DefaultClassesDexName = "classes.dex"
  val DefaultResourcesApkName = "resources.apk"
  // (JVM heap for dx, extra dx options)
  val DefaultDxOpts = ("-JXmx512m", None)
  val DefaultManifestSchema = "http://schemas.android.com/apk/res/android"
  // Environment variables probed to locate the Android SDK.
  val DefaultEnvs = List("ANDROID_SDK_HOME", "ANDROID_SDK_ROOT", "ANDROID_HOME")

  /** Wires every default into its corresponding sbt setting key. */
  lazy val settings: Seq[Setting[_]] = Seq (
    aaptName := DefaultAaaptName,
    adbName := DefaultAadbName,
    aidlName := DefaultAaidlName,
    dxName := DefaultDxName,
    manifestName := DefaultAndroidManifestName,
    jarName := DefaultAndroidJarName,
    assetsDirectoryName := DefaultAssetsDirectoryName,
    resDirectoryName := DefaultResDirectoryName,
    classesMinJarName := DefaultClassesMinJarName,
    classesDexName := DefaultClassesDexName,
    resourcesApkName := DefaultResourcesApkName,
    dxOpts := DefaultDxOpts,
    manifestSchema := DefaultManifestSchema,
    envs := DefaultEnvs,
    // a list of modules which are already included in Android
    preinstalledModules := Seq[ModuleID](
      ModuleID("org.apache.httpcomponents", "httpcore", null),
      ModuleID("org.apache.httpcomponents", "httpclient", null),
      ModuleID("org.json", "json" , null),
      ModuleID("commons-logging", "commons-logging", null),
      ModuleID("commons-codec", "commons-codec", null)
    )
  )
}
| taisukeoe/sbt-android-plugin | src/main/scala/AndroidDefault.scala | Scala | bsd-3-clause | 1,732 |
package com.eigengo.lift.analysis.exercise.ld
import java.util.Properties
import java.util.concurrent.Executors
import com.eigengo.lift.analysis.exercise.rt.ExerciseClassificationProtocol.{Payload, Train, ExerciseClassificationRequest}
import com.eigengo.lift.analysis.exercise.rt.{Exercise, MessageEncoder, MessageDecoder, JavaSerializationCodecs}
import kafka.consumer.{ConsumerConfig, Consumer}
import kafka.producer.{KeyedMessage, Producer, ProducerConfig}
import kafka.serializer.StringDecoder
import scala.io.Source
import scala.util.Random
/** Local test driver: repeatedly publishes recorded accelerometer data to the
  * `accelerometer-data` Kafka topic and, on a background thread, prints the
  * classified exercises read back from the `classified-exercise` topic. */
object AccelerometerDataLocalLoader extends JavaSerializationCodecs {
  val encoder = implicitly[MessageEncoder[ExerciseClassificationRequest]]

  def messagesPerSec = 1 + Random.nextInt(10)
  def wordsPerMessage = 10 + Random.nextInt(100)

  val topic = "accelerometer-data"

  // Producer targeting the local (boot2docker) Kafka broker.
  val producer = {
    val brokers = "192.168.59.103:9092"
    val props = new Properties()
    props.put("metadata.broker.list", brokers)
    val config = new ProducerConfig(props)
    new Producer[String, Payload](config)
  }

  // High-level consumer in group "lift", coordinated through local ZooKeeper.
  val consumer = {
    val zkQuorum = "192.168.59.103"
    val group = "lift"
    val props = new Properties()
    props.put("zookeeper.connect", zkQuorum)
    props.put("group.id", group)
    props.put("zookeeper.connection.timeout.ms", "10000")
    props.put("zookeeper.session.timeout.ms", "400")
    props.put("zookeeper.sync.time.ms", "200")
    props.put("auto.commit.interval.ms", "1000")
    val config = new ConsumerConfig(props)
    Consumer.create(config)
  }

  /** Reads the classpath resource `name`, wraps its bytes with `f`, and
    * returns the encoded message.
    *
    * Fails fast with a descriptive error when the resource is missing
    * (getResourceAsStream returns null) or when encoding fails, and closes
    * the source instead of leaking the stream.
    */
  def load[U](name: String)(f: Payload => ExerciseClassificationRequest): Array[Byte] = {
    val is = getClass.getResourceAsStream(name)
    require(is != null, s"Resource not found on classpath: $name")
    val source = Source.fromInputStream(is)
    try {
      val msg = f(source.map(_.toByte).toArray)
      encoder.encode(msg).toOption.getOrElse(sys.error(s"Failed to encode message loaded from $name"))
    } finally source.close()
  }

  def main(args: Array[String]) {
    // Send some messages
    val executor = Executors.newFixedThreadPool(10)
    val streams = consumer.createMessageStreams(Map("classified-exercise" → 1), keyDecoder = new StringDecoder(), valueDecoder = new StringDecoder())
    // Background consumer: echo every classified exercise to stdout.
    executor.submit(new Runnable {
      override def run(): Unit = {
        streams.foreach {
          case (key, stream) ⇒
            println(s"Consuming from $key at ${System.currentTimeMillis()}")
            stream.foreach { x ⇒
              x.iterator().foreach { mam ⇒
                println(mam.message())
              }
            }
            println(s"Done")
        }
      }
    })
    // Publish the recorded "Arms" session forever, 10 msgs/sec.
    while(true) {
      val msg = load("/arms1.adv1")(Train(_, Exercise("Arms", 1.0)))
      producer.send(new KeyedMessage[String, Payload](topic, msg))
      Thread.sleep(100)
    }
  }
}
| lachatak/lift | analysis/exercise-ld/src/main/scala/com/eigengo/lift/analysis/exercise/ld/AccelerometerDataLocalLoader.scala | Scala | apache-2.0 | 2,624 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
package xml
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
/**
* @author Alexander Podkhalyuzin
* Date: 21.04.2008
*/
// PSI element for a CDATA section ("CDSect" in the XML grammar) appearing
// inside a Scala XML literal. Marker trait only; no members of its own.
trait ScXmlCDSect extends ScalaPsiElement {
}
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate.crdt.pure
import akka.actor._
import akka.serialization.Serializer
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.crdt.pure.CRDTFormats._
import com.rbmhtechnology.eventuate.crdt.pure.CRDTService._
import com.rbmhtechnology.eventuate.serializer.CommonSerializer
import scala.collection.JavaConverters._
/** Akka serializer for the pure-op CRDT types: CRDT state (POLog + stable
  * state), the ValueUpdated service message, and the individual operation
  * types. Payloads nested inside these messages are delegated to the shared
  * payload serializer. The manifest class selects the protobuf format on
  * deserialization, so toBinary/fromBinary cases must stay in sync. */
class CRDTSerializer(system: ExtendedActorSystem) extends Serializer {
  val commonSerializer = new CommonSerializer(system)
  import commonSerializer.payloadSerializer

  // Manifest classes used to dispatch in fromBinary.
  private val ValueUpdatedClass = classOf[ValueUpdated]
  private val CRDTClass = classOf[CRDT[_]]
  private val UpdatedOpClass = classOf[UpdateOp]
  private val AssignOpClass = classOf[AssignOp]
  private val AddOpClass = classOf[AddOp]
  private val RemoveOpClass = classOf[RemoveOp]
  private val AWCartEntryClass = classOf[AWCartEntry[_]]
  private val ClearClass = ClearOp.getClass

  // Unique serializer id registered with Akka.
  override def identifier: Int = 22567
  // A manifest is required to pick the right parser in fromBinary.
  override def includeManifest: Boolean = true

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    case c: CRDT[_] =>
      crdtFormatBuilder(c).build().toByteArray
    case v: ValueUpdated =>
      valueUpdatedFormat(v).build().toByteArray
    case o: UpdateOp =>
      updateOpFormatBuilder(o).build().toByteArray
    case o: AddOp =>
      addOpFormatBuilder(o).build().toByteArray
    case o: RemoveOp =>
      removeOpFormatBuilder(o).build().toByteArray
    case o: AssignOp =>
      assignOpFormatBuilder(o).build().toByteArray
    case e: AWCartEntry[_] =>
      awCartEntryFormatBuilder(e).build().toByteArray
    case ClearOp =>
      // ClearOp carries no data; an empty format suffices.
      ClearFormat.newBuilder().build().toByteArray
    case _ =>
      throw new IllegalArgumentException(s"can't serialize object of type ${o.getClass}")
  }

  override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = manifest match {
    case None => throw new IllegalArgumentException("manifest required")
    case Some(clazz) => clazz match {
      case CRDTClass =>
        crdt(CRDTPureOpFormat.parseFrom(bytes))
      case ValueUpdatedClass =>
        valueUpdated(ValueUpdatedFormat.parseFrom(bytes))
      case UpdatedOpClass =>
        updateOp(UpdateOpFormat.parseFrom(bytes))
      case AssignOpClass =>
        assignOp(AssignOpFormat.parseFrom(bytes))
      case AddOpClass =>
        addOp(AddOpFormat.parseFrom(bytes))
      case RemoveOpClass =>
        removeOp(RemoveOpFormat.parseFrom(bytes))
      case AWCartEntryClass =>
        awCartEntry(AWCartEntryFormat.parseFrom(bytes))
      case ClearClass => ClearOp
      case _ =>
        throw new IllegalArgumentException(s"can't deserialize object of type ${clazz}")
    }
  }

  // --------------------------------------------------------------------------------
  //  toBinary helpers: domain object -> protobuf builder
  // --------------------------------------------------------------------------------

  private def addOpFormatBuilder(op: AddOp): AddOpFormat.Builder =
    AddOpFormat.newBuilder.setEntry(payloadSerializer.payloadFormatBuilder(op.entry.asInstanceOf[AnyRef]))

  private def removeOpFormatBuilder(op: RemoveOp): RemoveOpFormat.Builder =
    RemoveOpFormat.newBuilder.setEntry(payloadSerializer.payloadFormatBuilder(op.entry.asInstanceOf[AnyRef]))

  private def assignOpFormatBuilder(op: AssignOp): AssignOpFormat.Builder =
    AssignOpFormat.newBuilder.setValue(payloadSerializer.payloadFormatBuilder(op.value.asInstanceOf[AnyRef]))

  private def updateOpFormatBuilder(op: UpdateOp): UpdateOpFormat.Builder =
    UpdateOpFormat.newBuilder.setDelta(payloadSerializer.payloadFormatBuilder(op.delta.asInstanceOf[AnyRef]))

  private def valueUpdatedFormat(valueUpdated: ValueUpdated): ValueUpdatedFormat.Builder =
    ValueUpdatedFormat.newBuilder.setOperation(payloadSerializer.payloadFormatBuilder(valueUpdated.operation.asInstanceOf[AnyRef]))

  private def awCartEntryFormatBuilder(orCartEntry: AWCartEntry[_]): AWCartEntryFormat.Builder = {
    val builder = AWCartEntryFormat.newBuilder
    builder.setKey(payloadSerializer.payloadFormatBuilder(orCartEntry.key.asInstanceOf[AnyRef]))
    builder.setQuantity(orCartEntry.quantity)
    builder
  }

  // Serializes every versioned entry of the partially-ordered log.
  private def pologBuilder(polog: POLog): POLogFormat.Builder = {
    val builder = POLogFormat.newBuilder
    polog.log.foreach { ve =>
      builder.addVersionedEntries(commonSerializer.versionedFormatBuilder(ve))
    }
    builder
  }

  private def crdtFormatBuilder(c: CRDT[_]): CRDTPureOpFormat.Builder = {
    CRDTPureOpFormat.newBuilder.setPolog(pologBuilder(c.polog)).setState(payloadSerializer.payloadFormatBuilder(c.state.asInstanceOf[AnyRef]))
  }

  // --------------------------------------------------------------------------------
  //  fromBinary helpers: protobuf format -> domain object
  // --------------------------------------------------------------------------------

  private def addOp(opFormat: AddOpFormat): AddOp =
    AddOp(payloadSerializer.payload(opFormat.getEntry))

  private def removeOp(opFormat: RemoveOpFormat): RemoveOp =
    RemoveOp(payloadSerializer.payload(opFormat.getEntry))

  private def assignOp(opFormat: AssignOpFormat): AssignOp =
    AssignOp(payloadSerializer.payload(opFormat.getValue))

  private def updateOp(opFormat: UpdateOpFormat): UpdateOp =
    UpdateOp(payloadSerializer.payload(opFormat.getDelta))

  private def valueUpdated(valueUpdatedFormat: ValueUpdatedFormat): ValueUpdated =
    ValueUpdated(payloadSerializer.payload(valueUpdatedFormat.getOperation))

  private def awCartEntry(orCartEntryFormat: AWCartEntryFormat): AWCartEntry[Any] =
    AWCartEntry(payloadSerializer.payload(orCartEntryFormat.getKey), orCartEntryFormat.getQuantity)

  private def polog(pologFormat: POLogFormat): POLog = {
    val rs = pologFormat.getVersionedEntriesList.iterator.asScala.foldLeft(Set.empty[Versioned[Any]]) {
      case (acc, r) => acc + commonSerializer.versioned(r)
    }
    POLog(rs)
  }

  private def crdt(crdtFormat: CRDTPureOpFormat): CRDT[_] =
    CRDT(polog(crdtFormat.getPolog), payloadSerializer.payload(crdtFormat.getState))
}
| RBMHTechnology/eventuate | eventuate-crdt-pure/src/main/scala/com/rbmhtechnology/eventuate/crdt/pure/CRDTSerializer.scala | Scala | apache-2.0 | 6,707 |
package com.github.scalaz_examples.util
import scalaz._
import Scalaz._
/** A tour of scalaz's shorthand syntax for Option, booleans, and values. */
object ShortHandExamples extends App {
  // working with Option
  val leftOpt = 1.some // shorthand for Some(1)
  val rightOpt = 3.some

  // short-hand for if condition, return Option(data)
  (1 < 10) option 1
  // Some(1)
  (1 > 10) option 1
  // None

  // match against both
  // tuple is different than ->, since it uses Apply, which is like point, which will create a new Option[(_, _)]
  leftOpt tuple rightOpt match {
    case Some((left, right)) => (left, right) // (1, 3)
    case None => assert(false, "shouldn't happen")
  }
  leftOpt tuple none match {
    case Some((_,_)) => assert(false, "shouldn't happen")
    case None => "None found!"
  }

  // shorthand for getOrElse
  leftOpt | 20
  // 1
  none | 20
  // 20

  // shorthand for getOrElse Monoid#zero (for Int, zero is 0)
  ~leftOpt
  // 1

  // have you ever wished you could map from a type to another type (not a monad[A] to monad[B], but "i have an A, give me a B")?
  20 |> {(value) => "hi"}
  // "hi"

  // with the |> syntax, you can apply the function after the expression has completed
  1 + 2 + 3 |> {_.point[List]}
  // List(6)
  1 + 2 + 3 |> {_ * 6}
  // 36

  // what if you are working with java, and something can be null and you want to getOrElse it?
  val javaVal: String = null
  javaVal ?? "not null!"
  // "not null!"

  // have you ever missed Java's ? true : false syntax?
  // you can do something similar now on any boolean
  true ? "worked" | "broken"
  // "worked"
  // the eval of true and false is lazy, so if true, then false doesn't eval!
  // second, until the | is given, the function does nothing, so you could make true ? "default" a method!
  def canDo = false ? "should not have come"
  canDo | "couldn't do!"
  // "couldn't do!"

  // ranges
  1 |-> 10
  // List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

  // have you ever wanted to "lift" a value into a applicative functor and if the value matches some expression
  // to apply a different constructor? Neither have I, but thats where visit comes in!
  42 visit { case x if x % 2 == 0 => List(x / 2) }
  // List(21)
  43 visit { case x if x % 2 == 0 => List(x / 2) }
  // List(43)
  // if the partial function matches the element, then the function is applied
  // if not, then Pointed[F].point(_) is used
}
| dcapwell/scalaz-examples | src/main/scala/com/github/scalaz_examples/util/ShortHandExamples.scala | Scala | mit | 2,319 |
package akka.persistence.hazelcast.journal
import akka.actor.ActorRef
import scala.collection.mutable
/** Mixin holding the tag -> subscriber-actors multimap for the journal.
  * NOTE(review): only non-emptiness is exposed here; mutation presumably
  * happens in the mixing-in journal actor. */
private[hazelcast] trait TagSubscribersTrait {
  // tag name -> set of actors subscribed to events carrying that tag
  private val tagSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef]
  // True when at least one tag has a subscriber.
  protected def hasTagSubscribers: Boolean = tagSubscribers.nonEmpty
}
| dlisin/akka-persistence-hazelcast | src/main/scala/akka/persistence/hazelcast/journal/TagSubscribersTrait.scala | Scala | apache-2.0 | 346 |
package showmyns
import javax.swing.SwingUtilities
/** Application entry point: opens the network-state window on the Swing EDT. */
object MainClass {
  def main(args: Array[String]) {
    val useOrgLayout = args.contains("--orglayout")
    // All Swing work must run on the event-dispatch thread.
    val showUi = new Runnable {
      def run() {
        val mainFrame = new MFrame2("Show my network state", useOrgLayout)
        // DISPOSE_ON_CLOSE rather than EXIT_ON_CLOSE: keeps the JVM alive,
        // which is required for interoperability with nailgun.
        mainFrame.setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
      }
    }
    SwingUtilities.invokeLater(showUi)
  }
}
| gargarozz/ShowMyNS | src/main/scala/showmyns/Main.scala | Scala | bsd-3-clause | 435 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.ingest
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.{Job, JobStatus, Mapper}
import org.geotools.data.DataUtilities
import org.geotools.factory.Hints
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.jobs.GeoMesaConfigurator
import org.locationtech.geomesa.jobs.mapreduce.{GeoMesaOutputFormat, JobWithLibJars}
import org.locationtech.geomesa.tools.Command
import org.locationtech.geomesa.tools.ingest.AbstractIngest.StatusCallback
import org.opengis.feature.simple.SimpleFeature
/**
* Abstract class that handles configuration and tracking of the remote job
*/
/** Configures, submits, and monitors a map-only MapReduce ingest job.
  * Subclasses supply the input format (which does the actual parsing into
  * SimpleFeatures) and how to read the written/failed counters.
  *
  * @param dsParams     data store connection parameters for the output format
  * @param typeName     simple feature type being ingested
  * @param paths        input paths, joined into the job's input path list
  * @param libjarsFile  resource listing the libjars to ship with the job
  * @param libjarsPaths candidate search locations for those jars
  */
abstract class AbstractIngestJob(dsParams: Map[String, String],
                                 typeName: String,
                                 paths: Seq[String],
                                 libjarsFile: String,
                                 libjarsPaths: Iterator[() => Seq[File]]) extends JobWithLibJars {

  def inputFormatClass: Class[_ <: FileInputFormat[_, SimpleFeature]]
  // Counter accessors: number of features ingested / failed so far.
  def written(job: Job): Long
  def failed(job: Job): Long

  /** Submits the job and polls until completion, reporting mapper progress
    * through `statusCallback` every 500 ms.
    *
    * @return (written, failed) counter totals
    */
  def run(statusCallback: StatusCallback): (Long, Long) = {
    val job = Job.getInstance(new Configuration, "GeoMesa Tools Ingest")

    setLibJars(job, libjarsFile, libjarsPaths)
    configureJob(job)

    Command.user.info("Submitting job - please wait...")
    job.submit()
    Command.user.info(s"Tracking available at ${job.getStatus.getTrackingUrl}")

    def counters = Seq(("ingested", written(job)), ("failed", failed(job)))

    while (!job.isComplete) {
      if (job.getStatus.getState != JobStatus.State.PREP) {
        // we don't have any reducers, just track mapper progress
        statusCallback("", job.mapProgress(), counters, done = false)
      }
      Thread.sleep(500)
    }
    statusCallback("", job.mapProgress(), counters, done = true)

    if (!job.isSuccessful) {
      Command.user.error(s"Job failed with state ${job.getStatus.getState} due to: ${job.getStatus.getFailureInfo}")
    }

    (written(job), failed(job))
  }

  /** Wires up the map-only job: input format, GeoMesa output format, and the
    * feature type / data store configuration. Speculative execution is
    * disabled so features are not written twice. */
  def configureJob(job: Job): Unit = {
    job.setJarByClass(getClass)
    job.setMapperClass(classOf[IngestMapper])
    job.setInputFormatClass(inputFormatClass)
    job.setOutputFormatClass(classOf[GeoMesaOutputFormat])
    job.setMapOutputKeyClass(classOf[Text])
    job.setOutputValueClass(classOf[ScalaSimpleFeature])
    job.setNumReduceTasks(0)
    job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
    job.getConfiguration.set("mapred.reduce.tasks.speculative.execution", "false")
    job.getConfiguration.set("mapreduce.job.user.classpath.first", "true")

    FileInputFormat.setInputPaths(job, paths.mkString(","))
    GeoMesaConfigurator.setFeatureTypeOut(job.getConfiguration, typeName)
    GeoMesaOutputFormat.configureDataStore(job, dsParams)
  }
}
/**
* Takes the input and writes it to the output - all our main work is done in the input format
*/
/** Pass-through mapper: the input format already produced the SimpleFeature,
  * so this just marks it to keep its provided feature id and emits it. */
class IngestMapper extends Mapper[LongWritable, SimpleFeature, Text, SimpleFeature] with LazyLogging {

  type Context = Mapper[LongWritable, SimpleFeature, Text, SimpleFeature]#Context

  // The output key is unused by the output format; a single empty Text is reused.
  private val emptyKey = new Text

  override def map(key: LongWritable, sf: SimpleFeature, context: Context): Unit = {
    logger.debug(s"map key ${key.toString}, map value ${DataUtilities.encodeFeature(sf)}")
    // Preserve the feature id assigned by the input format instead of
    // letting the data store generate a new one.
    sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
    context.write(emptyKey, sf)
  }
}
| ddseapy/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/ingest/AbstractIngestJob.scala | Scala | apache-2.0 | 4,130 |
/*
* Server.scala
* (ScalaCollider)
*
* Copyright (c) 2008-2021 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.synth
import de.sciss.audiofile.{AudioFileType, SampleFormat}
import de.sciss.model.Model
import de.sciss.numbers.{IntFunctions => ri}
import de.sciss.osc
import de.sciss.osc.{Browser, TCP, UDP}
import de.sciss.processor.Processor
import de.sciss.synth.impl.ServerImpl
import java.net.{DatagramSocket, InetAddress, ServerSocket}
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.Try
object Server extends ServerPlatform {
def default: Server = ServerImpl.default
/** The base trait for `Config` and `ConfigBuilder` describes the settings used to boot scsynth in
* realtime or non-realtime mode, as well as its server address and port.
*
* You obtain a `ConfigBuilder` by calling `Server.Config()`. This builder can then be mutated and
* will be implicitly converted to an immutable `Config` when required.
*
* See `ConfigBuilder` for its default values.
*
* @see [[de.sciss.synth.Server.ConfigBuilder]]
* @see [[de.sciss.synth.Server.Config]]
*/
trait ConfigLike {
/** The path to `scsynth`, used when booting a server. This can be either a relative path
* (relating to the JVM's working directory), or an absolute path.
*
* @see [[de.sciss.synth.Server#defaultProgram]]
*/
def program: String
/** The maximum number of control bus channels. */
def controlBusChannels: Int
/** The maximum number of audio bus channels. This includes the channels connected
* to hardware (`outputBusChannels`) as well as all channels for internal routing.
*/
def audioBusChannels: Int
/** The number of connected audio hardware output channels. This does not need to
* correspond to the actual number of channels your sound card provides, but can
* be lower or higher, although a higher value doesn't have any effect as channel
* indices above the number of channels of the sound card will be treated as
* internal channels.
*/
def outputBusChannels: Int
/** The calculation block size. That is, the number of audio samples calculated en-bloc.
* This corresponds with the control rate, such that
* `controlRate := audioRate / blockSize`. It should be a power of two.
*/
def blockSize: Int
/** The audio hardware sampling rate to use. A value of `0` indicates that scsynth
* should use the current sampling rate of the audio hardware. An explicit setting
* will make scsynth try to switch the sound card's sample rate if necessary.
*/
def sampleRate: Int
/** The maximum number of audio buffers (for the `Buffer` class). */
def audioBuffers: Int
/** The maximum number of concurrent nodes (synths and groups). */
def maxNodes: Int
/** The maximum number of synth defs. */
def maxSynthDefs: Int
/** The maximum number of pre-allocated realtime memory in bytes. This memory
* is used for many UGens such as `Limiter`, `DelayN` etc. It does not
* affect dynamically allocated memory such as audio buffers.
*/
def memorySize: Int
/** The maximum number of concurrent connections between UGens in a single synth.
* ScalaCollider performs a depth-first topological sorting of the synth defs,
* so you should not worry too much about this value. It can become important
* in very heavy channel expansions and mix-down.
*
* This value will be automatically increased if a more complex def is loaded
* at startup, but it cannot be increased thereafter without rebooting.
*/
def wireBuffers: Int
/** The number of individual random number generators allocated. */
def randomSeeds: Int
/** Whether scsynth should load synth definitions stored on the hard-disk when booted. */
def loadSynthDefs: Boolean
/** ? */
def machPortName: Option[(String, String)]
/** The verbosity level of scsynth. The standard value is `0`, while
* `-1` suppresses informational messages, `-2` also suppresses many error messages.
*/
def verbosity: Int
/** An explicit list of paths where DSP plugins are found. Usually this is not
* specified, and scsynth looks for plugins in their default location.
*/
def plugInsPaths: List[String]
/** An option to restrict access to files (e.g. for loading and saving buffers) to
* a particular directory. This is a security measure, preventing malicious clients from
* accessing parts of the hard-disk which they shouldn't.
*/
def restrictedPath: Option[String]
// ---- realtime only ----
/** (Realtime) Host address of scsynth, when trying to `connect` to an already running server on the net. */
def host: String
/** (Realtime) UDP or TCP port used by scsynth. */
def port: Int
/** (Realtime) Open Sound Control transport used by scsynth. (Either of `UDP` and `TCP`). */
def transport: osc.Transport
/** (Realtime) An option to enable particular input 'streams' or 'bundles' of a sound card.
* This is a 'binary' String made of `'0'` and `'1'` characters.
* If the string is `"01100"`, for example, then only the second and third input streams on
* the device will be enabled.
*/
def inputStreamsEnabled: Option[String]
/** (Realtime) An option to enable particular output 'streams' or 'bundles' of a sound card.
* This is a 'binary' String made of `'0'` and `'1'` characters.
* If the string is `"01100"`, for example, then only the second and third output streams on
* the device will be enabled.
*/
def outputStreamsEnabled: Option[String]
/** (Realtime) An option denoting the name of the sound card to use. On systems which distinguish
* input and output devices (OS X), this implies that both are the same. Otherwise, you can
* use the `deviceNames` method instead.
*
* @see deviceNames
*/
def deviceName: Option[String]
/** (Realtime) An option denoting the name of the input and output sound device to use. This is for
* systems which distinguish input and output devices (OS X). If you use a single device both for
* input and output (applies to most professional audio interfaces), you can simply use the
* single string method `deviceName`.
*
* @see deviceName
*/
def deviceNames: Option[(String, String)]
/** (Realtime) The number of connected audio hardware input channels. This does not need to
* correspond to the actual number of channels your sound card provides, but can
* be lower or higher, although a higher value doesn't have any effect as channel
* indices above the number of channels of the sound card will be treated as
* internal channels.
*/
def inputBusChannels: Int
/** (Realtime) A value to adjust the sound card's hardware block size. Typically you will leave
* this to `0` which means that the current block size is used. The block sizes supported depend
* on the particular sound card. Lower values decrease latency but may increase CPU load.
*/
def hardwareBlockSize: Int
/** (Realtime) Whether to announce scsynth's OSC service via zero conf. See
* [[http://en.wikipedia.org/wiki/Zero_configuration_networking Wikipedia]] for more details.
*/
def zeroConf: Boolean
/** (Realtime) The maximum number of client connections when using TCP transport. */
def maxLogins: Int
/** (Realtime) A requires session password when using TCP transport. When using TCP and the password option
* is set, each client must send the correct password as the first command to the server, otherwise it is
* rejected.
*/
def sessionPassword: Option[String]
// ---- non-realtime only ----
/** (Non-Realtime) Path to the binary OSC file. */
def nrtCommandPath: String
/** (Non-Realtime) Path to the audio input file used as audio input bus supplement. */
def nrtInputPath: Option[String]
/** (Non-Realtime) Path to the audio output file used as audio output bus supplement. */
def nrtOutputPath: String
/** (Non-Realtime) Audio file format for writing the output. */
def nrtHeaderFormat: AudioFileType
/** (Non-Realtime) Audio sample format for writing the output. */
def nrtSampleFormat: SampleFormat
/** Produces a command line for booting scsynth in realtime mode. */
final def toRealtimeArgs: List[String] = Config.toRealtimeArgs(this)
/** Produces a command line for booting scsynth in non-realtime mode. */
final def toNonRealtimeArgs: List[String] = Config.toNonRealtimeArgs(this)
/** A utility method providing the audio bus offset for the start of
* the internal channels. (simply the sum of `outputBusChannels` and `inputBusChannels`).
*/
final def internalBusIndex: Int = outputBusChannels + inputBusChannels
}
object Config {
/** Creates a new configuration builder with default settings */
def apply(): ConfigBuilder = new ConfigBuilder()
/** Implicit conversion which allows you to use a `ConfigBuilder`
* wherever a `Config` is required.
*/
implicit def build(cb: ConfigBuilder): Config = cb.build
private[Server] def toNonRealtimeArgs(o: ConfigLike): List[String] = {
val b = List.newBuilder[String]
// -N <cmd-filename> <input-filename> <output-filename> <sample-rate> <header-format> <sample-format> <...other scsynth arguments>
b += o.program
b += "-N"
b += o.nrtCommandPath
b += o.nrtInputPath.getOrElse("_")
b += o.nrtOutputPath
b += o.sampleRate.toString
b += o.nrtHeaderFormat.id
b += o.nrtSampleFormat.id
addCommonArgs( o, b )
b.result()
}
private[Server] def toRealtimeArgs(o: ConfigLike): List[String] = {
val b = List.newBuilder[String]
b += o.program
o.transport match {
case TCP =>
b += "-t"
b += o.port.toString
case UDP | Browser =>
b += "-u"
b += o.port.toString
}
if (o.host != "0.0.0.0") {
b += "-B"
b += o.host
}
addCommonArgs(o, b)
if (o.hardwareBlockSize != 0) {
b += "-Z"
b += o.hardwareBlockSize.toString
}
if (o.sampleRate != 0) {
b += "-S"
b += o.sampleRate.toString
}
if (o.maxLogins != 64) {
b += "-l"
b += o.maxLogins.toString
}
o.sessionPassword.foreach { pwd =>
b += "-p"
b += pwd
}
o.inputStreamsEnabled.foreach { stream =>
b += "-I"
b += stream
}
o.outputStreamsEnabled.foreach { stream =>
b += "-O"
b += stream
}
if (!o.zeroConf) {
b += "-R"
b += "0"
}
o.deviceNames.foreach { case (inDev, outDev) =>
b += "-H"
b += inDev
b += outDev
}
o.deviceName.foreach { n =>
b += "-H"
b += n
}
o.restrictedPath.foreach { path =>
b += "-P"
b += path
}
b.result()
}
private[Server] def addCommonArgs(o: ConfigLike, b: mutable.Builder[String, Any]): Unit = {
// some dude is going around changing scsynth
// defaults without thinking about the consequences.
// we now pessimistically pass all options and do
// not assume any longer defaults.
// if (o.controlBusChannels != 4096) {
b += "-c"
b += o.controlBusChannels.toString
// }
// if (o.audioBusChannels != 128) {
b += "-a"
b += o.audioBusChannels.toString
// }
// if (o.inputBusChannels != 8) {
b += "-i"
b += o.inputBusChannels.toString
// }
// if (o.outputBusChannels != 8) {
b += "-o"
b += o.outputBusChannels.toString
// }
// if (o.blockSize != 64) {
b += "-z"
b += o.blockSize.toString
// }
// if (o.audioBuffers != 1024) {
b += "-b"
b += o.audioBuffers.toString
// }
// if (o.maxNodes != 1024) {
b += "-n"
b += o.maxNodes.toString
// }
// if (o.maxSynthDefs != 1024) {
b += "-d"
b += o.maxSynthDefs.toString
// }
// if (o.memorySize != 8192) {
b += "-m"
b += o.memorySize.toString
// }
// if (o.wireBuffers != 64) {
b += "-w"
b += o.wireBuffers.toString
// }
// if (o.randomSeeds != 64) {
b += "-r"
b += o.randomSeeds.toString
// }
if (!o.loadSynthDefs) {
b += "-D"
b += "0"
}
o.machPortName.foreach {
case (send, reply) =>
b += "-M"
b += send
b += reply
}
if (o.verbosity != 0) {
b += "-V"
b += o.verbosity.toString
}
if (o.plugInsPaths.nonEmpty) {
b += "-U"
b += o.plugInsPaths.mkString(":")
}
}
}
/** @see [[de.sciss.synth.Server.ConfigBuilder]]
* @see [[de.sciss.synth.Server.ConfigLike]]
*/
final class Config private[Server](val program: String,
val controlBusChannels: Int,
val audioBusChannels: Int,
val outputBusChannels: Int,
val blockSize: Int,
val sampleRate: Int,
val audioBuffers: Int,
val maxNodes: Int,
val maxSynthDefs: Int,
val memorySize: Int,
val wireBuffers: Int,
val randomSeeds: Int,
val loadSynthDefs: Boolean,
val machPortName: Option[(String, String)],
val verbosity: Int,
val plugInsPaths: List[String],
val restrictedPath: Option[String],
/* val memoryLocking: Boolean, */
val host: String,
val port: Int,
val transport: osc.Transport,
val inputStreamsEnabled: Option[String],
val outputStreamsEnabled: Option[String],
val deviceNames: Option[(String, String)],
val deviceName: Option[String],
val inputBusChannels: Int,
val hardwareBlockSize: Int,
val zeroConf: Boolean,
val maxLogins: Int,
val sessionPassword: Option[String],
val nrtCommandPath: String,
val nrtInputPath: Option[String],
val nrtOutputPath: String,
val nrtHeaderFormat: AudioFileType,
val nrtSampleFormat: SampleFormat)
extends ConfigLike {
override def toString = "Server.Config"
}
object ConfigBuilder {
  /** Creates a new builder pre-populated with all settings of the given config. */
  def apply(config: Config): ConfigBuilder = {
    val builder = new ConfigBuilder
    builder.read(config)
    builder
  }
}
/** @see [[de.sciss.synth.Server.Config]]
* @see [[de.sciss.synth.Server.ConfigLike]]
*/
final class ConfigBuilder private[Server]() extends ConfigLike {
/** The default `program` is read from `defaultProgram`
*
* @see [[de.sciss.synth.Server#defaultProgram]]
*/
var program: String = defaultProgram
private[this] var controlBusChannelsVar = 4096
/** The default number of control bus channels is `4096` (scsynth default).
* Must be greater than zero and a power of two.
*/
def controlBusChannels: Int = controlBusChannelsVar
/** The default number of control bus channels is `4096` (scsynth default).
* Must be greater than zero and a power of two.
*/
def controlBusChannels_=(value: Int): Unit = {
require (value > 0 && ri.isPowerOfTwo(value))
controlBusChannelsVar = value
}
private[this] var audioBusChannelsVar = 128
/** The default number of audio bus channels is `128` (scsynth default).
* Must be greater than zero and a power of two.
* When the builder is converted to a `Config`, this value may be increased
* to ensure that `audioBusChannels > inputBusChannels + outputBusChannels`.
*/
def audioBusChannels: Int = audioBusChannelsVar
/** The default number of audio bus channels is `128` (scsynth default).
* Must be greater than zero and a power of two.
* When the builder is converted to a `Config`, this value may be increased
* to ensure that `audioBusChannels > inputBusChannels + outputBusChannels`.
*/
def audioBusChannels_=(value: Int): Unit = {
require (value > 0 && ri.isPowerOfTwo(value))
audioBusChannelsVar = value
}
private[this] var outputBusChannelsVar = 8
/** The default number of output bus channels is `8` (scsynth default) */
def outputBusChannels: Int = outputBusChannelsVar
/** The default number of output bus channels is `8` (scsynth default) */
def outputBusChannels_=(value: Int): Unit = {
require (value >= 0)
outputBusChannelsVar = value
}
private[this] var blockSizeVar = 64
/** The default calculation block size is `64` (scsynth default).
* Must be greater than zero and a power of two.
*/
def blockSize: Int = blockSizeVar
/** The default calculation block size is `64` (scsynth default).
* Must be greater than zero and a power of two.
*/
def blockSize_=(value: Int): Unit = {
require (value > 0 && ri.isPowerOfTwo(value))
blockSizeVar = value
}
private[this] var sampleRateVar = 0
/** The default sample rate is `0` (meaning that it is adjusted to
* the sound card's current rate; scsynth default)
*/
def sampleRate: Int = sampleRateVar
/** The default sample rate is `0` (meaning that it is adjusted to
* the sound card's current rate; scsynth default)
*/
def sampleRate_=(value: Int): Unit = {
require (value >= 0)
sampleRateVar = value
}
private[this] var audioBuffersVar = 1024
/** The default number of audio buffers is `1024` (scsynth default).
* Must be greater than zero and a power of two.
*/
def audioBuffers: Int = audioBuffersVar
/** The default number of audio buffers is `1024` (scsynth default).
* Must be greater than zero and a power of two.
*/
def audioBuffers_=(value: Int): Unit = {
require (value > 0 && ri.isPowerOfTwo(value))
audioBuffersVar = value
}
/** The default maximum number of nodes is `1024` (scsynth default) */
var maxNodes: Int = 1024
/** The default maximum number of synth defs is `1024` (scsynth default) */
var maxSynthDefs: Int = 1024
/** The default memory size is `65536` (64 KB) (higher than scsynth's default of 8 KB) */
var memorySize: Int = 65536 // 8192
/** The default number of wire buffers is `256` (higher than scsynth's default of `64`). */
var wireBuffers: Int = 256 // 64
/** The default number of random number generators is `64` (scsynth default) */
var randomSeeds: Int = 64
/** The default setting for loading synth defs is `false` (this is not the scsynth default!) */
var loadSynthDefs: Boolean = false
/** The default settings for mach port name is `None` (scsynth default) */
var machPortName: Option[(String, String)] = None
/** The default verbosity level is `0` (scsynth default).
*
* '''Note:''' currently, decreasing the verbosity prevents the server connection
* to notice when the server has booted (issue no. 98).
*/
var verbosity: Int = 0
/** The default setting for plugin path redirection is `Nil`
* (use standard paths; scsynth default)
*/
var plugInsPaths: List[String] = Nil
/** The default setting for restricting file access is `None` (scsynth default) */
var restrictedPath: Option[String] = None
// ---- realtime only ----
/** (Realtime) The default host name is `127.0.0.1`. When booting, this is used
* to force scsynth to bind to a particular address (`-B` switch). To avoid the `-B`
* switch, you can use `"0.0.0.0"` (server will be reachable via network).
*/
var host: String = "127.0.0.1"
/** (Realtime) The default port is `57110`. */
var port: Int = 57110
/** (Realtime) The default transport is `UDP`. */
var transport: osc.Transport = UDP
/** (Realtime) The default settings for enabled input streams is `None` */
var inputStreamsEnabled: Option[String] = None
/** (Realtime) The default settings for enabled output streams is `None` */
var outputStreamsEnabled: Option[String] = None
private[this] var deviceNameVar = Option.empty[String]
private[this] var deviceNamesVar = Option.empty[(String, String)]
/** (Realtime) The default input/output device names is `None` (scsynth default; it will
* use the system default sound card)
*/
def deviceName: Option[String] = deviceNameVar
/** (Realtime) The default input/output device names is `None` (scsynth default; it will
* use the system default sound card)
*/
def deviceName_=(value: Option[String]): Unit = {
deviceNameVar = value
if (value.isDefined) deviceNamesVar = None
}
/** (Realtime) The default input/output device names is `None` (scsynth default; it will
* use the system default sound card)
*/
def deviceNames: Option[(String, String)] = deviceNamesVar
/** (Realtime) The default input/output device names is `None` (scsynth default; it will
* use the system default sound card)
*/
def deviceNames_=(value: Option[(String, String)]): Unit = {
deviceNamesVar = value
if (value.isDefined) deviceNameVar = None
}
private[this] var inputBusChannelsVar = 8
/** (Realtime) The default number of input bus channels is `8` (scsynth default) */
def inputBusChannels: Int = inputBusChannelsVar
/** (Realtime) The default number of input bus channels is `8` (scsynth default) */
def inputBusChannels_=(value: Int): Unit = {
require (value >= 0)
inputBusChannelsVar = value
}
/** (Realtime) The default setting for hardware block size is `0` (meaning that
* scsynth uses the hardware's current block size; scsynth default)
*/
var hardwareBlockSize: Int = 0
/** (Realtime) The default setting for zero-conf is `false` (other than
* scsynth's default which is `true`)
*/
var zeroConf: Boolean = false
/** (Realtime) The maximum number of TCP clients is `64` (scsynth default) */
var maxLogins: Int = 64
/** (Realtime) The default TCP session password is `None` */
var sessionPassword: Option[String] = None
// ---- non-realtime only ----
var nrtCommandPath : String = ""
var nrtInputPath : Option[String] = None
var nrtOutputPath : String = ""
var nrtHeaderFormat : AudioFileType = AudioFileType.AIFF
var nrtSampleFormat : SampleFormat = SampleFormat.Float
/** Picks and assigns a random free port for the server. This implies that
* the server will be running on the local machine.
*
* As a result, this method will change this config builder's `port` value.
* The caller must ensure that the `host` and `transport` fields have been
* decided on before calling this method. Later changes of either of these
* will render the result invalid.
*
* This method will fail with runtime exception if the host is not local.
*/
def pickPort(): Unit = {
  require(isLocal)
  transport match {
    case UDP | TCP =>
      // Delegate to `allocPort`, which closes the probe socket in a
      // `finally` block — unlike the previous inline version, no socket
      // can leak if the port look-up throws.
      port = allocPort(transport)
    case Browser =>
      port = 57120 // default for now -- we could look into BrowserDriver free keys
  }
}
/** Whether the configured `host` resolves to an address on this machine
  * (loopback, site-local, or the wildcard/any-local address).
  */
def isLocal: Boolean = {
  val resolved = InetAddress.getByName(host)
  resolved.isLoopbackAddress || resolved.isSiteLocalAddress || resolved.isAnyLocalAddress
}
/** Creates an immutable `Config` snapshot of the current builder state.
  *
  * The audio bus count may be adjusted upward here so that there is room for
  * all hardware input and output buses plus at least one internal bus
  * (rounded up to the next power of two).
  */
def build: Config = {
  // hardware buses plus at least one internal bus
  val minAudioBuses = inputBusChannels + outputBusChannels + 1
  val audioBusesAdjust = if (audioBusChannels >= minAudioBuses) audioBusChannels else {
    ri.nextPowerOfTwo(minAudioBuses)
  }
  new Config(
    program             = program,
    controlBusChannels  = controlBusChannels,
    audioBusChannels    = audioBusesAdjust, /*audioBusChannels,*/
    outputBusChannels   = outputBusChannels,
    blockSize           = blockSize,
    sampleRate          = sampleRate,
    audioBuffers        = audioBuffers,
    maxNodes            = maxNodes,
    maxSynthDefs        = maxSynthDefs,
    memorySize          = memorySize,
    wireBuffers         = wireBuffers,
    randomSeeds         = randomSeeds,
    loadSynthDefs       = loadSynthDefs,
    machPortName        = machPortName,
    verbosity           = verbosity,
    plugInsPaths        = plugInsPaths,
    restrictedPath      = restrictedPath,
    /* memoryLocking, */
    host                = host,
    port                = port,
    transport           = transport,
    inputStreamsEnabled = inputStreamsEnabled,
    outputStreamsEnabled = outputStreamsEnabled,
    deviceNames         = deviceNames,
    deviceName          = deviceName,
    inputBusChannels    = inputBusChannels,
    hardwareBlockSize   = hardwareBlockSize,
    zeroConf            = zeroConf,
    maxLogins           = maxLogins,
    sessionPassword     = sessionPassword,
    nrtCommandPath      = nrtCommandPath,
    nrtInputPath        = nrtInputPath,
    nrtOutputPath       = nrtOutputPath,
    nrtHeaderFormat     = nrtHeaderFormat,
    nrtSampleFormat     = nrtSampleFormat
  )
}
def read(config: Config): Unit = {
program = config.program
controlBusChannels = config.controlBusChannels
audioBusChannels = config.audioBusChannels
outputBusChannels = config.outputBusChannels
blockSize = config.blockSize
sampleRate = config.sampleRate
audioBuffers = config.audioBuffers
maxNodes = config.maxNodes
maxSynthDefs = config.maxSynthDefs
memorySize = config.memorySize
wireBuffers = config.wireBuffers
randomSeeds = config.randomSeeds
loadSynthDefs = config.loadSynthDefs
machPortName = config.machPortName
verbosity = config.verbosity
plugInsPaths = config.plugInsPaths
restrictedPath = config.restrictedPath
host = config.host
port = config.port
transport = config.transport
inputStreamsEnabled = config.inputStreamsEnabled
outputStreamsEnabled= config.outputStreamsEnabled
deviceNames = config.deviceNames
deviceName = config.deviceName
inputBusChannels = config.inputBusChannels
hardwareBlockSize = config.hardwareBlockSize
zeroConf = config.zeroConf
maxLogins = config.maxLogins
sessionPassword = config.sessionPassword
nrtCommandPath = config.nrtCommandPath
nrtInputPath = config.nrtInputPath
nrtOutputPath = config.nrtOutputPath
nrtHeaderFormat = config.nrtHeaderFormat
nrtSampleFormat = config.nrtSampleFormat
}
}
/** Boots a server with all-default settings and no connection listener. */
def boot: ServerConnection = boot()()

/** Starts booting a server process and returns the in-progress connection.
  *
  * @param name         an arbitrary name for the server
  * @param config       the server configuration used for booting
  * @param clientConfig the client-side configuration
  * @param listener     optional listener notified of connection state changes;
  *                     `PartialFunction.empty` is the sentinel for "none"
  */
def boot(name: String = "localhost", config: Config = Config().build,
         clientConfig: Client.Config = Client.Config().build)
        (listener: ServerConnection.Listener = PartialFunction.empty): ServerConnection = {
  val connection = initBoot(name, config, clientConfig)
  if (listener ne PartialFunction.empty) connection.addListener(listener)
  connection.start()
  connection
}
// Creates (but does not start) the booting process shared by `boot` and `run`.
private def initBoot(name: String = "localhost", config: Config,
                     clientConfig: Client.Config = Client.Config().build) = {
  val (addr, c) = prepareConnection(config, clientConfig)
  // NOTE(review): the trailing `true` flag's meaning is defined by
  // `impl.Booting`'s constructor — confirm there (not visible in this file)
  new impl.Booting(name, c, addr, config, clientConfig, true)
}
/** Connects to an already running server with all-default settings and no listener. */
def connect: ServerConnection = connect()()

/** Starts connecting to an already running server and returns the
  * in-progress connection.
  *
  * @param name         an arbitrary name for the server
  * @param config       the server configuration (host/port/transport are used)
  * @param clientConfig the client-side configuration
  * @param listener     optional listener notified of connection state changes;
  *                     `PartialFunction.empty` is the sentinel for "none"
  */
def connect(name: String = "localhost", config: Config = Config().build,
            clientConfig: Client.Config = Client.Config().build)
           (listener: ServerConnection.Listener = PartialFunction.empty): ServerConnection = {
  val (addr, c) = prepareConnection(config, clientConfig)
  val connection = new impl.Connection(name, c, addr, config, clientConfig, true)
  if (listener ne PartialFunction.empty) connection.addListener(listener)
  connection.start()
  connection
}
/** Boots with default configuration; see the overload below. */
def run(code: Server => Unit): Unit = run()(code)

/** Utility method to test code quickly with a running server. This boots a
  * server and executes the passed in code when the server is up. A shutdown
  * hook is registered to make sure the server is destroyed when the VM exits.
  */
def run(config: Config = Config().build)(code: Server => Unit): Unit = {
  // val b = boot( config = config )
  val sync = new AnyRef            // lock guarding `s`
  var s: Server = null             // the running server, once available
  val sc = initBoot(config = config)
  val li: ServerConnection.Listener = {
    case ServerConnection.Running(srv) => sync.synchronized {
      s = srv
    }; code(srv)                   // user code runs outside the lock
  }
  sc.addListener(li)
  // on VM exit: quit the server if it came up, otherwise abort the boot
  Runtime.getRuntime.addShutdownHook(new Thread {
    override def run(): Unit =
      sync.synchronized {
        if (s != null) {
          if (s.condition != Server.Offline) s.quit()
        } else sc.abort()
      }
  })
  sc.start()
}
/** Creates an unconnected server proxy. This may be useful for creating NRT command files.
* Any attempt to try to send messages to the server will fail.
*/
def dummy(name: String = "dummy", config: Config = Config().build,
clientConfig: Client.Config = Client.Config().build): Server = {
val sr = config.sampleRate
val status = message.StatusReply(numUGens = 0, numSynths = 0, numGroups = 0, numDefs = 0,
avgCPU = 0f, peakCPU = 0f, sampleRate = sr, actualSampleRate = sr)
new impl.OfflineServerImpl(name, /* c, addr, */ config, clientConfig, status)
}
/** Allocates a currently free local port for the given transport by opening
  * and immediately closing a probe socket bound to an ephemeral port.
  *
  * @param transport either `TCP` or `UDP`; any other transport raises a runtime error
  * @return the ephemeral port number chosen by the OS
  */
def allocPort(transport: osc.Transport): Int =
  transport match {
    case TCP =>
      val probe = new ServerSocket(0)
      try probe.getLocalPort finally probe.close()
    case UDP =>
      val probe = new DatagramSocket()
      try probe.getLocalPort finally probe.close()
    case other =>
      sys.error(s"Unsupported transport : ${other.name}")
  }
/** Prints a labelled stack trace to the console, e.g. as a simple error callback. */
def printError(name: String, t: Throwable): Unit = {
  println(s"$name :")
  t.printStackTrace()
}
implicit def defaultGroup(s: Server): Group = s.defaultGroup
type Listener = Model.Listener[Update]
sealed trait Update
sealed trait Condition extends Update
case object Running extends Condition
case object Offline extends Condition
private[synth] case object NoPending extends Condition
final case class Counts(c: message.StatusReply) extends Update
/** Starts an NRT rendering process based on the NRT parameters of the configuration argument.
*
* '''Note:''' The returned process must be explicitly started by calling `start()`
*
* @param dur the duration of the bounce, used to emit process updates
* @param config the server configuration in which `nrtCommandPath` must be set
*
* @return the process whose return value is the process exit code of scsynth (0 indicating success)
*/
def renderNRT(dur: Double, config: Server.Config): Processor[Int] with Processor.Prepared =
new impl.NRTImpl(dur, config)
/** Queries the version of the default scsynth program. */
def version: Try[(String, String)] = version()

/** Runs `scsynth -v` and parses its report.
  *
  * Expects output shaped like `"scsynth <version> (<build-info>) ..."`.
  *
  * @param config only its `program` field is used, to locate the binary
  * @return on success, a tuple of (version string, build string); the build
  *         string is empty when no parenthesized build info is found
  */
def version(config: Config = Config().build): Try[(String, String)] = Try {
  import scala.sys.process._
  val output = Seq(config.program, "-v").!!
  // version starts after the first space (i.e. after the program name) ...
  val i  = output.indexOf(' ') + 1
  // ... and ends at the next space, else at the next newline, else at the end
  val j0 = output.indexOf(' ', i)
  val j1 = if (j0 > i) j0 else output.indexOf('\n', i)
  val j  = if (j1 > i) j1 else output.length
  // optional build info enclosed in parentheses after the version
  val k  = output.indexOf('(', j) + 1
  val m  = output.indexOf(')', k)
  val version = output.substring(i, j)
  val build   = if (m > k) output.substring(k, m) else ""
  (version, build)
}
}
sealed trait ServerLike {
def name : String
def config: Server.Config
def addr : Server.Address // InetSocketAddress
}
object ServerConnection {
type Listener = Model.Listener[Condition]
sealed abstract class Condition
case class Preparing(server: Server) extends Condition
case class Running (server: Server) extends Condition
case object Aborted extends Condition
}
trait ServerConnection extends ServerLike with Model[ServerConnection.Condition] {
def abort(): Unit
}
/** The client-side representation of the SuperCollider server.
*
* Additional operations are available by importing `Ops._`.
*/
trait Server extends ServerLike with Model[Server.Update] {
server =>
import de.sciss.synth.Server._
val clientConfig : Client.Config
def rootNode : Group
def defaultGroup : Group
def nodeManager : NodeManager
def bufManager : BufferManager
def isLocal : Boolean
def isConnected : Boolean
def isRunning : Boolean
def isOffline : Boolean
def nextNodeId(): Int
def nextSyncId(): Int
def allocControlBus(numChannels: Int): Int
def allocAudioBus (numChannels: Int): Int
def freeControlBus(index: Int): Unit
def freeAudioBus (index: Int): Unit
def allocBuffer(numChannels: Int): Int
def freeBuffer (index : Int): Unit
/** Sends out an OSC packet without waiting for any replies. */
def ! (p: osc.Packet): Unit
/** Sends out an OSC packet that generates some kind of reply, and
* returns immediately. It registers a handler to parse that reply.
* The handler is tested for each incoming OSC message (using its
* `isDefinedAt` method) and invoked and removed in case of a
* match, completing the returned future.
*
* If the handler does not match in the given timeout period,
* the future fails with a `Timeout` exception, and the handler is removed.
*
* @param packet the packet to send out
* @param timeout the timeout duration
* @param handler the handler to match against incoming messages
* @return a future of the successfully completed handler or timeout exception
*
* @see [[de.sciss.synth.message.Timeout]]
*/
def !! [A](packet: osc.Packet, timeout: Duration = 6.seconds)(handler: PartialFunction[osc.Message, A]): Future[A]
/** The last reported server data, such as number of synths and groups, sample rate. */
def counts: message.StatusReply
/** Shortcut to `counts.sampleRate`. */
def sampleRate: Double
def condition: Condition
/** Starts a repeatedly running status watcher that updates the `condition` and `counts`
* information.
*/
def startAliveThread(delay: Float = 0.25f, period: Float = 0.25f, deathBounces: Int = 25): Unit
def stopAliveThread(): Unit
/** Shortcut to `this ! message.Status`. If the 'alive thread' is running,
* it will take care of querying the counts frequently.
*/
def queryCounts(): Unit
/** Allocates a new unique synchronization identifier,
* and returns the corresponding `/sync` message.
*/
def syncMsg(): message.Sync
def dumpOSC (mode: osc.Dump = osc.Dump.Text, filter: osc.Packet => Boolean = _ => true): Unit
def dumpInOSC (mode: osc.Dump = osc.Dump.Text, filter: osc.Packet => Boolean = _ => true): Unit
def dumpOutOSC(mode: osc.Dump = osc.Dump.Text, filter: osc.Packet => Boolean = _ => true): Unit
/** Sends a `quitMsg` and then invokes `dispose()`. */
def quit(): Unit
def quitMsg: message.ServerQuit.type
/** Disconnects the client, and frees any resources on the client-side. */
def dispose(): Unit
private[synth] def addResponder (resp: message.Responder): Unit
private[synth] def removeResponder(resp: message.Responder): Unit
override def toString = s"<$name>"
} | Sciss/ScalaCollider | shared/src/main/scala/de/sciss/synth/Server.scala | Scala | lgpl-2.1 | 37,624 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.bib.parser
import scala.languageFeature.postfixOps
/** Abstract syntax tree for BibTeX documents, consumed by [[DocumentParser]] below. */
private[parser] object AST {
  // these types all have very generic names, so wrap them in an "AST" prefix
  sealed trait Entry
  sealed trait Value
  // A whole .bib source: the ordered list of its entries.
  final case class Document(entries: List[Entry])
  // @string{abbrev = value} — binds an abbreviation for use in later values.
  final case class StringEntry(abbrev: String, value: Value) extends Entry
  // @preamble{...} entry content.
  final case class PreambleEntry(content: Value) extends Entry
  // @comment{...} or free-standing text between entries.
  final case class CommentEntry(comment: String) extends Entry
  // An ordinary entry, e.g. @article{citationKey, tag = value, ...}.
  final case class RegularEntry(
    ty: String, citationKey: String, tags: List[(String, Value)]) extends Entry
  // A quoted, braced, or numeric literal value.
  final case class Literal(content: String) extends Value
  // A reference to a previously declared @string abbreviation.
  final case class Abbrev(name: String) extends Value
  // Two values joined with the '#' concatenation operator.
  final case class Concat(left: Value, right: Value) extends Value
}
private[parser] object DocumentParser {
  import AST._

  /**
   * Parses a complete BibTeX source string.
   * Returns Right(document) on success, or Left carrying the combinator
   * library's failure report (message and position) as a string.
   */
  def parseString(input: String): Either[String, Document] = {
    val res = Impl.parseAll(Impl.bibTex, input)
    res.map(r => Right(Document(r))).getOrElse(Left(res.toString))
  }

  // Combinator grammar for BibTeX. Entry keywords (@comment/@string/@preamble)
  // match case-insensitively; entry bodies accept either braces or parentheses.
  object Impl extends SharedParsers {
    // A document: one or more entries, each optionally surrounded by free
    // comment text or whitespace (both kept as CommentEntry values).
    lazy val bibTex =
      ((freeComment | WS2) ~> anyEntry <~ (freeComment | WS2)).+ ^^
        (_ flatMap { case x => List(x): List[Entry] })
    // FIXME: lines starting with %%% are comments
    // Any run of characters up to the next '@' is treated as a free comment.
    lazy val freeComment = "[^@]*".? ^^ (s => CommentEntry(s.getOrElse("")))
    lazy val WS2 = WS ^^ (CommentEntry(_))
    lazy val anyEntry = AT ~> (commentEntry | stringEntry | preambleEntry | regularEntry)
    // @comment bodies may be {...}, (...), or the rest of the line.
    lazy val commentEntry =
      COMMENT ~> (WS ~> (('{' ~> "[^}]*" <~ '}') | ('(' ~> "[^)]*" <~ ')')) | "[^@\r\n]*") ^^
        (CommentEntry(_))
    lazy val stringEntry = STRING ~> WS ~> entryBody { tag } ^^ (StringEntry(_, _)).tupled
    lazy val preambleEntry = PREAMBLE ~> WS ~> entryBody { value } ^^ (PreambleEntry(_))
    // e.g. @article{key, author = {...}, year = 1999}
    lazy val regularEntry =
      (SYMBOL <~ WS) ~ entryBody { SYMBOL_CAN_START_WITH_NUMBER ~ rep((COMMA_WS | WS) ~> tag) <~ COMMA_WS.? } ^^ {
        case ty ~ (key ~ tags) => RegularEntry(ty, key, tags)
      }
    // Wraps a body parser in either { ... } or ( ... ) delimiters.
    def entryBody[T](parser: => Parser[T]): Parser[T] = {
      lazy val p = parser
      ("\\{\\s*" ~> p <~ "\\s*\\}") |
        ("\\(\\s*" ~> p <~ "\\s*\\)")
    }
    // tagName = value
    lazy val tag = (SYMBOL <~ "\\s*=\\s*") ~ value ^^ {
      case sym ~ v => (sym, v)
    }
    // Values may be chained with '#'; the Concat tree associates to the right.
    lazy val value: Parser[Value] = literalOrSymbol ~ ("\\s*#\\s*" ~> value).? ^^ {
      case left ~ Some(right) => Concat(left, right)
      case left ~ _ => left
    }
    lazy val literalOrSymbol = (SYMBOL ^^ (Abbrev(_))) | literal
    lazy val literal = numericLiteral | braceDelimitedNoOuterLiteral | quoteDelimitedLiteral
    lazy val numericLiteral = "\\d+(\\.\\d+)?" ^^ (Literal(_))
    // Inside "..." both nested brace groups and backslash escapes are allowed.
    lazy val quoteDelimitedLiteral =
      '"' ~> (BRACE_DELIMITED_STRING | """\\.""" | """[^"]""").* <~ '"' ^^ (xs => Literal(xs.mkString))
    lazy val braceDelimitedNoOuterLiteral = BRACE_DELIMITED_STRING_NO_OUTER ^^ (Literal(_))

    // Token definitions (c/r presumably come from SharedParsers).
    lazy val AT = c('@')
    lazy val COMMA_WS = r("\\s*,\\s*")
    lazy val COMMENT = r("(c|C)(o|O)(m|M)(m|M)(e|E)(n|N)(t|T)")
    lazy val STRING = r("(s|S)(t|T)(r|R)(i|I)(n|N)(g|G)")
    lazy val PREAMBLE = r("(p|P)(r|R)(e|E)(a|A)(m|M)(b|B)(l|L)(e|E)")
    // anything except can't start with number, quotes, braces/parens, '#', commas, whitespace, or '='
    lazy val SYMBOL = r("[^0-9\"}{)(,\\s#=][^\"}{)(,\\s#=]*")
    // can start with (or even be entirely) a number
    lazy val SYMBOL_CAN_START_WITH_NUMBER = r("[^\"}{)(,\\s#=][^\"}{)(,\\s#=]*")
  }
}
| melisabok/factorie | src/main/scala/cc/factorie/app/bib/parser/DocumentParser.scala | Scala | apache-2.0 | 5,142 |
package com.github.andr83.parsek
import com.github.andr83.parsek.meta.RequiredFieldError
import com.github.andr83.parsek.pipe.Pipe
import com.typesafe.config._
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.util.control.NonFatal
/**
* @author andr83
*/
@SerialVersionUID(1L)
class Pipeline(pipes: Pipe*) extends Serializable with LazyLogging {
  import PipeContext._

  /**
   * Runs `value` through every pipe in order and returns the produced values.
   * Any non-fatal failure is logged, counted in the context's error counters
   * (keyed by exception class, plus the current field path when set), and the
   * record contributes no output (empty list).
   */
  def run(value: PValue)(implicit context: PipeContext): List[PValue] = {
    try {
      val res = nextPipe(pipes, value)
      // context.getCounter(InfoGroup, "OUTPUT_ROWS") += res.length
      res
    } catch {
      case NonFatal(ex) =>
        // RequiredFieldError wraps the real failure; unwrap it for logging/counting.
        val e = ex match {
          case error: RequiredFieldError => error.cause
          case _ => ex
        }
        logger.error(e.toString, e)
        if (context.path.isEmpty) {
          context.getCounter(ErrorGroup, e.getClass.getSimpleName) += 1
        } else {
          context.getCounter(ErrorGroup, (e.getClass.getSimpleName, context.path.mkString(".")).toString()) += 1
        }
        // A failed record yields no output.
        List.empty[PValue]
    } finally {
      // context.getCounter(InfoGroup, "INPUT_ROWS") += 1
    }
  }

  // Applies the head pipe to `value` and recurses on the tail with each result.
  // Before each stage the per-record context is reset: the field path is
  // cleared and `context.row` is set to the value when it is a PMap.
  // A PList result fans out — every element continues through the remaining
  // pipes independently. A pipe returning None short-circuits to Nil.
  private def nextPipe(pipeline: Seq[Pipe], value: PValue)(implicit context: PipeContext): List[PValue] = if (pipeline.nonEmpty) {
    context.path = Seq.empty[String]
    context.row = value match {
      case map: PMap => map
      case _ => PMap.empty
    }
    val pipe = pipeline.head
    pipe.run(value) map {
      case PList(list) => list flatMap(nextPipe(pipeline.tail, _))
      case pipeResult => nextPipe(pipeline.tail, pipeResult) //to-do fix iterator reset!!!
    } getOrElse List.empty[PValue]
  } else value match {
    // End of pipeline: flatten a PList, otherwise wrap the single value.
    case PList(list) => list
    case _ => List(value)
  }
}
object Pipeline {
  /** Builds a [[Pipeline]] by instantiating one [[Pipe]] per configuration block. */
  def apply(pipes: Seq[Config]): Pipeline = {
    val stages = pipes map (cfg => Pipe(cfg))
    new Pipeline(stages: _*)
  }
}
| andr83/parsek | core/src/main/scala/com/github/andr83/parsek/Pipeline.scala | Scala | mit | 1,839 |
package io.neons.collector.application.guice.infrastructure.log.builder
import com.google.inject.AbstractModule
import io.neons.collector.infrastructure.log.builder.AkkaHttpLogBuilder
import io.neons.collector.model.log.LogBuilder
import net.codingwell.scalaguice.ScalaModule
/** Guice module wiring the [[LogBuilder]] abstraction to its Akka HTTP implementation. */
class LogBuilderModule extends AbstractModule with ScalaModule {
  override def configure(): Unit = {
    // One shared AkkaHttpLogBuilder instance, created eagerly at injector startup.
    bind[LogBuilder].to[AkkaHttpLogBuilder].asEagerSingleton()
  }
}
| NeonsIo/collector | src/main/scala/io/neons/collector/application/guice/infrastructure/log/builder/LogBuilderModule.scala | Scala | mit | 449 |
/*******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.nodes_util
import kr.ac.kaist.jsaf.nodes._
import kr.ac.kaist.jsaf.nodes_util.{NodeUtil => NU}
import kr.ac.kaist.jsaf.scala_src.nodes._
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
import kr.ac.kaist.jsaf.scala_src.useful.Options._
import kr.ac.kaist.jsaf.useful.Useful
import edu.rice.cs.plt.tuple.{Option => JOption}
import _root_.java.lang.{Double => JDouble}
import _root_.java.lang.{Integer => JInt}
import _root_.java.util.{List => JList}
import _root_.java.io.BufferedReader
import _root_.java.io.BufferedWriter
import _root_.java.io.File
import _root_.java.math.BigInteger
import _root_.java.math.BigDecimal
import _root_.java.util.ArrayList
import _root_.java.util.Arrays
import _root_.java.util.Collections
import _root_.java.util.Set
import _root_.java.util.StringTokenizer
import scala.collection.mutable.{HashMap => MHashMap}
/**
 * Factory methods for constructing AST/IR nodes with source-span and comment
 * bookkeeping, plus mutable global maps linking IR nodes back to the AST
 * nodes they were produced from. Most `make*` helpers consume any pending
 * comment via makeSpanInfoComment (see below).
 */
object NodeFactory {
  // Maps the unique ids for IR nodes to their corresponding AST nodes
  private var ir2astMap = new MHashMap[Long, ASTNode] // IRNode.uid -> ASTNode
  private var irinfo2irMap = new MHashMap[Long, IRNode] // IRInfoNode.uid -> IRNode

  // Resets both maps; call before processing a new program.
  def initIr2ast: Unit = {ir2astMap = new MHashMap; irinfo2irMap = new MHashMap}
  def ir2ast(ir: IRNode): Option[ASTNode] = ir2astMap.get(ir.asInstanceOf[UIDObject].getUID)
  def irinfo2ir(info: IRInfoNode): Option[IRNode] = irinfo2irMap.get(info.getUID)

  // Records the IR -> AST link (and, for IR nodes carrying an info node, the
  // info -> IR link), returning the IR node unchanged for convenient chaining.
  def putIr2ast[A <: IRNode](ir: A, ast: ASTNode): A = {
    ir2astMap.put(ir.asInstanceOf[UIDObject].getUID, ast)
    ir match {
      case ir: IRAbstractNode => irinfo2irMap.put(ir.getInfo.getUID, ir)
      case ir: IRExpr => irinfo2irMap.put(ir.getInfo.getUID, ir)
      case ir: IRInfoNode => irinfo2irMap.put(ir.getUID, ir)
      case _ =>
    }
    ir
  }

  // ---------------------------------------------------------------- spans ---

  // For use only when there is no hope of attaching a true span.
  def makeSpan(villain: String): Span = {
    val sl = new SourceLocRats(villain,0,0,0)
    new Span(sl,sl)
  }
  def makeSpan(node: ASTNode): Span = NU.getSpan(node)
  def makeSpan(start: Span, finish: Span): Span =
    new Span(start.getBegin, finish.getEnd)
  def makeSpan(file: String, line: Int, startC: Int, endC: Int): Span =
    new Span(new SourceLocRats(file, line, startC, 0),
             new SourceLocRats(file, line, endC, 0))
  def makeSpan(start: ASTNode, finish: ASTNode): Span =
    makeSpan(NU.getSpan(start), NU.getSpan(finish))
  // Span from `start` to the last element of `l` (just `start` when `l` is empty).
  def makeSpan(start: ASTNode, l: JList[ASTNode]): Span = {
    val s = l.size
    if (s==0) makeSpan(start, start) else makeSpan(start, l.get(s-1))
  }
  // Span from the first element of `l` to `finish` (just `finish` when empty).
  def makeSpan(l: JList[ASTNode], finish: ASTNode): Span = {
    val s = l.size
    if (s==0) makeSpan(finish, finish) else makeSpan(l.get(0), finish)
  }
  // Span covering all of `l`; falls back to a dummy span named `ifEmpty`.
  def makeSpan(ifEmpty: String, l: JList[ASTNode]): Span = {
    val s = l.size
    if (s==0) makeSpan(ifEmpty) else makeSpan(l.get(0), l.get(s-1))
  }
  /**
   * In some situations, a begin-to-end span is not really right, and something
   * more like a set of spans ought to be used. Even though this is not yet
   * implemented, the name is provided to allow expression of intent.
   */
  def makeSetSpan(start: ASTNode, l: JList[ASTNode]): Span = makeSpan(start, l)
  /**
   * In some situations, a begin-to-end span is not really right, and something
   * more like a set of spans ought to be used. Even though this is not yet
   * implemented, the name is provided to allow expression of intent.
   */
  def makeSetSpan(a: ASTNode, b: ASTNode): Span = makeSpan(a,b)
  /**
   * In some situations, a begin-to-end span is not really right, and something
   * more like a set of spans ought to be used. Even though this is not yet
   * implemented, the name is provided to allow expression of intent.
   *
   */
  def makeSetSpan(ifEmpty: String, l: JList[ASTNode]): Span = makeSpan(ifEmpty, l)

  // ------------------------------------------------------------- comments ---

  // Most recently seen comment; attached to the next node built through
  // makeSpanInfoComment and then cleared.
  private var comment = none[Comment]
  def initComment = { comment = none[Comment] }

  // Accumulates comment text during parsing (only when comment keeping is on).
  // Consecutive distinct comments are merged into one node spanning both.
  def commentLog(span: Span, message: String) =
    if (NU.getKeepComments) {
      if (!comment.isDefined ||
          (!comment.get.getComment.startsWith("/*") && !comment.get.getComment.startsWith("//")))
        comment = some[Comment](makeComment(span, message))
      else {
        val com = comment.get
        if (!com.getComment.equals(message))
          comment = some[Comment](makeComment(NU.spanAll(com.getInfo.getSpan, span),
                                              com.getComment+"\\n"+message))
      }
    }

  // Builds span info, consuming (and clearing) any pending comment.
  def makeSpanInfoComment(span: Span): ASTSpanInfo =
    if (NU.getKeepComments && comment.isDefined) {
      val result = new ASTSpanInfo(span, comment)
      comment = none[Comment]
      result
    } else new ASTSpanInfo(span, none[Comment])
  def makeSpanInfo(span: Span, comment: String): ASTSpanInfo =
    new ASTSpanInfo(span, some[Comment](makeComment(span, comment)))
  def makeSpanInfo(span: Span): ASTSpanInfo =
    new ASTSpanInfo(span, none[Comment])
  def makeOnlySpanInfo(span: Span): SpanInfo = new SpanInfo(span)

  // -------------------------------------------------- program / top level ---

  def makeTopLevel(info: ASTSpanInfo, body: JList[SourceElement], strict: Boolean): TopLevel =
    makeTopLevel(info, toJavaList(Nil), toJavaList(Nil), List(new SourceElements(info, body, strict)))
  def makeTopLevel(info: ASTSpanInfo, body: List[SourceElements]): TopLevel =
    makeTopLevel(info, toJavaList(Nil), toJavaList(Nil), body)
  def makeTopLevel(info: ASTSpanInfo, fds: JList[FunDecl], vds: JList[VarDecl],
                   body: List[SourceElements]): TopLevel =
    new TopLevel(fds, vds, body)
  def makeProgram(span: Span, elements: JList[SourceElement], strict: Boolean): Program = {
    val info = makeSpanInfoComment(span)
    makeProgram(info, makeTopLevel(info, elements, strict))
  }
  def makeProgram(info: ASTSpanInfo, body: List[SourceElement], strict: Boolean): Program =
    makeProgram(info, makeTopLevel(info, toJavaList(body), strict))
  def makeProgram(info: ASTSpanInfo, toplevel: TopLevel): Program =
    new Program(info, toplevel)

  // -------------------------------------------------------------- modules ---

  def makeModDecl(span: Span, name: Id, body: JList[SourceElement], strict: Boolean) = {
    val info = makeSpanInfoComment(span)
    new ModDecl(info, name, makeTopLevel(info, body, strict))
  }
  def makeModExpVarStmt(span: Span, vds: JList[VarDecl]) =
    new ModExpVarStmt(makeSpanInfoComment(span), vds)
  def makeModExpFunDecl(span: Span, fd: FunDecl) =
    new ModExpFunDecl(makeSpanInfoComment(span), fd)
  def makeModExpGetter(span: Span, name: Id, body: JList[SourceElement], strict: Boolean) =
    new ModExpGetter(makeSpanInfoComment(span),
                     makeGetProp(span, makePropId(span, name), body, strict))
  def makeModExpSetter(span: Span, name: Id, param: Id, body: JList[SourceElement], strict: Boolean) =
    new ModExpSetter(makeSpanInfoComment(span),
                     makeSetProp(span, makePropId(span, name), param, body, strict))
  def makeModExpSpecifiers(span: Span, names: JList[ModExpSpecifier]) =
    new ModExpSpecifiers(makeSpanInfoComment(span), names)
  def makeExportName(span: Span, name: Id): ModExpSpecifier =
    new ModExpName(makeSpanInfoComment(span), makePath(name))
  def makeExportName(span: Span, name: Id, path: Path): ModExpSpecifier =
    new ModExpName(makeSpanInfoComment(span), makePath(name, path))
  def makeStarFromPath(span: Span, path: Path): ModExpSpecifier =
    new ModExpStarFromPath(makeSpanInfoComment(span), path)
  def makeStar(span: Span): ModExpSpecifier =
    new ModExpStar(makeSpanInfoComment(span))
  def makeExportAlias(span: Span, name: Id, alias: Path): ModExpSpecifier =
    new ModExpAlias(makeSpanInfoComment(span), name, alias)
  def makeModImpDecl(span: Span, imports: JList[ModImport]) =
    new ModImpDecl(makeSpanInfoComment(span), imports)
  def makeModImpSpecifierSet(span: Span, imports: JList[ModImpSpecifier], module: Path): ModImport =
    new ModImpSpecifierSet(makeSpanInfoComment(span), imports, module)
  def makeModImpAlias(span: Span, name: Path, alias: Id): ModImport =
    new ModImpAliasClause(makeSpanInfoComment(span), name, alias)
  def makeImportAlias(span: Span, name: Id, alias: Id): ModImpSpecifier =
    new ModImpAlias(makeSpanInfoComment(span), name, alias)
  def makeImportName(span: Span, name: Id): ModImpSpecifier =
    new ModImpName(makeSpanInfoComment(span), name)

  // ------------------------------------------------------------ functions ---

  def makeFunctional(info: ASTSpanInfo, name: Id, fds: JList[FunDecl], vds: JList[VarDecl],
                     body: JList[SourceElement], params: JList[Id], strict: Boolean) =
    new Functional(fds, vds, new SourceElements(info, body, strict), name, params)
  def makeFunDecl(span: Span, name: Id, params: JList[Id],
                  body: JList[SourceElement], strict: Boolean) = {
    val info = makeSpanInfoComment(span)
    new FunDecl(info,
                makeFunctional(info, name, toJavaList(Nil), toJavaList(Nil), body, params, strict))
  }
  def makeFunExpr(span: Span, name: Id, params: JList[Id],
                  body: JList[SourceElement], strict: Boolean) = {
    val info = makeSpanInfoComment(span)
    new FunExpr(info,
                makeFunctional(info, name, toJavaList(Nil), toJavaList(Nil), body, params, strict))
  }

  // ----------------------------------------------------------- statements ---

  def makeBlock(span: Span, stmts: JList[Stmt]) =
    new Block(makeSpanInfoComment(span), stmts)
  def makeVarStmt(span: Span, vds: JList[VarDecl]) =
    new VarStmt(makeSpanInfoComment(span), vds)
  def makeEmptyStmt(span: Span) =
    new EmptyStmt(makeSpanInfoComment(span))
  def makeExprStmt(span: Span, expr: Expr) =
    new ExprStmt(makeSpanInfoComment(span), expr)
  def makeIf(span: Span, cond: Expr, trueB: Stmt, falseB: JOption[Stmt]) =
    new If(makeSpanInfoComment(span), cond, trueB, falseB)
  def makeDoWhile(span: Span, body: Stmt, cond: Expr) =
    new DoWhile(makeSpanInfoComment(span), body, cond)
  def makeWhile(span: Span, cond: Expr, body: Stmt) =
    new While(makeSpanInfoComment(span), cond, body)
  def makeFor(span: Span, init: JOption[Expr], cond: JOption[Expr],
              action: JOption[Expr], body: Stmt) =
    new For(makeSpanInfoComment(span), init, cond, action, body)
  def makeForVar(span: Span, vars: JList[VarDecl], cond: JOption[Expr],
                 action: JOption[Expr], body: Stmt) =
    new ForVar(makeSpanInfoComment(span), vars, cond, action, body)
  def makeForIn(span: Span, lhs: LHS, expr: Expr, body: Stmt) =
    new ForIn(makeSpanInfoComment(span), lhs, expr, body)
  def makeForVarIn(span: Span, vd: VarDecl, expr: Expr, body: Stmt) =
    new ForVarIn(makeSpanInfoComment(span), vd, expr, body)
  def makeContinue(span: Span, target: JOption[Label]) =
    new Continue(makeSpanInfoComment(span), target)
  def makeBreak(span: Span, target: JOption[Label]) =
    new Break(makeSpanInfoComment(span), target)
  def makeReturn(span: Span, expr: JOption[Expr]) =
    new Return(makeSpanInfoComment(span), expr)
  def makeWith(span: Span, expr: Expr, stmt: Stmt) =
    new With(makeSpanInfoComment(span), expr, stmt)
  // Switch with no default clause and no trailing cases.
  def makeSwitch(span: Span, expr: Expr, front: JList[Case]): Switch =
    makeSwitch(span, expr, front, none[JList[Stmt]], toJavaList(Nil))
  def makeSwitch(span: Span, expr: Expr, front: JList[Case],
                 defaultC: JOption[JList[Stmt]], back: JList[Case]): Switch =
    new Switch(makeSpanInfoComment(span), expr, front, defaultC, back)
  def makeLabelStmt(span: Span, label: Label, stmt: Stmt) =
    new LabelStmt(makeSpanInfoComment(span), label, stmt)
  def makeThrow(span: Span, expr: Expr) =
    new Throw(makeSpanInfoComment(span), expr)
  // try/catch, try/finally, try/catch/finally overloads delegate to the full form.
  def makeTry(span: Span, body: JList[Stmt], catchB: Catch): Try =
    makeTry(span, body, some(catchB), none[JList[Stmt]])
  def makeTry(span: Span, body: JList[Stmt], fin: JList[Stmt]): Try =
    makeTry(span, body, none[Catch], some(fin))
  def makeTry(span: Span, body: JList[Stmt], catchB: Catch, fin: JList[Stmt]): Try =
    makeTry(span, body, some(catchB), some(fin))
  def makeTry(span: Span, body: JList[Stmt], catchB: JOption[Catch], fin: JOption[JList[Stmt]]): Try =
    new Try(makeSpanInfoComment(span), body, catchB, fin)
  def makeDebugger(span: Span) =
    new Debugger(makeSpanInfoComment(span))
  def makeVarDecl(span: Span, name: Id, expr: JOption[Expr]) =
    new VarDecl(makeSpanInfoComment(span), name, expr)
  def makeCase(span: Span, cond: Expr, body: JList[Stmt]) =
    new Case(makeSpanInfoComment(span), cond, body)
  def makeCatch(span: Span, id: Id, body: JList[Stmt]) =
    new Catch(makeSpanInfoComment(span), id, body)

  // ---------------------------------------------------------- expressions ---

  def makeExprList(span: Span, es: JList[Expr]) =
    new ExprList(makeSpanInfoComment(span), es)
  def makeCond(span: Span, cond: Expr, trueB: Expr, falseB: Expr) =
    new Cond(makeSpanInfoComment(span), cond, trueB, falseB)
  def makeInfixOpApp(span: Span, left: Expr, op: Op, right: Expr) =
    new InfixOpApp(makeSpanInfoComment(span), left, op, right)
  def makePrefixOpApp(span: Span, op: Op, right: Expr) =
    new PrefixOpApp(makeSpanInfoComment(span), op, right)
  def makeUnaryAssignOpApp(span: Span, lhs: LHS, op: Op) =
    new UnaryAssignOpApp(makeSpanInfoComment(span), lhs, op)
  def makeAssignOpApp(span: Span, lhs: LHS, op: Op, right: Expr) =
    new AssignOpApp(makeSpanInfoComment(span), lhs, op, right)
  def makeBracket(span: Span, lhs: LHS, index: Expr) =
    new Bracket(makeSpanInfoComment(span), lhs, index)
  def makeDot(span: Span, lhs: LHS, member: Id) =
    new Dot(makeSpanInfoComment(span), lhs, member)
  def makeNew(span: Span, lhs: LHS) =
    new New(makeSpanInfoComment(span), lhs)
  def makeFunApp(span: Span, lhs: LHS, args: JList[Expr]) =
    new FunApp(makeSpanInfoComment(span), lhs, args)
  def makeThis(span: Span) =
    new This(makeSpanInfoComment(span))
  def makeNull(span: Span) =
    new Null(makeSpanInfoComment(span))
  def makeBool(span: Span, bool: Boolean) =
    new Bool(makeSpanInfoComment(span), bool)
  def makeVarRef(span: Span, id: Id) =
    new VarRef(makeSpanInfoComment(span), id)
  // Arrays of more than 1000 numbers keep the compact ArrayNumberExpr form;
  // smaller ones are expanded into a regular ArrayExpr of numeric literals.
  def makeArrayNumberExpr(span: Span, elmts: JList[JDouble]) = {
    if (elmts.size > 1000)
      new ArrayNumberExpr(makeSpanInfoComment(span), elmts)
    else
      makeArrayExpr(span, toJavaList(toList(elmts).map(e => some(makeNumericLiteral(span, e.toString, e).asInstanceOf[Expr]))))
  }
  def makeArrayExpr(span: Span, elmts: JList[JOption[Expr]]) =
    new ArrayExpr(makeSpanInfoComment(span), elmts)
  def makeObjectExpr(span: Span, elmts: JList[Member]) =
    new ObjectExpr(makeSpanInfoComment(span), elmts)
  def makeParenthesized(span: Span, expr: Expr) =
    new Parenthesized(makeSpanInfoComment(span), expr)

  /*
   * DecimalLiteral ::=
   *   DecimalIntegerLiteral . DecimalDigits? ExponentPart?
   * | DecimalIntegerLiteral ExponentPart?
   * | . DecimalDigits ExponentPart?
   *
   * DecimalIntegerLiteral ::=
   *   0
   * | NonZeroDigit DecimalDigits?
   *
   * DecimalDigit ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
   *
   * NonZeroDigit ::= 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
   *
   * ExponentPart ::= (e | E) (+ | -)? DecimalDigit+
   */
  // Validates the lexical pieces of a decimal literal (logging syntax errors
  // to `writer`) and builds either an IntLiteral or a DoubleLiteral.
  // A non-negative exponent without a dot stays integral: n * 10^exp.
  def makeNumericLiteral(writer: BufferedWriter, span: Span,
                         beforeDot: String, dot: String,
                         afterDot: String, exponent: String) = {
    if ((beforeDot+dot).equals("") ||
        ((beforeDot+afterDot).equals("") && !dot.equals("")) ||
        (!beforeDot.equals("") && dot.equals("") && !afterDot.equals("")))
      NU.log(writer, span, "Syntax Error: expected a numeral but got "+
             beforeDot+dot+afterDot+exponent)
    if (!beforeDot.equals("") && !beforeDot.equals("0") && beforeDot.charAt(0) == '0')
      NU.log(writer, span, "Syntax Error: a numeral begins with 0.")
    if (dot.equals("")) {
      if (exponent.equals("")) makeIntLiteral(span, new BigInteger(beforeDot))
      else {
        var exp = 0
        // exponent is "e<digits>", "e+<digits>", or "e-<digits>" (or E forms).
        val second = exponent.charAt(1)
        if (Character.isDigit(second))
          exp = JInt.parseInt(exponent.substring(1))
        else if (second.equals('-'))
          exp = -1 * JInt.parseInt(exponent.substring(2))
        else exp = JInt.parseInt(exponent.substring(2))
        if (exp < 0) {
          var str = beforeDot+dot+afterDot+exponent
          // Normalize via BigDecimal so e.g. "1e-2" prints as "0.01".
          str = new BigDecimal(str).toString
          makeDoubleLiteral(span, str, JDouble.valueOf(str))
        } else makeIntLiteral(span, new BigInteger(beforeDot).multiply(BigInteger.TEN.pow(exp)))
      }
    } else {
      val str = beforeDot+dot+afterDot+exponent
      makeDoubleLiteral(span, str, JDouble.valueOf(str))
    }
  }
  // Validates a bare integer part and returns its numeric (Double) value.
  def makeNumericLiteral(writer: BufferedWriter, span: Span,
                         beforeDot: String) = {
    if (beforeDot.equals(""))
      NU.log(writer, span, "Syntax Error: expected a numeral but got "+
             beforeDot)
    if (!beforeDot.equals("") && !beforeDot.equals("0") && beforeDot.charAt(0) == '0')
      NU.log(writer, span, "Syntax Error: a numeral begins with 0.")
    JDouble.valueOf(beforeDot)
  }
  // A double whose string form ends in ".0" is stored as an integer literal.
  def makeNumericLiteral(span: Span, str: String, doubleVal: Double) =
    if (str.endsWith(".0"))
      new IntLiteral(makeSpanInfoComment(span), new BigInteger(str.substring(0, str.length-2), 10), 10)
    else new DoubleLiteral(makeSpanInfoComment(span), str, doubleVal)
  def makeIntLiteral(span: Span, intVal: BigInteger, radix: Int = 10) =
    new IntLiteral(makeSpanInfoComment(span), intVal, radix)
  def makeDoubleLiteral(span: Span, str: String, doubleVal: Double) =
    new DoubleLiteral(makeSpanInfoComment(span), str, doubleVal)
  def makeHexIntegerLiteral(span: Span, num: String) =
    makeIntLiteral(span, new BigInteger(num, 16), 16)
  def makeOctalIntegerLiteral(span: Span, num: String) =
    makeIntLiteral(span, new BigInteger(num, 8), 8)
  def makeStringLiteral(span: Span, str: String, quote: String) =
    new StringLiteral(makeSpanInfoComment(span), quote, str)
  def makeRegularExpression(span: Span, body: String, flags: String) =
    new RegularExpression(makeSpanInfoComment(span), body, flags)

  // ----------------------------------------------------- object members ---

  def makeField(span: Span, prop: Property, expr: Expr) =
    new Field(makeSpanInfoComment(span), prop, expr)
  // Getter: a zero-parameter functional named after the property.
  def makeGetProp(span: Span, prop: Property, body: JList[SourceElement], strict: Boolean) = {
    val info = makeSpanInfoComment(span)
    new GetProp(info, prop,
                makeFunctional(info, NU.prop2Id(prop), toJavaList(Nil), toJavaList(Nil), body,
                               toJavaList(Nil), strict))
  }
  // Setter: a one-parameter functional named after the property.
  def makeSetProp(span: Span, prop: Property, id: Id,
                  body: JList[SourceElement], strict: Boolean) = {
    val info = makeSpanInfoComment(span)
    new SetProp(info, prop,
                makeFunctional(info, NU.prop2Id(prop), toJavaList(Nil), toJavaList(Nil), body,
                               toJavaList(List(id)), strict))
  }
  def makePropId(span: Span, id: Id) =
    new PropId(makeSpanInfoComment(span), id)
  def makePropStr(span: Span, str: String) =
    new PropStr(makeSpanInfoComment(span), str)
  def makePropNum(span: Span, num: NumberLiteral) =
    new PropNum(makeSpanInfoComment(span), num)

  // --------------------------------------------- names, operators, paths ---

  def makeId(span: Span, name: String, uniq: String): Id =
    makeId(span, name, some(uniq))
  def makeId(span: Span, name: String): Id =
    makeId(span, name, None)
  def makeId(span: Span, name: String, uniq: Option[String]): Id =
    new Id(makeSpanInfoComment(span), name, uniq, false)
  def makeOp(span: Span, name: String) =
    new Op(makeSpanInfo(span), name)
  def makeLabel(span: Span, id: Id) =
    new Label(makeSpanInfoComment(span), id)
  def makeComment(span: Span, comment: String): Comment =
    new Comment(makeSpanInfoComment(span), comment)
  def makePath(id: Id): Path =
    makePath(NU.getSpan(id), toJavaList(List(id)))
  def makePath(id: Id, path: Path): Path =
    makePath(makeSpan(id, path), toJavaList(toList(path.getNames):+id))
  def makePath(p: Path, path: Path): Path =
    makePath(makeSpan(p, path), toJavaList(toList(path.getNames)++toList(p.getNames)))
  def makePath(span: Span, ids: JList[Id]): Path =
    new Path(makeSpanInfoComment(span), ids)
  def makeNoOp(span: Span, desc: String): NoOp =
    makeNoOp(makeSpanInfoComment(span), desc)
  def makeNoOp(info: ASTSpanInfo, desc: String): NoOp =
    new NoOp(info, desc)
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/nodes_util/NodeFactory.scala | Scala | bsd-3-clause | 20,441 |
package smtlib
package theories
import FixedSizeBitVectors._
import parser.Parser
import org.scalatest.funsuite.AnyFunSuite
/**
 * Exercises the SMT-LIB fixed-size bit-vector theory helpers: sort and
 * literal construction, `(_ bvN M)` constants, and extractor round-trips
 * through the parser for every bit-vector operation.
 */
class FixedSizeBitVectorsTests extends AnyFunSuite {

  override def suiteName = "Bit Vector theory test suite"

  // BitVectorSort(n) must extract exactly the bit width it was built with.
  test("BitVector sort") {
    BitVectorSort(32) match {
      case BitVectorSort(n) if n == 14 => assert(false)
      case BitVectorSort(n) if n == 32 => assert(true)
      case _ => assert(false)
    }
    BitVectorSort(12) match {
      case BitVectorSort(n) if n == 14 => assert(false)
      case BitVectorSort(n) if n == 32 => assert(false)
      case BitVectorSort(n) if n == 12 => assert(true)
      case _ => assert(false)
    }
  }

  // Bit-vector literals round-trip their exact bit sequence.
  test("literals") {
    val l1 = BitVectorLit(List(true, true, false))
    l1 match {
      case BitVectorLit(List(true, false, true)) => assert(false)
      case BitVectorLit(List(true, true, false, false)) => assert(false)
      case BitVectorLit(List(true, true, false)) => assert(true)
      case _ => assert(false)
    }
  }

  // Parsing "(_ bvX N)" yields a BitVectorConstant with value X and width N.
  test("smtlib bv constant notation") {
    Parser.fromString("(_ bv13 32)").parseTerm match {
      case BitVectorConstant(x, n) if x == 12 && n == 32 => assert(false)
      case BitVectorConstant(x, n) if x == 13 && n == 31 => assert(false)
      case BitVectorConstant(x, n) if x == 13 && n == 32 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(_ bv11 17)").parseTerm match {
      case BitVectorConstant(x, n) if x == 12 && n == 17 => assert(false)
      case BitVectorConstant(x, n) if x == 11 && n == 32 => assert(false)
      case BitVectorConstant(x, n) if x == 11 && n == 17 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(_ bv10242 77)").parseTerm match {
      case BitVectorConstant(x, n) if x == 10242 && n == 77 => assert(true)
      case _ => assert(false)
    }
    // Values beyond Int range must survive as BigInt.
    Parser.fromString("(_ bv1234567891234 200)").parseTerm match {
      case BitVectorConstant(x, n) if x == BigInt("1234567891234") && n == 200 => assert(true)
      case _ => assert(false)
    }
    val cst = BitVectorConstant(13, 32)
    cst match {
      case BitVectorConstant(x, n) if x == 13 && n == 32 => assert(true)
      case _ => assert(false)
    }
  }

  // 2^31 does not fit a signed Int; its truncated Int view wraps to Int.MinValue.
  test("smtlib bv constant with int overflow") {
    val cst = BitVectorConstant(BigInt("2147483648"), 32)
    cst match {
      case BitVectorConstant(x, n) if x.toInt == -2147483648 && n == 32 => assert(true)
      case _ => assert(false)
    }
  }

  // Binary (#b...) and hexadecimal (#x...) literal notations.
  test("smtlib is correctly parsed with bv literals") {
    Parser.fromString("#b101").parseTerm match {
      case BitVectorLit(List(true, false, true)) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("#xf0").parseTerm match {
      case BitVectorLit(List(true, true, true, true, false, false, false, false)) => assert(true)
      case _ => assert(false)
    }
  }

  // concat / extract / repeat / zero_extend / sign_extend / rotate_left / rotate_right.
  test("smtlib is correctly parsed with bv manipulation operations") {
    Parser.fromString("(concat #b101 #b01)").parseTerm match {
      case Concat(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ extract 1 2) #b101)").parseTerm match {
      case Extract(x, y, BitVectorLit(List(true, false, true))) if x == 1 && y == 2 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ repeat 5) #b101)").parseTerm match {
      case Repeat(n, BitVectorLit(List(true, false, true))) if n == 5 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ zero_extend 3) #b101)").parseTerm match {
      case ZeroExtend(n, BitVectorLit(List(true, false, true))) if n == 3 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ sign_extend 3) #b101)").parseTerm match {
      case SignExtend(n, BitVectorLit(List(true, false, true))) if n == 3 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ rotate_left 3) #b101)").parseTerm match {
      case RotateLeft(n, BitVectorLit(List(true, false, true))) if n == 3 => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("((_ rotate_right 3) #b101)").parseTerm match {
      case RotateRight(n, BitVectorLit(List(true, false, true))) if n == 3 => assert(true)
      case _ => assert(false)
    }
  }

  // bvnot / bvand / bvor / bvnand / bvnor / bvxor / bvxnor / bvcomp.
  test("smtlib is correctly parsed with bv logical operations") {
    Parser.fromString("(bvnot #b101)").parseTerm match {
      case Not(
        BitVectorLit(List(true, false, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvand #b101 #b011)").parseTerm match {
      case And(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvor #b101 #b011)").parseTerm match {
      case Or(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvnand #b101 #b011)").parseTerm match {
      case NAnd(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvnor #b101 #b011)").parseTerm match {
      case NOr(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvxor #b101 #b011)").parseTerm match {
      case XOr(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvxnor #b101 #b011)").parseTerm match {
      case XNOr(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvcomp #b101 #b011)").parseTerm match {
      case Comp(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
  }

  // bvneg / bvadd / bvsub / bvmul / bvudiv / bvsdiv / bvurem / bvsrem / bvsmod.
  test("smtlib is correctly parsed with bv arithmetic operations") {
    Parser.fromString("(bvneg #b101)").parseTerm match {
      case Neg(
        BitVectorLit(List(true, false, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvadd #b101 #b011)").parseTerm match {
      case Add(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsub #b101 #b011)").parseTerm match {
      case Sub(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvmul #b101 #b011)").parseTerm match {
      case Mul(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvudiv #b101 #b011)").parseTerm match {
      case UDiv(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsdiv #b101 #b011)").parseTerm match {
      case SDiv(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvurem #b101 #b011)").parseTerm match {
      case URem(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsrem #b101 #b011)").parseTerm match {
      case SRem(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsmod #b101 #b011)").parseTerm match {
      case SMod(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
  }

  // bvshl / bvlshr / bvashr.
  test("smtlib is correctly parsed with bv shifting operations") {
    Parser.fromString("(bvshl #b101 #b011)").parseTerm match {
      case ShiftLeft(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvlshr #b101 #b011)").parseTerm match {
      case LShiftRight(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvashr #b101 #b011)").parseTerm match {
      case AShiftRight(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
  }

  // Signed and unsigned comparison predicates.
  test("smtlib is correctly parsed with bv arithmetic comparisons operations") {
    Parser.fromString("(bvult #b101 #b011)").parseTerm match {
      case ULessThan(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvslt #b101 #b011)").parseTerm match {
      case SLessThan(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvule #b101 #b011)").parseTerm match {
      case ULessEquals(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsle #b101 #b011)").parseTerm match {
      case SLessEquals(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvugt #b101 #b011)").parseTerm match {
      case UGreaterThan(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsgt #b101 #b011)").parseTerm match {
      case SGreaterThan(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvuge #b101 #b011)").parseTerm match {
      case UGreaterEquals(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
    Parser.fromString("(bvsge #b101 #b011)").parseTerm match {
      case SGreaterEquals(
        BitVectorLit(List(true, false, true)),
        BitVectorLit(List(false, true, true))
      ) => assert(true)
      case _ => assert(false)
    }
  }
}
| regb/scala-smtlib | src/test/scala/smtlib/theories/FixedSizeBitVectorsTests.scala | Scala | mit | 11,513 |
package edu.umass.ciir.kbbridge.tac
import xml.XML
import java.io.{StringReader, FileInputStream}
import java.util.zip.GZIPInputStream
import edu.umass.ciir.kbbridge.nlp.TextNormalizer
import org.ccil.cowan.tagsoup.jaxp.SAXFactoryImpl
import edu.umass.ciir.models.StopWordList
/**
*
*/
object TacDocumentXmlLoader {
val parser = XML.withSAXParser((new SAXFactoryImpl).newSAXParser())
def buildTermVector(text:String):Seq[String] = {
TextNormalizer.normalizeText(text).split(" ").filterNot(StopWordList.isStopWord(_))
}
  /**
   * Use a sax parser that only fetches content from <p> nodes.
   *
   * Parses an in-memory document with the lenient tag-soup parser and returns
   * the normalized, space-split tokens of every paragraph.
   */
  def contextViaSaxFromString(content:String):Seq[String] = {
    val contentStream = new StringReader(content)
    // The shared parser instance is not thread safe, hence the lock.
    parser.synchronized{
      val ns = parser.load(contentStream)
      // NOTE(review): `\\\\` looks like a double-escaping artifact; Scala's
      // XML deep-select operator is `\\` — confirm against the repository.
      (ns \\\\ "p").flatMap { p =>
        val pText = p.text.replaceAll("\\n", " ")
        TextNormalizer.normalizeText(pText).split(" ")
      }
    }
  }
  /**
   * Use a sax parser that only fetches content from <p> nodes.
   *
   * Reads the document at `fileLoc` and returns the normalized tokens of all
   * paragraphs. NOTE(review): the FileInputStream is never closed explicitly
   * — confirm whether callers rely on GC finalization.
   */
  def contextViaSax(fileLoc:String):Seq[String] = {
    val contentStream = new FileInputStream(fileLoc)
    // The shared parser instance is not thread safe, hence the lock.
    parser.synchronized{
      val ns = parser.load(contentStream)
      (ns \\\\ "p").flatMap { p =>
        val pText = p.text.replaceAll("\\n", " ")
        TextNormalizer.normalizeText(pText).split(" ")
      }
    }
  }
  /**
   * Same as [[contextViaSax]] but for a gzip-compressed file; note that
   * ".gz" is appended to `fileLoc` before opening. NOTE(review): the stream
   * is never closed explicitly.
   */
  def contextViaGzipSax(fileLoc:String):Seq[String] = {
    val contentStream = new GZIPInputStream(new FileInputStream(fileLoc+".gz"))
    // The shared parser instance is not thread safe, hence the lock.
    parser.synchronized{
      val ns = parser.load(contentStream)
      (ns \\\\ "p").flatMap { p =>
        val pText = p.text.replaceAll("\\n", " ")
        TextNormalizer.normalizeText(pText).split(" ")
      }
    }
  }
  /**
   * Concatenates the raw (non-normalized) text of all <p> nodes of the
   * document at `fileLoc`, with newlines flattened to spaces.
   * NOTE(review): the stream is never closed explicitly.
   */
  def fullTextViaSax(fileLoc:String):String = {
    val contentStream = new FileInputStream(fileLoc)
    // The shared parser instance is not thread safe, hence the lock.
    parser.synchronized{
      val ns = parser.load(contentStream)
      (ns \\\\ "p").map({ p =>
        val pText = p.text.replaceAll("\\n", " ")
        pText
      }).mkString("")
    }
  }
  /**
   * Same as [[fullTextViaSax]] but for a gzip-compressed file; ".gz" is
   * appended to `fileLoc` before opening. NOTE(review): the stream is never
   * closed explicitly.
   */
  def fullTextViaGzipSax(fileLoc:String):String = {
    val contentStream = new GZIPInputStream(new FileInputStream(fileLoc+".gz"))
    // The shared parser instance is not thread safe, hence the lock.
    parser.synchronized{
      val ns = parser.load(contentStream)
      (ns \\\\ "p").map({ p =>
        val pText = p.text.replaceAll("\\n", " ")
        pText
      }).mkString("")
    }
  }
/**
* Use a picky sax parser
*/
def contextViaSax2(fileLoc:String):Seq[String] = {
val contentFile = io.Source.fromFile(fileLoc)
val text = scala.xml.parsing.XhtmlParser.apply(contentFile).text
TextNormalizer.normalizeText(text).split(" ")
}
  /**
   * Extracts the full text of a TAC source document via the project's
   * callback-driven TacSourceReader/TacSourceHandler API.
   */
  def fullTextViaTacSource(fileLoc:String):String = {
    // The reader drives parsing through callbacks, so a var is used to
    // capture the (single) parsed document.
    var doc1:TacSourceDocument = null
    val handler = new TacSourceHandler(){
      def foundDoc(doc: TacSourceDocument) {
        doc1 = doc
      }
    }
    val re = new TacSourceReader(handler)
    re.readSourceFile(fileLoc)
    // NOTE(review): throws a NullPointerException if the file yields no
    // document — confirm inputs always contain exactly one.
    doc1.getFulltext
  }
def contextViaTacSource(fileLoc:String):Seq[String] = {
val text = fullTextViaTacSource(fileLoc)
TextNormalizer.normalizeText(text).split(" ")
}
/**
* Load a plain text.
*/
def contextPlain(fileLoc:String):Seq[String] = {
val contentFile = io.Source.fromFile(fileLoc)
contentFile.getLines().flatMap(l => TextNormalizer.normalizeText(l).split(" ")).toSeq
}
/**
* Load a plain text.
*/
def contextPlainFromString(content:String):Seq[String] = {
TextNormalizer.normalizeText(content).split(" ")
}
} | daltonj/KbBridge | src/main/scala/edu/umass/ciir/kbbridge/tac/TacDocumentXmlLoader.scala | Scala | apache-2.0 | 3,465 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.api.dag.Transformation
import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, CorrelateCodeGenerator}
import org.apache.flink.table.planner.delegation.BatchPlanner
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableFunctionScan
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{Correlate, JoinRelType}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rex.{RexNode, RexProgram}
/**
 * Batch physical RelNode for [[Correlate]] (Java/Scala user defined table function).
 */
class BatchExecCorrelate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    scan: FlinkLogicalTableFunctionScan,
    condition: Option[RexNode],
    projectProgram: Option[RexProgram],
    outputRowType: RelDataType,
    joinType: JoinRelType)
  extends BatchExecCorrelateBase(
    cluster,
    traitSet,
    inputRel,
    scan,
    condition,
    projectProgram,
    outputRowType,
    joinType) {

  /** Creates a copy of this node with a new trait set, input, project and output type. */
  def copy(
      traitSet: RelTraitSet,
      child: RelNode,
      projectProgram: Option[RexProgram],
      outputType: RelDataType): RelNode = {
    new BatchExecCorrelate(
      cluster,
      traitSet,
      child,
      scan,
      condition,
      projectProgram,
      outputType,
      joinType)
  }

  override protected def translateToPlanInternal(
      planner: BatchPlanner): Transformation[RowData] = {
    val config = planner.getTableConfig
    // Translate the single input first; correlate preserves its parallelism.
    val inputTransformation = getInputNodes.get(0).translateToPlan(planner)
      .asInstanceOf[Transformation[RowData]]
    val operatorCtx = CodeGeneratorContext(config)
    // Code-generate the correlate operator over the input transformation.
    CorrelateCodeGenerator.generateCorrelateTransformation(
      config,
      operatorCtx,
      inputTransformation,
      input.getRowType,
      projectProgram,
      scan,
      condition,
      outputRowType,
      joinType,
      inputTransformation.getParallelism,
      retainHeader = false,
      getExpressionString,
      "BatchExecCorrelate",
      getRelDetailedDescription)
  }
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecCorrelate.scala | Scala | apache-2.0 | 3,021 |
package uk.gov.dvla.vehicles.presentation.common.controllers
import org.scalatest.mock.MockitoSugar
import play.api.mvc.{Request, Result}
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import uk.gov.dvla.vehicles.presentation.common.model.CacheKeyPrefix
object VehicleLookupFailureTesting extends MockitoSugar {
  import play.api.mvc.Results

  // Canned Results used by tests to detect which branch of the controller ran.
  val presentTestResult = Results.Ok("presentResult")
  val missingPresentCookieDataTestResult = Results.Ok("missingPresentCookieResult")
  val submitTestResult = Results.Ok("submitResult")
  val missingSubmitCookieDataTestResult = Results.Ok("missingSubmitCookieResult")

  val vehicleLookupResponseCodeCacheKey = VehicleLookupFormModel.VehicleLookupResponseCodeCacheKey
}
// Minimal lookup-form model: only the two fields the failure flow needs.
case class VehicleLookupFormModel(referenceNumber: String,
                                  registrationNumber: String) extends VehicleLookupFormModelBase
import play.api.libs.json.Json
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CacheKey
object VehicleLookupFormModel {
  // JSON (de)serialization used when the model is stored client side.
  implicit val JsonFormat = Json.format[VehicleLookupFormModel]
  final val VehicleLookupFormModelCacheKey = "test-vehicleLookupFormModel"
  // Cache key tying the model to its client-side cookie slot.
  implicit val Key = CacheKey[VehicleLookupFormModel](VehicleLookupFormModelCacheKey)
  final val VehicleLookupResponseCodeCacheKey = "test-vehicleLookupResponseCode"
}
class VehicleLookupFailureTesting(implicit clientSideSessionFactory: ClientSideSessionFactory,
                                  prefix: CacheKeyPrefix) extends VehicleLookupFailureBase[VehicleLookupFormModel] {

  // Each hook returns its canned result so tests can observe which template
  // method the base controller invoked; references are fully qualified
  // instead of wildcard-imported.
  protected override def presentResult(model: VehicleLookupFormModel)
                                      (implicit request: Request[_]): Result =
    VehicleLookupFailureTesting.presentTestResult

  protected override def missingPresentCookieDataResult()(implicit request: Request[_]): Result =
    VehicleLookupFailureTesting.missingPresentCookieDataTestResult

  protected override def submitResult()(implicit request: Request[_]): Result =
    VehicleLookupFailureTesting.submitTestResult

  protected override def missingSubmitCookieDataResult()(implicit request: Request[_]): Result =
    VehicleLookupFailureTesting.missingSubmitCookieDataTestResult
}
| dvla/vehicles-presentation-common | test/uk/gov/dvla/vehicles/presentation/common/controllers/VehicleLookupFailureTesting.scala | Scala | mit | 2,146 |
package io.buoyant.transformer
package k8s
import com.fasterxml.jackson.annotation.JsonIgnore
import com.twitter.finagle.Path
import io.buoyant.namer._
import java.net.InetAddress
/** Registers the `io.l5d.k8s.localnode` transformer with the config loader. */
class LocalNodeTransformerInitializer extends TransformerInitializer {
  val configClass = classOf[LocalNodeTransformerConfig]
  override val configId = "io.l5d.k8s.localnode"
}
/**
 * Config for the localnode transformer, which restricts name resolution to
 * endpoints co-located with this instance's Kubernetes node.
 *
 * @param hostNetwork when true, pods share the node's network, so locality is
 *                    determined by node name rather than the pod subnet.
 */
case class LocalNodeTransformerConfig(hostNetwork: Option[Boolean])
  extends TransformerConfig {

  @JsonIgnore
  val defaultPrefix = Path.read("/io.l5d.k8s.localnode")

  /** Reads a required environment variable, failing fast with a clear message. */
  @JsonIgnore
  private[this] def requiredEnv(name: String, purpose: String): String =
    sys.env.getOrElse(
      name,
      throw new IllegalArgumentException(
        s"$name env variable must be set to $purpose"
      )
    )

  @JsonIgnore
  override def mk(): NameTreeTransformer =
    if (hostNetwork.getOrElse(false)) {
      // Host networking: pod IPs equal node IPs, so filter on the node name.
      val nodeName = requiredEnv("NODE_NAME", "the node's name")
      new MetadataFiltertingNameTreeTransformer(prefix ++ Path.Utf8(nodeName), Metadata.nodeName, nodeName)
    } else {
      // Pod networking: keep only endpoints within this pod's /24 subnet.
      val ip = requiredEnv("POD_IP", "the pod's IP")
      val local = InetAddress.getByName(ip)
      new SubnetLocalTransformer(prefix ++ Path.Utf8(ip), Seq(local), Netmask("255.255.255.0"))
    }
}
| hhtpcd/linkerd | interpreter/k8s/src/main/scala/io/buoyant/transformer/k8s/LocalNodeTransformerInitializer.scala | Scala | apache-2.0 | 1,268 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spark
import org.apache.ignite._
import org.apache.ignite.configuration.{CacheConfiguration, IgniteConfiguration}
import org.apache.ignite.internal.IgnitionEx
import org.apache.ignite.internal.util.IgniteUtils
import org.apache.spark.sql.SQLContext
import org.apache.spark.{Logging, SparkContext}
/**
 * Ignite context.
 *
 * Entry point of the Ignite/Spark integration. Note the constructor has side
 * effects: unless `standalone` is true it eagerly starts an embedded Ignite
 * server node on every Spark executor, and it always starts a (client-mode)
 * node on the driver.
 *
 * @param sparkContext Spark context.
 * @param cfgF Configuration factory.
 * @param standalone When true, Ignite nodes are managed outside of the Spark executors.
 * @tparam K Key type.
 * @tparam V Value type.
 */
class IgniteContext[K, V](
    @transient val sparkContext: SparkContext,
    cfgF: () ⇒ IgniteConfiguration,
    standalone: Boolean = true
) extends Serializable with Logging {
    // Memoizes the configuration once per JVM; the closure is shipped to executors.
    private val cfgClo = new Once(cfgF)

    // IGNITE_HOME as resolved on the driver; propagated to workers in ignite().
    private val igniteHome = IgniteUtils.getIgniteHome

    if (!standalone) {
        // Get required number of executors with default equals to number of available executors.
        val workers = sparkContext.getConf.getInt("spark.executor.instances",
            sparkContext.getExecutorStorageStatus.length)

        if (workers <= 0)
            throw new IllegalStateException("No Spark executors found to start Ignite nodes.")

        logInfo("Will start Ignite nodes on " + workers + " workers")

        // Start ignite server node on each worker in server mode.
        sparkContext.parallelize(1 to workers, workers).foreachPartition(it ⇒ ignite())
    }

    // Make sure to start Ignite on context creation.
    ignite()

    /**
     * Creates an instance of IgniteContext with the given spring configuration.
     *
     * @param sc Spark context.
     * @param springUrl Spring configuration path.
     */
    def this(
        sc: SparkContext,
        springUrl: String,
        client: Boolean
    ) {
        this(sc, () ⇒ IgnitionEx.loadConfiguration(springUrl).get1(), client)
    }

    /**
     * Creates an instance of IgniteContext with the given spring configuration.
     *
     * @param sc Spark context.
     * @param springUrl Spring configuration path.
     */
    def this(
        sc: SparkContext,
        springUrl: String
    ) {
        this(sc, () ⇒ IgnitionEx.loadConfiguration(springUrl).get1())
    }

    /**
     * Creates an instance of IgniteContext with default Ignite configuration.
     * By default this method will use grid configuration defined in `IGNITE_HOME/config/default-config.xml`
     * configuration file.
     *
     * @param sc Spark context.
     */
    def this(sc: SparkContext) {
        this(sc, IgnitionEx.DFLT_CFG)
    }

    // SQL context bound to this Spark context; created eagerly on the driver.
    val sqlContext = new SQLContext(sparkContext)

    /**
     * Creates an `IgniteRDD` instance from the given cache name. If the cache does not exist, it will be
     * automatically started from template on the first invoked RDD action.
     *
     * @param cacheName Cache name.
     * @return `IgniteRDD` instance.
     */
    def fromCache(cacheName: String): IgniteRDD[K, V] = {
        new IgniteRDD[K, V](this, cacheName, null)
    }

    /**
     * Creates an `IgniteRDD` instance from the given cache configuration. If the cache does not exist, it will be
     * automatically started using the configuration provided on the first invoked RDD action.
     *
     * @param cacheCfg Cache configuration to use.
     * @return `IgniteRDD` instance.
     */
    def fromCache(cacheCfg: CacheConfiguration[K, V]) = {
        new IgniteRDD[K, V](this, cacheCfg.getName, cacheCfg)
    }

    /**
     * Get or start Ignite instance it it's not started yet.
     *
     * Safe to call on both driver and workers; on workers it also inherits
     * the driver's IGNITE_HOME when the local one is unset.
     * @return
     */
    def ignite(): Ignite = {
        val home = IgniteUtils.getIgniteHome

        if (home == null && igniteHome != null) {
            logInfo("Setting IGNITE_HOME from driver not as it is not available on this worker: " + igniteHome)

            IgniteUtils.nullifyHomeDirectory()

            System.setProperty(IgniteSystemProperties.IGNITE_HOME, igniteHome)
        }

        val igniteCfg = cfgClo()

        // check if called from driver
        if (sparkContext != null) igniteCfg.setClientMode(true)

        try {
            Ignition.getOrStart(igniteCfg)
        }
        catch {
            case e: IgniteException ⇒
                logError("Failed to start Ignite.", e)

                throw e
        }
    }

    /**
     * Stops supporting ignite instance. If ignite instance has been already stopped, this operation will be
     * a no-op.
     *
     * @param shutdownIgniteOnWorkers When true, also stops the embedded Ignite nodes on every worker.
     */
    def close(shutdownIgniteOnWorkers: Boolean = false) = {
        // additional check if called from driver
        if (sparkContext != null && shutdownIgniteOnWorkers) {
            // Get required number of executors with default equals to number of available executors.
            val workers = sparkContext.getConf.getInt("spark.executor.instances",
                sparkContext.getExecutorStorageStatus.length)

            if (workers > 0) {
                logInfo("Will stop Ignite nodes on " + workers + " workers")

                // Start ignite server node on each worker in server mode.
                sparkContext.parallelize(1 to workers, workers).foreachPartition(it ⇒ doClose())
            }
        }

        doClose()
    }

    // Stops the Ignite instance of the current JVM (driver or worker).
    private def doClose() = {
        val igniteCfg = cfgClo()

        Ignition.stop(igniteCfg.getGridName, false)
    }
}
/**
 * Auxiliary closure that ensures that passed in closure is executed only once.
 *
 * Implemented with double-checked locking; `res` is volatile (safe
 * publication) and transient, so each JVM — driver and every executor —
 * lazily builds its own configuration instance.
 *
 * @param clo Closure to wrap.
 */
private class Once(clo: () ⇒ IgniteConfiguration) extends Serializable {
    @transient @volatile var res: IgniteConfiguration = null

    def apply(): IgniteConfiguration = {
        if (res == null) {
            this.synchronized {
                if (res == null)
                    res = clo()
            }
        }

        res
    }
}
| DoudTechData/ignite | modules/spark/src/main/scala/org/apache/ignite/spark/IgniteContext.scala | Scala | apache-2.0 | 6,527 |
package billboardparser
/**
* Created by Dakota on 6/24/2015.
*/
object TheoryUtils {

  /** Number of (grouped) beats per bar for each supported time signature. */
  val beatsPerBar = Map(
    "1/4" -> 1,
    "2/4" -> 2,
    "3/4" -> 3,
    "4/4" -> 4,
    "5/4" -> 5,
    "6/4" -> 6,
    "7/4" -> 7,
    "9/4" -> 9,
    "3/8" -> 1,
    "5/8" -> 2,
    "6/8" -> 2,
    "9/8" -> 3,
    "12/8" -> 4)

  // Relative strength of each beat position, per signature. NOTE(review):
  // only a subset of the signatures in beatsPerBar is covered — confirm
  // whether the missing ones are intentional.
  val beatStrengths = Map(
    "4/4"-> List(3,1,2,1),
    "6/4"-> List(3,1,1,2,1,1),
    "9/4"-> List(3,1,1,2,1,1,2,1,1),
    "3/4"-> List(3,1,1),
    "3/8"-> List(3),
    "5/8"-> List(3,1))

  /**
   * Strength of the given beat position within the given time signature.
   *
   * Returns None when the signature is unknown or the beat index is out of
   * range for that signature (previously an out-of-range index threw an
   * IndexOutOfBoundsException).
   */
  def getStrengths(sig: String, beat: Int): Option[Int] =
    beatStrengths.get(sig).flatMap(_.lift(beat))
}
| dakotalk/billboardparser | src/main/scala/billboardparser/TheoryUtils.scala | Scala | mit | 601 |
/*
* ExpoKit Shared Library Scala Language bindings
* Copyright (C) 2014 University of Edinburgh
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package uk.ac.ed.inf.expokit
/**
 * JNI bindings onto the native ExpoKit shared library. Method names mirror
 * the EXPOKIT Fortran routines (presumably dgpadm = Pade approximation of the
 * matrix exponential, dgchbv = Chebyshev-based exp(t*H)*y, dgexpv/dgphiv =
 * Krylov-based w = exp(t*A)*v variants — confirm against the EXPOKIT docs).
 * Each returns a native status code; results are written into the passed arrays.
 */
private class ExpoKitC {
  @native def dgpadm(ideg: Int, m: Int, t: Double, H: Array[Double], R: Array[Double]): Int
  @native def dgchbv(m: Int, t: Double, H: Array[Double], y: Array[Double]): Int
  @native def dgexpv(n: Int, m: Int, t: Double, A: Array[Double], v: Array[Double],
    w: Array[Double], tol: Double, anorm: Double): Int
  @native def dgphiv(n: Int, m: Int, t: Double, A: Array[Double], u: Array[Double],
    v: Array[Double], w: Array[Double], tol: Double, anorm: Double): Int
}
| edinburgh-rbm/expokit | src/main/scala/uk/ac/ed/inf/expokit/ExpoKit.scala | Scala | gpl-3.0 | 1,290 |
package net.mentalarray.doozie.tests
/**
* Created by kdivincenzo on 9/24/14.
*/
/** Minimal WorkflowTask implementation used as a test double. */
class FakeWorkflowTask extends WorkflowTask("l33tTaskYo") {
  // Backing fields for the two configurable properties.
  private var _setting: Int = 0
  private var _requiredSetting: String = null

  def setting: Int = _setting
  // By-name parameter: the argument expression is evaluated at assignment time.
  def setting_=(value: => Int) = _setting = value

  def required: String = _requiredSetting
  def required_=(value: => String) = _requiredSetting = value

  // Validation fails until `required` has been set to a non-blank value.
  override def validate: Unit = {
    if (_requiredSetting.isNullOrWhitespace) {
      throw new WorkflowException("Required value not set.")
    }
  }
}
/*
class FakeBuilder extends TaskBuilder("BobTheBuilder") with Builder {
override type TTask = FakeWorkflowTask
// Must be implemented by the implementing class
override protected def composeTask(name: String): TTask = new FakeWorkflowTask
// Setters used during configuration
def setting(value: => Int) = {
this.task.setting = value
this
}
def required(value: => String) = {
this.task.required = value
this
}
}
object FakeBuilder {
def apply(fn: FakeBuilder => Unit): FakeBuilder = {
val builder = new FakeBuilder
fn(builder)
builder
}
}
*/ | antagonist112358/tomahawk | workflow-engine/test/net/mentalarray/doozie/tests/FakeWorkflowTask.scala | Scala | apache-2.0 | 1,159 |
package scalaxy.streams;
import org.junit.runners.Parameterized
import org.junit.runners.model.RunnerScheduler
import java.util.concurrent.TimeUnit
import java.util.concurrent.Executors
private[streams] class ThreadPoolScheduler extends RunnerScheduler
{
  // Worker count: `junit.parallel.threads` system property, defaulting to
  // twice the number of available processors.
  private[this] val numThreads = {
    val configured = System.getProperty(
      "junit.parallel.threads",
      (Runtime.getRuntime().availableProcessors * 2).toString)
    val parsed = Integer.parseInt(configured)
    println("scalaxy.streams.ThreadPoolScheduler.numThreads = " + parsed)
    parsed
  }

  private[this] val executor = Executors.newFixedThreadPool(numThreads)

  // Called by JUnit once all children have been scheduled: drain the pool.
  override def finished(): Unit = {
    executor.shutdown()
    try {
      executor.awaitTermination(30, TimeUnit.MINUTES)
    } catch {
      case ex: InterruptedException =>
        throw new RuntimeException(ex)
    }
  }

  override def schedule(statement: Runnable) = executor.submit(statement)
}
/** JUnit Parameterized runner that executes its children on a thread pool. */
class Parallelized(cls: Class[_]) extends Parameterized(cls) {
  setScheduler(new ThreadPoolScheduler());
}
| nativelibs4java/scalaxy-streams | src/test/scala/Parallelized.scala | Scala | bsd-3-clause | 992 |
package Utils
/**
* Created by faganpe on 31/03/15.
*/
import SimpleLib.SimpleLibContext
import com.typesafe.config._
object NetflowProperties {

  /** Demonstrates system-property overrides and typesafe-config loading. */
  def main(args: Array[String]): Unit = {
    // System properties override file-based config values; this must be set
    // before the config library is first used.
    System.setProperty("netflow-lib.whatever", "This value comes from a system property")

    // Load our own config values from the default location, application.conf.
    val config = ConfigFactory.load()
    println(s"The application name is: ${config.getString("netflow-app.name")}")

    // SimpleLibContext() defaults to ConfigFactory.load(), so this is
    // equivalent to passing the config in explicitly.
    val netflowContext = new SimpleLibContext()
    netflowContext.printSetting("netflow-lib.foo")
  }
}
| faganpe/KafkaStreamingPOC | src/main/scala/Utils/NetflowProperties.scala | Scala | apache-2.0 | 1,309 |
// scalac: -Xsource:2.13
import scala.language.higherKinds
// A simple getter lens from S to A.
final case class Getter[S, A](get: S => A)

// Wraps a value of F[A] for an arbitrary type constructor F.
final case class Wrap[F[_], A](value: F[A])

object Wrap {
  // Helper to defer specifying second argument to Wrap.
  // Basically a type lambda specialized for Wrap.
  // Wr[F]#ap[A] =:= Wrap[F, A]
  type Wr[F[_]] = { type ap[A] = Wrap[F, A] }

  // Implicit evidence that any Wrap can be unwrapped back to its F[A].
  implicit def unwrapper[F[_], A]: Getter[Wrap[F, A], F[A]] =
    Getter(w => w.value)
}
object Test {
  import Wrap._

  type Foo[A] = List[A]
  type Bar[A] = String

  type WrapFoo1[A] = Wrap[Foo, A]
  type WrapBar1[A] = Wrap[Bar, A]

  // Implicit resolution works for the direct aliases.
  implicitly[Getter[WrapFoo1[Int], Foo[Int]]]
  implicitly[Getter[WrapBar1[Int], Bar[Int]]]

  type WrapFoo2[A] = Wr[Foo]#ap[A]
  type WrapBar2[A] = Wr[Bar]#ap[A]

  // here's evidence that the new types are the same as the old ones
  implicitly[WrapFoo2[Int] =:= WrapFoo1[Int]]
  implicitly[WrapBar2[Int] =:= WrapBar1[Int]]

  // Resolution must also see through the structural type-lambda aliases.
  implicitly[Getter[WrapFoo2[Int], Foo[Int]]]
  implicitly[Getter[WrapBar2[Int], Bar[Int]]]
}
| scala/scala | test/files/pos/t10197.scala | Scala | apache-2.0 | 1,009 |
import sbt._
import Import._
object TestBuild extends Build
{
  // Root aggregates `sub`; its own `f` task always fails.
  lazy val root = Project("root", file("."), aggregate = Seq(sub)) settings(
    TaskKey[Unit]("f") := sys.error("f")
  )
  // The aggregated subproject's `f` task succeeds.
  lazy val sub = Project("sub", file("sub")) settings(
    TaskKey[Unit]("f") := {}
  )
}
| dansanduleac/sbt | sbt/src/sbt-test/actions/reload/project/TestProject.scala | Scala | bsd-3-clause | 267 |
package beam.agentsim.agents.choice.mode
import beam.agentsim.agents.modalbehaviors.ModeChoiceCalculator
import beam.router.Modes
import beam.router.Modes.BeamMode.RIDE_HAIL
import beam.router.model.EmbodiedBeamTrip
import beam.sim.BeamServices
import beam.sim.config.BeamConfig
import beam.sim.population.AttributesOfIndividual
import org.matsim.api.core.v01.population.Activity
import org.matsim.api.core.v01.population.Person
import scala.collection.mutable.ListBuffer
/**
* BEAM
*/
/**
 * Mode choice strategy that picks the first ride-hail itinerary when one is
 * offered, otherwise falls back to a random alternative.
 */
class ModeChoiceRideHailIfAvailable(val beamServices: BeamServices) extends ModeChoiceCalculator {

  override lazy val beamConfig: BeamConfig = beamServices.beamConfig

  override def apply(
    alternatives: IndexedSeq[EmbodiedBeamTrip],
    attributesOfIndividual: AttributesOfIndividual,
    destinationActivity: Option[Activity],
    person: Option[Person] = None
  ): Option[EmbodiedBeamTrip] = {
    // Index of the first ride-hail itinerary, or -1 when none is present.
    val firstRideHail = alternatives.indexWhere(_.tripClassifier == RIDE_HAIL)

    if (firstRideHail >= 0) {
      Some(alternatives(firstRideHail))
    } else if (alternatives.nonEmpty) {
      Some(alternatives(chooseRandomAlternativeIndex(alternatives)))
    } else {
      None
    }
  }

  // Utilities are irrelevant to this deterministic strategy.
  override def utilityOf(
    alternative: EmbodiedBeamTrip,
    attributesOfIndividual: AttributesOfIndividual,
    destinationActivity: Option[Activity]
  ): Double = 0.0

  override def utilityOf(mode: Modes.BeamMode, cost: Double, time: Double, numTransfers: Int): Double = 0.0

  override def computeAllDayUtility(
    trips: ListBuffer[EmbodiedBeamTrip],
    person: Person,
    attributesOfIndividual: AttributesOfIndividual
  ): Double = 0.0
}
| colinsheppard/beam | src/main/scala/beam/agentsim/agents/choice/mode/ModeChoiceRideHailIfAvailable.scala | Scala | gpl-3.0 | 1,721 |
package com.twitter.hashing
import scala.collection.mutable
class DistributionTester[A](distributor: Distributor[A]) {

  /**
   * Returns a normalized standard deviation indicating how well the keys
   * are distributed between the nodes. The closer to 0 the better.
   */
  def distributionDeviation(keys: Seq[Long]): Double = {
    // Count how many keys land on each node.
    val countsByNode: Map[A, Int] =
      keys.groupBy(key => distributor.nodeForHash(key)).map { case (node, ks) => node -> ks.size }

    // Nodes that received no keys still participate, with a frequency of zero.
    val frequencies: List[Int] =
      countsByNode.values.toList ++ List.fill(distributor.nodeCount - countsByNode.size)(0)

    val average = frequencies.sum.toDouble / frequencies.size
    val squaredDiffs = frequencies.map(f => math.pow(f - average, 2))
    val standardDeviation = math.sqrt(squaredDiffs.sum / (frequencies.size - 1))
    standardDeviation / average
  }
}
| folone/util | util-hashing/src/test/scala/com/twitter/hashing/DistributionTester.scala | Scala | apache-2.0 | 875 |
package com.sksamuel.elastic4s.http.bulk
import com.sksamuel.elastic4s.Show
import com.sksamuel.elastic4s.bulk.BulkRequest
import com.sksamuel.elastic4s.http._
import com.sksamuel.exts.Logging
import org.apache.http.entity.ContentType
/** HTTP handler for Elasticsearch `_bulk` requests. */
trait BulkHandlers {

  implicit object BulkHandler extends Handler[BulkRequest, BulkResponse] with Logging {

    override def build(bulk: BulkRequest): ElasticRequest = {
      // Render each bulk action as one JSON row (newline-delimited body).
      val rows = BulkBuilderFn(bulk)
      // es seems to require a trailing new line as well
      val entity = HttpEntity(rows.mkString("\\n") + "\\n", ContentType.APPLICATION_JSON.getMimeType)
      logger.debug("Sending bulk request")
      logger.debug(rows.mkString("\\n"))

      // Optional query parameters: timeout and refresh policy.
      val params = scala.collection.mutable.Map.empty[String, String]
      bulk.timeout.foreach(params.put("timeout", _))
      bulk.refresh.map(RefreshPolicyHttpValue.apply).foreach(params.put("refresh", _))

      ElasticRequest("POST", "/_bulk", params.toMap, entity)
    }
  }
}
| Tecsisa/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/bulk/BulkHandlers.scala | Scala | apache-2.0 | 973 |
package definiti.core.parser.project
import definiti.common.ast.{CalculatorOperator, LogicalOperator}
import scala.util.parsing.input.{NoPosition, Position, Positional}
/** Base trait for all lexer tokens of the project language. */
sealed trait TokenProject extends Positional {
  // End position of the token; Positional itself only tracks the start (pos).
  var posEnd: Position = NoPosition
}
// --- Keyword tokens ---
case class PACKAGE() extends TokenProject
case class IMPORT() extends TokenProject
case class TYPE() extends TokenProject
case class IF() extends TokenProject
case class ELSE() extends TokenProject
case class VERIFICATION() extends TokenProject
case class VERIFY() extends TokenProject
case class VERIFYING() extends TokenProject
case class DEF() extends TokenProject
case class CONTEXT() extends TokenProject
case class ENUM() extends TokenProject
case class MESSAGE() extends TokenProject
case class OK() extends TokenProject
case class KO() extends TokenProject
case class AS() extends TokenProject
// --- Punctuation and delimiter tokens ---
case class LEFT_BRACKET() extends TokenProject
case class RIGHT_BRACKET() extends TokenProject
case class LEFT_BRACE() extends TokenProject
case class RIGHT_BRACE() extends TokenProject
case class LEFT_PARENTHESIS() extends TokenProject
case class RIGHT_PARENTHESIS() extends TokenProject
case class COLON() extends TokenProject
case class COMMA() extends TokenProject
case class RIGHT_ARROW() extends TokenProject
case class DOT() extends TokenProject
case class EQUAL() extends TokenProject
case class QUESTION() extends TokenProject
// --- Literal and identifier tokens (carry their parsed value) ---
case class BOOLEAN(value: Boolean) extends TokenProject
case class NUMBER(value: BigDecimal) extends TokenProject
case class INTEGER(value: BigInt) extends TokenProject
case class STRING(value: String) extends TokenProject
case class IDENTIFIER(value: String) extends TokenProject
// --- Operator tokens (carry the resolved operator kind) ---
case class CALCULATOR_OPERATOR_LEVEL_1(kind: CalculatorOperator.Value) extends TokenProject
case class CALCULATOR_OPERATOR_LEVEL_2(kind: CalculatorOperator.Value) extends TokenProject
case class LOGICAL_OPERATOR(kind: LogicalOperator.Value) extends TokenProject
case class LOGICAL_COMBINATION_OPERATOR(kind: LogicalOperator.Value) extends TokenProject
case class NOT() extends TokenProject
// --- Comment tokens (carry the raw comment text) ---
case class DOC_COMMENT(value: String) extends TokenProject
case class MULTILINE_COMMENT(value: String) extends TokenProject
case class LINE_COMMENT(value: String) extends TokenProject
case class CONTEXT_CONTENT(content: String) extends TokenProject | definiti/definiti-core | src/main/scala/definiti/core/parser/project/TokenProject.scala | Scala | mit | 2,319 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.di
import example.SealedTrait.Adt
import wvlet.airframe.Design
import wvlet.airspec.AirSpec
/**
 * Regression test: binding a sealed-trait ADT value must compile and resolve
 * to the same instance at build time.
 */
object SealedTraitBindingTest extends AirSpec {
  // This code compilation may fail when using Mill.
  case class Service(adt: Adt) {}

  test("compile test") {
    // Just need to check the compilation failure
    val design = Design.newSilentDesign
      .bind[Adt].toInstance(Adt.Foo)

    design.build[Service] { s =>
      s.adt shouldBeTheSameInstanceAs Adt.Foo
    }
  }
}
| wvlet/airframe | airframe-di/src/test/scala/wvlet/airframe/di/SealedTraitBindingTest.scala | Scala | apache-2.0 | 1,066 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.