code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
 * Launch following commands:
 *   export MASTER='local-cluster[3,2,1024]'
 *   bin/sparkling-shell -i examples/scripts/chicagoCrimeSmall.script.scala
 *
 * When running using spark shell or using scala rest API:
 *   SQLContext is available as sqlContext
 *   - if you want to use sqlContext implicitly, you have to redefine it like:
 *     implicit val sqlContext = sqlContext,
 *     but better is to use it like this: implicit val sqlContext = SQLContext.getOrCreate(sc)
 *   SparkContext is available as sc
 */
// 1. Create an environment
import org.apache.spark.SparkFiles
import org.apache.spark.examples.h2o.DemoUtils._
import org.apache.spark.examples.h2o.{Crime, ChicagoCrimeApp}
import org.apache.spark.h2o.H2OContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
// 2. Register local files so they are shipped to every executor
addFiles(sc,
  "examples/smalldata/chicagoAllWeather.csv",
  "examples/smalldata/chicagoCensus.csv",
  "examples/smalldata/chicagoCrimes10k.csv"
)
// 3. Create SQL support
implicit val sqlContext = SQLContext.getOrCreate(sc)
// 4. Start H2O services
implicit val h2oContext = H2OContext.getOrCreate(sc)
// 5. Create App
val app = new ChicagoCrimeApp(
  weatherFile = SparkFiles.get("chicagoAllWeather.csv"),
  censusFile = SparkFiles.get("chicagoCensus.csv"),
  crimesFile = SparkFiles.get("chicagoCrimes10k.csv"))(sc, sqlContext, h2oContext)
// 6. Load data
val (weatherTable, censusTable, crimesTable) = app.loadAll()
// 7. Train model
val (gbmModel, dlModel) = app.train(weatherTable, censusTable, crimesTable)
// 8. Create list of crimes to predict
val crimeExamples = Seq(
  Crime("02/08/2015 11:43:58 PM", 1811, "NARCOTICS", "STREET", false, 422, 4, 7, 46, 18),
  Crime("02/08/2015 11:00:39 PM", 1150, "DECEPTIVE PRACTICE", "RESIDENCE", false, 923, 9, 14, 63, 11))
// 9. Score each crime and predict probability of arrest
for (crime <- crimeExamples) {
  val arrestProbGBM = 100 * app.scoreEvent(crime,
    gbmModel,
    censusTable)(sqlContext, h2oContext)
  val arrestProbDL = 100 * app.scoreEvent(crime,
    dlModel,
    censusTable)(sqlContext, h2oContext)
  println(
    s"""
       |Crime: $crime
       |  Probability of arrest based on DeepLearning: ${arrestProbDL} %
       |  Probability of arrest based on GBM: ${arrestProbGBM} %
       """.stripMargin)
}
| nilbody/sparkling-water | examples/scripts/chicagoCrimeSmall.script.scala | Scala | apache-2.0 | 2,309 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.Utils
/**
 * :: DeveloperApi ::
 * Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
 * tasks several times for "ephemeral" failures, and only report back failures that require some
 * old stages to be resubmitted, such as shuffle map fetch failures.
 *
 * Sealed so that pattern matches over end reasons are checked for exhaustiveness and new
 * reasons must be declared in this file.
 */
@DeveloperApi
sealed trait TaskEndReason
/**
 * :: DeveloperApi ::
 * Task succeeded.
 *
 * The only non-failure [[TaskEndReason]]; every other case in this file extends
 * [[TaskFailedReason]].
 */
@DeveloperApi
case object Success extends TaskEndReason
/**
 * :: DeveloperApi ::
 * Various possible reasons why a task failed.
 *
 * Each concrete failure reason provides a human-readable rendering for the UI via
 * `toErrorString`.
 */
@DeveloperApi
sealed trait TaskFailedReason extends TaskEndReason {
  /** Error message displayed in the web UI. */
  def toErrorString: String
}
/**
 * :: DeveloperApi ::
 * A [[org.apache.spark.scheduler.ShuffleMapTask]] that completed successfully earlier, but we
 * lost the executor before the stage completed. This means Spark needs to reschedule the task
 * to be re-executed on a different executor.
 */
@DeveloperApi
case object Resubmitted extends TaskFailedReason {
  // Rendered in the web UI via TaskFailedReason.toErrorString.
  override def toErrorString: String = "Resubmitted (resubmitted due to lost executor)"
}
/**
 * :: DeveloperApi ::
 * Task failed to fetch shuffle data from a remote node. Probably means we have lost the remote
 * executors the task is trying to fetch from, and thus need to rerun the previous stage.
 */
@DeveloperApi
case class FetchFailed(
    bmAddress: BlockManagerId, // Note that bmAddress can be null
    shuffleId: Int,
    mapId: Int,
    reduceId: Int,
    message: String)
  extends TaskFailedReason {
  override def toErrorString: String = {
    // bmAddress is explicitly allowed to be null, so render it defensively.
    val addressText = Option(bmAddress).map(_.toString).getOrElse("null")
    s"FetchFailed($addressText, shuffleId=$shuffleId, mapId=$mapId, reduceId=$reduceId, " +
      s"message=\\n$message\\n)"
  }
}
/**
 * :: DeveloperApi ::
 * Task failed due to a runtime exception. This is the most common failure case and also captures
 * user program exceptions.
 *
 * `stackTrace` contains the stack trace of the exception itself. It still exists for backward
 * compatibility. It's better to use `this(e: Throwable, metrics: Option[TaskMetrics])` to
 * create `ExceptionFailure` as it will handle the backward compatibility properly.
 *
 * `fullStackTrace` is a better representation of the stack trace because it contains the whole
 * stack trace including the exception and its causes.
 */
@DeveloperApi
case class ExceptionFailure(
    className: String,
    description: String,
    stackTrace: Array[StackTraceElement],
    fullStackTrace: String,
    metrics: Option[TaskMetrics])
  extends TaskFailedReason {
  // Convenience constructor that derives every representation from the throwable itself.
  private[spark] def this(e: Throwable, metrics: Option[TaskMetrics]) {
    this(e.getClass.getName, e.getMessage, e.getStackTrace, Utils.exceptionString(e), metrics)
  }
  // fullStackTrace was only added in 1.2.0; when deserializing data produced by an older
  // version it may be null, in which case we fall back to the legacy rendering.
  override def toErrorString: String =
    Option(fullStackTrace).getOrElse(exceptionString(className, description, stackTrace))
  /**
   * Return a nice string representation of the exception, including the stack trace.
   * Note: It does not include the exception's causes, and is only used for backward compatibility.
   */
  private def exceptionString(
      className: String,
      description: String,
      stackTrace: Array[StackTraceElement]): String = {
    // Both fields may be null on legacy data, so normalise them to empty strings.
    val desc = Option(description).getOrElse("")
    val st = Option(stackTrace).fold("")(_.map(" " + _).mkString("\\n"))
    s"$className: $desc\\n$st"
  }
}
/**
 * :: DeveloperApi ::
 * The task finished successfully, but the result was lost from the executor's block manager before
 * it was fetched.
 */
@DeveloperApi
case object TaskResultLost extends TaskFailedReason {
  // Rendered in the web UI via TaskFailedReason.toErrorString.
  override def toErrorString: String = "TaskResultLost (result lost from block manager)"
}
/**
 * :: DeveloperApi ::
 * Task was killed intentionally and needs to be rescheduled.
 */
@DeveloperApi
case object TaskKilled extends TaskFailedReason {
  // Rendered in the web UI via TaskFailedReason.toErrorString.
  override def toErrorString: String = "TaskKilled (killed intentionally)"
}
/**
 * :: DeveloperApi ::
 * Task requested the driver to commit, but was denied.
 *
 * @param jobID id of the job whose output commit was denied
 * @param partitionID partition the task was writing
 * @param attemptNumber attempt number of the denied task attempt
 */
@DeveloperApi
case class TaskCommitDenied(
    jobID: Int,
    partitionID: Int,
    attemptNumber: Int) extends TaskFailedReason {
  // The first literal contains no interpolation, so the `s` interpolator was redundant there.
  override def toErrorString: String = "TaskCommitDenied (Driver denied task commit)" +
    s" for job: $jobID, partition: $partitionID, attemptNumber: $attemptNumber"
}
/**
 * :: DeveloperApi ::
 * The task failed because the executor that it was running on was lost. This may happen because
 * the task crashed the JVM.
 */
@DeveloperApi
case class ExecutorLostFailure(execId: String) extends TaskFailedReason {
  // Identifies which executor disappeared; shown in the web UI.
  override def toErrorString: String = s"ExecutorLostFailure (executor $execId lost)"
}
/**
 * :: DeveloperApi ::
 * We don't know why the task ended -- for example, because of a ClassNotFound exception when
 * deserializing the task result.
 */
@DeveloperApi
case object UnknownReason extends TaskFailedReason {
  // Catch-all used when no other TaskFailedReason applies.
  override def toErrorString: String = "UnknownReason"
}
| andrewor14/iolap | core/src/main/scala/org/apache/spark/TaskEndReason.scala | Scala | apache-2.0 | 6,126 |
package name.bshelden.pouches
import cpw.mods.fml.common.network.IGuiHandler
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.world.World
/**
 * Proxy common to client and server
 *
 * (c) 2013 Byron Shelden
 * See COPYING for details
 */
class CommonProxy extends IGuiHandler {
  def getServerGuiElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int): AnyRef = {
    val heldItem = player.getCurrentEquippedItem
    // Open the pouch container only when the player is actually holding the pouch item;
    // both the item stack and its item may be null, hence the Option chain.
    val holdingPouch = (for {
      stack <- Option(heldItem)
      item <- Option(stack.getItem)
    } yield item == Pouches.pouch).getOrElse(false)
    if (holdingPouch) {
      new ContainerPouch(world, player, new InventoryPouch(heldItem, 9 * 3))
    } else {
      null
    }
  }

  // No client-side GUI element is provided by this common proxy.
  def getClientGuiElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int): AnyRef = null
}
object CommonProxy {
  // printf-style texture path templates; the %d placeholder is filled in elsewhere —
  // presumably with a pouch variant index (TODO confirm against the call sites).
  val POUCH_PNG_FORMAT: String = "/mods/Pouches/textures/items/pouch_%d.png"
  val POUCH_OPEN_PNG_FORMAT: String = "/mods/Pouches/textures/items/pouchopen_%d.png"
}
| bshelden/Pouches | src/main/scala/name/bshelden/pouches/CommonProxy.scala | Scala | bsd-2-clause | 1,008 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkahttpjson4s
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.stream.{ ActorMaterializer, Materializer }
import org.json4s.{ jackson, DefaultFormats }
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.io.StdIn
object ExampleApp {
  // Payload type round-tripped through JSON by the example route.
  final case class Foo(bar: String)
  // Binds an HTTP server on 127.0.0.1:8000, blocks until ENTER is pressed,
  // then terminates the actor system.
  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()
    implicit val mat = ActorMaterializer()
    Http().bindAndHandle(route, "127.0.0.1", 8000)
    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }
  // POST / decodes the entity as Foo and echoes it back. The implicit
  // serialization/formats values must be in scope before `as[Foo]` so that
  // Json4sSupport can resolve the (un)marshallers.
  def route(implicit mat: Materializer) = {
    import Directives._
    import Json4sSupport._
    implicit val serialization = jackson.Serialization // or native.Serialization
    implicit val formats = DefaultFormats
    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    }
  }
}
| el-dom/akka-http-json | akka-http-json4s/src/test/scala/de/heikoseeberger/akkahttpjson4s/ExampleApp.scala | Scala | apache-2.0 | 1,651 |
package regolic.sat
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
object ProofChecker {
  /**
   * Checks that `inferences` is a valid resolution proof deriving `fact`.
   *
   * The proof is accepted when:
   *  - the last inference derives exactly `fact`,
   *  - every [[ResolutionInference]] refers only to inferences appearing strictly
   *    earlier in the array, and
   *  - each derived clause is a genuine resolvent of its two premises.
   *
   * @param inferences proof steps in derivation order
   * @param fact       the clause the proof is supposed to establish
   * @return true if the proof is well formed and derives `fact`
   */
  def apply(inferences: Array[Inference], fact: Set[Literal]): Boolean = {
    // An empty proof derives nothing (previously this would have thrown on `.last`).
    if (inferences.isEmpty || inferences.last.clause != fact) false
    else {
      val infToIndex = new HashMap[Inference, Int]
      var i = 0
      var isValid = true
      // Make sure that every resolution step is valid.
      while (i < inferences.size && isValid) {
        try {
          inferences(i) match {
            case inf @ InputInference(_) =>
              // An input clause is valid by definition; just record its position.
              infToIndex(inf) = i
            case inf @ ResolutionInference(cl, left, right) =>
              val leftIndex = infToIndex(left)
              val rightIndex = infToIndex(right)
              if (leftIndex >= i || rightIndex >= i) {
                // A step may only resolve clauses derived strictly earlier.
                isValid = false
              } else if (!isResolvent(cl, left.clause, right.clause)) {
                println("INVALID INFERENCE !")
                println("\tleft premise: " + left.clause)
                println("\tright premise: " + right.clause)
                println("\t => " + cl)
                isValid = false
              } else {
                infToIndex(inf) = i
              }
          }
        } catch {
          // A failed premise lookup in infToIndex (NoSuchElementException) or any other
          // non-fatal error invalidates the proof. Fatal JVM errors (OutOfMemoryError, ...)
          // now propagate instead of being silently swallowed as "invalid".
          case NonFatal(_) => isValid = false
        }
        i += 1
      }
      isValid
    }
  }

  /**
   * Returns true when `resolvent` is obtained by resolving `left` and `right` on some
   * complementary pair of literals (same id, opposite polarity): the resolvent must equal
   * the union of both premises minus the resolved pair.
   */
  def isResolvent(resolvent: Set[Literal], left: Set[Literal], right: Set[Literal]): Boolean = {
    left.exists(l1 => right.exists(l2 =>
      l1.getID == l2.getID && l1.polarity != l2.polarity &&
      resolvent == (left.filterNot(_ == l1) ++ right.filterNot(_ == l2))
    ))
  }
}
/*
* From tree of proof get a graph of proof
*/
//val inferences: Array[Inference] = {
// var inf2index: HashMap[Inference, Int] = new HashMap()
// var buffer: ArrayBuffer[Inference] = new ArrayBuffer()
// var i = 0
// var queue: Queue[Inference] = new Queue()
// queue.enqueue(contradiction)
// while(!queue.isEmpty) {
// val current = queue.dequeue
// if(!inf2index.contains(current)) {
// current match {
// case InputInference(cl) =>
// val newInf = RefInputInference(i, cl)
// inf2index(current) = i
// i += 1
// buffer.append(newInf)
// case ResolutionInference(cl, left, right) =>
// (inf2index.get(left), inf2index.get(right)) match {
// case (None, None) =>
// queue.enqueue(left)
// queue.enqueue(right)
// queue.enqueue(current)
// case (None, _) =>
// queue.enqueue(left)
// queue.enqueue(current)
// case (_, None) =>
// queue.enqueue(right)
// queue.enqueue(current)
// case (Some(il), Some(ir)) =>
// val newInf = RefResolutionInference(i, cl, il, ir)
// inf2index(current) = i
// i += 1
// buffer.append(newInf)
// }
// case _ => sys.error("unexpected")
// }
// }
// }
// buffer.toArray
//}
| regb/scabolic | src/main/scala/regolic/sat/ProofChecker.scala | Scala | mit | 3,155 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.calcite
import org.apache.flink.sql.parser.SqlProperty
import org.apache.flink.sql.parser.dml.RichSqlInsert
import org.apache.flink.table.planner.calcite.PreValidateReWriter.appendPartitionProjects
import org.apache.calcite.plan.RelOptTable
import org.apache.calcite.prepare.CalciteCatalogReader
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory, RelDataTypeField}
import org.apache.calcite.runtime.{CalciteContextException, Resources}
import org.apache.calcite.sql.`type`.SqlTypeUtil
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.calcite.sql.util.SqlBasicVisitor
import org.apache.calcite.sql.validate.{SqlValidatorException, SqlValidatorTable, SqlValidatorUtil}
import org.apache.calcite.sql.{SqlCall, SqlIdentifier, SqlLiteral, SqlNode, SqlNodeList, SqlSelect, SqlUtil}
import org.apache.calcite.util.Static.RESOURCE
import java.util
import scala.collection.JavaConversions._
/** Implements [[org.apache.calcite.sql.util.SqlVisitor]]
  * interface to do some rewrite work before sql node validation. */
class PreValidateReWriter(
    val catalogReader: CalciteCatalogReader,
    val typeFactory: RelDataTypeFactory) extends SqlBasicVisitor[Unit] {
  // Rewrites INSERT ... PARTITION statements whose source is a plain SELECT by
  // injecting the static partition values into the select list; everything else
  // is left untouched.
  override def visit(call: SqlCall): Unit = {
    call match {
      case insert: RichSqlInsert if insert.getStaticPartitions.nonEmpty =>
        insert.getSource match {
          case select: SqlSelect =>
            appendPartitionProjects(insert, catalogReader, typeFactory,
              select, insert.getStaticPartitions)
          case _ => // non-SELECT sources are not rewritten
        }
      case _ =>
    }
  }
}
object PreValidateReWriter {
  //~ Tools ------------------------------------------------------------------
  /**
   * Append the static partitions to the data source projection list. The columns are appended to
   * the corresponding positions.
   *
   * <p>If we have a table A with schema (<a>, <b>, <c>) whose
   * partition columns are (<a>, <c>), and got a query
   * <blockquote><pre>
   * insert into A partition(a='11', c='22')
   * select b from B
   * </pre></blockquote>
   * The query would be rewritten to:
   * <blockquote><pre>
   * insert into A partition(a='11', c='22')
   * select cast('11' as tpe1), b, cast('22' as tpe2) from B
   * </pre></blockquote>
   * Where the "tpe1" and "tpe2" are data types of column a and c of target table A.
   *
   * @param sqlInsert RichSqlInsert instance
   * @param calciteCatalogReader catalog reader
   * @param typeFactory type factory
   * @param select Source sql select
   * @param partitions Static partition statements
   */
  def appendPartitionProjects(sqlInsert: RichSqlInsert,
      calciteCatalogReader: CalciteCatalogReader,
      typeFactory: RelDataTypeFactory,
      select: SqlSelect,
      partitions: SqlNodeList): Unit = {
    val names = sqlInsert.getTargetTable.asInstanceOf[SqlIdentifier].names
    val table = calciteCatalogReader.getTable(names)
    if (table == null) {
      // There is no table exists in current catalog,
      // just skip to let other validation error throw.
      return
    }
    val targetRowType = createTargetRowType(typeFactory,
      calciteCatalogReader, table, sqlInsert.getTargetColumnList)
    // validate partition fields first.
    // assignedFields maps target-column index -> the (possibly cast) partition literal.
    val assignedFields = new util.LinkedHashMap[Integer, SqlNode]
    val relOptTable = table match {
      case t: RelOptTable => t
      case _ => null
    }
    for (node <- partitions.getList) {
      val sqlProperty = node.asInstanceOf[SqlProperty]
      val id = sqlProperty.getKey
      val targetField = SqlValidatorUtil.getTargetField(targetRowType,
        typeFactory, id, calciteCatalogReader, relOptTable)
      // A partition column must exist and may be assigned at most once.
      validateField(idx => !assignedFields.contains(idx), id, targetField)
      val value = sqlProperty.getValue.asInstanceOf[SqlLiteral]
      // Cast the literal to the partition column's declared type when the types differ.
      assignedFields.put(targetField.getIndex,
        maybeCast(value, value.createSqlType(typeFactory), targetField.getType, typeFactory))
    }
    // Interleave the static partition values into the select list at their target positions,
    // consuming the original projection items for the non-partition columns in order.
    val currentNodes = new util.ArrayList[SqlNode](select.getSelectList.getList)
    val fixedNodes = new util.ArrayList[SqlNode]
    0 until targetRowType.getFieldList.length foreach {
      idx =>
        if (assignedFields.containsKey(idx)) {
          fixedNodes.add(assignedFields.get(idx))
        } else if (currentNodes.size() > 0) {
          fixedNodes.add(currentNodes.remove(0))
        }
    }
    // Although it is error case, we still append the old remaining
    // projection nodes to new projection.
    if (currentNodes.size > 0) {
      fixedNodes.addAll(currentNodes)
    }
    select.setSelectList(new SqlNodeList(fixedNodes, select.getSelectList.getParserPosition))
  }
  /**
   * Derives a row-type for INSERT and UPDATE operations.
   *
   * <p>This code snippet is almost inspired by
   * [[org.apache.calcite.sql.validate.SqlValidatorImpl#createTargetRowType]].
   * It is the best that the logic can be merged into Apache Calcite,
   * but this needs time.
   *
   * @param typeFactory TypeFactory
   * @param catalogReader CalciteCatalogReader
   * @param table Target table for INSERT/UPDATE
   * @param targetColumnList List of target columns, or null if not specified
   * @return Rowtype
   */
  private def createTargetRowType(
      typeFactory: RelDataTypeFactory,
      catalogReader: CalciteCatalogReader,
      table: SqlValidatorTable,
      targetColumnList: SqlNodeList): RelDataType = {
    val baseRowType = table.getRowType
    // No explicit column list means the whole table row type is the target.
    if (targetColumnList == null) return baseRowType
    val fields = new util.ArrayList[util.Map.Entry[String, RelDataType]]
    val assignedFields = new util.HashSet[Integer]
    val relOptTable = table match {
      case t: RelOptTable => t
      case _ => null
    }
    for (node <- targetColumnList) {
      val id = node.asInstanceOf[SqlIdentifier]
      val targetField = SqlValidatorUtil.getTargetField(baseRowType,
        typeFactory, id, catalogReader, relOptTable)
      // HashSet.add returns false on duplicates, which validateField reports as an error.
      validateField(assignedFields.add, id, targetField)
      fields.add(targetField)
    }
    typeFactory.createStructType(fields)
  }
  /** Check whether the field is valid. **/
  private def validateField(tester: Function[Integer, Boolean],
      id: SqlIdentifier,
      targetField: RelDataTypeField): Unit = {
    if (targetField == null) {
      // The referenced column does not exist in the target table.
      throw newValidationError(id, RESOURCE.unknownTargetColumn(id.toString))
    }
    // `tester` returns false when the column index was already assigned (duplicate column).
    if (!tester.apply(targetField.getIndex)) {
      throw newValidationError(id, RESOURCE.duplicateTargetColumn(targetField.getName))
    }
  }
  // Wraps a validator error with the parser position of the offending node.
  private def newValidationError(node: SqlNode,
      e: Resources.ExInst[SqlValidatorException]): CalciteContextException = {
    assert(node != null)
    val pos = node.getParserPosition
    SqlUtil.newContextException(pos, e)
  }
  // This code snippet is copied from the SqlValidatorImpl.
  private def maybeCast(node: SqlNode,
      currentType: RelDataType,
      desiredType: RelDataType,
      typeFactory: RelDataTypeFactory): SqlNode = {
    // No cast is needed when the types already match, or differ only in nullability.
    if (currentType == desiredType
      || (currentType.isNullable != desiredType.isNullable
        && typeFactory.createTypeWithNullability(currentType, desiredType.isNullable)
        == desiredType)) {
      node
    } else {
      SqlStdOperatorTable.CAST.createCall(SqlParserPos.ZERO,
        node, SqlTypeUtil.convertTypeToSpec(desiredType))
    }
  }
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/calcite/PreValidateReWriter.scala | Scala | apache-2.0 | 8,239 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.firefly.sinks.http
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.{Flow, GraphDSL, MergePreferred, Sink}
import akka.stream.{ActorMaterializer, KillSwitch, SinkShape}
import com.flipkart.connekt.commons.entities.{HTTPEventSink, Subscription, SubscriptionEvent}
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.utils.StringUtils._
import com.flipkart.connekt.firefly.flows.dispatchers.HttpDispatcher
import scala.collection._
import scala.concurrent.ExecutionContext
class HttpSink(subscription: Subscription, retryLimit: Int, killSwitch: KillSwitch)(implicit am: ActorMaterializer, sys: ActorSystem, ec: ExecutionContext) extends Instrumented {
  // Shared HTTP client flow taking (HttpRequest, HttpRequestTracker) pairs;
  // see HttpDispatcher.httpFlow for its construction.
  private val httpCachedClient = HttpDispatcher.httpFlow
  /**
   * Builds a sink that delivers each SubscriptionEvent over HTTP.
   *
   * Graph wiring:
   *  - events are mapped to (HttpRequest, tracker) pairs,
   *  - merged with retry requests re-emitted by the response handler
   *    (retries feed the merge's preferred inlet, so they win over new events),
   *  - pushed through the cached HTTP client flow,
   *  - and the response handler routes outcomes to three outlets:
   *    out(0) = retry, out(1) = delivered (logged), out(2) = permanently failed (logged).
   */
  def getHttpSink: Sink[SubscriptionEvent, NotUsed] = {
    Sink.fromGraph(GraphDSL.create() { implicit b =>
      val httpResponseHandler = b.add(new HttpResponseHandler(retryLimit, subscription.shutdownThreshold, subscription.id, killSwitch))
      val event2HttpRequestMapper = b.add(Flow[SubscriptionEvent].map(httpPrepare))
      val httpRequestMergePref = b.add(MergePreferred[(HttpRequest, HttpRequestTracker)](1))
      event2HttpRequestMapper ~> httpRequestMergePref.in(0)
      httpRequestMergePref.out ~> httpCachedClient ~> httpResponseHandler.in
      // Retry loop: failed-but-retryable requests re-enter the merge on its preferred port.
      httpResponseHandler.out(0) ~> httpRequestMergePref.preferred
      httpResponseHandler.out(1) ~> Sink.foreach[(HttpRequest, HttpRequestTracker)] { event =>
        ConnektLogger(LogFile.SERVICE).info(s"HttpSink message delivered to ${event._1._2}")
        ConnektLogger(LogFile.SERVICE).debug(s"HttpSink message delivered to {}", supplier(event))
      }
      httpResponseHandler.out(2) ~> Sink.foreach[(HttpRequest, HttpRequestTracker)] { event =>
        ConnektLogger(LogFile.SERVICE).warn(s"HttpSink message failed to deliver to ${event._1._2}")
        ConnektLogger(LogFile.SERVICE).debug(s"HttpSink message failed {}", supplier(event))
      }
      SinkShape(event2HttpRequestMapper.in)
    })
  }
  /**
   * Turns a SubscriptionEvent into an HttpRequest paired with a tracker for retry
   * bookkeeping. The event's destination, when non-null, overrides the subscription
   * sink URL, and event headers (when present) are attached as raw HTTP headers.
   */
  private def httpPrepare(event: SubscriptionEvent): (HttpRequest, HttpRequestTracker) = {
    val sink = subscription.sink.asInstanceOf[HTTPEventSink]
    val httpEntity = HttpEntity(ContentTypes.`application/json`, event.payload.toString)
    val url = Option(event.destination).getOrElse(sink.url)
    val httpRequest = event.header match {
      case null => HttpRequest(method = HttpMethods.getForKey(sink.method.toUpperCase).get, uri = url, entity = httpEntity)
      case _ => HttpRequest(method = HttpMethods.getForKey(sink.method.toUpperCase).get, uri = url, entity = httpEntity,
        headers = immutable.Seq[HttpHeader]( event.header.map { case (key, value) => RawHeader(key, value) }.toArray: _ *))
    }
    val requestTracker = HttpRequestTracker(httpRequest)
    httpRequest -> requestTracker
  }
}
| Flipkart/connekt | firefly/src/main/scala/com/flipkart/connekt/firefly/sinks/http/HttpSink.scala | Scala | mit | 3,723 |
package keystoneml.nodes.misc
import keystoneml.nodes.util.{SparseFeatureVectorizer, AllSparseFeatures, CommonSparseFeatures}
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import keystoneml.pipelines.Logging
import keystoneml.workflow.PipelineContext
class SparseFeatureVectorizerSuite extends FunSuite with PipelineContext with Logging {
  test("sparse feature vectorization") {
    sc = new SparkContext("local", "test")
    // Vectorizer with a fixed name-to-index mapping; names outside the map are dropped.
    val vectorizer = new SparseFeatureVectorizer(Map("First" -> 0, "Second" -> 1, "Third" -> 2))
    val observation = Seq(("Third", 4.0), ("Fourth", 6.0), ("First", 1.0))
    val vector = vectorizer.apply(sc.parallelize(Seq(observation))).first()
    assert(vector.size == 3)
    assert(vector(0) == 1)
    assert(vector(1) == 0)
    assert(vector(2) == 4)
  }

  test("all sparse feature selection") {
    sc = new SparkContext("local", "test")
    val trainingData = sc.parallelize(List(Seq(("First", 0.0), ("Second", 6.0)), Seq(("Third", 3.0), ("Second", 4.0))))
    val vectorizer = AllSparseFeatures().fit(trainingData.map(x => x))
    // The selected features should now be "First", "Second", and "Third"
    val observation = Seq(("Third", 4.0), ("Fourth", 6.0), ("First", 1.0))
    val featurized = vectorizer.apply(sc.parallelize(Seq(observation))).first().toArray
    assert(featurized === Array(1.0, 0.0, 4.0))
  }

  test("common sparse feature selection") {
    sc = new SparkContext("local", "test")
    val trainingData = sc.parallelize(List(
      Seq(("First", 0.0), ("Second", 6.0)),
      Seq(("Third", 3.0), ("Second", 4.8)),
      Seq(("Third", 7.0), ("Fourth", 5.0)),
      Seq(("Fifth", 5.0), ("Second", 7.3))
    ))
    // Keep only 2 features — per the assertion below those end up being "Second" and "Third".
    val vectorizer = CommonSparseFeatures(2).fit(trainingData.map(x => x))
    // The selected features should now be "Second", and "Third"
    val observation = Seq(("Third", 4.0), ("Seventh", 8.0), ("Second", 1.3), ("Fourth", 6.0), ("First", 1.0))
    val featurized = vectorizer.apply(sc.parallelize(Seq(observation))).first().toArray
    assert(featurized === Array(1.3, 4.0))
  }
}
| amplab/keystone | src/test/scala/keystoneml/nodes/misc/SparseFeatureVectorizerSuite.scala | Scala | apache-2.0 | 2,026 |
package com.ing.baker.runtime.javadsl
import java.util.Optional
import com.ing.baker.runtime.common.LanguageDataStructures.JavaApi
import com.ing.baker.runtime.{common, scaladsl}
import com.ing.baker.types.Type
case class InteractionInstanceInput(name: Optional[String], `type`: Type) extends common.InteractionInstanceInput with JavaApi {

  def getName: Optional[String] = name

  def getType: Type = `type`

  // Bridge to the Scala DSL: java.util.Optional is translated to scala.Option.
  def asScala: scaladsl.InteractionInstanceInput =
    scaladsl.InteractionInstanceInput(if (name.isPresent) Some(name.get) else None, `type`)
}
| ing-bank/baker | core/baker-interface/src/main/scala/com/ing/baker/runtime/javadsl/InteractionInstanceInput.scala | Scala | mit | 542 |
package com.twitter.finagle.http
import com.twitter.finagle.Service
import com.twitter.util.Future
import org.jboss.netty.buffer.ChannelBuffers
import org.jboss.netty.handler.codec.http.{DefaultHttpResponse, HttpRequest,
HttpResponse, HttpResponseStatus, HttpVersion}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
@RunWith(classOf[JUnitRunner])
class HttpMuxerTest extends FunSuite {
  // todo: add other metrics when they are supported
  /** Stub service that always answers 200 OK with the given body. */
  class DummyService(reply: String) extends Service[HttpRequest, HttpResponse] {
    def apply(request: HttpRequest): Future[HttpResponse] = {
      val response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
      response.setContent(ChannelBuffers.wrappedBuffer(reply.getBytes("UTF-8")))
      Future.value(response)
    }
  }
  val (reply1, reply2, reply3) = ("dumb", "dumber", "dumbest")
  // Route patterns ending in '/' are prefix-matched; patterns without are exact-matched
  // (see the assertions below).
  val muxService = new HttpMuxer()
    .withHandler("foo/bar/", new DummyService(reply1)) // prefix match
    .withHandler("foo/bar", new DummyService(reply2)) // exact match
    .withHandler("foo/boo/baz/", new DummyService(reply3))
  test("handles params properly") {
    // The query string must not take part in route matching.
    assert(Response(muxService(Request("/foo/bar/blah?j={}"))()).contentString == reply1)
  }
  test("prefix matching request path correctly") {
    assert(Response(muxService(Request("/fooblah"))()).status == HttpResponseStatus.NOT_FOUND)
    assert(Response(muxService(Request("/foo/bar/blah"))()).contentString == reply1)
    // after normalization, it should match "/foo/bar/"
    assert(Response(muxService(Request("/foo//bar/blah"))()).contentString == reply1)
    assert(Response(muxService(Request("/foo/bar"))()).contentString == reply2)
    assert(Response(muxService(Request("/foo/bar/"))()).contentString == reply1)
    assert(Response(muxService(Request("/foo/boo/baz"))()).status == HttpResponseStatus.NOT_FOUND)
    assert(Response(muxService(Request("/foo/boo/baz/blah"))()).contentString == reply3)
    assert(Response(muxService(Request("/foo/barblah"))()).status == HttpResponseStatus.NOT_FOUND)
  }
  test("Registering a service with an existing name will overwrite the old") {
    val (r1, r2, r3) = ("smart", "smarter", "smartest")
    // Re-register the same three patterns with new services; the new replies must win.
    val mux2 = muxService
      .withHandler("foo/bar/", new DummyService(r1)) // prefix match
      .withHandler("foo/bar", new DummyService(r2)) // exact match
      .withHandler("foo/boo/baz/", new DummyService(r3))
    assert(Response(mux2(Request("/fooblah"))()).status == HttpResponseStatus.NOT_FOUND)
    assert(Response(mux2(Request("/foo/bar/blah"))()).contentString == r1)
    assert(Response(mux2(Request("/foo/bar"))()).contentString == r2)
    assert(Response(mux2(Request("/foo/bar/"))()).contentString == r1)
    assert(Response(mux2(Request("/foo/boo/baz"))()).status == HttpResponseStatus.NOT_FOUND)
    assert(Response(mux2(Request("/foo/boo/baz/blah"))()).contentString == r3)
    assert(Response(mux2(Request("/foo/barblah"))()).status == HttpResponseStatus.NOT_FOUND)
  }
}
| joshbedo/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/HttpMuxerTest.scala | Scala | apache-2.0 | 3,052 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
/* Uncomment this when remove the deprecated type aliases in the org.scalatest.prop package object.
import org.scalatest.exceptions.DiscardedEvaluationException
*/
/**
* Trait that contains the <code>whenever</code> clause that can be used in table- or generator-driven property checks.
*
* @author Bill Venners
*/
trait Whenever {

  /**
   * Evaluates the passed code block if the passed boolean condition is true, else throws <code>DiscardedEvaluationException</code>.
   *
   * <p>
   * The <code>whenever</code> method can be used inside property check functions to discard invocations of the function with
   * data for which it is known the property would fail. For example, given the following <code>Fraction</code> class:
   * </p>
   *
   * <pre class="stHighlight">
   * class Fraction(n: Int, d: Int) {
   *
   *   require(d != 0)
   *   require(d != Integer.MIN_VALUE)
   *   require(n != Integer.MIN_VALUE)
   *
   *   val numer = if (d < 0) -1 * n else n
   *   val denom = d.abs
   *
   *   override def toString = numer + " / " + denom
   * }
   * </pre>
   *
   * <pre class="stHighlight">
   * import org.scalatest.prop.TableDrivenPropertyChecks._
   *
   * val fractions =
   *   Table(
   *     ("n", "d"),
   *     (  1,   2),
   *     ( -1,   2),
   *     (  1,  -2),
   *     ( -1,  -2),
   *     (  3,   1),
   *     ( -3,   1),
   *     ( -3,   0),
   *     (  3,  -1),
   *     (  3,  Integer.MIN_VALUE),
   *     (Integer.MIN_VALUE, 3),
   *     ( -3,  -1)
   *   )
   * </pre>
   *
   * <p>
   * Imagine you wanted to check a property against this class with data that includes some
   * value that are rejected by the constructor, such as a denominator of zero, which should
   * result in an <code>IllegalArgumentException</code>. You could use <code>whenever</code>
   * to discard any rows in the <code>fraction</code> that represent illegal arguments, like this:
   * </p>
   *
   * <pre class="stHighlight">
   * import org.scalatest.matchers.ShouldMatchers._
   *
   * forAll (fractions) { (n: Int, d: Int) =>
   *
   *   whenever (d != 0 && d != Integer.MIN_VALUE
   *       && n != Integer.MIN_VALUE) {
   *
   *     val f = new Fraction(n, d)
   *
   *     if (n < 0 && d < 0 || n > 0 && d > 0)
   *       f.numer should be > 0
   *     else if (n != 0)
   *       f.numer should be < 0
   *     else
   *       f.numer should be === 0
   *
   *     f.denom should be > 0
   *   }
   * }
   * </pre>
   *
   * <p>
   * In this example, rows 6, 8, and 9 have values that would cause a false to be passed
   * to <code>whenever</code>. (For example, in row 6, <code>d</code> is 0, which means <code>d</code> <code>!=</code> <code>0</code>
   * will be false.) For those rows, <code>whenever</code> will throw <code>DiscardedEvaluationException</code>,
   * which will cause the <code>forAll</code> method to discard that row.
   * </p>
   *
   * @param condition the boolean condition that determines whether <code>whenever</code> will evaluate the
   *    <code>fun</code> function (<code>condition</code> is true) or throws <code>DiscardedEvaluationException</code> (<code>condition</code> is false)
   * @param fun the function to evaluate if the specified <code>condition</code> is true
   */
  // Declared with an explicit `: Unit =` result type instead of the deprecated
  // procedure syntax; behavior is unchanged (`fun` is by-name, evaluated only
  // when `condition` holds).
  def whenever(condition: Boolean)(fun: => Unit): Unit = {
    if (!condition)
      throw new DiscardedEvaluationException
    else
      fun
  }
}
| travisbrown/scalatest | src/main/scala/org/scalatest/prop/Whenever.scala | Scala | apache-2.0 | 4,008 |
package com.twitter.algebird
import com.twitter.algebird.BaseProperties._
import com.twitter.algebird.scalacheck.arbitrary._
import com.twitter.algebird.scalacheck.NonEmptyVector
import org.scalacheck.Arbitrary
import org.scalacheck.Prop.forAll
class MaxLaws extends CheckProperties {

  /** Both `+` and `max` on `Max` must agree with `Ordering[T].max` of the wrapped values. */
  def maxTest[T: Arbitrary: Ordering] =
    forAll { (left: Max[T], right: Max[T]) =>
      val expected = Max(Ordering[T].max(left.get, right.get))
      (left + right) == expected && left.max(right) == expected
    }

  /** Combining every wrapped item through the semigroup yields the maximum item. */
  def maxSemiGroupTest[T: Arbitrary: Ordering] =
    forAll { vec: NonEmptyVector[T] =>
      val combined = Max.semigroup[T].combineAllOption(vec.items.map(Max(_)))
      combined.get.get == vec.items.max
    }

  // Compile-time checks that the expected implicit instances are resolvable.
  val equiv = implicitly[Equiv[Max[Int]]]
  val sgInt = implicitly[Semigroup[Max[Int]]]
  val sgString = implicitly[Semigroup[Max[String]]]
  val monoidString = implicitly[Monoid[Max[String]]]

  property("Max.{ +, max } works on ints") { maxTest[Int] }

  property("Max.aggregator returns the maximum item") {
    forAll { vec: NonEmptyVector[Int] =>
      Max.aggregator[Int].apply(vec.items) == vec.items.max
    }
  }

  property("Max.semigroup[Int] returns the maximum item") {
    maxSemiGroupTest[Int]
  }

  property("Max.semigroup[Char] returns the maximum item") {
    maxSemiGroupTest[Char]
  }

  property("Max[Long] is a commutative monoid") {
    commutativeMonoidLaws[Max[Long]]
  }

  property("Max[Double] is a commutative monoid") {
    commutativeMonoidLaws[Max[Double]]
  }

  property("Max[String] is a commutative monoid") {
    commutativeMonoidLaws[Max[String]]
  }

  property("Max[List[Int]] is a commutative monoid") {
    commutativeMonoidLaws[Max[List[Int]]]
  }

  property("Max[Vector[Int]] is a commutative monoid") {
    commutativeMonoidLaws[Max[Vector[Int]]]
  }

  property("Max[Stream[Int]] is a commutative monoid") {
    commutativeMonoidLaws[Max[Stream[Int]]]
  }
}
| nevillelyh/algebird | algebird-test/src/test/scala/com/twitter/algebird/MaxLaws.scala | Scala | apache-2.0 | 1,953 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.config
import java.util.{Map => JMap}
import scala.util.matching.Regex
import org.apache.spark.SparkConf
/**
* An entry contains all meta information for a configuration.
*
* When applying variable substitution to config values, only references starting with "spark." are
* considered in the default namespace. For known Spark configuration keys (i.e. those created using
* `ConfigBuilder`), references will also consider the default value when it exists.
*
* Variable expansion is also applied to the default values of config entries that have a default
* value declared as a string.
*
* @param key the key for the configuration
* @param defaultValue the default value for the configuration
* @param valueConverter how to convert a string to the value. It should throw an exception if the
* string does not have the required format.
* @param stringConverter how to convert a value to a string that the user can use it as a valid
* string value. It's usually `toString`. But sometimes, a custom converter
* is necessary. E.g., if T is List[String], `a, b, c` is better than
* `List(a, b, c)`.
* @param doc the documentation for the configuration
* @param isPublic if this configuration is public to the user. If it's `false`, this
* configuration is only used internally and we should not expose it to users.
* @tparam T the value type
*/
private[spark] abstract class ConfigEntry[T] (
    val key: String,
    val valueConverter: String => T,
    val stringConverter: T => String,
    val doc: String,
    val isPublic: Boolean) {

  import ConfigEntry._

  // Registers this entry in the global registry as a constructor side effect.
  // NOTE(review): this publishes `this` before subclass construction finishes;
  // the registry only stores the reference keyed by `key` (initialized above),
  // but confirm no registry reader dereferences entries during construction.
  registerEntry(this)

  // String rendering of the default value (used by toString and docs).
  def defaultValueString: String

  // Resolve this entry's value from the given reader; subclasses define
  // what happens when the key is absent.
  def readFrom(reader: ConfigReader): T

  // Typed default value, if this entry has one; overridden by subclasses.
  def defaultValue: Option[T] = None

  override def toString: String = {
    s"ConfigEntry(key=$key, defaultValue=$defaultValueString, doc=$doc, public=$isPublic)"
  }
}
/** A config entry whose default is already a converted value of type `T`. */
private class ConfigEntryWithDefault[T] (
    key: String,
    _defaultValue: T,
    valueConverter: String => T,
    stringConverter: T => String,
    doc: String,
    isPublic: Boolean)
  extends ConfigEntry(key, valueConverter, stringConverter, doc, isPublic) {

  override def defaultValue: Option[T] = Some(_defaultValue)

  override def defaultValueString: String = stringConverter(_defaultValue)

  // Absent key falls back to the pre-converted default.
  def readFrom(reader: ConfigReader): T =
    reader.get(key).fold(_defaultValue)(valueConverter)
}
/**
 * A config entry whose default is kept as a string; variable substitution and
 * conversion are applied when the value is read.
 */
private class ConfigEntryWithDefaultString[T] (
    key: String,
    _defaultValue: String,
    valueConverter: String => T,
    stringConverter: T => String,
    doc: String,
    isPublic: Boolean)
  extends ConfigEntry(key, valueConverter, stringConverter, doc, isPublic) {

  override def defaultValue: Option[T] = Some(valueConverter(_defaultValue))

  override def defaultValueString: String = _defaultValue

  // The raw string (user-set or default-after-substitution) is converted last.
  def readFrom(reader: ConfigReader): T =
    valueConverter(reader.get(key).getOrElse(reader.substitute(_defaultValue)))
}
/**
 * A config entry that does not have a default value.
 *
 * Exposed to callers as a `ConfigEntry[Option[T]]`: present values parse to
 * `Some(_)` and `None` renders as `null` (via `orNull`) when stringified.
 */
private[spark] class OptionalConfigEntry[T](
    key: String,
    val rawValueConverter: String => T,
    val rawStringConverter: T => String,
    doc: String,
    isPublic: Boolean)
  extends ConfigEntry[Option[T]](key, s => Some(rawValueConverter(s)),
    v => v.map(rawStringConverter).orNull, doc, isPublic) {

  override def defaultValueString: String = "<undefined>"

  // An absent key simply yields None -- no default, no substitution.
  override def readFrom(reader: ConfigReader): Option[T] = {
    reader.get(key).map(rawValueConverter)
  }
}
/**
 * A config entry whose default value is defined by another config entry.
 * Converters are inherited from the fallback entry.
 */
private class FallbackConfigEntry[T] (
    key: String,
    doc: String,
    isPublic: Boolean,
    private[config] val fallback: ConfigEntry[T])
  extends ConfigEntry[T](key, fallback.valueConverter, fallback.stringConverter, doc, isPublic) {

  override def defaultValueString: String = s"<value of ${fallback.key}>"

  // When this key is unset, the entire lookup is delegated to the fallback
  // entry (fold's default is by-name, so it only runs when the key is absent).
  override def readFrom(reader: ConfigReader): T =
    reader.get(key).fold(fallback.readFrom(reader))(valueConverter)
}
private[spark] object ConfigEntry {

  // Registry of every entry ever constructed, keyed by its config key.
  private val knownConfigs = new java.util.concurrent.ConcurrentHashMap[String, ConfigEntry[_]]()

  /** Records a new entry; fails (IllegalArgumentException) on a duplicate key. */
  def registerEntry(entry: ConfigEntry[_]): Unit = {
    val previous = knownConfigs.putIfAbsent(entry.key, entry)
    require(previous == null, s"Config entry ${entry.key} already registered!")
  }

  /** Looks up a registered entry; returns null when the key is unknown. */
  def findEntry(key: String): ConfigEntry[_] = knownConfigs.get(key)
}
| sh-cho/cshSpark | internal/config/ConfigEntry.scala | Scala | apache-2.0 | 5,393 |
package us.feliscat.util
import org.kohsuke.args4j.Option
import scala.beans.BeanProperty
/**
* @author K.Sakamoto
* Created on 2016/11/05
*/
class JCasGenOption() {
  // args4j option: directory containing the UIMA type system descriptors;
  // defaults to the project-configured location.
  @Option(name = "-ts", aliases = Array[String]("--typeSystem"), usage = "type system descriptor directory", required = false)
  @BeanProperty
  var tsDir: String = Config.jCasGenTypeSystemDir

  // args4j option: directory where generated sources are written;
  // defaults to the project-configured location.
  @Option(name = "-o", aliases = Array[String]("--output"), usage = "output directory", required = false)
  @BeanProperty
  var outputDir: String = Config.jCasGenOutputDir
}
| ktr-skmt/FelisCatusZero-multilingual | jcasgen/src/main/scala/us/feliscat/util/JCasGenOption.scala | Scala | apache-2.0 | 551 |
package slick.jdbc.meta
import java.sql._
import slick.jdbc.ResultSetAction
import slick.driver.JdbcTypesComponent
/** A wrapper for a row in the ResultSet returned by DatabaseMetaData.getProcedureColumns(). */
case class MProcedureColumn(
  procedure: MQName, column: String, columnType: Short, sqlType: Int, typeName: String,
  precision: Option[Int], length: Int, scale: Option[Short], radix: Short,
  nullable: Option[Boolean], remarks: String, columnDef: Option[String], charOctetLength: Option[Int],
  ordinalPosition: Option[Int], isNullable: Option[Boolean], specificName: Option[String]) {

  // Human-readable name of the java.sql.Types code, if one is known.
  def sqlTypeName = JdbcTypesComponent.typeNames.get(sqlType)
}
object MProcedureColumn {
  /**
   * Invokes DatabaseMetaData.getProcedureColumns and maps each result row to
   * an MProcedureColumn. Values are read positionally with `<<`/`<<?`, so the
   * read order below must exactly match the JDBC result-set column order.
   */
  def getProcedureColumns(procedurePattern: MQName, columnNamePattern: String = "%") = ResultSetAction[MProcedureColumn](
      _.metaData.getProcedureColumns(procedurePattern.catalog_?, procedurePattern.schema_?,
        procedurePattern.name, columnNamePattern) ) { r =>
    MProcedureColumn(MQName.from(r), r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.nextShort match {
      // Map the JDBC nullability constant to Option[Boolean];
      // anything else (e.g. "unknown") becomes None.
      case DatabaseMetaData.procedureNoNulls => Some(false)
      case DatabaseMetaData.procedureNullable => Some(true)
      case _ => None
    // `skip.skip` jumps over two unmapped columns (presumably the reserved
    // SQL_DATA_TYPE / SQL_DATETIME_SUB slots -- confirm against JDBC docs).
    }, r.<<, r.<<?, r.skip.skip.<<?, r.<<?, DatabaseMeta.yesNoOpt(r), r.<<?)
  }
}
}
| jkutner/slick | slick/src/main/scala/slick/jdbc/meta/MProcedureColumn.scala | Scala | bsd-2-clause | 1,342 |
package org.jetbrains.plugins.scala.lang.psi.api.base.patterns
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
/**
 * PSI element representing the arguments of a type pattern.
 * Pure marker trait: declares no members of its own beyond ScalaPsiElement.
 *
 * @author Alexander Podkhalyuzin
 * Date: 28.02.2008
 */
trait ScTypePatternArgs extends ScalaPsiElement {
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/base/patterns/ScTypePatternArgs.scala | Scala | apache-2.0 | 237 |
package org.jboss.perf
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import io.gatling.http.request.builder.Http
/**
* @author John O'Hara <johara@redhat.com>
* */
object HashingServletSimulations {
  /**
   * Gatling scenario that issues GET /HashingTest and checks for an HTTP 200
   * response with a non-empty body.
   */
  class Get extends BaseSimulation with AppHtml {
    def run(http: Http) = {
      http.get("/HashingTest").check(status.is(200), bodyBytes.exists)
    }
  }
}
| redhatperf/web-benchmark | src/test/scala/org/jboss/perf/HashingServletSimulations.scala | Scala | apache-2.0 | 391 |
package test
object Test { // compiler-warning fixture: line count must not change (neg test)
  def bar = "bar"
  def f = {
    val foo = "bar"
    "An important $foo message!" // warn: plain literal that looks like a missing 's' interpolator
  }
  def g = {
    val foo = "bar"
    "A doubly important ${foo * 2} message!" // warn: plain literal containing ${...} syntax
  }
  def h = s"Try using '$$bar' instead." // no warn
  def i = s"Try using '${ "$bar" }' instead." // no warn on space test
  def j = s"Try using '${ "something like $bar" }' instead." // warn
  def k = f"Try using '$bar' instead." // no warn on other std interps
}
| yusuke2255/dotty | tests/untried/neg/t7848-interp-warn.scala | Scala | bsd-3-clause | 465 |
package org.clulab
import com.typesafe.config.ConfigFactory
import edu.arizona.sista.reach.nxml.indexer.NxmlSearcher
import edu.arizona.sista.embeddings.word2vec.Word2Vec
import edu.arizona.sista.reach.PaperReader
import edu.arizona.sista.processors.Document
import org.apache.lucene.document.{Document => LuceneDocument}
import scala.collection.parallel.ForkJoinTaskSupport
import java.util.zip.GZIPOutputStream
import org.apache.commons.compress.compressors.gzip._
import java.io.{FileOutputStream, Writer, OutputStreamWriter, File}
/**
* Takes a path to a lucene index, <br>
* tokenizes the text of each doc using BioNLPProcessor, <br>
* sanitizes each word (prep for for w2v), <br>
* and writes each doc to a .gz file where each line is a tokenized sentence of sanitized tokens
*/
object DumpIndex extends App {
  val config = ConfigFactory.load()
  val indexDir = config.getString("indexDir")
  val searcher = new NxmlSearcher(indexDir)
  val threadLimit = config.getInt("threadLimit")
  val bioproc = PaperReader.rs.processor
  val outDir = config.getString("indexDump")

  /**
   * Dumps processed text to a gzip-compressed file.
   *
   * Fix: the streams are now closed in `finally` blocks, so they are released
   * even when construction or `write` throws (the original leaked both the
   * FileOutputStream and the gzip writer on failure). Failures are still
   * reported and swallowed, preserving the original best-effort behavior.
   */
  def writeToCompressedFile(text: String, outFile: String): Unit = {
    try {
      val output: FileOutputStream = new FileOutputStream(outFile)
      try {
        val writer: Writer = new OutputStreamWriter(new GZIPOutputStream(output), "UTF-8")
        try {
          writer.write(text)
        } finally {
          writer.close() // also finishes/flushes the GZIP stream
        }
      } finally {
        output.close() // idempotent if the writer already closed it
      }
    } catch {
      case _: Exception => println(s"Couldn't write $outFile")
    }
  }

  /**
   * Prepares the text of a Lucene document for embedding generation and dumps
   * it to <paperid>.txt.gz: one sentence per line, words sanitized for word2vec.
   */
  def processEntry(entry: LuceneDocument): Unit = {
    // get text and id
    val text = entry.getField("text").stringValue
    val pmid = entry.getField("id").stringValue
    println(s"Processing $pmid ...")
    // sentence-split and tokenize
    val doc = tokenize(text)
    val outFile = new File(outDir, s"$pmid.txt")
    // one output line per sentence, each word sanitized for word2vec
    val sanitizedLines: Seq[String] = doc.sentences.map { s =>
      s.words.map(w => Word2Vec.sanitizeWord(w)).mkString(" ")
    }
    // write to disk...
    val gzipOutFile = GzipUtils.getCompressedFilename(outFile.getAbsolutePath)
    println(s"writing $gzipOutFile ...")
    writeToCompressedFile(sanitizedLines.mkString("\\n"), gzipOutFile)
  }

  /** Split sentences and tokenize. */
  def tokenize(text: String): Document = bioproc.mkDocument(text)

  /**
   * Processes every document in the Lucene index in parallel,
   * bounded by `nThreads` worker threads.
   */
  def dumpFilesFromIndex(searcher: NxmlSearcher, nThreads: Int): Unit = {
    val docs = (0 until searcher.reader.maxDoc).par
    // limit threads
    docs.tasksupport = new ForkJoinTaskSupport(new scala.concurrent.forkjoin.ForkJoinPool(nThreads))
    for {
      // limited parallel iteration over the indexed documents
      i <- docs
    } {
      val entry = searcher.reader.document(i)
      processEntry(entry)
    }
  }

  println("Processing lucene documents ...")
  // prepare indexed papers for generation of embeddings
  dumpFilesFromIndex(searcher, threadLimit)
} | myedibleenso/this-before-that | src/main/scala/org/clulab/DumpIndex.scala | Scala | apache-2.0 | 3,103 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.responsiblepeople
import forms.{EmptyForm, InvalidForm}
import jto.validation.{Path, ValidationError}
import org.scalatest.MustMatchers
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.responsiblepeople.person_non_uk_passport
class person_non_uk_passportSpec extends AmlsViewSpec with MustMatchers {
trait ViewFixture extends Fixture {
lazy val person_non_uk_passport = app.injector.instanceOf[person_non_uk_passport]
implicit val requestWithToken = addTokenForView()
}
"person_uk_passport view" must {
"have correct title, headings and form fields" in new ViewFixture {
val form2 = EmptyForm
val name = "firstName lastName"
def view = person_non_uk_passport(form2, true, 1, None, name)
doc.getElementsByAttributeValue("class", "link-back") must not be empty
doc.title must startWith(Messages("responsiblepeople.non.uk.passport.title"))
heading.html must be(Messages("responsiblepeople.non.uk.passport.heading", name))
subHeading.html must include(Messages("summary.responsiblepeople"))
doc.getElementsByAttributeValue("name", "nonUKPassport") must not be empty
doc.getElementsByAttributeValue("name", "nonUKPassportNumber") must not be empty
}
"show errors in the correct locations" in new ViewFixture {
val form2: InvalidForm = InvalidForm(Map.empty,
Seq(
(Path \\ "nonUKPassport") -> Seq(ValidationError("not a message Key")),
(Path \\ "nonUKPassportNumber") -> Seq(ValidationError("second not a message Key"))
))
def view = person_non_uk_passport(form2, true, 1, None, "firstName lastName")
errorSummary.html() must include("not a message Key")
errorSummary.html() must include("second not a message Key")
doc.getElementsByAttributeValue("name", "nonUKPassport") must not be empty
doc.getElementsByAttributeValue("name", "nonUKPassportNumber") must not be empty
}
}
}
| hmrc/amls-frontend | test/views/responsiblepeople/person_non_uk_passportSpec.scala | Scala | apache-2.0 | 2,608 |
// FindPolitical.scala
//
// Copyright (C) 2012 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.fieldspring.preprocess
import collection.mutable
import java.io._
import org.apache.commons.logging
import com.nicta.scoobi.Scoobi._
import opennlp.fieldspring.{util => tgutil}
import tgutil.argparser._
import tgutil.textdbutil._
import tgutil.hadoop._
import tgutil.ioutil._
import tgutil.collectionutil._
import tgutil.osutil._
import tgutil.printutil._
/**
 * Command-line parameters for FindPolitical. Each option's `help` string is
 * the authoritative description; triple-quoted help text is preserved verbatim.
 */
class FindPoliticalParams(ap: ArgParser) extends
    ScoobiProcessFilesParams(ap) {
  var political_twitter_accounts = ap.option[String](
    "political-twitter-accounts", "pta",
    help="""File containing list of politicians and associated twitter
accounts, for identifying liberal and conservative tweeters.""")
  var political_twitter_accounts_format = ap.option[String](
    "political-twitter-accounts-format", "ptaf",
    default = "officeholders",
    choices = Seq("officeholders", "ideo-users"),
    help="""Format for file specified in --political-twitter-accounts.
Possibilities: 'officeholders' (a file containing data gleaned from
the Internet, specifying holders of political offices and their parties),
'ideo-users' (output from a previous run of FindPolitical, in
Fieldspring corpus format, with ideology identified by a numeric
score).""")
  var min_accounts = ap.option[Int]("min-accounts", default = 2,
    help="""Minimum number of political accounts referenced by Twitter users
in order for users to be considered. Default %default.""")
  var min_conservative = ap.option[Double]("min-conservative", "mc",
    default = 0.75,
    help="""Minimum ideology score to consider a user as an "ideological
conservative". On the ideology scale, greater values indicate more
conservative. Currently, the scale runs from 0 to 1; hence, this value
should be greater than 0.5. Default %default.""")
  var max_liberal = ap.option[Double]("max-liberal", "ml",
    help="""Maximum ideology score to consider a user as an "ideological
liberal". On the ideology scale, greater values indicate more
conservative. Currently, the scale runs from 0 to 1; hence, this value
should be less than 0.5. If unspecified, computed as the mirror image of
the value of '--min-conservative' (e.g. 0.25 if
--min-conservative=0.75).""")
  var iterations = ap.option[Int]("iterations", "i",
    default = 1,
    help="""Number of iterations when generating ideological users.""")
  var corpus_name = ap.option[String]("corpus-name",
    help="""Name of output corpus; for identification purposes.
Default to name taken from input directory.""")
  var include_text = ap.flag("include-text",
    help="""Include text of users sending tweets referencing a feature.""")
  var ideological_ref_type = ap.option[String]("ideological-ref-type", "ilt",
    default = "retweets", choices = Seq("retweets", "mentions", "followers"),
    help="""Type of references to other accounts to use when determining the
ideology of a user. Possibilities are 'retweets' (accounts that tweets
are retweeted from); 'mentions' (any @-mention of an account, including
retweets); 'following' (accounts that a user is following). Default
%default.""")
  var political_feature_type = ap.multiOption[String]("political-feature-type",
    "pft",
    choices = Seq("retweets", "followers", "hashtags", "urls", "images",
      "unigrams", "bigrams", "trigrams", "ngrams"),
    aliasedChoices = Seq(Seq("user-mentions", "mentions")),
    help="""Type of political features to track when searching for data that may
be associated with particular ideologies. Possibilities are 'retweets'
(accounts that tweets are retweeted from); 'mentions' (any @-mention of an
account, including retweets); 'following' (accounts that a user is
following); 'hashtags'; 'unigrams'; 'bigrams'; 'trigrams'; 'ngrams'.
DOCUMENT THESE; NOT YET IMPLEMENTED. Multiple features can be tracked
simultaneously by specifying this option multiple times.""")
  // FIXME: Should be able to specify multiple features separated by commas.
  // This requires that we fix argparser.scala to allow this. Probably
  // should add an extra field to provide a way of splitting -- maybe a regexp,
  // maybe a function.

  // Schema for the input file, after file read
  var schema: Schema = _

  // Validates argument combinations after parsing; called by the framework.
  override def check_usage() {
    if (political_twitter_accounts == null)
      ap.error("--political-twitter-accounts must be specified")
    // Default --max-liberal to the mirror image of --min-conservative.
    if (!ap.specified("max-liberal"))
      max_liberal = 1 - min_conservative
    if (iterations <= 0)
      ap.error("--iterations must be > 0")
  }
}
/**
* A simple field-text file processor that just records the users and ideology.
*
* @param suffix Suffix used to select document metadata files in a directory
*/
class IdeoUserFileProcessor extends
    TextDBProcessor[(String, Double)]("ideo-users") {
  /** Extracts one (lowercased user, ideology score) pair from a row's fields. */
  def handle_row(fieldvals: Seq[String]) =
    Some((
      schema.get_field(fieldvals, "user").toLowerCase,
      schema.get_field(fieldvals, "ideology").toDouble
    ))
}
object FindPolitical extends
ScoobiProcessFilesApp[FindPoliticalParams] {
// Common base for this app's Scoobi actions; fixes the program name
// (presumably used by ScoobiProcessFilesAction for logging -- confirm).
abstract class FindPoliticalAction(opts: FindPoliticalParams)
    extends ScoobiProcessFilesAction {
  val progname = "FindPolitical"
}
/**
 * Total number of references: sums the `times` component over all
 * (data, weight, times) triples referencing a particular data point.
 */
def count_refs[T](seq: Seq[(T, Double, Int)]) =
  seq.iterator.map(_._3).sum

/**
 * Total weight: sums `weight * times` over all (data, weight, times)
 * triples referencing a particular data point.
 */
def count_weight[T](seq: Seq[(T, Double, Int)]) =
  seq.iterator.map { case (_, weight, times) => weight * times }.sum

/**
 * Number of distinct accounts referencing a data point: one
 * (data, weight, times) triple per account.
 */
def count_accounts[T](seq: Seq[(T, Double, Int)]) = seq.length
/**
* Description of a "politico" -- a politician along their party and
* known twitter accounts.
*/
case class Politico(last: String, first: String, title: String,
    party: String, where: String, accounts: Seq[String]) {
  /** Politician's full name, in "first last" order. */
  def full_name = s"$first $last"
}
// Scoobi wire format for Politico, derived from the case class.
implicit val politico_wire =
  mkCaseWireFormat(Politico.apply _, Politico.unapply _)
/**
 * Serializes (account, ideology, times) triples as space-separated
 * "account:ideology:times" tokens, most-referenced account first.
 */
def encode_ideo_refs_map(seq: Seq[(String, Double, Int)]) =
  seq.sortWith(_._3 > _._3).map { case (account, ideology, count) =>
    "%s:%.2f:%s" format (encode_string_for_count_map_field(account), ideology, count)
  }.mkString(" ")

/** An empty sequence of (account, ideology, times) triples. */
def empty_ideo_refs_map = Seq[(String, Double, Int)]()
/**
 * Description of a user and the accounts referenced, both political and
 * nonpolitical, along with ideology.
 *
 * @param user Twitter account of the user
 * @param ideology Computed ideology of the user (higher values indicate
 *   more conservative)
 * @param ideo_refs Set of references to other accounts used in computing
 *   the ideology (either mentions, retweets or following, based on
 *   --ideological-ref-type); this is a sequence of tuples of
 *   (account, ideology, times), i.e. an account, its ideology and the number
 *   of times it was seen
 * @param lib_ideo_refs Subset of `ideo_refs` that refer to liberal users
 * @param cons_ideo_refs Subset of `ideo_refs` that refer to conservative users
 * @param fields Field values of user's tweets (concatenated)
 */
case class IdeologicalUser(user: String, ideology: Double,
    ideo_refs: Seq[(String, Double, Int)],
    lib_ideo_refs: Seq[(String, Double, Int)],
    cons_ideo_refs: Seq[(String, Double, Int)],
    fields: Seq[String]) {
  // Decode the (value, count) pairs for one feature type from this user's
  // concatenated tweet fields.
  // NOTE(review): only retweets/user-mentions/hashtags are handled; any other
  // --political-feature-type value will throw a MatchError here (the
  // remaining cases are still FIXME stubs below).
  def get_feature_values(factory: IdeologicalUserAction, ty: String) = {
    ty match {
      case field@("retweets" | "user-mentions" | "hashtags") =>
        decode_count_map(
          factory.user_subschema.get_field(fields, field))
      // case "followers" => FIXME
      // case "unigrams" => FIXME
      // case "bigrams" => FIXME
      // case "trigrams" => FIXME
      // case "ngrams" => FIXME
    }
  }

  // One tab-separated output row; column order must match
  // IdeologicalUserAction.row_fields.
  def to_row(opts: FindPoliticalParams) =
    Seq(user, "%.3f" format ideology,
      count_accounts(ideo_refs),
      count_refs(ideo_refs),
      encode_ideo_refs_map(ideo_refs),
      count_accounts(lib_ideo_refs),
      count_refs(lib_ideo_refs),
      encode_ideo_refs_map(lib_ideo_refs),
      count_accounts(cons_ideo_refs),
      count_refs(cons_ideo_refs),
      encode_ideo_refs_map(cons_ideo_refs),
      fields mkString "\\t"
    ) mkString "\\t"
}

// Scoobi wire format for IdeologicalUser, derived from the case class.
implicit val ideological_user_wire =
  mkCaseWireFormat(IdeologicalUser.apply _, IdeologicalUser.unapply _)
class IdeologicalUserAction(opts: FindPoliticalParams) extends
    FindPoliticalAction(opts) {
  val operation_category = "IdeologicalUser"

  // All input-schema fields except "user", which is handled separately.
  val user_subschema_fieldnames =
    opts.schema.fieldnames filterNot (_ == "user")
  val user_subschema = new SubSchema(user_subschema_fieldnames,
    opts.schema.fixed_values, opts.schema)

  // Output-row field names; order must match IdeologicalUser.to_row.
  def row_fields =
    Seq("user", "ideology",
      "num-ideo-accounts", "num-ideo-refs", "ideo-refs",
      "num-lib-ideo-accounts", "num-lib-ideo-refs", "lib-ideo-refs",
      "num-cons-ideo-accounts", "num-cons-ideo-refs", "cons-ideo-refs") ++
    user_subschema_fieldnames

  /**
   * For a given user, determine if the user is an "ideological user"
   * and if so, return an object describing the user.
   *
   * @param line Line of data describing a user, from `ParseTweets --grouping=user`
   * @param accounts Mapping of ideological accounts (lowercased) to their ideology
   * @param include_extra_fields True if we should include extra fields
   *   in the object specifying the references to ideological users that
   *   were found; only if we're writing the objects out for human inspection,
   *   not when we're iterating further
   */
  def get_ideological_user(line: String, accounts: Map[String, Double],
      include_extra_fields: Boolean) = {
    error_wrap(line, None: Option[IdeologicalUser]) { line => {
      val fields = line.split("\\t", -1)
      // Lazily computed: subset of fields carried along for human inspection.
      def subsetted_fields =
        if (include_extra_fields)
          user_subschema.map_original_fieldvals(fields)
        else Seq[String]()
      // get list of (refs, times) pairs; "mentions" maps onto the
      // "user-mentions" schema field
      val ideo_ref_field =
        if (opts.ideological_ref_type == "mentions") "user-mentions"
        else opts.ideological_ref_type
      val ideo_refs =
        decode_count_map(opts.schema.get_field(fields, ideo_ref_field))
      // NOTE(review): `text` is extracted but never used below.
      val text = opts.schema.get_field(fields, "text")
      val user = opts.schema.get_field(fields, "user")
      //errprint("For user %s, ideo_refs: %s", user, ideo_refs.toList)
      // find references to a politician (known ideological account)
      val libcons_ideo_refs =
        for {(ideo_ref, times) <- ideo_refs
             lower_ideo_ref = ideo_ref.toLowerCase
             if accounts contains lower_ideo_ref
             ideology = accounts(lower_ideo_ref)}
          yield (lower_ideo_ref, ideology, times)
      //errprint("libcons_ideo_refs: %s", libcons_ideo_refs.toList)
      val num_libcons_ideo_refs = count_refs(libcons_ideo_refs)
      if (num_libcons_ideo_refs > 0) {
        // Ideology = reference-count-weighted average of the referenced accounts.
        val ideology = count_weight(libcons_ideo_refs)/num_libcons_ideo_refs
        if (include_extra_fields) {
          // Liberal subset of the references, by the --max-liberal threshold.
          val lib_ideo_refs = libcons_ideo_refs.filter {
            case (lower_ideo_ref, ideology, times) =>
              ideology <= opts.max_liberal
          }
          // NOTE(review): num_lib_ideo_refs/num_cons_ideo_refs are computed
          // but never used.
          val num_lib_ideo_refs = count_refs(lib_ideo_refs)
          // Conservative subset, by the --min-conservative threshold.
          val cons_ideo_refs = libcons_ideo_refs.filter {
            case (lower_ideo_ref, ideology, times) =>
              ideology >= opts.min_conservative
          }
          val num_cons_ideo_refs = count_refs(cons_ideo_refs)
          val ideo_user =
            IdeologicalUser(user, ideology, libcons_ideo_refs, lib_ideo_refs,
              cons_ideo_refs, subsetted_fields)
          Some(ideo_user)
        } else {
          val ideo_user =
            IdeologicalUser(user, ideology, empty_ideo_refs_map,
              empty_ideo_refs_map, empty_ideo_refs_map, Seq[String]())
          Some(ideo_user)
        }
      } else if (accounts contains user.toLowerCase) {
        // The user is themselves a known ideological account.
        val ideology = accounts(user.toLowerCase)
        val ideo_user =
          IdeologicalUser(user, ideology, empty_ideo_refs_map,
            empty_ideo_refs_map, empty_ideo_refs_map, subsetted_fields)
        Some(ideo_user)
      } else
        None
    }}
  }
}
/**
 * A political data point -- a piece of data (e.g. user mention, retweet,
 * hash tag, URL, n-gram, etc.) in a tweet by an ideological user.
 *
 * @param value Lowercased value of the data point
 * @param spellings Map of actual (non-lowercased) spellings of data point
 *   by usage
 * @param num_accounts Total number of accounts referencing data point
 * @param num_refs Total number of references to data point
 * @param num_lib_accounts Number of accounts with a noticeably
 *   "liberal" ideology referencing data point
 * @param num_lib_refs Number of references to data point from accounts
 *   with a noticeably "liberal" ideology
 * @param num_cons_accounts Number of accounts with a noticeably
 *   "conservative" ideology referencing data point
 * @param num_cons_refs Number of references to data point from accounts
 *   with a noticeably "conservative" ideology
 * @param num_refs_ideo_weighted Sum of references weighted by ideology of
 *   the referencing user; divided by num_refs on output to yield a weighted
 *   average ideology for the data point
 * @param all_text Text of all users referencing the data point
 */
case class PoliticalFeature(value: String, spellings: Map[String, Int],
    num_accounts: Int, num_refs: Int,
    num_lib_accounts: Int, num_lib_refs: Int,
    num_cons_accounts: Int, num_cons_refs: Int,
    num_refs_ideo_weighted: Double, all_text: Seq[String]) {
  // One tab-separated output row; column order must match
  // PoliticalFeature.row_fields.
  def to_row(opts: FindPoliticalParams) =
    Seq(value, encode_count_map(spellings.toSeq),
      num_accounts, num_refs,
      num_lib_accounts, num_lib_refs,
      num_cons_accounts, num_cons_refs,
      num_refs_ideo_weighted/num_refs,
      if (opts.include_text) all_text mkString " !! " else "(omitted)"
    ) mkString "\\t"
}
object PoliticalFeature {
  /** Column names, in the order emitted by `PoliticalFeature.to_row`. */
  def row_fields =
    Seq("value", "spellings", "num-accounts", "num-refs",
      "num-lib-accounts", "num-lib-refs",
      "num-cons-accounts", "num-cons-refs",
      "ideology", "all-text")

  /**
   * For a given ideological user, generate the "potential politicos": other
   * people referenced, along with their ideological scores.
   */
  def get_political_features(factory: IdeologicalUserAction,
      user: IdeologicalUser, ty: String,
      opts: FindPoliticalParams) = {
    user.get_feature_values(factory, ty) map { case (ref, times) =>
      // Whether the *referencing* user counts as noticeably liberal or
      // noticeably conservative, per the configured thresholds.
      val is_lib = user.ideology <= opts.max_liberal
      val is_conserv = user.ideology >= opts.min_conservative
      PoliticalFeature(
        ref.toLowerCase, Map(ref -> times), 1, times,
        if (is_lib) 1 else 0,
        if (is_lib) times else 0,
        if (is_conserv) 1 else 0,
        if (is_conserv) times else 0,
        times * user.ideology,
        Seq("FIXME fill-in text maybe")
      )
    }
  }

  /**
   * Merge two PoliticalFeature objects, which must refer to the same value.
   * Counts and weighted sums add up; the spelling maps are combined.
   */
  def merge_political_features(u1: PoliticalFeature, u2: PoliticalFeature) = {
    assert(u1.value == u2.value)
    PoliticalFeature(
      value = u1.value,
      spellings = combine_maps(u1.spellings, u2.spellings),
      num_accounts = u1.num_accounts + u2.num_accounts,
      num_refs = u1.num_refs + u2.num_refs,
      num_lib_accounts = u1.num_lib_accounts + u2.num_lib_accounts,
      num_lib_refs = u1.num_lib_refs + u2.num_lib_refs,
      num_cons_accounts = u1.num_cons_accounts + u2.num_cons_accounts,
      num_cons_refs = u1.num_cons_refs + u2.num_cons_refs,
      num_refs_ideo_weighted = u1.num_refs_ideo_weighted + u2.num_refs_ideo_weighted,
      all_text = u1.all_text ++ u2.all_text)
  }
}
// Wire-format instance for PoliticalFeature, derived from the case class's
// apply/unapply, so values can be serialized between MapReduce stages.
implicit val political_feature =
  mkCaseWireFormat(PoliticalFeature.apply _, PoliticalFeature.unapply _)
class FindPoliticalDriver(opts: FindPoliticalParams)
    extends FindPoliticalAction(opts) {
  val operation_category = "Driver"

  /**
   * Read the set of ideological accounts. Create a "Politico" object for
   * each such account, and return a map from a normalized (lowercased)
   * version of each account to the corresponding Politico object (which
   * may refer to multiple accounts).
   */
  def read_ideological_accounts(filename: String) = {
    // Expected line shape: "<num>. <Last>, <First> <@accounts or --- > <party> (<where>)"
    // where party is one of R, D, I or ?.
    val politico =
      """^([^ .]+)\\. (.*?), (.*?) (-+ |(?:@[^ ]+ )+)([RDI?]) \\((.*)\\)$""".r
    val all_accounts =
      // Open the file and read line by line.
      for ((line, lineind) <- (new LocalFileHandler).openr(filename).zipWithIndex
           // Skip comments and blank lines
           if !line.startsWith("#") && !(line.trim.length == 0)) yield {
        // Record current line number (1-based) so `warning` can report it.
        lineno = lineind + 1
        line match {
          // Match the line.
          case politico(title, last, first, accountstr, party, where) => {
            // Compute the list of normalized accounts.
            val accounts =
              if (accountstr.startsWith("-")) Seq[String]()
              // `tail` removes the leading @; lowercase to normalize
              else accountstr.split(" ").map(_.tail.toLowerCase).toSeq
            val obj = Politico(last, first, title, party, where, accounts)
            // One (account -> politico) pair per account handle.
            for (account <- accounts) yield (account, obj)
          }
          case _ => {
            warning(line, "Unable to match")
            Seq[(String, Politico)]()
          }
        }
      }
    // Reset line-number tracking once the file is fully consumed.
    lineno = 0
    // For each account read in, we generated multiple pairs; flatten and
    // convert to a map. Reverse because the last of identical keys will end
    // up in the map but we want the first one taken.
    all_accounts.flatten.toSeq.reverse.toMap
  }

  /**
   * Convert map of accounts->politicos to accounts->ideology.
   * Only D and R politicos are kept; D maps to 0.0, R maps to 1.0.
   */
  def politico_accounts_map_to_ideo_users_map(
      accounts: Map[String, Politico]) = {
    accounts.
      filter { case (string, politico) => "DR".contains(politico.party) }.
      map { case (string, politico) =>
        (string, politico.party match { case "D" => 0.0; case "R" => 1.0 }) }
  }
  /*
     2. We go through users looking for references to these politicians. For
        users that reference politicians, we can compute an "ideology" score of
        the user by a weighted average of the references by the ideology of
        the politicians.
     3. For each such user, look at all other people referenced -- the idea is
        we want to look for people referenced a lot especially by users with
        a consistent ideology (e.g. Glenn Beck or Rush Limbaugh for
        conservatives), which we can then use to mark others as having a
        given ideology. For each person, we generate a record with their
        name, the number of times they were referenced and an ideology score
        and merge these all together.
   */
}
// Factory for the command-line parameter object, invoked by the app
// framework during argument parsing.
def create_params(ap: ArgParser) = new FindPoliticalParams(ap)
// Program name, used in usage messages and output corpus metadata.
val progname = "FindPolitical"
def run() {
  // For testing
  // errprint("Calling error_wrap ...")
  // error_wrap(1,0) { _ / 0 }
  val opts = init_scoobi_app()
  /*
   We are doing the following:

   1. We are given a list of known politicians, their twitter accounts, and
      their ideology -- either determined simply by their party, or using
      the DW-NOMINATE score or similar.
   2. We go through users looking for references to these politicians. For
      users that reference politicians, we can compute an "ideology" score of
      the user by a weighted average of the references by the ideology of
      the politicians.
   3. For each such user, look at all other people referenced -- the idea is
      we want to look for people referenced a lot especially by users with
      a consistent ideology (e.g. Glenn Beck or Rush Limbaugh for
      conservatives), which we can then use to mark others as having a
      given ideology. For each person, we generate a record with their
      name, the number of times they were referenced and an ideology score
      and merge these all together.
   */
  val ptp = new FindPoliticalDriver(opts)
  val filehand = new HadoopFileHandler(configuration)
  // Default the corpus name to the last component of the input path, with
  // glob characters made filename-safe.
  if (opts.corpus_name == null) {
    val (_, last_component) = filehand.split_filename(opts.input)
    opts.corpus_name = last_component.replace("*", "_")
  }
  // Seed map of twitter account -> ideology score, read either from an
  // "officeholders" file or from a previously generated textdb corpus.
  var accounts: Map[String, Double] =
    if (opts.political_twitter_accounts_format == "officeholders") {
      val politico_accounts =
        ptp.read_ideological_accounts(opts.political_twitter_accounts)
      ptp.politico_accounts_map_to_ideo_users_map(politico_accounts)
    }
    else {
      val processor = new IdeoUserFileProcessor
      processor.read_textdb(filehand, opts.political_twitter_accounts).
        flatten.toMap
    }
  // errprint("Accounts: %s", accounts)
  val suffix = "tweets"
  opts.schema =
    TextDBProcessor.read_schema_from_textdb(filehand, opts.input, suffix)

  // Output directory for a corpus with the given suffix.
  def output_directory_for_suffix(corpus_suffix: String) =
    opts.output + "-" + corpus_suffix

  /**
   * For the given sequence of output lines and corpus info, return a pair:
   * a persister for writing the data, and a thunk that fixes up the written
   * data into a proper corpus (renaming files, writing the schema).
   */
  def output_lines(lines: DList[String], corpus_suffix: String,
      fields: Seq[String]) = {
    val outdir = output_directory_for_suffix(corpus_suffix)
    (TextOutput.toTextFile(lines, outdir), () => {
      rename_output_files(outdir, opts.corpus_name, corpus_suffix)
      output_schema_for_suffix(corpus_suffix, fields)
    })
  }

  // Write the schema file (field names plus fixed metadata) for the corpus
  // with the given suffix.
  def output_schema_for_suffix(corpus_suffix: String, fields: Seq[String]) {
    val outdir = output_directory_for_suffix(corpus_suffix)
    val fixed_fields =
      Map("corpus-name" -> opts.corpus_name,
        "generating-app" -> "FindPolitical",
        "corpus-type" -> "twitter-%s".format(corpus_suffix)) ++
        opts.non_default_params_string.toMap ++
        Map(
          "ideological-ref-type" -> opts.ideological_ref_type,
          "political-feature-type" -> "%s".format(opts.political_feature_type)
        )
    val out_schema = new Schema(fields, fixed_fields)
    out_schema.output_constructed_schema_file(filehand, outdir,
      opts.corpus_name, corpus_suffix)
  }

  var ideo_users: DList[IdeologicalUser] = null
  val ideo_fact = new IdeologicalUserAction(opts)
  val matching_patterns = TextDBProcessor.
    get_matching_patterns(filehand, opts.input, suffix)
  val lines: DList[String] = TextInput.fromTextFile(matching_patterns: _*)
  errprint("Step 1, pass 0: %d ideological users on input",
    accounts.size)
  // Iteratively refine the account->ideology map: each pass scores users
  // against the previous pass's accounts and (except on the last pass)
  // feeds the scored users back in as the new account map.
  for (iter <- 1 to opts.iterations) {
    errprint(
      "Step 1, pass %d: Filter corpus for conservatives/liberals, compute ideology."
      format iter)
    val last_pass = iter == opts.iterations
    ideo_users =
      lines.flatMap(ideo_fact.get_ideological_user(_, accounts, last_pass))
    if (!last_pass) {
      accounts =
        persist(ideo_users.materialize).map(x =>
          (x.user.toLowerCase, x.ideology)).toMap
      errprint("Step 1, pass %d: %d ideological users on input",
        iter, accounts.size)
      errprint("Step 1, pass %d: done." format iter)
    }
  }
  val (ideo_users_persist, ideo_users_fixup) =
    output_lines(ideo_users.map(_.to_row(opts)), "ideo-users",
      ideo_fact.row_fields)
  /* This is a separate function because including it inline in the for loop
     below results in a weird deserialization error. */
  def handle_political_feature_type(ty: String) = {
    errprint("Step 2: Handling feature type '%s' ..." format ty)
    // Per-feature records, grouped by normalized value and merged.
    val political_features = ideo_users.
      flatMap(PoliticalFeature.
        get_political_features(ideo_fact, _, ty, opts)).
      groupBy(_.value).
      combine(PoliticalFeature.merge_political_features).
      map(_._2)
    output_lines(political_features.map(_.to_row(opts)),
      "political-features-%s" format ty, PoliticalFeature.row_fields)
  }
  errprint("Step 2: Generate political features.")
  val (ft_persists, ft_fixups) = (
    for (ty <- opts.political_feature_type) yield
      handle_political_feature_type(ty)
  ).unzip
  // Run all persisters together, then do the file-rename/schema fixups.
  persist(Seq(ideo_users_persist) ++ ft_persists)
  ideo_users_fixup()
  for (fixup <- ft_fixups)
    fixup()
  errprint("Step 1, pass %d: done." format opts.iterations)
  errprint("Step 2: done.")
  finish_scoobi_app(opts)
}
/*
To build a classifier for conserv vs liberal:
1. Look for people retweeting congressmen or governor tweets, possibly at
some minimum level of retweeting (or rely on followers, for some
minimum number of people following)
2. Make sure either they predominate having retweets from one party,
and/or use the DW-NOMINATE scores to pick out people whose average
ideology score of their retweets is near the extremes.
*/
}
| utcompling/fieldspring | src/main/scala/opennlp/fieldspring/preprocess/FindPolitical.scala | Scala | apache-2.0 | 26,859 |
package memnets.model
object OscPop {

  /**
   * Builds an [[OscPop]] whose units all share one frequency and damping.
   *
   * can use Osc.toFreq if want to convert raw freq into sim's cycles/sec
   *
   * damping/dampingTie is applied directly to decay, so use negative values.
   *
   * caller must factor tau in
   *
   * NOTE : should not supply both damping and dampingTie
   *
   * @param freq no units assumed. must calculate desired cycles/sec
   */
  def apply(
      size: Int,
      freq: Double = Osc.toFreq(0.5),
      damping: Double = 0.0,
      dampingTie: Option[Param] = None
  )(implicit sys: DynamicSystem): OscPop = {
    val pop = new OscPop(size)
    pop.frequency = freq
    pop.decay.w = damping
    // When a tie parameter is supplied, bind the decay link to it.
    dampingTie.foreach(dt => pop.decay.tie = dt)
    pop
  }
}
class OscPop(val size: Int)(implicit sys: DynamicSystem) extends LinkableLayer with Oscillator {
  // y is the visible oscillator output; x is its hidden companion state.
  val y = Layer(n = size, name = "y")
  val x = Layer(n = size, name = "x")
  x.ui.hide()
  x --> y
  // Self-link on x; its weight is the damping term (set via OscPop.apply).
  val decay: OnetoOne = x --> x
  // y -> x link; its weight holds the raw frequency value (see rawFrequency).
  val y2x: OnetoOne = y --> x
  decay.w = 0.0
  /** Initialize unit i to the given phase, with an optional amplitude scale. */
  def init(i: Int, phase: Double, scale: Double = 1.0): Unit = {
    y(i) = scale * initY(phase)
    x(i) = scale * initX(phase)
  }
  /** Current phase of unit i, derived from its (y, x) state. */
  def phase(i: Int): Double = phaseHelper(y(i), x(i))
  // Raw frequency is stored directly as the y2x link weight.
  def rawFrequency: Double = y2x.w
  def rawFrequency_=(rf: Double): Unit = y2x.w = rf
  /** Source layer exposed for linking this population to others. */
  def src: Layer = y
}
object HeteroOscPop {

  /**
   * Builds a [[HeteroOscPop]] whose per-unit frequency and damping come from
   * the supplied index-based functions.
   *
   * can use Osc.toFreq if want to convert raw freq into sim's cycles/sec
   *
   * damping is applied directly to decay, so use negative values.
   *
   * caller must factor tau in
   *
   * @param freq no units assumed. must calculate desired cycles/sec
   */
  def apply(
      size: Int,
      freq: Int => Double,
      damping: Int => Double
  )(implicit sys: DynamicSystem): HeteroOscPop = {
    val pop = new HeteroOscPop(size)
    // Configure every unit individually from the supplied functions.
    (0 until size).foreach { i =>
      pop.frequency(i, freq(i))
      pop.decay(i) = damping(i)
    }
    pop
  }
}
class HeteroOscPop(val size: Int)(implicit sys: DynamicSystem) extends LinkableLayer {
  // y is the visible oscillator output; x is its hidden companion state.
  val y = Layer(n = size, name = "y")
  val x = Layer(n = size, name = "x")
  x.ui.hide()
  x --> y
  // Dense self-link on x; per-unit weights are the damping terms.
  val decay: DenseLink = x ==> x
  // Dense y -> x link; per-unit weights store -frequency^2 (see frequency).
  val y2x: DenseLink = y ==> x
  /** Source layer exposed for linking this population to others. */
  def src: Layer = y
  /** Initialize unit i to the given phase, with an optional amplitude scale. */
  def init(i: Int, phase: Double, scale: Double = 1.0): Unit = {
    y(i) = scale * Math.sin(phase)
    x(i) = scale * Math.cos(phase) * frequency(i)
  }
  /** Frequency of unit i, recovered from the y2x weight (which holds -f*f). */
  def frequency(i: Int): Double = {
    if (y2x(i) < 0.0)
      Math.sqrt(-y2x(i))
    else
      0.0
  }
  /** Set frequency of unit i by storing -f*f as the y2x weight. */
  def frequency(i: Int, f: Double): Unit = { y2x(i) = -f * f }
}
| MemoryNetworks/memnets | api/src/main/scala/memnets/model/OscPop.scala | Scala | apache-2.0 | 2,484 |
package extruder.laws
import cats.{Monad, Monoid}
import extruder.core._
import cats.laws._
import extruder.data.PathElement
import cats.syntax.flatMap._
import cats.syntax.functor._
trait EncoderDecoderDerivedLaws[F[_], S <: Settings, E, D, O] extends EncoderDecoderLaws[F, S, E, D, O] {
  // Evidence that values can be looked up in the final output data O.
  implicit def hasValue: HasValue[F, S, O]

  // Run an encoded value through the finalise and prepare transforms so it
  // is in the shape the decoder expects to read.
  private def encodePrepare(path: List[PathElement], input: F[E]): F[O] =
    for {
      i <- input
      f <- finalise.run(path, settings, i)
      o <- prepare.run(path, settings, f)
    } yield o

  /** Law: encoding a Left value and decoding it round-trips to the same Left. */
  def eitherLeftEncodeDecode[A, B](path: List[PathElement], a: A)(
    implicit aEncoder: Encoder[F, S, A, E],
    bEncoder: Encoder[F, S, B, E],
    aDecoder: Decoder[F, S, A, O],
    bDecoder: Decoder[F, S, B, O]
  ): IsEq[F[Either[A, B]]] = {
    val eitherEncoder: Encoder[F, S, Either[A, B], E] = Encoder[F, S, Either[A, B], E]
    val eitherDecoder: Decoder[F, S, Either[A, B], O] = Decoder[F, S, Either[A, B], O]
    (for {
      left <- encodePrepare(path, eitherEncoder.write(path, settings, Left(a)))
      l <- eitherDecoder.read(path, settings, None, left)
    } yield l) <-> F.pure[Either[A, B]](Left(a))
  }

  /** Law: encoding a Right value and decoding it round-trips to the same Right. */
  def eitherRightEncodeDecode[A, B](path: List[PathElement], b: B)(
    implicit aEncoder: Encoder[F, S, A, E],
    bEncoder: Encoder[F, S, B, E],
    aDecoder: Decoder[F, S, A, O],
    bDecoder: Decoder[F, S, B, O]
  ): IsEq[F[Either[A, B]]] = {
    val eitherEncoder: Encoder[F, S, Either[A, B], E] = Encoder[F, S, Either[A, B], E]
    val eitherDecoder: Decoder[F, S, Either[A, B], O] = Decoder[F, S, Either[A, B], O]
    (for {
      right <- encodePrepare(path, eitherEncoder.write(path, settings, Right(b)))
      r <- eitherDecoder.read(path, settings, None, right)
    } yield r) <-> F.pure[Either[A, B]](Right(b))
  }

  /** Law: decoding empty data with a Left default yields that default. */
  def eitherLeftDefaultDecode[A, B](
    path: List[PathElement],
    a: A
  )(implicit aDecoder: Decoder[F, S, A, O], bDecoder: Decoder[F, S, B, O]): IsEq[F[Either[A, B]]] = {
    val eitherDecoder: Decoder[F, S, Either[A, B], O] = Decoder[F, S, Either[A, B], O]
    eitherDecoder.read(path, settings, Some(Left(a)), outMonoid.empty) <-> F.pure(Left(a))
  }

  /** Law: decoding empty data with a Right default yields that default. */
  def eitherRightDefaultDecode[A, B](
    path: List[PathElement],
    b: B
  )(implicit aDecoder: Decoder[F, S, A, O], bDecoder: Decoder[F, S, B, O]): IsEq[F[Either[A, B]]] = {
    val eitherDecoder: Decoder[F, S, Either[A, B], O] = Decoder[F, S, Either[A, B], O]
    eitherDecoder.read(path, settings, Some(Right(b)), outMonoid.empty) <-> F.pure(Right(b))
  }
}
object EncoderDecoderDerivedLaws {
  /** Summons every required instance and packages them into a law set. */
  def apply[F[_]: Monad: ExtruderErrors, S <: Settings, E: Monoid, D, O: Monoid](s: S)(
    implicit fin: Transform[F, S, E, D],
    prep: Transform[F, S, D, O],
    hv: HasValue[F, S, O]
  ): EncoderDecoderDerivedLaws[F, S, E, D, O] =
    new EncoderDecoderDerivedLaws[F, S, E, D, O] {
      // Wire each abstract member to the instance captured at call time.
      override def settings: S = s
      override def F: Monad[F] = implicitly[Monad[F]]
      override def errors: ExtruderErrors[F] = implicitly[ExtruderErrors[F]]
      override def monoid: Monoid[E] = implicitly[Monoid[E]]
      override def outMonoid: Monoid[O] = implicitly[Monoid[O]]
      override def finalise: Transform[F, S, E, D] = fin
      override def prepare: Transform[F, S, D, O] = prep
      override def hasValue: HasValue[F, S, O] = hv
    }
}
| janstenpickle/extruder | laws/src/main/scala/extruder/laws/EncoderDecoderDerivedLaws.scala | Scala | mit | 3,255 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Date, Timestamp}
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.Partition
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{AnalysisException, DataFrame, Row, SaveMode, SparkSession, SQLContext}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.jdbc.JdbcDialects
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.{DataType, DateType, NumericType, StructType, TimestampType}
/**
 * Instructions on how to partition the table among workers.
 *
 * @param column quoted name of the partition column
 * @param columnType resolved Catalyst type of the partition column
 * @param lowerBound lower bound of the column value, as an internal Long
 * @param upperBound upper bound of the column value, as an internal Long
 * @param numPartitions requested number of partitions
 */
private[sql] case class JDBCPartitioningInfo(
    column: String,
    columnType: DataType,
    lowerBound: Long,
    upperBound: Long,
    numPartitions: Int)
private[sql] object JDBCRelation extends Logging {
  /**
   * Given a partitioning schematic (a column of integral type, a number of
   * partitions, and upper and lower bounds on the column's value), generate
   * WHERE clauses for each partition so that each row in the table appears
   * exactly once.  The parameters minValue and maxValue are advisory in that
   * incorrect values may cause the partitioning to be poor, but no data
   * will fail to be represented.
   *
   * Null value predicate is added to the first partition where clause to include
   * the rows with null value for the partitions column.
   *
   * @param schema resolved schema of a JDBC table
   * @param resolver function used to determine if two identifiers are equal
   * @param timeZoneId timezone ID to be used if a partition column type is date or timestamp
   * @param jdbcOptions JDBC options that contains url
   * @return an array of partitions with where clause for each partition
   */
  def columnPartition(
      schema: StructType,
      resolver: Resolver,
      timeZoneId: String,
      jdbcOptions: JDBCOptions): Array[Partition] = {
    val partitioning = {
      import JDBCOptions._

      val partitionColumn = jdbcOptions.partitionColumn
      val lowerBound = jdbcOptions.lowerBound
      val upperBound = jdbcOptions.upperBound
      val numPartitions = jdbcOptions.numPartitions

      if (partitionColumn.isEmpty) {
        // Without a partition column the bounds are meaningless; reject them.
        assert(lowerBound.isEmpty && upperBound.isEmpty, "When 'partitionColumn' is not " +
          s"specified, '$JDBC_LOWER_BOUND' and '$JDBC_UPPER_BOUND' are expected to be empty")
        null
      } else {
        assert(lowerBound.nonEmpty && upperBound.nonEmpty && numPartitions.nonEmpty,
          s"When 'partitionColumn' is specified, '$JDBC_LOWER_BOUND', '$JDBC_UPPER_BOUND', and " +
            s"'$JDBC_NUM_PARTITIONS' are also required")

        val (column, columnType) = verifyAndGetNormalizedPartitionColumn(
          schema, partitionColumn.get, resolver, jdbcOptions)

        // Bounds are converted to an internal Long representation so the
        // stride arithmetic below works for numeric, date and timestamp types.
        val lowerBoundValue = toInternalBoundValue(lowerBound.get, columnType)
        val upperBoundValue = toInternalBoundValue(upperBound.get, columnType)
        JDBCPartitioningInfo(
          column, columnType, lowerBoundValue, upperBoundValue, numPartitions.get)
      }
    }

    // No partition column, a single requested partition, or a degenerate
    // range: fall back to one unfiltered partition.
    if (partitioning == null || partitioning.numPartitions <= 1 ||
      partitioning.lowerBound == partitioning.upperBound) {
      return Array[Partition](JDBCPartition(null, 0))
    }

    val lowerBound = partitioning.lowerBound
    val upperBound = partitioning.upperBound
    require (lowerBound <= upperBound,
      "Operation not allowed: the lower bound of partitioning column is larger than the upper " +
      s"bound. Lower bound: $lowerBound; Upper bound: $upperBound")

    val boundValueToString: Long => String =
      toBoundValueInWhereClause(_, partitioning.columnType, timeZoneId)
    val numPartitions =
      if ((upperBound - lowerBound) >= partitioning.numPartitions || /* check for overflow */
          (upperBound - lowerBound) < 0) {
        partitioning.numPartitions
      } else {
        // Fewer distinct values than requested partitions: shrink the count
        // so no partition's WHERE clause is empty.
        logWarning("The number of partitions is reduced because the specified number of " +
          "partitions is less than the difference between upper bound and lower bound. " +
          s"Updated number of partitions: ${upperBound - lowerBound}; Input number of " +
          s"partitions: ${partitioning.numPartitions}; " +
          s"Lower bound: ${boundValueToString(lowerBound)}; " +
          s"Upper bound: ${boundValueToString(upperBound)}.")
        upperBound - lowerBound
      }
    // Overflow and silliness can happen if you subtract then divide.
    // Here we get a little roundoff, but that's (hopefully) OK.
    val stride: Long = upperBound / numPartitions - lowerBound / numPartitions

    var i: Int = 0
    val column = partitioning.column
    var currentValue = lowerBound
    val ans = new ArrayBuffer[Partition]()
    while (i < numPartitions) {
      val lBoundValue = boundValueToString(currentValue)
      // First partition has no lower bound so it also picks up values below
      // the advisory lower bound.
      val lBound = if (i != 0) s"$column >= $lBoundValue" else null
      currentValue += stride
      val uBoundValue = boundValueToString(currentValue)
      // Last partition has no upper bound, picking up values above the
      // advisory upper bound.
      val uBound = if (i != numPartitions - 1) s"$column < $uBoundValue" else null
      val whereClause =
        if (uBound == null) {
          lBound
        } else if (lBound == null) {
          // NULL values of the partition column go to the first partition.
          s"$uBound or $column is null"
        } else {
          s"$lBound AND $uBound"
        }
      ans += JDBCPartition(whereClause, i)
      i = i + 1
    }
    val partitions = ans.toArray
    logInfo(s"Number of partitions: $numPartitions, WHERE clauses of these partitions: " +
      partitions.map(_.asInstanceOf[JDBCPartition].whereClause).mkString(", "))
    partitions
  }

  // Verify column name and type based on the JDBC resolved schema.
  // Returns the dialect-quoted column name and its Catalyst type; fails
  // unless the type is numeric, date or timestamp.
  private def verifyAndGetNormalizedPartitionColumn(
      schema: StructType,
      columnName: String,
      resolver: Resolver,
      jdbcOptions: JDBCOptions): (String, DataType) = {
    val dialect = JdbcDialects.get(jdbcOptions.url)
    val column = schema.find { f =>
      // Accept the user-supplied name either unquoted or already quoted.
      resolver(f.name, columnName) || resolver(dialect.quoteIdentifier(f.name), columnName)
    }.getOrElse {
      val maxNumToStringFields = SQLConf.get.maxToStringFields
      throw new AnalysisException(s"User-defined partition column $columnName not " +
        s"found in the JDBC relation: ${schema.simpleString(maxNumToStringFields)}")
    }
    column.dataType match {
      case _: NumericType | DateType | TimestampType =>
      case _ =>
        throw new AnalysisException(
          s"Partition column type should be ${NumericType.simpleString}, " +
          s"${DateType.catalogString}, or ${TimestampType.catalogString}, but " +
          s"${column.dataType.catalogString} found.")
    }
    (dialect.quoteIdentifier(column.name), column.dataType)
  }

  // Parse a user-supplied bound string into the internal Long representation
  // used for stride arithmetic (days for dates, microseconds for timestamps).
  private def toInternalBoundValue(value: String, columnType: DataType): Long = columnType match {
    case _: NumericType => value.toLong
    case DateType => DateTimeUtils.fromJavaDate(Date.valueOf(value)).toLong
    case TimestampType => DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(value))
  }

  // Render an internal bound value as a SQL literal for a WHERE clause;
  // date/timestamp values are formatted in the session time zone and quoted.
  private def toBoundValueInWhereClause(
      value: Long,
      columnType: DataType,
      timeZoneId: String): String = {
    def dateTimeToString(): String = {
      val timeZone = DateTimeUtils.getTimeZone(timeZoneId)
      val dateTimeStr = columnType match {
        case DateType => DateTimeUtils.dateToString(value.toInt, timeZone)
        case TimestampType => DateTimeUtils.timestampToString(value, timeZone)
      }
      s"'$dateTimeStr'"
    }
    columnType match {
      case _: NumericType => value.toString
      case DateType | TimestampType => dateTimeToString()
    }
  }

  /**
   * Takes a (schema, table) specification and returns the table's Catalyst schema.
   * If `customSchema` defined in the JDBC options, replaces the schema's dataType with the
   * custom schema's type.
   *
   * @param resolver function used to determine if two identifiers are equal
   * @param jdbcOptions JDBC options that contains url, table and other information.
   * @return resolved Catalyst schema of a JDBC table
   */
  def getSchema(resolver: Resolver, jdbcOptions: JDBCOptions): StructType = {
    val tableSchema = JDBCRDD.resolveTable(jdbcOptions)
    jdbcOptions.customSchema match {
      case Some(customSchema) => JdbcUtils.getCustomSchema(
        tableSchema, customSchema, resolver)
      case None => tableSchema
    }
  }

  /**
   * Resolves a Catalyst schema of a JDBC table and returns [[JDBCRelation]] with the schema.
   */
  def apply(
      parts: Array[Partition],
      jdbcOptions: JDBCOptions)(
      sparkSession: SparkSession): JDBCRelation = {
    val schema = JDBCRelation.getSchema(sparkSession.sessionState.conf.resolver, jdbcOptions)
    JDBCRelation(schema, parts, jdbcOptions)(sparkSession)
  }
}
private[sql] case class JDBCRelation(
    override val schema: StructType,
    parts: Array[Partition],
    jdbcOptions: JDBCOptions)(@transient val sparkSession: SparkSession)
  extends BaseRelation
  with PrunedFilteredScan
  with InsertableRelation {

  override def sqlContext: SQLContext = sparkSession.sqlContext

  // Rows are already produced internally; Spark need not convert them
  // (see the InternalRow note in buildScan).
  override val needConversion: Boolean = false

  // Check if JDBCRDD.compileFilter can accept input filters
  override def unhandledFilters(filters: Array[Filter]): Array[Filter] = {
    if (jdbcOptions.pushDownPredicate) {
      // A filter is unhandled when the dialect cannot compile it to SQL.
      filters.filter(JDBCRDD.compileFilter(_, JdbcDialects.get(jdbcOptions.url)).isEmpty)
    } else {
      // Pushdown disabled: Spark must re-evaluate every filter.
      filters
    }
  }

  // Scan the table, reading only the required columns and pushing down
  // compilable filters; one task per entry in `parts`.
  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
    // Rely on a type erasure hack to pass RDD[InternalRow] back as RDD[Row]
    JDBCRDD.scanTable(
      sparkSession.sparkContext,
      schema,
      requiredColumns,
      filters,
      parts,
      jdbcOptions).asInstanceOf[RDD[Row]]
  }

  // Write `data` back to the JDBC table, overwriting or appending.
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
    data.write
      .mode(if (overwrite) SaveMode.Overwrite else SaveMode.Append)
      .jdbc(jdbcOptions.url, jdbcOptions.tableOrQuery, jdbcOptions.asProperties)
  }

  override def toString: String = {
    val partitioningInfo = if (parts.nonEmpty) s" [numPartitions=${parts.length}]" else ""
    // credentials should not be included in the plan output, table information is sufficient.
    s"JDBCRelation(${jdbcOptions.tableOrQuery})" + partitioningInfo
  }
}
| guoxiaolongzte/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala | Scala | apache-2.0 | 11,199 |
/*
* =========================================================================================
* Copyright © 2013-2017 the kamon project <http://kamon.io/>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package kamon.netty
import kamon.Kamon
import kamon.context.Context
import kamon.netty.Clients.withNioClient
import kamon.netty.Servers.withNioServer
import kamon.testkit.{MetricInspection, Reconfigure, TestSpanReporter}
import kamon.trace.Span
import kamon.trace.Span.TagValue
import kamon.util.Registration
import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.SpanSugar._
class NettyHTTPTracingSpec extends WordSpec with Matchers with MetricInspection with Eventually
with Reconfigure with BeforeAndAfterAll with OptionValues {
"The Netty HTTP span propagation" should {
"propagate the span from the client to the server" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val clientSpan = Kamon.buildSpan("test-span").start()
Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
httpClient.execute(httpGet)
eventually(timeout(2 seconds)) {
val serverFinishedSpan = reporter.nextSpan().value
val clientFinishedSpan = reporter.nextSpan().value
serverFinishedSpan.operationName shouldBe "route.get"
serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
clientFinishedSpan.operationName shouldBe s"localhost:$port/route"
clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
serverFinishedSpan.context.traceID shouldBe clientFinishedSpan.context.traceID
serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
reporter.nextSpan() shouldBe empty
}
}
}
}
}
"contain a span error when an internal server error(500) occurs" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val clientSpan = Kamon.buildSpan("test-span-with-error").start()
Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
val httpGet = httpClient.get(s"http://localhost:$port/error")
httpClient.execute(httpGet)
eventually(timeout(2 seconds)) {
val serverFinishedSpan = reporter.nextSpan().value
val clientFinishedSpan = reporter.nextSpan().value
serverFinishedSpan.operationName shouldBe "error.get"
serverFinishedSpan.tags should contain allOf("span.kind" -> TagValue.String("server"), "error" -> TagValue.True)
clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
clientFinishedSpan.operationName shouldBe s"localhost:$port/error"
serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
serverFinishedSpan.context.traceID shouldBe clientFinishedSpan.context.traceID
serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
reporter.nextSpan() shouldBe empty
}
}
}
}
}
"propagate the span from the client to the server with chunk-encoded request" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val clientSpan = Kamon.buildSpan("client-chunk-span").start()
Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
val (httpPost, chunks) = httpClient.postWithChunks(s"http://localhost:$port/fetch-in-chunks", "test 1", "test 2")
httpClient.executeWithContent(httpPost, chunks)
eventually(timeout(2 seconds)) {
val serverFinishedSpan = reporter.nextSpan().value
val clientFinishedSpan = reporter.nextSpan().value
serverFinishedSpan.operationName shouldBe "fetch-in-chunks.post"
serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
clientFinishedSpan.operationName shouldBe s"localhost:$port/fetch-in-chunks"
clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
reporter.nextSpan() shouldBe empty
}
}
}
}
}
"propagate the span from the client to the server with chunk-encoded response" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val clientSpan = Kamon.buildSpan("client-chunk-span").start()
Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
val (httpPost, chunks) = httpClient.postWithChunks(s"http://localhost:$port/fetch-in-chunks", "test 1", "test 2")
httpClient.executeWithContent(httpPost, chunks)
eventually(timeout(2 seconds)) {
val serverFinishedSpan = reporter.nextSpan().value
val clientFinishedSpan = reporter.nextSpan().value
serverFinishedSpan.operationName shouldBe "fetch-in-chunks.post"
serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
clientFinishedSpan.operationName shouldBe s"localhost:$port/fetch-in-chunks"
clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
reporter.nextSpan() shouldBe empty
}
}
}
}
}
"create a new span when it's coming a request without one" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
httpClient.execute(httpGet)
eventually(timeout(2 seconds)) {
val serverFinishedSpan = reporter.nextSpan().value
serverFinishedSpan.operationName shouldBe "route.get"
serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
serverFinishedSpan.context.parentID.string shouldBe ""
reporter.nextSpan() shouldBe empty
}
}
}
}
"create a new span for each request" in {
withNioServer() { port =>
withNioClient(port) { httpClient =>
val clientSpan = Kamon.buildSpan("test-span").start()
Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
httpClient.execute(httpClient.get(s"http://localhost:$port/route?param=123"))
httpClient.execute(httpClient.get(s"http://localhost:$port/route?param=123"))
eventually(timeout(2 seconds)) {
val serverFinishedSpan1 = reporter.nextSpan().value
val clientFinishedSpan1 = reporter.nextSpan().value
val serverFinishedSpan2 = reporter.nextSpan().value
val clientFinishedSpan2 = reporter.nextSpan().value
serverFinishedSpan1.operationName shouldBe "route.get"
serverFinishedSpan1.tags should contain ("span.kind" -> TagValue.String("server"))
clientFinishedSpan1.operationName shouldBe s"localhost:$port/route"
clientFinishedSpan1.tags should contain ("span.kind" -> TagValue.String("client"))
serverFinishedSpan1.context.traceID shouldBe clientFinishedSpan1.context.traceID
serverFinishedSpan1.context.parentID shouldBe clientFinishedSpan1.context.spanID
serverFinishedSpan2.operationName shouldBe "route.get"
serverFinishedSpan2.tags should contain ("span.kind" -> TagValue.String("server"))
clientFinishedSpan2.operationName shouldBe s"localhost:$port/route"
clientFinishedSpan2.tags should contain ("span.kind" -> TagValue.String("client"))
serverFinishedSpan2.context.traceID shouldBe clientFinishedSpan2.context.traceID
serverFinishedSpan2.context.parentID shouldBe clientFinishedSpan2.context.spanID
clientFinishedSpan1.context.parentID shouldBe clientFinishedSpan2.context.parentID
clientFinishedSpan1.context.parentID shouldBe clientSpan.context.spanID
reporter.nextSpan() shouldBe empty
}
}
}
}
}
}
// Handle for the reporter registration; written in beforeAll, cancelled in afterAll.
@volatile var registration: Registration = _
// Reporter the tests drain via nextSpan() to assert on finished spans.
val reporter = new TestSpanReporter()

override protected def beforeAll(): Unit = {
  // Flush spans quickly and sample every trace so the tests above can observe
  // reported spans deterministically within their `eventually` timeouts.
  enableFastSpanFlushing()
  sampleAlways()
  registration = Kamon.addReporter(reporter)
}

override protected def afterAll(): Unit = {
  // Detach the reporter installed in beforeAll.
  registration.cancel()
}
}
| kamon-io/kamon-netty | src/test/scala/kamon/netty/NettyHTTPTracingSpec.scala | Scala | apache-2.0 | 9,814 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.wiki.pages
import com.netflix.atlas.core.model.StyleVocabulary
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.stacklang.Word
import com.netflix.atlas.wiki.StackWordPage
/**
 * Wiki page describing the `line` word of the styling vocabulary, i.e. the
 * default line plotting mode for an expression.
 */
case object Line extends StackWordPage {

  /** Vocabulary in which the `line` word is declared. */
  val vocab: Vocabulary = StyleVocabulary

  /** The `line` word resolved from the vocabulary's word list. */
  val word: Word = vocab.words.collectFirst { case w if w.name == "line" => w }.get

  override def signature: String =
    s"""
      |```
      |TimeSeriesExpr -- StyleExpr
      |```
    """.stripMargin

  override def summary: String =
    """
      |Change the line style to be line. This is the default mode and usually
      |does not need to be set explicitly.
      |
      |See the [line style examples](Line-Styles) page for more information.
    """.stripMargin.trim
}
| brharrington/atlas | atlas-wiki/src/main/scala/com/netflix/atlas/wiki/pages/Line.scala | Scala | apache-2.0 | 1,375 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.jforeign.internal
import scala.collection.JavaConversions.asScalaBuffer
import de.ust.skill.generator.jforeign.GeneralOutputMaker
import de.ust.skill.ir.ConstantLengthArrayType
import de.ust.skill.ir.Declaration
import de.ust.skill.ir.Field
import de.ust.skill.ir.GroundType
import de.ust.skill.ir.InterfaceType
import de.ust.skill.ir.ListType
import de.ust.skill.ir.MapType
import de.ust.skill.ir.SetType
import de.ust.skill.ir.Type
import de.ust.skill.ir.VariableLengthArrayType
import de.ust.skill.ir.restriction.AbstractRestriction
import de.ust.skill.ir.restriction.FloatRangeRestriction
import de.ust.skill.ir.restriction.IntRangeRestriction
import de.ust.skill.ir.restriction.NonNullRestriction
/**
 * Emits one `internal/${Name}Access.java` file per user type in the IR.
 * Each generated class is the storage pool for that type (a `BasePool` for
 * root types, a `SubPool` otherwise). NOTE(review): the exact indentation of
 * the generated Java is cosmetic only; the templates below are Java source
 * wrapped in Scala string interpolation — comments are added only at the
 * Scala level so they never leak into generated output.
 */
trait AccessMaker extends GeneralOutputMaker {
  abstract override def make {
    super.make

    for (t ← IR) {
      // A type without a super type is the root of its pool hierarchy.
      val isBasePool = (null == t.getSuperType)
      val nameT = name(t)
      val typeT = mapType(t)
      // Types carrying an AbstractRestriction must never be instantiated.
      val abstrct : Boolean = t.getRestrictions.filter { p ⇒ p.isInstanceOf[AbstractRestriction] }.nonEmpty

      val out = files.open(s"internal/${nameT}Access.java")

      //package & imports
      out.write(s"""package ${packagePrefix}internal;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;

import de.ust.skill.common.jforeign.api.SkillException;
import de.ust.skill.common.jforeign.internal.*;
import de.ust.skill.common.jforeign.internal.fieldDeclarations.AutoField;
import de.ust.skill.common.jforeign.internal.fieldTypes.*;
import de.ust.skill.common.jforeign.internal.parts.Block;
import de.ust.skill.common.jforeign.restrictions.FieldRestriction;
""")

      //class declaration
      out.write(s"""
${
        comment(t)
      }${
        suppressWarnings
      }public class ${nameT}Access extends ${
        if (isBasePool) s"BasePool<${typeT}>"
        else s"SubPool<${typeT}, ${mapType(t.getBaseType)}>"
      } {
${
        // only base pools own the backing data array
        if (isBasePool) s"""
    @Override
    final protected $typeT[] newArray(int size) {
        return new $typeT[size];
    }
"""
        else ""
      }
    /**
     * Can only be constructed by the SkillFile in this package.
     */
    ${nameT}Access(int poolIndex${
        if (isBasePool) ""
        else s", ${name(t.getSuperType)}Access superPool"
      }) {
        super(poolIndex, "${t.getName.getInternalName}"${
        if (isBasePool) ""
        else ", superPool"
      }, new HashSet<String>(Arrays.asList(new String[] { ${
        t.getFields.map { f ⇒ s""""${f.getName.getInternalName}"""" }.mkString(", ")
      } })), ${
        // size the autoFields array to the number of auto fields of this type
        t.getFields.count(_.isAuto) match {
          case 0 ⇒ "noAutoFields()"
          case c ⇒ s"(AutoField<?, ${mapType(t)}>[]) java.lang.reflect.Array.newInstance(AutoField.class, $c)"
        }
      });
    }${
        // export data for sub pools
        if (isBasePool) s"""

    final ${mapType(t.getBaseType)}[] data() {
        return data;
    }"""
        else ""
      }

    @Override
    public void insertInstances() {
${
        if (abstrct)
          s"""        // do nothing for abstract classes
    }"""
        else
          s"""${
            if (isBasePool) ""
            else s"""
        ${mapType(t.getBaseType)}[] data = ((${name(t.getBaseType)}Access)basePool).data();"""
          }
        final Block last = blocks().getLast();
        int i = (int) last.bpo;
        int high = (int) (last.bpo + last.count);
        while (i < high) {
            if (null != data[i])
                return;

            $typeT r = new $typeT();
            r.setSkillID(i + 1);
            data[i] = r;
            staticData.add(r);

            i += 1;
        }
    }"""
      }
${
        // field handling is only emitted for types that actually have fields
        if (t.getFields.isEmpty()) ""
        else s"""
    @SuppressWarnings("unchecked")
    @Override
    public void addKnownField(
        String name,
        de.ust.skill.common.jforeign.internal.fieldTypes.StringType string,
        de.ust.skill.common.jforeign.internal.fieldTypes.Annotation annotation) {

        final FieldDeclaration<?, $typeT> f;
        switch (name) {${
          (for (f ← t.getFields if !f.isAuto)
            yield s"""
        case "${f.getName.getInternalName}":
            f = new KnownField_${nameT}_${name(f)}(${mapToFieldType(f)}, 1 + dataFields.size(), this);
            break;
""").mkString
        }${
          // auto fields are assigned consecutive slots in the autoFields array
          var index = 0;
          (for (f ← t.getFields if f.isAuto)
            yield s"""
        case "${f.getName.getInternalName}":
            f = new KnownField_${nameT}_${name(f)}(${mapToFieldType(f)}, this);
            autoFields[${index += 1; index - 1}] = (AutoField<?, $typeT>) f;
            break;
""").mkString
        }
        default:
            super.addKnownField(name, string, annotation);
            return;
        }
        if (!(f instanceof AutoField))
            dataFields.add(f);
    }

    @SuppressWarnings("unchecked")
    @Override
    public <R> FieldDeclaration<R, $typeT> addField(int ID, FieldType<R> type, String name,
            HashSet<FieldRestriction<?>> restrictions) {
        final FieldDeclaration<R, $typeT> f;
        switch (name) {${
          (for (f ← t.getFields if !f.isAuto)
            yield s"""
        case "${f.getName.getInternalName}":
            f = (FieldDeclaration<R, $typeT>) new KnownField_${nameT}_${name(f)}((FieldType<${mapType(f, true)}>) type, ID, this);
            break;
""").mkString
        }${
          // an auto field read back from a file is an error
          (for (f ← t.getFields if f.isAuto)
            yield s"""
        case "${f.getName.getInternalName}":
            throw new SkillException(String.format(
                    "The file contains a field declaration %s.%s, but there is an auto field of similar name!",
                    this.name(), name));
""").mkString
        }
        default:
            return super.addField(ID, type, name, restrictions);
        }${
          if (t.getFields.forall(_.isAuto())) ""
          else """

        for (FieldRestriction<?> r : restrictions)
            f.addRestriction(r);
        dataFields.add(f);
        return f;"""
        }
    }"""
      }

    /**
     * @return a new $nameT instance with default field values
     */
    @Override
    public $typeT make() {${
        if (abstrct) s"""
        throw new RuntimeException("Cannot instantiate abstract class $nameT");"""
        else {
          s"""
        $typeT rval = new $typeT();
        add(rval);
        return rval;"""
        }
      }
    }
${
        // a value-constructor variant is only emitted if there is at least one settable field
        if (t.getAllFields.filterNot { f ⇒ f.isConstant() || f.isIgnored() }.isEmpty) ""
        else s"""
    /**
     * @return a new age instance with the argument field values
     */
    public $typeT make(${makeConstructorArguments(t)}) {${
          if (abstrct) s"""
        throw new RuntimeException("Cannot instantiate abstract class $nameT");"""
          else {
            s"""
        $typeT rval = new $typeT(${
              t.getAllFields.filterNot { f ⇒ f.isConstant || f.isIgnored }.map { f ⇒ s"""${name(f)}, """ }.mkString("")
            }-1, null);
        add(rval);
        return rval;"""
          }
        }
    }
"""
      }
${
        // builders and unknown-sub-pool support only make sense for instantiable types
        if (!abstrct) {
          s"""
    public ${nameT}Builder build() {
        return new ${nameT}Builder(this, new $typeT());
    }

    /**
     * Builder for new $nameT instances.
     *
     * @author Timm Felden
     */
    public static final class ${nameT}Builder extends Builder<$typeT> {

        protected ${nameT}Builder(StoragePool<$typeT, ? super $typeT> pool, $typeT instance) {
            super(pool, instance);
        }${
            (for (f ← t.getAllFields if !f.isIgnored() && !f.isConstant())
              yield s"""

        public ${nameT}Builder ${name(f)}(${mapType(f, false)} ${name(f)}) {
            instance.${setterOrFieldAccess(t, f)}(${name(f)});
            return this;
        }""").mkString
          }
    }

    /**
     * used internally for type forest construction
     */
    @Override
    public StoragePool<? extends ${mapType(t)}, ${mapType(t.getBaseType)}> makeSubPool(int index, String name) {
        return new UnknownSubPool(index, name, this);
    }

    private static final class UnknownSubPool extends SubPool<${packagePrefix() + "internal." + name(t)}SubType, ${mapType(t.getBaseType)}> {
        UnknownSubPool(int poolIndex, String name, StoragePool<? super ${packagePrefix()}internal.${name(t)}SubType,
                ${mapType(t.getBaseType)}> superPool) {
            super(poolIndex, name, superPool, Collections.emptySet(), noAutoFields());
        }

        @Override
        public StoragePool<? extends ${packagePrefix() + "internal." + name(t)}SubType, ${mapType(t.getBaseType)}> makeSubPool(int index, String name) {
            return new UnknownSubPool(index, name, this);
        }

        @Override
        public void insertInstances() {
            final Block last = lastBlock();
            int i = (int) last.bpo;
            int high = (int) (last.bpo + last.count);
            ${mapType(t.getBaseType)}[] data = ((${name(t.getBaseType)}Access) basePool).data();
            while (i < high) {
                if (null != data[i])
                    return;

                @SuppressWarnings("unchecked")
                ${packagePrefix() + "internal." + name(t)}SubType r = new ${packagePrefix() + "internal." + name(t)}SubType(this, i + 1);

                data[i] = r;
                staticData.add(r);

                i += 1;
            }
        }
    }"""
        } else ""
      }

    /**
     * punch a hole into the java type system :)
     */
    @SuppressWarnings("unchecked")
    static <T, U> FieldType<T> cast(FieldType<U> f) {
        return (FieldType<T>) f;
    }
}
""")

      out.close()
    }
  }

  /**
   * Maps a SKilL field to the Java expression constructing its runtime
   * FieldType. Constant integer fields become Constant* wrappers carrying
   * their value; user types are looked up by name in the owner's pool table.
   */
  private def mapToFieldType(f : Field) : String = {
    //@note temporary string & annotation will be replaced later on
    // Maps a ground (built-in) type or, for unknown skill names, a user or
    // interface type; interfaces delegate to their super type via cast().
    @inline def mapGroundType(t : Type) : String = t.getSkillName match {
      case "annotation" ⇒ "annotation"
      case "bool" ⇒ "BoolType.get()"
      case "i8" ⇒ if (f.isConstant) s"new ConstantI8((byte)${f.constantValue})" else "I8.get()"
      case "i16" ⇒ if (f.isConstant) s"new ConstantI16((short)${f.constantValue})" else "I16.get()"
      case "i32" ⇒ if (f.isConstant) s"new ConstantI32(${f.constantValue})" else "I32.get()"
      case "i64" ⇒ if (f.isConstant) s"new ConstantI64(${f.constantValue}L)" else "I64.get()"
      case "v64" ⇒ if (f.isConstant) s"new ConstantV64(${f.constantValue}L)" else "V64.get()"
      case "f32" ⇒ "F32.get()"
      case "f64" ⇒ "F64.get()"
      case "string" ⇒ "string"

      case s ⇒ t match {
        case t : InterfaceType ⇒ s"cast(${mapGroundType(t.getSuperType)})"
        case _ ⇒ s"""(FieldType<${mapType(t)}>)(owner().poolByName().get("${t.getName.getInternalName}"))"""
      }
    }

    // Compound types wrap the mapped base type(s); maps nest right-to-left.
    f.getType match {
      case t : GroundType ⇒ mapGroundType(t)
      case t : ConstantLengthArrayType ⇒
        s"new ConstantLengthArray<>(${t.getLength}, ${mapGroundType(t.getBaseType)})"
      case t : VariableLengthArrayType ⇒
        s"new VariableLengthArray<>(${mapGroundType(t.getBaseType)})"
      case t : ListType ⇒
        s"new ListType<>(${mapGroundType(t.getBaseType)})"
      case t : SetType ⇒
        s"new SetType<>(${mapGroundType(t.getBaseType)})"
      case t : MapType ⇒
        t.getBaseTypes().map(mapGroundType).reduceRight((k, v) ⇒ s"new MapType<>($k, $v)")
      case t : InterfaceType ⇒ s"cast(${mapGroundType(t.getSuperType)})"
      case t : Declaration ⇒
        s"""(FieldType<${mapType(t)}>)(owner().poolByName().get("${t.getName.getInternalName}"))"""
    }
  }

  /**
   * Renders a field's restrictions as source text.
   * NOTE(review): not referenced anywhere in this trait, and the emitted
   * `_root_....` paths are Scala syntax although this generator emits Java —
   * presumably a leftover from the Scala back-end; confirm before removing.
   */
  private def mkFieldRestrictions(f : Field) : String = {
    f.getRestrictions.map(_ match {
      case r : NonNullRestriction ⇒ s"_root_.${packagePrefix}internal.restrictions.NonNull"
      case r : IntRangeRestriction ⇒
        s"_root_.${packagePrefix}internal.restrictions.Range(${
          r.getLow
        }L.to${mapType(f.getType)}, ${r.getHigh}L.to${mapType(f.getType)})"

      case r : FloatRangeRestriction ⇒ f.getType.getSkillName match {
        case "f32" ⇒ s"_root_.${packagePrefix}internal.restrictions.Range(${r.getLowFloat}f, ${r.getHighFloat}f)"
        case "f64" ⇒ s"_root_.${packagePrefix}internal.restrictions.Range(${r.getLowDouble}, ${r.getHighDouble})"
      }
    }).mkString(", ")
  }
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/jforeign/internal/AccessMaker.scala | Scala | bsd-3-clause | 12,869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue, ThreadPoolExecutor}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import org.apache.spark.broadcast.{Broadcast, BroadcastManager}
import org.apache.spark.internal.Logging
import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.MetadataFetchFailedException
import org.apache.spark.storage.{BlockId, BlockManagerId, ShuffleBlockId}
import org.apache.spark.util._
// Messages understood by the MapOutputTrackerMasterEndpoint.
private[spark] sealed trait MapOutputTrackerMessage
// Executor request for the serialized map output statuses of one shuffle.
private[spark] case class GetMapOutputStatuses(shuffleId: Int)
  extends MapOutputTrackerMessage
// Asks the master endpoint to shut down.
private[spark] case object StopMapOutputTracker extends MapOutputTrackerMessage

// Work item queued for the master's dispatcher thread pool; carries the RPC
// context so the reply can be sent asynchronously from a dispatcher thread.
private[spark] case class GetMapOutputMessage(shuffleId: Int, context: RpcCallContext)
/**
 * RpcEndpoint class for MapOutputTrackerMaster.
 *
 * Handles two messages: [[GetMapOutputStatuses]], which is queued onto the
 * tracker's dispatcher pool to be answered asynchronously, and
 * [[StopMapOutputTracker]], which replies `true` and stops this endpoint.
 */
private[spark] class MapOutputTrackerMasterEndpoint(
    override val rpcEnv: RpcEnv, tracker: MapOutputTrackerMaster, conf: SparkConf)
  extends RpcEndpoint with Logging {

  logDebug("init") // force eager creation of logger

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case GetMapOutputStatuses(shuffleId: Int) =>
      val hostPort = context.senderAddress.hostPort
      logInfo("Asked to send map output locations for shuffle " + shuffleId + " to " + hostPort)
      // post() only enqueues the request (it returns Unit); the reply is sent
      // later by a dispatcher thread via the captured RpcCallContext. The
      // previous `val mapOutputStatuses = tracker.post(...)` bound Unit to a
      // misleadingly-named, unused val — dropped.
      tracker.post(new GetMapOutputMessage(shuffleId, context))

    case StopMapOutputTracker =>
      logInfo("MapOutputTrackerMasterEndpoint stopped!")
      context.reply(true)
      stop()
  }
}
/**
 * Class that keeps track of the location of the map output of
 * a stage. This is abstract because different versions of MapOutputTracker
 * (driver and executor) use different HashMap to store its metadata.
 */
private[spark] abstract class MapOutputTracker(conf: SparkConf) extends Logging {

  /** Set to the MapOutputTrackerMasterEndpoint living on the driver. */
  var trackerEndpoint: RpcEndpointRef = _

  /**
   * This HashMap has different behavior for the driver and the executors.
   *
   * On the driver, it serves as the source of map outputs recorded from ShuffleMapTasks.
   * On the executors, it simply serves as a cache, in which a miss triggers a fetch from the
   * driver's corresponding HashMap.
   *
   * Note: because mapStatuses is accessed concurrently, subclasses should make sure it's a
   * thread-safe map.
   */
  protected val mapStatuses: Map[Int, Array[MapStatus]]

  /**
   * Incremented every time a fetch fails so that client nodes know to clear
   * their cache of map output locations if this happens.
   */
  protected var epoch: Long = 0
  protected val epochLock = new AnyRef

  /** Remembers which map output locations are currently being fetched on an executor. */
  private val fetching = new HashSet[Int]

  /**
   * Send a message to the trackerEndpoint and get its result within a default timeout, or
   * throw a SparkException if this fails.
   */
  protected def askTracker[T: ClassTag](message: Any): T = {
    try {
      trackerEndpoint.askWithRetry[T](message)
    } catch {
      case e: Exception =>
        logError("Error communicating with MapOutputTracker", e)
        throw new SparkException("Error communicating with MapOutputTracker", e)
    }
  }

  /** Send a one-way message to the trackerEndpoint, to which we expect it to reply with true. */
  protected def sendTracker(message: Any) {
    val response = askTracker[Boolean](message)
    if (response != true) {
      throw new SparkException(
        "Error reply received from MapOutputTracker. Expecting true, got " + response.toString)
    }
  }

  /**
   * Adds information about output for a given shuffle.
   * This method is synchronized because the BlockManager may call it multiple times concurrently
   * with updates from different map tasks.
   */
  def addStatus(
      shuffleId: Int,
      mapId: Int,
      mapStatus: MapStatus): Unit = synchronized {
    val statuses = {
      // NOTE: We don't know the number of mapTasks here, so we create an array of size mapId + 1
      val currentStatuses = mapStatuses.getOrElseUpdate(shuffleId, new Array[MapStatus](mapId + 1))
      if (mapId >= currentStatuses.length) {
        // Create a bigger array and copy over the statuses.
        val newSize = mapId + 1
        val newStatuses = currentStatuses ++ new Array[MapStatus](newSize - currentStatuses.length)
        mapStatuses.put(shuffleId, newStatuses)
        newStatuses
      } else {
        currentStatuses
      }
    }
    statuses(mapId) = mapStatus
  }

  /**
   * Returns the indices of map tasks that currently have a registered (non-null)
   * status for the given shuffle, or the empty set if the shuffle is unknown.
   */
  def getAvailableMapOutputs(shuffleId: Int): Set[Int] = synchronized {
    val statuses = mapStatuses.get(shuffleId)
    if (statuses.isDefined) {
      statuses.get.zipWithIndex.filter(x => x._1 != null).map(_._2).toSet
    } else {
      Set.empty
    }
  }

  /**
   * Called from executors to get the server URIs and output sizes for each shuffle block that
   * needs to be read from a given reduce task.
   *
   * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
   *         and the second item is a sequence of (shuffle block id, shuffle block size) tuples
   *         describing the shuffle blocks that are stored at that block manager.
   */
  def getMapSizesByExecutorId(shuffleId: Int, reduceId: Int)
      : Seq[(BlockManagerId, Seq[(BlockId, Long)])] = {
    // Single-partition convenience overload: [reduceId, reduceId + 1).
    getMapSizesByExecutorId(shuffleId, reduceId, reduceId + 1)
  }

  /**
   * Called from executors to get the server URIs and output sizes for each shuffle block that
   * needs to be read from a given range of map output partitions (startPartition is included but
   * endPartition is excluded from the range).
   *
   * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
   *         and the second item is a sequence of (shuffle block id, shuffle block size) tuples
   *         describing the shuffle blocks that are stored at that block manager.
   */
  def getMapSizesByExecutorId(shuffleId: Int, startPartition: Int, endPartition: Int)
      : Seq[(BlockManagerId, Seq[(BlockId, Long)])] = {
    logDebug(s"Fetching outputs for shuffle $shuffleId, partitions $startPartition-$endPartition")
    val statuses = getStatuses(shuffleId)
    // Synchronize on the returned array because, on the driver, it gets mutated in place
    statuses.synchronized {
      return MapOutputTracker.convertMapStatuses(shuffleId, startPartition, endPartition, statuses)
    }
  }

  /**
   * Return statistics about all of the outputs for a given shuffle.
   */
  def getStatistics(dep: ShuffleDependency[_, _, _]): MapOutputStatistics = {
    val statuses = getStatuses(dep.shuffleId)
    // Synchronize on the returned array because, on the driver, it gets mutated in place
    statuses.synchronized {
      // Sum each reduce partition's size across all map outputs.
      val totalSizes = new Array[Long](dep.partitioner.numPartitions)
      for (s <- statuses) {
        for (i <- 0 until totalSizes.length) {
          totalSizes(i) += s.getSizeForBlock(i)
        }
      }
      new MapOutputStatistics(dep.shuffleId, totalSizes)
    }
  }

  /**
   * Get or fetch the array of MapStatuses for a given shuffle ID. NOTE: clients MUST synchronize
   * on this array when reading it, because on the driver, we may be changing it in place.
   *
   * (It would be nice to remove this restriction in the future.)
   */
  private def getStatuses(shuffleId: Int): Array[MapStatus] = {
    val statuses = mapStatuses.get(shuffleId).orNull
    if (statuses == null) {
      logInfo("Don't have map outputs for shuffle " + shuffleId + ", fetching them")
      val startTime = System.currentTimeMillis
      var fetchedStatuses: Array[MapStatus] = null
      // The `fetching` set coordinates concurrent callers so only one thread
      // actually performs the remote fetch for a given shuffle.
      fetching.synchronized {
        // Someone else is fetching it; wait for them to be done
        while (fetching.contains(shuffleId)) {
          try {
            fetching.wait()
          } catch {
            case e: InterruptedException =>
          }
        }

        // Either while we waited the fetch happened successfully, or
        // someone fetched it in between the get and the fetching.synchronized.
        fetchedStatuses = mapStatuses.get(shuffleId).orNull
        if (fetchedStatuses == null) {
          // We have to do the fetch, get others to wait for us.
          fetching += shuffleId
        }
      }

      if (fetchedStatuses == null) {
        // We won the race to fetch the statuses; do so
        logInfo("Doing the fetch; tracker endpoint = " + trackerEndpoint)
        // This try-finally prevents hangs due to timeouts:
        try {
          val fetchedBytes = askTracker[Array[Byte]](GetMapOutputStatuses(shuffleId))
          fetchedStatuses = MapOutputTracker.deserializeMapStatuses(fetchedBytes)
          logInfo("Got the output locations")
          mapStatuses.put(shuffleId, fetchedStatuses)
        } finally {
          // Always clear our fetching marker and wake any waiters, even on failure.
          fetching.synchronized {
            fetching -= shuffleId
            fetching.notifyAll()
          }
        }
      }
      logDebug(s"Fetching map output statuses for shuffle $shuffleId took " +
        s"${System.currentTimeMillis - startTime} ms")

      if (fetchedStatuses != null) {
        return fetchedStatuses
      } else {
        logError("Missing all output locations for shuffle " + shuffleId)
        throw new MetadataFetchFailedException(
          shuffleId, -1, "Missing all output locations for shuffle " + shuffleId)
      }
    } else {
      return statuses
    }
  }

  /** Called to get current epoch number. */
  def getEpoch: Long = {
    epochLock.synchronized {
      return epoch
    }
  }

  /**
   * Called from executors to update the epoch number, potentially clearing old outputs
   * because of a fetch failure. Each executor task calls this with the latest epoch
   * number on the driver at the time it was created.
   */
  def updateEpoch(newEpoch: Long) {
    epochLock.synchronized {
      if (newEpoch > epoch) {
        logInfo("Updating epoch to " + newEpoch + " and clearing cache")
        epoch = newEpoch
        mapStatuses.clear()
      }
    }
  }

  /** Unregister shuffle data. */
  def unregisterShuffle(shuffleId: Int) {
    mapStatuses.remove(shuffleId)
  }

  /** Stop the tracker. */
  def stop() { }
}
/**
* MapOutputTracker for the driver.
*/
private[spark] class MapOutputTrackerMaster(conf: SparkConf,
broadcastManager: BroadcastManager, isLocal: Boolean)
extends MapOutputTracker(conf) {
/** Cache a serialized version of the output statuses for each shuffle to send them out faster */
private var cacheEpoch = epoch
// The size at which we use Broadcast to send the map output statuses to the executors
private val minSizeForBroadcast =
conf.getSizeAsBytes("spark.shuffle.mapOutput.minSizeForBroadcast", "512k").toInt
/** Whether to compute locality preferences for reduce tasks */
private val shuffleLocalityEnabled = conf.getBoolean("spark.shuffle.reduceLocality.enabled", true)
// Number of map and reduce tasks above which we do not assign preferred locations based on map
// output sizes. We limit the size of jobs for which assign preferred locations as computing the
// top locations by size becomes expensive.
private val SHUFFLE_PREF_MAP_THRESHOLD = 1000
// NOTE: This should be less than 2000 as we use HighlyCompressedMapStatus beyond that
private val SHUFFLE_PREF_REDUCE_THRESHOLD = 1000
// Fraction of total map output that must be at a location for it to considered as a preferred
// location for a reduce task. Making this larger will focus on fewer locations where most data
// can be read locally, but may lead to more delay in scheduling if those locations are busy.
private val REDUCER_PREF_LOCS_FRACTION = 0.2
// HashMaps for storing mapStatuses and cached serialized statuses in the driver.
// Statuses are dropped only by explicit de-registering.
protected val mapStatuses = new ConcurrentHashMap[Int, Array[MapStatus]]().asScala
private val cachedSerializedStatuses = new ConcurrentHashMap[Int, Array[Byte]]().asScala
private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
// Kept in sync with cachedSerializedStatuses explicitly
// This is required so that the Broadcast variable remains in scope until we remove
// the shuffleId explicitly or implicitly.
private val cachedSerializedBroadcast = new HashMap[Int, Broadcast[Array[Byte]]]()
// This is to prevent multiple serializations of the same shuffle - which happens when
// there is a request storm when shuffle start.
private val shuffleIdLocks = new ConcurrentHashMap[Int, AnyRef]()
// requests for map output statuses
private val mapOutputRequests = new LinkedBlockingQueue[GetMapOutputMessage]
// Thread pool used for handling map output status requests. This is a separate thread pool
// to ensure we don't block the normal dispatcher threads.
private val threadpool: ThreadPoolExecutor = {
val numThreads = conf.getInt("spark.shuffle.mapOutput.dispatcher.numThreads", 8)
val pool = ThreadUtils.newDaemonFixedThreadPool(numThreads, "map-output-dispatcher")
for (i <- 0 until numThreads) {
pool.execute(new MessageLoop)
}
pool
}
// Make sure that that we aren't going to exceed the max RPC message size by making sure
// we use broadcast to send large map output statuses.
if (minSizeForBroadcast > maxRpcMessageSize) {
val msg = s"spark.shuffle.mapOutput.minSizeForBroadcast ($minSizeForBroadcast bytes) must " +
s"be <= spark.rpc.message.maxSize ($maxRpcMessageSize bytes) to prevent sending an rpc " +
"message that is to large."
logError(msg)
throw new IllegalArgumentException(msg)
}
def post(message: GetMapOutputMessage): Unit = {
mapOutputRequests.offer(message)
}
/** Message loop used for dispatching messages. */
private class MessageLoop extends Runnable {
override def run(): Unit = {
try {
while (true) {
try {
val data = mapOutputRequests.take()
if (data == PoisonPill) {
// Put PoisonPill back so that other MessageLoops can see it.
mapOutputRequests.offer(PoisonPill)
return
}
val context = data.context
val shuffleId = data.shuffleId
val hostPort = context.senderAddress.hostPort
logDebug("Handling request to send map output locations for shuffle " + shuffleId +
" to " + hostPort)
val mapOutputStatuses = getSerializedMapOutputStatuses(shuffleId)
context.reply(mapOutputStatuses)
} catch {
case NonFatal(e) => logError(e.getMessage, e)
}
}
} catch {
case ie: InterruptedException => // exit
}
}
}
/** A poison endpoint that indicates MessageLoop should exit its message loop. */
private val PoisonPill = new GetMapOutputMessage(-99, null)
// Exposed for testing
private[spark] def getNumCachedSerializedBroadcast = cachedSerializedBroadcast.size
def registerShuffle(shuffleId: Int, numMaps: Int) {
if (mapStatuses.put(shuffleId, new Array[MapStatus](numMaps)).isDefined) {
throw new IllegalArgumentException("Shuffle ID " + shuffleId + " registered twice")
}
// add in advance
shuffleIdLocks.putIfAbsent(shuffleId, new Object())
}
def registerMapOutput(shuffleId: Int, mapId: Int, status: MapStatus) {
val array = mapStatuses(shuffleId)
array.synchronized {
array(mapId) = status
}
}
/** Register multiple map output information for the given shuffle */
def registerMapOutputs(shuffleId: Int, statuses: Array[MapStatus], changeEpoch: Boolean = false) {
mapStatuses.put(shuffleId, statuses.clone())
if (changeEpoch) {
incrementEpoch()
}
}
/** Unregister map output information of the given shuffle, mapper and block manager */
def unregisterMapOutput(shuffleId: Int, mapId: Int, bmAddress: BlockManagerId) {
val arrayOpt = mapStatuses.get(shuffleId)
if (arrayOpt.isDefined && arrayOpt.get != null) {
val array = arrayOpt.get
array.synchronized {
if (array(mapId) != null && array(mapId).location == bmAddress) {
array(mapId) = null
}
}
incrementEpoch()
} else {
throw new SparkException("unregisterMapOutput called for nonexistent shuffle ID")
}
}
/** Unregister shuffle data */
override def unregisterShuffle(shuffleId: Int) {
mapStatuses.remove(shuffleId)
cachedSerializedStatuses.remove(shuffleId)
cachedSerializedBroadcast.remove(shuffleId).foreach(v => removeBroadcast(v))
shuffleIdLocks.remove(shuffleId)
}
/** Check if the given shuffle is being tracked */
def containsShuffle(shuffleId: Int): Boolean = {
cachedSerializedStatuses.contains(shuffleId) || mapStatuses.contains(shuffleId)
}
/**
* Return the preferred hosts on which to run the given map output partition in a given shuffle,
* i.e. the nodes that the most outputs for that partition are on.
*
* @param dep shuffle dependency object
* @param partitionId map output partition that we want to read
* @return a sequence of host names
*/
def getPreferredLocationsForShuffle(dep: ShuffleDependency[_, _, _], partitionId: Int)
: Seq[String] = {
if (shuffleLocalityEnabled && dep.rdd.partitions.length < SHUFFLE_PREF_MAP_THRESHOLD &&
dep.partitioner.numPartitions < SHUFFLE_PREF_REDUCE_THRESHOLD) {
val blockManagerIds = getLocationsWithLargestOutputs(dep.shuffleId, partitionId,
dep.partitioner.numPartitions, REDUCER_PREF_LOCS_FRACTION)
if (blockManagerIds.nonEmpty) {
blockManagerIds.get.map(_.host)
} else {
Nil
}
} else {
Nil
}
}
/**
* Return a list of locations that each have fraction of map output greater than the specified
* threshold.
*
* @param shuffleId id of the shuffle
* @param reducerId id of the reduce task
* @param numReducers total number of reducers in the shuffle
* @param fractionThreshold fraction of total map output size that a location must have
* for it to be considered large.
*/
def getLocationsWithLargestOutputs(
shuffleId: Int,
reducerId: Int,
numReducers: Int,
fractionThreshold: Double)
: Option[Array[BlockManagerId]] = {
val statuses = mapStatuses.get(shuffleId).orNull
if (statuses != null) {
statuses.synchronized {
if (statuses.nonEmpty) {
// HashMap to add up sizes of all blocks at the same location
val locs = new HashMap[BlockManagerId, Long]
var totalOutputSize = 0L
var mapIdx = 0
while (mapIdx < statuses.length) {
val status = statuses(mapIdx)
// status may be null here if we are called between registerShuffle, which creates an
// array with null entries for each output, and registerMapOutputs, which populates it
// with valid status entries. This is possible if one thread schedules a job which
// depends on an RDD which is currently being computed by another thread.
if (status != null) {
val blockSize = status.getSizeForBlock(reducerId)
if (blockSize > 0) {
locs(status.location) = locs.getOrElse(status.location, 0L) + blockSize
totalOutputSize += blockSize
}
}
mapIdx = mapIdx + 1
}
val topLocs = locs.filter { case (loc, size) =>
size.toDouble / totalOutputSize >= fractionThreshold
}
// Return if we have any locations which satisfy the required threshold
if (topLocs.nonEmpty) {
return Some(topLocs.keys.toArray)
}
}
}
}
None
}
def incrementEpoch() {
epochLock.synchronized {
epoch += 1
logDebug("Increasing epoch to " + epoch)
}
}
private def removeBroadcast(bcast: Broadcast[_]): Unit = {
if (null != bcast) {
broadcastManager.unbroadcast(bcast.id,
removeFromDriver = true, blocking = false)
}
}
private def clearCachedBroadcast(): Unit = {
for (cached <- cachedSerializedBroadcast) removeBroadcast(cached._2)
cachedSerializedBroadcast.clear()
}
/**
 * Return the serialized (possibly broadcast-wrapped) map output statuses for a shuffle,
 * serving from the per-shuffle cache whenever the epoch has not advanced.
 * Safe to call from multiple threads: a per-shuffle lock plus a double-check of the cache
 * ensure the expensive serialize/broadcast step happens at most once per epoch.
 */
def getSerializedMapOutputStatuses(shuffleId: Int): Array[Byte] = {
var statuses: Array[MapStatus] = null
var retBytes: Array[Byte] = null
var epochGotten: Long = -1
// Check to see if we have a cached version, returns true if it does
// and has side effect of setting retBytes. If not returns false
// with side effect of setting statuses
def checkCachedStatuses(): Boolean = {
epochLock.synchronized {
if (epoch > cacheEpoch) {
// Epoch moved forward since we last cached: everything cached is now stale.
cachedSerializedStatuses.clear()
clearCachedBroadcast()
cacheEpoch = epoch
}
cachedSerializedStatuses.get(shuffleId) match {
case Some(bytes) =>
retBytes = bytes
true
case None =>
logDebug("cached status not found for : " + shuffleId)
statuses = mapStatuses.getOrElse(shuffleId, Array.empty[MapStatus])
// Remember the epoch this snapshot was taken at so we only cache if it is unchanged.
epochGotten = epoch
false
}
}
}
if (checkCachedStatuses()) return retBytes
var shuffleIdLock = shuffleIdLocks.get(shuffleId)
if (null == shuffleIdLock) {
val newLock = new Object()
// in general, this condition should be false - but good to be paranoid
val prevLock = shuffleIdLocks.putIfAbsent(shuffleId, newLock)
shuffleIdLock = if (null != prevLock) prevLock else newLock
}
// synchronize so we only serialize/broadcast it once since multiple threads call
// in parallel
shuffleIdLock.synchronized {
// double check to make sure someone else didn't serialize and cache the same
// mapstatus while we were waiting on the synchronize
if (checkCachedStatuses()) return retBytes
// If we got here, we failed to find the serialized locations in the cache, so we pulled
// out a snapshot of the locations as "statuses"; let's serialize and return that
val (bytes, bcast) = MapOutputTracker.serializeMapStatuses(statuses, broadcastManager,
isLocal, minSizeForBroadcast)
logInfo("Size of output statuses for shuffle %d is %d bytes".format(shuffleId, bytes.length))
// Add them into the table only if the epoch hasn't changed while we were working
epochLock.synchronized {
if (epoch == epochGotten) {
cachedSerializedStatuses(shuffleId) = bytes
if (null != bcast) cachedSerializedBroadcast(shuffleId) = bcast
} else {
logInfo("Epoch changed, not caching!")
// Drop the just-created broadcast so it does not leak; no one will ever reference it.
removeBroadcast(bcast)
}
}
bytes
}
}
/**
 * Shut the master tracker down: stop the message loop and the RPC endpoint, then clear
 * all cached state. NOTE(review): ordering appears deliberate — the poison pill is queued
 * before the thread pool shutdown so worker threads can drain and exit cleanly; confirm
 * before reordering.
 */
override def stop() {
mapOutputRequests.offer(PoisonPill)
threadpool.shutdown()
sendTracker(StopMapOutputTracker)
mapStatuses.clear()
trackerEndpoint = null
cachedSerializedStatuses.clear()
clearCachedBroadcast()
shuffleIdLocks.clear()
}
}
/**
 * MapOutputTracker for the executors, which fetches map output information from the driver's
 * MapOutputTrackerMaster.
 */
private[spark] class MapOutputTrackerWorker(conf: SparkConf) extends MapOutputTracker(conf) {
// Backed by a ConcurrentHashMap (exposed through the Scala Map interface) because
// multiple task threads may read and populate fetched statuses concurrently.
protected val mapStatuses: Map[Int, Array[MapStatus]] =
new ConcurrentHashMap[Int, Array[MapStatus]]().asScala
}
/**
 * Companion utilities: (de)serialization of map output statuses and conversion of statuses
 * into per-block-manager shuffle block listings.
 */
private[spark] object MapOutputTracker extends Logging {
val ENDPOINT_NAME = "MapOutputTracker"
// Tag byte prepended to every serialized payload: DIRECT means the gzipped statuses follow
// inline; BROADCAST means a serialized Broadcast handle to the real payload follows.
private val DIRECT = 0
private val BROADCAST = 1
// Serialize an array of map output locations into an efficient byte format so that we can send
// it to reduce tasks. We do this by compressing the serialized bytes using GZIP. They will
// generally be pretty compressible because many map outputs will be on the same hostname.
def serializeMapStatuses(statuses: Array[MapStatus], broadcastManager: BroadcastManager,
isLocal: Boolean, minBroadcastSize: Int): (Array[Byte], Broadcast[Array[Byte]]) = {
val out = new ByteArrayOutputStream
out.write(DIRECT)
val objOut = new ObjectOutputStream(new GZIPOutputStream(out))
Utils.tryWithSafeFinally {
// Since statuses can be modified in parallel, sync on it
statuses.synchronized {
objOut.writeObject(statuses)
}
} {
objOut.close()
}
val arr = out.toByteArray
if (arr.length >= minBroadcastSize) {
// Use broadcast instead.
// Important arr(0) is the tag == DIRECT, ignore that while deserializing !
val bcast = broadcastManager.newBroadcast(arr, isLocal)
// toByteArray creates copy, so we can reuse out
out.reset()
out.write(BROADCAST)
val oos = new ObjectOutputStream(new GZIPOutputStream(out))
oos.writeObject(bcast)
oos.close()
val outArr = out.toByteArray
logInfo("Broadcast mapstatuses size = " + outArr.length + ", actual size = " + arr.length)
// The second tuple element lets the caller manage the broadcast's lifetime.
(outArr, bcast)
} else {
// Small payload: sent inline, so there is no broadcast to track (null).
(arr, null)
}
}
// Opposite of serializeMapStatuses.
def deserializeMapStatuses(bytes: Array[Byte]): Array[MapStatus] = {
assert (bytes.length > 0)
// Deserialize one gzipped Java-serialized object from a slice of `arr`.
def deserializeObject(arr: Array[Byte], off: Int, len: Int): AnyRef = {
val objIn = new ObjectInputStream(new GZIPInputStream(
new ByteArrayInputStream(arr, off, len)))
Utils.tryWithSafeFinally {
objIn.readObject()
} {
objIn.close()
}
}
// The first byte is the tag written by serializeMapStatuses.
bytes(0) match {
case DIRECT =>
deserializeObject(bytes, 1, bytes.length - 1).asInstanceOf[Array[MapStatus]]
case BROADCAST =>
// deserialize the Broadcast, pull .value array out of it, and then deserialize that
val bcast = deserializeObject(bytes, 1, bytes.length - 1).
asInstanceOf[Broadcast[Array[Byte]]]
logInfo("Broadcast mapstatuses size = " + bytes.length +
", actual size = " + bcast.value.length)
// Important - ignore the DIRECT tag ! Start from offset 1
deserializeObject(bcast.value, 1, bcast.value.length - 1).asInstanceOf[Array[MapStatus]]
case _ => throw new IllegalArgumentException("Unexpected byte tag = " + bytes(0))
}
}
/**
 * Given an array of map statuses and a range of map output partitions, returns a sequence that,
 * for each block manager ID, lists the shuffle block IDs and corresponding shuffle block sizes
 * stored at that block manager.
 *
 * If any of the statuses is null (indicating a missing location due to a failed mapper),
 * throws a FetchFailedException.
 *
 * @param shuffleId Identifier for the shuffle
 * @param startPartition Start of map output partition ID range (included in range)
 * @param endPartition End of map output partition ID range (excluded from range)
 * @param statuses List of map statuses, indexed by map ID.
 * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
 *         and the second item is a sequence of (shuffle block ID, shuffle block size) tuples
 *         describing the shuffle blocks that are stored at that block manager.
 */
private def convertMapStatuses(
shuffleId: Int,
startPartition: Int,
endPartition: Int,
statuses: Array[MapStatus]): Seq[(BlockManagerId, Seq[(BlockId, Long)])] = {
assert (statuses != null)
val splitsByAddress = new HashMap[BlockManagerId, ArrayBuffer[(BlockId, Long)]]
for ((status, mapId) <- statuses.zipWithIndex) {
if (status == null) {
// A missing status means that mapper's output was lost; throwing here makes the
// scheduler recompute the missing map stage.
val errorMessage = s"Missing an output location for shuffle $shuffleId map $mapId"
logError(errorMessage)
throw new MetadataFetchFailedException(shuffleId, startPartition, errorMessage)
} else {
for (part <- startPartition until endPartition) {
splitsByAddress.getOrElseUpdate(status.location, ArrayBuffer()) +=
((ShuffleBlockId(shuffleId, mapId, part), status.getSizeForBlock(part)))
}
}
}
splitsByAddress.toSeq
}
}
| likithkailas/StreamingSystems | core/src/main/scala/org/apache/spark/MapOutputTracker.scala | Scala | apache-2.0 | 29,227 |
/**
* Copyright (c) 2014-2016 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch
package clients
// Amazon
import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration
// Jest
import io.searchbox.client.{
JestClient,
JestClientFactory
}
import io.searchbox.core._
import io.searchbox.client.config.HttpClientConfig
import io.searchbox.cluster.Health
// Java
import java.util.concurrent.TimeUnit
// Joda-Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.DateTimeFormat
// Scala
import scala.collection.JavaConversions._
import scala.annotation.tailrec
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._
// Logging
import org.apache.commons.logging.{
Log,
LogFactory
}
// Tracker
import com.snowplowanalytics.snowplow.scalatracker.Tracker
// This project
import sinks._
/**
 * Sends Elasticsearch documents via the HTTP Jest Client.
 * Batches are bulk sent to the end point.
 *
 * @param configuration Kinesis connector configuration supplying the ES endpoint and port
 * @param tracker optional Snowplow tracker used to report emit failures and shutdown
 * @param maxConnectionWaitTimeMs total time bulk-request retries may take before the
 *        application is force-terminated (default 60s)
 */
class ElasticsearchSenderHTTP(
configuration: KinesisConnectorConfiguration,
tracker: Option[Tracker] = None,
maxConnectionWaitTimeMs: Long = 60000
) extends ElasticsearchSender {
private val Log = LogFactory.getLog(getClass)
// An ISO valid timestamp formatter
private val TstampFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(DateTimeZone.UTC)
/**
 * Prepare the elasticsearch client
 */
private val factory: JestClientFactory = new JestClientFactory()
// Shared HTTP client config: multi-threaded, node discovery off, 5s connect/read timeouts,
// idle connections dropped after 30s.
factory.setHttpClientConfig(new HttpClientConfig
.Builder("http://" + configuration.ELASTICSEARCH_ENDPOINT + ":" + configuration.ELASTICSEARCH_PORT)
.multiThreaded(true)
.discoveryEnabled(false)
.maxConnectionIdleTime(30L, TimeUnit.SECONDS)
.connTimeout(5000)
.readTimeout(5000)
.build()
)
private val elasticsearchClient: JestClient = factory.getObject()
/**
 * The Elasticsearch endpoint.
 */
private val elasticsearchEndpoint = configuration.ELASTICSEARCH_ENDPOINT
/**
 * The Elasticsearch port.
 */
private val elasticsearchPort = configuration.ELASTICSEARCH_PORT
/**
 * The amount of time to wait in between unsuccessful index requests (in milliseconds).
 * 10 seconds = 10 * 1000 = 10000
 */
private val BackoffPeriod = 10000
Log.info("ElasticsearchSender using elasticsearch endpoint " + elasticsearchEndpoint + ":" + elasticsearchPort)
/**
 * Emits good records to Elasticsearch and bad records to Kinesis.
 * All valid records in the buffer get sent to Elasticsearch in a bulk request.
 * All invalid requests and all requests which failed transformation get sent to Kinesis.
 *
 * @param records List of records to send to Elasticsearch
 * @return List of inputs which Elasticsearch rejected
 */
def sendToElasticsearch(records: List[EmitterInput]): List[EmitterInput] = {
// Build one Index action per successfully-transformed record; failed transforms are skipped.
val actions: List[io.searchbox.core.Index] = for {
(_, Success(record)) <- records
} yield {
new io.searchbox.core.Index.Builder(record.getSource)
.index(record.getIndex)
.`type`(record.getType)
.id(record.getId)
.build()
}
val bulkRequest = new Bulk.Builder()
.addAction(actions)
.build()
val connectionAttemptStartTime = System.currentTimeMillis()
/**
 * Keep attempting to execute the buldRequest until it succeeds
 *
 * @return List of inputs which Elasticsearch rejected
 */
@tailrec def attemptEmit(attemptNumber: Long = 1): List[EmitterInput] = {
// Halt the JVM once retries exceed the configured budget (see forceShutdown).
if (attemptNumber > 1 && System.currentTimeMillis() - connectionAttemptStartTime > maxConnectionWaitTimeMs) {
forceShutdown()
}
try {
val bulkResponse: BulkResult = elasticsearchClient.execute(bulkRequest)
val responses = bulkResponse.getItems
// NOTE(review): zips bulk-response items against the *full* records list, which assumes
// ES returns one item per action in order and that every record was a Success — verify.
val allFailures = responses.toList.zip(records).filter(_._1.error != null).map(pair => {
val (response, record) = pair
val failure = response.error
Log.error("Record failed with message: " + failure)
if (failure.contains("DocumentAlreadyExistsException") || failure.contains("VersionConflictEngineException")) {
// Duplicate/conflicting writes count as already indexed, not as failures.
None
} else {
Some(record._1 -> List("Elasticsearch rejected record with message: %s".format(failure)).fail)
}
})
val numberOfSkippedRecords = allFailures.count(_.isEmpty)
val failures = allFailures.flatten
Log.info("Emitted " + (records.size - failures.size - numberOfSkippedRecords) + " records to Elasticsearch")
if (!failures.isEmpty) {
printClusterStatus
Log.warn("Returning " + failures.size + " records as failed")
}
failures
} catch {
// Transport-level failure: back off, notify the tracker, then retry the whole bulk.
case e: Exception => {
Log.error("ElasticsearchEmitter threw an unexpected exception ", e)
sleep(BackoffPeriod)
tracker foreach {
t => SnowplowTracking.sendFailureEvent(t, BackoffPeriod, attemptNumber, connectionAttemptStartTime, e.toString)
}
attemptEmit(attemptNumber + 1)
}
}
}
attemptEmit()
}
/**
 * Shuts the client down
 */
def close(): Unit = {
elasticsearchClient.shutdownClient
}
/**
 * Logs the Elasticsearch cluster's health
 */
private def printClusterStatus: Unit = {
val response = elasticsearchClient.execute(new Health.Builder().build())
val status = response.getValue("status").toString
if (status == "red") {
Log.error("Cluster health is RED. Indexing ability will be limited")
} else if (status == "yellow") {
Log.warn("Cluster health is YELLOW.")
} else if (status == "green") {
Log.info("Cluster health is GREEN.")
}
}
/**
 * Terminate the application in a way the KCL cannot stop
 *
 * Prevents shutdown hooks from running
 */
private def forceShutdown() {
Log.error(s"Shutting down application as unable to connect to Elasticsearch for over $maxConnectionWaitTimeMs ms")
tracker foreach {
t =>
// TODO: Instead of waiting a fixed time, use synchronous tracking or futures (when the tracker supports futures)
SnowplowTracking.trackApplicationShutdown(t)
sleep(5000)
}
Runtime.getRuntime.halt(1)
}
}
| bigdecisions/snowplow | 4-storage/kinesis-elasticsearch-sink/src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/elasticsearch/clients/ElasticsearchSenderHTTP.scala | Scala | apache-2.0 | 7,056 |
package co.uk.DRUK.flink.windwoing
/**
* Created by satyasatyasheel on 14/02/2017.
*/
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
object TumblingWindowExample {
  /** Socket word count aggregated over 15-second tumbling time windows. */
  def main(args: Array[String]): Unit = {
    // Obtain the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Source: raw text lines read from a local socket.
    val lines = env.socketTextStream("localhost", 9000)
    // Tokenize on whitespace and pair each word with an initial count of one.
    val pairs = lines
      .flatMap(line => line.split("\\s+"))
      .map(word => (word, 1))
    // Key by the word (tuple field 0), window into 15-second tumbling windows,
    // and sum the counts (tuple field 1) within each window.
    val counts = pairs
      .keyBy(0)
      .timeWindow(Time.seconds(15))
      .sum(1)
    // Sink: print results to stdout, then launch the pipeline.
    counts.print()
    env.execute()
  }
}
| DataReplyUK/FlinkGroupLondon | 2017-02-22_Windowing_And_Time/src/main/scala/co/uk/DRUK/flink/windwoing/TumblingWindowExample.scala | Scala | apache-2.0 | 843 |
package org.apache.spark.sql.cassandra
import com.datastax.spark.connector.rdd.CassandraTableScanRDD
import scala.collection.JavaConversions._
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.spark.Logging
import org.apache.spark.sql.cassandra.CassandraSourceRelation._
import org.apache.spark.sql.catalyst.analysis.Catalog
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Subquery}
import org.apache.spark.sql.catalyst.{CatalystConf, SimpleCatalystConf, TableIdentifier}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import com.datastax.spark.connector.cql.{CassandraConnector, CassandraConnectorConf, Schema}
/**
 * Spark SQL Catalog backed by Cassandra. Table lookups build CassandraSourceRelations and
 * are memoized in a bounded Guava LoadingCache keyed by TableIdentifier.
 */
private[cassandra] class CassandraCatalog(csc: CassandraSQLContext) extends Catalog with Logging {
val caseSensitive: Boolean = true
/** A cache of Spark SQL data source tables that have been accessed. Cache is thread safe.*/
private[cassandra] val cachedDataSourceTables: LoadingCache[TableIdentifier, LogicalPlan] = {
val cacheLoader = new CacheLoader[TableIdentifier, LogicalPlan]() {
override def load(tableIdent: TableIdentifier): LogicalPlan = {
logDebug(s"Creating new cached data source for $tableIdent")
buildRelation(tableIdent)
}
}
// Bounded at 1000 entries; entries beyond that are evicted by the cache.
CacheBuilder.newBuilder().maximumSize(1000).build(cacheLoader)
}
/** Resolve a table to its (cached) logical plan, wrapping it in a Subquery when aliased. */
override def lookupRelation(tableIdent: TableIdentifier, alias: Option[String]): LogicalPlan = {
val tableLogicPlan = cachedDataSourceTables.get(tableIdent)
alias.map(a => Subquery(a, tableLogicPlan)).getOrElse(tableLogicPlan)
}
/** Build logic plan from a CassandraSourceRelation */
private def buildRelation(tableIdentifier: TableIdentifier): LogicalPlan = {
val (cluster, database, table) = getClusterDBTableNames(tableIdentifier)
val tableRef = TableRef(table, database, Option(cluster))
val sourceRelation = CassandraSourceRelation(tableRef, csc, CassandraSourceOptions())
Subquery(table, LogicalRelation(sourceRelation))
}
/** Return cluster, database and table names from a table identifier*/
private def getClusterDBTableNames(tableIdent: TableIdentifier): (String, String, String) = {
// Missing database falls back to the context's current keyspace.
val database = tableIdent.database.getOrElse(csc.getKeyspace)
val table = tableIdent.table
(csc.getCluster, database, table)
}
override def registerTable(tableIdent: TableIdentifier, plan: LogicalPlan): Unit = {
cachedDataSourceTables.put(tableIdent, plan)
}
override def unregisterTable(tableIdent: TableIdentifier): Unit = {
cachedDataSourceTables.invalidate(tableIdent)
}
override def unregisterAllTables(): Unit = {
cachedDataSourceTables.invalidateAll()
}
/** True if the table is in the local cache or exists in the Cassandra schema. */
override def tableExists(tableIdent: TableIdentifier): Boolean = {
val (cluster, database, table) = getClusterDBTableNames(tableIdent)
val cached = cachedDataSourceTables.asMap().containsKey(tableIdent)
if (cached) {
true
} else {
// Cache miss: consult the live Cassandra schema for keyspace + table.
val tableRef = TableRef(table, database, Option(cluster))
val schema = Schema.fromCassandra(getCassandraConnector(tableRef))
val tabDef =
for (ksDef <- schema.keyspaceByName.get(database);
tabDef <- ksDef.tableByName.get(table)) yield tabDef
tabDef.nonEmpty
}
}
/** Union of Cassandra-known tables and cache-only registered tables (flagged temporary). */
override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = {
val cluster = csc.getCluster
val tableNamesFromCache = getTablesFromCache(databaseName, Option(cluster)).map(_._1)
val tablesFromCassandra = getTablesFromCassandra(databaseName)
val tablesOnlyInCache =
tableNamesFromCache.diff(tablesFromCassandra.map(_._1)).map(name => (name, true))
tablesFromCassandra ++ tablesOnlyInCache
}
/** List all tables for a given database name and cluster directly from Cassandra */
def getTablesFromCassandra(databaseName: Option[String]): Seq[(String, Boolean)] = {
val cluster = csc.getCluster
val tableRef = TableRef("", databaseName.getOrElse(""), Option(cluster))
val schema = Schema.fromCassandra(getCassandraConnector(tableRef), databaseName)
for {
ksDef <- schema.keyspaces.toSeq
tableDef <- ksDef.tables
} yield (s"${ksDef.keyspaceName}.${tableDef.tableName}", false)
}
/** List all tables for a given database name and cluster from local cache */
def getTablesFromCache(
databaseName: Option[String],
cluster: Option[String] = None): Seq[(String, Boolean)] = {
val clusterName = cluster.getOrElse(csc.getCluster)
// NOTE(review): relies on TableIdentifier destructuring as Seq(cluster, db, table);
// verify this unapplySeq holds for the Spark version in use.
for (Seq(c, db, table) <- cachedDataSourceTables.asMap().keySet().toSeq
if c == clusterName && databaseName.forall(_ == db)) yield {
(s"$db.$table", true)
}
}
// Build a connector whose conf merges SparkConf, SQL confs, and table-level options.
private def getCassandraConnector(tableRef: TableRef) : CassandraConnector = {
val sparkConf = csc.sparkContext.getConf.clone()
val sqlConf = csc.getAllConfs
val conf = consolidateConfs(sparkConf, sqlConf, tableRef, Map.empty)
new CassandraConnector(CassandraConnectorConf(conf))
}
override val conf: CatalystConf = SimpleCatalystConf(caseSensitive)
override def refreshTable(tableIdent: TableIdentifier): Unit = {
cachedDataSourceTables.refresh(tableIdent)
}
}
| maasg/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/org/apache/spark/sql/cassandra/CassandraCatalog.scala | Scala | apache-2.0 | 5,164 |
package com.tribbloids.spookystuff.actions
import com.tribbloids.spookystuff.dsl.DriverFactories
/**
 * Re-runs the PhantomJS trace test suite against the HtmlUnit driver factory.
 * Only the driver is swapped; all test cases are inherited from TestTrace_PhantomJS.
 * The pagination tests below are disabled pending a more stable fixture (see TODO).
 */
class TestTrace_HtmlUnit extends TestTrace_PhantomJS {
override lazy val driverFactory = DriverFactories.HtmlUnit()
//TODO: find the cause and a more stable test case
// test("click should not double click") {
// spooky.conf.remoteResourceTimeout = 180.seconds
//
// try {
// val results = (Visit("https://ca.vwr.com/store/search?&pimId=582903")
// +> Paginate("a[title=Next]", delay = 2.second)).head.self.resolve(spooky)
//
// val numPages = results.head.asInstanceOf[Page].findAll("div.right a").size
//
// assert(results.size == numPages)
// }
//
// finally {
// spooky.conf.remoteResourceTimeout = 60.seconds
// }
// }
// test("dynamic paginate should returns right number of pages") {
// spooky.conf.remoteResourceTimeout = 180.seconds
//
// try {
// val results = (Visit("https://ca.vwr.com/store/search?label=Blotting%20Kits&pimId=3617065")
// +> Paginate("a[title=Next]", delay = 2.second)).head.self.resolve(spooky)
//
// val numPages = results.head.asInstanceOf[Page].findAll("div.right a").size
//
// assert(results.size == numPages)
// }
//
// finally {
// spooky.conf.remoteResourceTimeout = 60.seconds
// }
// }
}
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2007-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.swing
package event
// Root of all table events; every event carries the originating Table as its source.
abstract class TableEvent(override val source: Table) extends ComponentEvent
// Marker for events that describe a change to the table's data or structure.
abstract class TableChange(override val source: Table) extends TableEvent(source)
/**
 * The most general table change. The table might have changed completely,
 * i.e., columns might have been reordered, rows added or removed, etc.
 * No other event indicates that the structure might have changed.
 */
case class TableStructureChanged(override val source: Table) extends TableChange(source)
/**
 * The table structure, i.e., the column order, names, and types stay the same,
 * but anything else might have changed.
 */
case class TableChanged(override val source: Table) extends TableChange(source)
/**
 * The size of the table stays the same, but the given range of rows might
 * have changed but only in the given column. A value of -1 for the column
 * denotes all columns.
 */
case class TableUpdated(override val source: Table, range: Range, column: Int)
extends TableChange(source)
/**
 * Any change that caused the table to change it's size
 */
// Plain class (not a case class) so the row-added/removed case classes below can extend it.
class TableResized(override val source: Table) extends TableChange(source)
case class TableRowsAdded(override val source: Table, range: Range) extends TableResized(source)
case class TableRowsRemoved(override val source: Table, range: Range) extends TableResized(source)
// Selection events: `adjusting` is true while the selection change is still in progress.
case class TableColumnsSelected(override val source: Table, range: Range, adjusting: Boolean)
extends TableEvent(source) with AdjustingEvent with ListSelectionEvent
case class TableRowsSelected(override val source: Table, range: Range, adjusting: Boolean)
extends TableEvent(source) with AdjustingEvent with ListSelectionEvent
| SethTisue/scala-swing | src/main/scala/scala/swing/event/TableEvent.scala | Scala | bsd-3-clause | 2,229 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import org.specs._
import org.apache.hadoop.conf.Configuration
/**
 * Test fixture job: reads two Tsv inputs through one MultipleTsvFiles source and writes
 * the combined stream to output0. The catch-all only prints the stack trace — acceptable
 * here because a planning failure surfaces through the test's sink assertions instead.
 */
class MultiTsvInputJob(args: Args) extends Job(args) {
try {
MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)).read.write(Tsv("output0"))
} catch {
case e : Exception => e.printStackTrace()
}
}
/**
 * Test fixture job: copies a SequenceFile (input0 -> output0) and a WritableSequenceFile
 * with named fields (input1 -> output1). Exceptions are only printed; failures are
 * detected by the test's sink assertions.
 */
class SequenceFileInputJob(args: Args) extends Job(args) {
try {
SequenceFile("input0").read.write(SequenceFile("output0"))
WritableSequenceFile("input1", ('query, 'queryStats)).read.write(WritableSequenceFile("output1", ('query, 'queryStats)))
} catch {
case e: Exception => e.printStackTrace()
}
}
/**
 * Specs for FileSource behavior: multi-file Tsv input, sequence-file round trips, and the
 * pathIsGood checks of plain and _SUCCESS-guarded sources against the checked-in fixture
 * tree (see the layout comment below).
 */
class FileSourceTest extends Specification {
noDetailedDiffs()
import Dsl._
"A MultipleTsvFile Source" should {
JobTest(new MultiTsvInputJob(_)).
source(MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)),
List(("foobar", 1), ("helloworld", 2))).
sink[(String, Int)](Tsv("output0")) {
outBuf =>
"take multiple Tsv files as input sources" in {
outBuf.length must be_==(2)
outBuf.toList must be_==(List(("foobar", 1), ("helloworld", 2)))
}
}
.run
.finish
}
"A WritableSequenceFile Source" should {
JobTest(new SequenceFileInputJob(_)).
source(SequenceFile("input0"),
List(("foobar0", 1), ("helloworld0", 2))).
source(WritableSequenceFile("input1", ('query, 'queryStats)),
List(("foobar1", 1), ("helloworld1", 2))).
sink[(String, Int)](SequenceFile("output0")) {
outBuf =>
"sequence file input" in {
outBuf.length must be_==(2)
outBuf.toList must be_==(List(("foobar0", 1), ("helloworld0", 2)))
}
}
.sink[(String, Int)](WritableSequenceFile("output1", ('query, 'queryStats))) {
outBuf =>
"writable sequence file input" in {
outBuf.length must be_==(2)
outBuf.toList must be_==(List(("foobar1", 1), ("helloworld1", 2)))
}
}
.run
.finish
}
/**
 * The layout of the test data looks like this:
 *
 * /test_data/2013/03 (dir with a single data file in it)
 * /test_data/2013/03/2013-03.txt
 * /test_data/2013/04 (dir with a single data file and a _SUCCESS file)
 * /test_data/2013/04/2013-04.txt
 * /test_data/2013/04/_SUCCESS
 * /test_data/2013/05 (empty dir)
 * /test_data/2013/06 (dir with only a _SUCCESS file)
 * /test_data/2013/06/_SUCCESS
 */
"default pathIsGood" should {
import TestFileSource.pathIsGood
"accept a directory with data in it" in {
pathIsGood("test_data/2013/03/") must be_==(true)
pathIsGood("test_data/2013/03/*") must be_==(true)
}
"accept a directory with data and _SUCCESS in it" in {
pathIsGood("test_data/2013/04/") must be_==(true)
pathIsGood("test_data/2013/04/*") must be_==(true)
}
"reject an empty directory" in {
pathIsGood("test_data/2013/05/") must be_==(false)
pathIsGood("test_data/2013/05/*") must be_==(false)
}
"reject a directory with only _SUCCESS when specified as a glob" in {
pathIsGood("test_data/2013/06/*") must be_==(false)
}
"accept a directory with only _SUCCESS when specified without a glob" in {
pathIsGood("test_data/2013/06/") must be_==(true)
}
}
"success file source pathIsGood" should {
import TestSuccessFileSource.pathIsGood
"reject a directory with data in it but no _SUCCESS file" in {
pathIsGood("test_data/2013/03/") must be_==(false)
pathIsGood("test_data/2013/03/*") must be_==(false)
}
"accept a directory with data and _SUCCESS in it when specified as a glob" in {
pathIsGood("test_data/2013/04/*") must be_==(true)
}
"reject a directory with data and _SUCCESS in it when specified without a glob" in {
pathIsGood("test_data/2013/04/") must be_==(false)
}
"reject an empty directory" in {
pathIsGood("test_data/2013/05/") must be_==(false)
pathIsGood("test_data/2013/05/*") must be_==(false)
}
"reject a directory with only _SUCCESS when specified as a glob" in {
pathIsGood("test_data/2013/06/*") must be_==(false)
}
"reject a directory with only _SUCCESS when specified without a glob" in {
pathIsGood("test_data/2013/06/") must be_==(false)
}
}
}
/** Minimal FileSource that exposes pathIsGood against the bundled test filesystem. */
object TestFileSource extends FileSource {
  // Root of the checked-in fixture tree used by the path-validation specs.
  val testfsPathRoot = "scalding-core/src/test/resources/com/twitter/scalding/test_filesystem/"
  val conf = new Configuration()

  // Required abstract members; this source is never actually read in the tests.
  override def hdfsPaths: Iterable[String] = Iterable.empty
  override def localPath: String = ""

  /** Check a fixture-relative path (plain or glob) with the default FileSource logic. */
  def pathIsGood(p: String) = {
    val fullPath = testfsPathRoot + p
    super.pathIsGood(fullPath, conf)
  }
}
/** Like TestFileSource but with SuccessFileSource mixed in, so _SUCCESS markers matter. */
object TestSuccessFileSource extends FileSource with SuccessFileSource {
override def hdfsPaths: Iterable[String] = Iterable.empty
override def localPath: String = ""
// Root of the checked-in fixture tree used by the path-validation specs.
val testfsPathRoot = "scalding-core/src/test/resources/com/twitter/scalding/test_filesystem/"
val conf = new Configuration()
// Delegate to the inherited pathIsGood, resolving `p` relative to the fixture root.
def pathIsGood(p: String) = super.pathIsGood(testfsPathRoot + p, conf)
}
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl.nn.{CrossEntropyCriterion, Linear, Sequential}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.{Engine, RandomGenerator, T, TestUtils}
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
// Optimizer convergence specs: both Adam variants must minimize the Rosenbrock function
// to its known minimum at (1, 1) within 10k iterations.
@com.intel.analytics.bigdl.tags.Parallel
class AdamSpec extends FlatSpec with Matchers with BeforeAndAfter {
before {
// Run the engine in local mode with two workers for these tests.
System.setProperty("bigdl.localMode", "true")
System.setProperty("spark.master", "local[2]")
Engine.init
}
after {
// Clean up the system properties so other suites are unaffected.
System.clearProperty("bigdl.localMode")
System.clearProperty("spark.master")
}
val start = System.currentTimeMillis()
"adam" should "perform well on rosenbrock function" in {
val x = Tensor[Double](2).fill(0)
val config = T("learningRate" -> 0.002)
val optm = new Adam[Double]
// Sample the objective every 1000 iterations for logging.
var fx = new ArrayBuffer[Double]
for (i <- 1 to 10001) {
val result = optm.optimize(TestUtils.rosenBrock, x, config)
if ((i - 1) % 1000 == 0) {
fx += result._2(0)
}
}
println(s"x is \\n$x")
println("fx is")
for (i <- 1 to fx.length) {
println(s"${(i - 1) * 1000 + 1}, ${fx(i - 1)}")
}
val spend = System.currentTimeMillis() - start
println("Time Cost: " + spend + "ms")
// Objective must be ~0 and x must reach the Rosenbrock minimum (1, 1).
(fx.last < 1e-9) should be(true)
x(Array(1)) should be(1.0 +- 0.01)
x(Array(2)) should be(1.0 +- 0.01)
}
"ParallelAdam" should "perform well on rosenbrock function" in {
val x = Tensor[Double](2).fill(0)
val optm = new ParallelAdam[Double](learningRate = 0.002, parallelNum = 2)
var fx = new ArrayBuffer[Double]
for (i <- 1 to 10001) {
val result = optm.optimize(TestUtils.rosenBrock, x)
if ((i - 1) % 1000 == 0) {
fx += result._2(0)
}
}
println(s"x is \\n$x")
println("fx is")
for (i <- 1 to fx.length) {
println(s"${(i - 1) * 1000 + 1}, ${fx(i - 1)}")
}
val spend = System.currentTimeMillis() - start
println("Time Cost: " + spend + "ms")
(fx.last < 1e-9) should be(true)
x(Array(1)) should be(1.0 +- 0.01)
x(Array(2)) should be(1.0 +- 0.01)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/optim/AdamSpec.scala | Scala | apache-2.0 | 2,836 |
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import org.specs2.mutable.Specification
import shapeless._
import _root_.spray.json.DefaultJsonProtocol._
import com.netaporter.uri.Uri
import com.netaporter.uri.dsl._
/**
 * Exercises json:api serialization of shapeless coproduct ("polymorphic") relationships:
 * PolyToOne and PolyToMany over a Module coproduct of Limb, LightBulb and Eye.
 */
final class CoproductSpec extends Specification {
  implicit val apiRoot: com.qvantel.jsonapi.ApiRoot = ApiRoot(None)
  // Test model: robots composed of modules, where Module is a coproduct (tagged union)
  // of three unrelated case classes terminated by CNil.
  private[this] final case class Limb(id: String, fingers: Int)
  private[this] final case class LightBulb(id: String, color: Long)
  private[this] final case class Eye(id: String, fov: Float, friend: Option[ToOne[Robot]])
  private[this] type Module = Limb :+: LightBulb :+: Eye :+: CNil
  private[this] final case class Robot(id: String, arm: ToOne[Limb], module: PolyToOne[Module])
  private[this] final case class CrippledRobot(id: String, module: Option[PolyToOne[Module]])
  private[this] final case class AdvancedRobot(id: String, modules: PolyToMany[Module])
  // Maps each coproduct variant to its id and json:api resource type by folding
  // the coproduct with shapeless Poly1 functions.
  private[this] implicit object ModulePolyIdentifiable extends PolyIdentifiable[Module] {
    private[this] object polyIdentify extends Poly1 {
      implicit def caseLimb = at[Limb](obj => obj.id)
      implicit def caseLightBulb = at[LightBulb](obj => obj.id)
      implicit def caseEye = at[Eye](obj => obj.id)
    }
    private[this] object polyResourceType extends Poly1 {
      implicit def caseLimb = at[Limb](_ => implicitly[ResourceType[Limb]].resourceType)
      implicit def caseLightBulb = at[LightBulb](_ => implicitly[ResourceType[LightBulb]].resourceType)
      implicit def caseEye = at[Eye](_ => implicitly[ResourceType[Eye]].resourceType)
    }
    override def identify(a: Module): String = a fold polyIdentify
    override def resourceType(a: Module): String = a fold polyResourceType
  }
  // Companion-style objects providing the type-class instances (resource type, identity,
  // path, json:api format, includes) required by the jsonApiFormat/includes macros.
  private[this] object Limb {
    implicit val limbResourceType: ResourceType[Limb] = ResourceType[Limb]("limbs")
    implicit val limbIdentifiable: Identifiable[Limb] = Identifiable.by(_.id)
    implicit val limbPathTo: PathTo[Limb] = new PathToId[Limb] {
      override def root: Uri = "/limbs"
    }
    implicit val limbJsonApiFormat: JsonApiFormat[Limb] = jsonApiFormat[Limb]
    implicit lazy val limbIncludes: Includes[Limb] = includes[Limb]
  }
  private[this] object Eye {
    implicit val eyeResourceType: ResourceType[Eye] = ResourceType[Eye]("eyes")
    implicit val eyeIdentifiable: Identifiable[Eye] = Identifiable.by(_.id)
    implicit val eyePathTo: PathTo[Eye] = new PathToId[Eye] {
      override def root: Uri = "/eyes"
    }
    implicit val eyeJsonApiFormat: JsonApiFormat[Eye] = jsonApiFormat[Eye]
    implicit lazy val eyeIncludes: Includes[Eye] = includes[Eye]
  }
  private[this] object LightBulb {
    implicit val lightBulbResourceType: ResourceType[LightBulb] = ResourceType[LightBulb]("light-bulbs")
    implicit val lightBulbIdentifiable: Identifiable[LightBulb] = Identifiable.by(_.id)
    implicit val lightBulbPathTo: PathTo[LightBulb] = new PathToId[LightBulb] {
      override def root: Uri = "/light-bulbs"
    }
    implicit val lightBulbJsonApiFormat: JsonApiFormat[LightBulb] = jsonApiFormat[LightBulb]
    implicit lazy val lightBulbIncludes: Includes[LightBulb] = includes[LightBulb]
  }
  private[this] object Robot {
    implicit val robotResourceType: ResourceType[Robot] = ResourceType[Robot]("robots")
    implicit val robotIdentifiable: Identifiable[Robot] = Identifiable.by(_.id)
    implicit val robotPathTo: PathTo[Robot] = new PathToId[Robot] {
      override def root: Uri = "/robots"
    }
    implicit val robotJsonApiFormat: JsonApiFormat[Robot] = jsonApiFormat[Robot]
    implicit lazy val robotIncludes: Includes[Robot] = includes[Robot]
  }
  private[this] object AdvancedRobot {
    implicit val advancedRobotResourceType: ResourceType[AdvancedRobot] =
      ResourceType[AdvancedRobot]("advanced-robots")
    implicit val advancedRobotIdentifiable: Identifiable[AdvancedRobot] = Identifiable.by(_.id)
    implicit val advancedRobotPathTo: PathTo[AdvancedRobot] = new PathToId[AdvancedRobot] {
      override def root: Uri = "/advanced-robots"
    }
    implicit val advancedRobotJsonApiFormat: JsonApiFormat[AdvancedRobot] = jsonApiFormat[AdvancedRobot]
    implicit lazy val advancedRobotIncludes: Includes[AdvancedRobot] = includes[AdvancedRobot]
  }
  // Fixture instances covering reference (id-only) and loaded relationship variants.
  private[this] object Examples {
    val limb = Limb("l", 3)
    val limb2 = Limb("y", 9)
    val lightBulb = LightBulb("b", 0xff1100L)
    val eye = Eye("e", 60.0f, None)
    val robot1 = Robot("r1", ToOne.loaded(limb2), PolyToOne.reference[Module, Limb](limb.id))
    val robot2 = Robot("r2", ToOne.loaded(limb2), PolyToOne.loaded(lightBulb))
    val robot3 = Robot("r3", ToOne.loaded(limb2), PolyToOne.loaded(eye))
    val advancedRobot1 = AdvancedRobot("a1", PolyToMany.reference)
    val advancedRobot2 =
      AdvancedRobot("a2", PolyToMany.loaded(Seq(Coproduct[Module](eye), Coproduct[Module](lightBulb))))
  }
  "jsonApiFormat" should {
    // Primarily a compilation test: deriving write/included for every fixture must succeed.
    "at least compile" in {
      val pjr1 = implicitly[JsonApiFormat[Robot]].write(Examples.robot1)
      val pjr2 = implicitly[JsonApiFormat[Robot]].write(Examples.robot2)
      val pjr3 = implicitly[JsonApiFormat[Robot]].write(Examples.robot3)
      val ijr1 = implicitly[JsonApiFormat[Robot]].included(Examples.robot1)
      val ijr2 = implicitly[JsonApiFormat[Robot]].included(Examples.robot2)
      val ijr3 = implicitly[JsonApiFormat[Robot]].included(Examples.robot3)
      val pja1 = implicitly[JsonApiFormat[AdvancedRobot]].write(Examples.advancedRobot1)
      val pja2 = implicitly[JsonApiFormat[AdvancedRobot]].write(Examples.advancedRobot2)
      val ija1 = implicitly[JsonApiFormat[AdvancedRobot]].included(Examples.advancedRobot1)
      val ija2 = implicitly[JsonApiFormat[AdvancedRobot]].included(Examples.advancedRobot2)
      ok
    }
    // Verifies include paths resolve through both plain and polymorphic relationships.
    "includes work" in {
      Eye.eyeIncludes.includesAllowed("friend", "friend.arm", "friend.module") must beTrue
      Robot.robotIncludes.includesAllowed("arm", "module") must beTrue
      AdvancedRobot.advancedRobotIncludes.includesAllowed("modules") must beTrue
    }
  }
}
| Doikor/jsonapi-scala | core/src/test/scala/com/qvantel/jsonapi/CoproductSpec.scala | Scala | bsd-3-clause | 7,559 |
/*
* Copyright 2015 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.numerics
object BSpline {

  // Constant reused by the cubic branch below.
  private val twoByThree = 2.0 / 3.0

  /**
   * Evaluates the B-spline basis function of degree `n` at `x`.
   *
   * Only degrees 0 through 3 are supported; any other degree throws
   * a [[NotImplementedError]].
   *
   * @param n degree of the basis function (0 to 3)
   * @param x evaluation point
   * @return value of the degree-`n` B-spline basis function at `x`
   */
  def nthOrderBSpline(n: Int)(x: Double): Double = {
    val ax: Double = scala.math.abs(x)
    n match {
      case 0 =>
        // Box function: 1 inside (-0.5, 0.5), 0.5 exactly on the boundary, 0 outside.
        if (ax < 0.5) 1.0
        else if (ax == 0.5) 0.5
        else 0
      case 1 =>
        // Triangle (hat) function with support [-1, 1].
        if (ax <= 1.0) 1.0 - ax
        else 0
      case 2 =>
        // Piecewise quadratic with support [-1.5, 1.5).
        if (x < -1.5) 0
        else if (x < -0.5) 0.5 * (x + 1.5) * (x + 1.5)
        else if (x < 0.5) -(x + 0.5) * (x + 0.5) + (x - 0.5) + 1.5
        else if (x < 1.5) 0.5 * (1 - (x - 0.5)) * (1 - (x - 0.5))
        else 0
      case 3 =>
        // Piecewise cubic with support (-2, 2).
        if (ax < 1) {
          twoByThree - ax * ax + 0.5 * (ax * ax * ax)
        } else if (ax < 2) {
          val t = 2.0 - ax
          t * t * t / 6.0
        } else 0
      case _ => throw new NotImplementedError("Bspline of order " + n + " is not implemented yet")
    }
  }
}
| unibas-gravis/scalismo | src/main/scala/scalismo/numerics/BSpline.scala | Scala | apache-2.0 | 1,883 |
package com.ferega.cmrfs.containers
import javax.swing.border.BevelBorder
import scala.swing._
object MainContainer {
  // Shared UI components laid out by the MainContainer panel.
  val controlContainer: ControlContainer = new ControlContainer

  // List of result strings with a fixed preferred height of 200px and a beveled border.
  val resultList: ListView[String] = new ListView[String] {
    preferredSize = new Dimension(0, 200)
    border = new BevelBorder(1)
  }
}
class MainContainer extends BorderPanel {
  import BorderPanel.Position.{Center, North}
  import MainContainer._

  // 5px horizontal and vertical gaps between child components.
  layoutManager.setHgap(5)
  layoutManager.setVgap(5)

  // Control bar on top, result list filling the remaining space.
  layout(controlContainer) = North
  layout(resultList) = Center
}
| tferega/cmrfs | src/main/scala/com/ferega/cmrfs/containers/MainContainter.scala | Scala | bsd-3-clause | 523 |
package org.jetbrains.plugins.scala.testingSupport.specs2.specs2_2_11_3_1M
import org.jetbrains.plugins.scala.testingSupport.specs2.Specs2SpecialCharactersTest
/**
 * Runs the shared special-characters test suite against the Specs2 2.11 / 3.1-M binding.
 *
 * @author Roman.Shein
 * @since 27.01.2015.
 */
class Specs2_2_11_3_1_M_SpecialCharactersTest extends Specs2SpecialCharactersTest with Specs2_2_11_3_1_M_Base
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/specs2/specs2_2_11_3_1M/Specs2_2_11_3_1_M_SpecialCharactersTest.scala | Scala | apache-2.0 | 333 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature
import breeze.linalg.{DenseVector => BDV}
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.rdd.RDD
/**
 * :: Experimental ::
 * Inverse document frequency (IDF).
 * The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`, where `m` is the total
 * number of documents and `d(t)` is the number of documents that contain term `t`.
 *
 * Terms that appear in fewer than `minDocFreq` documents receive an IDF of 0, which yields
 * TF-IDF values of 0 for those terms.
 *
 * @param minDocFreq minimum number of documents a term must appear in to receive a
 *                   non-zero IDF
 */
@Experimental
class IDF(val minDocFreq: Int) {

  def this() = this(0)

  // TODO: Allow different IDF formulations.

  /**
   * Computes the inverse document frequency.
   * @param dataset an RDD of term frequency vectors
   */
  def fit(dataset: RDD[Vector]): IDFModel = {
    // Aggregate per-term document frequencies across the cluster, then derive the IDF vector.
    val aggregator = dataset.treeAggregate(
      new IDF.DocumentFrequencyAggregator(minDocFreq = minDocFreq))(
      seqOp = (df, v) => df.add(v),
      combOp = (df1, df2) => df1.merge(df2))
    new IDFModel(aggregator.idf())
  }

  /**
   * Computes the inverse document frequency (Java-friendly overload).
   * @param dataset a JavaRDD of term frequency vectors
   */
  def fit(dataset: JavaRDD[Vector]): IDFModel = fit(dataset.rdd)
}
private object IDF {

  /**
   * Document frequency aggregator: counts, per term index, in how many documents
   * the term occurs with a positive frequency. Mutable; designed for use with
   * `treeAggregate` (one instance per partition, combined via `merge`).
   */
  class DocumentFrequencyAggregator(val minDocFreq: Int) extends Serializable {

    /** number of documents seen so far */
    private var m = 0L
    /** document frequency vector; lazily allocated on the first document */
    private var df: BDV[Long] = _

    def this() = this(0)

    /**
     * Adds a new document's term frequency vector; a term is counted at most once
     * per document (any positive frequency increments its document count by 1).
     */
    def add(doc: Vector): this.type = {
      if (isEmpty) {
        // First document fixes the vocabulary size.
        df = BDV.zeros(doc.size)
      }
      doc match {
        case SparseVector(size, indices, values) =>
          val nnz = indices.size
          var k = 0
          while (k < nnz) {
            if (values(k) > 0) {
              df(indices(k)) += 1L
            }
            k += 1
          }
        case DenseVector(values) =>
          val n = values.size
          var j = 0
          while (j < n) {
            if (values(j) > 0.0) {
              df(j) += 1L
            }
            j += 1
          }
        case other =>
          throw new UnsupportedOperationException(
            s"Only sparse and dense vectors are supported but got ${other.getClass}.")
      }
      m += 1L
      this
    }

    /** Merges another aggregator into this one (document counts and frequencies add up). */
    def merge(other: DocumentFrequencyAggregator): this.type = {
      if (!other.isEmpty) {
        m += other.m
        if (df == null) {
          // This side saw no documents; adopt a copy so later adds don't mutate `other`.
          df = other.df.copy
        } else {
          df += other.df
        }
      }
      this
    }

    private def isEmpty: Boolean = m == 0L

    /**
     * Returns the current IDF vector: log((m + 1) / (df(t) + 1)) per term, or 0 for
     * terms below `minDocFreq`. Throws IllegalStateException if no document was added.
     */
    def idf(): Vector = {
      if (isEmpty) {
        throw new IllegalStateException("Haven't seen any document yet.")
      }
      val n = df.length
      val inv = new Array[Double](n)
      var j = 0
      while (j < n) {
        /*
         * If the term is not present in the minimum
         * number of documents, set IDF to 0. This
         * will cause multiplication in IDFModel to
         * set TF-IDF to 0.
         *
         * Since arrays are initialized to 0 by default,
         * we just omit changing those entries.
         */
        if (df(j) >= minDocFreq) {
          inv(j) = math.log((m + 1.0) / (df(j) + 1.0))
        }
        j += 1
      }
      Vectors.dense(inv)
    }
  }
}
/**
 * :: Experimental ::
 * Model holding per-term inverse document frequencies; transforms term frequency (TF)
 * vectors into TF-IDF vectors.
 */
@Experimental
class IDFModel private[spark] (val idf: Vector) extends Serializable {

  /**
   * Transforms term frequency (TF) vectors to TF-IDF vectors.
   *
   * If `minDocFreq` was set for the IDF calculation, terms that occur in fewer than
   * `minDocFreq` documents have an IDF entry of 0 and thus produce TF-IDF values of 0.
   *
   * @param dataset an RDD of term frequency vectors
   * @return an RDD of TF-IDF vectors
   */
  def transform(dataset: RDD[Vector]): RDD[Vector] = {
    // Broadcast the IDF vector once rather than capturing it in each task closure.
    val bcIdf = dataset.context.broadcast(idf)
    dataset.mapPartitions { iter =>
      iter.map(v => IDFModel.transform(bcIdf.value, v))
    }
  }

  /**
   * Transforms a single term frequency (TF) vector to a TF-IDF vector.
   *
   * @param v a term frequency vector
   * @return a TF-IDF vector
   */
  def transform(v: Vector): Vector = IDFModel.transform(idf, v)

  /**
   * Transforms term frequency (TF) vectors to TF-IDF vectors (Java version).
   * @param dataset a JavaRDD of term frequency vectors
   * @return a JavaRDD of TF-IDF vectors
   */
  def transform(dataset: JavaRDD[Vector]): JavaRDD[Vector] =
    transform(dataset.rdd).toJavaRDD()
}
private object IDFModel {

  /**
   * Scales a term frequency (TF) vector by an IDF vector, producing a TF-IDF vector.
   *
   * @param idf an IDF vector
   * @param v a term frequency vector
   * @return a TF-IDF vector
   */
  def transform(idf: Vector, v: Vector): Vector = {
    val n = v.size
    v match {
      case SparseVector(size, indices, values) =>
        // Only the stored entries need scaling; the index array is unchanged.
        val scaled = Array.tabulate(indices.length)(k => values(k) * idf(indices(k)))
        Vectors.sparse(n, indices, scaled)
      case DenseVector(values) =>
        Vectors.dense(Array.tabulate(n)(j => values(j) * idf(j)))
      case other =>
        throw new UnsupportedOperationException(
          s"Only sparse and dense vectors are supported but got ${other.getClass}.")
    }
  }
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala | Scala | apache-2.0 | 6,800 |
/**
* Thanks to joescii
* https://github.com/joescii/type-prog
*/
package playground.typelevel
/**
 * Type-level natural number ("size") in Peano encoding: [[Size0]] is zero and
 * [[SizeN]] of a predecessor is its successor. `plus` is addition computed entirely
 * by the type checker.
 */
sealed trait SizeType {
  // Type-level addition: the size obtained by adding `That` to this size.
  type plus[That <: SizeType] <: SizeType
}
/** Zero: adding any size to zero yields that size unchanged (0 + m = m). */
sealed trait Size0 extends SizeType {
  override type plus[That <: SizeType] = That
}
/** Successor: (n + 1) + m = (n + m) + 1, recursing on the predecessor type. */
sealed trait SizeN[Prev <: SizeType] extends SizeType {
  override type plus[That <: SizeType] = SizeN[Prev#plus[That]]
}
| falconepl/scala-playground | src/main/scala/playground/typelevel/SizeType.scala | Scala | mit | 378 |
package org.opencommercesearch.api.common
/*
* Licensed to OpenCommerceSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. OpenCommerceSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.solr.client.solrj.SolrQuery
/**
 * Mixin that adds field-facet configuration to a Solr query.
 *
 * @author gsegura
 */
trait FacetQuery {

  /**
   * Configures `query` to return only facet counts for `facetField`.
   *
   * Rows are set to zero, so no documents are returned -- call this when only the facets
   * themselves are needed (e.g. listing all brands in the products catalog), not actual
   * search results.
   *
   * @param facetField field to facet on
   * @param query Solr query to add the facet parameters to
   * @return the same Solr query with the facet parameters applied
   */
  def withFieldFacet(facetField: String, query: SolrQuery): SolrQuery = {
    query.setRows(0)
    query.setFacet(true)
    query.addFacetField(facetField)
    query.setFacetMinCount(1)
  }
}
| madickson/opencommercesearch | opencommercesearch-api/app/org/opencommercesearch/api/common/FacetQuery.scala | Scala | apache-2.0 | 1,600 |
package net.addictivesoftware.deploymentagent
import akka.actor._
import akka.routing.Listeners
object Dispatcher {
  /**
   * Props factory for [[Dispatcher]].
   *
   * Fix: `Dispatcher` has a no-argument constructor, so no constructor arguments may be
   * passed to `Props` -- the previous `Props(classOf[Dispatcher], name)` would fail at
   * actor creation with an IllegalArgumentException ("no matching constructor found").
   * The `name` parameter is retained for source compatibility; pass it as the actor name
   * to `actorOf(props, name)` instead.
   */
  def props(name: String): Props = Props(classOf[Dispatcher])
}
// Actor that registers a handler for Broadcast messages carrying another Dispatcher.
// NOTE(review): the intended behavior (becoming the received dispatcher for all
// listeners) is commented out, so Broadcast messages are currently matched and ignored.
class Dispatcher extends ComposableActor with Listeners {
  receiveBuilder += {
    case Broadcast(obj:Dispatcher) => {
      //withListeners {
      //  become {
      //    obj
      //  }
      //}
    }
  }
}
| gertjana/DeploymentAgentAkka | src/main/scala/net/addictivesoftware/deploymentagent/Dispatcher.scala | Scala | apache-2.0 | 402 |
package uk.skelty.ScalIRC
object Runner {
  /** Entry point: starts the IRC client and blocks until it disconnects. */
  def main(args: Array[String]): Unit = {
    println("Loading ScalaBot...")
    val client = new IRCClient()
    client.connect()
    println("Shutting down!")
  }
}
| SkylarKelty/scalirc | src/main/scala/uk/skelty/ScalIRC/Runner.scala | Scala | mit | 193 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.web.core
import com.typesafe.scalalogging.LazyLogging
import javax.servlet.ServletContext
import org.scalatra.servlet.RichServletContext
import org.springframework.context.{ApplicationContext, ApplicationContextAware}
import org.springframework.web.context.ServletContextAware

import scala.beans.BeanProperty
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
 * Spring bean that mounts every [[GeoMesaScalatraServlet]] bean found in the application
 * context onto the servlet context under `rootPath`, plus the swagger resources endpoint.
 */
class SpringScalatraBootstrap extends ApplicationContextAware with ServletContextAware with LazyLogging {

  @BeanProperty var applicationContext: ApplicationContext = _
  @BeanProperty var servletContext: ServletContext = _
  @BeanProperty var rootPath: String = GeoMesaScalatraServlet.DefaultRootPath

  /**
   * Called by Spring after property injection; mounts each servlet bean at
   * `/$rootPath/${servlet.root}` and the resources app at `/api-docs`.
   */
  def init(): Unit = {
    val richCtx = RichServletContext(servletContext)
    // Explicit asScala decorator instead of relying on the deprecated implicit JavaConversions.
    val servlets = applicationContext.getBeansOfType(classOf[GeoMesaScalatraServlet]).asScala
    for ((name, servlet) <- servlets) {
      val path = s"$rootPath/${servlet.root}"
      logger.info(s"Mounting servlet bean '$name' at path '/$path'")
      richCtx.mount(servlet, s"/$path/*", path) // name is needed for swagger support to work
    }
    richCtx.mount(applicationContext.getBean("geomesaResourcesApp").asInstanceOf[ResourcesApp], "/api-docs")
  }
}
| elahrvivaz/geomesa | geomesa-web/geomesa-web-core/src/main/scala/org/locationtech/geomesa/web/core/SpringScalatraBootstrap.scala | Scala | apache-2.0 | 1,689 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.errors.TreeNodeException
import org.apache.spark.sql.catalyst.trees
/** Direction of a [[SortOrder]]: either [[Ascending]] or [[Descending]]. */
abstract sealed class SortDirection
case object Ascending extends SortDirection
case object Descending extends SortDirection
/**
 * An expression that can be used to sort a tuple. This class extends expression primarily so
 * that transformations over expressions will descend into its child.
 */
case class SortOrder(child: Expression, direction: SortDirection) extends Expression
  with trees.UnaryNode[Expression] {

  override def dataType = child.dataType
  override def nullable = child.nullable

  // SortOrder itself is never evaluated; it only carries sorting metadata.
  override def eval(input: Row = null): EvaluatedType =
    throw new TreeNodeException(this, s"No function to evaluate expression. type: ${this.nodeName}")

  override def toString = {
    val dir = direction match {
      case Ascending  => "ASC"
      case Descending => "DESC"
    }
    s"$child $dir"
  }
}
| hengyicai/OnlineAggregationUCAS | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala | Scala | apache-2.0 | 1,753 |
package com.twitter.finatra.kafkastreams.config
import com.twitter.conversions.StorageUnitOps._
import com.twitter.finagle.stats.{LoadedStatsReceiver, StatsReceiver}
import com.twitter.finatra.kafkastreams.internal.stats.RocksDBStatsCallback
import com.twitter.inject.Injector
import com.twitter.jvm.numProcs
import com.twitter.util.logging.Logging
import com.twitter.util.StorageUnit
import java.util
import org.apache.kafka.streams.state.RocksDBConfigSetter
import org.rocksdb.{
BlockBasedTableConfig,
BloomFilter,
ColumnFamilyOptionsInterface,
CompactionStyle,
CompressionType,
InfoLogLevel,
LRUCache,
Options,
Statistics,
StatisticsCollector,
StatsCollectorInput,
StatsLevel
}
/**
 * Companion holding the Kafka Streams config keys, defaults and documentation strings for
 * every tunable RocksDB option, plus state shared across all RocksDB instances on this
 * process (block cache, stats receiver).
 *
 * Each option is a triple of vals: the config key, its default, and its doc string.
 */
object FinatraRocksDBConfig {

  val RocksDbBlockCacheSizeConfig = "rocksdb.block.cache.size"
  val RocksDbBlockCacheSizeConfigDefault: StorageUnit = 200.megabytes
  val RocksDbBlockCacheSizeConfigDoc =
    """Size of the rocksdb block cache per task. We recommend that this should be about 1/3 of
      |your total memory budget. The remaining free memory can be left for the OS page cache""".stripMargin

  val RocksDbBlockCacheShardBitsConfig = "rocksdb.block.cache.shard.bits"
  val RocksDbBlockCacheShardBitsConfigDefault: Int = 1
  val RocksDbBlockCacheShardBitsConfigDoc =
    """Cache is is sharded 2^bits shards by hash of the key. Setting the value to -1 will
      |cause auto determine the size with starting size of 512KB. Shard bits will not exceed 6.
      |If mutex locking is frequent and database size is smaller then RAM, increasing this value
      |will improve locking as more shards will be available.
    """.stripMargin

  val RocksDbLZ4Config = "rocksdb.lz4"
  val RocksDbLZ4ConfigDefault: Boolean = false
  val RocksDbLZ4ConfigDoc =
    "Enable RocksDB LZ4 compression. (See https://github.com/facebook/rocksdb/wiki/Compression)"

  val RocksDbEnableStatistics = "rocksdb.statistics"
  val RocksDbEnableStatisticsDefault: Boolean = false
  val RocksDbEnableStatisticsDoc =
    """Enable RocksDB statistics. Note: RocksDB Statistics could add 5-10% degradation in performance
      |(See https://github.com/facebook/rocksdb/wiki/Statistics)""".stripMargin

  val RocksDbStatCollectionPeriodMs = "rocksdb.statistics.collection.period.ms"
  val RocksDbStatCollectionPeriodMsDefault: Int = 60000
  val RocksDbStatCollectionPeriodMsDoc = "Set the period in milliseconds for stats collection."

  val RocksDbInfoLogLevel = "rocksdb.log.info.level"
  val RocksDbInfoLogLevelDefault = "DEBUG_LEVEL"
  val RocksDbInfoLogLevelDoc =
    """Level of logging for rocksdb LOG file.
      |DEBUG_LEVEL, INFO_LEVEL, WARN_LEVEL, ERROR_LEVEL, FATAL_LEVEL, HEADER_LEVEL""".stripMargin

  val RocksDbMaxLogFileSize = "rocksdb.log.max.file.size"
  val RocksDbMaxLogFileSizeDefault: StorageUnit = 50.megabytes
  val RocksDbMaxLogFileSizeDoc =
    s"""Specify the maximal size of the info log file. If the log file is larger then
       |"rocksdb.log.keep.file.num" a new log file will be created.""".stripMargin

  val RocksDbKeepLogFileNum = "rocksdb.log.keep.file.num"
  val RocksDbKeepLogFileNumDefault: Int = 10
  val RocksDbKeepLogFileNumDoc = "Maximal info log files to be kept."

  val RocksDbCacheIndexAndFilterBlocks = "rocksdb.cache.index.and.filter.blocks"
  val RocksDbCacheIndexAndFilterBlocksDefault: Boolean = true
  val RocksDbCacheIndexAndFilterBlocksDoc =
    """Store index and filter blocks into the block cache. This bounds the memory usage,
      | which is desirable when running in a container.
      |(See https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks)""".stripMargin

  val RocksDbCachePinL0IndexAndFilterBlocks = "rocksdb.cache.pin.l0.index.and.filter.blocks"
  val RocksDbCachePinL0IndexAndFilterBlocksDefault: Boolean = true
  val RocksDbCachePinL0IndexAndFilterBlocksDoc =
    """Pin level-0 file's index and filter blocks in block cache, to avoid them from being evicted.
      | This setting is generally recommended to be turned on along to minimize the negative
      | performance impact resulted by turning on RocksDbCacheIndexAndFilterBlocks.
      |(See https://github.com/facebook/rocksdb/wiki/Block-Cache#caching-index-and-filter-blocks)""".stripMargin

  val RocksDbTableConfigBlockSize = "rocksdb.tableconfig.block.size"
  val RocksDbTableConfigBlockSizeDefault: StorageUnit = (16 * 1024).bytes
  val RocksDbTableConfigBlockSizeDoc =
    s"""Approximate size of user data packed per block. This is the uncompressed size and on disk
       |size will differ due to compression. Increasing block_size decreases memory usage and space
       |amplification, but increases read amplification.""".stripMargin

  val RocksDbTableConfigBoomFilterKeyBits = "rocksdb.tableconfig.bloomfilter.key.bits"
  val RocksDbTableConfigBoomFilterKeyBitsDefault: Int = 10
  val RocksDbTableConfigBoomFilterKeyBitsDoc =
    """
      |Bits per key in bloom filter. A bits_per_key if 10, yields a filter with ~ 1% false positive
      |rate.
      |(See https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#bloom-filters)""".stripMargin

  val RocksDbTableConfigBoomFilterMode = "rocksdb.tableconfig.bloomfilter.mode"
  val RocksDbTableConfigBoomFilterModeDefault: Boolean = true
  val RocksDbTableConfigBoomFilterModeDoc =
    s"""Toggle the mode of the bloom filer between Block-based filter (true) and Full filter (false).
       |Block-based filter is a filter for each block where Full filter is a filter per file.
       |If multiple keys are contained in the same file, the Block-based filter will serve best.
       |If keys are same among database files then Full filter is best.""".stripMargin

  val RocksDbDatabaseWriteBufferSize = "rocksdb.db.write.buffer.size"
  val RocksDbDatabaseWriteBufferSizeDefault: StorageUnit = 0.bytes
  val RocksDbDatabaseWriteBufferSizeDoc =
    """Data stored in memtables across all column families before writing to disk. Disabled by
      |specifying a 0 value, can be enabled by setting positive value in bytes. This value can be
      |used to control the total memtable sizes.""".stripMargin

  val RocksDbWriteBufferSize = "rocksdb.write.buffer.size"
  val RocksDbWriteBufferSizeDefault: StorageUnit = 1.gigabyte
  val RocksDbWriteBufferSizeDoc =
    """Data stored in memory (stored in unsorted log on disk) before writing tto sorted on-disk
      |file. Larger values will increase performance, especially on bulk loads up to
      |max_write_buffer_number write buffers available. This value can be used to adjust the control
      |of memory usage. Larger write buffers will cause longer recovery on file open.""".stripMargin

  val RocksDbManifestPreallocationSize = "rocksdb.manifest.preallocation.size"
  val RocksDbManifestPreallocationSizeDefault: StorageUnit = 4.megabytes
  val RocksDbManifestPreallocationSizeDoc =
    """Number of bytes to preallocate (via fallocate) the manifest files.
      |Default is 4mb, which is reasonable to reduce random IO as well as prevent overallocation
      |for mounts that preallocate large amounts of data (such as xfs's allocsize option).""".stripMargin

  val RocksDbMinWriteBufferNumberToMerge = "rocksdb.min.write.buffer.num.merge"
  val RocksDbMinWriteBufferNumberToMergeDefault: Int = 1
  val RocksDbMinWriteBufferNumberToMergeDoc =
    """Minimum number of write buffers that will be merged together before flushing to storage.
      |Setting of 1 will cause L0 flushed as individual files and increase read amplification
      |as all files will be scanned.""".stripMargin

  val RocksDbMaxWriteBufferNumber = "rocksdb.max.write.buffer.num"
  val RocksDbMaxWriteBufferNumberDefault: Int = 2
  val RocksDbMaxWriteBufferNumberDoc =
    """Maximum number of write buffers that will be stored in memory. While 1 buffer is flushed to disk
      |other buffers can be written.""".stripMargin

  val RocksDbBytesPerSync = "rocksdb.bytes.per.sync"
  val RocksDbBytesPerSyncDefault: StorageUnit = 1048576.bytes
  val RocksDbBytesPerSyncDoc =
    "Setting for OS to sync files to disk in the background while they are written."

  val RocksDbMaxBackgroundCompactions = "rocksdb.max.background.compactions"
  val RocksDbMaxBackgroundCompactionsDefault: Int = 4
  val RocksDbMaxBackgroundCompactionsDoc =
    """Maximum background compactions, increased values will fully utilize CPU and storage for
      |compaction routines. If stats indication higher latency due to compaction, this value could
      |be adjusted.
      |(https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#parallelism-options)""".stripMargin

  val RocksDbMaxBackgroundFlushes = "rocksdb.max.background.flushes"
  val RocksDbMaxBackgroundFlushesDefault: Int = 2
  val RocksDbMaxBackgroundFlushesDoc =
    """Maximum number of concurrent background flushes.
      |(https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#parallelism-options)
    """.stripMargin

  val RocksDbIncreaseParallelism = "rocksdb.parallelism"
  // def (not val): evaluated lazily so numProcs() reflects the runtime environment.
  def RocksDbIncreaseParallelismDefault(): Int = numProcs().toInt
  val RocksDbIncreaseParallelismDoc =
    """Increases the total number of threads used for flushes and compaction. If rocks seems to be
      |an indication of bottleneck, this is a value you want to increase for addressing that.""".stripMargin

  val RocksDbInplaceUpdateSupport = "rocksdb.inplace.update.support"
  val RocksDbInplaceUpdateSupportDefault: Boolean = true
  val RocksDbInplaceUpdateSupportDoc =
    """Enables thread safe updates in place. If true point-in-time consistency using snapshot/iterator
      |will not be possible. Set this to true if not using snapshot iterators, otherwise false.""".stripMargin

  val RocksDbAllowConcurrentMemtableWrite = "rocksdb.allow.concurrent.memtable.write"
  val RocksDbAllowConcurrentMemtableWriteDefault: Boolean = false
  val RocksDbAllowConcurrentMemtableWriteDoc =
    """Set true if multiple writers to modify memtables in parallel. This flag is not compatible
      |with inplace update support or filter deletes, default should be false unless memtable used
      |supports it.""".stripMargin

  val RocksDbEnableWriteThreadAdaptiveYield = "rocksdb.enable.write.thread.adaptive.yield"
  val RocksDbEnableWriteThreadAdaptiveYieldDefault: Boolean = false
  val RocksDbEnableWriteThreadAdaptiveYieldDoc =
    """Set true to enable thread synchronizing with write batch group leader. Concurrent workloads
      |can be improved by setting to true.""".stripMargin

  val RocksDbCompactionStyle = "rocksdb.compaction.style"
  val RocksDbCompactionStyleDefault = "UNIVERSAL"
  val RocksDbCompactionStyleDoc =
    """Set compaction style for database.
      |UNIVERSAL, LEVEL, FIFO.""".stripMargin

  val RocksDbCompactionStyleOptimize = "rocksdb.compaction.style.optimize"
  val RocksDbCompactionStyleOptimizeDefault = true
  val RocksDbCompactionStyleOptimizeDoc =
    s"""Heavy workloads and big datasets are not the default mode of operation for rocksdb databases.
       |Enabling optimization will use rocksdb internal configuration for a range of values calculated.
       |The values calculated are based on the flag value "rocksdb.compaction.style.memtable.budget"
       |for memory given to optimize performance. Generally this should be true but means other settings
       |values might be different from values specified on the commandline.
       |(See https://github.com/facebook/rocksdb/blob/master/options/options.cc)""".stripMargin

  val RocksDbMaxBytesForLevelBase = "rocksdb.max.bytes.for.level.base"
  val RocksDbMaxBytesForLevelBaseDefault: StorageUnit = 1.gigabyte
  val RocksDbMaxBytesForLevelBaseDoc =
    """Total size of level 1, should be about the same size as level 0. Lowering this value
      |can help control memory usage.""".stripMargin

  val RocksDbLevelCompactionDynamicLevelBytes = "rocksdb.level.compaction.dynamic.level.bytes"
  val RocksDbLevelCompactionDynamicLevelBytesDefault: Boolean = true
  val RocksDbLevelCompactionDynamicLevelBytesDoc =
    """If true, enables rockdb to pick target size for each level dynamically.""".stripMargin

  val RocksDbCompactionStyleMemtableBudget = "rocksdb.compaction.style.memtable.budget"
  val RocksDbCompactionStyleMemtableBudgetDefault: StorageUnit =
    ColumnFamilyOptionsInterface.DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET.bytes
  val RocksDbCompactionStyleMemtableBudgetDoc =
    s"""Memory budget in bytes used when "rocksdb.compaction.style.optimize" is true."""

  // BlockCache to be shared by all RocksDB instances created on this instance.
  // Note: That a single Kafka Streams instance may get multiple tasks assigned to it
  // and each stateful task will have a separate RocksDB instance created.
  // This cache will be shared across all the tasks.
  // See: https://github.com/facebook/rocksdb/wiki/Block-Cache
  private var SharedBlockCache: LRUCache = _

  // Defaults to the process-global stats receiver until init() injects the real one.
  private var globalStatsReceiver: StatsReceiver = LoadedStatsReceiver

  // Called once at startup to wire in the injector-provided StatsReceiver.
  def init(injector: Injector): Unit = {
    globalStatsReceiver = injector.instance[StatsReceiver]
  }
}
/**
 * Maintains the RocksDB configuration used by Kafka Streams.
 *
 * Registered as a [[RocksDBConfigSetter]]: Kafka Streams calls [[setConfig]] once per
 * RocksDB-backed state store, and this class mutates the supplied `Options` based on
 * values found in the streams config map, falling back to the defaults declared on
 * the companion object.
 */
class FinatraRocksDBConfig extends RocksDBConfigSetter with Logging {

  //See https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#other-general-options
  /**
   * Entry point invoked by Kafka Streams for every RocksDB instance it creates.
   *
   * @param storeName name of the state store being configured (currently unused)
   * @param options   RocksDB options object to mutate in place
   * @param configs   Kafka Streams configuration map supplying optional overrides
   */
  override def setConfig(
    storeName: String,
    options: Options,
    configs: util.Map[String, AnyRef]
  ): Unit = {
    setTableConfiguration(options, configs)
    setWriteBufferConfiguration(options, configs)
    setOperatingSystemProcessConfiguration(options, configs)
    setDatabaseConcurrency(options, configs)
    setCompactionConfiguration(options, configs)
    setCompression(options, configs)
    setInformationLoggingLevel(options, configs)
    setStatisticsOptions(options, configs)
  }

  /** Configures memtable (write buffer) sizing, merge behavior, and MANIFEST preallocation. */
  private def setWriteBufferConfiguration(
    options: Options,
    configs: util.Map[String, AnyRef]
  ): Unit = {
    val dbWriteBufferSize = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbDatabaseWriteBufferSize,
      FinatraRocksDBConfig.RocksDbDatabaseWriteBufferSizeDefault)

    val writeBufferSize = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbWriteBufferSize,
      FinatraRocksDBConfig.RocksDbWriteBufferSizeDefault)

    val minWriteBufferNumberToMerge = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMinWriteBufferNumberToMerge,
      FinatraRocksDBConfig.RocksDbMinWriteBufferNumberToMergeDefault)

    val maxWriteBufferNumber = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMaxWriteBufferNumber,
      FinatraRocksDBConfig.RocksDbMaxWriteBufferNumberDefault)

    val manifestPreallocationSize = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbManifestPreallocationSize,
      FinatraRocksDBConfig.RocksDbManifestPreallocationSizeDefault)

    options
      .setDbWriteBufferSize(dbWriteBufferSize)
      .setWriteBufferSize(writeBufferSize)
      .setMinWriteBufferNumberToMerge(minWriteBufferNumberToMerge)
      .setMaxWriteBufferNumber(maxWriteBufferNumber)
      .setManifestPreallocationSize(manifestPreallocationSize)
  }

  /**
   * Configures the block-based table format: shared block cache, block size,
   * bloom filter, and index/filter caching.
   *
   * Lazily creates the process-wide SharedBlockCache on first use so that all
   * RocksDB instances in this JVM share a single LRU block cache.
   *
   * NOTE(review): the null-check-then-assign on SharedBlockCache is not
   * synchronized; if two stores initialize concurrently, more than one cache
   * could be created — confirm store initialization is single-threaded.
   */
  private def setTableConfiguration(options: Options, configs: util.Map[String, AnyRef]): Unit = {
    if (FinatraRocksDBConfig.SharedBlockCache == null) {
      val blockCacheSize =
        getBytesOrDefault(
          configs,
          FinatraRocksDBConfig.RocksDbBlockCacheSizeConfig,
          FinatraRocksDBConfig.RocksDbBlockCacheSizeConfigDefault)
      val numShardBits = getIntOrDefault(
        configs,
        FinatraRocksDBConfig.RocksDbBlockCacheShardBitsConfig,
        FinatraRocksDBConfig.RocksDbBlockCacheShardBitsConfigDefault)
      FinatraRocksDBConfig.SharedBlockCache = new LRUCache(blockCacheSize, numShardBits)
    }

    val tableConfig = new BlockBasedTableConfig

    val blockSize = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbTableConfigBlockSize,
      FinatraRocksDBConfig.RocksDbTableConfigBlockSizeDefault)

    val bitsPerKey = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbTableConfigBoomFilterKeyBits,
      FinatraRocksDBConfig.RocksDbTableConfigBoomFilterKeyBitsDefault)

    val useBlockBasedMode = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbTableConfigBoomFilterMode,
      FinatraRocksDBConfig.RocksDbTableConfigBoomFilterModeDefault)

    val cacheIndexAndFilterBlocks = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbCacheIndexAndFilterBlocks,
      FinatraRocksDBConfig.RocksDbCacheIndexAndFilterBlocksDefault)

    val cachePinL0IndexAndFilterBlocks = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbCachePinL0IndexAndFilterBlocks,
      FinatraRocksDBConfig.RocksDbCachePinL0IndexAndFilterBlocksDefault)

    tableConfig.setBlockSize(blockSize)
    tableConfig.setBlockCache(FinatraRocksDBConfig.SharedBlockCache)
    tableConfig.setFilter(new BloomFilter(bitsPerKey, useBlockBasedMode))
    tableConfig.setCacheIndexAndFilterBlocks(cacheIndexAndFilterBlocks)
    tableConfig.setPinL0FilterAndIndexBlocksInCache(cachePinL0IndexAndFilterBlocks)

    options
      .setTableFormatConfig(tableConfig)
  }

  /**
   * Configures OS/process-level behavior: background sync rate and
   * background compaction/flush thread counts.
   */
  private def setOperatingSystemProcessConfiguration(
    options: Options,
    configs: util.Map[String, AnyRef]
  ): Unit = {
    val bytesPerSync = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbBytesPerSync,
      FinatraRocksDBConfig.RocksDbBytesPerSyncDefault)

    val maxBackgroundCompactions = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMaxBackgroundCompactions,
      FinatraRocksDBConfig.RocksDbMaxBackgroundCompactionsDefault)

    val maxBackgroundFlushes = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMaxBackgroundFlushes,
      FinatraRocksDBConfig.RocksDbMaxBackgroundFlushesDefault)

    val increaseParallelism = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbIncreaseParallelism,
      FinatraRocksDBConfig.RocksDbIncreaseParallelismDefault())

    options
      .setBytesPerSync(bytesPerSync)
      .setMaxBackgroundCompactions(maxBackgroundCompactions)
      .setMaxBackgroundFlushes(maxBackgroundFlushes)
      // RocksDB parallelism is floored at 2 regardless of the configured value.
      .setIncreaseParallelism(Math.max(increaseParallelism, 2))
  }

  /** Configures memtable concurrency: in-place updates, concurrent writes, adaptive yield. */
  private def setDatabaseConcurrency(options: Options, configs: util.Map[String, AnyRef]): Unit = {
    val inplaceUpdateSupport = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbInplaceUpdateSupport,
      FinatraRocksDBConfig.RocksDbInplaceUpdateSupportDefault)

    val allowConcurrentMemtableWrite = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbAllowConcurrentMemtableWrite,
      FinatraRocksDBConfig.RocksDbAllowConcurrentMemtableWriteDefault)

    val enableWriteThreadAdaptiveYield = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbEnableWriteThreadAdaptiveYield,
      FinatraRocksDBConfig.RocksDbEnableWriteThreadAdaptiveYieldDefault)

    options
      .setInplaceUpdateSupport(inplaceUpdateSupport)
      .setAllowConcurrentMemtableWrite(allowConcurrentMemtableWrite)
      .setEnableWriteThreadAdaptiveYield(enableWriteThreadAdaptiveYield)
  }

  /**
   * Configures the compaction style and level sizing, then optionally applies
   * RocksDB's built-in optimization preset for the chosen style using the
   * configured memtable memory budget.
   */
  private def setCompactionConfiguration(
    options: Options,
    configs: util.Map[String, AnyRef]
  ): Unit = {
    val compactionStyle = CompactionStyle.valueOf(
      getStringOrDefault(
        configs,
        FinatraRocksDBConfig.RocksDbCompactionStyle,
        FinatraRocksDBConfig.RocksDbCompactionStyleDefault).toUpperCase)

    val compactionStyleOptimize = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbCompactionStyleOptimize,
      FinatraRocksDBConfig.RocksDbCompactionStyleOptimizeDefault)

    val maxBytesForLevelBase = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMaxBytesForLevelBase,
      FinatraRocksDBConfig.RocksDbMaxBytesForLevelBaseDefault)

    val levelCompactionDynamicLevelBytes = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbLevelCompactionDynamicLevelBytes,
      FinatraRocksDBConfig.RocksDbLevelCompactionDynamicLevelBytesDefault)

    val optimizeWithMemtableMemoryBudget = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbCompactionStyleMemtableBudget,
      FinatraRocksDBConfig.RocksDbCompactionStyleMemtableBudgetDefault)

    options
      .setCompactionStyle(compactionStyle)
      .setMaxBytesForLevelBase(maxBytesForLevelBase)
      .setLevelCompactionDynamicLevelBytes(levelCompactionDynamicLevelBytes)

    // Apply the style-specific preset only when optimization is requested.
    // FIFO (and any future styles) intentionally get no preset.
    compactionStyle match {
      case CompactionStyle.UNIVERSAL if compactionStyleOptimize =>
        options
          .optimizeUniversalStyleCompaction(optimizeWithMemtableMemoryBudget)
      case CompactionStyle.LEVEL if compactionStyleOptimize =>
        options
          .optimizeLevelStyleCompaction(optimizeWithMemtableMemoryBudget)
      case _ =>
    }
  }

  /** Enables LZ4 compression when configured; otherwise leaves the default compression type. */
  private def setCompression(options: Options, configs: util.Map[String, AnyRef]): Unit = {
    val lz4Config = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbLZ4Config,
      FinatraRocksDBConfig.RocksDbLZ4ConfigDefault
    )

    if (lz4Config) {
      options.setCompressionType(CompressionType.LZ4_COMPRESSION)
    }
  }

  /** Sets the verbosity of RocksDB's internal info log. */
  private def setInformationLoggingLevel(
    options: Options,
    configs: util.Map[String, AnyRef]
  ): Unit = {
    val infoLogLevel = InfoLogLevel.valueOf(
      getStringOrDefault(
        configs,
        FinatraRocksDBConfig.RocksDbInfoLogLevel,
        FinatraRocksDBConfig.RocksDbInfoLogLevelDefault).toUpperCase)

    options
      .setInfoLogLevel(infoLogLevel)
  }

  /**
   * Configures log-file rotation and, when enabled, attaches a Statistics object
   * plus a background StatisticsCollector that periodically forwards RocksDB
   * metrics to the global stats receiver.
   *
   * NOTE(review): the StatisticsCollector started here is never shut down in the
   * visible code — confirm its lifecycle is managed elsewhere.
   */
  private def setStatisticsOptions(options: Options, configs: util.Map[String, AnyRef]): Unit = {
    val maxLogFileSize = getBytesOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbMaxLogFileSize,
      FinatraRocksDBConfig.RocksDbMaxLogFileSizeDefault)
    options.setMaxLogFileSize(maxLogFileSize)

    val keepLogFileNum = getIntOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbKeepLogFileNum,
      FinatraRocksDBConfig.RocksDbKeepLogFileNumDefault)
    options.setKeepLogFileNum(keepLogFileNum)

    val enableStatistics = getBooleanOrDefault(
      configs,
      FinatraRocksDBConfig.RocksDbEnableStatistics,
      FinatraRocksDBConfig.RocksDbEnableStatisticsDefault)

    if (enableStatistics) {
      val statistics = new Statistics

      val statsCallback = new RocksDBStatsCallback(FinatraRocksDBConfig.globalStatsReceiver)
      val statsCollectorInput = new StatsCollectorInput(statistics, statsCallback)
      val statsCollector = new StatisticsCollector(
        util.Arrays.asList(statsCollectorInput),
        getIntOrDefault(
          configs,
          FinatraRocksDBConfig.RocksDbStatCollectionPeriodMs,
          FinatraRocksDBConfig.RocksDbStatCollectionPeriodMsDefault)
      )
      statsCollector.start()

      statistics.setStatsLevel(StatsLevel.ALL)

      options
        .setStatistics(statistics)
        .setStatsDumpPeriodSec(20)
    }
  }

  /**
   * Reads a byte-count config value as Long; falls back to the StorageUnit
   * default's byte count when the key is absent.
   */
  private def getBytesOrDefault(
    configs: util.Map[String, AnyRef],
    key: String,
    default: StorageUnit
  ): Long = {
    val valueBytesString = configs.get(key)
    if (valueBytesString != null) {
      valueBytesString.toString.toLong
    } else {
      default.inBytes
    }
  }

  /** Reads an Int config value, or the default when the key is absent. */
  private def getIntOrDefault(configs: util.Map[String, AnyRef], key: String, default: Int): Int = {
    val valueString = configs.get(key)
    if (valueString != null) {
      valueString.toString.toInt
    } else {
      default
    }
  }

  /** Reads a String config value, or the default when the key is absent. */
  private def getStringOrDefault(
    configs: util.Map[String, AnyRef],
    key: String,
    default: String
  ): String = {
    val valueString = configs.get(key)
    if (valueString != null) {
      valueString.toString
    } else {
      default
    }
  }

  /** Reads a Boolean config value, or the default when the key is absent. */
  private def getBooleanOrDefault(
    configs: util.Map[String, AnyRef],
    key: String,
    default: Boolean
  ): Boolean = {
    val valueString = configs.get(key)
    if (valueString != null) {
      valueString.toString.toBoolean
    } else {
      default
    }
  }
}
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/config/FinatraRocksDBConfig.scala | Scala | apache-2.0 | 24,126 |
package scalan
import scalan.linalgebra.{LADslExp, LinearAlgebraExamples}
/**
 * Cake-pattern assembly mixing the Scalan DSL implementation together with the
 * linear-algebra examples and their staged (Exp) implementation.
 */
class ScalanCake extends ScalanDslExp
  with LinearAlgebraExamples with LADslExp {
  // override val cacheElems = false
  // Default GraphViz settings, made implicitly available to graph-emitting methods.
  implicit val vizConfig = defaultGraphVizConfig
}
| scalan/scalan-starter | scalan-starter-core/src/test/scala/scalan/ScalanCake.scala | Scala | apache-2.0 | 247 |
// code-examples/ObjectSystem/sealed/http-script.scala

/**
 * The HTTP request methods modeled as a sealed case-class hierarchy.
 * Sealing the base class lets the compiler check that pattern matches
 * over HttpMethod are exhaustive.
 */
sealed abstract class HttpMethod()

case class Connect(body: String) extends HttpMethod
case class Delete (body: String) extends HttpMethod
case class Get    (body: String) extends HttpMethod
case class Head   (body: String) extends HttpMethod
case class Options(body: String) extends HttpMethod
case class Post   (body: String) extends HttpMethod
case class Put    (body: String) extends HttpMethod
case class Trace  (body: String) extends HttpMethod
/** Prints a one-line description, "&lt;method&gt;: &lt;body&gt;", for the given HTTP method. */
def handle (method: HttpMethod) = {
  val description = method match {
    case Connect(body) => "connect: " + body
    case Delete(body)  => "delete: " + body
    case Get(body)     => "get: " + body
    case Head(body)    => "head: " + body
    case Options(body) => "options: " + body
    case Post(body)    => "post: " + body
    case Put(body)     => "put: " + body
    case Trace(body)   => "trace: " + body
  }
  println(description)
}
// One sample value per HTTP method variant, driven through the handler below.
val methods = List(
  Connect("connect body..."),
  Delete("delete body..."),
  Get("get body..."),
  Head("head body..."),
  Options("options body..."),
  Post("post body..."),
  Put("put body..."),
  Trace("trace body...")
)

methods.foreach(handle)
| XClouded/t4f-core | scala/src/tmp/ObjectSystem/sealed/http-script.scala | Scala | apache-2.0 | 1,273 |
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.core
import org.joda.time.DateTimeZone
/** Represents a securities exchange and the time zone it operates in. */
trait Exchange {
  /** Name of the exchange, e.g. "NYSE". */
  def name: String
  /** Time zone at the exchange's location. */
  def timeZone: DateTimeZone
}
/** Predefined well-known exchanges. */
object Exchange {

  // Time-zone lookups shared by the exchange instances below.
  private def americaNewYork = DateTimeZone.forID("America/New_York")
  private def americaChicago = DateTimeZone.forID("America/Chicago")

  /** The NASDAQ stock exchange (New York). */
  case object Nasdaq extends Exchange {
    def name = "NASDAQ"
    def timeZone = americaNewYork
  }

  /** The New York Stock Exchange. */
  case object Nyse extends Exchange {
    def name = "NYSE"
    def timeZone = americaNewYork
  }

  /** The Chicago Board Options Exchange. */
  case object Cboe extends Exchange {
    def name = "CBOE"
    def timeZone = americaChicago
  }
}
| ViliusN/Crossbow | crossbow-core/src/lt/norma/crossbow/core/Exchange.scala | Scala | gpl-3.0 | 1,365 |
package org.jetbrains.plugins.scala.lang.formatter
import java.io.File
import com.intellij.application.options.CodeStyle
import com.intellij.lang.Language
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.command.CommandProcessor
import com.intellij.openapi.editor.EditorFactory
import com.intellij.openapi.editor.impl.DocumentImpl
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.util.io.FileUtil
import com.intellij.psi.codeStyle.CodeStyleManager
import com.intellij.psi.{PsiDocumentManager, PsiElement, PsiFile, PsiFileFactory}
import com.intellij.testFramework.LightIdeaTestCase
import com.intellij.util.IncorrectOperationException
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.extensions.{CharSeqExt, IteratorExt, PsiElementExt, StringExt}
import org.jetbrains.plugins.scala.lang.formatter.AbstractScalaFormatterTestBase._
import org.jetbrains.plugins.scala.lang.formatting.scalafmt.processors.ScalaFmtPreFormatProcessor
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.util.{MarkersUtils, TestUtils}
import org.junit.Assert._
/**
 * Base class for Scala formatter tests that holds utility methods.
 *
 * Subclasses supply test data (either as files under testdata/psi/formatter or as
 * inline strings) and call one of the doTextTest overloads; the formatting action
 * is run inside a write command and the result is compared against the expected text.
 *
 * @author Denis Zhdanov
 * @since Apr 27, 2010 6:26:29 PM
 */
// NOTE: initially was almost duplicate from Java
abstract class AbstractScalaFormatterTestBase extends LightIdeaTestCase {

  protected def language: Language = ScalaLanguage.INSTANCE

  // Code style settings accessors for the project under test.
  protected def getCommonSettings = getSettings.getCommonSettings(language)
  protected def getScalaSettings = getSettings.getCustomSettings(classOf[ScalaCodeStyleSettings])
  protected def getIndentOptions = getCommonSettings.getIndentOptions
  protected def getSettings = CodeStyle.getSettings(getProject)

  // Short aliases used by the many per-setting tests.
  protected def scalaSettings = getScalaSettings
  protected def commonSettings = getCommonSettings
  protected def ss = getScalaSettings
  protected def cs = getCommonSettings

  implicit protected def project: Project = getProject

  private def codeStyleManager(implicit project: Project): CodeStyleManager =
    CodeStyleManager.getInstance(project)

  override protected def setUp(): Unit = {
    super.setUp()
    TestUtils.disableTimerThread()
  }

  override def tearDown(): Unit = {
    // clean virtual files references to avoid project leaks
    // NOTE: in theory it shouldn't be required because VirtualFiles are not associated with project, they are application-level
    // but for some reason project is leaked in LightVirtualFile via FileManagerImpl.myPsiHardRefKey key, stuck in the user map
    // there was an attempt to fix it in https://github.com/JetBrains/intellij-community/commit/ba9f0e8624ab8e64bd52928e662c154672452ff8
    // but the change was reverted for unknown reason =/
    ScalaFmtPreFormatProcessor.formattedCountMap.clear()
    super.tearDown()
  }

  import scala.jdk.CollectionConverters._

  // Maps each abstract Action to the platform call that performs it.
  private val Actions: Map[Action, TestFormatAction] = Map(
    Action.Reformat -> ((file, ranges) => {
      codeStyleManager.reformatText(file, ranges.asJava)
    }),
    Action.Indent -> ((file, ranges) => {
      ranges match {
        case head :: Nil => codeStyleManager.adjustLineIndent(file, head.getStartOffset)
        case _ => throw new UnsupportedOperationException("Adjusting indents for a collection of ranges is not supported in tests.")
      }
    })
  )

  /** Default in-memory file name derived from the (lower-cased) test name. */
  def tempFileName: String =
    getTestName(true) + ".scala"

  /** Runs a file-based test: &lt;TestName&gt;.scala reformatted must equal &lt;TestName&gt;_after.scala. */
  def doTest(): Unit =
    doTest(getTestName(false) + ".scala", getTestName(false) + "_after.scala")

  def doTest(fileNameBefore: String, fileNameAfter: String): Unit =
    doTextTest(Action.Reformat, loadFile(fileNameBefore), loadFile(fileNameAfter))

  def doTextTest(text: String, textAfter: String): Unit =
    doTextTest(text, textAfter, 1)

  /** Only checks that repeated formatting does not throw; the result text is not verified. */
  def assertFormatterDoesNotFail(text: String, repeats: Int): Unit =
    doTextTest(TestData.apply(text, None, tempFileName, Action.Reformat, Seq(), repeats, checkAfterEachIteration = false))

  def doTextTest(text: String, textAfter: String, repeats: Int): Unit =
    doTextTest(TestData.reformat(text, textAfter, tempFileName, repeats, checkAfterEachIteration = false))

  def doTextTest(text: String, textAfter: String, repeats: Int, checkAfterEachIteration: Boolean = false): Unit =
    doTextTest(TestData.reformat(text, textAfter, tempFileName, repeats, checkAfterEachIteration))

  def doTextTest(text: String, textAfter: String, fileName: String): Unit =
    doTextTest(TestData.reformat(text, textAfter, fileName))

  /** Asserts that formatting `value` is a no-op (already well-formatted). */
  def doTextTest(value: String): Unit =
    doTextTest(value, value)

  def doTextTest(value: String, actionRepeats: Int): Unit =
    doTextTest(value, value, actionRepeats)

  private def doTextTest(action: Action, text: String, textAfter: String): Unit =
    doTextTest(TestData(text, textAfter, tempFileName, action, 1, checkAfterEachIteration = false))

  /** Creates an in-memory PSI file holding `text`. */
  private def initFile(fileName: String, text: String): PsiFile = {
    PsiFileFactory.getInstance(project)
      .createFileFromText(fileName, language, text, true, false)
  }

  /**
   * For a given selection create all possible selections text ranges with borders leaf elements ranges borders.
   * For each selection runs a formatting test and ensures it doesn't break the code.
   * USE WITH CAUTIONS: the amount of selections grows very fast depending on the amount of inner elements.
   * NOTE: for now it only supports selection of a whole valid node
   */
  protected def doAllRangesTextTest(text: String, checkResult: Boolean = true): Unit = {
    val (textClean, selections) = MarkersUtils.extractMarker(text)
    // Default to the whole file when no marker range was given; more than one marker is an error.
    val selection: TextRange = selections match {
      case head :: Nil => head
      case Nil => TextRange.create(0, textClean.length)
      case other => fail(s"expecting single range for all ranges test, but got: $other").asInstanceOf[Nothing]
    }

    val file = initFile(tempFileName, textClean)
    val startElement = file.findElementAt(selection.getStartOffset)
    //val endElement = file.findElementAt(selection.getEndOffset)
    val element = // select non-leaf element
      startElement.withParents.takeWhile(_.startOffset == startElement.startOffset).lastOption.get
    val allRanges = allPossibleSubRanges(element)
    println(s"allRanges.size: ${allRanges.size}")
    // Guard against combinatorial explosion of sub-ranges.
    if (allRanges.size > 1500)
      fail(s"too many ranges: ${allRanges.size}")

    val manager = PsiDocumentManager.getInstance(getProject)
    val document = manager.getDocument(file).ensuring(_ != null, "Don't expect the document to be null")

    for { range <- allRanges } {
      try {
        runCommandInWriteAction(() => try {
          Actions(Action.Reformat).run(file, Seq(range))
        } catch {
          case e: IncorrectOperationException =>
            fail(e.getLocalizedMessage)
        }, "", "")

        if (checkResult) {
          val expected = prepareText(textClean)
          assertEquals(expected, prepareText(document.getText))
          manager.commitDocument(document)
          assertEquals(expected, prepareText(file.getText))
        }
      } catch {
        case t: Throwable =>
          // Report which sub-range broke the formatter before rethrowing.
          System.err.println(s"range: $range")
          System.err.println(s"text: ${textClean.substring(range)}")
          throw t
      }
    }

    val expectedFormats = allRanges.size
    val actualFormats = ScalaFmtPreFormatProcessor.formattedCountMap.get(file.getVirtualFile)
    assertTrue(
      "All generated range should be actually used for formatting",
      actualFormats >= expectedFormats // intermediate formats from platform expected
    )
  }

  /**
   * Collects all text ranges whose borders coincide with borders of PSI elements
   * under `element` (every pair of distinct element borders forms a range).
   */
  private def allPossibleSubRanges(element: PsiElement): Seq[TextRange] = {
    def collectRanges(el: PsiElement): Iterator[TextRange] =
      Iterator(el.getTextRange) ++ el.children.flatMap(collectRanges)
    val allChildRanges = collectRanges(element).toSeq
    val allBorders = allChildRanges.flatMap(r => Seq(r.getStartOffset, r.getEndOffset)).sorted.distinct.toIndexedSeq
    for {
      from <- allBorders.indices.dropRight(1)
      to <- from + 1 until allBorders.size
    } yield new TextRange(allBorders(from), allBorders(to))
  }

  /**
   * Core test driver: applies the requested action `actionRepeats` times (optionally
   * checking the result after each iteration) and asserts both the document and the
   * re-committed PSI match the expected text, when an expected text is provided.
   */
  private def doTextTest(testData: TestData): Unit = {
    val TestData(textBefore, textAfter, fileName, action, selectedRanges, actionRepeats, checkAfterEachIteration) =
      testData

    assertTrue("action should be applied at least once", actionRepeats >= 1)
    if (actionRepeats > 1 && selectedRanges.nonEmpty)
      fail("for now an action can not be applied multiple times for selection")

    val file = initFile(fileName, textBefore)

    val manager = PsiDocumentManager.getInstance(getProject)
    val document = manager.getDocument(file) ensuring(_ != null, "Don't expect the document to be null")

    def check(expected: String): Unit = {
      val expected2 = prepareText(expected)
      assertEquals(expected2, prepareText(document.getText))
      manager.commitDocument(document)
      assertEquals(expected2, prepareText(file.getText))
    }

    runCommandInWriteAction(() => try {
      for (_ <- 0 until actionRepeats) {
        val ranges = if (selectedRanges.nonEmpty) selectedRanges else Seq(file.getTextRange)
        Actions(action).run(file, ranges)
        if (checkAfterEachIteration) {
          textAfter.foreach(check)
        }
      }
    } catch {
      case e: IncorrectOperationException =>
        fail(e.getLocalizedMessage)
    }, "", "")

    textAfter.foreach(check)
  }

  /**
   * Normalizes text for comparison: drops up to two leading "\\n" prefixes,
   * strips trailing spaces via a scratch document, and trims the result.
   */
  protected def prepareText(actual0: String): String = {
    val actual1 = if (actual0.startsWith("\\n")) actual0.substring(1) else actual0
    val actual2 = if (actual1.startsWith("\\n")) actual1.substring(1) else actual1

    // Strip trailing spaces
    val doc = EditorFactory.getInstance.createDocument(actual2)
    runCommandInWriteAction(() => {
      doc.asInstanceOf[DocumentImpl].stripTrailingSpaces(getProject)
    }, "formatting", null)
    doc.getText.trim
  }

  //noinspection ReferencePassedToNls
  private def runCommandInWriteAction(runnable: Runnable, name: String, groupId: String): Unit =
    CommandProcessor.getInstance.executeCommand(getProject, () => {
      ApplicationManager.getApplication.runWriteAction(runnable)
    }, name, groupId)
}
private object AbstractScalaFormatterTestBase {
/** The formatting operation a test exercises: a full reformat or a line-indent adjustment. */
sealed trait Action
object Action {
  case object Reformat extends Action
  case object Indent extends Action
}
/**
 * Full description of one formatter test run.
 *
 * @param textBefore              input text (markers already stripped)
 * @param textAfter               expected result; None means that we just want to test that the formatter doesn't fail
 * @param fileName                name for the in-memory file
 * @param action                  formatting action to apply
 * @param ranges                  explicit selection ranges; empty means the whole file
 * @param actionRepeats           how many times the action is applied
 * @param checkAfterEachIteration whether to assert the expected text after every repeat
 */
case class TestData(
  textBefore: String,
  textAfter: Option[String], // None means that we just want to test that formatter doesnt fail
  fileName: String,
  action: Action,
  ranges: Seq[TextRange],
  actionRepeats: Int,
  checkAfterEachIteration: Boolean
)
object TestData {
  /** Builds a TestData with explicit ranges and a mandatory expected text. */
  def apply(textBefore: String, textAfter: String, fileName: String, action: Action, ranges: Seq[TextRange], actionRepeats: Int): TestData =
    new TestData(textBefore, Some(textAfter), fileName, action, ranges, actionRepeats, checkAfterEachIteration = false)

  /**
   * Builds a TestData from marker-annotated texts: numbered markers in `before`
   * become the selection ranges; markers in `after` are stripped and discarded.
   */
  def apply(before: String, after: String, fileName: String, action: Action, actionRepeats: Int, checkAfterEachIteration: Boolean): TestData = {
    val (beforeWithoutMarkers, selectedTextRanges) = MarkersUtils.extractNumberedMarkers(before)
    val (afterWithoutMarkers, _) = MarkersUtils.extractNumberedMarkers(after)
    TestData(beforeWithoutMarkers, Some(afterWithoutMarkers), fileName, action, selectedTextRanges, actionRepeats, checkAfterEachIteration)
  }

  /** Convenience constructors for the common reformat action. */
  def reformat(before: String, after: String, fileName: String, repeats: Int, checkAfterEachIteration: Boolean): TestData =
    TestData(before, after, fileName, Action.Reformat, repeats, checkAfterEachIteration)

  def reformat(before: String, after: String, fileName: String): TestData =
    TestData(before, after, fileName, Action.Reformat, 1, checkAfterEachIteration = false)
}
/** A formatting operation applied to a file over the given text ranges. */
private trait TestFormatAction {
  def run(file: PsiFile, ranges: Seq[TextRange]): Unit
}
private def loadFile(name: String): String = {
val fullName = (TestUtils.getTestDataPath + "/psi/formatter") + File.separatorChar + name
val text = new String(FileUtil.loadFileText(new File(fullName)))
text.withNormalizedSeparator
}
} | JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/formatter/AbstractScalaFormatterTestBase.scala | Scala | apache-2.0 | 12,272 |
/*
* Copyright 2016 Well-Factored Software Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.wellfactored.playbindings
import shapeless.{::, Generic, HNil, Lazy}
/**
 * This trait provides an implicit function that will generate a ValueWrapper[W,V]
 * for a case class of type `W` that has a single member of type `V`. This uses
 * Shapeless to summon a `Generic[W, V :: HNil]` to assist with the wrapping
 * and unwrapping, so it will actually work with any type `W` that is record-like
 * enough for Shapeless to handle.
 */
trait ValueWrapperGen {

  /**
   * Derives a ValueWrapper for any single-field record-like type.
   *
   * @tparam W the wrapper (case-class) type
   * @tparam V the single wrapped value type
   * @param gen provides the Generic mapping between the wrapper type and the wrapped
   *            value type with optional validation of V when wrapping.
   */
  implicit def genWV[W, V](
    implicit
    gen: Lazy[Generic.Aux[W, V :: HNil]]
  ): ValueWrapper[W, V] =
    new ValueWrapper[W, V] {
      // Wrap by building the single-element HList representation and converting to W.
      override def wrap(v: V): W = gen.value.from(v :: HNil)

      // Unwrap by converting W to its HList representation and taking the head.
      override def unwrap(w: W): V = gen.value.to(w).head
    }
}
/**
* Import `ValueWrapperGen._` in cases where you can't or don't want to extend the
* `ValueWrapperGen` trait yourself.
*/
object ValueWrapperGen extends ValueWrapperGen | WellFactored/play-extras | src/main/scala/com/wellfactored/playbindings/ValueWrapperGen.scala | Scala | apache-2.0 | 1,787 |
package code.blog.snippet
import java.io.BufferedOutputStream
import java.io.File.separator
import java.io.InputStream
import java.nio.file.Files.createDirectories
import java.nio.file.Files.newOutputStream
import java.nio.file.Files.notExists
import java.nio.file.Path
import java.nio.file.Paths
import java.nio.file.StandardOpenOption.APPEND
import java.nio.file.StandardOpenOption.CREATE
import java.text.SimpleDateFormat
object Utils {
// Formats dates as "yyyy-MM-dd" (e.g. 2016-01-31).
// NOTE(review): SimpleDateFormat instances are mutable and not thread-safe;
// confirm these shared vals are only used from a single thread.
val slashDate = new SimpleDateFormat("yyyy-MM-dd")
// Formats only the four-digit year.
val slashYear = new SimpleDateFormat("yyyy")
/* def storeFile(filename: String, inputStream: InputStream): String = {
def getPath(filename: String): Path = {
val file_root = Paths.get("/upload/files/")
if (notExists(file_root)) createDirectories(file_root)
val path = Paths.get(file_root.toString + separator + filename)
path
}
val path = getPath(filename)
val out = new BufferedOutputStream(newOutputStream(path, CREATE, APPEND))
val ba = new Array[Byte](8192)
def doStore() {
inputStream.read(ba) match {
case x if x < 0 =>
case 0 => doStore()
case x => out.write(ba, 0, x); doStore()
}
}
doStore()
inputStream.close
out.close
"succeed"
}*/
} | liaosiwei/blog | src/main/scala/code/blog/snippet/Utils.scala | Scala | mit | 1,262 |
package complexity_analyser
import java.io.File
import java.nio.file.Files.copy
import java.nio.file.StandardCopyOption.REPLACE_EXISTING
import java.util.concurrent.ExecutorService
import json_parser.Error
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
class HaskellProcessor(modelAnswer: File, studentAnswer: File, executorService: ExecutorService) {
// Time threshold in milliseconds.
// NOTE(review): not referenced anywhere in the visible code — confirm it is still used.
private final lazy val TIME_THRESHOLD = 35000

/*
 * Regexes
 */
// Matches "benchmarking tests/word"; group 1 captures the benchmark name.
private final val BenchmarkLine =
  """benchmarking tests/(\w+)""".r

// Matches "number.number" — used to pull the mean value out of a report line.
private final val matchMean =
  """(\d+.\d+)""".r

// Used to find test names and scores
// Matches "word: number / number"
private final val TestLine =
  """(\w+): (\d+) / (\d+)""".r

// Used to find functions in files
// Matches "word some whitespace :: something else" (a Haskell type signature)
private final val FunctionLine =
  """(\w+)\s+::\s+.+""".r

// Map that stores test name and the max score that you can get (taken from model solution)
private final val TestScore = new mutable.HashMap[String, Int]

// Map that stores function name, line and file where it is located
private final val FunctionMap = new mutable.HashMap[String, (Int, String)]

// File name (relative to each answer directory) of the criterion HTML report.
val GRAPH_FILE = "/res.html"
/**
 * Copies Bench.hs to both model solution and student submission
 * Finds all the functions in the student submission
 * so that later we can trace them back
 * Copies the Model test suite to the student, just in case they changed it
 *
 * @throws Exception if the Bench.hs resource is missing or either answer path
 *                   is not a directory
 */
def prepare(): Unit = {
  val benchFile = "/Bench.hs"
  val tests = "/Tests.hs"
  val bench = new File("backend/complexity_analyser/res/Bench.hs")
  // Fail fast on a broken environment before touching any files.
  if (!bench.exists()) throw new Exception("Missing resource Bench.hs")
  if (!modelAnswer.isDirectory) throw new Exception("Model solution should be a directory")
  if (!studentAnswer.isDirectory) throw new Exception("Student submission should be a directory")
  val mod = new File(modelAnswer.toPath.toString + benchFile)
  val modTest = new File(modelAnswer.toPath.toString + tests).toPath
  val studTest = new File(studentAnswer.toPath.toString + tests).toPath
  val stud = new File(studentAnswer.toPath.toString + benchFile)
  // Index every function declared in the student's Haskell sources.
  studentAnswer.listFiles().filter(hFilter).foreach(findFunctions)
  // Ensure that both versions have the same Tests.hs file
  copy(modTest, studTest, REPLACE_EXISTING)
  copy(bench.toPath, mod.toPath, REPLACE_EXISTING)
  copy(bench.toPath, stud.toPath, REPLACE_EXISTING)
}
/**
 * Scans a Haskell source file for top-level type signatures ("name :: type")
 * and records each function name with its 1-based line number and file path
 * in FunctionMap, so scores can later be traced back to source locations.
 *
 * Fix: the original leaked a file handle — the Source returned by
 * Source.fromFile was never closed; it is now closed in a finally block.
 */
def findFunctions(file: File) = {
  val source = Source.fromFile(file)
  try {
    for ((l, i) <- source.getLines().zipWithIndex) {
      l match {
        case FunctionLine(n) =>
          // Line numbers are 1-based for human-readable error reporting.
          FunctionMap += ((n, (i + 1, file.toString)))
        case _ => None
      }
    }
  } finally {
    source.close()
  }
}
private def hFilter(f: File) = f.isFile && f.getName.endsWith(".hs") && f.getName != "Tests.hs"
/**
 * Compiles and runs the Tests executable for both the model and the student
 * submission (in parallel via the executor), records the per-test maximum
 * scores from the model run, then scores the student's output against them.
 */
def runTests() = {
  compileClassOnBoth("Tests")
  val testOutcomeStudent = executorService.submit(new ShellExecutor(s"$studentAnswer/Tests"))
  val testOutcomeModel = executorService.submit(new ShellExecutor(s"$modelAnswer/Tests"))
  // Blocks on the model run first to populate TestScore before scoring the student.
  testOutcomeModel.get.split("\n").foreach(findMaxScoreHeader)
  calculateTestScores(findStudentScore(testOutcomeStudent.get))
}
/**
 * Parses one line of the model test runner's output; when it has the shape
 * "name: score / max", records the maximum score for that test in TestScore.
 * Non-matching lines are ignored.
 */
private def findMaxScoreHeader(line: String): Unit = {
  line match {
    case TestLine(name, _, max) =>
      TestScore += ((name, max.toInt))
    case _ => None
  }
}
/**
 * Parses the student's test-runner output into one (testName, studentScore, maxScore)
 * triple per "name: score / max" line. The maximum recorded from the model run in
 * TestScore takes precedence over the max printed by the student's runner.
 */
private def findStudentScore(line: String) = {
  val results = new ArrayBuffer[(String, Int, Int)]()
  line.split("\n").foreach {
    case TestLine(name, score, m) =>
      results += ((name, score.toInt, TestScore.getOrElse(name, m.toInt)))
    case _ => ()
  }
  results
}
/**
 * Converts per-test results into (errors, overallScore). The score starts at 100
 * and each test the student does not fully pass subtracts a share proportional
 * to the fraction of that test failed; an Error pointing at the function's
 * source location is recorded for each such test.
 *
 * Fix: the original computed `studScore / maxScore` with integer division, which
 * is 0 whenever studScore < maxScore, so every partial pass was penalised as a
 * complete failure. The ratio is now computed in floating point.
 */
private def calculateTestScores(testsResult: Seq[(String, Int, Int)]) = {
  var score: Double = 100.0d
  val scorePerTest = Math.round(score / testsResult.length)
  val buff = new ArrayBuffer[Error]
  for ((name, studScore, maxScore) <- testsResult) {
    if (studScore < maxScore) {
      // Penalise only by the fraction of the test that was failed.
      score -= scorePerTest * (1 - studScore.toDouble / maxScore)
      // Fall back to the submission root when the function's location is unknown.
      val (line, file) = FunctionMap.getOrElse(name, (0, studentAnswer.getName))
      buff += new Error(s"Student passes $studScore/$maxScore tests for $name", file, line, 0, "tests")
    }
  }
  // Never report a negative score.
  (buff, Math.max(score, 0))
}
private def compileClassOnBoth(name: String) = {
val exitModel = executorService.submit(new ShellExecutor(s"ghc -i$modelAnswer/IC -i$modelAnswer " +
s"--make -O3 $name -main-is $name"))
val exitStudent = executorService.submit(new ShellExecutor(s"ghc -i$studentAnswer/IC -i$studentAnswer " +
s"--make -O3 $name -main-is $name"))
val outputModel = exitModel.get
val outputStudent = exitStudent.get
(outputModel, outputStudent)
}
  /** Compiles and runs the Bench executable for both solutions (in parallel),
    * compares per-benchmark mean runtimes, and returns the scoring result plus
    * the paths of the two generated benchmark graphs (student, model). */
  def runBench(): ((ArrayBuffer[Error], Int), String, String) = {
    compileClassOnBoth("Bench")
    val benchOutcomeStudent = executorService.submit(new ShellExecutor(s"$studentAnswer/Bench ${bFlags(studentAnswer)}"))
    val benchOutcomeModel = executorService.submit(new ShellExecutor(s"$modelAnswer/Bench ${bFlags(modelAnswer)}"))
    // Mean runtime per benchmark, in nanoseconds, for each solution.
    val zippedMeanModel = genListBenchNameMean(benchOutcomeModel.get)
    val zippedMeanStud = genListBenchNameMean(benchOutcomeStudent.get)
    // delta = model mean - student mean (negative => student code is slower).
    val deltas = produceDelta(zippedMeanModel, zippedMeanStud)
    (calculateScore(deltas), studentAnswer.getAbsolutePath + GRAPH_FILE, modelAnswer.getAbsolutePath + GRAPH_FILE)
  }
private final def bFlags(o: File) = s"--output=$o$GRAPH_FILE"
private def produceDelta(zippedMeanModel: Seq[(String, Double)], zippedMeanStud: Seq[(String, Double)]) = {
val buff = new ArrayBuffer[(String, Double)]
for ((e, i) <- zippedMeanModel.zipWithIndex) {
val (name, modMean) = e
val (_, studMean) = zippedMeanStud.apply(i)
buff += ((name, modMean - studMean))
}
buff
}
  /** Extracts (benchmarkName, meanNanoseconds) pairs from benchmark output.
    * Names come from BenchmarkLine header matches; each section's "mean" line
    * is converted to nanoseconds. Relies on names and mean lines appearing in
    * the same order in the report. */
  private def genListBenchNameMean(outcome: String) = {
    val names = BenchmarkLine.findAllMatchIn(outcome).map(_.group(1))
    // Splitting on the header regex yields one text section per benchmark.
    val details = BenchmarkLine.split(outcome)
    val means = details.flatMap(_.split("\n")).filter(_.trim.startsWith("mean"))
    val doubles = means.map(convertToNS)
    names.toSeq.zip(doubles)
  }
private def convertToNS(meanLine: String) = {
val double = matchMean.findFirstIn(meanLine).get.toDouble
val factor = meanLine match {
case m if m.contains("ns") => 1
case m if m.contains("μs") => 1000
case m if m.contains("ms") => 1000 * 1000
case m if m.contains(" s") => 1000 * 1000 * 1000
}
double * factor
}
def calculateScore(deltas: ArrayBuffer[(String, Double)]) = {
var score = 100
val annotations = new ArrayBuffer[Error]
var eff = ""
for ((n, diff) <- deltas) {
if (Math.abs(diff) > TIME_THRESHOLD) {
score -= (diff / 50000).toInt
val (line, file) = FunctionMap.getOrElse(n, (0, studentAnswer.getName))
if (diff < 0) {
eff = s"Function $n is inefficient -> ${diff.formatted("%.2f")} ns diff!"
} else {
eff = s"Function $n is more efficient than " +
s"the model solution -> ${diff.formatted("%.2f")} ns diff!"
}
annotations.append(new Error(eff, file, line, 0, "complexity"))
}
}
(annotations, Math.min(Math.max(score, 0), 100))
}
}
| ke00n/alabno | backend/complexity_analyser/src/main/scala/complexity_analyser/HaskellProcessor.scala | Scala | mit | 7,404 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.api.controllers
import scala.language.reflectiveCalls
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
import play.api.test.Helpers._
import es.tid.cosmos.api.controllers.ResultMatchers.redirectTo
import es.tid.cosmos.api.controllers.pages.WithSampleSessions
/** Integration tests for the /cosmosrc endpoint, which serves each user's
  * CLI configuration file.
  *
  * Unauthenticated and unregistered users must be redirected away; a
  * registered user must receive a cosmosrc containing their own API
  * credentials. */
class CliConfigResourceIT extends FlatSpec with MustMatchers {

  "A CLI config resource" must "redirect when not authenticated and registered" in
    new WithSampleSessions {
      unauthUser.doRequest("/cosmosrc") must redirectTo ("/")
      unregUser.doRequest("/cosmosrc") must redirectTo ("/register")
    }

  it must "return a cosmosrc when authenticated" in new WithSampleSessions {
    val response = regUserInGroup.doRequest("/cosmosrc")
    status(response) must be (OK)
    // The served file must embed the requesting user's own credentials.
    contentAsString(response) must (
      include (s"api_key: ${regUserInGroup.cosmosProfile.apiCredentials.apiKey}") and
      include (s"api_secret: ${regUserInGroup.cosmosProfile.apiCredentials.apiSecret}"))
  }
}
| telefonicaid/fiware-cosmos-platform | cosmos-api/it/scala/es/tid/cosmos/api/controllers/CliConfigResourceIT.scala | Scala | apache-2.0 | 1,649 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
/* IndexVector is an IntVector whose elements represent indices (e.g., of another vector).
* It is either backed by a discrete sequence of integers (e.g. 1,5,10) or a continouous RangeVector.
*
* IndexVectors can be used for scatter/gather operations.
*
* They also provide a vector construction operator { } that takes a function mapping an index to a value,
* producing a new vector.
*
* author: Arvind Sujeeth (asujeeth@stanford.edu)
* last modified: Dec 27, 2010
*
* Pervasive Parallelism Laboratory (PPL)
* Stanford University
*
*/
/** Dense index vector backed by an Int array. */
class IndexVectorDenseC(__length: Int, __isRow: Boolean) {
  // logical length and orientation (row vs column vector)
  var _length = __length
  var _isRow = __isRow
  // dense backing storage for the indices, zero-initialised
  var _data: Array[Int] = new Array[Int](_length)

  /**
   * These are temporarily needed because they are hard-coded into DeliteOp code gen.
   */
  def unsafeSetData(xs: Array[Int], len: Int) {
    _data = xs
    _length = len
  }

  // Deep copy: the backing array is cloned, so the result shares no state.
  // NOTE(review): returns an IntDenseVector rather than an IndexVectorDenseC —
  // presumably intentional for the generated runtime; verify against code gen.
  def Clone = {
    val v = new IntDenseVector(_length, _isRow);
    v._data = _data.clone
    v
  }
}
/** Index vector backed by a continuous integer range [_start, _end). */
class IndexVectorRange(__start: Int, __end: Int) {
  // half-open range bounds
  var _start = __start
  var _end = __end
  // NOTE(review): _stride is fixed at 1 and ignored by Clone below — confirm
  // that strided ranges are never constructed through this class.
  var _stride = 1
  var _isRow = true

  // Range vectors are read-only views; the backing data cannot be replaced.
  def unsafeSetData(xs: Array[Int], len: Int) {
    throw new IllegalArgumentException("RangeVector cannot be updated")
  }

  // Materialises the range into a dense Int vector (assumes stride == 1).
  def Clone = {
    val len = _end - _start
    val v = new IntDenseVector(len, _isRow)
    var i = 0
    while (i < len) {
      v._data(i) = _start + i
      i += 1
    }
    v
  }
}
| tesendic/Relite | src/generated/scala/IndexVector.scala | Scala | agpl-3.0 | 2,454 |
package io.skysail.app.demo
import akka.actor.{ Actor, ActorLogging, ActorPath, Props, Status }
//import akka.camel.{ CamelMessage, Producer }
//import nl.codecentric.coffee.ActorSettings
//import nl.codecentric.coffee.domain.User
import org.apache.camel.component.rabbitmq.RabbitMQConstants
import scala.collection.immutable
/** Companion: actor name, Props factory and the message protocol.
  * Msg carries a deliveryId for delivery bookkeeping; Confirm acknowledges it. */
object EventSender {
  final val Name = "event-sender"

  def props(): Props = Props(new EventSender())

  // deliveryId presumably originates from the sender's at-least-once delivery
  // bookkeeping — TODO confirm against the persistent sender actor.
  final case class Msg(deliveryId: Long, user: Contact)

  final case class Confirm(deliveryId: Long)
}
/** Actor that forwards user events to an external broker and tracks which
  * deliveries are still awaiting confirmation.
  *
  * NOTE(review): the Camel/RabbitMQ forwarding and the confirmation path are
  * currently commented out, so entries added to `unconfirmed` are never
  * removed and no Confirm is ever sent back — verify this is intentional. */
class EventSender extends Actor with ActorLogging {
  import EventSender._
  //  import io.circe.generic.auto._
  //  import io.circe.syntax._

  //private val camelSender = context.watch(context.actorOf(Props[CamelSender]))

  // deliveryId -> path of the actor expecting a Confirm for that delivery
  private var unconfirmed = immutable.SortedMap.empty[Long, ActorPath]

  override def receive: Receive = {
    case Msg(deliveryId, user) =>
      log.info("Sending msg for user: {}", user.email)
      // Remember who to confirm to once the broker acknowledges the event.
      unconfirmed = unconfirmed.updated(deliveryId, sender().path)
      //      val headersMap = Map(RabbitMQConstants.MESSAGE_ID -> deliveryId, RabbitMQConstants.CORRELATIONID -> deliveryId)
      //      camelSender ! CamelMessage(user.asJson.noSpaces, headersMap)

    //    case CamelMessage(_, headers) =>
    //      val deliveryId: Long = headers.getOrElse(RabbitMQConstants.MESSAGE_ID, -1L).asInstanceOf[Long]
    //      log.info("Event successfully delivered for id {}, sending confirmation", deliveryId)
    //      unconfirmed
    //        .get(deliveryId)
    //        .foreach(
    //          senderActor => {
    //            unconfirmed -= deliveryId
    //            context.actorSelection(senderActor) ! Confirm(deliveryId)
    //          }
    //        )

    case Status.Failure(ex) =>
      log.error("Event delivery failed. Reason: {}", ex.toString)
  }
}
//class CamelSender extends Actor with Producer /*with ActorSettings*/ {
// override def endpointUri: String = "rabbitmq://127.0.0.1:5672/userevents?username=guest&password=guest"//settings.rabbitMQ.uri
//
// override def headersToCopy: Set[String] =
// super.headersToCopy + RabbitMQConstants.CORRELATIONID + RabbitMQConstants.MESSAGE_ID
//}
| evandor/skysail-core | skysail.app.demo/src/io/skysail/app/demo/EventSender.scala | Scala | apache-2.0 | 2,138 |
package blended.streams.multiresult
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import blended.util.logging.Logger
// A stage that may generate multiple responses from one incoming message
/** Akka Streams stage that expands each incoming element of type T into zero
  * or more outgoing elements of type U, as produced by `f`.
  *
  * The results of one input are buffered in `pendingValues` and emitted one by
  * one as downstream demand arrives; a new input is pulled only once the
  * buffer is drained.
  *
  * @param name suffix used in the inlet/outlet port names
  * @param f    mapping from one input element to the list of outputs
  */
class MultiResultGraphStage[T, U](name : String)(f : T => List[U]) extends GraphStage[FlowShape[T,U]] {

  private val in : Inlet[T] = Inlet[T](s"MultiResult.$name.in")
  private val out : Outlet[U] = Outlet[U](s"MultiResult.$name.out")

  private val log : Logger = Logger(classOf[MultiResultGraphStage[_,_]].getName())

  override val shape : FlowShape[T, U] = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {

    // outputs produced from the last grabbed input that have not been emitted yet
    private var pendingValues: List[U] = Nil

    // as long as values are still available, we will push them one by one
    // if no more messages are available to push downstream, we will pull
    // the inlet and eventually a new inbound message will be pulled from
    // upstream, yielding in a new list of values to be pushed.
    private def push(): Unit = {
      pendingValues match {
        case Nil =>
          // hasBeenPulled guard avoids an illegal double-pull on the inlet
          if (!hasBeenPulled(in)) {
            log.trace("Pulling new input")
            pull(in)
          }
        case h :: t =>
          if (isAvailable(out)) {
            push(out, h)
            log.trace(s"Pushing value [$h], rest is [$t]")
            pendingValues = t
          }
      }
    }

    setHandlers(
      in, out,
      new InHandler with OutHandler {
        override def onPush(): Unit = {
          // We calculate the list of responses
          pendingValues = f(grab(in))
          // and start to push one by one
          push()
        }
        override def onPull(): Unit = push()
      })

    // We start by pulling the inlet
    override def preStart(): Unit = {
      pull(in)
    }
  }
}
| woq-blended/blended | blended.streams/src/main/scala/blended/streams/multiresult/MultiResultGraphStage.scala | Scala | apache-2.0 | 1,924 |
package org.jetbrains.plugins.scala.debugger.evaluation.util
import com.intellij.debugger.engine.{DebugProcess, DebugProcessImpl, JVMName, JVMNameUtil}
import com.intellij.debugger.jdi.VirtualMachineProxyImpl
import com.intellij.debugger.{DebuggerBundle, NoDataException, SourcePosition}
import com.intellij.lang.ASTNode
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.util.Computable
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import com.sun.jdi.{Field, ObjectReference, ReferenceType, Value}
import org.jetbrains.plugins.scala.debugger.ScalaPositionManager
import org.jetbrains.plugins.scala.debugger.evaluation.{EvaluationException, ScalaEvaluatorBuilderUtil}
import org.jetbrains.plugins.scala.debugger.filters.ScalaDebuggerSettings
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScCaseClause}
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScMethodLike, ScPrimaryConstructor, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScAnnotations, ScExpression, ScForStatement, ScNewTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{PsiTypeParameterExt, ScClassParameter, ScParameter}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScEarlyDefinitions, ScPackaging, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaFile, ScalaRecursiveElementVisitor}
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.result.{Typeable, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{ScSubstitutor, ScType, ValueClassType}
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* User: Alefas
* Date: 19.10.11
*/
object DebuggerUtil {
class JVMNameBuffer {
def append(evaluator: JVMName) {
buffer += evaluator
}
def append(name: Char) {
append(Character.toString(name))
}
def append(text: String) {
buffer += JVMNameUtil.getJVMRawText(text)
}
def toName: JVMName = {
new JVMName {
def getName(process: DebugProcessImpl): String = {
if (myName == null) {
var name: String = ""
for (nameEvaluator <- buffer) {
name += nameEvaluator.getName(process)
}
myName = name
}
myName
}
def getDisplayName(debugProcess: DebugProcessImpl): String = {
if (myDisplayName == null) {
var displayName: String = ""
for (nameEvaluator <- buffer) {
displayName += nameEvaluator.getDisplayName(debugProcess)
}
myDisplayName = displayName
}
myDisplayName
}
private var myName: String = null
private var myDisplayName: String = null
}
}
private var buffer = new ArrayBuffer[JVMName]
}
  /** Maps a Scala type to the JVM-level qualified class name used by the
    * debugger: boxed class names for primitives, "[]" suffixes for arrays,
    * and the extracted class's JVM name otherwise. */
  def getJVMQualifiedName(tp: ScType): JVMName = {
    val stdTypes = tp.projectContext.stdTypes
    import stdTypes._

    tp match {
      case Any => JVMNameUtil.getJVMRawText("java.lang.Object")
      case Null => JVMNameUtil.getJVMRawText("scala.Null") //shouldn't be
      case AnyRef => JVMNameUtil.getJVMRawText("java.lang.Object") //shouldn't be
      case Nothing => JVMNameUtil.getJVMRawText("scala.Nothing") //shouldn't be
      case Singleton => JVMNameUtil.getJVMRawText("java.lang.Object")
      case AnyVal => JVMNameUtil.getJVMRawText("scala.AnyVal") //shouldn't be
      case Unit => JVMNameUtil.getJVMRawText("java.lang.Void")
      case Boolean => JVMNameUtil.getJVMRawText("java.lang.Boolean")
      case Char => JVMNameUtil.getJVMRawText("java.lang.Character")
      case Int => JVMNameUtil.getJVMRawText("java.lang.Int")
      case Long => JVMNameUtil.getJVMRawText("java.lang.Long")
      case Float => JVMNameUtil.getJVMRawText("java.lang.Float")
      case Double => JVMNameUtil.getJVMRawText("java.lang.Double")
      case Byte => JVMNameUtil.getJVMRawText("java.lang.Byte")
      case Short => JVMNameUtil.getJVMRawText("java.lang.Short")
      // JVM-level array type
      case JavaArrayType(argument) =>
        val buff = new JVMNameBuffer()
        buff.append(getJVMQualifiedName(argument))
        buff.append("[]")
        buff.toName
      // scala.Array[T] is a JVM array as well
      case ParameterizedType(arr, Seq(arg)) if arr.extractClass.exists(_.qualifiedName == "scala.Array") =>
        val buff = new JVMNameBuffer()
        buff.append(getJVMQualifiedName(arg))
        buff.append("[]")
        buff.toName
      case _ =>
        tp.extractClass match {
          case Some(clazz) => getClassJVMName(clazz)
          case None => JVMNameUtil.getJVMRawText(tp.canonicalText)
        }
    }
  }
  /** Encodes a Scala type as a JVM type descriptor (e.g. "I", "[D",
    * "Ljava/lang/Object;") for use in method signatures.
    *
    * @param isParam when true, Unit is encoded as BoxedUnit (a parameter
    *                slot); when false it is encoded as "V" (a void return) */
  def getJVMStringForType(tp: ScType, isParam: Boolean = true): String = {
    val stdTypes = tp.projectContext.stdTypes
    import stdTypes._

    tp match {
      case AnyRef => "Ljava/lang/Object;"
      case Any => "Ljava/lang/Object;"
      case Singleton => "Ljava/lang/Object;"
      case Null => "Lscala/Null$;"
      case Nothing => "Lscala/Nothing$;"
      case Boolean => "Z"
      case Byte => "B"
      case Char => "C"
      case Short => "S"
      case Int => "I"
      case Long => "J"
      case Float => "F"
      case Double => "D"
      case Unit if isParam => "Lscala/runtime/BoxedUnit;"
      case Unit => "V"
      case JavaArrayType(arg) => "[" + getJVMStringForType(arg)
      case ParameterizedType(ScDesignatorType(clazz: PsiClass), Seq(arg))
        if clazz.qualifiedName == "scala.Array" => "[" + getJVMStringForType(arg)
      case _ =>
        tp.extractClass match {
          // objects compile to a module class with a '$' suffix
          case Some(obj: ScObject) => "L" + obj.getQualifiedNameForDebugger.replace('.', '/') + "$;"
          case Some(obj: ScTypeDefinition) => "L" + obj.getQualifiedNameForDebugger.replace('.', '/') + ";"
          case Some(clazz) => "L" + clazz.qualifiedName.replace('.', '/') + ";"
          case _ => "Ljava/lang/Object;"
        }
    }
  }
  /** Builds the JVM method signature (e.g. "(ILjava/lang/String;)V") for a
    * Scala function or constructor, taking into account type-parameter upper
    * bounds, captured local parameters of local functions/classes, and the
    * extra leading parameter of value-class methods. */
  def getFunctionJVMSignature(function: ScMethodLike): JVMName = {
    // For constructors the type parameters come from the enclosing class.
    val typeParams = function match {
      case fun: ScFunction if !fun.isConstructor => fun.typeParameters
      case _: ScFunction | _: ScPrimaryConstructor =>
        function.containingClass match {
          case td: ScTypeDefinition => td.typeParameters
          case _ => Seq.empty
        }
      case _ => Seq.empty
    }
    // Erase type parameters to their upper bounds, as the JVM signature does.
    val subst = typeParams.foldLeft(ScSubstitutor.empty) {
      (subst, tp) => subst.bindT(tp.nameAndId, tp.upperBound.getOrAny)
    }
    // Local functions/classes capture enclosing locals as extra trailing parameters.
    val localParameters = function match {
      case fun: ScFunctionDefinition if fun.isLocal => localParamsForFunDef(fun)
      case fun if fun.isConstructor =>
        fun.containingClass match {
          case c: ScClass => localParamsForConstructor(c)
          case _ => Seq.empty
        }
      case _ => Seq.empty
    }
    // Value-class methods receive the wrapped value as a leading parameter.
    val valueClassParameter = function.containingClass match {
      case cl: ScClass if ValueClassType.isValueClass(cl) =>
        cl.constructors match {
          case Seq(pc: ScPrimaryConstructor) => pc.parameters.headOption
          case _ => None
        }
      case _ => None
    }
    val simpleParameters = function.effectiveParameterClauses.flatMap(_.effectiveParameters)
    val parameters = valueClassParameter ++: simpleParameters ++: localParameters
    val paramTypes = parameters.map(parameterForJVMSignature(_, subst)).mkString("(", "", ")")
    val resultType = function match {
      case fun: ScFunction if !fun.isConstructor =>
        getJVMStringForType(subst.subst(fun.returnType.getOrAny), isParam = false)
      case _: ScFunction | _: ScPrimaryConstructor => "V"
    }
    JVMNameUtil.getJVMRawText(paramTypes + resultType)
  }
  /** Computes the JVM signature of a constructor. Top-level classes are
    * resolved statically; nested/local classes fall back to looking up the
    * loaded class at runtime via JVMConstructorSignature. */
  def constructorSignature(named: PsiNamedElement): JVMName = {
    named match {
      case fun: ScFunction => DebuggerUtil.getFunctionJVMSignature(fun)
      case constr: ScPrimaryConstructor =>
        constr.containingClass match {
          case td: ScTypeDefinition if td.isTopLevel => DebuggerUtil.getFunctionJVMSignature(constr)
          case clazz => new JVMConstructorSignature(clazz)
        }
      case method: PsiMethod => JVMNameUtil.getJVMSignature(method)
      case clazz: ScClass if clazz.isTopLevel => clazz.constructor match {
        case Some(cnstr) => DebuggerUtil.getFunctionJVMSignature(cnstr)
        // no explicit constructor: the default one is ()V
        case _ => JVMNameUtil.getJVMRawText("()V")
      }
      case clazz: ScClass => new JVMConstructorSignature(clazz)
      case _: PsiClass => JVMNameUtil.getJVMRawText("()V")
      case _ => JVMNameUtil.getJVMRawText("()V")
    }
  }
  /** Computes the JVM signature string of the method a lambda compiles to, or
    * None when the element's type is not a function type. By-name arguments
    * are treated as zero-argument functions; a value-class result is unwrapped
    * to its underlying type. */
  def lambdaJVMSignature(lambda: PsiElement): Option[String] = {
    val (argumentTypes, returnType) = lambda match {
      case (expr: ScExpression) && Typeable(tp) if ScalaPsiUtil.isByNameArgument(expr) => (Seq.empty, tp)
      case Typeable(FunctionType(retT, argTypes)) => (argTypes, retT)
      case _ => return None
    }
    val trueReturnType = returnType match {
      case ValueClassType(inner) => inner
      case _ => returnType
    }
    val paramText = argumentTypes.map(getJVMStringForType(_, isParam = true)).mkString("(", "", ")")
    val returnTypeText = getJVMStringForType(trueReturnType, isParam = false)
    Some(paramText + returnTypeText)
  }
  /** JVM descriptor for a single parameter: varargs compile to Seq, by-name
    * parameters to Function0, everything else to its (substituted) type. */
  private def parameterForJVMSignature(param: ScTypedDefinition, subst: ScSubstitutor) = param match {
    case p: ScParameter if p.isRepeatedParameter => "Lscala/collection/Seq;"
    case p: ScParameter if p.isCallByNameParameter => "Lscala/Function0;"
    case _ => getJVMStringForType(subst.subst(param.getType(TypingContext.empty).getOrAny))
  }
  /** Mirrors a Boolean constant in the debuggee VM as a value of the given
    * Scala type; returns null for types a Boolean cannot represent. */
  def createValue(vm: VirtualMachineProxyImpl, tp: ScType, b: Boolean): Value = {
    tp match {
      case _ if tp.isBoolean => vm.mirrorOf(b)
      case _ if tp.isUnit => vm.mirrorOfVoid()
      case _ => null
    }
  }
  /** Mirrors a Long constant in the debuggee VM as a value of the given Scala
    * numeric type (narrowing conversions applied); null for other types. */
  def createValue(vm: VirtualMachineProxyImpl, tp: ScType, b: Long): Value = {
    val stdTypes = tp.projectContext.stdTypes
    import stdTypes._

    tp match {
      case Long => vm.mirrorOf(b)
      case Int => vm.mirrorOf(b.toInt)
      case Byte => vm.mirrorOf(b.toByte)
      case Short => vm.mirrorOf(b.toShort)
      case Char => vm.mirrorOf(b.toChar)
      case Float => vm.mirrorOf(b.toFloat)
      case Double => vm.mirrorOf(b.toDouble)
      case Unit => vm.mirrorOfVoid()
      case _ => null
    }
  }
def createValue(vm: VirtualMachineProxyImpl, tp: ScType, b: Char): Value = {
val stdTypes = tp.projectContext.stdTypes
import stdTypes._
tp match {
case Long => vm.mirrorOf(b)
case Int => vm.mirrorOf(b.toInt)
case Byte => vm.mirrorOf(b.toByte)
case Short => vm.mirrorOf(b.toShort)
case Char => vm.mirrorOf(b.toChar)
case Float => vm.mirrorOf(b.toFloat)
case Double => vm.mirrorOf(b.toDouble)
case Unit => vm.mirrorOfVoid()
case _ => null
}
}
def createValue(vm: VirtualMachineProxyImpl, tp: ScType, b: Double): Value = {
val stdTypes = tp.projectContext.stdTypes
import stdTypes._
tp match {
case Long => vm.mirrorOf(b)
case Int => vm.mirrorOf(b.toInt)
case Byte => vm.mirrorOf(b.toByte)
case Short => vm.mirrorOf(b.toShort)
case Char => vm.mirrorOf(b.toChar)
case Float => vm.mirrorOf(b.toFloat)
case Double => vm.mirrorOf(b.toDouble)
case Unit => vm.mirrorOfVoid()
case _ => null
}
}
def createValue(vm: VirtualMachineProxyImpl, tp: ScType, b: Float): Value = {
val stdTypes = tp.projectContext.stdTypes
import stdTypes._
tp match {
case Long => vm.mirrorOf(b)
case Int => vm.mirrorOf(b.toInt)
case Byte => vm.mirrorOf(b.toByte)
case Short => vm.mirrorOf(b.toShort)
case Char => vm.mirrorOf(b.toChar)
case Float => vm.mirrorOf(b.toFloat)
case Double => vm.mirrorOf(b.toDouble)
case Unit => vm.mirrorOfVoid()
case _ => null
}
}
  /** JVMName resolved lazily from the first class loaded at a source position;
    * getName throws an EvaluationException when the class is not loaded yet. */
  class JVMClassAt(sourcePosition: SourcePosition) extends JVMName {
    def getName(process: DebugProcessImpl): String = {
      jvmClassAtPosition(sourcePosition, process) match {
        case Some(refType) => refType.name
        case _ =>
          throw EvaluationException(DebuggerBundle.message("error.class.not.loaded", getDisplayName(process)))
      }
    }

    def getDisplayName(debugProcess: DebugProcessImpl): String = {
      // PSI access requires a read action
      ApplicationManager.getApplication.runReadAction(new Computable[String] {
        def compute: String = {
          JVMNameUtil.getSourcePositionClassDisplayName(debugProcess, sourcePosition)
        }
      })
    }
  }
  /** JVMName that resolves the constructor signature of `clazz` lazily by
    * looking up the loaded class at the class's source position. */
  class JVMConstructorSignature(clazz: PsiClass) extends JVMName {
    val position = SourcePosition.createFromElement(clazz)

    override def getName(process: DebugProcessImpl): String = {
      jvmClassAtPosition(position, process) match {
        // NOTE(review): assumes the loaded type has at least one <init> method
        case Some(refType) => refType.methodsByName("<init>").get(0).signature()
        case None =>
          throw EvaluationException(DebuggerBundle.message("error.class.not.loaded", inReadAction(clazz.qualifiedName)))
      }
    }

    override def getDisplayName(debugProcess: DebugProcessImpl): String = getName(debugProcess)
  }
def isScala(refType: ReferenceType, default: Boolean = true): Boolean = {
ScalaPositionManager.cachedSourceName(refType).map(_.endsWith(".scala")).getOrElse(default)
}
def jvmClassAtPosition(sourcePosition: SourcePosition, debugProcess: DebugProcess): Option[ReferenceType] = {
val allClasses = try {
debugProcess.getPositionManager.getAllClasses(sourcePosition)
} catch {
case _: NoDataException => return None
}
if (!allClasses.isEmpty) Some(allClasses.get(0))
else None
}
def withoutBackticks(name: String): String = {
val backticked = """\\$u0060(.+)\\$u0060""".r
name match {
case null => null
case backticked(id) => id
case _ => name
}
}
  /** JVM class name for a PSI class. Anonymous and local classes cannot be
    * named statically and are resolved at runtime via JVMClassAt; others use
    * the debugger-qualified name plus an optional trait/object postfix. */
  def getClassJVMName(clazz: PsiClass, withPostfix: Boolean = false): JVMName = {
    clazz match {
      case t: ScNewTemplateDefinition =>
        new JVMClassAt(SourcePosition.createFromElement(t))
      case t: ScTypeDefinition =>
        if (isLocalClass(t)) new JVMClassAt(SourcePosition.createFromElement(t))
        else {
          val qual = t.getQualifiedNameForDebugger + classnamePostfix(t, withPostfix)
          JVMNameUtil.getJVMRawText(qual)
        }
      case _ => JVMNameUtil.getJVMQualifiedName(clazz)
    }
  }
  /** Suffix appended to the compiled class name: "$class" for trait impl
    * classes, "$" for module (object) classes and value-class companions. */
  def classnamePostfix(t: ScTemplateDefinition, withPostfix: Boolean = false): String = {
    t match {
      case _: ScTrait if withPostfix => "$class"
      case o: ScObject if withPostfix || o.isPackageObject => "$"
      case c: ScClass if withPostfix && ValueClassType.isValueClass(c) => "$" //methods from a value class always delegate to the companion object
      case _ => ""
    }
  }
  /** Collects one SourcePosition per distinct source line covered by the
    * element, by recursing to leaf AST nodes. The accumulator default is
    * re-evaluated per call, so each top-level call starts fresh. */
  def getSourcePositions(elem: PsiElement, lines: mutable.HashSet[SourcePosition] = new mutable.HashSet[SourcePosition]): Set[SourcePosition] = {
    val node = elem.getNode
    val children: Array[ASTNode] = if (node != null) node.getChildren(null) else Array.empty[ASTNode]
    if (children.isEmpty) {
      // leaf node: record its line unless that line is already represented
      val position = SourcePosition.createFromElement(elem)
      if (!lines.exists(_.getLine == position.getLine)) {
        lines += position
      }
    }
    for (child <- children) {
      getSourcePositions(child.getPsi, lines)
    }
    lines.toSet
  }
  /** Unwraps a scala.runtime.*Ref box (e.g. ObjectRef, IntRef) to the value it
    * holds, so captured vars display naturally. Controlled by the
    * DONT_SHOW_RUNTIME_REFS debugger setting; non-ref values pass through. */
  def unwrapScalaRuntimeRef(value: AnyRef): AnyRef = {
    value match {
      case _ if !ScalaDebuggerSettings.getInstance().DONT_SHOW_RUNTIME_REFS => value
      case objRef: ObjectReference =>
        val refType = objRef.referenceType()
        if (isScalaRuntimeRef(refType.name))
          // fall back to the box itself when no payload field is found
          runtimeRefField(refType).map(objRef.getValue).getOrElse(objRef)
        else objRef
      case _ => value
    }
  }
def isScalaRuntimeRef(typeFqn: String): Boolean = {
typeFqn.startsWith("scala.runtime.") && typeFqn.endsWith("Ref")
}
  /** Extractor matching an ObjectReference that is a scala.runtime.*Ref box,
    * yielding the unwrapped payload value. */
  object scalaRuntimeRefTo {
    def unapply(objRef: ObjectReference): Option[AnyRef] = {
      val typeName = objRef.referenceType().name()
      if (isScalaRuntimeRef(typeName)) Some(unwrapScalaRuntimeRef(objRef))
      else None
    }
  }
  /** Field holding a runtime ref box's payload: "elem" in older Scala
    * versions, "_value" in newer ones. */
  def runtimeRefField(refType: ReferenceType): Option[Field] = {
    refType.fieldByName("elem").toOption
      .orElse(refType.fieldByName("_value").toOption)
  }
  /** Locals of enclosing scopes captured by a local function — these become
    * extra compiled parameters. Only the body is scanned, so references from
    * default parameter values are excluded. `visited` breaks recursion cycles. */
  def localParamsForFunDef(fun: ScFunctionDefinition, visited: mutable.HashSet[PsiElement] = mutable.HashSet.empty): Seq[ScTypedDefinition] = {
    val container = ScalaEvaluatorBuilderUtil.getContextClass(fun)
    fun.body match { //to exclude references from default parameters
      case Some(b) => localParams(b, fun, container, visited)
      case _ => Seq.empty
    }
  }
  /** Locals of enclosing scopes captured by a local class's constructor —
    * these become extra compiled constructor parameters. */
  def localParamsForConstructor(cl: ScClass, visited: mutable.HashSet[PsiElement] = mutable.HashSet.empty): Seq[ScTypedDefinition] = {
    val container = ScalaEvaluatorBuilderUtil.getContextClass(cl)
    val extendsBlock = cl.extendsBlock //to exclude references from default parameters
    localParams(extendsBlock, cl, container, visited)
  }
  /** Locals captured by a parameter's default-value expression; these become
    * extra parameters of the synthetic default-getter method. */
  def localParamsForDefaultParam(param: ScParameter, visited: mutable.HashSet[PsiElement] = mutable.HashSet.empty): Seq[ScTypedDefinition] = {
    val owner = param.owner
    val container = ScalaEvaluatorBuilderUtil.getContextClass {
      owner match {
        // for a primary constructor the context is the enclosing class
        case pc: ScPrimaryConstructor => pc.containingClass
        case fun => fun
      }
    }
    param.getDefaultExpression match {
      case Some(expr) => localParams(expr, owner, container, visited)
      case None => Seq.empty
    }
  }
  /** Walks `block` and collects the local values/variables (defined inside
    * `container` but outside `excludeContext`) that it references — directly,
    * or transitively through local functions and local-class constructors it
    * calls. The result is ordered with objects last, then by text offset,
    * matching the compiler's parameter ordering for captured locals.
    *
    * @param visited guards against infinite recursion through mutually
    *                recursive local functions/classes */
  def localParams(block: PsiElement, excludeContext: PsiElement, container: PsiElement,
                  visited: mutable.HashSet[PsiElement] = mutable.HashSet.empty): Seq[ScTypedDefinition] = {
    def atRightPlace(elem: PsiElement): Boolean = {
      if (PsiTreeUtil.isContextAncestor(excludeContext, elem, false)) return false

      container match {
        // special case: a for-expression body only captures enumerator values
        // introduced at or after the last generator
        case (_: ScExpression) childOf ScForStatement(enumerators, _) if PsiTreeUtil.isContextAncestor(enumerators, elem, true) =>
          val generators = enumerators.generators
          if (generators.size <= 1) true
          else {
            val lastGenerator = generators.last
            elem.getTextOffset >= lastGenerator.getTextOffset
          }
        case _ => PsiTreeUtil.isContextAncestor(container, elem, false)
      }
    }

    val buf = new mutable.HashSet[ScTypedDefinition]
    block.accept(new ScalaRecursiveElementVisitor {
      override def visitReference(ref: ScReferenceElement) {
        if (ref.qualifier.isDefined) {
          // qualified references resolve through their qualifier, not a captured local
          super.visitReference(ref)
          return
        }
        val elem = ref.resolve()
        elem match {
          case null =>
          // a called local function captures locals too — include them transitively
          case fun: ScFunctionDefinition if fun.isLocal && !visited.contains(fun) =>
            visited += fun
            buf ++= localParamsForFunDef(fun, visited).filter(atRightPlace)
          case fun: ScMethodLike if fun.isConstructor && !visited.contains(fun) =>
            fun.containingClass match {
              case c: ScClass if isLocalClass(c) =>
                visited += c
                buf ++= localParamsForConstructor(c, visited).filter(atRightPlace)
              case _ =>
            }
          case td: ScTypedDefinition if isLocalV(td) && atRightPlace(td) =>
            buf += td
          case _ => super.visitReference(ref)
        }
      }
    })

    buf.toSeq.sortBy(e => (e.isInstanceOf[ScObject], e.getTextRange.getStartOffset))
  }
  /** True when the resolved element is a local value/variable/parameter (i.e.
    * something the compiler may capture), as opposed to a class member. */
  def isLocalV(resolve: PsiElement): Boolean = {
    resolve match {
      case _: PsiLocalVariable => true
      // class parameters are fields, not locals
      case _: ScClassParameter => false
      case _: PsiParameter => true
      case b: ScBindingPattern =>
        ScalaPsiUtil.nameContext(b) match {
          case v @ (_: ScValue | _: ScVariable) =>
            // member vals/vars live in a template body or early definitions
            !v.getContext.isInstanceOf[ScTemplateBody] && !v.getContext.isInstanceOf[ScEarlyDefinitions]
          case _: ScCaseClause => true
          case _ => true //todo: for generator/enumerators
        }
      case o: ScObject =>
        // a local object defined inside some class
        !o.getContext.isInstanceOf[ScTemplateBody] && ScalaPsiUtil.getContextOfType(o, true, classOf[PsiClass]) != null
      case _ => false
    }
  }
  /** True when a `new` expression compiles to an anonymous class: it either
    * has a body or mixes in additional types beyond the constructor call. */
  def generatesAnonClass(newTd: ScNewTemplateDefinition): Boolean = {
    val extBl = newTd.extendsBlock
    extBl.templateBody.nonEmpty || extBl.templateParents.exists(_.typeElementsWithoutConstructor.nonEmpty)
  }
  /** True when the class is local (defined inside a method/expression, or
    * nested anywhere inside a local/anonymous class), so its JVM name cannot
    * be determined statically. */
  @tailrec
  def isLocalClass(td: PsiClass): Boolean = {
    td.getParent match {
      case _: ScTemplateBody =>
        // member class: local iff some enclosing class is local/anonymous
        val parent = PsiTreeUtil.getParentOfType(td, classOf[PsiClass], true)
        if (parent == null || parent.isInstanceOf[ScNewTemplateDefinition]) return true
        isLocalClass(parent)
      case _: ScPackaging | _: ScalaFile => false
      case _ => true
    }
  }
  /** The innermost "method-like" container of an element for debugging
    * purposes: a lambda, a real method, a template body / early definitions
    * initializer, a script file, or a class. */
  def getContainingMethod(elem: PsiElement): Option[PsiElement] = {
    elem.withParentsInFile.collectFirst {
      case c if ScalaPositionManager.isLambda(c) => c
      case m: PsiMethod => m
      case tb: ScTemplateBody => tb
      case ed: ScEarlyDefinitions => ed
      case ChildOf(f: ScalaFile) if f.isScriptFile => f
      case c: ScClass => c
    }
  }
def inTheMethod(pos: SourcePosition, method: PsiElement): Boolean = {
val elem: PsiElement = pos.getElementAt
if (elem == null) return false
getContainingMethod(elem).contains(method)
}
  /** For annotated or commented declarations, skips leading whitespace,
    * comments, annotations and empty elements to the first significant child;
    * other elements are returned unchanged. */
  def getSignificantElement(elem: PsiElement): PsiElement = {
    elem match {
      case _: ScAnnotationsHolder | _: ScCommentOwner =>
        val firstSignificant = elem.children.find {
          case ElementType(t) if ScalaTokenTypes.WHITES_SPACES_AND_COMMENTS_TOKEN_SET.contains(t) => false
          case _: ScAnnotations => false
          case e if e.getTextLength == 0 => false
          case _ => true
        }
        firstSignificant.getOrElse(elem)
      case _ => elem
    }
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/debugger/evaluation/util/DebuggerUtil.scala | Scala | apache-2.0 | 22,317 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.redis
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreParams
import org.locationtech.geomesa.security.SecurityParams
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
import org.locationtech.geomesa.utils.geotools.GeoMesaParam
import redis.clients.jedis.JedisPool
import scala.util.control.NonFatal
package object data extends LazyLogging {
// keep lazy to allow for runtime setting of the underlying sys prop
  // Backoff multipliers applied between Redis transaction retries. Kept lazy to
  // allow runtime setting of the underlying system property; falls back to the
  // default sequence (with an error log) when the property value fails to parse.
  // keep lazy to allow for runtime setting of the underlying sys prop
  lazy val TransactionBackoffs: IndexedSeq[Int] = try { backoffs(RedisSystemProperties.TransactionBackoff.get) } catch {
    case NonFatal(_) =>
      logger.error(s"Invalid backoff for property '${RedisSystemProperties.TransactionBackoff.property}', using defaults")
      backoffs(RedisSystemProperties.TransactionBackoff.default)
  }
  /** GeoTools DataStoreFactory parameters accepted by the Redis data store:
    * connection URL, catalog table name, connection-pool tuning and
    * pipelining, plus the common GeoMesa and security parameters. */
  object RedisDataStoreParams extends GeoMesaDataStoreParams with SecurityParams {

    // disable loose bbox by default, since we don't have any z-iterators
    override protected def looseBBoxDefault = false

    val RedisUrlParam =
      new GeoMesaParam[String](
        "redis.url",
        "Redis connection URL. The URL can be used to specify the Redis database and credentials, if required - " +
            "for example, 'redis://user:password@localhost:6379/1'",
        optional = false,
        supportsNiFiExpressions = true
      )

    val RedisCatalogParam =
      new GeoMesaParam[String](
        "redis.catalog",
        "The name of the GeoMesa catalog table",
        optional = false,
        supportsNiFiExpressions = true
      )

    val PoolSizeParam =
      new GeoMesaParam[Integer](
        "redis.connection.pool.size",
        "Max number of simultaneous connections to use",
        default = 16,
        supportsNiFiExpressions = true
      )

    val TestConnectionParam =
      new GeoMesaParam[java.lang.Boolean](
        "redis.connection.pool.validate",
        "Test connections when borrowed from the pool. Connections may be closed due to inactivity, " +
            "which would cause a transient error if validation is disabled",
        default = Boolean.box(true)
      )

    val PipelineParam =
      new GeoMesaParam[java.lang.Boolean](
        "redis.pipeline.enabled",
        "Enable pipelining of query requests. This reduces network latency, " +
            "but restricts queries to a single execution thread",
        default = Boolean.box(false)
      )

    val ConnectionPoolParam = new GeoMesaParam[JedisPool]("redis.connection", "Connection pool") // generally used for testing
  }
object RedisSystemProperties {
val WriteBatchSize = SystemProperty("geomesa.redis.write.batch", "1000")
val TransactionRetries = SystemProperty("geomesa.redis.tx.retry", "10")
val TransactionPause = SystemProperty("geomesa.redis.tx.pause", "100ms")
val TransactionBackoff = SystemProperty("geomesa.redis.tx.backoff", "1,1,2,2,5,10,20")
val AgeOffInterval = SystemProperty("geomesa.redis.age.off.interval", "10 minutes")
}
/**
* Parse backoff property
*
* @param prop system property value
* @return
*/
private def backoffs(prop: String): IndexedSeq[Int] = {
val seq = prop.split(",").map(_.toInt)
require(seq.nonEmpty, "No backoff defined")
seq
}
}
| locationtech/geomesa | geomesa-redis/geomesa-redis-datastore/src/main/scala/org/locationtech/geomesa/redis/data/package.scala | Scala | apache-2.0 | 3,832 |
/*
* Copyright (c) 2012 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
/**
* Searching arbitrarily nested case classes, tuples, and lists.
*
* @author Travis Brown
*/
object DeepSearchExamples extends App {
  import shapeless._

  // Evidence that an A is something that we can look around in for Qs that
  // satisfy some predicate.
  trait Searchable[A, Q] {
    def find(p: Q => Boolean)(a: A): Option[Q]
  }

  trait LowPrioritySearchable {
    // Fallback: any type with a Generic (case classes, tuples) is searched via
    // its HList representation. Kept low priority so the direct element and
    // list instances in the companion win when both apply.
    implicit def hlistishSearchable[A, L <: HList, Q](
      implicit gen: Generic.Aux[A, L], s: Searchable[L, Q]
    ) = new Searchable[A, Q] {
      def find(p: Q => Boolean)(a: A) = s.find(p)(gen to a)
    }
  }

  object Searchable extends LowPrioritySearchable {
    // A value of the queried type itself is its own (only) search target.
    implicit def elemSearchable[A] = new Searchable[A, A] {
      def find(p: A => Boolean)(a: A) = if (p(a)) Some(a) else None
    }

    // Search a list element-wise, returning the first hit.
    implicit def listSearchable[A, Q](implicit s: Searchable[A, Q]) =
      new Searchable[List[A], Q] {
        def find(p: Q => Boolean)(a: List[A]) = a.flatMap(s.find(p)).headOption
      }

    implicit def hnilSearchable[Q] = new Searchable[HNil, Q] {
      def find(p: Q => Boolean)(a: HNil) = None
    }

    // The head instance defaults to null so heads that cannot contain a Q are
    // simply skipped: Option(null) == None, and the tail is searched instead.
    implicit def hlistSearchable[H, T <: HList, Q](
      implicit hs: Searchable[H, Q] = null, ts: Searchable[T, Q]
    ) = new Searchable[H :: T, Q] {
      def find(p: Q => Boolean)(a: H :: T) =
        Option(hs).flatMap(_.find(p)(a.head)) orElse ts.find(p)(a.tail)
    }
  }

  // Enrichment adding `deepFind` to any value with a Searchable instance.
  case class SearchableWrapper[A](a: A) {
    def deepFind[Q](p: Q => Boolean)(implicit s: Searchable[A, Q]) =
      s.find(p)(a)
  }

  implicit def wrapSearchable[A](a: A) = SearchableWrapper(a)

  // An example predicate:
  val p = (_: String) endsWith "o"

  // On strings:
  assert("hello".deepFind(p) == Some("hello"))
  assert("hell".deepFind(p) == None)

  // On lists:
  assert(List("yes", "maybe", "no").deepFind(p) == Some("no"))

  // On arbitrarily sized and nested tuples:
  assert(("yes", "maybe", ("no", "why")).deepFind(p) == Some("no"))
  assert(("a", ("b", "c"), "d").deepFind(p) == None)

  // On tuples with non-string elements:
  assert((1, "two", ('three, '4')).deepFind(p) == Some("two"))

  // Search the same tuple for a specific character instead:
  assert((1, "two", ('three, '4')).deepFind((_: Char) == 52) == Some('4'))

  // Our case class:
  case class Foo(a: String, b: String, c: List[String])

  // And it works:
  assert(Foo("four", "three", List("two", "one")).deepFind(p) == Some("two"))
  assert(Foo("a", "b", "c" :: Nil).deepFind(p) == None)
}
| rorygraves/shapeless | examples/src/main/scala/shapeless/examples/deepsearch.scala | Scala | apache-2.0 | 3,097 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, NamedRelation}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.streaming.{Offset, SparkDataStream}
import org.apache.spark.sql.sources.v2.writer._
/**
* A logical plan representing a data source v2 table.
*
* @param table The table that this relation represents.
* @param options The options for this table operation. It's used to create fresh [[ScanBuilder]]
* and [[WriteBuilder]].
*/
case class DataSourceV2Relation(
    table: Table,
    output: Seq[AttributeReference],
    options: Map[String, String])
  extends LeafNode with MultiInstanceRelation with NamedRelation {

  import DataSourceV2Implicits._

  // The relation is named after the underlying table.
  override def name: String = table.name()

  override def simpleString(maxFields: Int): String = {
    s"RelationV2${truncatedString(output, "[", ", ", "]", maxFields)} $name"
  }

  // Creates a fresh ScanBuilder from the table with this relation's options.
  def newScanBuilder(): ScanBuilder = {
    table.asBatchReadable.newScanBuilder(options.toDataSourceOptions)
  }

  // Ask the source for size statistics when it supports reporting them;
  // otherwise fall back to the session's default size estimate.
  override def computeStats(): Statistics = {
    val scan = newScanBuilder().build()
    scan match {
      case r: SupportsReportStatistics =>
        val statistics = r.estimateStatistics()
        Statistics(sizeInBytes = statistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
      case _ =>
        Statistics(sizeInBytes = conf.defaultSizeInBytes)
    }
  }

  // Re-mint attribute expression IDs, e.g. for self-joins.
  override def newInstance(): DataSourceV2Relation = {
    copy(output = output.map(_.newInstance()))
  }
}
/**
* A specialization of [[DataSourceV2Relation]] with the streaming bit set to true.
*
* Note that, this plan has a mutable reader, so Spark won't apply operator push-down for this plan,
* to avoid making the plan mutable. We should consolidate this plan and [[DataSourceV2Relation]]
* after we figure out how to apply operator push-down for streaming data sources.
*/
case class StreamingDataSourceV2Relation(
    output: Seq[Attribute],
    scan: Scan,
    stream: SparkDataStream,
    startOffset: Option[Offset] = None,
    endOffset: Option[Offset] = None)
  extends LeafNode with MultiInstanceRelation {

  override def isStreaming: Boolean = true

  override def newInstance(): LogicalPlan = copy(output = output.map(_.newInstance()))

  // Same statistics logic as DataSourceV2Relation, but based on the already
  // constructed `scan` rather than building a new one.
  override def computeStats(): Statistics = scan match {
    case r: SupportsReportStatistics =>
      val statistics = r.estimateStatistics()
      Statistics(sizeInBytes = statistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
    case _ =>
      Statistics(sizeInBytes = conf.defaultSizeInBytes)
  }
}
object DataSourceV2Relation {
  /** Builds a relation for `table`, deriving the output attributes directly
    * from the table's schema. */
  def create(table: Table, options: Map[String, String]): DataSourceV2Relation =
    DataSourceV2Relation(table, table.schema().toAttributes, options)
}
| WindCanDie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Relation.scala | Scala | apache-2.0 | 3,923 |
package org.bfn.ninetynineprobs
import org.scalatest._
// Specs for P13 (run-length encoding computed directly, without pack).
// Encoded pairs are (count, element).
class P13Spec extends UnitSpec { // adapted from P10Spec
  "encode" should "leave an empty list unchanged" in {
    assert(P13.encodeDirect(List()) == List())
  }

  it should "count unique elements as '1'" in {
    assert(P13.encodeDirect(List(1, 2, 3)) == List((1, 1), (1, 2), (1, 3)))
  }

  // Only *consecutive* runs are merged; repeated values separated by other
  // elements produce separate pairs.
  it should "count consecutive duplicates" in {
    assert(P13.encodeDirect(List(1, 1, 2, 3, 3, 2, 1, 1, 1)) ==
      List((2, 1), (1, 2), (2, 3), (1, 2), (3, 1)))
  }

  it should "work with String lists as well" in {
    assert(P13.encodeDirect(List("foo", "foo", "bar")) ==
      List((2, "foo"), (1, "bar")))
  }
}
| bfontaine/99Scala | src/test/scala/P13Spec.scala | Scala | mit | 672 |
package pl.combosolutions.backup.tasks
import java.nio.file.{ Path, Paths }
import pl.combosolutions.backup.{ Async, Reporting }
import scala.collection.mutable
object BackupFiles extends Reporting {

  /** Transformation from a list of files to the (async) list of copied files. */
  type Paths2Result = (List[Path]) => Async[List[Path]]

  /** Name of the subdirectory inside the backup dir that holds file copies. */
  val filesBackupDir = "files"

  // Copies each file from its original location into the backup directory.
  private def backupAction(implicit withSettings: Settings): Paths2Result = { files =>
    implicit val elevation = withSettings.withElevation
    implicit val cleaner = withSettings.cleaner
    reporter inform s"Backing up files: $files"
    withSettings.components.fileSystemService.copyFiles(backupPaths(files))
  }

  // Copies each file from the backup directory back to its original location.
  private def restoreAction(implicit withSettings: Settings): Paths2Result = { files =>
    implicit val elevation = withSettings.withElevation
    implicit val cleaner = withSettings.cleaner
    reporter inform s"Restoring files: $files"
    withSettings.components.fileSystemService.copyFiles(restorePaths(files))
  }

  // Pairs each file's absolute path with its destination inside the backup
  // dir; the destination file name is the absolute path's hashCode.
  private def backupPaths(files: List[Path])(implicit withSettings: Settings) =
    for (file <- files) yield {
      val original = file.toAbsolutePath
      val copy = Paths.get(withSettings.backupDir.toString, filesBackupDir, original.hashCode.toString)
      (original, copy)
    }

  // Restoring copies in the opposite direction.
  private def restorePaths(files: List[Path])(implicit withSettings: Settings) =
    backupPaths(files).map { case (original, copy) => (copy, original) }

  /** Sub-task performing the backup copy, fed by its parent's file list. */
  class BackupSubTaskBuilder[ChildResult](implicit withSettings: Settings)
    extends ParentDependentSubTaskBuilder[List[Path], List[Path], ChildResult](backupAction)

  /** Sub-task performing the restore copy, fed by its parent's file list. */
  class RestoreSubTaskBuilder[ChildResult](implicit withSettings: Settings)
    extends ParentDependentSubTaskBuilder[List[Path], List[Path], ChildResult](restoreAction)
}
import BackupFiles._
// Task pairing the file backup and restore sub-tasks.
// CBR/CRR are the result types of the child backup/restore tasks.
class BackupFiles[CBR, CRR](implicit withSettings: Settings)
  extends TaskBuilder[List[Path], List[Path], CBR, List[Path], List[Path], CRR](
    new BackupSubTaskBuilder[CBR],
    new RestoreSubTaskBuilder[CRR]
  )
// $COVERAGE-OFF$ Hard to test, no real benefit
// Configurator wiring a BackupFiles task beneath a parent configurator.
class BackupFilesConfigurator[CBR, CRR](
  parent: Configurator[List[Path], _, List[Path], List[Path], _, List[Path]],
  val initialSettings: Settings
) extends Configurator[List[Path], List[Path], CBR, List[Path], List[Path], CRR](Some(parent)) {
  implicit val withSettings = initialSettings
  override val builder = new BackupFiles[CBR, CRR]
  // Mutable accumulator for file names entered during configuration.
  val files: mutable.MutableList[String] = mutable.MutableList()
}
// $COVERAGE-ON$
| MateuszKubuszok/BackupDSL | modules/tasks/src/main/scala/pl/combosolutions/backup/tasks/BackupFiles.scala | Scala | mit | 2,364 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils.timer
import kafka.utils.MockTime
import scala.collection.mutable
// Deterministic Timer for tests: tasks fire only when advanceClock moves the
// mock clock past their expiration, never from a background thread.
class MockTimer extends Timer {

  val time = new MockTime

  // Min-heap on expiration time (PriorityQueue is max-first, hence .reverse).
  private val taskQueue = mutable.PriorityQueue[TimerTaskEntry]()(Ordering[TimerTaskEntry].reverse)

  // Tasks with a non-positive delay run immediately on the caller's thread;
  // all others are queued until the clock is advanced.
  def add(timerTask: TimerTask) {
    if (timerTask.delayMs <= 0)
      timerTask.run()
    else {
      taskQueue synchronized {
        taskQueue.enqueue(new TimerTaskEntry(timerTask, timerTask.delayMs + time.milliseconds))
      }
    }
  }

  // Advances the mock clock by timeoutMs and runs every expired, non-cancelled
  // task. Returns true if at least one task executed.
  def advanceClock(timeoutMs: Long): Boolean = {
    time.sleep(timeoutMs)

    var executed = false
    val now = time.milliseconds
    var hasMore = true
    while (hasMore) {
      hasMore = false
      // Dequeue one expired entry at a time under the lock, but run the task
      // outside it so task callbacks can re-schedule without deadlocking.
      val head = taskQueue synchronized {
        // NOTE(review): strict `>` means a task expiring exactly at `now` does
        // not fire until the clock moves past its expiration — confirm intended.
        if (taskQueue.nonEmpty && now > taskQueue.head.expirationMs) {
          val entry = Some(taskQueue.dequeue())
          hasMore = taskQueue.nonEmpty
          entry
        } else
          None
      }
      head.foreach { taskEntry =>
        if (!taskEntry.cancelled) {
          val task = taskEntry.timerTask
          task.run()
          executed = true
        }
      }
    }
    executed
  }

  def size: Int = taskQueue.size

  // Nothing to release: no background threads are started.
  override def shutdown(): Unit = {}
}
| ollie314/kafka | core/src/test/scala/unit/kafka/utils/timer/MockTimer.scala | Scala | apache-2.0 | 2,037 |
package edu.umass.ciir.strepsimur.galago.stopstructure
import scala.collection.mutable
import org.lemurproject.galago.core.util.WordLists
import org.lemurproject.galago.core.retrieval.Retrieval
/**
* User: dietz
* Date: 2/19/14
* Time: 2:48 PM
*/
/**
 * Removes leading "stop structure" phrases (e.g. "what is the") from queries,
 * based on a word list configured via the retrieval's global parameters.
 */
class StopStructuring(retrieval: Retrieval) {

  /** Known stop structures, each normalized to end with exactly one trailing
    * space — so a structure of k tokens contains exactly k spaces
    * (k-1 separators plus the trailing one). */
  val stopstructureMap: mutable.TreeSet[String] = {
    val stopstructurelist = retrieval.getGlobalParameters.get("stopstructurelist",
      "stopStructure")
    import scala.collection.JavaConversions._
    val ss_set: Set[String] = WordLists.getWordList(stopstructurelist).toSet
    val defaultStopStructures = new mutable.TreeSet[String]()
    for (ss <- ss_set) {
      defaultStopStructures.add(ss.trim + " ")
    }
    defaultStopStructures
  }

  /**
   * Strips the longest matching stop-structure prefix from a raw query string.
   * Returns the original text unchanged when no prefix matches.
   */
  def removeStopStructure(origQueryText: String): String = {
    // replaceAll preserves string length, so the length of the matched,
    // space-joined prefix is a valid character offset into the original text.
    val tokens = origQueryText.toLowerCase.replaceAll("[^a-z]", " ").split(" ") // split on non-alpha characters
    // Candidate prefixes, longest first, each rendered as "tok1 tok2 ... tokN ".
    val stopPrefixes = (1 to tokens.length).reverse
      .map(tokens.slice(0, _).mkString("", " ", " "))
    val stopStructure = stopPrefixes.find(stopstructureMap.contains)
    stopStructure match {
      case None => origQueryText
      case Some(structure) => origQueryText.substring(structure.length)
    }
  }

  /**
   * Strips the longest matching stop-structure prefix from an
   * already-tokenized query. Returns the input unchanged when none matches.
   */
  def removeStopStructure(tokenizedQuery: Seq[String]): Seq[String] = {
    val tokens = tokenizedQuery.map(_.toLowerCase).map(_.replaceAll("[^a-z]", "")) // zap nonalpha characters
    val stopPrefixes = (1 to tokens.length).reverse
      .map(tokens.slice(0, _).mkString("", " ", " "))
    val stopStructure = stopPrefixes.find(stopstructureMap.contains)
    stopStructure match {
      case None => tokenizedQuery
      case Some(structure) =>
        // BUG FIX: this previously counted `_ == " "`, a Char-to-String
        // comparison that is always false, so countStructureTokens was always
        // 0 and the matched prefix was never dropped. Each structure token
        // contributes exactly one space character (separator or trailing), so
        // counting the space *character* yields the token count.
        val countStructureTokens = structure.count(_ == ' ')
        tokenizedQuery.slice(countStructureTokens, tokenizedQuery.length)
    }
  }
}
| laura-dietz/strepsimur | src/main/scala/edu/umass/ciir/strepsimur/galago/stopstructure/StopStructuring.scala | Scala | apache-2.0 | 2,001 |
package leo.modules.procedures
import leo.datastructures.Term.:::>
import leo.datastructures.{BoundFront, Signature, Subst, Term, Type}
import leo.datastructures.Term.local._
import leo.modules.HOLSignature.{&, Exists, Forall, HOLUnaryConnective, Impl, Not, |||}
/**
* Miniscoping of formulas (does not assume NNF).
*
* @author Max Wisniewski
* @note Moved and to be refactored.
*/
/**
 * Miniscoping: pushes quantifiers inward so each binds the smallest subformula
 * that actually uses its variable. Quantifiers are stripped while descending
 * (recorded in the `quants` stack, adjusted for polarity) and re-attached as
 * deep as possible, dualized (Forall/Exists) according to the polarity at the
 * point of re-emission.
 */
object Miniscoping extends Function1[Term, Term] {

  final def apply(term: Term, polarity: Boolean): Term = apply0(term, polarity, Vector.empty)
  final def apply(term: Term): Term = apply(term, polarity = true)

  // Stripped quantifiers, outermost first. Relative to the polarity at the
  // point of stripping: (true, ty) ~ Forall, (false, ty) ~ Exists.
  type QUANT_LIST = Seq[(Boolean, Type)]
  type QUANT_ITERATOR = Iterator[(Boolean, Type)]

  // Bit flags describing into which side(s) of a binary connective a
  // quantifier may be pushed.
  type PUSH_TYPE = Int
  @inline final val BOTH : PUSH_TYPE = 3
  @inline final val NONE : PUSH_TYPE = 0
  @inline final val LEFT : PUSH_TYPE = 1
  @inline final val RIGHT : PUSH_TYPE = 2

  /**
   *
   * Performs miniscoping.
   * quants is a stack of removed quantifiers, where
   *   (true, ty)  --> Forall(\\(ty)...)
   *   (false, ty) --> Exists(\\(ty)...)
   *
   * @param t The term to miniscope
   * @param pol The current polarity
   * @param quants The current quantifier
   * @return a miniscoped term
   */
  private[this] final def apply0(t : Term, pol : Boolean, quants : QUANT_LIST): Term = {
    t match {
      case Exists(ty :::> body) => apply0(body, pol, quants :+ (!pol, ty))
      case Forall(ty :::> body) => apply0(body, pol, quants :+ (pol, ty))
      // BUG FIX: the negation itself must be preserved. The recursive call
      // flips the polarity, which dualizes pending quantifiers on re-emission
      // (matching ¬∀x.φ ≡ ∃x.¬φ); the previous code dropped the Not wrapper
      // and so returned a term equivalent to the *un-negated* body.
      case Not(a) => Not(apply0(a, !pol, quants))
      case (a & b) =>
        val (rest, leftQ, leftSub, rightQ, rightSub) = pushQuants(a, b, quants, pol, pol)
        val amini = apply0(a.substitute(leftSub).betaNormalize, pol, leftQ)
        val bmini = apply0(b.substitute(rightSub).betaNormalize, pol, rightQ)
        prependQuantList(&(amini, bmini), pol, rest)
      case (a ||| b) =>
        val (rest, leftQ, leftSub, rightQ, rightSub) = pushQuants(a, b, quants, pol, !pol)
        val amini = apply0(a.substitute(leftSub).betaNormalize, pol, leftQ)
        val bmini = apply0(b.substitute(rightSub).betaNormalize, pol, rightQ)
        prependQuantList(|||(amini, bmini), pol, rest)
      case Impl(a, b) =>
        // NOTE(review): unlike the & and ||| branches, no betaNormalize is
        // applied after substitution here — confirm whether intentional.
        val (rest, leftQ, leftSub, rightQ, rightSub) = pushQuants(a, b, quants, pol, !pol)
        val amini = apply0(a.substitute(leftSub), !pol, leftQ)
        val bmini = apply0(b.substitute(rightSub), pol, rightQ)
        prependQuantList(Impl(amini, bmini), pol, rest)
      case other =>
        prependQuantList(other, pol, quants.reverseIterator)
    }
  }

  /**
   * Distributes the pending quantifiers over the two sides of a binary
   * connective, stopping at the first quantifier that cannot be pushed.
   *
   * @param left The left side of the operator
   * @param right The right side of the operator
   * @param quants The quantifiers seen to this point
   * @param pol the current polarity
   * @param and if(true) op = AND else op = OR
   * @return (remaining quantifiers, quants pushed left, left substitution,
   *         quants pushed right, right substitution) — the substitutions
   *         renumber de Bruijn indices after dropping quantifiers the
   *         respective side does not need.
   */
  private[this] final def pushQuants(left : Term, right : Term, quants : QUANT_LIST, pol : Boolean, and : Boolean) : (QUANT_ITERATOR, QUANT_LIST, Subst, QUANT_LIST, Subst) = {
    val it = quants.reverseIterator
    var leftQ : QUANT_LIST = Vector() // Quantifiers pushed left
    var leftSubst : Seq[Int] = Seq() // Substitution (reversed) removed Quants left
    var rightQ : QUANT_LIST = Vector() // Quantifiers pushed right
    var rightSubst : Seq[Int] = Seq() // Substitution (reversed) removed Quants right
    var loop = 1
    while(it.hasNext){
      val q@(quant , ty) = it.next()
      val push = testPush(left, right, loop, quant, and)
      if(push != 0) {
        if ((push & LEFT) == LEFT) leftQ = q +: leftQ // Push the quantifier left if possible
        val nFrontl = leftQ.size
        leftSubst = (if(nFrontl > 0) nFrontl else 1) +: leftSubst // Update indizes
        if((push & RIGHT) == RIGHT) rightQ = q +: rightQ
        val nFrontr = rightQ.size
        rightSubst = (if(nFrontr > 0) nFrontr else 1) +: rightSubst
      } else {
        // First unpushable quantifier: return it (and everything outside it)
        // as the prefix to be re-attached at this level.
        val lSub = revListToSubst(leftSubst, leftQ.size)
        val rSub = revListToSubst(rightSubst, rightQ.size)
        return (Iterator(q)++it, leftQ, lSub, rightQ, rSub)
      }
      loop += 1
    }
    return (it, leftQ, revListToSubst(leftSubst, leftQ.size), rightQ, revListToSubst(rightSubst, rightQ.size))
  }

  // Builds a substitution mapping each old bound index to its new front
  // position, shifting the remainder by the number of kept quantifiers.
  @inline private[this] final def revListToSubst(preSubst : Seq[Int], shift : Int) = {
    var s : Subst = Subst.shift(shift)
    val it = preSubst.iterator
    while(it.hasNext){
      s = BoundFront(it.next()) +: s
    }
    s
  }

  // Returns the sides (bit flags) into which the quantifier binding `bound`
  // may be pushed: a side qualifies when it mentions the bound variable, and
  // pushing into BOTH sides is forbidden when the quantifier does not
  // distribute over the connective.
  @inline private[this] final def testPush(left : Term, right : Term, bound : Int, quant : Boolean, and : Boolean) : PUSH_TYPE = {
    var result = 0
    if (left.looseBounds.contains(bound)) result |= 1
    if (right.looseBounds.contains(bound)) result |= 2
    if((!quant && and || quant && !and) && result == 3) 0
    else result
  }

  /**
   * Re-attaches a quantifier prefix around `t`, innermost first.
   *
   * @param quants Reverse Iterator of the quantifier prefix
   */
  private[this] final def prependQuantList(t : Term, pol : Boolean, quants : QUANT_ITERATOR) : Term = {
    var itTerm : Term = t
    while(quants.hasNext){
      val (q, ty) = quants.next()
      itTerm = quantToTerm(q, pol)(\\(ty)(itTerm))
    }
    itTerm
  }

  // Resolves a stored quantifier flag back to a connective, dualizing it when
  // the emission polarity differs from the polarity at stripping time.
  private[this] final def quantToTerm(quant : Boolean, pol : Boolean) : HOLUnaryConnective = {
    val realQuant = if(pol) quant else !quant
    if(realQuant) Forall else Exists
  }
}
| lex-lex/Leo-III | src/main/scala/leo/modules/procedures/Miniscoping.scala | Scala | bsd-3-clause | 5,342 |
package akka.persistence.jdbc.integration
import akka.persistence.jdbc.query.{
HardDeleteQueryTest,
MysqlCleaner,
OracleCleaner,
PostgresCleaner,
SqlServerCleaner
}
// Concrete per-database instantiations of HardDeleteQueryTest: each pairs a
// database-specific config file with the matching schema-cleanup trait.
class PostgresHardDeleteQueryTest
    extends HardDeleteQueryTest("postgres-application-with-hard-delete.conf")
    with PostgresCleaner

class MySQLHardDeleteQueryTest extends HardDeleteQueryTest("mysql-application-with-hard-delete.conf") with MysqlCleaner

class OracleHardDeleteQueryTest
    extends HardDeleteQueryTest("oracle-application-with-hard-delete.conf")
    with OracleCleaner

class SqlServerHardDeleteQueryTest
    extends HardDeleteQueryTest("sqlserver-application-with-hard-delete.conf")
    with SqlServerCleaner
| dnvriend/akka-persistence-jdbc | core/src/it/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala | Scala | apache-2.0 | 708 |
package org.scalaide.ui.internal.jdt.model
import java.util.{ Map => JMap }
import org.eclipse.jdt.ui.JavaUI
import org.eclipse.jdt.internal.core.Openable
import org.eclipse.jface.text.{ Position => JFacePosition }
import org.eclipse.jface.text.source
import scala.tools.eclipse.contribution.weaving.jdt.IScalaOverrideIndicator
import org.eclipse.ui.texteditor.ITextEditor
import org.eclipse.jdt.internal.ui.javaeditor.EditorUtility
import org.scalaide.core.internal.compiler.ScalaPresentationCompiler
import org.scalaide.core.IScalaPlugin
import org.scalaide.logging.HasLogger
import org.scalaide.core.internal.jdt.model.ScalaCompilationUnit
import org.scalaide.core.internal.jdt.util.JDTUtils
import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits.RichResponse
object ScalaOverrideIndicatorBuilder {
  // Annotation type id the JDT editor uses to render override markers.
  val OVERRIDE_ANNOTATION_TYPE = "org.eclipse.jdt.ui.overrideIndicator"
}
// Override/implements marker for a Scala member whose overridden base member
// is Java-defined; `open` navigates to that Java method through the JDT model.
case class JavaIndicator(scu: ScalaCompilationUnit,
  packageName: String,
  typeNames: String,
  methodName: String,
  methodTypeSignatures: List[String],
  text: String,
  val isOverwrite: Boolean) extends source.Annotation(ScalaOverrideIndicatorBuilder.OVERRIDE_ANNOTATION_TYPE, false, text) with IScalaOverrideIndicator {

  def open(): Unit = {
    // Resolve the declaring type, then the exact overload by signature, and
    // open it in a Java editor if it exists.
    val tpe0 = JDTUtils.resolveType(scu.scalaProject.newSearchableEnvironment().nameLookup, packageName, typeNames, 0)
    tpe0 foreach { (tpe) =>
      val method = tpe.getMethod(methodName, methodTypeSignatures.toArray)
      if (method.exists)
        JavaUI.openInEditor(method, true, true);
    }
  }
}
// Mixed into the presentation compiler: walks a compilation unit's tree and
// collects override/implements indicator annotations for the editor margin.
trait ScalaOverrideIndicatorBuilder { self : ScalaPresentationCompiler =>

  import ScalaOverrideIndicatorBuilder.OVERRIDE_ANNOTATION_TYPE

  // Marker for a Scala-defined base member; `open` jumps to its declaration.
  case class ScalaIndicator(scu: ScalaCompilationUnit, text: String, base: Symbol, val isOverwrite: Boolean)
    extends source.Annotation(OVERRIDE_ANNOTATION_TYPE, false, text) with IScalaOverrideIndicator {
    def open = {
      // Resolve the base symbol's declaration on the presentation compiler
      // thread, then reveal it in a text editor.
      asyncExec{ findDeclaration(base, scu.scalaProject.javaProject) }.getOption().flatten map {
        case (file, pos) =>
          EditorUtility.openInEditor(file, true) match {
            case editor: ITextEditor => editor.selectAndReveal(pos, 0)
            case _ =>
          }
      }
    }
  }

  class OverrideIndicatorBuilderTraverser(scu : ScalaCompilationUnit, annotationMap : JMap[AnyRef, AnyRef]) extends Traverser with HasLogger {
    override def traverse(tree: Tree): Unit = {
      tree match {
        // Only definitions with a real source range get indicators.
        case defn: DefTree if (defn.symbol ne NoSymbol) && defn.symbol.pos.isOpaqueRange =>
          try {
            for(base <- defn.symbol.allOverriddenSymbols) {
              // "implements" when the base was abstract and this def is concrete.
              val isOverwrite = base.isDeferred && !defn.symbol.isDeferred
              val text = (if (isOverwrite) "implements " else "overrides ") + base.fullName
              val position = new JFacePosition(defn.pos.start, 0)
              if (base.isJavaDefined) {
                // Java bases are addressed via package/type/method signature so
                // the JDT can resolve them without the Scala compiler.
                val packageName = base.enclosingPackage.fullName
                val typeNames = enclosingTypeNames(base).mkString(".")
                val methodName = base.name.toString
                val paramTypes = base.tpe.paramss.flatMap(_.map(_.tpe))
                val methodTypeSignatures = paramTypes.map(mapParamTypeSignature(_))
                annotationMap.put(JavaIndicator(scu, packageName, typeNames, methodName, methodTypeSignatures, text, isOverwrite), position)
              } else annotationMap.put(ScalaIndicator(scu, text, base, isOverwrite), position)
            }
          } catch {
            // Indicator building must never break editing; log and continue.
            case ex: Throwable => eclipseLog.error("Error creating override indicators for %s".format(scu.file.path), ex)
          }
        case _ =>
      }
      super.traverse(tree)
    }
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/internal/jdt/model/ScalaOverrideIndicatorBuilder.scala | Scala | bsd-3-clause | 3,715 |
package uk.gov.gds.ier.transaction.ordinary.nationality
import uk.gov.gds.ier.validation.ErrorTransformForm
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.transaction.ordinary.InprogressOrdinary
import uk.gov.gds.ier.transaction.ordinary.confirmation.ConfirmationMustache
import uk.gov.gds.ier.form.OrdinaryFormImplicits
trait NationalityMustache extends StepTemplate[InprogressOrdinary]
  with OrdinaryFormImplicits {

  // View model handed to the "ordinary/nationality" mustache template.
  case class NationalityModel(
      question:Question,
      nationality: FieldSet,
      britishOption: Field,
      irishOption: Field,
      hasOtherCountryOption: Field,
      otherCountry: FieldSet,
      otherCountries0: Field,
      otherCountries1: Field,
      otherCountries2: Field,
      noNationalityReason: Field,
      noNationalityReasonShowFlag: String,
      emailField: Field
  ) extends MustacheData

  val mustache = MultilingualTemplate("ordinary/nationality") { implicit lang =>
    (form, postEndpoint) =>
    implicit val progressForm = form

    // "-open" keeps the free-text reason panel expanded when a reason was
    // already entered; the empty string leaves it collapsed.
    val nationalityReason = form(keys.nationality.noNationalityReason).value
    val nationalityReasonClass = nationalityReason match {
      case Some("") | None => ""
      case _ => "-open"
    }

    val emailAddress = form(keys.contact.email.detail).value

    val hasOtherCountryOption = CheckboxField(
      key = keys.nationality.hasOtherCountry,
      value = "true"
    )

    NationalityModel(
      question = Question(
        postUrl = postEndpoint.url,
        errorMessages = Messages.translatedGlobalErrors(form),
        title = Messages("ordinary_nationality_title")
      ),
      nationality = FieldSet(keys.nationality),
      britishOption = CheckboxField(
        key = keys.nationality.british,
        value = "true"
      ),
      irishOption = CheckboxField(
        key = keys.nationality.irish,
        value = "true"
      ),
      // NOTE(review): double quotes in the rendered attributes are swapped for
      // single quotes, presumably so they can be embedded in template markup —
      // confirm against the mustache template.
      hasOtherCountryOption = hasOtherCountryOption.copy(
        attributes = hasOtherCountryOption.attributes.replaceAll("\\"", "'")
      ),
      otherCountry = FieldSet(keys.nationality.otherCountries),
      // Up to three "other country" entries are rendered as separate inputs.
      otherCountries0 = TextField(keys.nationality.otherCountries.item(0)),
      otherCountries1 = TextField(keys.nationality.otherCountries.item(1)),
      otherCountries2 = TextField(keys.nationality.otherCountries.item(2)),
      noNationalityReason = TextField(keys.nationality.noNationalityReason),
      noNationalityReasonShowFlag = nationalityReasonClass,
      emailField = TextField(
        key = keys.contact.email.detail,
        default = emailAddress
      )
    )
  }
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/ordinary/nationality/NationalityMustache.scala | Scala | mit | 2,561 |
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
/** Renders an HTML `<base>` element (document base URL / default link target). */
class base( val attributes: Attributes )
  extends markup.base
  with Tag.Empty[base]
{
  // Convenience constructor merging the optional href/target pair into the
  // attribute set (None values are dropped by the `~~` merge).
  def this( href: Option[String], target: Option[base.target], attributes: Attributes ) =
  {
    this( attributes ~~ ( ( "href" -> href, "target" -> target ) ) )
  }
}
/** Factory for [[base]] tags; mixes in `property.a` for the `target` type. */
object base
  extends Tag.Empty.Appliable[base]
  with property.a
{
  def apply( href: Option[String] = None, target: Option[target] = None, attributes: Attributes = Attributes.empty ) =
  {
    new base( href, target, attributes )
  }
}
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/base.scala | Scala | mit | 540 |
package org.http4s
package laws
import cats.implicits._
import cats.effect._
import cats.laws._
import org.http4s.headers.{`Content-Length`, `Transfer-Encoding`}
trait EntityEncoderLaws[F[_], A] {
  implicit def F: Sync[F]
  implicit def encoder: EntityEncoder[F, A]

  // Law: when the encoder declares a length for the entity, it must equal the
  // actual number of bytes the body emits (vacuously true when undeclared).
  def accurateContentLengthIfDefined(a: A): IsEq[F[Boolean]] =
    (for {
      entity <- F.pure(encoder.toEntity(a))
      body <- entity.body.compile.toVector
      bodyLength = body.size.toLong
      contentLength = entity.length
    } yield contentLength.fold(true)(_ === bodyLength)) <-> F.pure(true)

  // Laws: framing headers are computed per entity and must not appear among
  // the encoder's static headers.
  def noContentLengthInStaticHeaders: Boolean =
    encoder.headers.get(`Content-Length`).isEmpty

  def noTransferEncodingInStaticHeaders: Boolean =
    encoder.headers.get(`Transfer-Encoding`).isEmpty
}
object EntityEncoderLaws {
  /** Summons a laws instance bundling the implicit Sync and EntityEncoder. */
  def apply[F[_], A](
      implicit F0: Sync[F],
      entityEncoderFA: EntityEncoder[F, A]
  ): EntityEncoderLaws[F, A] = new EntityEncoderLaws[F, A] {
    val F = F0
    val encoder = entityEncoderFA
  }
}
| ChristopherDavenport/http4s | laws/src/main/scala/org/http4s/laws/EntityEncoderLaws.scala | Scala | apache-2.0 | 1,020 |
package json.writers {
import com.plasmaconduit.json._;
  // NOTE(review): the "Gen" prefix and uniform shape suggest this object is
  // generated code — manual edits here may be overwritten by the generator.
  // It bundles JSON writer (type-class) instances for the app's models.
  object GenJsWriters extends AnyRef {
    // Implicit aliases exposing the concrete writer objects below as
    // implicitly-resolvable JsWriter instances.
    implicit lazy val DateJsWriterImplicit = DateJsWriter;
    implicit lazy val DateRangeJsWriterImplicit = DateRangeJsWriter;
    implicit lazy val ItemJsWriterImplicit = ItemJsWriter;
    implicit lazy val PhoneNumberJsWriterImplicit = PhoneNumberJsWriter;
    implicit lazy val UserJsWriterImplicit = UserJsWriter;
    implicit lazy val VehicleJsWriterImplicit = VehicleJsWriter;
    implicit lazy val CarJsWriterImplicit = CarJsWriter;
    implicit lazy val TruckJsWriterImplicit = TruckJsWriter;
    implicit lazy val BoatJsWriterImplicit = BoatJsWriter;
    // Delegates to the model's own LocalTime writer.
    object DateJsWriter extends JsWriter[org.company.app.models.Date] {
      val dateWriter = org.company.app.models.Date.DateLocalTimeWriter;
      override def write(m: org.company.app.models.Date): JsValue = {
        dateWriter.write(m.date)
      }
    };
    object DateRangeJsWriter extends JsWriter[org.company.app.models.DateRange] {
      override def write(m: org.company.app.models.DateRange): JsValue = {
        JsObject(Tuple2("start", m.start), Tuple2("end", m.end))
      }
    };
    object ItemJsWriter extends JsWriter[org.company.app.models.Item] {
      val nameWriter = org.company.app.models.Item.ItemNameJsWriter;
      override def write(m: org.company.app.models.Item): JsValue = {
        JsObject(Tuple2("id", m.id), Tuple2("name", nameWriter.write(m.name)))
      }
    };
    // Phone numbers serialize to their raw string value.
    object PhoneNumberJsWriter extends JsWriter[org.company.app.models.PhoneNumber] {
      override def write(m: org.company.app.models.PhoneNumber): JsValue = {
        JsString(m.value)
      }
    };
    object UserJsWriter extends JsWriter[org.company.app.models.User] {
      override def write(m: org.company.app.models.User): JsValue = {
        JsObject(Tuple2("id", m.id), Tuple2("username", JsString(m.username)), Tuple2("email", JsString(m.email)), Tuple2("items", m.items), Tuple2("lastPurchase", JsOption(m.lastPurchase)))
      }
    };
    // Dispatches on the concrete Vehicle subtype; each case writer adds a
    // discriminating "vehicleType" field.
    object VehicleJsWriter extends JsWriter[org.company.app.models.Vehicle] {
      override def write(m: org.company.app.models.Vehicle): JsValue = {
        m match {
          case (c @ (_: org.company.app.models.Car)) => CarJsWriter.write(c)
          case (c @ (_: org.company.app.models.Truck)) => TruckJsWriter.write(c)
          case (c @ (_: org.company.app.models.Boat)) => BoatJsWriter.write(c)
        }
      }
    };
    object CarJsWriter extends JsWriter[org.company.app.models.Car] {
      override def write(m: org.company.app.models.Car): JsValue = {
        JsObject(Tuple2("seats", m.seats), Tuple2("vehicleType", "car"))
      }
    };
    object TruckJsWriter extends JsWriter[org.company.app.models.Truck] {
      override def write(m: org.company.app.models.Truck): JsValue = {
        JsObject(Tuple2("space", m.space), Tuple2("vehicleType", "truck"))
      }
    };
    object BoatJsWriter extends JsWriter[org.company.app.models.Boat] {
      override def write(m: org.company.app.models.Boat): JsValue = {
        JsObject(Tuple2("seats", m.seats), Tuple2("vehicleType", "boat"))
      }
    }
  }
} | Agrosis/jcg | examples/src/main/scala/json/writers/GenJsWriters.scala | Scala | mit | 3,142 |
package scala.pickling.`null`.binary
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, binary._
// Non-final class used as a nullable reference field of C.
class D
// Final class used as a nullable reference field of C.
// NOTE(review): final vs. non-final may exercise different pickler code paths —
// verify against the scala-pickling documentation.
final class E
// Test payload: the test below pickles an instance whose x, d and e are all null.
case class C(val x: String, val y: Int, val d: D, val e: E)
/** Verifies binary pickling of null fields: the pickled byte image must match a
 *  fixed expectation, and unpickling must round-trip to an equal-looking value.
 */
class NullBinaryTest extends FunSuite {
  test("main") {
    // Every reference field is null; only the Int carries a real value.
    val original = C(null, 0, null, null)
    val pickle = original.pickle
    // Expected byte image of the pickle (includes the fully-qualified class name
    // bytes; -2 entries appear where the null references were written).
    val expectedBytes =
      "[0,0,0,28,115,99,97,108,97,46,112,105,99,107,108,105,110,103,46,110,117,108,108,46,98,105,110,97,114,121,46,67,-2,0,0,0,0,-2,-2]"
    assert(pickle.value.mkString("[", ",", "]") === expectedBytes)
    // Round trip: unpickling yields a value whose toString matches the original.
    assert(pickle.unpickle[C].toString === original.toString)
  }
}
| beni55/pickling | core/src/test/scala/pickling/run/null-binary.scala | Scala | bsd-3-clause | 576 |
package org.openurp.edu.eams.teach.election.service.event
import org.beangle.commons.event.BusinessEvent
import org.openurp.edu.teach.lesson.CourseTake
import ElectCourseEvent._
object ElectCourseEvent {
  /** Factory shorthand wrapping a [[CourseTake]] in a new [[ElectCourseEvent]]. */
  def create(source: CourseTake): ElectCourseEvent = new ElectCourseEvent(source)
}
/** Business event raised when a course is elected; the event source is the
 *  [[CourseTake]] that triggered it.
 */
@SerialVersionUID(2721467858671088410L)
class ElectCourseEvent(source: CourseTake) extends BusinessEvent(source) {
  // The constructor parameter is already statically typed as CourseTake, so the
  // original `source.asInstanceOf[CourseTake]` was a redundant identity cast.
  override def getSource(): CourseTake = source
}
| openurp/edu-eams-webapp | election/src/main/scala/org/openurp/edu/eams/teach/election/service/event/ElectCourseEvent.scala | Scala | gpl-3.0 | 485 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import org.scalacheck._
import Prop._
import TaskGen._
/** ScalaCheck properties for the task `Execute` engine. Each property evaluates
 *  tasks under a varying number of worker threads (MaxWorkersGen) and checks the
 *  computed value via `checkResult`; `task`, `tryRun` and the generators come
 *  from [[TaskGen]]. The `|:` operator attaches diagnostic labels shown when a
 *  property fails.
 */
object ExecuteSpec extends Properties("Execute") {
  // Arbitrary Int values used as task payloads.
  val iGen = Arbitrary.arbInt.arbitrary

  // A constant task evaluates to its constant for any worker count.
  property("evaluates simple task") = forAll(iGen, MaxWorkersGen) { (i: Int, workers: Int) =>
    ("Workers: " + workers) |:
      checkResult(tryRun(task(i), false, workers), i)
  }

  // no direct dependencies currently
  /*property("evaluates simple static graph") = forAll(iGen, MaxWorkersGen) { (i: Int, workers: Int) =>
    ("Workers: " + workers) |:
    {
      def result = tryRun(Task(i) dependsOn(task(false),task("a")), false, workers)
      checkResult(result, i)
    }
  }*/

  // A single `map` applied once multiplies the payload: expect i * times.
  property("evaluates simple mapped task") = forAll(iGen, MaxTasksGen, MaxWorkersGen) {
    (i: Int, times: Int, workers: Int) =>
      ("Workers: " + workers) |: ("Value: " + i) |: ("Times: " + times) |: {
        def result = tryRun(task(i).map(_ * times), false, workers)
        checkResult(result, i * times)
      }
  }

  // `times` chained maps each add i to a zero seed: expect i * times.
  // NOTE(review): forAllNoShrink is used here, presumably because shrinking the
  // coupled (i, times) pair would produce misleading counterexamples — confirm.
  property("evaluates chained mapped task") = forAllNoShrink(iGen, MaxTasksGen, MaxWorkersGen) {
    (i: Int, times: Int, workers: Int) =>
      ("Workers: " + workers) |: ("Value: " + i) |: ("Times: " + times) |: {
        val initial = task(0) map (identity[Int])
        def t = (0 until times).foldLeft(initial)((t, ignore) => t.map(_ + i))
        checkResult(tryRun(t, false, workers), i * times)
      }
  }

  // `flatMap` into a freshly built task yields that task's value: i * times.
  property("evaluates simple bind") = forAll(iGen, MaxTasksGen, MaxWorkersGen) {
    (i: Int, times: Int, workers: Int) =>
      ("Workers: " + workers) |: ("Value: " + i) |: ("Times: " + times) |: {
        def result = tryRun(task(i).flatMap(x => task(x * times)), false, workers)
        checkResult(result, i * times)
      }
  }
}
| xuwei-k/xsbt | tasks-standard/src/test/scala/Execute.scala | Scala | apache-2.0 | 1,859 |
package org.pico.cuckoo.filter
import java.lang.{Integer => JInteger}
import org.pico.hash.{Hashable2, Hash64, Hashable}
import org.pico.hash.syntax._
import org.pico.twiddle.Bits
import org.pico.twiddle.instances._
import org.pico.twiddle.syntax.arrayIndexed._
import org.pico.twiddle.syntax.anyVal._
import org.pico.twiddle.syntax.fixedInt._
import scala.annotation.tailrec
import scala.util.Random
/** A cuckoo filter: an approximate-membership data structure supporting insert,
 *  lookup and delete of hashed items, backed by a single bit-packed byte array.
 *
 *  Layout: `totalBuckets` buckets, each `bucketBits` wide, consisting of a
 *  fingerprint-count field (`bucketIndexBits` bits) followed by
 *  `fingerprintsPerBucket` fingerprint slots of `fingerprintBits` bits each.
 *
 *  Fix applied: `insert` previously used `return true` inside a `for`-loop body;
 *  since the body of a `for` comprehension is a closure, that compiles to a
 *  non-local return implemented by throwing NonLocalReturnControl (and the
 *  feature is dropped in Scala 3). It is rewritten below as a plain while loop
 *  with identical behavior.
 *
 *  @param fingerprintsPerBucket number of fingerprint slots per bucket (> 0)
 *  @param fingerprintBits       bit width of each stored fingerprint (> 0)
 *  @param maxNumKicks           max relocations attempted before insert gives up
 *  @param totalBuckets          number of buckets in the filter
 */
class CuckooFilter(fingerprintsPerBucket: Int, fingerprintBits: Bits, maxNumKicks: Int = 5, totalBuckets: Int = 128) {
  require(fingerprintBits > Bits(0))
  require(maxNumKicks > 0)
  require(fingerprintsPerBucket > 0)

  type DI = DummyImplicit

  // Bits needed to encode a count in [0, fingerprintsPerBucket].
  private val bucketIndexBits = Bits(32 - JInteger.numberOfLeadingZeros(fingerprintsPerBucket))

  // Total width of one bucket: count field plus all fingerprint slots.
  private val bucketBits = fingerprintBits * fingerprintsPerBucket + bucketIndexBits

  // Backing storage, rounded up to whole bytes (`/+` is ceiling division).
  private val buffer = new Array[Byte]((bucketBits * totalBuckets) /+ bitSize[Byte])

  /** Bit offset of the bucket with the given index. */
  def bucketBitIndex(index: Long): Bits = bucketBits * index

  /** Bit offset of the bucket selected by the given hash (sign bit masked off
   *  before taking the modulus so the index is non-negative).
   */
  def bucketBitIndex(hash: Hash64)(implicit ev: DummyImplicit): Bits = {
    bucketBitIndex((hash.value & 0x7fffffffffffffffL) % totalBuckets)
  }

  /** Number of fingerprints currently stored in the bucket at this bit offset. */
  def fingerprintsInBucketAt(bucketBitIndex: Bits): Int = buffer.unsigned(bucketBitIndex, bucketIndexBits).toInt

  /** Overwrites the stored fingerprint count of the bucket at this bit offset. */
  def fingerprintsInBucketAt(bucketBitIndex: Bits, value: Long): Unit = {
    buffer.update(bucketBitIndex, bucketIndexBits, value)
  }

  /** Writes `fingerprint` into slot `fingerprintIndex` of the bucket. */
  def setFingerprint(bucketBitIndex: Bits, fingerprintIndex: Int, fingerprint: Fingerprint): Unit = {
    buffer.update(
      bucketBitIndex + bucketIndexBits + fingerprintBits * fingerprintIndex, fingerprintBits, fingerprint.value)
  }

  /** Reads the fingerprint stored in slot `fingerprintIndex` of the bucket. */
  def getFingerprint(bucketBitIndex: Bits, fingerprintIndex: Int): Fingerprint = {
    Fingerprint(
      buffer.unsigned(bucketBitIndex + bucketIndexBits + fingerprintBits * fingerprintIndex, fingerprintBits))
  }

  /** Raw bit string of one whole bucket — debugging aid. */
  def bucketBits(bucketBitIndex: Bits): String = buffer.bitsString(bucketBitIndex, bucketBits)

  /** Removes one occurrence of `f` from the bucket, compacting by moving the
   *  last stored fingerprint into the vacated slot.
   *  @return true if the fingerprint was present and removed
   */
  def removeFingerprintFromBucketForHash(bucketBitIndex: Bits, f: Fingerprint): Boolean = {
    val fingerprints = fingerprintsInBucketAt(bucketBitIndex)
    val index = fingerprintIndex(bucketBitIndex, f)

    if (index != -1) {
      setFingerprint(bucketBitIndex, index, getFingerprint(bucketBitIndex, fingerprints - 1))
      setFingerprint(bucketBitIndex, fingerprints - 1, Fingerprint(0))
      // NOTE(review): the stored count is not decremented here, so the bucket's
      // count still includes the removed slot — verify this is intentional.
      true
    } else {
      false
    }
  }

  /** Index of `fingerprint` within the bucket's occupied slots, or -1. */
  def fingerprintIndex(bucketBitIndex: Bits, fingerprint: Fingerprint): Int = {
    val fingerprints = fingerprintsInBucketAt(bucketBitIndex)

    @tailrec def go(index: Int): Int = {
      if (index < fingerprints) {
        val f = getFingerprint(bucketBitIndex, index)

        if (f == fingerprint) {
          index
        } else {
          go(index + 1)
        }
      } else {
        -1
      }
    }

    go(0)
  }

  /** True if the bucket currently holds `fingerprint`. */
  def fingerprintIsInBucket(bucketBitIndex: Bits, fingerprint: Fingerprint): Boolean = {
    fingerprintIndex(bucketBitIndex, fingerprint) != -1
  }

  /** Derives a `fingerprintBits`-wide fingerprint from the value's second hash. */
  def fingerprint[A: Hashable2](a: A): Fingerprint = Fingerprint(a.hashed2.value, fingerprintBits)

  /** Appends `f` to the bucket if a free slot remains.
   *  @return true if stored, false if the bucket is full
   */
  def addToBucket(bucketBitIndex: Bits, f: Fingerprint): Boolean = {
    val fingerprints = fingerprintsInBucketAt(bucketBitIndex)

    if (fingerprints < fingerprintsPerBucket) {
      setFingerprint(bucketBitIndex, fingerprints, f)
      fingerprintsInBucketAt(bucketBitIndex, fingerprints + 1)
      true
    } else {
      false
    }
  }

  /** Swaps `f` with a uniformly chosen resident fingerprint of the bucket and
   *  returns the evicted one; returns `f` unchanged if the bucket is empty.
   */
  def swapRandomBucketEntry(bucketBitIndex: Bits, f: Fingerprint): Fingerprint = {
    val fingerprints = fingerprintsInBucketAt(bucketBitIndex)

    if (fingerprints > 0) {
      val candidateIndex = Random.nextInt(fingerprints)
      val candidate = getFingerprint(bucketBitIndex, candidateIndex)
      setFingerprint(bucketBitIndex, candidateIndex, f)
      candidate
    } else {
      f
    }
  }

  /** Inserts a value. Tries both candidate buckets; when both are full, evicts
   *  ("kicks") randomly chosen resident fingerprints and relocates them to their
   *  alternate buckets, up to `maxNumKicks` times.
   *
   *  @return true if the value was stored, false if the filter is considered full
   */
  final def insert[A: Hashable: Hashable2](value: A)(implicit ev0: Hashable[Long], ev1: Hashable[Fingerprint]): Boolean = {
    var f = fingerprint(value)
    val i1 = value.hashed
    val i2 = i1 ^ f.hashed // partial-key cuckoo hashing: alternate bucket index

    addToBucket(bucketBitIndex(i1), f) || addToBucket(bucketBitIndex(i2), f) || {
      // Both candidate buckets are full: relocate existing items.
      // (Rewritten from a `for` loop containing a non-local `return true`.)
      var i = if (Random.nextBoolean()) i1 else i2
      var kicks = 0
      var inserted = false
      while (kicks < maxNumKicks && !inserted) {
        f = swapRandomBucketEntry(bucketBitIndex(i), f)
        i = i ^ f.hashed
        inserted = addToBucket(bucketBitIndex(i), f)
        kicks += 1
      }
      inserted
    }
  }

  /** Membership test: true if either candidate bucket holds the fingerprint.
   *  May yield false positives (fingerprint collisions), never false negatives
   *  for items that were successfully inserted and not deleted.
   */
  final def lookup[A: Hashable: Hashable2](value: A)(implicit ev0: Hashable[Long], ev1: Hashable[Fingerprint]): Boolean = {
    val f = fingerprint(value)
    val i1 = value.hashed
    val i2 = i1 ^ f.hashed

    fingerprintIsInBucket(bucketBitIndex(i1), f) || fingerprintIsInBucket(bucketBitIndex(i2), f)
  }

  /** Removes one stored occurrence of the value's fingerprint from whichever
   *  candidate bucket holds it.
   *  @return true if a fingerprint was removed
   */
  final def delete[A: Hashable: Hashable2](value: A)(implicit ev0: Hashable[Long], ev1: Hashable[Fingerprint]): Boolean = {
    val f = fingerprint(value)
    val i1 = value.hashed
    val i2 = i1 ^ f.hashed

    removeFingerprintFromBucketForHash(bucketBitIndex(i1), f) || removeFingerprintFromBucketForHash(bucketBitIndex(i2), f)
  }
}
| newhoggy/pico-cuckoo-filter | pico-cuckoo-filter/src/main/scala/org/pico/cuckoo/filter/CuckooFilter.scala | Scala | bsd-3-clause | 5,066 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
package com.intel.analytics.zoo.serving
import java.util
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.github.fppt.jedismock.RedisServer
import com.intel.analytics.zoo.serving.http.{PredictionInputMessage, _}
import com.intel.analytics.zoo.serving.utils.Conventions
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import redis.clients.jedis.Jedis
import scala.concurrent.{Await, ExecutionContextExecutor}
import scala.util.Random
class FrontendActorsSpec extends FlatSpec with Matchers with BeforeAndAfter with Supportive {
val random = new Random()
val redisHost = "localhost"
val redisPort = random.nextInt(100) + 10000
var redisServer: RedisServer = _
var jedis: Jedis = _
implicit var system: ActorSystem = _
implicit var materializer: ActorMaterializer = _
implicit var executionContext: ExecutionContextExecutor = _
implicit var timeout: Timeout = _
val redisInputQueue = Conventions.SERVING_STREAM_DEFAULT_NAME
val redisOutputQueue = Conventions.RESULT_PREFIX + Conventions.SERVING_STREAM_DEFAULT_NAME + ":"
val input1 = BytesPredictionInput("aW1hZ2UgYnl0ZXM=")
val input2 = BytesPredictionInput("aW1hZ2UgYnl0ZXM=")
val input3 = BytesPredictionInput("aW1hZ2UgYnl0ZXM=")
val inputMessage1 = PredictionInputMessage(Seq(input1, input2))
val inputMessage2 = PredictionInputMessage(input3)
val flushMessage = PredictionInputFlushMessage()
before {
redisServer = RedisServer.newRedisServer(redisPort)
redisServer.start()
jedis = new Jedis(redisHost, redisPort)
system = ActorSystem("zoo-serving-frontend-system")
materializer = ActorMaterializer()
executionContext = system.dispatcher
timeout = Timeout(10, TimeUnit.SECONDS)
}
after {
redisServer.stop()
system.terminate()
}
"redisServer" should "works well" in {
redisServer shouldNot be (null)
redisServer.getBindPort should be (redisPort)
jedis shouldNot be (null)
}
"actors" should "works well" in {
val redisPutterName = s"redis-putter"
val redisPutter = timing(s"$redisPutterName initialized.")() {
val redisPutterProps = Props(new RedisPutActor(redisHost, redisPort,
redisInputQueue, redisOutputQueue, 0, 56, false, null, null))
system.actorOf(redisPutterProps, name = redisPutterName)
}
val redisGetterName = s"redis-getter"
val redisGetter = timing(s"$redisGetterName initialized.")() {
val redisGetterProps = Props(new RedisGetActor(redisHost, redisPort,
redisInputQueue, redisOutputQueue, false, null, null))
system.actorOf(redisGetterProps, name = redisGetterName)
}
val querierNum = 1
val querierQueue = timing(s"queriers initialized.")() {
val querierQueue = new LinkedBlockingQueue[ActorRef](querierNum)
val querierProps = Props(new QueryActor(redisGetter))
List.range(0, querierNum).map(index => {
val querierName = s"querier-$index"
val querier = system.actorOf(querierProps, name = querierName)
querierQueue.put(querier)
})
querierQueue
}
redisPutter shouldNot be (null)
redisGetter shouldNot be (null)
querierQueue.size() should be (1)
redisPutter ! inputMessage1
redisPutter ! inputMessage2
// redisPutter ! flushMessage
// mock the cluster serving doing stuff
mockClusterServing(inputMessage1, inputMessage2)
List(inputMessage1, inputMessage2).foreach(message => {
val inputs = message.inputs
val ids = inputs.map(_.getId())
val queryMessage = PredictionQueryMessage(ids)
val querier = silent("querier take")() {
querierQueue.take()
}
val result = timing(s"query message wait for key $ids")() {
Await.result(querier ? queryMessage, timeout.duration)
.asInstanceOf[Seq[(String, util.Map[String, String])]]
}
silent("querier back")() {
querierQueue.offer(querier)
}
println(result)
result.size should be (message.inputs.size)
})
}
def mockClusterServing(messages: PredictionInputMessage*): Any = {
messages.foreach(message => {
val items = message.inputs
items.foreach(item => {
val key = s"${redisOutputQueue}${item.getId()}"
val value = new util.HashMap[String, String]()
value.put("value", "mock-result")
println(key, value)
jedis.hmset(key, value)
})
})
}
}
*/
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/serving/FrontendActorsSpec.scala | Scala | apache-2.0 | 5,170 |
package dotty
package tools
package vulpix
import scala.language.unsafeNulls
import java.io.{File => JFile, IOException, PrintStream, ByteArrayOutputStream}
import java.lang.System.{lineSeparator => EOL}
import java.net.URL
import java.nio.file.StandardCopyOption.REPLACE_EXISTING
import java.nio.file.{Files, NoSuchFileException, Path, Paths}
import java.nio.charset.{Charset, StandardCharsets}
import java.text.SimpleDateFormat
import java.util.{HashMap, Timer, TimerTask}
import java.util.concurrent.{TimeUnit, TimeoutException, Executors => JExecutors}
import scala.collection.mutable
import scala.io.{Codec, Source}
import scala.jdk.CollectionConverters.*
import scala.util.{Random, Try, Failure => TryFailure, Success => TrySuccess, Using}
import scala.util.control.NonFatal
import scala.util.matching.Regex
import scala.collection.mutable.ListBuffer
import dotc.{Compiler, Driver}
import dotc.core.Contexts.*
import dotc.decompiler
import dotc.report
import dotc.interfaces.Diagnostic.ERROR
import dotc.reporting.{Reporter, TestReporter}
import dotc.reporting.Diagnostic
import dotc.config.Config
import dotc.util.{DiffUtil, SourceFile, SourcePosition, Spans, NoSourcePosition}
import io.AbstractFile
import dotty.tools.vulpix.TestConfiguration.defaultOptions
/** A parallel testing suite whose goal is to integrate nicely with JUnit
*
* This trait can be mixed in to offer parallel testing to compile runs. When
* using this, you should be running your JUnit tests **sequentially**, as the
* test suite itself runs with a high level of concurrency.
*/
trait ParallelTesting extends RunnerOrchestration { self =>
import ParallelTesting._
/** If the running environment supports an interactive terminal, each `Test`
 *  will be run with a progress bar and real time feedback.
 */
def isInteractive: Boolean

/** A list of strings which is used to filter which tests to run; if `Nil`, all
 *  tests run. Any test source whose absolute path contains one of these
 *  substrings is selected (see `filteredSources` below).
 */
def testFilter: List[String]

/** When true, failing tests overwrite their checkfiles with the current output
 *  instead of reporting a diff.
 */
def updateCheckFiles: Boolean
/** A test source whose files or directory of files is to be compiled
* in a specific way defined by the `Test`
*/
sealed trait TestSource { self =>
  def name: String
  def outDir: JFile
  def flags: TestFlags
  def sourceFiles: Array[JFile]

  /** Classpath for running the compiled test: output dir first, then the run
   *  classpath carried by the flags.
   */
  def runClassPath: String = outDir.getPath + JFile.pathSeparator + flags.runClassPath

  /** Human-readable identifier: the joint name when several files compile
   *  together, the single file's path, or the directory path.
   */
  def title: String = self match {
    case self: JointCompilationSource =>
      if (self.files.length > 1) name
      else self.files.head.getPath
    case self: SeparateCompilationSource =>
      self.dir.getPath
  }

  /** Adds the flags specified in `newFlags0` if they do not already exist */
  def withFlags(newFlags0: String*) = {
    val newFlags = newFlags0.toArray
    // containsSlice: only add when this exact consecutive run of flags is absent.
    if (!flags.options.containsSlice(newFlags)) self match {
      case self: JointCompilationSource =>
        self.copy(flags = flags.and(newFlags:_*))
      case self: SeparateCompilationSource =>
        self.copy(flags = flags.and(newFlags:_*))
    }
    else self
  }

  /** Returns a copy of this source with the given flags removed. */
  def withoutFlags(flags1: String*): TestSource = self match {
    case self: JointCompilationSource =>
      self.copy(flags = flags.without(flags1: _*))
    case self: SeparateCompilationSource =>
      self.copy(flags = flags.without(flags1: _*))
  }

  /** Generate the instructions to redo the test from the command line */
  def buildInstructions(errors: Int, warnings: Int): String = {
    val sb = new StringBuilder
    val maxLen = 80 // wrap the reproduced command line at roughly this width
    var lineLen = 0
    val delimiter = " "
    sb.append(
      s"""|
          |Test '$title' compiled with $errors error(s) and $warnings warning(s),
          |the test can be reproduced by running from SBT (prefix it with ./bin/ if you
          |want to run from the command line):""".stripMargin
    )
    sb.append("\\n\\nscalac ")
    // Emit all compiler flags, wrapping long lines.
    flags.all.foreach { arg =>
      if (lineLen > maxLen) {
        sb.append(delimiter)
        lineLen = 4
      }
      sb.append(arg)
      lineLen += arg.length
      sb += ' '
    }
    self match {
      case source: JointCompilationSource => {
        // Joint compilation: a single command listing every file, quoted.
        source.sourceFiles.map(_.getPath).foreach { path =>
          sb.append(delimiter)
          sb += '\\''
          sb.append(path)
          sb += '\\''
          sb += ' '
        }
        sb.toString + "\\n\\n"
      }
      case self: SeparateCompilationSource => { // TODO: this won't work when using other versions of compiler
        // Separate compilation: repeat the base command once per group.
        val command = sb.toString
        val fsb = new StringBuilder(command)
        self.compilationGroups.foreach { (_, files) =>
          files.map(_.getPath).foreach { path =>
            fsb.append(delimiter)
            lineLen = 8
            fsb += '\\''
            fsb.append(path)
            fsb += '\\''
            fsb += ' '
          }
          fsb.append("\\n\\n")
          fsb.append(command)
        }
        fsb.toString + "\\n\\n"
      }
    }
  }
}
/** A group of files that may all be compiled together, with the same flags
* and output directory
*/
private case class JointCompilationSource(
  name: String,
  files: Array[JFile],
  flags: TestFlags,
  outDir: JFile,
  fromTasty: Boolean = false,    // when true, recompile from .tasty instead of sources
  decompilation: Boolean = false // NOTE(review): not consulted in the visible code — verify usage
) extends TestSource {
  // `files` may contain directories or non-source entries; keep only sources.
  def sourceFiles: Array[JFile] = files.filter(isSourceFile)
  override def toString() = outDir.toString
}
/** A test source whose files will be compiled separately according to their
* suffix `_X`
*/
case class SeparateCompilationSource(
  name: String,
  dir: JFile,
  flags: TestFlags,
  outDir: JFile
) extends TestSource {
  // A compilation group, derived from a file-name suffix after the first '_':
  // a bare number is the ordering ordinal, `c<version>` selects an alternative
  // compiler, `r<version>` selects -scala-output-version.
  case class Group(ordinal: Int, compiler: String, release: String)

  lazy val compilationGroups: List[(Group, Array[JFile])] =
    val Release = """r([\\d\\.]+)""".r
    val Compiler = """c([\\d\\.]+)""".r
    val Ordinal = """(\\d+)""".r
    def groupFor(file: JFile): Group =
      // Strip the extension, keep everything from the first '_' on, split on '_'.
      val groupSuffix = file.getName.dropWhile(_ != '_').stripSuffix(".scala").stripSuffix(".java")
      val groupSuffixParts = groupSuffix.split("_")
      // Files without an ordinal sort before all numbered groups (Int.MinValue).
      val ordinal = groupSuffixParts.collectFirst { case Ordinal(n) => n.toInt }.getOrElse(Int.MinValue)
      val release = groupSuffixParts.collectFirst { case Release(r) => r }.getOrElse("")
      val compiler = groupSuffixParts.collectFirst { case Compiler(c) => c }.getOrElse("")
      Group(ordinal, compiler, release)

    // Group the directory's sources, order groups deterministically, and sort
    // the files within each group.
    dir.listFiles
      .filter(isSourceFile)
      .groupBy(groupFor)
      .toList
      .sortBy { (g, _) => (g.ordinal, g.compiler, g.release) }
      .map { (g, f) => (g, f.sorted) }

  def sourceFiles = compilationGroups.map(_._2).flatten.toArray
}
private trait CompilationLogic { this: Test =>
  def suppressErrors = false

  /**
   * Compiles the test source.
   * @return The reporters containing the results of all the compilation runs for this test source.
   */
  private final def compileTestSource(testSource: TestSource): Try[List[TestReporter]] =
    Try(testSource match {
      case testSource @ JointCompilationSource(name, files, flags, outDir, fromTasty, decompilation) =>
        val reporter =
          if (fromTasty) compileFromTasty(flags, suppressErrors, outDir)
          else compile(testSource.sourceFiles, flags, suppressErrors, outDir)
        List(reporter)
      case testSource @ SeparateCompilationSource(_, dir, flags, outDir) =>
        // One compilation run per suffix group; groups may request a different
        // output version (`r...`) or a different compiler binary (`c...`).
        testSource.compilationGroups.map { (group, files) =>
          val flags1 = if group.release.isEmpty then flags else flags.and("-scala-output-version", group.release)
          if group.compiler.isEmpty then
            compile(files, flags1, suppressErrors, outDir)
          else
            compileWithOtherCompiler(group.compiler, files, flags1, outDir)
        }
    })

  /** Sums (errors, warnings) across all reporters of one test source. */
  final def countErrorsAndWarnings(reporters: Seq[TestReporter]): (Int, Int) =
    reporters.foldLeft((0, 0)) { case ((err, warn), r) => (err + r.errorCount, warn + r.warningCount) }

  final def countErrors (reporters: Seq[TestReporter]) = countErrorsAndWarnings(reporters)._1
  final def countWarnings(reporters: Seq[TestReporter]) = countErrorsAndWarnings(reporters)._2

  /** A reporter failed if the compiler crashed or reported at least one error. */
  final def reporterFailed(r: TestReporter) = r.compilerCrashed || r.errorCount > 0

  /**
   * For a given test source, returns a check file against which the result of the test run
   * should be compared. Is used by implementations of this trait.
   */
  final def checkFile(testSource: TestSource): Option[JFile] = (testSource match {
    case ts: JointCompilationSource =>
      // First non-directory file with its extension swapped for ".check".
      ts.files.collectFirst {
        case f if !f.isDirectory =>
          new JFile(f.getPath.replaceFirst("\\\\.(scala|java)$", ".check"))
      }
    case ts: SeparateCompilationSource =>
      Option(new JFile(ts.dir.getPath + ".check"))
  }).filter(_.exists)

  /**
   * Checks if the given actual lines are the same as the ones in the check file.
   * If not, fails the test.
   */
  final def diffTest(testSource: TestSource, checkFile: JFile, actual: List[String], reporters: Seq[TestReporter], logger: LoggedRunnable) = {
    for (msg <- FileDiff.check(testSource.title, actual, checkFile.getPath)) {
      onFailure(testSource, reporters, logger, Some(msg))
      if (updateCheckFiles) {
        // Update mode: overwrite the expectation with the actual output.
        FileDiff.dump(checkFile.toPath.toString, actual)
        echo("Updated checkfile: " + checkFile.getPath)
      } else {
        // Dump the actual output next to the check file for manual diffing.
        val outFile = checkFile.toPath.resolveSibling(s"${checkFile.toPath.getFileName}.out").toString
        FileDiff.dump(outFile, actual)
        echo(FileDiff.diffMessage(checkFile.getPath, outFile))
      }
    }
  }

  /** Entry point: runs the test */
  final def encapsulatedCompilation(testSource: TestSource) = new LoggedRunnable { self =>
    def checkTestSource(): Unit = tryCompile(testSource) {
      val reportersOrCrash = compileTestSource(testSource)
      onComplete(testSource, reportersOrCrash, self)
      registerCompletion()
    }
  }

  /** This callback is executed once the compilation of this test source finished */
  private final def onComplete(testSource: TestSource, reportersOrCrash: Try[Seq[TestReporter]], logger: LoggedRunnable): Unit =
    reportersOrCrash match {
      case TryFailure(exn) => onFailure(testSource, Nil, logger, Some(s"Fatal compiler crash when compiling: ${testSource.title}:\\n${exn.getMessage}${exn.getStackTrace.map("\\n\\tat " + _).mkString}"))
      case TrySuccess(reporters) => maybeFailureMessage(testSource, reporters) match {
        case Some(msg) => onFailure(testSource, reporters, logger, Option(msg).filter(_.nonEmpty))
        case None => onSuccess(testSource, reporters, logger)
      }
    }

  /**
   * Based on the reporters obtained after the compilation, determines if this test has failed.
   * If it has, returns a Some with an error message. Otherwise, returns None.
   * As the conditions of failure are different for different test types, this method should be
   * overridden by the concrete implementations of this trait.
   */
  def maybeFailureMessage(testSource: TestSource, reporters: Seq[TestReporter]): Option[String] =
    if (reporters.exists(reporterFailed)) Some(s"Compilation failed for: '${testSource.title}'")
    else None

  /**
   * If the test has compiled successfully, this callback will be called. You can still fail the test from this callback.
   */
  def onSuccess(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable): Unit = ()

  /**
   * If the test failed to compile or the compiler crashed, this callback will be called.
   */
  def onFailure(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable, message: Option[String]): Unit = {
    message.foreach(echo)
    // Log the failing reporters' full output, then the reproduction command.
    reporters.filter(reporterFailed).foreach(logger.logReporterContents)
    logBuildInstructions(testSource, reporters)
    failTestSource(testSource)
  }
}
/** Each `Test` takes the `testSources` and performs the compilation and assertions
* according to the implementing class "neg", "run" or "pos".
*/
private class Test(testSources: List[TestSource], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit val summaryReport: SummaryReporting) extends CompilationLogic { test =>
import summaryReport._
protected final val realStdout = System.out
protected final val realStderr = System.err
/** A runnable that logs its contents in a buffer */
trait LoggedRunnable extends Runnable {
  /** Instances of `LoggedRunnable` implement this method instead of the
   * `run` method
   */
  def checkTestSource(): Unit

  // Per-runnable log lines, flushed to the summary report after the run.
  private val logBuffer = mutable.ArrayBuffer.empty[String]
  def log(msg: String): Unit = logBuffer.append(msg)

  /** Appends every message of the given reporter to this runnable's log. */
  def logReporterContents(reporter: TestReporter): Unit =
    reporter.messages.foreach(log)

  /** Logs `msg` and also echoes it through the enclosing Test. */
  def echo(msg: String): Unit = {
    log(msg)
    test.echo(msg)
  }

  final def run(): Unit = {
    checkTestSource()
    // Flush everything this runnable logged into the shared summary report.
    summaryReport.echoToLog(logBuffer.iterator)
  }
}
/** All testSources left after filtering out */
private val filteredSources =
  if (testFilter.isEmpty) testSources
  else testSources.filter {
    // A joint source matches when any of its files' paths contains a filter substring.
    case JointCompilationSource(_, files, _, _, _, _) =>
      testFilter.exists(filter => files.exists(file => file.getPath.contains(filter)))
    // A separate-compilation source matches on its directory path.
    case SeparateCompilationSource(_, dir, _, _) =>
      testFilter.exists(dir.getPath.contains)
  }

/** Total amount of test sources being compiled by this test */
val sourceCount = filteredSources.length

// Count of finished test sources; writes are synchronized, reads go through
// `testSourcesCompleted` (used by the progress monitor).
private var _testSourcesCompleted = 0
private def testSourcesCompleted: Int = _testSourcesCompleted

/** Complete the current compilation with the amount of errors encountered */
protected final def registerCompletion() = synchronized {
  _testSourcesCompleted += 1
}
// The distinct reasons a test run can fail; kept as a set so the summary can
// report each kind once, plus a raw counter of individual failures.
sealed trait Failure
case class JavaCompilationFailure(reason: String) extends Failure
case class TimeoutFailure(title: String) extends Failure
case object Generic extends Failure

private var _failures = Set.empty[Failure]
private var _failureCount = 0

/** Fail the current test */
protected final def fail(failure: Failure = Generic): Unit = synchronized {
  _failures = _failures + failure
  _failureCount = _failureCount + 1
}
def didFail: Boolean = _failureCount != 0

/** A set of the different failures */
def failureReasons: Set[Failure] = _failures

/** Number of failed tests */
def failureCount: Int = _failureCount

/** Records the command-line reproduction instructions for a failed source. */
protected def logBuildInstructions(testSource: TestSource, reporters: Seq[TestReporter]) = {
  val (errCount, warnCount) = countErrorsAndWarnings(reporters)
  val errorMsg = testSource.buildInstructions(errCount, warnCount)
  addFailureInstruction(errorMsg)
}

/** Instructions on how to reproduce failed test source compilations */
private val reproduceInstructions = mutable.ArrayBuffer.empty[String]
protected final def addFailureInstruction(ins: String): Unit =
  synchronized { reproduceInstructions.append(ins) }

/** The test sources that failed according to the implementing subclass */
private val failedTestSources = mutable.ArrayBuffer.empty[String]
protected final def failTestSource(testSource: TestSource, reason: Failure = Generic) = synchronized {
  // Human-readable suffix describing why the source failed.
  val extra = reason match {
    case TimeoutFailure(title) => s", test '$title' timed out"
    case JavaCompilationFailure(msg) => s", java test sources failed to compile with: \\n$msg"
    case Generic => ""
  }
  failedTestSources.append(testSource.title + s" failed" + extra)
  fail(reason)
}
/** Prints to `System.err` if we're not suppressing all output */
protected def echo(msg: String): Unit = if (!suppressAllOutput) {
  // pad right so that output is at least as large as progress bar line
  val paddingRight = " " * math.max(0, 80 - msg.length)
  realStderr.println(msg + paddingRight)
}

/** Print a progress bar for the current `Test` */
private def updateProgressMonitor(start: Long): Unit = {
  val tCompiled = testSourcesCompleted
  if (tCompiled < sourceCount) {
    // Elapsed whole seconds since the run started.
    val timestamp = (System.currentTimeMillis - start) / 1000
    // 40-column bar; `progress` is the number of filled columns.
    val progress = (tCompiled.toDouble / sourceCount * 40).toInt
    // Trailing \r rewinds the cursor so the bar redraws in place.
    realStdout.print(
      "[" + ("=" * (math.max(progress - 1, 0))) +
      (if (progress > 0) ">" else "") +
      (" " * (39 - progress)) +
      s"] completed ($tCompiled/$sourceCount, $failureCount failed, ${timestamp}s)\\r"
    )
  }
}
/** Wrapper function to make sure that the compiler itself did not crash -
* if it did, the test should automatically fail.
*/
// Runs `op`, converting any thrown exception into a test failure.
// Catching Throwable is deliberate here: a compiler crash of any severity must
// be recorded; the exception is then rethrown so callers still observe it.
protected def tryCompile(testSource: TestSource)(op: => Unit): Unit =
  try op
  catch
    case e: Throwable =>
      // if an exception is thrown during compilation, the complete test
      // run should fail
      failTestSource(testSource)
      e.printStackTrace()
      // Still count this source as completed so progress accounting stays consistent.
      registerCompletion()
      throw e
/** Compiles the given files in-process with the Dotty driver, then compiles any
 *  Java sources with an external `javac`. Returns the reporter carrying the
 *  Scala compilation's diagnostics; javac failures are recorded via `fail`.
 */
protected def compile(files0: Array[JFile], flags0: TestFlags, suppressErrors: Boolean, targetDir: JFile): TestReporter = {
  // Recursively expand directories into the files they contain.
  def flattenFiles(f: JFile): Array[JFile] =
    if (f.isDirectory) f.listFiles.flatMap(flattenFiles)
    else Array(f)
  val files: Array[JFile] = files0.flatMap(flattenFiles)

  // Extend the flags with per-file tool args (via toolArgsFor) and point output
  // and classpath at the target directory.
  val flags = flags0
    .and(toolArgsFor(files.toList.map(_.toPath), getCharsetFromEncodingOpt(flags0)): _*)
    .and("-d", targetDir.getPath)
    .withClasspath(targetDir.getPath)

  // Runs javac over `fs`; returns Some(stderr) on non-zero exit, else None.
  def compileWithJavac(fs: Array[String]) = if (fs.nonEmpty) {
    val fullArgs = Array(
      "javac",
      "-encoding", StandardCharsets.UTF_8.name,
    ) ++ flags.javacFlags ++ fs

    val process = Runtime.getRuntime.exec(fullArgs)
    val output = Source.fromInputStream(process.getErrorStream).mkString

    if (process.waitFor() != 0) Some(output)
    else None
  } else None

  // Errors are suppressed (logLevel above ERROR) when the caller expects them.
  val reporter =
    TestReporter.reporter(realStdout, logLevel =
      if (suppressErrors || suppressAllOutput) ERROR + 1 else ERROR)

  // When `times` > 1 the same sources are compiled repeatedly and a per-run
  // timing line is echoed (simple benchmarking mode).
  val driver =
    if (times == 1) new Driver
    else new Driver {
      private def ntimes(n: Int)(op: Int => Reporter): Reporter =
        (1 to n).foldLeft(emptyReporter) ((_, i) => op(i))
      override def doCompile(comp: Compiler, files: List[AbstractFile])(using Context) =
        ntimes(times) { run =>
          val start = System.nanoTime()
          val rep = super.doCompile(comp, files)
          report.echo(s"\\ntime run $run: ${(System.nanoTime - start) / 1000000}ms")
          rep
        }
    }

  val allArgs = flags.all

  // If a test contains a Java file that cannot be parsed by Dotty's Java source parser, its
  // name must contain the string "JAVA_ONLY".
  val dottyFiles = files.filterNot(_.getName.contains("JAVA_ONLY")).map(_.getPath)
  driver.process(allArgs ++ dottyFiles, reporter = reporter)

  // All .java files (including JAVA_ONLY ones) then go through javac.
  val javaFiles = files.filter(_.getName.endsWith(".java")).map(_.getPath)
  val javaErrors = compileWithJavac(javaFiles)

  if (javaErrors.isDefined) {
    echo(s"\\njava compilation failed: \\n${ javaErrors.get }")
    fail(failure = JavaCompilationFailure(javaErrors.get))
  }

  reporter
}
// Parses the stderr text of an externally-run compiler into Diagnostic.Error
// values with resolved source positions. Each error's subsequent output lines
// are appended to it inside a drawn box until the next error/warning/summary.
private def parseErrors(errorsText: String, compilerVersion: String, pageWidth: Int) =
  val errorPattern = """^.*Error: (.*\\.scala):(\\d+):(\\d+).*""".r
  val brokenClassPattern = """^class file (.*) is broken.*""".r
  val warnPattern = """^.*Warning: (.*\\.scala):(\\d+):(\\d+).*""".r
  val summaryPattern = """\\d+ (?:warning|error)s? found""".r
  val indent = " "
  // Accumulated in reverse (prepend); reversed before returning.
  var diagnostics = List.empty[Diagnostic.Error]
  // Top (start=true) or bottom border of the boxed "full error output" block.
  def barLine(start: Boolean) = s"$indent${if start then "╭" else "╰"}${"┄" * pageWidth}${if start then "╮" else "╯"}\\n"
  // One boxed output line, padded to pageWidth, with ANSI color codes stripped.
  def errorLine(line: String) = s"$indent┆${String.format(s"%-${pageWidth}s", stripAnsi(line))}┆\\n"
  def stripAnsi(str: String): String = str.replaceAll("\\u001b\\\\[\\\\d+m", "")
  // Appends text to the most recently started diagnostic, if any.
  def addToLast(str: String): Unit =
    diagnostics match
      case head :: tail =>
        diagnostics = Diagnostic.Error(s"${head.msg.rawMessage}$str", head.pos) :: tail
      case Nil =>
  var inError = false
  for line <- errorsText.linesIterator do
    line match
      // A warning ends any in-progress error block but is otherwise ignored.
      case error @ warnPattern(filePath, line, column) =>
        inError = false
      case error @ errorPattern(filePath, line, column) =>
        inError = true
        val lineNum = line.toInt
        val columnNum = column.toInt
        // Resolve the reported (line, column) back to an offset in the source file.
        val abstractFile = AbstractFile.getFile(filePath)
        val sourceFile = SourceFile(abstractFile, Codec.UTF8)
        val offset = sourceFile.lineToOffset(lineNum - 1) + columnNum - 1
        val span = Spans.Span(offset)
        val sourcePos = SourcePosition(sourceFile, span)
        // Close the previous diagnostic's box before opening a new one.
        addToLast(barLine(start = false))
        diagnostics ::= Diagnostic.Error(s"Compilation of $filePath with Scala $compilerVersion failed at line: $line, column: $column.\\nFull error output:\\n${barLine(start = true)}${errorLine(error)}", sourcePos)
      case error @ brokenClassPattern(filePath) =>
        inError = true
        diagnostics ::= Diagnostic.Error(s"$error\\nFull error output:\\n${barLine(start = true)}${errorLine(error)}", NoSourcePosition)
      case summaryPattern() => // Ignored
      // Continuation line of the current error: append inside its box.
      case line if inError => addToLast(errorLine(line))
      case _ =>
  // Close the final diagnostic's box; restore chronological order.
  addToLast(barLine(start = false))
  diagnostics.reverse
// Compiles the files with a different published compiler version: resolves the
// scala3 library/compiler artifacts of that version via Coursier and launches
// dotty.tools.dotc.Main in a separate JVM. On failure, the subprocess's stderr
// is parsed (see parseErrors) and replayed through a TestReporter.
protected def compileWithOtherCompiler(compiler: String, files: Array[JFile], flags: TestFlags, targetDir: JFile): TestReporter =
  // Resolves one artifact at the requested `compiler` version and returns its
  // transitive classpath as a path-separated string.
  def artifactClasspath(organizationName: String, moduleName: String) =
    import coursier._
    val dep = Dependency(
      Module(
        Organization(organizationName),
        ModuleName(moduleName),
        attributes = Map.empty
      ),
      version = compiler
    )
    Fetch()
      .addDependencies(dep)
      .run()
      .mkString(JFile.pathSeparator)

  val stdlibClasspath = artifactClasspath("org.scala-lang", "scala3-library_3")
  val scalacClasspath = artifactClasspath("org.scala-lang", "scala3-compiler_3")

  // Narrow the page width so the boxed error output produced by parseErrors fits.
  val pageWidth = TestConfiguration.pageWidth - 20
  val flags1 = flags.copy(defaultClassPath = stdlibClasspath)
    .withClasspath(targetDir.getPath)
    .and("-d", targetDir.getPath)
    .and("-pagewidth", pageWidth.toString)

  val scalacCommand = Array("java", "-cp", scalacClasspath, "dotty.tools.dotc.Main")
  val command = scalacCommand ++ flags1.all ++ files.map(_.getAbsolutePath)
  val process = Runtime.getRuntime.exec(command)

  val reporter = TestReporter.reporter(realStdout, logLevel =
    if (suppressErrors || suppressAllOutput) ERROR + 1 else ERROR)
  // Drain stderr first, then wait; report parsed diagnostics on non-zero exit.
  val errorsText = Source.fromInputStream(process.getErrorStream).mkString
  if process.waitFor() != 0 then
    val diagnostics = parseErrors(errorsText, compiler, pageWidth)
    diagnostics.foreach { diag =>
      val context = (new ContextBase).initialCtx
      reporter.report(diag)(using context)
    }

  reporter
/** Recompiles the `.tasty` artifacts found under `targetDir` into a sibling
 *  `<targetDir>_from-tasty` directory using `-from-tasty`, returning the
 *  reporter that collected the diagnostics of that run.
 */
protected def compileFromTasty(flags0: TestFlags, suppressErrors: Boolean, targetDir: JFile): TestReporter = {
  val fromTastyDir = new JFile(s"${targetDir.getPath}_from-tasty")
  fromTastyDir.mkdir()
  val flags = flags0 and ("-d", fromTastyDir.getPath) and "-from-tasty"
  // Every serialized tree below `targetDir` becomes an input "source".
  val tastyInputs = flattenFiles(targetDir).collect { case f if isTastyFile(f) => f.toString }
  // Errors are silenced when the caller expects them or when all output is suppressed.
  val logLevel = if (suppressErrors || suppressAllOutput) ERROR + 1 else ERROR
  val reporter = TestReporter.reporter(realStdout, logLevel)
  new Driver().process(flags.all ++ tastyInputs, reporter = reporter)
  reporter
}
/** Submits every filtered test source to a work-stealing thread pool, waits
 *  for completion (20 minute hard limit), optionally renders a progress bar,
 *  and finally reports the aggregate pass/fail outcome. A `CompileRun` is
 *  single-use: calling this twice is a programming error.
 */
private[ParallelTesting] def executeTestSuite(): this.type = {
  assert(testSourcesCompleted == 0, "not allowed to re-use a `CompileRun`")
  if (filteredSources.nonEmpty) {
    val pool = threadLimit match {
      case Some(i) => JExecutors.newWorkStealingPool(i)
      case None => JExecutors.newWorkStealingPool()
    }
    val timer = new Timer()
    val logProgress = isInteractive && !suppressAllOutput
    val start = System.currentTimeMillis()
    if (logProgress) {
      // Repaint the progress monitor every 200ms, first paint after 100ms.
      val task = new TimerTask {
        def run(): Unit = updateProgressMonitor(start)
      }
      timer.schedule(task, 100, 200)
    }
    val eventualResults = filteredSources.map { target =>
      pool.submit(encapsulatedCompilation(target))
    }
    pool.shutdown()
    if (!pool.awaitTermination(20, TimeUnit.MINUTES)) {
      // Timed out: collect the targets that never finished for the error message.
      val remaining = new ListBuffer[TestSource]
      filteredSources.lazyZip(eventualResults).foreach { (src, res) =>
        if (!res.isDone)
          remaining += src
      }
      pool.shutdownNow()
      // Restore the real streams before throwing, in case a test redirected them.
      System.setOut(realStdout)
      System.setErr(realStderr)
      throw new TimeoutException(s"Compiling targets timed out, remaining targets: ${remaining.mkString(", ")}")
    }
    // Surface unexpected worker exceptions on stderr; they do not fail the suite here.
    eventualResults.foreach { x =>
      try x.get()
      catch {
        case ex: Exception =>
          System.err.println(ex.getMessage)
          ex.printStackTrace()
      }
    }
    if (logProgress) {
      timer.cancel()
      val timestamp = (System.currentTimeMillis - start) / 1000
      realStdout.println(
        s"[=======================================] completed ($sourceCount/$sourceCount, $failureCount failed, ${timestamp}s)"
      )
    }
    if (didFail) {
      reportFailed()
      failedTestSources.toSet.foreach(addFailedTest)
      reproduceInstructions.iterator.foreach(addReproduceInstruction)
    }
    else reportPassed()
  }
  else echo {
    testFilter match
      case _ :: _ => s"""No files matched "${testFilter.mkString(",")}" in test"""
      case _ => "No tests available under target - erroneous test?"
  }
  this
}
/** Returns every file reachable beneath `f`, or `f` itself when it is not a directory. */
private def flattenFiles(f: JFile): Array[JFile] =
  if (!f.isDirectory) Array(f)
  else f.listFiles.flatMap(flattenFiles)
}
/** A plain "pos" compilation run: succeeds iff every target compiles without
 *  errors or compiler crashes (default `Test` failure criteria apply).
 */
private final class PosTest(testSources: List[TestSource], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit summaryReport: SummaryReporting)
extends Test(testSources, times, threadLimit, suppressAllOutput)
/** Test run for `-rewrite`: after a successful compilation the rewritten
 *  sources are diffed against their `.check` files (when one is registered in
 *  `checkFiles`) and then compiled again to make sure the rewrite produced
 *  valid code.
 */
private final class RewriteTest(testSources: List[TestSource], checkFiles: Map[JFile, JFile], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit summaryReport: SummaryReporting)
extends Test(testSources, times, threadLimit, suppressAllOutput) {
  private def verifyOutput(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable) = {
    for (file <- testSource.sourceFiles; checkFile <- checkFiles.get(file)) {
      // Read the rewritten file back as lines for the diff.
      val rewrittenLines = {
        val source = Source.fromFile(file, StandardCharsets.UTF_8.name)
        try source.getLines().toList
        finally source.close()
      }
      diffTest(testSource, checkFile, rewrittenLines, reporters, logger)
    }
    // check that the rewritten code compiles
    new CompilationTest(testSource).checkCompile()
  }
  override def onSuccess(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable) =
    verifyOutput(testSource, reporters, logger)
}
/** A "run" test: compiles like a pos test, then executes the compiled `Test`
 *  object's `main` and (when a check file exists) diffs the program output
 *  against it. Under the `dotty.tests.norun` property the run step is skipped
 *  and a one-time warning is queued in the summary report instead.
 */
private final class RunTest(testSources: List[TestSource], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit summaryReport: SummaryReporting)
extends Test(testSources, times, threadLimit, suppressAllOutput) {
  // Ensures the "not run" warning is added to the summary at most once.
  private var didAddNoRunWarning = false
  private def addNoRunWarning() = if (!didAddNoRunWarning) {
    didAddNoRunWarning = true
    summaryReport.addStartingMessage {
      """|WARNING
         |-------
         |Run tests were only compiled, not run - this is due to the `dotty.tests.norun`
         |property being set
         |""".stripMargin
    }
  }
  private def verifyOutput(checkFile: Option[JFile], dir: JFile, testSource: TestSource, warnings: Int, reporters: Seq[TestReporter], logger: LoggedRunnable) = {
    if (Properties.testsNoRun) addNoRunWarning()
    else runMain(testSource.runClassPath) match {
      case Success(output) => checkFile match {
        case Some(file) if file.exists => diffTest(testSource, file, output.linesIterator.toList, reporters, logger)
        case _ => // no check file: a successful run is sufficient
      }
      case Failure(output) =>
        echo(s"Test '${testSource.title}' failed with output:")
        echo(output)
        failTestSource(testSource)
      case Timeout =>
        echo("failed because test " + testSource.title + " timed out")
        failTestSource(testSource, TimeoutFailure(testSource.title))
    }
  }
  override def onSuccess(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable) =
    verifyOutput(checkFile(testSource), testSource.outDir, testSource, countWarnings(reporters), reporters, logger)
}
/** A "neg" test: each target must produce exactly the number of errors
 *  annotated in its sources (`// error`, `// nopos-error`, `// anypos-error`)
 *  at exactly the annotated positions, without crashing the compiler.
 */
private final class NegTest(testSources: List[TestSource], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit summaryReport: SummaryReporting)
extends Test(testSources, times, threadLimit, suppressAllOutput) {
  override def suppressErrors = true
  /** Compares the expected error annotations with the reported diagnostics and
   *  renders a failure message when they disagree; `None` means the test passed.
   */
  override def maybeFailureMessage(testSource: TestSource, reporters: Seq[TestReporter]): Option[String] = {
    def compilerCrashed = reporters.exists(_.compilerCrashed)
    lazy val (errorMap, expectedErrors) = getErrorMapAndExpectedCount(testSource.sourceFiles.toIndexedSeq)
    lazy val actualErrors = reporters.foldLeft(0)(_ + _.errorCount)
    lazy val (expected, unexpected) = getMissingExpectedErrors(errorMap, reporters.iterator.flatMap(_.errors))
    def hasMissingAnnotations = expected.nonEmpty || unexpected.nonEmpty
    def showErrors = "-> following the errors:\\n" +
      reporters.flatMap(_.allErrors.sortBy(_.pos.line).map(e => s"${e.pos.line + 1}: ${e.message}")).mkString(" at ", "\\n at ", "")
    Option {
      if compilerCrashed then s"Compiler crashed when compiling: ${testSource.title}"
      else if actualErrors == 0 then s"\\nNo errors found when compiling neg test $testSource"
      else if expectedErrors == 0 then s"\\nNo errors expected/defined in $testSource -- use // error or // nopos-error"
      else if expectedErrors != actualErrors then
        s"""|Wrong number of errors encountered when compiling $testSource
            |expected: $expectedErrors, actual: $actualErrors
            |${expected.mkString("Unfulfilled expectations:\\n", "\\n", "")}
            |${unexpected.mkString("Unexpected errors:\\n", "\\n", "")}
            |$showErrors
            |""".stripMargin.trim.linesIterator.mkString("\\n", "\\n", "")
      else if hasMissingAnnotations then s"\\nErrors found on incorrect row numbers when compiling $testSource\\n$showErrors"
      else if !errorMap.isEmpty then s"\\nExpected error(s) have {<error position>=<unreported error>}: $errorMap"
      else null // Option(null) == None: the test passed
    }
  }
  override def onSuccess(testSource: TestSource, reporters: Seq[TestReporter], logger: LoggedRunnable): Unit =
    checkFile(testSource).foreach(diffTest(testSource, _, reporterOutputLines(reporters), reporters, logger))
  def reporterOutputLines(reporters: Seq[TestReporter]): List[String] =
    reporters.flatMap(_.consoleOutput.split("\\n")).toList
  // In neg-tests we allow two types of error annotations,
  // "nopos-error" which doesn't care about position and "error" which
  // has to be annotated on the correct line number.
  //
  // We collect these in a map `"file:row" -> numberOfErrors`, for
  // nopos errors we save them in `"file" -> numberOfNoPosErrors`
  def getErrorMapAndExpectedCount(files: Seq[JFile]): (HashMap[String, Integer], Int) =
    val comment = raw"//( *)(nopos-|anypos-)?error".r
    val errorMap = new HashMap[String, Integer]()
    var expectedErrors = 0
    // Record one expected error under `key` ("file:row", "nopos" or "anypos").
    def bump(key: String): Unit =
      errorMap.get(key) match
        case null => errorMap.put(key, 1)
        case n => errorMap.put(key, n+1)
      expectedErrors += 1
    files.filter(isSourceFile).foreach { file =>
      Using(Source.fromFile(file, StandardCharsets.UTF_8.name)) { source =>
        source.getLines.zipWithIndex.foreach { case (line, lineNbr) =>
          comment.findAllMatchIn(line).foreach { m =>
            m.group(2) match
              case prefix if m.group(1).isEmpty =>
                // A marker without a space after `//` is malformed: warn and skip it.
                // NOTE(review): `lineNbr` printed here is 0-based while the keys
                // below use `lineNbr + 1` -- confirm the warning means 0-based lines.
                val what = Option(prefix).getOrElse("")
                echo(s"Warning: ${file.getCanonicalPath}:${lineNbr}: found `//${what}error` but expected `// ${what}error`, skipping comment")
              case "nopos-" => bump("nopos")
              case "anypos-" => bump("anypos")
              case _ => bump(s"${file.getPath}:${lineNbr+1}")
          }
        }
      }.get
    }
    (errorMap, expectedErrors)
  end getErrorMapAndExpectedCount
  // return unfulfilled expected errors and unexpected diagnostics
  def getMissingExpectedErrors(errorMap: HashMap[String, Integer], reporterErrors: Iterator[Diagnostic]): (List[String], List[String]) =
    val unexpected, unpositioned = ListBuffer.empty[String]
    // For some reason, absolute paths leak from the compiler itself...
    def relativize(path: String): String = path.split(JFile.separatorChar).dropWhile(_ != "tests").mkString(JFile.separator)
    // Consume one expected error at `key`; false when none was expected there.
    def seenAt(key: String): Boolean =
      errorMap.get(key) match
        case null => false
        case 1 => errorMap.remove(key) ; true
        case n => errorMap.put(key, n - 1) ; true
    def sawDiagnostic(d: Diagnostic): Unit =
      d.pos.nonInlined match
        case srcpos if srcpos.exists =>
          val key = s"${relativize(srcpos.source.file.toString)}:${srcpos.line + 1}"
          if !seenAt(key) then unexpected += key
        case srcpos =>
          if !seenAt("nopos") then unpositioned += relativize(srcpos.source.file.toString)
    reporterErrors.foreach(sawDiagnostic)
    // "anypos" expectations absorb the mispositioned errors, but only when the
    // count matches exactly.
    errorMap.get("anypos") match
      case n if n == unexpected.size => errorMap.remove("anypos") ; unexpected.clear()
      case _ =>
    (errorMap.asScala.keys.toList, (unexpected ++ unpositioned).toList)
  end getMissingExpectedErrors
}
/** A "fuzzy" test: only checks that the compiler does not crash. Compilation
 *  errors are suppressed and never constitute a failure (`maybeFailureMessage`
 *  is always `None`).
 */
private final class NoCrashTest(testSources: List[TestSource], times: Int, threadLimit: Option[Int], suppressAllOutput: Boolean)(implicit summaryReport: SummaryReporting)
extends Test(testSources, times, threadLimit, suppressAllOutput) {
  override def suppressErrors = true
  override def maybeFailureMessage(testSource: TestSource, reporters: Seq[TestReporter]): Option[String] = None
}
/** The `CompilationTest` is the main interface to `ParallelTesting`, it
* can be instantiated via one of the following methods:
*
* - `compileFile`
* - `compileDir`
* - `compileList`
* - `compileFilesInDir`
* - `compileShallowFilesInDir`
*
* Each compilation test can then be turned into either a "pos", "neg" or
* "run" test:
*
* ```
* compileFile("tests/pos/i1103.scala", opts).pos()
* ```
*
* These tests can be customized before calling one of the execution
* methods, for instance:
*
* ```
* compileFile("tests/pos/i1103.scala", opts).times(2).verbose.pos()
* ```
*
* Which would compile `i1103.scala` twice with the verbose flag as a "pos"
* test.
*
* pos tests
* =========
* Pos tests verify that the compiler is able to compile the given
* `TestSource`s and that they generate no errors or exceptions during
* compilation
*
* neg tests
* =========
* Neg tests are expected to generate a certain amount of errors - but not
* crash the compiler. In each `.scala` file, you specify the line on which
* the error will be generated, e.g:
*
* ```
* val x: String = 1 // error
* ```
*
* if a line generates multiple errors, you need to annotate it multiple
* times. For a line that generates two errors:
*
* ```
* val y: String = { val y1: String = 1; 2 } // error // error
* ```
*
* Certain errors have no position, if you need to check these annotate the
* file anywhere with `// nopos-error`
*
* run tests
* =========
* Run tests are a superset of pos tests, they both verify compilation and
* that the compiler does not crash. In addition, run tests verify that the
* tests are able to run as expected.
*
* Run tests need to have the following form:
*
* ```
* object Test {
* def main(args: Array[String]): Unit = ()
* }
* ```
*
* This is because the runner instantiates the `Test` class and calls the
* main method.
*
* Other definitions are allowed in the same file, but the file needs to at
* least have the `Test` object with a `main` method.
*
* To verify output you may use `.check` files. These files should share the
* name of the file or directory that they are testing. For instance:
*
* ```none
* .
* └── tests
* ├── i1513.scala
* └── i1513.check
* ```
*
* If you are testing a directory under separate compilation, you would
* have:
*
* ```none
* .
* └── tests
* ├── myTestDir
* │ ├── T_1.scala
* │ ├── T_2.scala
* │ └── T_3.scala
* └── myTestDir.check
* ```
*
* In the above example, `i1513.scala` and one of the files `T_X.scala`
* would contain a `Test` object with a main method.
*
* Composing tests
* ===============
* Since this is a parallel test suite, it is essential to be able to
* compose tests to take advantage of the concurrency. This is done using
* the `+` function. This function will make sure that tests being combined
* are compatible according to the `require`s in `+`.
*/
final class CompilationTest private (
  private[ParallelTesting] val targets: List[TestSource],
  private[ParallelTesting] val times: Int,
  private[ParallelTesting] val shouldDelete: Boolean,
  private[ParallelTesting] val threadLimit: Option[Int],
  private[ParallelTesting] val shouldFail: Boolean,
  private[ParallelTesting] val shouldSuppressOutput: Boolean
) {
  import org.junit.Assert.fail

  // Public constructors use the defaults: run once, delete output afterwards,
  // no thread limit, expected to succeed, output not suppressed.
  def this(target: TestSource) =
    this(List(target), 1, true, None, false, false)
  def this(targets: List[TestSource]) =
    this(targets, 1, true, None, false, false)

  /** Creates a "pos" test run, which makes sure that all tests pass
   *  compilation without generating errors and that they do not crash the
   *  compiler
   */
  def checkCompile()(implicit summaryReport: SummaryReporting): this.type = {
    val test = new PosTest(targets, times, threadLimit, shouldFail || shouldSuppressOutput).executeTestSuite()
    cleanup()
    if (!shouldFail && test.didFail) {
      fail(s"Expected no errors when compiling, failed for the following reason(s):\\n${ reasonsForFailure(test) }")
    }
    else if (shouldFail && !test.didFail) {
      fail("Pos test should have failed, but didn't")
    }
    this
  }

  /** Creates a "neg" test run, which makes sure that each test generates the
   *  correct amount of errors at the correct positions. It also makes sure
   *  that none of these tests crash the compiler
   */
  def checkExpectedErrors()(implicit summaryReport: SummaryReporting): this.type = {
    val test = new NegTest(targets, times, threadLimit, shouldFail || shouldSuppressOutput).executeTestSuite()
    cleanup()
    if (shouldFail && !test.didFail) {
      fail(s"Neg test shouldn't have failed, but did. Reasons:\\n${ reasonsForFailure(test) }")
    }
    else if (!shouldFail && test.didFail) {
      fail("Neg test should have failed, but did not")
    }
    this
  }

  /** Creates a "fuzzy" test run, which makes sure that each test compiles (or not) without crashing */
  def checkNoCrash()(implicit summaryReport: SummaryReporting): this.type = {
    val test = new NoCrashTest(targets, times, threadLimit, shouldSuppressOutput).executeTestSuite()
    cleanup()
    if (test.didFail) {
      fail("Fuzzy test shouldn't have crashed, but did")
    }
    this
  }

  /** Creates a "run" test run, which is a superset of "pos". In addition to
   *  making sure that all tests pass compilation and that they do not crash
   *  the compiler; it also makes sure that all tests can run with the
   *  expected output
   */
  def checkRuns()(implicit summaryReport: SummaryReporting): this.type = {
    val test = new RunTest(targets, times, threadLimit, shouldFail || shouldSuppressOutput).executeTestSuite()
    cleanup()
    if (!shouldFail && test.didFail) {
      fail(s"Run test failed, but should not, reasons:\\n${ reasonsForFailure(test) }")
    }
    else if (shouldFail && !test.didFail) {
      fail("Run test should have failed, but did not")
    }
    this
  }

  /** Tests `-rewrite`, which makes sure that the rewritten files still compile
   *  and agree with the expected result (if specified).
   *
   *  Check files are only supported for joint compilation sources.
   */
  def checkRewrites()(implicit summaryReport: SummaryReporting): this.type = {
    // use the original check file, to simplify update of check files
    var checkFileMap = Map.empty[JFile, JFile]
    // copy source file to targets, as they will be changed
    val copiedTargets = targets.map {
      case target @ JointCompilationSource(_, files, _, outDir, _, _) =>
        val files2 = files.map { f =>
          val dest = copyToDir(outDir, f)
          val checkFile = new JFile(f.getPath.replaceFirst("\\\\.scala$", ".check"))
          if (checkFile.exists) checkFileMap = checkFileMap.updated(dest, checkFile)
          dest
        }
        target.copy(files = files2)
      case target @ SeparateCompilationSource(_, dir, _, outDir) =>
        target.copy(dir = copyToDir(outDir, dir))
    }
    new RewriteTest(copiedTargets, checkFileMap, times, threadLimit, shouldFail || shouldSuppressOutput).executeTestSuite()
    this
  }

  /** Deletes output directories and files */
  private def cleanup(): this.type = {
    if (shouldDelete) delete()
    this
  }

  /** Extract `Failure` set and render from `Test` */
  private def reasonsForFailure(test: Test): String = {
    val failureReport =
      if (test.failureCount == 0) ""
      else s"\\n  - encountered ${test.failureCount} test failures(s)"
    failureReport + test.failureReasons.collect {
      case test.TimeoutFailure(title) =>
        s"  - test '$title' timed out"
      case test.JavaCompilationFailure(msg) =>
        s"  - java compilation failed with:\\n${ msg.linesIterator.map("      " + _).mkString("\\n") }"
    }.mkString("\\n")
  }

  /** Copies `file` to `dir` - taking into account if `file` is a directory,
   *  and if so copying recursively
   */
  private def copyToDir(dir: JFile, file: JFile): JFile = {
    val target = Paths.get(dir.getPath, file.getName)
    // For a directory this creates the (empty) directory itself; children are copied below.
    Files.copy(file.toPath, target, REPLACE_EXISTING)
    if (file.isDirectory) file.listFiles.map(copyToDir(target.toFile, _))
    target.toFile
  }

  /** Builds a `CompilationTest` which performs the compilation `i` times on
   *  each target
   */
  def times(i: Int): CompilationTest =
    new CompilationTest(targets, i, shouldDelete, threadLimit, shouldFail, shouldSuppressOutput)

  /** Builds a `Compilationtest` which passes the verbose flag and logs the
   *  classpath
   */
  def verbose: CompilationTest = new CompilationTest(
    targets.map(t => t.withFlags("-verbose", "-Ylog-classpath")),
    times, shouldDelete, threadLimit, shouldFail, shouldSuppressOutput
  )

  /** Builds a `CompilationTest` which keeps the generated output files
   *
   *  This is needed for tests like `tastyBootstrap` which relies on first
   *  compiling a certain part of the project and then compiling a second
   *  part which depends on the first
   */
  def keepOutput: CompilationTest =
    new CompilationTest(targets, times, false, threadLimit, shouldFail, shouldSuppressOutput)

  /** Builds a `CompilationTest` with a limited level of concurrency with
   *  maximum `i` threads
   */
  def limitThreads(i: Int): CompilationTest =
    new CompilationTest(targets, times, shouldDelete, Some(i), shouldFail, shouldSuppressOutput)

  /** Builds a `CompilationTest` where the executed test is expected to fail
   *
   *  This behaviour is mainly needed for the tests that test the test suite.
   */
  def expectFailure: CompilationTest =
    new CompilationTest(targets, times, shouldDelete, threadLimit, true, shouldSuppressOutput)

  /** Builds a `CompilationTest` where all output is suppressed
   *
   *  This behaviour is mainly needed for the tests that test the test suite.
   */
  def suppressAllOutput: CompilationTest =
    new CompilationTest(targets, times, shouldDelete, threadLimit, shouldFail, true)

  /** Delete all output files generated by this `CompilationTest` */
  def delete(): Unit = targets.foreach(t => delete(t.outDir))

  // Depth-first recursive delete; a concurrently-vanished file is not an error.
  private def delete(file: JFile): Unit = {
    if (file.isDirectory) file.listFiles.foreach(delete)
    try Files.delete(file.toPath)
    catch {
      case _: NoSuchFileException => // already deleted, everything's fine
    }
  }
}
object CompilationTest {

  /** Merge several `CompilationTest`s into a single one so that their targets
   *  share one thread pool when executed, maximising the concurrency this
   *  suite offers -- each execution method spins up its own pool, so a few
   *  small tests gain little over sequential compilation.
   *
   *  All tests must agree on repetition count, delete-on-finish behaviour,
   *  expected outcome and output suppression; otherwise `require` throws an
   *  `IllegalArgumentException`.
   */
  def aggregateTests(tests: CompilationTest*): CompilationTest = {
    assert(tests.nonEmpty)
    tests.reduceLeft { (acc, next) =>
      require(acc.times == next.times, "can't combine tests that are meant to be benchmark compiled")
      require(acc.shouldDelete == next.shouldDelete, "can't combine tests that differ on deleting output")
      require(acc.shouldFail == next.shouldFail, "can't combine tests that have different expectations on outcome")
      require(acc.shouldSuppressOutput == next.shouldSuppressOutput, "can't combine tests that both suppress and don't suppress output")
      new CompilationTest(acc.targets ++ next.targets, acc.times, acc.shouldDelete, acc.threadLimit, acc.shouldFail, acc.shouldSuppressOutput)
    }
  }
}
/** Creates (and returns) the output directory for test directory `d`, namely
 *  `<outDir><sourceDir-name><sep><d-name>`. `outDir` is expected to already
 *  end with a file separator (see `checkRequirements`).
 */
def createOutputDirsForDir(d: JFile, sourceDir: JFile, outDir: String): JFile = {
  // Use the platform separator, consistent with `createOutputDirsForFile`.
  // (Previously a hard-coded '/', which produced mixed separators on Windows.)
  val targetDir = new JFile(outDir + s"${sourceDir.getName}${JFile.separatorChar}${d.getName}")
  targetDir.mkdirs()
  targetDir
}
/** Creates (and returns) the per-file output directory
 *  `<outDir><sourceDir-name><sep><file-name-without-extension>`.
 */
private def createOutputDirsForFile(file: JFile, sourceDir: JFile, outDir: String): JFile = {
  val name = file.getName
  val baseName = name.substring(0, name.lastIndexOf('.'))
  val targetDir = new JFile(s"$outDir${sourceDir.getName}${JFile.separatorChar}$baseName")
  targetDir.mkdirs()
  targetDir
}
/** Validates the arguments shared by the `compile*` entry points: `sourceDir`
 *  must be an existing directory and `outDir` must end with a file separator.
 */
private def checkRequirements(f: String, sourceDir: JFile, outDir: String): Unit = {
  val isUsableDir = sourceDir.isDirectory && sourceDir.exists
  require(isUsableDir, "passed non-directory to `compileFilesInDir`: " + sourceDir)
  require(outDir.last == JFile.separatorChar, "please specify an `outDir` with a trailing file separator")
}
/** Separates directories from files and returns them as `(dirs, files)`.
 *  Both lists come out in reverse `listFiles` order (elements are prepended);
 *  entries rejected by `fileFilter`, and entries that are neither directories
 *  nor source files, are dropped.
 */
private def compilationTargets(sourceDir: JFile, fileFilter: FileFilter = FileFilter.NoFilter): (List[JFile], List[JFile]) =
  sourceDir.listFiles.foldLeft((List.empty[JFile], List.empty[JFile])) { (acc, f) =>
    val (dirs, files) = acc
    if (!fileFilter.accept(f.getName)) acc
    else if (f.isDirectory) (f :: dirs, files)
    else if (isSourceFile(f)) (dirs, f :: files)
    else acc
  }
/** Compiles the single source file at path `f` with the supplied `flags`,
 *  writing output to a fresh directory derived from the file's base name.
 */
def compileFile(f: String, flags: TestFlags)(implicit testGroup: TestGroup): CompilationTest = {
  val sourceFile = new JFile(f)
  val parent = sourceFile.getParentFile
  val baseName = sourceFile.getName.substring(0, sourceFile.getName.lastIndexOf('.'))
  val outDir = s"$defaultOutputDir$testGroup${JFile.separator}$baseName${JFile.separator}"
  require(
    sourceFile.exists && !sourceFile.isDirectory &&
    (parent ne null) && parent.exists && parent.isDirectory,
    s"Source file: $f, didn't exist"
  )
  val target = JointCompilationSource(
    testGroup.name,
    Array(sourceFile),
    flags,
    createOutputDirsForFile(sourceFile, parent, outDir)
  )
  new CompilationTest(target)
}
/** Compiles a directory `f` using the supplied `flags`. This method does
 *  deep compilation, that is - it compiles all files and subdirectories
 *  contained within the directory `f`.
 *
 *  By default, files are compiled in alphabetical order. An optional seed
 *  can be used for randomization.
 */
def compileDir(f: String, flags: TestFlags, randomOrder: Option[Int] = None, recursive: Boolean = true)(implicit testGroup: TestGroup): CompilationTest = {
  val outDir = defaultOutputDir + testGroup + JFile.separator
  val sourceDir = new JFile(f)
  checkRequirements(f, sourceDir, outDir)
  // Collect all files beneath `f`; only the top level when `recursive` is false.
  def flatten(f: JFile): Array[JFile] =
    if (f.isDirectory) {
      val files = f.listFiles
      if (recursive) files.flatMap(flatten) else files
    }
    else Array(f)
  // Sort files either alphabetically or randomly using the provided seed:
  val sortedFiles = flatten(sourceDir).sorted
  val randomized = randomOrder match {
    case None => sortedFiles
    case Some(seed) => new Random(seed).shuffle(sortedFiles.toList).toArray
  }
  // Directories in which to compile all containing files with `flags`:
  // NOTE(review): `outDir` already ends with a separator, so this path contains
  // a doubled separator -- harmless on common filesystems but inconsistent with
  // the other compile* methods; confirm before "fixing" (output paths may be
  // referenced elsewhere).
  val targetDir = new JFile(outDir + JFile.separator + sourceDir.getName + JFile.separator)
  targetDir.mkdirs()
  val target = JointCompilationSource(s"compiling '$f' in test '$testGroup'", randomized, flags, targetDir)
  new CompilationTest(target)
}
/** Compiles all `files` together as a single compilation run under a
 *  dedicated output directory named after `testName` (files may live in
 *  unrelated directories, hence the explicit name).
 */
def compileList(testName: String, files: List[String], flags: TestFlags)(implicit testGroup: TestGroup): CompilationTest = {
  val outDir = s"$defaultOutputDir$testGroup${JFile.separator}$testName${JFile.separator}"
  // Directory in which every listed file is compiled with `flags`.
  val targetDir = new JFile(outDir)
  targetDir.mkdirs()
  assert(targetDir.exists, s"couldn't create target directory: $targetDir")
  val sources = files.map(new JFile(_)).toArray
  val target = JointCompilationSource(s"$testName from $testGroup", sources, flags, targetDir)
  // The caller decides whether this runs as a pos or a neg test.
  new CompilationTest(target)
}
/** This function compiles the files and folders contained within directory
 *  `f` in a specific way.
 *
 *  - Each file is compiled separately as a single compilation run
 *  - Each directory is compiled as a `SeparateCompilationTarget`, in this
 *    target all files are grouped according to the file suffix `_X` where `X`
 *    is a number. These groups are then ordered in ascending order based on
 *    the value of `X` and each group is compiled one after the other.
 *
 *  For this function to work as expected, we use the same convention for
 *  directory layout as the old partest. That is:
 *
 *  - Single files can have an associated check-file with the same name (but
 *    with file extension `.check`)
 *  - Directories can have an associated check-file, where the check file has
 *    the same name as the directory (with the file extension `.check`)
 */
def compileFilesInDir(f: String, flags: TestFlags, fileFilter: FileFilter = FileFilter.NoFilter)(implicit testGroup: TestGroup): CompilationTest = {
  val outDir = defaultOutputDir + testGroup + JFile.separator
  val sourceDir = new JFile(f)
  checkRequirements(f, sourceDir, outDir)
  val (dirs, files) = compilationTargets(sourceDir, fileFilter)
  val isPicklerTest = flags.options.contains("-Ytest-pickler")
  def picklerDirFilter(source: SeparateCompilationSource): Boolean = {
    // Pickler tests stop after pickler not producing class/tasty files. The second part of the compilation
    // will not be able to compile due to the missing artifacts from the first part.
    !isPicklerTest || source.compilationGroups.length == 1
  }
  // One joint target per file, plus one separate-compilation target per
  // directory (the latter filtered for pickler-test compatibility).
  val targets =
    files.map(f => JointCompilationSource(testGroup.name, Array(f), flags, createOutputDirsForFile(f, sourceDir, outDir))) ++
    dirs.map { dir => SeparateCompilationSource(testGroup.name, dir, flags, createOutputDirsForDir(dir, sourceDir, outDir)) }.filter(picklerDirFilter)
  // Create a CompilationTest and let the user decide whether to execute a pos or a neg test
  new CompilationTest(targets)
}
/** This function compiles the files and folders contained within directory
 *  `f` in a specific way. Once compiled, they are recompiled/run from tasty as sources.
 *
 *  - Each file is compiled separately as a single compilation run
 *  - Each directory is compiled as a `SeparateCompilationTarget`, in this
 *    target all files are grouped according to the file suffix `_X` where `X`
 *    is a number. These groups are then ordered in ascending order based on
 *    the value of `X` and each group is compiled one after the other.
 *
 *  For this function to work as expected, we use the same convention for
 *  directory layout as the old partest. That is:
 *
 *  - Single files can have an associated check-file with the same name (but
 *    with file extension `.check`)
 *  - Directories can have an associated check-file, where the check file has
 *    the same name as the directory (with the file extension `.check`)
 *
 *  Tests in the first part of the tuple must be executed before the second.
 *  Both tests require an explicit delete().
 */
def compileTastyInDir(f: String, flags0: TestFlags, fromTastyFilter: FileFilter)(
  implicit testGroup: TestGroup): TastyCompilationTest = {
  val outDir = defaultOutputDir + testGroup + JFile.separator
  // Trees must be retained by the first compilation so the from-tasty step can read them back.
  val flags = flags0 and "-Yretain-trees"
  val sourceDir = new JFile(f)
  checkRequirements(f, sourceDir, outDir)
  val (dirs, files) = compilationTargets(sourceDir, fromTastyFilter)
  val filteredFiles = testFilter match
    case _ :: _ => files.filter(f => testFilter.exists(f.getPath.contains))
    // NOTE(review): with an empty test filter *no* files are selected here --
    // presumably from-tasty tests are only meant to run when explicitly
    // filtered; confirm this is intentional.
    case _ => Nil
  // A joint-compilation source whose reproduction instructions point at the
  // `--from-tasty` sbt invocation and the from-tasty/decompilation blacklists.
  class JointCompilationSourceFromTasty(
    name: String,
    file: JFile,
    flags: TestFlags,
    outDir: JFile,
    fromTasty: Boolean = false,
  ) extends JointCompilationSource(name, Array(file), flags, outDir, fromTasty) {
    override def buildInstructions(errors: Int, warnings: Int): String = {
      val runOrPos = if (file.getPath.startsWith(s"tests${JFile.separator}run${JFile.separator}")) "run" else "pos"
      val listName = if (fromTasty) "from-tasty" else "decompilation"
      s"""|
          |Test '$title' compiled with $errors error(s) and $warnings warning(s),
          |the test can be reproduced by running:
          |
          |  sbt "testCompilation --from-tasty $file"
          |
          |This tests can be disabled by adding `${file.getName}` to `compiler${JFile.separator}test${JFile.separator}dotc${JFile.separator}$runOrPos-$listName.blacklist`
          |
          |""".stripMargin
    }
  }
  val targets = filteredFiles.map { f =>
    val classpath = createOutputDirsForFile(f, sourceDir, outDir)
    new JointCompilationSourceFromTasty(testGroup.name, f, flags.withClasspath(classpath.getPath), classpath, fromTasty = true)
  }
  // TODO add SeparateCompilationSource from tasty?
  // Create a CompilationTest and let the user decide whether to execute a pos or a neg test
  val generateClassFiles = compileFilesInDir(f, flags0, fromTastyFilter)
  new TastyCompilationTest(
    generateClassFiles.keepOutput,
    new CompilationTest(targets).keepOutput,
    shouldDelete = true
  )
}
/** Two-step compilation for from-tasty tests: `step1` compiles the sources to
 *  class/tasty files, `step2` recompiles (or runs) from the produced tasty.
 *
 *  @param shouldDelete whether the output of both steps is deleted once the run completes
 */
class TastyCompilationTest(step1: CompilationTest, step2: CompilationTest, shouldDelete: Boolean)(implicit testGroup: TestGroup) {

  /** Returns a copy that keeps the generated output after the run.
   *  Fix: previously this returned an identical copy (same `shouldDelete`),
   *  making `keepOutput` a no-op -- the output was still deleted.
   */
  def keepOutput: TastyCompilationTest =
    new TastyCompilationTest(step1, step2, shouldDelete = false)

  def checkCompile()(implicit summaryReport: SummaryReporting): this.type = {
    step1.checkCompile() // Compile all files to generate the class files with tasty
    step2.checkCompile() // Compile from tasty
    if (shouldDelete)
      CompilationTest.aggregateTests(step1, step2).delete()
    this
  }

  def checkRuns()(implicit summaryReport: SummaryReporting): this.type = {
    step1.checkCompile() // Compile all files to generate the class files with tasty
    step2.checkRuns() // Compile from tasty
    if (shouldDelete)
      CompilationTest.aggregateTests(step1, step2).delete()
    this
  }
}
/** This function behaves similar to `compileFilesInDir` but it ignores
 *  sub-directories and as such, does **not** perform separate compilation
 *  tests.
 */
def compileShallowFilesInDir(f: String, flags: TestFlags)(implicit testGroup: TestGroup): CompilationTest = {
  val outDir = defaultOutputDir + testGroup + JFile.separator
  val sourceDir = new JFile(f)
  checkRequirements(f, sourceDir, outDir)

  // Only files directly inside `sourceDir`; sub-directories are discarded.
  val (_, files) = compilationTargets(sourceDir)

  val targets =
    for (file <- files) yield {
      val fileOutDir = createOutputDirsForFile(file, sourceDir, outDir)
      JointCompilationSource(testGroup.name, Array(file), flags, fileOutDir)
    }

  // The caller decides whether to execute this as a pos or a neg test.
  new CompilationTest(targets)
}
/** Extracts the charset passed via `-encoding` in `flags`, defaulting to UTF-8. */
private def getCharsetFromEncodingOpt(flags: TestFlags) = {
  val opts = flags.options
  // Scan consecutive option pairs for `-encoding <charset>`.
  val explicit = opts.zip(opts.drop(1)).collectFirst {
    case ("-encoding", encoding) => Charset.forName(encoding)
  }
  explicit.getOrElse(StandardCharsets.UTF_8)
}
}
/** Shared helpers for the parallel testing infrastructure. */
object ParallelTesting {

  /** Root directory for test output, including the trailing separator. */
  def defaultOutputDir: String = "out" + JFile.separator

  /** A compilable source file is either a Scala or a Java file. */
  def isSourceFile(f: JFile): Boolean = {
    val fileName = f.getName
    Seq(".scala", ".java").exists(fileName.endsWith)
  }

  /** True for serialized TASTy tree files. */
  def isTastyFile(f: JFile): Boolean =
    f.getName.endsWith(".tasty")
}
| dotty-staging/dotty | compiler/test/dotty/tools/vulpix/ParallelTesting.scala | Scala | apache-2.0 | 60,339 |
package org.bitcoins.spvnode.messages.control
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.util.Factory
import org.bitcoins.spvnode.messages.AddrMessage
import org.bitcoins.spvnode.serializers.messages.control.RawAddrMessageSerializer
import org.bitcoins.spvnode.util.NetworkIpAddress
/**
  * Created by chris on 6/3/16.
  * Companion object / factory for [[AddrMessage]].
  * https://bitcoin.org/en/developer-reference#addr
  */
object AddrMessage extends Factory[AddrMessage] {

  // Private concrete implementation backing the AddrMessage interface.
  private case class AddrMessageImpl(ipCount: CompactSizeUInt, addresses: Seq[NetworkIpAddress]) extends AddrMessage

  /** Builds an [[AddrMessage]] from an already-parsed count and address list. */
  def apply(ipCount: CompactSizeUInt, addresses: Seq[NetworkIpAddress]): AddrMessage =
    AddrMessageImpl(ipCount, addresses)

  /** Deserializes an [[AddrMessage]] from its network wire bytes. */
  def fromBytes(bytes: Seq[Byte]): AddrMessage =
    RawAddrMessageSerializer.read(bytes)
}
| bitcoin-s/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/messages/control/AddrMessage.scala | Scala | mit | 842 |
/**
* Copyright (C) 2017 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.saxon
import org.orbeon.oxf.util.XPath
import org.orbeon.oxf.xml.{FunctionSupport, OrbeonFunctionLibrary, SaxonUtils, XMLConstants}
import org.orbeon.saxon.MapFunctions._
import org.orbeon.saxon.`type`._
import org.orbeon.saxon.expr.StaticProperty.{ALLOWS_ZERO_OR_MORE, EXACTLY_ONE}
import org.orbeon.saxon.expr.XPathContext
import org.orbeon.saxon.om._
import org.orbeon.saxon.value._
import org.orbeon.scaxon.Implicits._
/** Base trait for the `map:*` XPath functions; mixes in common function support. */
trait MapFunction extends FunctionSupport {

  // override def addToPathMap(pathMap: PathMap, pathMapNodeSet: PathMap.PathMapNodeSet): PathMap.PathMapNodeSet =
  //   addSubExpressionsToPathMap(pathMap, pathMapNodeSet)
}

/** A map function whose result is itself a map (e.g. `map:entry`, `map:merge`). */
trait ReturnMapFunction extends MapFunction {
  // Advertise the Saxon external-object type used to represent our maps.
  override def getItemType(th: TypeHierarchy): ItemType =
    saxonTypeForMap(th.getConfiguration)
}
//
// `map:entry($key as xs:anyAtomicType, $value as item()*) as map(*)`
//
class MapEntry extends ReturnMapFunction {

  /** Builds a single-entry map from the `$key` / `$value` arguments. */
  override def evaluateItem(context: XPathContext): ObjectValue = {

    implicit val ctx = context
    val config = context.getConfiguration

    // Removed the unused `saxonTypeForMap(config)` local from the original;
    // `createValue` derives the map type from `config` itself.
    val key   = SaxonUtils.fixStringValue(itemArgument(0).asInstanceOf[AtomicValue]) // enforced by signature
    val value = itemsArgument(1)

    createValue(
      Map(key → new SequenceExtent(value)),
      config
    )
  }
}
//
// `map:merge($maps as map(*)*) as map(*)`
//
class MapMerge extends ReturnMapFunction {

  /** Merges all argument maps left to right; later entries win on key clashes. */
  override def evaluateItem(context: XPathContext): ObjectValue = {

    implicit val ctx = context
    val config = context.getConfiguration

    // Removed the unused `saxonTypeForMap(config)` local from the original.
    val maps = itemsArgumentOpt(0).iterator flatMap collectMapValues

    // `++` lets entries from later maps override earlier ones.
    createValue(
      maps.foldLeft(Map.empty[AtomicValue, ValueRepresentation])(_ ++ _),
      config
    )
  }
}
//
// `map:get($map as map(*), $key as xs:anyAtomicType) as item()*`
//
class MapGet extends MapFunction {

  /** Looks `$key` up in `$map`, returning the empty sequence when absent. */
  override def iterate(context: XPathContext): SequenceIterator = {

    implicit val ctx = context

    val maps = itemsArgumentOpt(0).iterator flatMap collectMapValues
    val map  = maps.next()
    val key  = SaxonUtils.fixStringValue(itemArgument(1).asInstanceOf[AtomicValue]) // enforced by signature

    val result = map.getOrElse(key, EmptySequence.getInstance)

    result match {
      case v: Value    ⇒ v.iterate()
      case v: NodeInfo ⇒ SingletonIterator.makeIterator(v)
      case _           ⇒ throw new IllegalStateException
    }
  }
}
object MapFunctions {

  // Maps are represented as plain immutable Scala maps wrapped in Saxon `ObjectValue`s.
  type UnderlyingType = Map[AtomicValue, ValueRepresentation]
  val UnderlyingClass = classOf[UnderlyingType]

  // Saxon external-object type tag identifying our map values.
  def saxonTypeForMap(config: Configuration) = new ExternalObjectType(UnderlyingClass, config)

  // Extracts the underlying Scala maps from a sequence, silently skipping any
  // item that is not one of our map `ObjectValue`s.
  def collectMapValues(it: SequenceIterator)(implicit context: XPathContext): Iterator[UnderlyingType] = {

    val config = context.getConfiguration
    val mapType = saxonTypeForMap(config)

    asScalaIterator(it) collect {
      case v: ObjectValue if v.getItemType(config.getTypeHierarchy) == mapType ⇒
        v.getObject.asInstanceOf[UnderlyingType]
    }
  }

  // Wraps a Scala map into a Saxon `ObjectValue` carrying the map type tag.
  def createValue(value: UnderlyingType, config: Configuration = XPath.GlobalConfiguration): ObjectValue =
    new ObjectValue(
      value,
      saxonTypeForMap(config)
    )
}

// Registers the `map:*` functions with the XPath function library.
trait MapFunctions extends OrbeonFunctionLibrary {

  Namespace(XMLConstants.XPATH_MAP_FUNCTIONS_NAMESPACE_URI) {

    Fun("entry", classOf[MapEntry], op = 0, min = 2, BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE,
      Arg(BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE),
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )

    Fun("merge", classOf[MapMerge], op = 0, min = 1, BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE,
      Arg(BuiltInAtomicType.ANY_ATOMIC, ALLOWS_ZERO_OR_MORE)
    )

    Fun("get", classOf[MapGet], op = 0, min = 2, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE),
      Arg(BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE)
    )
  }
}
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/saxon/MapFunctions.scala | Scala | lgpl-2.1 | 4,575 |
package demy.mllib.search
import scala.collection.mutable.ArrayBuffer
import scala.collection.Map
// A probability tree used to beam-search a likely path through a sequence of
// candidate values. Each node holds one candidate `value` at sequence position
// `level`, its probability `p`, and the candidates for the next position in
// `options`.
// NOTE(review): `options` is deliberately a mutable `var` — `PTree.evaluate`
// expands and prunes the tree in place.
case class PTree[T](var options:Seq[PTree[T]], value:T, p:Double, level:Int) {

  // Expands this node with the candidates of the next sequence position. Each
  // child's probability is the product over all `probTypes` of the transition
  // probability from `this.value` to the candidate (missing entries count as 0).
  def step(sequence:Seq[Seq[T]],probMap:Map[(T, T, String), Double], probTypes:Seq[String]) = {
    val candidates = sequence(this.level+1)
    this.options = candidates.map(c => PTree[T](options=Seq[PTree[T]]()
      , value = c
      , p = probTypes.map(pType => probMap.getOrElse((this.value, c, pType), 0.0)).reduce(_ * _)
      , level = level + 1
    ))
    this
  }

  // Best achievable path probability from this node down to `currentLevel`.
  // NOTE(review): assumes every node above `currentLevel` has at least one
  // option — `.max` on an empty Seq throws; `evaluate` maintains this invariant.
  def getP(currentLevel:Int):Double = if(this.level == currentLevel) this.p else this.p * this.options.map(c => c.getP(currentLevel)).max

  // Prunes `options` down to the single child leading to the best path.
  def chooseBestChild(currentLevel:Int) = {
    this.options = Seq(this.options.map(c => (c, c.getP(currentLevel))).reduce((scored1,scored2) =>
      (scored1,scored2) match {case ((c1, p1),(c2, p2))
        => if(p1 > p2) scored1 else scored2 })._1)
  }

  // All nodes of this subtree sitting exactly at `currentLevel`.
  def getDescendants(currentLevel:Int):Seq[PTree[T]] = {
    if(this.level == currentLevel) Seq(this)
    else this.options.flatMap(c => c.getDescendants(currentLevel))
  }
}

object PTree {

  // Beam search over `sequence`: repeatedly expands the tree until `maxLeafs`
  // is exceeded (or the sequence ends), commits to the best child, and appends
  // its value to the result. When a node has no possible transition at all,
  // `default` is inserted with probability 0.
  def evaluate[T](sequence:Seq[Seq[T]], maxLeafs:Int = 32, probMap:scala.collection.Map[(T, T, String), Double], probTypes:Seq[String], default:T) = {
    var iRes = 0   // index of the next position to commit to the result
    var iFetch = 0 // index of the deepest level expanded so far
    val ret = ArrayBuffer[T]()
    var leafs = sequence(0).map(v => PTree[T](options = Seq[PTree[T]](), value = v, p=1.0, level = 0))
    var toChoose = PTree[T](options = leafs, value = null.asInstanceOf[T], p = 1.0, level = -1)
    val sequenceSize = sequence.size
    println(s"root, ${leafs.size} nodes")
    while(iRes < sequenceSize) {
      // Dead end: no candidate could follow; fall back to the default value.
      if(toChoose.options.size == 0) {
        println("we found an empty option, applying default")
        toChoose.options = Seq(PTree[T](options = Seq[PTree[T]](), value = default, p = 0.0, level = iRes))
        leafs = toChoose.options
      }
      var levelCount = leafs.size
      // Expand level by level until the beam is full or the sequence ends.
      while(levelCount > 0 && levelCount < maxLeafs && iFetch < sequenceSize -1) {
        leafs.foreach(l => l.step(sequence, probMap, probTypes))
        leafs = leafs.flatMap(l => l.options)
        levelCount = leafs.size
        iFetch = iFetch + 1
      }
      println(s"level $iRes, $levelCount nodes")
      if(levelCount == 0) {
        println(s"Expansion $iFetch lead to no possible solution choosing a level before expansion")
        // The last expansion produced no leafs: score paths one level higher.
        toChoose.chooseBestChild(iFetch - 1)
      } else {
        println(s"Expansion $iFetch lead to max leafs or array end using current information ")
        toChoose.chooseBestChild(iFetch)
      }
      val chosen = toChoose.options(0)
      println(s"level $iRes, $chosen has been chosen")
      ret.append(chosen.value)
      // Descend one level: the chosen child becomes the new root of the beam.
      toChoose = chosen
      leafs = chosen.getDescendants(iFetch)
      iRes = iRes + 1
    }
    ret
  }
}
| forchard-epi/demy | mllib/src/main/scala/search/PTree.scala | Scala | bsd-3-clause | 3,088 |
package edu.rice.habanero.benchmarks.bitonicsort
import java.util.Random
import edu.rice.habanero.actors.{JetlangActor, JetlangActorState, JetlangPool}
import edu.rice.habanero.benchmarks.philosopher.PhilosopherAkkaActorBenchmark.ExitMessage
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import scala.collection.mutable.ListBuffer
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object BitonicSortJetlangActorBenchmark {
/** Benchmark entry point: delegates to the harness runner. */
def main(args: Array[String]) {
  BenchmarkRunner.runBenchmark(args, new BitonicSortJetlangActorBenchmark)
}

/** Wires up the actor pipeline source -> kernel -> adapter -> validator
 *  and runs one benchmark iteration.
 */
private final class BitonicSortJetlangActorBenchmark extends Benchmark {
  def initialize(args: Array[String]) {
    BitonicSortConfig.parseArgs(args)
  }

  def printArgInfo() {
    BitonicSortConfig.printArgs()
  }

  def runIteration() {
    // Build the pipeline back to front so each stage knows its successor.
    val validationActor = new ValidationActor(BitonicSortConfig.N)
    validationActor.start()

    val adapterActor = new DataValueAdapterActor(validationActor)
    adapterActor.start()

    // `true` = nondecreasing (UP) sort direction.
    val kernelActor = new BitonicSortKernelActor(BitonicSortConfig.N, true, adapterActor)
    kernelActor.start()

    val sourceActor = new IntSourceActor(BitonicSortConfig.N, BitonicSortConfig.M, BitonicSortConfig.S, kernelActor)
    sourceActor.start()

    // Kick off value generation, then wait for all actors to terminate.
    sourceActor.send(StartMessage())

    JetlangActorState.awaitTermination()
  }

  def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
    if (lastIteration) {
      JetlangPool.shutdown()
    }
  }
}

// Messages exchanged by the pipeline actors.
private case class NextActorMessage(actor: JetlangActor[AnyRef]) // appears unused in this file
private case class ValueMessage(value: Long)                     // a raw value flowing through the network
private case class DataMessage(orderId: Int, value: Long)        // a value tagged with its producer's order id
private case class StartMessage()                                // starts the source actor
/** Tags incoming raw `ValueMessage`s with `orderId` before forwarding;
 *  passes `DataMessage`s straight through and exits on `ExitMessage`.
 */
private class ValueDataAdapterActor(orderId: Int, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  override def process(msg: AnyRef) {
    msg match {
      case ValueMessage(value) =>
        nextActor.send(new DataMessage(orderId, value))
      case dataMsg: DataMessage =>
        nextActor.send(dataMsg)
      case exitMsg: ExitMessage =>
        nextActor.send(exitMsg)
        exit()
    }
  }
}
/** Strips the order id from `DataMessage`s, turning them back into raw
 *  `ValueMessage`s; raw values pass through untouched.
 */
private class DataValueAdapterActor(nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  override def process(msg: AnyRef) {
    msg match {
      case valueMsg: ValueMessage =>
        nextActor.send(valueMsg)
      case DataMessage(_, value) =>
        nextActor.send(new ValueMessage(value))
      case exitMsg: ExitMessage =>
        nextActor.send(exitMsg)
        exit()
    }
  }
}
/** Distributes incoming values round-robin over `receivers`, sending `length`
 *  consecutive values to each receiver before rotating to the next one.
 */
private class RoundRobinSplitterActor(name: String, length: Int, receivers: Array[JetlangActor[AnyRef]]) extends JetlangActor[AnyRef] {
  private var receiverIndex = 0
  private var sentToCurrent = 0

  override def process(msg: AnyRef) {
    msg match {
      case valueMsg: ValueMessage =>
        receivers(receiverIndex).send(valueMsg)
        sentToCurrent += 1
        // After `length` values, rotate to the next receiver.
        if (sentToCurrent == length) {
          receiverIndex = (receiverIndex + 1) % receivers.length
          sentToCurrent = 0
        }
      case exitMsg: ExitMessage =>
        // Propagate the exit to every receiver before terminating.
        receivers.foreach(_.send(exitMsg))
        exit()
    }
  }
}
/** Joins `numJoiners` ordered input streams back into a single stream,
 *  forwarding `length` buffered values from each input in round-robin order.
 */
private class RoundRobinJoinerActor(name: String, length: Int, numJoiners: Int, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // One FIFO buffer of pending messages per input stream (indexed by orderId).
  private val receivedData = Array.tabulate[ListBuffer[DataMessage]](numJoiners)(i => new ListBuffer[DataMessage]())
  private var forwardIndex = 0
  private var currentRun = 0
  private var exitsReceived = 0

  override def process(msg: AnyRef) {
    msg match {
      case dm: DataMessage =>
        receivedData(dm.orderId).append(dm)
        tryForwardMessages(dm)
      case em: ExitMessage =>
        // Only exit once every upstream producer has signalled completion.
        exitsReceived += 1
        if (exitsReceived == numJoiners) {
          nextActor.send(em)
          exit()
        }
    }
  }

  /** Drains as many buffered messages as possible in round-robin order.
   *
   *  Fix: the original body shadowed the (unused) parameter with a local
   *  `val dm`; the local is renamed for clarity. The parameter itself is kept
   *  to preserve the call signature.
   */
  def tryForwardMessages(dm: DataMessage) {
    while (receivedData(forwardIndex).nonEmpty) {
      val pending = receivedData(forwardIndex).remove(0)
      nextActor.send(new ValueMessage(pending.value))
      currentRun += 1
      // After `length` values from this input, move to the next one.
      if (currentRun == length) {
        forwardIndex = (forwardIndex + 1) % numJoiners
        currentRun = 0
      }
    }
  }
}
/**
 * Compares the two input keys and exchanges their order if they are not sorted.
 *
 * sortDirection determines if the sort is nondecreasing (UP) [true] or nonincreasing (DOWN) [false].
 */
private class CompareExchangeActor(orderId: Int, sortDirection: Boolean, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Holds the first key of the current pair while waiting for its partner.
  private var pendingKey: Long = 0
  private var hasPendingKey = false

  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        if (!hasPendingKey) {
          // First key of the pair: stash it until the second one arrives.
          hasPendingKey = true
          pendingKey = vm.value
        } else {
          hasPendingKey = false
          val secondKey = vm.value
          val minK = math.min(pendingKey, secondKey)
          val maxK = math.max(pendingKey, secondKey)
          // UP emits ascending, DOWN emits descending.
          val (first, second) = if (sortDirection) (minK, maxK) else (maxK, minK)
          nextActor.send(DataMessage(orderId, first))
          nextActor.send(DataMessage(orderId, second))
        }
      case em: ExitMessage =>
        nextActor.send(em)
        exit()
    }
  }
}
/**
 * Partition the input bitonic sequence of length L into two bitonic sequences of length L/2,
 * with all numbers in the first sequence <= all numbers in the second sequence if sortdir is UP (similar case for DOWN sortdir)
 *
 * Graphically, it is a bunch of CompareExchanges with same sortdir, clustered together in the sort network at a particular step (of some merge stage).
 */
private class PartitionBitonicSequenceActor(orderId: Int, length: Int, sortDir: Boolean, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Internal pipeline: splitter -> (halfLength CompareExchange workers) -> joiner -> adapter -> nextActor.
  val halfLength = length / 2
  // Re-tags the joined output with this partition's orderId.
  val forwardActor = {
    val actor = new ValueDataAdapterActor(orderId, nextActor)
    actor.start()
    actor
  }
  val joinerActor = {
    val actor = new RoundRobinJoinerActor("Partition-" + orderId, 1, halfLength, forwardActor)
    actor.start()
    actor
  }
  // One CompareExchange per pair of values; all share this partition's direction.
  val workerActors = Array.tabulate[JetlangActor[AnyRef]](halfLength)(i => {
    val actor = new CompareExchangeActor(i, sortDir, joinerActor)
    actor.start()
    actor
  })
  val splitterActor = {
    val actor = new RoundRobinSplitterActor("Partition-" + orderId, 1, workerActors)
    actor.start()
    actor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        splitterActor.send(vm)
      case em: ExitMessage =>
        splitterActor.send(em)
        exit()
    }
  }
}

/**
 * One step of a particular merge stage (used by all merge stages except the last)
 *
 * directionCounter determines which step we are in the current merge stage (which in turn is determined by <L, numSeqPartitions>)
 */
private class StepOfMergeActor(orderId: Int, length: Int, numSeqPartitions: Int, directionCounter: Int, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Converts joined DataMessages back to plain ValueMessages for the next step.
  val forwardActor = {
    val actor = new DataValueAdapterActor(nextActor)
    actor.start()
    actor
  }
  val joinerActor = {
    val actor = new RoundRobinJoinerActor("StepOfMerge-" + orderId + ":" + length, length, numSeqPartitions, forwardActor)
    actor.start()
    actor
  }
  val workerActors = Array.tabulate[JetlangActor[AnyRef]](numSeqPartitions)(i => {
    // finding out the currentDirection is a bit tricky -
    // the direction depends only on the subsequence number during the FIRST step.
    // So to determine the FIRST step subsequence to which this sequence belongs, divide this sequence's number j by directionCounter
    // (bcoz 'directionCounter' tells how many subsequences of the current step make up one subsequence of the FIRST step).
    // Then, test if that result is even or odd to determine if currentDirection is UP or DOWN respectively.
    val currentDirection = (i / directionCounter) % 2 == 0
    // The last step needs special care to avoid split-joins with just one branch.
    if (length > 2) {
      val actor = new PartitionBitonicSequenceActor(i, length, currentDirection, joinerActor)
      actor.start()
      actor
    } else {
      // PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
      val actor = new CompareExchangeActor(i, currentDirection, joinerActor)
      actor.start()
      actor
    }
  })
  val splitterActor = {
    val actor = new RoundRobinSplitterActor("StepOfMerge-" + orderId + ":" + length, length, workerActors)
    actor.start()
    actor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        splitterActor.send(vm)
      case em: ExitMessage =>
        splitterActor.send(em)
        exit()
    }
  }
}

/**
 * One step of the last merge stage
 *
 * Main difference from StepOfMerge is the direction of sort.
 * It is always in the same direction - sortdir.
 */
private class StepOfLastMergeActor(length: Int, numSeqPartitions: Int, sortDirection: Boolean, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // No adapter here: the joiner feeds `nextActor` directly with DataMessages.
  val joinerActor = {
    val actor = new RoundRobinJoinerActor("StepOfLastMerge-" + length, length, numSeqPartitions, nextActor)
    actor.start()
    actor
  }
  val workerActors = Array.tabulate[JetlangActor[AnyRef]](numSeqPartitions)(i => {
    // The last step needs special care to avoid split-joins with just one branch.
    if (length > 2) {
      val actor = new PartitionBitonicSequenceActor(i, length, sortDirection, joinerActor)
      actor.start()
      actor
    } else {
      // PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
      val actor = new CompareExchangeActor(i, sortDirection, joinerActor)
      actor.start()
      actor
    }
  })
  val splitterActor = {
    val actor = new RoundRobinSplitterActor("StepOfLastMerge-" + length, length, workerActors)
    actor.start()
    actor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        splitterActor.send(vm)
      case em: ExitMessage =>
        splitterActor.send(em)
        exit()
    }
  }
}

/**
 * Divide the input sequence of length N into subsequences of length P and sort each of them
 * (either UP or DOWN depending on what subsequence number [0 to N/P-1] they get.
 * All even subsequences are sorted UP and all odd subsequences are sorted DOWN).
 * In short, a MergeStage is N/P Bitonic Sorters of order P each.
 * But, this MergeStage is implemented *iteratively* as logP STEPS.
 */
private class MergeStageActor(P: Int, N: Int, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Chains the logP steps back to front; `forwardActor` is the first step.
  val forwardActor = {
    var loopActor: JetlangActor[AnyRef] = nextActor
    // for each of the lopP steps (except the last step) of this merge stage
    var i = P / 2
    while (i >= 1) {
      // length of each sequence for the current step - goes like P, P/2, ..., 2.
      val L = P / i
      // numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
      val numSeqPartitions = (N / P) * i
      val directionCounter = i
      val tempActor = new StepOfMergeActor(i, L, numSeqPartitions, directionCounter, loopActor)
      tempActor.start()
      loopActor = tempActor
      i /= 2
    }
    loopActor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        forwardActor.send(vm)
      case em: ExitMessage =>
        forwardActor.send(em)
        exit()
    }
  }
}

/**
 * The LastMergeStage is basically one Bitonic Sorter of order N i.e.,
 * it takes the bitonic sequence produced by the previous merge stages
 * and applies a bitonic merge on it to produce the final sorted sequence.
 *
 * This is implemented iteratively as logN steps.
 */
private class LastMergeStageActor(N: Int, sortDirection: Boolean, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Chains the logN steps back to front; `forwardActor` is the first step.
  val forwardActor = {
    var loopActor: JetlangActor[AnyRef] = nextActor
    // for each of the lopN steps (except the last step) of this merge stage
    var i = N / 2
    while (i >= 1) {
      // length of each sequence for the current step - goes like N, N/2, ..., 2.
      val L = N / i
      // numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
      val numSeqPartitions = i
      val tempActor = new StepOfLastMergeActor(L, numSeqPartitions, sortDirection, loopActor)
      tempActor.start()
      loopActor = tempActor
      i /= 2
    }
    loopActor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        forwardActor.send(vm)
      case em: ExitMessage =>
        forwardActor.send(em)
        exit()
    }
  }
}

/**
 * The top-level kernel of bitonic-sort (iterative version) -
 * It has logN merge stages and all merge stages except the last progressively builds a bitonic sequence out of the input sequence.
 * The last merge stage acts on the resultant bitonic sequence to produce the final sorted sequence (sortdir determines if it is UP or DOWN).
 */
private class BitonicSortKernelActor(N: Int, sortDirection: Boolean, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  // Builds the chain of merge stages back to front, ending with the last merge stage.
  val forwardActor = {
    var loopActor: JetlangActor[AnyRef] = nextActor

    {
      val tempActor = new LastMergeStageActor(N, sortDirection, loopActor)
      tempActor.start()
      loopActor = tempActor
    }

    // Prepend the ordinary merge stages of order N/2, N/4, ..., 2.
    var i = N / 2
    while (i >= 2) {
      val tempActor = new MergeStageActor(i, N, loopActor)
      tempActor.start()
      loopActor = tempActor
      i /= 2
    }

    loopActor
  }
  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        forwardActor.send(vm)
      case em: ExitMessage =>
        forwardActor.send(em)
        exit()
    }
  }
}
/** Generates `numValues` pseudo-random values in `[0, maxValue)` and feeds
 *  them into the sort network, followed by an `ExitMessage`.
 */
private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long, nextActor: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  private val random = new Random(seed)
  private val sb = new StringBuilder()

  override def process(msg: AnyRef) {
    msg match {
      case nm: StartMessage =>
        var i = 0
        while (i < numValues) {
          // Fix: take the remainder *before* the absolute value. The original
          // `Math.abs(random.nextLong()) % maxValue` yields a negative value
          // when nextLong() returns Long.MinValue, because
          // Math.abs(Long.MinValue) overflows to Long.MinValue. The remainder
          // is always strictly smaller than Long.MaxValue in magnitude, so
          // Math.abs is safe here, and for every other input the result is
          // unchanged (|x % m| == |x| % m for m > 0 under truncated division).
          val candidate = Math.abs(random.nextLong() % maxValue)
          if (BitonicSortConfig.debug) {
            sb.append(candidate + " ")
          }
          nextActor.send(new ValueMessage(candidate))
          i += 1
        }
        if (BitonicSortConfig.debug) {
          println(" SOURCE: " + sb)
        }
        nextActor.send(ExitMessage())
        exit()
    }
  }
}
/** Terminal actor: checks that exactly `numValues` values arrive in
 *  nondecreasing order, reporting either the element sum or the first
 *  out-of-place value on exit.
 */
private class ValidationActor(numValues: Int) extends JetlangActor[AnyRef] {
  private var sumSoFar = 0.0
  private var valuesSoFar = 0
  private var prevValue = 0L
  // (value, index) of the first out-of-order element; (-1, -1) until one is seen.
  private var errorValue = (-1L, -1)
  private val sb = new StringBuilder()

  override def process(msg: AnyRef) {
    msg match {
      case vm: ValueMessage =>
        valuesSoFar += 1
        if (BitonicSortConfig.debug) {
          sb.append(vm.value + " ")
        }
        // Record only the first ordering violation.
        if (vm.value < prevValue && errorValue._1 < 0) {
          errorValue = (vm.value, valuesSoFar - 1)
        }
        prevValue = vm.value
        sumSoFar += prevValue
      case em: ExitMessage =>
        if (valuesSoFar == numValues) {
          if (BitonicSortConfig.debug) {
            println(" OUTPUT: " + sb)
          }
          if (errorValue._1 >= 0) {
            println(" ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
          } else {
            println(" Elements sum: " + sumSoFar)
          }
        } else {
          // An ExitMessage arrived before all values did.
          println(" ERROR: early exit triggered, received only " + valuesSoFar + " values!")
        }
        exit()
    }
  }
}
}
| smarr/savina | src/main/scala/edu/rice/habanero/benchmarks/bitonicsort/BitonicSortJetlangActorBenchmark.scala | Scala | gpl-2.0 | 16,644 |
/*
* Copyright (c) 2013-2017 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
import sbt._
/** Centralized dependency declarations for the sbt build:
 *  resolvers, version constants, and library module ids.
 */
object Dependencies {

  // Extra resolvers needed on top of the sbt defaults.
  // NOTE(review): the Snowplow resolver uses plain http; newer sbt versions
  // reject insecure resolvers — switch to https if the host supports it.
  val resolutionRepos = Seq(
    "Snowplow Analytics Maven repo" at "http://maven.snplow.com/releases/",
    // For uaParser utils
    "user-agent-parser repo" at "https://clojars.org/repo/"
  )

  // Version constants, grouped by ecosystem.
  object V {
    // Java
    val awsSdk = "1.11.115"
    val yodaTime = "2.9.9"
    val kafka = "0.10.2.1"
    val slf4j = "1.7.5"
    val config = "1.3.1"
    // Scala
    val snowplowCommonEnrich = "0.26.0"
    val igluClient = "0.5.0"
    val collectorPayload = "0.0.0"
    val scalaz7 = "7.0.9"
    val akkaHttp = "10.0.9"
    val akkaSlf4j = "2.4.19"
    val scopt = "3.6.0"
    val json4s = "3.2.11"
    val pureconfig = "0.8.0"
    // Scala (test only)
    val specs2 = "3.9.4"
  }

  // Module ids, referencing the versions above.
  object Libraries {
    // Java
    val awsSdk = "com.amazonaws" % "aws-java-sdk-kinesis" % V.awsSdk
    val yodaTime = "joda-time" % "joda-time" % V.yodaTime
    val kafkaClients = "org.apache.kafka" % "kafka-clients" % V.kafka
    val slf4j = "org.slf4j" % "slf4j-simple" % V.slf4j
    val log4jOverSlf4j = "org.slf4j" % "log4j-over-slf4j" % V.slf4j
    val config = "com.typesafe" % "config" % V.config
    // Scala
    val snowplowCommonEnrich = "com.snowplowanalytics" %% "snowplow-common-enrich" % V.snowplowCommonEnrich
    val igluClient = "com.snowplowanalytics" %% "iglu-scala-client" % V.igluClient
    val collectorPayload = "com.snowplowanalytics" % "collector-payload-1" % V.collectorPayload
    val scalaz7 = "org.scalaz" %% "scalaz-core" % V.scalaz7
    val scopt = "com.github.scopt" %% "scopt" % V.scopt
    val akkaHttp = "com.typesafe.akka" %% "akka-http" % V.akkaHttp
    val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % V.akkaSlf4j
    val json4sJackson = "org.json4s" %% "json4s-jackson" % V.json4s
    val pureconfig = "com.github.pureconfig" %% "pureconfig" % V.pureconfig
    // Scala (test only)
    val specs2 = "org.specs2" %% "specs2-core" % V.specs2 % "test"
    val akkaHttpTestkit = "com.typesafe.akka" %% "akka-http-testkit" % V.akkaHttp % "test"
  }
}
| sspinc/snowplow | 2-collectors/scala-stream-collector/project/Dependencies.scala | Scala | apache-2.0 | 3,344 |
package se.gigurra.leavu3
import com.github.gigurra.heisenberg.{MapDataProducer, MapDataParser}
/**
* Created by kjolh on 3/12/2016.
*/
package object datamodel {

  // Axis conventions (derived from the mappings below):
  //   DCS wire format: x = north, y = up, z = east
  //   Vec3           : (x = east, y = north, z = up)
  // Parser: DCS map -> Vec3, swapping the axes accordingly.
  implicit val vec3MapDataParser = new MapDataParser[Vec3] {
    override def parse(field: Any): Vec3 = {
      val data = field.asInstanceOf[Map[String, Number]]
      val dcsX_North = data("x").floatValue
      val dcsY_Up = data("y").floatValue
      val dcsZ_East = data("z").floatValue
      Vec3(dcsZ_East, dcsX_North, dcsY_Up)
    }
  }

  // Producer: inverse mapping, Vec3 back to the DCS axis names.
  implicit val vec3MapDataProducer = new MapDataProducer[Vec3] {
    override def produce(t: Vec3): Any = {
      val dcsX_North = t.y
      val dcsY_Up = t.z
      val dcsZ_East = t.x
      Map("x" -> dcsX_North, "y" -> dcsY_Up, "z" -> dcsZ_East)
    }
  }
}
| GiGurra/leavu3 | src/main/scala/se/gigurra/leavu3/datamodel/package.scala | Scala | mit | 775 |
package com.outr.arango.collection
import com.outr.arango.CollectionType
import com.outr.arango.query.QueryPart
/** Minimal description of an ArangoDB collection, usable directly as an AQL query part. */
trait Collection extends QueryPart.Support {
  // The kind of collection (see CollectionType).
  def `type`: CollectionType
  def dbName: String
  def name: String

  // A collection renders as its bare name inside a query.
  override def toQueryPart: QueryPart = QueryPart.Static(name)
}
| outr/scarango | driver/src/main/scala/com/outr/arango/collection/Collection.scala | Scala | mit | 296 |
package io.flow.delta.actors
import akka.actor.Actor
import db.{ItemsDao, ProjectsDao}
import io.flow.akka.SafeReceive
import io.flow.log.RollbarLogger
import io.flow.postgresql.Authorization
object SearchActor {

  // Messages understood by SearchActor.
  sealed trait Message

  object Messages {
    // Re-index the project with the given id (or remove it if it no longer exists).
    case class SyncProject(id: String) extends Message
  }
}

/** Keeps the search index (items) in sync with the projects table. */
class SearchActor(
  logger: RollbarLogger,
  projectsDao: ProjectsDao,
  itemsDao: ItemsDao
) extends Actor {

  private[this] implicit val configuredRollbar = logger.fingerprint("SearchActor")

  def receive = SafeReceive.withLogUnhandled {
    case SearchActor.Messages.SyncProject(id) =>
      // A deleted project is removed from the index; an existing one is re-indexed.
      projectsDao.findById(Authorization.All, id) match {
        case None => itemsDao.deleteByObjectId(Authorization.All, MainActor.SystemUser, id)
        case Some(project) => itemsDao.replaceProject(MainActor.SystemUser, project)
      }
      ()
  }
}
| flowcommerce/delta | api/app/actors/SearchActor.scala | Scala | mit | 874 |
package de.fosd.typechef.parser.test.parsers
import de.fosd.typechef.parser._
import de.fosd.typechef.conditional._
import de.fosd.typechef.featureexpr.{FeatureExprFactory, FeatureExpr}
import scala.language.higherKinds
// AST node holding a variability-aware list of parsed digits.
case class DigitList2(list: List[Opt[Conditional[AST]]]) extends AST

/** Parser skeleton for digit lists: concrete subclasses define `digits` and
 *  how optional results are represented (`OptResult` / `myRepOpt`).
 */
abstract class DigitList2Parser extends MultiFeatureParser {
  type Elem = MyToken
  type TypeContext = Any
  // Representation of an optional parse result, chosen by subclasses.
  type OptResult[T]

  def myRepOpt[T](p: => MultiParser[T], productionName: String): MultiParser[List[OptResult[T]]]

  // Parses a token stream starting in the `True` feature context and expects
  // a single joined result.
  def parse(tokens: List[MyToken]): ParseResult[Conditional[AST]] = digits(new TokenReader[MyToken, TypeContext](tokens, 0, null, EofToken), FeatureExprFactory.True).join(FeatureExprFactory.True).expectOneResult

  // "(" digits ")" — the surrounding parentheses are dropped from the result.
  def digitList: MultiParser[Conditional[AST]] =
    (t("(") ~! (digits ~ t(")"))) ^^! ({
      case b1 ~ (e ~ b2) => e
    })

  def digits: MultiParser[AST]

  // Matches the literal token `text`.
  def t(text: String) = token(text, (x => x.t == text))

  // A single digit 1-5, optionally followed by "!" marks, parsed into a Lit.
  def digit: MultiParser[AST] =
    token("digit", ((x) => x.t == "1" | x.t == "2" | x.t == "3" | x.t == "4" | x.t == "5")) ~ repPlain(t("!")) ^^ {
      case t ~ _ => Lit(t.text.toInt)
    }
}
| mbeddr/TypeChef | ParserFramework/src/test/scala/de/fosd/typechef/parser/test/parsers/DigitList2Parser.scala | Scala | lgpl-3.0 | 1,189 |
package com.twitter.finatra.conversions
/** Enrichments for `Boolean`: convert a boolean into an `Option` and run
 *  side effects conditionally while passing the original value through.
 */
object boolean {

  implicit class RichBoolean(boolean: Boolean) {

    /** Returns `Some(func)` when true, `None` otherwise.
     *  `func` is by-name and only evaluated when the boolean is true.
     */
    def option[A](func: => A): Option[A] =
      if (boolean) Some(func) else None

    /** Runs `func` when true; always returns the original boolean. */
    def onTrue(func: => Unit): Boolean = {
      if (boolean) func
      boolean
    }

    /** Runs `func` when false; always returns the original boolean. */
    def onFalse(func: => Unit): Boolean = {
      if (!boolean) func
      boolean
    }
  }
}
| joecwu/finatra | utils/src/main/scala/com/twitter/finatra/conversions/boolean.scala | Scala | apache-2.0 | 444 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.chart
import java.time.ZoneOffset
import java.time.ZonedDateTime
import com.netflix.atlas.core.model.DsType
import com.netflix.atlas.core.model.FunctionTimeSeq
import com.netflix.atlas.core.model.TimeSeries
import com.netflix.atlas.core.util.Streams
import org.scalatest.FunSuite
/** Checks the JSON graph engines' serialized output for a simple two-line
 *  graph (a constant 42 and an all-NaN line) over a 3-minute window.
 */
class JsonGraphEngineSuite extends FunSuite {

  val step = 60000

  /** A time series tagged `name -> v` that returns `v` for every datapoint. */
  def constant(v: Double): TimeSeries = {
    TimeSeries(Map("name" -> v.toString), new FunctionTimeSeq(DsType.Gauge, step, _ => v))
  }

  def constantSeriesDef(value: Double): SeriesDef = {
    val seriesDef = new SeriesDef
    seriesDef.data = constant(value)
    seriesDef
  }

  /** Labels each series with its index ("0", "1", ...). */
  def label(vs: SeriesDef*): List[SeriesDef] = {
    vs.zipWithIndex.foreach { case (v, i) => v.label = i.toString }
    vs.toList
  }

  /** Collapses a margin-formatted multi-line expectation into a single line. */
  def strip(expected: String): String = {
    expected.stripMargin.split("\n").mkString("")
  }

  // Renders a fixed graph definition with `engine` and compares the output.
  // Fix: was deprecated procedure syntax (`def process(...) { ... }`);
  // now uses the equivalent explicit `: Unit =` form.
  def process(engine: GraphEngine, expected: String): Unit = {
    val plotDef = new PlotDef
    plotDef.series = label(constantSeriesDef(42), constantSeriesDef(Double.NaN))

    val graphDef = new GraphDef
    graphDef.startTime = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant
    graphDef.endTime = ZonedDateTime.of(2012, 1, 1, 0, 3, 0, 0, ZoneOffset.UTC).toInstant
    graphDef.plots = List(plotDef)

    val bytes = Streams.byteArray { out => engine.write(graphDef, out) }
    val json = new String(bytes, "UTF-8")
    assert(json === strip(expected))
  }

  test("json") {
    val expected =
      """{
        |"start":1325376000000,
        |"step":60000,
        |"legend":["0","1"],
        |"metrics":[{},{}],
        |"values":[[42.000000,NaN],[42.000000,NaN],[42.000000,NaN],[42.000000,NaN]],
        |"notices":[]
        |}"""
    process(new JsonGraphEngine, expected)
  }

  test("std.json") {
    val expected =
      """{
        |"start":1325376000000,
        |"step":60000,
        |"legend":["0","1"],
        |"metrics":[{},{}],
        |"values":[[42.000000,"NaN"],[42.000000,"NaN"],[42.000000,"NaN"],[42.000000,"NaN"]],
        |"notices":[]
        |}"""
    process(new StdJsonGraphEngine, expected)
  }
}
| gorcz/atlas | atlas-chart/src/test/scala/com/netflix/atlas/chart/JsonGraphEngineSuite.scala | Scala | apache-2.0 | 2,739 |
package org.knora.webapi.responders.v2.search.gravsearch.prequery
import org.knora.webapi.messages.admin.responder.usersmessages.UserADM
import org.knora.webapi.responders.ResponderData
import org.knora.webapi.responders.v2.search._
import org.knora.webapi.responders.v2.search.gravsearch.types.{GravsearchTypeInspectionRunner, GravsearchTypeInspectionUtil}
import org.knora.webapi.responders.v2.search.gravsearch.{GravsearchParser, GravsearchQueryChecker}
import org.knora.webapi.util.IriConversions._
import org.knora.webapi.util.StringFormatter
import org.knora.webapi.{AssertionException, CoreSpec, SettingsImpl, SharedTestDataADM}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
 * Test helper that runs the full Gravsearch-to-COUNT-prequery pipeline:
 * parse, type inspection, CONSTRUCT clause validation, and transformation
 * into a SELECT (count) prequery.
 */
private object CountQueryHandler {

  // Maximum time to wait for the (asynchronous) type inspection to complete.
  private val timeout = 10.seconds

  val anythingUser: UserADM = SharedTestDataADM.anythingAdminUser

  /** Transforms a Gravsearch CONSTRUCT query string into a COUNT prequery. */
  def transformQuery(query: String, responderData: ResponderData, settings: SettingsImpl): SelectQuery = {

    val constructQuery = GravsearchParser.parseQuery(query)

    val typeInspectionRunner = new GravsearchTypeInspectionRunner(responderData = responderData, inferTypes = true)

    val typeInspectionResultFuture = typeInspectionRunner.inspectTypes(constructQuery.whereClause, anythingUser)

    // Blocking is acceptable here because this is test-only code.
    val typeInspectionResult = Await.result(typeInspectionResultFuture, timeout)

    val whereClauseWithoutAnnotations: WhereClause = GravsearchTypeInspectionUtil.removeTypeAnnotations(constructQuery.whereClause)

    // Validate schemas and predicates in the CONSTRUCT clause.
    GravsearchQueryChecker.checkConstructClause(
      constructClause = constructQuery.constructClause,
      typeInspectionResult = typeInspectionResult
    )

    // Create a Select prequery
    val nonTriplestoreSpecificConstructToSelectTransformer: NonTriplestoreSpecificGravsearchToCountPrequeryGenerator = new NonTriplestoreSpecificGravsearchToCountPrequeryGenerator(
      typeInspectionResult = typeInspectionResult,
      querySchema = constructQuery.querySchema.getOrElse(throw AssertionException(s"WhereClause has no querySchema"))
    )

    val nonTriplestoreSpecficPrequery: SelectQuery = QueryTraverser.transformConstructToSelect(
      inputQuery = constructQuery.copy(
        whereClause = whereClauseWithoutAnnotations,
        orderBy = Seq.empty[OrderCriterion] // count queries do not need any sorting criteria
      ),
      transformer = nonTriplestoreSpecificConstructToSelectTransformer
    )

    nonTriplestoreSpecficPrequery
  }
}
/**
 * Tests the generation of COUNT prequeries from Gravsearch input queries by
 * comparing the transformer output against hand-written expected queries.
 */
class NonTriplestoreSpecificGravsearchToCountPrequeryGeneratorSpec extends CoreSpec() {

  implicit val stringFormatter: StringFormatter = StringFormatter.getGeneralInstance

  "The NonTriplestoreSpecificGravsearchToCountPrequeryGenerator object" should {

    "transform an input query with a decimal as an optional sort criterion and a filter" in {
      val transformedQuery = CountQueryHandler.transformQuery(inputQueryWithDecimalOptionalSortCriterionAndFilter, responderData, settings)

      assert(transformedQuery === transformedQueryWithDecimalOptionalSortCriterionAndFilter)
    }

    "transform an input query with a decimal as an optional sort criterion and a filter (submitted in complex schema)" in {
      val transformedQuery = CountQueryHandler.transformQuery(inputQueryWithDecimalOptionalSortCriterionAndFilterComplex, responderData, settings)

      assert(transformedQuery === transformedQueryWithDecimalOptionalSortCriterionAndFilterComplex)
    }
  }

  // Gravsearch input (simple schema): optional decimal value used as sort
  // criterion, restricted by a FILTER.
  val inputQueryWithDecimalOptionalSortCriterionAndFilter: String =
    """
      |PREFIX anything: <http://0.0.0.0:3333/ontology/0001/anything/simple/v2#>
      |PREFIX knora-api: <http://api.knora.org/ontology/knora-api/simple/v2#>
      |
      |CONSTRUCT {
      | ?thing knora-api:isMainResource true .
      |
      | ?thing anything:hasDecimal ?decimal .
      |} WHERE {
      |
      | ?thing a anything:Thing .
      | ?thing a knora-api:Resource .
      |
      | OPTIONAL {
      | ?thing anything:hasDecimal ?decimal .
      | anything:hasDecimal knora-api:objectType xsd:decimal .
      |
      | ?decimal a xsd:decimal .
      |
      | FILTER(?decimal > "2"^^xsd:decimal)
      | }
      |} ORDER BY ASC(?decimal)
    """.stripMargin

  // Expected COUNT prequery for the simple-schema input above: the ORDER BY is
  // dropped, the result is a DISTINCT count of ?thing with LIMIT 1.
  val transformedQueryWithDecimalOptionalSortCriterionAndFilter: SelectQuery =
    SelectQuery(
      variables = Vector(Count(
        inputVariable = QueryVariable(variableName = "thing"),
        distinct = true,
        outputVariableName = "count"
      )),
      offset = 0,
      groupBy = Nil,
      orderBy = Nil,
      whereClause = WhereClause(
        patterns = Vector(
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type".toSmartIri,
              propertyPathOperator = None
            ),
            obj = IriRef(
              iri = "http://www.knora.org/ontology/knora-base#Resource".toSmartIri,
              propertyPathOperator = None
            ),
            namedGraph = None
          ),
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.knora.org/ontology/knora-base#isDeleted".toSmartIri,
              propertyPathOperator = None
            ),
            obj = XsdLiteral(
              value = "false",
              datatype = "http://www.w3.org/2001/XMLSchema#boolean".toSmartIri
            ),
            namedGraph = Some(IriRef(
              iri = "http://www.knora.org/explicit".toSmartIri,
              propertyPathOperator = None
            ))
          ),
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type".toSmartIri,
              propertyPathOperator = None
            ),
            obj = IriRef(
              iri = "http://www.knora.org/ontology/0001/anything#Thing".toSmartIri,
              propertyPathOperator = None
            ),
            namedGraph = None
          ),
          OptionalPattern(patterns = Vector(
            StatementPattern(
              subj = QueryVariable(variableName = "thing"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/0001/anything#hasDecimal".toSmartIri,
                propertyPathOperator = None
              ),
              obj = QueryVariable(variableName = "decimal"),
              namedGraph = None
            ),
            StatementPattern(
              subj = QueryVariable(variableName = "decimal"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/knora-base#isDeleted".toSmartIri,
                propertyPathOperator = None
              ),
              obj = XsdLiteral(
                value = "false",
                datatype = "http://www.w3.org/2001/XMLSchema#boolean".toSmartIri
              ),
              namedGraph = Some(IriRef(
                iri = "http://www.knora.org/explicit".toSmartIri,
                propertyPathOperator = None
              ))
            ),
            StatementPattern(
              subj = QueryVariable(variableName = "decimal"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/knora-base#valueHasDecimal".toSmartIri,
                propertyPathOperator = None
              ),
              obj = QueryVariable(variableName = "decimal__valueHasDecimal"),
              namedGraph = Some(IriRef(
                iri = "http://www.knora.org/explicit".toSmartIri,
                propertyPathOperator = None
              ))
            ),
            FilterPattern(expression = CompareExpression(
              leftArg = QueryVariable(variableName = "decimal__valueHasDecimal"),
              operator = CompareExpressionOperator.GREATER_THAN,
              rightArg = XsdLiteral(
                value = "2",
                datatype = "http://www.w3.org/2001/XMLSchema#decimal".toSmartIri
              )
            ))
          ))
        ),
        positiveEntities = Set(),
        querySchema = None
      ),
      limit = Some(1),
      useDistinct = true
    )

  // Gravsearch input (complex schema): same query as above, expressed with the
  // complex-schema value object (?decimal knora-api:decimalValueAsDecimal).
  val inputQueryWithDecimalOptionalSortCriterionAndFilterComplex: String =
    """
      |PREFIX anything: <http://0.0.0.0:3333/ontology/0001/anything/v2#>
      |PREFIX knora-api: <http://api.knora.org/ontology/knora-api/v2#>
      |
      |CONSTRUCT {
      | ?thing knora-api:isMainResource true .
      |
      | ?thing anything:hasDecimal ?decimal .
      |} WHERE {
      |
      | ?thing a anything:Thing .
      | ?thing a knora-api:Resource .
      |
      | OPTIONAL {
      | ?thing anything:hasDecimal ?decimal .
      |
      | ?decimal knora-api:decimalValueAsDecimal ?decimalVal .
      |
      | FILTER(?decimalVal > "2"^^xsd:decimal)
      | }
      |} ORDER BY ASC(?decimal)
    """.stripMargin

  // Expected COUNT prequery for the complex-schema input above. Differs from
  // the simple-schema expectation only in the user-supplied ?decimalVal
  // variable (no generated decimal__valueHasDecimal, no explicit named graph
  // on the valueHasDecimal statement).
  val transformedQueryWithDecimalOptionalSortCriterionAndFilterComplex: SelectQuery =
    SelectQuery(
      variables = Vector(Count(
        inputVariable = QueryVariable(variableName = "thing"),
        distinct = true,
        outputVariableName = "count"
      )),
      offset = 0,
      groupBy = Nil,
      orderBy = Nil,
      whereClause = WhereClause(
        patterns = Vector(
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type".toSmartIri,
              propertyPathOperator = None
            ),
            obj = IriRef(
              iri = "http://www.knora.org/ontology/knora-base#Resource".toSmartIri,
              propertyPathOperator = None
            ),
            namedGraph = None
          ),
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.knora.org/ontology/knora-base#isDeleted".toSmartIri,
              propertyPathOperator = None
            ),
            obj = XsdLiteral(
              value = "false",
              datatype = "http://www.w3.org/2001/XMLSchema#boolean".toSmartIri
            ),
            namedGraph = Some(IriRef(
              iri = "http://www.knora.org/explicit".toSmartIri,
              propertyPathOperator = None
            ))
          ),
          StatementPattern(
            subj = QueryVariable(variableName = "thing"),
            pred = IriRef(
              iri = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type".toSmartIri,
              propertyPathOperator = None
            ),
            obj = IriRef(
              iri = "http://www.knora.org/ontology/0001/anything#Thing".toSmartIri,
              propertyPathOperator = None
            ),
            namedGraph = None
          ),
          OptionalPattern(patterns = Vector(
            StatementPattern(
              subj = QueryVariable(variableName = "thing"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/0001/anything#hasDecimal".toSmartIri,
                propertyPathOperator = None
              ),
              obj = QueryVariable(variableName = "decimal"),
              namedGraph = None
            ),
            StatementPattern(
              subj = QueryVariable(variableName = "decimal"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/knora-base#isDeleted".toSmartIri,
                propertyPathOperator = None
              ),
              obj = XsdLiteral(
                value = "false",
                datatype = "http://www.w3.org/2001/XMLSchema#boolean".toSmartIri
              ),
              namedGraph = Some(IriRef(
                iri = "http://www.knora.org/explicit".toSmartIri,
                propertyPathOperator = None
              ))
            ),
            StatementPattern(
              subj = QueryVariable(variableName = "decimal"),
              pred = IriRef(
                iri = "http://www.knora.org/ontology/knora-base#valueHasDecimal".toSmartIri,
                propertyPathOperator = None
              ),
              obj = QueryVariable(variableName = "decimalVal"),
              namedGraph = None
            ),
            FilterPattern(expression = CompareExpression(
              leftArg = QueryVariable(variableName = "decimalVal"),
              operator = CompareExpressionOperator.GREATER_THAN,
              rightArg = XsdLiteral(
                value = "2",
                datatype = "http://www.w3.org/2001/XMLSchema#decimal".toSmartIri
              )
            ))
          ))
        ),
        positiveEntities = Set(),
        querySchema = None
      ),
      limit = Some(1),
      useDistinct = true
    )
} | musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/responders/v2/search/gravsearch/prequery/NonTriplestoreSpecificGravsearchToCountPrequeryGeneratorSpec.scala | Scala | agpl-3.0 | 15,671 |
package service
import model._
import scala.slick.driver.H2Driver.simple._
import Database.threadLocalSession
import service.SystemSettingsService.SystemSettings
import util.StringUtil._
import model.GroupMember
import scala.Some
import model.Account
import util.LDAPUtil
import org.slf4j.LoggerFactory
trait AccountService {

  private val logger = LoggerFactory.getLogger(classOf[AccountService])

  /**
   * Authenticates the given user, either against LDAP (when enabled in the
   * system settings) or against the internal account database.
   *
   * @return the authenticated account, or None if authentication failed
   */
  def authenticate(settings: SystemSettings, userName: String, password: String): Option[Account] =
    if(settings.ldapAuthentication){
      ldapAuthentication(settings, userName, password)
    } else {
      defaultAuthentication(userName, password)
    }

  /**
   * Authenticate by internal database.
   *
   * Group accounts can never authenticate; passwords are compared as SHA-1 hashes.
   */
  private def defaultAuthentication(userName: String, password: String) = {
    // Option.filter states the success condition directly; the previous
    // collect { case ... => Some(account) } getOrElse None built a nested
    // Option only to flatten it again.
    getAccountByUserName(userName).filter { account =>
      !account.isGroupAccount && account.password == sha1(password)
    }
  }

  /**
   * Authenticate by LDAP. On success the local account is created or updated
   * with the mail address delivered by LDAP; on failure authentication falls
   * back to the internal database.
   */
  private def ldapAuthentication(settings: SystemSettings, userName: String, password: String) = {
    LDAPUtil.authenticate(settings.ldap.get, userName, password) match {
      case Right(mailAddress) => {
        // Create or update account by LDAP information
        getAccountByUserName(userName) match {
          case Some(x) => updateAccount(x.copy(mailAddress = mailAddress))
          case None    => createAccount(userName, "", mailAddress, false, None)
        }
        getAccountByUserName(userName)
      }
      case Left(errorMessage) => {
        logger.info(s"LDAP Authentication Failed: ${errorMessage}")
        defaultAuthentication(userName, password)
      }
    }
  }

  /** Looks up an account (user or group) by its user name. */
  def getAccountByUserName(userName: String): Option[Account] =
    Query(Accounts) filter(_.userName is userName.bind) firstOption

  /** Looks up an account by its mail address. */
  def getAccountByMailAddress(mailAddress: String): Option[Account] =
    Query(Accounts) filter(_.mailAddress is mailAddress.bind) firstOption

  /** Returns all accounts, ordered by user name. */
  def getAllUsers(): List[Account] = Query(Accounts) sortBy(_.userName) list

  /** Creates a new (non-group) user account. The password is stored as given. */
  def createAccount(userName: String, password: String, mailAddress: String, isAdmin: Boolean, url: Option[String]): Unit =
    Accounts insert Account(
      userName       = userName,
      password       = password,
      mailAddress    = mailAddress,
      isAdmin        = isAdmin,
      url            = url,
      registeredDate = currentDate,
      updatedDate    = currentDate,
      lastLoginDate  = None,
      image          = None,
      isGroupAccount = false)

  /** Persists the mutable fields of the given account; updatedDate is set to now. */
  def updateAccount(account: Account): Unit =
    Accounts
      .filter { a => a.userName is account.userName.bind }
      .map { a => a.password ~ a.mailAddress ~ a.isAdmin ~ a.url.? ~ a.registeredDate ~ a.updatedDate ~ a.lastLoginDate.? }
      .update (
        account.password,
        account.mailAddress,
        account.isAdmin,
        account.url,
        account.registeredDate,
        currentDate,
        account.lastLoginDate)

  /** Sets or clears the avatar image of the given user. */
  def updateAvatarImage(userName: String, image: Option[String]): Unit =
    Accounts.filter(_.userName is userName.bind).map(_.image.?).update(image)

  /** Records the current time as the user's last login. */
  def updateLastLoginDate(userName: String): Unit =
    Accounts.filter(_.userName is userName.bind).map(_.lastLoginDate).update(currentDate)

  /** Creates a group account. Groups have no password and a synthetic mail address. */
  def createGroup(groupName: String, url: Option[String]): Unit =
    Accounts insert Account(
      userName       = groupName,
      password       = "",
      mailAddress    = groupName + "@devnull",
      isAdmin        = false,
      url            = url,
      registeredDate = currentDate,
      updatedDate    = currentDate,
      lastLoginDate  = None,
      image          = None,
      isGroupAccount = true)

  /** Updates the URL of the given group. */
  def updateGroup(groupName: String, url: Option[String]): Unit =
    Accounts.filter(_.userName is groupName.bind).map(_.url.?).update(url)

  /** Replaces the member list of the given group with `members`. */
  def updateGroupMembers(groupName: String, members: List[String]): Unit = {
    Query(GroupMembers).filter(_.groupName is groupName.bind).delete
    members.foreach { userName =>
      GroupMembers insert GroupMember (groupName, userName)
    }
  }

  /** Returns the user names belonging to the given group, sorted. */
  def getGroupMembers(groupName: String): List[String] =
    Query(GroupMembers)
      .filter(_.groupName is groupName.bind)
      .sortBy(_.userName)
      .map(_.userName)
      .list

  /** Returns the group names the given user belongs to, sorted. */
  def getGroupsByUserName(userName: String): List[String] =
    Query(GroupMembers)
      .filter(_.userName is userName.bind)
      .sortBy(_.groupName)
      .map(_.groupName)
      .list
}
| idleyoungman/gitbucket | src/main/scala/service/AccountService.scala | Scala | apache-2.0 | 4,473 |
package org.helgoboss.scala_osgi_metatype.adapters
import org.osgi.service.metatype.{ ObjectClassDefinition => JObjectClassDefinition, AttributeDefinition => JAttributeDefinition }
import org.helgoboss.scala_osgi_metatype.interfaces.{ListAttributeDefinition, ElementaryAttributeDefinition, ObjectClassDefinition}
/**
* Provides the given Scala object class definition as an OSGi-compliant object class definition.
*
* @constructor Creates an adapter for the given definition.
* @param delegate Scala object class definition
*/
class ObjectClassDefinitionAdapter(delegate: ObjectClassDefinition) extends JObjectClassDefinition {

  /**
   * Returns the attribute definitions matching the given filter
   * (REQUIRED, OPTIONAL or ALL), wrapped in OSGi adapters, or `null`
   * when there are none.
   *
   * Rewritten with explicit dotted calls instead of the deprecated
   * postfix-operator style (`list map {...} toArray`).
   */
  def getAttributeDefinitions(filter: Int): Array[JAttributeDefinition] = {
    import JObjectClassDefinition._
    val list = filter match {
      case REQUIRED => delegate.requiredAttributeDefinitions
      case OPTIONAL => delegate.optionalAttributeDefinitions
      case ALL => delegate.requiredAttributeDefinitions ++ delegate.optionalAttributeDefinitions
    }
    if (list.isEmpty) {
      // The OSGi metatype API mandates null (not an empty array) here.
      null
    } else {
      val adapters: Seq[JAttributeDefinition] = list.map {
        case ed: ElementaryAttributeDefinition[_] => new ElementaryAttributeDefinitionAdapter(ed)
        case ld: ListAttributeDefinition[_] => new ListAttributeDefinitionAdapter(ld)
      }
      adapters.toArray
    }
  }

  def getDescription = delegate.description

  /** Returns the icon of the given size, or `null` if none is available. */
  def getIcon(size: Int) = delegate.getIcon(size).orNull

  def getID = delegate.id

  def getName = delegate.name
}
| helgoboss/scala-osgi-metatype | src/main/scala/org/helgoboss/scala_osgi_metatype/adapters/ObjectClassDefinitionAdapter.scala | Scala | mit | 1,442 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.SparkContext
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{SparkSession, SQLContext}
/**
 * An instance of the Spark SQL execution engine that integrates with data stored in Hive.
 * Configuration for Hive is read from hive-site.xml on the classpath.
 */
@deprecated("Use SparkSession.builder.enableHiveSupport instead", "2.0.0")
class HiveContext private[hive](_sparkSession: SparkSession)
  extends SQLContext(_sparkSession) with Logging {

  self =>

  // Builds (or reuses) a Hive-enabled SparkSession on top of the given SparkContext.
  def this(sc: SparkContext) = {
    this(SparkSession.builder().enableHiveSupport().sparkContext(sc).getOrCreate())
  }

  // Java-friendly constructor; delegates to the SparkContext variant.
  def this(sc: JavaSparkContext) = this(sc.sc)

  /**
   * Returns a new HiveContext as new session, which will have separated SQLConf, UDF/UDAF,
   * temporary tables and SessionState, but sharing the same CacheManager, IsolatedClientLoader
   * and Hive client (both of execution and metadata) with existing HiveContext.
   */
  override def newSession(): HiveContext = {
    new HiveContext(sparkSession.newSession())
  }

  /**
   * Invalidate and refresh all the cached the metadata of the given table. For performance reasons,
   * Spark SQL or the external data source library it uses might cache certain metadata about a
   * table, such as the location of blocks. When those change outside of Spark SQL, users should
   * call this function to invalidate the cache.
   *
   * @since 1.3.0
   */
  def refreshTable(tableName: String): Unit = {
    sparkSession.catalog.refreshTable(tableName)
  }
}
| mahak/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala | Scala | apache-2.0 | 2,421 |
package de.kaufhof.hajobs
import JobState.JobState
import org.joda.time.DateTime
import play.api.libs.json.{JsValue, Json, Writes}
import scala.concurrent.{ExecutionContext, Future}
/**
* Supports shortcut to store the job status, can be mixed into Jobs.
*/
trait WriteStatus {

  /** Repository used to persist job status updates. */
  def jobStatusRepository: StatusWriter

  /**
   * Builds a JobStatus for the current job context and persists it.
   *
   * @param jobState the new state of the job
   * @param content  optional JSON payload stored alongside the status
   */
  def writeStatus(jobState: JobState, content: Option[JsValue] = None)
                 (implicit jobContext: JobContext, ec: ExecutionContext): Future[JobStatus] =
    jobStatusRepository.save(
      JobStatus(
        jobContext.triggerId,
        jobContext.jobType,
        jobContext.jobId,
        jobState,
        JobStatus.stateToResult(jobState),
        DateTime.now(),
        content
      )
    )

  /** Variant that always stores the given JSON content. */
  def writeStatus(jobState: JobState, content: JsValue)
                 (implicit jobContext: JobContext, ec: ExecutionContext): Future[JobStatus] =
    writeStatus(jobState, Some(content))

  /**
   * Converts the given content to json and writes it as content of JobStatus to the StatusWriter.
   */
  def writeStatusAsJson[T](jobState: JobState, content: T)
                          (implicit writes: Writes[T], jobContext: JobContext, ec: ExecutionContext): Future[JobStatus] =
    writeStatus(jobState, Json.toJson(content))
}
| Galeria-Kaufhof/ha-jobs | ha-jobs-core/src/main/scala/de/kaufhof/hajobs/WriteStatus.scala | Scala | apache-2.0 | 1,263 |
package com.tapad.docker
import scala.util.parsing.combinator.RegexParsers
/** A parsed version number with `major.minor.release` components. */
case class Version(major: Int, minor: Int, release: Int)
object Version extends RegexParsers {

  /** Parses a version string of the form `major.minor.release`. */
  def apply(version: String): Version = {
    parseVersion(version)
  }

  /**
   * Parses a version string of the form `major.minor.release`.
   *
   * @throws RuntimeException if the input cannot be parsed
   */
  def parseVersion(version: String): Version = {
    parse(parser, version) match {
      case Success(ver, _) => ver
      case NoSuccess(msg, _) => throw new RuntimeException(s"Could not parse Version from $version: $msg")
    }
  }

  // Matches a single non-negative integer component without leading zeros.
  // The previous pattern ("0".r | "[1-9]?\d*".r) could succeed on an EMPTY
  // match, making the subsequent .toInt throw NumberFormatException instead
  // of producing a proper parse failure with the message below.
  private val positiveWholeNumber: Parser[Int] = {
    """0|[1-9]\d*""".r.map(_.toInt).withFailureMessage("non-negative integer value expected")
  }

  // major "." minor "." release
  private val parser: Parser[Version] = {
    positiveWholeNumber ~ ("." ~> positiveWholeNumber) ~ ("." ~> positiveWholeNumber) ^^ {
      case major ~ minor ~ release => Version(major, minor, release)
    }
  }
} | Tapad/sbt-docker-compose | src/main/scala/com/tapad/docker/Version.scala | Scala | bsd-3-clause | 854 |
package eu.delving.basex.client
import org.basex.core.BaseXException
import org.specs2.mutable._
import scala.xml.Utility.trim
/**
*
* @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
*/
class BaseXSpec extends Specification {

  // Embedded BaseX server shared by all examples below.
  // `val` rather than `var`: the reference is never reassigned.
  val s: BaseX = {
    val server = new BaseX("localhost", 1234, 1235, "admin", "admin", false)
    server.start()
    server
  }

  // The examples form one scenario (create -> use -> drop -> stop), so they
  // must run in declaration order.
  sequential

  "the BaseX storage" should {

    "create a database" in {
      s.createDatabase("test")
      success
    }

    "open a database" in {
      s.openDatabase("test")
      success
    }

    "not open a non-existing database" in {
      s.openDatabase("gumby") must throwA[BaseXException]
    }

    "insert a document" in {
      s.add("test", "/foo.xml", "<root><bla>bar</bla></root>")
      val r = s.query("test", "//root")
      r.size must be equalTo (1)
    }

    "fetch a document as scala node" in {
      val r = s.fetch("test", "/foo.xml")
      r must be not empty

      trim(r.get) must be equalTo trim(<root><bla>bar</bla></root>)
    }

    "find something and return it as scala nodes" in {
      val r = s.withSession { session =>
        session.open("test")
        session.find("let $items := /root for $i in $items return <version id=\"{$i/@id}\">{count($i)}</version>").toList
      }
      r.size must equalTo (1)
    }

    "replace a document" in {
      s.replace("test", "/foo.xml", "<replacedRoot><bla>bar</bla></replacedRoot>")
      val r = s.query("test", "//replacedRoot")
      val r1 = s.query("test", "//root")
      r.size must be equalTo (1)
      r1.size must be equalTo (0)
    }

    "rename a document" in {
      s.rename("test", "/foo.xml", "/foo/foobar.xml")
      val r = s.query("test", "db:open(\"test\", \"/foo/foobar.xml\")")
      val r1 = s.query("test", "db:open(\"test\", \"/foo.xml\")")
      r.size must be equalTo (1)
      r1.size must be equalTo (0)
    }

    "delete a document" in {
      s.delete("test", "/foo/foobar.xml")
      val r = s.query("test", "//replacedRoot")
      r.size must be equalTo (0)
    }

    "fail to insert a document in a non-existing db" in {
      s.add("blablabla", "foo.xml", "<root/>") must throwA[BaseXException]
    }

    "shut down" in {
      s.dropDatabase("test")
      s.stop()
      success
    }
  }
}
| delving/basex-scala-client | src/test/scala/eu/delving/basex/client/BaseXSpec.scala | Scala | apache-2.0 | 2,300 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.compat.java8.collectionImpl
/** An accumulator that works with Java 8 streams; it accepts elements of type `A`,
 * is itself an `AC`. Accumulators can handle more than `Int.MaxValue` elements.
 */
trait AccumulatorLike[@specialized(Double, Int, Long) A, AC] {
  // Insertion index within the current block (presumably; the block storage
  // itself lives in concrete implementations -- confirm against subclasses).
  private[java8] var index: Int = 0
  // Number of storage blocks currently in use (presumably -- see seekSlot).
  private[java8] var hIndex: Int = 0
  // Total number of accumulated elements across all blocks.
  private[java8] var totalSize: Long = 0L

  // Cumulative element count up to and including block `i` (abstract).
  private[java8] def cumulative(i: Int): Long

  // Size to use for the next storage block: 16 while small, then grows
  // roughly geometrically with the total size, capped at 2^24.
  private[java8] def nextBlockSize: Int = {
    if (totalSize < 32) 16
    else if (totalSize <= Int.MaxValue) {
      val bit = (64 - java.lang.Long.numberOfLeadingZeros(totalSize))
      1 << (bit - (bit >> 2))
    }
    else 1 << 24
  }

  /** Size of the accumulated collection, as a `Long` */
  final def size = totalSize

  /** Remove all accumulated elements from this accumulator. */
  def clear(): Unit = {
    index = 0
    hIndex = 0
    totalSize = 0L
  }

  // Binary-searches the cumulative counts for the block containing element
  // `ix`, then packs the block index into the high 32 bits of the result and
  // the offset within that block into the low 32 bits.
  private[java8] def seekSlot(ix: Long): Long = {
    var lo = -1
    var hi = hIndex
    while (lo + 1 < hi) {
      val m = (lo + hi) >>> 1 // Shift allows division-as-unsigned, prevents overflow
      if (cumulative(m) > ix) hi = m
      else lo = m
    }
    (hi.toLong << 32) | (if (hi==0) ix else (ix - cumulative(hi-1))).toInt
  }
}
| scala/scala-java8-compat | src/main/scala-2.13-/scala/compat/java8/collectionImpl/AccumulatorLike.scala | Scala | apache-2.0 | 1,548 |
package apigen
package annotations
// Fixture for the @eventListener macro annotation: exercises the generated
// shorthand syntax for registering listeners.
object EventListenerTest {

  trait SomeEvent {}

  trait SomeListener {
    def onFoo(e: SomeEvent): Unit
  }

  class A {
    def addSomeListener(l: SomeListener) {}
  }

  implicit class AWrapper(self: A) {
    // Abstract-looking member: the @eventListener macro annotation is expected
    // to supply the implementation (presumably allowing a function literal as
    // listener shorthand, as used below) -- confirm against the macro's docs.
    @eventListener def some: SomeListener
  }

  locally {
    val a = new AWrapper(null)
    a.addSomeListener { evt =>
      println("some event happend!")
    }
    a.some { evt =>
      println("shorthand syntax works!")
    }
  }
}
| b-studios/apigen | src/test/scala/apigen/annotations/EventListenerTest.scala | Scala | mit | 488 |
package microtools.metrics
import com.codahale.metrics.MetricRegistry
import play.api.inject.{Binding, Module}
import play.api.{Configuration, Environment}
class MetricsModule extends Module {

  /** Wires the shared metrics registry and eagerly starts logging instrumentation. */
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = {
    val registryBinding = bind[MetricRegistry].toProvider[MetricRegistryProvider]
    val loggingBinding = bind[InstrumentLogging].toSelf.eagerly()
    List(registryBinding, loggingBinding)
  }
}
| 21re/play-error-handling | src/main/scala/microtools/metrics/MetricsModule.scala | Scala | mit | 427 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.