| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
package hylien
import eu.unicredit.web.Models.{DomNode, Location, Size}
import eu.unicredit.web.hylien.Distances
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import scala.collection.mutable
/**
* Created by fabiana on 7/4/16.
*/
class TreeEditDistanceSpecs extends Specification {
class Context extends Scope {
val node_tagA = DomNode(id =1,
parentId=0,
tagName = "a",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node A",
html = "")
val node_tagB = DomNode(id = 2,
parentId=0,
tagName = "b",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node B",
html = "")
val node_tagC = DomNode(id = 3,
parentId=0,
tagName = "c",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node C",
html = "")
val node_tagD = DomNode(id = 2,
parentId=0,
tagName = "d",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node D",
html = "")
val node_tagE = DomNode(id = 2,
parentId=0,
tagName = "e",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node E",
html = "")
val node_tagF = DomNode(id = 2,
parentId=0,
tagName = "f",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node F",
html = "")
val node_tagG = DomNode(id = 2,
parentId=0,
tagName = "g",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node G",
html = "")
val node_tagH = DomNode(id = 2,
parentId=0,
tagName = "h",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node H",
html = "")
val node_tagI = DomNode(id = 2,
parentId=0,
tagName = "i",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "node I",
html = "")
val T1 = DomNode(id = 2,
parentId=0,
tagName = "b",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T1",
html = "T1",
children = mutable.Buffer(node_tagC, node_tagD)
)
val T2 = DomNode(id = 2,
parentId=0,
tagName = "c",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T2",
html = "",
children = mutable.Buffer(node_tagF)
)
val T3 = DomNode(id = 2,
parentId=0,
tagName = "d",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T3",
html = "",
children = mutable.Buffer(node_tagE)
)
val T4 = DomNode(id = 2,
parentId=0,
tagName = "g",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T4",
html = "",
children = mutable.Buffer(node_tagH, node_tagF)
)
val T5 = DomNode(id = 2,
parentId=0,
tagName = "c",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T5",
html = "",
children = mutable.Buffer(T4, node_tagF)
)
val T6 = DomNode(id = 2,
parentId=0,
tagName = "a",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T6",
html = "",
children = mutable.Buffer(T1, T2, T3, T5)
)
val T7 = DomNode(id = 2,
parentId=0,
tagName = "b",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T7",
html = "",
children = mutable.Buffer(node_tagC, node_tagD)
)
val T8 = DomNode(id = 2,
parentId=0,
tagName = "g",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T8",
html = "",
children = mutable.Buffer( node_tagF, node_tagH, node_tagI)
)
val T9 = DomNode(id = 2,
parentId=0,
tagName = "c",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T9",
html = "",
children = mutable.Buffer(T8)
)
val T10 = DomNode(id = 2,
parentId=0,
tagName = "e",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T10",
html = "",
children = mutable.Buffer( node_tagF)
)
val T11 = DomNode(id = 2,
parentId=0,
tagName = "d",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T11",
html = "",
children = mutable.Buffer(T10)
)
val T12 = DomNode(id = 2,
parentId=0,
tagName = "a",
cssClass ="",
cssProperties = Map(),
cssSelector = "",
location = Location(100, 100),
size = Size(100,100),
text= "T12",
html = "",
children = mutable.Buffer(T7, T9, T11)
)
}
"TreeEditDistance" should {
"returns a score of 7.0 when you compare 2 trees having 7 nodes in common" in new Context{
Distances.treeEditDistance(T6, T12) === 7D
}
"returns a score of 0.0 when you compare 2 trees having no common nodes" in new Context{
Distances.treeEditDistance(node_tagA, node_tagB) === 0
}
}
"NormalizedTreeEditDistance" should {
"returns a score of 0.56 when compare 2 trees having 7 nodes in common and size 13 and 12 respectively" in new Context{
Distances.normalizedTreeEditDistance(T6, T12) === 0.56
}
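// Sanity check of the expected value (illustrative, assuming the normalization
// divides the raw score by the mean tree size): 7 / ((13 + 12) / 2.0) = 0.56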
"returns a score of 1 when a tree is compared with itself" in new Context {
Distances.normalizedTreeEditDistance(T6,T6) === 1
}
}
}
| fabiofumarola/HyLiEn | src/test/scala/hylien/TreeEditDistanceSpecs.scala | Scala | apache-2.0 | 6,914 |
package org.velvia.filo.codecs
import scala.language.postfixOps
import scalaxy.loops._
import org.velvia.filo.{FiloVector, FastBufferReader}
import org.velvia.filo.vector._
object DictStringWrapper {
// Used to represent no string value or NA. Better than using null.
val NoString = ""
}
abstract class DictStringWrapper(val dsv: DictStringVector) extends FiloVector[String] {
import DictStringWrapper._
private val _len = dsv.len
val reader = FastBufferReader(dsv.codesAsByteBuffer())
// To be mixed in depending on type of code vector
def getCode(index: Int): Int
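// A concrete subclass supplies getCode for the width of the underlying code vector,
// for example (illustrative sketch; assumes FastBufferReader exposes a readByte accessor):
//   final def getCode(index: Int): Int = reader.readByte(index) & 0x00ff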
// Cache the Strings so we only pay the cost of deserializing each unique string once
val strCache = Array.fill(dsv.dictionaryLength())(NoString)
final private def dictString(code: Int): String = {
val cacheValue = strCache(code)
if (cacheValue == NoString) {
val strFromDict = dsv.dictionary(code)
strCache(code) = strFromDict
strFromDict
} else {
cacheValue
}
}
final def isAvailable(index: Int): Boolean = getCode(index) != 0
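// Code 0 is reserved for NA / missing values, so a non-zero code means a value is present at this index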
final def apply(index: Int): String = dictString(getCode(index))
final def length: Int = _len
final def foreach[B](fn: String => B): Unit = {
for { i <- 0 until length optimized } {
val code = getCode(i)
if (code != 0) fn(dictString(code))
}
}
}
| velvia/filo | filo-scala/src/main/scala/org.velvia.filo/codecs/DictEncodingWrappers.scala | Scala | apache-2.0 | 1,341 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
* Nikolay.Tropin
* 2014-05-05
*/
class SortFilterInspection extends OperationOnCollectionInspection {
override def possibleSimplificationTypes: Array[SimplificationType] =
Array(SortFilter)
}
object SortFilter extends SimplificationType {
def hint = InspectionBundle.message("sort.filter.hint")
override def getSimplification(expr: ScExpression): Option[Simplification] = {
expr match {
case qual`.sort`()`.filter`(pred) if !hasSideEffects(pred) => swapLastTwoMethods(expr)
case qual`.sort`(_)`.filter`(pred) if !hasSideEffects(pred) => swapLastTwoMethods(expr)
case _ => None
}
}
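// Illustrative example of the suggested rewrite (method names are schematic; the
// exact sort-like methods matched are defined by the `.sort` extractor):
//   seq.sortWith(f).filter(p)  =>  seq.filter(p).sortWith(f)
// Filtering first avoids sorting elements that would be discarded anyway.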
def swapLastTwoMethods(expr: ScExpression): Option[Simplification] = {
def refWithArgumentsText(method: MethodRepr): Option[String] = (method.itself, method.optionalBase) match {
case (_: ScMethodCall | _: ScReferenceExpression, Some(baseExpr)) =>
val startIndex = baseExpr.getTextRange.getEndOffset - method.itself.getTextRange.getStartOffset
val text = method.itself.getText
if (startIndex > 0 && startIndex < text.length) Option(text.substring(startIndex))
else None
case (ScInfixExpr(left, op, right), _) =>
def argListFromInfix(arg: ScExpression) = arg match {
case x @ (_: ScBlock | _: ScParenthesisedExpr | _: ScTuple) => x.getText
case _ => s"(${arg.getText})"
}
Some(s".${op.refName}${argListFromInfix(right)}")
case _ => None
}
expr match {
case MethodSeq(last, second, _*) =>
for {
lastText <- refWithArgumentsText(last)
secondText <- refWithArgumentsText(second)
baseExpr <- second.optionalBase
} {
val newText = s"${baseExpr.getText}$lastText$secondText"
val qual = second.optionalBase.getOrElse(second.itself)
val simplification = replace(expr).withText(newText).highlightFrom(qual)
return Some(simplification)
}
None
case _ => None
}
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/SortFilterInspection.scala | Scala | apache-2.0 | 2,207 |
package com.twitter.finagle.thriftmux
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.{Address, Name, Service, ThriftMux}
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.thriftmux.thriftscala.TestService
import com.twitter.finagle.thriftmux.thriftscala.TestService.{Inquiry, Query, Question}
import com.twitter.io.Buf
import com.twitter.scrooge.{Request, Response}
import com.twitter.util.{Await, Awaitable, Duration, Future, Return, Try}
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.OneInstancePerTest
import org.scalatest.funsuite.AnyFunSuite
class ContextAmplificationTest extends AnyFunSuite with OneInstancePerTest {
def await[T](a: Awaitable[T], d: Duration = 60.seconds): T =
Await.result(a, d)
protected def clientImpl: ThriftMux.Client =
ThriftMux.client.copy(muxer = ThriftMux.Client.standardMuxer)
protected def serverImpl: ThriftMux.Server = {
// need to copy the params since the `.server` call sets the Label to "thrift" in
// the current muxer's params
val serverParams = ThriftMux.server.params
ThriftMux.server.copy(muxer = ThriftMux.Server.defaultMuxer.withParams(serverParams))
}
case class TestContext(buf: Buf)
val testContext = new Contexts.broadcast.Key[TestContext]("com.twitter.finagle.mux.MuxContext") {
def marshal(tc: TestContext): Buf = tc.buf
def tryUnmarshal(buf: Buf): Try[TestContext] = Return(TestContext(buf))
}
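// The marshalled value for this broadcast key travels with each ThriftMux request;
// the origin server below simply reports how many header entries it received, so the
// proxy (and the test) can tell whether contexts were duplicated along the way.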
val originServer = serverImpl.serveIface(
new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
new TestService.ReqRepServicePerEndpoint {
def query: Service[Request[Query.Args], Response[String]] =
Service.mk { req: Request[Query.Args] =>
Future.value(Response(req.headers.toBufSeq.length.toString))
}
def question: Service[Request[Question.Args], Response[String]] = ???
def inquiry: Service[Request[Inquiry.Args], Response[String]] = ???
}.toThriftService
)
val proxyServer = {
val proxyClient: TestService.MethodPerEndpoint = {
val underlying = clientImpl.servicePerEndpoint[TestService.ServicePerEndpoint](
Name.bound(Address(originServer.boundAddress.asInstanceOf[InetSocketAddress])),
"ProxyClient"
)
// This sets up the auto-forwarding of request headers
ThriftMux.Client.methodPerEndpoint(underlying)
}
serverImpl.serveIface(
new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
new TestService.ReqRepServicePerEndpoint {
def query: Service[Request[Query.Args], Response[String]] = Service.mk {
req: Request[Query.Args] =>
val requestHeaders = req.headers.toBufSeq.length
proxyClient.query("").map { result =>
val transmittedHeaders = result.toInt
if (transmittedHeaders == requestHeaders) Response("success")
else Response(s"Unexpected number of headers transmitted: $transmittedHeaders")
}
}
def question: Service[Request[Question.Args], Response[String]] = ???
def inquiry: Service[Request[Inquiry.Args], Response[String]] = ???
}.toThriftService
)
}
test("contexts/headers are not amplified between hops") {
val client =
clientImpl.build[TestService.MethodPerEndpoint](
Name.bound(Address(proxyServer.boundAddress.asInstanceOf[InetSocketAddress])),
"client"
)
Contexts.broadcast.let(testContext, TestContext(Buf.Utf8("foo"))) {
assert(await(client.query("ok").map { s => s }) == "success")
}
await(originServer.close(3.seconds))
}
}
| twitter/finagle | finagle-thriftmux/src/test/scala/com/twitter/finagle/thriftmux/ContextAmplificationTest.scala | Scala | apache-2.0 | 3,646 |
package com.stulsoft.serialization
/**
* @author Yuriy Stul.
*/
trait MessageTrait {
}
| ysden123/poc | pserialization/src/main/scala/com/stulsoft/serialization/MessageTrait.scala | Scala | mit | 93 |
package com.github.agourlay.cornichon.experimental.examples
import com.github.agourlay.cornichon.experimental.CornichonFeature
class DummyExamplesFour extends CornichonFeature {
def feature = Feature("Dummy four feature") {
Scenario("single scenario pending").pending
}
}
| OlegIlyenko/cornichon | cornichon-experimental/src/test/scala/com/github/agourlay/cornichon/experimental/examples/DummyExamplesFour.scala | Scala | apache-2.0 | 285 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.net.URLEncoder
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.HashSet
import scala.xml.{Elem, Node, Unparsed}
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.SparkConf
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo, TaskLocality}
import org.apache.spark.ui._
import org.apache.spark.ui.exec.ExecutorsListener
import org.apache.spark.ui.jobs.UIData._
import org.apache.spark.util.{Distribution, Utils}
/** Page showing statistics and task list for a given stage */
private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") {
import StagePage._
private val progressListener = parent.progressListener
private val operationGraphListener = parent.operationGraphListener
private val executorsListener = parent.executorsListener
private val TIMELINE_LEGEND = {
<div class="legend-area">
<svg>
{
val legendPairs = List(("scheduler-delay-proportion", "Scheduler Delay"),
("deserialization-time-proportion", "Task Deserialization Time"),
("shuffle-read-time-proportion", "Shuffle Read Time"),
("executor-runtime-proportion", "Executor Computing Time"),
("shuffle-write-time-proportion", "Shuffle Write Time"),
("serialization-time-proportion", "Result Serialization Time"),
("getting-result-time-proportion", "Getting Result Time"))
legendPairs.zipWithIndex.map {
case ((classAttr, name), index) =>
<rect x={5 + (index / 3) * 210 + "px"} y={10 + (index % 3) * 15 + "px"}
width="10px" height="10px" class={classAttr}></rect>
<text x={25 + (index / 3) * 210 + "px"}
y={20 + (index % 3) * 15 + "px"}>{name}</text>
}
}
</svg>
</div>
}
// TODO: We should consider increasing the value of this parameter over time
// if we find that it's okay.
private val MAX_TIMELINE_TASKS = parent.conf.getInt("spark.ui.timeline.tasks.maximum", 1000)
private def getLocalitySummaryString(stageData: StageUIData): String = {
val localities = stageData.taskData.values.map(_.taskInfo.taskLocality)
val localityCounts = localities.groupBy(identity).mapValues(_.size)
val localityNamesAndCounts = localityCounts.toSeq.map { case (locality, count) =>
val localityName = locality match {
case TaskLocality.PROCESS_LOCAL => "Process local"
case TaskLocality.NODE_LOCAL => "Node local"
case TaskLocality.RACK_LOCAL => "Rack local"
case TaskLocality.ANY => "Any"
}
s"$localityName: $count"
}
localityNamesAndCounts.sorted.mkString("; ")
}
def render(request: HttpServletRequest): Seq[Node] = {
progressListener.synchronized {
// stripXSS is called first to remove suspicious characters used in XSS attacks
val parameterId = UIUtils.stripXSS(request.getParameter("id"))
require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")
val parameterAttempt = UIUtils.stripXSS(request.getParameter("attempt"))
require(parameterAttempt != null && parameterAttempt.nonEmpty, "Missing attempt parameter")
val parameterTaskPage = UIUtils.stripXSS(request.getParameter("task.page"))
val parameterTaskSortColumn = UIUtils.stripXSS(request.getParameter("task.sort"))
val parameterTaskSortDesc = UIUtils.stripXSS(request.getParameter("task.desc"))
val parameterTaskPageSize = UIUtils.stripXSS(request.getParameter("task.pageSize"))
val parameterTaskPrevPageSize = UIUtils.stripXSS(request.getParameter("task.prevPageSize"))
val taskPage = Option(parameterTaskPage).map(_.toInt).getOrElse(1)
val taskSortColumn = Option(parameterTaskSortColumn).map { sortColumn =>
UIUtils.decodeURLParameter(sortColumn)
}.getOrElse("Index")
val taskSortDesc = Option(parameterTaskSortDesc).map(_.toBoolean).getOrElse(false)
val taskPageSize = Option(parameterTaskPageSize).map(_.toInt).getOrElse(100)
val taskPrevPageSize = Option(parameterTaskPrevPageSize).map(_.toInt).getOrElse(taskPageSize)
val stageId = parameterId.toInt
val stageAttemptId = parameterAttempt.toInt
val stageDataOption = progressListener.stageIdToData.get((stageId, stageAttemptId))
val stageHeader = s"Details for Stage $stageId (Attempt $stageAttemptId)"
if (stageDataOption.isEmpty) {
val content =
<div id="no-info">
<p>No information to display for Stage {stageId} (Attempt {stageAttemptId})</p>
</div>
return UIUtils.headerSparkPage(stageHeader, content, parent)
}
if (stageDataOption.get.taskData.isEmpty) {
val content =
<div>
<h4>Summary Metrics</h4> No tasks have started yet
<h4>Tasks</h4> No tasks have started yet
</div>
return UIUtils.headerSparkPage(stageHeader, content, parent)
}
val stageData = stageDataOption.get
val tasks = stageData.taskData.values.toSeq.sortBy(_.taskInfo.launchTime)
val numCompleted = stageData.numCompleteTasks
val totalTasks = stageData.numActiveTasks +
stageData.numCompleteTasks + stageData.numFailedTasks
val totalTasksNumStr = if (totalTasks == tasks.size) {
s"$totalTasks"
} else {
s"$totalTasks, showing ${tasks.size}"
}
val allAccumulables = progressListener.stageIdToData((stageId, stageAttemptId)).accumulables
val externalAccumulables = allAccumulables.values.filter { acc => !acc.internal }
val hasAccumulators = externalAccumulables.nonEmpty
val summary =
<div>
<ul class="unstyled">
<li>
<strong>Total Time Across All Tasks: </strong>
{UIUtils.formatDuration(stageData.executorRunTime)}
</li>
<li>
<strong>Locality Level Summary: </strong>
{getLocalitySummaryString(stageData)}
</li>
{if (stageData.hasInput) {
<li>
<strong>Input Size / Records: </strong>
{s"${Utils.bytesToString(stageData.inputBytes)} / ${stageData.inputRecords}"}
</li>
}}
{if (stageData.hasOutput) {
<li>
<strong>Output: </strong>
{s"${Utils.bytesToString(stageData.outputBytes)} / ${stageData.outputRecords}"}
</li>
}}
{if (stageData.hasShuffleRead) {
<li>
<strong>Shuffle Read: </strong>
{s"${Utils.bytesToString(stageData.shuffleReadTotalBytes)} / " +
s"${stageData.shuffleReadRecords}"}
</li>
}}
{if (stageData.hasShuffleWrite) {
<li>
<strong>Shuffle Write: </strong>
{s"${Utils.bytesToString(stageData.shuffleWriteBytes)} / " +
s"${stageData.shuffleWriteRecords}"}
</li>
}}
{if (stageData.hasBytesSpilled) {
<li>
<strong>Shuffle Spill (Memory): </strong>
{Utils.bytesToString(stageData.memoryBytesSpilled)}
</li>
<li>
<strong>Shuffle Spill (Disk): </strong>
{Utils.bytesToString(stageData.diskBytesSpilled)}
</li>
}}
</ul>
</div>
val showAdditionalMetrics =
<div>
<span class="expand-additional-metrics">
<span class="expand-additional-metrics-arrow arrow-closed"></span>
<a>Show Additional Metrics</a>
</span>
<div class="additional-metrics collapsed">
<ul>
<li>
<input type="checkbox" id="select-all-metrics"/>
<span class="additional-metric-title"><em>(De)select All</em></span>
</li>
<li>
<span data-toggle="tooltip"
title={ToolTips.SCHEDULER_DELAY} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.SCHEDULER_DELAY}/>
<span class="additional-metric-title">Scheduler Delay</span>
</span>
</li>
<li>
<span data-toggle="tooltip"
title={ToolTips.TASK_DESERIALIZATION_TIME} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}/>
<span class="additional-metric-title">Task Deserialization Time</span>
</span>
</li>
{if (stageData.hasShuffleRead) {
<li>
<span data-toggle="tooltip"
title={ToolTips.SHUFFLE_READ_BLOCKED_TIME} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}/>
<span class="additional-metric-title">Shuffle Read Blocked Time</span>
</span>
</li>
<li>
<span data-toggle="tooltip"
title={ToolTips.SHUFFLE_READ_REMOTE_SIZE} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}/>
<span class="additional-metric-title">Shuffle Remote Reads</span>
</span>
</li>
}}
<li>
<span data-toggle="tooltip"
title={ToolTips.RESULT_SERIALIZATION_TIME} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}/>
<span class="additional-metric-title">Result Serialization Time</span>
</span>
</li>
<li>
<span data-toggle="tooltip"
title={ToolTips.GETTING_RESULT_TIME} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.GETTING_RESULT_TIME}/>
<span class="additional-metric-title">Getting Result Time</span>
</span>
</li>
<li>
<span data-toggle="tooltip"
title={ToolTips.PEAK_EXECUTION_MEMORY} data-placement="right">
<input type="checkbox" name={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}/>
<span class="additional-metric-title">Peak Execution Memory</span>
</span>
</li>
</ul>
</div>
</div>
val dagViz = UIUtils.showDagVizForStage(
stageId, operationGraphListener.getOperationGraphForStage(stageId))
val accumulableHeaders: Seq[String] = Seq("Accumulable", "Value")
def accumulableRow(acc: AccumulableInfo): Seq[Node] = {
(acc.name, acc.value) match {
case (Some(name), Some(value)) => <tr><td>{name}</td><td>{value}</td></tr>
case _ => Seq.empty[Node]
}
}
val accumulableTable = UIUtils.listingTable(
accumulableHeaders,
accumulableRow,
externalAccumulables.toSeq)
val page: Int = {
// If the user has changed to a larger page size, then go to page 1 in order to avoid
// IndexOutOfBoundsException.
if (taskPageSize <= taskPrevPageSize) {
taskPage
} else {
1
}
}
val currentTime = System.currentTimeMillis()
val (taskTable, taskTableHTML) = try {
val _taskTable = new TaskPagedTable(
parent.conf,
UIUtils.prependBaseUri(parent.basePath) +
s"/stages/stage?id=${stageId}&attempt=${stageAttemptId}",
tasks,
hasAccumulators,
stageData.hasInput,
stageData.hasOutput,
stageData.hasShuffleRead,
stageData.hasShuffleWrite,
stageData.hasBytesSpilled,
currentTime,
pageSize = taskPageSize,
sortColumn = taskSortColumn,
desc = taskSortDesc,
executorsListener = executorsListener
)
(_taskTable, _taskTable.table(page))
} catch {
case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
val errorMessage =
<div class="alert alert-error">
<p>Error while rendering stage table:</p>
<pre>
{Utils.exceptionString(e)}
</pre>
</div>
(null, errorMessage)
}
val jsForScrollingDownToTaskTable =
<script>
{Unparsed {
"""
|$(function() {
| if (/.*&task.sort=.*$/.test(location.search)) {
| var topOffset = $("#tasks-section").offset().top;
| $("html,body").animate({scrollTop: topOffset}, 200);
| }
|});
""".stripMargin
}
}
</script>
val taskIdsInPage = if (taskTable == null) Set.empty[Long]
else taskTable.dataSource.slicedTaskIds
// Excludes tasks which failed and have incomplete metrics
val validTasks = tasks.filter(t => t.taskInfo.status == "SUCCESS" && t.metrics.isDefined)
val summaryTable: Option[Seq[Node]] =
if (validTasks.isEmpty) {
None
}
else {
def getDistributionQuantiles(data: Seq[Double]): IndexedSeq[Double] =
Distribution(data).get.getQuantiles()
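// getQuantiles() with its default probabilities returns five values
// (min, 25th percentile, median, 75th percentile, max), which line up with the
// quantileHeaders of the summary table built further below.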
def getFormattedTimeQuantiles(times: Seq[Double]): Seq[Node] = {
getDistributionQuantiles(times).map { millis =>
<td>{UIUtils.formatDuration(millis.toLong)}</td>
}
}
def getFormattedSizeQuantiles(data: Seq[Double]): Seq[Elem] = {
getDistributionQuantiles(data).map(d => <td>{Utils.bytesToString(d.toLong)}</td>)
}
val deserializationTimes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.executorDeserializeTime.toDouble
}
val deserializationQuantiles =
<td>
<span data-toggle="tooltip" title={ToolTips.TASK_DESERIALIZATION_TIME}
data-placement="right">
Task Deserialization Time
</span>
</td> +: getFormattedTimeQuantiles(deserializationTimes)
val serviceTimes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.executorRunTime.toDouble
}
val serviceQuantiles = <td>Duration</td> +: getFormattedTimeQuantiles(serviceTimes)
val gcTimes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.jvmGCTime.toDouble
}
val gcQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.GC_TIME} data-placement="right">GC Time
</span>
</td> +: getFormattedTimeQuantiles(gcTimes)
val serializationTimes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.resultSerializationTime.toDouble
}
val serializationQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.RESULT_SERIALIZATION_TIME} data-placement="right">
Result Serialization Time
</span>
</td> +: getFormattedTimeQuantiles(serializationTimes)
val gettingResultTimes = validTasks.map { taskUIData: TaskUIData =>
getGettingResultTime(taskUIData.taskInfo, currentTime).toDouble
}
val gettingResultQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.GETTING_RESULT_TIME} data-placement="right">
Getting Result Time
</span>
</td> +:
getFormattedTimeQuantiles(gettingResultTimes)
val peakExecutionMemory = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.peakExecutionMemory.toDouble
}
val peakExecutionMemoryQuantiles = {
<td>
<span data-toggle="tooltip"
title={ToolTips.PEAK_EXECUTION_MEMORY} data-placement="right">
Peak Execution Memory
</span>
</td> +: getFormattedSizeQuantiles(peakExecutionMemory)
}
// The scheduler delay includes the network delay to send the task to the worker
// machine and to send back the result (but not the time to fetch the task result,
// if it needed to be fetched from the block manager on the worker).
val schedulerDelays = validTasks.map { taskUIData: TaskUIData =>
getSchedulerDelay(taskUIData.taskInfo, taskUIData.metrics.get, currentTime).toDouble
}
val schedulerDelayTitle = <td><span data-toggle="tooltip"
title={ToolTips.SCHEDULER_DELAY} data-placement="right">Scheduler Delay</span></td>
val schedulerDelayQuantiles = schedulerDelayTitle +:
getFormattedTimeQuantiles(schedulerDelays)
def getFormattedSizeQuantilesWithRecords(data: Seq[Double], records: Seq[Double])
: Seq[Elem] = {
val recordDist = getDistributionQuantiles(records).iterator
getDistributionQuantiles(data).map(d =>
<td>{s"${Utils.bytesToString(d.toLong)} / ${recordDist.next().toLong}"}</td>
)
}
val inputSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.inputMetrics.bytesRead.toDouble
}
val inputRecords = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.inputMetrics.recordsRead.toDouble
}
val inputQuantiles = <td>Input Size / Records</td> +:
getFormattedSizeQuantilesWithRecords(inputSizes, inputRecords)
val outputSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.outputMetrics.bytesWritten.toDouble
}
val outputRecords = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.outputMetrics.recordsWritten.toDouble
}
val outputQuantiles = <td>Output Size / Records</td> +:
getFormattedSizeQuantilesWithRecords(outputSizes, outputRecords)
val shuffleReadBlockedTimes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleReadMetrics.fetchWaitTime.toDouble
}
val shuffleReadBlockedQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.SHUFFLE_READ_BLOCKED_TIME} data-placement="right">
Shuffle Read Blocked Time
</span>
</td> +:
getFormattedTimeQuantiles(shuffleReadBlockedTimes)
val shuffleReadTotalSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleReadMetrics.totalBytesRead.toDouble
}
val shuffleReadTotalRecords = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleReadMetrics.recordsRead.toDouble
}
val shuffleReadTotalQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.SHUFFLE_READ} data-placement="right">
Shuffle Read Size / Records
</span>
</td> +:
getFormattedSizeQuantilesWithRecords(shuffleReadTotalSizes, shuffleReadTotalRecords)
val shuffleReadRemoteSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleReadMetrics.remoteBytesRead.toDouble
}
val shuffleReadRemoteQuantiles =
<td>
<span data-toggle="tooltip"
title={ToolTips.SHUFFLE_READ_REMOTE_SIZE} data-placement="right">
Shuffle Remote Reads
</span>
</td> +:
getFormattedSizeQuantiles(shuffleReadRemoteSizes)
val shuffleWriteSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleWriteMetrics.bytesWritten.toDouble
}
val shuffleWriteRecords = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.shuffleWriteMetrics.recordsWritten.toDouble
}
val shuffleWriteQuantiles = <td>Shuffle Write Size / Records</td> +:
getFormattedSizeQuantilesWithRecords(shuffleWriteSizes, shuffleWriteRecords)
val memoryBytesSpilledSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.memoryBytesSpilled.toDouble
}
val memoryBytesSpilledQuantiles = <td>Shuffle spill (memory)</td> +:
getFormattedSizeQuantiles(memoryBytesSpilledSizes)
val diskBytesSpilledSizes = validTasks.map { taskUIData: TaskUIData =>
taskUIData.metrics.get.diskBytesSpilled.toDouble
}
val diskBytesSpilledQuantiles = <td>Shuffle spill (disk)</td> +:
getFormattedSizeQuantiles(diskBytesSpilledSizes)
val listings: Seq[Seq[Node]] = Seq(
<tr>{serviceQuantiles}</tr>,
<tr class={TaskDetailsClassNames.SCHEDULER_DELAY}>{schedulerDelayQuantiles}</tr>,
<tr class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}>
{deserializationQuantiles}
</tr>
<tr>{gcQuantiles}</tr>,
<tr class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}>
{serializationQuantiles}
</tr>,
<tr class={TaskDetailsClassNames.GETTING_RESULT_TIME}>{gettingResultQuantiles}</tr>,
<tr class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}>
{peakExecutionMemoryQuantiles}
</tr>,
if (stageData.hasInput) <tr>{inputQuantiles}</tr> else Nil,
if (stageData.hasOutput) <tr>{outputQuantiles}</tr> else Nil,
if (stageData.hasShuffleRead) {
<tr class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}>
{shuffleReadBlockedQuantiles}
</tr>
<tr>{shuffleReadTotalQuantiles}</tr>
<tr class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}>
{shuffleReadRemoteQuantiles}
</tr>
} else {
Nil
},
if (stageData.hasShuffleWrite) <tr>{shuffleWriteQuantiles}</tr> else Nil,
if (stageData.hasBytesSpilled) <tr>{memoryBytesSpilledQuantiles}</tr> else Nil,
if (stageData.hasBytesSpilled) <tr>{diskBytesSpilledQuantiles}</tr> else Nil)
val quantileHeaders = Seq("Metric", "Min", "25th percentile",
"Median", "75th percentile", "Max")
// The summary table does not use CSS to stripe rows, which doesn't work with hidden
// rows (instead, JavaScript in table.js is used to stripe the non-hidden rows).
Some(UIUtils.listingTable(
quantileHeaders,
identity[Seq[Node]],
listings,
fixedWidth = true,
id = Some("task-summary-table"),
stripeRowsWithCss = false))
}
val executorTable = new ExecutorTable(stageId, stageAttemptId, parent)
val maybeAccumulableTable: Seq[Node] =
if (hasAccumulators) { <h4>Accumulators</h4> ++ accumulableTable } else Seq.empty
val aggMetrics =
<span class="collapse-aggregated-metrics collapse-table"
onClick="collapseTable('collapse-aggregated-metrics','aggregated-metrics')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Aggregated Metrics by Executor</a>
</h4>
</span>
<div class="aggregated-metrics collapsible-table">
{executorTable.toNodeSeq}
</div>
val content =
summary ++
dagViz ++
showAdditionalMetrics ++
makeTimeline(
// Only show the tasks in the table
stageData.taskData.values.toSeq.filter(t => taskIdsInPage.contains(t.taskInfo.taskId)),
currentTime) ++
<h4>Summary Metrics for <a href="#tasks-section">{numCompleted} Completed Tasks</a></h4> ++
<div>{summaryTable.getOrElse("No tasks have reported metrics yet.")}</div> ++
aggMetrics ++
maybeAccumulableTable ++
<h4 id="tasks-section">Tasks ({totalTasksNumStr})</h4> ++
taskTableHTML ++ jsForScrollingDownToTaskTable
UIUtils.headerSparkPage(stageHeader, content, parent, showVisualization = true)
}
}
def makeTimeline(tasks: Seq[TaskUIData], currentTime: Long): Seq[Node] = {
val executorsSet = new HashSet[(String, String)]
var minLaunchTime = Long.MaxValue
var maxFinishTime = Long.MinValue
val executorsArrayStr =
tasks.sortBy(-_.taskInfo.launchTime).take(MAX_TIMELINE_TASKS).map { taskUIData =>
val taskInfo = taskUIData.taskInfo
val executorId = taskInfo.executorId
val host = taskInfo.host
executorsSet += ((executorId, host))
val launchTime = taskInfo.launchTime
val finishTime = if (!taskInfo.running) taskInfo.finishTime else currentTime
val totalExecutionTime = finishTime - launchTime
minLaunchTime = launchTime.min(minLaunchTime)
maxFinishTime = finishTime.max(maxFinishTime)
def toProportion(time: Long) = time.toDouble / totalExecutionTime * 100
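// Each timeline bar segment's width is that phase's share of this task's
// wall-clock time (launch to finish, or to now for running tasks), in percent.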
val metricsOpt = taskUIData.metrics
val shuffleReadTime =
metricsOpt.map(_.shuffleReadMetrics.fetchWaitTime).getOrElse(0L)
val shuffleReadTimeProportion = toProportion(shuffleReadTime)
val shuffleWriteTime =
(metricsOpt.map(_.shuffleWriteMetrics.writeTime).getOrElse(0L) / 1e6).toLong
val shuffleWriteTimeProportion = toProportion(shuffleWriteTime)
val serializationTime = metricsOpt.map(_.resultSerializationTime).getOrElse(0L)
val serializationTimeProportion = toProportion(serializationTime)
val deserializationTime = metricsOpt.map(_.executorDeserializeTime).getOrElse(0L)
val deserializationTimeProportion = toProportion(deserializationTime)
val gettingResultTime = getGettingResultTime(taskUIData.taskInfo, currentTime)
val gettingResultTimeProportion = toProportion(gettingResultTime)
val schedulerDelay =
metricsOpt.map(getSchedulerDelay(taskInfo, _, currentTime)).getOrElse(0L)
val schedulerDelayProportion = toProportion(schedulerDelay)
val executorOverhead = serializationTime + deserializationTime
val executorRunTime = if (taskInfo.running) {
totalExecutionTime - executorOverhead - gettingResultTime
} else {
metricsOpt.map(_.executorRunTime).getOrElse(
totalExecutionTime - executorOverhead - gettingResultTime)
}
val executorComputingTime = executorRunTime - shuffleReadTime - shuffleWriteTime
val executorComputingTimeProportion =
math.max(100 - schedulerDelayProportion - shuffleReadTimeProportion -
shuffleWriteTimeProportion - serializationTimeProportion -
deserializationTimeProportion - gettingResultTimeProportion, 0)
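// The computing-time share is derived as the remainder (clamped at 0) so the
// stacked timeline bar segments below add up to at most 100%.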
val schedulerDelayProportionPos = 0
val deserializationTimeProportionPos =
schedulerDelayProportionPos + schedulerDelayProportion
val shuffleReadTimeProportionPos =
deserializationTimeProportionPos + deserializationTimeProportion
val executorRuntimeProportionPos =
shuffleReadTimeProportionPos + shuffleReadTimeProportion
val shuffleWriteTimeProportionPos =
executorRuntimeProportionPos + executorComputingTimeProportion
val serializationTimeProportionPos =
shuffleWriteTimeProportionPos + shuffleWriteTimeProportion
val gettingResultTimeProportionPos =
serializationTimeProportionPos + serializationTimeProportion
val index = taskInfo.index
val attempt = taskInfo.attemptNumber
val svgTag =
if (totalExecutionTime == 0) {
// SPARK-8705: Avoid invalid attribute error in JavaScript if execution time is 0
"""<svg class="task-assignment-timeline-duration-bar"></svg>"""
} else {
s"""<svg class="task-assignment-timeline-duration-bar">
|<rect class="scheduler-delay-proportion"
|x="$schedulerDelayProportionPos%" y="0px" height="26px"
|width="$schedulerDelayProportion%"></rect>
|<rect class="deserialization-time-proportion"
|x="$deserializationTimeProportionPos%" y="0px" height="26px"
|width="$deserializationTimeProportion%"></rect>
|<rect class="shuffle-read-time-proportion"
|x="$shuffleReadTimeProportionPos%" y="0px" height="26px"
|width="$shuffleReadTimeProportion%"></rect>
|<rect class="executor-runtime-proportion"
|x="$executorRuntimeProportionPos%" y="0px" height="26px"
|width="$executorComputingTimeProportion%"></rect>
|<rect class="shuffle-write-time-proportion"
|x="$shuffleWriteTimeProportionPos%" y="0px" height="26px"
|width="$shuffleWriteTimeProportion%"></rect>
|<rect class="serialization-time-proportion"
|x="$serializationTimeProportionPos%" y="0px" height="26px"
|width="$serializationTimeProportion%"></rect>
|<rect class="getting-result-time-proportion"
|x="$gettingResultTimeProportionPos%" y="0px" height="26px"
|width="$gettingResultTimeProportion%"></rect></svg>""".stripMargin
}
val timelineObject =
s"""
|{
|'className': 'task task-assignment-timeline-object',
|'group': '$executorId',
|'content': '<div class="task-assignment-timeline-content"
|data-toggle="tooltip" data-placement="top"
|data-html="true" data-container="body"
|data-title="${s"Task " + index + " (attempt " + attempt + ")"}<br>
|Status: ${taskInfo.status}<br>
|Launch Time: ${UIUtils.formatDate(new Date(launchTime))}
|${
if (!taskInfo.running) {
s"""<br>Finish Time: ${UIUtils.formatDate(new Date(finishTime))}"""
} else {
""
}
}
|<br>Scheduler Delay: $schedulerDelay ms
|<br>Task Deserialization Time: ${UIUtils.formatDuration(deserializationTime)}
|<br>Shuffle Read Time: ${UIUtils.formatDuration(shuffleReadTime)}
|<br>Executor Computing Time: ${UIUtils.formatDuration(executorComputingTime)}
|<br>Shuffle Write Time: ${UIUtils.formatDuration(shuffleWriteTime)}
|<br>Result Serialization Time: ${UIUtils.formatDuration(serializationTime)}
|<br>Getting Result Time: ${UIUtils.formatDuration(gettingResultTime)}">
|$svgTag',
|'start': new Date($launchTime),
|'end': new Date($finishTime)
|}
|""".stripMargin.replaceAll("""[\\r\\n]+""", " ")
timelineObject
}.mkString("[", ",", "]")
val groupArrayStr = executorsSet.map {
case (executorId, host) =>
s"""
{
'id': '$executorId',
'content': '$executorId / $host',
}
"""
}.mkString("[", ",", "]")
<span class="expand-task-assignment-timeline">
<span class="expand-task-assignment-timeline-arrow arrow-closed"></span>
<a>Event Timeline</a>
</span> ++
<div id="task-assignment-timeline" class="collapsed">
{
if (MAX_TIMELINE_TASKS < tasks.size) {
<strong>
This stage has more than the maximum number of tasks that can be shown in the
visualization! Only the most recent {MAX_TIMELINE_TASKS} tasks
(of {tasks.size} total) are shown.
</strong>
} else {
Seq.empty
}
}
<div class="control-panel">
<div id="task-assignment-timeline-zoom-lock">
<input type="checkbox"></input>
<span>Enable zooming</span>
</div>
</div>
{TIMELINE_LEGEND}
</div> ++
<script type="text/javascript">
{Unparsed(s"drawTaskAssignmentTimeline(" +
s"$groupArrayStr, $executorsArrayStr, $minLaunchTime, $maxFinishTime, " +
s"${UIUtils.getTimeZoneOffset()})")}
</script>
}
}
private[ui] object StagePage {
private[ui] def getGettingResultTime(info: TaskInfo, currentTime: Long): Long = {
if (info.gettingResult) {
if (info.finished) {
info.finishTime - info.gettingResultTime
} else {
// The task is still fetching the result.
currentTime - info.gettingResultTime
}
} else {
0L
}
}
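// Scheduler delay is the part of the task's wall-clock lifetime not accounted for
// by executor run time, (de)serialization, or fetching the result:
//   max(0, (finishTime - launchTime) - executorRunTime
//          - executorDeserializeTime - resultSerializationTime - gettingResultTime)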
private[ui] def getSchedulerDelay(
info: TaskInfo, metrics: TaskMetricsUIData, currentTime: Long): Long = {
if (info.finished) {
val totalExecutionTime = info.finishTime - info.launchTime
val executorOverhead = metrics.executorDeserializeTime +
metrics.resultSerializationTime
math.max(
0,
totalExecutionTime - metrics.executorRunTime - executorOverhead -
getGettingResultTime(info, currentTime))
} else {
// The task is still running and the metrics like executorRunTime are not available.
0L
}
}
}
private[ui] case class TaskTableRowInputData(inputSortable: Long, inputReadable: String)
private[ui] case class TaskTableRowOutputData(outputSortable: Long, outputReadable: String)
private[ui] case class TaskTableRowShuffleReadData(
shuffleReadBlockedTimeSortable: Long,
shuffleReadBlockedTimeReadable: String,
shuffleReadSortable: Long,
shuffleReadReadable: String,
shuffleReadRemoteSortable: Long,
shuffleReadRemoteReadable: String)
private[ui] case class TaskTableRowShuffleWriteData(
writeTimeSortable: Long,
writeTimeReadable: String,
shuffleWriteSortable: Long,
shuffleWriteReadable: String)
private[ui] case class TaskTableRowBytesSpilledData(
memoryBytesSpilledSortable: Long,
memoryBytesSpilledReadable: String,
diskBytesSpilledSortable: Long,
diskBytesSpilledReadable: String)
/**
* Contains all data needed for sorting and generating HTML. Using this class rather than
* TaskUIData avoids creating duplicate contents while sorting the data.
*/
private[ui] class TaskTableRowData(
val index: Int,
val taskId: Long,
val attempt: Int,
val speculative: Boolean,
val status: String,
val taskLocality: String,
val executorIdAndHost: String,
val launchTime: Long,
val duration: Long,
val formatDuration: String,
val schedulerDelay: Long,
val taskDeserializationTime: Long,
val gcTime: Long,
val serializationTime: Long,
val gettingResultTime: Long,
val peakExecutionMemoryUsed: Long,
val accumulators: Option[String], // HTML
val input: Option[TaskTableRowInputData],
val output: Option[TaskTableRowOutputData],
val shuffleRead: Option[TaskTableRowShuffleReadData],
val shuffleWrite: Option[TaskTableRowShuffleWriteData],
val bytesSpilled: Option[TaskTableRowBytesSpilledData],
val error: String,
val logs: Map[String, String])
private[ui] class TaskDataSource(
tasks: Seq[TaskUIData],
hasAccumulators: Boolean,
hasInput: Boolean,
hasOutput: Boolean,
hasShuffleRead: Boolean,
hasShuffleWrite: Boolean,
hasBytesSpilled: Boolean,
currentTime: Long,
pageSize: Int,
sortColumn: String,
desc: Boolean,
executorsListener: ExecutorsListener) extends PagedDataSource[TaskTableRowData](pageSize) {
import StagePage._
// Convert TaskUIData to TaskTableRowData, which contains the final contents to show in the table,
// so that we avoid creating duplicate contents while sorting the data
private val data = tasks.map(taskRow).sorted(ordering(sortColumn, desc))
private var _slicedTaskIds: Set[Long] = _
override def dataSize: Int = data.size
override def sliceData(from: Int, to: Int): Seq[TaskTableRowData] = {
val r = data.slice(from, to)
_slicedTaskIds = r.map(_.taskId).toSet
r
}
def slicedTaskIds: Set[Long] = _slicedTaskIds
private def taskRow(taskData: TaskUIData): TaskTableRowData = {
val info = taskData.taskInfo
val metrics = taskData.metrics
val duration = taskData.taskDuration.getOrElse(1L)
val formatDuration = taskData.taskDuration.map(d => UIUtils.formatDuration(d)).getOrElse("")
val schedulerDelay = metrics.map(getSchedulerDelay(info, _, currentTime)).getOrElse(0L)
val gcTime = metrics.map(_.jvmGCTime).getOrElse(0L)
val taskDeserializationTime = metrics.map(_.executorDeserializeTime).getOrElse(0L)
val serializationTime = metrics.map(_.resultSerializationTime).getOrElse(0L)
val gettingResultTime = getGettingResultTime(info, currentTime)
val externalAccumulableReadable = info.accumulables
.filterNot(_.internal)
.flatMap { a =>
(a.name, a.update) match {
case (Some(name), Some(update)) => Some(StringEscapeUtils.escapeHtml4(s"$name: $update"))
case _ => None
}
}
val peakExecutionMemoryUsed = metrics.map(_.peakExecutionMemory).getOrElse(0L)
val maybeInput = metrics.map(_.inputMetrics)
val inputSortable = maybeInput.map(_.bytesRead).getOrElse(0L)
val inputReadable = maybeInput
.map(m => s"${Utils.bytesToString(m.bytesRead)}")
.getOrElse("")
val inputRecords = maybeInput.map(_.recordsRead.toString).getOrElse("")
val maybeOutput = metrics.map(_.outputMetrics)
val outputSortable = maybeOutput.map(_.bytesWritten).getOrElse(0L)
val outputReadable = maybeOutput
.map(m => s"${Utils.bytesToString(m.bytesWritten)}")
.getOrElse("")
val outputRecords = maybeOutput.map(_.recordsWritten.toString).getOrElse("")
val maybeShuffleRead = metrics.map(_.shuffleReadMetrics)
val shuffleReadBlockedTimeSortable = maybeShuffleRead.map(_.fetchWaitTime).getOrElse(0L)
val shuffleReadBlockedTimeReadable =
maybeShuffleRead.map(ms => UIUtils.formatDuration(ms.fetchWaitTime)).getOrElse("")
val totalShuffleBytes = maybeShuffleRead.map(_.totalBytesRead)
val shuffleReadSortable = totalShuffleBytes.getOrElse(0L)
val shuffleReadReadable = totalShuffleBytes.map(Utils.bytesToString).getOrElse("")
val shuffleReadRecords = maybeShuffleRead.map(_.recordsRead.toString).getOrElse("")
val remoteShuffleBytes = maybeShuffleRead.map(_.remoteBytesRead)
val shuffleReadRemoteSortable = remoteShuffleBytes.getOrElse(0L)
val shuffleReadRemoteReadable = remoteShuffleBytes.map(Utils.bytesToString).getOrElse("")
val maybeShuffleWrite = metrics.map(_.shuffleWriteMetrics)
val shuffleWriteSortable = maybeShuffleWrite.map(_.bytesWritten).getOrElse(0L)
val shuffleWriteReadable = maybeShuffleWrite
.map(m => s"${Utils.bytesToString(m.bytesWritten)}").getOrElse("")
val shuffleWriteRecords = maybeShuffleWrite
.map(_.recordsWritten.toString).getOrElse("")
val maybeWriteTime = metrics.map(_.shuffleWriteMetrics.writeTime)
val writeTimeSortable = maybeWriteTime.getOrElse(0L)
val writeTimeReadable = maybeWriteTime.map(t => t / (1000 * 1000)).map { ms =>
if (ms == 0) "" else UIUtils.formatDuration(ms)
}.getOrElse("")
val maybeMemoryBytesSpilled = metrics.map(_.memoryBytesSpilled)
val memoryBytesSpilledSortable = maybeMemoryBytesSpilled.getOrElse(0L)
val memoryBytesSpilledReadable =
maybeMemoryBytesSpilled.map(Utils.bytesToString).getOrElse("")
val maybeDiskBytesSpilled = metrics.map(_.diskBytesSpilled)
val diskBytesSpilledSortable = maybeDiskBytesSpilled.getOrElse(0L)
val diskBytesSpilledReadable = maybeDiskBytesSpilled.map(Utils.bytesToString).getOrElse("")
val input =
if (hasInput) {
Some(TaskTableRowInputData(inputSortable, s"$inputReadable / $inputRecords"))
} else {
None
}
val output =
if (hasOutput) {
Some(TaskTableRowOutputData(outputSortable, s"$outputReadable / $outputRecords"))
} else {
None
}
val shuffleRead =
if (hasShuffleRead) {
Some(TaskTableRowShuffleReadData(
shuffleReadBlockedTimeSortable,
shuffleReadBlockedTimeReadable,
shuffleReadSortable,
s"$shuffleReadReadable / $shuffleReadRecords",
shuffleReadRemoteSortable,
shuffleReadRemoteReadable
))
} else {
None
}
val shuffleWrite =
if (hasShuffleWrite) {
Some(TaskTableRowShuffleWriteData(
writeTimeSortable,
writeTimeReadable,
shuffleWriteSortable,
s"$shuffleWriteReadable / $shuffleWriteRecords"
))
} else {
None
}
val bytesSpilled =
if (hasBytesSpilled) {
Some(TaskTableRowBytesSpilledData(
memoryBytesSpilledSortable,
memoryBytesSpilledReadable,
diskBytesSpilledSortable,
diskBytesSpilledReadable
))
} else {
None
}
val logs = executorsListener.executorToTaskSummary.get(info.executorId)
.map(_.executorLogs).getOrElse(Map.empty)
new TaskTableRowData(
info.index,
info.taskId,
info.attemptNumber,
info.speculative,
info.status,
info.taskLocality.toString,
s"${info.executorId} / ${info.host}",
info.launchTime,
duration,
formatDuration,
schedulerDelay,
taskDeserializationTime,
gcTime,
serializationTime,
gettingResultTime,
peakExecutionMemoryUsed,
if (hasAccumulators) Some(externalAccumulableReadable.mkString("<br/>")) else None,
input,
output,
shuffleRead,
shuffleWrite,
bytesSpilled,
taskData.errorMessage.getOrElse(""),
logs)
}
/**
* Return Ordering according to sortColumn and desc
*/
private def ordering(sortColumn: String, desc: Boolean): Ordering[TaskTableRowData] = {
val ordering: Ordering[TaskTableRowData] = sortColumn match {
case "Index" => Ordering.by(_.index)
case "ID" => Ordering.by(_.taskId)
case "Attempt" => Ordering.by(_.attempt)
case "Status" => Ordering.by(_.status)
case "Locality Level" => Ordering.by(_.taskLocality)
case "Executor ID / Host" => Ordering.by(_.executorIdAndHost)
case "Launch Time" => Ordering.by(_.launchTime)
case "Duration" => Ordering.by(_.duration)
case "Scheduler Delay" => Ordering.by(_.schedulerDelay)
case "Task Deserialization Time" => Ordering.by(_.taskDeserializationTime)
case "GC Time" => Ordering.by(_.gcTime)
case "Result Serialization Time" => Ordering.by(_.serializationTime)
case "Getting Result Time" => Ordering.by(_.gettingResultTime)
case "Peak Execution Memory" => Ordering.by(_.peakExecutionMemoryUsed)
case "Accumulators" =>
if (hasAccumulators) {
Ordering.by(_.accumulators.get)
} else {
throw new IllegalArgumentException(
"Cannot sort by Accumulators because of no accumulators")
}
case "Input Size / Records" =>
if (hasInput) {
Ordering.by(_.input.get.inputSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Input Size / Records because of no inputs")
}
case "Output Size / Records" =>
if (hasOutput) {
Ordering.by(_.output.get.outputSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Output Size / Records because of no outputs")
}
// ShuffleRead
case "Shuffle Read Blocked Time" =>
if (hasShuffleRead) {
Ordering.by(_.shuffleRead.get.shuffleReadBlockedTimeSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Read Blocked Time because of no shuffle reads")
}
case "Shuffle Read Size / Records" =>
if (hasShuffleRead) {
Ordering.by(_.shuffleRead.get.shuffleReadSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Read Size / Records because of no shuffle reads")
}
case "Shuffle Remote Reads" =>
if (hasShuffleRead) {
Ordering.by(_.shuffleRead.get.shuffleReadRemoteSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Remote Reads because of no shuffle reads")
}
// ShuffleWrite
case "Write Time" =>
if (hasShuffleWrite) {
Ordering.by(_.shuffleWrite.get.writeTimeSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Write Time because of no shuffle writes")
}
case "Shuffle Write Size / Records" =>
if (hasShuffleWrite) {
Ordering.by(_.shuffleWrite.get.shuffleWriteSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Write Size / Records because of no shuffle writes")
}
// BytesSpilled
case "Shuffle Spill (Memory)" =>
if (hasBytesSpilled) {
Ordering.by(_.bytesSpilled.get.memoryBytesSpilledSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Spill (Memory) because of no spills")
}
case "Shuffle Spill (Disk)" =>
if (hasBytesSpilled) {
Ordering.by(_.bytesSpilled.get.diskBytesSpilledSortable)
} else {
throw new IllegalArgumentException(
"Cannot sort by Shuffle Spill (Disk) because of no spills")
}
case "Errors" => Ordering.by(_.error)
case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
}
if (desc) {
ordering.reverse
} else {
ordering
}
}
}
private[ui] class TaskPagedTable(
conf: SparkConf,
basePath: String,
data: Seq[TaskUIData],
hasAccumulators: Boolean,
hasInput: Boolean,
hasOutput: Boolean,
hasShuffleRead: Boolean,
hasShuffleWrite: Boolean,
hasBytesSpilled: Boolean,
currentTime: Long,
pageSize: Int,
sortColumn: String,
desc: Boolean,
executorsListener: ExecutorsListener) extends PagedTable[TaskTableRowData] {
override def tableId: String = "task-table"
override def tableCssClass: String =
"table table-bordered table-condensed table-striped table-head-clickable"
override def pageSizeFormField: String = "task.pageSize"
override def prevPageSizeFormField: String = "task.prevPageSize"
override def pageNumberFormField: String = "task.page"
override val dataSource: TaskDataSource = new TaskDataSource(
data,
hasAccumulators,
hasInput,
hasOutput,
hasShuffleRead,
hasShuffleWrite,
hasBytesSpilled,
currentTime,
pageSize,
sortColumn,
desc,
executorsListener)
override def pageLink(page: Int): String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
basePath +
s"&$pageNumberFormField=$page" +
s"&task.sort=$encodedSortColumn" +
s"&task.desc=$desc" +
s"&$pageSizeFormField=$pageSize"
}
override def goButtonFormPath: String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
s"$basePath&task.sort=$encodedSortColumn&task.desc=$desc"
}
def headers: Seq[Node] = {
val taskHeadersAndCssClasses: Seq[(String, String)] =
Seq(
("Index", ""), ("ID", ""), ("Attempt", ""), ("Status", ""), ("Locality Level", ""),
("Executor ID / Host", ""), ("Launch Time", ""), ("Duration", ""),
("Scheduler Delay", TaskDetailsClassNames.SCHEDULER_DELAY),
("Task Deserialization Time", TaskDetailsClassNames.TASK_DESERIALIZATION_TIME),
("GC Time", ""),
("Result Serialization Time", TaskDetailsClassNames.RESULT_SERIALIZATION_TIME),
("Getting Result Time", TaskDetailsClassNames.GETTING_RESULT_TIME),
("Peak Execution Memory", TaskDetailsClassNames.PEAK_EXECUTION_MEMORY)) ++
{if (hasAccumulators) Seq(("Accumulators", "")) else Nil} ++
{if (hasInput) Seq(("Input Size / Records", "")) else Nil} ++
{if (hasOutput) Seq(("Output Size / Records", "")) else Nil} ++
{if (hasShuffleRead) {
Seq(("Shuffle Read Blocked Time", TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME),
("Shuffle Read Size / Records", ""),
("Shuffle Remote Reads", TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE))
} else {
Nil
}} ++
{if (hasShuffleWrite) {
Seq(("Write Time", ""), ("Shuffle Write Size / Records", ""))
} else {
Nil
}} ++
{if (hasBytesSpilled) {
Seq(("Shuffle Spill (Memory)", ""), ("Shuffle Spill (Disk)", ""))
} else {
Nil
}} ++
Seq(("Errors", ""))
if (!taskHeadersAndCssClasses.map(_._1).contains(sortColumn)) {
throw new IllegalArgumentException(s"Unknown column: $sortColumn")
}
val headerRow: Seq[Node] = {
taskHeadersAndCssClasses.map { case (header, cssClass) =>
if (header == sortColumn) {
val headerLink = Unparsed(
basePath +
s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&task.desc=${!desc}" +
s"&task.pageSize=$pageSize")
val arrow = if (desc) "▾" else "▴" // UP or DOWN
<th class={cssClass}>
<a href={headerLink}>
{header}
<span> {Unparsed(arrow)}</span>
</a>
</th>
} else {
val headerLink = Unparsed(
basePath +
s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&task.pageSize=$pageSize")
<th class={cssClass}>
<a href={headerLink}>
{header}
</a>
</th>
}
}
}
<thead>{headerRow}</thead>
}
def row(task: TaskTableRowData): Seq[Node] = {
<tr>
<td>{task.index}</td>
<td>{task.taskId}</td>
<td>{if (task.speculative) s"${task.attempt} (speculative)" else task.attempt.toString}</td>
<td>{task.status}</td>
<td>{task.taskLocality}</td>
<td>
<div style="float: left">{task.executorIdAndHost}</div>
<div style="float: right">
{
task.logs.map {
case (logName, logUrl) => <div><a href={logUrl}>{logName}</a></div>
}
}
</div>
</td>
<td>{UIUtils.formatDate(new Date(task.launchTime))}</td>
<td>{task.formatDuration}</td>
<td class={TaskDetailsClassNames.SCHEDULER_DELAY}>
{UIUtils.formatDuration(task.schedulerDelay)}
</td>
<td class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}>
{UIUtils.formatDuration(task.taskDeserializationTime)}
</td>
<td>
{if (task.gcTime > 0) UIUtils.formatDuration(task.gcTime) else ""}
</td>
<td class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}>
{UIUtils.formatDuration(task.serializationTime)}
</td>
<td class={TaskDetailsClassNames.GETTING_RESULT_TIME}>
{UIUtils.formatDuration(task.gettingResultTime)}
</td>
<td class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}>
{Utils.bytesToString(task.peakExecutionMemoryUsed)}
</td>
{if (task.accumulators.nonEmpty) {
<td>{Unparsed(task.accumulators.get)}</td>
}}
{if (task.input.nonEmpty) {
<td>{task.input.get.inputReadable}</td>
}}
{if (task.output.nonEmpty) {
<td>{task.output.get.outputReadable}</td>
}}
{if (task.shuffleRead.nonEmpty) {
<td class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}>
{task.shuffleRead.get.shuffleReadBlockedTimeReadable}
</td>
<td>{task.shuffleRead.get.shuffleReadReadable}</td>
<td class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}>
{task.shuffleRead.get.shuffleReadRemoteReadable}
</td>
}}
{if (task.shuffleWrite.nonEmpty) {
<td>{task.shuffleWrite.get.writeTimeReadable}</td>
<td>{task.shuffleWrite.get.shuffleWriteReadable}</td>
}}
{if (task.bytesSpilled.nonEmpty) {
<td>{task.bytesSpilled.get.memoryBytesSpilledReadable}</td>
<td>{task.bytesSpilled.get.diskBytesSpilledReadable}</td>
}}
{errorMessageCell(task.error)}
</tr>
}
private def errorMessageCell(error: String): Seq[Node] = {
val isMultiline = error.indexOf('\n') >= 0
// Display the first line by default
val errorSummary = StringEscapeUtils.escapeHtml4(
if (isMultiline) {
error.substring(0, error.indexOf('\n'))
} else {
error
})
val details = if (isMultiline) {
// scalastyle:off
<span onclick="this.parentNode.querySelector('.stacktrace-details').classList.toggle('collapsed')"
class="expand-details">
+details
</span> ++
<div class="stacktrace-details collapsed">
<pre>{error}</pre>
</div>
// scalastyle:on
} else {
""
}
<td>{errorSummary}{details}</td>
}
}
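// Illustrative sketch (stdlib only; the error text below is made up): how the summary
// shown by errorMessageCell is derived. The first line of a multiline error becomes the
// visible summary, while the full text goes into the collapsible details block.
private object ErrorSummarySketch {
  def summaryOf(error: String): String =
    if (error.indexOf('\n') >= 0) error.substring(0, error.indexOf('\n')) else error

  def main(args: Array[String]): Unit = {
    val error = "java.lang.RuntimeException: boom\n  at Foo.bar(Foo.scala:1)"
    assert(summaryOf(error) == "java.lang.RuntimeException: boom")
  }
}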
|
mike0sv/spark
|
core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
|
Scala
|
apache-2.0
| 55,150
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed.cascading_backend
import cascading.pipe.joiner.{Joiner => CJoiner, JoinerClosure}
import cascading.tuple.{Tuple => CTuple}
import com.twitter.scalding.serialization.Externalizer
import com.twitter.scalding.typed.MultiJoinFunction
import scala.collection.JavaConverters._
/**
* Only intended to be used to implement the hashCogroup on TypedPipe/Grouped
*/
class HashJoiner[K, V, W, R](
rightHasSingleValue: Boolean,
rightGetter: MultiJoinFunction[K, W],
joiner: (K, V, Iterable[W]) => Iterator[R]
) extends CJoiner {
private[this] val joinEx = Externalizer(joiner)
override def getIterator(jc: JoinerClosure) = {
// The left one cannot be iterated multiple times on Hadoop:
val leftIt = jc.getIterator(0).asScala // should only be 0 or 1 here
if (leftIt.isEmpty) {
(Iterator.empty: Iterator[CTuple]).asJava // java is not covariant so we need this
} else {
// In this branch there must be at least one item on the left in a hash-join
val left = leftIt.buffered
val key = left.head.getObject(0).asInstanceOf[K]
// It is safe to iterate over the right side again and again
val rightIterable =
if (rightHasSingleValue) {
// Materialize this once for all left values
rightGetter(key, jc.getIterator(1).asScala.map(_.getObject(1): Any), Nil).toList
} else {
// TODO: it might still be good to count how many there are and materialize
// in memory without reducing again
new Iterable[W] {
def iterator = rightGetter(key, jc.getIterator(1).asScala.map(_.getObject(1): Any), Nil)
}
}
left.flatMap { kv =>
val leftV = kv.getObject(1).asInstanceOf[V] // get just the Vs
joinEx
.get(key, leftV, rightIterable)
.map { rval =>
// There always has to be four resulting fields
// or otherwise the flow planner will throw
val res = CTuple.size(4)
res.set(0, key)
res.set(1, rval)
res
}
}.asJava
}
}
override val numJoins = 1
}
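// Illustrative sketch (plain Scala collections, no cascading involved; the names
// exampleJoiner, lefts and rights are made up): the joiner function above receives the
// key, one left value and the complete right-side Iterable for that key, and is invoked
// once per left value.
private object HashJoinerConceptSketch {
  val exampleJoiner: (Int, String, Iterable[Int]) => Iterator[(String, Int)] =
    (_, leftV, rights) => rights.iterator.map(r => (leftV, r))

  def main(args: Array[String]): Unit = {
    val rights = List(10, 20)    // safe to iterate repeatedly, like the right side above
    val lefts = List("a", "b")   // iterated only once, like the left side above
    val joined = lefts.flatMap(l => exampleJoiner(1, l, rights))
    assert(joined == List(("a", 10), ("a", 20), ("b", 10), ("b", 20)))
  }
}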
|
twitter/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/typed/cascading_backend/HashJoiner.scala
|
Scala
|
apache-2.0
| 2,716
|
package gapt.formats.dimacs
import gapt.expr._
import gapt.expr.formula.Atom
import gapt.models.PropositionalModel
import gapt.proofs.rup.RupProof
import gapt.proofs.{ Clause, HOLClause }
import scala.collection.mutable
object DIMACS {
type Atom = Int
type Literal = Int
type Clause = Seq[Literal]
type CNF = Seq[Clause]
type Model = Seq[Literal]
def maxAtom( cnf: CNF ) = {
val atoms = cnf.flatten.map( math.abs )
if ( atoms.nonEmpty ) atoms.max else 0
}
}
class DIMACSEncoding {
private val atomMap = mutable.Map[Atom, DIMACS.Atom]()
private val reverseAtomMap = mutable.Map[DIMACS.Atom, Atom]()
private var atomIndex = 1
def encodeAtom( atom: Atom ): DIMACS.Atom =
atomMap.getOrElse( atom, {
val idx = atomIndex
atomIndex += 1
atomMap( atom ) = idx
reverseAtomMap( idx ) = atom
idx
} )
def encodeClause( clause: HOLClause ): DIMACS.Clause =
clause.map( encodeAtom ).map( -_, +_ ).elements
def encodeCNF( cnf: Iterable[HOLClause] ): DIMACS.CNF =
cnf.map( encodeClause ).toSeq
def decodeAtom( i: DIMACS.Atom ) = reverseAtomMap( i )
def decodeAtomOption( i: DIMACS.Atom ) = reverseAtomMap.get( i )
def decodeClause( clause: DIMACS.Clause ) =
Clause( clause.filter( _ < 0 ), clause.filter( _ > 0 ) ).map( l => decodeAtom( math.abs( l ) ) )
def decodeModel( model: DIMACS.Model ) =
PropositionalModel( model.flatMap {
case l if l > 0 => decodeAtomOption( l ) map { _ -> true }
case l if l < 0 => decodeAtomOption( -l ) map { _ -> false }
} )
override def toString = s"DIMACSEncoding(${atomMap.map( x => s"${x._1} -> ${x._2}" ).mkString( ", " )})"
}
object readDIMACS {
private val whitespace = """\s""".r
def apply( dimacsOutput: String ): DIMACS.Model =
whitespace
.split( dimacsOutput.trim )
.diff( Seq( "SAT", "s", "SATISFIABLE", "v", "0", "" ) )
.map { _.toInt }
.toIndexedSeq
}
object writeDIMACS {
def apply( cnf: DIMACS.CNF ): String = {
val dimacsInput = new StringBuilder
dimacsInput ++= s"p cnf ${DIMACS maxAtom cnf} ${cnf size}\\n"
cnf foreach { clause =>
dimacsInput ++= s"${clause mkString " "} 0\\n"
}
dimacsInput.result()
}
}
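// Illustrative usage sketch of the plain-Int DIMACS types above (no HOL formulas; the
// clause values are made up): serialize a small CNF and parse a solver model.
object exampleDimacsUsage {
  def main(args: Array[String]): Unit = {
    val cnf: DIMACS.CNF = Seq(Seq(1, -2), Seq(2, 3))
    // maxAtom is 3 and there are 2 clauses, so the header reads "p cnf 3 2"
    assert(writeDIMACS(cnf) == "p cnf 3 2\n1 -2 0\n2 3 0\n")
    // a typical satisfiable answer: atom 1 true, atoms 2 and 3 false
    assert(readDIMACS("SAT\n1 -2 -3 0") == Seq(1, -2, -3))
  }
}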
object readDRUP {
def apply( cnf: DIMACS.CNF, drupOutput: String ): RupProof =
RupProof( cnf.map( RupProof.Input( _ ) ) ++ apply( drupOutput ) )
def apply( drupOutput: String ): Seq[RupProof.Line] =
drupOutput.trim.split( "\n" ).toSeq flatMap {
case line if line startsWith "s " => None
case line if line startsWith "%RUPD" => None
case "" => None
case "UNSAT" => None
case "f DRUP" => None
case "o proof DRUP" => None
case line if line.startsWith( "d " ) =>
Some( RupProof.Delete( line.substring( 2 ).split( " " ).toSeq.map( _.toInt ).dropRight( 1 ) ) )
case line =>
Some( RupProof.Rup( line.split( " " ).map( _.toInt ).toSeq.dropRight( 1 ) ) )
}
}
object writeWDIMACS {
def apply( wcnf: Seq[( DIMACS.Clause, Int )], threshold: Int ): String = {
val dimacsInput = new StringBuilder
dimacsInput ++= s"p wcnf ${DIMACS maxAtom wcnf.map( _._1 )} ${wcnf size} $threshold\\n"
wcnf foreach {
case ( clause, weight ) =>
dimacsInput ++= s"$weight ${clause mkString " "} 0\\n"
}
dimacsInput.result()
}
def apply( hard: DIMACS.CNF, soft: Seq[( DIMACS.Clause, Int )] ): String = {
val threshold = soft.map( _._2 ).sum + 1
writeWDIMACS( hard.map( _ -> threshold ) ++ soft, threshold )
}
}
object readWDIMACS {
def apply( dimacsOutput: String ): Option[DIMACS.Model] = {
val lines = dimacsOutput.split( "\n" )
if ( lines exists { _ startsWith "o " } ) {
Some( lines
.filter { _ startsWith "v " }
.map { _ substring 2 trim }
.flatMap[String] { _.split( " " ) }
.map { _ replace ( "x", "" ) } // toysat :-(
.filter { _ nonEmpty }
.map { _ toInt }
.filterNot { _ == 0 }
.toIndexedSeq )
} else {
None
}
}
}
|
gapt/gapt
|
core/src/main/scala/gapt/formats/dimacs/dimacs.scala
|
Scala
|
gpl-3.0
| 4,147
|
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.struct
import _root_.scalapb.internal.compat.JavaConverters._
/** `ListValue` is a wrapper around a repeated field of values.
*
* The JSON representation for `ListValue` is JSON array.
*
* @param values
* Repeated field of dynamically typed values.
*/
@SerialVersionUID(0L)
final case class ListValue(
values: _root_.scala.Seq[com.google.protobuf.struct.Value] = _root_.scala.Seq.empty
) extends scalapb.GeneratedMessage with scalapb.Message[ListValue] with scalapb.lenses.Updatable[ListValue] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
values.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
__size
}
final override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
values.foreach { __v =>
val __m = __v
_output__.writeTag(1, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
}
def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.struct.ListValue = {
val __values = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.struct.Value] ++= this.values)
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__values += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.struct.Value.defaultInstance)
case tag => _input__.skipField(tag)
}
}
com.google.protobuf.struct.ListValue(
values = __values.result()
)
}
def clearValues = copy(values = _root_.scala.Seq.empty)
def addValues(__vs: com.google.protobuf.struct.Value*): ListValue = addAllValues(__vs)
def addAllValues(__vs: Iterable[com.google.protobuf.struct.Value]): ListValue = copy(values = values ++ __vs)
def withValues(__v: _root_.scala.Seq[com.google.protobuf.struct.Value]): ListValue = copy(values = __v)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => values
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PRepeated(values.iterator.map(_.toPMessage).toVector)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.struct.ListValue
}
object ListValue extends scalapb.GeneratedMessageCompanion[com.google.protobuf.struct.ListValue] with scalapb.JavaProtoSupport[com.google.protobuf.struct.ListValue, com.google.protobuf.ListValue] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.struct.ListValue] with scalapb.JavaProtoSupport[com.google.protobuf.struct.ListValue, com.google.protobuf.ListValue] = this
def toJavaProto(scalaPbSource: com.google.protobuf.struct.ListValue): com.google.protobuf.ListValue = {
val javaPbOut = com.google.protobuf.ListValue.newBuilder
javaPbOut.addAllValues(scalaPbSource.values.iterator.map(com.google.protobuf.struct.Value.toJavaProto).toIterable.asJava)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.ListValue): com.google.protobuf.struct.ListValue = com.google.protobuf.struct.ListValue(
values = javaPbSource.getValuesList.asScala.iterator.map(com.google.protobuf.struct.Value.fromJavaProto).toSeq
)
def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.struct.ListValue = {
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
val __fields = javaDescriptor.getFields
com.google.protobuf.struct.ListValue(
__fieldsMap.getOrElse(__fields.get(0), Nil).asInstanceOf[_root_.scala.Seq[com.google.protobuf.struct.Value]]
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.struct.ListValue] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.struct.ListValue(
__fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Seq[com.google.protobuf.struct.Value]]).getOrElse(_root_.scala.Seq.empty)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = StructProto.javaDescriptor.getMessageTypes.get(2)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = StructProto.scalaDescriptor.messages(2)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 1 => __out = com.google.protobuf.struct.Value
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.struct.ListValue(
)
implicit class ListValueLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.struct.ListValue]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.struct.ListValue](_l) {
def values: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.struct.Value]] = field(_.values)((c_, f_) => c_.copy(values = f_))
}
final val VALUES_FIELD_NUMBER = 1
def of(
values: _root_.scala.Seq[com.google.protobuf.struct.Value]
): _root_.com.google.protobuf.struct.ListValue = _root_.com.google.protobuf.struct.ListValue(
values
)
}
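// Illustrative usage sketch (assumes the sibling ScalaPB-generated Value message and its
// Kind oneof from struct.proto; the literal values are made up): build a ListValue,
// round-trip it through its wire form and read the repeated field back.
private object ListValueUsageSketch {
  def main(args: Array[String]): Unit = {
    val lv = com.google.protobuf.struct.ListValue()
      .addValues(
        com.google.protobuf.struct.Value(com.google.protobuf.struct.Value.Kind.NumberValue(1.0)),
        com.google.protobuf.struct.Value(com.google.protobuf.struct.Value.Kind.StringValue("a")))
    val copy = com.google.protobuf.struct.ListValue.parseFrom(lv.toByteArray)
    assert(copy.values.size == 2)
  }
}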
|
dotty-staging/ScalaPB
|
scalapb-runtime/jvm/src/main/scala/com/google/protobuf/struct/ListValue.scala
|
Scala
|
apache-2.0
| 6,992
|
package com.ruimo.scoins
case class Percent(value: Double) extends AnyVal with Ordered[Percent] {
def of(that: Double) = value * that / 100
def +(that: Percent) = Percent(value + that.value)
def -(that: Percent) = Percent(value - that.value)
override def compare(that: Percent): Int =
if (value < that.value) -1
else if (value > that.value) 1
else 0
}
object Percent {
import scala.language.implicitConversions
implicit def toPercent(d: Double): Percent = Percent(d)
implicit def toPercent(d: java.lang.Double): Percent = Percent(d)
implicit def fromPercent(p: Percent): Double = p.value
}
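// Illustrative usage sketch of Percent (stdlib only; the numbers are made up):
object PercentUsageSketch {
  def main(args: Array[String]): Unit = {
    assert(Percent(25).of(200) == 50.0)             // 25% of 200
    assert(Percent(10) + Percent(5) == Percent(15)) // arithmetic keeps the Percent wrapper
    assert(Percent(10) < Percent(12.5))             // ordering via Ordered[Percent]
    val p: Percent = 33.0                           // implicit Double => Percent conversion
    assert(p.value == 33.0)
  }
}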
|
ruimo/scoins
|
src/main/scala/com/ruimo/scoins/Percent.scala
|
Scala
|
apache-2.0
| 623
|
package com.alzindiq.cluster
import com.alzindiq.Plumber
import scala.collection.mutable
object PenaltyCalculator {
def calculatePenalty(bucket : String, plumbers : Set[Plumber], neighbouSimSumMap : Map[Plumber, Double], precalcCohe : Double, similarities : Map[Plumber,Map[Plumber,Double]]) : Double = {
precalcCohe + correlation(bucket, plumbers, neighbouSimSumMap, Math.abs(1-precalcCohe))
}
def calculatePenalty(bucket : String, plumbers : Set[Plumber], neighbouSimSumMap : Map[Plumber, Double], similarities : Map[Plumber,Map[Plumber,Double]]) : Double = {
var cohe = cohesion(bucket, plumbers, similarities)
val corr= correlation(bucket, plumbers, neighbouSimSumMap, Math.abs(1-cohe))
cohe + corr
}
def cohesion(bucket : String, plumbers : Set[Plumber], similarities : Map[Plumber,Map[Plumber,Double]]) = {
if(plumbers.size <= 1) {
0d
} else {
var cohe = 0d
val list = plumbers.toList
val range = 0 to list.size-1
for(i <- range; j <- range
if j>i){
val simM = similarities.getOrElse(list(i),Map.empty) // splitting it boosts performance
cohe = cohe + 2 * (1 - simM.getOrElse(list(j), 0.0))
}
cohe
}
}
private def correlation(bucket : String, plumbers : Set[Plumber], neighbouSimSumMap : Map[Plumber, Double], simToSelf : Double) : Double = plumbers.map(neighbouSimSumMap.getOrElse(_,0d)).sum - simToSelf
}
|
alzindiq/plumb
|
src/main/scala/com/alzindiq/cluster/PenaltyCalculator.scala
|
Scala
|
apache-2.0
| 1,447
|
/*
* Copyright (c) 2011-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
/**
* Type class supporting type safe cast.
*
* @author Miles Sabin
*/
trait Typeable[T] extends Serializable {
def cast(t: Any): Option[T]
def describe: String
override def toString = s"Typeable[$describe]"
}
trait LowPriorityTypeable {
implicit def dfltTypeable[T]: Typeable[T] = macro TypeableMacros.dfltTypeableImpl[T]
}
/**
* Provides instances of `Typeable`. Also provides an implicit conversion which enhances arbitrary values with a
* `cast[T]` method.
*/
object Typeable extends TupleTypeableInstances with LowPriorityTypeable {
import java.{ lang => jl }
import scala.collection.{ GenMap, GenTraversable }
import scala.reflect.ClassTag
import syntax.typeable._
def apply[T](implicit castT: Typeable[T]) = castT
case class ValueTypeable[T, B](cB: Class[B], describe: String) extends Typeable[T] {
def cast(t: Any): Option[T] = {
if(t != null && cB.isInstance(t)) Some(t.asInstanceOf[T]) else None
}
}
/** Typeable instance for `Byte`. */
implicit val byteTypeable: Typeable[Byte] = ValueTypeable[Byte, jl.Byte](classOf[jl.Byte], "Byte")
/** Typeable instance for `Short`. */
implicit val shortTypeable: Typeable[Short] = ValueTypeable[Short, jl.Short](classOf[jl.Short], "Short")
/** Typeable instance for `Char`. */
implicit val charTypeable: Typeable[Char] = ValueTypeable[Char, jl.Character](classOf[jl.Character], "Char")
/** Typeable instance for `Int`. */
implicit val intTypeable: Typeable[Int] = ValueTypeable[Int, jl.Integer](classOf[jl.Integer], "Int")
/** Typeable instance for `Long`. */
implicit val longTypeable: Typeable[Long] = ValueTypeable[Long, jl.Long](classOf[jl.Long], "Long")
/** Typeable instance for `Float`. */
implicit val floatTypeable: Typeable[Float] = ValueTypeable[Float, jl.Float](classOf[jl.Float], "Float")
/** Typeable instance for `Double`. */
implicit val doubleTypeable: Typeable[Double] = ValueTypeable[Double, jl.Double](classOf[jl.Double], "Double")
/** Typeable instance for `Boolean`. */
implicit val booleanTypeable: Typeable[Boolean] = ValueTypeable[Boolean, jl.Boolean](classOf[jl.Boolean], "Boolean")
/** Typeable instance for `Unit`. */
implicit val unitTypeable: Typeable[Unit] = ValueTypeable[Unit, runtime.BoxedUnit](classOf[runtime.BoxedUnit], "Unit")
def isValClass[T](clazz: Class[T]) =
(classOf[jl.Number] isAssignableFrom clazz) ||
clazz == classOf[jl.Boolean] ||
clazz == classOf[jl.Character] ||
clazz == classOf[runtime.BoxedUnit]
/** Typeable instance for `Any`. */
implicit val anyTypeable: Typeable[Any] =
new Typeable[Any] {
def cast(t: Any): Option[Any] = Some(t)
def describe = "Any"
}
/** Typeable instance for `AnyVal`. */
implicit val anyValTypeable: Typeable[AnyVal] =
new Typeable[AnyVal] {
def cast(t: Any): Option[AnyVal] = {
if(t != null && isValClass(t.getClass)) Some(t.asInstanceOf[AnyVal]) else None
}
def describe = "AnyVal"
}
/** Typeable instance for `AnyRef`. */
implicit val anyRefTypeable: Typeable[AnyRef] =
new Typeable[AnyRef] {
def cast(t: Any): Option[AnyRef] = {
if(t == null || isValClass(t.getClass)) None else Some(t.asInstanceOf[AnyRef])
}
def describe = "AnyRef"
}
/** Typeable instance for simple monomorphic types */
def simpleTypeable[T](erased: Class[T]): Typeable[T] =
new Typeable[T] {
def cast(t: Any): Option[T] = {
if(t != null && erased.isAssignableFrom(t.getClass)) Some(t.asInstanceOf[T]) else None
}
def describe = {
// Workaround for https://issues.scala-lang.org/browse/SI-5425
try {
erased.getSimpleName
} catch {
case _: InternalError =>
erased.getName
}
}
}
/** Typeable instance for singleton value types */
def valueSingletonTypeable[T](value: T, name: String): Typeable[T] =
new Typeable[T] {
def cast(t: Any): Option[T] =
if(t == value) Some(value) else None
def describe = s"$name($value)"
}
/** Typeable instance for singleton reference types */
def referenceSingletonTypeable[T <: AnyRef](value: T, name: String): Typeable[T] =
new Typeable[T] {
def cast(t: Any): Option[T] =
if(t.asInstanceOf[AnyRef] eq value) Some(value) else None
def describe = s"$name.type"
}
/** Typeable instance for intersection types with typeable parents */
def intersectionTypeable[T](parents: Array[Typeable[_]]): Typeable[T] =
new Typeable[T] {
def cast(t: Any): Option[T] = {
if(t != null && parents.forall(_.cast(t).isDefined)) Some(t.asInstanceOf[T]) else None
}
def describe = parents map(_.describe) mkString " with "
}
/** Typeable instance for `Option`. */
implicit def optionTypeable[T](implicit castT: Typeable[T]): Typeable[Option[T]] =
new Typeable[Option[T]]{
def cast(t: Any): Option[Option[T]] = {
if(t == null) None
else if(t.isInstanceOf[Option[_]]) {
val o = t.asInstanceOf[Option[_]]
if(o.isEmpty) Some(t.asInstanceOf[Option[T]])
else for(e <- o; _ <- e.cast[T]) yield t.asInstanceOf[Option[T]]
} else None
}
def describe = s"Option[${castT.describe}]"
}
/** Typeable instance for `Either`. */
implicit def eitherTypeable[A, B]
(implicit castA: Typeable[A], castB: Typeable[B]): Typeable[Either[A, B]] =
new Typeable[Either[A, B]] {
def cast(t: Any): Option[Either[A, B]] = {
t.cast[Left[A, B]] orElse t.cast[Right[A, B]]
}
def describe = s"Either[${castA.describe}, ${castB.describe}]"
}
/** Typeable instance for `Left`. */
implicit def leftTypeable[A, B](implicit castA: Typeable[A]): Typeable[Left[A, B]] =
new Typeable[Left[A, B]] {
def cast(t: Any): Option[Left[A, B]] = {
if(t == null) None
else if(t.isInstanceOf[Left[_, _]]) {
val l = t.asInstanceOf[Left[_, _]]
for(a <- l.a.cast[A]) yield t.asInstanceOf[Left[A, B]]
} else None
}
def describe = s"Left[${castA.describe}]"
}
/** Typeable instance for `Right`. */
implicit def rightTypeable[A, B](implicit castB: Typeable[B]): Typeable[Right[A, B]] =
new Typeable[Right[A, B]] {
def cast(t: Any): Option[Right[A, B]] = {
if(t == null) None
else if(t.isInstanceOf[Right[_, _]]) {
val r = t.asInstanceOf[Right[_, _]]
for(b <- r.b.cast[B]) yield t.asInstanceOf[Right[A, B]]
} else None
}
def describe = s"Right[${castB.describe}]"
}
/** Typeable instance for `GenTraversable`.
* Note that the contents will be tested for conformance to the element type. */
implicit def genTraversableTypeable[CC[X] <: GenTraversable[X], T]
(implicit mCC: ClassTag[CC[_]], castT: Typeable[T]): Typeable[CC[T] with GenTraversable[T]] =
// Nb. the apparently redundant `with GenTraversable[T]` is a workaround for a
// Scala 2.10.x bug which causes conflicts between this instance and `anyTypeable`.
new Typeable[CC[T]] {
def cast(t: Any): Option[CC[T]] =
if(t == null) None
else if(mCC.runtimeClass isAssignableFrom t.getClass) {
val cc = t.asInstanceOf[CC[Any]]
if(cc.forall(_.cast[T].isDefined)) Some(t.asInstanceOf[CC[T]])
else None
} else None
def describe = s"${mCC.runtimeClass.getSimpleName}[${castT.describe}]"
}
/** Typeable instance for `Map`. Note that the contents will be tested for conformance to the key/value types. */
implicit def genMapTypeable[M[X, Y], K, V]
(implicit ev: M[K, V] <:< GenMap[K, V], mM: ClassTag[M[_, _]], castK: Typeable[K], castV: Typeable[V]): Typeable[M[K, V]] =
new Typeable[M[K, V]] {
def cast(t: Any): Option[M[K, V]] =
if(t == null) None
else if(mM.runtimeClass isAssignableFrom t.getClass) {
val m = t.asInstanceOf[GenMap[Any, Any]]
if(m.forall(_.cast[(K, V)].isDefined)) Some(t.asInstanceOf[M[K, V]])
else None
} else None
def describe = s"${mM.runtimeClass.getSimpleName}[${castK.describe}, ${castV.describe}]"
}
/** Typeable instance for polymorphic case classes with typeable elements */
def caseClassTypeable[T](erased: Class[T], fields: Array[Typeable[_]]): Typeable[T] =
new Typeable[T] {
def cast(t: Any): Option[T] =
if(classOf[Product].isAssignableFrom(erased) && erased.isAssignableFrom(t.getClass)) {
val c = t.asInstanceOf[Product with T]
val f = c.productIterator.toList
if((f zip fields).forall { case (f, castF) => castF.cast(f).isDefined }) Some(c)
else None
} else None
def describe = {
val typeParams = fields map(_.describe) mkString(",")
// Workaround for https://issues.scala-lang.org/browse/SI-5425
val name = try {
erased.getSimpleName
} catch {
case _: InternalError =>
erased.getName
}
s"$name[$typeParams]"
}
}
/** Typeable instance for `HNil`. */
implicit val hnilTypeable: Typeable[HNil] =
new Typeable[HNil] {
def cast(t: Any): Option[HNil] = if(t != null && t.isInstanceOf[HNil]) Some(t.asInstanceOf[HNil]) else None
def describe = "HNil"
}
/** Typeable instance for `HList`s. Note that the contents will be tested for conformance to the element types. */
implicit def hlistTypeable[H, T <: HList](implicit castH: Typeable[H], castT: Typeable[T]): Typeable[H :: T] =
new Typeable[H :: T] {
def cast(t: Any): Option[H :: T] = {
if(t == null) None
else if(t.isInstanceOf[::[_, _ <: HList]]) {
val l = t.asInstanceOf[::[_, _ <: HList]]
for(hd <- l.head.cast[H]; tl <- (l.tail: Any).cast[T]) yield t.asInstanceOf[H :: T]
} else None
}
def describe = s"${castH.describe} :: ${castT.describe}"
}
/** Typeable instance for `CNil`. */
implicit val cnilTypeable: Typeable[CNil] =
new Typeable[CNil] {
def cast(t: Any): Option[CNil] = None
def describe = "CNil"
}
/**
* Typeable instance for `Coproduct`s.
* Note that the contents will be tested for conformance to one of the element types.
*/
implicit def coproductTypeable[H, T <: Coproduct]
(implicit castH: Typeable[H], castT: Typeable[T]): Typeable[H :+: T] =
new Typeable[H :+: T] {
def cast(t: Any): Option[H :+: T] = {
t.cast[Inl[H, T]] orElse t.cast[Inr[H, T]]
}
def describe = s"${castH.describe} :+: ${castT.describe}"
}
/** Typeable instance for `Inl`. */
implicit def inlTypeable[H, T <: Coproduct](implicit castH: Typeable[H]): Typeable[Inl[H, T]] =
new Typeable[Inl[H, T]] {
def cast(t: Any): Option[Inl[H, T]] = {
if(t == null) None
else if(t.isInstanceOf[Inl[_, _ <: Coproduct]]) {
val l = t.asInstanceOf[Inl[_, _ <: Coproduct]]
for(hd <- l.head.cast[H]) yield t.asInstanceOf[Inl[H, T]]
} else None
}
def describe = s"Inl[${castH.describe}}]"
}
/** Typeable instance for `Inr`. */
implicit def inrTypeable[H, T <: Coproduct](implicit castT: Typeable[T]): Typeable[Inr[H, T]] =
new Typeable[Inr[H, T]] {
def cast(t: Any): Option[Inr[H, T]] = {
if(t == null) None
else if(t.isInstanceOf[Inr[_, _ <: Coproduct]]) {
val r = t.asInstanceOf[Inr[_, _ <: Coproduct]]
for(tl <- r.tail.cast[T]) yield t.asInstanceOf[Inr[H, T]]
} else None
}
def describe = s"Inr[${castT.describe}}]"
}
}
/**
* Extractor for use of `Typeable` in pattern matching.
*
* Thanks to Stacy Curl for the idea.
*
* @author Miles Sabin
*/
trait TypeCase[T] extends Serializable {
def unapply(t: Any): Option[T]
}
object TypeCase {
import syntax.typeable._
def apply[T](implicit tt:Typeable[T]): TypeCase[T] = new TypeCase[T] {
def unapply(t: Any): Option[T] = t.cast[T]
override def toString = s"TypeCase[${tt.describe}]"
}
}
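// Illustrative usage sketch (the values below are made up): Typeable gives a safe cast
// for otherwise-erased types, and TypeCase lets it drive pattern matching, including a
// conformance check on collection elements.
private object TypeableUsageSketch {
  import syntax.typeable._

  def main(args: Array[String]): Unit = {
    val xs: Any = List(1, 2, 3)
    assert(xs.cast[List[Int]] == Some(List(1, 2, 3)))
    assert(xs.cast[List[String]] == None) // element types do not conform

    val ListOfInt = TypeCase[List[Int]]
    val described = xs match {
      case ListOfInt(ints) => s"ints summing to ${ints.sum}"
      case _               => "something else"
    }
    assert(described == "ints summing to 6")
  }
}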
@macrocompat.bundle
class TypeableMacros(val c: blackbox.Context) extends SingletonTypeUtils {
import c.universe._
import internal._
import definitions.NothingClass
def dfltTypeableImpl[T: WeakTypeTag]: Tree = {
val tpe = weakTypeOf[T]
val typeableTpe = typeOf[Typeable[_]].typeConstructor
val genericTpe = typeOf[Generic[_]].typeConstructor
val dealiased = tpe.dealias
dealiased match {
case t: TypeRef if t.sym == NothingClass =>
c.abort(c.enclosingPosition, "No Typeable for Nothing")
case ExistentialType(_, underlying) =>
val tArgs = dealiased.typeArgs
val normalized = appliedType(dealiased.typeConstructor, tArgs)
val normalizedTypeable = c.inferImplicitValue(appliedType(typeableTpe, List(normalized)))
if(normalizedTypeable == EmptyTree)
c.abort(c.enclosingPosition, s"No default Typeable for parametrized type $tpe")
normalizedTypeable
case SingletonSymbolType(c) =>
val sym = mkSingletonSymbol(c)
val name = sym.symbol.name.toString
q"""_root_.shapeless.Typeable.referenceSingletonTypeable[$tpe]($sym, $name)"""
case RefinedType(parents, decls) =>
if(decls.nonEmpty)
c.abort(c.enclosingPosition, "No Typeable for a refinement with non-empty decls")
val parentTypeables = parents.filterNot(_ =:= typeOf[AnyRef]).map { parent =>
c.inferImplicitValue(appliedType(typeableTpe, List(parent)))
}
if(parentTypeables.exists(_ == EmptyTree))
c.abort(c.enclosingPosition, "Missing Typeable for parent of a refinement")
q"""
_root_.shapeless.Typeable.intersectionTypeable(
_root_.scala.Array[_root_.shapeless.Typeable[_]](..$parentTypeables)
)
"""
case pTpe if pTpe.typeArgs.nonEmpty =>
val pSym = {
val sym = pTpe.typeSymbol
if (!sym.isClass)
c.abort(c.enclosingPosition, s"No default Typeable for parametrized type $tpe")
val pSym0 = sym.asClass
pSym0.typeSignature // Workaround for <https://issues.scala-lang.org/browse/SI-7755>
pSym0
}
if(!pSym.isCaseClass)
c.abort(c.enclosingPosition, s"No default Typeable for parametrized type $tpe")
val fields = tpe.decls.toList collect {
case sym: TermSymbol if sym.isVal && sym.isCaseAccessor => sym.typeSignatureIn(tpe)
}
val fieldTypeables = fields.map { field => c.inferImplicitValue(appliedType(typeableTpe, List(field))) }
if(fieldTypeables.exists(_ == EmptyTree))
c.abort(c.enclosingPosition, "Missing Typeable for field of a case class")
q"""
_root_.shapeless.Typeable.caseClassTypeable(
classOf[$tpe], _root_.scala.Array[_root_.shapeless.Typeable[_]](..$fieldTypeables)
)
"""
case SingleType(_, v) if !v.isParameter =>
val name = v.name.toString
q"""_root_.shapeless.Typeable.referenceSingletonTypeable[$tpe]($v.asInstanceOf[$tpe], $name)"""
case ConstantType(c) =>
val name = c.tpe.typeSymbol.name.toString
q"""_root_.shapeless.Typeable.valueSingletonTypeable[$tpe]($c.asInstanceOf[$tpe], $name)"""
case other =>
q"""_root_.shapeless.Typeable.simpleTypeable(classOf[$tpe])"""
}
}
}
|
liff/shapeless
|
core/src/main/scala/shapeless/typeable.scala
|
Scala
|
apache-2.0
| 16,162
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter
import java.time.{ZoneOffset, ZonedDateTime}
import java.util.{Date, Locale}
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.DataUtilities
import org.locationtech.geomesa.filter.Bounds.Bound
import org.locationtech.geomesa.filter.expression.AttributeExpression.{FunctionLiteral, PropertyLiteral}
import org.locationtech.geomesa.filter.visitor.IdDetectingFilterVisitor
import org.locationtech.geomesa.utils.date.DateUtils.toInstant
import org.locationtech.geomesa.utils.geotools.GeometryUtils
import org.locationtech.geomesa.utils.geotools.converters.FastConverter
import org.locationtech.jts.geom._
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter._
import org.opengis.filter.expression.{Expression, PropertyName}
import org.opengis.filter.spatial._
import org.opengis.filter.temporal.{After, Before, During, TEquals}
import org.opengis.temporal.Period
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
object FilterHelper {
import org.locationtech.geomesa.utils.geotools.WholeWorldPolygon
// helper shim to let other classes avoid importing FilterHelper.logger
object FilterHelperLogger extends LazyLogging {
private [FilterHelper] def log = logger
}
val ff: FilterFactory2 = org.locationtech.geomesa.filter.ff
def isFilterWholeWorld(f: Filter): Boolean = f match {
case op: BBOX => isOperationGeomWholeWorld(op)
case op: Intersects => isOperationGeomWholeWorld(op)
case op: Overlaps => isOperationGeomWholeWorld(op)
case op: Within => isOperationGeomWholeWorld(op, SpatialOpOrder.PropertyFirst)
case op: Contains => isOperationGeomWholeWorld(op, SpatialOpOrder.LiteralFirst)
case _ => false
}
private def isOperationGeomWholeWorld[Op <: BinarySpatialOperator]
(op: Op, order: SpatialOpOrder.SpatialOpOrder = SpatialOpOrder.AnyOrder): Boolean = {
val prop = checkOrder(op.getExpression1, op.getExpression2)
// validate that property and literal are in the specified order
prop.exists { p =>
val ordered = order match {
case SpatialOpOrder.AnyOrder => true
case SpatialOpOrder.PropertyFirst => !p.flipped
case SpatialOpOrder.LiteralFirst => p.flipped
}
ordered && Option(FastConverter.evaluate(p.literal, classOf[Geometry])).exists(isWholeWorld)
}
}
def isWholeWorld[G <: Geometry](g: G): Boolean = g != null && g.union.covers(WholeWorldPolygon)
/**
* Returns the intersection of this geometry with the world polygon
*
* Note: may return the geometry itself if it is already covered by the world
*
* @param g geometry
* @return
*/
def trimToWorld(g: Geometry): Geometry =
if (WholeWorldPolygon.covers(g)) { g } else { g.intersection(WholeWorldPolygon) }
/**
* Add way points to a geometry, preventing it from being split by JTS anti-meridian (AM) handling
*
* @param g geom
* @return
*/
def addWayPointsToBBOX(g: Geometry): Geometry = {
val geomArray = g.getCoordinates
val correctedGeom = GeometryUtils.addWayPoints(geomArray).toArray
if (geomArray.length == correctedGeom.length) { g } else { g.getFactory.createPolygon(correctedGeom) }
}
/**
* Extracts geometries from a filter into a sequence of OR'd geometries
*
* @param filter filter to evaluate
* @param attribute attribute to consider
* @param intersect intersect AND'd geometries or return them all
* note if not intersected, 'and/or' distinction will be lost
* @return geometry bounds from spatial filters
*/
def extractGeometries(filter: Filter, attribute: String, intersect: Boolean = true): FilterValues[Geometry] =
extractUnclippedGeometries(filter, attribute, intersect).map(trimToWorld)
/**
* Extract geometries from a filter without validating boundaries.
*
* @param filter filter to evaluate
* @param attribute attribute to consider
* @param intersect intersect AND'd geometries or return them all
* @return geometry bounds from spatial filters
*/
private def extractUnclippedGeometries(filter: Filter, attribute: String, intersect: Boolean): FilterValues[Geometry] = {
filter match {
case o: Or =>
val all = o.getChildren.map(extractUnclippedGeometries(_, attribute, intersect))
val join = FilterValues.or[Geometry]((l, r) => l ++ r) _
all.reduceLeftOption[FilterValues[Geometry]](join).getOrElse(FilterValues.empty)
case a: And =>
val all = a.getChildren.map(extractUnclippedGeometries(_, attribute, intersect)).filter(_.nonEmpty)
if (intersect) {
val intersect = FilterValues.and[Geometry]((l, r) => Option(l.intersection(r)).filterNot(_.isEmpty)) _
all.reduceLeftOption[FilterValues[Geometry]](intersect).getOrElse(FilterValues.empty)
} else {
FilterValues(all.flatMap(_.values))
}
// Note: although not technically required, all known spatial predicates are also binary spatial operators
case f: BinarySpatialOperator if isSpatialFilter(f) =>
FilterValues(GeometryProcessing.extract(f, attribute))
case _ =>
FilterValues.empty
}
}
/**
* Extracts intervals from a filter. Intervals will be merged where possible - the resulting sequence
* is considered to be a union (i.e. OR)
*
* @param filter filter to evaluate
* @param attribute attribute to consider
* @param intersect intersect extracted values together, or return them all
* note if not intersected, 'and/or' distinction will be lost
* @return a sequence of intervals, if any. disjoint intervals will result in Seq((null, null))
*/
def extractIntervals(filter: Filter,
attribute: String,
intersect: Boolean = true,
handleExclusiveBounds: Boolean = false): FilterValues[Bounds[ZonedDateTime]] = {
extractAttributeBounds(filter, attribute, classOf[Date]).map { bounds =>
var lower, upper: Bound[ZonedDateTime] = null
if (!handleExclusiveBounds || bounds.lower.value.isEmpty || bounds.upper.value.isEmpty ||
(bounds.lower.inclusive && bounds.upper.inclusive)) {
lower = createDateTime(bounds.lower, roundSecondsUp, handleExclusiveBounds)
upper = createDateTime(bounds.upper, roundSecondsDown, handleExclusiveBounds)
} else {
// check for extremely narrow filters where our rounding makes the result out-of-order
// note: both upper and lower are known to be defined based on hitting this else branch
val margin = if (bounds.lower.inclusive || bounds.upper.inclusive) { 1000 } else { 2000 }
val round = bounds.upper.value.get.getTime - bounds.lower.value.get.getTime > margin
lower = createDateTime(bounds.lower, roundSecondsUp, round)
upper = createDateTime(bounds.upper, roundSecondsDown, round)
}
Bounds(lower, upper)
}
}
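// Illustrative usage sketch (assumes GeoTools ECQL is available at runtime; the attribute
// name "dtg" and the dates are made up): pull the time interval implied by a DURING
// predicate out of a filter.
private object extractIntervalsExample {
  def demo(): FilterValues[Bounds[ZonedDateTime]] = {
    val filter = org.geotools.filter.text.ecql.ECQL.toFilter(
      "dtg DURING 2020-01-01T00:00:00Z/2020-01-02T00:00:00Z")
    // expected: a single interval from 2020-01-01T00:00Z to 2020-01-02T00:00Z
    extractIntervals(filter, "dtg")
  }
}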
private def createDateTime(bound: Bound[Date],
round: ZonedDateTime => ZonedDateTime,
roundExclusive: Boolean): Bound[ZonedDateTime] = {
if (bound.value.isEmpty) { Bound.unbounded } else {
val dt = bound.value.map(d => ZonedDateTime.ofInstant(toInstant(d), ZoneOffset.UTC))
if (roundExclusive && !bound.inclusive) {
Bound(dt.map(round), inclusive = true)
} else {
Bound(dt, bound.inclusive)
}
}
}
private def roundSecondsUp(dt: ZonedDateTime): ZonedDateTime = dt.plusSeconds(1).withNano(0)
private def roundSecondsDown(dt: ZonedDateTime): ZonedDateTime = {
val nanos = dt.getNano
if (nanos == 0) { dt.minusSeconds(1) } else { dt.withNano(0) }
}
/**
* Extracts bounds from filters that pertain to a given attribute. Bounds will be merged where
* possible.
*
* @param filter filter to evaluate
* @param attribute attribute name to consider
* @param binding attribute type
* @return a sequence of bounds, if any
*/
def extractAttributeBounds[T](filter: Filter, attribute: String, binding: Class[T]): FilterValues[Bounds[T]] = {
filter match {
case o: Or =>
val union = FilterValues.or[Bounds[T]](Bounds.union[T]) _
o.getChildren.map(f =>
extractAttributeBounds(f, attribute, binding)
).reduceLeft[FilterValues[Bounds[T]]]((acc, child) => {
if (acc.isEmpty || child.isEmpty) {
FilterValues.empty
} else {
union(acc, child)
}
})
case a: And =>
val all = a.getChildren.flatMap { f =>
val child = extractAttributeBounds(f, attribute, binding)
if (child.isEmpty) { Seq.empty } else { Seq(child) }
}
val intersection = FilterValues.and[Bounds[T]](Bounds.intersection[T]) _
all.reduceLeftOption[FilterValues[Bounds[T]]](intersection).getOrElse(FilterValues.empty)
case f: PropertyIsEqualTo =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = true)
FilterValues(Seq(Bounds(bound, bound)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = true, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsBetween =>
try {
val prop = f.getExpression.asInstanceOf[PropertyName].getPropertyName
if (prop != attribute) { FilterValues.empty } else {
// note that between is inclusive
val lower = Bound(Option(FastConverter.evaluate(f.getLowerBoundary, binding)), inclusive = true)
val upper = Bound(Option(FastConverter.evaluate(f.getUpperBoundary, binding)), inclusive = true)
FilterValues(Seq(Bounds(lower, upper)))
}
} catch {
case e: Exception =>
FilterHelperLogger.log.warn(s"Unable to extract bounds from filter '${filterToString(f)}'", e)
FilterValues.empty
}
case f: During if classOf[Date].isAssignableFrom(binding) =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, classOf[Period])).map { p =>
// note that during is exclusive
val lower = Bound(Option(p.getBeginning.getPosition.getDate.asInstanceOf[T]), inclusive = false)
val upper = Bound(Option(p.getEnding.getPosition.getDate.asInstanceOf[T]), inclusive = false)
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = false, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsGreaterThan =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = false)
val (lower, upper) = if (e.flipped) { (Bound.unbounded[T], bound) } else { (bound, Bound.unbounded[T]) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = false, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsGreaterThanOrEqualTo =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = true)
val (lower, upper) = if (e.flipped) { (Bound.unbounded[T], bound) } else { (bound, Bound.unbounded[T]) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = true, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsLessThan =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = false)
val (lower, upper) = if (e.flipped) { (bound, Bound.unbounded[T]) } else { (Bound.unbounded[T], bound) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = false, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsLessThanOrEqualTo =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = true)
val (lower, upper) = if (e.flipped) { (bound, Bound.unbounded[T]) } else { (Bound.unbounded[T], bound) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = true, binding)
}.getOrElse(FilterValues.empty)
case f: Before =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
// note that before is exclusive
val bound = Bound(Some(lit), inclusive = false)
val (lower, upper) = if (e.flipped) { (bound, Bound.unbounded[T]) } else { (Bound.unbounded[T], bound) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = false, binding)
}.getOrElse(FilterValues.empty)
case f: After =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap {
case e: PropertyLiteral =>
Option(FastConverter.evaluate(e.literal, binding)).map { lit =>
// note that after is exclusive
val bound = Bound(Some(lit), inclusive = false)
val (lower, upper) = if (e.flipped) { (Bound.unbounded[T], bound) } else { (bound, Bound.unbounded[T]) }
FilterValues(Seq(Bounds(lower, upper)))
}
case e: FunctionLiteral => extractFunctionBounds(e, inclusive = false, binding)
}.getOrElse(FilterValues.empty)
case f: PropertyIsLike if binding == classOf[String] =>
try {
val prop = f.getExpression.asInstanceOf[PropertyName].getPropertyName
if (prop != attribute) { FilterValues.empty } else {
// find the first wildcard and create a range prefix
val literal = f.getLiteral
var i = literal.indexWhere(Wildcards.contains)
// check for escaped wildcards
while (i > 1 && literal.charAt(i - 1) == '\\' && literal.charAt(i - 2) == '\\') {
i = literal.indexWhere(Wildcards.contains, i + 1)
}
if (i == -1) {
val literals = if (f.isMatchingCase) { Seq(literal) } else { casePermutations(literal) }
val bounds = literals.map { lit =>
val bound = Bound(Some(lit), inclusive = true)
Bounds(bound, bound)
}
FilterValues(bounds.asInstanceOf[Seq[Bounds[T]]], precise = true)
} else {
val prefix = literal.substring(0, i)
val prefixes = if (f.isMatchingCase) { Seq(prefix) } else { casePermutations(prefix) }
val bounds = prefixes.map { p =>
Bounds(Bound(Some(p), inclusive = true), Bound(Some(p + WildcardSuffix), inclusive = true))
}
// our ranges fully capture the filter if there's a single trailing multi-char wildcard
val exact = i == literal.length - 1 && literal.charAt(i) == WildcardMultiChar
FilterValues(bounds.asInstanceOf[Seq[Bounds[T]]], precise = exact)
}
}
} catch {
case e: Exception =>
FilterHelperLogger.log.warn(s"Unable to extract bounds from filter '${filterToString(f)}'", e)
FilterValues.empty
}
case f: Not if f.getFilter.isInstanceOf[PropertyIsNull] =>
try {
val isNull = f.getFilter.asInstanceOf[PropertyIsNull]
val prop = isNull.getExpression.asInstanceOf[PropertyName].getPropertyName
if (prop != attribute) { FilterValues.empty } else {
FilterValues(Seq(Bounds.everything[T]))
}
} catch {
case e: Exception =>
FilterHelperLogger.log.warn(s"Unable to extract bounds from filter '${filterToString(f)}'", e)
FilterValues.empty
}
case f: Not =>
// we extract the sub-filter bounds, then invert them
val inverted = extractAttributeBounds(f.getFilter, attribute, binding)
if (inverted.isEmpty) {
inverted
} else if (inverted.disjoint) {
FilterValues(Seq(Bounds.everything[T])) // equivalent to not null
} else if (!inverted.precise) {
FilterHelperLogger.log.warn(s"Falling back to full table scan for inverted query: '${filterToString(f)}'")
FilterValues(Seq(Bounds.everything[T]), precise = false)
} else {
// NOT(A OR B) turns into NOT(A) AND NOT(B)
val uninverted = inverted.values.map { bounds =>
// NOT the single bound
val not = bounds.bounds match {
case (None, None) => Seq.empty
case (Some(lo), None) => Seq(Bounds(Bound.unbounded, Bound(Some(lo), !bounds.lower.inclusive)))
case (None, Some(hi)) => Seq(Bounds(Bound(Some(hi), !bounds.upper.inclusive), Bound.unbounded))
case (Some(lo), Some(hi)) => Seq(
Bounds(Bound.unbounded, Bound(Some(lo), !bounds.lower.inclusive)),
Bounds(Bound(Some(hi), !bounds.upper.inclusive), Bound.unbounded)
)
}
FilterValues(not)
}
// AND together
val intersect = FilterValues.and[Bounds[T]](Bounds.intersection[T]) _
uninverted.reduceLeft[FilterValues[Bounds[T]]](intersect)
}
case f: TEquals =>
checkOrder(f.getExpression1, f.getExpression2).filter(_.name == attribute).flatMap { prop =>
Option(FastConverter.evaluate(prop.literal, binding)).map { lit =>
val bound = Bound(Some(lit), inclusive = true)
FilterValues(Seq(Bounds(bound, bound)))
}
}.getOrElse(FilterValues.empty)
case _ => FilterValues.empty
}
}
private def extractFunctionBounds[T](function: FunctionLiteral,
inclusive: Boolean,
binding: Class[T]): Option[FilterValues[Bounds[T]]] = {
// TODO GEOMESA-1990 extract some meaningful bounds from the function
Some(FilterValues(Seq(Bounds.everything[T]), precise = false))
}
/**
* Calculates all the different case permutations of a string.
*
* For example, "foo" -> Seq("foo", "Foo", "fOo", "foO", "fOO", "FoO", "FOo", "FOO")
*
* @param string input string
* @return
*/
private def casePermutations(string: String): Seq[String] = {
val max = FilterProperties.CaseInsensitiveLimit.toInt.getOrElse {
// has a valid default value so should never return None
throw new IllegalStateException(
s"Error getting default value for ${FilterProperties.CaseInsensitiveLimit.property}")
}
val lower = string.toLowerCase(Locale.US)
val upper = string.toUpperCase(Locale.US)
// account for chars without upper/lower cases, which we don't need to permute
val count = (0 until lower.length).count(i => lower(i) != upper(i))
if (count > max) {
FilterHelperLogger.log.warn(s"Not expanding case-insensitive prefix due to length: $string")
Seq.empty
} else {
// there will be 2^n different permutations, accounting for chars that don't have an upper/lower case
val permutations = Array.fill(math.pow(2, count).toInt)(Array(lower: _*))
var i = 0 // track the index of the current char
var c = 0 // track the index of the bit check, which skips chars that don't have an upper/lower case
while (i < string.length) {
val upperChar = upper.charAt(i)
if (lower.charAt(i) != upperChar) {
var j = 0
while (j < permutations.length) {
// set upper/lower based on the bit
if (((j >> c) & 1) != 0) {
permutations(j)(i) = upperChar
}
j += 1
}
c += 1
}
i += 1
}
permutations.map(new String(_))
}
}
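// Illustrative sketch of the bit-mask enumeration used by casePermutations above (stdlib
// only, no limit handling; the helper name is made up): each letter that has a distinct
// upper case contributes one bit, and bit j of the permutation index decides whether that
// letter is upper-cased.
private object casePermutationSketch {
  def permutations(s: String): Seq[String] = {
    val lower = s.toLowerCase(Locale.US)
    val upper = s.toUpperCase(Locale.US)
    val letters = (0 until s.length).filter(i => lower(i) != upper(i))
    (0 until (1 << letters.length)).map { mask =>
      val chars = lower.toCharArray
      letters.zipWithIndex.foreach { case (i, bit) =>
        if (((mask >> bit) & 1) != 0) { chars(i) = upper(i) }
      }
      new String(chars)
    }
  }
  // permutations("foo") == Seq("foo", "Foo", "fOo", "FOo", "foO", "FoO", "fOO", "FOO")
}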
/**
* Extract property names from a filter. If a schema is available,
* prefer `propertyNames(Filter, SimpleFeatureType)` as that will handle
* things like default geometry bboxes
*
* @param filter filter
* @return unique property names referenced in the filter, in sorted order
*/
def propertyNames(filter: Filter): Seq[String] = propertyNames(filter, null)
/**
* Extract property names from a filter
*
* @param filter filter
* @param sft simple feature type
* @return unique property names referenced in the filter, in sorted order
*/
def propertyNames(filter: Filter, sft: SimpleFeatureType): Seq[String] =
DataUtilities.attributeNames(filter, sft).toSeq.distinct.sorted
def propertyNames(expression: Expression, sft: SimpleFeatureType): Seq[String] =
DataUtilities.attributeNames(expression, sft).toSeq.distinct.sorted
def hasIdFilter(filter: Filter): Boolean =
filter.accept(new IdDetectingFilterVisitor, false).asInstanceOf[Boolean]
def filterListAsAnd(filters: Seq[Filter]): Option[Filter] = andOption(filters)
def filterListAsOr(filters: Seq[Filter]): Option[Filter] = orOption(filters)
/**
* Simplifies filters to make them easier to process.
*
* Current simplifications:
*
* 1) Extracts out common parts in an OR clause to simplify further processing.
*
* Example: OR(AND(1, 2), AND(1, 3), AND(1, 4)) -> AND(1, OR(2, 3, 4))
*
* 2) N/A - add more simplifications here as needed
*
* @param filter filter
* @return
*/
def simplify(filter: Filter): Filter = {
def deduplicateOrs(f: Filter): Filter = f match {
case and: And => ff.and(and.getChildren.map(deduplicateOrs))
case or: Or =>
// OR(AND(1,2,3), AND(1,2,4)) -> Seq(Seq(1,2,3), Seq(1,2,4))
val decomposed = or.getChildren.map(decomposeAnd)
val clauses = decomposed.head // Seq(1,2,3)
val duplicates = clauses.filter(c => decomposed.tail.forall(_.contains(c))) // Seq(1,2)
if (duplicates.isEmpty) { or } else {
val simplified = decomposed.flatMap(d => andOption(d.filterNot(duplicates.contains)))
if (simplified.length < decomposed.length) {
// the duplicated filters are an entire clause, so we can ignore the rest of the clauses
andFilters(duplicates)
} else {
andFilters(orOption(simplified).toSeq ++ duplicates)
}
}
case _ => f
}
// TODO GEOMESA-1533 simplify ANDs of ORs for DNF
flatten(deduplicateOrs(flatten(filter)))
}
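// Illustrative usage sketch of simplify (assumes GeoTools ECQL is available at runtime;
// the attribute names are made up). The shared clause "a = 1" is factored out of the OR,
// matching the example in the scaladoc above (clause order may differ).
private object simplifyExample {
  def demo(): String = {
    import org.geotools.filter.text.ecql.ECQL
    val filter = ECQL.toFilter("(a = 1 AND b = 2) OR (a = 1 AND c = 3)")
    ECQL.toCQL(simplify(filter)) // expected to be equivalent to: a = 1 AND (b = 2 OR c = 3)
  }
}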
/**
* Flattens nested ands and ors.
*
* Example: AND(1, AND(2, 3)) -> AND(1, 2, 3)
*
* @param filter filter
* @return
*/
def flatten(filter: Filter): Filter = {
filter match {
case and: And => ff.and(flattenAnd(and.getChildren))
case or: Or => ff.or(flattenOr(or.getChildren))
case f: Filter => f
}
}
private [filter] def flattenAnd(filters: Seq[Filter]): ListBuffer[Filter] = {
val remaining = ListBuffer.empty[Filter] ++ filters
val result = ListBuffer.empty[Filter]
do {
remaining.remove(0) match {
case f: And => remaining.appendAll(f.getChildren)
case f => result.append(flatten(f))
}
} while (remaining.nonEmpty)
result
}
private [filter] def flattenOr(filters: Seq[Filter]): ListBuffer[Filter] = {
val remaining = ListBuffer.empty[Filter] ++ filters
val result = ListBuffer.empty[Filter]
do {
remaining.remove(0) match {
case f: Or => remaining.appendAll(f.getChildren)
case f => result.append(flatten(f))
}
} while (remaining.nonEmpty)
result
}
private object SpatialOpOrder extends Enumeration {
type SpatialOpOrder = Value
val PropertyFirst, LiteralFirst, AnyOrder = Value
}
}
|
locationtech/geomesa
|
geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/FilterHelper.scala
|
Scala
|
apache-2.0
| 25,609
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool
import akka.actor.{Actor, ActorRef, ActorRefFactory, Props}
import org.apache.openwhisk.common.{Logging, LoggingMarkers, MetricEmitter, TransactionId}
import org.apache.openwhisk.core.connector.MessageFeed
import org.apache.openwhisk.core.entity.ExecManifest.ReactivePrewarmingConfig
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.size._
import scala.annotation.tailrec
import scala.collection.immutable
import scala.concurrent.duration._
import scala.util.{Random, Try}
case class ColdStartKey(kind: String, memory: ByteSize)
case object EmitMetrics
case object AdjustPrewarmedContainer
/**
* A pool managing containers to run actions on.
*
* This pool fulfills the other half of the ContainerProxy contract. Only
* one job (either Start or Run) is sent to a child-actor at any given
* time. The pool then waits for a response from that container, indicating
* the container is done with the job. Only then will the pool send another
* request to that container.
*
* Upon actor creation, the pool will start to prewarm containers according
* to the provided prewarmConfig, iff set. Those containers will **not** be
* part of the poolsize calculation, which is capped by the poolSize parameter.
* Prewarm containers are only used if they have matching arguments
* (kind, memory) and there is space in the pool.
*
* @param childFactory method to create new container proxy actor
* @param feed actor to request more work from
* @param prewarmConfig optional settings for container prewarming
* @param poolConfig config for the ContainerPool
*/
class ContainerPool(childFactory: ActorRefFactory => ActorRef,
feed: ActorRef,
prewarmConfig: List[PrewarmingConfig] = List.empty,
poolConfig: ContainerPoolConfig)(implicit val logging: Logging)
extends Actor {
import ContainerPool.memoryConsumptionOf
implicit val ec = context.dispatcher
var freePool = immutable.Map.empty[ActorRef, ContainerData]
var busyPool = immutable.Map.empty[ActorRef, ContainerData]
var prewarmedPool = immutable.Map.empty[ActorRef, PreWarmedData]
var prewarmStartingPool = immutable.Map.empty[ActorRef, (String, ByteSize)]
// If all memory slots are occupied and if there is currently no container to be removed, then the actions will be
// buffered here to keep order of computation.
// Otherwise actions with small memory-limits could block actions with large memory limits.
var runBuffer = immutable.Queue.empty[Run]
// Track the resent buffer head - so that we don't resend buffer head multiple times
var resent: Option[Run] = None
val logMessageInterval = 10.seconds
//periodically emit metrics (don't need to do this for each message!)
context.system.scheduler.schedule(30.seconds, 10.seconds, self, EmitMetrics)
// Key is ColdStartKey, value is the number of cold starts per minute
var coldStartCount = immutable.Map.empty[ColdStartKey, Int]
adjustPrewarmedContainer(true, false)
// check periodically and adjust the prewarmed containers (delete those unused for some time, create additional ones as needed)
// add some random amount to this schedule to avoid a herd of container removal + creation
val interval = poolConfig.prewarmExpirationCheckInterval + poolConfig.prewarmExpirationCheckIntervalVariance
.map(v =>
Random
.nextInt(v.toSeconds.toInt))
.getOrElse(0)
.seconds
context.system.scheduler.schedule(2.seconds, interval, self, AdjustPrewarmedContainer)
def logContainerStart(r: Run, containerState: String, activeActivations: Int, container: Option[Container]): Unit = {
val namespaceName = r.msg.user.namespace.name.asString
val actionName = r.action.name.name
val actionNamespace = r.action.namespace.namespace
val maxConcurrent = r.action.limits.concurrency.maxConcurrent
val activationId = r.msg.activationId.toString
r.msg.transid.mark(
this,
LoggingMarkers.INVOKER_CONTAINER_START(containerState, namespaceName, actionNamespace, actionName),
s"containerStart containerState: $containerState container: $container activations: $activeActivations of max $maxConcurrent action: $actionName namespace: $namespaceName activationId: $activationId",
akka.event.Logging.InfoLevel)
}
def receive: Receive = {
// A job to run on a container
//
// Run messages are received either via the feed or from child containers which cannot process
// their requests and send them back to the pool for rescheduling (this may happen if "docker" operations
// fail for example, or a container has aged and was destroying itself when a new request was assigned)
case r: Run =>
// Check if the message is resent from the buffer. Only the first message on the buffer can be resent.
val isResentFromBuffer = runBuffer.nonEmpty && runBuffer.dequeueOption.exists(_._1.msg == r.msg)
      // Only process the request if there are no other requests waiting for free slots, or if the current request
      // is the next request to process
// It is guaranteed, that only the first message on the buffer is resent.
if (runBuffer.isEmpty || isResentFromBuffer) {
if (isResentFromBuffer) {
//remove from resent tracking - it may get resent again, or get processed
resent = None
}
val kind = r.action.exec.kind
val memory = r.action.limits.memory.megabytes.MB
val createdContainer =
// Schedule a job to a warm container
ContainerPool
.schedule(r.action, r.msg.user.namespace.name, freePool)
.map(container => (container, container._2.initingState)) //warmed, warming, and warmingCold always know their state
.orElse(
            // There was no warm/warming/warmingCold container. Try to take a prewarm container or a cold container.
            // When taking a prewarm container, there is no need to check whether the user memory is sufficient
takePrewarmContainer(r.action)
.map(container => (container, "prewarmed"))
.orElse {
// Is there enough space to create a new container or do other containers have to be removed?
if (hasPoolSpaceFor(busyPool ++ freePool ++ prewarmedPool, prewarmStartingPool, memory)) {
val container = Some(createContainer(memory), "cold")
incrementColdStartCount(kind, memory)
container
} else None
})
.orElse(
// Remove a container and create a new one for the given job
ContainerPool
              // Only free up the amount that actually needs to be freed
.remove(freePool, Math.min(r.action.limits.memory.megabytes, memoryConsumptionOf(freePool)).MB)
.map(removeContainer)
// If the list had at least one entry, enough containers were removed to start the new container. After
// removing the containers, we are not interested anymore in the containers that have been removed.
.headOption
.map(_ =>
takePrewarmContainer(r.action)
.map(container => (container, "recreatedPrewarm"))
.getOrElse {
val container = (createContainer(memory), "recreated")
incrementColdStartCount(kind, memory)
container
}))
createdContainer match {
case Some(((actor, data), containerState)) =>
//increment active count before storing in pool map
val newData = data.nextRun(r)
val container = newData.getContainer
if (newData.activeActivationCount < 1) {
logging.error(this, s"invalid activation count < 1 ${newData}")
}
//only move to busyPool if max reached
if (!newData.hasCapacity()) {
if (r.action.limits.concurrency.maxConcurrent > 1) {
logging.info(
this,
s"container ${container} is now busy with ${newData.activeActivationCount} activations")
}
busyPool = busyPool + (actor -> newData)
freePool = freePool - actor
} else {
//update freePool to track counts
freePool = freePool + (actor -> newData)
}
// Remove the action that was just executed from the buffer and execute the next one in the queue.
if (isResentFromBuffer) {
            // It is guaranteed that the currently executed message is the head of the queue, if the message comes
// from the buffer
val (_, newBuffer) = runBuffer.dequeue
runBuffer = newBuffer
// Try to process the next item in buffer (or get another message from feed, if buffer is now empty)
processBufferOrFeed()
}
actor ! r // forwards the run request to the container
logContainerStart(r, containerState, newData.activeActivationCount, container)
case None =>
// this can also happen if createContainer fails to start a new container, or
// if a job is rescheduled but the container it was allocated to has not yet destroyed itself
// (and a new container would over commit the pool)
val isErrorLogged = r.retryLogDeadline.map(_.isOverdue).getOrElse(true)
val retryLogDeadline = if (isErrorLogged) {
logging.warn(
this,
s"Rescheduling Run message, too many message in the pool, " +
s"freePoolSize: ${freePool.size} containers and ${memoryConsumptionOf(freePool)} MB, " +
s"busyPoolSize: ${busyPool.size} containers and ${memoryConsumptionOf(busyPool)} MB, " +
s"maxContainersMemory ${poolConfig.userMemory.toMB} MB, " +
s"userNamespace: ${r.msg.user.namespace.name}, action: ${r.action}, " +
s"needed memory: ${r.action.limits.memory.megabytes} MB, " +
s"waiting messages: ${runBuffer.size}")(r.msg.transid)
MetricEmitter.emitCounterMetric(LoggingMarkers.CONTAINER_POOL_RESCHEDULED_ACTIVATION)
Some(logMessageInterval.fromNow)
} else {
r.retryLogDeadline
}
if (!isResentFromBuffer) {
// Add this request to the buffer, as it is not there yet.
runBuffer = runBuffer.enqueue(Run(r.action, r.msg, retryLogDeadline))
}
//buffered items will be processed via processBufferOrFeed()
}
} else {
// There are currently actions waiting to be executed before this action gets executed.
// These waiting actions were not able to free up enough memory.
runBuffer = runBuffer.enqueue(r)
}
// Container is free to take more work
case NeedWork(warmData: WarmedData) =>
val oldData = freePool.get(sender()).getOrElse(busyPool(sender()))
val newData =
warmData.copy(lastUsed = oldData.lastUsed, activeActivationCount = oldData.activeActivationCount - 1)
if (newData.activeActivationCount < 0) {
logging.error(this, s"invalid activation count after warming < 1 ${newData}")
}
if (newData.hasCapacity()) {
//remove from busy pool (may already not be there), put back into free pool (to update activation counts)
freePool = freePool + (sender() -> newData)
if (busyPool.contains(sender())) {
busyPool = busyPool - sender()
if (newData.action.limits.concurrency.maxConcurrent > 1) {
logging.info(
this,
s"concurrent container ${newData.container} is no longer busy with ${newData.activeActivationCount} activations")
}
}
} else {
busyPool = busyPool + (sender() -> newData)
freePool = freePool - sender()
}
processBufferOrFeed()
// Container is prewarmed and ready to take work
case NeedWork(data: PreWarmedData) =>
prewarmStartingPool = prewarmStartingPool - sender()
prewarmedPool = prewarmedPool + (sender() -> data)
// Container got removed
case ContainerRemoved(replacePrewarm) =>
// if container was in free pool, it may have been processing (but under capacity),
// so there is capacity to accept another job request
freePool.get(sender()).foreach { f =>
freePool = freePool - sender()
}
// container was busy (busy indicates at full capacity), so there is capacity to accept another job request
busyPool.get(sender()).foreach { _ =>
busyPool = busyPool - sender()
}
processBufferOrFeed()
// in case this was a prewarm
prewarmedPool.get(sender()).foreach { data =>
prewarmedPool = prewarmedPool - sender()
}
// in case this was a starting prewarm
prewarmStartingPool.get(sender()).foreach { _ =>
logging.info(this, "failed starting prewarm, removed")
prewarmStartingPool = prewarmStartingPool - sender()
}
//backfill prewarms on every ContainerRemoved(replacePrewarm = true), just in case
if (replacePrewarm) {
adjustPrewarmedContainer(false, false) //in case a prewarm is removed due to health failure or crash
}
// This message is received for one of these reasons:
// 1. Container errored while resuming a warm container, could not process the job, and sent the job back
// 2. The container aged, is destroying itself, and was assigned a job which it had to send back
// 3. The container aged and is destroying itself
// Update the free/busy lists but no message is sent to the feed since there is no change in capacity yet
case RescheduleJob =>
freePool = freePool - sender()
busyPool = busyPool - sender()
case EmitMetrics =>
emitMetrics()
case AdjustPrewarmedContainer =>
adjustPrewarmedContainer(false, true)
}
  /** Resend the next item in the buffer, or request the next item from the feed if the buffer is empty. */
def processBufferOrFeed() = {
// If buffer has more items, and head has not already been resent, send next one, otherwise get next from feed.
runBuffer.dequeueOption match {
case Some((run, _)) => //run the first from buffer
implicit val tid = run.msg.transid
//avoid sending dupes
if (resent.isEmpty) {
logging.info(this, s"re-processing from buffer (${runBuffer.length} items in buffer)")
resent = Some(run)
self ! run
} else {
//do not resend the buffer head multiple times (may reach this point from multiple messages, before the buffer head is re-processed)
}
case None => //feed me!
feed ! MessageFeed.Processed
}
}
/** adjust prewarm containers up to the configured requirements for each kind/memory combination. */
def adjustPrewarmedContainer(init: Boolean, scheduled: Boolean): Unit = {
if (scheduled) {
//on scheduled time, remove expired prewarms
ContainerPool.removeExpired(poolConfig, prewarmConfig, prewarmedPool).foreach { p =>
prewarmedPool = prewarmedPool - p
p ! Remove
}
//on scheduled time, emit cold start counter metric with memory + kind
coldStartCount foreach { coldStart =>
val coldStartKey = coldStart._1
MetricEmitter.emitCounterMetric(
LoggingMarkers.CONTAINER_POOL_PREWARM_COLDSTART(coldStartKey.memory.toString, coldStartKey.kind))
}
}
//fill in missing prewarms (replaces any deletes)
ContainerPool
.increasePrewarms(init, scheduled, coldStartCount, prewarmConfig, prewarmedPool, prewarmStartingPool)
.foreach { c =>
val config = c._1
val currentCount = c._2._1
val desiredCount = c._2._2
if (currentCount < desiredCount) {
(currentCount until desiredCount).foreach { _ =>
prewarmContainer(config.exec, config.memoryLimit, config.reactive.map(_.ttl))
}
}
}
if (scheduled) {
// lastly, clear coldStartCounts each time scheduled event is processed to reset counts
coldStartCount = immutable.Map.empty[ColdStartKey, Int]
}
}
/** Creates a new container and updates state accordingly. */
def createContainer(memoryLimit: ByteSize): (ActorRef, ContainerData) = {
val ref = childFactory(context)
val data = MemoryData(memoryLimit)
freePool = freePool + (ref -> data)
ref -> data
}
/** Creates a new prewarmed container */
def prewarmContainer(exec: CodeExec[_], memoryLimit: ByteSize, ttl: Option[FiniteDuration]): Unit = {
if (hasPoolSpaceFor(busyPool ++ freePool ++ prewarmedPool, prewarmStartingPool, memoryLimit)) {
val newContainer = childFactory(context)
prewarmStartingPool = prewarmStartingPool + (newContainer -> (exec.kind, memoryLimit))
newContainer ! Start(exec, memoryLimit, ttl)
} else {
logging.warn(
this,
s"Cannot create prewarm container due to reach the invoker memory limit: ${poolConfig.userMemory.toMB}")
}
}
  /** This is only for cold start statistics of prewarm configs, i.e. not blackbox or other configs. */
def incrementColdStartCount(kind: String, memoryLimit: ByteSize): Unit = {
prewarmConfig
.filter { config =>
kind == config.exec.kind && memoryLimit == config.memoryLimit
}
.foreach { _ =>
val coldStartKey = ColdStartKey(kind, memoryLimit)
coldStartCount.get(coldStartKey) match {
case Some(value) => coldStartCount = coldStartCount + (coldStartKey -> (value + 1))
case None => coldStartCount = coldStartCount + (coldStartKey -> 1)
}
}
}
/**
* Takes a prewarm container out of the prewarmed pool
* iff a container with a matching kind and memory is found.
*
* @param action the action that holds the kind and the required memory.
* @return the container iff found
*/
def takePrewarmContainer(action: ExecutableWhiskAction): Option[(ActorRef, ContainerData)] = {
val kind = action.exec.kind
val memory = action.limits.memory.megabytes.MB
val now = Deadline.now
prewarmedPool.toSeq
.sortBy(_._2.expires.getOrElse(now))
.find {
case (_, PreWarmedData(_, `kind`, `memory`, _, _)) => true
case _ => false
}
.map {
case (ref, data) =>
// Move the container to the usual pool
freePool = freePool + (ref -> data)
prewarmedPool = prewarmedPool - ref
// Create a new prewarm container
// NOTE: prewarming ignores the action code in exec, but this is dangerous as the field is accessible to the
// factory
//get the appropriate ttl from prewarm configs
val ttl =
prewarmConfig.find(pc => pc.memoryLimit == memory && pc.exec.kind == kind).flatMap(_.reactive.map(_.ttl))
prewarmContainer(action.exec, memory, ttl)
(ref, data)
}
}
/** Removes a container and updates state accordingly. */
def removeContainer(toDelete: ActorRef) = {
toDelete ! Remove
freePool = freePool - toDelete
busyPool = busyPool - toDelete
}
/**
* Calculate if there is enough free memory within a given pool.
*
   * @param pool The pool that has to be checked for enough free memory.
* @param memory The amount of memory to check.
* @return true, if there is enough space for the given amount of memory.
*/
def hasPoolSpaceFor[A](pool: Map[A, ContainerData],
prewarmStartingPool: Map[A, (String, ByteSize)],
memory: ByteSize): Boolean = {
memoryConsumptionOf(pool) + prewarmStartingPool.map(_._2._2.toMB).sum + memory.toMB <= poolConfig.userMemory.toMB
}
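  // Illustrative sketch (editor addition, not part of the original source): the check above
  // reduces to "used + starting + requested <= budget" over plain megabyte counts. The helper
  // below restates that arithmetic with hypothetical numbers only, e.g. with a 2048 MB budget,
  // 1536 MB already in the pool and 256 MB of prewarms starting, a 256 MB request still fits,
  // while a 512 MB request does not.
  private def hasSpaceForSketch(usedMB: Long, startingMB: Long, requestedMB: Long, budgetMB: Long): Boolean =
    usedMB + startingMB + requestedMB <= budgetMB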
/**
* Log metrics about pool state (buffer size, buffer memory requirements, active number, active memory, prewarm number, prewarm memory)
*/
private def emitMetrics() = {
MetricEmitter.emitGaugeMetric(LoggingMarkers.CONTAINER_POOL_RUNBUFFER_COUNT, runBuffer.size)
MetricEmitter.emitGaugeMetric(
LoggingMarkers.CONTAINER_POOL_RUNBUFFER_SIZE,
runBuffer.map(_.action.limits.memory.megabytes).sum)
val containersInUse = freePool.filter(_._2.activeActivationCount > 0) ++ busyPool
MetricEmitter.emitGaugeMetric(LoggingMarkers.CONTAINER_POOL_ACTIVE_COUNT, containersInUse.size)
MetricEmitter.emitGaugeMetric(
LoggingMarkers.CONTAINER_POOL_ACTIVE_SIZE,
containersInUse.map(_._2.memoryLimit.toMB).sum)
MetricEmitter.emitGaugeMetric(
LoggingMarkers.CONTAINER_POOL_PREWARM_COUNT,
prewarmedPool.size + prewarmStartingPool.size)
MetricEmitter.emitGaugeMetric(
LoggingMarkers.CONTAINER_POOL_PREWARM_SIZE,
prewarmedPool.map(_._2.memoryLimit.toMB).sum + prewarmStartingPool.map(_._2._2.toMB).sum)
val unused = freePool.filter(_._2.activeActivationCount == 0)
val unusedMB = unused.map(_._2.memoryLimit.toMB).sum
MetricEmitter.emitGaugeMetric(LoggingMarkers.CONTAINER_POOL_IDLES_COUNT, unused.size)
MetricEmitter.emitGaugeMetric(LoggingMarkers.CONTAINER_POOL_IDLES_SIZE, unusedMB)
}
}
object ContainerPool {
/**
* Calculate the memory of a given pool.
*
* @param pool The pool with the containers.
* @return The memory consumption of all containers in the pool in Megabytes.
*/
protected[containerpool] def memoryConsumptionOf[A](pool: Map[A, ContainerData]): Long = {
pool.map(_._2.memoryLimit.toMB).sum
}
/**
* Finds the best container for a given job to run on.
*
* Selects an arbitrary warm container from the passed pool of idle containers
* that matches the action and the invocation namespace. The implementation uses
* matching such that structural equality of action and the invocation namespace
* is required.
* Returns None iff no matching container is in the idle pool.
* Does not consider pre-warmed containers.
*
* @param action the action to run
* @param invocationNamespace the namespace, that wants to run the action
* @param idles a map of idle containers, awaiting work
* @return a container if one found
*/
protected[containerpool] def schedule[A](action: ExecutableWhiskAction,
invocationNamespace: EntityName,
idles: Map[A, ContainerData]): Option[(A, ContainerData)] = {
idles
.find {
case (_, c @ WarmedData(_, `invocationNamespace`, `action`, _, _, _)) if c.hasCapacity() => true
case _ => false
}
.orElse {
idles.find {
case (_, c @ WarmingData(_, `invocationNamespace`, `action`, _, _)) if c.hasCapacity() => true
case _ => false
}
}
.orElse {
idles.find {
case (_, c @ WarmingColdData(`invocationNamespace`, `action`, _, _)) if c.hasCapacity() => true
case _ => false
}
}
}
/**
* Finds the oldest previously used container to remove to make space for the job passed to run.
* Depending on the space that has to be allocated, several containers might be removed.
*
* NOTE: This method is never called to remove an action that is in the pool already,
* since this would be picked up earlier in the scheduler and the container reused.
*
* @param pool a map of all free containers in the pool
* @param memory the amount of memory that has to be freed up
* @return a list of containers to be removed iff found
*/
@tailrec
protected[containerpool] def remove[A](pool: Map[A, ContainerData],
memory: ByteSize,
toRemove: List[A] = List.empty): List[A] = {
// Try to find a Free container that does NOT have any active activations AND is initialized with any OTHER action
val freeContainers = pool.collect {
      // Only warm containers will be removed. Prewarmed containers always stay in the pool.
case (ref, w: WarmedData) if w.activeActivationCount == 0 =>
ref -> w
}
if (memory > 0.B && freeContainers.nonEmpty && memoryConsumptionOf(freeContainers) >= memory.toMB) {
// Remove the oldest container if:
// - there is more memory required
// - there are still containers that can be removed
// - there are enough free containers that can be removed
val (ref, data) = freeContainers.minBy(_._2.lastUsed)
// Catch exception if remaining memory will be negative
val remainingMemory = Try(memory - data.memoryLimit).getOrElse(0.B)
remove(freeContainers - ref, remainingMemory, toRemove ++ List(ref))
} else {
      // If this is the first call: all containers are currently in use, or more memory is needed than can be
      // freed by removing containers.
      // Or, if this is one of the recursions: enough containers have been found to free up the memory that is
      // needed. -> Abort recursion
toRemove
}
}
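  // Illustrative sketch (editor addition, not part of the original source): the eviction loop of
  // `remove` restated over a plain map of hypothetical container names to (lastUsed, memoryMB)
  // pairs. It evicts least-recently-used entries until the requested megabytes are covered, or
  // returns what it has if the remaining pool cannot cover the request.
  private def removeOldestSketch(free: Map[String, (Long, Long)], neededMB: Long): List[String] = {
    @scala.annotation.tailrec
    def loop(pool: Map[String, (Long, Long)], remaining: Long, acc: List[String]): List[String] =
      if (remaining <= 0 || pool.isEmpty || pool.values.map(_._2).sum < remaining) acc
      else {
        val (oldest, (_, mem)) = pool.minBy(_._2._1) // least recently used container first
        loop(pool - oldest, remaining - mem, acc :+ oldest)
      }
    loop(free, neededMB, Nil)
  }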
/**
   * Find the expired actors in the prewarmedPool.
   *
   * @param poolConfig the container pool configuration
   * @param prewarmConfig the list of prewarming configurations
   * @param prewarmedPool the current prewarmed containers
   * @param logging the logger to use
   * @return a list of expired actors
*/
def removeExpired[A](poolConfig: ContainerPoolConfig,
prewarmConfig: List[PrewarmingConfig],
prewarmedPool: Map[A, PreWarmedData])(implicit logging: Logging): List[A] = {
val now = Deadline.now
val expireds = prewarmConfig
.flatMap { config =>
val kind = config.exec.kind
val memory = config.memoryLimit
config.reactive
.map { c =>
val expiredPrewarmedContainer = prewarmedPool.toSeq
.filter { warmInfo =>
warmInfo match {
case (_, p @ PreWarmedData(_, `kind`, `memory`, _, _)) if p.isExpired() => true
case _ => false
}
}
.sortBy(_._2.expires.getOrElse(now))
// emit expired container counter metric with memory + kind
MetricEmitter.emitCounterMetric(LoggingMarkers.CONTAINER_POOL_PREWARM_EXPIRED(memory.toString, kind))
if (expiredPrewarmedContainer.nonEmpty) {
logging.info(
this,
s"[kind: ${kind} memory: ${memory.toString}] ${expiredPrewarmedContainer.size} expired prewarmed containers")
}
expiredPrewarmedContainer.map(e => (e._1, e._2.expires.getOrElse(now)))
}
.getOrElse(List.empty)
}
.sortBy(_._2) //need to sort these so that if the results are limited, we take the oldest
.map(_._1)
if (expireds.nonEmpty) {
logging.info(this, s"removing up to ${poolConfig.prewarmExpirationLimit} of ${expireds.size} expired containers")
expireds.take(poolConfig.prewarmExpirationLimit).foreach { e =>
prewarmedPool.get(e).map { d =>
logging.info(this, s"removing expired prewarm of kind ${d.kind} with container ${d.container} ")
}
}
}
expireds.take(poolConfig.prewarmExpirationLimit)
}
/**
   * Find the number of prewarmed containers to add for each prewarming kind.
   *
   * @param init whether this is the initial adjustment at actor creation
   * @param scheduled whether this adjustment was triggered by the periodic schedule
   * @param coldStartCount the cold start counts of the previous minute
   * @param prewarmConfig the list of prewarming configurations
   * @param prewarmedPool the current prewarmed containers
   * @param prewarmStartingPool the prewarmed containers that are still starting
   * @param logging the logger to use
   * @return the current and desired count for each prewarming config
*/
def increasePrewarms(init: Boolean,
scheduled: Boolean,
coldStartCount: Map[ColdStartKey, Int],
prewarmConfig: List[PrewarmingConfig],
prewarmedPool: Map[ActorRef, PreWarmedData],
prewarmStartingPool: Map[ActorRef, (String, ByteSize)])(
implicit logging: Logging): Map[PrewarmingConfig, (Int, Int)] = {
prewarmConfig.map { config =>
val kind = config.exec.kind
val memory = config.memoryLimit
val runningCount = prewarmedPool.count {
// done starting (include expired, since they may not have been removed yet)
case (_, p @ PreWarmedData(_, `kind`, `memory`, _, _)) => true
// started but not finished starting (or expired)
case _ => false
}
val startingCount = prewarmStartingPool.count(p => p._2._1 == kind && p._2._2 == memory)
val currentCount = runningCount + startingCount
// determine how many are needed
val desiredCount: Int =
if (init) config.initialCount
else {
if (scheduled) {
// scheduled/reactive config backfill
config.reactive
.map(c => getReactiveCold(coldStartCount, c, kind, memory).getOrElse(c.minCount)) //reactive -> desired is either cold start driven, or minCount
.getOrElse(config.initialCount) //not reactive -> desired is always initial count
} else {
// normal backfill after removal - make sure at least minCount or initialCount is started
config.reactive.map(_.minCount).getOrElse(config.initialCount)
}
}
if (currentCount < desiredCount) {
logging.info(
this,
s"found ${currentCount} started and ${startingCount} starting; ${if (init) "initing" else "backfilling"} ${desiredCount - currentCount} pre-warms to desired count: ${desiredCount} for kind:${config.exec.kind} mem:${config.memoryLimit.toString}")(
TransactionId.invokerWarmup)
}
(config, (currentCount, desiredCount))
}.toMap
}
/**
   * Get the required number of prewarmed containers according to the cold starts
   * that happened in the previous minute.
   *
   * @param coldStartCount the cold start counts of the previous minute
   * @param config the reactive prewarming configuration
   * @param kind the action kind
   * @param memory the memory limit
   * @return the required number of prewarmed containers
*/
def getReactiveCold(coldStartCount: Map[ColdStartKey, Int],
config: ReactivePrewarmingConfig,
kind: String,
memory: ByteSize): Option[Int] = {
coldStartCount.get(ColdStartKey(kind, memory)).map { value =>
// Let's assume that threshold is `2`, increment is `1` in runtimes.json
// if cold start number in previous minute is `2`, requireCount is `2/2 * 1 = 1`
// if cold start number in previous minute is `4`, requireCount is `4/2 * 1 = 2`
math.min(math.max(config.minCount, (value / config.threshold) * config.increment), config.maxCount)
}
}
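  // Illustrative sketch (editor addition, not part of the original source): getReactiveCold's
  // arithmetic walked through with hypothetical config values (minCount = 1, maxCount = 4,
  // threshold = 2, increment = 1): 2 cold starts -> (2 / 2) * 1 = 1, 4 -> 2, and 10 would be
  // capped at maxCount = 4.
  private def reactiveColdSketch(coldStartsLastMinute: Int): Int = {
    val (minCount, maxCount, threshold, increment) = (1, 4, 2, 1)
    math.min(math.max(minCount, (coldStartsLastMinute / threshold) * increment), maxCount)
  }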
def props(factory: ActorRefFactory => ActorRef,
poolConfig: ContainerPoolConfig,
feed: ActorRef,
prewarmConfig: List[PrewarmingConfig] = List.empty)(implicit logging: Logging) =
Props(new ContainerPool(factory, feed, prewarmConfig, poolConfig))
}
/** Contains settings needed to perform container prewarming. */
case class PrewarmingConfig(initialCount: Int,
exec: CodeExec[_],
memoryLimit: ByteSize,
reactive: Option[ReactivePrewarmingConfig] = None)
|
akrabat/openwhisk
|
core/invoker/src/main/scala/org/apache/openwhisk/core/containerpool/ContainerPool.scala
|
Scala
|
apache-2.0
| 32,543
|
/**
* Copyright 2013-2014 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package securesocial.core.services
/**
* A mockable interface for the http client
*/
trait HttpService {
import play.api.libs.ws.WS.WSRequestHolder
def url(url: String): WSRequestHolder
}
object HttpService {
/**
* A default implementation for HttpService based on the Play WS client.
*/
class Default extends HttpService {
import play.api.libs.ws.WS
import play.api.libs.ws.WS._
import play.api.Play.current
def url(url: String): WSRequestHolder = WS.url(url)
}
}
|
matthewchartier/securesocial
|
module-code/app/securesocial/core/services/HttpService.scala
|
Scala
|
apache-2.0
| 1,153
|
package com.greencatsoft.angularjs.core
import scala.concurrent.Future
import scala.language.implicitConversions
import scala.scalajs.js
import scala.scalajs.js.Any.{ fromFunction1, fromFunction5 }
import scala.scalajs.js.UndefOr
import scala.scalajs.js.UndefOr.undefOr2ops
import scala.scalajs.js.annotation.JSExportAll
import com.greencatsoft.angularjs.Factory
import com.greencatsoft.angularjs.core.HttpStatus.int2HttpStatus
import com.greencatsoft.angularjs.injectable
import com.greencatsoft.angularjs.core.Defer.DeferredPromise
@js.native
@injectable("$http")
trait HttpService extends js.Object {
def get[T](url: String): HttpPromise[T] = js.native
def get[T](url: String, config: HttpConfig): HttpPromise[T] = js.native
def head[T](url: String): HttpPromise[T] = js.native
def head[T](url: String, config: HttpConfig): HttpPromise[T] = js.native
def post[T](url: String): HttpPromise[T] = js.native
def post[T](url: String, data: js.Any): HttpPromise[T] = js.native
def post[T](url: String, data: js.Any, config: HttpConfig): HttpPromise[T] = js.native
def jsonp[T](url: String, config: HttpConfig): HttpPromise[T] = js.native
def put[T](url: String): HttpPromise[T] = js.native
def put[T](url: String, data: js.Any): HttpPromise[T] = js.native
def put[T](url: String, data: js.Any, config: HttpConfig): HttpPromise[T] = js.native
def delete[T](url: String): HttpPromise[T] = js.native
def delete[T](url: String, data: js.Any): HttpPromise[T] = js.native
def delete[T](url: String, data: js.Any, config: HttpConfig): HttpPromise[T] = js.native
}
@js.native
trait HttpConfig extends js.Object {
var url: String = js.native
var params: js.Dictionary[js.Any] = js.native
var method: String = js.native
var timeout: Int = js.native
var withCredentials: Boolean = js.native
var cache: Boolean = js.native
var responseType: String = js.native
var headers: js.Dictionary[String] = js.native
var transformResponse: js.Array[js.Function3[js.Any, js.Any, js.Any, js.Any]] = js.native
var transformRequest: js.Array[js.Function2[js.Any, js.Any, js.Any]] = js.native
}
object HttpConfig {
def empty: HttpConfig = {
val config = new js.Object().asInstanceOf[HttpConfig]
config.headers = js.Dictionary()
config.transformRequest = js.Array()
config.transformResponse = js.Array()
config
}
def documentHandler: HttpConfig = {
val config = empty
config.responseType = "document"
config
}
def postHandler: HttpConfig = {
val config = empty
config.headers = js.Dictionary(
"method" -> "POST",
"Content-Type" -> "application/x-www-form-urlencoded")
config
}
}
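/**
 * Illustrative sketch (editor addition, not part of the original source): building a custom
 * config on top of `HttpConfig.empty`. The header value and timeout below are hypothetical
 * and only show how the mutable facade is typically populated.
 */
object HttpConfigSketch {
  def jsonPost: HttpConfig = {
    val config = HttpConfig.empty
    config.method = "POST"
    config.headers = js.Dictionary("Content-Type" -> "application/json")
    config.timeout = 5000
    config
  }
}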
@js.native
@injectable("$httpProvider")
trait HttpProvider extends js.Object {
var defaults: HttpConfig = js.native
var interceptors: js.Array[String] = js.native
}
@js.native
trait HttpPromise[T] extends Promise[T] {
def success(callback: js.Function1[T, Unit]): this.type = js.native
def success(callback: js.Function2[T, Int, Unit]): this.type = js.native
def success(callback: js.Function3[T, js.Any, Int, Unit]): this.type = js.native
def success(callback: js.Function4[T, Int, js.Any, js.Any, Unit]): this.type = js.native
def success(callback: js.Function5[T, Int, js.Any, js.Any, js.Any, Unit]): this.type = js.native
def error(callback: js.Function1[T, Unit]): this.type = js.native
def error(callback: js.Function2[T, Int, Unit]): this.type = js.native
def error(callback: js.Function3[T, js.Any, Int, Unit]): this.type = js.native
def error(callback: js.Function4[T, Int, js.Any, js.Any, Unit]): this.type = js.native
def error(callback: js.Function5[T, Int, js.Any, js.Any, UndefOr[String], Unit]): this.type = js.native
}
trait HttpInterceptor {
def q: Q
def request(config: HttpConfig): HttpConfig = config
def requestError[T](rejection: HttpResult): Promise[T] = q.reject(rejection)
def response(response: HttpResult): HttpResult = response
def responseError[T](rejection: HttpResult): Promise[T] = q.reject(rejection)
}
@JSExportAll
case class HttpInterceptorFunctions(
request: js.Function1[HttpConfig, HttpConfig],
requestError: js.Function1[HttpResult, Promise[_]],
response: js.Function1[HttpResult, HttpResult],
responseError: js.Function1[HttpResult, Promise[_]])
trait HttpInterceptorFactory extends Factory[HttpInterceptorFunctions] {
implicit def toInterceptorFunctions(interceptor: HttpInterceptor): HttpInterceptorFunctions = {
import interceptor._
HttpInterceptorFunctions(request _, requestError _, response _, responseError _)
}
}
case class HttpStatus(code: Int)
object HttpStatus {
//From https://github.com/spray/spray/blob/master/spray-http/src/main/scala/spray/http/StatusCode.scala
val Continue = HttpStatus(100)
val SwitchingProtocols = HttpStatus(101)
val Processing = HttpStatus(102)
val Ok = HttpStatus(200)
val Created = HttpStatus(201)
val Accepted = HttpStatus(202)
val NonAuthoritativeInformation = HttpStatus(203)
val NoContent = HttpStatus(204)
val ResetContent = HttpStatus(205)
val PartialContent = HttpStatus(206)
val MultiStatus = HttpStatus(207)
val AlreadyReported = HttpStatus(208)
val IMUsed = HttpStatus(226)
val MultipleChoices = HttpStatus(300)
val MovedPermanently = HttpStatus(301)
val Found = HttpStatus(302)
val SeeOther = HttpStatus(303)
val NotModified = HttpStatus(304)
val UseProxy = HttpStatus(305)
val TemporaryRedirect = HttpStatus(307)
val PermanentRedirect = HttpStatus(308)
val BadRequest = HttpStatus(400)
val Unauthorized = HttpStatus(401)
val PaymentRequired = HttpStatus(402)
val Forbidden = HttpStatus(403)
val NotFound = HttpStatus(404)
val MethodNotAllowed = HttpStatus(405)
val NotAcceptable = HttpStatus(406)
val ProxyAuthenticationRequired = HttpStatus(407)
val RequestTimeout = HttpStatus(408)
val Conflict = HttpStatus(409)
val Gone = HttpStatus(410)
val LengthRequired = HttpStatus(411)
val PreconditionFailed = HttpStatus(412)
val EntityTooLarge = HttpStatus(413)
val RequestUriTooLong = HttpStatus(414)
val UnsupportedMediaType = HttpStatus(415)
val RequestedRangeNotSatisfiable = HttpStatus(416)
val ExpectationFailed = HttpStatus(417)
val EnhanceYourCalm = HttpStatus(420)
val UnprocessableEntity = HttpStatus(422)
val Locked = HttpStatus(423)
val FailedDependency = HttpStatus(424)
val UnorderedCollection = HttpStatus(425)
val UpgradeRequired = HttpStatus(426)
val PreconditionRequired = HttpStatus(428)
val TooManyRequests = HttpStatus(429)
val RequestHeaderFieldsTooLarge = HttpStatus(431)
val RetryWith = HttpStatus(449)
val BlockedByParentalControls = HttpStatus(450)
val UnavailableForLegalReasons = HttpStatus(451)
val InternalServerError = HttpStatus(500)
val NotImplemented = HttpStatus(501)
val BadGateway = HttpStatus(502)
val ServiceUnavailable = HttpStatus(503)
val GatewayTimeout = HttpStatus(504)
val HTTPVersionNotSupported = HttpStatus(505)
val VariantAlsoNegotiates = HttpStatus(506)
val InsufficientStorage = HttpStatus(507)
val LoopDetected = HttpStatus(508)
val BandwidthLimitExceeded = HttpStatus(509)
val NotExtended = HttpStatus(510)
val NetworkAuthenticationRequired = HttpStatus(511)
val NetworkReadTimeout = HttpStatus(598)
val NetworkConnectTimeout = HttpStatus(599)
implicit def int2HttpStatus(code: Int): HttpStatus = HttpStatus(code)
}
case class HttpException(status: HttpStatus, message: String) extends Exception
object HttpPromise {
implicit def promise2future[A](promise: HttpPromise[A]): Future[A] = {
val p = concurrent.Promise[A]
def onSuccess(data: A): Unit = p.success(data.asInstanceOf[A])
def onError(data: A, status: Int, config: js.Any, headers: js.Any, statusText: UndefOr[String]): Unit =
p failure HttpException(status, statusText getOrElse s"Failed to process HTTP request: '$data'")
promise.success(onSuccess _).error(onError _)
p.future
}
}
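/**
 * Illustrative sketch (editor addition, not part of the original source): how the implicit
 * conversion above is typically used. The `http` service instance and the "/api/greeting"
 * endpoint are hypothetical; in an Angular application the service would be injected.
 */
object HttpPromiseSketch {
  def fetchGreeting(http: HttpService): Future[String] = {
    // the HttpPromise[String] returned by $http is converted to a scala.concurrent.Future
    // through the implicit promise2future conversion defined above
    http.get[String]("/api/greeting")
  }
}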
@js.native
trait HttpResult extends js.Object {
val config: js.Any = js.native
val data: js.Any = js.native
val status: Int = js.native
val statusText: String = js.native
}
|
easel/scalajs-angular
|
src/main/scala/com/greencatsoft/angularjs/core/Http.scala
|
Scala
|
apache-2.0
| 8,222
|
/**
* Copyright (C) 2010-2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.client
import java.io.{IOException, InputStream}
import net.lshift.diffa.adapter.common.JSONHelper
import net.lshift.diffa.schema.servicelimits.ScanResponseSizeLimit
import net.lshift.diffa.kernel.config.{PairServiceLimitsView, PairRef}
import scala.collection.JavaConversions._
class ValidatingScanResultParser(validatorFactory: ScanEntityValidatorFactory) extends JsonScanResultParser {
def parse(s: InputStream) = JSONHelper.readQueryResult(s, validatorFactory.createValidator).toSeq
}
trait LengthCheckingParser extends JsonScanResultParser {
val serviceLimitsView: PairServiceLimitsView
val pair: PairRef
abstract override def parse(s: InputStream) = {
val responseSizeLimit = serviceLimitsView.getEffectiveLimitByNameForPair(
pair.space, pair.name, ScanResponseSizeLimit)
try {
super.parse(new LengthCheckingInputStream(s, responseSizeLimit))
} catch {
//case e:IOException if e.getCause.isInstanceOf[ScanLimitBreachedException] => throw e.getCause
case e:IOException => throw e.getCause
}
}
class LengthCheckingInputStream(stream: InputStream, sizeLimit:Int) extends InputStream {
var numBytes = 0;
def read(): Int = {
val byte = stream.read()
if (byte >=0) numBytes += 1
if (numBytes > sizeLimit) {
val msg = "Scan response size for pair %s exceeded configured limit of %d bytes".format(
pair.name, sizeLimit)
throw new RuntimeException(msg)
} else {
byte
}
}
}
}
|
0x6e6562/diffa
|
client-support/src/main/scala/net/lshift/diffa/client/ValidatingScanResultParser.scala
|
Scala
|
apache-2.0
| 2,136
|
package org.jetbrains.plugins.scala.macroAnnotations
import com.intellij.psi.util.PsiModificationTracker
import org.junit.Assert
/**
* Author: Svyatoslav Ilinskiy
* Date: 9/25/15.
*/
class CachedInsidePsiElementTest extends CachedWithRecursionGuardTestBase {
def testSimple(): Unit = {
object Foo extends CachedMockPsiElement {
@CachedInsidePsiElement(this, PsiModificationTracker.MODIFICATION_COUNT)
def currentTime(): Long = System.currentTimeMillis()
}
val firstRes: Long = Foo.currentTime()
Thread.sleep(1)
Assert.assertEquals(firstRes, Foo.currentTime())
}
}
|
LPTK/intellij-scala
|
test/org/jetbrains/plugins/scala/macroAnnotations/CachedInsidePsiElementTest.scala
|
Scala
|
apache-2.0
| 607
|
/* *\\
** \\ \\ / _) \\ \\ / \\ | **
** \\ \\ / | __ \\ _ \\ __| \\ \\ / |\\/ | **
** \\ \\ / | | | __/ | \\ \\ / | | **
** \\_/ _| .__/ \\___| _| \\_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\\* */
package org.vipervm.runtime.mm
import org.vipervm.platform._
import org.vipervm.profiling._
import org.vipervm.utils._
import scala.collection.immutable.HashMap
import akka.actor.TypedActor
private class DefaultDataManager(val platform:Platform, profiler:Profiler) extends DataManager {
private val self = TypedActor.self[DataManager]
protected var dataStates:Map[(MemoryNode,MetaView),DataState] = Map.empty
protected var configs:Map[DataConfig,(Int,Event)] = Map.empty
protected var instances:HashMap[Data,Seq[DataInstance[_]]] = HashMap.empty
def register(data:Data):Unit = {
instances += (data -> Seq.empty)
}
def unregister(data:Data):Unit = {
instances -= data
}
def associate(instance:DataInstance[Repr],data:Data):Unit = {
val old = instances.getOrElse(data, Seq.empty)
instances = instances.updated(data, instance +: old)
}
def dataIsAvailableIn(data:Data,memory:MemoryNode):Boolean = {
instances(data).exists { _.isAvailableIn(memory) match {
case Right(b) => b
case Left(datas) => datas.forall(data => dataIsAvailableIn(data,memory))
}}
}
def availableInstancesIn(data:Data,memory:MemoryNode):Seq[DataInstance[_]] = {
instances(data).filter { _.isAvailableIn(memory) match {
case Right(b) => b
case Left(datas) => datas.forall(data => dataIsAvailableIn(data,memory))
}}
}
def dataState(data:MetaView,memory:MemoryNode):DataState = {
dataStateInternal(data,memory)
}
protected def dataStateInternal(data:MetaView,memory:MemoryNode):DataState = {
dataStates.getOrElse(memory -> data, DataState())
}
def updateDataState(data:MetaView,memory:MemoryNode,state:DataState):Unit = {
updateDataStateInternal(data,memory,state)
}
protected def updateDataStateInternal(data:MetaView,memory:MemoryNode,state:DataState):Unit = {
dataStates += (memory -> data) -> state
self.wakeUp
}
def release(config:DataConfig):Unit = {
//TODO: check config state and release data...
val (count,event) = configs(config)
if (count == 1) configs -= config
else configs += config -> (count-1 -> event)
self.wakeUp
}
def prepare(config:DataConfig):Event = {
val (count,event) = configs.getOrElse(config, (0,new UserEvent))
configs += config -> (count+1 -> event)
self.wakeUp
event
}
def withConfig[A](config:DataConfig)(body: => A):FutureEvent[A] = {
prepare(config) willTrigger {
val result = body
self.release(config)
result
}
}
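  /**
   * Illustrative sketch (editor addition, not part of the original source): the reference
   * counting used by prepare/release, restated over plain string keys and integer counts.
   */
  private def refCountSketch(counts: Map[String, Int], key: String, acquire: Boolean): Map[String, Int] = {
    val current = counts.getOrElse(key, 0)
    if (acquire) counts + (key -> (current + 1))
    else if (current <= 1) counts - key
    else counts + (key -> (current - 1))
  }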
def wakeUp:Unit = {
/* Skip already complete configs */
val confs = configs.collect { case (conf,(_,ev)) if !ev.test => conf }
val (completed,uncompleted) = confs.partition(isComplete)
/* Signal valid configs */
completed.foreach { conf =>
val (_,event) = configs(conf)
event.complete
}
val metaConf = (Set.empty[(MetaView,MemoryNode)] /: uncompleted.map(_.toSet))(_++_)
/* Skip data available or being transferred */
val metaConf2 = metaConf.filterNot { case (data,mem) => {
val state = dataStateInternal(data,mem)
state.available || state.uploading
}}
val (validData,scratchData) = metaConf2.partition(x => isValid(x._1))
/* Allocate scratch data */
scratchData.foreach { case (data,mem) => {
val view = data.allocate(mem)
data.store(view)
val state = dataStateInternal(data,mem)
updateDataStateInternal(data, mem, state.copy(available = true))
}}
/* Split between those requiring a hop in host memory and the other */
val (directs,indirects) = validData.partition {
case (data,mem) => isDirect(data,mem)
}
/* Schedule required direct data transfers */
val directTransfers = directs.map { case (data,mem) => {
//FIXME: support "not enough space on device" exception
val target = data.allocate(mem)
val (source,link) = selectDirectSource(data,target)
transfer(data,source,target,link)
}}
/* Check that no uploading is taking place to the host for indirect transfers */
val findirects = indirects.filterNot { case (data,_) =>
dataStateInternal(data, platform.hostMemory).uploading
}
/* Schedule indirect transfers to the host */
val indirectTransfers = findirects.map { case (data,mem) => {
//FIXME: support "not enough space on device" exception
val target = data.allocate(platform.hostMemory)
val (source,link) = selectDirectSource(data,target)
transfer(data,source,target,link)
}}
}
def isComplete(config:DataConfig):Boolean = {
config.map {
case (data,mem) => dataStateInternal(data,mem).available
}.reduceLeft(_&&_)
}
def isDirect(data:MetaView,memory:MemoryNode):Boolean = data.views.exists(
view => platform.linkBetween(view.buffer.memory,memory).isDefined
)
def isValid(data:MetaView):Boolean = data.isDefined
protected def transfer(data:MetaView,source:BufferView,target:BufferView,link:Link):DataTransfer = {
val mem = target.buffer.memory
val tr = link.copy(source,target)
profiler.transferStart(data,tr)
val state = dataStateInternal(data,mem)
updateDataStateInternal(data, mem, state.copy(uploading = true))
tr.willTrigger {
profiler.transferEnd(data,tr)
data.store(target.asInstanceOf[data.ViewType])
val state = dataState(data,mem)
updateDataState(data, mem, state.copy(uploading = false, available = true))
}
tr
}
def selectDirectSource(data:MetaView, target:BufferView):(BufferView,Link) = {
val directSources = data.views.filter(src => platform.linkBetween(src,target).isDefined)
val src = directSources.head
val link = platform.linkBetween(src,target).get
(src,link)
}
}
object DefaultDataManager {
def apply(platform:Platform,profiler:Profiler = DummyProfiler()):DataManager = {
DataManager {
new DefaultDataManager(platform,profiler)
}
}
}
|
hsyl20/Scala_ViperVM
|
src/main/scala/org/vipervm/runtime/mm/DefaultDataManager.scala
|
Scala
|
gpl-3.0
| 6,617
|
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
import leon.utils._
object Main {
lazy val allPhases: List[LeonPhase[_, _]] = {
List(
frontends.scalac.ExtractionPhase,
frontends.scalac.ClassgenPhase,
utils.TypingPhase,
utils.FileOutputPhase,
purescala.RestoreMethods,
xlang.AntiAliasingPhase,
xlang.EpsilonElimination,
xlang.ImperativeCodeElimination,
xlang.FixReportLabels,
xlang.XLangDesugaringPhase,
purescala.FunctionClosure,
synthesis.SynthesisPhase,
termination.TerminationPhase,
verification.VerificationPhase,
repair.RepairPhase,
evaluators.EvaluationPhase,
solvers.isabelle.AdaptationPhase,
solvers.isabelle.IsabellePhase,
transformations.InstrumentationPhase,
transformations.RunnableCodePhase,
transformations.WebBuilderPhase,
invariant.engine.InferInvariantsPhase,
laziness.HOInferencePhase,
genc.GenerateCPhase,
genc.CFileOutputPhase,
verification.InjectAsserts
)
}
// Add whatever you need here.
lazy val allComponents : Set[LeonComponent] = allPhases.toSet ++ Set(
solvers.unrolling.UnrollingProcedure, MainComponent, GlobalOptions, solvers.smtlib.SMTLIBCVC4Component, solvers.isabelle.Component
)
/*
* This object holds the options that determine the selected pipeline of Leon.
* Please put any further such options here to have them print nicely in --help message.
*/
object MainComponent extends LeonComponent {
val name = "main"
val description = "Selection of Leon functionality. Default: verify"
val optEval = LeonStringOptionDef("eval", "Evaluate ground functions through code generation or evaluation (default: evaluation)", "default", "[codegen|default]")
val optTermination = LeonFlagOptionDef("termination", "Check program termination. Can be used along --verify", false)
val optRepair = LeonFlagOptionDef("repair", "Repair selected functions", false)
val optSynthesis = LeonFlagOptionDef("synthesis", "Partial synthesis of choose() constructs", false)
val optWebBuilder = LeonFlagOptionDef("webbuilder", "Evaluate the program using webbuilder to --o file.html", false)
val optIsabelle = LeonFlagOptionDef("isabelle", "Run Isabelle verification", false)
val optNoop = LeonFlagOptionDef("noop", "No operation performed, just output program", false)
val optVerify = LeonFlagOptionDef("verify", "Verify function contracts", false)
val optHelp = LeonFlagOptionDef("help", "Show help message", false)
val optInstrument = LeonFlagOptionDef("instrument", "Instrument the code for inferring time/depth/stack bounds", false)
val optRunnable = LeonFlagOptionDef("runnable", "Generate runnable code after instrumenting it", false)
val optInferInv = LeonFlagOptionDef("inferInv", "Infer invariants from (instrumented) the code", false)
val optLazyEval = LeonFlagOptionDef("mem", "Handles programs that may use the memoization and higher-order programs", false)
val optGenc = LeonFlagOptionDef("genc", "Generate C code", false)
override val definedOptions: Set[LeonOptionDef[Any]] =
Set(optTermination, optRepair, optSynthesis, optWebBuilder, optIsabelle, optNoop, optHelp, optEval, optVerify, optInstrument, optRunnable, optInferInv, optLazyEval, optGenc)
}
lazy val allOptions: Set[LeonOptionDef[Any]] = allComponents.flatMap(_.definedOptions)
def displayHelp(reporter: Reporter, error: Boolean) = {
reporter.title(MainComponent.description)
for (opt <- MainComponent.definedOptions.toSeq.sortBy(_.name)) {
reporter.info(opt.helpString)
}
reporter.info("")
reporter.title("Additional global options")
for (opt <- GlobalOptions.definedOptions.toSeq.sortBy(_.name)) {
reporter.info(opt.helpString)
}
reporter.info("")
reporter.title("Additional options, by component:")
for (c <- (allComponents - MainComponent - GlobalOptions).toSeq.sortBy(_.name) if c.definedOptions.nonEmpty) {
reporter.info("")
reporter.info(s"${c.name} (${c.description})")
for (opt <- c.definedOptions.toSeq.sortBy(_.name)) {
// there is a non-breaking space at the beginning of the string :)
reporter.info(opt.helpString)
}
}
exit(error)
}
def displayVersion(reporter: Reporter) = {
reporter.title("Leon verification and synthesis tool (http://leon.epfl.ch/)")
reporter.info("")
}
private def exit(error: Boolean) = sys.exit(if (error) 1 else 0)
def splitOptions(options: String): Seq[String] = {
"""([^"\\s]+(?:"(?:(?!["\\\\]).|\\\\\\\\|\\\\")*")?)+|(?:"[^"]*"[^"\\s]*)+""".r.findAllIn(options).toList
}
def parseOptions(options: String, dismissErrors: Boolean): Seq[LeonOption[Any]] = {
parseOptions(splitOptions(options), dismissErrors)
}
val initReporter = new DefaultReporter(Set())
def parseOption(opt: String, dismissErrors: Boolean): Option[LeonOption[Any]] = {
val (name, value) = OptionsHelpers.nameValue(opt).getOrElse{
if(dismissErrors) return None
initReporter.fatalError(
s"Malformed option $opt. Options should have the form --name or --name=value"
)
}
// Find respective LeonOptionDef, or report an unknown option
val df = allOptions.find(_.name == name).getOrElse{
if(dismissErrors) return None
initReporter.fatalError(
s"Unknown option: $name\\n" +
"Try 'leon --help' for more information."
)
}
try {
Some(df.parse(value)(initReporter))
} catch {
case e: Throwable if dismissErrors => None
case e: Throwable => throw e
}
}
def parseOptions(options: Seq[String], dismissErrors: Boolean): Seq[LeonOption[Any]] = {
val initOptions: Seq[LeonOption[Any]] = options.flatMap(parseOption(_, dismissErrors))
val leonOptions: Seq[LeonOption[Any]] =
if (initOptions.exists(opt => opt.optionDef == MainComponent.optLazyEval && opt.value == true)) {
// here, add the `disablePos` option to side step a bug in the scala compiler
LeonOption(GlobalOptions.optDisablePos)(true) +: initOptions
} else initOptions
leonOptions
}
def processOptions(args: Seq[String]): LeonContext = {
val options = args.filter(_.startsWith("--"))
val files = args.filterNot(_.startsWith("-")).map(new java.io.File(_))
val leonOptions = parseOptions(options, dismissErrors = false)
val reporter = new DefaultReporter(
leonOptions.collectFirst {
case LeonOption(GlobalOptions.optDebug, sections) =>
sections.asInstanceOf[Set[DebugSection]]
}.getOrElse(Set[DebugSection]())
)
reporter.whenDebug(DebugSectionOptions) { debug =>
debug("Options considered by Leon:")
for (lo <- leonOptions) debug(lo.toString)
}
LeonContext(
reporter = reporter,
files = files,
options = leonOptions,
interruptManager = new InterruptManager(reporter)
)
}
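  // Illustrative sketch (editor addition, not part of the original source): how an argument
  // list is split by processOptions above. The file name below is hypothetical, e.g.
  //   args = Seq("--verify", "--debug=options", "Example.scala")
  //   -> options Seq("--verify", "--debug=options"), files Seq(new java.io.File("Example.scala"))
  private def splitArgsSketch(args: Seq[String]): (Seq[String], Seq[java.io.File]) =
    (args.filter(_.startsWith("--")), args.filterNot(_.startsWith("-")).map(new java.io.File(_)))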
def computePipeline(ctx: LeonContext): Pipeline[List[String], Any] = {
import purescala.Definitions.Program
import purescala.RestoreMethods
import utils.FileOutputPhase
import frontends.scalac.{ ExtractionPhase, ClassgenPhase }
import synthesis.SynthesisPhase
import termination.TerminationPhase
import xlang.FixReportLabels
import verification.VerificationPhase
import repair.RepairPhase
import evaluators.EvaluationPhase
import solvers.isabelle.IsabellePhase
import genc.GenerateCPhase
import genc.CFileOutputPhase
import MainComponent._
import invariant.engine.InferInvariantsPhase
import transformations.InstrumentationPhase
import transformations.RunnableCodePhase
import transformations.WebBuilderPhase
import laziness._
val helpF = ctx.findOptionOrDefault(optHelp)
val noopF = ctx.findOptionOrDefault(optNoop)
val synthesisF = ctx.findOptionOrDefault(optSynthesis)
val optWebBuilderF = ctx.findOptionOrDefault(optWebBuilder)
val repairF = ctx.findOptionOrDefault(optRepair)
val isabelleF = ctx.findOptionOrDefault(optIsabelle)
val terminationF = ctx.findOptionOrDefault(optTermination)
val verifyF = ctx.findOptionOrDefault(optVerify)
val gencF = ctx.findOptionOrDefault(optGenc)
val evalF = ctx.findOption(optEval).isDefined
val inferInvF = ctx.findOptionOrDefault(optInferInv)
val instrumentF = ctx.findOptionOrDefault(optInstrument)
val runnableF = ctx.findOptionOrDefault(optRunnable)
val lazyevalF = ctx.findOptionOrDefault(optLazyEval)
val analysisF = verifyF && terminationF
// Check consistency in options
if (helpF) {
displayVersion(ctx.reporter)
displayHelp(ctx.reporter, error = false)
} else {
val pipeBegin: Pipeline[List[String], Program] =
ClassgenPhase andThen
ExtractionPhase andThen
new PreprocessingPhase(genc = gencF)
val verification =
InstrumentationPhase andThen
VerificationPhase andThen
FixReportLabels andThen
PrintReportPhase
val termination = TerminationPhase andThen PrintReportPhase
val pipeProcess: Pipeline[Program, Any] = {
if (noopF) RestoreMethods andThen FileOutputPhase
else if (synthesisF) SynthesisPhase
else if (optWebBuilderF) WebBuilderPhase andThen RawFileOutputPhase
else if (repairF) RepairPhase
else if (analysisF) Pipeline.both(verification, termination)
else if (terminationF) termination
else if (isabelleF) IsabellePhase andThen PrintReportPhase
else if (evalF) EvaluationPhase
else if (inferInvF) InferInvariantsPhase
else if (instrumentF) InstrumentationPhase andThen FileOutputPhase
else if (runnableF) InstrumentationPhase andThen RunnableCodePhase
else if (gencF) GenerateCPhase andThen CFileOutputPhase
else if (lazyevalF) HOInferencePhase
else verification
}
pipeBegin andThen
pipeProcess
}
}
private var hasFatal = false
def main(args: Array[String]) {
val argsl = args.toList
// Process options
val ctx = try {
processOptions(argsl)
} catch {
case LeonFatalError(None) =>
exit(error = true)
case LeonFatalError(Some(msg)) =>
      // For the special case of fatal errors not sent through Reporter, we
// send them through reporter one time
try {
new DefaultReporter(Set()).fatalError(msg)
} catch {
case _: LeonFatalError =>
}
exit(error = true)
}
ctx.interruptManager.registerSignalHandler()
val doWatch = ctx.findOptionOrDefault(GlobalOptions.optWatch)
if (doWatch) {
val watcher = new FilesWatcher(ctx, ctx.files ++ Build.libFiles.map { new java.io.File(_) })
watcher.onChange {
execute(args, ctx)
}
} else {
execute(args, ctx)
}
exit(hasFatal)
}
def execute(args: Seq[String], ctx0: LeonContext): Unit = {
val ctx = ctx0.copy(reporter = new DefaultReporter(ctx0.reporter.debugSections))
try {
// Compute leon pipeline
val pipeline = computePipeline(ctx)
val timer = ctx.timers.total.start()
// Run pipeline
val (ctx2, _) = pipeline.run(ctx, args.toList)
timer.stop()
ctx2.reporter.whenDebug(DebugSectionTimers) { debug =>
ctx2.timers.outputTable(debug)
}
hasFatal = false
} catch {
case LeonFatalError(None) =>
hasFatal = true
case LeonFatalError(Some(msg)) =>
        // For the special case of fatal errors not sent through Reporter, we
// send them through reporter one time
try {
ctx.reporter.fatalError(msg)
} catch {
case _: LeonFatalError =>
}
hasFatal = true
}
}
}
|
regb/leon
|
src/main/scala/leon/Main.scala
|
Scala
|
gpl-3.0
| 12,227
|
package org.lolhens.minechanics.client.util
/**
* Created by LolHens on 22.09.2014.
*/
class Vertex(val vec: Vec3f, val norm: Vec3f) {
}
|
LolHens/Minechanics
|
src/main/scala/org/lolhens/minechanics/client/util/Vertex.scala
|
Scala
|
gpl-2.0
| 140
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.agg.batch
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.ListTypeInfo
import org.apache.flink.runtime.operators.sort.QuickSort
import org.apache.flink.streaming.api.operators.OneInputStreamOperator
import org.apache.flink.table.api.Types
import org.apache.flink.table.dataformat.{BaseRow, BinaryRow}
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.codegen.CodeGenUtils.{BINARY_ROW, newName}
import org.apache.flink.table.planner.codegen.OperatorCodeGenerator.generateCollect
import org.apache.flink.table.planner.codegen.agg.batch.AggCodeGenHelper.genGroupKeyChangedCheckCode
import org.apache.flink.table.planner.codegen.agg.batch.HashAggCodeGenHelper.{genHashAggOutputExpr, genRetryAppendToMap, prepareHashAggKVTypes, prepareHashAggMap}
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, ExprCodeGenerator, GenerateUtils, GeneratedExpression, ProjectionCodeGenerator}
import org.apache.flink.table.planner.plan.logical.{LogicalWindow, SlidingGroupWindow, TumblingGroupWindow}
import org.apache.flink.table.planner.plan.utils.AggregateInfoList
import org.apache.flink.table.runtime.generated.GeneratedOperator
import org.apache.flink.table.runtime.operators.TableStreamOperator
import org.apache.flink.table.runtime.operators.aggregate.{BytesHashMap, BytesHashMapSpillMemorySegmentPool}
import org.apache.flink.table.runtime.operators.sort.BinaryKVInMemorySortBuffer
import org.apache.flink.table.runtime.operators.window.TimeWindow
import org.apache.flink.table.runtime.types.TypeInfoLogicalTypeConverter.fromTypeInfoToLogicalType
import org.apache.flink.table.runtime.typeutils.BinaryRowSerializer
import org.apache.flink.table.types.logical.{LogicalType, RowType}
import org.apache.flink.util.MutableObjectIterator
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.tools.RelBuilder
import org.apache.commons.math3.util.ArithmeticUtils
import scala.collection.JavaConversions._
/**
* Tumbling window: like [[HashAggCodeGenerator]].
*
* Sliding window:
* 1.enableAssignPane + 2 phase:
* -- assign pane + hash agg
* -- distribute by (key)
* -- global hash agg(key + pane)
* -- sort by (key + pane)
* -- assign window + sort agg
* 2.disableAssignPane + 1 phase:
* -- distribute by (key)
* -- assign window + hash agg[(key + window) -> agg buffer].
*/
class HashWindowCodeGenerator(
ctx: CodeGeneratorContext,
relBuilder: RelBuilder,
window: LogicalWindow,
inputTimeFieldIndex: Int,
inputTimeIsDate: Boolean,
namedProperties: Seq[PlannerNamedWindowProperty],
aggInfoList: AggregateInfoList,
inputRowType: RelDataType,
grouping: Array[Int],
auxGrouping: Array[Int],
enableAssignPane: Boolean = true,
isMerge: Boolean,
isFinal: Boolean)
extends WindowCodeGenerator(
relBuilder,
window,
inputTimeFieldIndex,
inputTimeIsDate,
namedProperties,
aggInfoList,
inputRowType,
grouping,
auxGrouping,
enableAssignPane,
isMerge,
isFinal) {
private lazy val aggBufferRowType = RowType.of(aggBufferTypes.flatten, aggBufferNames.flatten)
private lazy val aggMapKeyRowType = RowType.of(
(groupKeyRowType.getChildren :+ timestampInternalType).toArray,
(groupKeyRowType.getFieldNames :+ "assignedTs").toArray)
def gen(
inputType: RowType,
outputType: RowType,
buffLimitSize: Int,
windowStart: Long,
windowSize: Long,
slideSize: Long): GeneratedOperator[OneInputStreamOperator[BaseRow, BaseRow]] = {
val className = if (isFinal) "HashWinAgg" else "LocalHashWinAgg"
val suffix = if (grouping.isEmpty) "WithoutKeys" else "WithKeys"
val inputTerm = CodeGenUtils.DEFAULT_INPUT1_TERM
// add logger
val logTerm = CodeGenUtils.newName("LOG")
ctx.addReusableLogger(logTerm, className)
// gen code to do aggregate using aggregate map
val aggMapKey = newName("aggMapKey")
val aggMapKeyWriter = newName("aggMapKeyWriter")
val (processElementPerWindow, outputResultFromMap) = genHashWindowAggCodes(
buffLimitSize,
windowSize,
slideSize,
inputTerm,
inputType,
outputType,
aggMapKey,
logTerm)
val (processCode, outputWhenEndInputCode) = if (isFinal && isMerge) {
// prepare for aggregate map key's projection
val projAggMapKeyCode = ProjectionCodeGenerator.generateProjectionExpression(ctx,
inputType,
aggMapKeyRowType,
grouping :+ grouping.length,
inputTerm = inputTerm,
outRecordTerm = aggMapKey,
outRecordWriterTerm = aggMapKeyWriter).code
val processCode =
s"""
|if (!$inputTerm.isNullAt($inputTimeFieldIndex)) {
| hasInput = true;
| // input field access for group key projection, window/pane assign
| // and aggregate map update
| ${ctx.reuseInputUnboxingCode(inputTerm)}
| // build aggregate map key
| $projAggMapKeyCode
| // look up aggregate map and aggregate
| $processElementPerWindow
|}
""".stripMargin
(processCode, outputResultFromMap)
} else {
// gen code to assign windows/pane to current input
val assignTimestampExprs = genTimestampAssignExprs(
enableAssignPane, windowStart, windowSize, slideSize, window, inputTerm, inputType)
val processCode =
if (!isSlidingWindowWithOverlapping(enableAssignPane, window, slideSize, windowSize)) {
          // Each input will be assigned to only one window, in the cases of a
          // tumbling window, a sliding window with slideSize >= windowSize, or pane optimization.
assert(assignTimestampExprs.size == 1)
val assignTimestampExpr = assignTimestampExprs.head
// prepare for aggregate map key's projection
val accessAssignedTimestampExpr = GeneratedExpression(
assignTimestampExpr.resultTerm, "false", "", timestampInternalType)
val prepareInitAggMapKeyExpr = prepareAggMapKeyExpr(inputTerm, inputType,
Some(accessAssignedTimestampExpr), aggMapKeyRowType, aggMapKey, aggMapKeyWriter)
val processAggregate =
s"""
| // build aggregate map key
| ${prepareInitAggMapKeyExpr.code}
| // aggregate by each input with assigned timestamp
| $processElementPerWindow
""".stripMargin
// gen code to filter invalid windows in the case of jumping window
val processEachInput = if (isJumpingWindow(slideSize, windowSize)) {
val checkValidWindow = s"${getInputTimeValue(inputTerm, inputTimeFieldIndex)} < " +
s"${assignTimestampExpr.resultTerm} + ${windowSize}L"
s"""
|if ($checkValidWindow) {
| // build aggregate map key
| ${prepareInitAggMapKeyExpr.code}
| // aggregate by each input with assigned timestamp
| $processAggregate
|}
""".stripMargin
} else {
processAggregate
}
s"""
|if (!$inputTerm.isNullAt($inputTimeFieldIndex)) {
| hasInput = true;
| // input field access for group key projection, window/pane assign
| // and aggregate map update
| ${ctx.reuseInputUnboxingCode(inputTerm)}
| // assign timestamp(window or pane)
| ${assignTimestampExpr.code}
| // process each input
| $processEachInput
|}""".stripMargin
} else {
        // Otherwise, each input will be assigned to overlapping windows.
assert(assignTimestampExprs.size > 1)
val assignedWindows = newName("assignedWindows")
ctx.addReusableMember(
s"transient java.util.List<java.lang.Long> $assignedWindows" +
s" = new java.util.ArrayList<java.lang.Long>();")
val prepareCodes = for (expr <- assignTimestampExprs) yield {
s"""
|${expr.code}
|$assignedWindows.add(${expr.resultTerm});
""".stripMargin
}
val code =
s"""
|$assignedWindows.clear();
|${prepareCodes.mkString("\\n").trim}
""".stripMargin
val assignTimestampExpr =
new GeneratedExpression(assignedWindows, "false", code,
fromTypeInfoToLogicalType(new ListTypeInfo(Types.LONG)))
// gen code to filter invalid overlapping windows
val assignedTimestamp = newName("assignedTimestamp")
val timestampTerm = s"${getInputTimeValue(inputTerm, inputTimeFieldIndex)}"
val checkValidWindow = s"$timestampTerm >= $assignedTimestamp " +
s" && $timestampTerm < $assignedTimestamp + ${windowSize}L"
// prepare for aggregate map key's projection
val prepareInitAggMapKeyExpr = prepareAggMapKeyExpr(
inputTerm, inputType, None, aggMapKeyRowType, aggMapKey, aggMapKeyWriter)
val realAssignedValue = if (inputTimeIsDate) {
convertToIntValue(s"$assignedTimestamp")
} else {
assignedTimestamp
}
val updateAssignedTsCode = s"$aggMapKey.set$timestampInternalTypeName(${
grouping.length
}, $realAssignedValue);"
s"""
|if (!$inputTerm.isNullAt($inputTimeFieldIndex)) {
| hasInput = true;
| // input field access for group key projection, window/pane assign
| // and aggregate map update
| ${ctx.reuseInputUnboxingCode(inputTerm)}
| // assign windows/pane
| ${assignTimestampExpr.code}
| // build aggregate map key
| ${prepareInitAggMapKeyExpr.code}
         |  // we assigned all possible overlapping windows in this case,
         |  // so we need to filter out the invalid windows here
| for (Long $assignedTimestamp : ${assignTimestampExpr.resultTerm}) {
| if ($checkValidWindow) {
| // update input's assigned timestamp
| $updateAssignedTsCode
| $processElementPerWindow
| } else {
| break;
| }
| }
|}
""".stripMargin
}
(processCode, outputResultFromMap)
}
val baseClass = classOf[TableStreamOperator[_]].getName
val endInputCode = if (isFinal) {
s"""
|$outputWhenEndInputCode
""".stripMargin
} else {
outputWhenEndInputCode
}
AggCodeGenHelper.generateOperator(
ctx, className + suffix, baseClass, processCode, endInputCode, inputType)
}
private def genTimestampAssignExprs(
assignPane: Boolean,
windowStart: Long,
windowSize: Long,
slideSize: Long,
window: LogicalWindow,
inputTerm: String,
inputType: RowType): Seq[GeneratedExpression] = {
window match {
case SlidingGroupWindow(_, timeField, _, _) =>
if (assignPane) {
val paneSize = ArithmeticUtils.gcd(windowSize, slideSize)
Seq(genAlignedWindowStartExpr(
ctx, inputTerm, inputType, timeField, windowStart, paneSize))
} else if (slideSize >= windowSize) {
Seq(genAlignedWindowStartExpr(
ctx, inputTerm, inputType, timeField, windowStart, slideSize))
} else {
val maxNumOverlapping = math.ceil(windowSize * 1.0 / slideSize).toInt
val exprs = for (index <- 0 until maxNumOverlapping) yield {
genAlignedWindowStartExpr(
ctx, inputTerm, inputType, timeField, windowStart, slideSize, index)
}
exprs
}
case TumblingGroupWindow(_, timeField, _) =>
Seq(genAlignedWindowStartExpr(
ctx, inputTerm, inputType, timeField, windowStart, windowSize))
case _ =>
throw new RuntimeException(s"Bug. Assign pane for $window is not supported.")
}
}
private def prepareAggMapKeyExpr(
inputTerm: String,
inputType: LogicalType,
assignedTimestampExpr: Option[GeneratedExpression],
currentKeyType: RowType,
currentKeyTerm: String,
currentKeyWriterTerm: String): GeneratedExpression = {
val codeGen = new ExprCodeGenerator(ctx, false)
.bindInput(inputType, inputTerm = inputTerm)
val expr = if (assignedTimestampExpr.isDefined) {
val assignedLongTimestamp = assignedTimestampExpr.get
if (inputTimeIsDate) {
val dateTerm = ctx.addReusableLocalVariable("int", "dateTerm")
val convertDateCode =
s"""
| ${assignedLongTimestamp.code}
| $dateTerm = ${convertToIntValue(assignedLongTimestamp.resultTerm)};
""".stripMargin
GeneratedExpression(
dateTerm,
assignedLongTimestamp.nullTerm,
convertDateCode,
timestampInternalType)
} else {
assignedLongTimestamp
}
} else {
val value = if (inputTimeIsDate) "-1" else "-1L"
GeneratedExpression(value, "false", "", timestampInternalType)
}
    // When there is no grouping key, the aggregate map key contains only the assigned timestamp field.
codeGen.generateResultExpression(
grouping.map(
idx => GenerateUtils.generateFieldAccess(ctx, inputType, inputTerm, idx)) :+ expr,
currentKeyType.asInstanceOf[RowType],
classOf[BinaryRow],
outRow = currentKeyTerm,
outRowWriter = Some(currentKeyWriterTerm))
}
private def genGroupWindowHashAggCodes(
isMerge: Boolean,
isFinal: Boolean,
windowSize: Long,
slideSize: Long,
aggMapKeyTypesTerm: String,
aggBufferTypesTerm: String,
bufferLimitSize: Int,
aggregateMapTerm: String,
inputTerm: String,
inputType: RowType,
outputType: RowType,
currentAggBufferTerm: String): (GeneratedExpression, GeneratedExpression, String) = {
// build mapping for DeclarativeAggregationFunction binding references
val offset = if (isMerge) grouping.length + 1 else grouping.length
val argsMapping = AggCodeGenHelper.buildAggregateArgsMapping(
isMerge, offset, inputType, auxGrouping, aggArgs, aggBufferTypes)
val aggBuffMapping = HashAggCodeGenHelper.buildAggregateAggBuffMapping(aggBufferTypes)
// gen code to create empty agg buffer
val initedAggBuffer = HashAggCodeGenHelper.genReusableEmptyAggBuffer(
ctx, builder, inputTerm, inputType, auxGrouping, aggregates, aggBufferRowType)
if (auxGrouping.isEmpty) {
// init aggBuffer in open function when there is no auxGrouping
ctx.addReusableOpenStatement(initedAggBuffer.code)
}
// gen code to update agg buffer from the aggregate map
val doAggregateExpr = HashAggCodeGenHelper.genAggregate(
isMerge,
ctx,
builder,
inputType,
inputTerm,
auxGrouping,
aggregates,
aggCallToAggFunction,
argsMapping,
aggBuffMapping,
currentAggBufferTerm,
aggBufferRowType)
// gen code to set hash agg result
val aggOutputCode = if (isFinal && enableAssignPane) {
// gen output by sort and merge pre accumulate results
genOutputByMerging(
windowSize,
slideSize,
bufferLimitSize,
outputType,
aggregateMapTerm,
argsMapping,
aggBuffMapping,
aggMapKeyTypesTerm,
aggBufferTypesTerm,
aggMapKeyRowType,
aggBufferRowType)
} else {
genOutputDirectly(
windowSize,
inputTerm,
inputType,
outputType,
aggregateMapTerm,
argsMapping,
aggBuffMapping)
}
(initedAggBuffer, doAggregateExpr, aggOutputCode)
}
private def genOutputByMerging(
windowSize: Long,
slideSize: Long,
bufferLimitSize: Int,
outputType: RowType,
aggregateMapTerm: String,
argsMapping: Array[Array[(Int, LogicalType)]],
aggBuffMapping: Array[Array[(Int, LogicalType)]],
aggKeyTypeTerm: String,
aggBufferTypeTerm: String,
aggMapKeyType: RowType,
aggBufferType: RowType): String = {
val keyComputerTerm = CodeGenUtils.newName("keyComputer")
val recordComparatorTerm = CodeGenUtils.newName("recordComparator")
val prepareSorterCode = HashAggCodeGenHelper.genKVSorterPrepareCode(
ctx, keyComputerTerm, recordComparatorTerm, aggMapKeyType)
val memPoolTypeTerm = classOf[BytesHashMapSpillMemorySegmentPool].getName
val binaryRowSerializerTypeTerm = classOf[BinaryRowSerializer].getName
val sorterBufferType = classOf[BinaryKVInMemorySortBuffer].getName
val sorterBufferTerm = newName("buffer")
val createSorterBufferCode =
s"""
| $prepareSorterCode
| $sorterBufferType $sorterBufferTerm = $sorterBufferType.createBuffer(
| $keyComputerTerm,
| new $binaryRowSerializerTypeTerm($aggKeyTypeTerm.length),
| new $binaryRowSerializerTypeTerm($aggBufferTypeTerm.length),
| $recordComparatorTerm,
| $aggregateMapTerm.getRecordAreaMemorySegments(),
| $aggregateMapTerm.getNumElements(),
| new $memPoolTypeTerm($aggregateMapTerm.getBucketAreaMemorySegments())
| );
""".stripMargin
val reuseAggMapKeyTerm = newName("reusedKey")
val reuseAggBufferTerm = newName("reusedValue")
val reuseKVTerm = newName("reusedKV")
val binaryRow = classOf[BinaryRow].getName
val kvType = classOf[JTuple2[_,_]].getName
ctx.addReusableMember(
s"transient $binaryRow $reuseAggMapKeyTerm = new $binaryRow(${aggMapKeyType.getFieldCount});")
ctx.addReusableMember(
s"transient $binaryRow $reuseAggBufferTerm = new $binaryRow(${aggBufferType.getFieldCount});")
ctx.addReusableMember(
s"transient $kvType<$binaryRow, $binaryRow> $reuseKVTerm = " +
s"new $kvType<$binaryRow, $binaryRow>($reuseAggMapKeyTerm, $reuseAggBufferTerm);"
)
// ---------------------------------------------------------------------------------------------
// gen code to create a buffer to group all the elements having the same grouping key
val windowElementType = getWindowsGroupingElementInfo()
    // project the aggregate map key and value into the prepared window element
val bufferWindowElementTerm = newName("prepareWinElement")
val bufferWindowElementWriterTerm = newName("prepareWinElementWriter")
val exprCodegen = new ExprCodeGenerator(ctx, false)
// TODO refine this. Is it possible to reuse grouping key projection?
val accessKeyExprs = for (idx <- 0 until aggMapKeyType.getFieldCount - 1) yield
GenerateUtils.generateFieldAccess(
ctx, aggMapKeyType, reuseAggMapKeyTerm, idx)
val accessTimestampExpr = GenerateUtils.generateFieldAccess(
ctx,
aggMapKeyType,
reuseAggMapKeyTerm,
aggMapKeyType.getFieldCount - 1)
val accessValExprs = for (idx <- 0 until aggBufferType.getFieldCount) yield
GenerateUtils.generateFieldAccess(ctx, aggBufferType, reuseAggBufferTerm, idx)
val accessExprs = (accessKeyExprs :+ GeneratedExpression(
accessTimestampExpr.resultTerm,
"false",
accessTimestampExpr.code,
timestampInternalType)) ++ accessValExprs
val buildWindowsGroupingElementExpr = exprCodegen.generateResultExpression(
accessExprs,
windowElementType,
classOf[BinaryRow],
outRow = bufferWindowElementTerm,
outRowWriter = Some(bufferWindowElementWriterTerm))
// ---------------------------------------------------------------------------------------------
// gen code to apply aggregate functions to grouping window elements
val timeWindowType = classOf[TimeWindow].getName
val currentWindow = newName("currentWindow")
ctx.addReusableMember(s"transient $timeWindowType $currentWindow = null;")
// gen code to assign window and aggregate
val windowsGrouping = CodeGenUtils.newName("windowsGrouping")
val (processCode, endCode) = if (grouping.isEmpty) {
val (triggerWindowAgg, endWindowAgg) = genWindowAggCodes(
enablePreAcc = true,
ctx,
windowSize,
slideSize,
windowsGrouping,
bufferLimitSize,
windowElementType,
inputTimeFieldIndex,
currentWindow,
None,
outputType)
val process =
s"""
|// prepare windows grouping input
|${buildWindowsGroupingElementExpr.code}
|$windowsGrouping
| .addInputToBuffer(($BINARY_ROW)${buildWindowsGroupingElementExpr.resultTerm});
|$triggerWindowAgg
""".stripMargin
(process, endWindowAgg)
} else {
// project grouping keys from aggregate map's key
val groupKeyTerm = newName("groupKey")
val groupKeyWriterTerm = newName("groupKeyWriter")
val projGroupingKeyCode = ProjectionCodeGenerator.generateProjectionExpression(ctx,
aggMapKeyType,
groupKeyRowType,
grouping.indices.toArray,
inputTerm = reuseAggMapKeyTerm,
outRecordTerm = groupKeyTerm,
outRecordWriterTerm = groupKeyWriterTerm).code
("GroupingKeyFromAggMapKey", ctx,
groupKeyRowType, grouping.indices.toArray,
aggMapKeyType, reuseAggMapKeyTerm, groupKeyTerm, groupKeyWriterTerm)
// gen code to check group key changed
val lastKeyTerm = newName("lastKey")
ctx.addReusableMember(s"transient $BINARY_ROW $lastKeyTerm = null;")
val keyNotEqualsCode = genGroupKeyChangedCheckCode(groupKeyTerm, lastKeyTerm)
val (triggerWindowAgg, endWindowAgg) = genWindowAggCodes(
enablePreAcc = true,
ctx,
windowSize,
slideSize,
windowsGrouping,
bufferLimitSize,
windowElementType,
inputTimeFieldIndex,
currentWindow,
Some(lastKeyTerm),
outputType)
val process =
s"""
|// project agg grouping key
|$projGroupingKeyCode
|// prepare windows grouping input
|${buildWindowsGroupingElementExpr.code}
|if ($lastKeyTerm == null) {
| $lastKeyTerm = $groupKeyTerm.copy();
|} else if ($keyNotEqualsCode) {
| $endWindowAgg
| $lastKeyTerm = $groupKeyTerm.copy();
|}
|$windowsGrouping
| .addInputToBuffer(($BINARY_ROW)${buildWindowsGroupingElementExpr.resultTerm});
|$triggerWindowAgg
""".stripMargin
val end =
s"""
| $endWindowAgg
| $lastKeyTerm = null;
""".stripMargin
(process, end)
}
val sortType = classOf[QuickSort].getName
val bufferIteratorType = classOf[MutableObjectIterator[_]].getName
s"""
| if (hasInput) {
| // sort by grouping keys and assigned timestamp
| $createSorterBufferCode
| new $sortType().sort($sorterBufferTerm);
| // merge and get result
| $bufferIteratorType<$kvType<$binaryRow, $binaryRow>> iterator =
| $sorterBufferTerm.getIterator();
| while (iterator.next($reuseKVTerm) != null) {
| // reusable input fields access
| ${ctx.reuseInputUnboxingCode(bufferWindowElementTerm)}
| $processCode
| }
| $endCode
| }
""".stripMargin
}
private def genOutputDirectly(
windowSize: Long,
inputTerm: String,
inputType: RowType,
outputType: RowType,
aggregateMapTerm: String,
argsMapping: Array[Array[(Int, LogicalType)]],
aggBuffMapping: Array[Array[(Int, LogicalType)]]): String = {
val outputTerm = "hashAggOutput"
ctx.addReusableOutputRecord(outputType, getOutputRowClass, outputTerm)
val (reuseAggMapEntryTerm, reuseAggMapKeyTerm, reuseAggBufferTerm) =
HashAggCodeGenHelper.prepareTermForAggMapIteration(
ctx, outputTerm, outputType, aggMapKeyRowType, aggBufferRowType, getOutputRowClass)
val windowAggOutputExpr = if (isFinal) {
// project group key if exists
val (groupKey, projGroupKeyCode) = if (!grouping.isEmpty) {
val groupKey = newName("groupKey")
val keyProjectionCode = ProjectionCodeGenerator.generateProjectionExpression(
ctx,
aggMapKeyRowType,
groupKeyRowType,
grouping.indices.toArray,
inputTerm = reuseAggMapKeyTerm,
outRecordTerm = groupKey,
outRecordWriterTerm = newName("groupKeyWriter")).code
(Some(groupKey), keyProjectionCode)
} else {
(None, "")
}
// gen agg result
val resultExpr = genHashAggOutputExpr(
isMerge,
isFinal,
ctx,
builder,
auxGrouping,
aggregates,
argsMapping,
aggBuffMapping,
outputTerm,
outputType,
inputTerm,
inputType,
groupKey,
reuseAggBufferTerm,
aggBufferRowType)
// update current window
val timeWindowType = classOf[TimeWindow].getName
val currentWindow = newName("currentWindow")
ctx.addReusableMember(s"transient $timeWindowType $currentWindow = null;")
val assignedTsIndex = grouping.length
val currentWindowCode =
s"""
|$currentWindow = $timeWindowType.of(
|${convertToLongValue(s"$reuseAggMapKeyTerm.get$timestampInternalTypeName" +
s"($assignedTsIndex)")},
|${convertToLongValue(s"$reuseAggMapKeyTerm.get$timestampInternalTypeName" +
s"($assignedTsIndex)")}
| + ${windowSize}L);
""".stripMargin
val winResExpr =
genWindowAggOutputWithWindowPorps(ctx, outputType, currentWindow, resultExpr)
val output =
s"""
|// update current window
|$currentWindowCode
|// project current group keys if exist
|$projGroupKeyCode
|// build agg output
|${winResExpr.code}
""".stripMargin
new GeneratedExpression(
winResExpr.resultTerm, "false", output, outputType)
} else {
genHashAggOutputExpr(
isMerge,
isFinal,
ctx,
builder,
auxGrouping,
aggregates,
argsMapping,
aggBuffMapping,
outputTerm,
outputType,
inputTerm,
inputType,
Some(reuseAggMapKeyTerm),
reuseAggBufferTerm,
aggBufferRowType)
}
// -------------------------------------------------------------------------------------------
    // gen code to iterate over the aggregate map and output to downstream
val mapEntryTypeTerm = classOf[BytesHashMap.Entry].getCanonicalName
s"""
|org.apache.flink.util.MutableObjectIterator<$mapEntryTypeTerm> iterator =
| $aggregateMapTerm.getEntryIterator();
|while (iterator.next($reuseAggMapEntryTerm) != null) {
| ${ctx.reuseInputUnboxingCode(reuseAggBufferTerm)}
| ${windowAggOutputExpr.code}
| ${generateCollect(windowAggOutputExpr.resultTerm)}
|}
""".stripMargin
}
private def genHashWindowAggCodes(
buffLimitSize: Int,
windowSize: Long,
slideSize: Long,
inputTerm: String,
inputType: RowType,
outputType: RowType,
aggMapKey: String,
logTerm: String): (String, String) = {
// prepare aggregate map
val aggMapKeyTypesTerm = CodeGenUtils.newName("aggMapKeyTypes")
val aggBufferTypesTerm = CodeGenUtils.newName("aggBufferTypes")
prepareHashAggKVTypes(
ctx, aggMapKeyTypesTerm, aggBufferTypesTerm, aggMapKeyRowType, aggBufferRowType)
val aggregateMapTerm = CodeGenUtils.newName("aggregateMap")
prepareHashAggMap(ctx, aggMapKeyTypesTerm, aggBufferTypesTerm, aggregateMapTerm)
// gen code to do aggregate by window using aggregate map
val currentAggBufferTerm =
ctx.addReusableLocalVariable(classOf[BinaryRow].getName, "currentAggBuffer")
val (initedAggBufferExpr, doAggregateExpr, outputResultFromMap) = genGroupWindowHashAggCodes(
isMerge,
isFinal,
windowSize,
slideSize,
aggMapKeyTypesTerm,
aggBufferTypesTerm,
buffLimitSize,
aggregateMapTerm,
inputTerm,
inputType,
outputType,
currentAggBufferTerm)
// -------------------------------------------------------------------------------------------
val lazyInitAggBufferCode = if (auxGrouping.nonEmpty) {
s"""
|// lazy init agg buffer (with auxGrouping)
|${initedAggBufferExpr.code}
""".stripMargin
} else {
""
}
val lookupInfo =
ctx.addReusableLocalVariable(classOf[BytesHashMap.LookupInfo].getCanonicalName, "lookupInfo")
val dealWithAggHashMapOOM = if (isFinal) {
s"""throw new java.io.IOException("Hash window aggregate map OOM.");"""
} else {
val retryAppend = genRetryAppendToMap(
aggregateMapTerm, aggMapKey, initedAggBufferExpr, lookupInfo, currentAggBufferTerm)
val logMapOutput = CodeGenUtils.genLogInfo(
logTerm, s"BytesHashMap out of memory with {} entries, output directly.",
s"$aggregateMapTerm.getNumElements()")
s"""
|$logMapOutput
| // hash map out of memory, output directly
|$outputResultFromMap
| // retry append
|$retryAppend
""".stripMargin
}
val process =
s"""
|// look up output buffer using current key (grouping keys ..., assigned timestamp)
|$lookupInfo = $aggregateMapTerm.lookup($aggMapKey);
|$currentAggBufferTerm = $lookupInfo.getValue();
|if (!$lookupInfo.isFound()) {
| $lazyInitAggBufferCode
| // append empty agg buffer into aggregate map for current group key
| try {
| $currentAggBufferTerm =
| $aggregateMapTerm.append($lookupInfo, ${initedAggBufferExpr.resultTerm});
| } catch (java.io.EOFException exp) {
| $dealWithAggHashMapOOM
| }
|}
|// aggregate buffer fields access
|${ctx.reuseInputUnboxingCode(currentAggBufferTerm)}
|// do aggregate and update agg buffer
|${doAggregateExpr.code}
""".stripMargin.trim
(process, outputResultFromMap)
}
}
|
bowenli86/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/agg/batch/HashWindowCodeGenerator.scala
|
Scala
|
apache-2.0
| 31,573
|
// scalac: -Xfatal-warnings
//
import scala.language.reflectiveCalls
trait A {
trait Concrete { def conco: Int = 1 }
type Foo <: { def bippy: Int }
type Bar <: { def barry: Int }
implicit def barTag: scala.reflect.ClassTag[Bar]
def f1(x: Any) = x match {
case x: Foo with Concrete => x.bippy + x.conco
case _ => -1
}
def f2(x: Any) = x match {
case x: Concrete with Foo => x.bippy + x.conco
case _ => -1
}
def f3(x: Any) = x match {
case x: Foo with Bar => x.bippy + x.barry
case _ => -1
}
def f4(x: Any) = x match {
case x: (Foo @unchecked) => x.bippy // warns, suppressed
case _ => -1
}
def f5(x: Any) = x match {
case x: (Bar @unchecked) => x.barry // warns (but about the "outer reference"), suppressed
case _ => -1
}
}
trait B extends A {
type Foo <: { def bippy: Int ; def dingo: Int }
type Bar <: { def barry: Int ; def bongo: Int }
override implicit def barTag: scala.reflect.ClassTag[Bar]
override def f1(x: Any) = x match {
case x: Foo with Concrete => x.bippy + x.dingo + x.conco
case _ => -1
}
override def f2(x: Any) = x match {
case x: Concrete with Foo => x.bippy + x.dingo + x.conco
case _ => -1
}
override def f3(x: Any) = x match {
case x: Foo with Bar with Concrete => x.bippy + x.barry + x.dingo + x.conco + x.bongo
case _ => -1
}
override def f4(x: Any) = x match {
case x: (Foo @unchecked) => x.bippy + x.dingo // warns, suppressed
case _ => -1
}
override def f5(x: Any) = x match {
case x: (Bar @unchecked) => x.barry + x.bongo // warns (but about the "outer reference"), suppressed
case _ => -1
}
}
object Test {
abstract class Base extends A {
trait Foo {
def bippy = 2
def dingo = 3
}
trait Bar {
def barry = 2
def bongo = 3
}
implicit def barTag: scala.reflect.ClassTag[Bar] = scala.reflect.ClassTag(classOf[Bar])
def run(): Unit = {
println("f1")
wrap(f1(new Concrete {}))
wrap(f1(new Foo {}))
wrap(f1(new Bar {}))
wrap(f1(new Foo with Concrete {}))
wrap(f1(new Concrete with Foo {}))
println("\nf2")
wrap(f2(new Concrete {}))
wrap(f2(new Foo {}))
wrap(f2(new Bar {}))
wrap(f2(new Foo with Concrete {}))
wrap(f2(new Concrete with Foo {}))
wrap(f2(new Bar with Concrete {}))
wrap(f2(new Concrete with Bar {}))
wrap(f2(new Concrete with Foo with Bar {}))
wrap(f2(new Foo with Bar with Concrete {}))
println("\nf3")
wrap(f3(new Concrete {}))
wrap(f3(new Foo {}))
wrap(f3(new Bar {}))
wrap(f3(new Foo with Concrete {}))
wrap(f3(new Concrete with Foo {}))
wrap(f3(new Bar with Concrete {}))
wrap(f3(new Concrete with Bar {}))
wrap(f3(new Concrete with Foo with Bar {}))
wrap(f3(new Foo with Bar with Concrete {}))
println("\nf4")
wrap(f4(new Concrete {}))
wrap(f4(new Foo {}))
wrap(f4(new Bar {}))
wrap(f4(new Foo with Concrete {}))
wrap(f4(new Concrete with Foo {}))
wrap(f4(new Bar with Concrete {}))
wrap(f4(new Concrete with Bar {}))
wrap(f4(new Concrete with Foo with Bar {}))
wrap(f4(new Foo with Bar with Concrete {}))
println("\nf5")
wrap(f5(new Concrete {}))
wrap(f5(new Foo {}))
wrap(f5(new Bar {}))
wrap(f5(new Foo with Concrete {}))
wrap(f5(new Concrete with Foo {}))
wrap(f5(new Bar with Concrete {}))
wrap(f5(new Concrete with Bar {}))
wrap(f5(new Concrete with Foo with Bar {}))
wrap(f5(new Foo with Bar with Concrete {}))
}
}
object ao extends Base
object bo extends Base with B
private def wrap(body: => Any): Unit = {
try println(body)
catch { case ex: NoSuchMethodException => println(ex) }
}
def main(args: Array[String]): Unit = {
ao.run()
bo.run()
}
}
// java.lang.NoSuchMethodException: Test$$anon$1.bippy()
|
scala/scala
|
test/files/neg/t7721.scala
|
Scala
|
apache-2.0
| 4,142
|
/*******************************************************************************
Copyright (c) 2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMCore
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr}
import kr.ac.kaist.jsaf.analysis.typing.{ControlPoint, Helper, PreHelper, Semantics}
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
object DOMStringList extends DOM {
private val name = "DOMStringList"
  /* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
  /* constructor or object */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValue(Value(NullTop)))),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
  /* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("item", AbsBuiltinFunc("DOMStringList.item", 1)),
("contains", AbsBuiltinFunc("DOMStringList.contains", 1))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global)
)
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("DOMStringList.item"-> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val n_index = Helper.toNumber(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
if (n_index </ NumBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
val n_length = lset_this.foldLeft[AbsNumber](NumBot)((n, l) =>
n + Helper.toNumber(Helper.toPrimitive(Helper.Proto(h, l, AbsString.alpha("length")))))
val s_index = Helper.toString(PValue(n_index))
// Returns the indexth item in the collection.
val v_return = lset_this.foldLeft(ValueBot)((v, l) => v + Helper.Proto(h, l, s_index))
// If index is greater than or equal to the number of nodes in the list, this returns null.
val v_null = (n_index < n_length) match {
case BoolBot | BoolTrue => ValueBot
case BoolTop | BoolFalse => Value(NullTop)
}
((Helper.ReturnStore(h, v_return + v_null), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
})),
("DOMStringList.contains"-> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val s_str = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
if (s_str </ StrBot)
/* imprecise semantic */
((Helper.ReturnStore(h, Value(BoolTop)), ctx), (he, ctxe))
else
((HeapBot, ContextBot), (he, ctxe))
}))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
("DOMStringList.item"-> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val PureLocalLoc = cfg.getPureLocal(cp)
/* arguments */
val n_index = PreHelper.toNumber(PreHelper.toPrimitive(getArgValue_pre(h, ctx, args, "0", PureLocalLoc)))
if (n_index </ NumBot) {
val lset_this = h(PureLocalLoc)("@this")._1._2._2
val n_length = lset_this.foldLeft[AbsNumber](NumBot)((n, l) =>
n + PreHelper.toNumber(PreHelper.toPrimitive(PreHelper.Proto(h, l, AbsString.alpha("length")))))
val s_index = PreHelper.toString(PValue(n_index))
// Returns the indexth item in the collection.
val v_return = lset_this.foldLeft(ValueBot)((v, l) => v + PreHelper.Proto(h, l, s_index))
// If index is greater than or equal to the number of nodes in the list, this returns null.
val v_null = (n_index < n_length) match {
case BoolBot | BoolTrue => ValueBot
case BoolTop | BoolFalse => Value(NullTop)
}
((PreHelper.ReturnStore(h, PureLocalLoc, v_return + v_null), ctx), (he, ctxe))
}
else
((h, ctx), (he, ctxe))
})),
("DOMStringList.contains"-> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val PureLocalLoc = cfg.getPureLocal(cp)
/* arguments */
val s_str = PreHelper.toString(PreHelper.toPrimitive(getArgValue_pre(h, ctx, args, "0", PureLocalLoc)))
if (s_str </ StrBot)
/* imprecise semantic */
((PreHelper.ReturnStore(h, PureLocalLoc, Value(BoolTop)), ctx), (he, ctxe))
else
((h, ctx), (he, ctxe))
}))
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
// TODO: NYI
// case "DOMStringList.item" =>
// case "DOMStringList.contains" =>
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
// TODO: NYI
// case "DOMStringList.item" =>
// case "DOMStringList.contains" =>
)
}
/* instance */
//def instantiate() = Unit // not yet implemented
  // an instance of DOMStringList should have a 'length' property
}
|
daejunpark/jsaf
|
src/kr/ac/kaist/jsaf/analysis/typing/models/DOMCore/DOMStringList.scala
|
Scala
|
bsd-3-clause
| 6,692
|
package org.elinker.core.api.service
import spray.routing.HttpService
import spray.routing.authentication._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by nilesh on 9/12/14.
*/
trait TokenService extends HttpService with UserAuthentication {
val tokenRoute =
path("token") {
authenticate(BasicAuth(authenticateUser, "token")) {
user => {
complete {
user.token
}
}
}
}
}
|
nilesh-c/elinker
|
src/main/scala/org/elinker/core/api/service/TokenService.scala
|
Scala
|
apache-2.0
| 473
|
package controllers.util
import akka.actor.ActorSelection
import javax.inject.Inject
import com.overviewdocs.messages.DocumentSetCommands
import modules.RemoteActorSystemModule
class JobQueueSender @Inject() (remoteActorSystemModule: RemoteActorSystemModule) {
protected def messageBroker: ActorSelection = remoteActorSystemModule.messageBroker
def send(command: DocumentSetCommands.Command): Unit = {
messageBroker ! command
}
}
|
overview/overview-server
|
web/app/controllers/util/JobQueueSender.scala
|
Scala
|
agpl-3.0
| 444
|
package com.arcusys.valamis.course.model
import com.arcusys.learn.liferay.services.GroupLocalServiceHelper
import scala.util.{Failure, Success, Try}
/**
* Created By:
* User: zsoltberki
* Date: 29.4.2016
*/
object CourseMembershipType extends Enumeration {
type CourseMembershipType = Value
val OPEN = Value(GroupLocalServiceHelper.TYPE_SITE_OPEN)
val ON_REQUEST = Value(GroupLocalServiceHelper.TYPE_SITE_RESTRICTED)
val CLOSED = Value(GroupLocalServiceHelper.TYPE_SITE_PRIVATE)
def toValidString(v: Int): String = Try(this.apply(v)) match {
case Success(s) => s.toString
case Failure(f) => ""
}
}
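// Example (illustrative, assuming Liferay's TYPE_SITE_OPEN constant is one of the three ids above):
// CourseMembershipType.toValidString(GroupLocalServiceHelper.TYPE_SITE_OPEN) returns "OPEN",
// while an id that matches no member, e.g. toValidString(-42), returns "".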
|
arcusys/Valamis
|
valamis-course/src/main/scala/com/arcusys/valamis/course/model/CourseMembershipType.scala
|
Scala
|
gpl-3.0
| 630
|
package com.softwaremill.bootzooka.http
import cats.effect.IO
import cats.implicits._
import com.softwaremill.bootzooka._
import com.softwaremill.bootzooka.infrastructure.Json._
import com.softwaremill.bootzooka.util.Id
import com.softwaremill.tagging._
import com.typesafe.scalalogging.StrictLogging
import io.circe.Printer
import sttp.model.StatusCode
import sttp.tapir.Codec.PlainCodec
import sttp.tapir.generic.auto._
import sttp.tapir.json.circe.TapirJsonCirce
import sttp.tapir.{Codec, Endpoint, EndpointOutput, Schema, SchemaType, Tapir}
import tsec.common.SecureRandomId
/** Helper class for defining HTTP endpoints. Import the members of this class when defining an HTTP API using tapir. */
class Http() extends Tapir with TapirJsonCirce with TapirSchemas with StrictLogging {
val jsonErrorOutOutput: EndpointOutput[Error_OUT] = jsonBody[Error_OUT]
/** Description of the output, that is used to represent an error that occurred during endpoint invocation. */
val failOutput: EndpointOutput[(StatusCode, Error_OUT)] = statusCode.and(jsonErrorOutOutput)
/** Base endpoint description for non-secured endpoints. Specifies that errors are always returned as JSON values corresponding to the
* [[Error_OUT]] class.
*/
val baseEndpoint: Endpoint[Unit, Unit, (StatusCode, Error_OUT), Unit, Any] =
endpoint.errorOut(failOutput)
/** Base endpoint description for secured endpoints. Specifies that errors are always returned as JSON values corresponding to the
* [[Error_OUT]] class, and that authentication is read from the `Authorization: Bearer` header.
*/
val secureEndpoint: Endpoint[Id, Unit, (StatusCode, Error_OUT), Unit, Any] =
baseEndpoint.securityIn(auth.bearer[String]().map(_.asInstanceOf[Id])(identity))
//
private val InternalServerError = (StatusCode.InternalServerError, "Internal server error")
private val failToResponseData: Fail => (StatusCode, String) = {
case Fail.NotFound(what) => (StatusCode.NotFound, what)
case Fail.Conflict(msg) => (StatusCode.Conflict, msg)
case Fail.IncorrectInput(msg) => (StatusCode.BadRequest, msg)
case Fail.Forbidden => (StatusCode.Forbidden, "Forbidden")
case Fail.Unauthorized(msg) => (StatusCode.Unauthorized, msg)
case _ => InternalServerError
}
private def exceptionToErrorOut(e: Exception): (StatusCode, Error_OUT) = {
val (statusCode, message) = e match {
case f: Fail => failToResponseData(f)
case _ =>
logger.error("Exception when processing request", e)
InternalServerError
}
logger.warn(s"Request fail: $message")
val errorOut = Error_OUT(message)
(statusCode, errorOut)
}
//
implicit class IOOut[T](f: IO[T]) {
    /** An extension method for [[IO]], which converts a possibly failed IO to one which either returns the error converted to an
* [[Error_OUT]] instance, or returns the successful value unchanged.
*/
def toOut: IO[Either[(StatusCode, Error_OUT), T]] = {
f.map(t => t.asRight[(StatusCode, Error_OUT)]).recover { case e: Exception =>
exceptionToErrorOut(e).asLeft[T]
}
}
}
override def jsonPrinter: Printer = noNullsPrinter
}
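/** A minimal usage sketch of the helpers above (illustrative only, not part of bootzooka's API):
  * a GET endpoint derived from `baseEndpoint`, so any error is reported as a JSON [[Error_OUT]] value.
  */
object HttpUsageSketch {
  private val http = new Http()
  import http._

  // GET /hello?name=... returning plain text; errors use the shared (StatusCode, Error_OUT) output
  val helloEndpoint: Endpoint[Unit, String, (StatusCode, Error_OUT), String, Any] =
    baseEndpoint.get.in("hello").in(query[String]("name")).out(stringBody)
}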
/** Schemas for custom types used in endpoint descriptions (as parts of query parameters, JSON bodies, etc.) */
trait TapirSchemas {
implicit val idPlainCodec: PlainCodec[SecureRandomId] =
Codec.string.map(_.asInstanceOf[SecureRandomId])(identity)
implicit def taggedPlainCodec[U, T](implicit uc: PlainCodec[U]): PlainCodec[U @@ T] =
uc.map(_.taggedWith[T])(identity)
implicit val schemaForId: Schema[Id] = Schema(SchemaType.SString[Id]())
implicit def schemaForTagged[U, T](implicit uc: Schema[U]): Schema[U @@ T] = uc.asInstanceOf[Schema[U @@ T]]
}
case class Error_OUT(error: String)
|
softwaremill/bootzooka
|
backend/src/main/scala/com/softwaremill/bootzooka/http/Http.scala
|
Scala
|
apache-2.0
| 3,839
|
package uk.org.nbn.nbnv.importer.validation
import uk.org.nbn.nbnv.importer.records.NbnRecord
import uk.org.nbn.nbnv.importer.fidelity.{Result, ResultLevel}
//validate SiteKey length
class Nbnv79Validator {
def validate(record: NbnRecord) = {
val validator = new LengthValidator
validator.validate("NBNV-79", record.key, "SiteKey", record.siteKey getOrElse "", 30)
}
}
|
JNCC-dev-team/nbn-importer
|
importer/src/main/scala/uk/org/nbn/nbnv/importer/validation/Nbnv79Validator.scala
|
Scala
|
apache-2.0
| 397
|
/*
* Copyright (c) 2011, Daniel Spiewak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* - Neither the name of "Anti-XML" nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.codecommit.antixml
package performance
import org.w3c.dom
import scala.collection.IndexedSeqOptimized
sealed trait JavaNodeSeqWithIndexedSeq extends scala.collection.IndexedSeq[dom.Node]
with dom.NodeList
with IndexedSeqOptimized[dom.Node,scala.collection.IndexedSeq[dom.Node]] {
/** Simulated one-level select */
def \\(name: String): JavaNodeSeqWithIndexedSeq = {
val b = JavaNodeSeqWithIndexedSeq.newBuilder
for(node <- this; node2 <- JavaNodeSeqWithIndexedSeq.wrap(node.getChildNodes)) node2 match {
case e: dom.Element => {
if (simpleNameOf(e) == name)
b += e
}
case _ => ()
}
b.result
}
}
object JavaNodeSeqWithIndexedSeq {
def wrap(s: scala.collection.IndexedSeq[dom.Node]) = s match {
case jnswis: JavaNodeSeqWithIndexedSeq => jnswis
case _ => new JavaNodeSeqWithIndexedSeq {
override def apply(i: Int) = s(i)
override def length = s.length
override def getLength = s.length
override def item(i: Int) = s(i)
}
}
def wrap(nl: dom.NodeList) = nl match {
case jnswis: JavaNodeSeqWithIndexedSeq => jnswis
case _=> new JavaNodeSeqWithIndexedSeq {
override def apply(i: Int) = nl.item(i)
override def length = nl.getLength
override def getLength = nl.getLength
override def item(i: Int) = nl.item(i)
}
}
def newBuilder = Array.newBuilder[dom.Node] mapResult {wrap(_)}
//Use this instead of `wrap` to force a lazy NodeList to be realized.
def copy(nl: dom.NodeList) = {
val b = newBuilder
for(i <- 0 until nl.getLength)
b += nl.item(i)
b.result
}
}
|
djspiewak/anti-xml
|
src/test/scala/com/codecommit/antixml/performance/JavaNodeSeqWithIndexedSeq.scala
|
Scala
|
bsd-3-clause
| 3,222
|
/*
* Copyright (c) 2015
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tudarmstadt.lt.flinkdt.pipes
import de.tudarmstadt.lt.flinkdt.tasks._
import de.tudarmstadt.lt.flinkdt.types.{CT2red, CT2, CT2def}
import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.scala._
/**
* Created by Steffen Remus
*/
object RerankDT extends App {
var config = DSTaskConfig.resolveConfig(args)
if(!config.hasPath("dt.jobname"))
config = DSTaskConfig.resolveConfig(args ++ Array("-dt.jobname", getClass.getSimpleName.replaceAllLiterally("$","")))
DSTaskConfig.load(config)
// input data is output of dt computation
val in = DSTaskConfig.io_dt
val hash = DSTaskConfig.jobname.toLowerCase.contains("hash")
val ct_computation_chain =
if(hash) {
{
/* */
ComputeCT2[CT2red[Array[Byte],Array[Byte]], CT2def[Array[Byte],Array[Byte]], Array[Byte],Array[Byte]](prune = true, sigfun = _.lmi, order = Order.DESCENDING) ~> DSWriter(DSTaskConfig.io_accumulated_CT,s"${DSTaskConfig.io_dt_sorted}-rerank") ~>
/* */
Convert.Hash.Reverse[CT2def[Array[Byte], Array[Byte]], CT2def[String, String], String, String](DSTaskConfig.io_keymap)
/* */
}
} else {
{
/* */
ComputeCT2[CT2def[String,String], CT2def[String,String], String,String](prune = true, sigfun = _.lmi, order = Order.DESCENDING) ~> DSWriter(DSTaskConfig.io_accumulated_CT,s"${DSTaskConfig.io_dt_sorted}-rerank")
/* */
}
}
val rerank_chain = ct_computation_chain ~> FilterSortDT[CT2def[String, String],String, String](_.lmi)
rerank_chain.process(input = DSTaskConfig.io_dt, output = s"${DSTaskConfig.io_dt_sorted}-rerank")
}
|
remstef/flinkfun
|
src/main/scala/de/tudarmstadt/lt/flinkdt/pipes/RerankDT.scala
|
Scala
|
apache-2.0
| 2,243
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.{Locale, OptionalLong}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.IO_WARNING_LARGEFILETHRESHOLD
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionSet}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.connector.read.{Batch, InputPartition, Scan, Statistics, SupportsReportStatistics}
import org.apache.spark.sql.execution.PartitionedFileUtil
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.connector.SupportsMetadata
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
trait FileScan extends Scan
with Batch with SupportsReportStatistics with SupportsMetadata with Logging {
/**
* Returns whether a file with `path` could be split or not.
*/
def isSplitable(path: Path): Boolean = {
false
}
def sparkSession: SparkSession
def fileIndex: PartitioningAwareFileIndex
/**
* Returns the required data schema
*/
def readDataSchema: StructType
/**
* Returns the required partition schema
*/
def readPartitionSchema: StructType
/**
* Returns the filters that can be use for partition pruning
*/
def partitionFilters: Seq[Expression]
/**
* Returns the data filters that can be use for file listing
*/
def dataFilters: Seq[Expression]
/**
* Create a new `FileScan` instance from the current one
* with different `partitionFilters` and `dataFilters`
*/
def withFilters(partitionFilters: Seq[Expression], dataFilters: Seq[Expression]): FileScan
/**
* If a file with `path` is unsplittable, return the unsplittable reason,
* otherwise return `None`.
*/
def getFileUnSplittableReason(path: Path): String = {
assert(!isSplitable(path))
"undefined"
}
protected def seqToString(seq: Seq[Any]): String = seq.mkString("[", ", ", "]")
override def equals(obj: Any): Boolean = obj match {
case f: FileScan =>
      fileIndex == f.fileIndex && readSchema == f.readSchema &&
ExpressionSet(partitionFilters) == ExpressionSet(f.partitionFilters) &&
ExpressionSet(dataFilters) == ExpressionSet(f.dataFilters)
case _ => false
}
override def hashCode(): Int = getClass.hashCode()
val maxMetadataValueLength = 100
override def description(): String = {
val metadataStr = getMetaData().toSeq.sorted.map {
case (key, value) =>
val redactedValue =
Utils.redact(sparkSession.sessionState.conf.stringRedactionPattern, value)
key + ": " + StringUtils.abbreviate(redactedValue, maxMetadataValueLength)
}.mkString(", ")
s"${this.getClass.getSimpleName} $metadataStr"
}
override def getMetaData(): Map[String, String] = {
val locationDesc =
fileIndex.getClass.getSimpleName +
Utils.buildLocationMetadata(fileIndex.rootPaths, maxMetadataValueLength)
Map(
"Format" -> s"${this.getClass.getSimpleName.replace("Scan", "").toLowerCase(Locale.ROOT)}",
"ReadSchema" -> readDataSchema.catalogString,
"PartitionFilters" -> seqToString(partitionFilters),
"DataFilters" -> seqToString(dataFilters),
"Location" -> locationDesc)
}
protected def partitions: Seq[FilePartition] = {
val selectedPartitions = fileIndex.listFiles(partitionFilters, dataFilters)
val maxSplitBytes = FilePartition.maxSplitBytes(sparkSession, selectedPartitions)
val partitionAttributes = fileIndex.partitionSchema.toAttributes
val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
val readPartitionAttributes = readPartitionSchema.map { readField =>
attributeMap.get(normalizeName(readField.name)).getOrElse {
throw new AnalysisException(s"Can't find required partition column ${readField.name} " +
s"in partition schema ${fileIndex.partitionSchema}")
}
}
lazy val partitionValueProject =
GenerateUnsafeProjection.generate(readPartitionAttributes, partitionAttributes)
val splitFiles = selectedPartitions.flatMap { partition =>
// Prune partition values if part of the partition columns are not required.
val partitionValues = if (readPartitionAttributes != partitionAttributes) {
partitionValueProject(partition.values).copy()
} else {
partition.values
}
partition.files.flatMap { file =>
val filePath = file.getPath
PartitionedFileUtil.splitFiles(
sparkSession = sparkSession,
file = file,
filePath = filePath,
isSplitable = isSplitable(filePath),
maxSplitBytes = maxSplitBytes,
partitionValues = partitionValues
)
}.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)
}
if (splitFiles.length == 1) {
val path = new Path(splitFiles(0).filePath)
if (!isSplitable(path) && splitFiles(0).length >
sparkSession.sparkContext.getConf.get(IO_WARNING_LARGEFILETHRESHOLD)) {
logWarning(s"Loading one large unsplittable file ${path.toString} with only one " +
s"partition, the reason is: ${getFileUnSplittableReason(path)}")
}
}
FilePartition.getFilePartitions(sparkSession, splitFiles, maxSplitBytes)
}
override def planInputPartitions(): Array[InputPartition] = {
partitions.toArray
}
override def estimateStatistics(): Statistics = {
new Statistics {
override def sizeInBytes(): OptionalLong = {
val compressionFactor = sparkSession.sessionState.conf.fileCompressionFactor
val size = (compressionFactor * fileIndex.sizeInBytes).toLong
OptionalLong.of(size)
}
override def numRows(): OptionalLong = OptionalLong.empty()
}
}
override def toBatch: Batch = this
override def readSchema(): StructType =
StructType(readDataSchema.fields ++ readPartitionSchema.fields)
// Returns whether the two given arrays of [[Filter]]s are equivalent.
protected def equivalentFilters(a: Array[Filter], b: Array[Filter]): Boolean = {
a.sortBy(_.hashCode()).sameElements(b.sortBy(_.hashCode()))
}
private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
private def normalizeName(name: String): String = {
if (isCaseSensitive) {
name
} else {
name.toLowerCase(Locale.ROOT)
}
}
}
|
dbtsai/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileScan.scala
|
Scala
|
apache-2.0
| 7,425
|
package com.twitter.finagle.mysql
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.scalatest.{MustMatchers, FunSuite}
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.mockito.Matchers._
import com.twitter.util.{Await, Time}
/**
* Tests the transaction functionality of the MySQL client.
*/
@RunWith(classOf[JUnitRunner])
class TransactionTest extends FunSuite with MockitoSugar with MustMatchers {
private val sqlQuery = "SELECT * FROM FOO"
test("transaction test uses a single service repeatedly and closes it upon completion") {
val service = new MockService()
val factory = spy(new MockServiceFactory(service))
val client = Client(factory)
val result = client.transaction[String] { c =>
for {
r1 <- c.query(sqlQuery)
r2 <- c.query(sqlQuery)
} yield "success"
}
Await.result(result) must equal ("success")
service.requests must equal (List(
"START TRANSACTION", sqlQuery, sqlQuery, "COMMIT"
).map(QueryRequest(_)))
verify(factory, times(1)).apply()
verify(factory, times(0)).close(any[Time])
}
test("transaction test rollback") {
val service = new MockService()
val factory = spy(new MockServiceFactory(service))
val client = Client(factory)
try {
client.transaction[String] { c =>
c.query(sqlQuery).map { r1 =>
throw new RuntimeException("Fake exception to trigger ROLLBACK")
"first response object"
}.flatMap { r2 =>
c.query(sqlQuery).map { r3 =>
"final response object"
}
}
}
} catch {
case e: Exception =>
}
service.requests must equal (List(
"START TRANSACTION", sqlQuery, "ROLLBACK"
).map(QueryRequest(_)))
verify(factory, times(1)).apply()
verify(factory, times(0)).close(any[Time])
}
}
|
adriancole/finagle
|
finagle-mysql/src/test/scala/com/twitter/finagle/mysql/unit/TransactionTest.scala
|
Scala
|
apache-2.0
| 1,895
|
trait Cap
trait Toolbox {
type Tree
val tpd: TypedTrees
trait TypedTrees {
type Tree
}
val Apply: ApplyImpl
trait ApplyImpl {
def unapply(tree: Tree): Option[(Tree, Seq[Tree])]
def unapply(tree: tpd.Tree)(implicit c: Cap): Option[(tpd.Tree, Seq[tpd.Tree])]
}
}
class Test(val tb: Toolbox) {
import tb._
implicit val cap: Cap = null
def foo(tree: Tree): Int = (tree: Any) match {
case tb.Apply(fun, args) => 3 // error: ambiguous overload of unapply
}
def bar(tree: tpd.Tree): Int = (tree: Any) match {
case Apply(fun, args) => 3 // error: ambiguous overload of unapply
}
}
|
som-snytt/dotty
|
tests/neg/i2378.scala
|
Scala
|
apache-2.0
| 628
|
package com.sksamuel.avro4s.kafka
import java.io.ByteArrayOutputStream
import com.sksamuel.avro4s.{Decoder, Encoder}
import com.sksamuel.avro4s.{AvroInputStream, AvroOutputStream, AvroSchema, SchemaFor}
import org.apache.avro.Schema
import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
class GenericSerde[T >: Null : SchemaFor : Encoder : Decoder] extends Serde[T]
with Deserializer[T]
with Serializer[T]
with Serializable {
val schema: Schema = AvroSchema[T]
override def serializer(): Serializer[T] = this
override def deserializer(): Deserializer[T] = this
override def deserialize(topic: String, data: Array[Byte]): T = {
if (data == null) null else {
val input = AvroInputStream.binary[T].from(data).build(schema)
input.iterator.next()
}
}
override def close(): Unit = ()
override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
override def serialize(topic: String, data: T): Array[Byte] = {
val baos = new ByteArrayOutputStream()
val output = AvroOutputStream.binary[T].to(baos).build(schema)
output.write(data)
output.close()
baos.toByteArray
}
}
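/** A small round-trip sketch (illustrative; `Tweet` is a hypothetical payload type, not part of avro4s).
  * The serde is both a Kafka `Serializer` and `Deserializer`, so a single instance can be registered
  * for keys or values in producer/consumer properties or a Kafka Streams topology.
  */
object GenericSerdeRoundTripSketch {
  case class Tweet(author: String, text: String)

  val tweetSerde = new GenericSerde[Tweet]

  // serialize to Avro binary and read it back; the topic argument is ignored by this serde
  val bytes: Array[Byte] = tweetSerde.serialize("tweets", Tweet("someone", "hello"))
  val restored: Tweet = tweetSerde.deserialize("tweets", bytes)
}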
|
51zero/avro4s
|
avro4s-kafka/src/main/scala/com/sksamuel/avro4s/kafka/GenericSerde.scala
|
Scala
|
mit
| 1,182
|
/** Case class for converting RDD to DataFrame */
// Define the schema using a case class.
// Note: Case classes in Scala 2.10 can support only up to 22 fields. To work around this limit,
// you can use custom classes that implement the Product interface.
case class Record(starttime: String)
|
faganpe/KafkaStreamingPOC
|
src/main/scala/Record.scala
|
Scala
|
apache-2.0
| 293
|
package lampetia.cg.extensions
/**
* @author Hossam Karim
*/
object Scalate {
def seperate[A](iterable: Seq[A], seperator: String = ",")(f: A => String) =
iterable.map(f).mkString(seperator)
}
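// Examples (illustrative): Scalate.seperate(Seq(1, 2, 3))(_.toString) yields "1,2,3",
// and Scalate.seperate(Seq("a", "b"), " | ")(identity) yields "a | b".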
|
rosama86/lampetia
|
lampetia-code-gen/src/main/scala/lampetia/cg/extensions/Scalate.scala
|
Scala
|
mit
| 205
|
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.learn.hooks
import org.platanios.tensorflow.api.config.TensorBoardConfig
import org.platanios.tensorflow.api.core.client.Session
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import scala.util.Try
/** Launches a TensorBoard server for the duration of a run.
*
 * This can be useful when running on a server or in a distributed environment and you want to monitor the run.
*
* @param tensorBoardConfig TensorBoard configuration to use.
*
* @author Emmanouil Antonios Platanios
*/
private[learn] class TensorBoardHook protected (val tensorBoardConfig: TensorBoardConfig) extends Hook {
private var tensorBoardProcess: Option[Process] = None
override protected def begin(): Unit = tensorBoardProcess = {
Option(tensorBoardConfig).flatMap(config => {
TensorBoardHook.logger.info(
s"Launching TensorBoard in '${config.host}:${config.port}' " +
s"for log directory '${config.logDir.toAbsolutePath}'.")
val processOrError = Try(config.processBuilder.start())
processOrError.failed.foreach(e => {
TensorBoardHook.logger.warn(e.getMessage)
TensorBoardHook.logger.warn(
"Could not launch TensorBoard. Please make sure it is installed correctly and in your PATH.")
})
processOrError.toOption
})
}
override protected def end(session: Session): Unit = {
tensorBoardProcess.foreach(process => {
TensorBoardHook.logger.info("Killing the TensorBoard service.")
process.destroy()
})
}
}
private[learn] object TensorBoardHook {
private[TensorBoardHook] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / TensorBoard"))
def apply(tensorBoardConfig: TensorBoardConfig): TensorBoardHook = new TensorBoardHook(tensorBoardConfig)
}
|
eaplatanios/tensorflow_scala
|
modules/api/src/main/scala/org/platanios/tensorflow/api/learn/hooks/TensorBoardHook.scala
|
Scala
|
apache-2.0
| 2,450
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.plan.optimize.program.{BatchOptimizeContext, FlinkChainedProgram, FlinkHepRuleSetProgramBuilder, HEP_RULES_EXECUTION_TYPE}
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Tests for [[FlinkJoinPushExpressionsRule]].
*/
class FlinkJoinPushExpressionsRuleTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
val programs = new FlinkChainedProgram[BatchOptimizeContext]()
programs.addLast(
"rules",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(RuleSets.ofList(
SimplifyFilterConditionRule.EXTENDED,
FlinkRewriteSubQueryRule.FILTER,
FlinkSubQueryRemoveRule.FILTER,
JoinConditionTypeCoerceRule.INSTANCE,
FlinkJoinPushExpressionsRule.INSTANCE))
.build()
)
util.replaceBatchProgram(programs)
util.addTableSource[(Int, Long, String)]("l", 'a, 'b, 'c)
util.addTableSource[(Int, Long, String)]("r", 'd, 'e, 'f)
}
@Test
def testPushExpressionsOnSemiJoin_In(): Unit = {
util.verifyPlan("SELECT * FROM l WHERE a IN (SELECT d FROM r WHERE b + 1 = e)")
}
@Test
def testPushExpressionsOnSemiJoin_Exists(): Unit = {
util.verifyPlan("SELECT * FROM l WHERE EXISTS (SELECT d FROM r WHERE CAST(b AS INTEGER) = d)")
}
@Test
def testPushExpressionsOnSemiJoin_NotIn(): Unit = {
util.verifyPlan("SELECT * FROM l WHERE a NOT IN (SELECT d FROM r WHERE b + 1 = e)")
}
@Test
def testPushExpressionsOnSemiJoin_NotExists(): Unit = {
val sqlQuery = "SELECT * FROM l WHERE NOT EXISTS (SELECT d FROM r WHERE CAST(b AS INTEGER) = d)"
util.verifyPlan(sqlQuery)
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinPushExpressionsRuleTest.scala
|
Scala
|
apache-2.0
| 2,883
|
package com.joypeg.scamandrill.client
import akka.actor.ActorSystem
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.joypeg.scamandrill.utils.SimpleLogger
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}
/**
 * This trait abstracts, on top of akka-http, the handling of all requests/responses to the Mandrill API. Its
 * executeQuery function is the one used by both the async client and the blocking one (a wrapper around it).
*/
trait ScamandrillSendReceive extends SimpleLogger {
type Entity = Either[Throwable, RequestEntity]
implicit val system: ActorSystem
implicit val materializer = ActorMaterializer()
import system.dispatcher
/**
   * Fires a request to the Mandrill API and tries to parse the response. Because it returns a Future[S], the
   * unmarshalling of the response body is done via a partially applied function on the Future transformation.
   * Uses akka-http internally to fire the request; the provided handler is responsible for unmarshalling the response.
   *
   * @param endpoint - the Mandrill API endpoint for the operation, for example '/messages/send.json'
   * @param reqBodyF - a future of the post request body, already marshalled as json
   * @param handler - the unmarshalling function for the response body, a partially applied function
   * @tparam S - the type of the expected body response once unmarshalled
   * @return - a future of the expected type S
   * @note as per the API documentation, all requests are POST, and you can consider any non-200 HTTP
* response code an error - the returned data will contain more detailed information
*/
def executeQuery[S](endpoint: String, reqBodyF: Future[RequestEntity])(handler: (HttpResponse => Future[S])): Future[S] = {
//TODO: reqbody <: MandrillResponse and S :< MandrillRequest
reqBodyF.flatMap { reqBody =>
val request = HttpRequest(method = HttpMethods.POST, uri = Uri("/api/1.0" + endpoint), entity = reqBody)
val clientFlow = Http().cachedHostConnectionPoolHttps[Int]("mandrillapp.com")
val futureResponse = Source.single(request -> 1).via(clientFlow).runWith(Sink.head)
futureResponse.flatMap { case (tryResponse, dummyInt) =>
tryResponse match {
case Success(rsp) => if(rsp.status.isSuccess()) handler(rsp)
else Future.failed(new UnsuccessfulResponseException(rsp))
case Failure(e) => Future.failed(e)
}
}
}
}
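  // A hedged usage sketch (not part of the original client): a caller supplies the endpoint,
  // a future of the marshalled request entity, and an unmarshalling handler. The endpoint
  // path, request value and Unmarshal-based handler below are illustrative assumptions only.
  //
  //   import akka.http.scaladsl.marshalling.Marshal
  //   import akka.http.scaladsl.unmarshalling.Unmarshal
  //
  //   val pong: Future[String] =
  //     executeQuery("/users/ping.json", Marshal(pingRequest).to[RequestEntity]) { rsp =>
  //       Unmarshal(rsp.entity).to[String]
  //     }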
/**
   * Asks all the underlying actors to close (waiting for 1 second)
   * and then shuts down the system. Because the blocking client is
   * basically a wrapper around the async one, both the async and blocking
   * clients are supposed to call this method when they are no longer required
   * or when the application using them exits.
*/
def shutdown(): Unit = {
logger.info("asking all actor to close")
Await.ready(Http().shutdownAllConnectionPools(), 1 second)
Await.ready(system.terminate(), 1 second)
logger.info("actor system shut down")
}
}
|
AdAgility/scamandrill
|
src/main/scala/com/joypeg/scamandrill/client/ScamandrillSendReceive.scala
|
Scala
|
apache-2.0
| 3,140
|
package io.udash.core
/**
 * Creates a view with [[io.udash.core.EmptyPresenter]]. Used for static views.
 *
 * By default, instances of this class are compared by class to prevent rerendering of static views.
* This behaviour can be opted out of by overriding equals/hashCode.
**/
abstract class StaticViewFactory[S <: State](viewCreator: () => View) extends ViewFactory[S] {
override def create(): (View, EmptyPresenter.type) =
(viewCreator(), EmptyPresenter)
override def equals(other: Any): Boolean = other != null && getClass.equals(other.getClass)
override def hashCode(): Int = getClass.hashCode()
}
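// A minimal usage sketch (assumption, not part of udash-core): a factory for a static "About"
// page, where `AboutState` and `AboutView` are hypothetical names used only for illustration.
//
//   class AboutViewFactory extends StaticViewFactory[AboutState](() => new AboutView)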
/** Ignores state changes. Useful for static views. */
object EmptyPresenter extends Presenter[State] {
override def handleState(state: State): Unit = ()
}
|
UdashFramework/udash-core
|
core/.js/src/main/scala/io/udash/core/Defaults.scala
|
Scala
|
apache-2.0
| 782
|
class Meter(val x: Double) extends AnyVal
trait A {
def apply(x: Double) = x.toString
}
trait B {
def apply(x: Meter) = x.toString
}
object Test extends A with B // error: double definition
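// After erasure, the value class parameter `Meter` unboxes to its underlying `Double`, so
// `A.apply(Double)` and `B.apply(Meter)` end up with the same erased signature, and `Test`
// inherits two conflicting definitions of `apply`.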
|
som-snytt/dotty
|
tests/neg/valueclasses-doubledefs2.scala
|
Scala
|
apache-2.0
| 196
|
package com.github.vonnagy.service.container.http
import java.net.InetAddress
import akka.http.scaladsl.model.headers.`Remote-Address`
import akka.http.scaladsl.model.{RemoteAddress, StatusCodes}
import akka.http.scaladsl.server.Directives
import com.github.vonnagy.service.container.Specs2RouteTest
import com.github.vonnagy.service.container.http.routing.Rejection.NotFoundRejection
import org.specs2.mutable.Specification
class BaseEndpointsSpec extends Specification with Directives with Specs2RouteTest {
val endpoints = new BaseEndpoints
"The base routing infrastructure" should {
"return no content for favicon.ico" in {
Get("/favicon.ico") ~> endpoints.route ~> check {
status must be equalTo StatusCodes.NoContent
}
}
"support a call to ping" in {
Get("/ping") ~> endpoints.route ~> check {
responseAs[String] must startWith("pong")
status must beEqualTo(StatusCodes.OK)
}
}
"a call to shutdown should return and error due to CIDR rules" in {
Post("/shutdown").withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.1.1")))) ~> endpoints.route ~> check {
handled must beFalse
rejections.size must beEqualTo(1)
rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
}
}
}
}
|
vonnagy/service-container
|
service-container/src/test/scala/com/github/vonnagy/service/container/http/BaseEndpointsSpec.scala
|
Scala
|
apache-2.0
| 1,366
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.mathematics
import com.twitter.scalding._
import org.specs._
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen._
object SizeHintProps extends Properties("SizeHint") {
val noClueGen = value(NoClue)
val finiteHintGen = for (
rows <- choose(-1L, 1000000L);
cols <- choose(-1L, 1000000L)
) yield FiniteHint(rows, cols)
val sparseHintGen = for (
rows <- choose(-1L, 1000000L);
cols <- choose(-1L, 1000000L);
sparsity <- choose(0.0, 1.0)
) yield SparseHint(sparsity, rows, cols)
implicit val finiteArb: Arbitrary[FiniteHint] = Arbitrary { finiteHintGen }
implicit val sparseArb: Arbitrary[SparseHint] = Arbitrary { sparseHintGen }
implicit val genHint: Arbitrary[SizeHint] = Arbitrary { oneOf(noClueGen, finiteHintGen, sparseHintGen) }
property("a+b is at least as big as a") = forAll { (a: SizeHint, b: SizeHint) =>
val addT = for (ta <- a.total; tsum <- (a + b).total) yield (tsum >= ta)
addT.getOrElse(true)
}
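  // A concrete instance of the property above (illustrative values, not part of the original
  // suite): with a = FiniteHint(10L, 20L) and b = FiniteHint(5L, 5L), the property asserts that
  // (a + b).total is at least a.total (the product of a's row and column bounds, 200).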
property("a#*#b is at most as big as a") = forAll { (a: SizeHint, b: SizeHint) =>
val addT = for (ta <- a.total; tsum <- (a #*# b).total) yield (tsum <= ta)
addT.getOrElse(true)
}
property("ordering makes sense") = forAll { (a: SizeHint, b: SizeHint) =>
(List(a, b).max.total.getOrElse(BigInt(-1L)) >= a.total.getOrElse(BigInt(-1L)))
}
property("addition increases sparsity fraction") = forAll { (a: SparseHint, b: SparseHint) =>
(a + b).asInstanceOf[SparseHint].sparsity >= a.sparsity
}
property("Hadamard product does not increase sparsity fraction") = forAll { (a: SparseHint, b: SparseHint) =>
(a #*# b).asInstanceOf[SparseHint].sparsity == (a.sparsity min b.sparsity)
}
property("transpose preserves size") = forAll { (a: SizeHint) =>
a.transpose.total == a.total
}
property("squaring a finite hint preserves size") = forAll { (a: FiniteHint) =>
val sq = a.setRowsToCols
val sq2 = a.setColsToRows
(sq.total == (sq * sq).total) && (sq2.total == (sq2 * sq2).total)
}
property("adding a finite hint to itself preserves size") = forAll { (a: FiniteHint) =>
(a + a).total == a.total
}
property("hadamard product of a finite hint to itself preserves size") = forAll { (a: FiniteHint) =>
(a #*# a).total == a.total
}
property("adding a sparse matrix to itself doesn't decrease size") = forAll { (a: SparseHint) =>
(for (
doubleSize <- (a + a).total;
asize <- a.total
) yield (doubleSize >= asize)).getOrElse(true)
}
property("diagonals are smaller") = forAll { (a: FiniteHint) =>
SizeHint.asDiagonal(a).total.getOrElse(BigInt(-2L)) < a.total.getOrElse(-1L)
}
property("diagonals are about as big as the min(rows,cols)") = forAll { (a: FiniteHint) =>
    // Both bounds must hold: the diagonal's total is at most min(rows, cols) and at least min(rows, cols) - 1.
    (SizeHint.asDiagonal(a).total.getOrElse(BigInt(-1L)) <= (a.rows min a.cols)) &&
      (SizeHint.asDiagonal(a).total.getOrElse(BigInt(-1L)) >= ((a.rows min a.cols) - 1L))
}
property("transpose law is obeyed in total") = forAll { (a: SizeHint, b: SizeHint) =>
// (A B)^T = B^T A^T
(a * b).transpose.total == ((b.transpose) * (a.transpose)).total
}
}
|
lucamilanesio/scalding
|
scalding-core/src/test/scala/com/twitter/scalding/mathematics/SizeHintTest.scala
|
Scala
|
apache-2.0
| 3,792
|
package ru.st.training.domain
object CellState extends Enumeration {
type CellState = Value
val AllWallBuilt, TopWallRuin, RightWallRuin, BottomWallRuin, LeftWallRuin,
AllWallBuiltWithExit = Value
}
|
mrzo0m/RobotInMaze
|
src/main/scala/ru/st/training/domain/CellState.scala
|
Scala
|
mit
| 215
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.commons.source
import com.twitter.chill.Externalizer
import com.twitter.bijection.Injection
/**
* Source used to write some type T into an LZO-compressed SequenceFile using a codec on T for serialization.
*/
object LzoCodecSource {
def apply[T](paths: String*)(implicit passedInjection: Injection[T, Array[Byte]]) =
new LzoCodec[T] {
val hdfsPaths = paths
val localPaths = paths
val boxed = Externalizer(passedInjection)
override def injection = boxed.get
}
}
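// A hedged usage sketch (not part of the original source): given an implicit
// Injection[MyRecord, Array[Byte]] in scope (e.g. derived via bijection), a source can be
// built from one or more paths. `MyRecord` and the path below are hypothetical.
//
//   val src = LzoCodecSource[MyRecord]("hdfs://namenode/data/records/part-*")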
|
twitter/scalding
|
scalding-commons/src/main/scala/com/twitter/scalding/commons/source/LzoCodecSource.scala
|
Scala
|
apache-2.0
| 1,086
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.python.api
import java.util.{ArrayList => JArrayList, HashMap => JHashMap, List => JList, Map => JMap}
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.{Identity => DIdentity, Sample => JSample, _}
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, _}
import com.intel.analytics.bigdl.numeric._
import com.intel.analytics.bigdl.optim.{Optimizer, _}
import com.intel.analytics.bigdl.tensor.{DenseType, SparseType, Storage, Tensor}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{Table, _}
import com.intel.analytics.bigdl.visualization.{Summary, TrainSummary, ValidationSummary}
import com.intel.analytics.bigdl.nn.Zeros
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.rdd.RDD
import java.lang.{Integer, Boolean => JBoolean}
import java.nio.ByteOrder
import java.util
import com.intel.analytics.bigdl.nn.Graph._
import com.intel.analytics.bigdl.nn.tf.{Const, Fill, Shape, SplitAndSelect}
import com.intel.analytics.bigdl.transform.vision.image._
import com.intel.analytics.bigdl.transform.vision.image.augmentation._
import com.intel.analytics.bigdl.transform.vision.image.label.roi._
import com.intel.analytics.bigdl.transform.vision.image.opencv.OpenCVMat
import com.intel.analytics.bigdl.utils.tf.{TensorflowDataFormat, TensorflowSaver}
import com.intel.analytics.bigdl.utils.tf.TensorflowLoader.{buildBigDLModel, buildTFGraph, parse}
import com.intel.analytics.bigdl.utils.tf._
import org.apache.spark.ml.{DLClassifier, DLClassifierModel, DLEstimator, DLModel}
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.log4j._
import org.opencv.imgproc.Imgproc
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.language.existentials
import scala.reflect.ClassTag
/**
* [[com.intel.analytics.bigdl.dataset.Sample]] for python.
* @param features features
* @param label labels
* @param bigdlType bigdl numeric type
*/
case class Sample(features: JList[JTensor],
label: JTensor,
bigdlType: String)
case class JTensor(storage: Array[Float], shape: Array[Int],
bigdlType: String, indices: Array[Array[Int]] = null)
/**
 * Evaluation result for python, built from a [[ValidationResult]]
* @param result result
* @param totalNum total number
* @param method method name
*/
case class EvaluatedResult(val result: Float, totalNum: Int, method: String)
object PythonBigDL {
def ofFloat(): PythonBigDL[Float] = new PythonBigDL[Float]()
def ofDouble(): PythonBigDL[Double] = new PythonBigDL[Double]()
}
/**
* Implementation of Python API for BigDL
*/
class PythonBigDL[T: ClassTag](implicit ev: TensorNumeric[T]) extends Serializable {
private val typeName = {
val cls = implicitly[ClassTag[T]].runtimeClass
cls.getSimpleName
}
private def toTable(input: JList[JTensor]): Table = {
input.asScala.foldLeft(new Table())((t, jtensor) => t.insert(toTensor(jtensor)))
}
def jTensorsToActivity(input: JList[JTensor], isTable: Boolean): Activity = {
if (input.isEmpty) {
throw new IllegalArgumentException("Empty input")
}
if (isTable) {
toTable(input)
} else {
toTensor(input.iterator().next())
}
}
def activityToJTensors(outputActivity: Activity): JList[JTensor] = {
if (outputActivity.isInstanceOf[Tensor[T]]) {
List(toJTensor(outputActivity.toTensor)).asJava
} else {
outputActivity.toTable.getState().toList.map {
pair => (pair._1.asInstanceOf[Int], toJTensor(pair._2.asInstanceOf[Tensor[T]]))
}.sortWith(_._1 < _._1).map(pair => pair._2).asJava
}
}
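  // For example: a Table activity with entries 1 -> t1 and 2 -> t2 is flattened into the
  // JList [toJTensor(t1), toJTensor(t2)], ordered by its integer keys, while a plain Tensor
  // activity becomes a single-element list.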
def toPySample(sample: JSample[T]): Sample = {
val cls = implicitly[ClassTag[T]].runtimeClass
val features = new JArrayList[JTensor]()
features.add(toJTensor(sample.feature()))
Sample(features, toJTensor(sample.label()), cls.getSimpleName)
}
def toTensor(jTensor: JTensor): Tensor[T] = {
if (jTensor == null) return null
this.typeName match {
case "float" =>
if (null == jTensor.indices) {
if (jTensor.shape == null || jTensor.shape.length == 0) {
Tensor()
} else {
Tensor(jTensor.storage.map(x => ev.fromType(x)), jTensor.shape)
}
} else {
Tensor.sparse(jTensor.indices, jTensor.storage.map(x => ev.fromType(x)), jTensor.shape)
}
case "double" =>
if (null == jTensor.indices) {
if (jTensor.shape == null || jTensor.shape.length == 0) {
Tensor()
} else {
Tensor(jTensor.storage.map(x => ev.fromType(x.toDouble)), jTensor.shape)
}
} else {
Tensor.sparse(jTensor.indices,
jTensor.storage.map(x => ev.fromType(x.toDouble)), jTensor.shape)
}
case t: String =>
throw new IllegalArgumentException(s"Not supported type: ${t}")
}
}
def toJTensor(tensor: Tensor[T]): JTensor = {
    // Clone here in case the size of the underlying storage is larger than the size of the tensor.
require(tensor != null, "tensor cannot be null")
tensor.getTensorType match {
case SparseType =>
        // Note: as a SparseTensor's indices are inaccessible here,
        // we convert it to a DenseTensor. Just for testing.
if (tensor.nElement() == 0) {
JTensor(Array(), Array(0), bigdlType = typeName)
} else {
val cloneTensor = Tensor.dense(tensor)
val result = JTensor(cloneTensor.storage().array().map(i => ev.toType[Float](i)),
cloneTensor.size(), bigdlType = typeName)
result
}
case DenseType =>
if (tensor.nElement() == 0) {
if (tensor.dim() == 0) {
JTensor(null, null, bigdlType = typeName)
} else {
JTensor(Array(), tensor.size(), bigdlType = typeName)
}
} else {
val cloneTensor = tensor.clone()
val result = JTensor(cloneTensor.storage().array().map(i => ev.toType[Float](i)),
cloneTensor.size(), bigdlType = typeName)
result
}
case _ =>
throw new IllegalArgumentException(s"toJTensor: Unsupported tensor type" +
s" ${tensor.getTensorType}")
}
}
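  // Illustrative round trip (values assumed for clarity): a Python-side dense 2x2 tensor
  // arrives as JTensor(Array(1f, 2f, 3f, 4f), Array(2, 2), "float"); toTensor builds a dense
  // 2x2 Tensor from its storage and shape, and toJTensor converts it back, which is exactly
  // what testTensor below exercises.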
def testTensor(jTensor: JTensor): JTensor = {
val tensor = toTensor(jTensor)
toJTensor(tensor)
}
def testSample(sample: Sample): Sample = {
val jsample = toJSample(sample)
toPySample(jsample)
}
def toJSample(record: Sample): JSample[T] = {
require(record.bigdlType == this.typeName,
s"record.bigdlType: ${record.bigdlType} == this.typeName: ${this.typeName}")
JSample[T](record.features.asScala.toArray.map(toTensor(_)), toTensor(record.label))
}
def toJSample(psamples: RDD[Sample]): RDD[JSample[T]] = {
psamples.map(toJSample(_))
}
// The first dimension is batch for both X and y
private def toSampleArray(Xs: List[Tensor[T]], y: Tensor[T] = null): Array[JSample[T]] = {
require(!Xs.isEmpty, "Xs should not be empty")
val totalNum = Xs(0).size()(0)
var i = 1
val samples = new Array[JSample[T]](totalNum)
if (y != null) {
require(Xs(0).size()(0) == y.size()(0),
s"The batch dim should be equal, but we got: ${Xs(0).size()(0)} vs ${y.size()(0)}")
while (i <= totalNum) {
samples(i-1) = JSample(Xs.map{X => X.select(1, i)}.toArray, y.select(1, i))
i += 1
}
} else {
val dummyTensor = Tensor[T](1).fill(ev.fromType(1))
while (i <= totalNum) {
samples(i-1) = JSample(Xs.map{X => X.select(1, i)}.toArray, dummyTensor)
i += 1
}
}
samples
}
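  // For example (illustrative shapes): with Xs = List(X) where X has batch size 4 in its first
  // dimension, this yields 4 JSamples; the i-th sample holds X.select(1, i), the i-th slice of
  // each feature tensor, paired with the i-th slice of y, or with a constant dummy label of 1
  // when y is null.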
def batching(dataset: DataSet[JSample[T]], batchSize: Int)
: DataSet[MiniBatch[T]] = {
dataset -> SampleToMiniBatch[T](batchSize)
}
private def enrichOptimizer[T](optimizer: Optimizer[T, MiniBatch[T]],
endTrigger: Trigger,
optimMethod: OptimMethod[T]): Optimizer[T, MiniBatch[T]] = {
optimizer.setEndWhen(endTrigger)
optimizer.setOptimMethod(optimMethod)
// TODO: remove this
optimizer.disableCheckSingleton()
optimizer
}
def createSequential(): Sequential[T] = {
Sequential[T]()
}
def createLinear(inputSize: Int, outputSize: Int,
withBias: Boolean,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null): Linear[T] = {
Linear[T](inputSize, outputSize, withBias, wRegularizer, bRegularizer,
toTensor(initWeight), toTensor(initBias), toTensor(initGradWeight), toTensor(initGradBias))
}
def createSparseLinear(inputSize: Int, outputSize: Int,
withBias: Boolean,
backwardStart: Int = -1,
backwardLength: Int = -1,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null): SparseLinear[T] = {
SparseLinear[T](inputSize, outputSize, withBias, backwardStart, backwardLength,
wRegularizer, bRegularizer, toTensor(initWeight), toTensor(initBias),
toTensor(initGradWeight), toTensor(initGradBias))
}
def createNegative(inplace: Boolean): Negative[T] = {
Negative[T](inplace)
}
def createDenseToSparse(): DenseToSparse[T] = {
DenseToSparse[T]()
}
def createReLU(ip: Boolean = false): ReLU[T] = {
ReLU[T](ip)
}
def createTanh(): Tanh[T] = {
Tanh[T]()
}
def createTimeDistributed(layer: TensorModule[T]): TimeDistributed[T] = {
TimeDistributed[T](layer)
}
def createSpatialWithinChannelLRN(size: Int = 5, alpha: Double = 1.0, beta: Double = 0.75)
: SpatialWithinChannelLRN[T] = {
SpatialWithinChannelLRN[T](size, alpha, beta)
}
def createRnnCell(inputSize: Int,
hiddenSize: Int,
activation: TensorModule[T],
isInputWithBias: Boolean = true,
isHiddenWithBias: Boolean = true,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null): RnnCell[T] = {
RnnCell[T](inputSize,
hiddenSize,
activation,
isInputWithBias,
isHiddenWithBias,
wRegularizer,
uRegularizer,
bRegularizer)
}
def createTimeDistributedCriterion(critrn: TensorCriterion[T],
sizeAverage: Boolean = false): TimeDistributedCriterion[T] = {
TimeDistributedCriterion[T](critrn, sizeAverage)
}
def createGRU(
inputSize: Int,
outputSize: Int,
p: Double = 0,
activation: TensorModule[T] = null,
innerActivation: TensorModule[T] = null,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null): GRU[T] = {
GRU[T](inputSize, outputSize, p, activation, innerActivation,
wRegularizer, uRegularizer, bRegularizer)
}
def createLSTM(
inputSize: Int,
hiddenSize: Int,
p: Double = 0,
activation: TensorModule[T] = null,
innerActivation: TensorModule[T] = null,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null): LSTM[T] = {
LSTM[T](inputSize, hiddenSize, p, activation, innerActivation,
wRegularizer, uRegularizer, bRegularizer)
}
def createLSTMPeephole(
inputSize: Int,
hiddenSize: Int,
p: Double = 0,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null): LSTMPeephole[T] = {
LSTMPeephole[T](inputSize, hiddenSize, p, wRegularizer, uRegularizer, bRegularizer)
}
def createRecurrent(): Recurrent[T] = {
Recurrent[T]()
}
def createRecurrentDecoder(outputLength: Int): RecurrentDecoder[T] = {
RecurrentDecoder[T](outputLength)
}
def createConvLSTMPeephole(
inputSize: Int,
outputSize: Int,
kernelI: Int,
kernelC: Int,
stride: Int = 1,
padding: Int = -1,
activation: TensorModule[T] = null,
innerActivation: TensorModule[T] = null,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
cRegularizer: Regularizer[T] = null,
withPeephole: Boolean = true): ConvLSTMPeephole[T] = {
ConvLSTMPeephole[T](inputSize, outputSize, kernelI, kernelC,
stride, padding, activation, innerActivation,
wRegularizer, uRegularizer, bRegularizer, cRegularizer, withPeephole)
}
def createConvLSTMPeephole3D(
inputSize: Int,
outputSize: Int,
kernelI: Int,
kernelC: Int,
stride: Int = 1,
padding: Int = -1,
wRegularizer: Regularizer[T] = null,
uRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
cRegularizer: Regularizer[T] = null,
withPeephole: Boolean = true): ConvLSTMPeephole3D[T] = {
ConvLSTMPeephole3D[T](inputSize, outputSize, kernelI, kernelC, stride, padding,
wRegularizer, uRegularizer, bRegularizer, cRegularizer, withPeephole)
}
def createEcho(): Echo[T] = {
Echo[T]()
}
def createLogSoftMax(): LogSoftMax[T] = {
LogSoftMax[T]()
}
def createTemporalMaxPooling(
kW: Int,
dW: Int)
: TemporalMaxPooling[T] = {
TemporalMaxPooling[T](
kW,
dW)
}
def createSpatialMaxPooling(kW: Int,
kH: Int,
dW: Int,
dH: Int,
padW: Int = 0,
padH: Int = 0,
ceilMode: Boolean = false,
format: String = "NCHW")
: SpatialMaxPooling[T] = {
val maxpooling = SpatialMaxPooling[T](kW,
kH,
dW,
dH,
padW,
padH,
format = DataFormat(format))
if (ceilMode) maxpooling.ceil()
else maxpooling
}
def createSpatialConvolution(nInputPlane: Int,
nOutputPlane: Int,
kernelW: Int,
kernelH: Int,
strideW: Int = 1,
strideH: Int = 1,
padW: Int = 0,
padH: Int = 0,
nGroup: Int = 1,
propagateBack: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null,
withBias: Boolean = true,
dataFormat: String = "NCHW"
)
: SpatialConvolution[T] = {
SpatialConvolution[T](nInputPlane,
nOutputPlane,
kernelW,
kernelH,
strideW,
strideH,
padW,
padH,
nGroup,
propagateBack,
wRegularizer,
bRegularizer,
toTensor(initWeight),
toTensor(initBias),
toTensor(initGradWeight),
toTensor(initGradBias),
withBias,
DataFormat(dataFormat)
)
}
def createReshape(size: JList[Int], batchMode: JBoolean = null): Reshape[T] = {
val mappedBatchMode = batchMode match {
case JBoolean.TRUE => Some(true)
case JBoolean.FALSE => Some(false)
case _ => None
}
Reshape(size.asScala.toArray, mappedBatchMode)
}
def createConcat(dimension: Int): Concat[T] = {
Concat[T](dimension)
}
def createSpatialAveragePooling(kW: Int,
kH: Int,
dW: Int = 1,
dH: Int = 1,
padW: Int = 0,
padH: Int = 0,
globalPooling: Boolean = false,
ceilMode: Boolean = false,
countIncludePad: Boolean = true,
divide: Boolean = true,
format: String = "NCHW")
: SpatialAveragePooling[T] = {
SpatialAveragePooling[T](kW, kH, dW, dH, padW, padH, globalPooling,
ceilMode, countIncludePad, divide, format = DataFormat(format))
}
def createSpatialBatchNormalization(nOutput: Int,
eps: Double = 1e-5,
momentum: Double = 0.1,
affine: Boolean = true,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null)
: SpatialBatchNormalization[T] = {
SpatialBatchNormalization[T](nOutput, eps, momentum, affine,
      toTensor(initWeight), toTensor(initBias), toTensor(initGradWeight), toTensor(initGradBias))
}
def createSpatialCrossMapLRN(size: Int = 5,
alpha: Double = 1.0,
beta: Double = 0.75,
k: Double = 1.0)
: SpatialCrossMapLRN[T] = {
SpatialCrossMapLRN[T](size, alpha, beta, k)
}
def createDropout(initP: Double = 0.5,
inplace: Boolean = false,
scale: Boolean = true)
: Dropout[T] = {
Dropout[T](initP, inplace, scale)
}
def createGaussianDropout(rate: Double)
: GaussianDropout[T] = {
GaussianDropout[T](rate)
}
def createGaussianNoise(stddev: Double)
: GaussianNoise[T] = {
GaussianNoise[T](stddev)
}
def createView(sizes: JList[Int], num_input_dims: Int = 0): View[T] = {
View[T](sizes.asScala.toArray).setNumInputDims(num_input_dims)
}
def createAbs()
: Abs[T, T] = {
Abs[T, T]()
}
def createAdd(inputSize: Int)
: Add[T] = {
Add[T](inputSize)
}
def createAddConstant(constant_scalar: Double,
inplace: Boolean = false)
: AddConstant[T] = {
AddConstant[T](constant_scalar,
inplace)
}
def createBatchNormalization(nOutput: Int,
eps: Double = 1e-5,
momentum: Double = 0.1,
affine: Boolean = true,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null)
: BatchNormalization[T] = {
BatchNormalization[T](nOutput,
eps,
momentum,
affine,
toTensor(initWeight),
toTensor(initBias),
toTensor(initGradWeight),
toTensor(initGradBias))
}
def createBilinear(inputSize1: Int,
inputSize2: Int,
outputSize: Int,
biasRes: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: Bilinear[T] = {
Bilinear[T](inputSize1,
inputSize2,
outputSize,
biasRes,
wRegularizer,
bRegularizer)
}
def createBottle(module: AbstractModule[Activity, Activity, T],
nInputDim: Int = 2,
nOutputDim1: Int = Int.MaxValue)
: Bottle[T] = {
Bottle[T](module,
nInputDim,
nOutputDim1)
}
def createCAdd(size: JList[Int],
bRegularizer: Regularizer[T] = null)
: CAdd[T] = {
CAdd[T](size.asScala.toArray, bRegularizer)
}
def createCAddTable(inplace: Boolean = false)
: CAddTable[T] = {
CAddTable[T](inplace)
}
def createCAveTable(inplace: Boolean = false)
: CAveTable[T] = {
CAveTable[T](inplace)
}
def createCDivTable()
: CDivTable[T] = {
CDivTable[T]()
}
def createCMaxTable()
: CMaxTable[T] = {
CMaxTable[T]()
}
def createCMinTable()
: CMinTable[T] = {
CMinTable[T]()
}
def createCMul(size: JList[Int],
wRegularizer: Regularizer[T] = null)
: CMul[T] = {
CMul[T](size.asScala.toArray, wRegularizer)
}
def createCMulTable()
: CMulTable[T] = {
CMulTable[T]()
}
def createCSubTable()
: CSubTable[T] = {
CSubTable[T]()
}
def createClamp(min: Int,
max: Int)
: Clamp[T, T] = {
Clamp[T, T](min,
max)
}
def createContiguous()
: Contiguous[T] = {
Contiguous[T]()
}
def createCosine(inputSize: Int,
outputSize: Int)
: Cosine[T] = {
Cosine[T](inputSize,
outputSize)
}
def createCosineDistance()
: CosineDistance[T] = {
CosineDistance[T]()
}
def createCosineDistanceCriterion(sizeAverage: Boolean = true)
: CosineDistanceCriterion[T] = {
CosineDistanceCriterion[T](sizeAverage)
}
def createDiceCoefficientCriterion(sizeAverage: Boolean = true,
epsilon: Float = 1.0f)
: DiceCoefficientCriterion[T] = {
DiceCoefficientCriterion[T](sizeAverage, epsilon)
}
def createDotProduct()
: DotProduct[T] = {
DotProduct[T]()
}
def createELU(alpha: Double = 1.0,
inplace: Boolean = false)
: ELU[T, T] = {
ELU[T, T](alpha,
inplace)
}
def createEuclidean(inputSize: Int,
outputSize: Int,
fastBackward: Boolean = true)
: Euclidean[T] = {
Euclidean[T](inputSize,
outputSize,
fastBackward)
}
def createExp()
: Exp[T] = {
Exp[T]()
}
def createFlattenTable()
: FlattenTable[T] = {
FlattenTable[T]()
}
def createGradientReversal(lambda: Double = 1)
: GradientReversal[T] = {
GradientReversal[T](lambda)
}
def createHardShrink(lambda: Double = 0.5)
: HardShrink[T] = {
HardShrink[T](lambda)
}
def createHardTanh(minValue: Double = -1,
maxValue: Double = 1,
inplace: Boolean = false)
: HardTanh[T, T] = {
HardTanh[T, T](minValue,
maxValue,
inplace)
}
def createIndex(dimension: Int)
: Index[T] = {
Index[T](dimension)
}
def createInferReshape(size: JList[Int], batchMode: Boolean = false)
: InferReshape[T] = {
InferReshape[T](size.asScala.toArray,
batchMode)
}
def createJoinTable(dimension: Int,
nInputDims: Int)
: JoinTable[T] = {
JoinTable[T](dimension,
nInputDims)
}
def createSparseJoinTable(dimension: Int): SparseJoinTable[T] = {
SparseJoinTable[T](dimension)
}
def createL1Cost()
: L1Cost[T] = {
L1Cost[T]()
}
def createL1Penalty(l1weight: Int,
sizeAverage: Boolean = false,
provideOutput: Boolean = true)
: L1Penalty[T] = {
L1Penalty[T](l1weight,
sizeAverage,
provideOutput)
}
def createLeakyReLU(negval: Double = 0.01,
inplace: Boolean = false)
: LeakyReLU[T] = {
LeakyReLU[T](negval,
inplace)
}
def createLog()
: Log[T, T] = {
Log[T, T]()
}
def createLogSigmoid()
: LogSigmoid[T] = {
LogSigmoid[T]()
}
def createLookupTable(nIndex: Int, nOutput: Int,
paddingValue: Double = 0, maxNorm: Double = Double.MaxValue,
normType: Double = 2.0, shouldScaleGradByFreq: Boolean = false,
wRegularizer: Regularizer[T] = null)
: LookupTable[T] = {
LookupTable[T](nIndex,
nOutput,
paddingValue,
maxNorm,
normType,
shouldScaleGradByFreq,
wRegularizer)
}
def createMM(transA: Boolean = false,
transB: Boolean = false)
: MM[T] = {
MM[T](transA,
transB)
}
def createMV(trans: Boolean = false)
: MV[T] = {
MV[T](trans)
}
def createMapTable(module: AbstractModule[Activity, Activity, T] = null)
: MapTable[T] = {
MapTable[T](module)
}
def createMaskedSelect()
: MaskedSelect[T] = {
MaskedSelect[T]()
}
def createMax(dim: Int = 1,
numInputDims: Int = Int.MinValue)
: Max[T] = {
Max[T](dim,
numInputDims)
}
def createMean(dimension: Int = 1,
nInputDims: Int = -1,
squeeze: Boolean = true)
: Mean[T, T] = {
Mean[T](dimension,
nInputDims,
squeeze)
}
def createMin(dim: Int = 1,
numInputDims: Int = Int.MinValue)
: Min[T] = {
Min[T](dim,
numInputDims)
}
def createMixtureTable(dim: Int = Int.MaxValue)
: MixtureTable[T] = {
MixtureTable[T](dim)
}
def createMul()
: Mul[T] = {
Mul[T]()
}
def createMulConstant(scalar: Double,
inplace: Boolean = false)
: MulConstant[T] = {
MulConstant[T](scalar,
inplace)
}
def createNarrow(dimension: Int,
offset: Int,
length: Int = 1)
: Narrow[T] = {
Narrow[T](dimension,
offset,
length)
}
def createNarrowTable(offset: Int,
length: Int = 1)
: NarrowTable[T] = {
NarrowTable[T](offset,
length)
}
def createNormalize(p: Double,
eps: Double = 1e-10)
: Normalize[T] = {
Normalize[T](p,
eps)
}
def createPReLU(nOutputPlane: Int = 0)
: PReLU[T] = {
PReLU[T](nOutputPlane)
}
def createPadding(dim: Int,
pad: Int,
nInputDim: Int,
value: Double = 0.0,
nIndex: Int = 1)
: Padding[T] = {
Padding[T](dim,
pad,
nInputDim,
value,
nIndex)
}
def createPairwiseDistance(norm: Int = 2)
: PairwiseDistance[T] = {
PairwiseDistance[T](norm)
}
def createParallelTable()
: ParallelTable[T] = {
ParallelTable[T]()
}
def createPower(power: Double,
scale: Double = 1,
shift: Double = 0)
: Power[T, T] = {
Power[T, T](power,
scale,
shift)
}
def createRReLU(lower: Double = 1.0 / 8,
upper: Double = 1.0 / 3,
inplace: Boolean = false)
: RReLU[T] = {
RReLU[T](lower,
upper,
inplace)
}
def createReLU6(inplace: Boolean = false)
: ReLU6[T, T] = {
ReLU6[T, T](inplace)
}
def createReplicate(nFeatures: Int,
dim: Int = 1,
nDim: Int = Int.MaxValue)
: Replicate[T] = {
Replicate[T](nFeatures,
dim,
nDim)
}
def createRoiPooling(pooled_w: Int, pooled_h: Int, spatial_scale: Double)
: RoiPooling[T] = {
RoiPooling[T](pooled_w,
pooled_h,
ev.fromType(spatial_scale))
}
def createScale(size: JList[Int])
: Scale[T] = {
Scale[T](size.asScala.toArray)
}
def createSelect(dimension: Int,
index: Int)
: Select[T] = {
Select[T](dimension,
index)
}
def createSelectTable(dimension: Int)
: SelectTable[T] = {
SelectTable[T](dimension)
}
def createSigmoid()
: Sigmoid[T] = {
Sigmoid[T]()
}
def createSoftMax()
: SoftMax[T] = {
SoftMax[T]()
}
def createSoftMin()
: SoftMin[T] = {
SoftMin[T]()
}
def createSoftPlus(beta: Double = 1.0)
: SoftPlus[T, T] = {
SoftPlus[T, T](beta)
}
def createSoftShrink(lambda: Double = 0.5)
: SoftShrink[T] = {
SoftShrink[T](lambda)
}
def createSoftSign()
: SoftSign[T, T] = {
SoftSign[T, T]()
}
def createSpatialDilatedConvolution(nInputPlane: Int,
nOutputPlane: Int,
kW: Int,
kH: Int,
dW: Int = 1,
dH: Int = 1,
padW: Int = 0,
padH: Int = 0,
dilationW: Int = 1,
dilationH: Int = 1,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: SpatialDilatedConvolution[T] = {
SpatialDilatedConvolution[T](nInputPlane,
nOutputPlane,
kW,
kH,
dW,
dH,
padW,
padH,
dilationW,
dilationH,
wRegularizer,
bRegularizer)
}
def createTemporalConvolution(
inputFrameSize: Int,
outputFrameSize: Int,
kernelW: Int,
strideW: Int = 1,
propagateBack: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null
)
: TemporalConvolution[T] = {
TemporalConvolution[T](
inputFrameSize,
outputFrameSize,
kernelW,
strideW,
propagateBack,
wRegularizer,
bRegularizer,
toTensor(initWeight),
toTensor(initBias),
toTensor(initGradWeight),
toTensor(initGradBias)
)
}
def createBinaryTreeLSTM(
inputSize: Int,
hiddenSize: Int,
gateOutput: Boolean = true,
withGraph: Boolean = true)
: BinaryTreeLSTM[T] = {
BinaryTreeLSTM[T](
inputSize,
hiddenSize,
gateOutput,
withGraph)
}
def createVolumetricFullConvolution(nInputPlane: Int,
nOutputPlane: Int,
kT: Int,
kW: Int,
kH: Int,
dT: Int = 1,
dW: Int = 1,
dH: Int = 1,
padT: Int = 0,
padW: Int = 0,
padH: Int = 0,
adjT: Int = 0,
adjW: Int = 0,
adjH: Int = 0,
nGroup: Int = 1,
noBias: Boolean = false,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: VolumetricFullConvolution[T] = {
VolumetricFullConvolution[T](nInputPlane,
nOutputPlane,
kT,
kW,
kH,
dT,
dW,
dH,
padT,
padW,
padH,
adjT,
adjW,
adjH,
nGroup,
noBias,
wRegularizer,
bRegularizer)
}
def createSpatialFullConvolution(nInputPlane: Int,
nOutputPlane: Int,
kW: Int,
kH: Int,
dW: Int = 1,
dH: Int = 1,
padW: Int = 0,
padH: Int = 0,
adjW: Int = 0,
adjH: Int = 0,
nGroup: Int = 1,
noBias: Boolean = false,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: SpatialFullConvolution[T] = {
SpatialFullConvolution[T](nInputPlane,
nOutputPlane,
kW,
kH,
dW,
dH,
padW,
padH,
adjW,
adjH,
nGroup,
noBias,
wRegularizer,
bRegularizer)
}
def createSpatialShareConvolution(
nInputPlane: Int,
nOutputPlane: Int,
kernelW: Int,
kernelH: Int,
strideW: Int = 1,
strideH: Int = 1,
padW: Int = 0,
padH: Int = 0,
nGroup: Int = 1,
propagateBack: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: JTensor = null,
initBias: JTensor = null,
initGradWeight: JTensor = null,
initGradBias: JTensor = null,
withBias: Boolean = true): SpatialShareConvolution[T] = {
SpatialShareConvolution[T](nInputPlane,
nOutputPlane,
kernelW,
kernelH,
strideW,
strideH,
padW,
padH,
nGroup,
propagateBack,
wRegularizer,
bRegularizer,
toTensor(initWeight),
toTensor(initBias),
toTensor(initGradWeight),
toTensor(initGradBias),
withBias
)
}
def createSpatialZeroPadding(padLeft: Int,
padRight: Int,
padTop: Int,
padBottom: Int)
: SpatialZeroPadding[T] = {
SpatialZeroPadding[T](padLeft,
padRight,
padTop,
padBottom)
}
def createBifurcateSplitTable(dimension: Int)
: BifurcateSplitTable[T] = {
BifurcateSplitTable[T](dimension)
}
def createSplitTable(dimension: Int,
nInputDims: Int = -1)
: SplitTable[T] = {
SplitTable[T](dimension,
nInputDims)
}
def createSqrt()
: Sqrt[T, T] = {
Sqrt[T, T]()
}
def createSquare()
: Square[T, T] = {
Square[T, T]()
}
def createSqueeze(dim: Int = Int.MinValue,
numInputDims: Int = Int.MinValue)
: Squeeze[T] = {
Squeeze[T](dim,
numInputDims)
}
def createSum(dimension: Int = 1,
nInputDims: Int = -1,
sizeAverage: Boolean = false,
squeeze: Boolean = true
)
: Sum[T, T] = {
Sum[T, T](dimension,
nInputDims,
sizeAverage,
squeeze
)
}
def createTanhShrink()
: TanhShrink[T] = {
TanhShrink[T]()
}
def createThreshold(th: Double = 1e-6,
v: Double = 0.0,
ip: Boolean = false)
: Threshold[T] = {
Threshold[T](th,
v,
ip)
}
def createUnsqueeze(pos: Int,
numInputDims: Int = Int.MinValue)
: Unsqueeze[T] = {
Unsqueeze[T](pos,
numInputDims)
}
def createBCECriterion(weights: JTensor = null,
sizeAverage: Boolean = true)
: BCECriterion[T] = {
BCECriterion[T](if (weights == null) null else toTensor(weights),
sizeAverage)
}
def createBiRecurrent(merge: AbstractModule[Table, Tensor[T], T] = null)
: BiRecurrent[T] = {
BiRecurrent[T](merge)
}
def createConcatTable()
: ConcatTable[T] = {
ConcatTable[Activity, T]()
}
def createIdentity()
: Identity[T] = {
Identity[T]()
}
def createGaussianSampler(): GaussianSampler[T] = {
GaussianSampler[T]()
}
def createMultiLabelSoftMarginCriterion(weights: JTensor = null,
sizeAverage: Boolean = true)
: MultiLabelSoftMarginCriterion[T] = {
MultiLabelSoftMarginCriterion[T](if (weights == null) null else toTensor(weights),
sizeAverage)
}
def createMultiMarginCriterion(p: Int = 1,
weights: JTensor = null,
margin: Double = 1.0,
sizeAverage: Boolean = true)
: MultiMarginCriterion[T] = {
MultiMarginCriterion[T](p,
if (weights == null) null else toTensor(weights),
margin,
sizeAverage)
}
def createReverse(dimension: Int = 1, isInplace: Boolean = false)
: Reverse[T] = {
Reverse[T](dimension, isInplace)
}
def createTranspose(permutations: JList[JList[Int]])
: Transpose[T] = {
Transpose[T](permutations.asScala.toArray.map { item =>
val itemArray = item.asScala.toArray
(itemArray(0), itemArray(1))
})
}
def createSpatialContrastiveNormalization(nInputPlane: Int = 1,
kernel: JTensor = null,
threshold: Double = 1e-4,
thresval: Double = 1e-4)
: SpatialContrastiveNormalization[T] = {
SpatialContrastiveNormalization[T](nInputPlane,
if (kernel == null) null else toTensor(kernel),
threshold,
thresval)
}
def createSpatialConvolutionMap(connTable: JTensor,
kW: Int,
kH: Int,
dW: Int = 1,
dH: Int = 1,
padW: Int = 0,
padH: Int = 0,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: SpatialConvolutionMap[T] = {
SpatialConvolutionMap[T](if (connTable == null) null else toTensor(connTable),
kW,
kH,
dW,
dH,
padW,
padH,
wRegularizer,
bRegularizer)
}
def createVolumetricConvolution(nInputPlane: Int,
nOutputPlane: Int,
kT: Int,
kW: Int,
kH: Int,
dT: Int = 1,
dW: Int = 1,
dH: Int = 1,
padT: Int = 0,
padW: Int = 0,
padH: Int = 0,
withBias: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null)
: VolumetricConvolution[T] = {
VolumetricConvolution[T](nInputPlane,
nOutputPlane,
kT,
kW,
kH,
dT,
dW,
dH,
padT,
padW,
padH,
withBias,
wRegularizer,
bRegularizer)
}
def createVolumetricMaxPooling(kT: Int,
kW: Int,
kH: Int,
dT: Int,
dW: Int,
dH: Int,
padT: Int = 0,
padW: Int = 0,
padH: Int = 0): VolumetricMaxPooling[T] = {
VolumetricMaxPooling[T](kT, kW, kH, dT, dW, dH, padT, padW, padH)
}
def createVolumetricAveragePooling(kT: Int,
kW: Int,
kH: Int,
dT: Int,
dW: Int,
dH: Int,
padT: Int = 0,
padW: Int = 0,
padH: Int = 0,
countIncludePad: Boolean = true,
ceilMode: Boolean = false):
VolumetricAveragePooling[T] = {
VolumetricAveragePooling[T](kT, kW, kH, dT, dW, dH, padT, padW, padH, countIncludePad, ceilMode)
}
def createSpatialDivisiveNormalization(nInputPlane: Int = 1,
kernel: JTensor = null,
threshold: Double = 1e-4,
thresval: Double = 1e-4)
: SpatialDivisiveNormalization[T] = {
SpatialDivisiveNormalization[T](nInputPlane,
if (kernel == null) null else toTensor(kernel),
threshold,
thresval)
}
def createSpatialSubtractiveNormalization(nInputPlane: Int = 1,
kernel: JTensor = null)
: SpatialSubtractiveNormalization[T] = {
SpatialSubtractiveNormalization[T](nInputPlane,
if (kernel == null) null else toTensor(kernel))
}
def createSoftMarginCriterion(sizeAverage: Boolean = true)
: SoftMarginCriterion[T] = {
SoftMarginCriterion[T](sizeAverage)
}
// Optimizer
def createPoly(power: Double, maxIteration: Int): SGD.Poly = {
SGD.Poly(power, maxIteration)
}
def createStep(stepSize: Int, gamma: Double): SGD.Step = {
SGD.Step(stepSize, gamma)
}
def createMultiStep(stepSizes: JList[Int], gamma: Double): SGD.MultiStep = {
SGD.MultiStep(stepSizes.asScala.toArray, gamma)
}
def createExponential(decayStep: Int, decayRate: Double,
stairCase: Boolean = false): SGD.Exponential = {
SGD.Exponential(decayStep, decayRate, stairCase)
}
def createDefault(): SGD.Default = {
SGD.Default()
}
def createPlateau(monitor: String, factor: Float = 0.1f,
patience: Int = 10, mode: String = "min", epsilon: Float = 1e-4f,
cooldown: Int = 0, minLr: Float = 0): SGD.Plateau = {
SGD.Plateau(monitor, factor, patience, mode, epsilon, cooldown, minLr)
}
def createClassNLLCriterion(weights: JTensor = null,
sizeAverage: Boolean = true, logProbAsInput: Boolean = true)
: ClassNLLCriterion[T] = {
ClassNLLCriterion[T](if (weights == null) null else toTensor(weights),
sizeAverage, logProbAsInput)
}
def createMSECriterion: MSECriterion[T] = {
MSECriterion[T]()
}
def createAbsCriterion(sizeAverage: Boolean = true)
: AbsCriterion[T] = {
AbsCriterion[T](sizeAverage)
}
def createClassSimplexCriterion(nClasses: Int)
: ClassSimplexCriterion[T] = {
ClassSimplexCriterion[T](nClasses)
}
def createCrossEntropyCriterion(weights: JTensor = null,
sizeAverage: Boolean = true): CrossEntropyCriterion[T] = {
new CrossEntropyCriterion[T](if (null == weights) null else toTensor(weights), sizeAverage)
}
def createCosineEmbeddingCriterion(margin: Double = 0.0,
sizeAverage: Boolean = true)
: CosineEmbeddingCriterion[T] = {
CosineEmbeddingCriterion[T](margin,
sizeAverage)
}
def createDistKLDivCriterion(sizeAverage: Boolean = true)
: DistKLDivCriterion[T] = {
DistKLDivCriterion[T](sizeAverage)
}
def createHingeEmbeddingCriterion(margin: Double = 1,
sizeAverage: Boolean = true)
: HingeEmbeddingCriterion[T] = {
HingeEmbeddingCriterion[T](margin,
sizeAverage)
}
def createL1HingeEmbeddingCriterion(margin: Double = 1)
: L1HingeEmbeddingCriterion[T] = {
L1HingeEmbeddingCriterion[T](margin)
}
def createMarginCriterion(margin: Double = 1.0,
sizeAverage: Boolean = true, squared: Boolean = false)
: MarginCriterion[T] = {
MarginCriterion[T](margin,
sizeAverage, squared)
}
def createMarginRankingCriterion(margin: Double = 1.0,
sizeAverage: Boolean = true)
: MarginRankingCriterion[T] = {
MarginRankingCriterion[T](margin,
sizeAverage)
}
def createMultiCriterion()
: MultiCriterion[T] = {
MultiCriterion[T]()
}
def createMultiLabelMarginCriterion(sizeAverage: Boolean = true)
: MultiLabelMarginCriterion[T] = {
MultiLabelMarginCriterion[T](sizeAverage)
}
def createParallelCriterion(repeatTarget: Boolean = false)
: ParallelCriterion[T] = {
ParallelCriterion[T](repeatTarget)
}
def createKLDCriterion(): KLDCriterion[T] = {
KLDCriterion[T]()
}
def createGaussianCriterion(): GaussianCriterion[T] = {
GaussianCriterion[T]()
}
def createSmoothL1Criterion(sizeAverage: Boolean = true)
: SmoothL1Criterion[T] = {
SmoothL1Criterion[T](sizeAverage)
}
def createSmoothL1CriterionWithWeights(sigma: Double, num: Int = 0)
: SmoothL1CriterionWithWeights[T] = {
SmoothL1CriterionWithWeights[T](sigma,
num)
}
def createSoftmaxWithCriterion(ignoreLabel: Integer = null,
normalizeMode: String = "VALID")
: SoftmaxWithCriterion[T] = {
val normM = normalizeMode match {
case "FULL" => NormMode.FULL
case "VALID" => NormMode.VALID
case "BATCH_SIZE" => NormMode.BATCH_SIZE
case "NONE" => NormMode.NONE
case n: String =>
throw new IllegalArgumentException(s"Only support 'FULL', " +
s"'VALID', 'BATCH_SIZE' and 'NONE': $n")
}
val labelToIgnore = ignoreLabel match {
case i: Integer => Some(i.toInt)
case null => None
}
SoftmaxWithCriterion[T](labelToIgnore, normM)
}
def createPack(dimension: Int): Pack[T] = {
Pack(dimension)
}
def createTile(dim : Int, copies : Int): Tile[T] = {
Tile(dim, copies)
}
def createBinaryThreshold(th: Double, ip: Boolean): BinaryThreshold[T] = {
BinaryThreshold(th, ip)
}
def setModelSeed(seed: Long): Unit = {
RandomGenerator.RNG.setSeed(seed)
}
def modelEvaluate(model: AbstractModule[Activity, Activity, T],
valRDD: JavaRDD[Sample],
batchSize: Int,
valMethods: JList[ValidationMethod[T]])
: JList[EvaluatedResult] = {
val resultArray = model.evaluate(valRDD.rdd.map(toJSample(_)),
valMethods.asScala.toArray, Some(batchSize))
val testResultArray = resultArray.map { result =>
EvaluatedResult(result._1.result()._1, result._1.result()._2,
result._2.toString())
}
testResultArray.toList.asJava
}
def loadBigDL(path: String): AbstractModule[Activity, Activity, T] = {
Module.load[T](path)
}
def loadBigDLModule(path: String): AbstractModule[Activity, Activity, T] = {
Module.loadModule[T](path)
}
def loadTorch(path: String): AbstractModule[Activity, Activity, T] = {
Module.loadTorch[T](path)
}
def loadCaffe(model: AbstractModule[Activity, Activity, T],
defPath: String,
modelPath: String,
matchAll: Boolean = true): AbstractModule[Activity, Activity, T] = {
Module.loadCaffe[T](model, defPath, modelPath, matchAll)
}
def loadCaffeModel(defPath: String, modelPath: String): AbstractModule[Activity, Activity, T] = {
Module.loadCaffeModel[T](defPath, modelPath)
}
def loadTF(path: String, inputs: JList[String], outputs: JList[String],
byteOrder: String, binFile: String = null): AbstractModule[Activity, Activity, T] = {
val order = byteOrder match {
case "little_endian" => ByteOrder.LITTLE_ENDIAN
case "big_endian" => ByteOrder.BIG_ENDIAN
case _ => throw new IllegalArgumentException(s"No support byte order $byteOrder")
}
Module.loadTF[T](path, inputs.asScala, outputs.asScala, order, Option(binFile))
}
def saveTF(model: AbstractModule[Activity, Activity, T],
inputs: JList[Any],
path: String,
byteOrder: String,
dataFormat: String): Unit = {
val order = byteOrder.toLowerCase match {
case "little_endian" => ByteOrder.LITTLE_ENDIAN
case "big_endian" => ByteOrder.BIG_ENDIAN
case _ => throw new IllegalArgumentException(s"Unknown byte order $byteOrder")
}
val format = dataFormat.toLowerCase match {
case "nhwc" => TensorflowDataFormat.NHWC
case "nchw" => TensorflowDataFormat.NCHW
case _ => throw new IllegalArgumentException(s"Unknown format $dataFormat")
}
val scalaInputs = inputs.asScala.map { elem =>
val array = elem.asInstanceOf[JList[Any]]
val name = array.get(0).asInstanceOf[String]
val shape = array.get(1).asInstanceOf[JList[Int]]
(name, shape.asScala)
}
model.saveTF(scalaInputs, path, order, format)
}
def predictLocal(model: AbstractModule[Activity, Activity, T],
features: JList[JTensor]): JList[JTensor] = {
val sampleArray = toSampleArray(features.asScala.toList.map{f => toTensor(f)})
val localModel = LocalModule(model)
val result = localModel.predict(sampleArray)
result.map{a => toJTensor(a.asInstanceOf[Tensor[T]])}.toList.asJava
}
def predictLocalClass(model: AbstractModule[Activity, Activity, T],
features: JList[JTensor]): JList[Int] = {
val sampleArray = toSampleArray(features.asScala.toList.map{f => toTensor(f)})
val localModel = LocalModule(model)
val result = localModel.predictClass(sampleArray)
result.toList.asJava
}
def modelPredictRDD(model: AbstractModule[Activity, Activity, T],
dataRdd: JavaRDD[Sample],
batchSize: Int = -1,
shareBuffer: Boolean = false): JavaRDD[JTensor] = {
val tensorRDD = model.predict(dataRdd.rdd.map(toJSample(_)), batchSize, shareBuffer)
val listRDD = tensorRDD.map { res =>
val tensor = res.asInstanceOf[Tensor[T]]
val cloneTensor = tensor.clone()
toJTensor(cloneTensor)
}
new JavaRDD[JTensor](listRDD)
}
def evaluate(module: AbstractModule[Activity, Activity, T]):
AbstractModule[Activity, Activity, T] = {
module.evaluate()
}
def modelPredictClass(model: AbstractModule[Activity, Activity, T],
dataRdd: JavaRDD[Sample], batchSize: Int = -1): JavaRDD[Int] = {
val sampleRdd = toJSample(dataRdd)
val tensorRDD = model.predictClass(sampleRdd, batchSize)
new JavaRDD[Int](tensorRDD)
}
def modelForward(model: AbstractModule[Activity, Activity, T],
input: JList[JTensor],
inputIsTable: Boolean): JList[JTensor] = {
val inputActivity = jTensorsToActivity(input, inputIsTable)
val outputActivity = model.forward(inputActivity)
activityToJTensors(outputActivity)
}
def modelBackward(model: AbstractModule[Activity, Activity, T],
input: JList[JTensor],
inputIsTable: Boolean,
gradOutput: JList[JTensor],
gradOutputIsTable: Boolean): JList[JTensor] = {
val inputActivity = jTensorsToActivity(input, inputIsTable)
val gradOutputActivity = jTensorsToActivity(gradOutput, gradOutputIsTable)
val outputActivity = model.backward(inputActivity, gradOutputActivity)
activityToJTensors(outputActivity)
}
def modelSave(module: AbstractModule[Activity, Activity, T],
path: String, overWrite: Boolean): Unit = {
module.save(path, overWrite)
}
def saveBigDLModule(module: AbstractModule[Activity, Activity, T],
path: String, overWrite: Boolean): Unit = {
module.saveModule(path, overWrite)
}
def saveCaffe(module: AbstractModule[Activity, Activity, T],
prototxtPath: String, modelPath: String,
useV2: Boolean = true, overwrite: Boolean = false): Unit = {
module.saveCaffe(prototxtPath, modelPath, useV2, overwrite)
}
def criterionForward(criterion: AbstractCriterion[Activity, Activity, T],
input: JList[JTensor],
inputIsTable: Boolean,
target: JList[JTensor],
targetIsTable: Boolean): T = {
val inputActivity = jTensorsToActivity(input, inputIsTable)
val targetActivity = jTensorsToActivity(target, targetIsTable)
    criterion.forward(inputActivity, targetActivity)
}
def criterionBackward(criterion: AbstractCriterion[Activity, Activity, T],
input: JList[JTensor],
inputIsTable: Boolean,
target: JList[JTensor],
targetIsTable: Boolean): JList[JTensor] = {
val inputActivity = jTensorsToActivity(input, inputIsTable)
val targetActivity = jTensorsToActivity(target, targetIsTable)
val outputActivity = criterion.backward(inputActivity, targetActivity)
activityToJTensors(outputActivity)
}
def modelGetParameters(model: AbstractModule[Activity, Activity, T])
: JMap[Any, JMap[Any, JList[JList[Any]]]] = {
model.getParametersTable().getState().mapValues {
case name2Values: Table =>
name2Values.getState().mapValues {
case t: Tensor[T] =>
val tensorClone = t.clone()
val item = List(tensorClone.storage().toList.asJava.asInstanceOf[JList[Any]],
tensorClone.size().toList.asJava.asInstanceOf[JList[Any]]).asJava
item
}.asJava
}.asJava
}
def createMaxEpoch(max: Int): Trigger = {
Trigger.maxEpoch(max)
}
def createEveryEpoch(): Trigger = {
Trigger.everyEpoch
}
def createSeveralIteration(interval: Int): Trigger = {
Trigger.severalIteration(interval)
}
def createMaxIteration(max: Int): Trigger = {
Trigger.maxIteration(max)
}
def createMaxScore(max: Float): Trigger = {
Trigger.maxScore(max)
}
def createMinLoss(min: Float): Trigger = {
Trigger.minLoss(min)
}
def createTop1Accuracy(): ValidationMethod[T] = {
new Top1Accuracy()
}
def createTreeNNAccuracy(): ValidationMethod[T] = {
new TreeNNAccuracy()
}
def createTop5Accuracy(): ValidationMethod[T] = {
new Top5Accuracy()
}
def createLoss(criterion: Criterion[T]): ValidationMethod[T] = {
new Loss(criterion)
}
def createMAE(): ValidationMethod[T] = {
new MAE()
}
def createSGD(learningRate: Double = 1e-3,
learningRateDecay: Double = 0.0,
weightDecay: Double = 0.0,
momentum: Double = 0.0,
dampening: Double = Double.MaxValue,
nesterov: Boolean = false,
    learningRateSchedule: SGD.LearningRateSchedule = SGD.Default(),
learningRates: JTensor = null,
weightDecays: JTensor = null): SGD[T] = {
val p1 = if (learningRates == null) null else toTensor(learningRates)
val p2 = if (weightDecays == null) null else toTensor(weightDecays)
new SGD[T](learningRate, learningRateDecay, weightDecay, momentum, dampening,
      nesterov, learningRateSchedule, p1, p2)
}
def createAdagrad(learningRate: Double = 1e-3,
learningRateDecay: Double = 0.0,
weightDecay: Double = 0.0): Adagrad[T] = {
new Adagrad[T](learningRate, learningRateDecay, weightDecay)
}
def createLBFGS(maxIter: Int = 20,
maxEval: Double = Double.MaxValue,
tolFun: Double = 1e-5,
tolX: Double = 1e-9,
nCorrection: Int = 100,
learningRate: Double = 1.0,
verbose: Boolean = false,
lineSearch: LineSearch[T] = null,
lineSearchOptions: JMap[Any, Any] = null): LBFGS[T] = {
val p1 = if (lineSearch == null) None else Option(lineSearch)
val p2 = if (lineSearchOptions == null) None else Option(T(lineSearchOptions))
new LBFGS[T](maxIter, maxEval, tolFun, tolX, nCorrection, learningRate, verbose, p1, p2)
}
def createAdadelta(decayRate: Double = 0.9, Epsilon: Double = 1e-10): Adadelta[T] = {
new Adadelta[T](decayRate, Epsilon)
}
def createAdam(
learningRate: Double = 1e-3,
learningRateDecay: Double = 0.0,
beta1: Double = 0.9,
beta2: Double = 0.999,
Epsilon: Double = 1e-8): Adam[T] = {
new Adam[T](learningRate, learningRateDecay, beta1, beta2, Epsilon)
}
def createAdamax(
learningRate: Double = 0.002,
beta1: Double = 0.9,
beta2: Double = 0.999,
Epsilon: Double = 1e-38): Adamax[T] = {
new Adamax(learningRate, beta1, beta2, Epsilon)
}
def createRMSprop(
learningRate: Double = 1e-2,
learningRateDecay: Double = 0.0,
decayRate: Double = 0.99,
Epsilon: Double = 1e-8): RMSprop[T] = {
new RMSprop[T](learningRate, learningRateDecay, decayRate, Epsilon)
}
def loadOptimMethod(path: String): OptimMethod[T] = {
OptimMethod.load[T](path)
}
def saveOptimMethod(method: OptimMethod[T], path: String,
overWrite: Boolean = false): Unit = {
method.save(path, overWrite)
}
/**
* Save tensor dictionary to a Java hashmap object file
*/
def saveTensorDictionary(tensors: JHashMap[String, JTensor], path: String): Unit = {
File.save(tensors, path, true)
}
def trainTF(
modelPath: String,
output: String,
samples: JavaRDD[Sample],
optMethod: OptimMethod[T],
criterion: Criterion[T],
batchSize: Int,
endWhen: Trigger): AbstractModule[Activity, Activity, T] = {
val nodeList = parse(modelPath)
val context = new Context[T]()
val session = new BigDLSessionImpl[T](nodeList.asScala, context, ByteOrder.LITTLE_ENDIAN)
val dataset = batching(DataSet.rdd(toJSample(samples)),
batchSize).asInstanceOf[DistributedDataSet[MiniBatch[T]]]
val model = session.train(Seq(output), dataset,
optMethod, criterion, endWhen)
model
}
def createLocalOptimizer(features: JList[JTensor],
y: JTensor,
model: AbstractModule[Activity, Activity, T],
criterion: Criterion[T],
optimMethod: OptimMethod[T],
endTrigger: Trigger,
batchSize: Int,
localCores: Int): Optimizer[T, MiniBatch[T]] = {
val sampleArray = toSampleArray(features.asScala.toList.map{f => toTensor(f)}, toTensor(y))
val optimizer = new LocalOptimizer[T](
model,
batching(DataSet.array(sampleArray), batchSize)
.asInstanceOf[LocalDataSet[MiniBatch[T]]],
criterion
).asInstanceOf[Optimizer[T, MiniBatch[T]]]
Engine.setNodeAndCore(1, localCores)
enrichOptimizer(optimizer, endTrigger, optimMethod)
}
def createDistriOptimizer(model: AbstractModule[Activity, Activity, T],
trainingRdd: JavaRDD[Sample],
criterion: Criterion[T],
optimMethod: OptimMethod[T],
endTrigger: Trigger,
batchSize: Int): Optimizer[T, MiniBatch[T]] = {
val sampleRDD = toJSample(trainingRdd)
val optimizer = new DistriOptimizer(
_model = model,
dataset = batching(DataSet.rdd(sampleRDD), batchSize)
.asInstanceOf[DistributedDataSet[MiniBatch[T]]],
criterion = criterion
).asInstanceOf[Optimizer[T, MiniBatch[T]]]
enrichOptimizer(optimizer, endTrigger, optimMethod)
}
def createL1L2Regularizer(l1: Double, l2: Double): L1L2Regularizer[T] = {
L1L2Regularizer[T](l1, l2)
}
def createL1Regularizer(l1: Double): L1Regularizer[T] = {
L1Regularizer[T](l1)
}
def createL2Regularizer(l2: Double): L2Regularizer[T] = {
L2Regularizer[T](l2)
}
def setValidation(optimizer: Optimizer[T, MiniBatch[T]],
batchSize: Int,
trigger: Trigger,
valRdd: JavaRDD[Sample],
vMethods: JList[ValidationMethod[T]]): Unit = {
val sampleRDD = toJSample(valRdd)
optimizer.setValidation(trigger, batching(DataSet.rdd(sampleRDD), batchSize.toInt),
vMethods.asScala.toArray)
}
def setCheckPoint(optimizer: Optimizer[T, MiniBatch[T]],
trigger: Trigger,
checkPointPath: String,
isOverwrite: Boolean): Unit = {
optimizer.setCheckpoint(checkPointPath, trigger)
if (isOverwrite) {
optimizer.overWriteCheckpoint()
}
}
def setTrainSummary(optimizer: Optimizer[T, MiniBatch[T]], summary: TrainSummary): Unit = {
optimizer.setTrainSummary(summary)
}
def setValSummary(optimizer: Optimizer[T, MiniBatch[T]], summary: ValidationSummary): Unit = {
optimizer.setValidationSummary(summary)
}
def summaryReadScalar(summary: Summary, tag: String): JList[JList[Any]] = {
val result = summary.readScalar(tag)
result.toList.map { item =>
List(item._1, item._2, item._3).asJava.asInstanceOf[JList[Any]]
}.asJava
}
def summarySetTrigger(
summary: TrainSummary,
summaryName: String,
trigger: Trigger): TrainSummary = {
summary.setSummaryTrigger(summaryName, trigger)
summary
}
def createTrainSummary(logDir: String,
appName: String): TrainSummary = {
new TrainSummary(logDir, appName)
}
def createValidationSummary(logDir: String,
appName: String): ValidationSummary = {
new ValidationSummary(logDir, appName)
}
def createModel(input: JList[ModuleNode[T]], output: JList[ModuleNode[T]]): Graph[T] = {
Graph(input.asScala.toArray, output.asScala.toArray)
}
def createNode(module: AbstractModule[Activity, Activity, T],
x: JList[ModuleNode[T]]): ModuleNode[T] = {
if (null == x || x.isEmpty) {
module.inputs()
} else {
module.inputs(x.asScala: _*)
}
}
def createInput(): ModuleNode[T] = {
Input()
}
def initEngine(): Unit = {
Engine.init
}
def setWeights(model: AbstractModule[Activity, Activity, T], weights: JList[JTensor]): Unit = {
val weightTensor = weights.asScala.toArray.map(toTensor(_))
model.setWeightsBias(weightTensor)
}
def getWeights(model: AbstractModule[Activity, Activity, T]): JList[JTensor] = {
val weights = model.getWeightsBias()
if (weights != null) {
weights.map(toJTensor(_)).toList.asJava
} else {
null
}
}
def updateParameters(model: AbstractModule[Activity, Activity, T], lr: Double): Unit = {
model.updateParameters(ev.fromType(lr))
}
def uniform(a: Double, b: Double, size: JList[Int]): JTensor = {
val result = Tensor[T]().resize(size.asScala.toArray)
result.apply1(i => ev.fromType(RandomGenerator.RNG.uniform(a, b)))
toJTensor(result)
}
def createZeros(): Zeros.type = {
Zeros
}
def createOnes(): Ones.type = {
Ones
}
def createConstInitMethod(value: Double): ConstInitMethod = {
ConstInitMethod(value)
}
def createRandomUniform(lower: Double, upper: Double): InitializationMethod = {
RandomUniform(lower, upper)
}
def createRandomUniform(): InitializationMethod = {
RandomUniform
}
def createRandomNormal(mean: Double, stdv: Double): RandomNormal = {
RandomNormal(mean, stdv)
}
def createXavier(): Xavier.type = {
Xavier
}
def createMsraFiller(varianceNormAverage: Boolean = true): MsraFiller = {
MsraFiller(varianceNormAverage)
}
def createBilinearFiller(): BilinearFiller.type = {
BilinearFiller
}
def createHardSigmoid: HardSigmoid[T] = {
HardSigmoid()
}
def setInitMethod(layer: Initializable, weightInitMethod: InitializationMethod,
biasInitMethod: InitializationMethod): layer.type = {
layer.setInitMethod(weightInitMethod, biasInitMethod)
}
def getHiddenStates(rec: Recurrent[T]): JList[JTensor] = {
val states = rec.getHiddenState()
activityToJTensors(states)
}
def setHiddenStates(rec: Recurrent[T], hiddenStates: JList[JTensor], isTable: Boolean): Unit = {
rec.setHiddenState(jTensorsToActivity(hiddenStates, isTable))
}
def freeze(model: AbstractModule[Activity, Activity, T], freezeLayers: JList[String])
: AbstractModule[Activity, Activity, T] = {
if (null == freezeLayers) model.freeze() else model.freeze(freezeLayers.asScala: _*)
}
def unFreeze(model: AbstractModule[Activity, Activity, T],
names: JList[String]): AbstractModule[Activity, Activity, T] = {
if (names == null) {
model.unFreeze()
} else {
model.unFreeze(names.asScala: _*)
}
}
def setStopGradient(model: Graph[T], layers: JList[String]): Graph[T] = {
model.stopGradient(layers.asScala.toArray)
}
def saveGraphTopology(model: Graph[T], logPath: String): Graph[T] = {
model.saveGraphTopology(logPath)
}
def createResizeBilinear(
outputHeight: Int,
outputWidth: Int,
alignCorner: Boolean
): ResizeBilinear[T] = {
ResizeBilinear[T](outputHeight,
outputWidth,
alignCorner)
}
def createHighway(size: Int, withBias: Boolean, activation: String,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null): Graph[T] = {
Highway(size, withBias, activation, wRegularizer, bRegularizer)
}
def createUpSampling3D(size: JList[Int]): UpSampling3D[T] = {
UpSampling3D(size.asScala.toArray)
}
def redirectSparkLogs(logPath: String): Unit = {
LoggerFilter.redirectSparkInfoLogs(logPath)
}
def showBigDlInfoLogs(): Unit = {
Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)
}
def quantize(module: AbstractModule[Activity, Activity, T]): Module[T] = {
module.quantize()
}
def createDLEstimator(model: Module[T], criterion: Criterion[T],
featureSize: JArrayList[Int],
labelSize: JArrayList[Int]): DLEstimator[T] = {
new DLEstimator[T](model, criterion, featureSize.asScala.toArray, labelSize.asScala.toArray)
}
def createDLClassifier(model: Module[T], criterion: Criterion[T],
featureSize: JArrayList[Int],
labelSize: JArrayList[Int]): DLClassifier[T] = {
new DLClassifier[T](model, criterion, featureSize.asScala.toArray)
}
def fitEstimator(estimator: DLEstimator[T], dataSet: DataFrame): DLModel[T] = {
estimator.fit(dataSet)
}
def fitClassifier(classifier: DLClassifier[T], dataSet: DataFrame): DLModel[T] = {
classifier.fit(dataSet)
}
def setBatchSizeDLEstimator(estimator: DLEstimator[T], batchSize: Int): DLEstimator[T] = {
estimator.setBatchSize(batchSize)
}
def setBatchSizeDLClassifier(classifier: DLClassifier[T], batchSize: Int): DLClassifier[T] = {
classifier.setBatchSize(batchSize)
}
def setMaxEpochDLEstimator(estimator: DLEstimator[T], maxEpoch: Int): DLEstimator[T] = {
estimator.setMaxEpoch(maxEpoch)
}
def setMaxEpochDLClassifier(classifier: DLClassifier[T], maxEpoch: Int): DLClassifier[T] = {
classifier.setMaxEpoch(maxEpoch)
}
def setLearningRateDLEstimator(estimator: DLEstimator[T], lr: Double): DLEstimator[T] = {
estimator.setLearningRate(lr)
}
def setLearningRateDLClassifier(classifier: DLClassifier[T], lr: Double): DLClassifier[T] = {
classifier.setLearningRate(lr)
}
def createDLModel(model: Module[T], featureSize: JArrayList[Int]): DLModel[T] = {
new DLModel[T](model, featureSize.asScala.toArray)
}
def createDLClassifierModel(model: Module[T],
featureSize: JArrayList[Int]): DLClassifierModel[T] = {
new DLClassifierModel[T](model, featureSize.asScala.toArray)
}
def dlModelTransform(dlModel: DLModel[T], dataSet: DataFrame): DataFrame = {
dlModel.transform(dataSet)
}
def dlClassifierModelTransform(dlClassifierModel: DLClassifierModel[T],
dataSet: DataFrame): DataFrame = {
dlClassifierModel.transform(dataSet)
}
def setFeatureSizeDLModel(dlModel: DLModel[T], featureSize: JArrayList[Int]): DLModel[T] = {
dlModel.setFeatureSize(featureSize.asScala.toArray)
}
def setFeatureSizeDLClassifierModel(dlClassifierModel: DLClassifierModel[T],
featureSize: JArrayList[Int]): DLClassifierModel[T] = {
dlClassifierModel.setFeatureSize(featureSize.asScala.toArray)
}
def setBatchSizeDLModel(dlModel: DLModel[T], batchSize: Int): DLModel[T] = {
dlModel.setBatchSize(batchSize)
}
def setBatchSizeDLClassifierModel(dlClassifierModel: DLClassifierModel[T],
batchSize: Int): DLClassifierModel[T] = {
dlClassifierModel.setBatchSize(batchSize)
}
def getContainerModules(module: Container[Activity, Activity, T])
: JList[AbstractModule[Activity, Activity, T]] = {
module.modules.toList.asJava
}
def getFlattenModules(module: Container[Activity, Activity, T])
: JList[AbstractModule[Activity, Activity, T]] = {
val result = ArrayBuffer[AbstractModule[Activity, Activity, T]]()
doGetFlattenModules(module, result)
result.toList.asJava
}
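// Recursively walks the container tree and appends every leaf (non-container) module to result.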
private def doGetFlattenModules(module: Container[Activity, Activity, T],
result: ArrayBuffer[AbstractModule[Activity, Activity, T]]): Unit = {
module.modules.foreach {m =>
if (m.isInstanceOf[Container[Activity, Activity, T]]) {
doGetFlattenModules(m.asInstanceOf[Container[Activity, Activity, T]], result)
} else {
result.append(m)
}
}
}
def isWithWeights(module: Module[T]): Boolean = {
val weights = module.getWeightsBias()
weights != null && !weights.isEmpty
}
def setRunningMean(module: BatchNormalization[T], runningMean: JTensor): Unit = {
module.runningMean.set(toTensor(runningMean))
}
def setRunningStd(module: BatchNormalization[T], runningStd: JTensor): Unit = {
module.runningVar.set(toTensor(runningStd))
}
def createCosineProximityCriterion(): CosineProximityCriterion[T] = {
CosineProximityCriterion[T]()
}
def createHFlip(): HFlip = {
HFlip()
}
def createResize(resizeH: Int, resizeW: Int, resizeMode: Int = Imgproc.INTER_LINEAR): Resize = {
Resize(resizeH, resizeW, resizeMode)
}
def createColorJitter(brightnessProb: Double = 0.5, brightnessDelta: Double = 32,
contrastProb: Double = 0.5, contrastLower: Double = 0.5, contrastUpper: Double = 1.5,
hueProb: Double = 0.5, hueDelta: Double = 18,
saturationProb: Double = 0.5, saturationLower: Double = 0.5, saturationUpper: Double = 1.5,
randomOrderProb: Double = 0, shuffle: Boolean = false): ColorJitter = {
ColorJitter(brightnessProb, brightnessDelta, contrastProb,
contrastLower, contrastUpper, hueProb, hueDelta, saturationProb,
saturationLower, saturationUpper, randomOrderProb, shuffle)
}
def createBrightness(deltaLow: Double, deltaHigh: Double): Brightness = {
Brightness(deltaLow, deltaHigh)
}
def createChannelOrder(): ChannelOrder = {
ChannelOrder()
}
def createContrast(deltaLow: Double, deltaHigh: Double): Contrast = {
Contrast(deltaLow, deltaHigh)
}
def createRandomCrop(cropWidth: Int, cropHeight: Int, isClip: Boolean): RandomCrop = {
RandomCrop(cropWidth, cropHeight, isClip)
}
def createCenterCrop(cropWidth: Int, cropHeight: Int, isClip: Boolean): CenterCrop = {
CenterCrop(cropWidth, cropHeight, isClip)
}
def createFixedCrop(wStart: Double,
hStart: Double, wEnd: Double, hEnd: Double, normalized: Boolean,
isClip: Boolean): FixedCrop = {
FixedCrop(wStart.toFloat, hStart.toFloat, wEnd.toFloat, hEnd.toFloat, normalized, isClip)
}
def createDetectionCrop(roiKey: String, normalized: Boolean): DetectionCrop = {
DetectionCrop(roiKey, normalized)
}
def createExpand(meansR: Int = 123, meansG: Int = 117, meansB: Int = 104,
minExpandRatio: Double = 1.0,
maxExpandRatio: Double = 4.0): Expand = {
Expand(meansR, meansG, meansB, minExpandRatio, maxExpandRatio)
}
def createRandomAspectScale(scales: JList[Int], scaleMultipleOf: Int = 1,
maxSize: Int = 1000): RandomAspectScale = {
RandomAspectScale(scales.asScala.toArray, scaleMultipleOf, maxSize)
}
def createHue(deltaLow: Double, deltaHigh: Double): Hue = {
Hue(deltaLow, deltaHigh)
}
def createRandomTransformer(transformer: FeatureTransformer, prob: Double): RandomTransformer = {
RandomTransformer(transformer, prob)
}
def createSaturation(deltaLow: Double, deltaHigh: Double): Saturation = {
Saturation(deltaLow, deltaHigh)
}
def createRandomSampler(): FeatureTransformer = {
RandomSampler()
}
def createChannelNormalize(meanR: Double, meanG: Double, meanB: Double,
stdR: Double = 1, stdG: Double = 1, stdB: Double = 1): FeatureTransformer = {
ChannelNormalize(meanR.toFloat, meanG.toFloat, meanB.toFloat,
stdR.toFloat, stdG.toFloat, stdB.toFloat)
}
def createAspectScale(scale: Int, scaleMultipleOf: Int, maxSize: Int): FeatureTransformer = {
AspectScale(scale, scaleMultipleOf, maxSize)
}
def createFiller(startX: Double, startY: Double, endX: Double, endY: Double,
value: Int = 255): Filler = {
Filler(startX.toFloat, startY.toFloat, endX.toFloat, endY.toFloat, value)
}
def createPixelNormalize(means: JList[Double]): PixelNormalizer = {
PixelNormalizer(means.asScala.toArray.map(_.toFloat))
}
def createRoiProject(needMeetCenterConstraint: Boolean): RoiProject = {
RoiProject(needMeetCenterConstraint)
}
def createRoiResize(normalized: Boolean): RoiResize = {
RoiResize(normalized)
}
def createRoiHFlip(normalized: Boolean = true): RoiHFlip = {
RoiHFlip(normalized)
}
def createRoiNormalize(): RoiNormalize = {
RoiNormalize()
}
def transformImageFeature(transformer: FeatureTransformer, feature: ImageFeature)
: ImageFeature = {
transformer.transform(feature)
}
def transformImageFrame(transformer: FeatureTransformer,
imageFrame: ImageFrame): ImageFrame = {
imageFrame.transform(transformer)
}
def createDistributedImageFrame(imageRdd: JavaRDD[JTensor], labelRdd: JavaRDD[JTensor])
: DistributedImageFrame = {
require(null != imageRdd, "imageRdd cannot be null")
val featureRdd = if (null != labelRdd) {
imageRdd.rdd.zip(labelRdd.rdd).map(data => {
createImageFeature(data._1, data._2)
})
} else {
imageRdd.rdd.map(image => {
createImageFeature(image, null)
})
}
new DistributedImageFrame(featureRdd)
}
def createLocalImageFrame(images: JList[JTensor], labels: JList[JTensor])
: LocalImageFrame = {
require(null != images, "images cannot be null")
val features = if (null != labels) {
(0 until images.size()).map(i => {
createImageFeature(images.get(i), labels.get(i))
})
} else {
(0 until images.size()).map(i => {
createImageFeature(images.get(i), null)
})
}
new LocalImageFrame(features.toArray)
}
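// Chain the given transformers into a single pipeline by folding them left to right with `->`.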
def createPipeline(list: JList[FeatureTransformer]): FeatureTransformer = {
var cur = list.get(0)
(1 until list.size()).foreach(t => cur = cur -> list.get(t))
cur
}
def createImageFeature(data: JTensor = null, label: JTensor = null, uri: String = null)
: ImageFeature = {
val feature = new ImageFeature()
if (null != data) {
val mat = OpenCVMat.fromFloats(data.storage, data.shape(0), data.shape(1))
feature(ImageFeature.mat) = mat
feature(ImageFeature.size) = mat.shape()
}
if (null != label) {
// todo: may need a method to change label format if needed
feature(ImageFeature.label) = toTensor(label)
}
if (null != uri) {
feature(ImageFeature.uri) = uri
}
feature
}
def imageFeatureToSample(imageFeature: ImageFeature,
floatKey: String = ImageFeature.floats, toChw: Boolean = true,
withImInfo: Boolean = false): Sample = {
val imageTensor = imageFeatureToImageTensor(imageFeature, floatKey, toChw)
val features = new util.ArrayList[JTensor]()
features.add(imageTensor)
if (withImInfo) {
val imInfo = imageFeature.getImInfo()
features.add(toJTensor(imInfo.asInstanceOf[Tensor[T]]))
}
val label = imageFeatureToLabelTensor(imageFeature)
Sample(features, label, "float")
}
def imageFeatureGetKeys(imageFeature: ImageFeature): JList[String] = {
imageFeature.keys().toList.asJava
}
def distributedImageFrameToSampleRdd(imageFrame: DistributedImageFrame,
floatKey: String = ImageFeature.floats, toChw: Boolean = true, withImInfo: Boolean = false)
: JavaRDD[Sample] = {
imageFrame.rdd.map(imageFeatureToSample(_, floatKey, toChw, withImInfo)).toJavaRDD()
}
def distributedImageFrameToImageTensorRdd(imageFrame: DistributedImageFrame,
floatKey: String = ImageFeature.floats, toChw: Boolean = true): JavaRDD[JTensor] = {
imageFrame.rdd.map(imageFeatureToImageTensor(_, floatKey, toChw)).toJavaRDD()
}
def distributedImageFrameToLabelTensorRdd(imageFrame: DistributedImageFrame): JavaRDD[JTensor] = {
imageFrame.rdd.map(imageFeatureToLabelTensor).toJavaRDD()
}
def localImageFrameToSample(imageFrame: LocalImageFrame,
floatKey: String = ImageFeature.floats, toChw: Boolean = true, withImInfo: Boolean = false)
: JList[Sample] = {
imageFrame.array.map(imageFeatureToSample(_, floatKey, toChw, withImInfo)).toList.asJava
}
def localImageFrameToImageTensor(imageFrame: LocalImageFrame,
floatKey: String = ImageFeature.floats, toChw: Boolean = true): JList[JTensor] = {
imageFrame.array.map(imageFeatureToImageTensor(_, floatKey, toChw)).toList.asJava
}
def localImageFrameToLabelTensor(imageFrame: LocalImageFrame): JList[JTensor] = {
imageFrame.array.map(imageFeatureToLabelTensor).toList.asJava
}
def imageFeatureToImageTensor(imageFeature: ImageFeature,
floatKey: String = ImageFeature.floats, toChw: Boolean = true): JTensor = {
toJTensor(imageFeature.toTensor(floatKey, toChw).asInstanceOf[Tensor[T]])
}
def imageFeatureToLabelTensor(imageFeature: ImageFeature): JTensor = {
val label = if (imageFeature.hasLabel()) {
imageFeature.getLabel[Tensor[T]]
} else {
Tensor[T](1).fill(ev.fromType[Float](-1f))
}
toJTensor(label)
}
def read(path: String, sc: JavaSparkContext): ImageFrame = {
if (sc == null) ImageFrame.read(path, null) else ImageFrame.read(path, sc.sc)
}
def readParquet(path: String, sqlContext: SQLContext): DistributedImageFrame = {
ImageFrame.readParquet(path, sqlContext)
}
def createBytesToMat(): BytesToMat = {
BytesToMat()
}
def isLocal(imageFrame: ImageFrame): Boolean = imageFrame.isLocal()
def isDistributed(imageFrame: ImageFrame): Boolean = imageFrame.isDistributed()
}
object PythonBigDLUtils {
def toTensor[T: ClassTag](jTensor: JTensor, typeName: String)
(implicit ev: TensorNumeric[T]): Tensor[T] = {
if (jTensor == null) return null
typeName match {
case "float" =>
Tensor(jTensor.storage.map(x => ev.fromType(x.toFloat)), jTensor.shape)
case "double" =>
Tensor(jTensor.storage.map(x => ev.fromType(x.toDouble)), jTensor.shape)
case t: String =>
throw new IllegalArgumentException(s"Not supported type: ${t}")
}
}
}
|
jenniew/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/python/api/PythonBigDL.scala
|
Scala
|
apache-2.0
| 73,037
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.xml
import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import com.typesafe.config.ConfigFactory
import com.vividsolutions.jts.geom.Point
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert2.SimpleFeatureConverter
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class XmlConverterTest extends Specification {
sequential
val sftConf = ConfigFactory.parseString(
"""{ type-name = "xmlFeatureType"
| attributes = [
| {name = "number", type = "Integer"}
| {name = "color", type = "String"}
| {name = "weight", type = "Double"}
| {name = "source", type = "String"}
| ]
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(sftConf)
"XML Converter" should {
"parse multiple features out of a single document" >> {
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <Feature>
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
| <Feature>
| <number>456</number>
| <color>blue</color>
| <physical weight="150" height="h2"/>
| </Feature>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "physical/@weight", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"parse multiple features out of a single document with the geometry in the repeated XML tag" >> {
val sftConf2 = ConfigFactory.parseString(
"""{ type-name = "xmlFeatureType"
| attributes = [
| {name = "number", type = "Integer"}
| {name = "color", type = "String"}
| {name = "weight", type = "Double"}
| {name = "source", type = "String"}
| {name = "geom", type = "Point"}
| ]
|}
""".stripMargin)
val sft2 = SimpleFeatureTypes.createType(sftConf2)
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <Feature lon="1.23" lat="4.23">
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
| <Feature lon="4.56" lat="7.56">
| <number>456</number>
| <color>blue</color>
| <physical weight="150" height="h2"/>
| </Feature>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "physical/@weight", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| { name = "lon", path = "./@lon", transform = "$0::double" }
| { name = "lat", path = "./@lat", transform = "$0::double" }
| { name = "geom", transform = "point($lon, $lat)" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft2, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features.head.getAttribute("geom").asInstanceOf[Point] mustEqual WKTUtils.read("POINT(1.23 4.23)").asInstanceOf[Point]
features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features(1).getAttribute("geom").asInstanceOf[Point] mustEqual WKTUtils.read("POINT(4.56 7.56)").asInstanceOf[Point]
}
"parse nested feature nodes" >> {
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <IgnoreMe>
| <Feature>
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
| </IgnoreMe>
| <IgnoreMe>
| <Feature>
| <number>456</number>
| <color>blue</color>
| <physical weight="150" height="h2"/>
| </Feature>
| </IgnoreMe>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "/doc/IgnoreMe/Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "physical/@weight", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"apply xpath functions" >> {
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <Feature>
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "floor(physical/@weight)", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"use an ID hash for each node" >> {
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <Feature>
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
| <Feature>
| <number>456</number>
| <color>blue</color>
| <physical weight="150" height="h2"/>
| </Feature>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "md5(string2bytes(xml2string($0)))"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "physical/@weight", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features.head.getID mustEqual "441dd9114a1a345fe59f0dfe461f01ca"
features(1).getID mustEqual "42aae6286c7204c3aa1aa99a4e8dae35"
}
"validate with an xsd" >> {
val xml =
"""<?xml version="1.0" encoding="UTF-8" ?>
|<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
| <f:DataSource>
| <f:name>myxml</f:name>
| </f:DataSource>
| <f:Feature>
| <f:number>123</f:number>
| <f:color>red</f:color>
| <f:physical weight="127.5" height="5'11"/>
| </f:Feature>
|</f:doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
| xsd = "xml-feature.xsd" // looked up by class.getResource
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "ns:number", transform = "$0::integer" }
| { name = "color", path = "ns:color", transform = "trim($0)" }
| { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
| { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
| ]
| xml-namespaces = {
| ns = "http://geomesa.org/test-feature"
| }
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
"parse as itr" >> {
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"parse as stream" >> {
val features = converter.process(new ByteArrayInputStream(xml.replaceAllLiterally("\n", " ").getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
}
"parse xml im multi line mode" >> {
val xml =
"""<?xml version="1.0" encoding="UTF-8" ?>
|<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
| <f:DataSource>
| <f:name>myxml</f:name>
| </f:DataSource>
| <f:Feature>
| <f:number>123</f:number>
| <f:color>red</f:color>
| <f:physical weight="127.5" height="5'11"/>
| </f:Feature>
|</f:doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
| xsd = "xml-feature.xsd" // looked up by class.getResource
| options {
| line-mode = "multi"
| }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "ns:number", transform = "$0::integer" }
| { name = "color", path = "ns:color", transform = "trim($0)" }
| { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
| { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
| ]
| xml-namespaces = {
| ns = "http://geomesa.org/test-feature"
| }
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes)).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"parse xml in single line mode" >> {
val origXml =
"""<?xml version="1.0" encoding="UTF-8" ?>
|<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
| <f:DataSource>
| <f:name>myxml</f:name>
| </f:DataSource>
| <f:Feature>
| <f:number>123</f:number>
| <f:color>red</f:color>
| <f:physical weight="127.5" height="5'11"/>
| </f:Feature>
|</f:doc>
""".stripMargin
val xml = origXml.replaceAllLiterally("\n", " ") + "\n" + origXml.replaceAllLiterally("\n", " ")
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
| xsd = "xml-feature.xsd" // looked up by class.getResource
| options {
| line-mode = "single"
| }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "ns:number", transform = "$0::integer" }
| { name = "color", path = "ns:color", transform = "trim($0)" }
| { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
| { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
| ]
| xml-namespaces = {
| ns = "http://geomesa.org/test-feature"
| }
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes)).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features.last.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.last.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.last.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.last.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"invalidate with an xsd" >> {
val xml =
"""<f:doc2 xmlns:f="http://geomesa.org/test-feature" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
| <f:DataSource>
| <f:name>myxml</f:name>
| </f:DataSource>
| <f:Feature>
| <f:number>123</f:number>
| <f:color>red</f:color>
| <f:physical weight="127.5" height="5'11"/>
| </f:Feature>
|</f:doc2>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
| xsd = "xml-feature.xsd" // looked up by class.getResource
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "ns:number", transform = "$0::integer" }
| { name = "color", path = "ns:color", transform = "trim($0)" }
| { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
| { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
| ]
| xml-namespaces = {
| ns = "http://geomesa.org/test-feature"
| }
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(0)
}
"handle user data" >> {
val xml =
"""<doc>
| <DataSource>
| <name>myxml</name>
| </DataSource>
| <Feature>
| <number>123</number>
| <color>red</color>
| <physical weight="127.5" height="5'11"/>
| </Feature>
|</doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| user-data = {
| my.user.key = "$weight"
| }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "floor(physical/@weight)", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features.head.getUserData.get("my.user.key") mustEqual 127d
}
"Parse XMLs with a BOM" >> {
val xml = getClass.getClassLoader.getResource("bomTest.xml")
xml must not(beNull)
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "number", transform = "$0::integer" }
| { name = "color", path = "color", transform = "trim($0)" }
| { name = "weight", path = "physical/@weight", transform = "$0::double" }
| { name = "source", path = "/doc/DataSource/name/text()" }
| ]
| }
""".stripMargin)
val xmlConverter = SimpleFeatureConverter(sft, parserConf)
val features = xmlConverter.process(xml.openStream()).toList
features must haveLength(2)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
"support namespaces with saxon" >> {
val xml =
"""<ns:doc xmlns:ns="http://geomesa.example.com/foo" xmlns:ns2="http://geomesa.example.com/foo2">
| <ns:DataSource>
| <ns:name>myxml</ns:name>
| </ns:DataSource>
| <ns:Feature>
| <ns:number>123</ns:number>
| <ns:color>red</ns:color>
| <ns2:physical weight="127.5" height="5'11"/>
| </ns:Feature>
|</ns:doc>
""".stripMargin
val parserConf = ConfigFactory.parseString(
"""
| {
| type = "xml"
| id-field = "uuid()"
| feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
| options { line-mode = "multi" }
| fields = [
| // paths can be any xpath - relative to the feature-path, or absolute
| { name = "number", path = "ns:number", transform = "$0::integer" }
| { name = "color", path = "ns:color", transform = "trim($0)" }
| { name = "weight", path = "floor(ns2:physical/@weight)", transform = "$0::double" }
| { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
| ]
| xml-namespaces = {
| ns = "http://geomesa.example.com/foo"
| ns2 = "http://geomesa.example.com/foo2"
| }
| }
""".stripMargin)
val converter = SimpleFeatureConverter(sft, parserConf)
val features = converter.process(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))).toList
features must haveLength(1)
features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
}
}
}
|
ddseapy/geomesa
|
geomesa-convert/geomesa-convert-xml/src/test/scala/org/locationtech/geomesa/convert/xml/XmlConverterTest.scala
|
Scala
|
apache-2.0
| 27,958
|
package com.dataintuitive.luciusapi
// Functions implementation and common code
import com.dataintuitive.luciusapi.functions.HistogramFunctions._
import Common.ParamHandlers._
// LuciusCore
import com.dataintuitive.luciuscore.Model.DbRow
import com.dataintuitive.luciuscore.genes._
// Jobserver
import spark.jobserver.api.{JobEnvironment, SingleProblem, ValidationProblem}
import spark.jobserver._
// Scala, Scalactic and Typesafe
import scala.util.Try
import org.scalactic._
import Accumulation._
import com.typesafe.config.Config
// Spark
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Dataset
object histogram extends SparkSessionJob with NamedObjectSupport {
type JobData = functions.HistogramFunctions.JobData
type JobOutput = collection.Map[String, Any]
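// validate accumulates missing or invalid parameters with Scalactic's Or / Every; a JobData
// is only produced when the version check passes and db, genes and signature all resolve.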
override def validate(sparkSession: SparkSession,
runtime: JobEnvironment,
config: Config): JobData Or Every[ValidationProblem] = {
val db = getDB(runtime)
val genes = getGenes(runtime)
val version = optParamVersion(config)
val isValidVersion = validVersion(config)
val signature = paramSignature(config)
val bins = optParamBins(config, 15)
val features = optParamFeatures(config)
val filters = optParamFilters(config)
// features to return: list of features
// val featuresString:String = Try(config.getString("features")).getOrElse("zhang")
// val featuresQueryWithoutZhang = featuresString.split(" ").toList
// // Always add zhang score wrt signature
// val featuresQuery = (featuresQueryWithoutZhang ++ List("zhang")).distinct
(isValidVersion zip
withGood(db, genes, signature) {
JobData(_, _, version, _, features, bins, filters)
}).map(_._2)
}
override def runJob(sparkSession: SparkSession,
runtime: JobEnvironment,
data: JobData): JobOutput = {
implicit val thisSession = sparkSession
data.version match {
case "v2" =>
Map(
"info" -> info(data),
"header" -> header(data),
"data" -> result(data)
)
case _ => Map("result" -> result(data))
}
}
}
|
data-intuitive/LuciusAPI
|
src/main/scala/com/dataintuitive/luciusapi/histogram.scala
|
Scala
|
apache-2.0
| 2,201
|
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tutorial.libcog.actuators
import libcog._
object ActuatorExample extends App {
val graph = new ComputeGraph {
// Initialize a field to (1,2,3,4)
val field = ScalarField(4, (column) => 1 + column)
//Increment each element by one each tick
field <== field + 1
// define an actuator that outputs the field elements to an array each tick
// and specifies an initial actuator state of (4,3,2,1)
val actuatorData = new Array[Float](4)
val actuator = Actuator(field, actuatorData, (column) => 4 - column)
}
import graph._
withRelease {
// reset the graph, print actuator data
reset
println(actuatorData.mkString(" "))
// step the graph 5 times, print actuator data after each step
for(i <- 0 until 5) {
step
println(actuatorData.mkString(" "))
}
}
}
|
hpe-cct/cct-tutorial
|
src/main/scala/tutorial/libcog/actuators/ActuatorExample.scala
|
Scala
|
apache-2.0
| 1,461
|
import org.scalatestplus.play._
import play.api.test._
import play.api.test.Helpers._
import LongestPath._
/**
* Add your spec here.
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
class ApplicationSpec extends PlaySpec with OneAppPerTest {
"Routes" should {
"send 404 on a bad request" in {
route(app, FakeRequest(GET, "/boum")).map(status(_)) mustBe Some(NOT_FOUND)
}
}
"HomeController" should {
"render the index page" in {
val home = route(app, FakeRequest(GET, "/")).get
status(home) mustBe OK
contentType(home) mustBe Some("text/html")
contentAsString(home) must include (" :)")
}
}
var grid1 = Array(
Array(1,2),
Array(3,4),
Array(5,5)
)
val graph = GraphHelper.makeGraphFromGrid(grid1)
"Testing grid1 " should {
" graph(0).neighbors should be List(1,2,3)" in {
assert( graph(0).neighbors === List(1,2,3))
}
}
"Testing grid1 " should {
" Neighbors of graph(0) should be List(1,2,3)" in {
assert(graph(0).neighbors === List(1, 2, 3))
}
}
"Testing grid1 " should {
" Neighbors of graph(4) should be List(5)" in {
assert( graph(4).neighbors === List(5))
}
}
"Testing grid1 " should {
" Neighbors of graph(5)" in {
assert( graph(5).neighbors === List(4))
}
}
var grid2 = Array(
Array(2,1,2),
Array(4,10,3),
Array(4,9,4)
)
"A Solved split longest path test from grid2 " should {
"be List(Vector(4, 7, 6, 3, 0, 1), Vector(4, 7, 8, 5, 2, 1)))" in {
assert( (new LongestPathFinder( GraphHelper.makeGraphFromGrid(grid2)))
.getLongestPathStartingAtNode(1) === List(Vector(4, 7, 6, 3, 0, 1), Vector(4, 7, 8, 5, 2, 1)))
}
}
val grid3 = LongestPath.GraphHelper.makeGridFromCommaSeperatedLine("2,1,2,4,10,3,4,9,4", 3)
"Make a grid from a comma separated line of Integers " should {
"be Array(Array(2,1,2),Array(4,10,3),Array(4,9,4))" in {
assert( grid3 === Array(
Array(2,1,2),
Array(4,10,3),
Array(4,9,4)
))
}
}
}
|
Strateger/LongestPath
|
test/ApplicationSpec.scala
|
Scala
|
mit
| 2,221
|
package euler
package til60
object Euler51 extends EulerProblem {
/**
* primePermutations(123, 2) returns all prime permutations of 123xx with xx == 00,11,..,99
*/
def primePermutations(n: Int, addedDigitCnt: Int) = {
val digits = toDigits(n) ++ List.fill(addedDigitCnt)(-1)
digits.permutations.map { x =>
(0 to 9).map { d =>
x.map { case -1 => d; case a => a }
} map fromDigits filter isPrime
} filterNot { _.isEmpty }
}
override def result = {
val res = Stream.from(100) flatMap { primePermutations(_, 3) } dropWhile (xs => xs.size < 8 || xs.head < 100000)
res.head.head
}
}
|
TrustNoOne/Euler
|
scala/src/main/scala/euler/til60/Euler51.scala
|
Scala
|
mit
| 636
|
package io.circe.yaml
import io.circe.Json
import io.circe.Json.eqJson
import io.circe.testing.instances._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.Checkers
import org.typelevel.discipline.Laws
class SnakeYamlSymmetricSerializationTests extends AnyFunSuite with Checkers with SymmetricSerializationTests {
override val laws: SymmetricSerializationLaws = SymmetricSerializationLaws()
def checkAll(name: String, ruleSet: Laws#RuleSet): Unit =
for ((id, prop) <- ruleSet.all.properties)
test(name + "." + id) {
check(prop)
}
checkAll("snake.printer", symmetricPrinter[Json](printer.print, parser.parse))
}
|
circe/circe-yaml
|
src/test/scala/io/circe/yaml/SnakeYamlSymmetricSerializationTests.scala
|
Scala
|
apache-2.0
| 674
|
package akka.guice.annotation
import scala.annotation.{StaticAnnotation, compileTimeOnly}
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
object injectableActor {
def impl(c: whitebox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] =
new InjectableActorHelper[c.type](c).impl(annottees : _*)
}
/**
* Labels an Actor class as being injectable.
*
* Macro expansion will fill in the boilerplate to assure the Actor
* is able to be injected as well as create other injected Actors.
*
* This is the main entry point and the only actual macro. The other
* annotations are merely markers that are processed by this macro
* and subsequently removed.
*
* This means that only Actors can currently be handled using Macros.
* Other types are usually completely custom and have their own rules
* so there is inherently less generic boilerplate involved.
*/
@compileTimeOnly("Enable macro paradise compiler plugin to expand macro annotations")
class injectableActor extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro injectableActor.impl
}
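// A minimal usage sketch (hypothetical class and dependency names; the injection boilerplate
// is filled in by the macro expansion described above):
//
// @injectableActor
// class Worker(service: SomeService) extends Actor {
// def receive = { case msg => service.handle(msg) }
// }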
|
Equiem/akka-guice
|
macros/src/main/scala/akka/guice/annotation/injectableActor.scala
|
Scala
|
mit
| 1,119
|
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package types
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
trait ScAnnotTypeElement extends ScTypeElement {
override protected val typeName = "TypeWithAnnotation"
def typeElement: ScTypeElement = findChild[ScTypeElement].get
override protected def innerType: TypeResult = typeElement.`type`()
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScAnnotTypeElement.scala
|
Scala
|
apache-2.0
| 474
|
package edu.gemini.pot.sp
import edu.gemini.spModel.rich.pot.sp._
import org.scalacheck.Gen
import scala.collection.JavaConverters._
import scala.util.Random
object DuplicateSpec extends ProgramTestSupport {
val genTestProg: Gen[ISPFactory => ISPProgram] =
ProgramGen.genProg
def randomInstance[A](as: Vector[A]): Option[A] = {
val s = as.size
if (s == 0) None else Some(as(Random.nextInt(s)))
}
def randomObs(p: ISPProgram): Option[ISPObservation] =
randomInstance(ObservationIterator.apply(p).asScala.toVector)
def randomObsNumber(p: ISPProgram): Option[Int] =
randomObs(p).map(_.getObservationNumber)
def randomContainer(p: ISPProgram): Option[ISPObservationContainer] =
randomInstance(p.toStream.collect {
case oc: ISPObservationContainer => oc
}.toVector)
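// Runs the block and returns true only if it throws an SPTreeStateException.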
def expectTreeStateException(block: => Unit): Boolean =
try {
block
false
} catch {
case _: SPTreeStateException => true
}
"the obs number duplication assertion" should {
"prevent a duplicate when directly adding an observation" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
val setup =
for {
oc <- randomContainer(p)
on <- randomObsNumber(p)
} yield (oc, on)
setup.forall { case (obsContainer, obsNum) =>
val obs = odb.getFactory.createObservation(p, obsNum, null)
expectTreeStateException { obsContainer.addObservation(obs) }
}
}
}
"prevent a duplicate when adding a group" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
randomObsNumber(p).forall { obsNum =>
val f = odb.getFactory
val g = f.createGroup(p, null)
val o = f.createObservation(p, obsNum, null)
g.addObservation(o)
expectTreeStateException { p.addGroup(g) }
}
}
}
"prevent a duplicate when adding a template group" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
randomObsNumber(p).forall { obsNum =>
Option(p.getTemplateFolder).forall { tf =>
val f = odb.getFactory
val g = f.createTemplateGroup(p, null)
val o = f.createObservation(p, obsNum, null)
g.addObservation(o)
expectTreeStateException { tf.addTemplateGroup(g) }
}
}
}
}
"prevent a duplicate when setting obs children" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
val setup =
for {
oc <- randomContainer(p)
on <- randomObsNumber(p)
} yield (oc, on)
setup.forall { case (obsContainer, obsNum) =>
val obs = odb.getFactory.createObservation(p, obsNum, null)
expectTreeStateException {
obsContainer.children = obs :: obsContainer.children
}
}
}
}
"prevent a duplicate when setting group children" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
randomObsNumber(p).forall { obsNum =>
val f = odb.getFactory
val g = f.createGroup(p, null)
val o = f.createObservation(p, obsNum, null)
g.addObservation(o)
val gs = p.getGroups
gs.add(g)
expectTreeStateException { p.setGroups(gs) }
}
}
}
"prevent a duplicate when setting template group children" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
val setup =
for {
tf <- Option(p.getTemplateFolder)
on <- randomObsNumber(p)
} yield (tf, on)
setup.forall { case (templateFolder, obsNum) =>
val f = odb.getFactory
val g = f.createTemplateGroup(p, null)
val o = f.createObservation(p, obsNum, null)
g.addObservation(o)
val gs = templateFolder.getTemplateGroups
gs.add(g)
expectTreeStateException { templateFolder.setTemplateGroups(gs) }
}
}
}
"prevent a duplicate when setting template folder" ! forAllPrograms { (odb, progs) =>
progs.forall { p =>
randomObsNumber(p).forall { obsNum =>
val f = odb.getFactory
val tf = f.createTemplateFolder(p, null)
val tgs = Option(p.getTemplateFolder).map(_.getTemplateGroups).getOrElse(new java.util.ArrayList[ISPTemplateGroup]())
val tg = f.createTemplateGroup(p, null)
val o = f.createObservation(p, obsNum, null)
tg.addObservation(o)
tgs.add(tg)
expectTreeStateException {
// One of the two calls should fail, depending upon which random
// observation number we picked up.
tf.setTemplateGroups(tgs)
p.setTemplateFolder(tf)
}
}
}
}
}
}
|
spakzad/ocs
|
bundle/edu.gemini.pot/src/test/scala/edu/gemini/pot/sp/DuplicateSpec.scala
|
Scala
|
bsd-3-clause
| 4,843
|
package me.gregd.cineworld.domain.service
import java.time.format.DateTimeFormatter
import java.time.{LocalDate, LocalTime}
import me.gregd.cineworld.domain.model.{Cinema, Coordinates, Film, Performance}
import me.gregd.cineworld.integration.vue.{ImageUrl, VueIntegrationService}
import me.gregd.cineworld.integration.vue.listings.Showings
import me.gregd.cineworld.util.Clock
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class VueService(underlying: VueIntegrationService, clock: Clock) {
private val timeFormat = DateTimeFormatter ofPattern "h:m a"
def retrieveCinemas(): Future[Seq[Cinema]] = {
val res = underlying
.retrieveCinemas()
.map(raw =>
for { c <- raw } yield {
underlying
.retrieveLocation(c)
.map(loc => {
val coordinatesOpt = loc.map { case (lat, long) => Coordinates(lat, long) }
Cinema(c.id, "Vue", c.search_term, coordinatesOpt)
})
})
res.map(Future.sequence(_)).flatten
}
def retrieveMoviesAndPerformances(cinemaId: String, date: LocalDate): Future[Map[Film, List[Performance]]] = {
underlying.retrieveListings(cinemaId).map { raw =>
val converted = for {
f <- raw.films
image = ImageUrl.resolve(f.image_poster)
film = Film(f.id, f.title, image)
urlBuilder = (sessionId: String) => s"https://www.myvue.com/book-tickets/summary/$cinemaId/${film.id}/$sessionId"
showings = f.showings
performances = filterAndBuild(date, showings, urlBuilder)
if performances.nonEmpty
} yield film -> performances
converted.toMap
}
}
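// Keeps only showings on the requested date and, when that date is today, drops performances
// that have already started or start within the next hour; each remaining time becomes a Performance.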
private def filterAndBuild(date: LocalDate, showings: List[Showings], urlBuilder: String => String) = {
def isStale(time: LocalTime) = date == clock.today() && (time.minusHours(1) isBefore clock.time())
for {
s <- showings
if date == LocalDate.parse(s.date_time)
t <- s.times
time = LocalTime.parse(t.time, timeFormat)
if !isStale(time)
} yield Performance(t.time, available = true, t.screen_type, urlBuilder(t.session_id), Option(s.date_time))
}
}
|
Grogs/cinema-service
|
domain/src/main/scala/me/gregd/cineworld/domain/service/VueService.scala
|
Scala
|
gpl-3.0
| 2,207
|
// GENERATED CODE: DO NOT EDIT
package org.usagram.clarify
case class Validity15[+V1, +V2, +V3, +V4, +V5, +V6, +V7, +V8, +V9, +V10, +V11, +V12, +V13, +V14, +V15](_1: Definite[V1], _2: Definite[V2], _3: Definite[V3], _4: Definite[V4], _5: Definite[V5], _6: Definite[V6], _7: Definite[V7], _8: Definite[V8], _9: Definite[V9], _10: Definite[V10], _11: Definite[V11], _12: Definite[V12], _13: Definite[V13], _14: Definite[V14], _15: Definite[V15])
extends Validity with Product15[Definite[V1], Definite[V2], Definite[V3], Definite[V4], Definite[V5], Definite[V6], Definite[V7], Definite[V8], Definite[V9], Definite[V10], Definite[V11], Definite[V12], Definite[V13], Definite[V14], Definite[V15]] {
val values = Seq(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15)
def resolve[R](resolve: (V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15) => R): R =
if (isValid) {
resolve(_1.value, _2.value, _3.value, _4.value, _5.value, _6.value, _7.value, _8.value, _9.value, _10.value, _11.value, _12.value, _13.value, _14.value, _15.value)
}
else {
throw new InvalidValueException(invalidValues)
}
}
|
takkkun/clarify
|
core/src/main/scala/org/usagram/clarify/Validity15.scala
|
Scala
|
mit
| 1,157
|
package demo.pi
import org.apache.spark.{SparkConf, SparkContext}
import scala.math.random
/**
* Computes an approximation of pi using the Monte Carlo method.
*/
object PiSample {
def main(args: Array[String]): Unit = {
// Set up the Spark context
val conf = new SparkConf().setAppName(getClass.getSimpleName)
val spark = new SparkContext(conf)
// Read the sample count (default: 10000) and parallelism (default: spark.defaultParallelism) from the arguments
val samples = if (args.length > 0) args(0).toInt else 10000
val parallelism = if (args.length > 1) args(1).toInt else spark.defaultParallelism
val max = math.min(samples, Int.MaxValue)
// Build an RDD of sample indices (split into partitions) and count the points that land inside the unit circle
val count = spark.parallelize(1 until max, parallelism).map { i =>
// Draw a random point in the unit square
val x = random
val y = random
// Check whether the point lies inside the unit circle
if (x * x + y * y <= 1) 1 else 0
}.reduce(_ + _)
// The fraction of points inside the quarter circle approximates pi / 4
println("Pi is roughly " + 4.0 * count / max)
// Shut down Spark
spark.stop()
}
}
|
h-mochizuki/rts-sample
|
spark-sample/src/main/scala/demo/pi/PiSample.scala
|
Scala
|
apache-2.0
| 1,148
|
import stainless.annotation._
import stainless.io._
import stainless.math.BitVectors._
object FixedArray {
val CONSTANT1: UInt16 = 2
val CONSTANT2: UInt16 = 3
val CONSTANT3: UInt16 = CONSTANT1 + CONSTANT2
@cCode.`export`
case class W(x: Int, a: Array[Int], y: Int) {
require(
a.length == CONSTANT3.toInt &&
0 <= x && x <= 1000 &&
0 <= y && y <= 1000
)
}
@cCode.`export`
def f(w: W): Int = {
require(0 <= w.a(0) && w.a(0) <= 1000)
require(0 <= w.a(1) && w.a(1) <= 1000)
require(0 <= w.a(2) && w.a(2) <= 1000)
require(0 <= w.a(3) && w.a(3) <= 1000)
require(0 <= w.a(4) && w.a(4) <= 1000)
w.a(0) = 155
w.a(0) + w.a(1) + w.a(2) + w.a(3) + w.a(4) + w.x + w.y
}
@cCode.`export`
def g(a: Array[Int]): Unit = {
require(a.length > 0)
require(0 <= a(0) && a(0) <= 1000)
a(0) += 1
}
@cCode.`export`
def main(): Unit = {
@ghost implicit val state = newState
val w = W(30, Array(10, 20, 30, 20, 42), 100)
val w2 = W(30, Array(10, 20, 30, 20, 42), { w.a(0) += 1; 100 })
g(w.a)
val a2 = w.a
g(a2)
val z = f(w)
assert(z == 30 + 155 + 20 + 30 + 20 + 42 + 100)
StdOut.println(z)
}
}
|
epfl-lara/stainless
|
frontends/benchmarks/genc/valid/FixedArray.scala
|
Scala
|
apache-2.0
| 1,207
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spot.lda
import org.apache.log4j.{Level, LogManager}
import org.apache.spark.mllib.linalg.{Matrices, Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spot.lda.SpotLDAWrapper._
import org.apache.spot.lda.SpotLDAWrapperSchema._
import org.apache.spot.testutils.TestingSparkContextFlatSpec
import org.apache.spot.utilities.{FloatPointPrecisionUtility32, FloatPointPrecisionUtility64}
import org.scalatest.Matchers
import scala.collection.immutable.Map
class SpotLDAWrapperTest extends TestingSparkContextFlatSpec with Matchers {
"SparkLDA" should "handle an extremely unbalanced two word doc with EM optimizer" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 1.02
val ldaBeta = 1.001
val ldaMaxIterations = 20
val optimizer = "em"
val catFancy = SpotLDAInput("pets", "cat", 1)
val dogWorld = SpotLDAInput("pets", "dog", 999)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer ,ldaMaxIterations, FloatPointPrecisionUtility64)
val topicMixDF = out.docToTopicMix
val topicMix =
topicMixDF.filter(topicMixDF(DocumentName) === "pets").select(TopicProbabilityMix).first().toSeq(0)
.asInstanceOf[Seq[Double]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(topicMix(0) * catTopics(0) + topicMix(1) * catTopics(1)) should be < 0.01
Math.abs(0.999 - (topicMix(0) * dogTopics(0) + topicMix(1) * dogTopics(1))) should be < 0.01
}
it should "handle distinct docs on distinct words with EM optimizer" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 1.2
val ldaBeta = 1.001
val ldaMaxIterations = 20
val optimizer = "em"
val catFancy = SpotLDAInput("cat fancy", "cat", 1)
val dogWorld = SpotLDAInput("dog world", "dog", 1)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer ,ldaMaxIterations, FloatPointPrecisionUtility64)
val topicMixDF = out.docToTopicMix
val dogTopicMix: Array[Double] =
topicMixDF.filter(topicMixDF(DocumentName) === "dog world").select(TopicProbabilityMix).first()
.toSeq(0).asInstanceOf[Seq[Double]].toArray
val catTopicMix: Array[Double] =
topicMixDF.filter(topicMixDF(DocumentName) === "cat fancy").select(TopicProbabilityMix).first()
.toSeq(0).asInstanceOf[Seq[Double]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(1 - (catTopicMix(0) * catTopics(0) + catTopicMix(1) * catTopics(1))) should be < 0.01
Math.abs(1 - (dogTopicMix(0) * dogTopics(0) + dogTopicMix(1) * dogTopics(1))) should be < 0.01
}
it should "handle an extremely unbalanced two word doc with Online optimizer" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 0.0009
val ldaBeta = 0.00001
val ldaMaxIterations = 400
val optimizer = "online"
val catFancy = SpotLDAInput("pets", "cat", 1)
val dogWorld = SpotLDAInput("pets", "dog", 999)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer, ldaMaxIterations, FloatPointPrecisionUtility64)
val topicMixDF = out.docToTopicMix
val topicMix =
topicMixDF.filter(topicMixDF(DocumentName) === "pets").select(TopicProbabilityMix).first().toSeq(0)
.asInstanceOf[Seq[Double]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(topicMix(0) * catTopics(0) + topicMix(1) * catTopics(1)) should be < 0.01
Math.abs(0.999 - (topicMix(0) * dogTopics(0) + topicMix(1) * dogTopics(1))) should be < 0.01
}
it should "handle distinct docs on distinct words with Online optimizer" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 0.0009
val ldaBeta = 0.00001
val ldaMaxIterations = 400
val optimizer = "online"
val catFancy = SpotLDAInput("cat fancy", "cat", 1)
val dogWorld = SpotLDAInput("dog world", "dog", 1)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer, ldaMaxIterations, FloatPointPrecisionUtility64)
val topicMixDF = out.docToTopicMix
val dogTopicMix: Array[Double] =
topicMixDF.filter(topicMixDF(DocumentName) === "dog world").select(TopicProbabilityMix).first()
.toSeq(0).asInstanceOf[Seq[Double]].toArray
val catTopicMix: Array[Double] =
topicMixDF.filter(topicMixDF(DocumentName) === "cat fancy").select(TopicProbabilityMix).first()
.toSeq(0).asInstanceOf[Seq[Double]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(1 - (catTopicMix(0) * catTopics(0) + catTopicMix(1) * catTopics(1))) should be < 0.01
Math.abs(1 - (dogTopicMix(0) * dogTopics(0) + dogTopicMix(1) * dogTopics(1))) should be < 0.01
}
it should "handle an extremely unbalanced two word doc with doc probabilities as Float" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 1.02
val ldaBeta = 1.001
val ldaMaxIterations = 20
val optimizer = "em"
val catFancy = SpotLDAInput("pets", "cat", 1)
val dogWorld = SpotLDAInput("pets", "dog", 999)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer, ldaMaxIterations, FloatPointPrecisionUtility32)
val topicMixDF = out.docToTopicMix
val topicMix =
topicMixDF.filter(topicMixDF(DocumentName) === "pets").select(TopicProbabilityMix).first().toSeq(0)
.asInstanceOf[Seq[Float]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(topicMix(0).toDouble * catTopics(0) + topicMix(1).toDouble * catTopics(1)) should be < 0.01
Math.abs(0.999 - (topicMix(0).toDouble * dogTopics(0) + topicMix(1).toDouble * dogTopics(1))) should be < 0.01
}
it should "handle distinct docs on distinct words with doc probabilities as Float" in {
val logger = LogManager.getLogger("SuspiciousConnectsAnalysis")
logger.setLevel(Level.WARN)
val ldaAlpha = 1.02
val ldaBeta = 1.001
val ldaMaxIterations = 20
val optimizer = "em"
val catFancy = SpotLDAInput("cat fancy", "cat", 1)
val dogWorld = SpotLDAInput("dog world", "dog", 1)
val data = sparkSession.sparkContext.parallelize(Seq(catFancy, dogWorld))
val out = SpotLDAWrapper.runLDA(sparkSession, data, 2, logger, Some(0xdeadbeef), ldaAlpha, ldaBeta,
optimizer, ldaMaxIterations, FloatPointPrecisionUtility32)
val topicMixDF = out.docToTopicMix
val dogTopicMix: Array[Float] =
topicMixDF.filter(topicMixDF(DocumentName) === "dog world").select(TopicProbabilityMix).first().toSeq(0)
.asInstanceOf[Seq[Float]].toArray
val catTopicMix: Array[Float] =
topicMixDF.filter(topicMixDF(DocumentName) === "cat fancy").select(TopicProbabilityMix).first().toSeq(0)
.asInstanceOf[Seq[Float]].toArray
val catTopics = out.wordResults("cat")
val dogTopics = out.wordResults("dog")
Math.abs(1 - (catTopicMix(0) * catTopics(0) + catTopicMix(1) * catTopics(1))) should be < 0.01
Math.abs(1 - (dogTopicMix(0) * dogTopics(0) + dogTopicMix(1) * dogTopics(1))) should be < 0.01
}
"formatSparkLDAInput" should "return input in RDD[(Long, Vector)] (collected as Array for testing) format. The index " +
"is the docID, values are the vectors of word occurrences in that doc" in {
val documentWordData = sparkSession.sparkContext.parallelize(Seq(SpotLDAInput("192.168.1.1", "333333_7.0_0.0_1.0", 8),
SpotLDAInput("10.10.98.123", "1111111_6.0_3.0_5.0", 4),
SpotLDAInput("66.23.45.11", "-1_43_7.0_2.0_6.0", 2),
SpotLDAInput("192.168.1.1", "-1_80_6.0_1.0_1.0", 5)))
val wordDictionary = Map("333333_7.0_0.0_1.0" -> 0, "1111111_6.0_3.0_5.0" -> 1, "-1_43_7.0_2.0_6.0" -> 2,
"-1_80_6.0_1.0_1.0" -> 3)
val documentDictionary: DataFrame = sparkSession.createDataFrame(documentWordData
.map({ case SpotLDAInput(doc, word, count) => doc })
.distinct
.zipWithIndex.map({ case (d, c) => Row(d, c) }), StructType(List(DocumentNameField, DocumentNumberField)))
val sparkLDAInput: RDD[(Long, Vector)] = SpotLDAWrapper.formatSparkLDAInput(documentWordData,
documentDictionary, wordDictionary, sparkSession)
val sparkLDAInArr: Array[(Long, Vector)] = sparkLDAInput.collect()
sparkLDAInArr shouldBe Array((0, Vectors.sparse(4, Array(0, 3), Array(8.0, 5.0))), (2, Vectors.sparse(4, Array
(2), Array(2.0))), (1, Vectors.sparse(4, Array(1), Array(4.0))))
}
"formatSparkLDADocTopicOutput" should "return RDD[(String,Array(Double))] after converting doc results from vector " +
"using PrecisionUtilityDouble: convert docID back to string, convert vector of probabilities to array" in {
val documentWordData = sparkSession.sparkContext.parallelize(Seq(SpotLDAInput("192.168.1.1", "333333_7.0_0.0_1.0", 8),
SpotLDAInput("10.10.98.123", "1111111_6.0_3.0_5.0", 4),
SpotLDAInput("66.23.45.11", "-1_43_7.0_2.0_6.0", 2),
SpotLDAInput("192.168.1.1", "-1_80_6.0_1.0_1.0", 5)))
val documentDictionary: DataFrame = sparkSession.createDataFrame(documentWordData
.map({ case SpotLDAInput(doc, word, count) => doc })
.distinct
.zipWithIndex.map({ case (d, c) => Row(d, c) }), StructType(List(DocumentNameField, DocumentNumberField)))
val docTopicDist: RDD[(Long, Vector)] = sparkSession.sparkContext.parallelize(
Array((0.toLong, Vectors.dense(0.15, 0.3, 0.5, 0.05)),
(1.toLong, Vectors.dense(0.25, 0.15, 0.4, 0.2)),
(2.toLong, Vectors.dense(0.4, 0.1, 0.3, 0.2))))
val sparkDocRes: DataFrame = formatSparkLDADocTopicOutput(docTopicDist, documentDictionary, sparkSession,
FloatPointPrecisionUtility64)
import testImplicits._
val documents = sparkDocRes.map({ case Row(documentName: String, docProbabilities: Seq[Double]) => (documentName,
docProbabilities)
}).collect
val documentProbabilities = sparkDocRes.select(TopicProbabilityMix).first.toSeq(0).asInstanceOf[Seq[Double]]
documents should contain("192.168.1.1", Seq(0.15, 0.3, 0.5, 0.05))
documents should contain("10.10.98.123", Seq(0.25, 0.15, 0.4, 0.2))
documents should contain("66.23.45.11", Seq(0.4, 0.1, 0.3, 0.2))
documentProbabilities(0) shouldBe a[java.lang.Double]
}
it should "return RDD[(String,Array(Float))] after converting doc results from vector " +
"using PrecisionUtilityFloat: convert docID back to string, convert vector of probabilities to array" in {
val documentWordData = sparkSession.sparkContext.parallelize(Seq(SpotLDAInput("192.168.1.1", "333333_7.0_0.0_1.0", 8),
SpotLDAInput("10.10.98.123", "1111111_6.0_3.0_5.0", 4),
SpotLDAInput("66.23.45.11", "-1_43_7.0_2.0_6.0", 2),
SpotLDAInput("192.168.1.1", "-1_80_6.0_1.0_1.0", 5)))
val documentDictionary: DataFrame = sparkSession.createDataFrame(documentWordData
.map({ case SpotLDAInput(doc, word, count) => doc })
.distinct
.zipWithIndex.map({ case (d, c) => Row(d, c) }), StructType(List(DocumentNameField, DocumentNumberField)))
val docTopicDist: RDD[(Long, Vector)] = sparkSession.sparkContext.parallelize(
Array((0.toLong, Vectors.dense(0.15, 0.3, 0.5, 0.05)),
(1.toLong, Vectors.dense(0.25, 0.15, 0.4, 0.2)),
(2.toLong, Vectors.dense(0.4, 0.1, 0.3, 0.2))))
val sparkDocRes: DataFrame = formatSparkLDADocTopicOutput(docTopicDist, documentDictionary, sparkSession,
FloatPointPrecisionUtility32)
import testImplicits._
val documents = sparkDocRes.map({ case Row(documentName: String, docProbabilities: Seq[Float]) => (documentName,
docProbabilities)
}).collect
val documentProbabilities = sparkDocRes.select(TopicProbabilityMix).first.toSeq(0).asInstanceOf[Seq[Float]]
documents should contain("192.168.1.1", Seq(0.15f, 0.3f, 0.5f, 0.05f))
documents should contain("10.10.98.123", Seq(0.25f, 0.15f, 0.4f, 0.2f))
documents should contain("66.23.45.11", Seq(0.4f, 0.1f, 0.3f, 0.2f))
documentProbabilities(0) shouldBe a[java.lang.Float]
}
"formatSparkLDAWordOutput" should "return Map[Int,String] after converting word matrix to sequence, wordIDs back " +
"to strings, and sequence of probabilities to array" in {
val testMat = Matrices.dense(4, 4, Array(0.5, 0.2, 0.05, 0.25, 0.25, 0.1, 0.15, 0.5, 0.1, 0.4, 0.25, 0.25, 0.7, 0.2, 0.02, 0.08))
val wordDictionary = Map("-1_23.0_7.0_7.0_4.0" -> 3, "23.0_7.0_7.0_4.0" -> 0, "333333.0_7.0_7.0_4.0" -> 2, "80.0_7.0_7.0_4.0" -> 1)
val revWordMap: Map[Int, String] = wordDictionary.map(_.swap)
val sparkWordRes = formatSparkLDAWordOutput(testMat, revWordMap)
sparkWordRes should contain key ("23.0_7.0_7.0_4.0")
sparkWordRes should contain key ("80.0_7.0_7.0_4.0")
sparkWordRes should contain key ("333333.0_7.0_7.0_4.0")
sparkWordRes should contain key ("-1_23.0_7.0_7.0_4.0")
}
}
|
brandon-edwards/incubator-spot
|
spot-ml/src/test/scala/org/apache/spot/lda/SpotLDAWrapperTest.scala
|
Scala
|
apache-2.0
| 14,725
|
import scala.reflect.ClassTag
object Test {
def main(args: Array[String]): Unit = {
def testArray[T: ClassTag](n: Int, elem: Int => T): Unit = {
val t: Int *: Tuple = 0 *: Tuple.fromArray(Array.tabulate(n)(elem))
println(t.tail)
}
for (i <- 0 to 25)
testArray(i, j => j)
println(Tuple1(1).tail)
println((1, 2).tail)
println((1, 2, 3).tail)
println((1, 2, 3, 4).tail)
println((1, 2, 3, 4, 5).tail)
println((1, 2, 3, 4, 5, 6).tail)
println((1, 2, 3, 4, 5, 6, 7).tail)
println((1, 2, 3, 4, 5, 6, 7, 8).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24).tail)
println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25).tail)
println((1 *: ()).tail)
println((1 *: 2 *: ()).tail)
println((1 *: 2 *: 3 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: 24 *: ()).tail)
println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: 24 *: 25 *: ()).tail)
}
}
|
som-snytt/dotty
|
tests/run-deep-subtype/Tuple-tail.scala
|
Scala
|
apache-2.0
| 4,242
|
package org.scalatest.examples
// Type-constructor polymorphism
import org.scalactic.TypeCheckedTripleEquals
import org.scalactic.anyvals.PosZDouble
import org.scalatest.examples.Demo._
import org.scalatest.prop.PropertyChecks
import org.scalatest.{LogicFunSpec, Matchers}
// (Demo 12 PosInt.ensuringValid in the REPL)
// DEMO 13 - Use PosZDouble as result type
class Demo13Spec extends LogicFunSpec with Matchers with PropertyChecks with TypeCheckedTripleEquals {
describe("The squareRoot1 function") {
it("should compute the square root") {
forAll { (x: PosZDouble) =>
whenever (!x.isPosInfinity) {
squareRoot3(x).value should === (math.sqrt(x))
}
}
}
it("should should throw IAE on negative input") {
an [IllegalArgumentException] should be thrownBy {
squareRoot1(-1.0)
}
}
it("should should throw IAE on positive infinity input") {
an [IllegalArgumentException] should be thrownBy {
squareRoot1(Double.PositiveInfinity)
}
}
}
}
|
bvenners/scalaX2016Demos
|
src/test/scala/org/scalatest/examples/Demo13Spec.scala
|
Scala
|
apache-2.0
| 1,043
|
package net.virtualvoid.bytecode
package backend
import java.lang.{ String => jString }
/** This trait contains the methods which form the contract between Mnemonic's
* instructions and the backend implementations. Each backend has to implement
* its own frame type which gets called by the instruction implementation.
*/
trait BackendSupport[+ST <: Stack] {
def depth = -1
def frame = this
def stack:ST
def bipush[ST2>:ST<:Stack](i1:Int):F[ST2**Int]
def ldc[ST2>:ST<:Stack](str:jString):F[ST2**jString]
def iadd_int[R<:Stack](rest:R,i1:Int,i2:Int):F[R**Int]
def isub_int[R<:Stack](rest:R,i1:Int,i2:Int):F[R**Int]
def imul_int[R<:Stack](rest:R,i1:Int,i2:Int):F[R**Int]
def pop_int[R<:Stack](rest:R):F[R]
def dup_int[R<:Stack,T](rest:R,top:T):F[R**T**T]
def swap_int[R<:Stack,T1,T2](rest:R,t2:T2,t1:T1):F[R**T1**T2]
def dup_x1_int[R<:Stack,T1,T2](rest:R,t2:T2,t1:T1):F[R**T1**T2**T1]
def invokemethod[R<:Stack,U](handle:MethodHandle):F[R**U]
def invokeconstructor[R<:Stack,U](cons: Constructor): F[R**U]
def new_int[ST2 >: ST <: Stack, U](cl: Class[U]): F[ST2**Uninitialized[U]]
def getstatic_int[ST2>:ST<:Stack,T](code:scala.reflect.Code[()=>T]):F[ST2**T]
def putstatic_int[R<:Stack,T](rest:R,top:T,code:scala.reflect.Code[T=>Unit]):F[R]
def checkcast_int[R<:Stack,T,U](rest:R,top:T)(cl:Class[U]):F[R**U]
def conditional[R<:Stack,T,ST2<:Stack](cond:Int,rest:R,top:T
,thenB:F[R]=>F[ST2]
,elseB:F[R]=>F[ST2]):F[ST2]
def aload_int[R<:Stack,T](rest:R,array:AnyRef/*Array[T]*/,i:Int):F[R**T]
def astore_int[R<:Stack,T](rest:R,array:AnyRef,index:Int,t:T):F[R]
def arraylength_int[R<:Stack](rest:R,array:AnyRef):F[R**Int]
def tailRecursive_int[ST1>:ST<:Stack,ST2<:Stack]
(func: (F[ST1] => F[ST2]) => (F[ST1]=>F[ST2]))
(fr:F[ST1]):F[ST2]
def pop_unit_int[R<:Stack](rest:R):F[R]
def newInstance[T,ST2>:ST<:Stack](cl:Class[T]):F[ST2**T]
def withLocal_int[T,ST<:Stack,ST2<:Stack](top:T,rest:ST,code:Local[T]=>F[ST]=>F[ST2]):F[ST2]
def withTargetHere_int[X,ST2>:ST<:Stack](code:Target[ST2] => F[ST2] => X):X
def conditionalImperative[R<:Stack,T,ST2<:Stack](cond:Int,rest:R,top:T,thenB:F[R]=>Nothing):F[R]
def lookupSwitch[R <: Stack, ST2 <: Stack](cond: Int, rest: R)(candidates: Int*)(mapping: F[R] => PartialFunction[Option[Int], F[ST2]]): F[ST2]
}
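// --- Editor's note: a hypothetical illustration, not part of the original file.
// The trait above is the contract the typed instructions rely on: an instruction
// does nothing more than forward to the matching hook on the current frame, and
// each backend (interpreter, bytecode emitter, ...) supplies its own frame type
// behind the same API. A rough sketch of such an instruction, assuming the F and
// ** types used above:
//
//   def bipush[ST <: Stack](i: Int): F[ST] => F[ST ** Int] =
//     frame => frame.bipush(i)   // delegate to the backend's frame implementation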
|
jrudolph/bytecode
|
src/main/scala/net/virtualvoid/bytecode/backend/BackendSupport.scala
|
Scala
|
bsd-2-clause
| 2,486
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.azblob
import akka.actor.ActorSystem
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}
import org.apache.openwhisk.core.database.memory.{MemoryArtifactStoreBehaviorBase, MemoryArtifactStoreProvider}
import org.apache.openwhisk.core.database.test.AttachmentStoreBehaviors
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreAttachmentBehaviors
import org.apache.openwhisk.core.entity.WhiskEntity
import org.scalatest.FlatSpec
import scala.reflect.ClassTag
import scala.util.Random
trait AzureBlobAttachmentStoreBehaviorBase
extends FlatSpec
with MemoryArtifactStoreBehaviorBase
with ArtifactStoreAttachmentBehaviors
with AttachmentStoreBehaviors {
override lazy val store = makeAzureStore[WhiskEntity]
override val prefix = s"attachmentTCK_${Random.alphanumeric.take(4).mkString}"
override protected def beforeAll(): Unit = {
MemoryArtifactStoreProvider.purgeAll()
super.beforeAll()
}
override def getAttachmentStore[D <: DocumentSerializer: ClassTag](): AttachmentStore =
makeAzureStore[D]()
def makeAzureStore[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
logging: Logging): AttachmentStore
}
|
style95/openwhisk
|
tests/src/test/scala/org/apache/openwhisk/core/database/azblob/AzureBlobAttachmentStoreBehaviorBase.scala
|
Scala
|
apache-2.0
| 2,161
|
import sbt._
class StatsdProject(info: ProjectInfo) extends DefaultProject(info)
with rsync.RsyncPublishing
with ruby.GemBuilding
with assembly.AssemblyBuilder {
val codaRepo = "Coda Hale's Repository" at "http://repo.codahale.com/"
val jbossRepo = "JBoss Repo" at
"https://repository.jboss.org/nexus/content/repositories/releases"
val metrics = "com.yammer.metrics" %% "metrics-scala" % "2.0.0-BETA16" withSources()
val specs = "org.scala-tools.testing" %% "specs" % "1.6.6"
val mockito = "org.mockito" % "mockito-all" % "1.8.5"
val netty = "org.jboss.netty" % "netty" % "3.2.4.Final" withSources()
val jerkson = "com.codahale" %% "jerkson" % "0.1.5"
val logula = "com.codahale" %% "logula" % "2.1.1" withSources()
val jmxetric = "com.specialprojectslab" % "jmxetric" % "0.0.5"
val fig = "com.codahale" %% "fig" % "1.1.1" withSources()
override def mainClass = Some("bitlove.statsd.StatsdDaemon")
override def fork = forkRun("-Dlog4j.configuration=config/log4j.properties" :: Nil)
/**
* mvn repo to publish to.
*/
def rsyncRepo = "james@jamesgolick.com:/var/www/repo.jamesgolick.com"
override def rsyncOptions = "-rvz" :: Nil
/**
* Gem building stuff
*/
override val gemAuthor = "James Golick"
override val gemAuthorEmail = "jamesgolick@gmail.com"
override val gemVersion = version.toString
override val gemDependencies = Map("rufus-json" -> "0.2.5")
lazy val publishAll = task { None } dependsOn(publish, publishGem)
}
|
jamesgolick/statsd.scala
|
project/build/StatsdProject.scala
|
Scala
|
mit
| 1,642
|
import java.net.URL
import rescala._
object Main extends App {
val urlCheck = new UrlChecker("http://www.faz.net/aktuell/rhein-main/?rssview=1")
System.out.println(urlCheck.UrlValid.now)
System.out.println(urlCheck.ErrorMessage.now)
val url1 = new URL("http://www.faz.net/aktuell/rhein-main/?rssview=1")
val fetch = new Fetcher(url1)
System.out.println(fetch.listOfUrl.now)
System.out.println(fetch.ChTitle.now)
val g = new GUI
g.main(Array())
}
|
volkc/REScala
|
Examples/RSSReader/SimpleRssReader/src/main/scala/Main.scala
|
Scala
|
apache-2.0
| 472
|
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni
import djinni.ast._
import java.io._
import djinni.generatorTools._
import djinni.meta._
import djinni.syntax.Error
import djinni.writer.IndentWriter
import scala.language.implicitConversions
import scala.collection.mutable
package object generatorTools {
case class Spec(
javaOutFolder: Option[File],
javaPackage: Option[String],
javaClassAccessModifier: JavaAccessModifier.Value,
javaIdentStyle: JavaIdentStyle,
javaCppException: Option[String],
javaAnnotation: Option[String],
javaNullableAnnotation: Option[String],
javaNonnullAnnotation: Option[String],
javaImplementAndroidOsParcelable: Boolean,
javaUseFinalForRecord: Boolean,
cppOutFolder: Option[File],
cppHeaderOutFolder: Option[File],
cppIncludePrefix: String,
cppExtendedRecordIncludePrefix: String,
cppNamespace: String,
cppIdentStyle: CppIdentStyle,
cppFileIdentStyle: IdentConverter,
cppOptionalTemplate: String,
cppOptionalHeader: String,
cppEnumHashWorkaround: Boolean,
cppNnHeader: Option[String],
cppNnType: Option[String],
cppNnCheckExpression: Option[String],
cppUseWideStrings: Boolean,
jniOutFolder: Option[File],
jniHeaderOutFolder: Option[File],
jniIncludePrefix: String,
jniIncludeCppPrefix: String,
jniNamespace: String,
jniClassIdentStyle: IdentConverter,
jniFileIdentStyle: IdentConverter,
jniBaseLibIncludePrefix: String,
cppExt: String,
cppHeaderExt: String,
objcOutFolder: Option[File],
objcppOutFolder: Option[File],
objcIdentStyle: ObjcIdentStyle,
objcFileIdentStyle: IdentConverter,
objcppExt: String,
objcHeaderExt: String,
objcIncludePrefix: String,
objcExtendedRecordIncludePrefix: String,
objcppIncludePrefix: String,
objcppIncludeCppPrefix: String,
objcppIncludeObjcPrefix: String,
objcppNamespace: String,
objcBaseLibIncludePrefix: String,
objcSwiftBridgingHeaderWriter: Option[Writer],
outFileListWriter: Option[Writer],
skipGeneration: Boolean,
yamlOutFolder: Option[File],
yamlOutFile: Option[String],
yamlPrefix: String)
def preComma(s: String) = {
if (s.isEmpty) s else ", " + s
}
def q(s: String) = '"' + s + '"'
def firstUpper(token: String) = if (token.isEmpty()) token else token.charAt(0).toUpper + token.substring(1)
type IdentConverter = String => String
case class CppIdentStyle(ty: IdentConverter, enumType: IdentConverter, typeParam: IdentConverter,
method: IdentConverter, field: IdentConverter, local: IdentConverter,
enum: IdentConverter, const: IdentConverter)
case class JavaIdentStyle(ty: IdentConverter, typeParam: IdentConverter,
method: IdentConverter, field: IdentConverter, local: IdentConverter,
enum: IdentConverter, const: IdentConverter)
case class ObjcIdentStyle(ty: IdentConverter, typeParam: IdentConverter,
method: IdentConverter, field: IdentConverter, local: IdentConverter,
enum: IdentConverter, const: IdentConverter)
object IdentStyle {
val camelUpper = (s: String) => s.split('_').map(firstUpper).mkString
val camelLower = (s: String) => {
val parts = s.split('_')
parts.head + parts.tail.map(firstUpper).mkString
}
val underLower = (s: String) => s
val underUpper = (s: String) => s.split('_').map(firstUpper).mkString("_")
val underCaps = (s: String) => s.toUpperCase
val prefix = (prefix: String, suffix: IdentConverter) => (s: String) => prefix + suffix(s)
val javaDefault = JavaIdentStyle(camelUpper, camelUpper, camelLower, camelLower, camelLower, underCaps, underCaps)
val cppDefault = CppIdentStyle(camelUpper, camelUpper, camelUpper, underLower, underLower, underLower, underCaps, underCaps)
val objcDefault = ObjcIdentStyle(camelUpper, camelUpper, camelLower, camelLower, camelLower, camelUpper, camelUpper)
val styles = Map(
"FooBar" -> camelUpper,
"fooBar" -> camelLower,
"foo_bar" -> underLower,
"Foo_Bar" -> underUpper,
"FOO_BAR" -> underCaps)
def infer(input: String): Option[IdentConverter] = {
styles.foreach((e) => {
val (str, func) = e
if (input endsWith str) {
val diff = input.length - str.length
return Some(if (diff > 0) {
val before = input.substring(0, diff)
prefix(before, func)
} else {
func
})
}
})
None
}
}
object JavaAccessModifier extends Enumeration {
val Public = Value("public")
val Package = Value("package")
def getCodeGenerationString(javaAccessModifier: JavaAccessModifier.Value): String = {
javaAccessModifier match {
case Public => "public "
case Package => "/*package*/ "
}
}
}
implicit val javaAccessModifierReads: scopt.Read[JavaAccessModifier.Value] = scopt.Read.reads(JavaAccessModifier withName _)
final case class SkipFirst() {
private var first = true
def apply(f: => Unit) {
if (first) {
first = false
}
else {
f
}
}
}
case class GenerateException(message: String) extends java.lang.Exception(message)
def createFolder(name: String, folder: File) {
folder.mkdirs()
if (folder.exists) {
if (!folder.isDirectory) {
throw new GenerateException(s"Unable to create $name folder at ${q(folder.getPath)}, there's something in the way.")
}
} else {
throw new GenerateException(s"Unable to create $name folder at ${q(folder.getPath)}.")
}
}
def generate(idl: Seq[TypeDecl], spec: Spec): Option[String] = {
try {
if (spec.cppOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("C++", spec.cppOutFolder.get)
createFolder("C++ header", spec.cppHeaderOutFolder.get)
}
new CppGenerator(spec).generate(idl)
}
if (spec.javaOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("Java", spec.javaOutFolder.get)
}
new JavaGenerator(spec).generate(idl)
}
if (spec.jniOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("JNI C++", spec.jniOutFolder.get)
createFolder("JNI C++ header", spec.jniHeaderOutFolder.get)
}
new JNIGenerator(spec).generate(idl)
}
if (spec.objcOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("Objective-C", spec.objcOutFolder.get)
}
new ObjcGenerator(spec).generate(idl)
}
if (spec.objcppOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("Objective-C++", spec.objcppOutFolder.get)
}
new ObjcppGenerator(spec).generate(idl)
}
if (spec.objcSwiftBridgingHeaderWriter.isDefined) {
SwiftBridgingHeaderGenerator.writeAutogenerationWarning(spec.objcSwiftBridgingHeaderWriter.get)
new SwiftBridgingHeaderGenerator(spec).generate(idl)
}
if (spec.yamlOutFolder.isDefined) {
if (!spec.skipGeneration) {
createFolder("YAML", spec.yamlOutFolder.get)
}
new YamlGenerator(spec).generate(idl)
}
None
}
catch {
case GenerateException(message) => Some(message)
}
}
sealed abstract class SymbolReference
case class ImportRef(arg: String) extends SymbolReference // Already contains <> or "" in C contexts
case class DeclRef(decl: String, namespace: Option[String]) extends SymbolReference
}
abstract class Generator(spec: Spec)
{
protected val writtenFiles = mutable.HashMap[String,String]()
protected def createFile(folder: File, fileName: String, makeWriter: OutputStreamWriter => IndentWriter, f: IndentWriter => Unit): Unit = {
if (spec.outFileListWriter.isDefined) {
spec.outFileListWriter.get.write(new File(folder, fileName).getPath + "\n")
}
if (spec.skipGeneration) {
return
}
val file = new File(folder, fileName)
val cp = file.getCanonicalPath
writtenFiles.put(cp.toLowerCase, cp) match {
case Some(existing) =>
if (existing == cp) {
throw GenerateException("Refusing to write \"" + file.getPath + "\"; we already wrote a file to that path.")
} else {
throw GenerateException("Refusing to write \"" + file.getPath + "\"; we already wrote a file to a path that is the same when lower-cased: \"" + existing + "\".")
}
case _ =>
}
val fout = new FileOutputStream(file)
try {
val out = new OutputStreamWriter(fout, "UTF-8")
f(makeWriter(out))
out.flush()
}
finally {
fout.close()
}
}
protected def createFile(folder: File, fileName: String, f: IndentWriter => Unit): Unit = createFile(folder, fileName, out => new IndentWriter(out), f)
implicit def identToString(ident: Ident): String = ident.name
val idCpp = spec.cppIdentStyle
val idJava = spec.javaIdentStyle
val idObjc = spec.objcIdentStyle
def wrapNamespace(w: IndentWriter, ns: String, f: IndentWriter => Unit) {
ns match {
case "" => f(w)
case s =>
val parts = s.split("::")
w.wl(parts.map("namespace "+_+" {").mkString(" ")).wl
f(w)
w.wl
w.wl(parts.map(p => "}").mkString(" ") + s" // namespace $s")
}
}
def wrapAnonymousNamespace(w: IndentWriter, f: IndentWriter => Unit) {
w.wl("namespace { // anonymous namespace")
w.wl
f(w)
w.wl
w.wl("} // end anonymous namespace")
}
def writeHppFileGeneric(folder: File, namespace: String, fileIdentStyle: IdentConverter)(name: String, origin: String, includes: Iterable[String], fwds: Iterable[String], f: IndentWriter => Unit, f2: IndentWriter => Unit) {
createFile(folder, fileIdentStyle(name) + "." + spec.cppHeaderExt, (w: IndentWriter) => {
w.wl("// AUTOGENERATED FILE - DO NOT MODIFY!")
w.wl("// This file generated by Djinni from " + origin)
w.wl
w.wl("#pragma once")
if (includes.nonEmpty) {
w.wl
includes.foreach(w.wl)
}
w.wl
wrapNamespace(w, namespace,
(w: IndentWriter) => {
if (fwds.nonEmpty) {
fwds.foreach(w.wl)
w.wl
}
f(w)
}
)
f2(w)
})
}
def writeCppFileGeneric(folder: File, namespace: String, fileIdentStyle: IdentConverter, includePrefix: String)(name: String, origin: String, includes: Iterable[String], f: IndentWriter => Unit) {
createFile(folder, fileIdentStyle(name) + "." + spec.cppExt, (w: IndentWriter) => {
w.wl("// AUTOGENERATED FILE - DO NOT MODIFY!")
w.wl("// This file generated by Djinni from " + origin)
w.wl
val myHeader = q(includePrefix + fileIdentStyle(name) + "." + spec.cppHeaderExt)
w.wl(s"#include $myHeader // my header")
val myHeaderInclude = s"#include $myHeader"
for (include <- includes if include != myHeaderInclude)
w.wl(include)
w.wl
wrapNamespace(w, namespace, f)
})
}
def generate(idl: Seq[TypeDecl]) {
for (td <- idl.collect { case itd: InternTypeDecl => itd }) td.body match {
case e: Enum =>
assert(td.params.isEmpty)
generateEnum(td.origin, td.ident, td.doc, e)
case r: Record => generateRecord(td.origin, td.ident, td.doc, td.params, r)
case i: Interface => generateInterface(td.origin, td.ident, td.doc, td.params, i)
}
}
def generateEnum(origin: String, ident: Ident, doc: Doc, e: Enum)
def generateRecord(origin: String, ident: Ident, doc: Doc, params: Seq[TypeParam], r: Record)
def generateInterface(origin: String, ident: Ident, doc: Doc, typeParams: Seq[TypeParam], i: Interface)
// --------------------------------------------------------------------------
// Render type expression
def withNs(namespace: Option[String], t: String) = namespace match {
case None => t
case Some("") => "::" + t
case Some(s) => "::" + s + "::" + t
}
def withCppNs(t: String) = withNs(Some(spec.cppNamespace), t)
def writeAlignedCall(w: IndentWriter, call: String, params: Seq[Field], delim: String, end: String, f: Field => String): IndentWriter = {
w.w(call)
val skipFirst = new SkipFirst
params.foreach(p => {
skipFirst { w.wl(delim); w.w(" " * call.length()) }
w.w(f(p))
})
w.w(end)
}
def writeAlignedCall(w: IndentWriter, call: String, params: Seq[Field], end: String, f: Field => String): IndentWriter =
writeAlignedCall(w, call, params, ",", end, f)
def writeAlignedObjcCall(w: IndentWriter, call: String, params: Seq[Field], end: String, f: Field => (String, String)) = {
w.w(call)
val skipFirst = new SkipFirst
params.foreach(p => {
val (name, value) = f(p)
skipFirst { w.wl; w.w(" " * math.max(0, call.length() - name.length)); w.w(name) }
w.w(":" + value)
})
w.w(end)
}
def normalEnumOptions(e: Enum) = e.options.filter(_.specialFlag == None)
def writeEnumOptionNone(w: IndentWriter, e: Enum, ident: IdentConverter) {
for (o <- e.options.find(_.specialFlag == Some(Enum.SpecialFlag.NoFlags))) {
writeDoc(w, o.doc)
w.wl(ident(o.ident.name) + " = 0,")
}
}
def writeEnumOptions(w: IndentWriter, e: Enum, ident: IdentConverter) {
var shift = 0
for (o <- normalEnumOptions(e)) {
writeDoc(w, o.doc)
w.wl(ident(o.ident.name) + (if(e.flags) s" = 1 << $shift" else "") + ",")
shift += 1
}
}
def writeEnumOptionAll(w: IndentWriter, e: Enum, ident: IdentConverter) {
for (o <- e.options.find(_.specialFlag == Some(Enum.SpecialFlag.AllFlags))) {
writeDoc(w, o.doc)
w.w(ident(o.ident.name) + " = ")
w.w(normalEnumOptions(e).map(o => ident(o.ident.name)).fold("0")((acc, o) => acc + " | " + o))
w.wl(",")
}
}
// --------------------------------------------------------------------------
def writeDoc(w: IndentWriter, doc: Doc) {
doc.lines.length match {
case 0 =>
case 1 =>
w.wl(s"/**${doc.lines.head} */")
case _ =>
w.wl("/**")
doc.lines.foreach (l => w.wl(s" *$l"))
w.wl(" */")
}
}
}
|
PSPDFKit-labs/djinni
|
src/source/generator.scala
|
Scala
|
apache-2.0
| 15,768
|
package filodb.query.exec.rangefn
import java.util.regex.{Pattern, PatternSyntaxException}
import monix.reactive.Observable
import filodb.core.query.{CustomRangeVectorKey, IteratorBackedRangeVector, RangeVector, RangeVectorKey}
import filodb.memory.format.ZeroCopyUTF8String
trait MiscellaneousFunction {
def execute(source: Observable[RangeVector]): Observable[RangeVector]
}
case class LabelReplaceFunction(funcParams: Seq[Any])
extends MiscellaneousFunction {
val labelIdentifier: String = "[a-zA-Z_][a-zA-Z0-9_:\\\\-\\\\.]*"
require(funcParams.size == 4,
"Cannot use LabelReplace without function parameters: " +
"instant-vector, dst_label string, replacement string, src_label string, regex string")
val dstLabel: String = funcParams(0).asInstanceOf[String]
val replacementString: String = funcParams(1).asInstanceOf[String]
val srcLabel: String = funcParams(2).asInstanceOf[String]
val regexString: String = funcParams(3).asInstanceOf[String]
require(dstLabel.matches(labelIdentifier), "Invalid destination label name")
try {
Pattern.compile(regexString)
}
catch {
case ex: PatternSyntaxException => {
throw new IllegalArgumentException("Invalid Regular Expression for label_replace", ex)
}
}
override def execute(source: Observable[RangeVector]): Observable[RangeVector] = {
source.map { rv =>
val newLabel = labelReplaceImpl(rv.key, funcParams)
IteratorBackedRangeVector(newLabel, rv.rows)
}
}
private def labelReplaceImpl(rangeVectorKey: RangeVectorKey, funcParams: Seq[Any]): RangeVectorKey = {
val value: ZeroCopyUTF8String = if (rangeVectorKey.labelValues.contains(ZeroCopyUTF8String(srcLabel))) {
rangeVectorKey.labelValues.get(ZeroCopyUTF8String(srcLabel)).get
}
else {
// Assign dummy value as label_replace should overwrite destination label if the source label is empty but matched
ZeroCopyUTF8String.empty
}
// Pattern is not deserialized correctly if it is a data member
val pattern = Pattern.compile(regexString)
val matcher = pattern.matcher(value.toString)
if (matcher.matches()) {
var labelReplaceValue = replacementString
for (index <- 1 to matcher.groupCount()) {
labelReplaceValue = labelReplaceValue.replace(s"$$$index", matcher.group(index))
}
// Remove groups which are not present
      labelReplaceValue = labelReplaceValue.replaceAll("\\$[A-Za-z0-9]+", "")
if (labelReplaceValue.length > 0) {
return CustomRangeVectorKey(rangeVectorKey.labelValues.
updated(ZeroCopyUTF8String(dstLabel), ZeroCopyUTF8String(labelReplaceValue)), rangeVectorKey.sourceShards)
}
else {
// Drop label if new value is empty
return CustomRangeVectorKey(rangeVectorKey.labelValues -
ZeroCopyUTF8String(dstLabel), rangeVectorKey.sourceShards)
}
}
    return rangeVectorKey
}
}
case class LabelJoinFunction(funcParams: Seq[Any])
extends MiscellaneousFunction {
val labelIdentifier: String = "[a-zA-Z_][a-zA-Z0-9_:\\\\-\\\\.]*"
require(funcParams.size >= 2,
"expected at least 3 argument(s) in call to label_join")
val dstLabel: String = funcParams(0).asInstanceOf[String]
val separator: String = funcParams(1).asInstanceOf[String]
require(dstLabel.asInstanceOf[String].matches(labelIdentifier), "Invalid destination label name in label_join()")
var srcLabel =
funcParams.drop(2).map { x =>
require(x.asInstanceOf[String].matches(labelIdentifier),
"Invalid source label name in label_join()")
x.asInstanceOf[String]
}
override def execute(source: Observable[RangeVector]): Observable[RangeVector] = {
source.map { rv =>
val newLabel = labelJoinImpl(rv.key)
IteratorBackedRangeVector(newLabel, rv.rows)
}
}
private def labelJoinImpl(rangeVectorKey: RangeVectorKey): RangeVectorKey = {
val srcLabelValues = srcLabel.map(x=> rangeVectorKey.labelValues.get(ZeroCopyUTF8String(x)).
map(_.toString).getOrElse(""))
val labelJoinValue = srcLabelValues.mkString(separator)
if (labelJoinValue.length > 0) {
return CustomRangeVectorKey(rangeVectorKey.labelValues.
updated(ZeroCopyUTF8String(dstLabel), ZeroCopyUTF8String(labelJoinValue)), rangeVectorKey.sourceShards)
}
else {
// Drop label if new value is empty
return CustomRangeVectorKey(rangeVectorKey.labelValues -
ZeroCopyUTF8String(dstLabel), rangeVectorKey.sourceShards)
}
}
}
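// --- Editor's note: a hypothetical usage sketch, not part of the original file.
// It shows how PromQL-style calls map onto the constructors above; the instant
// vector itself is not part of funcParams:
//   label_replace(v, "dst", "$1", "src", "(.*)")  -> Seq("dst", "$1", "src", "(.*)")
//   label_join(v, "combined", "-", "host", "job") -> Seq("combined", "-", "host", "job")
object MiscellaneousFunctionExamples {
  // Copies the full value of label "src" into label "dst" on every series.
  val replaceExample: LabelReplaceFunction = LabelReplaceFunction(Seq("dst", "$1", "src", "(.*)"))
  // Joins the "host" and "job" label values with "-" into a new "combined" label.
  val joinExample: LabelJoinFunction = LabelJoinFunction(Seq("combined", "-", "host", "job"))
}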
|
velvia/FiloDB
|
query/src/main/scala/filodb/query/exec/rangefn/MiscellaneousFunction.scala
|
Scala
|
apache-2.0
| 4,524
|
package com.lynbrookrobotics.potassium.frc
import com.ctre.phoenix.ErrorCode
import com.ctre.phoenix.motorcontrol.ControlMode
import com.ctre.phoenix.motorcontrol.can.TalonSRX
import com.lynbrookrobotics.potassium.control.offload.EscConfig.{NativePositionGains, NativeVelocityGains}
import com.lynbrookrobotics.potassium.control.offload.OffloadedSignal._
import org.mockito.ArgumentMatchers
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito.when
import org.scalatest.FunSuite
import org.scalatest.mockito.MockitoSugar
import squants.{Each, Percent}
class LazyTalonTest extends FunSuite with MockitoSugar {
test("LazyTalons apply OpenLoop is lazy") {
val mockedTalon = mock[TalonSRX]
val lazyTalon = new LazyTalon(mockedTalon, 0, 0, -1, 1)
var percOutCalled = 0
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.PercentOutput), anyDouble())).then(_ => {
percOutCalled += 1
})
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Position), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Velocity), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.config_kP(anyInt(), anyDouble(), anyInt())).thenThrow(MockTalonCalled)
when(mockedTalon.config_kI(anyInt(), anyDouble(), anyInt())).thenThrow(MockTalonCalled)
when(mockedTalon.config_kD(anyInt(), anyDouble(), anyInt())).thenThrow(MockTalonCalled)
when(mockedTalon.config_kF(anyInt(), anyDouble(), anyInt())).thenThrow(MockTalonCalled)
val octrl1 = OpenLoop(Percent(1))
val octrl2 = OpenLoop(Percent(2))
lazyTalon.applyCommand(octrl1)
lazyTalon.applyCommand(octrl1)
lazyTalon.applyCommand(octrl1)
assert(percOutCalled == 1)
lazyTalon.applyCommand(octrl2)
lazyTalon.applyCommand(octrl2)
lazyTalon.applyCommand(octrl2)
assert(percOutCalled == 2)
lazyTalon.applyCommand(octrl1)
lazyTalon.applyCommand(octrl1)
lazyTalon.applyCommand(octrl1)
assert(percOutCalled == 3)
}
test("LazyTalons apply PositionBangBang is lazy") {
val mockedTalon = mock[TalonSRX]
val lazyTalon = new LazyTalon(mockedTalon, 0, 0, -1, 1)
var cpofCalled = 0
var cporCalled = 0
var kpSetCalled = 0
var posOutCalled = 0
when(mockedTalon.configPeakOutputForward(anyDouble(), anyInt())).then(_ => {
cpofCalled += 1
ErrorCode.OK
})
when(mockedTalon.configPeakOutputReverse(anyDouble(), anyInt())).then(_ => {
cporCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kP(anyInt(), anyDouble(), anyInt())).then(_ => {
kpSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Position), anyDouble())).then(_ => {
posOutCalled += 1
})
val pctrl1 = PositionBangBang(forwardWhenBelow = true, reverseWhenAbove = true, Each(1))
val pctrl2 = PositionBangBang(forwardWhenBelow = false, reverseWhenAbove = false, Each(2))
val pctrl3 = PositionBangBang(forwardWhenBelow = true, reverseWhenAbove = false, Each(3))
val pctrl4 = PositionBangBang(forwardWhenBelow = false, reverseWhenAbove = true, Each(4))
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
assert(cpofCalled == 0)
assert(cporCalled == 0)
assert(kpSetCalled == 1)
assert(posOutCalled == 1)
lazyTalon.applyCommand(pctrl2)
lazyTalon.applyCommand(pctrl2)
lazyTalon.applyCommand(pctrl2)
assert(cpofCalled == 1)
assert(cporCalled == 1)
assert(kpSetCalled == 1)
assert(posOutCalled == 2)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
assert(cpofCalled == 2)
assert(cporCalled == 2)
assert(kpSetCalled == 1)
assert(posOutCalled == 3)
lazyTalon.applyCommand(pctrl3)
lazyTalon.applyCommand(pctrl3)
lazyTalon.applyCommand(pctrl3)
assert(cpofCalled == 2)
assert(cporCalled == 3)
assert(kpSetCalled == 1)
assert(posOutCalled == 4)
lazyTalon.applyCommand(pctrl4)
lazyTalon.applyCommand(pctrl4)
lazyTalon.applyCommand(pctrl4)
assert(cpofCalled == 3)
assert(cporCalled == 4)
assert(kpSetCalled == 1)
assert(posOutCalled == 5)
}
test("LazyTalons apply VelocityBangBang is lazy") {
val mockedTalon = mock[TalonSRX]
val lazyTalon = new LazyTalon(mockedTalon, 0, 0, -1, 1)
var cpofCalled = 0
var cporCalled = 0
var kpSetCalled = 0
var posOutCalled = 0
when(mockedTalon.configPeakOutputForward(anyDouble(), anyInt())).then(_ => {
cpofCalled += 1
ErrorCode.OK
})
when(mockedTalon.configPeakOutputReverse(anyDouble(), anyInt())).then(_ => {
cporCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kP(anyInt(), anyDouble(), anyInt())).then(_ => {
kpSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Velocity), anyDouble())).then(_ => {
posOutCalled += 1
})
val vctrl1 = VelocityBangBang(forwardWhenBelow = true, reverseWhenAbove = true, Each(1))
val vctrl2 = VelocityBangBang(forwardWhenBelow = false, reverseWhenAbove = false, Each(2))
val vctrl3 = VelocityBangBang(forwardWhenBelow = true, reverseWhenAbove = false, Each(3))
val vctrl4 = VelocityBangBang(forwardWhenBelow = false, reverseWhenAbove = true, Each(4))
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
assert(cpofCalled == 0)
assert(cporCalled == 0)
assert(kpSetCalled == 1)
assert(posOutCalled == 1)
lazyTalon.applyCommand(vctrl2)
lazyTalon.applyCommand(vctrl2)
lazyTalon.applyCommand(vctrl2)
assert(cpofCalled == 1)
assert(cporCalled == 1)
assert(kpSetCalled == 1)
assert(posOutCalled == 2)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
assert(cpofCalled == 2)
assert(cporCalled == 2)
assert(kpSetCalled == 1)
assert(posOutCalled == 3)
lazyTalon.applyCommand(vctrl3)
lazyTalon.applyCommand(vctrl3)
lazyTalon.applyCommand(vctrl3)
assert(cpofCalled == 2)
assert(cporCalled == 3)
assert(kpSetCalled == 1)
assert(posOutCalled == 4)
lazyTalon.applyCommand(vctrl4)
lazyTalon.applyCommand(vctrl4)
lazyTalon.applyCommand(vctrl4)
assert(cpofCalled == 3)
assert(cporCalled == 4)
assert(kpSetCalled == 1)
assert(posOutCalled == 5)
}
test("LazyTalons apply PositionPID is lazy") {
val mockedTalon = mock[TalonSRX]
val lazyTalon = new LazyTalon(mockedTalon, 0, 0, -1, 1)
var posOutCalled = 0
var kpSetCalled = 0
var kiSetCalled = 0
var kdSetCalled = 0
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.PercentOutput), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Velocity), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.config_kF(anyInt(), anyDouble(), anyInt())).thenThrow(MockTalonCalled)
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Position), anyDouble())).then(_ => {
posOutCalled += 1
})
when(mockedTalon.config_kP(anyInt(), anyDouble(), anyInt())).then(_ => {
kpSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kI(anyInt(), anyDouble(), anyInt())).then(_ => {
kiSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kD(anyInt(), anyDouble(), anyInt())).then(_ => {
kdSetCalled += 1
ErrorCode.OK
})
val pctrl1 = PositionPID(NativePositionGains(1, 2, 3), Each(4))
val pctrl2 = PositionPID(NativePositionGains(5, 6, 7), Each(8))
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
assert(posOutCalled == 1)
assert(kpSetCalled == 1)
assert(kiSetCalled == 1)
assert(kdSetCalled == 1)
lazyTalon.applyCommand(pctrl2)
lazyTalon.applyCommand(pctrl2)
lazyTalon.applyCommand(pctrl2)
assert(posOutCalled == 2)
assert(kpSetCalled == 2)
assert(kiSetCalled == 2)
assert(kdSetCalled == 2)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
lazyTalon.applyCommand(pctrl1)
assert(posOutCalled == 3)
assert(kpSetCalled == 3) // this will break once/if we implement idx
assert(kiSetCalled == 3)
assert(kdSetCalled == 3)
}
test("LazyTalons apply VelocityPIDF is lazy") {
val mockedTalon = mock[TalonSRX]
val lazyTalon = new LazyTalon(mockedTalon, 0, 0, -1, 1)
var velOutCalled = 0
var kpSetCalled = 0
var kiSetCalled = 0
var kdSetCalled = 0
var kfSetCalled = 0
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.PercentOutput), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Position), anyDouble())).thenThrow(MockTalonCalled)
when(mockedTalon.set(ArgumentMatchers.eq(ControlMode.Velocity), anyDouble())).then(_ => {
velOutCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kP(anyInt(), anyDouble(), anyInt())).then(_ => {
kpSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kI(anyInt(), anyDouble(), anyInt())).then(_ => {
kiSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kD(anyInt(), anyDouble(), anyInt())).then(_ => {
kdSetCalled += 1
ErrorCode.OK
})
when(mockedTalon.config_kF(anyInt(), anyDouble(), anyInt())).then(_ => {
kfSetCalled += 1
ErrorCode.OK
})
val vctrl1 = VelocityPIDF(NativeVelocityGains(1, 2, 3, 4), Each(5))
val vctrl2 = VelocityPIDF(NativeVelocityGains(6, 7, 8, 9), Each(10))
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
assert(velOutCalled == 1)
assert(kpSetCalled == 1)
assert(kiSetCalled == 1)
assert(kdSetCalled == 1)
assert(kfSetCalled == 1)
lazyTalon.applyCommand(vctrl2)
lazyTalon.applyCommand(vctrl2)
lazyTalon.applyCommand(vctrl2)
assert(velOutCalled == 2)
assert(kpSetCalled == 2)
assert(kiSetCalled == 2)
assert(kdSetCalled == 2)
assert(kfSetCalled == 2)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
lazyTalon.applyCommand(vctrl1)
assert(velOutCalled == 3)
assert(kpSetCalled == 3) // this will break once/if we implement idx
assert(kiSetCalled == 3)
assert(kdSetCalled == 3)
assert(kfSetCalled == 3)
}
}
object MockTalonCalled extends RuntimeException
|
Team846/potassium
|
frc/jvm/src/test/scala/com/lynbrookrobotics/potassium/frc/LazyTalonTest.scala
|
Scala
|
mit
| 10,553
|
/**
* Copyright 2015 Lorand Szakacs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.lorandszakacs.sbt.commonbuild.core
import sbt._
/**
* @author Lorand Szakacs, lsz@lorandszakacs.com
* @since 16 Mar 2015
*
* Dependencies that are shared by at least two projects should
* go in here. This reduces the risk of using different versions
 * of the same library across all CareConnect projects.
*/
private[core] trait CommonBuildCoreDependencies {
def scalacVersion: String = "2.12.3"
private lazy val akkaVersion = "2.4.14"
private lazy val akkaHttpVersion = "10.0.0"
private lazy val catsVersion = "0.9.0"
private lazy val shapelessVersion = "2.3.2"
/**
* All predefined dev dependencies should go in here.
*/
object dev {
object akka {
def actor: ModuleID = "com.typesafe.akka" %% "akka-actor" % akkaVersion withSources()
def http: ModuleID = "com.typesafe.akka" %% "akka-http" % akkaHttpVersion withSources()
}
def cats: ModuleID = "org.typelevel" %% "cats" % catsVersion withSources()
def shapeless: ModuleID = "com.chuusai" %% "shapeless" % shapelessVersion withSources()
def scalaParserCombinators: ModuleID = "org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.6" withSources()
def nScalaJodaTime: ModuleID = "com.github.nscala-time" %% "nscala-time" % "2.16.0" withSources()
def reactiveMongo: ModuleID = "org.reactivemongo" %% "reactivemongo" % "0.12.5" withSources()
def typeSafeConfig: ModuleID = "com.typesafe" % "config" % "1.3.1" withSources()
//these two dependencies have to be put together on the ClassPath
def scalaLogging: ModuleID = "com.typesafe.scala-logging" %% "scala-logging" % "3.5.0" withSources()
def logbackClassic: ModuleID = "ch.qos.logback" % "logback-classic" % "1.1.7" withSources()
def pprint: ModuleID = "com.lihaoyi" %% "pprint" % "0.4.3" withSources()
object java {
def jsoup: ModuleID = "org.jsoup" % "jsoup" % "1.8.1" withSources()
}
def resolvers: Seq[MavenRepository] = Seq(
"Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/",
"Sonatype Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots/"
)
}
/**
* All predefined test dependencies should go in here.
*/
object test {
def scalaTest: ModuleID = "org.scalatest" %% "scalatest" % "3.0.1" % Test withSources()
def scalaCheck: ModuleID = "org.scalacheck" %% "scalacheck" % "1.12.2" % Test withSources()
def scalaMock: ModuleID = "org.scalamock" %% "scalamock-scalatest-support" % "3.5" % Test withSources()
object akka {
def testkit: ModuleID = "com.typesafe.akka" %% "akka-testkit" % akkaVersion % Test withSources()
def httpTestkit: ModuleID = "com.typesafe.akka" %% "akka-http-testkit" % akkaHttpVersion % Test withSources()
}
def resolvers: Seq[MavenRepository] = Seq()
}
}
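/**
 * Editor's illustrative sketch, NOT part of the original file. It assumes a build definition
 * object in this same package mixes in CommonBuildCoreDependencies, and shows how the grouped
 * `dev`/`test` modules above might be wired into an sbt project. The project id and base
 * directory are hypothetical.
 */
private[core] object ExampleUsageOfCommonDependencies extends CommonBuildCoreDependencies {
  import sbt.Keys._

  lazy val exampleProject: Project = Project(id = "example", base = file("."))
    .settings(
      scalaVersion := scalacVersion,
      resolvers ++= dev.resolvers ++ test.resolvers,
      libraryDependencies ++= Seq(
        dev.akka.actor,    // shared compile-scope Akka actor dependency
        dev.cats,          // shared compile-scope cats dependency
        test.scalaTest,    // Test-scoped ScalaTest
        test.akka.testkit  // Test-scoped Akka testkit
      )
    )
}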
|
lorandszakacs/sbt-common-build
|
src/main/scala/com/lorandszakacs/sbt/commonbuild/core/CommonBuildCoreDependencies.scala
|
Scala
|
apache-2.0
| 3,483
|
package providers
import com.feth.play.module.pa.providers.password.UsernamePasswordAuthUser
import com.feth.play.module.pa.user.NameIdentity
import views.form.Signup
/**
* Use the default constructor for password reset only - do not use this to sign up a user!
* @param password
* @param email
*/
class MySignupAuthUser(password: String, email: String = null) extends UsernamePasswordAuthUser(password, email) with NameIdentity {
//-------------------------------------------------------------------
// public
//-------------------------------------------------------------------
/**
* Sign up a new user
* @param signup form data
*/
def this(signup: Signup) {
this(signup.getPassword, signup.getEmail)
name = signup.name
}
//-------------------------------------------------------------------
override def getName: String = {
return name
}
//-------------------------------------------------------------------
// private
//-------------------------------------------------------------------
private var name: String = null
}
|
bravegag/play-authenticate-usage-scala
|
app/providers/MySignupAuthUser.scala
|
Scala
|
apache-2.0
| 1,086
|
package ingraph.ire.nodes.unary
import akka.actor.{ActorSystem, Props, actorRef2Scala}
import akka.testkit.{ImplicitSender, TestActors, TestKit}
import ingraph.ire.datatypes.Tuple
import ingraph.ire.messages.ChangeSet
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
class UnwindNodeTest(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
with WordSpecLike with Matchers with BeforeAndAfterAll {
def this() = this(ActorSystem("MySpec"))
override def afterAll {
TestKit.shutdownActorSystem(system)
}
import ingraph.ire.util.TestUtil._
private def indexer(index: Int) = {
(t: Tuple) => t(index).asInstanceOf[Seq[Any]]
}
"Unwind" must {
"do simple unwind 0" in {
val changeSet = ChangeSet(
positive = tupleBag(
tuple("x", cypherList(1, 2, 3), "y"),
tuple("w", cypherList(), "z")
),
negative = tupleBag(
tuple("a", cypherList(1, 2), "b"),
tuple("c", cypherList(), "d")
)
)
val echoActor = system.actorOf(TestActors.echoActorProps)
val unwind = system.actorOf(Props(new UnwindNode(echoActor ! _, indexer(1))))
unwind ! changeSet
expectMsg(ChangeSet(
positive = tupleBag(
tuple("x", cypherList(1, 2, 3), "y", 1),
tuple("x", cypherList(1, 2, 3), "y", 2),
tuple("x", cypherList(1, 2, 3), "y", 3)
),
negative = tupleBag(
tuple("a", cypherList(1, 2), "b", 1),
tuple("a", cypherList(1, 2), "b", 2)
)
))
}
"do simple unwind 1" in {
val changeSet = ChangeSet(
positive = tupleBag(
tuple("x", List(1, 2, 3), "y"),
tuple("w", List(4, 5), "z")
)
)
val echoActor = system.actorOf(TestActors.echoActorProps)
val unwind = system.actorOf(Props(new UnwindNode(echoActor ! _, indexer(1))))
unwind ! changeSet
expectMsg(ChangeSet(
positive = tupleBag(
tuple("x", cypherList(1, 2, 3), "y", 1),
tuple("x", cypherList(1, 2, 3), "y", 2),
tuple("x", cypherList(1, 2, 3), "y", 3),
tuple("w", cypherList(4, 5), "z", 4),
tuple("w", cypherList(4, 5), "z", 5)
)
))
}
}
}
|
FTSRG/ingraph
|
ire/src/test/scala/ingraph/ire/nodes/unary/UnwindNodeTest.scala
|
Scala
|
epl-1.0
| 2,266
|
package scalax.collection.constrained
package generic
import scala.language.{higherKinds, postfixOps}
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.Iterable
import scala.collection.mutable.{Builder, ListBuffer}
import scala.collection.generic.CanBuildFrom
import scala.reflect.runtime.universe._
import scalax.collection.GraphPredef.{EdgeLikeIn, Param, InParam}
import scalax.collection.generic.GraphCompanion
import scalax.collection.mutable.ArraySet
import scalax.collection.config.GraphConfig
import constraints.NoneConstraint
import mutable.GraphBuilder
import config.ConstrainedConfig
/** Methods common to `Graph` companion objects in the constrained module. */
trait GraphConstrainedCompanion[+GC[N,E[X]<:EdgeLikeIn[X]] <:
Graph[N,E] with GraphLike[N,E,GC]]
extends GraphCompanion[GC]
{
type Config = ConstrainedConfig
def defaultConfig = ConstrainedConfig()
/** Same as `from` except for constraint being suppressed. */
protected[collection] def fromUnchecked[N, E[X] <: EdgeLikeIn[X]]
(nodes: Iterable[N],
edges: Iterable[E[N]])
(implicit edgeT: TypeTag[E[N]],
config: Config) : GC[N,E]
override def newBuilder[N, E[X] <: EdgeLikeIn[X]]
(implicit edgeT: TypeTag[E[N]],
config: Config): Builder[Param[N,E], GC[N,E]] =
new GraphBuilder[N,E,GC](this)(edgeT, config)
}
abstract class GraphConstrainedCompanionAlias
[GC[N,E[X] <: EdgeLikeIn[X]] <: Graph[N,E] with GraphLike[N,E,GC],
E[X] <: EdgeLikeIn[X]]
(companion: GraphConstrainedCompanion[GC],
constraintCompanion: ConstraintCompanion[Constraint])
(implicit adjacencyListHints: ArraySet.Hints = ArraySet.Hints())
{
def empty[N](implicit edgeT: TypeTag[E[N]],
config: GraphConfig): Graph[N,E] =
companion.empty(edgeT, constraintCompanion)
def apply[N](elems: InParam[N,E]*)
(implicit edgeT: TypeTag[E[N]],
config: GraphConfig): Graph[N,E] = companion(elems: _*)(edgeT, constraintCompanion)
def from[N](nodes: Iterable[N],
edges: Iterable[E[N]])
(implicit edgeT: TypeTag[E[N]],
config: GraphConfig): Graph[N,E] = companion.from(nodes, edges)(edgeT, constraintCompanion)
}
trait MutableGraphCompanion[+GC[N,E[X]<:EdgeLikeIn[X]] <:
mutable.Graph[N,E] with mutable.GraphLike[N,E,GC]]
extends GraphConstrainedCompanion[GC]
{
override def newBuilder[N, E[X] <: EdgeLikeIn[X]]
(implicit edgeT: TypeTag[E[N]],
config: Config): Builder[Param[N,E], GC[N,E] @uncheckedVariance] =
new GraphBuilder[N,E,GC](this)(edgeT, config)
}
trait ImmutableGraphCompanion[+GC[N,E[X]<:EdgeLikeIn[X]] <:
immutable.Graph[N,E] with GraphLike[N,E,GC]]
extends GraphConstrainedCompanion[GC]
|
Calavoow/scala-graph
|
constrained/src/main/scala/scalax/collection/constrained/generic/Graph.scala
|
Scala
|
bsd-3-clause
| 2,932
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.agents.producer
import com.bwsw.tstreams.storage.StorageClient
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by Ivan Kudryavtsev on 15.08.16.
*/
class ProducerTransactionData(transaction: ProducerTransactionImpl, ttl: Long, storageClient: StorageClient) {
private[tstreams] var items = ListBuffer[Array[Byte]]()
private[tstreams] var lastOffset: Int = 0
private val streamID = transaction.getProducer.stream.id
def put(elt: Array[Byte]): Int = this.synchronized {
items += elt
return items.size
}
def save(): () => Unit = this.synchronized {
val job = storageClient.putTransactionData(streamID, transaction.getPartition, transaction.getTransactionID, items, lastOffset)
if (Producer.logger.isDebugEnabled()) {
Producer.logger.debug(s"putTransactionData($streamID, ${transaction.getPartition}, ${transaction.getTransactionID}, $items, $lastOffset)")
}
lastOffset += items.size
items = ListBuffer[Array[Byte]]()
() => Await.result(job, 1.minute)
}
}
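/**
 * Editor's illustrative sketch, NOT part of the original file. It shows the intended put/save
 * contract of ProducerTransactionData above: put() buffers a payload and returns the buffer
 * size, save() schedules the storage write and hands back a thunk that blocks until it
 * completes. The `transaction`, `ttl` and `client` arguments are assumed to be pre-built
 * collaborators supplied by the surrounding producer code.
 */
object ProducerTransactionDataUsageSketch {
  def putAndFlush(transaction: ProducerTransactionImpl, ttl: Long, client: StorageClient): Unit = {
    val data = new ProducerTransactionData(transaction, ttl, client)
    data.put("first".getBytes())  // buffered, returns 1
    data.put("second".getBytes()) // buffered, returns 2
    val awaitWrite: () => Unit = data.save() // asynchronous putTransactionData, buffer is reset
    awaitWrite()                             // blocks (up to 1 minute) until the data is stored
  }
}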
|
bwsw/t-streams
|
src/main/scala/com/bwsw/tstreams/agents/producer/ProducerTransactionData.scala
|
Scala
|
apache-2.0
| 1,924
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.flume
import java.util.concurrent._
import java.util.{List => JList, Map => JMap}
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import com.google.common.base.Charsets.UTF_8
import org.apache.flume.event.EventBuilder
import org.apache.flume.Context
import org.apache.flume.channel.MemoryChannel
import org.apache.flume.conf.Configurables
import org.apache.spark.streaming.flume.sink.{SparkSinkConfig, SparkSink}
/**
* Share codes for Scala and Python unit tests
*/
private[flume] class PollingFlumeTestUtils {
private val batchCount = 5
val eventsPerBatch = 100
private val totalEventsPerChannel = batchCount * eventsPerBatch
private val channelCapacity = 5000
def getTotalEvents: Int = totalEventsPerChannel * channels.size
private val channels = new ArrayBuffer[MemoryChannel]
private val sinks = new ArrayBuffer[SparkSink]
/**
* Start a sink and return the port of this sink
*/
def startSingleSink(): Int = {
channels.clear()
sinks.clear()
// Start the channel and sink.
val context = new Context()
context.put("capacity", channelCapacity.toString)
context.put("transactionCapacity", "1000")
context.put("keep-alive", "0")
val channel = new MemoryChannel()
Configurables.configure(channel, context)
val sink = new SparkSink()
context.put(SparkSinkConfig.CONF_HOSTNAME, "localhost")
context.put(SparkSinkConfig.CONF_PORT, String.valueOf(0))
Configurables.configure(sink, context)
sink.setChannel(channel)
sink.start()
channels += (channel)
sinks += sink
sink.getPort()
}
/**
* Start 2 sinks and return the ports
*/
def startMultipleSinks(): JList[Int] = {
channels.clear()
sinks.clear()
// Start the channel and sink.
val context = new Context()
context.put("capacity", channelCapacity.toString)
context.put("transactionCapacity", "1000")
context.put("keep-alive", "0")
val channel = new MemoryChannel()
Configurables.configure(channel, context)
val channel2 = new MemoryChannel()
Configurables.configure(channel2, context)
val sink = new SparkSink()
context.put(SparkSinkConfig.CONF_HOSTNAME, "localhost")
context.put(SparkSinkConfig.CONF_PORT, String.valueOf(0))
Configurables.configure(sink, context)
sink.setChannel(channel)
sink.start()
val sink2 = new SparkSink()
context.put(SparkSinkConfig.CONF_HOSTNAME, "localhost")
context.put(SparkSinkConfig.CONF_PORT, String.valueOf(0))
Configurables.configure(sink2, context)
sink2.setChannel(channel2)
sink2.start()
sinks += sink
sinks += sink2
channels += channel
channels += channel2
sinks.map(_.getPort())
}
/**
* Send data and wait until all data has been received
*/
def sendDatAndEnsureAllDataHasBeenReceived(): Unit = {
val executor = Executors.newCachedThreadPool()
val executorCompletion = new ExecutorCompletionService[Void](executor)
val latch = new CountDownLatch(batchCount * channels.size)
sinks.foreach(_.countdownWhenBatchReceived(latch))
channels.foreach(channel => {
executorCompletion.submit(new TxnSubmitter(channel))
})
for (i <- 0 until channels.size) {
executorCompletion.take()
}
latch.await(15, TimeUnit.SECONDS) // Ensure all data has been received.
}
/**
* A Python-friendly method to assert the output
*/
def assertOutput(
outputHeaders: JList[JMap[String, String]], outputBodies: JList[String]): Unit = {
require(outputHeaders.size == outputBodies.size)
val eventSize = outputHeaders.size
if (eventSize != totalEventsPerChannel * channels.size) {
throw new AssertionError(
s"Expected ${totalEventsPerChannel * channels.size} events, but was $eventSize")
}
var counter = 0
for (k <- 0 until channels.size; i <- 0 until totalEventsPerChannel) {
val eventBodyToVerify = s"${channels(k).getName}-$i"
val eventHeaderToVerify: JMap[String, String] = Map[String, String](s"test-$i" -> "header")
var found = false
var j = 0
while (j < eventSize && !found) {
if (eventBodyToVerify == outputBodies.get(j) &&
eventHeaderToVerify == outputHeaders.get(j)) {
found = true
counter += 1
}
j += 1
}
}
if (counter != totalEventsPerChannel * channels.size) {
throw new AssertionError(
s"111 Expected ${totalEventsPerChannel * channels.size} events, but was $counter")
}
}
def assertChannelsAreEmpty(): Unit = {
channels.foreach(assertChannelIsEmpty)
}
private def assertChannelIsEmpty(channel: MemoryChannel): Unit = {
val queueRemaining = channel.getClass.getDeclaredField("queueRemaining")
queueRemaining.setAccessible(true)
val m = queueRemaining.get(channel).getClass.getDeclaredMethod("availablePermits")
if (m.invoke(queueRemaining.get(channel)).asInstanceOf[Int] != 5000) {
throw new AssertionError(s"Channel ${channel.getName} is not empty")
}
}
def close(): Unit = {
sinks.foreach(_.stop())
sinks.clear()
channels.foreach(_.stop())
channels.clear()
}
private class TxnSubmitter(channel: MemoryChannel) extends Callable[Void] {
override def call(): Void = {
var t = 0
for (i <- 0 until batchCount) {
val tx = channel.getTransaction
tx.begin()
for (j <- 0 until eventsPerBatch) {
channel.put(EventBuilder.withBody(s"${channel.getName}-$t".getBytes(UTF_8),
Map[String, String](s"test-$t" -> "header")))
t += 1
}
tx.commit()
tx.close()
Thread.sleep(500) // Allow some time for the events to reach
}
null
}
}
}
|
practice-vishnoi/dev-spark-1
|
external/flume/src/main/scala/org/apache/spark/streaming/flume/PollingFlumeTestUtils.scala
|
Scala
|
apache-2.0
| 6,621
|
package com.github.tkqubo.akka_open_graph_fetcher
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.StatusCodes.ClientError
import org.specs2.mutable.Specification
import spray.json.DefaultJsonProtocol._
import spray.json._
import scala.concurrent.TimeoutException
/**
* Test class for [[Error]]
* {{{
* sbt "test-only com.github.tkqubo.akka_http_og_fetcher.ErrorTest"
* }}}
*/
// scalastyle:off magic.number
class ErrorTest extends Specification {
"Error" should {
val errorMessage: Option[String] = Some("error")
"maybeFromStatusCode" should {
"return None" in {
Error.maybeFromStatusCode(StatusCodes.OK, errorMessage) === None
}
"return Error instance" in {
val requestTimeout: ClientError = StatusCodes.RequestTimeout
Error.maybeFromStatusCode(requestTimeout, errorMessage) === Some(Error(requestTimeout.intValue, errorMessage))
Error.maybeFromStatusCode(requestTimeout) === Some(Error(requestTimeout.intValue))
}
}
"fromThrowable" should {
"return Error with 408 status code" in {
Error.fromThrowable(new TimeoutException(), errorMessage) === Error(StatusCodes.RequestTimeout.intValue, errorMessage)
Error.fromThrowable(new TimeoutException()) === Error(StatusCodes.RequestTimeout.intValue)
}
"return Error with 503 status code" in {
Seq(new IllegalArgumentException, new RuntimeException, new OutOfMemoryError())
.forall(Error.fromThrowable(_) == Error(StatusCodes.ServiceUnavailable.intValue))
}
}
"rootJsonFormat" should {
"pass" in {
val error = Error(StatusCodes.BadRequest.intValue, errorMessage)
val json =
s"""
|{
| "status_code": ${StatusCodes.BadRequest.intValue},
| "message": ${error.message.get.toJson}
|}
""".stripMargin.parseJson
Error.rootJsonFormat.write(error).prettyPrint === json.prettyPrint
Error.rootJsonFormat.read(json) === error
}
}
}
}
|
tkqubo/akka-open-graph-fetcher
|
src/test/scala/com/github/tkqubo/akka_open_graph_fetcher/ErrorTest.scala
|
Scala
|
mit
| 2,068
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
/**
* A partitioner controls the mapping between user-provided keys and kafka partitions. Users can implement a custom
* partitioner to change this mapping.
*
* Implementations will be constructed via reflection and are required to have a constructor that takes a single
* VerifiableProperties instance--this allows passing configuration properties into the partitioner implementation.
*/
@deprecated("This trait has been deprecated and will be removed in a future release. " +
"Please use org.apache.kafka.clients.producer.Partitioner instead.", "0.10.0.0")
trait Partitioner {
/**
* Uses the key to calculate a partition bucket id for routing
* the data to the appropriate broker partition
* @return an integer between 0 and numPartitions-1
*/
def partition(key: Any, numPartitions: Int): Int
}
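import kafka.utils.VerifiableProperties

/**
 * Editor's illustrative sketch, NOT part of the original file. A minimal custom partitioner of
 * the kind the scaladoc above describes: it is instantiated reflectively, so it must expose a
 * constructor taking a single VerifiableProperties argument. The hashing strategy is purely
 * illustrative.
 */
class ExampleHashPartitioner(props: VerifiableProperties) extends Partitioner {
  // Fold the key's hashCode into the range [0, numPartitions); route null keys to partition 0.
  override def partition(key: Any, numPartitions: Int): Int =
    if (key == null) 0 else (key.hashCode & Int.MaxValue) % numPartitions
}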
|
flange/drift-dev
|
kafka/00-kafka_2.11-0.10.1.0/libs/tmp/kafka/producer/Partitioner.scala
|
Scala
|
apache-2.0
| 1,658
|
package toguru.toggles
import javax.inject.{Inject, Named}
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.codahale.metrics.Counter
import com.kenshoo.play.metrics.Metrics
import play.api.http.MimeTypes
import play.api.libs.json.{JsPath, Json, Writes}
import play.api.libs.functional.syntax._
import play.api.mvc._
import play.mvc.Http.HeaderNames
import toguru.app.Config
import toguru.logging.EventPublishing
import toguru.toggles.ToggleStateActor.{GetState, ToggleStateInitializing}
import toguru.toggles.events.Rollout
object ToggleStateController {
val MimeApiV2 = "application/vnd.toguru.v2+json"
val MimeApiV3 = "application/vnd.toguru.v3+json"
val AllowedContentTypes = Seq(MimeTypes.JSON, MimeApiV2, MimeApiV3)
val toggleStateWriterUntilV2: Writes[ToggleState] = {
def activeRolloutPercentage(state: ToggleState): Option[Int] = state.activations.headOption.flatMap(_.rollout.map(_.percentage))
(
(JsPath \\ "id").write[String] and
(JsPath \\ "tags").write[Map[String, String]] and
(JsPath \\ "rolloutPercentage").writeNullable[Int]
)(ts => (ts.id, ts.tags, activeRolloutPercentage(ts)))
}
val toggleStateSeqWriterUntilV2 = Writes.seq(toggleStateWriterUntilV2)
val toggleStatesWriterUntilV2: Writes[ToggleStates] = (
(JsPath \\ "sequenceNo").write[Long] and
(JsPath \\ "toggles").write(Writes.seq(toggleStateWriterUntilV2))
)(unlift(ToggleStates.unapply))
implicit val rolloutWriter = Json.writes[Rollout]
implicit val toggleActivationWriter = Json.writes[ToggleActivation]
implicit val toggleStateWriter = Json.writes[ToggleState]
implicit val toggleStatesWriter = Json.writes[ToggleStates]
val AcceptsToguruV2 = Accepting(MimeApiV2)
val AcceptsToguruV3 = Accepting(MimeApiV3)
}
class ToggleStateController(actor: ActorRef, config: Config, stateRequests: Counter, stateStaleErrors: Counter)
extends Controller with EventPublishing with JsonResponses {
import ToggleStateController._
@Inject()
def this(@Named("toggle-state") actor: ActorRef, config: Config, metrics: Metrics) =
this(actor, config, metrics.defaultRegistry.counter("state-requests"), metrics.defaultRegistry.counter("state-stale-errors"))
implicit val timeout = Timeout(config.actorTimeout)
def get(seqNo: Option[Long]) = Action.async { request =>
import play.api.libs.concurrent.Execution.Implicits._
stateRequests.inc()
(actor ? GetState).map {
case ToggleStateInitializing =>
InternalServerError(errorJson("Internal Server Error",
"Server is currently initializing",
"Please wait until this server has completed initialization"))
case ts: ToggleStates if seqNo.exists(_ > ts.sequenceNo) =>
stateStaleErrors.inc()
InternalServerError(errorJson("Internal Server Error",
"Server state is older than client state (seqNo in request is greater than server seqNo)",
"Wait until server replays state or query another server"))
case ts: ToggleStates =>
responseFor(request, ts)
}.recover(serverError("get-toggle-state"))
}
def responseFor(request: Request[_], toggleStates: ToggleStates) = request match {
case Accepts.Json() => Ok(Json.toJson(toggleStates.toggles)(toggleStateSeqWriterUntilV2))
case AcceptsToguruV2() => Ok(Json.toJson(toggleStates)(toggleStatesWriterUntilV2)).as(MimeApiV2)
case AcceptsToguruV3() => Ok(Json.toJson(toggleStates)).as(MimeApiV3)
case _ =>
val requestedContentType = request.headers.get(HeaderNames.CONTENT_TYPE).mkString
NotAcceptable(
errorJson(
"Not Acceptable",
s"The requested content type '$requestedContentType' cannot be served",
s"Please choose one of the following content types: ${AllowedContentTypes.mkString(", ")}") ++
Json.obj("allowedContentTypes" -> AllowedContentTypes)
)
}
}
|
andreas-schroeder/toguru
|
app/toguru/toggles/ToggleStateController.scala
|
Scala
|
mit
| 3,931
|
package mesosphere.marathon.core.launcher.impl
import com.google.inject.Inject
import mesosphere.marathon.MarathonConf
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.launcher.{ TaskOp, TaskOpFactory }
import mesosphere.marathon.core.task.{ Task, TaskStateOp }
import mesosphere.marathon.state.AppDefinition
import mesosphere.mesos.ResourceMatcher.ResourceSelector
import mesosphere.mesos.{ PersistentVolumeMatcher, ResourceMatcher, TaskBuilder }
import mesosphere.util.state.FrameworkId
import org.apache.mesos.{ Protos => Mesos }
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.concurrent.duration._
class TaskOpFactoryImpl @Inject() (
config: MarathonConf,
clock: Clock)
extends TaskOpFactory {
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] val taskOperationFactory = {
val principalOpt = config.mesosAuthenticationPrincipal.get
val roleOpt = config.mesosRole.get
new TaskOpFactoryHelper(principalOpt, roleOpt)
}
override def buildTaskOp(request: TaskOpFactory.Request): Option[TaskOp] = {
log.debug("buildTaskOp")
if (request.isForResidentApp) {
inferForResidents(request)
}
else {
inferNormalTaskOp(request)
}
}
private[this] def inferNormalTaskOp(request: TaskOpFactory.Request): Option[TaskOp] = {
val TaskOpFactory.Request(app, offer, tasks, _) = request
new TaskBuilder(app, Task.Id.forApp, config).buildIfMatches(offer, tasks.values).map {
case (taskInfo, ports) =>
val task = Task.LaunchedEphemeral(
taskId = Task.Id(taskInfo.getTaskId),
agentInfo = Task.AgentInfo(
host = offer.getHostname,
agentId = Some(offer.getSlaveId.getValue),
attributes = offer.getAttributesList.asScala
),
appVersion = app.version,
status = Task.Status(
stagedAt = clock.now()
),
networking = Task.HostPorts(ports)
)
taskOperationFactory.launchEphemeral(taskInfo, task)
}
}
private[this] def inferForResidents(request: TaskOpFactory.Request): Option[TaskOp] = {
val TaskOpFactory.Request(app, offer, tasks, additionalLaunches) = request
val needToLaunch = additionalLaunches > 0 && request.hasWaitingReservations
val needToReserve = request.numberOfWaitingReservations < additionalLaunches
val acceptedResourceRoles: Set[String] = {
val roles = app.acceptedResourceRoles.getOrElse(config.defaultAcceptedResourceRolesSet)
if (log.isDebugEnabled) log.debug(s"inferForResidents, acceptedResourceRoles $roles")
roles
}
/* *
* If an offer HAS reservations/volumes that match our app, handling these has precedence
* If an offer HAS NO reservations/volumes that match our app, we can reserve if needed
*
* Scenario 1:
* We need to launch tasks and receive an offer that HAS matching reservations/volumes
* - check if we have a task that need those volumes
* - if we do: schedule a Launch TaskOp for the task
* - if we don't: skip for now
*
* Scenario 2:
* We need to reserve resources and receive an offer that has matching resources
* - schedule a ReserveAndCreate TaskOp
*/
def maybeLaunchOnReservation = if (needToLaunch) {
val maybeVolumeMatch = PersistentVolumeMatcher.matchVolumes(offer, app, request.reserved)
maybeVolumeMatch.flatMap { volumeMatch =>
val matchingReservedResourcesWithoutVolumes =
ResourceMatcher.matchResources(
offer, app, tasks.values,
ResourceSelector(
config.mesosRole.get.toSet, reserved = true,
requiredLabels = TaskLabels.labelsForTask(request.frameworkId, volumeMatch.task)
)
)
matchingReservedResourcesWithoutVolumes.flatMap { otherResourcesMatch =>
launchOnReservation(app, offer, volumeMatch.task, matchingReservedResourcesWithoutVolumes, maybeVolumeMatch)
}
}
}
else None
def maybeReserveAndCreateVolumes = if (needToReserve) {
val matchingResourcesForReservation =
ResourceMatcher.matchResources(
offer, app, tasks.values,
ResourceSelector(acceptedResourceRoles, reserved = false)
)
matchingResourcesForReservation.map { resourceMatch =>
reserveAndCreateVolumes(request.frameworkId, app, offer, resourceMatch)
}
}
else None
maybeLaunchOnReservation orElse maybeReserveAndCreateVolumes
}
private[this] def launchOnReservation(
app: AppDefinition,
offer: Mesos.Offer,
task: Task.Reserved,
resourceMatch: Option[ResourceMatcher.ResourceMatch],
volumeMatch: Option[PersistentVolumeMatcher.VolumeMatch]): Option[TaskOp] = {
// create a TaskBuilder that used the id of the existing task as id for the created TaskInfo
new TaskBuilder(app, (_) => task.taskId, config).build(offer, resourceMatch, volumeMatch) map {
case (taskInfo, ports) =>
val taskStateOp = TaskStateOp.LaunchOnReservation(
task.taskId,
appVersion = app.version,
status = Task.Status(
stagedAt = clock.now()
),
networking = Task.HostPorts(ports))
taskOperationFactory.launchOnReservation(taskInfo, taskStateOp, task)
}
}
private[this] def reserveAndCreateVolumes(
frameworkId: FrameworkId,
app: AppDefinition,
offer: Mesos.Offer,
resourceMatch: ResourceMatcher.ResourceMatch): TaskOp = {
val localVolumes: Iterable[Task.LocalVolume] = app.persistentVolumes.map { volume =>
Task.LocalVolume(Task.LocalVolumeId(app.id, volume), volume)
}
val persistentVolumeIds = localVolumes.map(_.id)
val now = clock.now()
val timeout = Task.Reservation.Timeout(
initiated = now,
deadline = now + config.taskLaunchTimeout().millis,
reason = Task.Reservation.Timeout.Reason.ReservationTimeout
)
val task = Task.Reserved(
taskId = Task.Id.forApp(app.id),
agentInfo = Task.AgentInfo(
host = offer.getHostname,
agentId = Some(offer.getSlaveId.getValue),
attributes = offer.getAttributesList.asScala
),
reservation = Task.Reservation(persistentVolumeIds, Task.Reservation.State.New(timeout = Some(timeout)))
)
val taskStateOp = TaskStateOp.Reserve(task)
taskOperationFactory.reserveAndCreateVolumes(frameworkId, taskStateOp, resourceMatch.resources, localVolumes)
}
}
|
vivekjuneja/marathon
|
src/main/scala/mesosphere/marathon/core/launcher/impl/TaskOpFactoryImpl.scala
|
Scala
|
apache-2.0
| 6,563
|
type ~>[Args <: Tuple, Return] = Args match {
case (arg1, arg2) => ((arg1, arg2) => Return)
}
trait Builder[Args <: NonEmptyTuple] {
def apply(f: Args ~> String): String
}
class BuilderImpl[Args <: NonEmptyTuple] extends Builder[Args] {
override def apply(f: Args ~> String): String = ???
}
val builder = BuilderImpl[Int *: String *: EmptyTuple]()
// builder { (i: Int, s: String) => "test" } // This line compiles
val _ = builder { (i, s) => "test" } // Does not compile
|
dotty-staging/dotty
|
tests/pos/i13526.scala
|
Scala
|
apache-2.0
| 480
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
/**
* Portions
* Copyright 2011-2016 The Apache Software Foundation
*/
package org.locationtech.geomesa.utils.index
import com.google.common.primitives.UnsignedBytes
object ByteArrays {
val ZeroByte: Byte = 0x00.toByte
val OneByte: Byte = 0x01.toByte
val MaxByte: Byte = 0xff.toByte
val ZeroByteArray: Array[Byte] = Array(ByteArrays.ZeroByte)
val OneByteArray : Array[Byte] = Array(ByteArrays.OneByte)
implicit val ByteOrdering: Ordering[Array[Byte]] =
Ordering.comparatorToOrdering(UnsignedBytes.lexicographicalComparator)
implicit val UnsignedByteOrdering: Ordering[Byte] = new Ordering[Byte] {
override def compare(x: Byte, y: Byte): Int = UnsignedBytes.compare(x, y)
}
/**
* Writes the short as 2 bytes in the provided array, starting at offset
*
* @param short short to write
* @param bytes byte array to write to, must have length at least `offset` + 2
* @param offset offset to start writing
*/
def writeShort(short: Short, bytes: Array[Byte], offset: Int = 0): Unit = {
bytes(offset) = (short >> 8).asInstanceOf[Byte]
bytes(offset + 1) = short.asInstanceOf[Byte]
}
/**
* Writes the short as 2 bytes in the provided array, starting at offset,
* and preserving sort order for negative values
*
* @param short short to write
* @param bytes bytes array to write to, must have length at least `offset` + 2
* @param offset offset to start writing
*/
def writeOrderedShort(short: Short, bytes: Array[Byte], offset: Int = 0): Unit = {
bytes(offset) = (((short >> 8) & 0xff) ^ 0x80).asInstanceOf[Byte]
bytes(offset + 1) = (short & 0xff).asInstanceOf[Byte]
}
/**
* Writes the int as 4 bytes in the provided array, starting at offset
*
* @param int int to write
* @param bytes byte array to write to, must have length at least `offset` + 4
* @param offset offset to start writing
*/
def writeInt(int: Int, bytes: Array[Byte], offset: Int = 0): Unit = {
bytes(offset ) = ((int >> 24) & 0xff).asInstanceOf[Byte]
bytes(offset + 1) = ((int >> 16) & 0xff).asInstanceOf[Byte]
bytes(offset + 2) = ((int >> 8) & 0xff).asInstanceOf[Byte]
bytes(offset + 3) = (int & 0xff).asInstanceOf[Byte]
}
/**
* Writes the long as 8 bytes in the provided array, starting at offset
*
* @param long long to write
* @param bytes byte array to write to, must have length at least `offset` + 8
* @param offset offset to start writing
*/
def writeLong(long: Long, bytes: Array[Byte], offset: Int = 0): Unit = {
bytes(offset ) = ((long >> 56) & 0xff).asInstanceOf[Byte]
bytes(offset + 1) = ((long >> 48) & 0xff).asInstanceOf[Byte]
bytes(offset + 2) = ((long >> 40) & 0xff).asInstanceOf[Byte]
bytes(offset + 3) = ((long >> 32) & 0xff).asInstanceOf[Byte]
bytes(offset + 4) = ((long >> 24) & 0xff).asInstanceOf[Byte]
bytes(offset + 5) = ((long >> 16) & 0xff).asInstanceOf[Byte]
bytes(offset + 6) = ((long >> 8) & 0xff).asInstanceOf[Byte]
bytes(offset + 7) = (long & 0xff).asInstanceOf[Byte]
}
/**
* Writes the long as 8 bytes in the provided array, starting at offset,
* and preserving sort order for negative values
*
* @param long long to write
* @param bytes bytes array to write to, must have length at least `offset` + 8
* @param offset offset to start writing
*/
def writeOrderedLong(long: Long, bytes: Array[Byte], offset: Int = 0): Unit = {
bytes(offset ) = (((long >> 56) & 0xff) ^ 0x80).asInstanceOf[Byte]
bytes(offset + 1) = ((long >> 48) & 0xff).asInstanceOf[Byte]
bytes(offset + 2) = ((long >> 40) & 0xff).asInstanceOf[Byte]
bytes(offset + 3) = ((long >> 32) & 0xff).asInstanceOf[Byte]
bytes(offset + 4) = ((long >> 24) & 0xff).asInstanceOf[Byte]
bytes(offset + 5) = ((long >> 16) & 0xff).asInstanceOf[Byte]
bytes(offset + 6) = ((long >> 8) & 0xff).asInstanceOf[Byte]
bytes(offset + 7) = (long & 0xff).asInstanceOf[Byte]
}
/**
* Reads 2 bytes from the provided array as a short, starting at offset
*
* @param bytes array to read from
* @param offset offset to start reading
* @return
*/
def readShort(bytes: Array[Byte], offset: Int = 0): Short =
(((bytes(offset) & 0xff) << 8) | (bytes(offset + 1) & 0xff)).toShort
/**
* Reads 2 bytes, written with `writeOrderedShort` to preserve sort order, from the provided array as a short, starting at offset
*
* @param bytes array to read from
* @param offset offset to start reading
* @return
*/
def readOrderedShort(bytes: Array[Byte], offset: Int = 0): Short =
((((bytes(offset) ^ 0x80) & 0xff) << 8) | (bytes(offset + 1) & 0xff)).toShort
/**
* Reads 4 bytes from the provided array as an int, starting at offset
*
* @param bytes array to read from
* @param offset offset to start reading
* @return
*/
def readInt(bytes: Array[Byte], offset: Int = 0): Int = {
((bytes(offset ) & 0xff) << 24) |
((bytes(offset + 1) & 0xff) << 16) |
((bytes(offset + 2) & 0xff) << 8) |
(bytes(offset + 3) & 0xff)
}
/**
* Reads 8 bytes from the provided array as a long, starting at offset
*
* @param bytes array to read from
* @param offset offset to start reading
* @return
*/
def readLong(bytes: Array[Byte], offset: Int = 0): Long = {
((bytes(offset ) & 0xffL) << 56) |
((bytes(offset + 1) & 0xffL) << 48) |
((bytes(offset + 2) & 0xffL) << 40) |
((bytes(offset + 3) & 0xffL) << 32) |
((bytes(offset + 4) & 0xffL) << 24) |
((bytes(offset + 5) & 0xffL) << 16) |
((bytes(offset + 6) & 0xffL) << 8) |
(bytes(offset + 7) & 0xffL)
}
/**
* Reads 8 bytes, written with `writeOrderedLong` to preserve sort order, from the provided array as a long, starting at offset
*
* @param bytes array to read from
* @param offset offset to start reading
* @return
*/
def readOrderedLong(bytes: Array[Byte], offset: Int = 0): Long = {
(((bytes(offset) ^ 0x80) & 0xffL) << 56) |
((bytes(offset + 1) & 0xffL) << 48) |
((bytes(offset + 2) & 0xffL) << 40) |
((bytes(offset + 3) & 0xffL) << 32) |
((bytes(offset + 4) & 0xffL) << 24) |
((bytes(offset + 5) & 0xffL) << 16) |
((bytes(offset + 6) & 0xffL) << 8) |
(bytes(offset + 7) & 0xffL)
}
/**
* Allocates a new array of length two and writes the short to it
*
* @param short value to encode
* @return
*/
def toBytes(short: Short): Array[Byte] = {
val result = Array.ofDim[Byte](2)
writeShort(short, result)
result
}
/**
* Allocates a new array of length two and writes the short to it, preserving sort order for negative values
*
* @param short value to encode
* @return
*/
def toOrderedBytes(short: Short): Array[Byte] = {
val result = Array.ofDim[Byte](2)
writeOrderedShort(short, result)
result
}
/**
* Allocates a new array of length four and writes the int to it
*
* @param int value to encode
* @return
*/
def toBytes(int: Int): Array[Byte] = {
val result = Array.ofDim[Byte](4)
writeInt(int, result)
result
}
/**
* Allocates a new array of length eight and writes the long to it
*
* @param long value to encode
* @return
*/
def toBytes(long: Long): Array[Byte] = {
val result = Array.ofDim[Byte](8)
writeLong(long, result)
result
}
/**
* Allocates a new array of length eight and writes the long to it, preserving sort order for negative values
*
* @param long value to encode
* @return
*/
def toOrderedBytes(long: Long): Array[Byte] = {
val result = Array.ofDim[Byte](8)
writeOrderedLong(long, result)
result
}
/**
* Creates a byte array with a short and a long.
*
* Code based on the following methods, but avoids allocating extra byte arrays:
*
* com.google.common.primitives.Shorts#toByteArray(short)
* com.google.common.primitives.Longs#toByteArray(long)
*
* @param bin time bin
* @param z z value
* @return
*/
def toBytes(bin: Short, z: Long): Array[Byte] = {
val result = Array.ofDim[Byte](10)
writeShort(bin, result, 0)
writeLong(z, result, 2)
result
}
/**
* Creates a byte array with a short and a long and an int
*
* @param bin bin
* @param s s value
* @param time time offset
* @return
*/
def toBytes(bin: Short, s: Long, time: Int): Array[Byte] = {
val result = Array.ofDim[Byte](14)
writeShort(bin, result)
writeLong(s, result, 2)
writeInt(time, result, 10)
result
}
/**
* Creates a byte array with a short and a long and an int
*
* @param bin bin
* @param s s value
* @param time time offset
* @return
*/
def toBytesFollowingPrefix(bin: Short, s: Long, time: Int): Array[Byte] = incrementInPlace(toBytes(bin, s, time))
/**
* Creates a byte array with a short and a long, preserving the sort order of the short for negative values
*
* @param bin time bin
* @param z z value
* @return
*/
def toOrderedBytes(bin: Short, z: Long): Array[Byte] = {
val result = Array.ofDim[Byte](10)
writeOrderedShort(bin, result, 0)
writeLong(z, result, 2)
result
}
/**
* Creates a byte array with a short and a long.
*
* Code based on the following methods, but avoids allocating extra byte arrays:
*
* com.google.common.primitives.Shorts#toByteArray(short)
* com.google.common.primitives.Longs#toByteArray(long)
*
* @param bin time bin, already converted to 2 bytes
* @param z z value
* @return
*/
def toBytes(bin: Array[Byte], z: Long): Array[Byte] = {
val result = Array.ofDim[Byte](10)
result(0) = bin(0)
result(1) = bin(1)
writeLong(z, result, 2)
result
}
/**
* Converts a UUID into a byte array.
*
* Code based on the following method, but avoids allocating extra byte arrays:
*
* com.google.common.primitives.Longs#toByteArray(long)
*
* @param msb most significant bits
* @param lsb least significant bits
* @return
*/
def uuidToBytes(msb: Long, lsb: Long): Array[Byte] = {
val result = Array.ofDim[Byte](16)
writeLong(msb, result, 0)
writeLong(lsb, result, 8)
result
}
/**
* Converts a byte array into a UUID.
*
* Code based on the following method:
*
* com.google.common.primitives.Longs#fromByteArray(bytes)
*
* @param bytes bytes
* @return (most significant bits, least significant bits)
*/
def uuidFromBytes(bytes: Array[Byte], offset: Int = 0): (Long, Long) = {
val msb = readLong(bytes, offset)
val lsb = readLong(bytes, offset + 8)
(msb, lsb)
}
/**
* Creates a byte array that sorts directly after the z-value (as converted into a byte array).
*
* Code based on the following methods, but avoids allocating extra byte arrays:
*
* org.apache.accumulo.core.data.Range#followingPrefix(org.apache.hadoop.io.Text)
* com.google.common.primitives.Longs#toByteArray(long)
*
*
* @param z z value
* @return
*/
def toBytesFollowingPrefix(z: Long): Array[Byte] = incrementInPlace(toBytes(z))
/**
* Creates a byte array that sorts directly after the z-value (as converted into a byte array).
*
* Code based on the following methods, but avoids allocating extra byte arrays:
*
* org.apache.accumulo.core.data.Range#followingPrefix(org.apache.hadoop.io.Text)
* com.google.common.primitives.Shorts#toByteArray(short)
* com.google.common.primitives.Longs#toByteArray(long)
*
* @param bin epoch bin
* @param z z value
* @return
*/
def toBytesFollowingPrefix(bin: Short, z: Long): Array[Byte] = incrementInPlace(toBytes(bin, z))
/**
* Creates a byte array that sorts directly after the z-value (as converted into a byte array).
*
* @param bin epoch bin
* @param z z value
* @return
*/
def toOrderedBytesFollowingPrefix(bin: Short, z: Long): Array[Byte] = incrementInPlace(toOrderedBytes(bin, z))
def toBytesFollowingRow(long: Long): Array[Byte] = {
val result = Array.ofDim[Byte](9)
writeLong(long, result)
result(8) = ZeroByte
result
}
def toBytesFollowingRow(bin: Short, z: Long): Array[Byte] = {
val result = Array.ofDim[Byte](11)
writeShort(bin, result, 0)
writeLong(z, result, 2)
result(10) = ZeroByte
result
}
def toOrderedBytesFollowingRow(bin: Short, z: Long): Array[Byte] = {
val result = Array.ofDim[Byte](11)
writeOrderedShort(bin, result, 0)
writeLong(z, result, 2)
result(10) = ZeroByte
result
}
/**
* Returns a row that sorts just after all rows beginning with a prefix. Copied from Accumulo Range
*
* @param prefix to follow
* @return prefix that immediately follows the given prefix when sorted, or an empty array if no prefix can follow
* (i.e., the string is all 0xff bytes)
*/
def rowFollowingPrefix(prefix: Array[Byte]): Array[Byte] = {
// find the last byte in the array that is not 0xff
var changeIndex = prefix.length - 1
while (changeIndex >= 0 && prefix(changeIndex) == MaxByte) {
changeIndex -= 1
}
if (changeIndex < 0) { Array.empty } else {
// copy prefix bytes into new array
val following = Array.ofDim[Byte](changeIndex + 1)
System.arraycopy(prefix, 0, following, 0, changeIndex + 1)
// increment the selected byte
following(changeIndex) = (following(changeIndex) + 1).toByte
following
}
}
/**
* Returns a row that immediately follows the row. Useful for inclusive endpoints.
*
* @param row row
* @return
*/
def rowFollowingRow(row: Array[Byte]): Array[Byte] = {
val following = Array.ofDim[Byte](row.length + 1)
System.arraycopy(row, 0, following, 0, row.length)
following(row.length) = ZeroByte
following
}
/**
* Returns a row that immediately follows the row. Useful for inclusive endpoints.
*
* @param bytes row
* @return
*/
def rowFollowingRow(bytes: Array[Byte]*): Array[Byte] = {
var length = 1
bytes.foreach(b => length += b.length)
val result = Array.ofDim[Byte](length)
var i = 0
bytes.foreach { b =>
System.arraycopy(b, 0, result, i, b.length)
i += b.length
}
result(i) = ZeroByte
result
}
/**
* Concatenate byte arrays
*
* @param first first array
* @param second second array
* @return
*/
def concat(first: Array[Byte], second: Array[Byte]): Array[Byte] = {
val result = Array.ofDim[Byte](first.length + second.length)
System.arraycopy(first, 0, result, 0, first.length)
System.arraycopy(second, 0, result, first.length, second.length)
result
}
/**
* Concatenate byte arrays
*
* @param bytes arrays
* @return
*/
def concat(bytes: Array[Byte]*): Array[Byte] = {
var length = 0
bytes.foreach(b => length += b.length)
val result = Array.ofDim[Byte](length)
var i = 0
bytes.foreach { b =>
System.arraycopy(b, 0, result, i, b.length)
i += b.length
}
result
}
/**
* Converts an unsigned byte into a hex string
*
* @param b unsigned byte
* @return
*/
def toHex(b: Byte): String = f"${(b & 0xff) >>> 4}%01x${b & 0x0f}%01x"
/**
* Converts an unsigned byte array into a hex string
*
* @param bytes unsigned byte array
* @return
*/
def toHex(bytes: Array[Byte]): String = toHex(bytes, 0, bytes.length)
/**
* Converts an unsigned byte array into a hex string
*
* @param bytes unsigned byte array
* @return
*/
def toHex(bytes: Array[Byte], offset: Int, length: Int): String = {
val sb = new StringBuilder(length * 2)
var i = 0
while (i < length) {
sb.append(toHex(bytes(i + offset)))
i += 1
}
sb.toString
}
/**
* Convert a byte to a printable string. Based on Accumulo's byte representation
*
* @param b byte
* @return
*/
def printable(b: Byte): String = {
val c = 0xff & b
if (c >= 32 && c <= 126) { c.toChar.toString } else { f"%%$c%02x;" }
}
/**
* Convert each byte in the array to a printable string
*
* @param bytes bytes
* @return
*/
def printable(bytes: Array[Byte]): String = {
if (bytes == null) { "null" } else {
bytes.map(printable).mkString("")
}
}
/**
* Increment the last byte in the array, if it's not equal to MaxByte. Otherwise,
* walk backwards until we find a byte we can increment, and create a new sub-array
*
* @param bytes bytes
* @return
*/
private def incrementInPlace(bytes: Array[Byte]): Array[Byte] = {
var i = bytes.length - 1
if (bytes(i) != MaxByte) {
// normal case - we can just update the original byte array
bytes(i) = (bytes(i) + 1).toByte
bytes
} else {
// walk backwards to find the first byte we can increment, then take the sub-array to that point
do { i -= 1 } while (i >= 0 && bytes(i) == MaxByte)
if (i == -1) { Array.empty } else {
val result = Array.ofDim[Byte](i + 1)
System.arraycopy(bytes, 0, result, 0, result.length)
result(i) = (result(i) + 1).toByte
result
}
}
}
}
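/**
 * Editor's illustrative sketch, NOT part of the original file: exercises the symmetric
 * write/read helpers and the toBytes/concat/toHex utilities defined above on sample values.
 */
object ByteArraysUsageSketch {
  def roundTrip(): Unit = {
    val buf = Array.ofDim[Byte](10)
    ByteArrays.writeShort(3.toShort, buf, 0) // bytes 0-1 hold the epoch bin
    ByteArrays.writeLong(42L, buf, 2)        // bytes 2-9 hold the z value
    require(ByteArrays.readShort(buf) == 3.toShort)
    require(ByteArrays.readLong(buf, 2) == 42L)

    // the same composite key built with the allocation helpers, rendered as hex
    val key = ByteArrays.concat(ByteArrays.toBytes(3.toShort), ByteArrays.toBytes(42L))
    println(ByteArrays.toHex(key)) // 20 hex characters for the 10-byte key
  }
}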
|
aheyne/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/index/ByteArrays.scala
|
Scala
|
apache-2.0
| 17,840
|
package se.gigurra.aichallenge.host
import akka.actor.ActorSystem
import org.slf4j.LoggerFactory
import se.culvertsoft.mgen.javapack.serialization.{CommandLineArgHelp, CommandLineArgParser}
import se.gigurra.aichallenge.{ClassRegistry, CmdLineArgs}
object Main {
protected val logger = LoggerFactory.getLogger(getClass())
def main(args: Array[String]) {
if (args.exists(arg => {
arg.contains("help") || arg == "h" || arg == "-h"
})) {
println(new CommandLineArgHelp(classOf[CmdLineArgs]))
return
}
implicit val system = ActorSystem()
val argParser = new CommandLineArgParser(classOf[CmdLineArgs], new ClassRegistry)
val cmdLineArgs = argParser.parse(args)
val databaseHandler = DatabaseHandler()
val host = GameHost(databaseHandler)
val zmqProvider = ZmqProvider(databaseHandler, host, cmdLineArgs.getZmqPort)
val restProvider = RestProvider(databaseHandler, host, cmdLineArgs.getRestPort, "<no_ssl_cert>")
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
logger.info("Shutting down server..")
system.shutdown()
system.awaitTermination()
logger.info("Server closed normally")
}
})
}
}
|
GiGurra/gigurra-game-challenge
|
src/main/scala/se/gigurra/aichallenge/host/Main.scala
|
Scala
|
gpl-2.0
| 1,278
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia.integration
import algolia.AlgoliaDsl._
import algolia.AlgoliaTest
import algolia.objects.{IndexSettings, SearchableAttributes}
import algolia.responses._
import scala.concurrent.Future
class IndexSettingsIntegrationTest extends AlgoliaTest {
val indexToChangeSettings: String = getTestIndexName("indexToChangeSettings")
after {
clearIndices(indexToChangeSettings)
}
it("should get settings") {
val create = AlgoliaTest.client.execute {
batch(
index into indexToChangeSettings `object` ObjectToGet("1", "toto")
)
}
taskShouldBeCreatedAndWaitForIt(create, indexToChangeSettings)
val request: Future[IndexSettings] = AlgoliaTest.client.execute {
settings of indexToChangeSettings
}
whenReady(request) { result =>
result shouldBe a[IndexSettings] // just checking it deserializes
}
}
it("should set settings") {
val change: Future[Task] = AlgoliaTest.client.execute {
setSettings of indexToChangeSettings `with` IndexSettings(
searchableAttributes = Some(Seq(SearchableAttributes.attribute("att")))
)
}
taskShouldBeCreatedAndWaitForIt(change, indexToChangeSettings)
val request: Future[IndexSettings] = AlgoliaTest.client.execute {
settings of indexToChangeSettings
}
whenReady(request) { result =>
result.searchableAttributes should be(
Some(Seq(SearchableAttributes.attribute("att")))
)
}
}
}
|
algolia/algoliasearch-client-scala
|
src/test/scala/algolia/integration/IndexSettingsIntegrationTest.scala
|
Scala
|
mit
| 2,640
|
/*
* Copyright (c) 2021 Couchbase, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.couchbase.spark
/**
* The keyspace reflects a triple/coordinate of bucket, scope and collection.
*
* Note that not all APIs need all three values to be set. Depending on the context where the keyspace is used or
* the type of service (e.g. kv vs. query), it might be sufficient to only provide a subset. See the individual semantics
* for each operation if in doubt.
*
* @param bucket the bucket name, if present.
* @param scope the scope name, if present.
* @param collection the collection name, if present.
*/
case class Keyspace(bucket: Option[String] = None, scope: Option[String] = None, collection: Option[String] = None) {
def isEmpty: Boolean = bucket.isEmpty && scope.isEmpty && collection.isEmpty
}
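/**
 * Editor's illustrative sketch, NOT part of the original file: shows that the case class above
 * supports partially-specified coordinates. The bucket/scope/collection names are made up.
 */
object KeyspaceUsageSketch {
  // A KV-style operation will typically name all three coordinates...
  val full: Keyspace =
    Keyspace(bucket = Some("travel-sample"), scope = Some("inventory"), collection = Some("airline"))
  // ...while a bucket-level query may leave scope and collection unset.
  val bucketOnly: Keyspace = Keyspace(bucket = Some("travel-sample"))
  require(!bucketOnly.isEmpty && Keyspace().isEmpty)
}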
|
couchbaselabs/couchbase-spark-connector
|
src/main/scala/com/couchbase/spark/Keyspace.scala
|
Scala
|
apache-2.0
| 1,336
|
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.partitioner
import com.mongodb.casbah.Imports._
import com.mongodb.{MongoCredential, ServerAddress}
import com.stratio.datasource.mongodb.client.MongodbClientFactory
import com.stratio.datasource.mongodb.client.MongodbClientFactory.Client
import com.stratio.datasource.mongodb.config.{MongodbSSLOptions, MongodbCredentials, MongodbConfig}
import com.stratio.datasource.mongodb.partitioner.MongodbPartitioner._
import com.stratio.datasource.partitioner.{PartitionRange, Partitioner}
import com.stratio.datasource.util.Config
import scala.util.Try
/**
* @param config Partition configuration
*/
class MongodbPartitioner(config: Config) extends Partitioner[MongodbPartition] {
@transient private val hosts: List[ServerAddress] =
config[List[String]](MongodbConfig.Host)
.map(add => new ServerAddress(add))
@transient private val credentials: List[MongoCredential] =
config.getOrElse[List[MongodbCredentials]](MongodbConfig.Credentials, MongodbConfig.DefaultCredentials).map {
case MongodbCredentials(user, database, password) =>
MongoCredential.createCredential(user, database, password)
}
@transient private val ssloptions: Option[MongodbSSLOptions] =
config.get[MongodbSSLOptions](MongodbConfig.SSLOptions)
private val clientOptions = config.properties //config.properties.filterKeys(_.contains(MongodbConfig.ListMongoClientOptions)) // TODO review this Map. Can't filter keys
private val databaseName: String = config(MongodbConfig.Database)
private val collectionName: String = config(MongodbConfig.Collection)
private val collectionFullName: String = s"$databaseName.$collectionName"
private val connectionsTime = config.get[String](MongodbConfig.ConnectionsTime).map(_.toLong)
private val cursorBatchSize = config.getOrElse[Int](MongodbConfig.CursorBatchSize, MongodbConfig.DefaultCursorBatchSize)
override def computePartitions(): Array[MongodbPartition] = {
val mongoClient = MongodbClientFactory.getClient(hosts, credentials, ssloptions, clientOptions)
val result = if (isShardedCollection(mongoClient.clientConnection))
computeShardedChunkPartitions(mongoClient.clientConnection)
else
computeNotShardedPartitions(mongoClient.clientConnection)
result
}
/**
* @return Whether this is a sharded collection or not
*/
protected def isShardedCollection(mongoClient: Client): Boolean = {
val collection = mongoClient(databaseName)(collectionName)
val isSharded = collection.stats.ok && collection.stats.getBoolean("sharded", false)
isSharded
}
/**
* @return MongoDB partitions as sharded chunks.
*/
protected def computeShardedChunkPartitions(mongoClient: Client): Array[MongodbPartition] = {
val partitions = Try {
val chunksCollection = mongoClient(ConfigDatabase)(ChunksCollection)
val dbCursor = chunksCollection.find(MongoDBObject("ns" -> collectionFullName))
val shards = describeShardsMap(mongoClient)
val partitions = dbCursor.zipWithIndex.map {
case (chunk: DBObject, i: Int) =>
val lowerBound = chunk.getAs[DBObject]("min")
val upperBound = chunk.getAs[DBObject]("max")
val hosts: Seq[String] = (for {
shard <- chunk.getAs[String]("shard")
hosts <- shards.get(shard)
} yield hosts).getOrElse(Seq[String]())
MongodbPartition(i,
hosts,
PartitionRange(lowerBound, upperBound))
}.toArray
dbCursor.close()
partitions
}.recover {
case _: Exception =>
val serverAddressList: Seq[String] = mongoClient.allAddress.map {
server => server.getHost + ":" + server.getPort
}.toSeq
Array(MongodbPartition(0, serverAddressList, PartitionRange(None, None)))
}.get
partitions
}
/**
* @return Array of not-sharded MongoDB partitions.
*/
protected def computeNotShardedPartitions(mongoClient: Client): Array[MongodbPartition] = {
val ranges = splitRanges(mongoClient)
val serverAddressList: Seq[String] = mongoClient.allAddress.map {
server => server.getHost + ":" + server.getPort
}.toSeq
val partitions: Array[MongodbPartition] = ranges.zipWithIndex.map {
case ((previous: Option[DBObject], current: Option[DBObject]), i) =>
MongodbPartition(i,
serverAddressList,
PartitionRange(previous, current))
}.toArray
partitions
}
/**
* @return A sequence of minimum and maximum DBObject in range.
*/
protected def splitRanges(mongoClient: Client): Seq[(Option[DBObject], Option[DBObject])] = {
val cmd: MongoDBObject = MongoDBObject(
"splitVector" -> collectionFullName,
"keyPattern" -> MongoDBObject(config.getOrElse(MongodbConfig.SplitKey, MongodbConfig.DefaultSplitKey) -> 1),
"force" -> false,
"maxChunkSize" -> config.getOrElse(MongodbConfig.SplitSize, MongodbConfig.DefaultSplitSize)
)
val ranges = Try {
val data = mongoClient("admin").command(cmd)
val splitKeys = data.as[List[DBObject]]("splitKeys").map(Option(_))
val ranges = (None +: splitKeys) zip (splitKeys :+ None)
ranges.toSeq
}.recover {
case _: Exception =>
val stats = mongoClient(databaseName)(collectionName).stats
val shards = mongoClient(ConfigDatabase)(ShardsCollection)
.find(MongoDBObject("_id" -> stats.getString("primary"))).batchSize(cursorBatchSize)
val shard = shards.next()
val shardHost: String = shard.as[String]("host").replace(shard.get("_id") + "/", "")
val shardClient = MongodbClientFactory.getClient(shardHost)
val data = shardClient.clientConnection.getDB("admin").command(cmd)
val splitKeys = data.as[List[DBObject]]("splitKeys").map(Option(_))
val ranges = (None +: splitKeys) zip (splitKeys :+ None)
shards.close()
MongodbClientFactory.setFreeConnectionByKey(shardClient.key, connectionsTime)
ranges.toSeq
}.getOrElse(Seq((None, None)))
ranges
}
/**
* @return Map of shards.
*/
protected def describeShardsMap(mongoClient: Client): Map[String, Seq[String]] = {
val shardsCollection = mongoClient(ConfigDatabase)(ShardsCollection)
val shardsFind = shardsCollection.find()
val shards = shardsFind.map { shard =>
val hosts: Seq[String] = shard.getAs[String]("host")
.fold(ifEmpty = Seq[String]())(_.split(",").map(_.split("/").reverse.head).toSeq)
(shard.as[String]("_id"), hosts)
}.toMap
shardsFind.close()
shards
}
}
object MongodbPartitioner {
val ConfigDatabase = "config"
val ChunksCollection = "chunks"
val ShardsCollection = "shards"
}
|
pmadrigal/spark-mongodb
|
spark-mongodb/src/main/scala/com/stratio/datasource/mongodb/partitioner/MongodbPartitioner.scala
|
Scala
|
apache-2.0
| 7,372
|
package org.lolhens.minechanics.core.storageaccess
import scala.language.dynamics
trait StorageAccess extends Dynamic {
final def selectDynamic(name: String): StorageAccess = {
if (name.matches("_\\d+"))
apply(name.substring(1).toInt)
else
apply(name)
}
def apply(i: Int): StorageAccess = apply(String.valueOf(i))
def apply(i: String): StorageAccess
def foreach(f: (StorageAccess) => Unit)
def map[B](f: (Any) => B): Iterable[B]
def getStringValue: String
def getNumberValue: Number
def getDoubleValue: Double = getNumberValue.doubleValue
def getFloatValue: Float = getNumberValue.floatValue
def getIntegerValue: Integer = getNumberValue.intValue
def getLongValue: Long = getNumberValue.longValue
def getByteValue: Byte = getNumberValue.byteValue
def getShortValue: Short = getNumberValue.shortValue
def getBooleanValue: Boolean = getNumberValue.intValue != 0
def get: Any
def isValid: Boolean
def fromAny(any: Any): StorageAccess
}
object StorageAccess {
implicit def getStringValue(storageAccess: StorageAccess): String = storageAccess.getStringValue
implicit def getDoubleValue(storageAccess: StorageAccess): Double = storageAccess.getDoubleValue
implicit def getFloatValue(storageAccess: StorageAccess): Float = storageAccess.getFloatValue
implicit def getIntegerValue(storageAccess: StorageAccess): Integer = storageAccess.getIntegerValue
implicit def getLongValue(storageAccess: StorageAccess): Long = storageAccess.getLongValue
implicit def getByteValue(storageAccess: StorageAccess): Byte = storageAccess.getByteValue
implicit def getShortValue(storageAccess: StorageAccess): Short = storageAccess.getShortValue
implicit def getBooleanValue(storageAccess: StorageAccess): Boolean = storageAccess.getBooleanValue
}
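// Illustrative usage sketch (added comment, not part of the original file), assuming a
// hypothetical implementation `NbtStorageAccess` backed by nested maps and lists.
// The Dynamic trait turns field access into `selectDynamic`, and names of the form `_N`
// are routed to the Int-indexed `apply`:
//
//   val root: StorageAccess = NbtStorageAccess(data)   // hypothetical constructor
//   val name: String = root.players._0.name            // selectDynamic("players"), apply(0), selectDynamic("name")
//   val level: Int   = root.players._0.level.getIntegerValue
//
// The implicit conversions in the companion object make the final getter call optional
// wherever a String or other primitive value is expected.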
|
LolHens/Minechanics
|
src/main/scala/org/lolhens/minechanics/core/storageaccess/StorageAccess.scala
|
Scala
|
gpl-2.0
| 1,819
|
package controllers.alertwatcherxr
import javax.inject._
import play.api._
import play.api.mvc._
import play.api.data.Form
import play.api.data.Forms._
import play.api.data._
import models.alertwatcherxr.Alert
import play.api.i18n.Messages
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import services.alertwatcherxr.IAlertWatcherXRService
import play.Application
import utils.Awaits
import org.joda.time.DateTime
import org.joda.time.LocalDate
import java.sql.Timestamp
import com.github.tototoshi.slick.PostgresJodaSupport._
import play.api.libs.iteratee.Enumerator
import reports.ReportBuilder
import play.api.Configuration
@Singleton
class AlertWatcherXRController @Inject() (
val messagesApi: MessagesApi,
val applicationconf: Configuration,
val service: IAlertWatcherXRService) extends Controller with I18nSupport {
val alertForm: Form[Alert] = Form(
mapping(
"id" -> longNumber,
"siteid" -> text,
"sitename" -> text,
"alertlevel" -> number,
"receivedtime" -> jodaLocalDate,
"occurredtime" -> jodaDate,
"alertsource" -> text,
"alertcomment" -> text,
"alertmessage" -> text,
"alertfilename" -> optional(text),
"alertcode" -> optional(text),
"comment" -> optional(text),
"actionid" -> number,
"zoneid" -> number,
"subzoneid" -> number,
// lastmodifiedtime: Datetime,
// lastmodifier: String,
// modifiedtimestamp: Timestamp,
"registeredhost" -> text,
"zonename" -> text,
"subzonename" -> text,
"equipmentname" -> text
)(models.alertwatcherxr.Alert.apply)(models.alertwatcherxr.Alert.unapply _))
def index = Action { implicit request =>
Logger.info("/alertwatcher -> AlertWatcherController index called.")
val alerts = Awaits.get(5, service.findAll()).getOrElse(Seq())
// val alerts = Awaits.get(5, service.findById(29646882)).getOrElse(Seq())
Ok(views.html.alertwatcherxr.alert_index(alerts))
}
def blank = Action { implicit request =>
Logger.info("blank called. ")
Ok(views.html.alertwatcherxr.alert_details(0, alertForm))
}
def details(id: Long) = Action { implicit request =>
Logger.info("details called. siteid: " + id)
val alert = Awaits.get(5, service.findById(id)).get
Ok(views.html.alertwatcherxr.alert_details(id, alertForm.fill(alert)))
}
def detailsbysite(siteid: String) = Action { implicit request =>
Logger.info("details called. siteid: " + siteid)
val alerts = Awaits.get(5, service.findBySiteId(siteid)).getOrElse(Seq())
Ok(views.html.alertwatcherxr.alert_detailsbysite(alerts))
}
def insert() = Action { implicit request =>
Logger.info("insert called.")
alertForm.bindFromRequest.fold(
form => {
BadRequest(views.html.alertwatcherxr.alert_details(0, form))
},
alert => {
service.insert(alert)
Redirect(controllers.alertwatcherxr.routes.AlertWatcherXRController.index)
.flashing("success" -> Messages("success.insert", "new alert created"))
})
}
def update(id: Long) = Action { implicit request =>
Logger.info("updated called. id: " + id)
alertForm.bindFromRequest.fold(
form => {
Ok(views.html.alertwatcherxr.alert_details(0, form))
.flashing("error" -> "Fix the errors!")
},
alert => {
service.update(id, alert)
Redirect(controllers.alertwatcherxr.routes.AlertWatcherXRController.index)
.flashing("success" -> Messages("success.update", alert.sitename))
})
}
def remove(id: Long) = Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val result = Awaits.get(5, service.findById(id))
result.map { alert =>
service.remove(id)
Redirect(controllers.alertwatcherxr.routes.AlertWatcherXRController.index)
.flashing("success" -> Messages("success.delete", alert.sitename))
}.getOrElse(NotFound)
}
def report() = Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val url = applicationconf.getString("slick.dbs.AlertWatcherXR.db.url").getOrElse("None")
Ok.chunked( Enumerator.fromStream( ReportBuilder.toPdf("AlertWatcherXR.jrxml", url) ) )
.withHeaders(CONTENT_TYPE -> "application/octet-stream")
.withHeaders(CONTENT_DISPOSITION -> "attachment; filename=alertwatcher-xrinfo.pdf"
)
}
}
|
tnddn/iv-web
|
portal/rest-portal/app/controllers/alertwatcherxr/AlertWatcherXRController.scala
|
Scala
|
apache-2.0
| 4,487
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.mongodb
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreBehavior
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MongoDBArtifactStoreTests extends FlatSpec with MongoDBStoreBehaviorBase with ArtifactStoreBehavior {}
|
style95/openwhisk
|
tests/src/test/scala/org/apache/openwhisk/core/database/mongodb/MongoDBArtifactStoreTests.scala
|
Scala
|
apache-2.0
| 1,175
|
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.spark
import com.cloudera.hue.livy.LivyConf
class SparkProcessBuilderFactory(val livyConf: LivyConf, userConfigurableOptions: Set[String]) {
def this(livyConf: LivyConf) = {
this(livyConf, Set())
}
def builder() = {
new SparkProcessBuilder(livyConf, userConfigurableOptions)
}
}
|
MobinRanjbar/hue
|
apps/spark/java/livy-spark/src/main/scala/com/cloudera/hue/livy/spark/SparkProcessBuilderFactory.scala
|
Scala
|
apache-2.0
| 1,120
|
package dispatch.oauth
import dispatch._
import scala.concurrent.{Future,ExecutionContext}
import com.ning.http.client.oauth._
trait SomeHttp {
def http: HttpExecutor
}
trait SomeConsumer {
def consumer: ConsumerKey
}
trait SomeEndpoints {
def requestToken: String
def accessToken: String
def authorize: String
}
trait SomeCallback {
def callback: String
}
trait Exchange {
self: SomeHttp
with SomeConsumer
with SomeCallback
with SomeEndpoints =>
private val random = new java.util.Random(System.identityHashCode(this) +
System.currentTimeMillis)
private val nonceBuffer = Array.fill[Byte](16)(0)
def generateNonce = nonceBuffer.synchronized {
random.nextBytes(nonceBuffer)
com.ning.http.util.Base64.encode(nonceBuffer)
}
def message[A](promised: Future[A], ctx: String)
(implicit executor: ExecutionContext) =
for (exc <- promised.either.left)
yield "Unexpected problem fetching %s:\\n%s".format(ctx, exc.getMessage)
def fetchRequestToken(implicit executor: ExecutionContext)
: Future[Either[String,RequestToken]] = {
val promised = http(
url(requestToken)
<< Map("oauth_callback" -> callback)
<@ (consumer)
> as.oauth.Token
)
for (eth <- message(promised, "request token")) yield eth.joinRight
}
def signedAuthorize(reqToken: RequestToken) = {
import com.ning.http.client.FluentStringsMap
val calc = new OAuthSignatureCalculator(consumer, reqToken)
val timestamp = System.currentTimeMillis() / 1000L
val unsigned = url(authorize) <<? Map("oauth_token" -> reqToken.getKey)
val sig = calc.calculateSignature("GET",
unsigned.url,
timestamp,
generateNonce,
new FluentStringsMap,
new FluentStringsMap)
(unsigned <<? Map("oauth_signature" -> sig)).url
}
def fetchAccessToken(reqToken: RequestToken, verifier: String)
(implicit executor: ExecutionContext)
: Future[Either[String,RequestToken]] = {
val promised = http(
url(accessToken)
<< Map("oauth_verifier" -> verifier)
<@ (consumer, reqToken)
> as.oauth.Token
)
for (eth <- message(promised, "access token")) yield eth.joinRight
}
}
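// Illustrative sketch (added comment, not part of the original file): Exchange is meant to be
// mixed with the Some* traits above (cake pattern). A hypothetical three-legged flow could be
// wired up like this:
//
//   object MyExchange extends Exchange
//     with SomeHttp with SomeConsumer with SomeCallback with SomeEndpoints {
//     val http = Http.default                                  // name assumed; depends on the dispatch version
//     val consumer = new ConsumerKey("consumerKey", "consumerSecret")
//     val callback = "http://localhost:8080/callback"
//     val requestToken = "https://api.example.com/oauth/request_token"
//     val accessToken  = "https://api.example.com/oauth/access_token"
//     val authorize    = "https://api.example.com/oauth/authorize"
//   }
//
//   // 1. fetchRequestToken, 2. redirect the user to signedAuthorize(token),
//   // 3. fetchAccessToken(token, verifier) once the verifier comes back.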
|
kkirsche/reboot
|
core/src/main/scala/oauth/exchange.scala
|
Scala
|
lgpl-3.0
| 2,448
|
package org.openmole.gui.plugin.task.systemexec.client
/*
* Copyright (C) 19/10/2014 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import org.openmole.gui.client.core.dataui.TaskDataUI
import org.openmole.gui.ext.dataui.FactoryUI
import scala.scalajs.js.annotation.JSExport
@JSExport("org.openmole.gui.plugin.task.systemexec.client.SystemExecTaskFactoryUI")
sealed class SystemExecTaskFactoryUI extends FactoryUI {
type DATAUI = TaskDataUI
def dataUI = new SystemExecTaskDataUI
val name = "External"
}
|
ISCPIF/PSEExperiments
|
openmole-src/openmole/gui/plugins/org.openmole.gui.plugin.task.systemexec.client/src/main/scala/org/openmole/gui/plugin/task/systemexec/client/SystemExecTaskFactoryUI.scala
|
Scala
|
agpl-3.0
| 1,168
|
package stainless
package verification
import CoqEncoder._
import CoqExpression._
object optAdmitAll extends inox.FlagOptionDef("admit-all", false)
trait CoqEncoder {
given givenDebugSection: DebugSectionCoq.type = DebugSectionCoq
val p: StainlessProgram
val ctx: inox.Context
val st: stainless.trees.type = stainless.trees
import st._
import p.symbols.{given, _}
import p.symbols.CallGraphOrderings._
// collect the types for which we have no definitions
// unused for now
var undefinedTypes = Set[Type]()
// to give unique names to the arguments we add for preconditions
var i = 0
val hypName = "contractHyp"
val initTactic = CoqIdentifier(FreshIdentifier("t"))
var lastTactic: CoqExpression = idtac
var mainTactic: CoqIdentifier = initTactic
var rewriteTactic: CoqExpression = idtac
//TODO use make fresh uniformly
def freshId(): CoqIdentifier = {
i += 1
CoqIdentifier(FreshIdentifier(hypName + i))
}
// ignore flags with an explicit warning
def ignoreFlags(s: String, flags: Seq[Flag]) = {
//if (!flags.isEmpty)
//ctx.reporter.warning(s"Coq translation ignored flags for $s:\n" + flags.mkString(", ") + "\n")
}
def freePatterns(p: Pattern): Boolean = {
p match {
case WildcardPattern(_) => true
case TuplePattern(_, es) => es forall freePatterns
case _ => false
}
}
def isExhaustive(scrut: Expr, cases: Seq[MatchCase]): Boolean = {
val tpe: Type = scrut.getType
tpe match {
case adt @ ADTType(_, _) => {
val ctorsIds: Seq[Identifier] = adt.getSort.constructors map (_.id)
//non guarded matches without sub patterns
val unguardedADTs: Seq[Identifier] =
cases collect {
case MatchCase(ADTPattern(_, id, _, subPatterns), guard, _)
if subPatterns forall freePatterns => id
}
cases.forall {case MatchCase(_,g,_) => g.isEmpty} &&
(ctorsIds forall (unguardedADTs contains _))
}
case _ => false
}
}
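// Worked example (added comment, not part of the original source): for a sealed List sort
// with constructors Nil() and Cons(h, t),
//   `l match { case Nil() => a  case Cons(x, xs) => b }`       is exhaustive here
//   (every constructor appears, no guards, only free sub-patterns), while
//   `l match { case Cons(x, Cons(y, ys)) => a  case _ => b }`  is not, because the nested
//   Cons sub-pattern is not a free pattern; such matches are lowered via matchToIfThenElse
//   before translation.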
// transform a Stainless expression into a Coq expression
def transformTree(t: st.Expr): CoqExpression = t match {
case MatchExpr(scrut, cases) =>
if(isExhaustive(scrut, cases))
CoqMatch(transformTree(scrut), cases map makeFunctionCase)
else
transformTree(matchToIfThenElse(t, false))
case IfExpr(cond, thenn, elze) =>
IfThenElse(
transformTree(cond),
transformType(t.getType),
CoqLambda(coqUnused, transformTree(thenn)),
CoqLambda(coqUnused, transformTree(elze))
)
case Variable(id,tpe,flags) =>
ignoreFlags(t.toString, flags)
makeFresh(id)
case ADT(id, targs, args) =>
Constructor(constructorIdentifier(id), targs.map(transformType) ++ args.map(transformTree))
case FunctionInvocation(id, targs, args) =>
val allArgs = targs.map(transformType) ++ args.map(transformTree)
val allArgs_and_hyp =
if (exprOps.preconditionOf(p.symbols.functions(id).fullBody).isEmpty)
allArgs
else
allArgs :+ CoqUnknown
if (exprOps.postconditionOf(p.symbols.functions(id).fullBody).isEmpty)
CoqApplication(makeFresh(id), allArgs_and_hyp)
else
proj1_sig(CoqApplication(makeFresh(id), allArgs_and_hyp))
case Application(t, ts) =>
CoqApplication(transformTree(t), ts.map(transformTree))
case FiniteSet(args,tpe) =>
CoqFiniteSet(args map transformTree, transformType(tpe))
case SetUnion(t1,t2) => CoqSetUnion(transformTree(t1), transformTree(t2))
case SetIntersection(t1,t2) => CoqSetIntersection(transformTree(t1), transformTree(t2))
case SetDifference(t1,t2) => CoqSetDifference(transformTree(t1), transformTree(t2))
case SubsetOf(t1,t2 ) => CoqSetSubset(transformTree(t1), transformTree(t2))
case ElementOfSet(t1,t2) => CoqBelongs(transformTree(t1), transformTree(t2))
case Or(ts) => Orb(ts map transformTree)
case And(ts) => Andb(ts map transformTree)
case Not(t) => Negb(transformTree(t))
case Implies(t1,t2) => implb(transformTree(t1), transformTree(t2))
case Equals(t1,t2) if (t1.getType == IntegerType()) =>
CoqApplication(CoqLibraryConstant("Zeq_bool"), Seq(transformTree(t1), transformTree(t2)))
case Equals(t1,t2) if (t1.getType == BooleanType()) =>
CoqApplication(CoqLibraryConstant("Bool.eqb"), Seq(transformTree(t1), transformTree(t2)))
case Equals(t1,t2) if t1.getType.isInstanceOf[SetType] =>
CoqSetEquals(transformTree(t1),transformTree(t2))
case Equals(t1,t2) =>
ctx.reporter.warning(s"Equality for type ${t1.getType} got translated to equality in Coq") //remove warning for lists and other cases where this is on purpose
propInBool(CoqEquals(transformTree(t1),transformTree(t2)))
case BooleanLiteral(true) => trueBoolean
case BooleanLiteral(false) => falseBoolean
case ADTSelector(adt, selector) =>
adt.getType match {
case ADTType(_,args) =>
val typeParameters = args.map(transformType)
CoqApplication(makeFresh(selector), typeParameters :+ transformTree(adt))
case _ =>
ctx.reporter.fatalError(s"The translation to Coq failed because $adt does not have an ADT type but ${adt.getType}.")
}
case Forall(args, body) =>
val params = args.map { case vd@ValDef(id,tpe,flags) =>
ignoreFlags(vd.toString, flags)
(makeFresh(id), transformType(tpe))
}
CoqForall(params, CoqEquals(transformTree(body),trueBoolean))
case Annotated(body, flags) =>
ignoreFlags(t.toString, flags)
transformTree(body)
case Let(vd, value, body) =>
//without type
CoqLet(makeFresh(vd.id), None, transformTree(value), transformTree(body))
case Lambda(vds, body) =>
vds.foldRight(transformTree(body))((a,b) => CoqLambda(makeFresh(a.id), b) )
//Integer operations
case UMinus(e) => CoqApplication(CoqLibraryConstant("Z.opp"), Seq(transformTree(e)))
case GreaterEquals(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.geb"), Seq(transformTree(e1), transformTree(e2)))
case GreaterThan(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.gtb"), Seq(transformTree(e1), transformTree(e2)))
case LessEquals(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.leb"), Seq(transformTree(e1), transformTree(e2)))
case LessThan(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.ltb"), Seq(transformTree(e1), transformTree(e2)))
case Plus(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.add"), Seq(transformTree(e1), transformTree(e2)))
case Minus(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.sub"), Seq(transformTree(e1), transformTree(e2)))
case Times(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.mul"), Seq(transformTree(e1), transformTree(e2)))
case Division(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.div"), Seq(transformTree(e1), transformTree(e2)))
case Modulo(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.modulo"), Seq(transformTree(e1), transformTree(e2)))
case Remainder(e1,e2) =>
CoqApplication(CoqLibraryConstant("Z.rem"), Seq(transformTree(e1), transformTree(e2)))
case IntegerLiteral(i: BigInt) =>
CoqZNum(i)
case bvl @ BVLiteral(_, _, _) => CoqZNum(bvl.toBigInt)
case Tuple(es) =>
CoqTuple(es.map(transformTree))
case TupleSelect(tuple, idx) =>
tuple.getType match {
case tpe @ TupleType(_) =>
if (idx == 1)
(1 to tpe.dimension-idx).foldRight(transformTree(tuple)) {(idx, body) => fst(body)}
else
snd((1 to tpe.dimension-idx).foldRight(transformTree(tuple)) {(idx, body) => fst(body)})
case _ => ctx.reporter.fatalError("Tuple matching with incorrect type")
}
case IsConstructor(expr, id) =>
CoqApplication(recognizer(id), getTParams(getConstructor(id)).map(_ => CoqUnknown) ++ Seq(transformTree(expr)))
case Error(tpe, desc) => deriveContradiction //TODO is it ok?
case Assume(pred, body) =>
CoqLet(makeFresh("assumption"), None, magic(CoqEquals(transformTree(pred), trueBoolean)), transformTree(body) )
case Assert(pred,_,body ) =>
CoqLet(makeFresh("assertion"), Some(CoqEquals(transformTree(pred), trueBoolean)), CoqUnknown, transformTree(body) )
case _ =>
ctx.reporter.warning(s"The translation to Coq does not support expression `${t.getClass}` yet: $t.")
magic(transformType(t.getType))
}
// creates a case for a match expression
def makeFunctionCase(mc: MatchCase): CoqCase = mc match {
case MatchCase(pattern, None, rhs) =>
CoqCase(transformPattern(pattern), transformTree(rhs))
case MatchCase(pattern, _, rhs) =>
ctx.reporter.warning(s"Guard in match cases are not supported by the Coq translation yet:\n$mc.")
ctx.reporter.warning(s"This guard was ignored during the translation.")
CoqCase(transformPattern(pattern), transformTree(rhs))
}
// transform patterns that appear in match cases
def transformPattern(p: Pattern): CoqPattern = p match {
case a@ADTPattern(binder, id, _, subPatterns) =>
for (bind <- binder) {
ctx.reporter.warning(s"Binder $bind in pattern $a is ignored.")
}
val unusedTypeParameters = (1 to getTParams(constructors(id)).size).map(_ => VariablePattern(None))
InductiveTypePattern(constructorIdentifier(id), unusedTypeParameters ++ subPatterns.map(transformPattern))
case WildcardPattern(None) => VariablePattern(None)
case WildcardPattern(Some(ValDef(id,tpe,flags))) =>
ignoreFlags(p.toString, flags)
ctx.reporter.warning(s"Ignoring type $tpe in the wildcard pattern $p.")
VariablePattern(Some(makeFresh(id)))
case TuplePattern(None, ps) => CoqTuplePattern(ps.map(transformPattern))
case TuplePattern(Some(ValDef(id,tpe,flags)), ps) =>
ignoreFlags(p.toString, flags)
ctx.reporter.warning(s"Ignoring type $tpe in the wildcard pattern $p.")
//TODO not tested
CoqTuplePatternVd(ps.map(transformPattern), VariablePattern(Some(makeFresh(id))))
case _ => ctx.reporter.fatalError(s"Coq does not support patterns such as `$p` (${p.getClass}) yet.")
}
// transforms an ADT into an inductive type
def transformADT(a: st.ADTSort): CoqCommand = {
// ignoreFlags(a.toString, a.flags)
InductiveDefinition(
makeFresh(a.id),
a.tparams.map { case p => (CoqIdentifier(p.id), TypeSort) },
a.constructors.map(c => makeCase(a, c))
) $
(if (a.constructors.size > 1) {
buildRecognizers(a) $
buildExistsCreators(a) $
buildSubTypes(a) $
buildAdtTactic(a)
} else
NoCommand) $
buildAccessorsForChildren(a)
}
// Define for each constructor of an ADT a function that identifies such elements
def buildRecognizers(a: ADTSort): CoqCommand = {
manyCommands(a.constructors.map(c => buildRecognizer(a, c)))
}
// Define a function that identifies the case of an element of an inductive type
// and checks that the invariant holds
def buildRecognizer(root: ADTSort, constructor: ADTConstructor): CoqCommand = {
val element = rawIdentifier("src")
val tparams = getTParams(constructor).map(t => CoqIdentifier(t.id))
val extraCase =
if (root.constructors.size > 1) {
Some(CoqCase(VariablePattern(None), falseBoolean))
}
else
None
NormalDefinition(
recognizer(constructor.id),
getTParams(constructor).map { case p => (CoqIdentifier(p.id), TypeSort) } ++
Seq((element, Constructor(makeFresh(root.id), tparams))),
CoqBool,
CoqMatch(element, Seq(
CoqCase(
{
val unusedTypeParameters = (1 to getTParams(constructor).size).map(_ => VariablePattern(None))
val unusedFields = (1 to constructor.fields.size).map(_ => VariablePattern(None))
InductiveTypePattern(constructorIdentifier(constructor.id), unusedTypeParameters ++ unusedFields)
},
trueBoolean
)) ++ extraCase
)
) $
RawCommand(s"#[export] Hint Unfold ${recognizer(constructor.id).coqString}: recognizers.\n")
}
def buildExistsCreators(a: ADTSort): CoqCommand =
manyCommands(a.constructors.map(c => buildExistsCreator(c)))
def buildExistsCreator(ctor: ADTConstructor): CoqCommand = {
val self = makeFresh("self")
val tParams = getTParams(ctor).map(tp => CoqIdentifier(tp.id))
val varTypes: Seq[CoqExpression] = ctor.fields.map(vd => transformType(vd.tpe))
val varNames: Seq[CoqIdentifier] = varTypes map (_ => makeFresh())
val existsExpr = CoqExists(varNames zip varTypes, CoqEquals(CoqApplication(constructorIdentifier(ctor.id), tParams ++ varNames), self))
val impl = BiArrow(
CoqEquals(trueBoolean, CoqApplication(recognizer(ctor.id), tParams :+ self )),
existsExpr
)
val body = CoqForall(
Seq((self, CoqApplication(CoqIdentifier(ctor.sort), tParams))) ++ tParams.map(tp => (tp, TypeSort)),
impl)
CoqLemma(existsCreatorName(ctor.id), body, RawCommand(s"repeat ${mainTactic.coqString} || eauto."))
}
def existsCreatorName(id: Identifier): CoqIdentifier = {
CoqIdentifier(new Identifier(id.name + "_exists", id.id, id.globalId))
}
def buildSubTypes(a: ADTSort): CoqCommand =
manyCommands(a.constructors.map(c => buildSubType(a, c)))
def buildSubType(root: ADTSort, constructor: ADTConstructor): CoqCommand = {
val ttparams = root.tparams.map(p => (CoqIdentifier(p.id), TypeSort))
val tparams = root.tparams.map(t => CoqIdentifier(t.id))
val element = rawIdentifier("src")
NormalDefinition(
refinedIdentifier(constructor.id),
ttparams,
TypeSort,
Refinement(
element,
CoqApplication(makeFresh(root.id), tparams),
CoqApplication(recognizer(constructor.id), tparams :+ element) === trueBoolean
)
) $
RawCommand(s"#[export] Hint Unfold ${refinedIdentifier(constructor.id).coqString}: refinements.\n")
}
def buildAccessorsForChildren(a: ADTSort): CoqCommand =
manyCommands(a.constructors.map(c => buildAccessors(a, c)))
def buildAccessors(root: ADTSort, constructor: ADTConstructor): CoqCommand = {
manyCommands(constructor.fields.zipWithIndex.map {
case (ValDef(id,tpe,flags), i) =>
buildAccessor(id, tpe, i, constructor.fields.size, root, constructor)
})
}
def buildAccessor(id: Identifier, tpe: Type, i: Int, n: Int, root: ADTSort, constructor: ADTConstructor): CoqCommand = {
val element = rawIdentifier("src")
val extraCase =
if (root.constructors.size > 1)
Some(CoqCase(VariablePattern(None), deriveContradiction))
else
None
val tparams = root.tparams.map { case p => (CoqIdentifier(p.id), TypeSort) }
val refid = if (root.constructors.size > 1)
refinedIdentifier(constructor.id)
else
CoqIdentifier (root.id)
NormalDefinition(
makeFresh(id),
tparams ++
Seq(((element, CoqApplication(refid, root.tparams.map(t => CoqIdentifier(t.id)))))),
transformType(tpe),
CoqMatch(element,
Seq(
CoqCase(
{
val unusedTypeParameters = (1 to getTParams(constructor).size).map(_ => VariablePattern(None))
val fields = (0 to n-1).map(i => VariablePattern(Some(rawIdentifier("f" + i))))
InductiveTypePattern(constructorIdentifier(constructor.id), unusedTypeParameters ++ fields)
},
rawIdentifier("f" + i)
)
) ++ extraCase
)
)
}
// creates a case for an inductive type
def makeCase(root: Definition, a: ADTConstructor) = {
val fieldsTypes = a.fields.map(vd => transformType(vd.tpe))
val arrowType = fieldsTypes.foldRight[CoqExpression](
Constructor(makeFresh(root.id), getTParams(a).map(t => CoqIdentifier(t.id)))) // the inductive type
{ case (field, acc) => Arrow(field, acc) } // the parameters of the constructor
InductiveCase(constructorIdentifier(a.id), arrowType)
}
def buildAdtTactic(sort: ADTSort): CoqCommand = {
val newTactic = makeFresh(s"${sort.id.name}_tactic")
val prevTactic = lastTactic
lastTactic = newTactic
CoqMatchTactic(newTactic,
sort.constructors.flatMap(con => makeTacticCases(con)) :+ CoqCase(VariablePattern(None), prevTactic)
) $
updateObligationTactic()
}
def updateObligationTactic() : CoqCommand = {
val t1 = makeFresh("t")
val t = makeFresh("t")
mainTactic = t
RawCommand(s"""Ltac ${t1.coqString} :=
| t_base ||
| ${lastTactic.coqString} ||
| slow ||
| ifthenelse_step ||
| rewrite_ifthenelse ||
| destruct_ifthenelse ||
| (progress autorewrite with libCase in *) ||
| autounfold with definitions in *.""".stripMargin) $
RawCommand(s"""Ltac ${t.coqString} :=
| ${t1.coqString} ||
| ${rewriteTactic.coqString} ||
| autounfold with recognizers in *.""".stripMargin) $
RawCommand(s"\nObligation Tactic := repeat ${t.coqString}.\n")
}
def makeTacticCases(ctor: ADTConstructor) : Seq[CoqCase] = {
val existsCtor = existsCreatorName(ctor.id)
val ids: Seq[CoqIdentifier] = getTParams(ctor).map(tp => CoqIdentifier(tp.id)) :+ makeFresh("self")
val rcg = CoqApplication(recognizer(ctor.id), ids.map(id => CoqUnboundIdentifier(id)))
val label = poseNew(Mark(ids, ctor.id.name + "_exists"))
val h = makeFresh("H")
val pose = {(hyp: CoqExpression) =>
PoseProof(CoqApplication(proj1(CoqApplication(existsCtor, Seq(CoqUnknown, CoqUnknown))), Seq(hyp)))
}
val h1 = makeFresh("H1")
val h2 = makeFresh("H2")
Seq(
CoqCase(
CoqTacticPattern(Map[CoqIdentifier,CoqExpression](h -> CoqEquals(trueBoolean, rcg))),
CoqSequence(Seq(label, pose(h)))
),
CoqCase(
CoqTacticPattern(Map[CoqIdentifier,CoqExpression](h -> CoqEquals(rcg, trueBoolean))),
CoqSequence(Seq(label, pose(eq_sym(h))))
)
)
}
// transform function definitions
def transformFunction(fd: st.FunDef, admitObligations: Boolean = false): CoqCommand = {
ignoreFlags(fd.toString, fd.flags)
val mutual = p.symbols.functions.find{ case (_,fd2) => fd != fd2 && transitivelyCalls(fd, fd2) && transitivelyCalls(fd2, fd) }
if (mutual.isDefined)
ctx.reporter.fatalError(s"The translation to Coq does not support mutual recursion (between ${fd.id.name} and ${mutual.get._1.name})")
else {
val tparams: Seq[(CoqIdentifier,CoqExpression)] = fd.tparams.map { case p => (CoqIdentifier(p.id), TypeSort) }
val params: Seq[(CoqIdentifier,CoqExpression)] = fd.params.map { case vd => (makeFresh(vd.id), transformType(vd.tpe)) }
val body = exprOps.withoutSpecs(fd.fullBody) match {
case None => ctx.reporter.fatalError(s"We do not support functions with empty bodies: ${fd.id.name}")
case Some(b) => transformTree(b)
}
val preconditionName = freshId()
val preconditionParam: Seq[(CoqIdentifier,CoqExpression)] = exprOps.preconditionOf(fd.fullBody) match {
case None => Seq()
case Some(p) => Seq((preconditionName, transformTree(p) === trueBoolean))
}
val returnType = exprOps.postconditionOf(fd.fullBody) match {
case None => transformType(fd.returnType)
case Some(Lambda(Seq(vd), post)) =>
Refinement(makeFresh(vd.id), transformType(vd.tpe), transformTree(post) === trueBoolean)
}
val allParams = tparams ++ params ++ preconditionParam
val tmp = if (fd.isRecursive) {
val funName = makeFresh(fd.id)
//create a name for the return type
val returnTypeName = makeFresh(funName.coqString +"_rt")
val dependentParams: Map[CoqIdentifier, CoqExpression] = (preconditionParam :+ (returnTypeName, returnType)).toMap
val dependentParamNames: Map[CoqIdentifier, CoqIdentifier] =
dependentParams map {case (arg, _) => (arg, makeFresh(arg.coqString + "_type"))}
// scan left to collect heads...
val allParamMap: Map[CoqIdentifier, CoqExpression] = (allParams :+ (returnTypeName, returnType)).toMap
//important to keep order
val allParamNames: Seq[CoqIdentifier] = (allParams map (_._1)) :+ returnTypeName
val dependsOn: Map[CoqIdentifier, Seq[CoqIdentifier]] =
(allParamNames zip allParamNames.scanLeft(Seq[CoqIdentifier]()) {(l,a) => l :+ a}).toMap
val fullType: Map[CoqIdentifier, CoqExpression] =
allParamMap map {
case (x,tpe) => if (dependentParamNames contains x)
(x, dependentParamNames(x)(dependsOn(x):_*))
else
(x, tpe)
}
val argDefs: Seq[CoqCommand] = dependentParams.toSeq map { case (x, body) =>
NormalDefinition(dependentParamNames(x), dependsOn(x) map(y => (y, fullType(y))), typeSort, body) $
RawCommand(s"#[export] Hint Unfold ${dependentParamNames(x).coqString}: core.\n\n")
}
val oldRewriteTactic = rewriteTactic
val newRewriteTactic = makeFresh("rwrtTac")
val phaseA = makeFresh("rwrtTac_A")
val phaseB = makeFresh("rwrtTac_B")
rewriteTactic = newRewriteTactic
val ids = (tparams ++ params) map (_._1)
val h1 = makeFresh("H1")
val h2 = makeFresh("H2")
val u = makeFresh("U")
val label = poseNew(Mark(ids, "unfolding " + funName.coqString + "_equation"))
val markedUnfolding = Marked(ids.map(CoqUnboundIdentifier(_)), "unfolding " + funName.coqString + "_equation")
val rwrtTarget = CoqContext(CoqApplication(funName, ids.map(id => CoqUnboundIdentifier(id))))
val let =
CoqSequence(Seq(
poseNew(Mark(ids, "unfolded " + funName.coqString + "_equation")),
CoqLibraryConstant("add_equation")
(CoqApplication(CoqLibraryConstant(s"${funName.coqString}_equation_1"), ids))
))
SeparatorComment(s"Start of ${fd.id.name}") $
RawCommand(s"Obligation Tactic := ${idtac.coqString}.") $
manyCommands(argDefs) $
CoqEquation(funName,
allParams.map {case(x, _) => (x, fullType(x)) } ,
fullType(returnTypeName), Seq((CoqApplication(funName, allParams map (_._1)), body)), true) $
(if (admitObligations)
RawCommand("\nAdmit Obligations.")
else
RawCommand(s"\nSolve Obligations with (repeat ${mainTactic.coqString}).")
) $
RawCommand("Fail Next Obligation.\n") $
CoqMatchTactic(phaseA, Seq(
CoqCase(CoqTacticPattern(Map(h1 -> rwrtTarget)),
CoqSequence(Seq(label))),
CoqCase(CoqTacticPattern(Map(), rwrtTarget),
CoqSequence(Seq(label)))
)) $
CoqMatchTactic(phaseB, Seq(
CoqCase(CoqTacticPattern(Map(h1 -> rwrtTarget, h2 -> markedUnfolding)), let),
CoqCase(CoqTacticPattern(Map(h2 -> markedUnfolding), rwrtTarget), let)
)) $
RawCommand(s"Ltac ${rewriteTactic.coqString} := ${oldRewriteTactic.coqString}; repeat ${phaseA.coqString}; repeat ${phaseB.coqString}.\n") $
updateObligationTactic() $
SeparatorComment(s"End of ${fd.id.name}")
} else {
SeparatorComment(s"Start of ${fd.id.name}") $
NormalDefinition(makeFresh(fd.id), allParams, returnType, body) $
RawCommand(s"#[export] Hint Unfold ${makeFresh(fd.id).coqString}: definitions.\n") $
SeparatorComment(s"End of ${fd.id.name}")
}
tmp
//if (ctx.options.findOptionOrDefault(optAdmitAll)) {
/*if (fd.flags.contains("library")) {
tmp $
RawCommand("Admit Obligations.")
} else {
tmp
}*/
}
}
// translate a Stainless type to a Coq type
def transformType(tpe: st.Type): CoqExpression = tpe match {
case UnitType() => CoqUnit
case ADTType(id, args) if (sorts.contains(id)) =>
CoqApplication(makeFresh(id), args map transformType)
case ADTType(id, args) =>
refinedIdentifier(id)((args map transformType): _*)
case TypeParameter(id,flags) =>
ignoreFlags(tpe.toString, flags)
CoqIdentifier(id)
case BooleanType() => CoqBool
case FunctionType(ts, t) =>
val tts = ts.map(transformType)
tts.foldRight[CoqExpression](transformType(t))
{ case (arg,acc) => Arrow(arg,acc) }
case SetType(base) =>
CoqSetType(transformType(base))
case IntegerType() => CoqZ
case BVType(_, _) =>
ctx.reporter.warning(s"The translation to Coq currently converts the type $tpe (${tpe.getClass}) to BigInt.")
CoqZ
case MapType(u, v) => mapType(transformType(u), transformType(v))
case TupleType(ts) => CoqTupleType(ts map transformType)
case _ =>
ctx.reporter.fatalError(s"The translation to Coq does not support the type $tpe (${tpe.getClass}).")
}
// finds an order in which to define the functions
// does not work for mutually recursive functions
// highly non optimized
def transformFunctionsInOrder(fds: Seq[FunDef], admitObligations: Boolean = false): CoqCommand = {
if (fds.isEmpty) NoCommand
else {
val f = fds.find { fd =>
fds.forall { fd2 =>
fd == fd2 || !transitivelyCalls(fd,fd2)
}
}
f match {
case Some(fd) =>
transformFunction(fd, admitObligations) $ transformFunctionsInOrder(fds.filterNot(_ == fd), admitObligations)
case None =>
ctx.reporter.warning(s"Coq translation: mutual recursion is not supported yet (" + fds.map(_.id).mkString(",") + ").")
NoCommand
}
}
}
def totalOrder(fds: Seq[FunDef]): Seq[FunDef] = {
if (fds.isEmpty) Seq()
else {
val f = fds.find { fd =>
fds.forall(fd2 => fd == fd2 || !transitivelyCalls(fd,fd2))
}
f match {
case Some(fd) =>
Seq(fd) ++ totalOrder(fds.filterNot(_ == fd))
case None =>
ctx.reporter.warning(s"Coq translation: mutual recursion is not supported yet (" + fds.map(_.id).mkString(",") + ").")
Seq()
}
}
}
def dependentFunctions(fds: Seq[FunDef]): Seq[(FunDef, Seq[FunDef])] = {
val to = totalOrder(fds)
to.map((fd:FunDef) => fd -> fds.filter((fd2:FunDef) => fd != fd2 && transitivelyCalls(fd,fd2)))
}
def makeFilePerFunction(): Seq[(Identifier, String, CoqCommand)] = {
// reset initial state
val funs = p.symbols.functions.values.toSeq.sortBy(_.id.name)
val funDeps = dependentFunctions(funs)
funDeps
.map {case (f, d) =>
lastTactic = idtac
mainTactic = initTactic
rewriteTactic = idtac
(f.id, makeFresh(f.id).coqString, (
header() $
updateObligationTactic() $
makeTactic(p.symbols.sorts.values.toSeq)$
manyCommands(p.symbols.sorts.values.toSeq.map(transformADT))$
transformFunctionsInOrder(d,true) $
transformFunction(f)))
}
}
def makeTactic(adts: Seq[Definition]) = {
NoCommand
}
def header(): CoqCommand = {
RawCommand("Require Import SLC.Lib.") $
RawCommand("Require Import SLC.PropBool.") $
RawCommand("Require Import SLC.Booleans.") $
RawCommand("Require Import SLC.Sets.") $
// RawCommand("Require Import stdpp.set.") $
// RawCommand("Require Import SLC.stdppSets.") $
RawCommand("Require Import SLC.Tactics.") $
RawCommand("Require Import SLC.Ints.") $
RawCommand("Require Import SLC.Unfolding.\n") $
RawCommand("Require Import ZArith.") $
RawCommand("Require Import Coq.Strings.String.") $
RawCommand("From Equations Require Import Equations.\n") $
RawCommand("Set Program Mode.\n") $
RawCommand("Opaque set_elem_of.") $
RawCommand("Opaque set_union.") $
RawCommand("Opaque set_intersection.") $
RawCommand("Opaque set_subset.") $
RawCommand("Opaque set_empty.") $
RawCommand("Opaque set_singleton.") $
RawCommand("Opaque set_difference.\n")
}
def transform(): CoqCommand = {
header() $
updateObligationTactic() $
makeTactic(p.symbols.sorts.values.toSeq)$
manyCommands(p.symbols.sorts.values.toSeq.map(transformADT)) $
transformFunctionsInOrder(p.symbols.functions.values.toSeq.sortBy(_.id.name))
}
def getTParams(a: ADTConstructor) = a.getSort.tparams
}
object CoqEncoder {
val freshIdName = "tmp"
var m = Map[Identifier, CoqIdentifier] ()
var count = Map[String, Int](("t",1))
def makeFresh(id: Identifier): CoqIdentifier = {
if (m.contains(id)) m(id)
else {
val i = count.getOrElse(id.name,0)
val freshName = if (i == 0) id.name else id.name + i
count = count.updated(id.name, i +1)
val res = CoqIdentifier(new Identifier(freshName, id.id, id.globalId))
m = m.updated(id, res)
res
}
}
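// Renaming scheme (added comment, not part of the original source): the first identifier
// printed as "x" keeps its name, later distinct identifiers that also print as "x" become
// "x1", "x2", ... so generated Coq names stay unique while remaining readable. Repeated
// calls with the *same* Identifier return the cached CoqIdentifier from `m`.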
def makeFresh(): CoqIdentifier = {
val i = count.getOrElse(freshIdName,0)
val freshName = if (i == 0) freshIdName else freshIdName + i
count = count.updated(freshIdName, i +1)
CoqIdentifier(FreshIdentifier(freshName))
}
def makeFresh(name: String): CoqIdentifier = {
val i = count.getOrElse(name,0)
val freshName = if (i == 0) name else name + i
count = count.updated(name, i +1)
CoqIdentifier(FreshIdentifier(freshName))
}
def deriveContradiction = RawExpression("""let contradiction: False := _ in match contradiction with end""")
def unsupportedExpression = RawExpression("""match unsupported with end""")
//def unsupportedExpression = RawExpression("""magic""")
def constructorIdentifier(i: Identifier): CoqIdentifier = {
CoqIdentifier(new Identifier(i.name + "_construct", i.id, i.globalId))
}
def refinedIdentifier(i: Identifier): CoqIdentifier = {
CoqIdentifier(new Identifier(i.name + "_type", i.id, i.globalId))
}
def recognizer(i: Identifier): CoqIdentifier = {
CoqIdentifier(new Identifier("is" + i.name, i.id, i.globalId))
}
def rawIdentifier(s: String): CoqIdentifier = {
CoqIdentifier(new Identifier(s,0,0))
}
def manyCommands(l: Seq[CoqCommand]): CoqCommand = {
if (l.isEmpty) NoCommand
else l.tail.foldLeft(l.head)(_ $ _)
}
def transformProgram(program: StainlessProgram, context: inox.Context): Seq[(Identifier, String, CoqCommand)] = {
object encoder extends CoqEncoder {
val p = program
val ctx = context
}
encoder.makeFilePerFunction() //:+ ("verif1" -> encoder.transform())
}
}
|
epfl-lara/stainless
|
core/src/main/scala/stainless/verification/CoqEncoder.scala
|
Scala
|
apache-2.0
| 30,729
|
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.xml
package parsing
import scala.xml.dtd.{ IntDef, ParsedEntityDecl }
/** <p>
* (c) David Pollak 2007 WorldWide Conferencing, LLC.
* </p>
*/
object XhtmlEntities {
val entList = List(("quot",34), ("amp",38), ("lt",60), ("gt",62), ("nbsp",160), ("iexcl",161), ("cent",162), ("pound",163), ("curren",164), ("yen",165),
("euro",8364), ("brvbar",166), ("sect",167), ("uml",168), ("copy",169), ("ordf",170), ("laquo",171), ("shy",173), ("reg",174), ("trade",8482),
("macr",175), ("deg",176), ("plusmn",177), ("sup2",178), ("sup3",179), ("acute",180), ("micro",181), ("para",182), ("middot",183), ("cedil",184),
("sup1",185), ("ordm",186), ("raquo",187), ("frac14",188), ("frac12",189), ("frac34",190), ("iquest",191), ("times",215), ("divide",247),
("Agrave",192), ("Aacute",193), ("Acirc",194), ("Atilde",195), ("Auml",196), ("Aring",197), ("AElig",198), ("Ccedil",199), ("Egrave",200),
("Eacute",201), ("Ecirc",202), ("Euml",203), ("Igrave",204), ("Iacute",205), ("Icirc",206), ("Iuml",207), ("ETH",208), ("Ntilde",209),
("Ograve",210), ("Oacute",211), ("Ocirc",212), ("Otilde",213), ("Ouml",214), ("Oslash",216), ("Ugrave",217), ("Uacute",218), ("Ucirc",219),
("Uuml",220), ("Yacute",221), ("THORN",222), ("szlig",223), ("agrave",224), ("aacute",225), ("acirc",226), ("atilde",227), ("auml",228),
("aring",229), ("aelig",230), ("ccedil",231), ("egrave",232), ("eacute",233), ("ecirc",234), ("euml",235), ("igrave",236), ("iacute",237),
("icirc",238), ("iuml",239), ("eth",240), ("ntilde",241), ("ograve",242), ("oacute",243), ("ocirc",244), ("otilde",245), ("ouml",246),
("oslash",248), ("ugrave",249), ("uacute",250), ("ucirc",251), ("uuml",252), ("yacute",253), ("thorn",254), ("yuml",255), ("OElig",338),
("oelig",339), ("Scaron",352), ("scaron",353), ("Yuml",376), ("circ",710), ("ensp",8194), ("emsp",8195), ("zwnj",204), ("zwj",8205), ("lrm",8206),
("rlm",8207), ("ndash",8211), ("mdash",8212), ("lsquo",8216), ("rsquo",8217), ("sbquo",8218), ("ldquo",8220), ("rdquo",8221), ("bdquo",8222),
("dagger",8224), ("Dagger",8225), ("permil",8240), ("lsaquo",8249), ("rsaquo",8250), ("fnof",402), ("bull",8226), ("hellip",8230), ("prime",8242),
("Prime",8243), ("oline",8254), ("frasl",8260), ("weierp",8472), ("image",8465), ("real",8476), ("alefsym",8501), ("larr",8592), ("uarr",8593),
("rarr",8594), ("darr",8495), ("harr",8596), ("crarr",8629), ("lArr",8656), ("uArr",8657), ("rArr",8658), ("dArr",8659), ("hArr",8660),
("forall",8704), ("part",8706), ("exist",8707), ("empty",8709), ("nabla",8711), ("isin",8712), ("notin",8713), ("ni",8715), ("prod",8719),
("sum",8721), ("minus",8722), ("lowast",8727), ("radic",8730), ("prop",8733), ("infin",8734), ("ang",8736), ("and",8743), ("or",8744),
("cap",8745), ("cup",8746), ("int",8747), ("there4",8756), ("sim",8764), ("cong",8773), ("asymp",8776), ("ne",8800), ("equiv",8801), ("le",8804),
("ge",8805), ("sub",8834), ("sup",8835), ("nsub",8836), ("sube",8838), ("supe",8839), ("oplus",8853), ("otimes",8855), ("perp",8869), ("sdot",8901),
("lceil",8968), ("rceil",8969), ("lfloor",8970), ("rfloor",8971), ("lang",9001), ("rang",9002), ("loz",9674), ("spades",9824), ("clubs",9827),
("hearts",9829), ("diams",9830), ("Alpha",913), ("Beta",914), ("Gamma",915), ("Delta",916), ("Epsilon",917), ("Zeta",918), ("Eta",919),
("Theta",920), ("Iota",921), ("Kappa",922), ("Lambda",923), ("Mu",924), ("Nu",925), ("Xi",926), ("Omicron",927), ("Pi",928), ("Rho",929),
("Sigma",931), ("Tau",932), ("Upsilon",933), ("Phi",934), ("Chi",935), ("Psi",936), ("Omega",937), ("alpha",945), ("beta",946), ("gamma",947),
("delta",948), ("epsilon",949), ("zeta",950), ("eta",951), ("theta",952), ("iota",953), ("kappa",954), ("lambda",955), ("mu",956), ("nu",957),
("xi",958), ("omicron",959), ("pi",960), ("rho",961), ("sigmaf",962), ("sigma",963), ("tau",964), ("upsilon",965), ("phi",966), ("chi",967),
("psi",968), ("omega",969), ("thetasym",977), ("upsih",978), ("piv",982))
val entMap: Map[String, Char] = Map.empty[String, Char] ++ entList.map { case (name, value) => (name, value.toChar)}
val entities = entList.
map { case (name, value) => (name, new ParsedEntityDecl(name, new IntDef(value.toChar.toString)))}
def apply() = entities
}
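// Usage sketch (added comment, not part of the original source):
//   XhtmlEntities.entMap("amp")   // => '&'  (code point 38)
//   XhtmlEntities.entMap("copy")  // => '©'  (code point 169)
//   XhtmlEntities()               // the (name, ParsedEntityDecl) list handed to the XHTML parser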
|
cran/rkafkajars
|
java/scala/xml/parsing/XhtmlEntities.scala
|
Scala
|
apache-2.0
| 4,929
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.marathon
import java.io.File
import java.util.Calendar
import akka.actor.{ActorContext, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.util.Timeout
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.constants.{AkkaConstant, AppConstant}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import com.stratio.tikitakka.common.message._
import com.stratio.tikitakka.common.model.{ContainerId, ContainerInfo, CreateApp, Volume}
import com.stratio.tikitakka.core.UpAndDownActor
import com.stratio.tikitakka.updown.UpAndDownComponent
import com.typesafe.config.Config
import org.apache.curator.framework.CuratorFramework
import play.api.libs.json._
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.io.Source
import scala.util.{Properties, Try}
class MarathonService(context: ActorContext,
val curatorFramework: CuratorFramework,
policyModel: Option[PolicyModel],
sparkSubmitRequest: Option[SubmitRequest]) extends OauthTokenUtils with PolicyStatusUtils {
def this(context: ActorContext,
curatorFramework: CuratorFramework,
policyModel: PolicyModel,
sparkSubmitRequest: SubmitRequest) =
this(context, curatorFramework, Option(policyModel), Option(sparkSubmitRequest))
def this(context: ActorContext, curatorFramework: CuratorFramework) = this(context, curatorFramework, None, None)
/* Implicit variables */
implicit val actorSystem: ActorSystem = context.system
implicit val timeout: Timeout = Timeout(AkkaConstant.DefaultTimeout.seconds)
implicit val materializer: ActorMaterializer = ActorMaterializer(ActorMaterializerSettings(actorSystem))
/* Constant variables */
val AppMainClass = "com.stratio.sparta.driver.MarathonDriver"
val DefaultMarathonTemplateFile = "/etc/sds/sparta/marathon-app-template.json"
val MarathonApp = "marathon"
val DefaultSpartaDockerImage = "qa.stratio.com/stratio/sparta:1.4.0-SNAPSHOT"
val HostMesosNativeLibPath = "/opt/mesosphere/lib"
val HostMesosNativePackagesPath = "/opt/mesosphere/packages"
val HostMesosLib = s"$HostMesosNativeLibPath"
val HostMesosNativeLib = s"$HostMesosNativeLibPath/libmesos.so"
val ServiceName = policyModel.fold("") { policy => s"sparta/workflows/${policy.name}" }
val DefaultMemory = 1024
/* Environment variables to Marathon Application */
val AppTypeEnv = "SPARTA_APP_TYPE"
val MesosNativeJavaLibraryEnv = "MESOS_NATIVE_JAVA_LIBRARY"
val LdLibraryEnv = "LD_LIBRARY_PATH"
val AppMainEnv = "SPARTA_MARATHON_MAIN_CLASS"
val AppJarEnv = "SPARTA_MARATHON_JAR"
val VaultHostEnv = "VAULT_HOST"
val VaultPortEnv = "VAULT_PORT"
val VaultTokenEnv = "VAULT_TOKEN"
val PolicyIdEnv = "SPARTA_POLICY_ID"
val ZookeeperConfigEnv = "SPARTA_ZOOKEEPER_CONFIG"
val DetailConfigEnv = "SPARTA_DETAIL_CONFIG"
val AppHeapSizeEnv = "MARATHON_APP_HEAP_SIZE"
val AppHeapMinimumSizeEnv = "MARATHON_APP_HEAP_MINIMUM_SIZE"
val SparkHomeEnv = "SPARK_HOME"
val HadoopUserNameEnv = "HADOOP_USER_NAME"
val CoreSiteFromUriEnv = "CORE_SITE_FROM_URI"
val CoreSiteFromDfsEnv = "CORE_SITE_FROM_DFS"
val DefaultFsEnv = "DEFAULT_FS"
val HadoopConfDirEnv = "HADOOP_CONF_DIR"
val ServiceLogLevelEnv = "SERVICE_LOG_LEVEL"
val SpartaLogLevelEnv = "SPARTA_LOG_LEVEL"
val SparkLogLevelEnv = "SPARK_LOG_LEVEL"
val ZookeeperLogLevelEnv = "ZOOKEEPER_LOG_LEVEL"
val HadoopLogLevelEnv = "HADOOP_LOG_LEVEL"
val DcosServiceName = "DCOS_SERVICE_NAME"
/* Lazy variables */
lazy val marathonConfig: Config = SpartaConfig.getClusterConfig(Option(ConfigMarathon)).get
lazy val upAndDownComponent: UpAndDownComponent = SpartaMarathonComponent.apply
lazy val upAndDownActor: ActorRef = actorSystem.actorOf(Props(new UpAndDownActor(upAndDownComponent)),
s"${AkkaConstant.UpDownMarathonActor}-${Calendar.getInstance().getTimeInMillis}")
/* PUBLIC METHODS */
def launch(detailExecMode: String): Unit = {
assert(policyModel.isDefined && sparkSubmitRequest.isDefined, "A policy and a submit request must be specified")
val createApp = addRequirements(getMarathonAppFromFile, policyModel.get, sparkSubmitRequest.get)
for {
response <- (upAndDownActor ? UpServiceRequest(createApp, Try(getToken).toOption)).mapTo[UpAndDownMessage]
} response match {
case response: UpServiceFails =>
val information = s"Error when launching Sparta Marathon App to Marathon API with id: ${response.appInfo.id}"
log.error(information)
updateStatus(PolicyStatusModel(
id = policyModel.get.id.get,
status = Failed,
statusInfo = Option(information),
marathonId = Option(createApp.id),
lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, response.msg))))
log.error(s"Service ${response.appInfo.id} can't be deployed: ${response.msg}")
case response: UpServiceResponse =>
val information = s"Sparta Marathon App launched correctly to Marathon API with id: ${response.appInfo.id}"
log.info(information)
updateStatus(PolicyStatusModel(id = policyModel.get.id.get, status = Uploaded,
marathonId = Option(createApp.id), statusInfo = Option(information)))
case _ =>
val information = "Unrecognized message received from Marathon API"
log.warn(information)
updateStatus(PolicyStatusModel(id = policyModel.get.id.get, status = NotDefined,
statusInfo = Option(information)))
}
}
def kill(containerId: String): Unit = upAndDownActor ! DownServiceRequest(ContainerId(containerId))
/* PRIVATE METHODS */
private def marathonJar: Option[String] =
Try(marathonConfig.getString("jar")).toOption.orElse(Option(AppConstant.DefaultMarathonDriverURI))
private def mesosNativeLibrary: Option[String] = Properties.envOrNone(MesosNativeJavaLibraryEnv)
private def ldNativeLibrary: Option[String] = Properties.envOrNone(MesosNativeJavaLibraryEnv)
.map(path => new File(path).getParent).orElse(Option(HostMesosLib))
private def mesosphereLibPath: String =
Try(marathonConfig.getString("mesosphere.lib")).toOption.getOrElse(HostMesosNativeLibPath)
private def mesospherePackagesPath: String =
Try(marathonConfig.getString("mesosphere.packages")).toOption.getOrElse(HostMesosNativePackagesPath)
private def spartaDockerImage: String =
Try(marathonConfig.getString("docker.image")).toOption.getOrElse(DefaultSpartaDockerImage)
private def envSparkHome: Option[String] = Properties.envOrNone(SparkHomeEnv)
private def envVaultHost: Option[String] = Properties.envOrNone(VaultHostEnv)
private def envVaultPort: Option[String] = Properties.envOrNone(VaultPortEnv)
private def envVaultToken: Option[String] = Properties.envOrNone(VaultTokenEnv)
private def envHadoopUserName: Option[String] = Properties.envOrNone(HadoopUserNameEnv)
private def envCoreSiteFromUri: Option[String] = Properties.envOrNone(CoreSiteFromUriEnv)
private def envCoreSiteFromDfs: Option[String] = Properties.envOrNone(CoreSiteFromDfsEnv)
private def envDefaultFs: Option[String] = Properties.envOrNone(DefaultFsEnv)
private def envHadoopConfDir: Option[String] = Properties.envOrNone(HadoopConfDirEnv)
private def envServiceLogLevel: Option[String] = Properties.envOrNone(ServiceLogLevelEnv)
private def envSpartaLogLevel: Option[String] = Properties.envOrNone(SpartaLogLevelEnv)
private def envSparkLogLevel: Option[String] = Properties.envOrNone(SparkLogLevelEnv)
private def envHadoopLogLevel: Option[String] = Properties.envOrNone(HadoopLogLevelEnv)
private def getMarathonAppFromFile: CreateApp = {
val templateFile = Try(marathonConfig.getString("template.file")).toOption.getOrElse(DefaultMarathonTemplateFile)
val fileContent = Source.fromFile(templateFile).mkString
Json.parse(fileContent).as[CreateApp]
}
private def transformMemoryToInt(memory: String): Int = Try(memory match {
case mem if mem.contains("G") => mem.replace("G", "").toInt * 1024
case mem if mem.contains("g") => mem.replace("g", "").toInt * 1024
case mem if mem.contains("m") => mem.replace("m", "").toInt
case mem if mem.contains("M") => mem.replace("M", "").toInt
case _ => memory.toInt
}).getOrElse(DefaultMemory)
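// Worked examples (added comment, not part of the original source): the method normalises
// Spark memory strings to megabytes and falls back to DefaultMemory on anything it cannot parse:
//   transformMemoryToInt("2G")   // => 2048
//   transformMemoryToInt("512m") // => 512
//   transformMemoryToInt("1024") // => 1024
//   transformMemoryToInt("2.5g") // => 1024 (DefaultMemory, because "2.5".toInt throws)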
private def addRequirements(app: CreateApp, policyModel: PolicyModel, submitRequest: SubmitRequest): CreateApp = {
val newCpus = submitRequest.sparkConfigurations.get("spark.driver.cores").map(_.toDouble + 1d).getOrElse(app.cpus)
val newMem = submitRequest.sparkConfigurations.get("spark.driver.memory").map(transformMemoryToInt(_) + 1024)
.getOrElse(app.mem)
val subProperties = substitutionProperties(policyModel, submitRequest, newMem)
val newEnv = app.env.map { properties =>
properties.flatMap { case (k, v) =>
if (v == "???")
subProperties.get(k).map(vParsed => (k, vParsed))
else Some((k, v))
}
}
val newLabels = app.labels.flatMap { case (k, v) =>
if (v == "???")
subProperties.get(k).map(vParsed => (k, vParsed))
else Some((k, v))
}
val newDockerContainerInfo = mesosNativeLibrary match {
case Some(_) => ContainerInfo(app.container.docker.copy(image = spartaDockerImage))
case None => ContainerInfo(app.container.docker.copy(volumes = Option(Seq(
Volume(HostMesosNativeLibPath, mesosphereLibPath, "RO"),
Volume(HostMesosNativePackagesPath, mesospherePackagesPath, "RO"))),
image = spartaDockerImage
))
}
app.copy(
id = ServiceName,
cpus = newCpus,
mem = newMem,
env = newEnv,
labels = newLabels,
container = newDockerContainerInfo
)
}
private def substitutionProperties(policyModel: PolicyModel,
submitRequest: SubmitRequest,
memory: Int): Map[String, String] =
Map(
AppMainEnv -> Option(AppMainClass),
AppTypeEnv -> Option(MarathonApp),
MesosNativeJavaLibraryEnv -> mesosNativeLibrary.orElse(Option(HostMesosNativeLib)),
LdLibraryEnv -> ldNativeLibrary,
AppJarEnv -> marathonJar,
ZookeeperConfigEnv -> submitRequest.driverArguments.get("zookeeperConfig"),
DetailConfigEnv -> submitRequest.driverArguments.get("detailConfig"),
PolicyIdEnv -> policyModel.id,
VaultHostEnv -> envVaultHost,
VaultPortEnv -> envVaultPort,
VaultTokenEnv -> envVaultToken,
AppHeapSizeEnv -> Option(s"-Xmx${memory}m"),
AppHeapMinimumSizeEnv -> Option(s"-Xms${memory / 2}m"),
SparkHomeEnv -> envSparkHome,
HadoopUserNameEnv -> envHadoopUserName,
CoreSiteFromUriEnv -> envCoreSiteFromUri,
CoreSiteFromDfsEnv -> envCoreSiteFromDfs,
DefaultFsEnv -> envDefaultFs,
HadoopConfDirEnv -> envHadoopConfDir,
ServiceLogLevelEnv -> envServiceLogLevel,
SpartaLogLevelEnv -> envSpartaLogLevel,
SparkLogLevelEnv -> envSparkLogLevel,
HadoopLogLevelEnv -> envHadoopLogLevel,
DcosServiceName -> Option(ServiceName)
).flatMap { case (k, v) => v.map(value => Option(k -> value)) }.flatten.toMap
}
|
diegohurtado/sparta
|
serving-core/src/main/scala/com/stratio/sparta/serving/core/marathon/MarathonService.scala
|
Scala
|
apache-2.0
| 12,380
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, UnknownPartitioning}
import org.apache.spark.sql.execution.metric.SQLMetrics
/**
* Apply all of the GroupExpressions to every input row, hence we will get
* multiple output rows for an input row.
* @param projections The group of expressions, all of the group expressions should
* output the same schema specified by the parameter `output`
* @param output The output Schema
* @param child Child operator
*/
case class ExpandExec(
projections: Seq[Seq[Expression]],
output: Seq[Attribute],
child: SparkPlan)
extends UnaryExecNode with CodegenSupport {
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
// The GroupExpressions can output data with arbitrary partitioning, so set it
// as UNKNOWN partitioning
override def outputPartitioning: Partitioning = UnknownPartitioning(0)
override def references: AttributeSet =
AttributeSet(projections.flatten.flatMap(_.references))
private[this] val projection =
(exprs: Seq[Expression]) => UnsafeProjection.create(exprs, child.output)
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
val numOutputRows = longMetric("numOutputRows")
child.execute().mapPartitions { iter =>
val groups = projections.map(projection).toArray
new Iterator[InternalRow] {
private[this] var result: InternalRow = _
private[this] var idx = -1 // -1 means the initial state
private[this] var input: InternalRow = _
override final def hasNext: Boolean = (-1 < idx && idx < groups.length) || iter.hasNext
override final def next(): InternalRow = {
if (idx <= 0) {
// in the initial (-1) or beginning(0) of a new input row, fetch the next input tuple
input = iter.next()
idx = 0
}
result = groups(idx)(input)
idx += 1
if (idx == groups.length && iter.hasNext) {
idx = 0
}
numOutputRows += 1
result
}
}
}
}
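// Concrete illustration (added comment, not part of the original source): with
// projections = Seq(Seq(a, Literal(1)), Seq(a, Literal(2))) and an input row where a = 5,
// the iterator above emits (5, 1) and then (5, 2) before pulling the next input row,
// so each input row fans out into projections.length output rows.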
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.asInstanceOf[CodegenSupport].inputRDDs()
}
protected override def doProduce(ctx: CodegenContext): String = {
child.asInstanceOf[CodegenSupport].produce(ctx, this)
}
override def needCopyResult: Boolean = true
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
/*
* When the projections list looks like:
* expr1A, exprB, expr1C
* expr2A, exprB, expr2C
* ...
* expr(N-1)A, exprB, expr(N-1)C
*
* i.e. column A and C have different values for each output row, but column B stays constant.
*
* The generated code looks something like (note that B is only computed once in declaration):
*
* // part 1: declare all the columns
* colA = ...
* colB = ...
* colC = ...
*
* // part 2: code that computes the columns
* for (row = 0; row < N; row++) {
* switch (row) {
* case 0:
* colA = ...
* colC = ...
* case 1:
* colA = ...
* colC = ...
* ...
* case N - 1:
* colA = ...
* colC = ...
* }
* // increment metrics and consume output values
* }
*
* We use a for loop here so we only include one copy of the consume code and avoid code
* size explosion.
*/
// Tracks whether a column has the same output for all rows.
// Size of sameOutput array should equal N.
// If sameOutput(i) is true, then the i-th column has the same value for all output rows given
// an input row.
val sameOutput: Array[Boolean] = output.indices.map { colIndex =>
projections.map(p => p(colIndex)).toSet.size == 1
}.toArray
// Part 1: declare variables for each column
// If a column has the same value for all output rows, then we also generate its computation
// right after declaration. Otherwise its value is computed in part 2.
val outputColumns = output.indices.map { col =>
val firstExpr = projections.head(col)
if (sameOutput(col)) {
// This column is the same across all output rows. Just generate code for it here.
BindReferences.bindReference(firstExpr, child.output).genCode(ctx)
} else {
val isNull = ctx.freshName("isNull")
val value = ctx.freshName("value")
val code = s"""
|boolean $isNull = true;
|${CodeGenerator.javaType(firstExpr.dataType)} $value =
| ${CodeGenerator.defaultValue(firstExpr.dataType)};
""".stripMargin
ExprCode(
code,
JavaCode.isNullVariable(isNull),
JavaCode.variable(value, firstExpr.dataType))
}
}
// Part 2: switch/case statements
val cases = projections.zipWithIndex.map { case (exprs, row) =>
var updateCode = ""
for (col <- exprs.indices) {
if (!sameOutput(col)) {
val ev = BindReferences.bindReference(exprs(col), child.output).genCode(ctx)
updateCode +=
s"""
|${ev.code}
|${outputColumns(col).isNull} = ${ev.isNull};
|${outputColumns(col).value} = ${ev.value};
""".stripMargin
}
}
s"""
|case $row:
| ${updateCode.trim}
| break;
""".stripMargin
}
val numOutput = metricTerm(ctx, "numOutputRows")
val i = ctx.freshName("i")
// these columns have to be declared before the loop.
val evaluate = evaluateVariables(outputColumns)
s"""
|$evaluate
|for (int $i = 0; $i < ${projections.length}; $i ++) {
| switch ($i) {
| ${cases.mkString("\\n").trim}
| }
| $numOutput.add(1);
| ${consume(ctx, outputColumns)}
|}
""".stripMargin
}
}
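// A minimal, self-contained sketch (illustrative only, not part of the Spark
// sources) of the expand-iterator pattern implemented in doExecute above:
// plain functions stand in for UnsafeProjection and a plain Scala Iterator
// stands in for the partition iterator. The names ExpandIteratorSketch and
// expand are made up for this example.
object ExpandIteratorSketch {
  /** Emits one output value per projection for every input value. */
  def expand[T](input: Iterator[T], projections: IndexedSeq[T => T]): Iterator[T] =
    new Iterator[T] {
      private var idx = -1                  // -1 means no input value fetched yet
      private var current: Option[T] = None
      override def hasNext: Boolean =
        (idx > -1 && idx < projections.length) || input.hasNext
      override def next(): T = {
        if (idx <= 0) {                     // starting a new round: fetch the next input value
          current = Some(input.next())
          idx = 0
        }
        val out = projections(idx)(current.get)
        idx += 1
        if (idx == projections.length && input.hasNext) idx = 0
        out
      }
    }

  def main(args: Array[String]): Unit = {
    // Each input number expands into two output values: x and x * 10.
    val out = expand(Iterator(1, 2, 3), IndexedSeq((x: Int) => x, (x: Int) => x * 10))
    println(out.toList)                     // List(1, 10, 2, 20, 3, 30)
  }
}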
|
ddna1021/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/ExpandExec.scala
|
Scala
|
apache-2.0
| 7,131
|
//############################################################################
// Programmation IV - 2002 - Week 01
//############################################################################
object M0 {
//##########################################################################
Console.println(87 + 145);
Console.println(1000 - 333);
Console.println(5 + 2 * 3);
//##########################################################################
def size = 2;
def pi = 3.14159;
def radius = 10;
def circumference = 2 * pi * radius;
Console.println(5 * size);
Console.println(2 * pi * radius);
Console.println(circumference);
Console.println((2 * pi) * radius);
//##########################################################################
def square(x: Double) = x * x;
Console.println(square(2));
Console.println(square(5 + 4));
Console.println(square(square(4)));
//##########################################################################
def sumOfSquares(x: Double, y: Double) = square(x) + square(y);
Console.println(sumOfSquares(3, 2+2));
//##########################################################################
def loop: Int = loop;
def first(x: Int, y: Int) = x;
def constOne(x: Int, y: => Int) = 1;
Console.println(constOne(1, loop));
//##########################################################################
def abs(x: Double) = if (x >= 0) x else -x;
Console.println(abs(737));
Console.println(abs(1));
Console.println(abs(0));
Console.println(abs(-1));
Console.println(abs(-76));
//##########################################################################
def sqrtIter0(guess: Double, x: Double): Double =
if (isGoodEnough0(guess, x)) guess
else sqrtIter0(improve0(guess, x), x);
def improve0(guess: Double, x: Double) =
(guess + x / guess) / 2;
def isGoodEnough0(guess: Double, x: Double) =
abs(square(guess) - x) < 0.001;
def sqrt0(x: Double) = sqrtIter0(1.0, x);
Console.println(sqrt0(2));
Console.println(sqrt0(3));
Console.println(sqrt0(4));
//##########################################################################
def sqrt1(x: Double) = {
def sqrtIter1(guess: Double, x: Double): Double =
if (isGoodEnough1(guess, x)) guess
else sqrtIter1(improve1(guess, x), x);
def improve1(guess: Double, x: Double) =
(guess + x / guess) / 2;
def isGoodEnough1(guess: Double, x: Double) =
abs(square(guess) - x) < 0.001;
sqrtIter1(1.0, x)
}
Console.println(sqrt1(2));
Console.println(sqrt1(3));
Console.println(sqrt1(4));
//##########################################################################
def sqrt2(x: Double) = {
def sqrtIter2(guess: Double): Double =
if (isGoodEnough2(guess)) guess
else sqrtIter2(improve2(guess));
def improve2(guess: Double) =
(guess + x / guess) / 2;
def isGoodEnough2(guess: Double) =
abs(square(guess) - x) < 0.001;
sqrtIter2(1.0)
}
Console.println(sqrt2(2));
Console.println(sqrt2(3));
Console.println(sqrt2(4));
//##########################################################################
}
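// Note (editorial comment, not in the original course file): the isGoodEnough
// tests in M0 above check the *absolute* error abs(square(guess) - x) < 0.001.
// That is far too coarse for very small x (the returned guess can be wildly
// off relative to the true root) and, for very large x, floating-point
// rounding can keep the error above 0.001 so the iteration may never
// terminate. The M1-M3 variants below instead stop when the *relative* change
// between successive guesses is small, abs(prev - guess) / guess < 0.001,
// which behaves uniformly across magnitudes.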
//############################################################################
object M1 {
def abs(x: Double) = if (x >= 0) x else -x;
def sqrt(x: Double): Double = {
def sqrtIter(prev: Double, guess: Double): Double =
if (isGoodEnough(prev, guess)) guess
else sqrtIter(guess, improve(guess));
def improve(guess: Double) = (guess + x / guess) / 2;
def isGoodEnough(prev: Double, guess: Double) =
abs(prev - guess) / guess < 0.001;
sqrtIter(1.0, improve(1.0))
}
Console.println("sqrt(2) = " + sqrt(2));
}
//############################################################################
object M2 {
def abs(x: Double) = if (x >= 0) x else -x;
def sqrt(x:Double):Double = {
def sqrtIter(guess:Double):Double = {
val next = improve(guess);
if (isGoodEnough(guess,next)) next
else sqrtIter(next)
}
def improve(guess:Double) = (guess+x/guess)/2;
def isGoodEnough(prev:Double,guess:Double) = abs(prev-guess)/guess<0.001;
sqrtIter(1.0)
}
Console.println("sqrt(2) = " + sqrt(2));
}
//############################################################################
object M3 {
def abs(x: Double) = if (x >= 0) x else -x;
def cbrt(x:Double):Double = {
def cbrtIter(guess:Double):Double = {
val next = improve(guess);
if (isGoodEnough(guess,next)) next
else cbrtIter(next)
}
def improve(y:Double) = (x/(y*y)+2*y)/3;
def isGoodEnough(prev:Double,guess:Double) = abs(prev-guess)/guess<0.001;
cbrtIter(1.0)
}
Console.println("cbrt(2) = " + cbrt(2));
}
//############################################################################
object M4 {
def pascal(c: Int, l: Int): Int =
if (c <= 0 || c >= l) 1
else pascal(c - 1, l - 1) + pascal(c, l - 1);
Console.print(pascal(0,0));
Console.println;
Console.print(pascal(0,1));
Console.print(' ');
Console.print(pascal(1,1));
Console.println;
Console.print(pascal(0,2));
Console.print(' ');
Console.print(pascal(1,2));
Console.print(' ');
Console.print(pascal(2,2));
Console.println;
Console.print(pascal(0,3));
Console.print(' ');
Console.print(pascal(1,3));
Console.print(' ');
Console.print(pascal(2,3));
Console.print(' ');
Console.print(pascal(3,3));
Console.println;
Console.print(pascal(0,4));
Console.print(' ');
Console.print(pascal(1,4));
Console.print(' ');
Console.print(pascal(2,4));
Console.print(' ');
Console.print(pascal(3,4));
Console.print(' ');
Console.print(pascal(4,4));
Console.println;
}
//############################################################################
object Test {
def main(args: Array[String]): Unit = {
M0;
M1;
M2;
M3;
M4;
()
}
}
//############################################################################
|
folone/dotty
|
tests/run/Course-2002-01.scala
|
Scala
|
bsd-3-clause
| 6,019
|
import leon.Utils._
/* VSTTE 2008 - Dafny paper */
/*
Add the loop invariants so that Leon can verify this function.
*/
object Mult {
def mult(x : BigInt, y : BigInt): BigInt = ({
    var r: BigInt = 0
if(y < 0) {
var n = y
(while(n != 0) {
r = r - x
n = n + 1
}) //invariant(...)
} else {
var n = y
(while(n != 0) {
r = r + x
n = n - 1
}) //invariant(...)
}
r
}) ensuring(_ == x*y)
}
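// A possible set of invariants (editorial suggestion, not part of the original
// exercise; assumes Leon's `(while(...) { ... }) invariant(...)` syntax):
//   negative branch: invariant(n <= 0 && r == (y - n) * x)
//   positive branch: invariant(n >= 0 && r == (y - n) * x)
// In both cases the loop exits with n == 0, so r == x * y as required by the
// ensuring clause.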
|
epfl-lara/leon
|
testcases/graveyard/tutorials/07_Ex6_Mult.scala
|
Scala
|
gpl-3.0
| 474
|
package controllers.api
import play.api.mvc._
import play.api.libs.json._
import play.api.mvc.BodyParsers.parse
import utils._
import utils.auth._
import utils.JsonLogging
import levar._
import levar.json._
import levar.Format
import db._
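/**
 * CRUD API for experiments, scoped by organisation and dataset. Every action
 * requires authentication plus organisation access (Authenticated +
 * HasOrgAccess), and responses are content-negotiated via Play's `render`:
 * plain text, HTML, or JSON depending on the request's Accept header.
 */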
object ExperimentController extends Controller with JsonLogging {
private val dbase = db.impl
def searchByOrg(org: String) = Authenticated { user =>
HasOrgAccess(user, org) {
Action { implicit request =>
val results = dbase.listExperiments(org)
render {
case AcceptsText() => Ok(Format.experimentRStoString(results) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.search(results))
case Accepts.Json() => Ok(Json.toJson(results))
}
}
}
}
def searchByDataset(org: String, datasetId: String) = Authenticated { user =>
HasOrgAccess(user, org) {
Action { implicit request =>
try {
val results = dbase.listExperiments(org, datasetId)
render {
case AcceptsText() => Ok(Format.experimentRStoString(results) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.search(results))
case Accepts.Json() => Ok(Json.toJson(results))
}
} catch {
case _: NotFoundInDb => {
val msg = s"Dataset not found: $org/$datasetId"
render {
case AcceptsText() => NotFound(msg + "\\n")
case Accepts.Json() => NotFound(Json.obj("message" -> msg))
}
}
}
}
}
}
def create(org: String, datasetId: String) = Authenticated { implicit user =>
HasOrgAccess(user, org) {
Action(parse.json) { implicit request =>
request.body.validate[Experiment].fold(
errors => BadRequest(JsError.toJson(errors)),
{ exp =>
try {
dbase.createExperiment(org, datasetId, exp)
infoU("status" -> "success", "action" -> "create", "experiment" -> s"$org/$datasetId/${exp.id}")
val saved = db.impl.getExperiment(org, datasetId, exp.id)
render {
case AcceptsText() => Ok(Format.experimentToString(saved) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.details(saved))
case Accepts.Json() => Ok(Json.toJson(saved))
}
} catch {
case _: NotFoundInDb => {
val msg = s"Dataset $org/$datasetId not found"
infoU("status" -> "error", "message" -> msg)
NotFound(msg + "\\n")
}
case _: ExperimentIdAlreadyExists => {
val msg = s"Experiment ID already exists: $org/$datasetId/${exp.id}"
infoU("status" -> "error", "message" -> msg)
BadRequest(msg + "\\n")
}
}
}
)
}
}
}
def details(org: String, datasetId: String, id: String) = Authenticated { implicit user =>
HasOrgAccess(user, org) {
Action { implicit request =>
try {
val saved = dbase.getExperiment(org, datasetId, id)
render {
case AcceptsText() => Ok(Format.experimentToString(saved) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.details(saved))
case Accepts.Json() => Ok(Json.toJson(saved))
}
} catch {
case _: NotFoundInDb => {
val msg = s"Experiment $org/$datasetId/$id not found"
infoU("status" -> "error", "message" -> msg)
NotFound(msg + "\\n")
}
}
}
}
}
def update(org: String, datasetId: String, id: String) = Authenticated { implicit user =>
HasOrgAccess(user, org) {
Action(parse.json) { implicit request =>
infoU("status" -> "request", "action" -> "update", "dataset" -> datasetId, "experiment" -> id)
request.body.validate[Experiment.Update].fold(
errors => BadRequest(JsError.toJson(errors)),
{ expUpdate =>
try {
dbase.updateExperiment(org, datasetId, id, expUpdate)
val saved = db.impl.getExperiment(org, datasetId, id)
render {
case AcceptsText() => Ok(Format.experimentToString(saved) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.details(saved))
case Accepts.Json() => Ok(Json.toJson(saved))
}
} catch {
case _: NotFoundInDb => {
val msg = s"Experiment not found: $org/$datasetId/$id"
render {
case AcceptsText() => NotFound(msg + "\\n")
case Accepts.Json() => NotFound(Json.obj("message" -> msg))
}
}
}
}
)
}
}
}
def delete(org: String, datasetId: String, experimentId: String) = Authenticated { implicit user =>
HasOrgAccess(user, org) {
Action { implicit request =>
infoU("status" -> "request", "action" -> "delete", "org" -> org, "dataset" -> datasetId, "experiment" -> experimentId)
try {
val savedBeforeDelete = dbase.getExperiment(org, datasetId, experimentId)
dbase.deleteExperiment(org, datasetId, experimentId)
infoU("status" -> "success", "action" -> "delete", "org" -> org, "dataset" -> datasetId, "experiment" -> experimentId)
render {
case AcceptsText() => Ok(Format.experimentToString(savedBeforeDelete) + "\\n")
case Accepts.Html() => Ok(views.html.ExperimentApi.details(savedBeforeDelete))
case Accepts.Json() => Ok(Json.toJson(savedBeforeDelete))
}
} catch {
case _: NotFoundInDb => {
val msg = s"Experiment not found: $org/$datasetId/$experimentId"
render {
case AcceptsText() => NotFound(msg)
case Accepts.Json() => NotFound(Json.obj("message" -> msg))
}
}
}
}
}
}
}
|
peoplepattern/LeVar
|
levar-web/app/controllers/api/ExperimentController.scala
|
Scala
|
apache-2.0
| 6,063
|