code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package rat.client.components
import org.scalajs.dom._
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
/**
* Minimal facade for JQuery. Use https://github.com/scala-js/scala-js-jquery or
* https://github.com/jducoeur/jquery-facade for more complete one.
*/
@js.native
trait JQueryEventObject extends Event {
// Arbitrary user data passed along when the handler was registered via
// jQuery's `.on(events, selector, data, handler)`; typed js.Any since jQuery
// places whatever the caller supplied here.
var data: js.Any = js.native
}
@js.native
// NOTE(review): `@JSName` on a top-level facade object is deprecated in newer
// Scala.js versions in favour of `@JSGlobal("jQuery")` — confirm the Scala.js
// version used by this build before changing.
@JSName("jQuery")
object JQueryStatic extends js.Object {
// Wraps a DOM element; equivalent to the JavaScript call `jQuery(element)`.
def apply(element: Element): JQuery = js.native
}
@js.native
trait JQuery extends js.Object {
// Attaches `handler` for `events` on descendants matching `selector`,
// making `data` available on the resulting JQueryEventObject.
def on(events: String, selector: js.Any, data: js.Any, handler: js.Function1[JQueryEventObject, js.Any]): JQuery = js.native
// Removes handlers previously attached for `events`.
def off(events: String): JQuery = js.native
} | omidb/rat | client/src/main/scala/rat/client/components/JQuery.scala | Scala | apache-2.0 | 709 |
package io.getquill.context.spark
import org.apache.spark.sql.Dataset
// ADT distinguishing how a value is bound into a quill-spark query.
sealed trait Binding
// Binds a Spark Dataset so it can be referenced from the generated SQL.
case class DatasetBinding[T](ds: Dataset[T]) extends Binding
// Binds a plain value, carried as its string representation.
case class ValueBinding(str: String) extends Binding | getquill/quill | quill-spark/src/main/scala/io/getquill/context/spark/Binding.scala | Scala | apache-2.0 | 208 |
package com.tribbloids.spookystuff.uav.planning.VRPOptimizers
import com.graphhopper.jsprit.core.problem.VehicleRoutingProblem
import com.graphhopper.jsprit.core.util.{FastVehicleRoutingTransportCostsMatrix, VehicleRoutingTransportCostsMatrix}
import com.graphhopper.jsprit.io.problem.VrpXMLReader
import com.tribbloids.spookystuff.actions.TraceView
import com.tribbloids.spookystuff.uav.UAVConf
import com.tribbloids.spookystuff.uav.actions.Waypoint
import com.tribbloids.spookystuff.uav.spatial.point.{Location, NED}
import com.tribbloids.spookystuff.uav.system.UAV
import com.tribbloids.spookystuff.uav.telemetry.LinkStatus
import com.tribbloids.spookystuff.uav.utils.Binding
// Tests for JSpritRunner: cost-matrix construction, objective-function evaluation for
// one- and three-vehicle fleets, and end-to-end solving of the bundled "abe" VRP instance.
class JSpritRunnerSuite extends VRPFixture {
// Three single-waypoint traces given as NED offsets from the default home location.
// Pairwise distances form a 3-4-5 right triangle, which the matrix assertion below relies on.
val waypoints: Array[TraceView] = Array[TraceView](
List(Waypoint(NED(3, 4, 0) -> UAVConf.DEFAULT_HOME_LOCATION: Location)),
List(Waypoint(NED(3, 0, 0) -> UAVConf.DEFAULT_HOME_LOCATION: Location)),
List(Waypoint(NED(0, 4, 0) -> UAVConf.DEFAULT_HOME_LOCATION: Location))
)
it("getCostMatrix") {
val mat: FastVehicleRoutingTransportCostsMatrix = JSpritRunner
.getCostMatrix(defaultSchema, waypoints.zipWithIndex)
//TODO: add assertion
// Flatten the matrix into (row, col, scaled-costs) triples; costs are scaled by 1000
// and truncated to Int so the string comparison is insensitive to float noise.
val mat2 = for (i <- mat.getMatrix.toList.zipWithIndex;
j <- i._1.toList.zipWithIndex) yield {
(i._2, j._2, j._1.toList.map(v => (v * 1000.0).toInt))
}
mat2
.mkString("\\n")
.shouldBe(
"""
|(0,0,List(0, 0))
|(0,1,List(4000, 4000))
|(0,2,List(3000, 3000))
|(1,0,List(4000, 4000))
|(1,1,List(0, 0))
|(1,2,List(5000, 5000))
|(2,0,List(3000, 3000))
|(2,1,List(5000, 5000))
|(2,2,List(0, 0))
""".stripMargin
)
}
describe("objectiveFunction") {
it("can evaluate 1 route") {
val location = UAVConf.DEFAULT_HOME_LOCATION
val uav = LinkStatus(UAV(Seq("dummy@localhost")), Binding.Open, location, location)
val runner = JSpritRunner(getVRP, defaultSchema, Array(uav), waypoints)
val solution = runner.solve
val cost = JSpritRunner.getObjectiveFunction(0).getCosts(solution)
// Single vehicle must visit all three waypoints; scaled total route cost is 10000.
assert((cost * 1000).toInt == 10000)
val map = runner.getUAV2TraceMap
val first = map.head
// Rebuild the full trace (start position + assigned waypoints) and cross-check the
// solver's cost against the independent cost estimator.
val trace = List(Waypoint(first._1.currentLocation)) ++
first._2.flatMap(_.children)
val cost2 = spooky.getConf[UAVConf].costEstimator.estimate(trace, defaultSchema)
assert(cost == cost2)
}
it("can evaluate 3 route") {
val location = UAVConf.DEFAULT_HOME_LOCATION
val uavs = Array("A", "B", "C").map { v =>
LinkStatus(UAV(Seq(s"$v@localhost")), Binding.Open, location, location)
}
val runner = JSpritRunner(getVRP, defaultSchema, uavs, waypoints)
val solution = runner.solve
val cost = JSpritRunner.getObjectiveFunction(0).getCosts(solution)
// With three vehicles the makespan-style objective drops to the longest single leg (5000 scaled).
assert((cost * 1000).toInt == 5000)
val map = runner.getUAV2TraceMap
val traces = map.toSeq.map { v =>
List(Waypoint(v._1.currentLocation)) ++
v._2.flatMap(_.children)
}
val costs2 = traces.map { trace =>
spooky.getConf[UAVConf].costEstimator.estimate(trace, defaultSchema)
}
// The objective equals the maximum per-vehicle estimated cost.
val cost2 = costs2.max
assert(cost == cost2)
}
}
it("solveVRP") {
// End-to-end run against jsprit's bundled Abraham problem with an explicit cost matrix.
val vrpBuilder = VehicleRoutingProblem.Builder.newInstance
new VrpXMLReader(vrpBuilder).read("input/abe/abrahamProblem.xml")
val matrixBuilder = VehicleRoutingTransportCostsMatrix.Builder.newInstance(true)
val matrixReader = new MatrixReader(matrixBuilder)
matrixReader.read("input/abe/Matrix.txt")
val matrix = matrixBuilder.build
vrpBuilder.setRoutingCost(matrix)
val vrp = vrpBuilder.build
val tuple = JSpritRunner.solveVRP(vrp, getVRP)
System.out.println("cost: " + tuple._2)
// Known-good upper bound for this instance; the solver should do at least this well.
assert(tuple._2 <= 1011.777)
}
}
| tribbloid/spookystuff | uav/src/test/scala/com/tribbloids/spookystuff/uav/planning/VRPOptimizers/JSpritRunnerSuite.scala | Scala | apache-2.0 | 3,869 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.nio.ByteBuffer
import java.security.Principal
import java.util.concurrent._
import com.yammer.metrics.core.Gauge
import kafka.api._
import kafka.common.TopicAndPartition
import kafka.message.ByteBufferMessageSet
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.{Logging, SystemTime}
import org.apache.kafka.common.network.Send
import org.apache.kafka.common.protocol.{ApiKeys, SecurityProtocol}
import org.apache.kafka.common.requests.{AbstractRequest, RequestHeader}
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.log4j.Logger
object RequestChannel extends Logging {
// Sentinel request enqueued to wake up and terminate request-handler threads on shutdown.
val AllDone = new Request(processor = 1, connectionId = "2", new Session(KafkaPrincipal.ANONYMOUS, ""), buffer = getShutdownReceive(), startTimeMs = 0, securityProtocol = SecurityProtocol.PLAINTEXT)
// Serializes an empty ProducerRequest, prefixed with its two-byte API key, to serve as
// the payload of the shutdown sentinel above.
def getShutdownReceive() = {
val emptyProducerRequest = new ProducerRequest(0, 0, "", 0, 0, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]())
val byteBuffer = ByteBuffer.allocate(emptyProducerRequest.sizeInBytes + 2)
byteBuffer.putShort(RequestKeys.ProduceKey)
emptyProducerRequest.writeTo(byteBuffer)
byteBuffer.rewind()
byteBuffer
}
// Authenticated principal plus remote host of the connection a request arrived on.
case class Session(principal: Principal, host: String)
// A request received from the network. The raw buffer is parsed eagerly during
// construction — either into the legacy server-side `requestObj`, or into the
// client-side `header`/`body` pair — and then released for garbage collection.
case class Request(processor: Int, connectionId: String, session: Session, private var buffer: ByteBuffer, startTimeMs: Long, securityProtocol: SecurityProtocol) {
// Lifecycle timestamps in ms; -1 until the corresponding stage has been reached.
@volatile var requestDequeueTimeMs = -1L
@volatile var apiLocalCompleteTimeMs = -1L
@volatile var responseCompleteTimeMs = -1L
@volatile var responseDequeueTimeMs = -1L
// First short of the buffer is the API key identifying the request type.
val requestId = buffer.getShort()
// for server-side request / response format
// TODO: this will be removed once we migrated to client-side format
val requestObj =
if ( RequestKeys.keyToNameAndDeserializerMap.contains(requestId))
RequestKeys.deserializerForKey(requestId)(buffer)
else
null
// for client-side request / response format
val header: RequestHeader =
if (requestObj == null) {
// Legacy deserializer not found: re-read the buffer from the start as a client-format request.
buffer.rewind
RequestHeader.parse(buffer)
} else
null
val body: AbstractRequest =
if (requestObj == null)
AbstractRequest.getRequest(header.apiKey, header.apiVersion, buffer)
else
null
// All needed data has been parsed out; drop the buffer so it can be GC'd.
buffer = null
private val requestLogger = Logger.getLogger("kafka.request.logger")
// Human-readable description of the request for trace/debug logging.
private def requestDesc: String = {
if (requestObj != null)
requestObj.describe(false)
else
header.toString + " -- " + body.toString
}
trace("Processor %d received request : %s".format(processor, requestDesc))
// Records the request rate and per-stage latency histograms once the response has been
// fully sent, and emits the request log line.
def updateRequestMetrics() {
val endTimeMs = SystemTime.milliseconds
// In some corner cases, apiLocalCompleteTimeMs may not be set when the request completes since the remote
// processing time is really small. In this case, use responseCompleteTimeMs as apiLocalCompleteTimeMs.
if (apiLocalCompleteTimeMs < 0)
apiLocalCompleteTimeMs = responseCompleteTimeMs
// Clamp each stage duration at 0 to guard against unset (-1) timestamps and clock skew.
val requestQueueTime = (requestDequeueTimeMs - startTimeMs).max(0L)
val apiLocalTime = (apiLocalCompleteTimeMs - requestDequeueTimeMs).max(0L)
val apiRemoteTime = (responseCompleteTimeMs - apiLocalCompleteTimeMs).max(0L)
val responseQueueTime = (responseDequeueTimeMs - responseCompleteTimeMs).max(0L)
val responseSendTime = (endTimeMs - responseDequeueTimeMs).max(0L)
val totalTime = endTimeMs - startTimeMs
var metricsList = List(RequestMetrics.metricsMap(ApiKeys.forId(requestId).name))
// Fetches are additionally recorded under a consumer- or follower-specific metric.
if (requestId == RequestKeys.FetchKey) {
val isFromFollower = requestObj.asInstanceOf[FetchRequest].isFromFollower
metricsList ::= ( if (isFromFollower)
RequestMetrics.metricsMap(RequestMetrics.followFetchMetricName)
else
RequestMetrics.metricsMap(RequestMetrics.consumerFetchMetricName) )
}
metricsList.foreach{
m => m.requestRate.mark()
m.requestQueueTimeHist.update(requestQueueTime)
m.localTimeHist.update(apiLocalTime)
m.remoteTimeHist.update(apiRemoteTime)
m.responseQueueTimeHist.update(responseQueueTime)
m.responseSendTimeHist.update(responseSendTime)
m.totalTimeHist.update(totalTime)
}
if(requestLogger.isTraceEnabled)
requestLogger.trace("Completed request:%s from connection %s;totalTime:%d,requestQueueTime:%d,localTime:%d,remoteTime:%d,responseQueueTime:%d,sendTime:%d,securityProtocol:%s,principal:%s"
.format(requestDesc, connectionId, totalTime, requestQueueTime, apiLocalTime, apiRemoteTime, responseQueueTime, responseSendTime, securityProtocol, session.principal))
else if(requestLogger.isDebugEnabled)
requestLogger.debug("Completed request:%s from connection %s;totalTime:%d,requestQueueTime:%d,localTime:%d,remoteTime:%d,responseQueueTime:%d,sendTime:%d,securityProtocol:%s,principal:%s"
.format(requestDesc, connectionId, totalTime, requestQueueTime, apiLocalTime, apiRemoteTime, responseQueueTime, responseSendTime, securityProtocol, session.principal))
}
}
// A response destined for the given processor; records completion time on creation.
case class Response(processor: Int, request: Request, responseSend: Send, responseAction: ResponseAction) {
request.responseCompleteTimeMs = SystemTime.milliseconds
// A null send means there is nothing to write back (NoOpAction).
def this(processor: Int, request: Request, responseSend: Send) =
this(processor, request, responseSend, if (responseSend == null) NoOpAction else SendAction)
def this(request: Request, send: Send) =
this(request.processor, request, send)
}
// What the processor should do for a response: send it, do nothing (keep reading),
// or close the connection.
trait ResponseAction
case object SendAction extends ResponseAction
case object NoOpAction extends ResponseAction
case object CloseConnectionAction extends ResponseAction
}
// Bridges network processor threads and request-handler threads: a single bounded
// request queue feeds the handlers, and one response queue per processor carries
// the replies back to the network layer.
class RequestChannel(val numProcessors: Int, val queueSize: Int) extends KafkaMetricsGroup {
// NOTE(review): plain var mutated in addResponseListener without synchronization —
// presumably all listeners are registered during startup before traffic flows; confirm.
private var responseListeners: List[(Int) => Unit] = Nil
private val requestQueue = new ArrayBlockingQueue[RequestChannel.Request](queueSize)
private val responseQueues = new Array[BlockingQueue[RequestChannel.Response]](numProcessors)
for(i <- 0 until numProcessors)
responseQueues(i) = new LinkedBlockingQueue[RequestChannel.Response]()
newGauge(
"RequestQueueSize",
new Gauge[Int] {
def value = requestQueue.size
}
)
// Aggregate depth across all per-processor response queues.
newGauge("ResponseQueueSize", new Gauge[Int]{
def value = responseQueues.foldLeft(0) {(total, q) => total + q.size()}
})
// NOTE(review): registers per-processor gauges under the same "ResponseQueueSize" name
// as the aggregate above, distinguished only by the "processor" tag.
for (i <- 0 until numProcessors) {
newGauge("ResponseQueueSize",
new Gauge[Int] {
def value = responseQueues(i).size()
},
Map("processor" -> i.toString)
)
}
/** Send a request to be handled, potentially blocking until there is room in the queue for the request */
def sendRequest(request: RequestChannel.Request) {
requestQueue.put(request)
}
/** Send a response back to the socket server to be sent over the network */
def sendResponse(response: RequestChannel.Response) {
responseQueues(response.processor).put(response)
for(onResponse <- responseListeners)
onResponse(response.processor)
}
/** No operation to take for the request, need to read more over the network */
def noOperation(processor: Int, request: RequestChannel.Request) {
responseQueues(processor).put(new RequestChannel.Response(processor, request, null, RequestChannel.NoOpAction))
for(onResponse <- responseListeners)
onResponse(processor)
}
/** Close the connection for the request */
def closeConnection(processor: Int, request: RequestChannel.Request) {
responseQueues(processor).put(new RequestChannel.Response(processor, request, null, RequestChannel.CloseConnectionAction))
for(onResponse <- responseListeners)
onResponse(processor)
}
/** Get the next request or block until specified time has elapsed */
def receiveRequest(timeout: Long): RequestChannel.Request =
requestQueue.poll(timeout, TimeUnit.MILLISECONDS)
/** Get the next request or block until there is one */
def receiveRequest(): RequestChannel.Request =
requestQueue.take()
/** Get a response for the given processor if there is one */
def receiveResponse(processor: Int): RequestChannel.Response = {
val response = responseQueues(processor).poll()
// Stamp dequeue time so updateRequestMetrics can attribute response-queue latency.
if (response != null)
response.request.responseDequeueTimeMs = SystemTime.milliseconds
response
}
// Registers a callback invoked with the processor id whenever a response is enqueued.
def addResponseListener(onResponse: Int => Unit) {
responseListeners ::= onResponse
}
// Discards pending requests; responses already queued are left to drain.
def shutdown() {
requestQueue.clear
}
}
object RequestMetrics {
// One RequestMetrics instance per API key name, plus two extra entries splitting
// fetch traffic into consumer- and follower-originated fetches.
val metricsMap = new scala.collection.mutable.HashMap[String, RequestMetrics]
val consumerFetchMetricName = RequestKeys.nameForKey(RequestKeys.FetchKey) + "Consumer"
val followFetchMetricName = RequestKeys.nameForKey(RequestKeys.FetchKey) + "Follower"
(ApiKeys.values().toList.map(e => e.name)
++ List(consumerFetchMetricName, followFetchMetricName)).foreach(name => metricsMap.put(name, new RequestMetrics(name)))
}
// Per-request-type metrics: request rate plus one biased histogram per lifecycle stage,
// all tagged with the request name.
class RequestMetrics(name: String) extends KafkaMetricsGroup {
val tags = Map("request" -> name)
val requestRate = newMeter("RequestsPerSec", "requests", TimeUnit.SECONDS, tags)
// time a request spent in a request queue
val requestQueueTimeHist = newHistogram("RequestQueueTimeMs", biased = true, tags)
// time a request takes to be processed at the local broker
val localTimeHist = newHistogram("LocalTimeMs", biased = true, tags)
// time a request takes to wait on remote brokers (only relevant to fetch and produce requests)
val remoteTimeHist = newHistogram("RemoteTimeMs", biased = true, tags)
// time a response spent in a response queue
val responseQueueTimeHist = newHistogram("ResponseQueueTimeMs", biased = true, tags)
// time to send the response to the requester
val responseSendTimeHist = newHistogram("ResponseSendTimeMs", biased = true, tags)
// end-to-end time from arrival to the response being fully sent
val totalTimeHist = newHistogram("TotalTimeMs", biased = true, tags)
}
| slchen2014/kafka | core/src/main/scala/kafka/network/RequestChannel.scala | Scala | apache-2.0 | 10,864 |
package com.wavesplatform.state.diffs.smart.predef
import com.wavesplatform.account.{Address, Alias}
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.lang.Testing._
import com.wavesplatform.lang.v1.compiler.Terms.CONST_BYTESTR
import com.wavesplatform.lang.v1.evaluator.ctx.impl._
import com.wavesplatform.state.IntegerDataEntry
import com.wavesplatform.state.diffs._
import com.wavesplatform.test.{NumericExt, PropSpec}
import com.wavesplatform.transaction.Asset.{IssuedAsset, Waves}
import com.wavesplatform.transaction.transfer.MassTransferTransaction
import com.wavesplatform.transaction.transfer.MassTransferTransaction.ParsedTransfer
import com.wavesplatform.transaction.{TxHelpers, TxVersion}
import org.scalatest.Assertions
import shapeless.Coproduct
import scala.util.Try
// Evaluates RIDE scripts (via `runScript`) against concrete transactions to pin down the
// semantics of common functions (extract, isDefined, size, getTransfer, +) and of
// pattern matching: shadowing rules and data constructors.
class CommonFunctionsTest extends PropSpec {
property("extract should transaction transfer assetId if exists") {
val transfer = TxHelpers.transfer(version = TxVersion.V1)
val result = runScript(
"""
|match tx {
| case ttx : TransferTransaction => extract(ttx.assetId)
| case _ => throw()
| }
|""".stripMargin,
Coproduct(transfer)
)
// extract() succeeds for an issued asset and fails with a unit error for Waves.
transfer.assetId match {
case IssuedAsset(v) => result.explicitGet().asInstanceOf[CONST_BYTESTR].bs.arr sameElements v.arr
case Waves => result should produce("extract() called on unit")
}
}
property("isDefined should return true if transfer assetId exists") {
val transfer = TxHelpers.transfer(version = TxVersion.V1)
val result = runScript(
"""
|match tx {
| case ttx : TransferTransaction => isDefined(ttx.assetId)
| case _ => throw()
| }
|""".stripMargin,
Coproduct(transfer)
)
result shouldEqual evaluated(transfer.assetId != Waves)
}
property("Some/None/extract/isDefined") {
// `unit` plays the role of None; a value plays the role of Some.
val some3 = "if true then 3 else unit"
val none = "if false then 3 else unit"
runScript(some3) shouldBe evaluated(3L)
runScript(none) shouldBe evaluated(unit)
runScript(s"isDefined($some3)") shouldBe evaluated(true)
runScript(s"isDefined($none)") shouldBe evaluated(false)
runScript(s"extract($some3)") shouldBe evaluated(3L)
runScript(s"extract($none)") should produce("extract() called on unit")
}
property("size()") {
val arr = Array(1: Byte, 2: Byte, 3: Byte)
runScript("size(base58'')".stripMargin) shouldBe evaluated(0L)
runScript(s"size(base58'${ByteStr(arr).toString}')".stripMargin) shouldBe evaluated(3L)
}
property("getTransfer should extract MassTransfer transfers") {
val massTransfer = createMassTransfer()
val resultAmount = runScript(
"""
|match tx {
| case mttx : MassTransferTransaction => mttx.transfers[0].amount
| case _ => throw()
| }
|""".stripMargin,
Coproduct(massTransfer)
)
resultAmount shouldBe evaluated(massTransfer.transfers(0).amount)
val resultAddress = runScript(
"""
|match tx {
| case mttx : MassTransferTransaction =>
| match mttx.transfers[0].recipient {
| case address : Address => address.bytes
| case _ => throw()
| }
| case _ => throw()
| }
|""".stripMargin,
Coproduct(massTransfer)
)
resultAddress shouldBe evaluated(ByteStr(massTransfer.transfers(0).address.bytes))
val resultLen = runScript(
"""
|match tx {
| case mttx : MassTransferTransaction => size(mttx.transfers)
| case _ => throw()
| }
|""".stripMargin,
Coproduct(massTransfer)
)
resultLen shouldBe evaluated(massTransfer.transfers.size.toLong)
}
property("+ should check overflow") {
runScript("2 + 3") shouldBe evaluated(5L)
runScript(s"1 + ${Long.MaxValue}") should produce("long overflow")
}
// Each match branch may rebind `tx` to the narrowed type; this must compile and evaluate.
property("general shadowing verification") {
Seq(
TxHelpers.transfer(version = TxVersion.V1),
TxHelpers.transfer(),
TxHelpers.issue(version = TxVersion.V1),
createMassTransfer()
).foreach { tx =>
val result = runScript(
s"""
|match tx {
| case tx : TransferTransaction => tx.id == base58'${tx.id().toString}'
| case tx : IssueTransaction => tx.fee == ${tx.assetFee._2}
| case tx : MassTransferTransaction => tx.timestamp == ${tx.timestamp}
| case _ => throw()
| }
|""".stripMargin,
Coproduct(tx)
)
result shouldBe evaluated(true)
}
}
// Rebinding a name already defined with `let` in the same scope must be a compile error.
property("negative shadowing verification") {
Seq(
TxHelpers.transfer(),
TxHelpers.issue(version = TxVersion.V1),
createMassTransfer()
).foreach { tx =>
Try[Either[String, _]] {
runScript(
s"""
|let t = 100
|match tx {
| case t: TransferTransaction => t.id == base58'${tx.id().toString}'
| case t: IssueTransaction => t.fee == ${tx.assetFee._2}
| case t: MassTransferTransaction => t.timestamp == ${tx.timestamp}
| case _ => throw()
| }
|""".stripMargin,
Coproduct(tx)
)
}.recover {
case ex: MatchError =>
Assertions.assert(ex.getMessage().contains("Compilation failed: Value 't' already defined in the scope"))
case _: Throwable => Assertions.fail("Some unexpected error")
}
}
}
// Matching on an undeclared reference must fail compilation with a "not found" error.
property("shadowing of empty ref") {
Try {
runScript(
s"""
|match p {
| case _: TransferTransaction => true
| case _ => throw()
| }
|""".stripMargin
)
}.recover {
case ex: MatchError => Assertions.assert(ex.getMessage().contains("Compilation failed: A definition of 'p' is not found"))
case _: Throwable => Assertions.fail("Some unexpected error")
}
}
// A nested match may re-narrow the already-narrowed `tx` from the outer union case.
property("shadowing of inner pattern matching") {
Seq(
TxHelpers.transfer(),
TxHelpers.issue(version = TxVersion.V1)
).foreach { tx =>
val result =
runScript(
s"""
|match tx {
| case tx: TransferTransaction | IssueTransaction => {
| match tx {
| case tx: TransferTransaction => tx.id == base58'${tx.id().toString}'
| case tx: IssueTransaction => tx.fee == ${tx.assetFee._2}
| }
| }
| case _ => throw()
|}
|""".stripMargin,
Coproduct(tx)
)
result shouldBe evaluated(true)
}
}
// A case pattern must not shadow a binding made inside the matched block expression.
property("shadowing of variable considered external") {
runScript(
s"""
|match {
| let aaa = 1
| tx
|} {
| case tx: TransferTransaction => tx == tx
| case _ => throw()
| }
|""".stripMargin
) should produce("already defined")
}
property("data constructors") {
val sender = TxHelpers.signer(1)
val recipient = TxHelpers.signer(2)
val transfer = TxHelpers.transfer(from = sender, to = recipient.toAddress)
val entry = IntegerDataEntry("key", 123L)
// Build the equality clause matching whichever recipient representation the tx uses.
val compareClause = (transfer.recipient: @unchecked) match {
case addr: Address => s"tx.recipient == Address(base58'${addr.stringRepr}')"
case alias: Alias => s"""tx.recipient == Alias("${alias.name}")"""
}
val transferResult = runScript(
s"""
|match tx {
| case tx: TransferTransaction =>
| let goodEq = $compareClause
| let badAddressEq = tx.recipient == Address(base58'Mbembangwana')
| let badAddressNe = tx.recipient != Address(base58'3AfZaKieM5')
| let badAliasEq = tx.recipient == Alias("Ramakafana")
| let badAliasNe = tx.recipient != Alias("Nuripitia")
| goodEq && !badAddressEq && badAddressNe && !badAliasEq && badAliasNe
| case _ => throw()
|}
|""".stripMargin,
Coproduct(transfer)
)
transferResult shouldBe evaluated(true)
val dataTx = TxHelpers.data(sender, Seq(entry))
// DataEntry equality must be sensitive to both the key and the value's type.
val dataResult = runScript(
s"""
|match tx {
| case tx: DataTransaction =>
| let intEq = tx.data[0] == DataEntry("${entry.key}", ${entry.value})
| let intNe = tx.data[0] != DataEntry("${entry.key}", ${entry.value})
| let boolEq = tx.data[0] == DataEntry("${entry.key}", true)
| let boolNe = tx.data[0] != DataEntry("${entry.key}", true)
| let binEq = tx.data[0] == DataEntry("${entry.key}", base64'WROOooommmmm')
| let binNe = tx.data[0] != DataEntry("${entry.key}", base64'FlapFlap')
| let strEq = tx.data[0] == DataEntry("${entry.key}", "${entry.value}")
| let strNe = tx.data[0] != DataEntry("${entry.key}", "Zam")
| intEq && !intNe && !boolEq && boolNe && !binEq && binNe && !strEq && strNe
| case _ => throw()
|}
""".stripMargin,
Coproduct(dataTx)
)
dataResult shouldBe evaluated(true)
}
// Malformed constructor calls must each fail with the expected compiler message.
property("data constructors bad syntax") {
val realAddr = "3My3KZgFQ3CrVHgz6vGRt8687sH4oAA1qp8"
val cases = Seq(
(s"""Address(\\"$realAddr\\")""", "Compilation failed: Non-matching types"),
("Address(base58'GzumLunBoK', 4)", "Function 'Address' requires 1 arguments, but 2 are provided"),
("Address()", "Function 'Address' requires 1 arguments, but 0 are provided"),
(s"Addr(base58'$realAddr')", "Can't find a function 'Addr'")
)
for ((clause, err) <- cases) {
Try[Either[String, _]] {
runScript(
s"""
|match tx {
| case _: TransferTransaction =>
| let dza = $clause
| throw()
| case _ => throw()
|}
|""".stripMargin
)
}.recover {
case ex: MatchError => Assertions.assert(ex.getMessage().contains(err))
case e: Throwable => Assertions.fail("Unexpected error", e)
}
}
}
// Builds a v1 MassTransfer with ten 1-WAVES outputs from signer(1) to addresses 2..11.
private def createMassTransfer(): MassTransferTransaction = {
val sender = TxHelpers.signer(1)
val recipients = (1 to 10).map(idx => TxHelpers.address(idx + 1))
TxHelpers.massTransfer(
from = sender,
to = recipients.map(addr => ParsedTransfer(addr, 1.waves)),
version = TxVersion.V1
)
}
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/state/diffs/smart/predef/CommonFunctionsTest.scala | Scala | mit | 11,217 |
import sbt.Keys._
import sbt._
import sbtstudent.AdditionalSettings
object CommonSettings {
// Settings applied to every module: compiler options, Java 8 bytecode target, a single
// (non-nested) source directory per configuration, sequential un-buffered test output,
// shared dependencies, plus the student tooling's console commands and command aliases.
lazy val commonSettings = Seq(
Compile / scalacOptions ++= CompileOptions.compileOptions,
Compile / javacOptions ++= Seq("--release", "8"),
Compile / unmanagedSourceDirectories := List((Compile / scalaSource).value, (Compile / javaSource).value),
Test / unmanagedSourceDirectories := List((Test / scalaSource).value, (Test / javaSource).value),
Test / logBuffered := false,
Test / parallelExecution := false,
libraryDependencies ++= Dependencies.dependencies,
) ++
AdditionalSettings.initialCmdsConsole ++
AdditionalSettings.initialCmdsTestConsole ++
AdditionalSettings.cmdAliases
// Applies the common settings to a project; intended for `.configure(CommonSettings.configure)`.
lazy val configure: Project => Project = (project: Project) => {
project.settings(CommonSettings.commonSettings: _*)
}
}
| lightbend-training/course-management-tools | course-templates/scala-cmt-template-no-common/project/CommonSettings.scala | Scala | apache-2.0 | 848 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.collection
package mutable
import generic._
/** Buffers are used to create sequences of elements incrementally by
 *  appending, prepending, or inserting new elements. It is also
 *  possible to access and modify elements in a random access fashion
 *  via the index of the element in the current sequence.
 *
 *  @author Matthias Zenger
 *  @author Martin Odersky
 *  @version 2.8
 *  @since   1
 *
 *  @tparam A    type of the elements contained in this buffer.
 *
 *  @define Coll Buffer
 *  @define coll buffer
 */
@cloneable
trait Buffer[A] extends Seq[A]
with GenericTraversableTemplate[A, Buffer]
with BufferLike[A, Buffer[A]] {
// Bulk transformations produce the default Buffer implementation (see companion object).
override def companion: GenericCompanion[Buffer] = Buffer
}
/** $factoryInfo
 *  @define coll buffer
 *  @define Coll Buffer
 */
object Buffer extends SeqFactory[Buffer] {
// CanBuildFrom instance so collection transformations on a Buffer yield a Buffer again.
implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, Buffer[A]] = new GenericCanBuildFrom[A]
// ArrayBuffer is the default concrete Buffer implementation.
def newBuilder[A]: Builder[A, Buffer[A]] = new ArrayBuffer
}
| cran/rkafkajars | java/scala/collection/mutable/Buffer.scala | Scala | apache-2.0 | 1,558 |
package scheduler.kafka.manager
import java.util.Date
import org.slf4j.LoggerFactory
import play.api.Play.current
import play.api.libs.json.{JsError, JsSuccess}
import play.api.libs.ws._
import scheduler.kafka.manager.SchedulerRestClient.{AddBrokerResponse, Broker, StatusResponse}
import scheduler.models.form.Failover
import scala.concurrent.{ExecutionContext, Future}
// JSON model of the Kafka-on-Mesos scheduler's REST API responses, with play-json Reads
// for each payload shape.
object SchedulerRestClient {
import play.api.libs.functional.syntax._
import play.api.libs.json._
// A broker's Mesos task: identity, placement and lifecycle state.
case class Task(id: String,
slaveId: String,
executorId: String,
hostname: String,
endpoint: Option[String],
state: String)
implicit val taskReads: Reads[Task] = (
(__ \\ 'id).read[String] and
(__ \\ 'slaveId).read[String] and
(__ \\ 'executorId).read[String] and
(__ \\ 'hostname).read[String] and
(__ \\ 'endpoint).readNullable[String] and
(__ \\ 'state).read[String]
)(Task)
// Failover/retry policy and state reported by the scheduler.
// Note: distinct from scheduler.models.form.Failover used by the client class below.
case class Failover(delay: String,
maxDelay: String,
maxTries: Option[Int],
failures: Option[Int],
failureTime: Option[Date])
implicit val failoverReads: Reads[Failover] = (
(__ \\ 'delay).read[String] and
(__ \\ 'maxDelay).read[String] and
(__ \\ 'maxTries).readNullable[Int] and
(__ \\ 'failures).readNullable[Int] and
(__ \\ 'failureTime).readNullable[Date]
)(Failover)
// Stickiness settings: how long a broker prefers to stay on its previous host.
case class Stickiness(period: String, stopTime: Option[Date], hostname: Option[String])
implicit val stickinessReads: Reads[Stickiness] = (
(__ \\ 'period).read[String] and
(__ \\ 'stopTime).readNullable[Date] and
(__ \\ 'hostname).readNullable[String]
)(Stickiness)
// Full broker configuration and (optional) running task as reported by the scheduler.
case class Broker(id: String,
active: Boolean,
cpus: Double,
mem: Long,
heap: Long,
port: Option[String],
bindAddress: Option[String],
constraints: Option[String],
options: Option[String],
log4jOptions: Option[String],
jvmOptions: Option[String],
stickiness: Stickiness,
failover: Failover,
task: Option[Task])
implicit val brokerReads: Reads[Broker] = (
(__ \\ 'id).read[String] and
(__ \\ 'active).read[Boolean] and
(__ \\ 'cpus).read[Double] and
(__ \\ 'mem).read[Long] and
(__ \\ 'heap).read[Long] and
(__ \\ 'port).readNullable[String] and
(__ \\ 'bindAddress).readNullable[String] and
(__ \\ 'constraints).readNullable[String] and
(__ \\ 'options).readNullable[String] and
(__ \\ 'log4jOptions).readNullable[String] and
(__ \\ 'jvmOptions).readNullable[String] and
(__ \\ 'stickiness).read[Stickiness] and
(__ \\ 'failover).read[Failover] and
(__ \\ 'task).readNullable[Task]
)(Broker)
// Response of the broker list endpoint.
case class StatusResponse(brokers: Option[Seq[Broker]], frameworkId: Option[String])
implicit val statusResponseReads: Reads[StatusResponse] = (
(__ \\ 'brokers).readNullable[Seq[Broker]] and
(__ \\ 'frameworkId).readNullable[String]
)(StatusResponse)
// Response of the broker add endpoint.
case class AddBrokerResponse(brokers: Seq[Broker])
implicit val addBrokerResponseReads: Reads[AddBrokerResponse] =
(__ \\ 'brokers).read[Seq[Broker]].map(AddBrokerResponse)
}
/**
 * HTTP client for the Kafka-on-Mesos scheduler REST API rooted at `apiUrl`.
 *
 * All endpoints are invoked as GET requests with a fixed timeout; the query-string
 * construction shared by `addBroker` and `updateBroker` (previously duplicated) now
 * lives in a single private helper, as does the common WS GET boilerplate.
 */
class SchedulerRestClient(val apiUrl: String)(implicit val executionContext: ExecutionContext) {
  private[this] lazy val logger = LoggerFactory.getLogger(this.getClass)

  private val BrokerApiPrefix = s"$apiUrl/api/broker"
  private val StatusUrl = s"$BrokerApiPrefix/list"
  private val AddBrokerUrl = s"$BrokerApiPrefix/add"
  private val UpdateBrokerUrl = s"$BrokerApiPrefix/update"
  private val StartBrokerUrl = s"$BrokerApiPrefix/start"
  private val StopBrokerUrl = s"$BrokerApiPrefix/stop"
  private val RemoveBrokerUrl = s"$BrokerApiPrefix/remove"
  private val TopicApiPrefix = s"$apiUrl/api/topic"
  private val RebalanceTopicsUrl = s"$TopicApiPrefix/rebalance"
  // Request timeout in milliseconds, applied to every call.
  private val Timeout = 10000

  /** Performs a GET against the scheduler API with the standard timeout. */
  private def get(url: String, queryParams: Seq[(String, String)] = Seq.empty): Future[WSResponse] =
    WS.url(url)
      .withQueryString(queryParams: _*)
      .withRequestTimeout(Timeout)
      .get()

  /**
   * Builds the broker-configuration query string shared by `addBroker` and
   * `updateBroker`, dropping parameters whose value is not set.
   */
  private def brokerConfigParams(id: Int, cpus: Option[Double], mem: Option[Long], heap: Option[Long],
                                 port: Option[String], bindAddress: Option[String], constraints: Option[String],
                                 options: Option[String], log4jOptions: Option[String], jvmOptions: Option[String],
                                 stickinessPeriod: Option[String], failover: Failover): Seq[(String, String)] =
    Seq(
      "broker" -> Some(id.toString),
      "cpus" -> cpus.map(_.toString),
      "mem" -> mem.map(_.toString),
      "heap" -> heap.map(_.toString),
      "port" -> port,
      "bindAddress" -> bindAddress,
      "constraints" -> constraints,
      "options" -> options,
      "log4jOptions" -> log4jOptions,
      "jvmOptions" -> jvmOptions,
      "stickinessPeriod" -> stickinessPeriod,
      "failoverDelay" -> failover.failoverDelay.map(_.toString),
      "failoverMaxDelay" -> failover.failoverMaxDelay.map(_.toString),
      "failoverMaxTries" -> failover.failoverMaxTries.map(_.toString)
    ).collect {
      case (key, Some(v)) => (key, v)
    }

  /**
   * Fetches the scheduler status (broker list + framework id).
   * Fails the future if the response body cannot be parsed.
   */
  def getStatus: Future[StatusResponse] =
    get(StatusUrl).map(_.json.validate[StatusResponse]).flatMap {
      case JsError(e) =>
        logger.error(s"Failed to parse status response $e")
        Future.failed(new Exception("Failed to parse status response json"))
      case JsSuccess(status, _) =>
        Future.successful(status)
    }

  /**
   * Registers a new broker with the given configuration and returns the broker
   * list reported by the scheduler. Unset options are omitted from the request.
   */
  def addBroker(id: Int, cpus: Option[Double], mem: Option[Long], heap: Option[Long], port: Option[String],
                bindAddress: Option[String], constraints: Option[String], options: Option[String],
                log4jOptions: Option[String], jvmOptions: Option[String], stickinessPeriod: Option[String],
                failover: Failover): Future[Seq[Broker]] = {
    val params = brokerConfigParams(id, cpus, mem, heap, port, bindAddress, constraints,
      options, log4jOptions, jvmOptions, stickinessPeriod, failover)
    get(AddBrokerUrl, params).map(_.json.validate[AddBrokerResponse]).flatMap {
      case JsError(e) =>
        logger.error(s"Failed to parse add broker response $e")
        Future.failed(new Exception("Failed to parse add broker response json"))
      case JsSuccess(brokers, _) =>
        Future.successful(brokers.brokers)
    }
  }

  /** Updates an existing broker's configuration; the response body is ignored. */
  def updateBroker(id: Int, cpus: Option[Double], mem: Option[Long], heap: Option[Long], port: Option[String],
                   bindAddress: Option[String], constraints: Option[String], options: Option[String],
                   log4jOptions: Option[String], jvmOptions: Option[String], stickinessPeriod: Option[String],
                   failover: Failover): Future[Unit] = {
    val params = brokerConfigParams(id, cpus, mem, heap, port, bindAddress, constraints,
      options, log4jOptions, jvmOptions, stickinessPeriod, failover)
    get(UpdateBrokerUrl, params).map { _ => () }
  }

  /** Starts the broker without waiting for it to come up (`timeout=0`). */
  def startBroker(id: Int): Future[Unit] =
    get(StartBrokerUrl, Seq("broker" -> id.toString, "timeout" -> "0")).map { _ => () }

  /** Stops the broker without waiting for it to shut down (`timeout=0`). */
  def stopBroker(id: Int): Future[Unit] =
    get(StopBrokerUrl, Seq("broker" -> id.toString, "timeout" -> "0")).map { _ => () }

  /** Removes the broker from the scheduler. */
  def removeBroker(id: Int): Future[Unit] =
    get(RemoveBrokerUrl, Seq("broker" -> id.toString)).map { _ => () }

  /**
   * Triggers a partition rebalance for the given broker ids, optionally restricted
   * to specific topics (omitted from the query string when not provided).
   */
  def rebalanceTopics(ids: String, topics: Option[String]): Future[Unit] = {
    val params = Seq("broker" -> Some(ids), "topic" -> topics).collect {
      case (key, Some(v)) => (key, v)
    }
    get(RebalanceTopicsUrl, params).map { _ => () }
  }
}
| stealthly/kafka-manager | app/scheduler/kafka/manager/SchedulerRestClient.scala | Scala | apache-2.0 | 8,551 |
package play.boilerplate.parser.model
/**
 * Parsed, resolvable description of a web service API: transport details
 * (host, base path, schemes), the operations (`paths`), security settings,
 * and the shared, name-keyed model `definitions`, `parameters` and
 * `responses` that operations reference.
 */
case class Schema(host: String,
                  basePath: String,
                  version: Option[String],
                  description: Option[String],
                  schemes: Iterable[String],
                  consumes: Iterable[String],
                  produces: Iterable[String],
                  paths: Iterable[Path],
                  security: Iterable[SecurityRequirement],
                  securitySchemas: Map[String, SecuritySchema],
                  definitions: Map[String, Model],
                  parameters: Map[String, Parameter],
                  responses: Map[ResponseCode, Response]
                 ) extends WithResolve[Schema] {
  // Rebuilds the definitions map so that every model extended by at least one
  // "complex object" model is flagged as an interface, carrying the refs of
  // its extenders as children.
  // NOTE(review): relies on Model.complexObject / hasInterface semantics
  // defined elsewhere; the rebuilt Model appends the model's own ref to its
  // children list — presumably intentional, verify against Model's constructor.
  private def markInterfaces(definitions: Map[String, Model]): Map[String, Model] = {
    val complexObjects = definitions.values.flatMap(_.complexObject)
    if (complexObjects.nonEmpty) {
      for ((name, model) <- definitions) yield {
        if (model.complexObject.isEmpty) {
          // Models that some complex object declares as an interface become interfaces.
          val children = complexObjects.filter(_.hasInterface(model.ref))
          val isInterface = children.nonEmpty
          name -> new Model(name, model.ref, isInterface = isInterface, children.toList :+ model.ref)
        } else {
          // Complex objects themselves are left untouched.
          name -> model
        }
      }
    } else {
      // No complex objects at all: nothing can be an interface.
      definitions
    }
  }
  // True while any definition, parameter or response still holds an
  // unresolved (lazy) reference — i.e. resolve() still has work to do.
  override def containsLazyRef: Boolean = {
    definitions.values.exists(_.containsLazyRef) ||
    parameters.values.exists(_.containsLazyRef) ||
    responses.values.exists(_.containsLazyRef)
  }
  /**
   * Resolves all lazy references in definitions, parameters and responses via
   * the given resolver, then recomputes interface markers on the resolved
   * definitions. Paths and security settings are returned unchanged.
   */
  override def resolve(resolver: DefinitionResolver): Schema = {
    copy(
      definitions = markInterfaces(for ((name, model) <- definitions) yield {
        name -> model.resolve(resolver)
      }),
      parameters = for ((name, param) <- parameters) yield {
        name -> param.resolve(resolver)
      },
      responses = for ((code, resp) <- responses) yield {
        code -> resp.resolve(resolver)
      }
    )
  }
}
object Schema {

  /**
   * A blank schema: served from localhost at the root path, with no paths,
   * security, definitions, parameters or responses. Useful as a neutral
   * starting point or fallback value.
   */
  def empty: Schema =
    Schema(
      host = "localhost",
      basePath = "/",
      version = None,
      description = None,
      schemes = Nil,
      consumes = Nil,
      produces = Nil,
      paths = Nil,
      security = Nil,
      securitySchemas = Map.empty,
      definitions = Map.empty,
      parameters = Map.empty,
      responses = Map.empty
    )

}
package org.ensime.client
import java.io.{InputStreamReader, BufferedReader, InputStream}
import akka.actor.ActorSystem
import ammonite.ops._
import org.slf4j.LoggerFactory
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
/**
 * Provisions an ENSIME workspace for the sbt project at `projectRoot` and
 * launches the ENSIME server JVM.
 *
 * Usage: call `create()` once to generate the throw-away "Resolution" sbt
 * project (which resolves and saves the server's classpath) and the project's
 * `.ensime` file, then `startServer()` to spawn the server process.
 */
class EnsimeServerStartup(actorSystem: ActorSystem, projectRoot: Path, memoryConfig: MemoryConfig) {
  // Futures that pump the child process's stdout/stderr run on this dispatcher.
  implicit val ec: ExecutionContext = actorSystem.dispatcher
  val logger = LoggerFactory.getLogger("EnsimeServer")
  // Workspace layout: generated artefacts live under .ensime_cache.
  val dotEnsimeFile= projectRoot / ".ensime"
  val cacheDir = projectRoot / ".ensime_cache"
  val httpPortFile = cacheDir / "http"
  val tcpPortFile = cacheDir / "port"
  // Throw-away sbt project used only to resolve the ENSIME server classpath.
  val resolutionDir = cacheDir / "Resolution"
  val resolutionProjectDir = resolutionDir / "project"
  val resolutionSBTFile = resolutionDir / "build.sbt"
  // File the Resolution project writes the resolved classpath into.
  val classpathFile = resolutionDir / "classpath"
  val resolutionBuildPropertiesFile = resolutionProjectDir / "build.properties"
  val scalaVersion = "2.11.8"
  val ensimeVersion = "0.9.10-SNAPSHOT"
  // NOTE(review): "\\n" is a literal backslash-n in the written file, not a
  // newline — confirm whether sbt tolerates this in build.properties.
  def projectBuildProps = "sbt.version=0.13.11\\n"
  /**
   * Spawns the ENSIME server JVM and returns the running process.
   *
   * Requires JAVA_HOME to point at a JDK (tools.jar must exist) and
   * `create()` to have populated the classpath file. Memory/JVM flags come
   * from `memoryConfig`.
   *
   * @throws IllegalStateException if JAVA_HOME is not set
   * @throws IllegalArgumentException if tools.jar cannot be found under JAVA_HOME
   */
  def startServer(): Process = {
    logger.info("Starting ensime server")
    val javaHome = sys.env.get("JAVA_HOME")
    // The server needs the JDK compiler APIs, shipped in tools.jar pre-Java 9.
    val toolsJar = javaHome match {
      case Some(path) =>
        val toolsJarPath = Path(path) / "lib" / "tools.jar"
        if(!exists(toolsJarPath))
          throw new IllegalArgumentException(s"Cannot resolve tools jar from JAVA_HOME - expecting $toolsJarPath")
        toolsJarPath
      case None =>
        throw new IllegalStateException("JAVA_HOME not set")
    }
    // Write a logback config next to the cache so the server logs to stdout.
    val logbackConfigPath = cacheDir / "ensime-logback.xml"
    write.over(logbackConfigPath, """<configuration>
        | <contextListener class="ch.qos.logback.classic.jul.LevelChangePropagator">
        | <resetJUL>true</resetJUL>
        | </contextListener>
        | <!-- Incompatible with akka? https://groups.google.com/d/msg/akka-user/YVri58taWsM/X6-XR0_i1nwJ -->
        | <!-- <turboFilter class="ch.qos.logback.classic.turbo.DuplicateMessageFilter" /> -->
        | <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        | <encoder>
        | <!-- NOTE: this truncates messages -->
        | <pattern>%d{HH:mm:ss.SSS} %-5level %X{akkaSource:-None} %logger{10} - %.-250msg%n</pattern>
        | </encoder>
        | </appender>
        | <root level="INFO">
        | <appender-ref ref="STDOUT" />
        | </root>
        | <logger name="org.ensime" level="INFO" />
        | <logger name="akka" level="WARN" />
        | <logger name="scala.tools" level="WARN" />
        | <logger name="org.ensime.server.RichPresentationCompiler" level="WARN" />
        |</configuration>
        |""".stripMargin)
    val baseClasspath = read ! classpathFile
    // tools.jar is prepended to the sbt-resolved server classpath.
    val classpath = s"$toolsJar:$baseClasspath"
    startProcess(cacheDir, List("java", "-Densime.config=" + dotEnsimeFile,
      s"-Dlogback.configurationFile=$logbackConfigPath",
      // TODO - These should come from the .ensime file
      "-Dfile.encoding=UTF8", "-XX:+CMSClassUnloadingEnabled",
      s"-XX:MaxPermSize=${memoryConfig.maxPermSizeMb}m",
      s"-XX:ReservedCodeCacheSize=${memoryConfig.reservedCodeCacheSizeMb}m",
      s"-Xms${memoryConfig.minMemMb}m", s"-Xmx${memoryConfig.maxMemMb}m", s"-Xss${memoryConfig.stackSizeMb}m",
      "-classpath", classpath, "-Densime.explode.on.disconnect=true", "org.ensime.server.Server"))
  }
  /**
   * Starts `command` in `workingDir` and wires its stdout/stderr into the
   * logger (tagged "out"/"err"). Returns the started process.
   */
  def startProcess(workingDir: Path, command: List[String]): Process = {
    val builder = new java.lang.ProcessBuilder()
    // builder.environment().putAll(cmd.envArgs)
    builder.directory(new java.io.File(workingDir.toString))
    val cmdString = command.mkString(" ")
    logger.info(s"Starting process with commandline: $cmdString")
    val process =
      builder
        .command(command:_*)
        .start()
    val stdout = process.getInputStream
    val stderr = process.getErrorStream
    // Drain both streams so the child does not block on a full pipe buffer.
    streamLogger(stdout, "out")
    streamLogger(stderr, "err")
    process
  }
  /**
   * Asynchronously copies every line from `inputStream` to the logger,
   * prefixed with `opTag`, until the stream is exhausted (process exit).
   */
  def streamLogger(inputStream: InputStream, opTag: String): Unit = {
    Future {
      val is = new BufferedReader(new InputStreamReader(inputStream))
      var line = is.readLine()
      while(line != null) {
        logger.info(s"$opTag - $line")
        line = is.readLine()
      }
    }
  }
  /**
   * Creates the workspace: writes the Resolution sbt project, runs its
   * `saveClasspath` task to materialise the server classpath, then runs
   * `gen-ensime` in the real project to produce the .ensime file.
   */
  def create(): Unit = {
    logger.info("Creating workspace")
    mkdir! resolutionDir
    mkdir! resolutionProjectDir
    write.over(resolutionSBTFile, sbtClasspathScript(classpathFile))
    write.over(resolutionBuildPropertiesFile, projectBuildProps)
    logger.info("Running save classpath")
    // NOTE(review): the next two calls look like environment-debugging probes
    // (which sbt / who am i); their results are deliberately discarded.
    Try(%('which, "sbt")(resolutionDir))
    Try(%('who, "am", "i")(resolutionDir))
    logger.info("Running save classpath -----------------")
    // Try(%(root/'bin/'bash, "/usr/local/bin/bin/sbt", "saveClasspath")(resolutionDir))
    Try(%(root/'bin/'bash, "sbt", "-Dsbt.log.noformat=true","saveClasspath")(resolutionDir))
    //logger.info("Running save classpath -----------------")
    //%%("sbt", "saveClasspath")(resolutionDir)
    //logger.info("Running save classpath -----------------")
    logger.info("Running gen-ensime")
    %(root/'bin/'bash, "sbt","-Dsbt.log.noformat=true", "gen-ensime")(projectRoot)
    logger.info("Workspace creation complete")
  }
  // build.sbt contents for the Resolution project: pins the Scala/ENSIME
  // versions and defines a `saveClasspath` task that writes the runtime
  // classpath to `classpathFile`.
  def sbtClasspathScript(classpathFile: Path) = s"""
       |import sbt._
       |import IO._
       |import java.io._
       |
       |scalaVersion := "$scalaVersion"
       |
       |ivyScala := ivyScala.value map { _.copy(overrideScalaVersion = true) }
       |
       |// we don't need jcenter, so this speeds up resolution
       |fullResolvers -= Resolver.jcenterRepo
       |
       |// allows local builds of scala
       |resolvers += Resolver.mavenLocal
       |
       |// for java support
       |resolvers += "NetBeans" at "http://bits.netbeans.org/nexus/content/groups/netbeans"
       |
       |// this is where the ensime-server snapshots are hosted
       |resolvers += Resolver.sonatypeRepo("snapshots")
       |
       |libraryDependencies += "org.ensime" %% "ensime" % "$ensimeVersion"
       |
       |dependencyOverrides ++= Set(
       | "org.scala-lang" % "scala-compiler" % scalaVersion.value,
       | "org.scala-lang" % "scala-library" % scalaVersion.value,
       | "org.scala-lang" % "scala-reflect" % scalaVersion.value,
       | "org.scala-lang" % "scalap" % scalaVersion.value
       |)
       |val saveClasspathTask = TaskKey[Unit]("saveClasspath", "Save the classpath to a file")
       |saveClasspathTask := {
       | val managed = (managedClasspath in Runtime).value.map(_.data.getAbsolutePath)
       | val unmanaged = (unmanagedClasspath in Runtime).value.map(_.data.getAbsolutePath)
       | val out = file("$classpathFile")
       | write(out, (unmanaged ++ managed).mkString(File.pathSeparator))
       |}
       |""".stripMargin
}
| rorygraves/ensime-client | src/main/scala/org/ensime/client/EnsimeServerStartup.scala | Scala | apache-2.0 | 6,749 |
/*
Copyright (C) 2016 Mauricio Bustos (m@bustos.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.bustos.ebiexcelente
import java.io.File
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import akka.pattern.ask
import akka.util.Timeout
import org.joda.time.{DateTimeZone, DateTime}
import spray.can.Http
import com.typesafe.config.ConfigFactory
import scala.util.Properties.envOrElse
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
object EbiExcelente extends App {

  /**
   * Boots the actor system and binds the HTTP service actor to all interfaces.
   *
   * The listening port is resolved in priority order: first command-line
   * argument if present, otherwise the PORT environment variable, otherwise
   * `server.port` from the loaded configuration.
   */
  def doMain = {

    implicit val system = ActorSystem()
    implicit val timeout = Timeout(DurationInt(5).seconds)

    val config = ConfigFactory.load
    // PORT env var wins over the configured default when set.
    // (Removed an unused `portFromEnv` flag that was computed but never read.)
    val port = envOrElse("PORT", config.getString("server.port"))

    val server = system.actorOf(Props[EbiExcelenteServiceActor], "ebiExcelenteRoutes")

    // An explicit port argument overrides both the env var and the config.
    if (args.length > 0) IO(Http) ? Http.Bind(server, "0.0.0.0", args(0).toInt)
    else IO(Http) ? Http.Bind(server, "0.0.0.0", port.toInt)
  }

  doMain
}
| mbustosorg/ebi-excelente | src/main/scala/org/bustos/ebi-excelente/EbiExcelente.scala | Scala | gpl-3.0 | 1,670 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.mutable.Queue
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest._
import org.scalatest.concurrent.Eventually._
import org.scalatest.selenium.WebBrowser
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.ui.SparkUICssErrorHandler
/**
* Selenium tests for the Spark Streaming Web UI.
*/
class UISeleniumSuite
  extends SparkFunSuite with WebBrowser with Matchers with BeforeAndAfterAll with TestSuiteBase {

  implicit var webDriver: WebDriver = _

  override def beforeAll(): Unit = {
    webDriver = new HtmlUnitDriver {
      getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
    }
  }

  override def afterAll(): Unit = {
    if (webDriver != null) {
      webDriver.quit()
    }
  }

  /**
   * Create a test SparkStreamingContext with the SparkUI enabled.
   */
  private def newSparkStreamingContext(): StreamingContext = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test")
      .set("spark.ui.enabled", "true")
    val ssc = new StreamingContext(conf, Seconds(1))
    assert(ssc.sc.ui.isDefined, "Spark UI is not started!")
    ssc
  }

  /**
   * Registers three output operations on a queue stream: two that succeed and
   * one whose last job deliberately throws, so the UI shows a failed job.
   */
  private def setupStreams(ssc: StreamingContext): Unit = {
    val rdds = Queue(ssc.sc.parallelize(1 to 4, 4))
    val inputStream = ssc.queueStream(rdds)
    inputStream.foreachRDD { rdd =>
      rdd.foreach(_ => {})
      rdd.foreach(_ => {})
    }
    inputStream.foreachRDD { rdd =>
      rdd.foreach(_ => {})
      try {
        rdd.foreach(_ => throw new RuntimeException("Oops"))
      } catch {
        case e: SparkException if e.getMessage.contains("Oops") =>
      }
    }
  }

  test("attaching and detaching a Streaming tab") {
    withStreamingContext(newSparkStreamingContext()) { ssc =>
      setupStreams(ssc)
      ssc.start()

      val sparkUI = ssc.sparkContext.ui.get

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to (sparkUI.appUIAddress.stripSuffix("/"))
        find(cssSelector( """ul li a[href*="streaming"]""")) should not be (None)
      }

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        // check whether streaming page exists
        go to (sparkUI.appUIAddress.stripSuffix("/") + "/streaming")
        val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
        h3Text should contain("Streaming Statistics")

        // Check stat table
        val statTableHeaders = findAll(cssSelector("#stat-table th")).map(_.text).toSeq
        statTableHeaders.exists(
          _.matches("Timelines \\(Last \\d+ batches, \\d+ active, \\d+ completed\\)")
        ) should be (true)
        statTableHeaders should contain ("Histograms")
        val statTableCells = findAll(cssSelector("#stat-table td")).map(_.text).toSeq
        statTableCells.exists(_.contains("Input Rate")) should be (true)
        statTableCells.exists(_.contains("Scheduling Delay")) should be (true)
        statTableCells.exists(_.contains("Processing Time")) should be (true)
        statTableCells.exists(_.contains("Total Delay")) should be (true)

        // Check batch tables
        val h4Text = findAll(cssSelector("h4")).map(_.text).toSeq
        h4Text.exists(_.matches("Active Batches \\(\\d+\\)")) should be (true)
        h4Text.exists(_.matches("Completed Batches \\(last \\d+ out of \\d+\\)")) should be (true)

        findAll(cssSelector("""#active-batches-table th""")).map(_.text).toSeq should be {
          List("Batch Time", "Input Size", "Scheduling Delay (?)", "Processing Time (?)",
            "Status")
        }
        findAll(cssSelector("""#completed-batches-table th""")).map(_.text).toSeq should be {
          List("Batch Time", "Input Size", "Scheduling Delay (?)", "Processing Time (?)",
            "Total Delay (?)", "Output Ops: Succeeded/Total")
        }

        val batchLinks =
          findAll(cssSelector("""#completed-batches-table a""")).flatMap(_.attribute("href")).toSeq
        batchLinks.size should be >= 1

        // Check a normal batch page
        go to (batchLinks.last) // Last should be the first batch, so it will have some jobs
        val summaryText = findAll(cssSelector("li strong")).map(_.text).toSeq
        summaryText should contain ("Batch Duration:")
        summaryText should contain ("Input data size:")
        summaryText should contain ("Scheduling delay:")
        summaryText should contain ("Processing time:")
        summaryText should contain ("Total delay:")

        findAll(cssSelector("""#batch-job-table th""")).map(_.text).toSeq should be {
          List("Output Op Id", "Description", "Duration", "Status", "Job Id", "Duration",
            "Stages: Succeeded/Total", "Tasks (for all stages): Succeeded/Total", "Error")
        }

        // Check we have 2 output op ids
        val outputOpIds = findAll(cssSelector(".output-op-id-cell")).toSeq
        outputOpIds.map(_.attribute("rowspan")) should be (List(Some("2"), Some("2")))
        outputOpIds.map(_.text) should be (List("0", "1"))

        // Check job ids
        val jobIdCells = findAll(cssSelector( """#batch-job-table a""")).toSeq
        jobIdCells.map(_.text) should be (List("0", "1", "2", "3"))
        val jobLinks = jobIdCells.flatMap(_.attribute("href"))
        jobLinks.size should be (4)

        // Check stage progress.
        // BUG FIX: 'should be' and its expected value must be on one expression;
        // previously the List was on the next line and parsed as a separate
        // statement, so this assertion was never actually applied.
        findAll(cssSelector(""".stage-progress-cell""")).map(_.text).toSeq should be (
          List("1/1", "1/1", "1/1", "0/1 (1 failed)"))

        // Check job progress (same dangling-matcher fix as above).
        findAll(cssSelector(""".progress-cell""")).map(_.text).toSeq should be (
          List("1/1", "1/1", "1/1", "0/1 (1 failed)"))

        // Check stacktrace
        val errorCells = findAll(cssSelector(""".stacktrace-details""")).map(_.text).toSeq
        errorCells should have size 1
        errorCells(0) should include("java.lang.RuntimeException: Oops")

        // Check the job link in the batch page is right
        go to (jobLinks(0))
        val jobDetails = findAll(cssSelector("li strong")).map(_.text).toSeq
        jobDetails should contain("Status:")
        jobDetails should contain("Completed Stages:")

        // Check a batch page without id
        go to (sparkUI.appUIAddress.stripSuffix("/") + "/streaming/batch/")
        webDriver.getPageSource should include ("Missing id parameter")

        // Check a non-exist batch
        go to (sparkUI.appUIAddress.stripSuffix("/") + "/streaming/batch/?id=12345")
        webDriver.getPageSource should include ("does not exist")
      }

      ssc.stop(false)

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to (sparkUI.appUIAddress.stripSuffix("/"))
        find(cssSelector( """ul li a[href*="streaming"]""")) should be(None)
      }

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to (sparkUI.appUIAddress.stripSuffix("/") + "/streaming")
        val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
        h3Text should not contain("Streaming Statistics")
      }
    }
  }
}
| practice-vishnoi/dev-spark-1 | streaming/src/test/scala/org/apache/spark/streaming/UISeleniumSuite.scala | Scala | apache-2.0 | 7,905 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.jdk
import java.time.{Duration => JavaDuration}
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.Test
import scala.AdaptedArrowAssocWorkaround.Tx
import scala.concurrent.duration._
import scala.jdk.DurationConverters._
import scala.jdk.javaapi.{DurationConverters => conv}
import scala.util.Try
class DurationConvertersTest {

  @Test
  def scalaNanosToJavaDuration(): Unit = {
    // Each entry is (nanos input) -> (expected java seconds, expected java nanos-of-second).
    Seq[(Long, (Long, Int))](
      (Long.MinValue + 1) -> Tx(-9223372037L, 145224193), // because java duration nanos are offset from the "wrong" direction
      -1000000001L -> Tx(-2, 999999999),
      -1L -> Tx(-1, 999999999),
      0L -> Tx(0, 0),
      1L -> Tx(0, 1),
      1000000001L -> Tx(1,1),
      Long.MaxValue -> Tx(9223372036L, 854775807)
    ).foreach { case (n, (expSecs, expNanos)) =>
      val result = n.nanos.toJava
      assertEquals(s"toJava($n nanos) -> $expSecs s)", expSecs, result.getSeconds)
      assertEquals(s"toJava($n nanos) -> $expNanos n)", expNanos, result.getNano)
    }
  }

  @Test
  def scalaMilliSecondsToJavaDuration(): Unit = {
    // Boundary values cover the extremes of what a FiniteDuration in millis can hold.
    Seq[(Long, (Long, Int))](
      -9223372036854L -> Tx(-9223372037L, 146000000),
      -1L -> Tx(-1L, 999000000),
      0L -> Tx(0L, 0),
      1L -> Tx(0L, 1000000),
      9223372036854L -> Tx(9223372036L, 854000000)
    ).foreach { case (n, (expSecs, expNanos)) =>
      val result = n.millis.toJava
      assertEquals(s"toJava($n millis) -> $expSecs s)", expSecs, result.getSeconds)
      assertEquals(s"toJava($n millis) -> $expNanos n)", expNanos, result.getNano)
    }
  }

  @Test
  def scalaMicroSecondsToJavaDuration(): Unit = {
    Seq[(Long, (Long, Int))](
      -9223372036854775L -> Tx(-9223372037L, 145225000),
      -1L -> Tx(-1L, 999999000),
      0L -> Tx(0L, 0),
      1L -> Tx(0L, 1000),
      9223372036854775L -> Tx(9223372036L, 854775000)
    ).foreach { case (n, (expSecs, expNanos)) =>
      val result = n.micros.toJava
      assertEquals(s"toJava($n micros) -> $expSecs s)", expSecs, result.getSeconds)
      assertEquals(s"toJava($n micros) -> $expNanos n)", expNanos, result.getNano)
    }
  }

  @Test
  def scalaSecondsToJavaDuration(): Unit = {
    // Whole seconds convert exactly: nano part must always be zero.
    Seq[(Long, (Long, Int))](
      -9223372036L -> Tx(-9223372036L, 0),
      -1L -> Tx(-1L, 0),
      0L -> Tx(0L, 0),
      1L -> Tx(1L, 0),
      9223372036L -> Tx(9223372036L, 0)
    ).foreach { case (n, (expSecs, expNanos)) =>
      val result = n.seconds.toJava
      assertEquals(expSecs, result.getSeconds)
      assertEquals(expNanos, result.getNano)
    }
  }


  @Test
  def javaSecondsToScalaDuration(): Unit = {
    // Round-trip of whole seconds through the javaapi converter.
    Seq[Long](-9223372036L, -1L, 0L, 1L, 9223372036L).foreach { n =>
      assertEquals(n, conv.toScala(JavaDuration.ofSeconds(n)).toSeconds)
    }
  }


  @Test
  def javaNanosPartToScalaDuration(): Unit = {
    // Values around +/- one second exercise the nanos-of-second boundary.
    val nanosPerSecond = 1000000000L
    Seq[Long](-nanosPerSecond - 1L, 0L, 1L, nanosPerSecond - 1L).foreach { n =>
      assertEquals(n, conv.toScala(JavaDuration.ofNanos(n)).toNanos)
    }
  }

  @Test
  def unsupportedJavaDurationThrows(): Unit = {
    // Java Durations just outside FiniteDuration's representable range must fail.
    Seq(JavaDuration.ofSeconds(-9223372037L), JavaDuration.ofSeconds(9223372037L)).foreach { d =>
      val res = Try { conv.toScala(d) }
      assertTrue(s"Expected exception for $d but got success", res.isFailure)
    }
  }

}
| scala/scala | test/junit/scala/jdk/DurationConvertersTest.scala | Scala | apache-2.0 | 3,763 |
package inloopio.math.random
import java.util.Random
import java.util.concurrent.locks.ReentrantLock
/**
* <p>A Java version of George Marsaglia's
* <a href="http://school.anhb.uwa.edu.au/personalpages/kwessen/shared/Marsaglia03.html">Complementary
* Multiply With Carry (CMWC) RNG</a>.
* This is a very fast PRNG with an extremely long period (2^131104). It should be used
* in preference to the {@link MersenneTwisterRNG} when a very long period is required.</p>
*
* <p>One potential drawback of this RNG is that it requires significantly more seed data than
* the other RNGs provided by Uncommons Maths. It requires just over 16 kilobytes, which may
* be a problem if your are obtaining seed data from a slow or limited entropy source.
* In contrast, the Mersenne Twister requires only 128 bits of seed data.</p>
*
* @author Daniel Dyer
* @since 1.2
*/
class CMWC4096RNG private (seed: Array[Byte], state: Array[Int]) extends Random with RepeatableRNG {

  private var carry = 362436 // TO DO: This should be randomly generated.
  // Index into the 4,096-entry lag table; advanced circularly on every draw.
  private var index = 4095

  // Lock to prevent concurrent modification of the RNG's internal state.
  private val lock = new ReentrantLock

  // Defensive copy so callers cannot mutate the seed this RNG was built from.
  def getSeed = seed.clone

  /**
   * Core CMWC step, mirroring Marsaglia's complementary multiply-with-carry
   * recurrence: t = A * x + carry; the new carry is the high 32-bit word of t,
   * and the complemented low word is stored back into the lag table. Returns
   * the top `bits` bits of the new state word, as java.util.Random requires.
   */
  override protected def next(bits: Int): Int = {
    try {
      lock.lock
      index = (index + 1) & 4095
      // Mask to treat the 32-bit state entry as an unsigned value.
      val t = CMWC4096RNG.A * (state(index) & 0xFFFFFFFFL) + carry
      carry = (t >> 32).toInt
      var x = t.toInt + carry
      if (x < carry) { // 32-bit overflow: propagate one into x and the carry
        x += 1
        carry += 1
      }
      // Complementary step: store 2^32 - 2 - x.
      state(index) = 0xFFFFFFFE - x
      state(index) >>> (32 - bits)
    } finally {
      lock.unlock
    }
  }
}
object CMWC4096RNG {

  // The generator consumes 4,096 32-bit words of seed material.
  private val SEED_SIZE_BYTES = 16384
  // Multiplier from Marsaglia's CMWC4096 construction.
  private val A = 18782L

  /**
   * Creates a new RNG and seeds it using the default seeding strategy.
   */
  def apply(): CMWC4096RNG =
    apply(SeedGenerator.generateSeed(SEED_SIZE_BYTES))

  /**
   * Seed the RNG using the provided seed generation strategy.
   * @param seedGenerator The seed generation strategy that will provide
   * the seed value for this RNG.
   * @throws SeedException If there is a problem generating a seed.
   */
  @throws(classOf[SeedException])
  def apply(seedGenerator: SeedGenerator): CMWC4096RNG =
    apply(seedGenerator.generateSeed(SEED_SIZE_BYTES))

  /**
   * Creates an RNG and seeds it with the specified seed data.
   * @param seed The seed data used to initialise the RNG; must be exactly 16kb.
   */
  def apply(seed: Array[Byte]): CMWC4096RNG =
    if (seed != null && seed.length == SEED_SIZE_BYTES)
      new CMWC4096RNG(seed.clone, BinaryUtils.convertBytesToInts(seed))
    else
      throw new IllegalArgumentException("CMWC RNG requires 16kb of seed data.")
}
package net.benchmark.akka.http.world
import akka.http.scaladsl.model.HttpCharsets.`UTF-8`
import akka.http.scaladsl.model.{HttpEntity, HttpResponse, MediaType}
import akka.http.scaladsl.server.Directives.{complete, path}
import akka.http.scaladsl.server.Route
object PlainTextRoute {

  // The benchmark reply never changes, so the response is built exactly once.
  private val helloWorldResponse = HttpResponse(
    entity = HttpEntity(MediaType.customWithFixedCharset("text", "plain", `UTF-8`), "Hello, World!"))

  /** GET /plaintext — always answers with the canned "Hello, World!" response. */
  def route: Route = path("plaintext") {
    complete(helloWorldResponse)
  }
}
| zloster/FrameworkBenchmarks | frameworks/Scala/akka-http/akka-http-slick-postgres/src/main/scala/net/benchmark/akka/http/world/PlainTextRoute.scala | Scala | bsd-3-clause | 496 |
package com.avsystem.commons
package serialization.json
import com.avsystem.commons.serialization.{GenCodecRoundtripTest, Input, Output}
class JsonGenCodecRoundtripTest extends GenCodecRoundtripTest {
  type Raw = String

  /** Runs the writer against a JSON output backed by a string builder and returns the JSON text. */
  def writeToOutput(write: Output => Unit): String = {
    val builder = new JStringBuilder
    val output = new JsonStringOutput(builder)
    write(output)
    builder.toString
  }

  /** Wraps the raw JSON text in a reader-backed input for decoding. */
  def createInput(raw: String): Input = {
    val reader = new JsonReader(raw)
    new JsonStringInput(reader)
  }
}
| AVSystem/scala-commons | commons-core/src/test/scala/com/avsystem/commons/serialization/json/JsonGenCodecRoundtripTest.scala | Scala | mit | 455 |
package spark.util
/**
* An extractor object for parsing strings into integers.
*/
object IntParam {
  /** Returns `Some(parsed value)` when `str` is a valid integer literal, `None` otherwise. */
  def unapply(str: String): Option[Int] =
    try Some(str.toInt)
    catch { case _: NumberFormatException => None }
}
| ankurdave/arthur | core/src/main/scala/spark/util/IntParam.scala | Scala | bsd-3-clause | 250 |
package ohnosequences.nispero.bundles
import ohnosequences.statika._
import ohnosequences.typesets._
import ohnosequences.nispero.{Names}
import org.clapper.avsl.Logger
abstract class Resources(val configuration: Configuration, aws: AWS) extends Bundle(configuration :~: aws :~: ∅) {

  val resources = configuration.config.resources
  val config = configuration.config

  /**
   * Provisions all AWS resources the pipeline needs: error/output topics with
   * their subscribed queues, the input and control queues, the email
   * notification topic, the S3 bucket, and the DynamoDB workers-state table.
   *
   * Ordering matters: each topic is created before the queue that subscribes
   * to it. The create* calls are idempotent on the AWS side, so re-running
   * install against existing resources is safe — presumably; verify against
   * the aws helper implementations.
   */
  override def install[D <: AnyDistribution](distribution: D): InstallResults = {
    val config = configuration.config
    val logger = Logger(this.getClass)

    import aws._

    logger.info("installing resources")
    logger.info("creating error topic: " + resources.errorTopic)
    val errorTopic = sns.createTopic(resources.errorTopic)
    logger.info("creating error queue: " + resources.errorQueue)
    val errorQueue = sqs.createQueue(resources.errorQueue)
    logger.info("subscribing error queue to error topic")
    errorTopic.subscribeQueue(errorQueue)

    logger.info("creating input queue: " + resources.inputQueue)
    val inputQueue = sqs.createQueue(resources.inputQueue)

    logger.info("creating control queue: " + resources.controlQueue)
    sqs.createQueue(resources.controlQueue)

    logger.info("creating output topic: " + resources.outputTopic)
    val outputTopic = sns.createTopic(resources.outputTopic)
    logger.info("creating output queue: " + resources.outputQueue)
    val outputQueue = sqs.createQueue(resources.outputQueue)
    logger.info("subscribing output queue to output topic")
    outputTopic.subscribeQueue(outputQueue)

    logger.info("creating notification topic: " + config.notificationTopic)
    val topic = sns.createTopic(config.notificationTopic)

    // Email subscriptions require manual confirmation by the recipient.
    if (!topic.isEmailSubscribed(config.email)) {
      logger.info("subscribing " + config.email + " to notification topic")
      topic.subscribeEmail(config.email)
      logger.info("please confirm subscription")
    }

    logger.info("creating bucket " + resources.bucket)
    aws.s3.createBucket(config.resources.bucket)

    logger.info("creating farm state table")
    dynamoDB.createTable(config.resources.workersStateTable, Names.Tables.WORKERS_STATE_HASH_KEY, Names.Tables.WORKERS_STATE_RANGE_KEY)

    success("resources bundle finished")
  }
}
| ohnosequences/nispero | nispero-abstract/src/main/scala/ohnosequences/nispero/bundles/Resources.scala | Scala | agpl-3.0 | 2,241 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.util
import java.util.{Collections, Optional}
import kafka.admin.ConsumerGroupCommand.{ConsumerGroupCommandOptions, ConsumerGroupService}
import org.apache.kafka.clients.admin._
import org.apache.kafka.clients.consumer.{OffsetAndMetadata, RangeAssignor}
import org.apache.kafka.common.{ConsumerGroupState, KafkaFuture, Node, TopicPartition, TopicPartitionInfo}
import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
import org.junit.jupiter.api.Test
import org.mockito.ArgumentMatchers
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import org.mockito.ArgumentMatcher
import scala.jdk.CollectionConverters._
import org.apache.kafka.common.internals.KafkaFutureImpl
class ConsumerGroupServiceTest {
private val group = "testGroup"
private val topics = (0 until 5).map(i => s"testTopic$i")
private val numPartitions = 10
private val topicPartitions = topics.flatMap(topic => (0 until numPartitions).map(i => new TopicPartition(topic, i)))
private val admin = mock(classOf[Admin])
  @Test
  def testAdminRequestsForDescribeOffsets(): Unit = {
    // CLI invocation equivalent to: kafka-consumer-groups --describe --offsets
    val args = Array("--bootstrap-server", "localhost:9092", "--group", group, "--describe", "--offsets")
    val groupService = consumerGroupService(args)

    // Stub the three admin calls collectGroupOffsets is expected to make.
    // NOTE(review): consumerGroupService, describeGroupsResult,
    // listGroupOffsetsResult, listOffsetsResult and offsetsArgMatcher are
    // helpers defined elsewhere in this suite.
    when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any()))
      .thenReturn(describeGroupsResult(ConsumerGroupState.STABLE))
    when(admin.listConsumerGroupOffsets(ArgumentMatchers.eq(group), any()))
      .thenReturn(listGroupOffsetsResult)
    when(admin.listOffsets(offsetsArgMatcher, any()))
      .thenReturn(listOffsetsResult)

    val (state, assignments) = groupService.collectGroupOffsets(group)
    assertEquals(Some("Stable"), state)
    assertTrue(assignments.nonEmpty)
    // One assignment row per stubbed topic partition.
    assertEquals(topicPartitions.size, assignments.get.size)

    // Each admin API must have been hit exactly once.
    verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any())
    verify(admin, times(1)).listConsumerGroupOffsets(ArgumentMatchers.eq(group), any())
    verify(admin, times(1)).listOffsets(offsetsArgMatcher, any())
  }
  // Exercises collectGroupOffsets when committed offsets are a mix of valid values,
  // explicit nulls (negative offsets reported by the broker), and missing entries.
  // Partitions without a usable commit must surface as None rather than fail.
  @Test
  def testAdminRequestsForDescribeNegativeOffsets(): Unit = {
    val args = Array("--bootstrap-server", "localhost:9092", "--group", group, "--describe", "--offsets")
    val groupService = consumerGroupService(args)
    val testTopicPartition0 = new TopicPartition("testTopic1", 0);
    val testTopicPartition1 = new TopicPartition("testTopic1", 1);
    val testTopicPartition2 = new TopicPartition("testTopic1", 2);
    val testTopicPartition3 = new TopicPartition("testTopic2", 0);
    val testTopicPartition4 = new TopicPartition("testTopic2", 1);
    val testTopicPartition5 = new TopicPartition("testTopic2", 2);
    // Some topic's partitions gets valid OffsetAndMetada values, other gets nulls values (negative integers) and others aren't defined
    val commitedOffsets = Map(
      testTopicPartition1 -> new OffsetAndMetadata(100),
      testTopicPartition2 -> null,
      testTopicPartition3 -> new OffsetAndMetadata(100),
      testTopicPartition4 -> new OffsetAndMetadata(100),
      testTopicPartition5 -> null,
    ).asJava
    // Every partition resolves the same end offset (100); only the committed side varies.
    val resultInfo = new ListOffsetsResult.ListOffsetsResultInfo(100, System.currentTimeMillis, Optional.of(1))
    val endOffsets = Map(
      testTopicPartition0 -> KafkaFuture.completedFuture(resultInfo),
      testTopicPartition1 -> KafkaFuture.completedFuture(resultInfo),
      testTopicPartition2 -> KafkaFuture.completedFuture(resultInfo),
      testTopicPartition3 -> KafkaFuture.completedFuture(resultInfo),
      testTopicPartition4 -> KafkaFuture.completedFuture(resultInfo),
      testTopicPartition5 -> KafkaFuture.completedFuture(resultInfo),
    )
    // testTopic1 partitions are assigned to the member; testTopic2 partitions are
    // committed-but-unassigned, so the service must look them up separately.
    val assignedTopicPartitions = Set(testTopicPartition0, testTopicPartition1, testTopicPartition2)
    val unassignedTopicPartitions = Set(testTopicPartition3, testTopicPartition4, testTopicPartition5)
    val consumerGroupDescription = new ConsumerGroupDescription(group,
      true,
      Collections.singleton(new MemberDescription("member1", Optional.of("instance1"), "client1", "host1", new MemberAssignment(assignedTopicPartitions.asJava))),
      classOf[RangeAssignor].getName,
      ConsumerGroupState.STABLE,
      new Node(1, "localhost", 9092))
    // Matches a listOffsets request map covering exactly the given partition set.
    def offsetsArgMatcher(expectedPartitions: Set[TopicPartition]): ArgumentMatcher[util.Map[TopicPartition, OffsetSpec]] = {
      topicPartitionOffsets => topicPartitionOffsets != null && topicPartitionOffsets.keySet.asScala.equals(expectedPartitions)
    }
    val future = new KafkaFutureImpl[ConsumerGroupDescription]()
    future.complete(consumerGroupDescription)
    when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any()))
      .thenReturn(new DescribeConsumerGroupsResult(Collections.singletonMap(group, future)))
    when(admin.listConsumerGroupOffsets(ArgumentMatchers.eq(group), any()))
      .thenReturn(AdminClientTestUtils.listConsumerGroupOffsetsResult(commitedOffsets))
    // Two separate listOffsets stubs: one per partition group, so we can also verify
    // below that each group triggers exactly one admin round trip.
    when(admin.listOffsets(
      ArgumentMatchers.argThat(offsetsArgMatcher(assignedTopicPartitions)),
      any()
    )).thenReturn(new ListOffsetsResult(endOffsets.filter { case (tp, _) => assignedTopicPartitions.contains(tp) }.asJava))
    when(admin.listOffsets(
      ArgumentMatchers.argThat(offsetsArgMatcher(unassignedTopicPartitions)),
      any()
    )).thenReturn(new ListOffsetsResult(endOffsets.filter { case (tp, _) => unassignedTopicPartitions.contains(tp) }.asJava))
    val (state, assignments) = groupService.collectGroupOffsets(group)
    val returnedOffsets = assignments.map { results =>
      results.map { assignment =>
        new TopicPartition(assignment.topic.get, assignment.partition.get) -> assignment.offset
      }.toMap
    }.getOrElse(Map.empty)
    // Partitions with no commit or a null OffsetAndMetadata must come back as None.
    val expectedOffsets = Map(
      testTopicPartition0 -> None,
      testTopicPartition1 -> Some(100),
      testTopicPartition2 -> None,
      testTopicPartition3 -> Some(100),
      testTopicPartition4 -> Some(100),
      testTopicPartition5 -> None
    )
    assertEquals(Some("Stable"), state)
    assertEquals(expectedOffsets, returnedOffsets)
    verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any())
    verify(admin, times(1)).listConsumerGroupOffsets(ArgumentMatchers.eq(group), any())
    verify(admin, times(1)).listOffsets(ArgumentMatchers.argThat(offsetsArgMatcher(assignedTopicPartitions)), any())
    verify(admin, times(1)).listOffsets(ArgumentMatchers.argThat(offsetsArgMatcher(unassignedTopicPartitions)), any())
  }
  // Verifies that --reset-offsets --to-latest issues exactly one describeConsumerGroups,
  // one describeTopics (only for topics given without explicit partitions), and one
  // listOffsets call, and that the reset result covers all expected partitions.
  @Test
  def testAdminRequestsForResetOffsets(): Unit = {
    val args = Seq("--bootstrap-server", "localhost:9092", "--group", group, "--reset-offsets", "--to-latest")
    // First topic is passed with an explicit partition list; the rest are bare topic
    // names, which forces the service to resolve their partitions via describeTopics.
    val topicsWithoutPartitionsSpecified = topics.tail
    val topicArgs = Seq("--topic", s"${topics.head}:${(0 until numPartitions).mkString(",")}") ++
      topicsWithoutPartitionsSpecified.flatMap(topic => Seq("--topic", topic))
    val groupService = consumerGroupService((args ++ topicArgs).toArray)
    // Group must be DEAD for a reset to be permitted.
    when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any()))
      .thenReturn(describeGroupsResult(ConsumerGroupState.DEAD))
    when(admin.describeTopics(ArgumentMatchers.eq(topicsWithoutPartitionsSpecified.asJava), any()))
      .thenReturn(describeTopicsResult(topicsWithoutPartitionsSpecified))
    when(admin.listOffsets(offsetsArgMatcher, any()))
      .thenReturn(listOffsetsResult)
    val resetResult = groupService.resetOffsets()
    assertEquals(Set(group), resetResult.keySet)
    assertEquals(topicPartitions.toSet, resetResult(group).keySet)
    verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(group)), any())
    verify(admin, times(1)).describeTopics(ArgumentMatchers.eq(topicsWithoutPartitionsSpecified.asJava), any())
    verify(admin, times(1)).listOffsets(offsetsArgMatcher, any())
  }
private def consumerGroupService(args: Array[String]): ConsumerGroupService = {
new ConsumerGroupService(new ConsumerGroupCommandOptions(args)) {
override protected def createAdminClient(configOverrides: collection.Map[String, String]): Admin = {
admin
}
}
}
private def describeGroupsResult(groupState: ConsumerGroupState): DescribeConsumerGroupsResult = {
val member1 = new MemberDescription("member1", Optional.of("instance1"), "client1", "host1", null)
val description = new ConsumerGroupDescription(group,
true,
Collections.singleton(member1),
classOf[RangeAssignor].getName,
groupState,
new Node(1, "localhost", 9092))
val future = new KafkaFutureImpl[ConsumerGroupDescription]()
future.complete(description)
new DescribeConsumerGroupsResult(Collections.singletonMap(group, future))
}
private def listGroupOffsetsResult: ListConsumerGroupOffsetsResult = {
val offsets = topicPartitions.map(_ -> new OffsetAndMetadata(100)).toMap.asJava
AdminClientTestUtils.listConsumerGroupOffsetsResult(offsets)
}
  // Mockito argument matcher: accepts a listOffsets request map iff it covers exactly the
  // expected test partitions and requests the LATEST offset for each of them.
  // NOTE(review): argThat registers the matcher as a side effect and returns a default
  // value; the declared util.Map return type exists only so call sites type-check.
  private def offsetsArgMatcher: util.Map[TopicPartition, OffsetSpec] = {
    val expectedOffsets = topicPartitions.map(tp => tp -> OffsetSpec.latest).toMap
    ArgumentMatchers.argThat[util.Map[TopicPartition, OffsetSpec]] { map =>
      map.keySet.asScala == expectedOffsets.keySet && map.values.asScala.forall(_.isInstanceOf[OffsetSpec.LatestSpec])
    }
  }
private def listOffsetsResult: ListOffsetsResult = {
val resultInfo = new ListOffsetsResult.ListOffsetsResultInfo(100, System.currentTimeMillis, Optional.of(1))
val futures = topicPartitions.map(_ -> KafkaFuture.completedFuture(resultInfo)).toMap
new ListOffsetsResult(futures.asJava)
}
private def describeTopicsResult(topics: Seq[String]): DescribeTopicsResult = {
val topicDescriptions = topics.map { topic =>
val partitions = (0 until numPartitions).map(i => new TopicPartitionInfo(i, null, Collections.emptyList[Node], Collections.emptyList[Node]))
topic -> new TopicDescription(topic, false, partitions.asJava)
}.toMap
AdminClientTestUtils.describeTopicsResult(topicDescriptions.asJava)
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/admin/ConsumerGroupServiceTest.scala | Scala | apache-2.0 | 11,003 |
package slinkydemo.http
import Web._, Api._
import scalaz._
import scalaz.http.servlet.{HttpServletRequest}
import scalaz.http.response._
import scalaz.http.request._
import scalaz.http.scapps.Route._
import scalaz.http.scapps.Scapps._
import scalaz.http.scapps.{BaseApp, Route}
// TODO Conference registration. Register name & organisation, see who else is registered. Search. API for it all.
// Entry point for the demo web app: maps the root path and the /api sub-paths onto
// handler functions via scalaz-scapps route combinators.
final class SlinkyDemoApplication extends BaseApp {
  // Route table; the List of routes is combined into a single Kleisli via the
  // implicit conversions brought in from scalaz.http.scapps above.
  // Each route only fires for the given HTTP method (>=> GET / POST guards).
  val routes: Kleisli[Option, Request[Stream], Response[Stream]] =
    List(
      exactPath("/") >=> GET >=> webRoot _,
      startsWith("/api") >=> List(
        exactPath("/") >=> GET >=> apiUsage _,
        startsWith("/register") >=> POST >=> apiRegister _,
        startsWith("/registrants") >=> GET >=> apiRegistrants _,
        startsWith("/search") >=> GET >=> apiSearch _
      )
    )
  // BaseApp hook: dispatch the incoming request through the route table.
  def route(implicit request: Request[Stream], servletRequest: HttpServletRequest) = routes(request)
}
| tomjadams/slinky-demo | src/main/scala/slinkydemo/http/SlinkyDemoApplication.scala | Scala | apache-2.0 | 966 |
package skinny.task.generator
import java.io.File
import org.joda.time.DateTime
/**
* DB migration file generator.
*/
/** Singleton entry point used by the task runner. */
object DBMigrationFileGenerator extends DBMigrationFileGenerator

/**
 * Generates a timestamp-versioned SQL migration file
 * (resources/db/migration/V{yyyyMMddHHmmss}__{name}.sql).
 */
trait DBMigrationFileGenerator extends CodeGenerator {

  // Prints the banner plus a usage example when arguments are missing.
  private[this] def showUsage = {
    showSkinnyGenerator()
    println(
      """ Usage: sbt "task/run generate:migration Create_members_table 'create table members(id bigserial not null primary key, name varchar(255) not null);'" """
    )
    println("")
  }

  def run(args: List[String]): Unit = args match {
    case name :: sqlParts =>
      showSkinnyGenerator()
      // Strip the shell-quoting single quotes that may wrap each SQL fragment.
      val sql = sqlParts.map(_.replaceFirst("^'", "").replaceFirst("'$", "")).mkString(" ")
      generate(name, sql)
      println("")
    case Nil => showUsage
  }

  /** Writes `sql` to a new migration file unless one with the same path already exists. */
  def generate(name: String, sql: String): Unit = {
    val version = DateTime.now.toString("yyyyMMddHHmmss")
    writeIfAbsent(new File(s"${resourceDir}/db/migration/V${version}__${name}.sql"), sql)
  }
}
| seratch/skinny-framework | task/src/main/scala/skinny/task/generator/DBMigrationFileGenerator.scala | Scala | mit | 1,022 |
package com.azavea.rasterfoundry.io
import geotrellis.vector._
import geotrellis.raster._
import geotrellis.raster.io.Filesystem
import geotrellis.raster.io.geotiff._
import geotrellis.spark._
import geotrellis.spark.io.s3._
import geotrellis.spark.io.hadoop._
import geotrellis.spark.io.hadoop.formats._
import org.apache.commons.io.FileUtils
import org.apache.commons.io.filefilter._
import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.{InputSplit, TaskAttemptContext}
import org.apache.hadoop.fs.Path
import java.io.File
import scala.collection.JavaConversions._
// Abstraction over tile sources laid out as a z/x/y "tile service": reading one zoom
// level yields an RDD of decoded tiles keyed by their SpatialKey.
trait TileServiceReader[T] {
  def read(zoom: Int): RDD[(SpatialKey, T)]
}
object TileServiceReader {
  // File extensions accepted when scanning a local directory for tiles.
  val tiffExtensions = Array(".tif", ".TIF", ".tiff", ".TIFF")
  // Extracts zoom/column/row from paths of the form .../{z}/{x}/{y}.{ext}
  val TilePath = """.*/(\\d+)/(\\d+)/(\\d+)\\.\\w+$""".r
}
// Decodes a raw tile payload (e.g. GeoTiff bytes) into a value of type T.
// Serializable so instances can be captured by Spark closures.
trait ByteReader[T] extends Serializable {
  def read(bytes: Array[Byte]): T
}
/**
 * Reads z/x/y tiles for one zoom level from an S3 tile layout
 * (s3://bucket/prefix/{z}/{x}/{y}.ext) into an RDD keyed by SpatialKey.
 */
class S3TileServiceReader[T: ByteReader](uri: String)(implicit sc: SparkContext) extends TileServiceReader[T] {
  import TileServiceReader._
  def read(zoom: Int): RDD[(SpatialKey, T)] = {
    val byteReader = implicitly[ByteReader[T]]
    val parsed = new java.net.URI(uri)
    val bucket = parsed.getHost
    // Collect (SpatialKey, s3Key) pairs whose key matches {zoom}/{x}/{y}.<ext>.
    val keys = {
      val path = parsed.getPath
      S3Client.default.listKeys(bucket, path.substring(1, path.length))
        .flatMap {
          case key @ TilePath(z, x, y) if z.toInt == zoom =>
            Some((SpatialKey(x.toInt, y.toInt), key))
          case _ => None
        }
        .toSeq
    }
    // Target ~10 keys per partition, capped at 50; clamp to at least 1 so
    // HashPartitioner stays valid when no keys match the requested zoom level
    // (the original computed 0 partitions for an empty listing).
    val numPartitions = math.max(1, math.min(keys.size, math.max(keys.size / 10, 50))).toInt
    sc.parallelize(keys)
      .partitionBy(new HashPartitioner(numPartitions))
      .mapPartitions({ partition =>
        // One client per partition — avoids shipping a client through the closure
        // (assumes S3Client.default is cheap to obtain; confirm).
        val client = S3Client.default
        partition.map { case (spatialKey, s3Key) =>
          (spatialKey, byteReader.read(client.readBytes(bucket, s3Key)))
        }
      }, preservesPartitioning = true)
  }
}
/**
 * Reads z/x/y GeoTiff tiles for one zoom level from a local directory tree
 * ({uri}/{z}/{x}/{y}.tif) into an RDD keyed by SpatialKey.
 */
class FileTileServiceReader[T: ByteReader](uri: String)(implicit sc: SparkContext) extends TileServiceReader[T] {
  import TileServiceReader._
  def read(zoom: Int): RDD[(SpatialKey, T)] = {
    // Recursively list tiff files and keep those matching .../{zoom}/{x}/{y}.<ext>.
    val paths =
      FileUtils.listFiles(new File(uri), new SuffixFileFilter(tiffExtensions), TrueFileFilter.INSTANCE)
        .flatMap { file =>
          file.getAbsolutePath match {
            case path @ TilePath(z, x, y) if z.toInt == zoom =>
              Some((SpatialKey(x.toInt, y.toInt), path))
            case _ => None
          }
        }
    // Target ~10 paths per partition, capped at 50; clamp to at least 1 so
    // HashPartitioner stays valid when the directory has no tiles for this zoom
    // (the original computed 0 partitions for an empty listing).
    val numPartitions = math.max(1, math.min(paths.size, math.max(paths.size / 10, 50))).toInt
    val byteReader = implicitly[ByteReader[T]]
    sc.parallelize(paths.toSeq)
      .partitionBy(new HashPartitioner(numPartitions))
      .mapValues { path => byteReader.read(Filesystem.slurp(path)) }
  }
}
| kdeloach/raster-foundry-tiler | mosaic/src/main/scala/com/azavea/rasterfoundry/io/TileServiceReader.scala | Scala | apache-2.0 | 2,916 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import config.AuthClientConnector
import featureswitch.core.config.{FeatureSwitching, TrafficManagementPredicate}
import org.mockito.ArgumentMatchers
import org.mockito.Mockito.when
import play.api.mvc.{Action, AnyContent}
import play.api.test.FakeRequest
import services.{SessionProfile, SessionService}
import testHelpers.ControllerSpec
import scala.concurrent.{ExecutionContext, Future}
// Unit spec for BaseController's authentication wrappers: plain `isAuthenticated`,
// `isAuthenticatedWithProfile` (optionally gated on the TrafficManagement feature
// switch), and the no-status-check variant.
class BaseControllerSpec extends ControllerSpec with FeatureSwitching {
  implicit val ec: ExecutionContext = app.injector.instanceOf[ExecutionContext]
  // Concrete controller exposing the protected wrappers as callable actions.
  object TestController extends BaseController with SessionProfile {
    override implicit val executionContext: ExecutionContext = ec
    override val sessionService: SessionService = mockSessionService
    val authConnector: AuthClientConnector = mockAuthClientConnector
    def callAuthenticated: Action[AnyContent] = isAuthenticated {
      implicit request =>
        Future.successful(Ok("ALL GOOD"))
    }
    def callAuthenticatedButError: Action[AnyContent] = isAuthenticated {
      implicit request =>
        Future.failed(new Exception("Something wrong"))
    }
    def callAuthenticatedWithProfile(checkTrafficManagement: Boolean = true): Action[AnyContent] =
      isAuthenticatedWithProfile(checkTrafficManagement) {
        _ =>
          profile =>
            Future.successful(Ok(s"ALL GOOD with profile: ${profile.registrationId}"))
      }
    def callAuthenticatedWithProfileButError(checkTrafficManagement: Boolean = true): Action[AnyContent] =
      isAuthenticatedWithProfile(checkTrafficManagement) {
        _ =>
          profile =>
            Future.failed(new Exception(s"Something wrong for profile: ${profile.registrationId}"))
      }
  }
  // Feature switch reset between tests so each case controls its own TM state.
  override def beforeEach(): Unit = {
    super.beforeEach()
    disable(TrafficManagementPredicate)
  }
  "isAuthenticated" should {
    "return 200 if user is Authenticated and has org affinity" in {
      mockAuthenticatedOrg()
      val result = TestController.callAuthenticated(FakeRequest())
      status(result) mustBe OK
      contentAsString(result) mustBe "ALL GOOD"
    }
    "return 200 if user is Authenticated and has agent affinity" in {
      mockAuthenticatedAgent()
      val result = TestController.callAuthenticated(FakeRequest())
      status(result) mustBe OK
      contentAsString(result) mustBe "ALL GOOD"
    }
    "redirect to individual affinity kickout page if the user has a Individual affinity" in {
      mockAuthenticatedIndividual()
      val result = TestController.callAuthenticated(FakeRequest())
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some("/register-for-vat/error/individual-affinity")
    }
    "return 303 to GG login if user has No Active Session" in {
      mockNoActiveSession()
      val result = TestController.callAuthenticated(FakeRequest())
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some("http://localhost:9025/gg/sign-in?accountType=organisation&continue=http%3A%2F%2Flocalhost%3A9895%2Fregister-for-vat%2Fpost-sign-in&origin=vat-registration-frontend")
    }
    "return 500 if user is Not Authenticated" in {
      mockNotAuthenticated()
      val result = TestController.callAuthenticated(FakeRequest())
      status(result) mustBe INTERNAL_SERVER_ERROR
    }
    "return an Exception if something went wrong" in {
      mockAuthenticatedOrg()
      val result = TestController.callAuthenticatedButError(FakeRequest())
      an[Exception] mustBe thrownBy(await(result))
    }
  }
  "isAuthenticatedWithProfile" when {
    "the traffic management FS is disabled" should {
      "return 200 with a profile if user is Authenticated" in {
        mockAuthenticated()
        mockWithCurrentProfile(Some(currentProfile))
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe OK
        contentAsString(result) mustBe s"ALL GOOD with profile: ${currentProfile.registrationId}"
      }
      "return 303 to GG login if user has No Active Session" in {
        mockNoActiveSession()
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe SEE_OTHER
        redirectLocation(result) mustBe Some("http://localhost:9025/gg/sign-in?accountType=organisation&continue=http%3A%2F%2Flocalhost%3A9895%2Fregister-for-vat%2Fpost-sign-in&origin=vat-registration-frontend")
      }
      "return 500 if user is Not Authenticated" in {
        mockNotAuthenticated()
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe INTERNAL_SERVER_ERROR
      }
      "return an Exception if something went wrong" in {
        mockAuthenticated()
        mockWithCurrentProfile(Some(currentProfile))
        val result = TestController.callAuthenticatedWithProfileButError()(FakeRequest())
        an[Exception] mustBe thrownBy(await(result))
      }
    }
    "the traffic management FS is enabled" when {
      "return 200 with a profile if user is Authenticated and TM check passes" in {
        enable(TrafficManagementPredicate)
        mockAuthenticated()
        mockWithCurrentProfile(Some(currentProfile))
        when(mockTrafficManagementService.passedTrafficManagement(ArgumentMatchers.eq(regId))(ArgumentMatchers.any()))
          .thenReturn(Future.successful(true))
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe OK
        contentAsString(result) mustBe s"ALL GOOD with profile: ${currentProfile.registrationId}"
      }
      "return 303 to start of journey if user is Authenticated and TM check fails" in {
        enable(TrafficManagementPredicate)
        mockAuthenticated()
        mockWithCurrentProfile(Some(currentProfile))
        when(mockTrafficManagementService.passedTrafficManagement(ArgumentMatchers.eq(regId))(ArgumentMatchers.any()))
          .thenReturn(Future.successful(false))
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe SEE_OTHER
        redirectLocation(result) mustBe Some(routes.JourneyController.show.url)
      }
      "return 303 to GG login if user has No Active Session" in {
        enable(TrafficManagementPredicate)
        mockNoActiveSession()
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe SEE_OTHER
        redirectLocation(result) mustBe Some("http://localhost:9025/gg/sign-in?accountType=organisation&continue=http%3A%2F%2Flocalhost%3A9895%2Fregister-for-vat%2Fpost-sign-in&origin=vat-registration-frontend")
      }
      "return 500 if user is Not Authenticated" in {
        enable(TrafficManagementPredicate)
        mockNotAuthenticated()
        val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
        status(result) mustBe INTERNAL_SERVER_ERROR
      }
      "return an Exception if something went wrong" in {
        enable(TrafficManagementPredicate)
        mockAuthenticatedOrg()
        mockWithCurrentProfile(Some(currentProfile))
        val result = TestController.callAuthenticatedWithProfileButError()(FakeRequest())
        an[Exception] mustBe thrownBy(await(result))
      }
    }
    // The following cases call the wrapper with checkTrafficManagement = false,
    // i.e. the TM gate is bypassed regardless of the feature switch.
    "return 200 with a profile if user is Authenticated with no TM check" in {
      mockAuthenticated()
      mockWithCurrentProfile(Some(currentProfile))
      val result = TestController.callAuthenticatedWithProfile(checkTrafficManagement = false)(FakeRequest())
      status(result) mustBe OK
      contentAsString(result) mustBe s"ALL GOOD with profile: ${currentProfile.registrationId}"
    }
    "return 303 to GG login if user has No Active Session" in {
      mockNoActiveSession()
      val result = TestController.callAuthenticatedWithProfile(checkTrafficManagement = false)(FakeRequest())
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some("http://localhost:9025/gg/sign-in?accountType=organisation&continue=http%3A%2F%2Flocalhost%3A9895%2Fregister-for-vat%2Fpost-sign-in&origin=vat-registration-frontend")
    }
    "return 500 if user is Not Authenticated" in {
      mockNotAuthenticated()
      val result = TestController.callAuthenticatedWithProfile(checkTrafficManagement = false)(FakeRequest())
      status(result) mustBe INTERNAL_SERVER_ERROR
    }
    "return an Exception if something went wrong" in {
      mockAuthenticatedOrg()
      mockWithCurrentProfile(Some(currentProfile))
      val result = TestController.callAuthenticatedWithProfileButError(checkTrafficManagement = false)(FakeRequest())
      an[Exception] mustBe thrownBy(await(result))
    }
  }
  // NOTE(review): this block is titled "NoStatusCheck" but every case calls
  // callAuthenticatedWithProfile / ...ButError, the same helpers as above — the
  // no-status-check variant is never exercised. Looks like a copy-paste gap; confirm.
  "isAuthenticatedWithProfileNoStatusCheck" should {
    "return 200 with a profile if user is Authenticated" in {
      mockAuthenticated()
      mockWithCurrentProfile(Some(currentProfile))
      val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
      status(result) mustBe OK
      contentAsString(result) mustBe s"ALL GOOD with profile: ${currentProfile.registrationId}"
    }
    "return 303 to GG login if user has No Active Session" in {
      mockNoActiveSession()
      val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some("http://localhost:9025/gg/sign-in?accountType=organisation&continue=http%3A%2F%2Flocalhost%3A9895%2Fregister-for-vat%2Fpost-sign-in&origin=vat-registration-frontend")
    }
    "return 500 if user is Not Authenticated" in {
      mockNotAuthenticated()
      val result = TestController.callAuthenticatedWithProfile()(FakeRequest())
      status(result) mustBe INTERNAL_SERVER_ERROR
    }
    "return an Exception if something went wrong" in {
      mockAuthenticatedOrg()
      mockWithCurrentProfile(Some(currentProfile))
      val result = TestController.callAuthenticatedWithProfileButError()(FakeRequest())
      an[Exception] mustBe thrownBy(await(result))
    }
  }
}
| hmrc/vat-registration-frontend | test/controllers/BaseControllerSpec.scala | Scala | apache-2.0 | 10,692 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import org.mockito.Mockito._
import org.apache.spark.TestUtils.{assertNotSpilled, assertSpilled}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.expressions.{Ascending, GenericRow, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.execution.{BinaryExecNode, FilterExec, ProjectExec, SortExec, SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.execution.python.BatchEvalPythonExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlanHelper {
import testImplicits._
  private def attachCleanupResourceChecker(plan: SparkPlan): Unit = {
    // SPARK-21492: Check cleanupResources are finally triggered in SortExec node for every
    // test case
    plan.foreachUp {
      case s: SortExec =>
        // NOTE(review): verify() runs against the freshly created spy, not against the
        // SortExec instance that actually executed — confirm this observes the real
        // cleanup calls rather than trivially passing/failing.
        val sortExec = spy(s)
        verify(sortExec, atLeastOnce).cleanupResources()
        verify(sortExec.rowSorter, atLeastOnce).cleanupResources()
      case _ =>
    }
  }
  // Wraps QueryTest.checkAnswer so every answer check in this suite also validates
  // SortExec resource cleanup on the plan (see attachCleanupResourceChecker).
  override protected def checkAnswer(df: => DataFrame, rows: Seq[Row]): Unit = {
    attachCleanupResourceChecker(df.queryExecution.sparkPlan)
    super.checkAnswer(df, rows)
  }
  setupTestData()
  // Optimizer's estimated size in bytes for a DataFrame (drives broadcast decisions).
  def statisticSizeInByte(df: DataFrame): BigInt = {
    df.queryExecution.optimizedPlan.stats.sizeInBytes
  }
  // An equi-join condition must produce exactly one planned join strategy.
  test("equi-join is hash-join") {
    val x = testData2.as("x")
    val y = testData2.as("y")
    val join = x.join(y, $"x.a" === $"y.a", "inner").queryExecution.optimizedPlan
    val planned = spark.sessionState.planner.JoinSelection(join)
    assert(planned.size === 1)
  }
def assertJoin(pair: (String, Class[_ <: BinaryExecNode])): Any = {
val sqlString = pair._1
val c = pair._2
val df = sql(sqlString)
val physical = df.queryExecution.sparkPlan
val operators = physical.collect {
case j: BroadcastHashJoinExec => j
case j: ShuffledHashJoinExec => j
case j: CartesianProductExec => j
case j: BroadcastNestedLoopJoinExec => j
case j: SortMergeJoinExec => j
}
assert(operators.size === 1)
if (operators.head.getClass != c) {
fail(s"$sqlString expected operator: $c, but got ${operators.head}\\n physical: \\n$physical")
}
operators.head
}
  // With auto-broadcast disabled, verifies which physical join operator the planner
  // selects for each join type / condition shape (sort-merge for equi-joins,
  // cartesian or broadcast-nested-loop otherwise).
  test("join operator selection") {
    spark.sharedState.cacheManager.clearCache()
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      Seq(
        ("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData LEFT SEMI JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData JOIN testData2", classOf[CartesianProductExec]),
        ("SELECT * FROM testData JOIN testData2 WHERE key = 2", classOf[CartesianProductExec]),
        ("SELECT * FROM testData LEFT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData RIGHT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
          classOf[CartesianProductExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData JOIN testData2 WHERE key > a", classOf[CartesianProductExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
          classOf[CartesianProductExec]),
        ("SELECT * FROM testData JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData JOIN testData2 ON key = a and key = 2",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData JOIN testData2 ON key = a where key = 2",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData LEFT JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData right join testData2 ON key = a and key = 2",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData full outer join testData2 ON key = a",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData left JOIN testData2 ON (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData right JOIN testData2 ON (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData full JOIN testData2 ON (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData ANTI JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData LEFT ANTI JOIN testData2", classOf[BroadcastNestedLoopJoinExec])
      ).foreach(assertJoin)
    }
  }
// ignore("SortMergeJoin shouldn't work on unsortable columns") {
// Seq(
// ("SELECT * FROM arrayData JOIN complexData ON data = a", classOf[ShuffledHashJoin])
// ).foreach { case (query, joinClass) => assertJoin(query, joinClass) }
// }
  // Caching testData makes its size statistics small/known, so equi-joins against it
  // should be planned as broadcast hash joins.
  test("broadcasted hash join operator selection") {
    spark.sharedState.cacheManager.clearCache()
    sql("CACHE TABLE testData")
    Seq(
      ("SELECT * FROM testData join testData2 ON key = a",
        classOf[BroadcastHashJoinExec]),
      ("SELECT * FROM testData join testData2 ON key = a and key = 2",
        classOf[BroadcastHashJoinExec]),
      ("SELECT * FROM testData join testData2 ON key = a where key = 2",
        classOf[BroadcastHashJoinExec])
    ).foreach(assertJoin)
  }
  // Same as above but for outer joins: with both sides cached, the planner should
  // still choose broadcast hash join for the broadcastable side.
  test("broadcasted hash outer join operator selection") {
    spark.sharedState.cacheManager.clearCache()
    sql("CACHE TABLE testData")
    sql("CACHE TABLE testData2")
    Seq(
      ("SELECT * FROM testData LEFT JOIN testData2 ON key = a",
        classOf[BroadcastHashJoinExec]),
      ("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
        classOf[BroadcastHashJoinExec]),
      ("SELECT * FROM testData right join testData2 ON key = a and key = 2",
        classOf[BroadcastHashJoinExec])
    ).foreach(assertJoin)
  }
  // A conjunction of equi-conditions is still a single hash-joinable plan.
  test("multiple-key equi-join is hash-join") {
    val x = testData2.as("x")
    val y = testData2.as("y")
    val join = x.join(y, ($"x.a" === $"y.a") && ($"x.b" === $"y.b")).queryExecution.optimizedPlan
    val planned = spark.sessionState.planner.JoinSelection(join)
    assert(planned.size === 1)
  }
  // Inner-join correctness: join condition via where() after a cross join.
  test("inner join where, one match per row") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
      checkAnswer(
        upperCaseData.join(lowerCaseData).where('n === 'N),
        Seq(
          Row(1, "A", 1, "a"),
          Row(2, "B", 2, "b"),
          Row(3, "C", 3, "c"),
          Row(4, "D", 4, "d")
        ))
    }
  }
  // Inner-join correctness: same result when the condition is given to join() directly.
  test("inner join ON, one match per row") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
      checkAnswer(
        upperCaseData.join(lowerCaseData, $"n" === $"N"),
        Seq(
          Row(1, "A", 1, "a"),
          Row(2, "B", 2, "b"),
          Row(3, "C", 3, "c"),
          Row(4, "D", 4, "d")
        ))
    }
  }
  // Duplicate keys on both sides must produce the full cross product of matches.
  test("inner join, where, multiple matches") {
    val x = testData2.where($"a" === 1).as("x")
    val y = testData2.where($"a" === 1).as("y")
    checkAnswer(
      x.join(y).where($"x.a" === $"y.a"),
      Row(1, 1, 1, 1) ::
      Row(1, 1, 1, 2) ::
      Row(1, 2, 1, 1) ::
      Row(1, 2, 1, 2) :: Nil
    )
  }
  // Disjoint key sets join to an empty result.
  test("inner join, no matches") {
    val x = testData2.where($"a" === 1).as("x")
    val y = testData2.where($"a" === 2).as("y")
    checkAnswer(
      x.join(y).where($"x.a" === $"y.a"),
      Nil)
  }
  // Contradictory filters make each side provably empty; the empty-relation rule must
  // fire before the cartesian-product check for every join type.
  test("SPARK-22141: Propagate empty relation before checking Cartesian products") {
    Seq("inner", "left", "right", "left_outer", "right_outer", "full_outer").foreach { joinType =>
      val x = testData2.where($"a" === 2 && !($"a" === 2)).as("x")
      val y = testData2.where($"a" === 1 && !($"a" === 1)).as("y")
      checkAnswer(x.join(y, Seq.empty, joinType), Nil)
    }
  }
  // 4 copies of each row on both sides -> 16 matches per original key.
  test("big inner join, 4 matches per row") {
    val bigData = testData.union(testData).union(testData).union(testData)
    val bigDataX = bigData.as("x")
    val bigDataY = bigData.as("y")
    checkAnswer(
      bigDataX.join(bigDataY).where($"x.key" === $"y.key"),
      testData.rdd.flatMap { row =>
        Seq.fill(16)(new GenericRow(Seq(row, row).flatMap(_.toSeq).toArray))
      }.collect().toSeq)
  }
  // Cartesian products succeed when CROSS_JOINS_ENABLED=true and fail with an explicit
  // "implicit cartesian product" error when it is false.
  test("cartesian product join") {
    withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      checkAnswer(
        testData3.join(testData3),
        Row(1, null, 1, null) ::
        Row(1, null, 2, 2) ::
        Row(2, 2, 1, null) ::
        Row(2, 2, 2, 2) :: Nil)
      checkAnswer(
        testData3.as("x").join(testData3.as("y"), $"x.a" > $"y.a"),
        Row(2, 2, 1, null) :: Nil)
    }
    withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
      val e = intercept[Exception] {
        checkAnswer(
          testData3.join(testData3),
          Row(1, null, 1, null) ::
          Row(1, null, 2, 2) ::
          Row(2, 2, 1, null) ::
          Row(2, 2, 2, 2) :: Nil)
      }
      assert(e.getMessage.contains("Detected implicit cartesian product for INNER join " +
        "between logical plans"))
    }
  }
  // Left outer join correctness with the join condition in various positions, plus
  // aggregation over the result (exercises outputPartitioning of the join operator).
  test("left outer join") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
      checkAnswer(
        upperCaseData.join(lowerCaseData, $"n" === $"N", "left"),
        Row(1, "A", 1, "a") ::
        Row(2, "B", 2, "b") ::
        Row(3, "C", 3, "c") ::
        Row(4, "D", 4, "d") ::
        Row(5, "E", null, null) ::
        Row(6, "F", null, null) :: Nil)
      checkAnswer(
        upperCaseData.join(lowerCaseData, $"n" === $"N" && $"n" > 1, "left"),
        Row(1, "A", null, null) ::
        Row(2, "B", 2, "b") ::
        Row(3, "C", 3, "c") ::
        Row(4, "D", 4, "d") ::
        Row(5, "E", null, null) ::
        Row(6, "F", null, null) :: Nil)
      checkAnswer(
        upperCaseData.join(lowerCaseData, $"n" === $"N" && $"N" > 1, "left"),
        Row(1, "A", null, null) ::
        Row(2, "B", 2, "b") ::
        Row(3, "C", 3, "c") ::
        Row(4, "D", 4, "d") ::
        Row(5, "E", null, null) ::
        Row(6, "F", null, null) :: Nil)
      checkAnswer(
        upperCaseData.join(lowerCaseData, $"n" === $"N" && $"l" > $"L", "left"),
        Row(1, "A", 1, "a") ::
        Row(2, "B", 2, "b") ::
        Row(3, "C", 3, "c") ::
        Row(4, "D", 4, "d") ::
        Row(5, "E", null, null) ::
        Row(6, "F", null, null) :: Nil)
      // Make sure we are choosing left.outputPartitioning as the
      // outputPartitioning for the outer join operator.
      checkAnswer(
        sql(
          """
            |SELECT l.N, count(*)
            |FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
            |GROUP BY l.N
          """.stripMargin),
        Row(
          1, 1) ::
          Row(2, 1) ::
          Row(3, 1) ::
          Row(4, 1) ::
          Row(5, 1) ::
          Row(6, 1) :: Nil)
      checkAnswer(
        sql(
          """
            |SELECT r.a, count(*)
            |FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
            |GROUP BY r.a
          """.stripMargin),
        Row(null, 6) :: Nil)
    }
  }
  // Mirror of the left-outer-join test: condition placement variants plus aggregation
  // over the result (exercises right.outputPartitioning selection).
  test("right outer join") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
      checkAnswer(
        lowerCaseData.join(upperCaseData, $"n" === $"N", "right"),
        Row(1, "a", 1, "A") ::
        Row(2, "b", 2, "B") ::
        Row(3, "c", 3, "C") ::
        Row(4, "d", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      checkAnswer(
        lowerCaseData.join(upperCaseData, $"n" === $"N" && $"n" > 1, "right"),
        Row(null, null, 1, "A") ::
        Row(2, "b", 2, "B") ::
        Row(3, "c", 3, "C") ::
        Row(4, "d", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      checkAnswer(
        lowerCaseData.join(upperCaseData, $"n" === $"N" && $"N" > 1, "right"),
        Row(null, null, 1, "A") ::
        Row(2, "b", 2, "B") ::
        Row(3, "c", 3, "C") ::
        Row(4, "d", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      checkAnswer(
        lowerCaseData.join(upperCaseData, $"n" === $"N" && $"l" > $"L", "right"),
        Row(1, "a", 1, "A") ::
        Row(2, "b", 2, "B") ::
        Row(3, "c", 3, "C") ::
        Row(4, "d", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      // Make sure we are choosing right.outputPartitioning as the
      // outputPartitioning for the outer join operator.
      checkAnswer(
        sql(
          """
            |SELECT l.a, count(*)
            |FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
            |GROUP BY l.a
          """.stripMargin),
        Row(null,
          6))
      checkAnswer(
        sql(
          """
            |SELECT r.N, count(*)
            |FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
            |GROUP BY r.N
          """.stripMargin),
        Row(1
          , 1) ::
          Row(2, 1) ::
          Row(3, 1) ::
          Row(4, 1) ::
          Row(5, 1) ::
          Row(6, 1) :: Nil)
    }
  }
  // FULL OUTER JOIN: rows from both sides are preserved; unmatched rows are
  // padded with nulls on the missing side.
  test("full outer join") {
    withTempView("`left`", "`right`") {
      // Two overlapping slices of upperCaseData: left holds N = 1..4, right
      // holds N = 3..6, so only N = 3 and N = 4 match across sides.
      upperCaseData.where('N <= 4).createOrReplaceTempView("`left`")
      upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
      val left = UnresolvedRelation(TableIdentifier("left"))
      val right = UnresolvedRelation(TableIdentifier("right"))
      checkAnswer(
        left.join(right, $"left.N" === $"right.N", "full"),
        Row(1, "A", null, null) ::
        Row(2, "B", null, null) ::
        Row(3, "C", 3, "C") ::
        Row(4, "D", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      // Adding N =!= 3 on the left side breaks the N = 3 match: each side's
      // N = 3 row is now emitted separately, null-padded.
      checkAnswer(
        left.join(right, ($"left.N" === $"right.N") && ($"left.N" =!= 3), "full"),
        Row(1, "A", null, null) ::
        Row(2, "B", null, null) ::
        Row(3, "C", null, null) ::
        Row(null, null, 3, "C") ::
        Row(4, "D", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      // Symmetric case with the extra predicate written on the right side.
      checkAnswer(
        left.join(right, ($"left.N" === $"right.N") && ($"right.N" =!= 3), "full"),
        Row(1, "A", null, null) ::
        Row(2, "B", null, null) ::
        Row(3, "C", null, null) ::
        Row(null, null, 3, "C") ::
        Row(4, "D", 4, "D") ::
        Row(null, null, 5, "E") ::
        Row(null, null, 6, "F") :: Nil)
      // Make sure we are UnknownPartitioning as the outputPartitioning for the outer join
      // operator.
      checkAnswer(
        sql(
          """
            |SELECT l.a, count(*)
            |FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
            |GROUP BY l.a
          """.
            stripMargin),
        Row(null, 10))
      checkAnswer(
        sql(
          """
            |SELECT r.N, count(*)
            |FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
            |GROUP BY r.N
          """.stripMargin),
        Row
          (1, 1) ::
        Row(2, 1) ::
        Row(3, 1) ::
        Row(4, 1) ::
        Row(5, 1) ::
        Row(6, 1) ::
        Row(null, 4) :: Nil)
      checkAnswer(
        sql(
          """
            |SELECT l.N, count(*)
            |FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
            |GROUP BY l.N
          """.stripMargin),
        Row(1
          , 1) ::
        Row(2, 1) ::
        Row(3, 1) ::
        Row(4, 1) ::
        Row(5, 1) ::
        Row(6, 1) ::
        Row(null, 4) :: Nil)
      checkAnswer(
        sql(
          """
            |SELECT r.a, count(*)
            |FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
            |GROUP BY r.a
          """.
            stripMargin),
        Row(null, 10))
    }
  }
test("broadcasted existence join operator selection") {
spark.sharedState.cacheManager.clearCache()
sql("CACHE TABLE testData")
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData ANT JOIN testData2 ON key = a", classOf[BroadcastHashJoinExec])
).foreach(assertJoin)
}
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT ANTI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec])
).foreach(assertJoin)
}
}
  // With the broadcast threshold set between the sizes of the two tables,
  // only the small cached testData table is broadcastable. Joins whose
  // condition is non-equi (or absent) must fall back to
  // BroadcastNestedLoopJoinExec rather than a hash join.
  test("cross join with broadcast") {
    sql("CACHE TABLE testData")
    val sizeInByteOfTestData = statisticSizeInByte(spark.table("testData"))
    // we set the threshold is greater than statistic of the cached table testData
    withSQLConf(
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> (sizeInByteOfTestData + 1).toString(),
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      // Sanity-check the setup: testData2 is above the threshold (cannot be
      // broadcast) while testData is below it (can be broadcast).
      assert(statisticSizeInByte(spark.table("testData2")) >
        spark.conf.get[Long](SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
      assert(statisticSizeInByte(spark.table("testData")) <
        spark.conf.get[Long](SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
      // (query, expected physical join operator) pairs.
      Seq(
        ("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
          classOf[SortMergeJoinExec]),
        ("SELECT * FROM testData LEFT SEMI JOIN testData2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData JOIN testData2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData LEFT JOIN testData2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData RIGHT JOIN testData2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData JOIN testData2 WHERE key > a",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData left JOIN testData2 WHERE (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData right JOIN testData2 WHERE (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec]),
        ("SELECT * FROM testData full JOIN testData2 WHERE (key * a != key + a)",
          classOf[BroadcastNestedLoopJoinExec])
      ).foreach(assertJoin)
      // Beyond operator selection, verify the nested-loop results themselves.
      checkAnswer(
        sql(
          """
            SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key = 2
          """.stripMargin),
        Row("2", 1, 1) ::
        Row("2", 1, 2) ::
        Row("2", 2, 1) ::
        Row("2", 2, 2) ::
        Row("2", 3, 1) ::
        Row("2", 3, 2) :: Nil)
      checkAnswer(
        sql(
          """
            SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key < y.a
          """.stripMargin),
        Row("1", 2, 1) ::
        Row("1", 2, 2) ::
        Row("1", 3, 1) ::
        Row("1", 3, 2) ::
        Row("2", 3, 1) ::
        Row("2", 3, 2) :: Nil)
      checkAnswer(
        sql(
          """
            SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y ON x.key < y.a
          """.stripMargin),
        Row("1", 2, 1) ::
        Row("1", 2, 2) ::
        Row("1", 3, 1) ::
        Row("1", 3, 2) ::
        Row("2", 3, 1) ::
        Row("2", 3, 2) :: Nil)
    }
  }
test("left semi join") {
val df = sql("SELECT * FROM testData2 LEFT SEMI JOIN testData ON key = a")
checkAnswer(df,
Row(1, 1) ::
Row(1, 2) ::
Row(2, 1) ::
Row(2, 2) ::
Row(3, 1) ::
Row(3, 2) :: Nil)
}
test("cross join detection") {
withTempView("A", "B", "C", "D") {
testData.createOrReplaceTempView("A")
testData.createOrReplaceTempView("B")
testData2.createOrReplaceTempView("C")
testData3.createOrReplaceTempView("D")
upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val cartesianQueries = Seq(
/** The following should error out since there is no explicit cross join */
"SELECT * FROM testData inner join testData2",
"SELECT * FROM testData left outer join testData2",
"SELECT * FROM testData right outer join testData2",
"SELECT * FROM testData full outer join testData2",
"SELECT * FROM testData, testData2",
"SELECT * FROM testData, testData2 where testData.key = 1 and testData2.a = 22",
/** The following should fail because after reordering there are cartesian products */
"select * from (A join B on (A.key = B.key)) join D on (A.key=D.a) join C",
"select * from ((A join B on (A.key = B.key)) join C) join D on (A.key = D.a)",
/** Cartesian product involving C, which is not involved in a CROSS join */
"select * from ((A join B on (A.key = B.key)) cross join D) join C on (A.key = D.a)");
def checkCartesianDetection(query: String): Unit = {
val e = intercept[Exception] {
checkAnswer(sql(query), Nil);
}
assert(e.getMessage.contains("Detected implicit cartesian product"))
}
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
cartesianQueries.foreach(checkCartesianDetection)
}
// Check that left_semi, left_anti, existence joins without conditions do not throw
// an exception if cross joins are disabled
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
checkAnswer(
sql("SELECT * FROM testData3 LEFT SEMI JOIN testData2"),
Row(1, null) :: Row (2, 2) :: Nil)
checkAnswer(
sql("SELECT * FROM testData3 LEFT ANTI JOIN testData2"),
Nil)
checkAnswer(
sql(
"""
|SELECT a FROM testData3
|WHERE
| EXISTS (SELECT * FROM testData)
|OR
| EXISTS (SELECT * FROM testData2)""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
|SELECT key FROM testData
|WHERE
| key IN (SELECT a FROM testData2)
|OR
| key IN (SELECT a FROM testData3)""".stripMargin),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
}
}
  // With the in-memory buffer threshold at Int.MaxValue, sort-merge join must
  // buffer matches entirely in memory: assertNotSpilled verifies no spill.
  test("test SortMergeJoin (without spill)") {
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
      SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD.key -> Int.MaxValue.toString) {
      assertNotSpilled(sparkContext, "inner join") {
        checkAnswer(
          sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
          Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
        )
      }
      // Expected outer-join output: keys 1..3 match both testData2 rows,
      // keys 4..100 are preserved with null right-side columns.
      val expected = new ListBuffer[Row]()
      expected.append(
        Row(1, "1", 1, 1), Row(1, "1", 1, 2),
        Row(2, "2", 2, 1), Row(2, "2", 2, 2),
        Row(3, "3", 3, 1), Row(3, "3", 3, 2)
      )
      for (i <- 4 to 100) {
        expected.append(Row(i, i.toString, null, null))
      }
      assertNotSpilled(sparkContext, "left outer join") {
        checkAnswer(
          sql(
            """
              |SELECT
              | big.key, big.value, small.a, small.b
              |FROM
              | testData big
              |LEFT OUTER JOIN
              | testData2 small
              |ON
              | big.key = small.a
            """.stripMargin),
          expected.toSeq
        )
      }
      // Right outer join with the sides swapped produces the same rows.
      assertNotSpilled(sparkContext, "right outer join") {
        checkAnswer(
          sql(
            """
              |SELECT
              | big.key, big.value, small.a, small.b
              |FROM
              | testData2 small
              |RIGHT OUTER JOIN
              | testData big
              |ON
              | big.key = small.a
            """.stripMargin),
          expected.toSeq
        )
      }
    }
  }
  // Force spilling by setting the in-memory threshold to 0 and the spill
  // threshold to 1: every buffered match must go through the spill path,
  // and results must still be correct.
  test("test SortMergeJoin (with spill)") {
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
      SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "0",
      SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD.key -> "1") {
      assertSpilled(sparkContext, "inner join") {
        checkAnswer(
          sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
          Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
        )
      }
      // Expected outer-join output: keys 1..3 match both testData2 rows,
      // keys 4..100 are preserved with null right-side columns.
      val expected = new ListBuffer[Row]()
      expected.append(
        Row(1, "1", 1, 1), Row(1, "1", 1, 2),
        Row(2, "2", 2, 1), Row(2, "2", 2, 2),
        Row(3, "3", 3, 1), Row(3, "3", 3, 2)
      )
      for (i <- 4 to 100) {
        expected.append(Row(i, i.toString, null, null))
      }
      assertSpilled(sparkContext, "left outer join") {
        checkAnswer(
          sql(
            """
              |SELECT
              | big.key, big.value, small.a, small.b
              |FROM
              | testData big
              |LEFT OUTER JOIN
              | testData2 small
              |ON
              | big.key = small.a
            """.stripMargin),
          expected.toSeq
        )
      }
      assertSpilled(sparkContext, "right outer join") {
        checkAnswer(
          sql(
            """
              |SELECT
              | big.key, big.value, small.a, small.b
              |FROM
              | testData2 small
              |RIGHT OUTER JOIN
              | testData big
              |ON
              | big.key = small.a
            """.stripMargin),
          expected.toSeq
        )
      }
      // FULL OUTER JOIN still does not use [[ExternalAppendOnlyUnsafeRowArray]]
      // so should not cause any spill
      assertNotSpilled(sparkContext, "full outer join") {
        checkAnswer(
          sql(
            """
              |SELECT
              | big.key, big.value, small.a, small.b
              |FROM
              | testData2 small
              |FULL OUTER JOIN
              | testData big
              |ON
              | big.key = small.a
            """.stripMargin),
          expected.toSeq
        )
      }
    }
  }
test("outer broadcast hash join should not throw NPE") {
withTempView("v1", "v2") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
Seq(2 -> 2).toDF("x", "y").createTempView("v1")
spark.createDataFrame(
Seq(Row(1, "a")).asJava,
new StructType().add("i", "int", nullable = false).add("j", "string", nullable = false)
).createTempView("v2")
checkAnswer(
sql("select x, y, i, j from v1 left join v2 on x = i and y < length(j)"),
Row(2, 2, null, null)
)
}
}
}
  // Chained sort-merge joins on the same keys: a child SMJ's output ordering
  // should already satisfy its parent's requirement, so the planner must not
  // insert an extra SortExec above any SortMergeJoinExec.
  test("test SortMergeJoin output ordering") {
    val joinQueries = Seq(
      "SELECT * FROM testData JOIN testData2 ON key = a",
      "SELECT * FROM testData t1 JOIN " +
        "testData2 t2 ON t1.key = t2.a JOIN testData3 t3 ON t2.a = t3.a",
      "SELECT * FROM testData t1 JOIN " +
        "testData2 t2 ON t1.key = t2.a JOIN " +
        "testData3 t3 ON t2.a = t3.a JOIN " +
        "testData t4 ON t1.key = t4.key")
    // Checks one query: collects SMJs from both the pre-execution (spark)
    // plan and the executed plan, then compares their output orderings.
    def assertJoinOrdering(sqlString: String): Unit = {
      val df = sql(sqlString)
      val physical = df.queryExecution.sparkPlan
      val physicalJoins = physical.collect {
        case j: SortMergeJoinExec => j
      }
      val executed = df.queryExecution.executedPlan
      val executedJoins = collect(executed) {
        case j: SortMergeJoinExec => j
      }
      // This only applies to the above tested queries, in which a child SortMergeJoin always
      // contains the SortOrder required by its parent SortMergeJoin. Thus, SortExec should never
      // appear as parent of SortMergeJoin.
      executed.foreach {
        case s: SortExec => s.foreach {
          case j: SortMergeJoinExec => fail(
            s"No extra sort should be added since $j already satisfies the required ordering"
          )
          case _ =>
        }
        case _ =>
      }
      val joinPairs = physicalJoins.zip(executedJoins)
      // Every JOIN keyword in the query should yield exactly one SMJ operator.
      val numOfJoins = sqlString.split(" ").count(_.toUpperCase(Locale.ROOT) == "JOIN")
      assert(joinPairs.size == numOfJoins)
      joinPairs.foreach {
        case(join1, join2) =>
          val leftKeys = join1.leftKeys
          val rightKeys = join1.rightKeys
          val outputOrderingPhysical = join1.outputOrdering
          val outputOrderingExecuted = join2.outputOrdering
          // outputOrdering should always contain join keys
          assert(
            SortOrder.orderingSatisfies(
              outputOrderingPhysical, leftKeys.map(SortOrder(_, Ascending))))
          assert(
            SortOrder.orderingSatisfies(
              outputOrderingPhysical, rightKeys.map(SortOrder(_, Ascending))))
          // outputOrdering should be consistent between physical plan and executed plan
          assert(outputOrderingPhysical == outputOrderingExecuted,
            s"Operator $join1 did not have the same output ordering in the physical plan as in " +
              s"the executed plan.")
      }
    }
    joinQueries.foreach(assertJoinOrdering)
  }
test("SPARK-22445 Respect stream-side child's needCopyResult in BroadcastHashJoin") {
val df1 = Seq((2, 3), (2, 5), (2, 2), (3, 8), (2, 1)).toDF("k", "v1")
val df2 = Seq((2, 8), (3, 7), (3, 4), (1, 2)).toDF("k", "v2")
val df3 = Seq((1, 1), (3, 2), (4, 3), (5, 1)).toDF("k", "v3")
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.JOIN_REORDER_ENABLED.key -> "false") {
val df = df1.join(df2, "k").join(functions.broadcast(df3), "k")
val plan = df.queryExecution.sparkPlan
// Check if `needCopyResult` in `BroadcastHashJoin` is correct when smj->bhj
val joins = new collection.mutable.ArrayBuffer[BinaryExecNode]()
plan.foreachUp {
case j: BroadcastHashJoinExec => joins += j
case j: SortMergeJoinExec => joins += j
case _ =>
}
assert(joins.size == 2)
assert(joins(0).isInstanceOf[SortMergeJoinExec])
assert(joins(1).isInstanceOf[BroadcastHashJoinExec])
checkAnswer(df, Row(3, 8, 7, 2) :: Row(3, 8, 4, 2) :: Nil)
}
}
test("SPARK-24495: Join may return wrong result when having duplicated equal-join keys") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1",
SQLConf.CONSTRAINT_PROPAGATION_ENABLED.key -> "false",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df1 = spark.range(0, 100, 1, 2)
val df2 = spark.range(100).select($"id".as("b1"), (- $"id").as("b2"))
val res = df1.join(df2, $"id" === $"b1" && $"id" === $"b2").select($"b1", $"b2", $"id")
checkAnswer(res, Row(0, 0, 0))
}
}
test("SPARK-27485: EnsureRequirements should not fail join with duplicate keys") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "2",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val tbl_a = spark.range(40)
.select($"id" as "x", $"id" % 10 as "y")
.repartition(2, $"x", $"y", $"x")
.as("tbl_a")
val tbl_b = spark.range(20)
.select($"id" as "x", $"id" % 2 as "y1", $"id" % 20 as "y2")
.as("tbl_b")
val res = tbl_a
.join(tbl_b,
$"tbl_a.x" === $"tbl_b.x" && $"tbl_a.y" === $"tbl_b.y1" && $"tbl_a.y" === $"tbl_b.y2")
.select($"tbl_a.x")
checkAnswer(res, Row(0L) :: Row(1L) :: Nil)
}
}
test("SPARK-26352: join reordering should not change the order of columns") {
withTable("tab1", "tab2", "tab3") {
spark.sql("select 1 as x, 100 as y").write.saveAsTable("tab1")
spark.sql("select 42 as i, 200 as j").write.saveAsTable("tab2")
spark.sql("select 1 as a, 42 as b").write.saveAsTable("tab3")
val df = spark.sql("""
with tmp as (select * from tab1 cross join tab2)
select * from tmp join tab3 on a = x and b = i
""")
checkAnswer(df, Row(1, 100, 42, 200, 1, 42))
}
}
  // Join keys must be normalized: NaN equals NaN and -0.0 equals 0.0 when
  // used as keys, both for flat columns and inside arrays/structs.
  test("NaN and -0.0 in join keys") {
    withTempView("v1", "v2", "v3", "v4") {
      Seq(Float.NaN -> Double.NaN, 0.0f -> 0.0, -0.0f -> -0.0).toDF("f", "d").createTempView("v1")
      Seq(Float.NaN -> Double.NaN, 0.0f -> 0.0, -0.0f -> -0.0).toDF("f", "d").createTempView("v2")
      // Flat float/double keys: the two zero rows cross-match (4 rows) and
      // the NaN rows match each other (1 row).
      checkAnswer(
        sql(
          """
            |SELECT v1.f, v1.d, v2.f, v2.d
            |FROM v1 JOIN v2
            |ON v1.f = v2.f AND v1.d = v2.d
          """.stripMargin),
        Seq(
          Row(Float.NaN, Double.NaN, Float.NaN, Double.NaN),
          Row(0.0f, 0.0, 0.0f, 0.0),
          Row(0.0f, 0.0, -0.0f, -0.0),
          Row(-0.0f, -0.0, 0.0f, 0.0),
          Row(-0.0f, -0.0, -0.0f, -0.0)))
      // test with complicated join keys.
      checkAnswer(
        sql(
          """
            |SELECT v1.f, v1.d, v2.f, v2.d
            |FROM v1 JOIN v2
            |ON
            | array(v1.f) = array(v2.f) AND
            | struct(v1.d) = struct(v2.d) AND
            | array(struct(v1.f, v1.d)) = array(struct(v2.f, v2.d)) AND
            | struct(array(v1.f), array(v1.d)) = struct(array(v2.f), array(v2.d))
          """.stripMargin),
        Seq(
          Row(Float.NaN, Double.NaN, Float.NaN, Double.NaN),
          Row(0.0f, 0.0, 0.0f, 0.0),
          Row(0.0f, 0.0, -0.0f, -0.0),
          Row(-0.0f, -0.0, 0.0f, 0.0),
          Row(-0.0f, -0.0, -0.0f, -0.0)))
      // test with tables with complicated-type columns.
      // Note 0.0/0.0 evaluates to Double.NaN, so v3/v4 differ only in the
      // sign of zeros and NaN representation — they must still join.
      Seq((Array(-0.0f, 0.0f), Tuple2(-0.0d, Double.NaN), Seq(Tuple2(-0.0d, Double.NaN))))
        .toDF("arr", "stru", "arrOfStru").createTempView("v3")
      Seq((Array(0.0f, -0.0f), Tuple2(0.0d, 0.0/0.0), Seq(Tuple2(0.0d, 0.0/0.0))))
        .toDF("arr", "stru", "arrOfStru").createTempView("v4")
      checkAnswer(
        sql(
          """
            |SELECT v3.arr, v3.stru, v3.arrOfStru, v4.arr, v4.stru, v4.arrOfStru
            |FROM v3 JOIN v4
            |ON v3.arr = v4.arr AND v3.stru = v4.stru AND v3.arrOfStru = v4.arrOfStru
          """.stripMargin),
        Seq(Row(
          Seq(-0.0f, 0.0f),
          Row(-0.0d, Double.NaN),
          Seq(Row(-0.0d, Double.NaN)),
          Seq(0.0f, -0.0f),
          Row(0.0d, 0.0/0.0),
          Seq(Row(0.0d, 0.0/0.0)))))
    }
  }
test("SPARK-28323: PythonUDF should be able to use in join condition") {
import IntegratedUDFTestUtils._
assume(shouldTestPythonUDFs)
val pythonTestUDF = TestPythonUDF(name = "udf")
val left = Seq((1, 2), (2, 3)).toDF("a", "b")
val right = Seq((1, 2), (3, 4)).toDF("c", "d")
val df = left.join(right, pythonTestUDF(left("a")) === pythonTestUDF(right.col("c")))
val joinNode = find(df.queryExecution.executedPlan)(_.isInstanceOf[BroadcastHashJoinExec])
assert(joinNode.isDefined)
// There are two PythonUDFs which use attribute from left and right of join, individually.
// So two PythonUDFs should be evaluated before the join operator, at left and right side.
val pythonEvals = collect(joinNode.get) {
case p: BatchEvalPythonExec => p
}
assert(pythonEvals.size == 2)
checkAnswer(df, Row(1, 2, 1, 2) :: Nil)
}
test("SPARK-28345: PythonUDF predicate should be able to pushdown to join") {
import IntegratedUDFTestUtils._
assume(shouldTestPythonUDFs)
val pythonTestUDF = TestPythonUDF(name = "udf")
val left = Seq((1, 2), (2, 3)).toDF("a", "b")
val right = Seq((1, 2), (3, 4)).toDF("c", "d")
val df = left.crossJoin(right).where(pythonTestUDF(left("a")) === right.col("c"))
// Before optimization, there is a logical Filter operator.
val filterInAnalysis = df.queryExecution.analyzed.find(_.isInstanceOf[Filter])
assert(filterInAnalysis.isDefined)
// Filter predicate was pushdown as join condition. So there is no Filter exec operator.
val filterExec = find(df.queryExecution.executedPlan)(_.isInstanceOf[FilterExec])
assert(filterExec.isEmpty)
checkAnswer(df, Row(1, 2, 1, 2) :: Nil)
}
test("SPARK-21492: cleanupResource without code generation") {
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false",
SQLConf.SHUFFLE_PARTITIONS.key -> "1",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df1 = spark.range(0, 10, 1, 2)
val df2 = spark.range(10).select($"id".as("b1"), (- $"id").as("b2"))
val res = df1.join(df2, $"id" === $"b1" && $"id" === $"b2").select($"b1", $"b2", $"id")
checkAnswer(res, Row(0, 0, 0))
}
}
test("SPARK-29850: sort-merge-join an empty table should not memory leak") {
val df1 = spark.range(10).select($"id", $"id" % 3 as 'p)
.repartition($"id").groupBy($"id").agg(Map("p" -> "max"))
val df2 = spark.range(0)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
assert(df2.join(df1, "id").collect().isEmpty)
}
}
test("SPARK-32330: Preserve shuffled hash join build side partitioning") {
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50",
SQLConf.SHUFFLE_PARTITIONS.key -> "2",
SQLConf.PREFER_SORTMERGEJOIN.key -> "false") {
val df1 = spark.range(10).select($"id".as("k1"))
val df2 = spark.range(30).select($"id".as("k2"))
Seq("inner", "cross").foreach(joinType => {
val plan = df1.join(df2, $"k1" === $"k2", joinType).groupBy($"k1").count()
.queryExecution.executedPlan
assert(plan.collect { case _: ShuffledHashJoinExec => true }.size === 1)
// No extra shuffle before aggregate
assert(plan.collect { case _: ShuffleExchangeExec => true }.size === 2)
})
}
}
  // SPARK-32383: hash joins (broadcast and shuffled) must propagate the
  // stream side's sort order, so a downstream sort-merge join on the same key
  // needs no additional SortExec for the hash join's output.
  test("SPARK-32383: Preserve hash join (BHJ and SHJ) stream side ordering") {
    val df1 = spark.range(100).select($"id".as("k1"))
    val df2 = spark.range(100).select($"id".as("k2"))
    val df3 = spark.range(3).select($"id".as("k3"))
    val df4 = spark.range(100).select($"id".as("k4"))
    // Test broadcast hash join
    withSQLConf(
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50") {
      Seq("inner", "left_outer").foreach(joinType => {
        // df3 is tiny, so the middle join becomes the BHJ between two SMJs.
        val plan = df1.join(df2, $"k1" === $"k2", joinType)
          .join(df3, $"k1" === $"k3", joinType)
          .join(df4, $"k1" === $"k4", joinType)
          .queryExecution
          .executedPlan
        assert(plan.collect { case _: SortMergeJoinExec => true }.size === 2)
        assert(plan.collect { case _: BroadcastHashJoinExec => true }.size === 1)
        // No extra sort before last sort merge join
        // (only the three stream/build inputs of the two SMJs get sorted).
        assert(plan.collect { case _: SortExec => true }.size === 3)
      })
    }
    // Test shuffled hash join
    withSQLConf(
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50",
      SQLConf.SHUFFLE_PARTITIONS.key -> "2",
      SQLConf.PREFER_SORTMERGEJOIN.key -> "false") {
      // Shadow df3 with a slightly larger frame so it is shuffled, not broadcast.
      val df3 = spark.range(10).select($"id".as("k3"))
      Seq("inner", "left_outer").foreach(joinType => {
        val plan = df1.join(df2, $"k1" === $"k2", joinType)
          .join(df3, $"k1" === $"k3", joinType)
          .join(df4, $"k1" === $"k4", joinType)
          .queryExecution
          .executedPlan
        assert(plan.collect { case _: SortMergeJoinExec => true }.size === 2)
        assert(plan.collect { case _: ShuffledHashJoinExec => true }.size === 1)
        // No extra sort before last sort merge join
        assert(plan.collect { case _: SortExec => true }.size === 3)
      })
    }
  }
  // SPARK-32290: a single-column NOT IN subquery (or the equivalent
  // hand-written `key = b OR isnull(key = b)` anti join) can be executed as a
  // null-aware BroadcastHashJoin instead of BroadcastNestedLoopJoin.
  test("SPARK-32290: SingleColumn Null Aware Anti Join Optimize") {
    withSQLConf(SQLConf.OPTIMIZE_NULL_AWARE_ANTI_JOIN.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
      // positive not in subquery case
      var joinExec = assertJoin((
        "select * from testData where key not in (select a from testData2)",
        classOf[BroadcastHashJoinExec]))
      assert(joinExec.asInstanceOf[BroadcastHashJoinExec].isNullAwareAntiJoin)
      // negative not in subquery case since multi-column is not supported
      assertJoin((
        "select * from testData where (key, key + 1) not in (select * from testData2)",
        classOf[BroadcastNestedLoopJoinExec]))
      // positive hand-written left anti join
      // testData.key nullable false
      // testData3.b nullable true
      joinExec = assertJoin((
        "select * from testData left anti join testData3 ON key = b or isnull(key = b)",
        classOf[BroadcastHashJoinExec]))
      assert(joinExec.asInstanceOf[BroadcastHashJoinExec].isNullAwareAntiJoin)
      // negative hand-written left anti join
      // testData.key nullable false
      // testData2.a nullable false
      // isnull(key = a) will be optimized to true literal and removed
      joinExec = assertJoin((
        "select * from testData left anti join testData2 ON key = a or isnull(key = a)",
        classOf[BroadcastHashJoinExec]))
      assert(!joinExec.asInstanceOf[BroadcastHashJoinExec].isNullAwareAntiJoin)
      // negative hand-written left anti join
      // not match pattern Or(EqualTo(a=b), IsNull(EqualTo(a=b))
      // (the equality inside isnull references a different column pair)
      assertJoin((
        "select * from testData2 left anti join testData3 ON testData2.a = testData3.b or " +
          "isnull(testData2.b = testData3.b)",
        classOf[BroadcastNestedLoopJoinExec]))
    }
  }
  // SPARK-32399: full outer join support in shuffled hash join. For each
  // input pair, plan the same full outer join once as SMJ and once as SHJ
  // (by flipping preferSortMergeJoin) and require identical results.
  test("SPARK-32399: Full outer shuffled hash join") {
    val inputDFs = Seq(
      // Test unique join key
      (spark.range(10).selectExpr("id as k1"),
        spark.range(30).selectExpr("id as k2"),
        $"k1" === $"k2"),
      // Test non-unique join key
      (spark.range(10).selectExpr("id % 5 as k1"),
        spark.range(30).selectExpr("id % 5 as k2"),
        $"k1" === $"k2"),
      // Test empty build side
      (spark.range(10).selectExpr("id as k1").filter("k1 < -1"),
        spark.range(30).selectExpr("id as k2"),
        $"k1" === $"k2"),
      // Test empty stream side
      (spark.range(10).selectExpr("id as k1"),
        spark.range(30).selectExpr("id as k2").filter("k2 < -1"),
        $"k1" === $"k2"),
      // Test empty build and stream side
      (spark.range(10).selectExpr("id as k1").filter("k1 < -1"),
        spark.range(30).selectExpr("id as k2").filter("k2 < -1"),
        $"k1" === $"k2"),
      // Test string join key
      (spark.range(10).selectExpr("cast(id * 3 as string) as k1"),
        spark.range(30).selectExpr("cast(id as string) as k2"),
        $"k1" === $"k2"),
      // Test build side at right
      (spark.range(30).selectExpr("cast(id / 3 as string) as k1"),
        spark.range(10).selectExpr("cast(id as string) as k2"),
        $"k1" === $"k2"),
      // Test NULL join key
      (spark.range(10).map(i => if (i % 2 == 0) i else null).selectExpr("value as k1"),
        spark.range(30).map(i => if (i % 4 == 0) i else null).selectExpr("value as k2"),
        $"k1" === $"k2"),
      (spark.range(10).map(i => if (i % 3 == 0) i else null).selectExpr("value as k1"),
        spark.range(30).map(i => if (i % 5 == 0) i else null).selectExpr("value as k2"),
        $"k1" === $"k2"),
      // Test multiple join keys
      (spark.range(10).map(i => if (i % 2 == 0) i else null).selectExpr(
        "value as k1", "cast(value % 5 as short) as k2", "cast(value * 3 as long) as k3"),
        spark.range(30).map(i => if (i % 3 == 0) i else null).selectExpr(
          "value as k4", "cast(value % 5 as short) as k5", "cast(value * 3 as long) as k6"),
        $"k1" === $"k4" && $"k2" === $"k5" && $"k3" === $"k6")
    )
    inputDFs.foreach { case (df1, df2, joinExprs) =>
      withSQLConf(
        // Set broadcast join threshold and number of shuffle partitions,
        // as shuffled hash join depends on these two configs.
        SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
        SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
        // Baseline: the default planner picks sort-merge join here.
        val smjDF = df1.join(df2, joinExprs, "full")
        assert(smjDF.queryExecution.executedPlan.collect {
          case _: SortMergeJoinExec => true }.size === 1)
        val smjResult = smjDF.collect()
        withSQLConf(SQLConf.PREFER_SORTMERGEJOIN.key -> "false") {
          val shjDF = df1.join(df2, joinExprs, "full")
          assert(shjDF.queryExecution.executedPlan.collect {
            case _: ShuffledHashJoinExec => true }.size === 1)
          // Same result between shuffled hash join and sort merge join
          checkAnswer(shjDF, smjResult)
        }
      }
    }
  }
  // SPARK-32649: an inner or semi hash join whose build side turns out empty
  // can short-circuit to an empty result. Verify both the codegen and
  // non-codegen paths for broadcast and shuffled hash joins.
  test("SPARK-32649: Optimize BHJ/SHJ inner/semi join with empty hashed relation") {
    val inputDFs = Seq(
      // Test empty build side for inner join
      (spark.range(30).selectExpr("id as k1"),
        spark.range(10).selectExpr("id as k2").filter("k2 < -1"),
        "inner"),
      // Test empty build side for semi join
      (spark.range(30).selectExpr("id as k1"),
        spark.range(10).selectExpr("id as k2").filter("k2 < -1"),
        "semi")
    )
    inputDFs.foreach { case (df1, df2, joinType) =>
      // Test broadcast hash join
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "200") {
        val bhjCodegenDF = df1.join(df2, $"k1" === $"k2", joinType)
        // The BHJ may sit directly under WholeStageCodegen or under a Project.
        assert(bhjCodegenDF.queryExecution.executedPlan.collect {
          case WholeStageCodegenExec(_ : BroadcastHashJoinExec) => true
          case WholeStageCodegenExec(ProjectExec(_, _ : BroadcastHashJoinExec)) => true
        }.size === 1)
        checkAnswer(bhjCodegenDF, Seq.empty)
        withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
          val bhjNonCodegenDF = df1.join(df2, $"k1" === $"k2", joinType)
          assert(bhjNonCodegenDF.queryExecution.executedPlan.collect {
            case _: BroadcastHashJoinExec => true }.size === 1)
          checkAnswer(bhjNonCodegenDF, Seq.empty)
        }
      }
      // Test shuffled hash join
      withSQLConf(SQLConf.PREFER_SORTMERGEJOIN.key -> "false",
        // Set broadcast join threshold and number of shuffle partitions,
        // as shuffled hash join depends on these two configs.
        SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50",
        SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
        val shjCodegenDF = df1.join(df2, $"k1" === $"k2", joinType)
        assert(shjCodegenDF.queryExecution.executedPlan.collect {
          case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true
          case WholeStageCodegenExec(ProjectExec(_, _ : ShuffledHashJoinExec)) => true
        }.size === 1)
        checkAnswer(shjCodegenDF, Seq.empty)
        withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
          val shjNonCodegenDF = df1.join(df2, $"k1" === $"k2", joinType)
          assert(shjNonCodegenDF.queryExecution.executedPlan.collect {
            case _: ShuffledHashJoinExec => true }.size === 1)
          checkAnswer(shjNonCodegenDF, Seq.empty)
        }
      }
    }
  }
| rednaxelafx/apache-spark | sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala | Scala | apache-2.0 | 49,156 |
package com.twitter.finagle.mux
import com.twitter.conversions.time._
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.netty3.{BufChannelBuffer, ChannelBufferBuf}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.tracing.Trace
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.DefaultTimer
import com.twitter.finagle.{Dtab, Failure, NoStacktrace, Service, Status, WriteException}
import com.twitter.util.{Duration, Future, Promise, Return, Throw, Time, Try, Updatable}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantReadWriteLock
import java.util.logging.{Level, Logger}
import org.jboss.netty.buffer.{ChannelBuffer, ReadOnlyChannelBuffer}
/**
 * Indicates that the server failed to interpret or act on the request. This
 * could mean that the client sent a [[com.twitter.finagle.mux]] message type
 * that the server is unable to process.
 *
 * Mixes in [[NoStacktrace]]: this describes a remote, protocol-level failure,
 * so a local stack trace carries no useful information.
 */
case class ServerError(what: String)
  extends Exception(what)
  with NoStacktrace
/**
 * Indicates that the server encountered an error whilst processing the client's
 * request. In contrast to [[com.twitter.finagle.mux.ServerError]], a
 * ServerApplicationError relates to server application failure rather than
 * failure to interpret the request.
 *
 * Like [[ServerError]], this is stackless ([[NoStacktrace]]) because the
 * failure occurred remotely.
 */
case class ServerApplicationError(what: String)
  extends Exception(what)
  with NoStacktrace
// Tri-state capability flag: Unknown until determined, then Yes or No.
// Used below as the type of `canDispatch`, i.e. whether the peer can handle
// dispatch messages. NOTE(review): exact negotiation is outside this chunk —
// confirm against the dispatch path.
private object Cap extends Enumeration {
  type State = Value
  val Unknown, Yes, No = Value
}
private[twitter] object ClientDispatcher {
  /**
   * The client dispatcher can be in one of 4 states,
   * independent from its transport.
   *
   * - [[Dispatching]] is the stable operating state of a Dispatcher.
   * Requests are dispatched, and the dispatcher's status is
   * [[com.twitter.finagle.Status.Open]].
   * - A dispatcher is [[Draining]] when it has received a `Tdrain`
   * message from its peer, but still has outstanding requests.
   * In this state, we have promised our peer not to send any more
   * requests, thus the dispatcher's status is
   * [[com.twitter.finagle.Status.Busy]], and requests for service are
   * denied.
   * - When a dispatcher is fully drained; that is, it has received a
   * `Tdrain` and there are no more pending requests, the dispatcher's
   * state is set to [[Drained]]. In this state, no more requests are
   * admitted, and they never will be. The dispatcher is useless. It is
   * dead. Its status is set to [[com.twitter.finagle.Status.Closed]].
   * - Finally, if a server has issued the client a lease, its state is
   * set to [[Leasing]] which composes the lease expiry time. This state
   * is equivalent to [[Dispatching]] except if the lease has expired.
   * At this time, the dispatcher's status is set to
   * [[com.twitter.finagle.Status.Busy]]; however, leases are advisory
   * and requests are still admitted.
   */
  sealed trait State
  case object Dispatching extends State
  case object Draining extends State
  case object Drained extends State
  case class Leasing(end: Time) extends State {
    /** Time remaining on the lease (negative once past `end`). */
    def remaining: Duration = end.sinceNow
    /** True once the lease deadline has passed. */
    def expired: Boolean = end < Time.now
  }

  // We reserve a tag for a default ping message so that we
  // can cache a full ping message and avoid encoding it
  // every time.
  val PingTag = Message.MinTag
  val MinTag = PingTag+1
  val MaxTag = Message.MaxTag

  // Failure surfaced to callers whose requests were nacked by the server.
  val NackFailure = Failure.rejected("The request was Nacked by the server")

  // A no-op Updatable, usable as a placeholder response cell.
  val Empty: Updatable[Try[Response]] = Updatable.empty()
}
/**
 * A ClientDispatcher for the mux protocol.
 *
 * Multiplexes concurrent requests over a single transport: each in-flight
 * request is assigned a tag from `tags` and its pending promise is tracked
 * in `reqs`. A background read loop (`loop()`) decodes incoming messages
 * and routes each one to the matching promise via `receive`.
 */
private[twitter] class ClientDispatcher (
  name: String,
  trans: Transport[ChannelBuffer, ChannelBuffer],
  sr: StatsReceiver,
  failureDetectorConfig: FailureDetector.Config
) extends Service[Request, Response] {
  import ClientDispatcher._
  import Message.{MaxTag => _, MinTag => _, _}

  private[this] implicit val timer = DefaultTimer.twitter

  // Maintain the dispatcher's state, whose access is mediated
  // by the readLk and writeLk.
  @volatile private[this] var state: State = Dispatching
  private[this] val (readLk, writeLk) = {
    val lk = new ReentrantReadWriteLock
    (lk.readLock, lk.writeLock)
  }

  // Whether the server understands Tdispatch; starts Unknown and is resolved
  // by the outcome of the first dispatch (see dispatch()).
  @volatile private[this] var canDispatch: Cap.State = Cap.Unknown

  // Shared, pre-failed future returned for requests denied while
  // draining/drained — avoids allocating a new failure per denial.
  private[this] val futureNackedException = Future.exception(NackFailure)

  // We pre-encode a ping message with the reserved ping tag
  // (PingTag) in order to avoid re-encoding this frequently sent
  // message. Since it uses ChannelBuffers, it maintains a read
  // cursor, and thus it is important that it is not used
  // concurrently. This happens to agree with the natural way you'd
  // use it, since a client can only have one outstanding ping per
  // tag.
  private[this] val pingMessage = {
    val buf = new ReadOnlyChannelBuffer(encode(Tping(PingTag)))
    buf.markReaderIndex()
    buf
  }
  // Promise of the single outstanding cached-tag ping, or null when none.
  private[this] val pingPromise = new AtomicReference[Promise[Unit]](null)

  // Pool of free tags, and the map from in-use tags to pending promises.
  private[this] val tags = TagSet(MinTag to MaxTag)
  private[this] val reqs = TagMap[Updatable[Try[Response]]](tags)
  private[this] val log = Logger.getLogger(getClass.getName)

  // Remaining lease in milliseconds; effectively unbounded when no lease
  // is active.
  private[this] val gauge = sr.addGauge("current_lease_ms") {
    state match {
      case l: Leasing => l.remaining.inMilliseconds
      case _ => (Time.Top - Time.now).inMilliseconds
    }
  }
  private[this] val leaseCounter = sr.counter("leased")
  private[this] val drainingCounter = sr.counter("draining")
  private[this] val drainedCounter = sr.counter("drained")

  // We're extra paranoid about logging. The log handler is,
  // after all, outside of our control.
  private[this] def safeLog(msg: String, level: Level = Level.INFO): Unit =
    try {
      log.log(level, msg)
    } catch {
      case _: Throwable =>
    }

  // Free `tag` and return the promise registered under it, if any. When the
  // last tag of a draining session is released, transitions the session to
  // Drained. Returns None for the Empty stand-in that reserves the tags of
  // discarded requests.
  private[this] def releaseTag(tag: Int): Option[Updatable[Try[Response]]] =
    reqs.unmap(tag) match {
      case None => None
      case some =>
        readLk.lock()
        if (state == Draining && tags.isEmpty) {
          drainedCounter.incr()
          safeLog(s"Finished draining a connection to $name", Level.FINE)
          readLk.unlock()
          writeLk.lock()
          state = Drained
          writeLk.unlock()
        } else {
          readLk.unlock()
        }
        if (some eq Empty) None else some
    }

  // Route a decoded message: fulfill the promise registered under its tag,
  // or handle session-level control messages (ping, drain, lease).
  private[this] val receive: Message => Unit = {
    case RreqOk(tag, rep) =>
      for (p <- releaseTag(tag))
        p() = Return(Response(ChannelBufferBuf.Owned(rep)))
    case RreqError(tag, error) =>
      for (p <- releaseTag(tag))
        p() = Throw(ServerApplicationError(error))
    case RreqNack(tag) =>
      for (p <- releaseTag(tag))
        p() = Throw(NackFailure)

    case RdispatchOk(tag, _, rep) =>
      for (p <- releaseTag(tag))
        p() = Return(Response(ChannelBufferBuf.Owned(rep)))
    case RdispatchError(tag, _, error) =>
      for (p <- releaseTag(tag))
        p() = Throw(ServerApplicationError(error))
    case RdispatchNack(tag, _) =>
      for (p <- releaseTag(tag))
        p() = Throw(NackFailure)

    case Rerr(tag, error) =>
      for (p <- releaseTag(tag))
        p() = Throw(ServerError(error))

    // Reply to the cached-tag ping issued by ping().
    case Rping(PingTag) =>
      val p = pingPromise.getAndSet(null)
      if (p != null)
        p.setDone()
    case Rping(tag) =>
      for (p <- releaseTag(tag))
        p() = Return(Response.empty)
    case Tping(tag) =>
      trans.write(encode(Rping(tag)))

    case Tdrain(tag) =>
      safeLog(s"Started draining a connection to $name", Level.FINE)
      drainingCounter.incr()
      // must be synchronized to avoid writing after Rdrain has been sent
      writeLk.lockInterruptibly()
      try {
        state = if (tags.nonEmpty) Draining else {
          safeLog(s"Finished draining a connection to $name", Level.FINE)
          drainedCounter.incr()
          Drained
        }
        trans.write(encode(Rdrain(tag)))
      } finally {
        writeLk.unlock()
      }

    case Tlease(Message.Tlease.MillisDuration, millis) =>
      writeLk.lock()
      try {
        state match {
          case Leasing(_) | Dispatching =>
            state = Leasing(Time.now + millis.milliseconds)
            log.fine(s"leased for ${millis.milliseconds} to ${trans.remoteAddress}")
            leaseCounter.incr()
          case Drained | Draining =>
            // Ignore the lease if we're in the process of draining, since
            // these are anyway irrecoverable states.
        }
      } finally {
        writeLk.unlock()
      }
    // Ignore lease types we don't understand. (They are advisory.)
    case Tlease(_, _) =>

    case m@Tmessage(tag) =>
      log.warning("Did not understand Tmessage[tag=%d] %s".format(tag, m))
      trans.write(encode(Rerr(tag, "badmessage")))
    case m@Rmessage(tag) =>
      val what = "Did not understand Rmessage[tag=%d] %s".format(tag, m)
      log.warning(what)
      for (p <- releaseTag(tag))
        p() = Throw(BadMessageException(what))
  }

  // One step of the read loop: decode the buffer, act on the message,
  // recurse. Decode failures terminate the loop.
  private[this] val readAndAct: ChannelBuffer => Future[Nothing] =
    buf => try {
      val m = decode(buf)
      receive(m)
      loop()
    } catch {
      case exc: BadMessageException =>
        Future.exception(exc)
    }

  private[this] def loop(): Future[Nothing] =
    trans.read() flatMap readAndAct

  // Kick off the read loop; when it fails, close the transport and fail
  // every pending request with the loop's failure.
  loop() onFailure { case exc =>
    trans.close()
    val result = Throw(exc)
    for (tag <- tags) {
      /*
       * unmap the `tag` here to prevent the associated promise from
       * being fetched from the tag map again, and setting a value twice.
       */
      for (p <- reqs.unmap(tag)) p() = result
    }
  }

  /**
   * Ping the peer. Uses the cached, pre-encoded ping message (reserved tag)
   * when no such ping is outstanding; otherwise allocates a fresh tag like
   * a regular request.
   */
  def ping(): Future[Unit] = {
    val done = new Promise[Unit]
    if (pingPromise.compareAndSet(null, done)) {
      pingMessage.resetReaderIndex()
      // Note that we ignore any errors here. In practice this is fine
      // as (1) this will only happen when the session has anyway
      // died; (2) subsequent pings will use freshly allocated tags.
      trans.write(pingMessage) before done
    } else {
      val p = new Promise[Response]
      reqs.map(p) match {
        case None =>
          Future.exception(WriteException(new Exception("Exhausted tags")))
        case Some(tag) =>
          trans.write(encode(Tping(tag))) transform {
            case Return(()) =>
              p.unit
            case t@Throw(_) =>
              // The write failed, so no reply will ever arrive: free the tag.
              releaseTag(tag)
              Future.const(t)
          }
      }
    }
  }

  // Admit the request unless the session is draining or drained, in which
  // case it is nacked.
  def apply(req: Request): Future[Response] = {
    readLk.lock()
    try state match {
      case Dispatching | Leasing(_) => dispatch(req)
      case Draining | Drained => futureNackedException
    } finally readLk.unlock()
  }

  /**
   * Dispatch a request.
   *
   * @param req the buffer representation of the request to be dispatched
   */
  private def dispatch(req: Request): Future[Response] = {
    val p = new Promise[Response]
    val couldDispatch = canDispatch

    val tag = reqs.map(p) getOrElse {
      return Future.exception(WriteException(new Exception("Exhausted tags")))
    }

    // Use the legacy Treq encoding when the server is known not to support
    // Tdispatch; otherwise send a Tdispatch with broadcast contexts.
    val msg =
      if (couldDispatch == Cap.No)
        Treq(tag, Some(Trace.id), BufChannelBuffer(req.body))
      else {
        val contexts = Contexts.broadcast.marshal() map { case (k, v) =>
          (BufChannelBuffer(k), BufChannelBuffer(v))
        }
        Tdispatch(tag, contexts.toSeq, req.destination, Dtab.local,
          BufChannelBuffer(req.body))
      }

    trans.write(encode(msg)) onFailure { case exc =>
      releaseTag(tag)
    } before {
      p.setInterruptHandler { case cause =>
        // We replace the current Updatable, if any, with a stand-in to reserve
        // the tag of discarded requests until Tdiscarded is acknowledged by the
        // peer.
        for (reqP <- reqs.maybeRemap(tag, Empty)) {
          trans.write(encode(Tdiscarded(tag, cause.toString)))
          reqP() = Throw(cause)
        }
      }
      p
    }

    if (couldDispatch == Cap.Unknown) {
      // First dispatch probes the server's capability: success means
      // Tdispatch is supported; a ServerError means it isn't.
      p onSuccess { _ =>
        canDispatch = Cap.Yes
      } rescue {
        case ServerError(_) =>
          // We've determined that the server cannot handle Tdispatch messages,
          // so we fall back to a Treq and disable tracing in order to not
          // double-count the request.
          canDispatch = Cap.No
          dispatch(req)
      }
    } else p
  }

  // Session-level failure detector driven by ping(); its status is folded
  // into this dispatcher's status below.
  private[this] val detector = {
    val close = () => trans.close(Time.now)
    val dsr = sr.scope("failuredetector")
    FailureDetector(failureDetectorConfig, ping, close, dsr)
  }

  // Worst of the failure detector's status and the transport/session state;
  // an expired lease reports Busy (leases are advisory).
  override def status: Status =
    Status.worst(detector.status,
      trans.status match {
        case Status.Closed => Status.Closed
        case Status.Busy => Status.Busy
        case Status.Open =>
          readLk.lock()
          try state match {
            case Draining => Status.Busy
            case Drained => Status.Closed
            case leased@Leasing(_) if leased.expired => Status.Busy
            case Leasing(_) | Dispatching => Status.Open
          } finally readLk.unlock()
      }
    )

  // Closing the dispatcher closes the underlying transport.
  override def close(deadline: Time): Future[Unit] = trans.close(deadline)
}
| rodrigodealer/finagle | finagle-mux/src/main/scala/com/twitter/finagle/mux/Client.scala | Scala | apache-2.0 | 13,122 |
package recommender
import breeze.linalg.Vector
import cmd.NameAndDescription
/**
* Created by Ondra Fiedler on 4.8.14.
*/
/**
* Distance metric between two Vectors
*/
/**
 * Distance metric between two Vectors. Implementations are Serializable so
 * they can be shipped to Spark workers, and carry a name/description for
 * command-line selection.
 */
trait DistanceMetric extends Serializable with NameAndDescription {
  /** Computes the distance between `v1` and `v2`. */
  def getDistance(v1: Vector[Double], v2: Vector[Double]): Double
}
object DistanceMetric {
  // Registry of all available metrics.
  val distanceMetrics: List[DistanceMetric] = List(EuclideanDistance, CosineDistance)
}
object EuclideanDistance extends DistanceMetric with Serializable {

  /**
   * Mean squared difference over the coordinates that are active in `v1`
   * and nonzero in `v2`. Returns positive infinity when the two vectors
   * share no such coordinate.
   */
  def getDistance(v1: Vector[Double], v2: Vector[Double]): Double = {
    val (squaredSum, sharedCount) =
      v1.activeKeysIterator.foldLeft((0d, 0)) { case ((acc, count), idx) =>
        if (v2(idx) == 0) (acc, count)
        else {
          val diff = v1(idx) - v2(idx)
          (acc + diff * diff, count + 1)
        }
      }
    if (sharedCount == 0) Double.PositiveInfinity
    else squaredSum / sharedCount
  }

  override def getName: String = "euclidean"

  override def getDescription: String = "Euclidean distance"
}
object CosineDistance extends DistanceMetric with Serializable {

  /** One minus the cosine similarity of the two vectors. */
  def getDistance(v1: Vector[Double], v2: Vector[Double]): Double = {
    val similarity = (v1 dot v2) / (v1.norm(2.0) * v2.norm(2.0))
    1 - similarity
  }

  override def getName: String = "cosine"

  override def getDescription: String = "Cosine distance"
}
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.serialization
import java.util.EnumMap
import java.util.{Map => JMap}
import scala.collection.{Map => SMap}
import org.elasticsearch.hadoop.serialization.bulk.MetadataExtractor.{Metadata => InternalMetadata}
import org.elasticsearch.hadoop.serialization.bulk.PerEntityPoolingMetadataExtractor
import org.elasticsearch.spark.rdd.{Metadata => SparkMetadata}
/**
 * Extracts a metadata value from the pooled `entity` (inherited from
 * PerEntityPoolingMetadataExtractor). The entity may be a Java map, a Scala
 * map, or a bare value; in the last case it is only meaningful for ID.
 */
private[spark] class ScalaMetadataExtractor extends PerEntityPoolingMetadataExtractor {
  override def getValue(metadata: InternalMetadata): AnyRef = {
    val sparkEnum = ScalaMetadataExtractor.toSparkEnum(metadata)
    if (sparkEnum == null) return null
    // NOTE: the JMap/SMap patterns are type-erased; the asInstanceOf casts
    // below rely on callers only supplying maps keyed by SparkMetadata.
    entity match {
      case jmap: JMap[_, _] => jmap.asInstanceOf[JMap[SparkMetadata, AnyRef]].get(sparkEnum)
      case smap: SMap[_, _] => smap.asInstanceOf[SMap[SparkMetadata, AnyRef]].getOrElse(sparkEnum, null)
      // A non-map entity is treated as the document ID itself.
      case _ => if (sparkEnum == SparkMetadata.ID) entity else null;
    }
  }
}
object ScalaMetadataExtractor {
  // Eagerly built lookup table from the internal metadata enum to the
  // Spark-facing one, matched by enum constant name.
  val map = new EnumMap[InternalMetadata, SparkMetadata](classOf[InternalMetadata])
  SparkMetadata.values.foreach { sparkEnum =>
    map.put(InternalMetadata.valueOf(sparkEnum.name()), sparkEnum)
  }

  def toSparkEnum(metadata: InternalMetadata) = map.get(metadata)
}
package com.github.jpbetz.subspace
import org.testng.Assert._
import org.testng.annotations.Test
/**
 * Unit tests for Vector3: magnitude, normalization, dot product, arithmetic,
 * distance, clamping, linear interpolation, copying, and buffer round-trips.
 * Float comparisons with tolerance go through assertFloat (from Asserts).
 */
class Vector3Test extends Asserts {

  // TODO: How to handle divide by zero? Scala x/0f results in Infinity.

  @Test def testMagnitude(): Unit = {
    assertEquals(Vector3(0, 0, 0).magnitude, 0f)
    assertEquals(Vector3(1, 0, 0).magnitude, 1f)
    assertEquals(Vector3(0, 1, 0).magnitude, 1f)
    assertEquals(Vector3(0, 0, 1).magnitude, 1f)
    assertEquals(Vector3(-1, 0, 0).magnitude, 1f)

    // Unit components along two axes: magnitude is sqrt(2) ~= 1.414.
    assertFloat(Vector3(1, 1, 0).magnitude, 1.414f)
    assertFloat(Vector3(1, 0, 1).magnitude, 1.414f)
    assertFloat(Vector3(0, 1, 1).magnitude, 1.414f)
    assertFloat(Vector3(-1, -1, 0).magnitude, 1.414f)
  }

  @Test def testNormalize(): Unit = {
    // TODO: ???
    //assertEquals(Vector3(0, 0).normalize, Vector3(Float.NaN, Float.NaN))

    assertEquals(Vector3(2, 0, 0).normalize, Vector3(1, 0, 0))
    assertEquals(Vector3(0, 2, 0).normalize, Vector3(0, 1, 0))
    assertEquals(Vector3(0, 0, 2).normalize, Vector3(0, 0, 1))

    // Normalizing any non-zero diagonal vector yields a unit vector with
    // equal components.
    Seq(Vector3(2, 2, 2), Vector3(-2, -2, -2)) foreach { vec =>
      val normal = vec.normalize
      assertEquals(normal.x, normal.y)
      assertFloat(normal.magnitude, 1f)
    }
  }

  @Test def testDotProduct(): Unit = {
    assertFloat(Vector3(0, 0, 0).dotProduct(Vector3(0, 0, 0)), 0f)
    // 2*3 + 4*5 + 1*6 = 32
    assertFloat(Vector3(2, 4, 1).dotProduct(Vector3(3, 5, 6)), 32f)
  }

  @Test def testArithmetic(): Unit = {
    assertEquals(-Vector3(0, 0, 0), Vector3(0, 0, 0))
    assertEquals(-Vector3(1, 2, -3), Vector3(-1, -2, 3))

    assertEquals(Vector3(2, 3, 4) + Vector3(5, 2, 1), Vector3(7, 5, 5))
    assertEquals(Vector3(2, 3, 4) - Vector3(5, 2, 1), Vector3(-3, 1, 3))

    assertEquals(Vector3(0, 0, 0) * 0, Vector3(0, 0, 0))
    assertEquals(Vector3(2, 3, 1) * 0, Vector3(0, 0, 0))
    assertEquals(Vector3(0, 0, 0) * 10, Vector3(0, 0, 0))
    assertEquals(Vector3(1, 2, 3) * 2, Vector3(2, 4, 6))

    // TODO: divide by zero should not be infinity. Although it looks like that is the convention in scala.
    assertEquals(Vector3(1, 1, 1) / 0, Vector3(Float.PositiveInfinity, Float.PositiveInfinity, Float.PositiveInfinity))
    assertEquals(Vector3(0, 0, 0) / 5, Vector3(0, 0, 0))
  }

  @Test def testDistanceTo(): Unit = {
    assertEquals(Vector3(0, 0, 0).distanceTo(Vector3(0, 0, 0)), 0f)
    assertEquals(Vector3(0, 0, 0).distanceTo(Vector3(1, 0, 0)), 1f)
    assertFloat(Vector3(0, 0, 0).distanceTo(Vector3(1, 1, 0)), 1.414f)
    assertFloat(Vector3(0, 0, 0).distanceTo(Vector3(1, 0, 1)), 1.414f)
    assertFloat(Vector3(0, 0, 0).distanceTo(Vector3(0, 1, 1)), 1.414f)
    // Body diagonal of the unit cube: sqrt(3) ~= 1.732.
    assertFloat(Vector3(0, 0, 0).distanceTo(Vector3(1, 1, 1)), 1.732f)
    assertFloat(Vector3(-0.5f, -0.5f, -0.5f).distanceTo(Vector3(0.5f, 0.5f, 0.5f)), 1.732f)
    assertFloat(Vector3(-0.5f, 0.5f, 0.5f).distanceTo(Vector3(0.5f, -0.5f, -0.5f)), 1.732f)
  }

  @Test def testClamp(): Unit = {
    assertEquals(Vector3(0, 0, 0).clamp(Vector3(0, 0, 0), Vector3(0, 0, 0)), Vector3(0, 0, 0))
    // Values above the max clamp down; below the min clamp up — per component.
    assertEquals(Vector3(4, 5, 6).clamp(Vector3(-3, -2, -1), Vector3(3, 2, 1)), Vector3(3, 2, 1))
    assertEquals(Vector3(-4, -5, -6).clamp(Vector3(-3, -2, -1), Vector3(3, 2, 1)), Vector3(-3, -2, -1))
  }

  @Test def testLerp(): Unit = {
    assertEquals(Vector3(0, 0, 0).lerp(Vector3(0, 0, 0), 0), Vector3(0, 0, 0))
    assertEquals(Vector3(0, 0, 0).lerp(Vector3(0, 0, 0), 1), Vector3(0, 0, 0))

    // t sweeps from the receiver (t=0) to the argument (t=1).
    assertEquals(Vector3(-1, -3, -5).lerp(Vector3(1, 3, 5), 0f), Vector3(-1, -3, -5))
    assertEquals(Vector3(-1, -3, -5).lerp(Vector3(1, 3, 5), 0.25f), Vector3(-0.5f, -1.5f, -2.5f))
    assertEquals(Vector3(-1, -3, -5).lerp(Vector3(1, 3, 5), 0.5f), Vector3(0, 0, 0))
    assertEquals(Vector3(-1, -3, -5).lerp(Vector3(1, 3, 5), 0.75f), Vector3(0.5f, 1.5f, 2.5f))
    assertEquals(Vector3(-1, -3, -5).lerp(Vector3(1, 3, 5), 1f), Vector3(1, 3, 5))
  }

  @Test def testCopy(): Unit = {
    assertEquals(Vector3(0, 0, 0).copy(), Vector3(0, 0, 0))
    assertEquals(Vector3(1, 2, 3).copy(), Vector3(1, 2, 3))
  }

  @Test def testBuffer(): Unit = {
    // Round-trip a vector through a freshly allocated buffer and through an
    // empty buffer updated in place.
    val v1 = Vector3(1, 10, 5)
    val buffer = v1.allocateBuffer
    assertEquals(v1, Vector3.fromBuffer(buffer))

    val updateBuffer = Vector3.allocateEmptyBuffer
    v1.updateBuffer(updateBuffer)
    assertEquals(v1, Vector3.fromBuffer(updateBuffer))
  }
}
| jpbetz/subspace | subspace/src/test/scala/com/github/jpbetz/subspace/Vector3Test.scala | Scala | apache-2.0 | 4,302 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.types
import io.rdbc.pgsql.core.Oid
/**
 * Describes a PostgreSQL data type and ties it to the PgVal subtype that
 * carries values of that type.
 */
trait PgType[T <: PgVal[_]] {
  /** PostgreSQL object identifier (OID) of this type. */
  def oid: Oid
  /** PostgreSQL name of this type. */
  def name: String

  // The value representation for this type, exposed both as a type member
  // and as a runtime Class for reflective use.
  type ValType = T
  def valCls: Class[T]
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgType.scala | Scala | apache-2.0 | 778 |
def f(a: Int = 1, b: Int) {}
/* */ f(1, 2) | ilinum/intellij-scala | testdata/resolve2/function/default/FirstAsTwo.scala | Scala | apache-2.0 | 43 |
/*
* Copyright (c) 2015-6 x.ai inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.x
package typeless
package test
import org.scalatest._
import shapeless._
import typeless.hlist._
/**
 * Tests for the Subset type class: extracting a (reordered) sub-HList of
 * types from a source HList, or failing with None when some requested type
 * is absent.
 */
class SubsetTests extends FunSuite with Matchers {
  // Heterogeneous search space shared by both tests.
  val ls = 1 :: 2d :: 'a' :: HNil
  test( "find subset" ) {
    // Requested types exist in the source; note the result preserves the
    // requested order (Char before Int), not the source order.
    val subset = Subset[Int :: Double :: Char :: HNil, Char :: Int :: HNil]
    assert(
      subset( ls ) === Some( 'a' :: 1 :: HNil ) )
  }
  test( "not find a subset" ) {
    // String is not present in the source HList, so extraction fails.
    val subset = Subset[Int :: Double :: Char :: HNil, String :: Char :: HNil]
    assert(
      subset( ls ) === None )
  }
}
| xdotai/typeless | src/test/scala/subset.scala | Scala | apache-2.0 | 1,125 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.connectors
import play.mvc.Http.Status
import cats.data.EitherT
import com.google.inject.{ImplementedBy, Inject, Singleton}
import uk.gov.hmrc.helptosavefrontend.config.FrontendAppConfig
import uk.gov.hmrc.helptosavefrontend.http.HttpClient.HttpClientOps
import uk.gov.hmrc.helptosavefrontend.models.reminder.{CancelHtsUserReminder, HtsUserSchedule, UpdateReminderEmail}
import uk.gov.hmrc.helptosavefrontend.util.HttpResponseOps._
import uk.gov.hmrc.helptosavefrontend.util.Result
import uk.gov.hmrc.http.{HeaderCarrier, HttpClient, HttpResponse}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
/**
 * Client for the help-to-save-reminder backend service: CRUD-style
 * operations on a user's reminder schedule and reminder email address.
 */
@ImplementedBy(classOf[HelpToSaveReminderConnectorImpl])
trait HelpToSaveReminderConnector {

  /** Create or update the user's reminder schedule. */
  def updateHtsUser(htsUser: HtsUserSchedule)(implicit hc: HeaderCarrier, ex: ExecutionContext): Result[HtsUserSchedule]

  /** Fetch the reminder schedule for the given NINO. */
  def getHtsUser(nino: String)(implicit hc: HeaderCarrier, ex: ExecutionContext): Result[HtsUserSchedule]

  /** Cancel (delete) the user's reminder schedule. */
  def cancelHtsUserReminders(
    cancelHtsUserReminder: CancelHtsUserReminder
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit]

  /** Update the email address reminders are sent to. */
  def updateReminderEmail(
    updateReminderEmail: UpdateReminderEmail
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit]
}
/**
 * Default HTTP implementation of [[HelpToSaveReminderConnector]]. Each public
 * operation delegates to a small private wrapper that issues the HTTP call
 * and funnels the response through `handle`.
 *
 * NOTE(review): handlePost/handlePostCancel/handlePostEmailUpdate are
 * copy-paste identical apart from the body type — candidates for a single
 * generic helper if `http.post`'s implicit requirements allow it.
 */
@Singleton
class HelpToSaveReminderConnectorImpl @Inject() (http: HttpClient)(implicit frontendAppConfig: FrontendAppConfig)
    extends HelpToSaveReminderConnector {

  // Backend base URL and the endpoints built from it.
  private val htsReminderURL = frontendAppConfig.helpToSaveReminderUrl
  private val updateHtsReminderURL = s"$htsReminderURL/help-to-save-reminder/update-htsuser-entity"
  private def getHtsReminderUserURL(nino: String) = s"$htsReminderURL/help-to-save-reminder/gethtsuser/$nino"
  private val cancelHtsReminderURL = s"$htsReminderURL/help-to-save-reminder/delete-htsuser-entity"
  private val emailUpdateHtsReminderURL = s"$htsReminderURL/help-to-save-reminder/update-htsuser-email"

  private val emptyQueryParameters: Map[String, String] = Map.empty[String, String]

  override def updateHtsUser(htsUser: HtsUserSchedule)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[HtsUserSchedule] =
    handlePost(updateHtsReminderURL, htsUser, _.parseJSON[HtsUserSchedule](), "update htsUser", identity)

  override def getHtsUser(nino: String)(implicit hc: HeaderCarrier, ex: ExecutionContext): Result[HtsUserSchedule] =
    handleGet(getHtsReminderUserURL(nino), emptyQueryParameters, _.parseJSON[HtsUserSchedule](), "get hts user", identity)

  override def cancelHtsUserReminders(
    cancelHtsUserReminder: CancelHtsUserReminder
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit] =
    // Success body is discarded; only the status matters for a cancel.
    handlePostCancel(cancelHtsReminderURL, cancelHtsUserReminder, _ ⇒ Right(()), "cancel reminder", identity)

  // POST an HtsUserSchedule body and map the response.
  private def handlePost[A, B](
    url: String,
    body: HtsUserSchedule,
    ifHTTP200: HttpResponse ⇒ Either[B, A],
    description: ⇒ String,
    toError: String ⇒ B
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): EitherT[Future, B, A] =
    handle(http.post(url, body), ifHTTP200, description, toError)

  // POST a CancelHtsUserReminder body and map the response.
  private def handlePostCancel[A, B](
    url: String,
    body: CancelHtsUserReminder,
    ifHTTP200: HttpResponse ⇒ Either[B, A],
    description: ⇒ String,
    toError: String ⇒ B
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): EitherT[Future, B, A] =
    handle(http.post(url, body), ifHTTP200, description, toError)

  // GET with query parameters and map the response.
  private def handleGet[A, B](
    url: String,
    queryParameters: Map[String, String],
    ifHTTP200: HttpResponse ⇒ Either[B, A],
    description: ⇒ String,
    toError: String ⇒ B
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): EitherT[Future, B, A] =
    handle(http.get(url, queryParameters), ifHTTP200, description, toError)

  // Shared response handling: parse the body on "success", otherwise build a
  // descriptive error; non-fatal transport failures become errors too.
  //
  // NOTE(review): despite its name, ifHTTP200 is also invoked for 404
  // responses — presumably so "no reminder found" can be surfaced through the
  // same parsing path; confirm against the backend contract.
  private def handle[A, B](
    resF: Future[HttpResponse],
    ifHTTP200: HttpResponse ⇒ Either[B, A],
    description: ⇒ String,
    toError: String ⇒ B
  )(implicit ec: ExecutionContext) =
    EitherT(
      resF
        .map { response ⇒
          if (response.status == Status.OK || response.status == Status.NOT_FOUND) {
            ifHTTP200(response)
          } else {
            Left(toError(s"Call to $description came back with status ${response.status}. Body was ${(response.body)}"))
          }
        }
        .recover {
          case NonFatal(t) ⇒ Left(toError(s"Call to $description failed: ${t.getMessage}"))
        }
    )

  override def updateReminderEmail(
    updateReminderEmail: UpdateReminderEmail
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit] =
    handlePostEmailUpdate(emailUpdateHtsReminderURL, updateReminderEmail, _ ⇒ Right(()), "update email", identity)

  // POST an UpdateReminderEmail body and map the response.
  private def handlePostEmailUpdate[A, B](
    url: String,
    body: UpdateReminderEmail,
    ifHTTP200: HttpResponse ⇒ Either[B, A],
    description: ⇒ String,
    toError: String ⇒ B
  )(implicit hc: HeaderCarrier, ec: ExecutionContext): EitherT[Future, B, A] =
    handle(http.post(url, body), ifHTTP200, description, toError)
}
| hmrc/help-to-save-frontend | app/uk/gov/hmrc/helptosavefrontend/connectors/HelpToSaveReminderConnector.scala | Scala | apache-2.0 | 5,660 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async.ws
import java.util.{ ArrayList => JArrayList }
import io.gatling.core.stats.StatsEngine
import io.gatling.http.action.async.AsyncTx
import io.gatling.http.ahc.HttpEngine
import akka.actor.ActorRef
import org.asynchttpclient.ws.{ WebSocketListener, WebSocketUpgradeHandler }
object WsTx {

  /**
   * Opens the WebSocket connection for the given async transaction: resolves
   * the HTTP client for the session, registers a WsListener that forwards
   * socket events to `wsActor`, and issues the upgrade request.
   *
   * NOTE(review): statsEngine is unused in this open-source body —
   * presumably consumed by the [fl] (FrontLine) insertion points below;
   * confirm before removing.
   */
  def start(tx: AsyncTx, wsActor: ActorRef, httpEngine: HttpEngine, statsEngine: StatsEngine): Unit = {
    // Resolving the client may update the session, so carry the new
    // session along in the transaction.
    val (newTx, client) = {
      val (newSession, client) = httpEngine.httpClient(tx.session, tx.protocol)
      (tx.copy(session = newSession), client)
    }

    val handler = {
      // can't use a singletonList as list will be cleared on close
      val listeners = new JArrayList[WebSocketListener](1)
      listeners.add(new WsListener(newTx, wsActor))
      new WebSocketUpgradeHandler(listeners)
    }
    // [fl]
    //
    // [fl]
    client.executeRequest(tx.request, handler)
  }
}
| GabrielPlassard/gatling | gatling-http/src/main/scala/io/gatling/http/action/async/ws/WsTx.scala | Scala | apache-2.0 | 1,549 |
package slick.jdbc
import scala.concurrent.ExecutionContext
import scala.reflect.ClassTag
import scala.util.{Failure, Success}
import java.sql.DatabaseMetaData
import slick.SlickException
import slick.ast.ColumnOption
import slick.dbio._
import slick.jdbc.meta._
import slick.{model => m}
import slick.relational.RelationalProfile
import slick.sql.SqlProfile
import slick.util.Logging
/** Build a Slick model from introspecting the JDBC metadata.
*
* In most cases you are better off transforming the generated model instead of overriding functionality here. It is
* only useful if you need easy access to the JDBC metadata in order to influence how the model is generated. A good
* use case would be interpreting column types or default values that Slick doesn't understand out of the box. If you
* just want to remove or hard code some default values, transform the resulting model instead.
*
* The tight coupling can easily lead to source code incompatibilities in future versions. Avoid hooking in here if you
* don't have to.
*
* @param ignoreInvalidDefaults see JdbcModelBuilder#ColumnBuilder#default
*/
class JdbcModelBuilder(mTables: Seq[MTable], ignoreInvalidDefaults: Boolean)(implicit ec: ExecutionContext) extends Logging {
////////////////////////////////////////////////////////////////////// Actions for reading the required JDBC metadata
  /** Read the column metadata for a table in ordinal position order */
  def readColumns(t: MTable): DBIO[Vector[MColumn]] = t.getColumns.map(_.sortBy(_.ordinalPosition))

  /** Read the primary key metadata for a table in key sequence order */
  def readPrimaryKeys(t: MTable): DBIO[Vector[MPrimaryKey]] = t.getPrimaryKeys.map(_.sortBy(_.keySeq))

  /** Read the foreign key metadata for a table grouped by name and in key sequence order */
  def readForeignKeys(t: MTable): DBIO[Seq[Seq[MForeignKey]]] = t.getImportedKeys.map(
    // remove foreign keys pointing to tables which were not included
    _.filter(fk => tableNamersByQName.isDefinedAt(fk.pkTable))
      // one group per compound foreign key, ordered deterministically
      .groupBy(fk => (fk.pkTable,fk.fkName,fk.pkName,fk.fkTable))
      .toSeq
      .sortBy{case (key,_) => (key._1.name,key._2,key._3,key._4.name)}
      .map(_._2.sortBy(_.keySeq)) // respect order
  )

  /** Read the index metadata grouped by name and in ordinal position order.
    * Some drivers throw from getIndexInfo; in that case indices are skipped
    * with a debug log rather than failing the whole model build. */
  def readIndices(t: MTable): DBIO[Seq[Seq[MIndexInfo]]] = t.getIndexInfo().asTry.map {
    case Success(iis) =>
      iis.groupBy(_.indexName).toSeq.sortBy(_._1).map(_._2.sortBy(_.ordinalPosition)) // respect order
    case Failure(e: java.sql.SQLException) => // TODO: this needs a test!
      logger.debug(s"Skipping indices of table ${t.name.name} due to exception during getIndexInfo: "+e.getMessage.trim)
      Seq()
    case Failure(e) => throw e
  }
///////////////////////////////////////////////////////////////////////////////////////////// Builder factory methods
  /** Table namer factory. Override for customization. */
  def createTableNamer(meta: MTable): TableNamer = new TableNamer(meta)

  /** Column model builder factory. Override for customization.
    * @group Basic customization overrides */
  def createColumnBuilder(tableBuilder: TableBuilder, meta: MColumn): ColumnBuilder = new ColumnBuilder(tableBuilder, meta)

  /** Primary key model builder factory. Override for customization. */
  def createPrimaryKeyBuilder(tableBuilder: TableBuilder, meta: Seq[MPrimaryKey]): PrimaryKeyBuilder = new PrimaryKeyBuilder(tableBuilder, meta)

  /** Foreign key model builder factory. Override for customization. */
  def createForeignKeyBuilder(tableBuilder: TableBuilder, meta: Seq[MForeignKey]): ForeignKeyBuilder = new ForeignKeyBuilder(tableBuilder, meta)

  /** Index model builder factory. Override for customization. */
  def createIndexBuilder(tableBuilder: TableBuilder, meta: Seq[MIndexInfo]): IndexBuilder = new IndexBuilder(tableBuilder, meta)
//////////////////////////////////////////////////////////////////////////////////////////////////////// Main builder
  // Namers for all introspected tables, and a lookup by qualified JDBC name.
  lazy val tableNamers: Seq[TableNamer] = mTables.map(createTableNamer)
  lazy val tableNamersByQName: Map[MQName, TableNamer] = mTables.map(m => m.name).zip(tableNamers).toMap

  /** Table model builder factory. Override for customization.
    * @group Basic customization overrides */
  def createTableBuilder(namer: TableNamer): DBIO[TableBuilder] = for {
    cs <- readColumns(namer.meta)
    pks <- readPrimaryKeys(namer.meta)
    fks <- readForeignKeys(namer.meta)
    idxs <- readIndices(namer.meta)
  } yield new TableBuilder(namer.meta, namer, cs, pks, fks, idxs)

  /** Creates a Slick data model from jdbc meta data. Foreign keys pointing out of the given tables
    * are not included. */
  def buildModel: DBIO[m.Model] = for {
    ts <- DBIO.sequence(tableNamers.map(createTableBuilder))
    tablesByQName = ts.map(t => t.meta.name -> t).toMap
    builders = createBuilders(tablesByQName)
  } yield m.Model(ts.sortBy(_.meta.name.name).map(_.buildModel(builders)))

  /** Cross-table lookup context factory. Override for customization. */
  def createBuilders(tablesByQName: Map[MQName, TableBuilder]) = new Builders(tablesByQName)

  // Carries the table lookup needed to resolve foreign key targets.
  class Builders(val tablesByQName: Map[MQName, TableBuilder])
  /** Converts from java.sql.Types w/ type name to the corresponding Java class name (with fully qualified path). */
  def jdbcTypeToScala(jdbcType: Int, typeName: String = ""): ClassTag[_] = {
    import java.sql.Types._
    import scala.reflect.classTag
    // see TABLE B-1 of JSR-000221 JBDCTM API Specification 4.1 Maintenance Release
    // Mapping to corresponding Scala types where applicable
    jdbcType match {
      case CHAR | VARCHAR | LONGVARCHAR | NCHAR | NVARCHAR | LONGNVARCHAR => classTag[String]
      case NUMERIC | DECIMAL => classTag[BigDecimal]
      case BIT | BOOLEAN => classTag[Boolean]
      case TINYINT => classTag[Byte]
      case SMALLINT => classTag[Short]
      case INTEGER => classTag[Int]
      case BIGINT => classTag[Long]
      case REAL => classTag[Float]
      case FLOAT | DOUBLE => classTag[Double]
      case BINARY | VARBINARY | LONGVARBINARY | BLOB => classTag[java.sql.Blob]
      case DATE => classTag[java.sql.Date]
      case TIME => classTag[java.sql.Time]
      case TIMESTAMP => classTag[java.sql.Timestamp]
      case CLOB => classTag[java.sql.Clob]
      // case ARRAY => classTag[java.sql.Array]
      // case STRUCT => classTag[java.sql.Struct]
      // case REF => classTag[java.sql.Ref]
      // case DATALINK => classTag[java.net.URL]
      // case ROWID => classTag[java.sql.RowId]
      // case NCLOB => classTag[java.sql.NClob]
      // case SQLXML => classTag[java.sql.SQLXML]
      case NULL => classTag[Null]
      // Unknown/unsupported types fall back to a best-guess tag with a warning.
      case DISTINCT => logger.warn(s"Found jdbc type DISTINCT. Assuming Blob. This may be wrong. You can override ModelBuilder#Table#Column#tpe to fix this."); classTag[java.sql.Blob] // FIXME
      case t => logger.warn(s"Found unknown jdbc type $t. Assuming String. This may be wrong. You can override ModelBuilder#Table#Column#tpe to fix this."); classTag[String] // FIXME
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////// Builder implementations
  // Derives the (qualified) model name for a table from its JDBC metadata.
  class TableNamer(val meta: MTable) {
    /** Table name */
    def name: String = meta.name.name
    /** Optional table schema
      * @group Basic customization overrides */
    def schema: Option[String] = meta.name.schema
    /** Optional table catalog
      * @group Basic customization overrides */
    def catalog = meta.name.catalog
    /** Fully qualified table name */
    final lazy val qualifiedName = m.QualifiedName(name,schema,catalog)
  }
  /** Table model builder
    * @group Basic customization overrides */
  class TableBuilder(val meta: MTable,
                     val namer: TableNamer,
                     val mColumns: Seq[MColumn],
                     val mPrimaryKeys: Seq[MPrimaryKey],
                     val mForeignKeys: Seq[Seq[MForeignKey]],
                     val mIndices: Seq[Seq[MIndexInfo]]) { table =>

    // models
    // Assemble the table model; foreign keys need the cross-table Builders
    // context, so they are built on demand rather than cached.
    def buildModel(builders: Builders) = m.Table(namer.qualifiedName, columns, primaryKey, buildForeignKeys(builders), indices)

    /** Column models in ordinal position order */
    final lazy val columns: Seq[m.Column] = mColumns.map(c => createColumnBuilder(this, c).model)
    /** Column models by name */
    final lazy val columnsByName: Map[String,m.Column] = columns.map(c => c.name -> c).toMap
    /** Primary key models in key sequence order */
    final lazy val primaryKey: Option[m.PrimaryKey] = createPrimaryKeyBuilder(this, mPrimaryKeys).model
    /** Foreign key models by key sequence order */
    final def buildForeignKeys(builders: Builders) =
      mForeignKeys.map(mf => createForeignKeyBuilder(this, mf).buildModel(builders)).flatten
    /** Index models by ordinal position order */
    final lazy val indices: Seq[m.Index] = mIndices.map(mi => createIndexBuilder(this, mi).model).flatten
  }
  /** Column model builder.
    * @group Basic customization overrides */
  class ColumnBuilder(tableBuilder: TableBuilder, meta: MColumn) {
    /** Regex matcher to extract a string out of surrounding '' quotes */
    final val StringPattern = """^'(.*)'$""".r
    /** Scala type this column is mapped to; single-character strings map to Char */
    def tpe = jdbcTypeToScala(meta.sqlType, meta.typeName).toString match {
      case "java.lang.String" => if(meta.size == Some(1)) "Char" else "String"
      case t => t
    }
    /** Column name as reported by the JDBC meta data */
    def name = meta.name
    /** Indicates whether this is a nullable column */
    def nullable = meta.nullable.getOrElse(true)
    /** Indicates whether this is an auto increment column */
    def autoInc: Boolean = meta.isAutoInc.getOrElse(false)
    /** Indicates whether a ColumnOption Primary key should be put into the model.
      * Only valid for single column primary keys. */
    def createPrimaryKeyColumnOption: Boolean =
      tableBuilder.mPrimaryKeys.size == 1 && tableBuilder.mPrimaryKeys.head.column == meta.name
    /** A (potentially non-portable) database column type for string types, this should not
      * include a length ascription for other types it should */
    def dbType: Option[String] = Some(meta.typeName)
    /** Column length of string types */
    def length: Option[Int] = if(tpe == "String") meta.size else None // Only valid for strings!
    /** Indicates whether this should be a varchar in case of a string column.
      * Currently defaults to true. Should be based on the value of dbType in the future. */
    def varying: Boolean =
      Seq(java.sql.Types.NVARCHAR, java.sql.Types.VARCHAR, java.sql.Types.LONGVARCHAR, java.sql.Types.LONGNVARCHAR) contains meta.sqlType
    /** Raw default value string as reported by the JDBC meta data, if any */
    def rawDefault = meta.columnDef
    /** The default value for the column. The outer option is used to indicate if a default value is given. The inner
      * Option is used to allow giving None for a nullable column. This method must not return Some(None) for a
      * non-nullable column.
      *
      * Default values for autoInc column are automatically ignored (as if returning None).
      *
      * If `ignoreInvalidDefaults = true`, Slick catches scala.MatchError and java.lang.NumberFormatException thrown by
      * this method, logs the message and treats it as no default value for convenience.
      * (The non-exhaustive match below is intentional for that reason — see convenientDefault.) */
    def default: Option[Option[Any]] = rawDefault.map { v =>
      if(v == "NULL") None else {
        // NOTE: When extending this list, please also extend the code generator accordingly
        Some((v,tpe) match {
          case (v,"Byte") => v.toByte
          case (v,"Short") => v.toShort
          case (v,"Int") => v.toInt
          case (v,"Long") => v.toLong
          case (v,"Double") => v.toDouble
          case (v,"Float") => v.toFloat
          case (v,"Char") =>
            v.length match {
              case 1 => v(0)
              case 3 => v(1) // quoted character, e.g. 'a'
            }
          case (v,"String") if meta.typeName == "CHAR" => v.head // FIXME: check length
          case (v,"scala.math.BigDecimal") => BigDecimal(s"${v.trim}") // need the trim for Oracle trailing space
          case (StringPattern(str),"String") => str
          case ("TRUE","Boolean") => true
          case ("FALSE","Boolean") => false
        })
      }
    }
    // Builds the common tail of log/exception messages about default values.
    private def formatDefault(v:Any) =
      s" default value $v for column ${tableBuilder.namer.qualifiedName.asString}.$name of type $tpe, meta data: "+meta.toString
    /** The default value for the column as a ColumnOption Default or None if no default. The value wrapped by
      * ColumnOption Default needs to be an Option in case of a nullable column but can't be an Option in case of a
      * non-nullable Column.
      *
      * Default values for autoInc columns are automatically ignored.
      *
      * If `ignoreInvalidDefaults = true`, Slick catches scala.MatchError and java.lang.NumberFormatException thrown by
      * this method, logs the message and treats it as no default value for convenience. */
    def defaultColumnOption: Option[RelationalProfile.ColumnOption.Default[_]] = rawDefault.map(v => (v,tpe)).collect {
      // dynamic defaults such as NOW() cannot be represented as a literal — skip them
      case (v,_) if Seq("NOW","CURRENT_TIMESTAMP","CURRENT_DATE","CURRENT_TIME").contains(v.stripSuffix("()").toUpperCase) =>
        logger.debug(s"Ignoring"+formatDefault(v))
        None
    }.getOrElse {
      default.map( d =>
        RelationalProfile.ColumnOption.Default(
          if(nullable) d
          else d.getOrElse(throw new SlickException(s"Invalid default value $d for non-nullable column ${tableBuilder.namer.qualifiedName.asString}.$name of type $tpe, meta data: "+meta.toString))
        )
      )
    }
    // Wraps defaultColumnOption, optionally downgrading parse failures to
    // "no default" when ignoreInvalidDefaults is enabled.
    private def convenientDefault: Option[RelationalProfile.ColumnOption.Default[_]] =
      try defaultColumnOption catch {
        case e: java.lang.NumberFormatException if ignoreInvalidDefaults =>
          logger.debug(s"NumberFormatException: Could not parse"+formatDefault(rawDefault))
          None
        case e: scala.MatchError =>
          val msg = "Could not parse" + formatDefault(rawDefault)
          if(ignoreInvalidDefaults) {
            logger.debug(s"SlickException: $msg")
            None
          } else throw new SlickException(msg, e)
      }
    /** The complete column model, combining type, nullability and options. */
    def model = m.Column(name=name, table=tableBuilder.namer.qualifiedName, tpe=tpe, nullable=nullable,
      options = Set() ++
        dbType.map(SqlProfile.ColumnOption.SqlType) ++
        (if(autoInc) Some(ColumnOption.AutoInc) else None) ++
        (if(createPrimaryKeyColumnOption) Some(ColumnOption.PrimaryKey) else None) ++
        length.map(RelationalProfile.ColumnOption.Length.apply(_,varying=varying)) ++
        (if(!autoInc) convenientDefault else None) )
  }
class PrimaryKeyBuilder(tableBuilder: TableBuilder, meta: Seq[MPrimaryKey]){
/** Indicates wether a primary key should be generated. Disabled by default for single column primary keys in favor
* of ColumnOption PrimaryKey via Column#createPrimaryKeyColumnOption. */
def enabled: Boolean = meta.size > 1
def name: Option[String] = meta.head.pkName.filter(_ != "")
def columns = meta.map(_.column)
// single column primary keys excluded in favor of PrimaryKey column option
final def model: Option[m.PrimaryKey] = if(!enabled) None else Some(
m.PrimaryKey(name, tableBuilder.namer.qualifiedName,columns.map(tableBuilder.columnsByName))
)
}
class ForeignKeyBuilder(tableBuilder: TableBuilder, meta: Seq[MForeignKey]) {
private val fk = meta.head
def enabled: Boolean = true
def name: Option[String] = fk.fkName.filter(_ != "")
def referencedColumns = meta.map(_.fkColumn)
private val referencingColumns = meta.map(_.pkColumn)
assert(referencingColumns.size == referencedColumns.size)
def updateRule: m.ForeignKeyAction = fk.updateRule
def deleteRule: m.ForeignKeyAction = fk.deleteRule
final def buildModel(builders: Builders): Option[m.ForeignKey] = {
assert(meta.size >= 1)
assert(tableBuilder.namer.qualifiedName == tableNamersByQName(fk.fkTable).qualifiedName)
if(!enabled) None else Some(m.ForeignKey(
name,
tableBuilder.namer.qualifiedName,
referencedColumns.map(tableBuilder.columnsByName),
tableNamersByQName(fk.pkTable).qualifiedName,
referencingColumns.map(builders.tablesByQName(fk.pkTable).columnsByName),
updateRule,
deleteRule
))
}
}
class IndexBuilder(tableBuilder: TableBuilder, meta: Seq[MIndexInfo]) {
private val idx = meta.head
assert(meta.size >= 1)
assert(meta.forall(_.indexName == idx.indexName))
assert(meta.forall(_.nonUnique == idx.nonUnique))
/** Indicates wether an index should be generated. Disabled by default for:
* - indexType == tableIndexStatistic
* - indices matching primary key
* - non-unique indices matching foreign keys referencing columns
* - indices matching foreign keys referenced columns */
def enabled = (
idx.indexType != DatabaseMetaData.tableIndexStatistic &&
(tableBuilder.mPrimaryKeys.isEmpty || tableBuilder.mPrimaryKeys.map(_.column).toSet != columns.toSet) &&
// preserve additional uniqueness constraints on (usually not unique) fk columns
(unique || tableBuilder.mForeignKeys.forall(_.map(_.fkColumn).toSet != columns.toSet)) &&
// postgres may refer to column oid, skipping index for now. Maybe we should generate a column and include it
// instead. And maybe this should be moved into PostgresModelBuilder.
// TODO: This needs a test case!
columns.forall(tableBuilder.columnsByName.isDefinedAt)
)
def unique = !idx.nonUnique
def columns = meta.flatMap(_.column)
def name = idx.indexName.filter(_ != "")
final def model: Option[m.Index] =
if(!enabled) None
else Some(m.Index(name, tableBuilder.namer.qualifiedName, columns.map(tableBuilder.columnsByName), unique))
}
}
| Radsaggi/slick | slick/src/main/scala/slick/jdbc/JdbcModelBuilder.scala | Scala | bsd-2-clause | 17,616 |
/*
* Copyright (C) 2016 Vincibean <Andre Bessi>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vincibean.scala.impatient.chapter4
import scala.io.Source
/**
* Write a program that reads words from a file. Use a mutable map to count
* how often each word appears. To read the words, simply use a java.util.Scanner :
*
* val in = new java.util.Scanner(java.io.File("myfile.txt"))
* while (in.hasNext()) process in.next()
*
* Or look at Chapter 9 for a Scalaesque way.
* At the end, print out all words and their counts.
*
* Created by Vincibean on 16/01/16.
*/
package object exercise2 {
  // Using the "Scalaesque" way.
  /** Reads the exercise resource file and returns its lower-cased words. */
  def resourceAsStringArray: Array[String] = {
    val resource = getClass.getClassLoader.getResourceAsStream("chapter4/Exercise2.txt")
    val text = Source.fromInputStream(resource).getLines().toList.mkString
    text.split("\\\\s+").map(_.toLowerCase)
  }
}
| Vincibean/ScalaForTheImpatient-Solutions | src/main/scala/org/vincibean/scala/impatient/chapter4/exercise2/exercise2.scala | Scala | gpl-3.0 | 1,511 |
package org.bitcoins.core.p2p
import scodec.bits._
/** A network address backed by a raw byte vector; IPv4 addresses are stored in
  * IPv4-mapped-IPv6 form (a fixed 12-byte prefix followed by the 4 IPv4 bytes). */
trait InetAddress {
  /** The raw address bytes */
  def bytes: ByteVector
  /** The trailing 4 bytes of an IPv4-mapped address.
    * Fails if this address is not in the IPv4-mapped range. */
  def ipv4Bytes: ByteVector = {
    val mappedPrefix = bytes.take(12)
    require(mappedPrefix == hex"00000000000000000000ffff",
            "Cannot call ipv4Bytes for an IPv6 address")
    bytes.drop(12)
  }
  /** Java-style accessor returning the address bytes as an array */
  def getAddress: Array[Byte] = bytes.toArray
}
object InetAddress {

  private case class InetAddressImpl(bytes: ByteVector) extends InetAddress

  /** Builds an [[InetAddress]] from raw address bytes. */
  def apply(array: Array[Byte]): InetAddress = getByAddress(array)

  /** Builds an [[InetAddress]] from a byte vector. */
  def apply(bytes: ByteVector): InetAddress = getByAddress(bytes.toArray)

  /** Normalizes the given bytes via NetworkIpAddress.writeAddress and wraps them. */
  def getByAddress(array: Array[Byte]): InetAddress =
    InetAddressImpl(NetworkIpAddress.writeAddress(ByteVector(array)))
}
/** Base trait for Tor onion-service addresses. These are not IP based, so the
  * IPv4 view is unavailable by construction. */
trait TorAddress extends InetAddress {

  /** Always throws: a Tor address has no IPv4 byte representation. */
  override def ipv4Bytes: ByteVector = throw new IllegalArgumentException(
    "Tor address cannot be an IPv4 address")
}

object TorAddress {

  /** Byte length of a version-2 onion address */
  val TOR_V2_ADDR_LENGTH = 10
  /** Byte length of a version-3 onion address */
  val TOR_V3_ADDR_LENGTH = 32
}
/** A version-2 Tor onion-service address. */
trait Tor2Address extends TorAddress

object Tor2Address {

  // NOTE(review): this implementation is currently unreachable — the companion
  // exposes no apply/factory that constructs it.
  private case class Tor2AddressImpl(bytes: ByteVector) extends Tor2Address {
    require(bytes.size == TorAddress.TOR_V2_ADDR_LENGTH)
  }
}
/** A version-3 Tor onion-service address. */
trait Tor3Address extends TorAddress

object Tor3Address {

  // Fixed: the implementation previously extended Tor2Address (copy-paste
  // error) while enforcing the v3 address length, so a v3 address would have
  // typed/pattern-matched as a v2 address.
  private case class Tor3AddressImpl(bytes: ByteVector) extends Tor3Address {
    require(bytes.size == TorAddress.TOR_V3_ADDR_LENGTH)
  }
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/p2p/InetAddress.scala | Scala | mit | 1,397 |
package contadamination
import java.io.File
import contadamination.bloom.BloomFilterBuilder
import contadamination.results.{ ContaminationFilterUtils, ContaminationFilter }
import org.apache.spark.{ SparkConf, SparkContext }
import org.bdgenomics.adam.rdd.ADAMContext
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.utils.cli._
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
/** Command-line arguments for the contadamination tool, parsed by args4j.
  * NOTE(review): the var/val mix below is kept as-is — args4j populates the
  * annotated fields via reflection, so changing their kinds may break parsing. */
class ContadaminationArgs extends Args4jBase with Serializable {
  // positional argument: path to the (ADAM-formatted) reads
  @Argument(required = false, metaVar = "INPUT", usage = "Reads path", index = 0)
  var readsPath = "2-5pM-3h_S3_L001_I2_001.fastq.bam.adam"

  // reference FASTA files to build contamination filters from
  @Args4jOption(required = false, name = "-reference_paths", usage = "Reference paths")
  var referencePaths = Array("src/test/resources/mt.fasta")

  // bloom filter false-positive probability
  @Args4jOption(required = false, name = "-prob_of_false_positive", usage = "Probability of false positive, default 0.0005")
  val probOfFalsePositive = 0.0005

  // k-mer window size used when querying reads
  @Args4jOption(required = false, name = "-window_size", usage = "Window size, default 30")
  val windowSize = 30
}
object ContadaminationCompanion extends BDGCommandCompanion with Serializable {
  val commandName = "contadamination"
  val commandDescription = "Find contamination in NGS read data using a bloom filter implementation."

  /** Parses the command line and constructs the command instance. */
  def apply(cmdLine: Array[String]) = {
    val parsedArgs = Args4j[ContadaminationArgs](cmdLine)
    new Contadamination(parsedArgs)
  }
}
class Contadamination(protected val args: ContadaminationArgs) extends BDGSparkCommand[ContadaminationArgs] with Serializable {
  val companion = ContadaminationCompanion

  /** Runs the contamination search: loads the reads, builds one bloom filter
    * per reference, queries the reads against every filter and prints the
    * resulting contamination reports. */
  def run(sc: SparkContext): Unit = {
    val adamContext = new ADAMContext(sc)
    val reads = adamContext.loadAlignments(args.readsPath)
    val filterBuilder = new BloomFilterBuilder(
      adamContext,
      args.probOfFalsePositive,
      args.windowSize)
    val filters =
      ContaminationFilterUtils.createContaminationFilters(args.referencePaths, filterBuilder)
    val results =
      ContaminationFilterUtils.queryReadsAgainstFilters(args.windowSize, filters, reads)
    results.foreach(println)
  }
}
| johandahlberg/contAdamination | src/main/scala/contadamination/Contadamination.scala | Scala | apache-2.0 | 2,246 |
package domains.model.material
import java.io.File
import com.typesafe.config.ConfigFactory
/**
 * The group of files that makes up one unit of work.
 *
 * @param tsFile      TS (transport stream) file — always present
 * @param errFile     error log file, if present
 * @param programFile program metadata file, if present
 * @param mp4File     MP4 file, if present
 */
case class Material(tsFile: File,
                    errFile: Option[File],
                    programFile: Option[File],
                    mp4File: Option[File]) {

  // Base name of the unit: the TS file name up to the first '.'.
  val name = tsFile.getName.split('.')(0)

  override def toString = s"Material($name, err_file: ${errFile.isDefined}, program_file: ${programFile.isDefined}, mp4_file: ${mp4File.isDefined})"

  /**
   * Moves every file of this material into the target directory.
   *
   * Fixes the previous FIXME: each `File.renameTo` result is now checked, so
   * the method only reports success when all present files actually moved
   * (before, it returned true unconditionally once the directory existed).
   *
   * @param dir destination directory
   * @return true if `dir` is a directory and all files were moved successfully
   */
  def moveTo(dir: File): Boolean = {
    if (!dir.isDirectory) false
    else {
      // TODO: consider checking that the destination has sufficient free space
      val tsMoved = tsFile.renameTo(new File(dir, tsFile.getName))
      // Attempt every optional file even if an earlier rename failed, so a
      // partial failure still moves as much as possible.
      val othersMoved = List(errFile, programFile, mp4File).flatten
        .map(f => f.renameTo(new File(dir, f.getName)))
      tsMoved && othersMoved.forall(identity)
    }
  }

  /**
   * Moves the material into the trash directory configured at `path.trash`
   * in the "private" configuration.
   *
   * @return true if all files were moved successfully
   */
  def trash: Boolean = {
    val config = ConfigFactory.load("private")
    moveTo(new File(config.getString("path.trash")))
  }
}
| kobtea/home-theatre | app/domains/model/material/Material.scala | Scala | lgpl-2.1 | 1,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.streaming.Time
/**
 * :: DeveloperApi ::
 * Class having information on completed batches.
 * @param batchTime Time of the batch
 * @param streamIdToInputInfo A map of input stream id to its input info
 * @param submissionTime Clock time of when jobs of this batch was submitted to
 *                       the streaming scheduler queue
 * @param processingStartTime Clock time of when the first job of this batch started processing
 * @param processingEndTime Clock time of when the last job of this batch finished processing
 * @param outputOperationInfos The output operations in this batch
 */
@DeveloperApi
case class BatchInfo(
    batchTime: Time,
    streamIdToInputInfo: Map[Int, StreamInputInfo],
    submissionTime: Long,
    processingStartTime: Option[Long],
    processingEndTime: Option[Long],
    outputOperationInfos: Map[Int, OutputOperationInfo]
  ) {

  @deprecated("Use streamIdToInputInfo instead", "1.5.0")
  def streamIdToNumRecords: Map[Int, Long] = streamIdToInputInfo.mapValues(_.numRecords)

  /**
   * Time taken for the first job of this batch to start processing from the time this batch
   * was submitted to the streaming scheduler. Essentially, it is
   * `processingStartTime` - `submissionTime`.
   */
  def schedulingDelay: Option[Long] = processingStartTime.map(start => start - submissionTime)

  /**
   * Time taken for the all jobs of this batch to finish processing from the time they started
   * processing. Essentially, it is `processingEndTime` - `processingStartTime`.
   */
  def processingDelay: Option[Long] = for {
    end <- processingEndTime
    start <- processingStartTime
  } yield end - start

  /**
   * Time taken for all the jobs of this batch to finish processing from the time they
   * were submitted. Essentially, it is `processingDelay` + `schedulingDelay`.
   */
  def totalDelay: Option[Long] = for {
    scheduling <- schedulingDelay
    processing <- processingDelay
  } yield scheduling + processing

  /**
   * The number of records received by the receivers in this batch.
   */
  def numRecords: Long = streamIdToInputInfo.valuesIterator.map(_.numRecords).sum
}
| chenc10/Spark-PAF | streaming/src/main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala | Scala | apache-2.0 | 3,001 |
package com.aergonaut.lib.manual.page
import java.util
import cofh.lib.util.helpers.ItemHelper
import com.aergonaut.lib.manual.TManual
import com.aergonaut.lib.manual.gui.{TGuiManual, GuiPositionedStack}
import com.aergonaut.lib.util.ClientUtil
import com.aergonaut.lifeaquatic.util.Logger
import cpw.mods.fml.relauncher.ReflectionHelper
import net.minecraft.client.Minecraft
import net.minecraft.client.renderer.RenderHelper
import net.minecraft.client.renderer.entity.RenderItem
import net.minecraft.item.ItemStack
import net.minecraft.item.crafting.{ShapedRecipes, ShapelessRecipes, IRecipe, CraftingManager}
import net.minecraftforge.oredict.{ShapedOreRecipe, ShapelessOreRecipe}
import org.lwjgl.opengl.{GL12, GL11}
import scala.collection.JavaConverters._
import scala.collection.mutable
/** A manual page that renders one or more crafting recipes: the ingredient
  * grid, the output stack and hover tooltips for the stack under the mouse. */
class CraftingPage(val recipes: Seq[IRecipe]) extends ManualPage(None) {

  // Lays out a recipe's output stack plus its w*h ingredient grid as
  // positioned GUI stacks, or None if the recipe type is unsupported.
  private def makePositionedStacks(recipe: IRecipe): Option[Seq[GuiPositionedStack]] = extractIngredientsAndSize(recipe) match {
    case Some((ingredients, w, h)) => {
      val positionedStacks = mutable.ArrayBuffer[GuiPositionedStack]()
      // output stack sits above the grid (negative y offset)
      positionedStacks += new GuiPositionedStack(recipe.getRecipeOutput, 19, -19)
      for (hh <- 0 until h) {
        for (ww <- 0 until w) {
          // grid cells are 19px apart; ingredients are stored row-major
          positionedStacks += new GuiPositionedStack(ingredients(hh * w + ww), ww * 19, hh * 19)
        }
      }
      Some(positionedStacks.toSeq)
    }
    case _ => None
  }

  // Normalizes the four vanilla/Forge recipe classes to a common shape:
  // (ingredient alternatives in row-major order, grid width, grid height).
  private def extractIngredientsAndSize(recipe: IRecipe): Option[(Seq[Option[Seq[Any]]], Int, Int)] = recipe match {
    case recipe: ShapelessRecipes => {
      val ingredients = recipe.recipeItems.asScala.map(el => Some(Vector(el.asInstanceOf[ItemStack])))
      // derive a compact grid shape from the ingredient count
      val w = if (recipe.getRecipeSize > 6) 3 else if (recipe.getRecipeSize > 1) 2 else 1
      val h = if (recipe.getRecipeSize > 4) 3 else if (recipe.getRecipeSize > 2) 2 else 1
      Some((ingredients, w, h))
    }
    case recipe: ShapedRecipes => {
      val ingredients = recipe.recipeItems.map(el => Some(Vector(el))).toVector
      val w = recipe.recipeWidth
      val h = recipe.recipeHeight
      Some((ingredients, w, h))
    }
    case recipe: ShapelessOreRecipe => {
      // NOTE(review): the List[ItemStack] match is unchecked due to type
      // erasure; it relies on Forge populating ore-dictionary slots uniformly.
      val ingredients: Vector[Option[Vector[Any]]] = recipe.getInput.asScala.map({
        case oreList: util.List[ItemStack] => Some(oreList.asScala.toVector)
        case stack: ItemStack => Some(Vector(stack))
        case _ => None
      }).toVector
      val w = if (ingredients.length > 6) 3 else if (ingredients.length > 1) 2 else 1
      val h = if (ingredients.length > 4) 3 else if (ingredients.length > 2) 2 else 1
      Some((ingredients, w, h))
    }
    case recipe: ShapedOreRecipe => {
      val ingredients: Vector[Option[Vector[Any]]] = recipe.getInput.map({
        case oreList: util.List[ItemStack] => Some(oreList.asScala.toVector)
        case stack: ItemStack => Some(Vector(stack))
        case _ => None
      }).toVector
      // width/height are private on ShapedOreRecipe — read them via reflection
      val w: Int = ReflectionHelper.getPrivateValue(classOf[ShapedOreRecipe], recipe, "width")
      val h: Int = ReflectionHelper.getPrivateValue(classOf[ShapedOreRecipe], recipe, "height")
      Some((ingredients, w, h))
    }
    case _ => None
  }

  /** Draws the crafting overlay, each recipe's stacks, and (after rendering)
    * the tooltip for the stack currently under the mouse. */
  override def renderPage(gui: TGuiManual, manual: TManual, yOffset: Int): Unit = {
    ClientUtil.bindTexture("lifeaquatic:textures/gui/craftingOverlay.png")
    gui.drawTexturedModalRect(gui.left + 1, gui.top + yOffset, 0, 0, gui.guiWidth, gui.guiHeight)
    GL11.glEnable(GL12.GL_RESCALE_NORMAL)
    RenderHelper.enableGUIStandardItemLighting()
    val yBase = gui.top + 37
    // NOTE(review): yOff is accumulated per recipe but never read — the loop
    // below positions every recipe at yBase + yOffset, so multiple recipes
    // appear to overlap. Confirm whether yOff was meant to replace yOffset.
    var yOff = yOffset
    var highlighted: Option[ItemStack] = None
    RenderItem.getInstance().renderWithColor = true
    val positioned = recipes.map(recipe => makePositionedStacks(recipe))
    positioned.foreach(_.foreach(recipe => {
      val w = if (recipe.length > 6) 3 else if (recipe.length > 1) 2 else 1
      val h = if (recipe.length > 4) 3 else if (recipe.length > 2) 2 else 1
      // center the grid horizontally in the page
      val xBase = gui.left + gui.guiWidth / 2 - (w * 20) / 2
      recipe.foreach(stack => stack.getStack() match {
        case Some(item) => {
          RenderItem.getInstance().renderItemIntoGUI(gui.font, Minecraft.getMinecraft.renderEngine, item, xBase + stack.x, yBase + yOffset + stack.y)
          RenderItem.getInstance().renderItemOverlayIntoGUI(gui.font, Minecraft.getMinecraft.renderEngine, item, xBase + stack.x, yBase + yOffset + stack.y)
          // remember the stack under the mouse so its tooltip renders on top
          if (gui.mouseX >= xBase + stack.x && gui.mouseX < xBase + stack.x + 19 && gui.mouseY >= yBase + yOffset + stack.y && gui.mouseY < yBase + yOffset + stack.y + 19) highlighted = Some(item)
        }
        case _ => {}
      })
      yOff += h * 18 + 18
    }))
    GL11.glEnable(GL11.GL_BLEND)
    highlighted.foreach(stack => gui.drawTooltip(stack, gui.mouseX, gui.mouseY))
  }
}
/** Companion factory for [[CraftingPage]]. */
object CraftingPage {
  /** Creates a crafting page that displays the given recipes. */
  def apply(recipes: Seq[IRecipe]): CraftingPage = new CraftingPage(recipes)
}
| aergonaut/LifeAquatic | src/main/scala/com/aergonaut/lib/manual/page/CraftingPage.scala | Scala | mit | 4,909 |
/*
* Copyright 2016 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.helper
import simx.core.svaractor.handlersupport.HandlerSupport
import simx.core.svaractor.SVarActor
import scala.reflect.ClassTag
/**
 * Mixes runtime handler registration into an actor: incoming
 * HandlerRegisterRequest messages install the wrapped remote function as a
 * new message handler, provided this actor's runtime class is compatible
 * with the actor type the handler requires.
 *
 * Created by dwiebusch on 17.01.16.
 */
trait HandlerIntegrationSupport[ActorType <: SVarActor] extends HandlerSupport {
  // the actor instance the registered handlers will run against
  protected val actorContext : ActorType

  // Installs the requested handler only if this actor is an instance of the
  // required actor type (checked via the ClassTag at runtime).
  private def register[RequiredActorType >: ActorType <: SVarActor, T : ClassTag](h: HandlerRegisterRequest[RequiredActorType, T])
                                                                                 (implicit ct : ClassTag[RequiredActorType]){
    if (ct.runtimeClass isAssignableFrom actorContext.getClass)
      addHandler[T](h.handler.callFunction(_)(actorContext))
  }

  // React to registration requests targeting any supertype of this actor.
  addHandler[HandlerRegisterRequest[_ >: ActorType <: SVarActor, _]]{
    register(_)
  }
}
/** DSL entry point for injecting handlers into running actors:
  * `Extend actor someRef by someRemoteFunction`. */
object Extend {
  /** Intermediate wrapper produced by [[Extend.actor]]. */
  case class Into protected[Extend] (ref : SVarActor.Ref){
    /** Sends a registration request for the given remote function to the
      * wrapped actor and returns the reference for further chaining. */
    def by[Ctxt <: SVarActor, P : ClassTag, R](f : RemoteFunction1[Ctxt, P, R]) = {
      ref ! HandlerRegisterRequest(f)
      ref
    }
  }
  /** Starts the DSL chain for the given actor reference. */
  def actor(ref : SVarActor.Ref) = Into(ref)
}
// Internal message used to ship a remote handler to the target actor.
private case class HandlerRegisterRequest[RequiredActorType <: SVarActor, MSGType : ClassTag](handler : RemoteFunction1[RequiredActorType, MSGType, _])

// Minimal actor used by the demo below to exercise handler integration.
class TestActor extends SVarActor with HandlerIntegrationSupport[SVarActor]
/** Small manual test: spawns a TestActor, injects a String handler into it at
  * runtime via the Extend DSL and triggers the handler with a message. */
object HandlerIntegrationTest{
  def main(args: Array[String]) {
    val testActor = SVarActor.createActor(new TestActor)
    // integrate function: registers this anonymous RemoteFunction1 as a
    // String handler inside the (already running) actor
    Extend actor testActor by new RemoteFunction1[TestActor, String, Unit]{
      def apply(v1: String) = {
        println(v1, actorContext)
        // shut down the whole actor system once the handler has fired
        actorContext.context.system.shutdown()
      }
    }
    // test it: this message should be consumed by the handler registered above
    testActor ! "Hallo Welt"
  }
} | simulator-x/core | src/simx/core/helper/Integrate.scala | Scala | apache-2.0 | 2,561 |
package com.citypay.pan.search
import com.citypay.pan.search.util.Util
import com.typesafe.config.Config
import scala.collection.JavaConverters._
/**
 * A specification for matching a PAN based on a simplistic model. The spec will only cater for LUHN validated
 * card numbers which may exclude schemes such as Diners from the search.
 *
 * @param name a name for the specification
 * @param id an id associated to the type
 * @param logo an id associated with the type/card scheme
 * @param leading numerics used at the beginning of the search as a prefix, may be 1 or more digits
 * @param length the expected minimum length of the entire pan
 * @param maxLength the expected maximum length of the entire pan
 */
case class PanSpec(name: String,
                   id: String,
                   logo: String,
                   leading: Int,
                   length: Int,
                   maxLength: Int) {

  assume(leading > 0, "Leading digits should be greater than 0")
  assume(length > 0, "Length should be greater than 0")

  // leading prefix as individual digit characters
  private val leadingChars = leading.toString.toCharArray

  /** Number of digits in the leading prefix */
  val leadingLen: Int = leading.toString.length

  /** First digit of the leading prefix */
  val firstDigit: Int = Util.FirstDigit(leading)

  /** Leading prefix rendered as raw bytes (ASCII digits) */
  val leadingBytes: Array[Byte] = leading.toString.getBytes

  /**
   * @param i 0 based index of the leading numerics
   * @return the leading digit at the given index, or None if the index overflows the prefix
   */
  def nLeadingDigit(i: Int): Option[Int] = {
    // Fixed: the bounds check was inverted (`if (leadingChars.length > i) None
    // else Some(...)`), which returned None for every valid index and threw
    // ArrayIndexOutOfBoundsException for invalid ones. Negative indices also
    // yield None now, matching the documented contract.
    if (i >= 0 && i < leadingChars.length) {
      Some(leadingChars(i).toString.toInt)
    } else {
      None
    }
  }
}
object PanSpec {

  import com.citypay.pan.search.util.ConfigExt._

  /** Loads all PAN specifications for the given search level from the
    * `chd.level<n>` section of the supplied config. One [[PanSpec]] is
    * produced per configured BIN; "len" is either a single length ("16")
    * or an inclusive "min-max" range ("13-19"). */
  def load(conf: Config, level: Int): List[PanSpec] = {

    val data = conf.getObjectList(s"chd.level$level").asScala.toList.map(c => {
      val cfg = c.toConfig
      (cfg.string("name", ""),
        cfg.string("len", "16"),
        cfg.getString("id"),
        cfg.getString("logo"),
        cfg.getIntList("bins").asScala.toList)
    })

    val flattened = data.flatMap {
      case (n, len, id, logo, list) =>
        // split "13-19" style ranges; a single value is used for both bounds
        val s = len.split("-")
        val (from, to) = if (s.length == 2) (s(0).toInt, s(1).toInt) else (len.toInt, len.toInt)
        list.map(d => PanSpec(n, id, logo, d, from, to))
    }

    flattened
  }

  //  def leading(d: Double): Int = abs(floor(d).toInt)
  //
  //  def length(d: Double): Int = {
  //    val bd = BigDecimal(d)
  //    val len = ((bd - bd.setScale(0, BigDecimal.RoundingMode.HALF_DOWN)) * 100).toInt
  //    if (len <= 0) 16 else len
  //  }
} | citypay/citypay-pan-search | src/main/scala/com/citypay/pan/search/PanSpec.scala | Scala | mit | 3,426 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.{Tumble, ValidationException, _}
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedAggFunctions.OverAgg0
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit._
class OverWindowValidationTest extends TableTestBase {
/**
* OVER clause is necessary for [[OverAgg0]] window function.
*/
@Test(expected = classOf[ValidationException])
def testInvalidOverAggregation(): Unit = {
val util = batchTestUtil()
val t = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
val overAgg = new OverAgg0
t.select('c.count, overAgg('b, 'a))
}
/**
* OVER clause is necessary for [[OverAgg0]] window function.
*/
@Test(expected = classOf[ValidationException])
def testInvalidOverAggregation2(): Unit = {
val util = batchTestUtil()
val table = util.addTableSource[(Long, Int, String)]('long, 'int, 'string)
val overAgg = new OverAgg0
table
.window(Tumble over 5.milli on 'long as 'w)
.groupBy('string,'w)
.select(overAgg('long, 'int))
}
}
| apache/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/batch/table/validation/OverWindowValidationTest.scala | Scala | apache-2.0 | 1,999 |
package org.goingok
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.ActorMaterializer
/**
* Created by andrew@andrewresearch.net on 20/2/17.
*/
object GoingOkContext {
implicit val system = ActorSystem()
implicit val executor = system.dispatcher
implicit val materializer = ActorMaterializer()
val log = Logging(system.eventStream,"~")
def shorten(text:String,num:Int=30) = text.replace("\\n"," ").take(num).concat("\\u2026")
}
| GoingOK/goingok-server | src/main/scala/org/goingok/GoingOkContext.scala | Scala | apache-2.0 | 470 |
/* PrintEventsWithStreamSpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2014 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.monitor
import java.io.{ ByteArrayOutputStream, PrintStream }
import org.scalatest._
/**
* Test suite for [[atmos.monitor.PrintEventsWithStream]].
*/
class PrintEventsWithStreamSpec extends FlatSpec with Matchers {

  val encoding = "UTF-8"
  val message = "MSG"
  val thrown = new RuntimeException

  "PrintEventsWithStream" should "forward messages and stack traces to a print stream" in {
    val capture = new StreamFixture
    val monitor = PrintEventsWithStream(capture.stream)
    // A plain message must appear exactly as a reference fixture renders it.
    monitor.printMessage(message)
    capture.recover() shouldBe new StreamFixture().message(message)
    // Likewise for a message followed by a stack trace.
    monitor.printMessageAndStackTrace(message, thrown)
    capture.recover() shouldBe new StreamFixture().messageAndStackTrace(message, thrown)
  }

  /** Captures everything printed to `stream` so tests can compare output. */
  class StreamFixture {
    val baos = new ByteArrayOutputStream
    val stream = new PrintStream(baos, false, encoding)

    /** Render `msg` the way the monitor should, returning the captured text. */
    def message(msg: String) = {
      stream.println(msg)
      recover()
    }

    /** Render `msg` plus the stack trace of `thrown`, returning the text. */
    def messageAndStackTrace(msg: String, thrown: Throwable) = {
      stream.println(msg)
      thrown.printStackTrace(stream)
      recover()
    }

    /** Flush, return everything captured so far, and reset the buffer. */
    def recover() = {
      stream.flush()
      val captured = baos.toString(encoding)
      baos.reset()
      captured
    }
  }
}
package sparklyr
import java.nio.channels.Channels
import scala.collection.JavaConverters._
import org.apache.arrow.vector._
import org.apache.arrow.vector.ipc.message.{ArrowRecordBatch, MessageSerializer}
import org.apache.arrow.vector.ipc.WriteChannel
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.__THIS_IS_THE_ROAD_TO_CLOWNTOWN__ArrowUtils
/**
 * Writes Arrow data to `out` in the IPC stream format: the schema message is
 * emitted once on construction, then callers push serialized record batches
 * and finally terminate the stream with `end()`.
 */
class ArrowBatchStreamWriter(
  schema: org.apache.spark.sql.types.StructType,
  out: java.io.OutputStream,
  timeZoneId: String) {

  val arrowSchema = org.apache.spark.sql.util.__THIS_IS_THE_ROAD_TO_CLOWNTOWN__ArrowUtils.toArrowSchema(schema, timeZoneId)
  val writeChannel = new WriteChannel(Channels.newChannel(out))

  // Per the Arrow IPC stream format, the schema message precedes all batches.
  MessageSerializer.serialize(writeChannel, arrowSchema)

  /** Drain the iterator, writing every serialized ArrowRecordBatch to the stream. */
  def writeBatches(arrowBatchIter: Iterator[Array[Byte]]): Unit = {
    while (arrowBatchIter.hasNext) {
      writeChannel.write(arrowBatchIter.next())
    }
  }

  /** Write exactly one batch taken from the head of the iterator. */
  def writeOneBatch(arrowBatchIter: Iterator[Array[Byte]]): Unit = {
    writeChannel.write(arrowBatchIter.next())
  }

  /** Terminate the Arrow stream (length-0 marker); does not close `out`. */
  def end(): Unit = {
    writeChannel.writeIntLittleEndian(0)
  }
}
| rstudio/sparklyr | java/spark-3.0.0/arrowbatchstreamwriter.scala | Scala | apache-2.0 | 1,274 |
package filodb.core.memstore
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext
import com.typesafe.config.Config
import debox.Buffer
import java.util
import kamon.Kamon
import kamon.trace.Span
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.{Observable, OverflowStrategy}
import filodb.core.{DatasetRef, Types}
import filodb.core.binaryrecord2.RecordSchema
import filodb.core.memstore.ratelimit.QuotaSource
import filodb.core.metadata.Schemas
import filodb.core.query.{QuerySession, ServiceUnavailableException}
import filodb.core.store._
import filodb.memory.NativeMemoryManager
/**
* Extends TimeSeriesShard with on-demand paging functionality by populating in-memory partitions with chunks from
* a raw chunk source which implements RawChunkSource.readRawPartitions API.
*/
class OnDemandPagingShard(ref: DatasetRef,
                          schemas: Schemas,
                          storeConfig: StoreConfig,
                          quotaSource: QuotaSource,
                          shardNum: Int,
                          bufferMemoryManager: NativeMemoryManager,
                          rawStore: ColumnStore,
                          metastore: MetaStore,
                          evictionPolicy: PartitionEvictionPolicy,
                          filodbConfig: Config)
                         (implicit ec: ExecutionContext) extends
TimeSeriesShard(ref, schemas, storeConfig, quotaSource, shardNum, bufferMemoryManager, rawStore,
                metastore, evictionPolicy, filodbConfig)(ec) {
  import TimeSeriesShard._
  import FiloSchedulers._
  // Dedicated single-threaded scheduler: populateRawChunks must run single
  // threaded (see note in scanPartitions about concurrency constraints).
  private val singleThreadPool =
    Scheduler.singleThread(s"${FiloSchedulers.PopulateChunksSched}-$ref-$shardNum")
  // TODO: make this configurable
  private val strategy = OverflowStrategy.BackPressure(1000)
  // Starts a Kamon span measuring the latency of one ODP round-trip to the
  // raw store, attached to the currently active query span.
  private def startODPSpan(): Span = Kamon.spanBuilder(s"odp-cassandra-latency")
    .asChildOf(Kamon.currentSpan())
    .tag("dataset", ref.dataset)
    .tag("shard", shardNum)
    .start()
  // Rejects a time-range query up front (by throwing from
  // ensureQueriedDataSizeWithinLimitApprox) if the estimated data size for the
  // matched partitions exceeds storeConfig.maxDataPerShardQuery.
  private def capDataScannedPerShardCheck(lookup: PartLookupResult): Unit = {
    lookup.firstSchemaId.foreach { schId =>
      lookup.chunkMethod match {
        case TimeRangeChunkScan(st, end) =>
          val numMatches = lookup.partsInMemory.length + lookup.partIdsNotInMemory.length
          schemas.ensureQueriedDataSizeWithinLimitApprox(schId, numMatches,
            storeConfig.flushInterval.toMillis,
            storeConfig.estimatedIngestResolutionMillis, end - st, storeConfig.maxDataPerShardQuery)
        case _ =>
      }
    }
  }
  // NOTE: the current implementation is as follows
  //  1. Fetch partitions from memStore
  //  2. Determine, one at a time, what chunks are missing and could be fetched from disk
  //  3. Fetch missing chunks through a SinglePartitionScan
  //  4. upload to memory and return partition
  // Definitely room for improvement, such as fetching multiple partitions at once, more parallelism, etc.
  //scalastyle:off
  override def scanPartitions(partLookupRes: PartLookupResult,
                              colIds: Seq[Types.ColumnId],
                              querySession: QuerySession): Observable[ReadablePartition] = {
    capDataScannedPerShardCheck(partLookupRes)
    // For now, always read every data column.
    // 1. We don't have a good way to update just some columns of a chunkset for ODP
    // 2. Timestamp column almost always needed
    // 3. We don't have a safe way to prevent JVM crashes if someone reads a column that wasn't paged in
    // 1. Fetch partitions from memstore
    val partIdsNotInMemory = partLookupRes.partIdsNotInMemory
    // 2. Now determine list of partitions to ODP and the time ranges to ODP
    val partKeyBytesToPage = new ArrayBuffer[Array[Byte]]()
    val pagingMethods = new ArrayBuffer[ChunkScanMethod]
    val inMemOdp = debox.Set.empty[Int]
    // Partitions present in memory but missing part of the queried time range
    // ("partial ODP"): decide per partition whether any range must be paged in.
    partLookupRes.partIdsMemTimeGap.foreach { case (pId, startTime) =>
      val p = partitions.get(pId)
      if (p != null) {
        val odpChunkScan = chunksToODP(p, partLookupRes.chunkMethod, pagingEnabled, startTime)
        odpChunkScan.foreach { rawChunkMethod =>
          pagingMethods += rawChunkMethod // TODO: really determine range for all partitions
          partKeyBytesToPage += p.partKeyBytes
          inMemOdp += p.partID
        }
      } else {
        // in the very rare case that partition literally *just* got evicted
        // we do not want to thrash by paging this partition back in.
        logger.warn(s"Skipped ODP of partId=$pId in dataset=$ref " +
          s"shard=$shardNum since we are very likely thrashing")
      }
    }
    logger.debug(s"Query on dataset=$ref shard=$shardNum resulted in partial ODP of partIds ${inMemOdp}, " +
      s"and full ODP of partIds ${partLookupRes.partIdsNotInMemory}")
    // partitions that do not need ODP are those that are not in the inMemOdp collection
    val inMemParts = InMemPartitionIterator2(partLookupRes.partsInMemory)
    val noOdpPartitions = inMemParts.filterNot(p => inMemOdp(p.partID))
    // NOTE: multiPartitionODP mode does not work with AllChunkScan and unit tests; namely missing partitions will not
    // return data that is in memory. TODO: fix
    val result = Observable.fromIteratorUnsafe(noOdpPartitions) ++ {
      if (storeConfig.multiPartitionODP) {
        // Multi-partition mode: one raw-store read covering all partitions to
        // page, using a single bounding time range for the whole batch.
        Observable.fromTask(odpPartTask(partIdsNotInMemory, partKeyBytesToPage, pagingMethods,
          partLookupRes.chunkMethod)).flatMap { odpParts =>
          val multiPart = MultiPartitionScan(partKeyBytesToPage, shardNum)
          if (partKeyBytesToPage.nonEmpty) {
            val span = startODPSpan()
            rawStore.readRawPartitions(ref, maxChunkTime, multiPart, computeBoundingMethod(pagingMethods))
              // NOTE: this executes the partMaker single threaded.  Needed for now due to concurrency constraints.
              // In the future optimize this if needed.
              .mapEval { rawPart => partitionMaker.populateRawChunks(rawPart).executeOn(singleThreadPool) }
              .asyncBoundary(strategy) // This is needed so future computations happen in a different thread
              .guarantee(Task.eval(span.finish())) // not async
          } else { Observable.empty }
        }
      } else {
        // Per-partition mode: one SinglePartitionScan per paged partition,
        // issued with bounded parallelism (demandPagingParallelism).
        Observable.fromTask(odpPartTask(partIdsNotInMemory, partKeyBytesToPage, pagingMethods,
          partLookupRes.chunkMethod)).flatMap { odpParts =>
          assertThreadName(QuerySchedName)
          logger.debug(s"Finished creating full ODP partitions ${odpParts.map(_.partID)}")
          if(logger.underlying.isDebugEnabled) {
            partKeyBytesToPage.zip(pagingMethods).foreach { case (pk, method) =>
              logger.debug(s"Paging in chunks for partId=${getPartition(pk).get.partID} chunkMethod=$method")
            }
          }
          if (partKeyBytesToPage.nonEmpty) {
            val span = startODPSpan()
            Observable.fromIterable(partKeyBytesToPage.zip(pagingMethods))
              .mapParallelUnordered(storeConfig.demandPagingParallelism) { case (partBytes, method) =>
                rawStore.readRawPartitions(ref, maxChunkTime, SinglePartitionScan(partBytes, shardNum), method)
                  .mapEval { rawPart => partitionMaker.populateRawChunks(rawPart).executeOn(singleThreadPool) }
                  .asyncBoundary(strategy) // This is needed so future computations happen in a different thread
                  .defaultIfEmpty(getPartition(partBytes).get)
                  .headL
                // headL since we are fetching a SinglePartition above
              }
              .guarantee(Task.eval(span.finish())) // not async
          } else {
            Observable.empty
          }
        }
      }
    }
    result.map { p =>
      shardStats.partitionsQueried.increment()
      p
    }
  }
  // 3. Deal with partitions no longer in memory but still indexed in Lucene.
  // Basically we need to create TSPartitions for them in the ingest thread -- if there's enough memory
  // Returns Task.eval(Nil) when there is nothing to restore, so callers can
  // always flatMap over the result.  `callback` records each restored
  // partition's key bytes and scan method into the caller's buffers.
  private def odpPartTask(partIdsNotInMemory: Buffer[Int], partKeyBytesToPage: ArrayBuffer[Array[Byte]],
                          pagingMethods: ArrayBuffer[ChunkScanMethod], chunkMethod: ChunkScanMethod) =
  if (partIdsNotInMemory.nonEmpty) {
    createODPPartitionsTask(partIdsNotInMemory, { case (pId, pkBytes) =>
      partKeyBytesToPage += pkBytes
      pagingMethods += chunkMethod
      logger.debug(s"Finished creating part for full odp. Now need to page partId=$pId chunkMethod=$chunkMethod")
      shardStats.partitionsRestored.increment()
    }).executeOn(ingestSched).asyncBoundary
    // asyncBoundary above will cause subsequent map operations to run on designated scheduler for task or observable
    // as opposed to ingestSched
    // No need to execute the task on ingestion thread if it's empty / no ODP partitions
  } else Task.eval(Nil)
  /**
   * Creates a Task which is meant ONLY TO RUN ON INGESTION THREAD
   * to create TSPartitions for partIDs found in Lucene but not in in-memory data structures
   * It runs in ingestion thread so it can correctly verify which ones to actually create or not
   *
   * @param partIDs  non-empty list of partition IDs to (re)create
   * @param callback invoked with (partID, partKeyBytes) for every partition,
   *                 whether newly created or found to already exist
   * @return the TSPartitions that exist after the task runs; IDs whose part
   *         key or schema could not be resolved from Lucene are dropped
   */
  private def createODPPartitionsTask(partIDs: Buffer[Int], callback: (Int, Array[Byte]) => Unit):
                                                      Task[Seq[TimeSeriesPartition]] = Task.evalAsync {
    assertThreadName(IngestSchedName)
    require(partIDs.nonEmpty)
    partIDs.map { id =>
      // for each partID: look up in partitions
      partitions.get(id) match {
        case TimeSeriesShard.OutOfMemPartition =>
          logger.debug(s"Creating TSPartition for ODP from part ID $id in dataset=$ref shard=$shardNum")
          // If not there, then look up in Lucene and get the details
          for { partKeyBytesRef <- partKeyIndex.partKeyFromPartId(id)
                unsafeKeyOffset = PartKeyLuceneIndex.bytesRefToUnsafeOffset(partKeyBytesRef.offset)
                group = partKeyGroup(schemas.part.binSchema, partKeyBytesRef.bytes, unsafeKeyOffset, numGroups)
                sch <- Option(schemas(RecordSchema.schemaID(partKeyBytesRef.bytes, unsafeKeyOffset)))
          } yield {
            val part = createNewPartition(partKeyBytesRef.bytes, unsafeKeyOffset, group, id, sch, true, 4)
            if (part == OutOfMemPartition) throw new ServiceUnavailableException("The server has too many ingesting " +
              "time series and does not have resources to serve this long time range query. Please try " +
              "after sometime.")
            val stamp = partSetLock.writeLock()
            try {
              markPartAsNotIngesting(part, odp = true)
              partSet.add(part)
            } finally {
              partSetLock.unlockWrite(stamp)
            }
            val pkBytes = util.Arrays.copyOfRange(partKeyBytesRef.bytes, partKeyBytesRef.offset,
              partKeyBytesRef.offset + partKeyBytesRef.length)
            callback(part.partID, pkBytes)
            part
          }
          // create the partition and update data structures (but no need to add to Lucene!)
          // NOTE: if no memory, then no partition!
        case p: TimeSeriesPartition =>
          // invoke callback even if we didn't create the partition
          callback(p.partID, p.partKeyBytes)
          Some(p)
      }
    }.toVector.flatten
  }
  // Collapses many per-partition scan ranges into one TimeRangeChunkScan
  // spanning [min startTime, max endTime]; AllChunkScan when `methods` is empty.
  private def computeBoundingMethod(methods: Seq[ChunkScanMethod]): ChunkScanMethod = if (methods.isEmpty) {
    AllChunkScan
  } else {
    var minTime = Long.MaxValue
    var maxTime = 0L
    methods.foreach { m =>
      minTime = Math.min(minTime, m.startTime)
      maxTime = Math.max(maxTime, m.endTime)
    }
    TimeRangeChunkScan(minTime, maxTime)
  }
  /**
   * Check if ODP is really needed for this partition which is in memory
   * @return Some(scanMethodForCass) if ODP is needed, None if ODP is not needed
   */
  private def chunksToODP(partition: ReadablePartition,
                          method: ChunkScanMethod,
                          enabled: Boolean,
                          partStartTime: Long): Option[ChunkScanMethod] = {
    if (enabled) {
      method match {
        // For now, allChunkScan will always load from disk.  This is almost never used, and without an index we have
        // no way of knowing what there is anyways.
        case AllChunkScan                 =>  Some(AllChunkScan)
        // Assume initial startKey of first chunk is the earliest - typically true unless we load in historical data
        // Compare desired time range with start key and see if in memory data covers desired range
        // Also assume we have in memory all data since first key.  Just return the missing range of keys.
        case req: TimeRangeChunkScan      =>  if (partition.numChunks > 0) {
                                                val memStartTime = partition.earliestTime
                                                if (req.startTime < memStartTime && partStartTime < memStartTime) {
                                                  val toODP = TimeRangeChunkScan(req.startTime, memStartTime)
                                                  logger.debug(s"Decided to ODP time range $toODP for " +
                                                    s"partID=${partition.partID} memStartTime=$memStartTime " +
                                                    s"shard=$shardNum ${partition.stringPartition}")
                                                  Some(toODP)
                                                }
                                                else None
                                              } else Some(req) // if no chunks ingested yet, read everything from disk
        case InMemoryChunkScan            =>  None // Return only in-memory data - ie return none so we never ODP
        case WriteBufferChunkScan         =>  None // Write buffers are always in memory only
      }
    } else {
      None
    }
  }
}
| filodb/FiloDB | core/src/main/scala/filodb.core/memstore/OnDemandPagingShard.scala | Scala | apache-2.0 | 14,085 |
package sledtr
import scala.collection.mutable._
import scala.util.matching._
import sledtr.source._
import sledtr.section._
import sledtr.section.sites._
import net.htmlparser.jericho._
/** Registries of site-specific extraction rules, source/section handlers,
  * and known content div class names used when scraping pages.
  */
object Collections {

  /** Per-site extraction rules: a URL pattern paired with a predicate that
    * recognizes the page element holding the article body.
    */
  val SimpleExtenderSites: List[(Regex, Element => Boolean)] = List(
    // Hatena anonymous diary
    ("http://anond.hatelabo.jp/.*".r,
     e => e.getName == "div" && e.getAttributeValue("class") == "day"),
    // Hatena diary
    ("http://d.hatena.ne.jp/.*?/.*?".r,
     e => e.getName == "div" && e.getAttributeValue("class") == "body")
  )

  /** Source handlers, tried in order; DefaultSource precedes RssFeed. */
  val SourceList: List[SourceCompanion] = List(
    Ch2,
    UrlList,
    DefaultSource,
    RssFeed
  )

  /** Section handlers, tried in order; DefaultSection is the fallback. */
  val SectionList: List[SectionCompanion] = List(
    Ch2Section,
    SimpleExtender,
    IgnSection,
    SimpleSection,
    DefaultSection
  )

  /** CSS class names of divs commonly wrapping article content. */
  val DivNames: List[String] = List(
    "CONTENTS_MAIN",
    "article-body entry-content",
    "entry_body",
    "articleTextnews1",
    "main_cont",
    "blogbody",
    "NewsArticle",
    "maincol"
  )
}
package org.scalatest
class ConfigMapWrapperSuiteSpec extends FunSuite with SharedHelpers {

  // The wrapper must hand the config map through to the suite it wraps.
  test("configMap should get passed into the wrapped Suite") {
    SavesConfigMapSuite.resetConfigMap()
    val wrapper = new ConfigMapWrapperSuite(classOf[SavesConfigMapSuite])
    val expected = Map("salt" -> "pepper", "eggs" -> "bacon")
    wrapper.run(None, SilentReporter, new Stopper {}, Filter(), expected, None, new Tracker)
    assert(SavesConfigMapSuite.savedConfigMap === Some(expected))
  }
}
import sbt._
import Keys._
/** sbt build definition for the sbt-bower plugin, including the POM metadata
  * published to Sonatype.
  */
object SbtBowerBuild extends Build with BuildExtra {
  lazy val sbtBower = Project("sbt-bower", file("."), settings = mainSettings)

  lazy val mainSettings: Seq[Def.Setting[_]] = Defaults.defaultSettings ++ Seq(
    sbtPlugin := true,
    name := "sbt-bower",
    organization := "org.mdedetrich",
    version := "0.2.1",
    scalacOptions ++= Seq("-deprecation", "-unchecked"),
    libraryDependencies ++= Seq(
      "org.json4s" %% "json4s-jackson" % "3.2.8"
    ),
    // Snapshots and releases go to different Sonatype repositories.
    publishTo := {
      val nexus = "https://oss.sonatype.org/"
      if (isSnapshot.value)
        Some("snapshots" at nexus + "content/repositories/snapshots")
      else
        Some("releases" at nexus + "service/local/staging/deploy/maven2")
    },
    publishArtifact in Test := false,
    pomIncludeRepository := { _ => false },
    // POM metadata required by Sonatype.  NOTE: the <scm><url> previously
    // pointed at utforsca.git (copy-paste error); it now matches the
    // <connection> element and the project url, both of which reference
    // the sbt-bower repository.
    pomExtra := <url>https://github.com/mdedetrich/sbt-bower</url>
      <licenses>
        <license>
          <name>BSD-style</name>
          <url>http://www.opensource.org/licenses/bsd-license.php</url>
          <distribution>repo</distribution>
        </license>
      </licenses>
      <scm>
        <url>git@github.com:mdedetrich/sbt-bower.git</url>
        <connection>scm:git:git@github.com:mdedetrich/sbt-bower.git</connection>
      </scm>
      <developers>
        <developer>
          <id>mdedetrich</id>
          <name>Matthew de Detrich</name>
          <email>mdedetrich@gmail.com</email>
        </developer>
      </developers>
  )
}
package com.twitter.finagle.ssl.server
import com.twitter.finagle.ssl.{
ApplicationProtocols, CipherSuites, ClientAuth, KeyCredentials, Protocols, TrustCredentials}
/**
 * The collection of parameters an engine factory uses to configure a TLS
 * server [[Engine]].  Every field defaults to `Unspecified`, which lets the
 * engine factory fall back to its platform defaults.
 *
 * @param keyCredentials credentials the server engine presents to identify
 *                       itself to a remote peer
 * @param clientAuth whether client (mutual) authentication is unspecified,
 *                   desired, or required for this server engine
 * @param trustCredentials credentials the server engine uses to validate a
 *                         remote peer's credentials
 * @param cipherSuites cipher suites enabled for this server engine
 * @param protocols TLS protocol versions enabled for this server engine
 * @param applicationProtocols ALPN or NPN protocols supported by this
 *                             server engine
 */
private[finagle] case class SslServerConfiguration(
  keyCredentials: KeyCredentials = KeyCredentials.Unspecified,
  clientAuth: ClientAuth = ClientAuth.Unspecified,
  trustCredentials: TrustCredentials = TrustCredentials.Unspecified,
  cipherSuites: CipherSuites = CipherSuites.Unspecified,
  protocols: Protocols = Protocols.Unspecified,
  applicationProtocols: ApplicationProtocols = ApplicationProtocols.Unspecified)
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/server/SslServerConfiguration.scala | Scala | apache-2.0 | 1,407 |
package org.ffmmx.example.akka
import akka.agent.Agent
/** Small demos of akka Agent usage: reads, sends, and monadic composition. */
object AkkaAgent {
  import scala.concurrent.ExecutionContext.Implicits.global

  /** Demonstrates reading an Agent, sending updates, and reading its future. */
  def agentTest: Unit = {
    val agent = Agent(5)
    // Two equivalent ways to read the current value.
    val result = agent()
    val result2 = agent.get
    println("result="+result)
    println("result2="+result2)
    // send replaces the value; send with a function transforms it.
    println("agent.send(7) = "+ agent.send(7))
    println("agent.send(_ + 1) = "+agent.send(_ + 1))
    agent.send(_ * 2)
    println("agent.send(_ * 2) = "+agent.send(_ * 2))
    // The agent's future completes with the value once pending sends apply.
    val future = agent.future()
    future.foreach( x => println("future = " + x))
  }

  /** Demonstrates that Agents compose monadically (map / flatMap / foreach). */
  def monadic: Unit = {
    val agent1 = Agent(3)
    val agent2 = Agent(5)
    // foreach is the desugared form of a for-comprehension over one agent.
    agent1.foreach(value => println("value = "+value))
    // Two equivalent ways to derive a new agent holding value + 1.
    val agent3 = agent1 map (_ + 1)
    val agent4 = agent1.map(v => v + 1)
    // flatMap/map is the desugared form of a two-generator for-comprehension.
    val agent5 = agent1.flatMap(value1 => agent2.map(value2 => value1 + value2))
    println(agent1.get())
    println(agent2.get())
    println(agent3.get())
    println(agent4.get())
    println(agent5.get())
  }
}
| firefoxmmx2/akka_test | src/main/scala/org/ffmmx/example/akka/AkkaAgent.scala | Scala | apache-2.0 | 1,052 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Sep 16 14:09:25 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import collection.Traversable
import util.Sorting.quickSort
import scalation.math.Real.{abs => ABS, max => MAX, _}
import scalation.math.Real
import scalation.util.Error
import scalation.util.SortingR.{iqsort, qsort2}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorR` class stores and operates on Numeric Vectors of base type `Real`.
* It follows the framework of `gen.VectorN [T]` and is provided for performance.
* @param dim the dimension/size of the vector
* @param v the 1D array used to store vector elements
*/
class VectorR (val dim: Int,
protected var v: Array [Real] = null)
extends Traversable [Real] with PartiallyOrdered [VectorR] with Vec with Error with Serializable
{
if (v == null) {
v = Array.ofDim [Real] (dim)
} else if (dim != v.length) {
flaw ("constructor", "dimension is wrong")
} // if
/** Number of elements in the vector as a Double
*/
val nd = dim.toDouble
/** Range for the storage array
*/
private val range = 0 until dim
/** Format String used for printing vector values (change using setFormat)
* Ex: "%d,\\t", "%.6g,\\t" or "%12.6g,\\t"
*/
private var fString = "%s,\\t"
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a vector from an array of values.
* @param u the array of values
*/
def this (u: Array [Real]) { this (u.length, u) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a vector and assign values from vector 'u'.
* @param u the other vector
*/
def this (u: VectorR)
{
this (u.dim) // invoke primary constructor
for (i <- range) v(i) = u(i)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the size (number of elements) of 'this' vector.
*/
override def size: Int = dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Produce the range of all indices (0 to one less than dim).
*/
def indices: Range = 0 until dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Expand the size (dim) of 'this' vector by 'more' elements.
* @param more the number of new elements to add
*/
def expand (more: Int = dim): VectorR =
{
if (more < 1) this // no change
else new VectorR (dim + more, Array.concat (v, new Array [Real] (more)))
} // expand
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a vector of the form (0, ... 1, ... 0) where the 1 is at position j.
     *  No bounds check: 'j' must lie in [0, size).
     *  @param j     the position to place the 1
     *  @param size  the size of the vector (upper bound = size - 1)
     */
    def oneAt (j: Int, size: Int = dim): VectorR =
    {
        val c = new VectorR (size)
        c.v(j) = _1
        c
    } // oneAt

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a vector of the form (0, ... -1, ... 0) where the -1 is at position j.
     *  No bounds check: 'j' must lie in [0, size).
     *  @param j     the position to place the -1
     *  @param size  the size of the vector (upper bound = size - 1)
     */
    def _oneAt (j: Int, size: Int = dim): VectorR =
    {
        val c = new VectorR (size)
        c.v(j) = -_1
        c
    } // _oneAt

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert 'this' `VectorR` into a `VectorI` (element-wise truncation via toInt).
     */
    def toInt: VectorI =
    {
        val c = new VectorI (dim)
        for (i <- range) c(i) = v(i).toInt
        c
    } // toInt

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert 'this' `VectorR` into a `VectorD` (element-wise via toDouble).
     */
    def toDouble: VectorD =
    {
        val c = new VectorD (dim)
        for (i <- range) c(i) = v(i).toDouble
        c
    } // toDouble

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get 'this' vector's element at the 'i'-th index position.
     *  @param i  the given index
     */
    def apply (i: Int): Real = v(i)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get 'this' vector's elements within the given range (vector slicing).
     *  @param r  the given range (r.end is excluded)
     */
    def apply (r: Range): VectorR = slice (r.start, r.end)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get 'this' vector's entire array.  Exposes the backing store: mutating
     *  the returned array mutates the vector.
     */
    def apply (): Array [Real] = v
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set 'this' vector's element at the 'i'-th index position (in place).
     *  @param i  the given index
     *  @param x  the value to assign
     */
    def update (i: Int, x: Real) { v(i) = x }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set 'this' vector's elements over the given range to 'x' (vector slicing).
     *  @param r  the given range
     *  @param x  the value to assign
     */
    def update (r: Range, x: Real) { for (i <- r) v(i) = x }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set 'this' vector's elements over the given range from vector 'u'
     *  (u(0) goes to position r.start).  Requires u.dim >= r length.
     *  @param r  the given range
     *  @param u  the vector to assign
     */
    def update (r: Range, u: VectorR) { for (i <- r) v(i) = u(i - r.start) }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set each value in 'this' vector to 'x' (fill in place).
     *  @param x  the value to be assigned
     */
    def set (x: Real) { for (i <- range) v(i) = x }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set the values in 'this' vector to the values in array 'u' (copy in).
     *  Requires u.length >= dim.
     *  @param u  the array of values to be assigned
     */
    def setAll (u: Array [Real]) { for (i <- range) v(i) = u(i) }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Iterate over 'this' vector element by element, applying 'f' for its
     *  side effects (the result of 'f' is discarded).
     *  @param f  the function to apply
     */
    def foreach [U] (f: Real => U)
    {
        var i = 0
        while (i < dim) { f (v(i)); i += 1 }
    } // foreach
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Filter the elements of 'this' vector based on the predicate 'p', returning
     *  a new vector (possibly shorter than 'this').
     *  @param p  the predicate (Boolean function) to apply
     */
    override def filter (p: Real => Boolean): VectorR = VectorR (v.filter (p))

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Filter the elements of 'this' vector based on the predicate 'p', returning
     *  the index positions (ascending) at which 'p' holds.
     *  @param p  the predicate (Boolean function) to apply
     */
    def filterPos (p: Real => Boolean): Array [Int] =
    {
        (for (i <- range if p (v(i))) yield i).toArray
    } // filterPos

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Map the elements of 'this' vector by applying the mapping function 'f',
     *  producing a new vector of the same dimension.
     *  @param f  the function to apply
     */
    def map (f: Real => Real): VectorR = new VectorR (this ().map (f))

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Slice 'this' vector 'from' to 'end', returning a copy of that region.
     *  @param from  the start of the slice (included)
     *  @param till  the end of the slice (excluded)
     */
    override def slice (from: Int, till: Int): VectorR = new VectorR (till - from, v.slice (from, till))

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Select a subset of elements of 'this' vector corresponding to a 'basis'.
     *  Indices may repeat and appear in any order; no bounds check is done.
     *  @param basis  the set of index positions (e.g., 0, 2, 5)
     */
    def select (basis: Array [Int]): VectorR =
    {
        val c = new VectorR (basis.length)
        for (i <- c.range) c.v(i) = v(basis(i))
        c
    } // select

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Concatenate 'this' vector and vector 'b' into a new vector of
     *  dimension dim + b.dim.
     *  @param b  the vector to be concatenated
     */
    def ++ (b: VectorR): VectorR =
    {
        val c = new VectorR (dim + b.dim)
        for (i <- c.range) c.v(i) = if (i < dim) v(i) else b.v(i - dim)
        c
    } // ++

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Concatenate 'this' vector and scalar 's' (appended as the last element).
     *  @param s  the scalar to be concatenated
     */
    def ++ (s: Real): VectorR =
    {
        val c = new VectorR (dim + 1)
        for (i <- c.range) c.v(i) = if (i < dim) v(i) else s
        c
    } // ++
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add 'this' vector and vector 'b' (element-wise), returning a new vector.
     *  Iterates over 'this' vector's range, so requires b.dim >= dim.
     *  @param b  the vector to add
     */
    def + (b: VectorR): VectorR = 
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = v(i) + b.v(i)
        c
    } // +

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add 'this' vector and scalar 's' (added to every element).
     *  @param s  the scalar to add
     */
    def + (s: Real): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = v(i) + s
        c
    } // +

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add 'this' vector and scalar 's._1' only at position 's._2'; all other
     *  elements are copied unchanged.
     *  @param s  the (scalar, position) to add
     */
    def + (s: Tuple2 [Real, Int]): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = if (i == s._2) v(i) + s._1 else v(i)
        c
    } // +
 
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add in-place 'this' vector and vector 'b' (mutates and returns 'this').
     *  @param b  the vector to add
     */
    def += (b: VectorR): VectorR = { for (i <- range) v(i) += b.v(i); this }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Add in-place 'this' vector and scalar 's' (mutates and returns 'this').
     *  @param s  the scalar to add
     */
    def += (s: Real): VectorR = { for (i <- range) v(i) += s; this }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the negative of 'this' vector (unary minus) as a new vector.
     */
    def unary_- (): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = -v(i)
        c
    } // unary_-
 
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract vector 'b' (element-wise); requires b.dim >= dim.
     *  @param b  the vector to subtract
     */
    def - (b: VectorR): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = v(i) - b.v(i)
        c
    } // -
 
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract scalar 's' (from every element).
     *  @param s  the scalar to subtract
     */
    def - (s: Real): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = v(i) - s
        c
    } // -
 
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract scalar 's._1' only at position 's._2'.
     *  @param s  the (scalar, position) to subtract
     */
    def - (s: Tuple2 [Real, Int]): VectorR =
    {
        val c = new VectorR (dim)
        for (i <- range) c.v(i) = if (i == s._2) v(i) - s._1 else v(i)
        c
    } // -

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract in-place vector 'b' (mutates and returns 'this').
     *  @param b  the vector to subtract
     */
    def -= (b: VectorR): VectorR = { for (i <- range) v(i) -= b.v(i); this }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract in-place scalar 's' (mutates and returns 'this').
     *  @param s  the scalar to subtract
     */
    def -= (s: Real): VectorR = { for (i <- range) v(i) -= s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' vector by vector 'b'.
* @param b the vector to multiply by
*/
def * (b: VectorR): VectorR =
{
val c = new VectorR (dim)
for (i <- range) c.v(i) = v(i) * b.v(i)
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' vector by scalar 's'.
* @param s the scalar to multiply by
*/
def * (s: Real): VectorR =
{
val c = new VectorR (dim)
for (i <- range) c.v(i) = v(i) * s
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' (row) vector by matrix 'm'.
* @param m the matrix to multiply by
*/
def * (m: MatriR): VectorR = m.t * this
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' vector and vector 'b'.
* @param b the vector to add
*/
def *= (b: VectorR): VectorR = { for (i <- range) v(i) *= b.v(i); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' vector and scalar 's'.
* @param s the scalar to add
*/
def *= (s: Real): VectorR = { for (i <- range) v(i) *= s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' vector by vector 'b' (element-by-element).
* @param b the vector to divide by
*/
def / (b: VectorR): VectorR =
{
val c = new VectorR (dim)
for (i <- range) c.v(i) = v(i) / b.v(i)
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' vector by scalar 's'.
* @param s the scalar to divide by
*/
def / (s: Real): VectorR =
{
val c = new VectorR (dim)
for (i <- range) c.v(i) = v(i) / s
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' vector and vector 'b'.
* @param b the vector to divide by
*/
def /= (b: VectorR): VectorR = { for (i <- range) v(i) /= b.v(i); this }  // in-place element-wise division
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' vector and scalar 's'.
* @param s the scalar to divide by
*/
def /= (s: Real): VectorR = { for (i <- range) v(i) /= s; this }  // in-place scalar division
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing each element of 'this' vector raised to the
* s-th power.
* @param s the scalar exponent
*/
def ~^ (s: Double): VectorR =
{
  val powed = new VectorR (dim)
  for (i <- range) powed.v(i) = v(i) ~^ s       // element-wise power (exponent is a Double)
  powed
} // ~^
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for inequality.
* @param b that vector
*/
def ≠ (b: VectorR) = this != b  // Unicode alias for !=
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for less than or equal to.
* @param b that vector
*/
def ≤ (b: VectorR) = this <= b  // Unicode alias for <=
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for greater than or equal to.
* @param b that vector
*/
def ≥ (b: VectorR) = this >= b  // Unicode alias for >=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Raise each element of 'this' vector to the 's'-th power.
* @param s the scalar exponent
*/
def ~^= (s: Double) { for (i <- range) v(i) = v(i) ~^ s }  // in-place element-wise power (mutates this; returns Unit)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing the square of each element of 'this' vector.
*/
def sq: VectorR = this * this  // element-wise square (Hadamard product of this with itself)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing the reciprocal of each element of 'this' vector.
*/
def recip: VectorR =
{
val c = new VectorR (dim)
for (i <- range) c.v(i) = _1 / v(i)             // element-wise reciprocal (no zero check)
c
} // recip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector that is the element-wise absolute value of 'this' vector.
*/
def abs: VectorR =
{
  val out = new VectorR (dim)
  for (i <- range) out.v(i) = ABS (v(i))        // element-wise absolute value
  out
} // abs
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the elements of 'this' vector.
*/
def sum: Real = v.foldLeft (_0)((s, x) => s + x)  // left fold over the backing array, starting from zero
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the absolute value of the elements of 'this' vector.
*/
def sumAbs: Real = v.foldLeft (_0)((s, x) => s + ABS (x))  // sum of |v(i)|; equivalent to norm1
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the elements of 'this' vector skipping the 'i'-th element (Not Equal 'i').
* @param i the index of the element to skip
*/
def sumNE (i: Int): Real = sum - v(i)  // total minus the skipped element (i must be a valid index)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the positive (> 0) elements of 'this' vector.
*/
def sumPos: Real = v.foldLeft (_0)((s, x) => s + MAX (x, _0))  // negatives contribute zero
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the mean of the elements of 'this' vector.
*/
def mean = sum / nd  // nd presumably holds dim as a numeric value -- defined outside this view, confirm
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the (unbiased) sample variance of the elements of 'this' vector.
*/
def variance = (normSq - sum * sum / nd) / (nd-1.0)  // one-pass (sum-of-squares) formula; may lose precision for large means
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the population variance of the elements of 'this' vector.
* This is also the (biased) MLE estimator for sample variance.
*/
def pvariance = (normSq - sum * sum / nd) / nd  // biased (MLE) variant: divides by nd rather than nd-1
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Establish the rank order of the elements in 'self' vector, e.g.,
* (8.0, 2.0, 4.0, 6.0) is (3, 0, 1, 2).
*/
def rank: VectorI = new VectorI (iqsort (v))  // iqsort presumably returns the index permutation of an indirect sort -- defined elsewhere
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Cumulate the values of 'this' vector from left to right (e.g., create a
* CDF from a pmf). Example: (4, 2, 3, 1) --> (4, 6, 9, 10)
*/
def cumulate: VectorR =
{
  val out = new VectorR (dim)
  var running: Real = _0                        // running prefix sum
  for (i <- range) { running += v(i); out.v(i) = running }
  out
} // cumulate
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector so that it sums to one (like a probability vector).
*/
def normalize: VectorR = this * (_1 / sum)  // scales so elements sum to one (assumes sum != 0)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector so its length is one (unit vector).
*/
def normalizeU: VectorR = this * (_1 / norm)  // unit vector: scales so the 2-norm is one (assumes norm != 0)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector to have a maximum of one.
*/
def normalize1: VectorR = this * (_1 / max ())  // scales so the maximum element becomes one (assumes max != 0)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product (or inner product) of 'this' vector with vector 'b'.
* @param b the other vector
*/
def dot (b: VectorR): Real =
{
  var acc: Real = _0                            // inner-product accumulator
  for (i <- range) acc += v(i) * b.v(i)
  acc
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product (or inner product) of 'this' vector with vector 'b'.
* @param b the other vector
*/
def ∙ (b: VectorR): Real = this dot b           // Unicode alias: delegates to dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Euclidean norm (2-norm) squared of 'this' vector.
*/
def normSq: Real = this dot this  // squared Euclidean length, avoiding the sqrt
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Euclidean norm (2-norm) of 'this' vector.
*/
def norm: Real = sqrt (normSq).toReal  // sqrt computed outside Real, then converted back via .toReal
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Manhattan norm (1-norm) of 'this' vector.
*/
def norm1: Real =
{
  var total: Real = _0
  for (i <- range) total += ABS (v(i))          // sum of absolute values
  total
} // norm1
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the maximum element in 'this' vector.
* @param e the ending index (exclusive) for the search
*/
def max (e: Int = dim): Real =
{
var x = v(0)                                    // assumes dim >= 1 and e >= 1: v(0) must exist
for (i <- 1 until e if v(i) > x) x = v(i)
x
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take the maximum of 'this' vector with vector 'b' (element-by element).
* @param b the other vector
*/
def max (b: VectorR): VectorR =
{
  val out = new VectorR (dim)
  for (i <- range) out.v(i) = if (v(i) >= b.v(i)) v(i) else b.v(i)  // element-wise maximum
  out
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the minimum element in 'this' vector.
* @param e the ending index (exclusive) for the search
*/
def min (e: Int = dim): Real =
{
var x = v(0)                                    // assumes dim >= 1 and e >= 1: v(0) must exist
for (i <- 1 until e if v(i) < x) x = v(i)
x
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take the minimum of 'this' vector with vector 'b' (element-by element).
* @param b the other vector
*/
def min (b: VectorR): VectorR =
{
  val out = new VectorR (dim)
  for (i <- range) out.v(i) = if (v(i) <= b.v(i)) v(i) else b.v(i)  // element-wise minimum
  out
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the element with the greatest magnitude in 'this' vector.
*/
def mag: Real = ABS (max ()) max ABS (min ())  // largest absolute value: max of |max| and |min|
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the argument maximum of 'this' vector (index of maximum element).
* @param e the ending index (exclusive) for the search
*/
def argmax (e: Int = dim): Int =
{
  var best = 0                                  // index of the largest element seen so far
  for (i <- 1 until e if v(i) > v(best)) best = i
  best
} // argmax
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the argument minimum of 'this' vector (index of minimum element).
* @param e the ending index (exclusive) for the search
*/
def argmin (e: Int = dim): Int =
{
  var best = 0                                  // index of the smallest element seen so far
  for (i <- 1 until e if v(i) < v(best)) best = i
  best
} // argmin
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the argument minimum of 'this' vector (-1 if its not negative).
* @param e the ending index (exclusive) for the search
*/
def argminNeg (e: Int = dim): Int =
{
val j = argmin (e); if (v(j) < _0) j else -1    // -1 signals "no negative element"
} // argminNeg
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the argument maximum of 'this' vector (-1 if its not positive).
* @param e the ending index (exclusive) for the search
*/
def argmaxPos (e: Int = dim): Int =
{
val j = argmax (e); if (v(j) > _0) j else -1    // -1 signals "no positive element"
} // argmaxPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first negative element in 'this' vector (-1 otherwise).
* @param e the ending index (exclusive) for the search
*/
def firstNeg (e: Int = dim): Int =
{
for (i <- 0 until e if v(i) < _0) return i; -1  // early return on first strictly negative element
} // firstNeg
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first positive element in 'this' vector (-1 otherwise).
* @param e the ending index (exclusive) for the search
*/
def firstPos (e: Int = dim): Int =
{
for (i <- 0 until e if v(i) > _0) return i; -1  // early return on first strictly positive element
} // firstPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first occurrence of element 'x' in 'this' vector,
* or -1 if not found.
* @param x the given element
* @param e the ending index (exclusive) for the search
*/
def indexOf (x: Int, e: Int = dim): Int =
{
// NOTE(review): x is an Int compared against Real elements -- relies on implicit widening/equality; confirm
for (i <- 0 until e if v(i) == x) return i; -1
} // indexOf
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of strictly negative elements in 'this' vector.
*/
def countNeg: Int =
{
  var n = 0
  for (i <- 0 until dim) if (v(i) < _0) n += 1  // strictly negative only (zero not counted)
  n
} // countNeg
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of strictly positive elements in 'this' vector.
*/
def countPos: Int =
{
  var n = 0
  for (i <- 0 until dim) if (v(i) > _0) n += 1  // strictly positive only (zero not counted)
  n
} // countPos
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of distinct elements in 'this' vector.
*/
def distinct: Int =
{
var count = 1                                   // NOTE(review): yields 1 even for an empty vector -- confirm intended
val us = new VectorR (this); us.sort ()         // sorted copy so equal values are adjacent; assumes the copy constructor deep-copies v -- confirm
for (i <- 1 until dim if us(i) != us(i-1)) count += 1
count
} // distinct
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Determine whether the predicate 'pred' holds for some element in 'this' vector.
* @param pred the predicate to test (e.g., "_ == 5.")
*/
// def exists (pred: (Real) => Boolean): Boolean = v.exists (pred)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Determine whether 'x' is contained in 'this' vector.
* @param x the element to be checked
*/
def contains (x: Real): Boolean = v contains x  // linear scan of the backing array
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sort 'this' vector in-place in ascending (non-decreasing) order.
*/
def sort () { quickSort (v)(Real.ord) }  // in-place ascending sort using the Real ordering
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sort 'this' vector in-place in descending (non-increasing) order.
*/
def sort2 () { qsort2 (v) }  // qsort2 (defined elsewhere) presumably sorts descending in place -- confirm
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Swap elements 'i' and 'j' in 'this' vector.
* @param i the first element in the swap
* @param j the second element in the swap
*/
def swap (i: Int, j: Int)
{
  val tmp = v(i); v(i) = v(j); v(j) = tmp       // classic three-step in-place swap
} // swap
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether the other vector 'b' is at least as long as 'this' vector.
* @param b the other vector
*/
def sameDimensions (b: VectorR): Boolean = dim <= b.dim  // NOTE(review): despite the name, this checks dim <= b.dim, not equality
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' vector is nonnegative (has no negative elements).
*/
def isNonnegative: Boolean = range forall (i => ! (v(i) < _0))  // short-circuits on the first negative element
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with vector 'b'.
* @param b the other vector
*/
def tryCompareTo [B >: VectorR] (b: B)
(implicit view_1: (B) => PartiallyOrdered [B]): Option [Int] =
{
var le = true                                   // still possible that this <= b
var ge = true                                   // still possible that this >= b
for (i <- range) {
val b_i = b.asInstanceOf [VectorR] (i)
if (ge && (v(i) compare b_i) < 0) ge = false
else if (le && (v(i) compare b_i) > 0) le = false
} // for
// Some(0) equal, Some(-1) element-wise <=, Some(1) element-wise >=, None incomparable
if (ge && le) Some (0) else if (le) Some (-1) else if (ge) Some (1) else None
} // tryCompareTo
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Override equals to determine whether 'this' vector equals vector 'b'.
* @param b the vector to compare with this
*/
override def equals (b: Any): Boolean =
{
// deep comparison of the backing arrays (element values, not references)
b.isInstanceOf [VectorR] && (v.deep equals b.asInstanceOf [VectorR].v.deep)
} // equals
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Must also override hashCode for 'this' vector to be compatible with equals.
*/
override def hashCode: Int = v.deep.hashCode  // deep hash of the backing array, consistent with equals
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the format to the 'newFormat' (e.g., "%.6g,\\t" or "%12.6g,\\t").
* @param newFormat the new format String
*/
def setFormat (newFormat: String) { fString = newFormat }  // fString drives per-element formatting in toString
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' vector to a String.
*/
override def toString: String =
{
var sb = new StringBuilder ("VectorR(")
if (dim == 0) return sb.append (")").mkString   // empty vector prints as "VectorR()"
for (i <- range) {
sb.append (fString.format (v(i)))
if (i == dim-1) sb = sb.dropRight (1)           // after the last element, drop one trailing separator char
} // for
sb.replace (sb.length-1, sb.length, ")").mkString  // overwrite the final separator char with ')'
} // toString
} // VectorR class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorR` object is the companion object for the `VectorR` class.
*/
object VectorR
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a `VectorR` from one or more values (repeated values Real*).
* @param x the first Real number
* @param xs the rest of the Real numbers
*/
def apply (x: Real, xs: Real*): VectorR =
{
val c = new VectorR (1 + xs.length)
c(0) = x
for (i <- 0 until c.dim-1) c.v(i+1) = xs(i)     // copy the varargs after the first element
c
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a `VectorR` from a sequence of Reals.
* @param xs the sequence of the Real numbers
*/
def apply (xs: Seq [Real]): VectorR =
{
val c = new VectorR (xs.length)
for (i <- 0 until c.dim) c.v(i) = xs(i)
c
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a `VectorR` from one or more values (repeated values String*),
* parsing each String via the Real factory.
* @param x the first String
* @param xs the rest of the Strings
*/
def apply (x: String, xs: String*): VectorR =
{
val c = new VectorR (1 + xs.length)
c(0) = Real (x)
for (i <- 0 until c.dim-1) c.v(i+1) = Real (xs(i))
c
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a `VectorR` from an array of Strings, parsing each via the Real factory.
* @param xs the array of the Strings
*/
def apply (xs: Array [String]): VectorR =
{
val c = new VectorR (xs.length)
for (i <- c.range) c.v(i) = Real (xs(i))
c
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a `VectorR` from an array of Strings, skipping the first 'skip'
* elements. If an element is non-numeric, use its hashcode.
* FIX: Might be better to map non-numeric Strings to ordinal values.
* @param xs the array of the Strings
* @param skip the number of elements at the beginning to skip (e.g., id column)
*/
def apply (xs: Array [String], skip: Int): VectorR =
{
val c = new VectorR (xs.length - skip)
for (i <- skip until xs.length) {
// NOTE(review): regex accepts optional sign + digits + optional fraction only (no exponent,
// and the empty string matches); non-matches fall back to the String's hashCode -- confirm intent
c.v(i - skip) = if (xs(i) matches "[\\\\-\\\\+]?\\\\d*(\\\\.\\\\d+)?") Real (xs(i)) else xs(i).hashCode ()
} // for
c
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a one vector (all elements are one) of length 'size'.
* @param size the size of the vector
*/
def one (size: Int): VectorR =
{
val c = new VectorR (size)
c.set (_1)
c
} // one
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate scalar 'b' and vector 'u': element 0 is b, then u shifted right by one.
* @param b the scalar to be concatenated - first part
* @param u the vector to be concatenated - second part
*/
def ++ (b: Real, u: VectorR): VectorR =
{
val c = new VectorR (u.dim + 1)
for (i <- c.range) c(i) = if (i == 0) b else u.v(i - 1)
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return a `VectorR` containing a sequence of increasing integers in a range.
* @param start the start value of the vector, inclusive
* @param end the end value of the vector, exclusive (i.e., the first value not returned)
*/
def range (start: Int, end: Int): VectorR =
{
val c = new VectorR (end - start)
for (i <- c.range) c.v(i) = (start + i).toReal  // Int -> Real via the .toReal conversion
c
} // range
} // VectorR object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorRTest` object tests the operations provided by `VectorR`.
* > run-main scalation.linalgebra.VectorRTest
*/
object VectorRTest extends App
{
// demo/smoke test: prints results for visual inspection rather than asserting
var x: VectorR = null
var y: VectorR = null
for (l <- 1 to 4) {                             // vectors of dimension 1 through 4
println ("\\n\\tTest VectorR on vectors of dim " + l)
x = new VectorR (l)
y = new VectorR (l)
x.set (2)
y.set (3)
// test vector op scalar
println ("x + 4 = " + (x + 4))
println ("x - 4 = " + (x - 4))
println ("x * 4 = " + (x * 4))
println ("x / 4 = " + (x / 4))
println ("x ~^ 4 = " + (x ~^ 4))
// test vector op vector
println ("x + y = " + (x + y))
println ("x - y = " + (x - y))
println ("x * y = " + (x * y))
println ("x / y = " + (x / y))
println ("x.min = " + x.min ())
println ("x.max = " + x.max ())
println ("x.sum = " + x.sum)
println ("x.sumNE = " + x.sumNE (0))
println ("x dot y = " + (x dot y))
println ("x ∙ y = " + (x ∙ y))
println ("x.normSq = " + x.normSq)
println ("x.norm = " + x.norm)
println ("x < y = " + (x < y))
} // for
println ("hashCode (" + x + ") = " + x.hashCode ())
println ("hashCode (" + y + ") = " + y.hashCode ())
val z = VectorR ("1", "2", "3", "4")            // String-parsing factory
println ("z = " + z)
println ("z.map (_ * 2) = " + z.map ((e: Real) => e * 2))
println ("z.filter (_ > 2) = " + z.filter (_ > 2))
} // VectorRTest
| mvnural/scalation | src/main/scala/scalation/linalgebra/VectorR.scala | Scala | mit | 35,939 |
package com.houseofmoran.spark.twitter
import java.io.FileReader
import java.util.Properties
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.twitter.TwitterUtils
import scala.collection.JavaConversions._
object TwitterStreamSource {

  /** Creates a Twitter DStream, taking twitter4j OAuth credentials from environment
   *  variables named "twitter4j_oauth_*"; underscores are mapped to dots to form
   *  the twitter4j system-property names before the stream is created.
   */
  def streamFromEnv()(implicit streamingContext: StreamingContext) = {
    val env = System.getenv()
    for (key <- env.keySet()
         if key.startsWith("twitter4j_oauth_")) {
      // NOTE(review): replaces every '_' in the name -- assumes the twitter4j
      // property suffixes themselves contain no underscores (they are camelCase)
      System.setProperty(key.replaceAllLiterally("_", "."), env(key))
    }
    TwitterUtils.createStream(streamingContext, None)
  }

  /** Creates a Twitter DStream, loading twitter4j OAuth credentials
   *  ("twitter4j.oauth.*") from the given Java properties file into system
   *  properties before the stream is created.
   *
   *  @param propertiesFile path of the properties file to read
   */
  def streamFromAuthIn(propertiesFile: String)(implicit streamingContext: StreamingContext) = {
    val oauthProperties = new Properties()
    // Properties.load does not close its input, so close the reader explicitly
    // (previously the FileReader was leaked).
    val reader = new FileReader(propertiesFile)
    try {
      oauthProperties.load(reader)
    } finally {
      reader.close()
    }
    for (key <- oauthProperties.stringPropertyNames()
         if key.startsWith("twitter4j.oauth.")) {
      System.setProperty(key, oauthProperties.getProperty(key))
    }
    TwitterUtils.createStream(streamingContext, None)
  }
}
| mikemoraned/selfies | src/main/scala/com/houseofmoran/spark/twitter/TwitterStreamSource.scala | Scala | mit | 1,023 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.dicom.internal.ops
import org.trustedanalytics.sparktk.dicom.Dicom
import org.trustedanalytics.sparktk.dicom.internal.{ DicomState, DicomSummarization, BaseDicom }
import org.trustedanalytics.sparktk.saveload.TkSaveLoad
trait SaveSummarization extends BaseDicom {

  /** Persist the current dicom to the given destination.
   *
   *  @param path The destination path.
   */
  def save(path: String): Unit = execute(Save(path))
}
case class Save(path: String) extends DicomSummarization[Unit] {
override def work(state: DicomState): Unit = {
  // Persist both frames as parquet under the destination, then record the
  // sparktk format tag so the saved dicom can be identified on load.
  val metadataFrame = state.metadata.dataframe
  metadataFrame.write.parquet(path + "/metadata")
  state.pixeldata.dataframe.write.parquet(path + "/pixeldata")
  TkSaveLoad.saveTk(metadataFrame.sqlContext.sparkContext, path,
                    Dicom.formatId, Dicom.tkFormatVersion, "No Metadata")
}
} | dmsuehir/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/dicom/internal/ops/Save.scala | Scala | apache-2.0 | 1,596 |
/*
* Copyright (c) 2012 Crown Copyright
* Animal Health and Veterinary Laboratories Agency
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sampler.r
import java.io.FileWriter
import java.nio.file.Path
import scala.sys.process.stringSeqToProcess
/** A class to allow the running of an R script from within the Scala environment*/
/** A class to allow the running of an R script from within the Scala environment*/
class ScriptRunner {
	/** Writes an R script to the location defined by path and executes the script
	 *  using the Rscript shell command, with the script's parent directory as the
	 *  R working directory.
	 *
	 *  @param script The R script to be executed
	 *  @param scriptPath The path to where the script should be written, including filename (must end in ".r")
	 *  @throws ScriptRunnerException if Rscript exits with a non-zero status
	 */
	def apply(script: String, scriptPath: Path){
		assert(scriptPath.toString.endsWith(".r"), "R script does not have correct file extension")
		val parentPath = scriptPath.toAbsolutePath.getParent.toFile
		val fullScript = new StringBuilder()
		fullScript.append("setwd(\\""+parentPath+"\\")\\n")
		fullScript.append(script + "\\n")
		// Close the writer even if write throws, so the file handle is not leaked
		// (previously an exception left the FileWriter open).
		val writer = new FileWriter(scriptPath.toFile)
		try {
			writer.write(fullScript.toString)
		} finally {
			writer.close
		}
		import scala.sys.process._
		val processOutcome = Seq("/usr/bin/Rscript", scriptPath.toString()).!
		if(processOutcome != 0) throw new ScriptRunnerException("An error has occured whilst running the R script")
	}
}
/** Companion object to allow running of an R script*/
object ScriptRunner{
lazy val instance = new ScriptRunner             // single shared runner, created on first use
def apply(script: String, scriptTarget: Path) = instance(script, scriptTarget)  // convenience entry point
}
class ScriptRunnerException(msg: String) extends RuntimeException(msg) | tsaratoon/Sampler | sampler-core/src/main/scala/sampler/r/ScriptRunner.scala | Scala | apache-2.0 | 2,088 |
/*
* Copyright (c) 2013 Elmar Athmer
*
* This file is part of SolarSystemGrandTour.
*
* SolarSystemGrandTour is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SolarSystemGrandTour is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SolarSystemGrandTour. If not, see <http://www.gnu.org/licenses/>.
*/
package net.aerospaceresearch.test.jplparser.data
trait Ascp1950TestData {
val content1 =
"""
| 1 1018
| 2.433264500000000000D+06 2.433296500000000000D+06 4.416951494022430000D+07
| 8.080851857071245000D+06 -7.484298509281619000D+05 -4.071477607640307000D+04
| -5.304223109141618000D+02 1.960084066197540000D+01 3.222303796725281000D+00
| 2.083022578048719000D-01 8.945620756853312000D-03 1.631519466228435000D-04
| -1.340317245623168000D-05 -1.759855409114384000D-06 -1.194771554002974000D-07
| -5.359662671499441000D-09 -3.471755709306075000D+07 1.180024281695576000D+07
| 5.886270720492704000D+05 -1.892126606502421000D+04 -1.424502624277869000D+03
| -6.553857324099615000D+01 -1.986120050626400000D+00 1.319147866809289000D-02
| 6.570858278381576000D-03 5.478562255622847000D-04 2.866214104257610000D-05
| 8.511051031908517000D-07 -1.617361618079167000D-08 -4.681249732895293000D-09
| -2.316274987155261000D+07 5.462945389973467000D+06 3.921001922049265000D+05
| -5.876971073626505000D+03 -7.057060196700654000D+02 -3.703820425941331000D+01
| -1.395389494164943000D+00 -1.458790536368366000D-02 2.580291171747820000D-03
| 2.756527295812859000D-04 1.669943745272938000D-05 6.373716209883957000D-07
| 4.006863681165655000D-09 -1.916364602733139000D-09 5.273902819312394000D+07
| 6.413958733355331000D+04 -1.254565975101823000D+06 -3.931033165354736000D+04
| 1.265264726424556000D+03 1.949037451219991000D+02 1.169842732783994000D+01
| 2.038311728099458000D-01 -3.316732821711145000D-02 -3.976103213739038000D-03
| -2.088101503515090000D-04 2.643873091599176000D-07 1.107515009357629000D-06
| 1.056931660342854000D-07 -7.474797614064119000D+06 1.510056260620159000D+07
| 1.752292209656735000D+05 -5.391734497884829000D+04 -2.950264854318955000D+03
| -5.820190864478809000D+01 5.951813506408214000D+00 7.518925004544147000D-01
| 4.062199834412373000D-02 2.143691434143562000D-04 -1.774583656403915000D-04
| -1.786051344286069000D-05 -7.913457938665347000D-07 1.741856248837875000D-08
| -9.503118181370583000D+06 8.058145446388064000D+06 2.238789562991326000D+05
| -2.471339493392489000D+04 -1.707069068106459000D+03 -5.132588488517308000D+01
| 1.963783318270613000D+00 3.803987134112005000D-01 2.513980396869898000D-02
| 5.274260015448313000D-04 -7.309014654443437000D-05 -9.566304482645291000D-06
| -5.378095779767469000D-07 -1.600235870981415000D-09 4.183412702874668000D+07
| -1.112604587209062000D+07 -1.431165911579853000D+06 2.508759121666095000D+04
| 7.127793089591959000D+03 2.644328051040234000D+02 -2.220271762403207000D+01
| -3.022291278148226000D+00 -5.534178732298879000D-02 1.705840365757685000D-02
| 1.579042170154211000D-03 -1.602233132223667000D-05 -1.323242127089014000D-05
| -8.339710374949165000D-07 2.151314280414641000D+07 1.311801658638305000D+07
| -7.437722980244757000D+05 -9.360032426032936000D+04 -5.060863135474169000D+02
| 4.007155341075778000D+02 2.724595332696485000D+01 -6.678321169809602000D-01
| -2.213941684185550000D-01 -1.042968459516274000D-02 8.699068082129742000D-04
| 1.388714877617597000D-04 3.210118437499929000D-06 -8.566556132681489000D-07
| 7.111092914366394000D+06 8.161474054797525000D+06 -2.485969038837058000D+05
| -5.259514363677145000D+04 -1.010543384491409000D+03 1.865498701932858000D+02
| 1.685725841241338000D+01 -4.279375103827030000D-02 -1.124937171398380000D-01
| -7.341832346917030000D-03 3.006051640741704000D-04 7.583182894498642000D-05
| 3.089049888000821000D-06 -3.708948120497568000D-07 1.052494738399623000D+07
| -1.934230226741684000D+07 -4.311437777119278000D+05 1.298782882535791000D+05
| 2.429224865149729000D+03 -7.345499137097770000D+02 -1.648472741124855000D+01
| 5.250943045190261000D+00 1.345379629422423000D-01 -4.292581980564986000D-02
| -1.211162080365866000D-03 3.794556296441222000D-04 1.155554238779971000D-05
| -3.550018593898837000D-06 3.863621049915083000D+07 3.284606042546218000D+06
| -1.589219000719163000D+06 -2.296554022039511000D+04 8.549312034211447000D+03
| 1.392894347213345000D+02 -5.475205392812562000D+01 -1.068134118281653000D+00
| 4.228069414145958000D-01 9.328022297264749000D-03 -3.610398170087172000D-03
| -8.778060047479093000D-05 3.275153636934410000D-05 8.713422144538628000D-07
| 1.950765197860725000D+07 3.762988304135965000D+06 -8.039866777697801000D+05
| -2.575382259440238000D+04 4.313694824764491000D+03 1.506777711277254000D+02
| -2.752971038806459000D+01 -1.115800759358801000D+00 2.118380829420592000D-01
| 9.439892890268544000D-03 -1.802435580263539000D-03 -8.628863543290011000D-05
| 1.629191902527653000D-05 8.341250980253690000D-07 4.284898599726054000D+07
| -2.212184222269912000D+07 -5.433203077575453000D+05 4.628136952663522000D+04
| 6.251838842778277000D+02 -2.897779053167816000D+01 -3.592378958583786000D-01
| 8.463623390260013000D-03 1.714427571789068000D-04 -1.074351757354715000D-06
| 9.035006274590167000D+07 8.179963772573496000D+06 -1.145380799705120000D+06
| -1.805520903528453000D+04 1.198155478135688000D+03 1.368945809025302000D+01
| -5.002743462179384000D-01 -6.999316141124583000D-03 1.053581479193830000D-04
| 3.333730087426698000D-06 3.790819249207839000D+07 5.079435081152635000D+06
| -4.806854090471925000D+05 -1.105072906064032000D+04 4.992311140180754000D+02
| 7.991290909842333000D+00 -2.022361329188686000D-01 -3.683424382477587000D-03
| 3.658220483387318000D-05 1.572762004352665000D-06 -3.893216693317372000D+06
| -2.412349556768574000D+07 5.167658482505135000D+04 5.127047057105876000D+04
| -1.842225128186457000D+01 -3.419375843272277000D+01 -5.566036393457165000D-02
| 1.273508577976568000D-02 8.340040857762363000D-05 -2.886021244380030000D-06
| 9.710175689379668000D+07 -1.507562835947021000D+06 -1.240261017913560000D+06
| 2.594848118911010000D+03 1.338433327747456000D+03 -1.801373486286257000D-01
| -6.317027463748714000D-01 -1.825170837687927000D-03 2.077635418936648000D-04
| 4.877525589952382000D-07 4.390433317211375000D+07 8.495790023644798000D+05
| -5.610365824477300000D+05 -2.080155577958412000D+03 6.030720928035612000D+02
| 2.084508676103941000D+00 -2.805320734928628000D-01 -1.627613613922269000D-03
| 8.816630363335406000D-05 6.229343744454725000D-07 -1.186701510375427000D+06
| -2.087895251767533000D+07 6.651124072687239000D+03 1.732785068647966000D+04
| 3.590347773510823000D+00 -4.942026038423999000D+00 -6.214505621073119000D-03
| 8.499589579651360000D-04 6.754294136351685000D-05 -1.698564078770166000D-07
| -3.464822107174387000D-06 5.387657912436703000D-07 1.438905928428468000D-07
| 1.346724125094050000D+08 -2.444826755695881000D+05 -6.704427133239998000D+05
| 3.320419732189388000D+01 2.917401163472666000D+02 1.553906231339554000D-01
| -6.579966710442177000D-02 3.038360808474912000D-04 5.082384453147686000D-05
| -2.649887687673067000D-05 1.374653922468650000D-06 1.345957120701471000D-06
| -2.901973113970614000D-07 5.839316374120168000D+07 -1.061686217640559000D+05
| -2.907651753659764000D+05 1.463369201170206000D+01 1.265270838182853000D+02
| 6.778989076506139000D-02 -2.841265985663576000D-02 1.010832407884955000D-04
| 1.566667458813053000D-05 -9.119347921371727000D-06 6.723102478876417000D-07
| 4.363735434581366000D-07 -1.082943819672040000D-07 -4.223720455765184000D+07
| -2.000054470261307000D+07 2.116696422701449000D+05 1.659664595797002000D+04
| -9.430645255031632000D+01 -4.734038263044145000D+00 2.154833767046484000D-02
| 6.900331026402672000D-04 2.621993273280194000D-05 1.366677259343650000D-05
| -3.254417032071787000D-07 -5.378432582043576000D-07 -8.208390612276149000D-09
| 1.288769922758875000D+08 -5.527388276128155000D+06 -6.422072789367549000D+05
| 4.641832059591466000D+03 2.793882124598436000D+02 -1.388963532253799000D+00
| -6.285720733253301000D-02 6.854026265426379000D-04 8.347123924957797000D-05
| -1.440843320449777000D-05 -3.769679161925956000D-06 3.643773631582870000D-07
| 1.477797032156158000D-07 5.587947275295581000D+07 -2.397306589722783000D+06
| -2.785165199800108000D+05 2.013426914615480000D+03 1.211741554415081000D+02
| -6.018625522259866000D-01 -2.724981507954775000D-02 2.530920621495675000D-04
| 3.132169728523739000D-05 -4.315229525616152000D-06 -1.311148012427847000D-06
| 9.660654990936560000D-08 4.998597009378285000D-08 -2.056451689643139000D+08
| -1.735003676903194000D+07 8.493321828986664000D+05 1.101773018968747000D+04
| -2.292199516146785000D+02 2.663327929511569000D-01 -2.856286526328776000D-02
| -4.920653822907677000D-04 4.429499392280039000D-06 -6.480686337431126000D-07
| 6.658657665231070000D-09 1.236670187265258000D+08 -2.283956158120113000D+07
| -5.091594821863252000D+05 1.622512914280864000D+04 1.012833160549926000D+02
| -9.323721496420550000D-01 2.821601068318154000D-02 -7.066346366417514000D-04
| -2.084351002147353000D-08 2.411466213659095000D-07 6.944837093752984000D-08
| 6.228495491389264000D+07 -1.000407440813831000D+07 -2.565834474324061000D+05
| 7.142432001998453000D+03 5.267690294698366000D+01 -4.348323137996555000D-01
| 1.372140425226358000D-02 -3.108932657995300000D-04 -3.452722945088137000D-07
| 7.885354294860583000D-08 2.952914219261615000D-08 5.080304816864422000D+08
| 1.320079468829916000D+07 -7.359542113860500000D+04 -3.569998056841888000D+02
| 6.835661448008006000D-01 4.100323298953217000D-03 2.159240588889374000D-06
| 6.809888218425356000D-08 -5.136077238225585000D+08 1.201765710515831000D+07
| 7.446886222978044000D+04 -2.519654422208419000D+02 -1.166115277878159000D+00
| 9.130284620988061000D-04 1.114551064386370000D-05 -2.095191082413558000D-07
| -2.325686000748107000D+08 4.829814780960615000D+06 3.371782655624364000D+04
| -9.930871459610396000D+01 -5.165597423954722000D-01 2.912847165451253000D-04
| 4.809660238236512000D-06 -6.891270413973957000D-08 -1.346783787206796000D+09
| -4.301935161881449000D+06 3.119296070308964000D+04 8.930933688870443000D+00
| -6.745673415717328000D-02 2.565685805575438000D-05 -9.687713186055127000D-09
| 3.263431876417291000D+08 -1.198555792477217000D+07 -7.552238384717205000D+03
| 4.809270104872224000D+01 -2.236055962374919000D-03 -6.899984877817995000D-05
| 2.513387761181041000D-08 1.926158556780133000D+08 -4.764143695902498000D+06
| -4.458611716384757000D+03 1.947321066135148000D+01 1.974983358161292000D-03
| -2.957572925480091000D-05 1.272546216127110000D-08 -1.844007491629836000D+08
| -9.465935196741601000D+06 5.137930603310002000D+02 4.436353662782107000D+00
| 3.912564479019066000D-04 -5.393294711524477000D-07 2.590029055671577000D+09
| -1.013204344396171000D+06 -7.218613024073523000D+03 -7.676006499722734000D-02
| 1.753076704829066000D-03 3.060042045047678000D-07 1.136985853558836000D+09
| -3.096074087822907000D+05 -3.168872634161106000D+03 -9.648441018179819000D-02
| 7.620502624952572000D-04 1.536579681566319000D-07 -4.352315507698768000D+09
| 2.041159343549026000D+06 2.968429062159967000D+03 -2.405779231262746000D-01
| -1.600514679892077000D-04 -2.582107313182812000D-08 -1.203178895359418000D+09
| -6.620410522970757000D+06 8.208389586857189000D+02 7.505149615940752000D-01
| -5.292060288041490000D-05 -1.555115997985576000D-08 -3.841530273287860000D+08
| -2.760585400208942000D+06 2.621063271819830000D+02 3.131967518387494000D-01
| -1.776237952108895000D-05 -1.170053695020996000D-08 -3.968924348923275000D+09
| -3.085160057157632000D+06 1.569964503632357000D+03 4.455903895492936000D-01
| 2.780922733824003000D-05 -1.675696564010668000D-08 3.032245988016760000D+09
| -6.313881285590162000D+06 -1.199481234599635000D+03 2.307311923138865000D-01
| 1.114016281345942000D-04 4.715798586108258000D-08 2.141814701635046000D+09
| -1.040460793196230000D+06 -8.472857511431892000D+02 -6.236159831496398000D-02
| 2.673757344261340000D-05 2.957837799210567000D-08 -2.877887656836187000D+05
| 1.006119735624311000D+05 1.852903996442269000D+04 -9.788614149859608000D+02
| -1.142894176649260000D+02 2.878588162534226000D+00 5.271907146056860000D-01
| 5.260302222904171000D-03 -2.691233942683179000D-03 -1.608235502481683000D-04
| 1.188268582808537000D-05 1.686558174577532000D-06 -1.644205695178464000D-08
| -1.676878370282853000D+05 -1.302746984607222000D+05 1.065788523474912000D+04
| 1.382973942164678000D+03 -5.393004849692990000D+01 -6.708976001575404000D+00
| 7.141152132298075000D-02 3.302369417954203000D-02 1.071712396339356000D-03
| -1.660271073185381000D-04 -1.506697541094941000D-05 5.669243502891278000D-07
| 1.354149885860399000D-07 -8.302573039642450000D+04 -7.283314658293548000D+04
| 5.291311070850272000D+03 7.728834370996854000D+02 -2.632986253428685000D+01
| -3.704462901938390000D+00 2.560026153295885000D-02 1.773261601512524000D-02
| 6.460462765018081000D-04 -8.575486516066382000D-05 -8.439807943909434000D-06
| 2.644580505043074000D-07 7.360726671076788000D-08 7.902859852452447000D+03
| 1.790680165597688000D+05 -3.942415693591775000D+02 -1.801814774203139000D+03
| 2.793226502852578000D+01 7.532717481573529000D+00 -3.443058745602175000D-01
| -3.066344365352632000D-02 2.967376840456098000D-03 1.349640360381837000D-04
| -2.438787785613161000D-05 -3.928924588305895000D-07 1.973619892858277000D-07
| -3.061808750877454000D+05 -1.361614535545472000D+03 1.877873807467711000D+04
| -1.669193620201983000D+02 -1.059982342827976000D+02 3.043728258147580000D+00
| 4.081668631397565000D-01 -2.890686825887440000D-02 -1.776187644780322000D-03
| 2.383945620798394000D-04 6.896205313120414000D-06 -1.945354436502418000D-06
| -3.200378955458601000D-09 -1.651081924037123000D+05 -5.139339051717686000D+03
| 1.015794557977462000D+04 -4.553846622692269000D+01 -5.808942242147765000D+01
| 1.457404960948452000D+00 2.295132928088312000D-01 -1.486290860650997000D-02
| -1.034465222072878000D-03 1.255110681205843000D-04 4.336211305809377000D-06
| -1.041751463520571000D-06 -6.650216942119815000D-09 3.050849004461691000D+05
| 1.049112388697014000D+05 -1.614237359949549000D+04 -6.972821392769958000D+02
| 8.028788588579406000D+01 -1.215274927026372000D+00 -1.390313363030668000D-01
| 2.046199121715177000D-02 -6.922350409211587000D-04 -5.989574929937195000D-05
| 7.962678007285943000D-06 -2.781496655583555000D-07 -2.772071467511825000D-08
| -1.810570644480511000D+05 1.186084832208067000D+05 9.511149457718332000D+03
| -1.140555445064031000D+03 -1.575863311494180000D+01 3.444751719694581000D+00
| -2.023195087395673000D-01 4.606534297914537000D-04 1.163428434960731000D-03
| -7.898639413382863000D-05 -3.300910500195305000D-07 4.858528708484219000D-07
| -3.583170203972548000D-08 -1.048071194236825000D+05 6.150253905651799000D+04
| 5.530104618718129000D+03 -6.005693841013222000D+02 -1.055543367094977000D+01
| 1.897921330642960000D+00 -1.057349719073611000D-01 -2.659197021167752000D-04
| 6.462749706938467000D-04 -4.120141490911796000D-05 -3.782174211747951000D-07
| 2.695490511109675000D-07 -1.867558365735251000D-08 3.731854160471272000D+05
| -3.813825211394422000D+04 -1.779063803831727000D+04 3.516661300386925000D+02
| 5.330440458757989000D+01 -6.948906016224162000D-01 7.116855755036684000D-02
| -2.845754079532603000D-03 -4.123650737567957000D-04 1.482668863043789000D-05
| -7.574708685993722000D-07 3.444151025047842000D-08 5.157256894097910000D-09
| 8.838878737536511000D+04 1.394102811840125000D+05 -4.140951433958978000D+03
| -1.054949489867898000D+03 1.778118703789084000D+01 7.830495606594257000D-01
| 3.016011707210131000D-03 7.036023176383278000D-03 -2.418823656689279000D-04
| -6.525798876683056000D-06 2.657486324523070000D-07 -8.053601145683150000D-08
| 4.319651570657829000D-09 3.898913288621172000D+04 7.613611429003303000D+04
| -1.824713529646242000D+03 -5.799516486229721000D+02 8.387963557222323000D+00
| 4.486189483716597000D-01 -1.184171182159439000D-04 3.865244733950968000D-03
| -1.210746722609868000D-04 -3.929485430096345000D-06 1.628294337260598000D-07
| -4.428675715879447000D-08 2.214805955044112000D-09 1.777288580900666000D+05
| -1.497631401074030000D+05 -8.738645610435952000D+03 1.137438242649561000D+03
| 4.404463236454013000D+01 -1.027493457630931000D+00 -1.253614752108902000D-01
| -8.169298713937064000D-03 2.196317765274171000D-04 3.524360389588386000D-05
| 9.447538072391792000D-07 -8.509861943287943000D-08 -1.019208639992897000D-08
| 2.983672509207455000D+05 6.195600893995397000D+04 -1.442498075498644000D+04
| -5.929187483354481000D+02 4.460938349573199000D+01 2.102950600866827000D+00
| 5.047164723665897000D-02 -5.320768845203031000D-03 -5.242172286662507000D-04
| 7.954531508208996000D-07 1.632951809587695000D-06 1.037411019616375000D-07
| -2.414841564463412000D-09 1.567737722879332000D+05 3.680795146448991000D+04
| -7.605444426325699000D+03 -3.469264532557947000D+02 2.325219985133144000D+01
| 1.165088855400895000D+00 2.982457875495112000D-02 -2.704076268281943000D-03
| -2.887381614431014000D-04 -3.337430973541729000D-07 8.644564087234929000D-07
| 5.797817697533025000D-08 -1.089591113316698000D-09 -1.418953283126412000D+05
| -1.559933687783242000D+05 7.786684262369934000D+03 1.466030954247424000D+03
| -1.606206459797312000D+01 -4.862770950727618000D+00 -8.730172579761311000D-02
| 1.328475046343464000D-02 5.349791629585114000D-04 -5.302704043470739000D-05
| -2.711808467655954000D-06 2.601833334936494000D-07 1.879912565923992000D-08
| 2.950646238561638000D+05 -6.651809090566426000D+04 -1.586503041364577000D+04
| 4.442196825345593000D+02 7.908485217744532000D+01 2.218556748203777000D-01
| -2.176860940306903000D-01 -6.673666597278868000D-03 7.053930932530941000D-04
| 3.137160996086341000D-05 -3.289173048354147000D-06 -1.940350191689300000D-07
| 1.573427260744017000D-08 1.620355258080388000D+05 -3.253537452568788000D+04
| -8.751286670408525000D+03 2.087304476214919000D+02 4.324972313551443000D+01
| 2.239608115987032000D-01 -1.162375367172945000D-01 -3.902673734112128000D-03
| 3.704962777086785000D-04 1.815062040533113000D-05 -1.721711001989089000D-06
| -1.106874626760554000D-07 8.116228230672579000D-09 -3.438852210605215000D+05
| -3.514741442950924000D+04 2.063418428926582000D+04 4.538051819722285000D+02
| -1.021954488046422000D+02 -2.405980625016994000D+00 2.417691349625479000D-01
| 4.584846718888153000D-03 -4.071107026286239000D-04 3.405612777929049000D-05
| 1.079233578911143000D-06 -3.508230202063710000D-07 -5.970873915691920000D-09
| 6.616138077593575000D+04 -1.522254060030910000D+05 -3.769702763943140000D+03
| 1.446852460367665000D+03 3.004143338129067000D+01 -4.653090838334274000D+00
| -1.117966491556609000D-01 8.924267026053698000D-03 -7.627160691664026000D-05
| -1.558321173223516000D-05 3.538234489565380000D-06 6.907149558209518000D-08
| -2.285563488154646000D-08 4.274624529067523000D+04 -8.148337233981829000D+04
| -2.475287440559196000D+03 7.742551199140080000D+02 1.847019202570018000D+01
| -2.474393528301336000D+00 -6.589691105479472000D-02 4.747938372019243000D-03
| -3.160212770025548000D-05 -9.167276984585426000D-06 1.890174453232762000D-06
| 4.482530443480130000D-08 -1.225102487102398000D-08 -2.525103903267147000D+05
| 1.224556401952753000D+05 1.578483382741491000D+04 -1.221959851951827000D+03
| -8.582338928416178000D+01 4.226701304794482000D+00 2.820110714349941000D-01
| -4.294573142248687000D-03 -9.001692767127141000D-04 -5.950259544048832000D-05
| 2.225036914467645000D-06 5.690339851437499000D-07 5.592829397007750000D-09
| -2.127104732924889000D+05 -1.123772508697034000D+05 1.315642438225550000D+04
| 1.119095173236430000D+03 -7.018521666949073000D+01 -4.339235683899455000D+00
| 1.626862972832188000D-01 1.392745887087270000D-02 2.496581528259346000D-04
| -4.310746377368725000D-05 -5.680151363918957000D-06 5.042996262743523000D-08
| 3.833886706695308000D-08 -1.097534276442685000D+05 -6.319974849900037000D+04
| 6.797045905418932000D+03 6.318340540249838000D+02 -3.627612088331972000D+01
| -2.443628433907014000D+00 8.250686314775161000D-02 7.651643803496237000D-03
| 1.529257914989707000D-04 -2.218392431372301000D-05 -3.120516782544638000D-06
| 1.581182907381165000D-08 2.066092892425278000D-08 1.377205564880883000D+05
| -5.474729212037010000D+03 1.694904509072963000D+01 -6.785986070354586000D-02
| -1.740760965873354000D-03 -3.765720468029815000D-04 -7.623100014018669000D-05
| -6.906945940120817000D-06 -1.264599156251062000D-07 8.260477404865933000D-08
| 1.689246192087001000D-08 3.491406175642305000D+05 -3.782767053050222000D+03
| -1.272079131106644000D+01 1.170002442022240000D-01 2.056495794163627000D-03
| 3.827202624076962000D-04 1.722494527673430000D-06 -5.492779565432708000D-06
| -9.677952642378353000D-07 -9.128101569351854000D-08 -1.669864631798498000D-09
| 1.383836059144802000D+05 -1.528875537552632000D+03 -5.895822320271029000D+00
| 5.625234401629299000D-02 1.542511506872121000D-03 2.508107700098287000D-04
| 8.750767564564832000D-06 -2.218610178271117000D-06 -5.037237887216459000D-07
| -5.734193683282762000D-08 -2.645155383368738000D-09 1.269029398464143000D+05
| -5.344161109091776000D+03 1.537861167890801000D+01 -2.451006910725627000D-01
| -1.754416699284691000D-02 1.342067367793204000D-03 4.927012359434115000D-04
| 1.190453647978175000D-06 -1.366021675499432000D-05 -9.513198905800550000D-07
| 3.398032023444226000D-07 3.414782249432699000D+05 -3.878299573116424000D+03
| -1.116073243092872000D+01 1.013500875131726000D-01 -1.587683114203898000D-02
| -2.557451209434390000D-03 1.095080407273670000D-04 7.338111257748369000D-05
| 2.126269305349445000D-06 -1.958586261987232000D-06 -2.013528369531248000D-07
| 1.352812515555836000D+05 -1.572746645515183000D+03 -5.029244753251076000D+00
| 7.154820459089207000D-02 -6.274193288547393000D-03 -1.500836377632083000D-03
| 7.165183882642479000D-06 3.906590447493683000D-05 2.554332420422968000D-06
| -9.472457446110273000D-07 -1.428315401880043000D-07 -1.938871616927947000D-05
| 2.346502841516034000D-06 7.950196697705546000D-07 -2.130996560487209000D-07
| -8.659285445614298000D-08 1.557587909138266000D-08 4.766093750903756000D-09
| -7.260844674322715000D-10 -2.134798958871523000D-10 2.530656854758084000D-11
| 4.028603072583245000D-05 -6.181564242683270000D-07 2.315063976316353000D-07
| 1.352053229895531000D-07 -2.602838702872007000D-08 -9.286997131436340000D-09
| 1.493006660079185000D-09 4.239290287640851000D-10 -5.850733924089564000D-11
| -1.595920008281124000D-11 -1.625175869987759000D-05 -2.535458148856337000D-07
| -3.238177186849133000D-07 2.165631811363521000D-07 3.861125308022399000D-09
| -6.392711646989772000D-09 8.129913538876570000D-10 -8.727687320915143000D-11
| -1.117887118148599000D-11 7.594947944164197000D-12 4.060694057550294000D-05
| 3.676684825842319000D-07 -2.846895470116383000D-07 -2.649221589392532000D-08
| 1.938653787256835000D-08 -8.628724869170240000D-10 -1.099916962459619000D-10
| 7.386774494580628000D-11 -2.656643881530722000D-11 6.391058038480415000D-13
| -1.448532496407884000D-05 2.484261723355302000D-06 1.564688801072012000D-07
| -2.290797856774064000D-07 -2.341811452916864000D-08 8.479117587419220000D-09
| 1.523304912071247000D-09 -9.296778594667320000D-11 -5.001031420690577000D-11
| -4.832890030031281000D-13 4.051308939540793000D-05 1.280422532505887000D-07
| 3.433780448802947000D-07 2.749024014687594000D-08 -2.219018240920684000D-08
| -2.904041344002883000D-09 3.665557793568519000D-10 1.401638936506469000D-10
| 9.351398847058572000D-12 -4.360172844610763000D-12 -1.229843126161916000D-05
| -9.350229806904187000D-08 4.249835494135614000D-07 2.895629222520161000D-07
| -3.605339838742730000D-08 -1.785084597684993000D-08 1.003267814079754000D-09
| 5.257204959088289000D-10 -5.957668448953597000D-12 -5.999436850825735000D-12
| 4.132780382343781000D-05 -3.720046091855670000D-08 -3.515475607104023000D-07
| 6.384314075781677000D-08 3.630151498635001000D-08 -3.016126635916672000D-09
| -1.496076733713447000D-09 4.831970351874228000D-11 3.134934059407994000D-11
| 7.565822941823349000D-13 -1.605879091590289000D-02 1.863260587810313000D-04
| -3.046552357759987000D-04 1.282720324238702000D-05 1.211053656841072000D-05
| -7.575475107407927000D-07 -3.645773404446877000D-07 2.765436768329606000D-08
| 1.025002187537945000D-08 -7.878536390642173000D-10 3.830086766701517000D-01
| -4.237115974681966000D-04 1.642672624110054000D-05 2.421728077411331000D-05
| -1.347521249145401000D-06 -7.968330168497097000D-07 5.861724367795301000D-08
| 2.287420011284514000D-08 -1.851145420151512000D-09 -6.468629775344335000D-10
| -1.638838512063085000D+03 9.196788263732177000D-01 2.870240984297288000D-04
| -9.396084258784924000D-06 -1.131539823336135000D-05 6.473466440552619000D-07
| 3.383932859956216000D-07 -2.422005096619591000D-08 -9.488215017921727000D-09
| 6.883516790184239000D-10 -1.672811213048532000D-02 -4.801666050018056000D-04
| 1.081701107513639000D-04 8.666680484811042000D-06 -4.240250430422458000D-06
| 6.409982146786048000D-07 4.173930059860762000D-08 -1.304376647466616000D-08
| 1.128163233683693000D-09 -7.122942832135234000D-11 3.826716518449852000D-01
| 1.071010621186732000D-04 4.914690051955089000D-05 -9.409991719946619000D-06
| 6.824490085834579000D-07 2.037587151213294000D-07 -4.241273997163593000D-08
| 5.961668650258708000D-10 2.835136571300880000D-10 -7.886194363968976000D-11
| -1.636998104423057000D+03 9.203848086254627000D-01 -9.066254305871006000D-05
| -9.592249347583056000D-06 3.811324693654094000D-06 -5.724851925363746000D-07
| -4.002691280748823000D-08 1.211459694498543000D-08 -1.008506148763895000D-09
| 5.965847015534200000D-11 -1.671961723379373000D-02 5.240838187662647000D-04
| 1.597206840187196000D-04 1.082271177954946000D-05 -1.037460290751905000D-06
| -9.218091945988517000D-07 -7.961088627692402000D-08 1.592979835942208000D-08
| 2.806752081066140000D-09 -2.135076636289939000D-11 3.830667899767748000D-01
| 2.452175546470785000D-04 -8.116329793312770000D-06 -7.350468152374816000D-06
| -1.599162024801515000D-06 -8.140725610354416000D-08 5.220494791915006000D-08
| 6.293200489782580000D-09 -4.603151851661088000D-10 -1.545553975038530000D-10
| -1.635158220934767000D+03 9.194494174530886000D-01 -1.576143905531697000D-04
| -1.095480479497593000D-05 1.130557376800198000D-06 8.627686238784140000D-07
| 7.248946891537781000D-08 -1.489194842739530000D-08 -2.586777889246312000D-09
| 2.359963793074628000D-11 -1.487707731991930000D-02 1.031093953347661000D-03
| -1.750998683635100000D-04 -5.291681064945982000D-05 5.246120645233013000D-06
| 1.853856422299516000D-06 -9.222953189161091000D-08 -4.400452982585850000D-08
| 7.577044744130455000D-10 6.503454845793653000D-10 3.831189027855200000D-01
| -2.702720215142252000D-04 -8.568500612432354000D-05 1.260777879591673000D-05
| 3.871139047202216000D-06 -2.790751966699871000D-07 -1.125744261815735000D-07
| 3.785251368356937000D-09 2.163775108895147000D-09 -7.017702094783553000D-12
| -1.633320136276063000D+03 9.189061800678219000D-01 1.580540115238278000D-04
| 5.064255753940704000D-05 -4.764679990459245000D-06 -1.727561069575743000D-06
| 8.426260821467222000D-08 4.051022516688144000D-08 -6.990783024809673000D-10
| -5.941518184249737000D-10 0.000000000000000000D+00 0.000000000000000000D+00
| """.stripMargin
val content2 = """2 1018
| 2.433296500000000000D+06 2.433328500000000000D+06 -2.689229672372822000D+07
| -1.692656326814795000D+07 9.281544272509860000D+05 7.253291988816533000D+04
| -6.843155875103169000D+03 2.006392980456117000D+01 3.604749419955046000D+01
| -2.385398822512717000D+00 -8.291701106436857000D-02 2.210367485406621000D-02
| -8.928464030283223000D-04 -1.050479709148965000D-04 1.420674585479567000D-05
| -1.739321877852503000D-07 3.313631526550170000D+07 -8.409668506753178000D+06
| -1.153522453104271000D+06 7.768562562619453000D+04 2.010549714997069000D+03
| -4.641577152358908000D+02 1.681035125828479000D+01 1.774616647335263000D+00
| -2.251701495545176000D-01 2.587351821035822000D-03 1.461256506666112000D-03
| -1.199136287526499000D-04 -2.979560338286904000D-06 1.161698633472279000D-06
| 2.045619233789579000D+07 -2.733523319085991000D+06 -7.124611665546571000D+05
| 3.395715196364438000D+04 1.784482060671218000D+03 -2.499793919334064000D+02
| 5.234283488488194000D+00 1.195519186532527000D+00 -1.116466220469749000D-01
| -9.138052717693919000D-04 8.731534092982519000D-04 -5.313217995637409000D-05
| -3.066059542782626000D-06 6.387209514744089000D-07 -5.173339049039613000D+07
| -7.656438222927905000D+06 1.246501623810221000D+06 -8.460995543411681000D+03
| -2.750663176481489000D+03 2.144373961221556000D+02 -7.682867951542537000D+00
| -2.361284990352750000D-01 5.506246000892102000D-02 -3.902279207993558000D-03
| 9.078574439736089000D-05 1.136168365722639000D-05 -1.563777673525113000D-06
| 9.070203057311414000D-08 1.007347698709476000D+07 -1.389004124957010000D+07
| -2.458772956175495000D+05 6.296148782457186000D+04 -2.328665131974917000D+03
| -1.331435252167887000D+01 9.776264620723735000D+00 -7.839650977031583000D-01
| 2.592613474990718000D-02 1.335322847823282000D-03 -2.518088695806127000D-04
| 1.694019718158304000D-05 -2.949933002988925000D-07 -6.194188043321855000D-08
| 1.071870557173981000D+07 -6.623198776267618000D+06 -2.607713634056937000D+05
| 3.450494553213579000D+04 -9.580155453825100000D+02 -2.938121386590597000D+01
| 6.019176789554767000D+00 -3.941746007920577000D-01 8.128059531070615000D-03
| 1.118437033855657000D-03 -1.439141952516648000D-04 7.867415276613279000D-06
| 4.816867614038258000D-09 -4.266988472832580000D-08 -5.775097901411539000D+07
| 1.420726465314426000D+06 9.894903104972345000D+05 -2.818259368144785000D+04
| -2.419266581189389000D+02 5.336141336925468000D+01 -4.119591524608261000D+00
| 1.909724225973131000D-01 -4.873272350026797000D-03 -1.548361911487220000D-04
| 2.950148122195781000D-05 -2.204911403663774000D-06 1.070627385254667000D-07
| -1.769780372429223000D-09 -1.770754928399534000D+07 -1.343536985266701000D+07
| 3.026018492297944000D+05 3.066735034841337000D+04 -1.512297267896649000D+03
| 5.512257684766403000D+01 -7.736875201409460000D-01 -8.546920132536526000D-02
| 9.475079250135842000D-03 -5.851370100660832000D-04 2.264561183288521000D-05
| -1.563821958699079000D-07 -6.343569986687580000D-08 6.261540501583859000D-09
| -3.493548224512211000D+06 -7.323068699231082000D+06 5.885010308777998000D+04
| 1.930559427869635000D+04 -7.825567260292953000D+02 2.389783216268691000D+01
| 1.462999202466966000D-02 -6.548044308560858000D-02 5.566528979500921000D-03
| -2.964279646563225000D-04 9.029676361866070000D-06 1.454225307369490000D-07
| -4.428430098699348000D-08 3.630637312039566000D-09 -4.807480733673475000D+07
| 7.971963814655862000D+06 6.500596391399901000D+05 -2.740051057282757000D+04
| 1.753271040215985000D+02 6.833364852589741000D-01 -9.173430757102867000D-01
| 5.288612593631110000D-02 -2.613726034359336000D-03 9.448833430784470000D-05
| -2.097401624627291000D-06 -3.320338606596994000D-08 6.245809671171545000D-09
| -1.109908261907361000D-09 -4.123275186046051000D+07 -9.878866451429666000D+06
| 5.573712087651699000D+05 1.388990983234938000D+04 -6.747339393249110000D+02
| 2.805591646765467000D+01 -1.042576125291222000D+00 1.971560927694319000D-02
| 1.578277463778045000D-04 -6.348650352072165000D-05 4.460832676994923000D-06
| -2.359015489930391000D-07 8.990151323780589000D-09 -4.092559781536005000D-10
| -1.706272460036146000D+07 -6.103994755799625000D+06 2.301678013906657000D+05
| 1.026394063585208000D+04 -3.785678305760710000D+02 1.491302423412220000D+01
| -4.615450799302687000D-01 5.037175851759099000D-03 3.557388508557778000D-04
| -4.371991449442786000D-05 2.599741038893713000D-06 -1.225309853976635000D-07
| 4.547829397875193000D-09 3.142226779264140000D-11 -4.981623765388854000D+07
| -2.130594053717949000D+07 6.426904173154900000D+05 4.555664530524429000D+04
| -6.859390833957602000D+02 -3.112137550723088000D+01 3.106187936145454000D-01
| 1.259042953928998000D-02 -8.939006573481468000D-05 -6.237267125034967000D-06
| 8.451655552113932000D+07 -1.094670097193038000D+07 -1.083435070632773000D+06
| 2.315355498728945000D+04 1.183055495685978000D+03 -1.517414914923546000D+01
| -5.840022794375754000D-01 5.378467852154670000D-03 2.118711299971499000D-04
| -1.299569487231016000D-07 4.115250169952124000D+07 -3.573786438813741000D+06
| -5.279406528581610000D+05 7.527233814762834000D+03 5.754923728518009000D+02
| -4.850777861041309000D+00 -2.823317471859811000D-01 1.599507885372984000D-03
| 1.018132911835314000D-04 2.065779186186999000D-07 -8.571632477397374000D+07
| -1.420838226691943000D+07 1.104601003429122000D+06 3.011927222176927000D+04
| -1.207593355050305000D+03 -1.991202345060516000D+01 5.988570481112346000D-01
| 7.241600845032236000D-03 -2.316763144472515000D-04 -2.190751652123297000D-06
| 5.504452138025214000D+07 -1.820872359334228000D+07 -7.043288241482647000D+05
| 3.895749653410269000D+04 7.553725141852897000D+02 -2.667062945429432000D+01
| -3.488494003957498000D-01 1.082032476499605000D-02 1.081912054365455000D-04
| -3.777781463168655000D-06 3.017162794415294000D+07 -7.289133403827880000D+06
| -3.867033168966052000D+05 1.561256597387823000D+04 4.162018291384364000D+02
| -1.073494807641838000D+01 -1.948966436369885000D-01 4.426882009943085000D-03
| 6.310299511321845000D-05 -1.750146248084750000D-06 -7.993697278891502000D+07
| -1.754316889556509000D+07 3.987670854337322000D+05 1.436749301312400000D+04
| -1.816348120344845000D+02 -3.901110896946467000D+00 4.494149597523505000D-02
| 5.973291292833685000D-04 3.554397878749837000D-05 -1.834664689158128000D-06
| -8.975515851494606000D-07 3.813800766100708000D-07 -2.733266474358985000D-08
| 1.129128813879770000D+08 -1.036885800138052000D+07 -5.608497886766215000D+05
| 8.813573823649733000D+03 2.377852227275766000D+02 -2.727111239151727000D+00
| -4.821770214881915000D-02 9.435535209320271000D-04 -1.522484167529254000D-05
| -8.181069995778181000D-06 2.076140780490916000D-06 -1.781014423840475000D-07
| -8.884485845755597000D-08 4.895576209234159000D+07 -4.496968370796427000D+06
| -2.432284262571611000D+05 3.822701277509181000D+03 1.031173754719121000D+02
| -1.183923486632755000D+00 -2.101982851561914000D-02 3.967686542041742000D-04
| 1.307123311733873000D-07 -3.130833190336205000D-06 6.524692933514499000D-07
| -4.239308537704134000D-08 -3.286006992794084000D-08 -1.113257319397612000D+08
| -1.371831300236782000D+07 5.513733936304469000D+05 1.090239120237987000D+04
| -2.473332227658918000D+02 -2.614369707505976000D+00 5.744192342247236000D-02
| 9.219821929666676000D-05 9.805898492278230000D-05 8.662388456335728000D-06
| -5.316449074716687000D-06 -1.835072335819254000D-07 2.260111854343763000D-07
| 8.806597023831955000D+07 -1.437244425852897000D+07 -4.342432376427838000D+05
| 1.212599071635875000D+04 1.732904294699776000D+02 -3.652439120180545000D+00
| -2.756379705805475000D-02 1.236535131636363000D-03 -2.122701374962842000D-05
| -2.211396513978207000D-05 1.792806885515035000D-06 8.815951004678434000D-07
| -1.394534831568661000D-07 3.817977085702082000D+07 -6.233219197594839000D+06
| -1.883178779180345000D+05 5.258974776959200000D+03 7.513706167400379000D+01
| -1.582958433406764000D+00 -1.204349386348095000D-02 5.172104019058570000D-04
| 3.061595880773365000D-06 -9.189866572571614000D-06 1.189294418102959000D-07
| 3.892325311343658000D-07 -3.196657534339511000D-08 -2.331758205073152000D+08
| -1.008873524269524000D+07 9.595829289867076000D+05 7.351229940461785000D+03
| -2.317614147076386000D+02 -5.635229755268182000D-01 -3.982556268920513000D-02
| -3.549907483664979000D-04 1.166631492542818000D-05 2.551006017218096000D-06
| -5.248384642237348000D-07 7.454981714776048000D+07 -2.610769344671064000D+07
| -3.052491331274603000D+05 1.772660895421223000D+04 8.781752008016714000D+01
| -4.918493056331142000D-01 8.259151294452652000D-03 -7.105056705275818000D-04
| 4.817533954373774000D-06 -1.695796212852444000D-06 -2.513203588502565000D-07
| 4.050529348991553000D+07 -1.170013647052439000D+07 -1.660543669850136000D+05
| 7.930632960950638000D+03 4.656983392018764000D+01 -2.102922690786790000D-01
| 4.861151889217491000D-03 -3.207341445898429000D-04 2.390125894206236000D-06
| -5.701549741751633000D-07 -1.520279396129032000D-07 5.338298770625415000D+08
| 1.259508750342014000D+07 -7.781108449961724000D+04 -3.454051754276959000D+02
| 7.657800168632331000D-01 4.111715415439872000D-03 3.236887180347005000D-07
| 6.829801241224460000D-09 -4.889864563229950000D+08 1.260099795759951000D+07
| 7.133397820538485000D+04 -2.704629383105547000D+02 -1.145172833548371000D+00
| 1.192842317947395000D-03 1.242298876619163000D-05 -5.442585037066713000D-08
| -2.226431004943043000D+08 5.094650553167490000D+06 3.247674427586441000D+04
| -1.075209106408494000D+02 -5.095813720132382000D-01 4.108797492852656000D-04
| 5.307803481914463000D-06 -1.826931591825445000D-08 -1.355137787395983000D+09
| -4.051981101546368000D+06 3.129367254971605000D+04 7.855594958584558000D+00
| -6.696492895452974000D-02 2.502982984824248000D-05 1.824330554418937000D-07
| 3.023134809096902000D+08 -1.204366809462274000D+07 -6.975385806450141000D+03
| 4.804601818324937000D+01 -3.585387269407427000D-03 -6.522949898111889000D-05
| 1.320418817471656000D-07 1.830526397242040000D+08 -4.798877383018048000D+06
| -4.224762962709027000D+03 1.950013316606097000D+01 1.396348589218896000D-03
| -2.796233433007307000D-05 5.370494834001889000D-08 -2.033283405559471000D+08
| -9.461611801654696000D+06 5.670665210053985000D+02 4.442530257539225000D+00
| 3.805666963220404000D-04 -6.090579323452780000D-07 2.587944895498591000D+09
| -1.070956455797927000D+06 -7.219365665350917000D+03 -4.866946650176128000D-02
| 1.757379511508754000D-03 1.448796724741809000D-07 1.136341284240241000D+09
| -3.349628136238680000D+05 -3.169957204200675000D+03 -8.427237207573448000D-02
| 7.640815512423276000D-04 6.950961180991103000D-08 -4.348209450751870000D+09
| 2.064895184763604000D+06 2.965526758667886000D+03 -2.431398783348254000D-01
| -1.603639784668502000D-04 -3.852909608869259000D-08 -1.216413121184321000D+09
| -6.613807801037284000D+06 8.298400309845004000D+02 7.496614744806421000D-01
| -5.380632852485448000D-05 -5.711177047144433000D-08 -3.896720893805372000D+08
| -2.758473521004695000D+06 2.658629715290812000D+02 3.129097016064206000D-01
| -1.812906868363332000D-05 -2.020891252036265000D-08 -3.975082092383796000D+09
| -3.072578945240005000D+06 1.575314251390106000D+03 4.460334660261815000D-01
| 2.753668983245636000D-05 -4.388317381159834000D-09 3.019608638384909000D+09
| -6.323466030021700000D+06 -1.196701745296745000D+03 2.325186552329156000D-01
| 1.120543779912136000D-04 5.041604114688292000D-08 2.139726999398052000D+09
| -1.047242065267922000D+06 -8.480315140134529000D+02 -6.193125131552247000D-02
| 2.709600100057454000D-05 7.559446021055397000D-10 6.121413474213483000D+04
| 1.750628026223295000D+05 -3.417074298816277000D+03 -1.661776212136849000D+03
| 3.686019389193277000D+01 5.872648242633480000D+00 -2.390470265051171000D-01
| -2.025342003463782000D-02 1.243684809168423000D-03 1.066581971600170000D-04
| -7.230852080572124000D-06 -6.562897580057614000D-07 4.960406933761754000D-08
| -3.062602760918006000D+05 2.290842847882294000D+04 1.800988713816135000D+04
| -3.662363735395490000D+02 -9.146239056714132000D+01 2.808573090867996000D+00
| 2.856352754029066000D-01 -1.544755880731027000D-02 -1.230247272933902000D-03
| 8.209074183954785000D-05 7.263990056848417000D-06 -5.270820847236782000D-07
| -4.498485533618949000D-08 -1.665262277811191000D+05 8.971675856685360000D+03
| 9.821882494254984000D+03 -1.647116382731594000D+02 -5.038201417840112000D+01
| 1.401238661131394000D+00 1.598792990845183000D-01 -7.961944543791831000D-03
| -6.920866591027361000D-04 4.234729463764904000D-05 4.081827330956270000D-06
| -2.723972581665553000D-07 -2.537767523270503000D-08 3.322025660206078000D+05
| 8.434231155222905000D+04 -1.723592547095339000D+04 -5.350757253450493000D+02
| 8.023078326882256000D+01 -1.097813174889403000D+00 -1.548379778989200000D-01
| 1.668530322337325000D-02 -1.341106347345906000D-04 -7.174553935659681000D-05
| 4.297033598082505000D-06 1.286659867431704000D-07 -3.151985168162663000D-08
| -1.440536340441763000D+05 1.303160474759385000D+05 7.395862180339744000D+03
| -1.191074143022413000D+03 -1.026882517960938000D+01 3.406527198841221000D+00
| -1.655065261160604000D-01 -3.366896020911124000D-03 1.039922952961214000D-03
| -3.279890227600505000D-05 -3.187844108638829000D-06 3.510059920764529000D-07
| -1.929632890218395000D-09 -8.397421973912524000D+04 6.894252075760761000D+04
| 4.344088320392975000D+03 -6.356384772106988000D+02 -7.241485525040814000D+00
| 1.871768855055946000D+00 -8.636445770404919000D-02 -2.164795131053023000D-03
| 5.662863688071825000D-04 -1.631803342684712000D-05 -1.814586560073786000D-06
| 1.875710184404339000D-07 -4.114478530247974000D-10 3.565630872790536000D+05
| -5.970402085676137000D+04 -1.696375388068313000D+04 5.020140022525400000D+02
| 5.049069296688183000D+01 -9.614966132330988000D-01 7.799985164034102000D-02
| -1.041078261295838000D-03 -4.791335957397253000D-04 1.123370319979775000D-05
| -5.624551710215778000D-07 -2.889771954785132000D-08 7.096205949791660000D-09
| 1.310739954111065000D+05 1.333486543922331000D+05 -6.296733632015530000D+03
| -1.008190633088260000D+03 2.408288407279049000D+01 6.876443894107616000D-01
| -1.418414886415204000D-02 7.654327712645484000D-03 -1.307104404226976000D-04
| -1.083680864111900000D-05 5.965756080826634000D-07 -8.606847503967984000D-08
| -8.229998335638189000D-11 6.454177311622802000D+04 7.334399342344836000D+04
| -3.095738392089591000D+03 -5.575775197586823000D+02 1.208716924768252000D+01
| 3.994664269095189000D-01 -9.139163881344626000D-03 4.165038355122239000D-03
| -6.149932032212256000D-05 -6.135753770654640000D-06 3.331814770535031000D-07
| -4.604206626204594000D-08 -1.843590482687876000D-10 1.295704422011984000D+05
| -1.585480770256011000D+05 -6.492076083209414000D+03 1.220090190295573000D+03
| 3.977521644102378000D+01 -9.172445298115994000D-01 -1.247454329548391000D-01
| -1.163854819914043000D-02 -4.018983997273604000D-05 3.955453470490717000D-05
| 2.943436733652129000D-06 6.313429501376814000D-08 -1.194256287956483000D-08
| 3.145148187965922000D+05 4.236966888940351000D+04 -1.551983582945448000D+04
| -4.712927532395470000D+02 4.788990633884916000D+01 2.090353615939309000D+00
| 7.973425011444569000D-02 -3.967808415936175000D-03 -7.132491124184255000D-04
| -2.264852477952492000D-05 8.712876022829649000D-07 1.962093404552069000D-07
| 1.037555141190808000D-08 1.680536304400278000D+05 2.580411245855069000D+04
| -8.307800723494752000D+03 -2.789647687877614000D+02 2.536089563317334000D+01
| 1.157630154555940000D+00 4.534955780843831000D-02 -1.953982330169024000D-03
| -3.870983602762720000D-04 -1.301490799828619000D-05 4.232916831133075000D-07
| 1.055431999757457000D-07 5.844610465177285000D-09 -1.873411760838627000D+05
| -1.440320069418001000D+05 1.057760334795214000D+04 1.457645725348145000D+03
| -2.752710861773531000D+01 -6.000950470960174000D+00 -1.418811287007630000D-01
| 2.120491623165032000D-02 1.584447116371303000D-03 -6.344108428841174000D-05
| -1.136288116345551000D-05 -2.614836265194690000D-08 7.172767584673229000D-08
| 2.686654946996315000D+05 -8.792363431940469000D+04 -1.508530141799394000D+04
| 6.458150799148771000D+02 8.576605766954028000D+01 6.611137861212265000D-02
| -3.137920040981556000D-01 -1.475415746565857000D-02 1.086029839746944000D-03
| 1.193478103576476000D-04 -2.167458351740934000D-06 -8.085046428001913000D-07
| -1.640927584917833000D-08 1.487469931179267000D+05 -4.521111318696774000D+04
| -8.382334874855269000D+03 3.251189530156970000D+02 4.718724510129451000D+01
| 1.413527863001131000D-01 -1.683309769527231000D-01 -8.393464445853722000D-03
| 5.628090792015692000D-04 6.600382086719406000D-05 -9.795554435925942000D-07
| -4.390941190102734000D-07 -1.017300973334802000D-08 -3.466880366610202000D+05
| -6.004582846332815000D+03 2.155805286541777000D+04 1.261758637404264000D+02
| -1.212369182607272000D+02 -7.769706636685047000D-01 4.556138246161800000D-01
| -2.091613517239521000D-03 -2.063405175630222000D-03 7.125002825258536000D-05
| 1.115369762565590000D-05 -7.553628100667344000D-07 -5.975228100627449000D-08
| 1.135457636249423000D+04 -1.571224605076863000D+05 -5.215713349886165000D+02
| 1.582602350488754000D+03 8.530003920717830000D+00 -6.530668428658123000D+00
| -1.859313746358134000D-02 2.604541355416033000D-02 -4.886257948568556000D-04
| -1.314921684626426000D-04 6.834465894710167000D-06 7.204201827379158000D-07
| -6.230041376160042000D-08 1.181217349475955000D+04 -8.521775006906651000D+04
| -6.556695654858024000D+02 8.587396044510005000D+02 6.793311197854174000D+00
| -3.541396929188812000D+00 -1.823488096527835000D-02 1.420387086421373000D-02
| -2.291275950037653000D-04 -7.273088546254955000D-05 3.519616636366579000D-06
| 4.048393987163527000D-07 -3.282333628212938000D-08 -2.033561964770032000D+05
| 1.417315616886131000D+05 1.256654978673091000D+04 -1.452275226427256000D+03
| -5.548785712154656000D+01 5.592095517869496000D+00 4.808791865917125000D-02
| -1.131042008905777000D-02 5.553108882891362000D-04 -2.845907214550919000D-05
| -3.306688867932742000D-06 4.310502857674835000D-07 1.900264328259666000D-09
| -2.513273732918988000D+05 -9.211413698367190000D+04 1.542635999990119000D+04
| 8.338166558793661000D+02 -8.637479594402127000D+01 -1.969648702278258000D+00
| 2.483593580609547000D-01 -3.002675493662136000D-03 -1.578373926758877000D-04
| 4.351305243189026000D-05 -3.947550185310693000D-06 -1.332199446940638000D-07
| 2.891150188835680000D-08 -1.332454423797816000D+05 -5.239148888667248000D+04
| 8.178439077959464000D+03 4.789965408675041000D+02 -4.605677423033987000D+01
| -1.173104073759136000D+00 1.343774379458615000D-01 -1.420599356108451000D-03
| -9.580677202691918000D-05 2.411349964165488000D-05 -2.088916498828375000D-06
| -7.988901561842419000D-08 1.569739416963886000D-08 1.203905131546543000D+05
| 1.659303283186325000D+05 -6.532986017746363000D+03 -1.468750315118216000D+03
| 4.788688887828814000D+01 3.883012600531084000D+00 -1.788197211661853000D-01
| -6.894596537572886000D-03 2.639569696240347000D-04 3.743880608808940000D-05
| 4.470114743090106000D-07 -2.764307844964442000D-07 -1.808380969283650000D-09
| -2.977845756821518000D+05 4.678436314017404000D+04 1.680554166759328000D+04
| -5.702599156806922000D+02 -7.359766476762262000D+01 2.841742794766821000D+00
| 1.369188675184640000D-01 -7.280288346264738000D-03 -3.795333792993772000D-04
| 2.034539953479722000D-06 2.944970576167238000D-06 3.077152062802724000D-08
| -1.807707320695682000D-08 -1.637456398073297000D+05 2.278138481637791000D+04
| 9.257251104673717000D+03 -2.855191708957761000D+02 -4.094010403363727000D+01
| 1.479665086672817000D+00 7.783575720152382000D-02 -3.849197113354711000D-03
| -2.116980252346641000D-04 5.023079763930538000D-07 1.596667223919862000D-06
| 2.138167000141614000D-08 -9.819976767746148000D-09 1.163275334609084000D+05
| -5.233774549774082000D+03 1.242236944504417000D+01 -1.494868630833210000D-01
| 1.569831792621883000D-02 -1.071125497533110000D-03 -7.299295976869664000D-05
| 3.414214399009480000D-05 -3.776121327641368000D-06 -1.901238019949167000D-07
| 1.177430646949676000D-07 3.336322921741571000D+05 -3.968203953605706000D+03
| -1.180069464766910000D+01 -1.546159987265415000D-01 -9.204002185776568000D-04
| 1.177602571139573000D-03 -2.093095287143333000D-04 9.238299684508924000D-06
| 3.088688468146653000D-06 -6.926937085717815000D-07 3.866254033714984000D-08
| 1.320963208954748000D+05 -1.612274075872885000D+03 -5.121411515106209000D+00
| -6.846010092551236000D-02 -1.689857786775307000D-03 7.405159649285284000D-04
| -1.043865181746372000D-04 1.388170422891744000D-06 2.041784799822882000D-06
| -3.502127354259571000D-07 8.422395003407415000D-09 1.059557280069567000D+05
| -5.138751764981239000D+03 1.153400940602268000D+01 -2.900649365286712000D-02
| 3.517533855536887000D-03 -4.924853491600722000D-05 2.098682850099308000D-05
| -2.406909536966648000D-06 1.891645847828958000D-07 -7.246188246691320000D-09
| -6.987820713789639000D-10 3.255958769012800000D+05 -4.069631031260755000D+03
| -1.350563852346413000D+01 -1.300513302216289000D-01 4.646835797454477000D-04
| -1.462777106941042000D-04 1.336785766067573000D-05 6.025839449708471000D-08
| -1.229146521099767000D-07 2.060000333269163000D-08 -2.176491957104015000D-09
| 1.288282160321177000D+05 -1.656494058180052000D+03 -5.915671146119998000D+00
| -6.111764653478851000D-02 2.792924704382539000D-04 -7.683477026674286000D-05
| 4.802409685751547000D-06 2.840256936385391000D-07 -8.531976971898509000D-08
| 1.174537284526304000D-08 -1.086447298765369000D-09 -9.554761450448868000D-06
| 1.434312236871492000D-06 -8.766113753395242000D-07 -6.283201927589807000D-08
| 7.386762784589545000D-08 -2.410085997501012000D-09 -2.909485118272052000D-09
| 3.803104167220162000D-10 7.223929914054835000D-11 -1.858459423225638000D-11
| 4.177626931814747000D-05 9.802731081144035000D-07 1.254600806910524000D-07
| -1.224735762166997000D-07 -1.982582024239994000D-09 6.857865302028515000D-09
| -5.697227681785048000D-10 -2.132407458102419000D-10 4.061959680349635000D-11
| 4.877379181518982000D-12 -9.260607280958800000D-06 3.482084684301744000D-07
| 6.766496809704799000D-07 2.339976661417670000D-08 -3.715967682131847000D-08
| -1.775827221840528000D-09 6.428512545030324000D-11 4.439406589992871000D-11
| 2.047736309639337000D-11 1.270131546146417000D-12 4.262143030479423000D-05
| -2.289116836173530000D-07 -2.564910005748294000D-09 8.839375330780667000D-08
| 2.335221980343577000D-09 -2.037236446461158000D-09 -1.083880600412434000D-10
| -2.347876517186210000D-11 -4.151780353295098000D-12 -2.502639951800846000D-14
| -7.653072369286110000D-06 -2.134023302867161000D-07 -7.838822270197664000D-07
| 1.112166537084108000D-07 8.430321189250021000D-08 -3.468896845007000000D-09
| -4.348115028157815000D-09 -2.940337129403315000D-11 1.653570716129616000D-10
| 4.276725819490434000D-12 4.353711539227511000D-05 1.024017458162366000D-06
| -1.710724523735323000D-07 -1.344409647340223000D-07 1.081290950638457000D-08
| 9.042356173694689000D-09 -8.460555602167981000D-11 -3.788676344783658000D-10
| -9.270181500923639000D-12 1.258532202587341000D-11 -7.261331031206917000D-06
| 1.671613461282400000D-06 1.967367605950376000D-07 -3.031351823001121000D-07
| -1.291672027693284000D-08 1.680623121829279000D-08 -1.795938084733062000D-10
| -4.513482117210160000D-10 1.669538555866869000D-11 6.791138121274818000D-12
| 4.413607246911647000D-05 1.620175131618098000D-07 3.698059429744919000D-07
| 3.503101552416530000D-08 -3.568778233768713000D-08 -5.226804778766725000D-10
| 1.339656075394713000D-09 -3.307152474890510000D-11 -2.249753836396115000D-11
| 4.706469454170747000D-13 -1.454740346559805000D-02 -5.529527959534361000D-04
| -2.744625303055995000D-05 4.122116246471978000D-05 -4.900322508296825000D-06
| -7.603380294978852000D-07 2.288453868164065000D-07 -4.899024829454945000D-11
| -5.270863020689615000D-09 4.484241777217460000D-10 3.825659637103087000D-01
| -1.296264163077870000D-04 8.199089925898537000D-05 -4.206432768162415000D-06
| -2.330252152463294000D-06 4.562979342216049000D-07 2.399502966836091000D-08
| -1.373362133274331000D-08 5.891898403938502000D-10 2.582872002141968000D-10
| -1.631480690453121000D+03 9.204161874814854000D-01 3.732120977499369000D-05
| -3.818834672814443000D-05 4.274267178144473000D-06 7.084649355024578000D-07
| -2.083765545053930000D-07 -1.986206726415233000D-10 4.826584522127224000D-09
| -4.071568373005917000D-10 -1.517484101993981000D-02 1.750670990469197000D-05
| 1.035616499204270000D-04 5.545067187737023000D-06 2.499969374619908000D-06
| 9.682609992531770000D-08 -9.422178597185805000D-08 -4.177021305877736000D-09
| 4.107955105276460000D-11 3.117516466811592000D-11 3.827106009501331000D-01
| 2.163687550019332000D-04 1.684049588853109000D-05 -9.623773560934046000D-07
| -7.758814802663651000D-09 -2.441316211913099000D-07 -9.730186776739911000D-09
| 2.930133064632142000D-09 1.759873917750634000D-10 4.169093204005277000D-11
| -1.629640242952054000D+03 9.199324940942862000D-01 -9.969010097204942000D-05
| -6.885767802963532000D-06 -2.262997623249585000D-06 -7.576272127583016000D-08
| 8.752844387273525000D-08 3.995688540968921000D-09 -4.808571986324076000D-11
| -3.200657493160959000D-11 -1.402664349484028000D-02 1.150087738662580000D-03
| 9.800260137197455000D-05 -4.139117332294734000D-05 -7.970629808643200000D-06
| 7.210126473158571000D-07 3.047632367204969000D-07 -2.995304304613235000D-09
| -8.037123411593898000D-09 -2.558413099548946000D-10 3.830670222484092000D-01
| 5.262090984412466000D-05 -8.493550132999200000D-05 -1.154106830136530000D-05
| 2.206905076887479000D-06 6.177023703285697000D-07 -2.503288745373604000D-08
| -1.900124168356325000D-08 -2.615222937371982000D-10 4.642188506929323000D-10
| -1.627801480393484000D+03 9.188065499026344000D-01 -1.007624353628490000D-04
| 3.958308595174692000D-05 7.601264965838409000D-06 -6.878952511422162000D-07
| -2.855321161536578000D-07 3.154801513518041000D-09 7.516316017820841000D-09
| 2.271446510028981000D-10 -1.259588407868799000D-02 -2.736498514133223000D-05
| -2.321067441417971000D-04 3.431705088070213000D-05 6.616156130675796000D-06
| -1.562529057099060000D-06 -9.877013456320228000D-08 3.851681568587802000D-08
| 4.869507965700893000D-10 -5.330585267687222000D-10 3.826577887830988000D-01
| -3.414763158083871000D-04 4.523348022534542000D-05 1.596975777886987000D-05
| -2.997265884797822000D-06 -3.230327457119349000D-07 9.780871615136277000D-08
| 3.207659393384025000D-09 -1.819173439646149000D-09 5.461315674870711000D-12
| -1.625963077654582000D+03 9.198986514006647000D-01 2.247804356068623000D-04
| -3.075522297335725000D-05 -6.301108782031560000D-06 1.439505660017538000D-06
| 9.199613228408182000D-08 -3.566728837881373000D-08 -4.263299572394406000D-10
| 4.926449815864463000D-10 0.000000000000000000D+00 0.000000000000000000D+00
| """.stripMargin
val content3 = """3 1018
| 2.433328500000000000D+06 2.433360500000000000D+06 -2.794098488491469000D+07
| 1.190010361859789000D+07 3.357036815384641000D+05 -2.531418404714961000D+04
| 5.116002033591039000D+01 -1.044570590686599000D+01 -2.089861269847875000D-01
| 8.260816380815832000D-03 -5.721466514011423000D-04 2.885879389878558000D-05
| -8.160649077223646000D-07 3.683436998918206000D-08 -6.460254842998920000D-10
| 1.110950673731887000D-10 -5.611014432496096000D+07 -4.901988754759905000D+06
| 6.738855968606416000D+05 6.403429355699267000D+03 -3.219124891553867000D+02
| 8.965547005278323000D+00 -6.043489166859830000D-01 8.472409866831759000D-03
| -5.254293371761822000D-04 -1.439667343065169000D-06 1.087818064935111000D-07
| -2.562381680457644000D-08 9.649622747272081000D-10 3.496315021287363000D-10
| -2.709937393311112000D+07 -3.853911849919371000D+06 3.250426895827206000D+05
| 6.048914330899217000D+03 -1.772391371875398000D+02 5.873108476324986000D+00
| -3.010644574886321000D-01 3.666986888187164000D-03 -2.211992779958913000D-04
| -3.765199704469295000D-06 1.427701617534027000D-07 -1.740925165119289000D-08
| 5.212259659926501000D-10 -1.630496924364385000D-10 -2.418676148102585000D+06
| 1.336719660676814000D+07 2.921667629784199000D+04 -2.638736030513310000D+04
| -1.963748734007373000D+02 -1.394588110894743000D+01 -1.055018835765160000D-01
| 3.235143144544824000D-03 2.423332436698885000D-04 2.640840412540139000D-05
| 7.463060320736543000D-07 4.510574692136390000D-08 1.115968264976010000D-09
| -7.485134180546233000D-11 -6.033559497978733000D+07 7.176674863590802000D+05
| 7.233243757958316000D+05 1.945079854738689000D+03 -2.784712658752919000D+02
| -4.648473916228525000D+00 -6.129244775037180000D-01 -9.715977369528156000D-03
| -6.513055583696094000D-04 -7.288298970217790000D-06 -1.646864349802628000D-07
| 1.098654556522505000D-08 1.888747716869666000D-09 2.252338329993734000D-10
| -3.200668096631588000D+07 -1.004945160907814000D+06 3.832768363559137000D+05
| 3.779265925070915000D+03 -1.283308710032276000D+02 -1.034313651121088000D+00
| -3.163918253116120000D-01 -5.525055957027433000D-03 -3.730124886197110000D-04
| -6.634820332010752000D-06 -1.639730108194799000D-07 1.056574977727855000D-09
| -5.468379563022467000D-10 -6.850318117403408000D-11 2.349464913813688000D+07
| 1.225909585535181000D+07 -3.157811686846276000D+05 -3.183755889976258000D+04
| -4.839075891379311000D+02 -1.331610031433833000D+01 3.049917415185761000D-01
| 3.685225585058867000D-02 2.365173060381534000D-03 1.134086583386868000D-04
| 4.005955757806312000D-06 8.801135589923792000D-08 -2.365738963206380000D-09
| -2.790254854665209000D-10 -5.310153570934411000D+07 6.508981169443998000D+06
| 7.139133038738203000D+05 -4.191559013929016000D+03 -5.538044242542977000D+02
| -2.521991167481417000D+01 -1.213385820146607000D+00 -3.368591242573810000D-02
| -6.810080136196945000D-04 2.581628203580403000D-05 3.188708019618737000D-06
| 2.125830706851279000D-07 1.058429408734883000D-08 -2.581078823464211000D-11
| -3.083433290265596000D+07 2.203144989261918000D+06 4.140801213493694000D+05
| 1.067851197377125000D+03 -2.455185905126371000D+02 -1.208643969086244000D+01
| -6.797152765457250000D-01 -2.181809116336743000D-02 -6.093427832304445000D-04
| 2.009423308717849000D-06 1.286504099314665000D-06 1.043884746597053000D-07
| 5.974824691634206000D-09 2.477021746197153000D-10 4.417369106775857000D+07
| 8.057934943247107000D+06 -7.503291606452896000D+05 -4.074815883717609000D+04
| -5.288750766401956000D+02 1.990572400828648000D+01 3.245267821355282000D+00
| 2.094240304005657000D-01 8.968049839802218000D-03 1.609848680199572000D-04
| -1.371223184870394000D-05 -1.782320660434865000D-06 -1.199069371894809000D-07
| -4.899582819306185000D-09 -3.467065421322235000D+07 1.181814984518651000D+07
| 5.877465917691305000D+05 -1.901031165367035000D+04 -1.429644525499235000D+03
| -6.572486450512942000D+01 -1.984549927105928000D+00 1.402702994176990000D-02
| 6.648772919728132000D-03 5.523593092419058000D-04 2.880870546527211000D-05
| 8.477606370564253000D-07 -1.857703120960834000D-08 -4.247676737667116000D-09
| -2.313841345799452000D+07 5.474960095487886000D+06 3.918270285022671000D+05
| -5.921102103556970000D+03 -7.086142104727993000D+02 -3.716938928757298000D+01
| -1.396934482409674000D+00 -1.425797118828737000D-02 2.619586975544773000D-03
| 2.782830406008492000D-04 1.681050375016709000D-05 6.378796569879645000D-07
| 1.872455365997818000D-09 -1.658298268927763000D-09 -1.044001922489243000D+08
| -4.279228460625685000D+06 1.339641684737426000D+06 8.446038740195739000D+03
| -1.450198659439290000D+03 -3.983421435003392000D+00 6.924559952940531000D-01
| -5.635197637690244000D-04 -2.345579513104507000D-04 1.585335847740323000D-06
| 1.458959984975889000D+07 -2.181089382989858000D+07 -1.831046292824653000D+05
| 4.642956840461862000D+04 1.642193701982736000D+02 -3.115387404720001000D+01
| -2.008047947858180000D-02 1.183646582333749000D-02 -4.590110098552541000D-05
| -3.979313379997519000D-06 1.316126236757658000D+07 -9.537894964142049000D+06
| -1.671821285626765000D+05 2.034552418092868000D+04 1.656863974784622000D+02
| -1.375867907632055000D+01 -5.281779720065843000D-02 5.354853167849338000D-03
| -5.803521676894698000D-06 -1.859217882812419000D-06 -1.021993522110486000D+08
| 6.448586840680480000D+06 1.302021414130521000D+06 -1.451128674028442000D+04
| -1.369046562366280000D+03 1.166941893054752000D+01 5.838201935024804000D-01
| -6.664210923160852000D-03 -1.350093634557523000D-04 3.514608895996619000D-06
| -2.873216379483130000D+07 -2.104942605062330000D+07 3.694661758723555000D+05
| 4.415442500569149000D+04 -4.381956650343183000D+02 -2.794144400398848000D+01
| 2.724241903753071000D-01 8.533628574370436000D-03 -1.448583483454686000D-04
| -1.384478892614328000D-06 -6.460959054262621000D+06 -9.874811844133617000D+06
| 8.370297559380028000D+04 2.077610106723328000D+04 -1.103709856686181000D+02
| -1.330465958124586000D+01 8.554394702191209000D-02 4.259158134297611000D-03
| -5.653332855453802000D-05 -8.494190218142482000D-07 -1.339868702227251000D+08
| -8.854755239598338000D+06 6.569849208646412000D+05 6.605116034282513000D+03
| -2.850775948510776000D+02 -1.148397656128151000D+00 6.003942915349858000D-02
| -1.635558010760301000D-04 4.079231710795525000D-05 3.460657251499010000D-06
| 2.391046644960363000D-07 4.670630414032365000D-09 -7.708361716052643000D-08
| 5.633741521321286000D+07 -1.722294913670723000D+07 -2.745989069051841000D+05
| 1.428667214235239000D+04 9.553685041144202000D+01 -4.042643168219174000D+00
| -4.927982074395724000D-03 9.412829719748869000D-04 -1.348076240955211000D-05
| -2.978636421551268000D-06 -1.182594600744640000D-07 -1.779678207414675000D-07
| 4.292099270546004000D-09 2.441941834280880000D+07 -7.469382821786028000D+06
| -1.190821376823179000D+05 6.195912142056830000D+03 4.143361910926639000D+01
| -1.753183008862855000D+00 -2.289323276602286000D-03 4.281962639424831000D-04
| -4.658867598400225000D-07 -1.652471227082955000D-06 -1.136210258428952000D-07
| -9.060608582942079000D-08 -4.725180287943990000D-09 -1.462450720531028000D+08
| -3.360608177451535000D+06 7.083745431784396000D+05 1.938330890766852000D+03
| -2.937281755936900000D+02 2.526681600673975000D-01 5.247389642003456000D-02
| -1.367829203878811000D-04 8.449243095065674000D-05 -1.842049943067199000D-05
| -1.911523709462448000D-06 9.210943920209495000D-07 -9.623729688107614000D-08
| 2.025190770261759000D+07 -1.871414408912258000D+07 -9.665646814968747000D+04
| 1.516904516395742000D+04 1.510650176468253000D+01 -3.933810989340495000D+00
| 1.400311897152566000D-02 7.495897511466640000D-04 -5.310997115401943000D-05
| -4.464521727160675000D-06 2.291417602964313000D-06 -1.912608870917796000D-07
| -5.382215783579226000D-08 8.769637026076113000D+06 -8.116033447031986000D+06
| -4.191116922569883000D+04 6.578593639988240000D+03 6.551501603468541000D+00
| -1.706619524664093000D+00 6.081287720773140000D-03 3.693962536383229000D-04
| -2.568414307199274000D-05 -4.127086231581188000D-06 1.294358852930823000D-06
| -3.313333754253823000D-08 -4.172365111677426000D-08 -2.454425641825255000D+08
| -2.123458896997632000D+06 1.025001383126984000D+06 3.498448794210761000D+03
| -2.531567550195071000D+02 -1.597272689838311000D+00 -4.569705652859726000D-02
| -1.800269347571226000D-04 8.739183262869769000D-06 2.050373397725417000D-06
| -7.143768218250321000D-08 2.058243949775869000D+07 -2.767563510782455000D+07
| -8.440818392669556000D+04 1.905670543159312000D+04 7.825314188883074000D+01
| -5.545293734216426000D-01 -1.470741190450032000D-02 -9.090271308355407000D-04
| 6.696759913351055000D-07 -6.182096143008343000D-07 -2.300663979373870000D-07
| 1.608669687552772000D+07 -1.263552796957933000D+07 -6.654288352295726000D+04
| 8.645283276159986000D+03 4.276578777732174000D+01 -2.104040192463914000D-01
| -5.469639291436232000D-03 -4.215692044694314000D-04 -9.145675253061151000D-07
| -1.628130832025000000D-07 -8.705786413069498000D-08 5.583845891475878000D+08
| 1.195623392277837000D+07 -8.187971717310784000D+04 -3.324946203710456000D+02
| 8.480315249554636000D-01 4.108624816117851000D-03 -9.370591105364504000D-07
| -9.087169116935312000D-08 -4.632242847862929000D+08 1.315837799224280000D+07
| 6.797932541576402000D+04 -2.885788785764904000D+02 -1.118381193423720000D+00
| 1.485786465441505000D-03 1.227397055897834000D-05 4.256029709165445000D-08
| -2.121981686282132000D+08 5.349165566638058000D+06 3.113786693128861000D+04
| -1.156016181675644000D+02 -5.001034636957886000D-01 5.367801017673974000D-04
| 5.263541053296201000D-06 1.182082096534837000D-08 -1.362991114537219000D+09
| -3.801272827354836000D+06 3.138152840655691000D+04 6.788422091767355000D+00
| -6.641467559292400000D-02 3.022268153657812000D-05 2.101823857030384000D-07
| 2.781721666295584000D+08 -1.209716604558355000D+07 -6.399220352161254000D+03
| 4.797835843608325000D+01 -4.866131505045312000D-03 -6.346620838484653000D-05
| -1.279567388319674000D-08 1.734218280998403000D+08 -4.831739142656906000D+06
| -3.990645592975329000D+03 1.951805213627513000D+01 8.453198933972054000D-04
| -2.742536365435211000D-05 -1.647752446435570000D-08 -2.222468587385094000D+08
| -9.456861925492671000D+06 6.204129931419907000D+02 4.448514776979841000D+00
| 3.671635441769179000D-04 -7.061419770693981000D-07 2.585745226149799000D+09
| -1.128713239019629000D+06 -7.219780889767363000D+03 -2.052591793585695000D-02
| 1.760908832995457000D-03 2.210987211635460000D-07 1.135645995899786000D+09
| -3.603263083911797000D+05 -3.170895072000032000D+03 -7.203462895031751000D-02
| 7.658164825491313000D-04 1.170786743024797000D-07 -4.344055945438404000D+09
| 2.088607684461618000D+06 2.962593667901607000D+03 -2.457103908642208000D-01
| -1.610158329469077000D-04 -6.511649594794673000D-09 -1.229634069589385000D+09
| -6.607133111739004000D+06 8.388307757806393000D+02 7.487941935029535000D-01
| -5.454652566916056000D-05 -3.057286129824816000D-08 -3.951868976317034000D+08
| -2.756331602523398000D+06 2.696161367599361000D+02 3.126171459748109000D-01
| -1.841151236373076000D-05 -1.183690734810855000D-08 -3.981214630805708000D+09
| -3.059955014134752000D+06 1.580669295318856000D+03 4.464737129997522000D-01
| 2.749767491709645000D-05 -7.988221463953164000D-09 3.006952141568161000D+09
| -6.333028452557699000D+06 -1.193900742454126000D+03 2.343164453399797000D-01
| 1.126228423227823000D-04 3.922440035122413000D-08 2.137625728667230000D+09
| -1.054029282692141000D+06 -8.487720797573925000D+02 -6.149573599062828000D-02
| 2.734186629561300000D-05 2.086606176127876000D-08 3.561764159371560000D+05
| 6.037039154196741000D+04 -1.786194967266947000D+04 -3.498262865194830000D+02
| 7.604585089347888000D+01 -8.855781097764073000D-01 -1.332606966343693000D-01
| 9.788431888366322000D-03 1.992238781482697000D-04 -4.117725558313925000D-05
| 2.180597469311019000D-08 1.823513752153601000D-07 -6.037258690158986000D-09
| -1.022492880894319000D+05 1.388974000642142000D+05 5.118135342458016000D+03
| -1.200872419488438000D+03 -4.753665740607880000D+00 3.019588538481292000D+00
| -1.087139096487934000D-01 -4.347245429235697000D-03 5.728573060421188000D-04
| 5.818501499634106000D-06 -2.367389714189337000D-06 3.906915614013596000D-08
| 1.061126277137267000D-08 -6.107400138102141000D+04 7.468210174614926000D+04
| 3.081094018588659000D+03 -6.487339501847434000D+02 -3.918875052495765000D+00
| 1.661695315140653000D+00 -5.683089887561795000D-02 -2.536871095160072000D-03
| 3.088169072636047000D-04 3.869600812725610000D-06 -1.290543456736959000D-06
| 1.821821986982557000D-08 5.884181829661315000D-09 3.340120427645264000D+05
| -8.064667486544904000D+04 -1.565494260048269000D+04 6.422974951290603000D+02
| 4.734404570844178000D+01 -1.185247890953263000D+00 6.301256513449928000D-02
| 9.117559011795940000D-04 -4.481064170092728000D-04 2.173564909928881000D-06
| 1.416304520858962000D-07 -6.092736764004836000D-08 3.246395308724832000D-09
| 1.723508440491812000D+05 1.245148574727939000D+05 -8.243335971652193000D+03
| -9.422572516376157000D+02 2.939691714675315000D+01 7.401202752370752000D-01
| -2.971757274477278000D-02 6.632181346948633000D-03 1.734132676314964000D-05
| -1.441509333182270000D-05 4.807401234987004000D-07 -2.935194995255941000D-08
| -3.910207710780689000D-09 8.887010097336072000D+04 6.912858360775355000D+04
| -4.243037910190091000D+03 -5.248790450449820000D+02 1.523411670454563000D+01
| 4.276921412016361000D-01 -1.715584492066749000D-02 3.596861357801708000D-03
| 1.694671108085954000D-05 -7.907412923675533000D-06 2.584701850536490000D-07
| -1.499646738064828000D-08 -2.188829978739505000D-09 8.008252829769448000D+04
| -1.635187380082756000D+05 -3.963265635817960000D+03 1.272583927406279000D+03
| 3.268410915350445000D+01 -9.363489385660351000D-01 -1.041904145307354000D-01
| -1.237002582673751000D-02 -2.865710518277985000D-04 2.688985439682213000D-05
| 3.225469600950586000D-06 1.999314496576229000D-07 -5.068349856569293000D-10
| 3.260377956647598000D+05 2.250757562488303000D+04 -1.620236780845011000D+04
| -3.335915333200115000D+02 5.055860891421520000D+01 1.868890784427289000D+00
| 8.770151007931789000D-02 -1.723260881670395000D-03 -6.889440230914291000D-04
| -3.608303340685168000D-05 -5.782698820196569000D-07 1.348641365301944000D-07
| 1.634294528120212000D-08 1.765995826020446000D+05 1.480230639844192000D+04
| -8.779800286762875000D+03 -2.028132149986068000D+02 2.711625702197308000D+01
| 1.039323529216076000D+00 4.941980579278874000D-02 -7.520793625618788000D-04
| -3.719083018111089000D-04 -2.014080687258375000D-05 -3.651542642571779000D-07
| 7.061077654940159000D-08 8.930164705298345000D-09 -2.258806122020320000D+05
| -1.281435858208372000D+05 1.306789189512205000D+04 1.395949669024097000D+03
| -3.655212274735148000D+01 -6.609177974806832000D+00 -2.134890533006375000D-01
| 2.253990826969929000D-02 2.673954246537832000D-03 4.289971467079281000D-06
| -1.742918673657498000D-05 -1.059030069276195000D-06 6.240236815649658000D-08
| 2.406197443553740000D+05 -1.060987550866858000D+05 -1.392969865270981000D+04
| 8.146967541739281000D+02 8.825800325939603000D+01 1.031131460832723000D-01
| -3.587987309411747000D-01 -2.394099438660044000D-02 8.255904075374964000D-04
| 1.996249856583127000D-04 5.919246149754371000D-06 -1.033471180030987000D-06
| -1.045103155795108000D-07 1.346743952989621000D+05 -5.595778543271169000D+04
| -7.810114685057616000D+03 4.230783198553177000D+02 4.882996360751819000D+01
| 1.591928750527437000D-01 -1.928767892944549000D-01 -1.342908976512868000D-02
| 4.098901312619231000D-04 1.090093063024599000D-04 3.503028822820053000D-06
| -5.483421811143848000D-07 -5.805943371357740000D-08 -3.383786148390857000D+05
| 2.322325376758661000D+04 2.194853289194303000D+04 -1.869650674990456000D+02
| -1.374771150318556000D+02 5.425821963230788000D-01 6.833053622192028000D-01
| -5.407570315825935000D-03 -4.373273281506255000D-03 7.724436875620391000D-05
| 3.264205338330255000D-05 -8.884370138771674000D-07 -2.588706042476034000D-07
| -3.731950096361994000D+04 -1.579427550603629000D+05 2.550962774724459000D+03
| 1.678234011823309000D+03 -1.034770561297793000D+01 -8.334036843816337000D+00
| 4.441361417524605000D-02 4.654286529212949000D-02 -6.170540795779635000D-04
| -3.272904327786596000D-04 7.585247494829929000D-06 2.533881834165386000D-06
| -8.248346960600887000D-08 -1.537142976715201000D+04 -8.661315069245315000D+04
| 1.055576603854166000D+03 9.199906535660020000D+02 -3.501989961692155000D+00
| -4.563961936361486000D+00 1.363050010009295000D-02 2.551736411688416000D-02
| -2.694689026855370000D-04 -1.800156331611928000D-04 3.640163023597052000D-06
| 1.398122592971279000D-06 -4.106938312772299000D-08 -1.463230454619256000D+05
| 1.579465871496100000D+05 9.117676188436507000D+03 -1.654708603487203000D+03
| -2.582924541257666000D+01 6.817703147988727000D+00 -1.767962963796480000D-01
| -1.527782669583977000D-02 2.090235842989985000D-03 -6.093744581071999000D-05
| -1.008444216123833000D-05 1.075827505758527000D-06 -3.869259699337235000D-09
| -2.780827413884078000D+05 -7.039248248104038000D+04 1.728709767004589000D+04
| 5.479047065320731000D+02 -1.006483521763370000D+02 2.740002596556904000D-01
| 3.178484912821197000D-01 -1.979112740681978000D-02 -1.520063462563789000D-04
| 1.407062470193325000D-04 -8.789727243055584000D-06 -3.701049923109882000D-07
| 8.290706714966719000D-08 -1.497863467895375000D+05 -4.084068020873203000D+04
| 9.308233332304177000D+03 3.251375461082334000D+02 -5.461440563648205000D+01
| 4.209011926836016000D-02 1.764654343300718000D-01 -1.057186677351342000D-02
| -1.154499743179596000D-04 7.780970557584444000D-05 -4.646473164419405000D-06
| -2.188213864899987000D-07 4.535842429132923000D-08 1.803733164006750000D+05
| 1.530006747806600000D+05 -9.618224658054137000D+03 -1.255231949433259000D+03
| 5.981868139524117000D+01 2.008742042711791000D+00 -1.471757787283614000D-01
| 3.724419067610853000D-03 -2.748704948499010000D-04 2.088490861167716000D-06
| 2.763305139670538000D-06 -1.797303394291446000D-07 -9.392971342538845000D-10
| -2.776198354366433000D+05 6.909500744685526000D+04 1.526661373477658000D+04
| -7.664692913910719000D+02 -5.551562831147445000D+01 3.031247012452222000D+00
| 6.285084081334115000D-03 -2.411929602772815000D-03 1.928582853589835000D-04
| -3.366335720289094000D-05 1.435524658403211000D-06 1.052684417650894000D-07
| -1.082316420791096000D-08 -1.544273450807100000D+05 3.548066762723241000D+04
| 8.493110396567049000D+03 -3.997560975679131000D+02 -3.129797694774895000D+01
| 1.625742247051064000D+00 5.843237798835649000D-03 -1.377054276944855000D-03
| 1.093444981046994000D-04 -1.842265079433810000D-05 7.422297219722323000D-07
| 6.031296681326209000D-08 -5.904439847921460000D-09 3.745171809767282000D+05
| 3.400219487201815000D+04 -1.816896090065870000D+04 -1.456071990300640000D+02
| 6.988097695344740000D+01 -7.935387792579055000D-01 -8.516496927933256000D-02
| 3.668732058529720000D-03 1.871317768873762000D-04 -4.834763677993372000D-06
| -1.314900207812082000D-06 4.257969089308720000D-08 4.607403559709484000D-09
| -5.407087049857573000D+04 1.438994520039643000D+05 2.712341839140580000D+03
| -1.181548548278723000D+03 1.759601881889869000D+00 2.432821877026623000D+00
| -6.114902744171910000D-02 -2.484154633753860000D-03 1.211910246050703000D-04
| 1.476138663214390000D-05 -3.401151649782332000D-07 -7.362701311983999000D-08
| 3.426518026254813000D-09 -3.509490246377787000D+04 7.818904136327154000D+04
| 1.758337651214112000D+03 -6.441135791435407000D+02 -1.236157862310047000D-01
| 1.344009187939923000D+00 -3.208350310404696000D-02 -1.418684073382738000D-03
| 6.348344154989991000D-05 8.152179619989964000D-06 -1.661833849131713000D-07
| -4.092443131133749000D-08 1.803441744024064000D-09 9.577008576457427000D+04
| -5.046887919078763000D+03 1.153936938207222000D+01 3.279200605474694000D-02
| 4.550205849728892000D-03 7.902417111988939000D-05 -3.519452551045851000D-07
| -6.659829949434684000D-08 7.034334066263412000D-09 -1.830579777749696000D-09
| 1.096702239726987000D-11 3.173436347782392000D+05 -4.183912890803168000D+03
| -1.506832503051704000D+01 -1.311306659843696000D-01 1.081287356048618000D-04
| 7.696541775864866000D-05 6.003225578241177000D-06 -2.686810083115499000D-08
| 2.407333676397158000D-08 2.966334883172744000D-10 3.539131578035692000D-11
| 1.254655834956732000D+05 -1.706751459005774000D+03 -6.651979815548758000D+00
| -6.256885910729906000D-02 -1.282887457058924000D-04 2.598545760912972000D-05
| 3.149771122035488000D-06 -4.645090447587219000D-09 1.210671116906322000D-08
| 3.467475350375729000D-10 1.516099523145499000D-11 8.577081925492692000D+04
| -4.951648637500083000D+03 1.241769436587879000D+01 1.166406683439932000D-01
| 5.690109478884052000D-03 -1.302265454359953000D-05 -1.546939856258837000D-05
| -2.122463155694044000D-06 -2.253681538574419000D-07 -1.641686123834535000D-08
| -4.664131899484063000D-10 3.088504127273186000D+05 -4.310550774836282000D+03
| -1.655188547865429000D+01 -1.077652037237940000D-01 3.535771257946297000D-03
| 3.115289893675683000D-04 1.739259191867861000D-05 6.471310514014639000D-07
| -4.654475677979016000D-08 -1.543533257339582000D-08 -2.098201414751194000D-09
| 1.219965074976723000D+05 -1.762935660226186000D+03 -7.382464405498991000D+00
| -5.541495455053076000D-02 1.429604967405862000D-03 1.595224036052486000D-04
| 1.088157605162344000D-05 5.686047876601489000D-07 -1.505157703493109000D-09
| -6.527675437572473000D-09 -1.072053056962912000D-09 -7.338850683170156000D-06
| -1.803710664533573000D-06 6.104519575166076000D-08 2.094655333329916000D-07
| -1.582635161547388000D-08 -5.582380906409273000D-09 9.837523000357102000D-10
| -2.671611054670498000D-11 -3.607695236733158000D-11 4.000715983348033000D-12
| 4.526114432235230000D-05 2.733934996994938000D-07 -3.174251134053273000D-07
| 2.181706672600934000D-08 1.801834560844899000D-08 -1.692044949077289000D-09
| -1.603044789001625000D-10 4.512012399814539000D-11 -1.169762512249310000D-11
| 8.752303608311033000D-13 -7.961313573254190000D-06 9.119214174732372000D-07
| -2.512061761493153000D-07 -2.421607293042474000D-07 -1.126803321970074000D-09
| 1.098196892829578000D-08 1.932143582763776000D-09 -5.455789139080348000D-12
| -1.034397265124998000D-10 -2.065898951676052000D-11 4.553239508032509000D-05
| 5.979770208429243000D-07 3.161266454814553000D-07 -2.954581895684330000D-08
| -2.748074632095188000D-08 -2.085091769583819000D-09 6.598868891797837000D-10
| 2.512836083742985000D-10 1.622743066108109000D-11 -1.036920162093851000D-11
| -8.925857230834671000D-06 -7.244721033862011000D-07 8.603389290897036000D-07
| 9.890555743624718000D-08 -1.002316547357892000D-07 -3.600602757188359000D-09
| 5.520573224881978000D-09 -1.978847507782727000D-10 -2.119575439068176000D-10
| 2.499920873295634000D-11 4.622663883423919000D-05 -4.566964427389743000D-07
| -1.633641939938643000D-07 1.544442196869445000D-07 1.222525120113100000D-08
| -1.120377604816390000D-08 -1.358796286029346000D-10 5.098529519015302000D-10
| -2.377000413363028000D-11 -1.773036666748729000D-11 -9.032298006043996000D-06
| -1.071021296326834000D-06 -7.783159759430904000D-07 1.022035668007570000D-07
| 4.635492688514157000D-08 -6.538462585153985000D-09 -7.160846847745524000D-10
| 1.529091304538283000D-10 -9.462705248254956000D-12 5.553778044230018000D-14
| 4.649820593387462000D-05 7.223815164612461000D-07 -1.210079880301798000D-07
| -9.821581915181753000D-08 1.317960436149911000D-08 3.303974834587964000D-09
| -5.889455670622252000D-10 -4.789406044831584000D-11 1.227494128139333000D-11
| 2.736197831274441000D-12 -1.319345380927280000D-02 -3.018684599712477000D-04
| 7.614628206227691000D-05 -1.102175795070014000D-06 -9.049356388797797000D-07
| 7.856162142416799000D-07 -3.295058399450670000D-08 -1.343590034488394000D-08
| 1.535208647832774000D-09 6.671113643499918000D-12 3.824467149929591000D-01
| 1.081782645889364000D-04 3.031104980621472000D-05 -4.292272811866272000D-06
| 1.191289780456973000D-06 -1.128203138779687000D-08 -4.536478000482600000D-08
| 3.097303899440095000D-09 3.630249676897681000D-10 -8.558638483534942000D-11
| -1.624122696308241000D+03 9.202294698922508000D-01 -6.616548173614821000D-05
| -6.714254670546422000D-07 7.603227267394436000D-07 -7.120627900153583000D-07
| 3.033250537495675000D-08 1.230522862750232000D-08 -1.404529133502976000D-09
| -3.856910304507367000D-12 -1.299581268290559000D-02 5.946364251142921000D-04
| 1.834643628931488000D-04 1.010450508213471000D-05 -4.426634068175699000D-06
| -1.101442178686085000D-06 -8.942737378364251000D-09 2.218258733960660000D-08
| 3.336081042542690000D-09 9.566402534412964000D-11 3.828212440990590000D-01
| 2.368763479017264000D-04 -1.247185887465307000D-05 -1.195845575252281000D-05
| -1.725029818317499000D-06 1.413348186518235000D-07 6.718953298968464000D-08
| 4.745589075732903000D-09 -5.935813215284841000D-10 -2.184931777061751000D-10
| -1.622283010021999000D+03 9.193527354223498000D-01 -1.841854959020811000D-04
| -1.004082661942239000D-05 4.334797739304637000D-06 1.045190489555206000D-06
| 8.829970920309504000D-09 -2.097735829507938000D-08 -3.181275078190609000D-09
| -9.065168496252443000D-11 -1.118808807773870000D-02 8.379435263551215000D-04
| -2.295303637806989000D-04 -3.288541377752084000D-05 1.084215496362481000D-05
| 9.366503371251378000D-07 -3.728455325556433000D-07 -1.394360162799858000D-08
| 1.088008897070055000D-08 -2.768446115535783000D-10 3.828089556355139000D-01
| -2.969967745898789000D-04 -5.684510204167975000D-05 1.974003851858283000D-05
| 2.116863950246672000D-06 -7.638531227918084000D-07 -4.721082190130566000D-08
| 2.403931601824441000D-08 1.626462464205114000D-10 -6.612565422433993000D-10
| -1.620444962074825000D+03 9.190581679746696000D-01 2.160848709312226000D-04
| 3.303387170336981000D-05 -1.019190792477980000D-05 -9.080531125161340000D-07
| 3.511900593354100000D-07 1.332576665061730000D-08 -1.027776258004744000D-08
| 2.609474807859819000D-10 -1.099208506538113000D-02 -4.032905645979106000D-04
| 1.293391042045851000D-05 2.000626370785691000D-05 -5.338215261926701000D-06
| 1.213748881956558000D-07 1.593869783506700000D-07 -1.131469913685053000D-08
| -1.802782137086754000D-09 1.642054870112768000D-10 3.823460872616851000D-01
| -6.888104176326763000D-05 5.547450681519462000D-05 -6.627035636962309000D-06
| -4.778219029152231000D-07 3.951365623903070000D-07 -2.123580469314607000D-08
| -6.959472528805534000D-09 5.812765725507356000D-10 5.426737039915039000D-11
| -1.618605393180354000D+03 9.202968505329975000D-01 2.294019186203073000D-07
| -1.953911608922570000D-05 4.781979322718280000D-06 -1.029179534185540000D-07
| -1.473542995795781000D-07 1.055736149646037000D-08 1.659191132357225000D-09
| -1.546423294418287000D-10 0.000000000000000000D+00 0.000000000000000000D+00
""".stripMargin
  // Complete ASCII ephemeris fixture, assembled lazily from the three raw chunks declared above.
  lazy val content = content1 + content2 + content3
}
| zauberpony/solarsystem-grand-tour | src/test/scala/net/aerospaceresearch/test/jplparser/data/Ascp1950TestData.scala | Scala | gpl-3.0 | 88,804 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.testkit
import java.util.Optional
import scala.collection.JavaConverters._
import akka.testkit.TestProbe
import com.google.common.collect.ImmutableList
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.javadsl.persistence.TestEntity
import scala.annotation.varargs
class PersistentEntityTestDriverSpec extends ActorSystemSpec {

  /** Creates a fresh test driver wrapping a new `TestEntity` with entity id "1". */
  def newDriver() = new PersistentEntityTestDriver(system, new TestEntity(system, TestProbe().ref), "1")

  "PersistentEntityTestDriver" must {

    "produce events and state from commands" in {
      val driver = newDriver()

      // A single command emits one event and updates the state accordingly.
      val firstOutcome = driver.run(TestEntity.Add.of("a"))
      firstOutcome.events.asScala.toList should ===(List(new TestEntity.Appended("1", "A")))
      firstOutcome.state.getElements.asScala.toList should ===(List("A"))
      firstOutcome.issues.asScala.toList should be(Nil)

      // Several commands passed to one run are applied in order.
      val secondOutcome = driver.run(TestEntity.Add.of("b"), TestEntity.Add.of("c"))
      secondOutcome.events.asScala.toList should ===(
        List(new TestEntity.Appended("1", "B"), new TestEntity.Appended("1", "C"))
      )
      secondOutcome.state.getElements.asScala.toList should ===(List("A", "B", "C"))
      secondOutcome.issues.asScala.toList should be(Nil)
    }

    "be able to change behavior" in {
      val driver = newDriver()

      // Switching to PREPEND mode mid-stream only affects subsequent commands.
      val outcome = driver.run(
        TestEntity.Add.of("a"),
        TestEntity.Add.of("b"),
        new TestEntity.ChangeMode(TestEntity.Mode.PREPEND),
        TestEntity.Add.of("c")
      )
      outcome.events.asScala.toList should ===(List(
        new TestEntity.Appended("1", "A"),
        new TestEntity.Appended("1", "B"),
        new TestEntity.InPrependMode("1"),
        new TestEntity.Prepended("1", "c")
      ))
      outcome.state.getElements.asScala.toList should ===(List("c", "A", "B"))
      outcome.issues.asScala.toList should be(Nil)
    }

    "produce several events from one command" in {
      val driver = newDriver()

      // An Add with a count of 3 appends the same element three times.
      val outcome = driver.run(new TestEntity.Add("a", 3))
      outcome.events.asScala.toList should ===(List(
        new TestEntity.Appended("1", "A"),
        new TestEntity.Appended("1", "A"),
        new TestEntity.Appended("1", "A")
      ))
      outcome.state.getElements.asScala.toList should ===(List("A", "A", "A"))
      outcome.issues.asScala.toList should be(Nil)
    }

    "record reply side effects" in {
      val driver = newDriver()

      val outcome = driver.run(TestEntity.Add.of("a"), TestEntity.Get.instance)
      val recorded = outcome.sideEffects.asScala.toVector
      // The Add command replies with the event it emitted.
      recorded(0) should be(new PersistentEntityTestDriver.Reply(new TestEntity.Appended("1", "A")))
      // The Get command replies with the current entity state.
      recorded(1) match {
        case PersistentEntityTestDriver.Reply(state: TestEntity.State) =>
        case other => fail("unexpected: " + other)
      }
      outcome.issues.asScala.toList should be(Nil)
    }

    "record unhandled commands" in {
      val driver = newDriver()

      // A command without a handler is reported as an issue, not thrown.
      val cmd = new TestEntity.UndefinedCmd
      val outcome = driver.run(cmd)
      outcome.issues.asScala.toList should be(List(PersistentEntityTestDriver.UnhandledCommand(cmd)))
    }

    "be able to handle snapshot state" in {
      val driver = newDriver()

      // Initialize from a snapshot plus one event replayed on top of it.
      val initOutcome = driver.initialize(Optional.of(
        new TestEntity.State(TestEntity.Mode.PREPEND, ImmutableList.of("a", "b", "c"))
      ), new TestEntity.Prepended("1", "z"))
      initOutcome.state.getMode should be(TestEntity.Mode.PREPEND)
      initOutcome.state.getElements.asScala.toList should ===(List("z", "a", "b", "c"))
      initOutcome.events.asScala.toList should ===(List(new TestEntity.Prepended("1", "z")))
      initOutcome.issues.asScala.toList should be(Nil)

      // The restored PREPEND mode is honoured by later commands.
      val addOutcome = driver.run(TestEntity.Add.of("y"))
      addOutcome.events.asScala.toList should ===(List(new TestEntity.Prepended("1", "y")))
      addOutcome.state.getElements.asScala.toList should ===(List("y", "z", "a", "b", "c"))
      addOutcome.issues.asScala.toList should be(Nil)
    }
  }
}
| edouardKaiser/lagom | testkit/javadsl/src/test/scala/com/lightbend/lagom/javadsl/testkit/PersistentEntityTestDriverSpec.scala | Scala | apache-2.0 | 4,091 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.environment.LocalStreamEnvironment
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.planner.utils.TableTestUtil
import org.apache.calcite.plan.RelOptUtil
import org.junit.Assert.assertEquals
import org.junit.rules.ExpectedException
import org.junit.{Rule, Test}
class TableEnvironmentTest {

  // JUnit rule used to assert precisely on exception types and messages.
  val expectedException: ExpectedException = ExpectedException.none()

  @Rule
  def thrown: ExpectedException = expectedException

  val env = new StreamExecutionEnvironment(new LocalStreamEnvironment())
  val tableEnv = StreamTableEnvironment.create(env, TableTestUtil.STREAM_SETTING)

  @Test
  def testScanNonExistTable(): Unit = {
    // Scanning a name that was never registered must fail validation.
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Table 'MyTable' was not found")
    tableEnv.scan("MyTable")
  }

  @Test
  def testRegisterDataStream(): Unit = {
    val source = env.fromElements[(Int, Long, String, Boolean)]().toTable(tableEnv, 'a, 'b, 'c, 'd)
    tableEnv.registerTable("MyTable", source)

    // Scanning the registered name should yield a plain table scan plan.
    val scanned = tableEnv.scan("MyTable")
    val actualPlan = RelOptUtil.toString(TableTestUtil.toRelNode(scanned))
    assertEquals("LogicalTableScan(table=[[default_catalog, default_database, MyTable]])\\n", actualPlan)

    // Registering a second table under the same name must be rejected.
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Could not execute CreateTable in path")
    tableEnv.registerDataStream("MyTable", env.fromElements[(Int, Long)]())
  }

  @Test
  def testSimpleQuery(): Unit = {
    val source = env.fromElements[(Int, Long, String, Boolean)]().toTable(tableEnv, 'a, 'b, 'c, 'd)
    tableEnv.registerTable("MyTable", source)

    // A projection query should translate to a project over a table scan.
    val queried = tableEnv.sqlQuery("SELECT a, c, d FROM MyTable")
    val actualPlan = RelOptUtil.toString(TableTestUtil.toRelNode(queried))
    val expectedPlan = "LogicalProject(a=[$0], c=[$2], d=[$3])\\n" +
      "  LogicalTableScan(table=[[default_catalog, default_database, MyTable]])\\n"
    assertEquals(expectedPlan, actualPlan)
  }
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableEnvironmentTest.scala | Scala | apache-2.0 | 3,104 |
// Copyright 2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.test
import commbank.grimlock.framework.environment.implicits._
import commbank.grimlock.framework.environment.tuner._
import commbank.grimlock.framework.position._
import shapeless.nat.{ _0, _1, _2 }
class TestScalaMatrixNames extends TestMatrix with TestScala {

  import commbank.grimlock.scala.environment.implicits._

  // Expected coordinate lists, shared across the checks below.
  private val stringNames = List(Position("bar"), Position("baz"), Position("foo"), Position("qux"))
  private val intNames = List(Position(1), Position(2), Position(3), Position(4))
  private val intXyzNames = List(Position(1, "xyz"), Position(2, "xyz"), Position(3, "xyz"), Position(4, "xyz"))
  private val stringXyzNames = List(
    Position("bar", "xyz"),
    Position("baz", "xyz"),
    Position("foo", "xyz"),
    Position("qux", "xyz")
  )
  private val stringIntNames = List(
    Position("bar", 1),
    Position("bar", 2),
    Position("bar", 3),
    Position("baz", 1),
    Position("baz", 2),
    Position("foo", 1),
    Position("foo", 2),
    Position("foo", 3),
    Position("foo", 4),
    Position("qux", 1)
  )

  "A Matrix.names" should "return its first over names in 1D" in {
    toU(data1).names(Over(_0), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first over names in 2D" in {
    toU(data2).names(Over(_0), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first along names in 2D" in {
    toU(data2).names(Along(_0), Default()).toList.sorted shouldBe intNames
  }

  it should "return its second over names in 2D" in {
    toU(data2).names(Over(_1), Default()).toList.sorted shouldBe intNames
  }

  it should "return its second along names in 2D" in {
    toU(data2).names(Along(_1), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first over names in 3D" in {
    toU(data3).names(Over(_0), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first along names in 3D" in {
    toU(data3).names(Along(_0), Default()).toList.sorted shouldBe intXyzNames
  }

  it should "return its second over names in 3D" in {
    toU(data3).names(Over(_1), Default()).toList.sorted shouldBe intNames
  }

  it should "return its second along names in 3D" in {
    toU(data3).names(Along(_1), Default()).toList.sorted shouldBe stringXyzNames
  }

  it should "return its third over names in 3D" in {
    toU(data3).names(Over(_2), Default()).toList.sorted shouldBe List(Position("xyz"))
  }

  it should "return its third along names in 3D" in {
    toU(data3).names(Along(_2), Default()).toList.sorted shouldBe stringIntNames
  }
}
class TestSparkMatrixNames extends TestMatrix with TestSpark {

  import commbank.grimlock.spark.environment.implicits._

  // Expected coordinate lists, shared across the checks below.
  private val stringNames = List(Position("bar"), Position("baz"), Position("foo"), Position("qux"))
  private val intNames = List(Position(1), Position(2), Position(3), Position(4))
  private val intXyzNames = List(Position(1, "xyz"), Position(2, "xyz"), Position(3, "xyz"), Position(4, "xyz"))
  private val stringXyzNames = List(
    Position("bar", "xyz"),
    Position("baz", "xyz"),
    Position("foo", "xyz"),
    Position("qux", "xyz")
  )
  private val stringIntNames = List(
    Position("bar", 1),
    Position("bar", 2),
    Position("bar", 3),
    Position("baz", 1),
    Position("baz", 2),
    Position("foo", 1),
    Position("foo", 2),
    Position("foo", 3),
    Position("foo", 4),
    Position("qux", 1)
  )

  // NOTE: Over-slices (beyond 1D) deliberately use Default(12) to exercise the
  // partitioned tuner on Spark, while Along-slices use the plain Default().
  "A Matrix.names" should "return its first over names in 1D" in {
    toU(data1).names(Over(_0), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first over names in 2D" in {
    toU(data2).names(Over(_0), Default(12)).toList.sorted shouldBe stringNames
  }

  it should "return its first along names in 2D" in {
    toU(data2).names(Along(_0), Default()).toList.sorted shouldBe intNames
  }

  it should "return its second over names in 2D" in {
    toU(data2).names(Over(_1), Default(12)).toList.sorted shouldBe intNames
  }

  it should "return its second along names in 2D" in {
    toU(data2).names(Along(_1), Default()).toList.sorted shouldBe stringNames
  }

  it should "return its first over names in 3D" in {
    toU(data3).names(Over(_0), Default(12)).toList.sorted shouldBe stringNames
  }

  it should "return its first along names in 3D" in {
    toU(data3).names(Along(_0), Default()).toList.sorted shouldBe intXyzNames
  }

  it should "return its second over names in 3D" in {
    toU(data3).names(Over(_1), Default(12)).toList.sorted shouldBe intNames
  }

  it should "return its second along names in 3D" in {
    toU(data3).names(Along(_1), Default()).toList.sorted shouldBe stringXyzNames
  }

  it should "return its third over names in 3D" in {
    toU(data3).names(Over(_2), Default(12)).toList.sorted shouldBe List(Position("xyz"))
  }

  it should "return its third along names in 3D" in {
    toU(data3).names(Along(_2), Default()).toList.sorted shouldBe stringIntNames
  }
}
| CommBank/grimlock | grimlock-core/src/test/scala/commbank/grimlock/matrix/TestMatrixNames.scala | Scala | apache-2.0 | 6,193 |
package scala.slick.driver
import java.sql.Types
import scala.slick.SlickException
import scala.slick.lifted._
import scala.slick.ast._
import scala.slick.util.MacroSupport.macroSupportInterpolation
/**
* Slick driver for <a href="http://www.hsqldb.org/">HyperSQL</a>
* (starting with version 2.0).
*
* This driver implements the [[scala.slick.driver.ExtendedProfile]]
* ''without'' the following capabilities:
*
* <ul>
* <li>[[scala.slick.driver.BasicProfile.capabilities.sequenceCurr]]:
* <code>Sequence.curr</code> to get the current value of a sequence is
* not supported by Hsqldb. Trying to generate SQL code which uses this
* feature throws a SlickException.</li>
* </ul>
*
* @author szeiger
*/
trait HsqldbDriver extends ExtendedDriver { driver =>
  // All standard capabilities except reading a sequence's current value,
  // which Hsqldb does not support (see the CurrentValue case below).
  override val capabilities: Set[Capability] = (BasicProfile.capabilities.all
    - BasicProfile.capabilities.sequenceCurr
  )
  override val typeMapperDelegates = new TypeMapperDelegates
  override def createQueryBuilder(input: QueryBuilderInput): QueryBuilder = new QueryBuilder(input)
  override def createTableDDLBuilder(table: Table[_]): TableDDLBuilder = new TableDDLBuilder(table)
  override def createSequenceDDLBuilder(seq: Sequence[_]): SequenceDDLBuilder[_] = new SequenceDDLBuilder(seq)
  class QueryBuilder(input: QueryBuilderInput) extends super.QueryBuilder(input) with OracleStyleRowNum {
    // Hsqldb cannot SELECT a scalar expression without a FROM clause, so a
    // one-row VALUES table is used as the dummy source.
    override protected val scalarFrom = Some("(VALUES (0))")
    override protected val concatOperator = Some("||")
    override def expr(c: Node, skipParens: Boolean = false): Unit = c match {
      case c @ ConstColumn(v: String) if (v ne null) && c.typeMapper(driver).sqlType != Types.CHAR =>
        /* Hsqldb treats string literals as type CHARACTER and pads them with
         * spaces in some expressions, so we cast all string literals to
         * VARCHAR. The length is only 16M instead of 2^31-1 in order to leave
         * enough room for concatenating strings (which extends the size even if
         * it is not needed). */
        b"cast("
        super.expr(c)
        b" as varchar(16777216))"
      /* Hsqldb uses the SQL:2008 syntax for NEXTVAL */
      case Library.NextValue(SequenceNode(name)) => b"(next value for `$name)"
      case Library.CurrentValue(_*) => throw new SlickException("Hsqldb does not support CURRVAL")
      case RowNumber(_) => b"rownum()" // Hsqldb uses Oracle ROWNUM semantics but needs parens
      case _ => super.expr(c, skipParens)
    }
    // Emits SQL:2008 style LIMIT/OFFSET; either part may be absent.
    override protected def buildFetchOffsetClause(fetch: Option[Long], offset: Option[Long]) = (fetch, offset) match {
      case (Some(t), Some(d)) => b" limit $t offset $d"
      case (Some(t), None ) => b" limit $t"
      case (None, Some(d) ) => b" offset $d"
      case _ =>
    }
  }
  class TypeMapperDelegates extends super.TypeMapperDelegates {
    // Byte arrays are stored as LONGVARBINARY rather than the default BLOB type.
    override val byteArrayTypeMapperDelegate = new ByteArrayTypeMapperDelegate {
      override val sqlTypeName = "LONGVARBINARY"
    }
    // UUIDs are stored as fixed-size 16-byte binary values.
    override val uuidTypeMapperDelegate = new UUIDTypeMapperDelegate {
      override def sqlType = java.sql.Types.BINARY
      override def sqlTypeName = "BINARY(16)"
    }
  }
  class TableDDLBuilder(table: Table[_]) extends super.TableDDLBuilder(table) {
    override protected def createIndex(idx: Index) = {
      if(idx.unique) {
        /* Create a UNIQUE CONSTRAINT (with an automatically generated backing
         * index) because Hsqldb does not allow a FOREIGN KEY CONSTRAINT to
         * reference columns which have a UNIQUE INDEX but not a nominal UNIQUE
         * CONSTRAINT. */
        val sb = new StringBuilder append "ALTER TABLE " append quoteIdentifier(table.tableName) append " ADD "
        sb append "CONSTRAINT " append quoteIdentifier(idx.name) append " UNIQUE("
        addIndexColumnList(idx.on, sb, idx.table.tableName)
        sb append ")"
        sb.toString
      } else super.createIndex(idx)
    }
  }
  class SequenceDDLBuilder[T](seq: Sequence[T]) extends super.SequenceDDLBuilder(seq) {
    override def buildDDL: DDL = {
      import seq.integral._
      val increment = seq._increment.getOrElse(one)
      // A negative increment means a descending sequence, which flips the
      // conventional default start value from 1 to -1.
      val desc = increment < zero
      val start = seq._start.getOrElse(if(desc) -1 else 1)
      val b = new StringBuilder append "CREATE SEQUENCE " append quoteIdentifier(seq.name)
      seq._increment.foreach { b append " INCREMENT BY " append _ }
      seq._minValue.foreach { b append " MINVALUE " append _ }
      seq._maxValue.foreach { b append " MAXVALUE " append _ }
      /* The START value in Hsqldb defaults to 0 instead of the more
       * conventional 1/-1 so we rewrite it to make 1/-1 the default. */
      if(start != 0) b append " START WITH " append start
      if(seq._cycle) b append " CYCLE"
      DDL(b.toString, "DROP SEQUENCE " + quoteIdentifier(seq.name))
    }
  }
}
// Stand-alone driver instance for direct use/import.
object HsqldbDriver extends HsqldbDriver
| zefonseca/slick-1.0.0-scala.2.11.1 | src/main/scala/scala/slick/driver/HsqldbDriver.scala | Scala | bsd-2-clause | 4,867 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib.lang
import java.lang.{Boolean => JBoolean}
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
/** Tests the implementation of the java standard library Boolean
*/
/** Tests the implementation of the java standard library Boolean. */
class BooleanTest {

  @Test def booleanValue(): Unit = {
    assertEquals(true, JBoolean.TRUE.booleanValue())
    assertEquals(false, JBoolean.FALSE.booleanValue())
    expectThrows(classOf[Exception], (null: JBoolean).booleanValue())
  }

  @Test def compareTo(): Unit = {
    // Compares through boxed instances to exercise JBoolean.compareTo.
    def cmp(a: Boolean, b: Boolean): Int =
      new JBoolean(a).compareTo(new JBoolean(b))

    assertEquals(0, cmp(false, false))
    assertEquals(0, cmp(true, true))
    assertTrue(cmp(false, true) < 0)
    assertTrue(cmp(true, false) > 0)
  }

  @Test def should_be_a_Comparable(): Unit = {
    // Goes through the Comparable interface rather than JBoolean directly.
    def cmp(a: Any, b: Any): Int =
      a.asInstanceOf[Comparable[Any]].compareTo(b)

    assertEquals(0, cmp(false, false))
    assertEquals(0, cmp(true, true))
    assertTrue(cmp(false, true) < 0)
    assertTrue(cmp(true, false) > 0)
  }

  @Test def should_parse_strings(): Unit = {
    // Each input is checked against all three parsing entry points.
    def check(s: String, expected: Boolean): Unit = {
      assertEquals(expected, JBoolean.parseBoolean(s))
      assertEquals(expected, JBoolean.valueOf(s).booleanValue())
      assertEquals(expected, new JBoolean(s).booleanValue())
    }

    check("false", false)
    check("true", true)
    check("TrUe", true)
    check(null, false)
    check("truee", false)
  }
}
| lrytz/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/lang/BooleanTest.scala | Scala | bsd-3-clause | 2,013 |
package com.github.morikuni.locest.area.domain.repository
import com.github.morikuni.locest.area.domain.model.{Area, AreaId, Coordinate}
import com.github.morikuni.locest.util.{Repository, Session, Transaction, TransactionManager}
/** Marker session type for [[AreaRepository]] operations. */
trait AreaRepositorySession extends Session

/** Repository of [[Area]] aggregates. */
trait AreaRepository extends Repository[Area] {

  /** Fetches the area corresponding to `id`.
    *
    * @param id the id to look up
    * @return Transaction(Some(Area)) on success,
    *         Transaction(None) when no area exists for `id`
    */
  def solve(id: AreaId): Transaction[AreaRepositorySession, Option[Area]]

  /** Fetches the id of the area that contains `coordinate`.
    *
    * @param coordinate the coordinate used for the search
    * @return Transaction(Some(AreaId)) on success,
    *         Transaction(None) when no area contains `coordinate`
    */
  def findByCoordinate(coordinate: Coordinate): Transaction[AreaRepositorySession, Option[AreaId]]

  /** Fetches all area ids.
    *
    * @return Transaction(List[AreaId]) on success
    */
  def all: Transaction[AreaRepositorySession, List[AreaId]]
}
/** Cake-pattern dependency declaration for an [[AreaRepository]] instance. */
trait DependAreaRepository {
  def areaRepository: AreaRepository
}
trait DependAreaRepositoryTransactionManager {
def areaRepositoryTransactionManager: TransactionManager[AreaRepositorySession]
} | morikuni/locest | area/app/com/github/morikuni/locest/area/domain/repository/AreaRepository.scala | Scala | mit | 1,358 |
package dotty.tools.benchmarks.tuples
import org.openjdk.jmh.annotations._
@State(Scope.Thread)
class Apply {

  /** Space-separated pair: "<tuple size> <index to read>". */
  @Param(Array("1 0"))
  var sizeAndIndex: String = _

  var tuple: NonEmptyTuple = _
  var index: Int = _

  /** Parses the parameter string and builds a tuple of `size` "elem" strings. */
  @Setup
  def setup(): Unit = {
    val parts = sizeAndIndex.split(' ')
    val size = parts(0).toInt
    index = parts(1).toInt
    var acc: NonEmptyTuple = "elem" *: Tuple()
    var remaining = size - 1
    while (remaining > 0) {
      acc = "elem" *: acc
      remaining -= 1
    }
    tuple = acc
  }

  /** Element access through the tuples runtime support. */
  @Benchmark
  def tupleApply(): Any =
    runtime.Tuples.apply(tuple, index)

  /** Element access through the generic Product interface. */
  @Benchmark
  def productElement(): Any =
    tuple.asInstanceOf[Product].productElement(index)
}
| lampepfl/dotty | bench-run/src/main/scala/dotty/tools/benchmarks/tuples/Apply.scala | Scala | apache-2.0 | 629 |
package com.peterparameter.ecm.common
import com.peterparameter.ecm.common.Alias.Num
import scodec.bits._
import spire.math.SafeLong
import spire.std.bigDecimal._
import spire.syntax.nroot._
import spire.syntax.trig._
import scala.annotation.tailrec
import scala.collection.SeqView
import scala.collection.immutable.LazyList
object Utils {

  /** Extra operations on spire's SafeLong. */
  implicit class SafeLongOps(num: SafeLong) {

    /** Big-endian bit view of the number with leading zero bits dropped.
      * If no leading zero bits exist (or the number is all zeros), the
      * full view is returned unchanged.
      */
    def toBitVector: SeqView[Boolean] = {
      val seq = BitVector.view(num.toBigInt.toByteArray).toIndexedSeq
      val idx = seq.indexOf(true)
      if (idx > 0)
        seq.view.slice(idx, seq.length)
      else seq.view
    }
  }

  /** Conversion from BigInt to spire's SafeLong. */
  implicit class BigIntOps(num: BigInt) {
    def toSafeLong: SafeLong = SafeLong(num)
  }

  /** Infinite, lazily evaluated stream of primes (2, 3, 5, 7, ...).
    *
    * A candidate `n` is prime when no already-generated prime `p` with
    * `p * p <= n` divides it (trial division up to sqrt(n)).
    */
  val primes: LazyList[Int] = {
    @tailrec
    def next(n: Int, stream: LazyList[Int]): LazyList[Int] =
      // BUG FIX: the original tested `(stream.head ^ 2) > n`, but `^` is
      // bitwise XOR in Scala, not exponentiation. The trial division must
      // stop as soon as head * head > n; otherwise the self-referential
      // stream can be forced past the primes generated so far.
      // `toLong` avoids Int overflow for large heads.
      if (stream.isEmpty || stream.head.toLong * stream.head > n)
        n #:: loop(n + 2, primes)
      else if (n % stream.head == 0)
        next(n + 2, primes)
      else
        next(n, stream.tail)

    def loop(n: Int, stream: LazyList[Int]): LazyList[Int] = next(n, stream)

    2 #:: loop(3, primes)
  }

  /** Expected length (decimal digits) of a factor of n: digits of sqrt(n). */
  private def expectedFactorLength(n: Num): Int = n.sqrt().toBigDecimal.log(10).toInt

  /** Recommended ECM stage-1 bound B1 for factoring `n`. */
  def b1Bound(n: Num): Long = getB1(expectedFactorLength(n))

  // Stage-1 bound lookup keyed by the expected factor length in digits.
  private def getB1(expectedFactorLength: Int): Long = expectedFactorLength match {
    case x if x > 65 => 2900000000L
    case x if x > 60 => 850000000L
    case x if x > 55 => 260000000L
    case x if x > 50 => 110000000L
    case x if x > 45 => 43000000L
    case x if x > 40 => 11000000L
    case x if x > 35 => 3000000L
    case x if x > 30 => 1000000L
    case x if x > 25 => 250000
    case x if x > 20 => 50000
    case x if x > 15 => 11000
    case x if x > 12 => 2000
    case _ => 400
  }
}
| pnosko/spire-ecm | src/main/scala/com/peterparameter/ecm/common/Utils.scala | Scala | mit | 1,774 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.compat.java8.converterImpl
import scala.annotation.switch
import scala.compat.java8.collectionImpl._
import Stepper._
/////////////////////////////
// Stepper implementations //
/////////////////////////////
// Stepper over a Range; indexes into the range lazily via underlying(j).
private[java8] class StepsIntRange(underlying: Range, _i0: Int, _iN: Int)
extends StepsIntLikeIndexed[StepsIntRange](_i0, _iN) {
  def nextInt() = if (hasNext()) { val j = i0; i0 += 1; underlying(j) } else throwNSEE
  def semiclone(half: Int) = new StepsIntRange(underlying, i0, half)
}

// Generic (boxing) stepper over a NumericRange of any element type T.
private[java8] class StepsAnyNumericRange[T](underlying: collection.immutable.NumericRange[T], _i0: Int, _iN: Int)
extends StepsLikeIndexed[T, StepsAnyNumericRange[T]](_i0, _iN) {
  def next() = if (hasNext()) { val j = i0; i0 += 1; underlying(j) } else throwNSEE
  def semiclone(half: Int) = new StepsAnyNumericRange[T](underlying, i0, half)
}

// Primitive Int stepper over a NumericRange[Int] (avoids boxing).
private[java8] class StepsIntNumericRange(underlying: collection.immutable.NumericRange[Int], _i0: Int, _iN: Int)
extends StepsIntLikeIndexed[StepsIntNumericRange](_i0, _iN) {
  def nextInt() = if (hasNext()) { val j = i0; i0 += 1; underlying(j) } else throwNSEE
  def semiclone(half: Int) = new StepsIntNumericRange(underlying, i0, half)
}

// Primitive Long stepper over a NumericRange[Long] (avoids boxing).
private[java8] class StepsLongNumericRange(underlying: collection.immutable.NumericRange[Long], _i0: Int, _iN: Int)
extends StepsLongLikeIndexed[StepsLongNumericRange](_i0, _iN) {
  def nextLong() = if (hasNext()) { val j = i0; i0 += 1; underlying(j) } else throwNSEE
  def semiclone(half: Int) = new StepsLongNumericRange(underlying, i0, half)
}
//////////////////////////
// Value class adapters //
//////////////////////////
// Zero-allocation (value class) adapter giving Range a `stepper` method.
final class RichRangeCanStep[T](private val underlying: Range) extends AnyVal with MakesStepper[Int, EfficientSubstep] {
  def stepper[S <: Stepper[_]](implicit ss: StepperShape[Int, S]) =
    new StepsIntRange(underlying, 0, underlying.length).asInstanceOf[S with EfficientSubstep]
}

// Value-class adapter for NumericRange; dispatches on the element shape to
// pick a primitive Int/Long stepper, falling back to the boxing variant.
final class RichNumericRangeCanStep[T](private val underlying: collection.immutable.NumericRange[T]) extends AnyVal with MakesStepper[T, EfficientSubstep] {
  def stepper[S <: Stepper[_]](implicit ss: StepperShape[T, S]) = ((ss.shape: @switch) match {
    case StepperShape.IntValue => new StepsIntNumericRange (underlying.asInstanceOf[collection.immutable.NumericRange[Int]], 0, underlying.length)
    case StepperShape.LongValue => new StepsLongNumericRange (underlying.asInstanceOf[collection.immutable.NumericRange[Long]], 0, underlying.length)
    case _ => ss.parUnbox(new StepsAnyNumericRange[T](underlying, 0, underlying.length))
  }).asInstanceOf[S with EfficientSubstep]
}
| scala/scala-java8-compat | src/main/scala-2.13-/scala/compat/java8/converterImpl/StepsRange.scala | Scala | apache-2.0 | 2,966 |
/**
* build.scala
*
* @author <a href="mailto:jim@corruptmemory.com">Jim Powers</a>
*
* Copyright 2011 Jim Powers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import Keys._
// Shared project coordinates and default sbt settings for all modules.
object BuildSettings {
  val buildOrganization = "com.corruptmemory"
  val buildScalaVersion = "2.9.1"
  val buildVersion = "0.1.0-SNAPSHOT"
  val buildSettings = Defaults.defaultSettings ++
    Seq (organization := buildOrganization,
         scalaVersion := buildScalaVersion,
         version := buildVersion,
         shellPrompt := ShellPrompt.buildShellPrompt)
}

// Custom sbt shell prompt showing "project:git-branch:version> ".
object ShellPrompt {
  // Process logger that discards all output (used when shelling out to git).
  object devnull extends ProcessLogger {
    def info (s: => String) {}
    def error (s: => String) { }
    def buffer[T] (f: => T): T = f
  }
  // Matches the current branch line of `git branch` output, e.g. "* master".
  val current = """\*\s+(\w+)""".r
  def gitBranches = ("git branch --no-color" lines_! devnull mkString)
  val buildShellPrompt = {
    (state: State) => {
      val currBranch = current findFirstMatchIn gitBranches map (_ group(1)) getOrElse "-"
      val currProject = Project.extract (state).currentProject.id
      "%s:%s:%s> ".format (currProject, currBranch, BuildSettings.buildVersion)
    }
  }
}

// Maven repositories consulted for dependency resolution.
object Resolvers {
  val corruptmemoryUnfilteredRepo = "repo.corruptmemory.com" at "http://corruptmemory.github.com/Unfiltered/repository"
  val jbossResolver = "jboss repo" at "http://repository.jboss.org/nexus/content/groups/public-jboss"
  val repo1Resolver = "repo1" at "http://repo1.maven.org/maven2"
  val javaNetResolvers = "Java.net Maven 2 Repo" at "http://download.java.net/maven/2"
  // val thirdParty = "Third Party" at "http://aws-gem-server1:8081/nexus/content/repositories/thirdparty"
}

// Library dependency versions and module ids.
object Dependencies {
  val scalaCheckVersion = "1.9"
  val scalaZVersion = "6.0.3"
  val zookeeperVersion = "3.3.4"
  val sbinaryVersion = "0.4.1-SNAPSHOT"
  val scalaz = "org.scalaz" %% "scalaz-core" % scalaZVersion
  val scalaCheck = "org.scala-tools.testing" %% "scalacheck" % scalaCheckVersion % "test"
  val zookeeper = "org.apache.zookeeper" % "zookeeper" % zookeeperVersion
  val sbinary = "org.scala-tools.sbinary" %% "sbinary" % sbinaryVersion
}
// Multi-module build definition: root aggregates library, sbinary and examples.
object ArticleServiceBuild extends Build {
  val buildShellPrompt = ShellPrompt.buildShellPrompt

  import Dependencies._
  import BuildSettings._
  import Resolvers._

  val coreDeps = Seq(scalaz,scalaCheck,zookeeper)
  val sbinaryDeps = Seq(sbinary)

  // Helper: creates a sub-project with a display name distinct from its id.
  def namedSubProject(projectName: String, id: String, path: File, settings: Seq[Setting[_]]) = Project(id, path, settings = buildSettings ++ settings ++ Seq(name := projectName))

  // Root project; builds all modules.
  lazy val herdingCats = Project("herding-cats",
                                 file("."),
                                 settings = buildSettings ++ Seq(name := "Herding Cats")) aggregate (library, sbinaryModule, examples)

  // Core library module.
  lazy val library = namedSubProject("Herding Cats Lib",
                                     "library",
                                     file("library"),
                                     Seq(scalacOptions += "-deprecation",
                                         libraryDependencies := coreDeps,
                                         resolvers ++= Seq(jbossResolver,javaNetResolvers,corruptmemoryUnfilteredRepo,repo1Resolver)))

  // Example programs depending on the core library.
  lazy val examples = namedSubProject("Herding Cats Examples",
                                      "examples",
                                      file("examples"),
                                      Seq(scalacOptions += "-deprecation")) dependsOn(library)

  // SBinary serialization support module.
  lazy val sbinaryModule = namedSubProject("Herding Cats SBinary module",
                                           "sbinary-utils",
                                           file("sbinary"),
                                           Seq(scalacOptions += "-deprecation",
                                               libraryDependencies := sbinaryDeps,
                                               resolvers ++= Seq(jbossResolver,javaNetResolvers,corruptmemoryUnfilteredRepo,repo1Resolver))) dependsOn(library)
}
| corruptmemory/herding-cats | project/build.scala | Scala | apache-2.0 | 4,591 |
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.ga
import scala.util.Random
import org.apache.log4j.Logger
import org.scalaml.core.Design.PipeOperator
import org.scalaml.core.XTSeries
import org.scalaml.core.Types.ScalaMl.{DblVector, DblMatrix}
import org.scalaml.ga.state._
import org.scalaml.util.DisplayUtils
import Chromosome._
/**
* <p>Class to select the best solution or Chromosome from an initial population
* or genes pool using a set of policies for selection, mutation and crossover of
 * chromosomes. The client code initializes the GA solver with either an initialized
 * population or a function () => Population[T] that initializes the population. The
 * class has only one public method search.<br>
* Reference: http://www.kddresearch.org/Publications/Book-Chapters/Hs5.pdf</p>
* @constructor Create a generic GA-based solver. [state] Configuration parameters for the
* GA algorithm, [population] Initialized population of chromosomes (solution candidates)
* @param config Configuration parameters for the GA algorithm
* @param score Scoring method for the chromosomes of this population
 * @throws IllegalArgumentException if the configuration is undefined or the population
* is not initialized
*
* @author Patrick Nicolas
* @since August 29, 2013
* @note Scala for Machine Learning Chapter 10 Genetic Algorithm
*/
final protected class GASolver[T <: Gene](
    config: GAConfig,
    score: Chromosome[T] => Unit) extends PipeOperator[Population[T], Population[T]] {
  import GAConfig._

  // Initial state of the genetic algorithm solver
  private[this] var state: GAState = GA_NOT_RUNNING
  private val logger = Logger.getLogger("GASolver")

  /**
   * <p>Method to resolve any optimization problem using a function to generate
   * a population of Chromosomes, instead of an existing initialized population.
   * @param initialize Function to generate the chromosomes of a population
   * @throws IllegalArgumentException If the initialization or chromosome generation function is undefined
   */
  def |>(initialize: () => Population[T]): Population[T] = this.|>(initialize())

  /**
   * <p>Uses the genetic algorithm reproduction cycle to select the fittest
   * chromosomes (or solution candidates) after a predefined number of reproduction cycles.<br>
   * The convergence criterion used to end the reproduction cycle is somewhat dependent on the
   * problem or domain. This implementation makes sense for the exercise in the book
   * chapter 10. It needs to be modified for a specific application.</p>
   * @throws MatchError if the population is empty
   * @return PartialFunction with a parameterized population as input and the population
   * containing the fittest chromosomes as output.
   */
  override def |> : PartialFunction[Population[T], Population[T]] = {
    // Only runs when no other search is in progress and there is something to evolve.
    case population: Population[T] if(state != GA_RUNNING &&population.size > 1) => {
      // Create a reproduction cycle manager with a scoring function
      val reproduction = Reproduction[T](score)
      state = GA_RUNNING
      // Trigger a reproduction cycle 'mate' then test if
      // any of the convergence criteria applies.
      // NOTE(review): when `mate` succeeds, the state returned by `converge`
      // is only compared against GA_RUNNING and never assigned to `state`,
      // so `state` may still read GA_RUNNING after convergence — verify intent.
      Range(0, config.maxCycles).find(n => {
        if( reproduction.mate(population, config, n) )
          converge(population, n) != GA_RUNNING
        else {
          // Mating failed: classify as failure (empty population) or success.
          if(population.size == 0)
            state = GA_FAILED(s"GASolver.PartialFunction reproduction failed after $n cycles")
          else
            state = GA_SUCCEED(s"GASolver.PartialFunction Completed in $n cycles")
          true
        }
      }).getOrElse(notConverge)
      // The population is returned no matter what..
      population
    }
  }

  /*
   * Domain dependent convergence criteria. This version tests the size
   * of the population and the number of reproduction cycles already executed.
   */
  private def converge(population: Population[T], cycle: Int): GAState = {
    if( population.isNull )
      GA_FAILED(s"GASolver.converge Reproduction failed at $cycle")
    else if(cycle >= config.maxCycles)
      GA_NO_CONVERGENCE(s"GASolver.converge Failed to converge at $cycle ")
    else
      GA_RUNNING
  }

  // Records the non-convergence state; invoked via getOrElse purely for its
  // side effect — the -11 sentinel return value is discarded by the caller.
  private def notConverge: Int = {
    state = GA_NO_CONVERGENCE(s"GASolver.PartialFunction Failed to converge")
    -11
  }
}
/**
* Object companion for the Solve that defines the two constructors
* @author Patrick Nicolas
* @since August 29, 2013
* @note Scala for Machine Learning Chapter 10 Genetic Algorithm
*/
object GASolver {
  /**
   * Default constructor for the Genetic Algorithm (class GASolver)
   * @param config Configuration parameters for the GA algorithm
   * @param score Scoring method for the chromosomes of this population
   */
  def apply[T <: Gene](config: GAConfig, score: Chromosome[T] =>Unit): GASolver[T] =
    new GASolver[T](config, score)

  /**
   * Constructor for the Genetic Algorithm (class GASolver) with undefined scoring function
   * @param config Configuration parameters for the GA algorithm
   */
  // NOTE(review): `(c: Chromosome[T]) => Unit` returns the Unit companion
  // object rather than performing any scoring — i.e. this is a no-op scorer.
  def apply[T <: Gene](config: GAConfig): GASolver[T] =
    new GASolver[T](config, (c: Chromosome[T]) => Unit)
}
// --------------------------- EOF ----------------------------------------------- | batermj/algorithm-challenger | books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/ga/GASolver.scala | Scala | apache-2.0 | 5,732 |
package com.git.huanghaifeng.spark.sql
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
/**
 * Demonstrates basic Spark SQL DataFrame operations (show, schema, select,
 * filter, groupBy) over a local JSON file at /tmp/json.txt.
 */
object SQLParseJsonFile {
    def main(args: Array[String]) {
        // First CLI argument (if present) selects the Spark master; defaults to local mode.
        val master = args.headOption.getOrElse("local")

        val sc = new SparkContext(master, "SQLParseJsonFile", System.getenv("SPARK_HOME"))
        val sqlContext = new SQLContext(sc)

        // Infer the schema from the JSON file and run a few sample queries.
        val df = sqlContext.read.json("file:///tmp/json.txt")
        df.show()
        df.printSchema()
        df.select("uid").show()
        df.select("aid", "uid").show()
        df.select(df("aid"), df("uid") + 1).show()
        df.filter(df("event_time") > 21).show()
        df.groupBy("event_time").count().show()

        // Release cluster resources before exiting (was missing in the original).
        sc.stop()
    }
}
package uk.co.turingatemyhamster
package owl2
/**
* An abstraction of: http://www.w3.org/TR/2012/REC-owl2-syntax-20121211/#Object_Property_Axioms
*
* @author Matthew Pocock
*/
trait ObjectPropertyAxiomsModule {

  // Requires the AxiomsModule mix-in, which supplies ObjectPropertyAxiom.
  importedModules : owl2.AxiomsModule =>

  // Axioms relating two or more object properties.
  type EquivalentObjectProperties <: ObjectPropertyAxiom
  type DisjointObjectProperties <: ObjectPropertyAxiom
  type SubObjectPropertyOf <: ObjectPropertyAxiom
  type InverseObjectProperties <: ObjectPropertyAxiom

  // Axioms constraining the domain/range of an object property.
  type ObjectPropertyDomain <: ObjectPropertyAxiom
  type ObjectPropertyRange <: ObjectPropertyAxiom

  // Axioms asserting a characteristic of a single object property.
  type FunctionalObjectProperty <: ObjectPropertyAxiom
  type ReflexiveObjectProperty <: ObjectPropertyAxiom
  type InverseFunctionalObjectProperty <: ObjectPropertyAxiom
  type IrreflexiveObjectProperty <: ObjectPropertyAxiom
  type SymmetricObjectProperty <: ObjectPropertyAxiom
  type TransitiveObjectProperty <: ObjectPropertyAxiom
  type AsymmetricObjectProperty <: ObjectPropertyAxiom
}
| drdozer/owl2 | core/src/main/scala/uk/co/turingatemyhamster/owl2/ObjectPropertyAxiomsModule.scala | Scala | apache-2.0 | 984 |
package com.sksamuel.elastic4s.requests.searches.aggs
import java.util.TimeZone
import com.sksamuel.elastic4s.ElasticDate
import com.sksamuel.elastic4s.requests.script.Script
import com.sksamuel.exts.OptionImplicits._
/**
 * Immutable builder for an Elasticsearch `date_range` aggregation.
 * Each `def` returns a copy with the corresponding field set, so calls
 * can be chained fluently.
 */
case class DateRangeAggregation(name: String,
                                field: Option[String] = None,
                                script: Option[Script] = None,
                                missing: Option[AnyRef] = None,
                                format: Option[String] = None,
                                timeZone: Option[TimeZone] = None,
                                keyed: Option[Boolean] = None,
                                ranges: Seq[(Option[String], ElasticDate, ElasticDate)] = Nil,
                                unboundedFrom: List[(Option[String], ElasticDate)] = Nil,
                                unboundedTo: List[(Option[String], ElasticDate)] = Nil,
                                subaggs: Seq[AbstractAggregation] = Nil,
                                metadata: Map[String, AnyRef] = Map.empty)
  extends Aggregation {

  type T = DateRangeAggregation

  def timeZone(timeZone: TimeZone): DateRangeAggregation = copy(timeZone = timeZone.some)
  def keyed(keyed: Boolean): DateRangeAggregation = copy(keyed = keyed.some)
  def field(field: String): DateRangeAggregation = copy(field = field.some)
  def script(script: Script): DateRangeAggregation = copy(script = script.some)
  def missing(missing: AnyRef): DateRangeAggregation = copy(missing = missing.some)

  // Bounded ranges: [from, to), optionally named by a key.
  def range(from: ElasticDate, to: ElasticDate): DateRangeAggregation = copy(ranges = ranges :+ (None, from, to))
  def range(key: String, from: ElasticDate, to: ElasticDate): DateRangeAggregation =
    copy(ranges = ranges :+ (key.some, from, to))

  // Half-open ranges with only one endpoint specified.
  def unboundedFrom(from: ElasticDate): DateRangeAggregation = copy(unboundedFrom = unboundedFrom :+ (None, from))
  def unboundedFrom(key: String, from: ElasticDate): DateRangeAggregation = copy(unboundedFrom = unboundedFrom :+(key.some, from))

  def unboundedTo(from: ElasticDate): DateRangeAggregation = copy(unboundedTo = unboundedTo :+ (None, from))
  def unboundedTo(key: String, from: ElasticDate): DateRangeAggregation = copy(unboundedTo = unboundedTo :+ (key.some, from))

  // Date format used to render the bucket keys.
  def format(fmt: String): DateRangeAggregation = copy(format = fmt.some)

  override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
  override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/DateRangeAggregation.scala | Scala | apache-2.0 | 2,609 |
/*
* Copyright 2014 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.shared
import akka.actor.ActorRef
import play.api.libs.json.JsValue
// Marker trait for every message that crosses the wire.
trait WireMessage

// Messages addressed to the agent controller.
trait AgentControllerMessage extends WireMessage

// Messages exchanged with an agent.
trait AgentMessage extends WireMessage

// Wraps a payload with an id so the receiver can acknowledge delivery.
case class Acknowledgeable[T](msg: T, id: Long) extends AgentMessage

// Confirms receipt of the Acknowledgeable carrying the same id.
case class Acknowledge(id: Long) extends AgentMessage

// Payload plus a JSON blob of auxiliary data.
case class MessageWithAttachments[T](msg: T, attachments: JsValue) extends AgentMessage

// Gate state machine: a gate is either open or closed.
sealed trait GateState
case class GateClosed() extends GateState
case class GateOpen() extends GateState

// Asks for the current gate state; the reply goes to `ref`.
case class GateStateCheck(ref: ActorRef)
case class GateStateUpdate(state: GateState)

// Initial handshake announcing an agent actor and its name.
case class Handshake(ref: ActorRef, name: String) extends AgentControllerMessage
case class CommunicationProxyRef(ref: ActorRef) extends AgentControllerMessage

// Free-form JSON payload.
case class GenericJSONMessage(json: String)

// Tap lifecycle commands.
case class CreateTap(config: JsValue) extends AgentControllerMessage
case class OpenTap() extends AgentControllerMessage
case class CloseTap() extends AgentControllerMessage
case class RemoveTap() extends AgentControllerMessage
| mglukh/ehub | modules/core/src/main/scala/agent/shared/WireMessage.scala | Scala | apache-2.0 | 1,636 |
package chapter.five
object ExerciseTwo extends App {

  /**
   * A simple bank account whose balance can never go negative.
   * Resolves the original TODOs: deposit/withdraw amounts are validated
   * and overdrawing is rejected.
   */
  class BankAccount {
    private var _balance = 0.0

    /** Adds `amount` to the balance.
      * @throws IllegalArgumentException if `amount` is negative */
    def deposit(amount: Double): Unit = {
      require(amount >= 0, s"deposit amount must be non-negative, got $amount")
      _balance += amount
    }

    /** Removes `amount` from the balance.
      * @throws IllegalArgumentException if `amount` is negative or exceeds the balance */
    def withdraw(amount: Double): Unit = {
      require(amount >= 0, s"withdrawal amount must be non-negative, got $amount")
      require(amount <= _balance, s"insufficient funds: balance ${_balance}, requested $amount")
      _balance -= amount
    }

    /** Current balance. */
    def balance: Double = _balance
  }
}
| deekim/impatient-scala | src/main/scala/chapter/five/ExerciseTwo.scala | Scala | apache-2.0 | 351 |
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2012 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.worker
import com.signalcollect.interfaces.Storage
import com.signalcollect.interfaces.ComplexAggregation
import com.signalcollect.GraphEditor
import com.signalcollect.interfaces.EdgeId
import com.signalcollect.Vertex
import com.signalcollect.Edge
import com.signalcollect.interfaces.MessageBus
import com.signalcollect.interfaces.WorkerApi
import akka.event.LoggingAdapter
import com.signalcollect.interfaces.WorkerStatistics
import com.signalcollect.interfaces.NodeStatistics
import java.io.DataInputStream
import com.signalcollect.serialization.DefaultSerializer
import java.io.DataOutputStream
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import com.signalcollect.interfaces.StorageFactory
import com.signalcollect.interfaces.WorkerStatus
import akka.actor.ActorRef
import com.signalcollect.interfaces.MessageRecipientRegistry
import com.signalcollect.interfaces.Worker
import com.signalcollect.interfaces.SentMessagesStats
import com.sun.management.OperatingSystemMXBean
import java.lang.management.ManagementFactory
/**
* Main implementation of the WorkerApi interface.
*/
class WorkerImplementation[Id, Signal](
val workerId: Int,
val messageBus: MessageBus[Id, Signal],
val log: LoggingAdapter,
val storageFactory: StorageFactory,
var signalThreshold: Double,
var collectThreshold: Double,
var undeliverableSignalHandler: (Signal, Id, Option[Id], GraphEditor[Id, Signal]) => Unit)
extends Worker[Id, Signal] {
  // Graph editor bound to this worker; the Any/Any view is handed to vertices.
  val graphEditor: GraphEditor[Id, Signal] = new WorkerGraphEditor(workerId, this, messageBus)
  val vertexGraphEditor: GraphEditor[Any, Any] = graphEditor.asInstanceOf[GraphEditor[Any, Any]]
  // NOTE(review): `initialize` runs here, before the `var` declarations below.
  // Those vars are declared with `= _` (JVM default), so the assignments made
  // in `initialize` should survive — confirm against Scala initialization order.
  initialize
  var messageBusFlushed: Boolean = _
  var systemOverloaded: Boolean = _ // If the coordinator allows this worker to signal.
  var operationsScheduled: Boolean = _ // If executing operations has been scheduled.
  var isIdle: Boolean = _ // Idle status that was last reported to the coordinator.
  var isPaused: Boolean = _
  var allWorkDoneWhenContinueSent: Boolean = _
  var lastStatusUpdate: Long = _
  var vertexStore: Storage[Id] = _
  var pendingModifications: Iterator[GraphEditor[Id, Signal] => Unit] = _
  val counters: WorkerOperationCounters = new WorkerOperationCounters()
  // Puts all mutable state into its pristine idle/paused configuration.
  // Called both at construction time and from `reset`.
  def initialize {
    messageBusFlushed = true
    systemOverloaded = false
    operationsScheduled = false
    isIdle = true
    isPaused = true
    allWorkDoneWhenContinueSent = false
    lastStatusUpdate = System.currentTimeMillis
    vertexStore = storageFactory.createInstance[Id]
    pendingModifications = Iterator.empty
  }

  /**
   * Resets all state apart from that which is part of the constructor.
   * Also does not reset the part of the counters which is part of
   * termination detection.
   */
  def reset {
    initialize
    counters.resetOperationCounters
    messageBus.reset
  }

  // While paused, "done" means no pending graph modifications;
  // while running, it means the computation has converged.
  def isAllWorkDone: Boolean = {
    if (isPaused) {
      pendingModifications.isEmpty
    } else {
      isConverged
    }
  }

  // Reports an idle-state transition to the coordinator, but only when the
  // state actually changed and the message bus is ready to deliver it.
  def setIdle(newIdleState: Boolean) {
    if (messageBus.isInitialized && isIdle != newIdleState) {
      isIdle = newIdleState
      sendStatusToCoordinator
    }
  }
def sendStatusToCoordinator {
val currentTime = System.currentTimeMillis
if (messageBus.isInitialized) {
val status = getWorkerStatus
messageBus.sendToCoordinator(status)
} else {
val msg = s"Worker $workerId $this is ignoring status request from coordinator because its MessageBus ${messageBus} is not initialized."
println(msg)
log.debug(msg)
throw new Exception(msg)
}
}
  // Converged when both work queues are drained and all outgoing
  // messages have been flushed to the bus.
  def isConverged = {
    vertexStore.toCollect.isEmpty &&
      vertexStore.toSignal.isEmpty &&
      messageBusFlushed
  }
  // Runs the collect phase on one vertex; when `addToSignal` is set and the
  // vertex now scores above the signal threshold, it is queued for signaling.
  def executeCollectOperationOfVertex(vertex: Vertex[Id, _], addToSignal: Boolean = true) {
    counters.collectOperationsExecuted += 1
    vertex.executeCollectOperation(vertexGraphEditor)
    if (addToSignal && vertex.scoreSignal > signalThreshold) {
      vertexStore.toSignal.put(vertex)
    }
  }

  // Runs the signal phase on one vertex and counts the operation.
  def executeSignalOperationOfVertex(vertex: Vertex[Id, _]) {
    counters.signalOperationsExecuted += 1
    vertex.executeSignalOperation(vertexGraphEditor)
  }

  // Delivers an incoming signal to its target vertex. If the vertex collects
  // eagerly on delivery, it may be signaled right away; otherwise it is queued.
  // Unknown targets are routed to the undeliverable-signal handler.
  def processSignal(signal: Signal, targetId: Id, sourceId: Option[Id]) {
    val vertex = vertexStore.vertices.get(targetId)
    if (vertex != null) {
      if (vertex.deliverSignal(signal, sourceId, vertexGraphEditor)) {
        counters.collectOperationsExecuted += 1
        if (vertex.scoreSignal > signalThreshold) {
          //vertexStore.toSignal.put(vertex)
          // TODO: Unify scheduling related code. The policy here should be pluggable and the same as
          // the one in AkkaWorker.executeOperations.
          executeSignalOperationOfVertex(vertex)
        }
      } else {
        if (vertex.scoreCollect > collectThreshold) {
          vertexStore.toCollect.put(vertex)
        }
      }
    } else {
      undeliverableSignalHandler(signal, targetId, sourceId, graphEditor)
    }
    // Receiving a signal may have produced outgoing messages.
    messageBusFlushed = false
  }
def startComputation {
if (!pendingModifications.isEmpty) {
log.warning("Need to call `awaitIdle` after executiong `loadGraph` or pending operations are ignored.")
}
isPaused = false
sendStatusToCoordinator
}
  // Pauses the computation and reports the new status to the coordinator.
  def pauseComputation {
    isPaused = true
    sendStatusToCoordinator
  }

  // Runs one synchronous signal step over the whole toSignal queue,
  // flushes the bus, and reports whether no collects are pending.
  def signalStep: Boolean = {
    counters.signalSteps += 1
    vertexStore.toSignal.process(executeSignalOperationOfVertex(_))
    messageBus.flush
    messageBusFlushed = true
    vertexStore.toCollect.isEmpty
  }

  // Runs one synchronous collect step over the whole toCollect queue
  // and reports whether no signals are pending afterwards.
  def collectStep: Boolean = {
    counters.collectSteps += 1
    vertexStore.toCollect.process(executeCollectOperationOfVertex(_))
    vertexStore.toSignal.isEmpty
  }
override def addVertex(vertex: Vertex[Id, _]) {
if (vertexStore.vertices.put(vertex)) {
counters.verticesAdded += 1
counters.outgoingEdgesAdded += vertex.edgeCount
vertex.afterInitialization(vertexGraphEditor)
messageBusFlushed = false
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
} else {
val existing = vertexStore.vertices.get(vertex.id)
}
}
  /**
   * Adds `edge` to the vertex with id `sourceId`, scheduling the vertex to
   * signal when its score warrants it. Logs a warning if the source vertex
   * does not exist on this worker.
   */
  override def addEdge(sourceId: Id, edge: Edge[Id]) {
    val vertex = vertexStore.vertices.get(sourceId)
    if (vertex != null) {
      if (vertex.addEdge(edge, vertexGraphEditor)) {
        counters.outgoingEdgesAdded += 1
        if (vertex.scoreSignal > signalThreshold) {
          vertexStore.toSignal.put(vertex)
        }
      }
    } else {
      log.warning("Did not find vertex with id " + sourceId + " when trying to add outgoing edge (" + sourceId + ", " + edge.targetId + ")")
    }
  }
override def removeEdge(edgeId: EdgeId[Id]) {
val vertex = vertexStore.vertices.get(edgeId.sourceId)
if (vertex != null) {
if (vertex.removeEdge(edgeId.targetId, vertexGraphEditor)) {
counters.outgoingEdgesRemoved += 1
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
} else {
log.warning("Outgoing edge not found when trying to remove edge with id " + edgeId)
}
} else {
log.warning("Source vertex not found found when trying to remove outgoing edge with id " + edgeId)
}
}
  // Removes the vertex with id `vertexId`, or logs a warning if it does not exist.
  override def removeVertex(vertexId: Id) {
    val vertex = vertexStore.vertices.get(vertexId)
    if (vertex != null) {
      processRemoveVertex(vertex)
    } else {
      log.warning("Should remove vertex with id " + vertexId + ": could not find this vertex.")
    }
  }
  /**
   * Removes `vertex` and all its outgoing edges from this worker, updating
   * counters, running the removal hook and dropping any pending
   * collect/signal scheduling entries for it.
   */
  protected def processRemoveVertex(vertex: Vertex[Id, _]) {
    val edgesRemoved = vertex.removeAllEdges(vertexGraphEditor)
    counters.outgoingEdgesRemoved += edgesRemoved
    counters.verticesRemoved += 1
    vertex.beforeRemoval(vertexGraphEditor)
    vertexStore.vertices.remove(vertex.id)
    vertexStore.toCollect.remove(vertex.id)
    vertexStore.toSignal.remove(vertex.id)
  }
  // Applies a single graph modification immediately on this worker.
  def modifyGraph(graphModification: GraphEditor[Id, Signal] => Unit, vertexIdHint: Option[Id]) {
    graphModification(graphEditor)
    messageBusFlushed = false
  }
  // Queues graph modifications for later application (see `pendingModifications`).
  def loadGraph(graphModifications: Iterator[GraphEditor[Id, Signal] => Unit], vertexIdHint: Option[Id]) {
    pendingModifications = pendingModifications ++ graphModifications
  }
  // Sets the handler invoked for signals addressed to nonexistent vertices.
  def setUndeliverableSignalHandler(h: (Signal, Id, Option[Id], GraphEditor[Id, Signal]) => Unit) {
    undeliverableSignalHandler = h
  }
  // Sets the score threshold above which vertices are scheduled to signal.
  def setSignalThreshold(st: Double) {
    signalThreshold = st
  }
  // Sets the score threshold above which vertices are scheduled to collect.
  def setCollectThreshold(ct: Double) {
    collectThreshold = ct
  }
  // Re-evaluates the collect/signal scores of every vertex on this worker.
  def recalculateScores {
    vertexStore.vertices.foreach(recalculateVertexScores(_))
  }
  // Re-evaluates the scores of a single vertex, if it exists on this worker.
  def recalculateScoresForVertexWithId(vertexId: Id) {
    val vertex = vertexStore.vertices.get(vertexId)
    if (vertex != null) {
      recalculateVertexScores(vertex)
    }
  }
  // Schedules `vertex` for collecting and/or signalling according to its
  // current scores and the configured thresholds.
  protected def recalculateVertexScores(vertex: Vertex[Id, _]) {
    if (vertex.scoreCollect > collectThreshold) {
      vertexStore.toCollect.put(vertex)
    }
    if (vertex.scoreSignal > signalThreshold) {
      vertexStore.toSignal.put(vertex)
    }
  }
override def forVertexWithId[VertexType <: Vertex[Id, _], ResultType](vertexId: Id, f: VertexType => ResultType): ResultType = {
val vertex = vertexStore.vertices.get(vertexId)
if (vertex != null) {
val result = f(vertex.asInstanceOf[VertexType])
result
} else {
throw new Exception("Vertex with id " + vertexId + " not found.")
}
}
  // Applies `f` to every vertex stored on this worker.
  override def foreachVertex(f: Vertex[Id, _] => Unit) {
    vertexStore.vertices.foreach(f)
  }
  // Applies `f`, parameterized with this worker's graph editor, to every vertex.
  // The editor may produce messages, so the bus is marked unflushed.
  override def foreachVertexWithGraphEditor(f: GraphEditor[Id, Signal] => Vertex[Id, _] => Unit) {
    vertexStore.vertices.foreach(f(graphEditor))
    messageBusFlushed = false
  }
  // Runs the worker-local part of an aggregation over this worker's vertices.
  override def aggregateOnWorker[WorkerResult](aggregationOperation: ComplexAggregation[WorkerResult, _]): WorkerResult = {
    aggregationOperation.aggregationOnWorker(vertexStore.vertices.stream)
  }
  // Global aggregation is coordinated elsewhere; a single worker cannot do it.
  override def aggregateAll[WorkerResult, EndResult](aggregationOperation: ComplexAggregation[WorkerResult, EndResult]): EndResult = {
    throw new UnsupportedOperationException("AkkaWorker does not support this operation.")
  }
/**
* Creates a snapshot of all the vertices in all workers.
* Does not store the toSignal/toCollect collections or pending messages.
* Should only be used when the workers are idle.
* Overwrites any previous snapshot that might exist.
*/
override def snapshot {
// Overwrites previous file if it should exist.
val snapshotFileOutput = new DataOutputStream(new FileOutputStream(s"$workerId.snapshot"))
vertexStore.vertices.foreach { vertex =>
val bytes = DefaultSerializer.write(vertex)
snapshotFileOutput.writeInt(bytes.length)
snapshotFileOutput.write(bytes)
}
snapshotFileOutput.close
}
/**
* Restores the last snapshot of all the vertices in all workers.
* Does not store the toSignal/toCollect collections or pending messages.
* Should only be used when the workers are idle.
*/
override def restore {
reset
val maxSerializedSize = 64768
val snapshotFile = new File(s"$workerId.snapshot")
val buffer = new Array[Byte](maxSerializedSize)
if (snapshotFile.exists) {
val snapshotFileInput = new DataInputStream(new FileInputStream(snapshotFile))
val buffer = new Array[Byte](maxSerializedSize)
while (snapshotFileInput.available > 0) {
val serializedLength = snapshotFileInput.readInt
assert(serializedLength <= maxSerializedSize)
val bytesRead = snapshotFileInput.read(buffer, 0, serializedLength)
assert(bytesRead == serializedLength)
val vertex = DefaultSerializer.read[Vertex[Id, _]](buffer)
addVertex(vertex)
}
snapshotFileInput.close
}
}
/**
* Deletes the worker snapshots if they exist.
*/
def deleteSnapshot {
val snapshotFile = new File(s"$workerId.snapshot")
if (snapshotFile.exists) {
snapshotFile.delete
}
}
  /**
   * Builds a status report for the coordinator. The count of messages sent to
   * the coordinator is incremented by one up front, to account for the status
   * message that will carry this report.
   */
  def getWorkerStatus: WorkerStatus = {
    WorkerStatus(
      workerId = workerId,
      isIdle = isIdle,
      isPaused = isPaused,
      messagesSent = SentMessagesStats(
        messageBus.messagesSentToWorkers,
        messageBus.messagesSentToNodes,
        messageBus.messagesSentToCoordinator + 1, // +1 to account for the status message itself.
        messageBus.messagesSentToOthers),
      messagesReceived = counters.messagesReceived)
  }
  // A single worker only knows its own statistics.
  def getIndividualWorkerStatistics = List(getWorkerStatistics)
  // Snapshot of this worker's counters, queue sizes and message-bus totals.
  def getWorkerStatistics: WorkerStatistics = {
    WorkerStatistics(
      workerId = workerId,
      toSignalSize = vertexStore.toSignal.size,
      toCollectSize = vertexStore.toCollect.size,
      collectOperationsExecuted = counters.collectOperationsExecuted,
      signalOperationsExecuted = counters.signalOperationsExecuted,
      numberOfVertices = vertexStore.vertices.size,
      verticesAdded = counters.verticesAdded,
      verticesRemoved = counters.verticesRemoved,
      numberOfOutgoingEdges = counters.outgoingEdgesAdded - counters.outgoingEdgesRemoved, //only valid if no edges are removed during execution
      outgoingEdgesAdded = counters.outgoingEdgesAdded,
      outgoingEdgesRemoved = counters.outgoingEdgesRemoved,
      heartbeatMessagesReceived = counters.heartbeatMessagesReceived,
      signalMessagesReceived = counters.signalMessagesReceived,
      bulkSignalMessagesReceived = counters.bulkSignalMessagesReceived,
      continueMessagesReceived = counters.continueMessagesReceived,
      requestMessagesReceived = counters.requestMessagesReceived,
      otherMessagesReceived = counters.otherMessagesReceived,
      messagesSentToWorkers = messageBus.messagesSentToWorkers.sum,
      messagesSentToNodes = messageBus.messagesSentToNodes.sum,
      messagesSentToCoordinator = messageBus.messagesSentToCoordinator,
      messagesSentToOthers = messageBus.messagesSentToOthers)
  }
  // A single worker only knows its own node-level statistics.
  def getIndividualNodeStatistics = List(getNodeStatistics)
  // JVM/OS-level statistics gathered via the JMX OperatingSystemMXBean and Runtime.
  def getNodeStatistics: NodeStatistics = {
    val osBean: OperatingSystemMXBean = ManagementFactory.getPlatformMXBean(classOf[OperatingSystemMXBean]);
    val runtime: Runtime = Runtime.getRuntime()
    NodeStatistics(
      nodeId = workerId,
      os = System.getProperty("os.name"),
      runtime_mem_total = runtime.totalMemory(),
      runtime_mem_max = runtime.maxMemory(),
      runtime_mem_free = runtime.freeMemory(),
      runtime_cores = runtime.availableProcessors(),
      jmx_committed_vms = osBean.getCommittedVirtualMemorySize(),
      jmx_mem_free = osBean.getFreePhysicalMemorySize(),
      jmx_mem_total = osBean.getTotalPhysicalMemorySize(),
      jmx_swap_free = osBean.getFreeSwapSpaceSize(),
      jmx_swap_total = osBean.getTotalSwapSpaceSize(),
      jmx_process_load = osBean.getProcessCpuLoad(),
      jmx_process_time = osBean.getProcessCpuTime(),
      jmx_system_load = osBean.getSystemCpuLoad())
  }
  // Logs and reports to the coordinator once the message bus has received all
  // of its registrations.
  // NOTE(review): the method name misspells "Initialization"; kept as-is
  // because the registration methods below (and possibly external callers)
  // depend on it.
  protected def logIntialization {
    if (messageBus.isInitialized) {
      val msg = s"Worker $workerId has a fully initialized message bus."
      //println(msg)
      log.debug(msg)
      sendStatusToCoordinator
    }
  }
  // Registration messages are bookkeeping, so they are excluded from the
  // request-message counter (which was already incremented by the caller).
  def registerWorker(otherWorkerId: Int, worker: ActorRef) {
    counters.requestMessagesReceived -= 1 // Registration messages are not counted.
    messageBus.registerWorker(otherWorkerId, worker)
    logIntialization
  }
  def registerNode(nodeId: Int, node: ActorRef) {
    counters.requestMessagesReceived -= 1 // Registration messages are not counted.
    messageBus.registerNode(nodeId, node)
    logIntialization
  }
  def registerCoordinator(coordinator: ActorRef) {
    counters.requestMessagesReceived -= 1 // Registration messages are not counted.
    messageBus.registerCoordinator(coordinator)
    logIntialization
  }
  def registerLogger(logger: ActorRef) {
    counters.requestMessagesReceived -= 1 // Registration messages are not counted.
    messageBus.registerLogger(logger)
    logIntialization
  }
}
| gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/worker/WorkerImplementation.scala | Scala | apache-2.0 | 16,717 |
/** Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package djinni
import djinni.ast.Record.DerivingType
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
import djinni.writer.IndentWriter
import scala.collection.mutable
class ObjcGenerator(spec: Spec) extends BaseObjcGenerator(spec) {
  /**
   * Accumulates the import/declaration references needed by a generated
   * Objective-C file, split into header references and body references.
   * The `find` overloads walk a type expression and record whatever the
   * marshaller says the header must import or forward-declare.
   */
  class ObjcRefs() {
    var body = mutable.TreeSet[String]()
    var header = mutable.TreeSet[String]()
    def find(ty: TypeRef) { find(ty.resolved) }
    def find(tm: MExpr) {
      tm.args.foreach(find)
      find(tm.base)
    }
    def find(m: Meta) = for (r <- marshal.references(m)) r match {
      case ImportRef(arg) => header.add("#import " + arg)
      case DeclRef(decl, _) => header.add(decl)
    }
  }
  /**
   * Generates the Objective-C header for an enum definition.
   * Flag enums map to NS_OPTIONS; plain enums map to NS_ENUM, or to
   * NS_CLOSED_ENUM when `objcClosedEnums` is configured.
   */
  override def generateEnum(origin: String, ident: Ident, doc: Doc, e: Enum) {
    val refs = new ObjcRefs()
    refs.header.add("#import <Foundation/Foundation.h>")
    val self = marshal.typename(ident, e)
    writeObjcFile(
      ident,
      isHeader = true,
      origin,
      refs.header,
      w => {
        writeDoc(w, doc)
        w.wl(if (e.flags) {
          s"typedef NS_OPTIONS(NSUInteger, $self)"
        } else {
          if (spec.objcClosedEnums) {
            s"typedef NS_CLOSED_ENUM(NSInteger, $self)"
          } else {
            s"typedef NS_ENUM(NSInteger, $self)"
          }
        })
        w.bracedSemi {
          writeEnumOptionNone(w, e, self + idObjc.enum(_))
          writeEnumOptions(w, e, self + idObjc.enum(_))
          writeEnumOptionAll(w, e, self + idObjc.enum(_))
        }
      }
    )
  }
  // File name of the generated implementation (body) file for `ident`.
  def bodyName(ident: String): String = idObjc.ty(
    ident
  ) + "." + spec.objcppExt // Must be a Obj-C++ file in case the constants are not compile-time constant expressions
  // Writes the declaration of a class method that exposes a constant value.
  def writeObjcConstMethDecl(c: Const, w: IndentWriter) {
    val label = "+"
    val nullability = marshal.nullability(c.ty.resolved).fold("")(" __" + _)
    val ret = marshal.fqFieldType(c.ty) + nullability
    val decl = s"$label ($ret)${idObjc.method(c.ident)}"
    writeAlignedObjcCall(w, decl, List(), ";", p => ("", ""))
  }
  /**
   * Generates the Objective-C header (and, when constants exist, the body
   * file) for an interface. Objc-extended interfaces become @protocol,
   * others become @interface. Constants that can be plain variables are
   * emitted as extern declarations; the rest become class methods.
   */
  override def generateInterface(
      origin: String,
      ident: Ident,
      doc: Doc,
      typeParams: Seq[TypeParam],
      i: Interface
  ) {
    val refs = new ObjcRefs()
    i.methods.map(m => {
      m.params.map(p => refs.find(p.ty))
      m.ret.foreach(refs.find)
    })
    i.consts.map(c => {
      refs.find(c.ty)
    })
    val self = marshal.typename(ident, i)
    refs.header.add("#import <Foundation/Foundation.h>")
    def writeObjcFuncDecl(method: Interface.Method, w: IndentWriter) {
      val label = if (method.static) "+" else "-"
      val ret = marshal.returnType(method.ret)
      val decl = s"$label ($ret)${idObjc.method(method.ident)}"
      writeAlignedObjcCall(
        w,
        decl,
        method.params,
        "",
        p =>
          (
            idObjc.field(p.ident),
            s"(${marshal.paramType(p.ty)})${idObjc.local(p.ident)}"
          )
      )
    }
    // Generate the header file for Interface
    writeObjcFile(
      ident,
      isHeader = true,
      origin,
      refs.header,
      w => {
        for (c <- i.consts if marshal.canBeConstVariable(c)) {
          writeDoc(w, c.doc)
          w.w(s"extern ")
          writeObjcConstVariableDecl(w, c, self)
          w.wl(s";")
        }
        w.wl
        writeDoc(w, doc)
        if (i.ext.objc) w.wl(s"@protocol $self <NSObject>")
        else w.wl(s"@interface $self : NSObject")
        for (m <- i.methods) {
          w.wl
          writeMethodDoc(w, m, idObjc.local)
          writeObjcFuncDecl(m, w)
          w.wl(";")
        }
        for (c <- i.consts if !marshal.canBeConstVariable(c)) {
          w.wl
          writeDoc(w, c.doc)
          writeObjcConstMethDecl(c, w)
        }
        w.wl
        w.wl("@end")
      }
    )
    // Generate the implementation file for Interface
    if (i.consts.nonEmpty) {
      refs.body.add(
        "#import " + q(spec.objcIncludePrefix + marshal.headerName(ident))
      )
      writeObjcFile(
        ident.name,
        isHeader = false,
        origin,
        refs.body,
        w => {
          generateObjcConstants(
            w,
            i.consts,
            self,
            ObjcConstantType.ConstVariable
          )
        }
      )
      // For constants implemented via Methods, we generate their definitions in the
      // corresponding ObjcCpp file (i.e.: `ClassName`+Private.mm)
    }
  }
  /**
   * Generates the Objective-C header and implementation for a record:
   * an immutable NSObject subclass with readonly properties, designated and
   * convenience initializers, and -- depending on the record's deriving
   * types -- isEqual:/hash (Eq) and compare: (Ord), plus a description
   * method. Objc-extended records are generated under a "_base" name so the
   * hand-written subclass can extend them.
   */
  override def generateRecord(
      origin: String,
      ident: Ident,
      doc: Doc,
      params: Seq[TypeParam],
      r: Record
  ) {
    val refs = new ObjcRefs()
    for (c <- r.consts)
      refs.find(c.ty)
    for (f <- r.fields)
      refs.find(f.ty)
    val objcName = ident.name + (if (r.ext.objc) "_base" else "")
    val noBaseSelf = marshal.typename(ident, r) // Used for constant names
    val self = marshal.typename(objcName, r)
    refs.header.add("#import <Foundation/Foundation.h>")
    refs.body.add(
      "!#import " + q(
        (if (r.ext.objc) spec.objcExtendedRecordIncludePrefix
         else spec.objcIncludePrefix) + marshal.headerName(ident)
      )
    )
    if (r.ext.objc) {
      refs.header.add(s"@class $noBaseSelf;")
    }
    // True when the field's Objective-C representation is a mutable-capable
    // type that must be defensively copied in the initializer.
    def checkMutable(tm: MExpr): Boolean = tm.base match {
      case MOptional => checkMutable(tm.args.head)
      case MString => true
      case MList => true
      case MSet => true
      case MMap => true
      case MBinary => true
      case _ => false
    }
    val firstInitializerArg =
      if (r.fields.isEmpty) ""
      else IdentStyle.camelUpper("with_" + r.fields.head.ident.name)
    // Generate the header file for record
    writeObjcFile(
      objcName,
      isHeader = true,
      origin,
      refs.header,
      w => {
        writeDoc(w, doc)
        w.wl(s"@interface $self : NSObject")
        def writeInitializer(sign: String, prefix: String) {
          val decl = s"$sign (nonnull instancetype)$prefix$firstInitializerArg"
          writeAlignedObjcCall(
            w,
            decl,
            r.fields,
            "",
            f =>
              (
                idObjc.field(f.ident),
                s"(${marshal.paramType(f.ty)})${idObjc.local(f.ident)}"
              )
          )
          w.wl(";")
        }
        writeInitializer("-", "init")
        if (!r.ext.objc) writeInitializer("+", IdentStyle.camelLower(objcName))
        for (c <- r.consts if !marshal.canBeConstVariable(c)) {
          w.wl
          writeDoc(w, c.doc)
          writeObjcConstMethDecl(c, w)
        }
        for (f <- r.fields) {
          w.wl
          writeDoc(w, f.doc)
          val nullability =
            marshal.nullability(f.ty.resolved).fold("")(", " + _)
          w.wl(s"@property (nonatomic, readonly${nullability}) ${marshal
            .fqFieldType(f.ty)} ${idObjc.field(f.ident)};")
        }
        if (r.derivingTypes.contains(DerivingType.Ord)) {
          w.wl
          w.wl(s"- (NSComparisonResult)compare:(nonnull $self *)other;")
        }
        w.wl
        w.wl("@end")
        // Constants come last in case one of them is of the record's type
        if (r.consts.nonEmpty) {
          w.wl
          for (c <- r.consts if marshal.canBeConstVariable(c)) {
            writeDoc(w, c.doc)
            w.w(s"extern ")
            writeObjcConstVariableDecl(w, c, noBaseSelf)
            w.wl(s";")
          }
        }
      }
    )
    // Generate the implementation file for record
    writeObjcFile(
      objcName,
      isHeader = false,
      origin,
      refs.body,
      w => {
        if (r.consts.nonEmpty)
          generateObjcConstants(
            w,
            r.consts,
            noBaseSelf,
            ObjcConstantType.ConstVariable
          )
        w.wl
        w.wl(s"@implementation $self")
        w.wl
        // Constructor from all fields (not copying)
        val init = s"- (nonnull instancetype)init$firstInitializerArg"
        writeAlignedObjcCall(
          w,
          init,
          r.fields,
          "",
          f =>
            (
              idObjc.field(f.ident),
              s"(${marshal.paramType(f.ty)})${idObjc.local(f.ident)}"
            )
        )
        w.wl
        w.braced {
          w.w("if (self = [super init])").braced {
            for (f <- r.fields) {
              if (checkMutable(f.ty.resolved))
                w.wl(
                  s"_${idObjc.field(f.ident)} = [${idObjc.local(f.ident)} copy];"
                )
              else
                w.wl(s"_${idObjc.field(f.ident)} = ${idObjc.local(f.ident)};")
            }
          }
          w.wl("return self;")
        }
        w.wl
        // Convenience initializer
        if (!r.ext.objc) {
          val decl =
            s"+ (nonnull instancetype)${IdentStyle.camelLower(objcName)}$firstInitializerArg"
          writeAlignedObjcCall(
            w,
            decl,
            r.fields,
            "",
            f =>
              (
                idObjc.field(f.ident),
                s"(${marshal.paramType(f.ty)})${idObjc.local(f.ident)}"
              )
          )
          w.wl
          w.braced {
            val call = s"return [($self*)[self alloc] init$firstInitializerArg"
            writeAlignedObjcCall(
              w,
              call,
              r.fields,
              "",
              f => (idObjc.field(f.ident), s"${idObjc.local(f.ident)}")
            )
            w.wl("];")
          }
          w.wl
        }
        if (r.consts.nonEmpty)
          generateObjcConstants(
            w,
            r.consts,
            noBaseSelf,
            ObjcConstantType.ConstMethod
          )
        // Eq deriving: field-by-field isEqual: and hash, choosing the proper
        // Foundation comparison per marshalled type.
        if (r.derivingTypes.contains(DerivingType.Eq)) {
          w.wl("- (BOOL)isEqual:(id)other")
          w.braced {
            w.w(s"if (![other isKindOfClass:[$self class]])").braced {
              w.wl("return NO;")
            }
            w.wl(s"$self *typedOther = ($self *)other;")
            val skipFirst = SkipFirst()
            w.w(s"return ").nestedN(2) {
              for (f <- r.fields) {
                skipFirst { w.wl(" &&") }
                f.ty.resolved.base match {
                  case MBinary =>
                    w.w(
                      s"[self.${idObjc.field(f.ident)} isEqualToData:typedOther.${idObjc
                        .field(f.ident)}]"
                    )
                  case MList =>
                    w.w(s"[self.${idObjc.field(f.ident)} isEqualToArray:typedOther.${idObjc
                      .field(f.ident)}]")
                  case MSet =>
                    w.w(
                      s"[self.${idObjc.field(f.ident)} isEqualToSet:typedOther.${idObjc
                        .field(f.ident)}]"
                    )
                  case MMap =>
                    w.w(s"[self.${idObjc.field(f.ident)} isEqualToDictionary:typedOther.${idObjc
                      .field(f.ident)}]")
                  case MOptional =>
                    f.ty.resolved.args.head.base match {
                      case df: MDef if df.defType == DEnum =>
                        w.w(
                          s"self.${idObjc.field(f.ident)} == typedOther.${idObjc
                            .field(f.ident)}"
                        )
                      case _ =>
                        w.w(s"((self.${idObjc.field(f.ident)} == nil && typedOther.${idObjc
                          .field(f.ident)} == nil) || ")
                        w.w(s"(self.${idObjc.field(f.ident)} != nil && [self.${idObjc
                          .field(f.ident)} isEqual:typedOther.${idObjc.field(f.ident)}]))")
                    }
                  case MString =>
                    w.w(s"[self.${idObjc.field(f.ident)} isEqualToString:typedOther.${idObjc
                      .field(f.ident)}]")
                  case MDate =>
                    w.w(
                      s"[self.${idObjc.field(f.ident)} isEqualToDate:typedOther.${idObjc
                        .field(f.ident)}]"
                    )
                  case t: MPrimitive =>
                    w.w(s"self.${idObjc.field(f.ident)} == typedOther.${idObjc
                      .field(f.ident)}")
                  case df: MDef =>
                    df.defType match {
                      case DRecord =>
                        w.w(
                          s"[self.${idObjc.field(f.ident)} isEqual:typedOther.${idObjc
                            .field(f.ident)}]"
                        )
                      case DEnum =>
                        w.w(
                          s"self.${idObjc.field(f.ident)} == typedOther.${idObjc
                            .field(f.ident)}"
                        )
                      case _ => throw new AssertionError("Unreachable")
                    }
                  case e: MExtern =>
                    e.defType match {
                      case DRecord =>
                        if (e.objc.pointer.get) {
                          w.w(
                            s"[self.${idObjc.field(f.ident)} isEqual:typedOther.${idObjc
                              .field(f.ident)}]"
                          )
                        } else {
                          w.w(s"self.${idObjc.field(f.ident)} == typedOther.${idObjc
                            .field(f.ident)}")
                        }
                      case DEnum =>
                        w.w(
                          s"self.${idObjc.field(f.ident)} == typedOther.${idObjc
                            .field(f.ident)}"
                        )
                      case _ => throw new AssertionError("Unreachable")
                    }
                  case _ => throw new AssertionError("Unreachable")
                }
              }
            }
            w.wl(";")
          }
          w.wl
          w.wl("- (NSUInteger)hash")
          w.braced {
            w.w(s"return ").nestedN(2) {
              w.w(s"NSStringFromClass([self class]).hash")
              for (f <- r.fields) {
                w.wl(" ^")
                f.ty.resolved.base match {
                  case MOptional =>
                    f.ty.resolved.args.head.base match {
                      case df: MDef if df.defType == DEnum =>
                        w.w(s"(NSUInteger)self.${idObjc.field(f.ident)}")
                      case _ => w.w(s"self.${idObjc.field(f.ident)}.hash")
                    }
                  case t: MPrimitive =>
                    w.w(s"(NSUInteger)self.${idObjc.field(f.ident)}")
                  case df: MDef =>
                    df.defType match {
                      case DEnum =>
                        w.w(s"(NSUInteger)self.${idObjc.field(f.ident)}")
                      case _ => w.w(s"self.${idObjc.field(f.ident)}.hash")
                    }
                  case e: MExtern =>
                    e.defType match {
                      case DEnum =>
                        w.w(s"(NSUInteger)self.${idObjc.field(f.ident)}")
                      case DRecord =>
                        w.w(
                          "(" + e.objc.hash.get
                            .format("self." + idObjc.field(f.ident)) + ")"
                        )
                      case _ => throw new AssertionError("Unreachable")
                    }
                  case _ => w.w(s"self.${idObjc.field(f.ident)}.hash")
                }
              }
            }
            w.wl(";")
          }
          w.wl
        }
        // Emits a three-way comparison for value-typed (non-object) fields.
        def generatePrimitiveOrder(ident: Ident, w: IndentWriter): Unit = {
          w.wl(
            s"if (self.${idObjc.field(ident)} < other.${idObjc.field(ident)}) {"
          ).nested {
            w.wl(s"tempResult = NSOrderedAscending;")
          }
          w.wl(
            s"} else if (self.${idObjc.field(ident)} > other.${idObjc.field(ident)}) {"
          ).nested {
            w.wl(s"tempResult = NSOrderedDescending;")
          }
          w.wl(s"} else {").nested {
            w.wl(s"tempResult = NSOrderedSame;")
          }
          w.wl("}")
        }
        // Ord deriving: lexicographic compare: across fields in declaration order.
        if (r.derivingTypes.contains(DerivingType.Ord)) {
          w.wl(s"- (NSComparisonResult)compare:($self *)other")
          w.braced {
            w.wl("NSComparisonResult tempResult;")
            for (f <- r.fields) {
              f.ty.resolved.base match {
                case MString | MDate =>
                  w.wl(s"tempResult = [self.${idObjc
                    .field(f.ident)} compare:other.${idObjc.field(f.ident)}];")
                case t: MPrimitive => generatePrimitiveOrder(f.ident, w)
                case df: MDef =>
                  df.defType match {
                    case DRecord =>
                      w.wl(s"tempResult = [self.${idObjc
                        .field(f.ident)} compare:other.${idObjc.field(f.ident)}];")
                    case DEnum => generatePrimitiveOrder(f.ident, w)
                    case _ => throw new AssertionError("Unreachable")
                  }
                case e: MExtern =>
                  e.defType match {
                    case DRecord =>
                      if (e.objc.pointer.get) w.wl(s"tempResult = [self.${idObjc
                        .field(f.ident)} compare:other.${idObjc.field(f.ident)}];")
                      else generatePrimitiveOrder(f.ident, w)
                    case DEnum => generatePrimitiveOrder(f.ident, w)
                    case _ => throw new AssertionError("Unreachable")
                  }
                case _ => throw new AssertionError("Unreachable")
              }
              w.w("if (tempResult != NSOrderedSame)").braced {
                w.wl("return tempResult;")
              }
            }
            w.wl("return NSOrderedSame;")
          }
          w.wl
        }
        w.wl("- (NSString *)description")
        w.braced {
          w.w(s"return ").nestedN(2) {
            w.w("[NSString stringWithFormat:@\\"<%@ %p")
            for (f <- r.fields) w.w(s" ${idObjc.field(f.ident)}:%@")
            w.w(">\\", self.class, (void *)self")
            for (f <- r.fields) {
              w.w(", ")
              f.ty.resolved.base match {
                case MOptional => w.w(s"self.${idObjc.field(f.ident)}")
                case t: MPrimitive => w.w(s"@(self.${idObjc.field(f.ident)})")
                case df: MDef =>
                  df.defType match {
                    case DEnum => w.w(s"@(self.${idObjc.field(f.ident)})")
                    case _ => w.w(s"self.${idObjc.field(f.ident)}")
                  }
                case e: MExtern =>
                  if (e.objc.pointer.get) {
                    w.w(s"self.${idObjc.field(f.ident)}")
                  } else {
                    w.w(s"@(self.${idObjc.field(f.ident)})")
                  }
                case _ => w.w(s"self.${idObjc.field(f.ident)}")
              }
            }
          }
          w.wl("];")
        }
        w.wl
        w.wl("@end")
      }
    )
  }
  /**
   * Creates a generated Objective-C file (header or body) in the configured
   * output folder, emitting the autogenerated banner, the collected
   * references and then the content produced by `f`.
   */
  def writeObjcFile(
      objcName: String,
      isHeader: Boolean,
      origin: String,
      refs: Iterable[String],
      f: IndentWriter => Unit
  ) {
    val folder = if (isHeader) spec.objcHeaderOutFolder else spec.objcOutFolder
    val fileName =
      if (isHeader) marshal.headerName(objcName) else bodyName(objcName)
    createFile(
      folder.get,
      fileName,
      (w: IndentWriter) => {
        w.wl("// AUTOGENERATED FILE - DO NOT MODIFY!")
        w.wl("// This file was generated by Djinni from " + origin)
        w.wl
        if (refs.nonEmpty) {
          // Ignore the ! in front of each line; used to put own headers to the top
          // according to Objective-C style guide
          refs.foreach(s => w.wl(if (s.charAt(0) == '!') s.substring(1) else s))
          w.wl
        }
        f(w)
      }
    )
  }
}
| cross-language-cpp/djinni-generator | src/main/scala/djinni/ObjcGenerator.scala | Scala | apache-2.0 | 20,478 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.concurrent.TimeLimits
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.storage.StorageLevel
/**
 * Tests for Dataset/DataFrame caching: storage levels, cache reuse across
 * derived plans, and (non-cascading) cache invalidation semantics.
 */
class DatasetCacheSuite extends QueryTest with SharedSparkSession with TimeLimits {
  import testImplicits._
  /**
   * Asserts that a cached [[Dataset]] will be built using the given number of other cached results.
   */
  private def assertCacheDependency(df: DataFrame, numOfCachesDependedUpon: Int = 1): Unit = {
    val plan = df.queryExecution.withCachedData
    assert(plan.isInstanceOf[InMemoryRelation])
    val internalPlan = plan.asInstanceOf[InMemoryRelation].cacheBuilder.cachedPlan
    assert(internalPlan.find(_.isInstanceOf[InMemoryTableScanExec]).size == numOfCachesDependedUpon)
  }
  test("get storage level") {
    val ds1 = Seq("1", "2").toDS().as("a")
    val ds2 = Seq(2, 3).toDS().as("b")
    // default storage level
    ds1.persist()
    ds2.cache()
    assert(ds1.storageLevel == StorageLevel.MEMORY_AND_DISK)
    assert(ds2.storageLevel == StorageLevel.MEMORY_AND_DISK)
    // unpersist
    ds1.unpersist(blocking = true)
    assert(ds1.storageLevel == StorageLevel.NONE)
    // non-default storage level
    ds1.persist(StorageLevel.MEMORY_ONLY_2)
    assert(ds1.storageLevel == StorageLevel.MEMORY_ONLY_2)
    // joined Dataset should not be persisted
    val joined = ds1.joinWith(ds2, $"a.value" === $"b.value")
    assert(joined.storageLevel == StorageLevel.NONE)
  }
  test("persist and unpersist") {
    val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
    val cached = ds.cache()
    // count triggers the caching action. It should not throw.
    cached.count()
    // Make sure, the Dataset is indeed cached.
    assertCached(cached)
    // Check result.
    checkDataset(
      cached,
      2, 3, 4)
    // Drop the cache.
    cached.unpersist(blocking = true)
    assert(cached.storageLevel == StorageLevel.NONE, "The Dataset should not be cached.")
  }
  test("persist and then rebind right encoder when join 2 datasets") {
    val ds1 = Seq("1", "2").toDS().as("a")
    val ds2 = Seq(2, 3).toDS().as("b")
    ds1.persist()
    assertCached(ds1)
    ds2.persist()
    assertCached(ds2)
    val joined = ds1.joinWith(ds2, $"a.value" === $"b.value")
    checkDataset(joined, ("2", 2))
    assertCached(joined, 2)
    ds1.unpersist(blocking = true)
    assert(ds1.storageLevel == StorageLevel.NONE, "The Dataset ds1 should not be cached.")
    ds2.unpersist(blocking = true)
    assert(ds2.storageLevel == StorageLevel.NONE, "The Dataset ds2 should not be cached.")
  }
  test("persist and then groupBy columns asKey, map") {
    val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
    val grouped = ds.groupByKey(_._1)
    val agged = grouped.mapGroups { (g, iter) => (g, iter.map(_._2).sum) }
    agged.persist()
    checkDataset(
      agged.filter(_._1 == "b"),
      ("b", 3))
    assertCached(agged.filter(_._1 == "b"))
    ds.unpersist(blocking = true)
    assert(ds.storageLevel == StorageLevel.NONE, "The Dataset ds should not be cached.")
    agged.unpersist(blocking = true)
    assert(agged.storageLevel == StorageLevel.NONE, "The Dataset agged should not be cached.")
  }
  test("persist and then withColumn") {
    val df = Seq(("test", 1)).toDF("s", "i")
    val df2 = df.withColumn("newColumn", lit(1))
    df.cache()
    assertCached(df)
    assertCached(df2)
    df.count()
    assertCached(df2)
    df.unpersist(blocking = true)
    assert(df.storageLevel == StorageLevel.NONE)
  }
  test("cache UDF result correctly") {
    val expensiveUDF = udf({x: Int => Thread.sleep(2000); x})
    val df = spark.range(0, 2).toDF("a").repartition(1).withColumn("b", expensiveUDF($"a"))
    val df2 = df.agg(sum(df("b")))
    df.cache()
    df.count()
    assertCached(df2)
    // udf has been evaluated during caching, and thus should not be re-evaluated here
    failAfter(2.seconds) {
      df2.collect()
    }
    df.unpersist(blocking = true)
    assert(df.storageLevel == StorageLevel.NONE)
  }
  test("SPARK-24613 Cache with UDF could not be matched with subsequent dependent caches") {
    val udf1 = udf({x: Int => x + 1})
    val df = spark.range(0, 10).toDF("a").withColumn("b", udf1($"a"))
    val df2 = df.agg(sum(df("b")))
    df.cache()
    df.count()
    df2.cache()
    assertCacheDependency(df2)
  }
  test("SPARK-24596 Non-cascading Cache Invalidation") {
    val df = Seq(("a", 1), ("b", 2)).toDF("s", "i")
    val df2 = df.filter($"i" > 1)
    val df3 = df.filter($"i" < 2)
    df2.cache()
    df.cache()
    df.count()
    df3.cache()
    df.unpersist(blocking = true)
    // df un-cached; df2 and df3's cache plan re-compiled
    assert(df.storageLevel == StorageLevel.NONE)
    assertCacheDependency(df2, 0)
    assertCacheDependency(df3, 0)
  }
  test("SPARK-24596 Non-cascading Cache Invalidation - verify cached data reuse") {
    val expensiveUDF = udf({ x: Int => Thread.sleep(5000); x })
    val df = spark.range(0, 5).toDF("a")
    val df1 = df.withColumn("b", expensiveUDF($"a"))
    val df2 = df1.groupBy($"a").agg(sum($"b"))
    val df3 = df.agg(sum($"a"))
    df1.cache()
    df2.cache()
    df2.collect()
    df3.cache()
    assertCacheDependency(df2)
    df1.unpersist(blocking = true)
    // df1 un-cached; df2's cache plan stays the same
    assert(df1.storageLevel == StorageLevel.NONE)
    assertCacheDependency(df1.groupBy($"a").agg(sum($"b")))
    val df4 = df1.groupBy($"a").agg(sum($"b")).agg(sum("sum(b)"))
    assertCached(df4)
    // reuse loaded cache
    failAfter(3.seconds) {
      checkDataset(df4, Row(10))
    }
    val df5 = df.agg(sum($"a")).filter($"sum(a)" > 1)
    assertCached(df5)
    // first time use, load cache
    checkDataset(df5, Row(10))
  }
  test("SPARK-26708 Cache data and cached plan should stay consistent") {
    val df = spark.range(0, 5).toDF("a")
    val df1 = df.withColumn("b", $"a" + 1)
    val df2 = df.filter($"a" > 1)
    df.cache()
    // Add df1 to the CacheManager; the buffer is currently empty.
    df1.cache()
    // After calling collect(), df1's buffer has been loaded.
    df1.collect()
    // Add df2 to the CacheManager; the buffer is currently empty.
    df2.cache()
    // Verify that df1 is a InMemoryRelation plan with dependency on another cached plan.
    assertCacheDependency(df1)
    val df1InnerPlan = df1.queryExecution.withCachedData
      .asInstanceOf[InMemoryRelation].cacheBuilder.cachedPlan
    // Verify that df2 is a InMemoryRelation plan with dependency on another cached plan.
    assertCacheDependency(df2)
    df.unpersist(blocking = true)
    // Verify that df1's cache has stayed the same, since df1's cache already has data
    // before df.unpersist().
    val df1Limit = df1.limit(2)
    val df1LimitInnerPlan = df1Limit.queryExecution.withCachedData.collectFirst {
      case i: InMemoryRelation => i.cacheBuilder.cachedPlan
    }
    assert(df1LimitInnerPlan.isDefined && df1LimitInnerPlan.get == df1InnerPlan)
    // Verify that df2's cache has been re-cached, with a new physical plan rid of dependency
    // on df, since df2's cache had not been loaded before df.unpersist().
    val df2Limit = df2.limit(2)
    val df2LimitInnerPlan = df2Limit.queryExecution.withCachedData.collectFirst {
      case i: InMemoryRelation => i.cacheBuilder.cachedPlan
    }
    assert(df2LimitInnerPlan.isDefined &&
      df2LimitInnerPlan.get.find(_.isInstanceOf[InMemoryTableScanExec]).isEmpty)
  }
  test("SPARK-27739 Save stats from optimized plan") {
    withTable("a") {
      spark.range(4)
        .selectExpr("id", "id % 2 AS p")
        .write
        .partitionBy("p")
        .saveAsTable("a")
      val df = sql("SELECT * FROM a WHERE p = 0")
      df.cache()
      df.count()
      df.queryExecution.withCachedData match {
        case i: InMemoryRelation =>
          // Optimized plan has non-default size in bytes
          assert(i.statsOfPlanToCache.sizeInBytes !==
            df.sparkSession.sessionState.conf.defaultSizeInBytes)
      }
    }
  }
}
| jkbradley/spark | sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala | Scala | apache-2.0 | 9,064 |
/*
*
* * Copyright 2016 HM Revenue & Customs
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*
*/
package addressbase
import uk.gov.hmrc.address.osgb.Document
object OSClassification {

  // Record-type discriminator for classification rows in the source data.
  val RecordId = "32"

  // Column positions of the fields we extract from a raw CSV row.
  val idx = OSClassificationIdx(
    uprn = 3,
    code = 5,
    scheme = 6,
    version = 7)

  /** Builds an [[OSClassification]] from one raw CSV row. */
  def apply(csv: Array[String]): OSClassification = {
    val schemeName = csv(idx.scheme)
    val schemeVersion = csv(idx.version)
    // Look the scheme/version pair up among the known classification
    // schemes; unrecognised pairs yield None.
    val knownScheme = KnownClassificationSchemes.values.find { s =>
      s.scheme == schemeName && s.version == schemeVersion
    }
    OSClassification(
      csv(idx.uprn).toLong,
      csv(idx.code),
      knownScheme.map(_.ordinal))
  }
}
// Column indices (0-based) of the classification fields within a raw CSV row.
case class OSClassificationIdx(uprn: Int,
                               code: Int,
                               scheme: Int,
                               version: Int)
// One classification record keyed by UPRN. `scheme` holds the ordinal of the
// matched known classification scheme, or None when the scheme/version pair
// was not recognised.
case class OSClassification(uprn: Long,
                            code: String,
                            scheme: Option[Int]) extends Document {

  // For use as input to MongoDbObject (hence it's not a Map)
  // NOTE: element order here is preserved in the stored document.
  def tupled: List[(String, Any)] = List(
    "uprn" -> uprn,
    "code" -> code,
    "scheme" -> scheme)

  // Already in canonical form; nothing to normalise.
  def normalise: OSClassification = this
}
| andywhardy/address-reputation-ingester | app/addressbase/OSClassification.scala | Scala | apache-2.0 | 1,682 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.freespec.getfixture
import org.scalatest.FreeSpec
import collection.mutable.ListBuffer
class ExampleSpec extends FreeSpec {

  // Holds the mutable objects each test works on. A named class avoids the
  // reflective member access incurred by the anonymous structural type
  // (`new { val ... }`) the original fixture returned.
  final class Fixture {
    val builder = new StringBuilder("ScalaTest is ")
    val buffer = new ListBuffer[String]
  }

  // Returns a brand-new fixture so every test starts from clean state and
  // tests cannot interfere with one another.
  def fixture = new Fixture

  "Testing" - {
    "should be easy" in {
      val f = fixture
      f.builder.append("easy!")
      assert(f.builder.toString === "ScalaTest is easy!")
      assert(f.buffer.isEmpty)
      f.buffer += "sweet"
    }

    "should be fun" in {
      val f = fixture
      f.builder.append("fun!")
      assert(f.builder.toString === "ScalaTest is fun!")
      assert(f.buffer.isEmpty)
    }
  }
}
| travisbrown/scalatest | examples/src/main/scala/org/scalatest/examples/freespec/getfixture/ExampleSpec.scala | Scala | apache-2.0 | 1,290 |
//
// Statistics.scala -- Scala object Statistics
// Project OrcScala
//
// Created by amp on Aug 03, 2017.
//
// Copyright (c) 2017 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.ast.porc
import orc.util.AutoFileDataOutput
import java.util.logging.Level
object Statistics {
  /** Collect the direct nodes in `ast`.
    *
    * @return the set of direct nodes and the set of nested expressions.
    */
  def collectDirect(ast: PorcAST.Z): (Set[PorcAST.Z], Set[PorcAST.Z]) = {
    // Continuation and method nodes start a new closure: they count as
    // "nested" and their bodies are excluded from this node's direct set.
    val nested: Set[PorcAST.Z] = ast match {
      case Continuation.Z(args, body) => Set(ast)
      case Method.Z(name, isRoutine, args, body) => Set(ast)
      case _ => Set()
    }
    val direct = (ast.subtrees.toSet) -- nested.flatMap(_.subtrees)
    val (directs, nesteds) = (direct.map(collectDirect) + ((direct, nested))).unzip
    (directs.flatten + ast, nesteds.flatten)
  }
  /** Collect the direct nodes in every closure body in `ast`.
    *
    * @return a map from each closure root to its set of direct nodes.
    */
  def collect(ast: PorcAST.Z): Map[PorcAST.Z, Set[PorcAST.Z]] = {
    val (direct, nested) = collectDirect(ast)
    // Recurse into each nested closure's body (its single subtree).
    val nestedMaps = nested.flatMap(a => collect(a.subtrees.head))
    (nestedMaps + (ast -> direct)).toMap
  }
  // Node class -> reporting category. A node may match several rows; every
  // node matches the final `classOf[PorcAST] -> 'All` row.
  val tags = Set(
    classOf[HaltToken] -> 'ExecutionControl,
    classOf[NewToken] -> 'ExecutionControl,
    classOf[NewCounter] -> 'ExecutionControl,
    classOf[NewTerminator] -> 'ExecutionControl,
    classOf[CheckKilled] -> 'ExecutionControl,
    classOf[Kill] -> 'ExecutionControl,
    classOf[GetField] -> 'DataFlow,
    classOf[GetMethod] -> 'DataFlow,
    classOf[Force] -> 'DataFlow,
    classOf[Bind] -> 'DataFlow,
    classOf[BindStop] -> 'DataFlow,
    classOf[CallContinuation] -> 'DataFlow,
    classOf[New] -> 'Computation,
    classOf[MethodCPSCall] -> 'Computation,
    classOf[MethodDirectCall] -> 'Computation,
    classOf[Let] -> 'Free,
    classOf[Argument] -> 'Free,
    classOf[Sequence] -> 'Free,
    classOf[PorcAST] -> 'All,
  )
  // Counts how many of the given nodes fall into each tag category;
  // missing categories default to 0.
  def count(asts: Set[PorcAST.Z]): Map[Symbol, Int] = {
    val foundTags = for(a <- asts.toSeq; (c, t) <- tags if c.isInstance(a.value)) yield t
    foundTags.groupBy(identity).mapValues(_.size).view.force.withDefaultValue(0)
  }
  // Formats a fraction as a percentage with one decimal place, e.g. "12.5%".
  def percent(f: Double) = {
    (f * 100).formatted("%.1f%%")
  }
  // Logs per-closure node-category statistics for `ast` (only when FINER
  // logging is enabled) and appends them to the "porc_statistics" data file.
  def apply(ast: PorcAST) = {
    if (Logger.julLogger.isLoggable(Level.FINER)) {
      val dataout = new AutoFileDataOutput("porc_statistics", true)
      val rootdatalogger = dataout.logger(Seq("program" -> ast.sourceTextRange.toString))
      val m = collect(ast.toZipper)
      for((a, direct) <- m) {
        val datalogger = rootdatalogger.subLogger(Seq("closure" -> a.value.prettyprintWithoutNested().take(100).replace("\\n", " ").replace("\\t", " ")))
        //println(direct.map(_.value.prettyprintWithoutNested()).mkString("=======\\n","\\n---\\n","\\n======="))
        val counts = count(direct)
        //println(counts)
        val computation = counts('Computation)
        val dataFlow = counts('DataFlow)
        val execution = counts('ExecutionControl)
        // "Free" nodes carry no runtime cost, so subtract them from the total.
        val nonfree = counts('All) - counts('Free)
        datalogger.log("computation", computation)
        datalogger.log("dataFlow", dataFlow)
        datalogger.log("executionControl", execution)
        datalogger.log("nonfree", nonfree)
        Logger.fine(s"""
          |${a.value.prettyprintWithoutNested()}
          |-------
          |computation = $computation, data flow = $dataFlow, execution control = $execution, nonfree = $nonfree,
          |computation fract = ${percent(computation.toFloat / nonfree)}, data flow fract = ${percent(dataFlow.toFloat / nonfree)}, work fract = ${percent((dataFlow.toFloat + computation) / nonfree)}
          """.stripMargin.stripLineEnd)
      }
      dataout.close()
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.{ExecEdge, ExecNode}
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecTemporalSort
import org.apache.flink.table.planner.plan.utils.{RelExplainUtil, SortUtil}
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel._
import org.apache.calcite.rel.core.Sort
import org.apache.calcite.rex.RexNode
import scala.collection.JavaConversions._
/**
* Stream physical RelNode for time-ascending-order [[Sort]] without `limit`.
*
* @see [[StreamPhysicalRank]] which must be with `limit` order by.
* @see [[StreamPhysicalSort]] which can be used for testing now, its sort key can be any type.
*/
class StreamPhysicalTemporalSort(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    sortCollation: RelCollation)
  extends Sort(cluster, traitSet, inputRel, sortCollation)
  with StreamPhysicalRel {

  // This node declares no watermark requirement of its own.
  override def requireWatermark: Boolean = false

  // Recreates this node with a new collation. `offset` and `fetch` are
  // deliberately dropped: temporal sort never carries a limit (see class doc).
  override def copy(
      traitSet: RelTraitSet,
      input: RelNode,
      newCollation: RelCollation,
      offset: RexNode,
      fetch: RexNode): Sort = {
    new StreamPhysicalTemporalSort(cluster, traitSet, input, newCollation)
  }

  // Adds the input and the order-by keys to the plan explanation output.
  override def explainTerms(pw: RelWriter): RelWriter = {
    pw.input("input", getInput())
      .item("orderBy", RelExplainUtil.collationToString(sortCollation, getRowType))
  }

  // Translates this physical node into its runtime exec node.
  override def translateToExecNode(): ExecNode[_] = {
    new StreamExecTemporalSort(
      SortUtil.getSortSpec(sortCollation.getFieldCollations),
      ExecEdge.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalTemporalSort.scala | Scala | apache-2.0 | 2,618 |
package Constants
import Chisel._
object AluOpConstants{
var ORI_ALU_OP_ADD = 1
var ORI_ALU_OP_SUB = 2
var ORI_ALU_OP_SLL = 3
var ORI_ALU_OP_SLR = 4
var ORI_ALU_OP_SRL = 5
var ORI_ALU_OP_SRA = 6
var ORI_ALU_OP_AND = 7
var ORI_ALU_OP_OR = 8
var ORI_ALU_OP_XOR = 9
var ORI_ALU_OP_SLT = 10
var ORI_ALU_OP_SLTU = 11
var ORI_ALU_OP_NOP = 0
var ALU_OP_ADD = UInt(ORI_ALU_OP_ADD ,5)
var ALU_OP_SUB = UInt(ORI_ALU_OP_SUB ,5)
var ALU_OP_SLL = UInt(ORI_ALU_OP_SLL ,5)
var ALU_OP_SLR = UInt(ORI_ALU_OP_SLR ,5)
var ALU_OP_SRL = UInt(ORI_ALU_OP_SRL ,5)
var ALU_OP_SRA = UInt(ORI_ALU_OP_SRA ,5)
var ALU_OP_AND = UInt(ORI_ALU_OP_AND ,5)
var ALU_OP_OR = UInt(ORI_ALU_OP_OR ,5)
var ALU_OP_XOR = UInt(ORI_ALU_OP_XOR ,5)
var ALU_OP_SLT = UInt(ORI_ALU_OP_SLT ,5)
var ALU_OP_SLTU = UInt(ORI_ALU_OP_SLTU,5)
var ALU_OP_NOP = UInt(ORI_ALU_OP_NOP ,5)
}
// Priority encoder producing the index of the LOWEST set bit of a 64-bit
// input (class name kept for compatibility; "Founder" is a historical typo
// for "Finder").
class LowestTrueBitIndexFounder extends Module{
  val io = new Bundle{
    val input_bits = UInt(INPUT,64)
    val index = UInt(OUTPUT,6)
  }
  // Default output when no bit is set (also correct when only bit 63 is set).
  io.index := UInt(63)
  // Chisel uses last-connect semantics: of several `when` blocks that fire,
  // the one written LAST wins. The original loop scanned 0..63, which made
  // the HIGHEST set bit win. Scan from bit 63 down to bit 0 so the lowest
  // set bit gets the final (winning) assignment.
  for (i <- 63 to 0 by -1) {
    when(io.input_bits(i) === UInt(1)) {
      io.index := UInt(i)
    }
  }
}
| Coxious/MixCPU | project/const.scala | Scala | gpl-2.0 | 1,143 |
package org.apache.spot.netflow
import org.apache.spark.sql.functions._
import org.apache.spot.utilities.Quantiles
import org.apache.spot.utilities.data.validation.InvalidDataHandler
import scala.util.{Success, Try}
/**
 * Pair of netflow words extracted from a netflow record - one for the source IP and one for the destination IP.
 *
 * Each word encodes a port token plus time/byte/packet quantile bins joined
 * with '_'; see [[FlowWordCreator.flowWords]] for the exact construction.
 *
 * @param srcWord The word summarizing the communication from the POV of the source IP.
 * @param dstWord The word summarizing the communication from the POV of the destination IP.
 */
case class FlowWords(srcWord: String, dstWord: String)
/**
* Contains methods and Spark SQL udf objects for calculation of netflow words from netflow records.
*
* @param timeCuts Quantile cut-offs for the time of day. Time of day is a floating point number
* >= 0.0 and < 24.0
* @param ibytCuts Quantile cut-offs for the inbytes.
* @param ipktCuts Quantile cut-offs if the incoming packet counts.
*/
class FlowWordCreator(timeCuts: Array[Double],
                      ibytCuts: Array[Double],
                      ipktCuts: Array[Double]) extends Serializable {

  /**
   * Spark SQL UDF producing the word for the source-IP side of a flow.
   *
   * @return String "word" summarizing a netflow connection.
   */
  def srcWordUDF = udf((hour: Int,
                        minute: Int,
                        second: Int,
                        srcIP: String,
                        dstIP: String,
                        srcPort: Int,
                        dstPort: Int,
                        ipkt: Long,
                        ibyt: Long) =>
    flowWords(hour, minute, second, srcPort, dstPort, ipkt, ibyt).srcWord)

  /**
   * Spark SQL UDF producing the word for the destination-IP side of a flow.
   *
   * @return String "word" summarizing a netflow connection.
   */
  def dstWordUDF = udf((hour: Int,
                        minute: Int,
                        second: Int,
                        srcIP: String,
                        dstIP: String,
                        srcPort: Int,
                        dstPort: Int,
                        ipkt: Long,
                        ibyt: Long) =>
    flowWords(hour, minute, second, srcPort, dstPort, ipkt, ibyt).dstWord)

  /**
   * Computes the source and destination words summarizing a netflow record.
   * Any failure while building the words yields the invalid-data word pair.
   *
   * @return [[FlowWords]] containing source and destination words.
   */
  def flowWords(hour: Int, minute: Int, second: Int, srcPort: Int, dstPort: Int, ipkt: Long, ibyt: Long): FlowWords = {
    Try {
      // Fractional hour of day, binned against the time quantile cut-offs.
      val dayFraction: Double = hour.toDouble + minute.toDouble / 60 + second.toDouble / 3600
      val timeBin = Quantiles.bin(dayFraction, timeCuts)
      val bytesBin = Quantiles.bin(ibyt, ibytCuts)
      val pktsBin = Quantiles.bin(ipkt, ipktCuts)

      val LowToLowPortEncoding = 111111
      val HighToHighPortEncoding = 333333

      // Word built from a port token followed by the three quantile bins.
      def word(portToken: Any): String =
        Array(portToken, timeBin, bytesBin, pktsBin).mkString("_")

      // Both directions share the same word.
      def symmetric(portToken: Any): FlowWords = {
        val w = word(portToken)
        FlowWords(srcWord = w, dstWord = w)
      }

      // Cases are evaluated in the same order as the original if/else chain,
      // so out-of-range (negative) ports fall through identically.
      (srcPort, dstPort) match {
        case (0, 0) =>
          symmetric("0")
        case (s, 0) if s > 0 =>
          val w = word(s)
          FlowWords(srcWord = "-1_" + w, dstWord = w)
        case (0, d) if d > 0 =>
          val w = word(d)
          FlowWords(srcWord = w, dstWord = "-1_" + w)
        case (s, d) if s <= 1024 && d <= 1024 =>
          symmetric(LowToLowPortEncoding)
        case (s, d) if s <= 1024 && d > 1024 =>
          val w = word(s)
          FlowWords(srcWord = "-1_" + w, dstWord = w)
        case (s, d) if s > 1024 && d <= 1024 =>
          val w = word(d)
          FlowWords(srcWord = w, dstWord = "-1_" + w)
        case _ =>
          // remaining case: srcPort > 1024 && dstPort > 1024
          symmetric(HighToHighPortEncoding)
      }
    } match {
      case Success(words) => words
      case _ => FlowWords(InvalidDataHandler.WordError, InvalidDataHandler.WordError)
    }
  }
}
| kpeiruza/incubator-spot | spot-ml/src/main/scala/org/apache/spot/netflow/FlowWordCreator.scala | Scala | apache-2.0 | 4,692 |
/**
* Created by Variant on 16/3/15.
*/
object ForFuctionLazy {
  def main(args: Array[String]) {
    // Nested generators: iterate over every (i, j) pair.
    for (i <- 1 to 2;j <-1 to 2) print(100*i +j + " ")
    println
    // Guard clause: filter pairs inside the for comprehension.
    for (i <- 1 to 2;j <-1 to 2 if i != j) print(100*i +j + " ")
    println
    // Anonymous functions are important: a function is itself a value.
    def addA(x : Int) = x + 100
    val add = (x : Int) => x+100
    println("The result from a function is :" + addA(2))
    println("The result from a val is :" + add(2))
    // A recursive function must declare its return type explicitly.
    def fac(n:Int):Int = if(n <= 0) 1 else n * fac(n - 1)
    // Default parameter values.
    def combine(Content:String,left:String = "{",right:String = "}"): Unit ={
      println(left +Content +right)
    }
    // Variable-length (varargs) parameters.
    def connected(args:Int*): Int ={
      var result = 0
      for (arg <- args) result += arg
      result
    }
    println("The rsult from a connected is:" + connected(1,2,3,4,5))
    // Lazy values are instantiated/evaluated only when first used.
  }
}
| sparkLiwei/ProgrammingNote | scalaLearning/scalaBase/ForFuctionLazy.scala | Scala | cc0-1.0 | 1,002 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.filereader
import org.apache.samza.system.SystemFactory
import org.apache.samza.config.Config
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.SamzaException
class FileReaderSystemFactory extends SystemFactory {

  /**
   * Builds a [[FileReaderSystemConsumer]]. Queue size and polling sleep
   * time are read from the config, falling back to 10000 entries and
   * 500 ms respectively when absent.
   */
  def getConsumer(systemName: String, config: Config, registry: MetricsRegistry) = {
    val keyPrefix = "systems." + systemName
    val queueSize = config.getInt(keyPrefix + ".queue.size", 10000)
    val pollingSleepMs = config.getInt(keyPrefix + ".polling.sleep.ms", 500)
    new FileReaderSystemConsumer(systemName, registry, queueSize, pollingSleepMs)
  }

  /**
   * This system only reads files; producing is unsupported, so requesting
   * a producer always throws.
   */
  def getProducer(systemName: String, config: Config, registry: MetricsRegistry) =
    throw new SamzaException("not supposed to write to files")

  /** Returns the admin for the file-reader system. */
  def getAdmin(systemName: String, config: Config) =
    new FileReaderSystemAdmin
}
} | InnovaCo/samza | samza-core/src/main/scala/org/apache/samza/system/filereader/FileReaderSystemFactory.scala | Scala | apache-2.0 | 2,063 |
package circumflex
package markeven
import core._
import java.io.{StringWriter, Writer, Serializable}
import java.lang.StringBuilder
import java.util.regex.Pattern
import collection.mutable.HashSet
import java.util.Random
object const {
  // Pre-compiled regular expressions shared by the markeven processors.
  val newLine = Pattern.compile("\r\n|\n|\r")
  val empty = Pattern.compile("\\s*")
  // NOTE: name misspells "Reference"; kept for source compatibility.
  val entityRefefence = Pattern.compile("&(?:[a-zA-Z]+|(?:#[0-9]+|#[xX][0-9a-fA-F]+));")
  val htmlTag = Pattern.compile("</?([a-zA-Z]+)\\b.*?(/)?>", Pattern.DOTALL)
  val htmlComment = Pattern.compile("<!--.*?-->", Pattern.DOTALL)
  val backslashEscape = Pattern.compile("\\\\([\\.\\+\\*\\[\\]\\(\\)\\`\\{\\}\\_\\!\\-\\|\\~\\\\])")
  val fragment = Pattern.compile("\\{\\{([a-zA-Z0-9_-]+)\\}\\}")
  val fragmentBlock = Pattern.compile("\\{\\{\\{([a-zA-Z0-9_-]+)\\}\\}\\}\\s*")
  val inlineLink = Pattern.compile("\\((.*?)\\)")
  val refLink = Pattern.compile("\\[(.+?)\\]")
  val selector = Pattern.compile(
    "\\{(#[a-zA-Z0-9_-]+)?((\\.[a-zA-Z0-9_-]+)+)?\\}(?=[ \\t]*(?:\\n|\\r|\\Z))")
  val hr = Pattern.compile("---\\s*", Pattern.DOTALL)
  val table = Pattern.compile("-{3,}>?\\s+.+[\n|\r]\\s*-{3,}\\s*", Pattern.DOTALL)
  val tableSeparatorLine = Pattern.compile("[- :|]+(?=(?:\r\n|\n|\r)(?!\n|\r|\\Z))")
  val tableEndLine = Pattern.compile("\\s*-{3,}\\s*$")
  val ty_leftQuote = Pattern.compile("(?<=\\s|\\A|\\()(?:\"|")(?=\\S)")
  val ty_rightQuote = Pattern.compile("(?<=[\\p{L}\\d\\)\\]>?!.;:])(?:\"|")(?=[.,;:?!*\\)\\]<]|\\s|\\Z)")

  // HTML elements treated as block-level. Fixed typos: "blockquote" (was
  // "blockqoute") and "noscript" (was "nospript"), so these standard tags
  // are now recognised correctly.
  val blockTags = HashSet[String]("address", "article", "aside", "blockquote", "canvas",
    "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1",
    "h2", "h3", "h4", "h5", "h6", "header", "hgroup", "hr", "noscript", "ol", "output",
    "p", "pre", "section", "table", "ul")
}
// Scrambler
class TextScrambler(val alphabet: String,
                    val percentage: Int) {

  /** Wraps one decoy character in the scrambler marker span. */
  def wrap(char: Char): String = "<span class=\"scr\">" + char + "</span>"

  // Probability of emitting a decoy span per call, clamped to [0, 0.5]
  // (percentages below 0 become 0, above 50 become 50).
  val threshold: Double = percentage.max(0).min(50) / 100d

  protected val rnd = new Random

  /** Returns a decoy span with probability `threshold`, otherwise "". */
  def getSpan: String =
    if (rnd.nextDouble <= threshold)
      wrap(alphabet.charAt(rnd.nextInt(alphabet.size)))
    else ""
}
// Scrambler that never emits decoy spans (empty alphabet, zero probability).
object EmptyTextScrambler extends TextScrambler("", 0) {
  override def getSpan = ""
}
// Processor
trait Processor {
  // Destination writer receiving rendered output.
  def out: Writer
  // Renderer supplying configuration for this processor.
  def renderer: MarkevenRenderer
  /** Wraps `cs` in a Walker (reusing it if it already is one) and runs it. */
  def process(cs: CharSequence) {
    val walk = cs match {
      case w: Walker => w
      case _ => new SubSeqWalker(cs)
    }
    run(walk)
  }
  // Core processing loop, implemented by concrete processors.
  def run(walk: Walker)
}
// Resolvables
// A fragment body together with the mode it should be processed in.
class FragmentDef(val body: String,
    val mode: ProcessingMode = ProcessingMode.NORMAL)
  extends Serializable
// Marker for how a fragment body is interpreted; the three modes are
// distinguished by identity and handled by the processors that consume them.
trait ProcessingMode extends Serializable
object ProcessingMode {
  object NORMAL extends ProcessingMode
  object CODE extends ProcessingMode
  object PLAIN extends ProcessingMode
}
class LinkDef(_url: String,
              _title: String = "")
    extends Serializable {

  // URL with ampersands escaped for HTML output.
  val url = ampEscape.matcher(_url).replaceAll("&")
  // Title run through the HTML wrapper.
  val title = wrapHtml(_title)

  // Extra attributes appended to rendered tags; subclasses may override.
  def customAttrs: String = ""

  /** Writes an `<a>` element for this link, wrapping `text`. */
  def writeLink(w: Writer, text: String) {
    val sb = new StringBuilder
    sb.append("<a href=\"").append(url).append("\"")
    if (title != "")
      sb.append(" title=\"").append(title).append("\"")
    if (customAttrs != "")
      sb.append(" ").append(customAttrs)
    sb.append(">").append(text).append("</a>")
    w.write(sb.toString)
  }

  /** Renders the `<a>` element as a string. */
  def toLink(text: String) = {
    val w = new StringWriter
    writeLink(w, text)
    w.toString
  }

  /** Writes an `<img>` element for this link with alternative text `alt`. */
  def writeMedia(w: Writer, alt: String) {
    val sb = new StringBuilder
    sb.append("<img src=\"").append(url).append("\"")
    if (title != "")
      sb.append(" title=\"").append(title).append("\"")
    sb.append(" alt=\"").append(alt).append("\"")
    if (customAttrs != "")
      sb.append(" ").append(customAttrs)
    sb.append("/>")
    w.write(sb.toString)
  }

  /** Renders the `<img>` element as a string. */
  def toMedia(alt: String) = {
    val w = new StringWriter
    writeMedia(w, alt)
    w.toString
  }
}
// Block selector
class Selector(val renderer: MarkevenRenderer,
               var id: String = "",
               var classes: Seq[String] = Nil) {
  // Returns the current per-context id counter and post-increments it;
  // the counter lives in the context under "markeven.processor.idCounter".
  def nextIdCounter(): Int = {
    val result = ctx.getAs[Int]("markeven.processor.idCounter").getOrElse(0)
    ctx.update("markeven.processor.idCounter", result + 1)
    result
  }
  // Constructor side effect: auto-assign an id when none was supplied and
  // the renderer configures an auto-id prefix.
  if (id == "" && renderer.autoAssignIdsPrefix != "") {
    id = renderer.autoAssignIdsPrefix + "-" + nextIdCounter()
  }
  // Writes the id/class (and optional data-source-index) attributes for
  // the block whose source index is `idx`.
  def writeAttrs(w: Writer, idx: Int) {
    if (id != "") {
      w.write(" id=\"")
      w.write(id)
      w.write("\"")
    }
    if (classes.size > 0) {
      w.write(" class=\"")
      w.write(classes.mkString(" "))
      w.write("\"")
    }
    if (renderer.includeSourceIndex) {
      w.write(" data-source-index=\"")
      w.write(idx.toString)
      w.write("\"")
    }
  }
  // Renders the selector in its source notation, e.g. {#id.class1.class2}.
  override def toString = {
    val b = new StringBuilder("{")
    if (id != "")
      b.append("#").append(id)
    classes.foreach(c => b.append(".").append(c))
    b.append("}").toString
  }
}
| inca/circumflex | markeven/src/main/scala/common.scala | Scala | bsd-2-clause | 5,093 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.text.SimpleDateFormat
import java.util.{Calendar, Locale, TimeZone}
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.{PST, UTC_OPT}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
/**
 * Unit tests for the CSV expressions: `from_csv` ([[CsvToStructs]]),
 * `schema_of_csv` ([[SchemaOfCsv]]) and `to_csv` ([[StructsToCsv]]).
 */
class CsvExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with PlanTestBase {
  // Byte sequence that is not valid UTF-8 text, used to exercise parsing
  // of malformed input.
  val badCsv = "\\u0000\\u0000\\u0000A\\u0001AAA"
  test("from_csv") {
    val csvData = "1"
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal(csvData), UTC_OPT),
      InternalRow(1)
    )
  }
  test("from_csv - invalid data") {
    val csvData = "---"
    val schema = StructType(StructField("a", DoubleType) :: Nil)
    // Permissive mode yields a row of nulls instead of failing.
    checkEvaluation(
      CsvToStructs(schema, Map("mode" -> PermissiveMode.name), Literal(csvData), UTC_OPT),
      InternalRow(null))
    // Default mode is Permissive
    checkEvaluation(CsvToStructs(schema, Map.empty, Literal(csvData), UTC_OPT), InternalRow(null))
  }
  test("from_csv null input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal.create(null, StringType), UTC_OPT),
      null
    )
  }
  test("from_csv bad UTF-8") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal(badCsv), UTC_OPT),
      InternalRow(null))
  }
  test("from_csv with timestamp") {
    val schema = StructType(StructField("t", TimestampType) :: Nil)
    val csvData1 = "2016-01-01T00:00:00.123Z"
    var c = Calendar.getInstance(DateTimeUtils.TimeZoneUTC)
    c.set(2016, 0, 1, 0, 0, 0)
    c.set(Calendar.MILLISECOND, 123)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal(csvData1), UTC_OPT),
      InternalRow(c.getTimeInMillis * 1000L)
    )
    // The result doesn't change because the CSV string includes timezone string ("Z" here),
    // which means the string represents the timestamp string in the timezone regardless of
    // the timeZoneId parameter.
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal(csvData1), Option(PST.getId)),
      InternalRow(c.getTimeInMillis * 1000L)
    )
    // Without a timezone suffix, the session/option timezone determines the result.
    val csvData2 = "2016-01-01T00:00:00"
    for (zid <- DateTimeTestUtils.outstandingZoneIds) {
      c = Calendar.getInstance(TimeZone.getTimeZone(zid))
      c.set(2016, 0, 1, 0, 0, 0)
      c.set(Calendar.MILLISECOND, 0)
      checkEvaluation(
        CsvToStructs(
          schema,
          Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss"),
          Literal(csvData2),
          Option(zid.getId)),
        InternalRow(c.getTimeInMillis * 1000L)
      )
      checkEvaluation(
        CsvToStructs(
          schema,
          Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
            DateTimeUtils.TIMEZONE_OPTION -> zid.getId),
          Literal(csvData2),
          UTC_OPT),
        InternalRow(c.getTimeInMillis * 1000L)
      )
    }
  }
  test("from_csv empty input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal.create(" ", StringType), UTC_OPT),
      InternalRow(null)
    )
  }
  test("forcing schema nullability") {
    val input = """1,,"foo""""
    val csvSchema = new StructType()
      .add("a", LongType, nullable = false)
      .add("b", StringType, nullable = false)
      .add("c", StringType, nullable = false)
    val output = InternalRow(1L, null, UTF8String.fromString("foo"))
    val expr = CsvToStructs(csvSchema, Map.empty, Literal.create(input, StringType), UTC_OPT)
    checkEvaluation(expr, output)
    // The expression's result schema must be forced to nullable regardless
    // of the user-supplied nullability flags.
    val schema = expr.dataType
    val schemaToCompare = csvSchema.asNullable
    assert(schemaToCompare == schema)
  }
  test("from_csv missing columns") {
    val schema = new StructType()
      .add("a", IntegerType)
      .add("b", IntegerType)
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal.create("1"), UTC_OPT),
      InternalRow(1, null)
    )
  }
  test("unsupported mode") {
    val csvData = "---"
    val schema = StructType(StructField("a", DoubleType) :: Nil)
    val exception = intercept[TestFailedException] {
      checkEvaluation(
        CsvToStructs(schema, Map("mode" -> DropMalformedMode.name), Literal(csvData), UTC_OPT),
        InternalRow(null))
    }.getCause
    assert(exception.getMessage.contains("from_csv() doesn't support the DROPMALFORMED mode"))
  }
  test("infer schema of CSV strings") {
    checkEvaluation(new SchemaOfCsv(Literal.create("1,abc")), "STRUCT<`_c0`: INT, `_c1`: STRING>")
  }
  test("infer schema of CSV strings by using options") {
    checkEvaluation(
      new SchemaOfCsv(Literal.create("1|abc"), Map("delimiter" -> "|")),
      "STRUCT<`_c0`: INT, `_c1`: STRING>")
  }
  test("to_csv - struct") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val struct = Literal.create(create_row(1), schema)
    checkEvaluation(StructsToCsv(Map.empty, struct, UTC_OPT), "1")
  }
  test("to_csv null input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val struct = Literal.create(null, schema)
    checkEvaluation(
      StructsToCsv(Map.empty, struct, UTC_OPT),
      null
    )
  }
  test("to_csv with timestamp") {
    val schema = StructType(StructField("t", TimestampType) :: Nil)
    val c = Calendar.getInstance(DateTimeUtils.TimeZoneUTC)
    c.set(2016, 0, 1, 0, 0, 0)
    c.set(Calendar.MILLISECOND, 0)
    val struct = Literal.create(create_row(c.getTimeInMillis * 1000L), schema)
    checkEvaluation(StructsToCsv(Map.empty, struct, UTC_OPT), "2016-01-01T00:00:00.000Z")
    checkEvaluation(
      StructsToCsv(Map.empty, struct, Option(PST.getId)), "2015-12-31T16:00:00.000-08:00")
    checkEvaluation(
      StructsToCsv(
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
          DateTimeUtils.TIMEZONE_OPTION -> UTC_OPT.get),
        struct,
        UTC_OPT),
      "2016-01-01T00:00:00"
    )
    checkEvaluation(
      StructsToCsv(
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
          DateTimeUtils.TIMEZONE_OPTION -> PST.getId),
        struct,
        UTC_OPT),
      "2015-12-31T16:00:00"
    )
  }
  test("parse date with locale") {
    Seq("en-US", "ru-RU").foreach { langTag =>
      val locale = Locale.forLanguageTag(langTag)
      val date = new SimpleDateFormat("yyyy-MM-dd").parse("2018-11-05")
      val schema = new StructType().add("d", DateType)
      val dateFormat = "MMM yyyy"
      val sdf = new SimpleDateFormat(dateFormat, locale)
      val dateStr = sdf.format(date)
      val options = Map("dateFormat" -> dateFormat, "locale" -> langTag)
      checkEvaluation(
        CsvToStructs(schema, options, Literal.create(dateStr), UTC_OPT),
        InternalRow(17836)) // number of days from 1970-01-01
    }
  }
  test("verify corrupt column") {
    checkExceptionInExpression[AnalysisException](
      CsvToStructs(
        schema = StructType.fromDDL("i int, _unparsed boolean"),
        options = Map("columnNameOfCorruptRecord" -> "_unparsed"),
        child = Literal.create("a"),
        timeZoneId = UTC_OPT),
      expectedErrMsg = "The field for corrupt records must be string type and nullable")
  }
  test("from/to csv with intervals") {
    val schema = new StructType().add("a", "interval")
    checkEvaluation(
      StructsToCsv(Map.empty, Literal.create(create_row(new CalendarInterval(1, 2, 3)), schema)),
      "1 months 2 days 0.000003 seconds")
    checkEvaluation(
      CsvToStructs(schema, Map.empty, Literal.create("1 day")),
      InternalRow(new CalendarInterval(0, 1, 0)))
  }
}
| wangmiao1981/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala | Scala | apache-2.0 | 8,843 |
// IntelliJ resolve-test fixture: the inline `/* path: ... */` comments are
// resolution expectations checked by the test harness — do not edit them.
{
  import scala.util.{Right => Actor}
}
import collection.mutable.HashMap
println(/* path: scala.collection.mutable.HashMap */ HashMap.getClass)
println(classOf[/* path: scala.collection.mutable.HashMap */ HashMap])
| ilinum/intellij-scala | testdata/resolve2/import/scope/clash/InnerBlock.scala | Scala | apache-2.0 | 220 |
import scala.quoted._
import scala.quoted.staging._
object Test {
  given Toolbox = Toolbox.make(getClass.getClassLoader)
  def main(args: Array[String]): Unit = withQuoteContext {
    // Unlifts the literal expressions 0, 1 and 2 back to their Int values;
    // any other expression shape yields None.
    implicit def UnliftableInt: Unliftable[Int] = new {
      def apply(n: Expr[Int])(using QuoteContext): Option[Int] = n match {
        case '{ 0 } => Some(0)
        case '{ 1 } => Some(1)
        case '{ 2 } => Some(2) // fixed: previously mapped the literal 2 to Some(1)
        case _ => None
      }
    }
    // Unlifts the boolean literals `true`/`false`.
    implicit def UnliftableBoolean: Unliftable[Boolean] = new Unliftable[Boolean] {
      def apply(b: Expr[Boolean])(using QuoteContext): Option[Boolean] = b match {
        case '{ true } => Some(true)
        case '{ false } => Some(false)
        case _ => None
      }
    }
    // Unlifts cons-lists built from `::` and `Nil`, element by element.
    implicit def UnliftableList[T: Unliftable: Type]: Unliftable[List[T]] = new {
      def apply(xs: Expr[List[T]])(using QuoteContext): Option[List[T]] = (xs: Expr[Any]) match {
        case '{ ($xs1: List[T]).::($x) } =>
          for { head <- x.unlift; tail <- xs1.unlift }
          yield head :: tail
        case '{ Nil } => Some(Nil)
        case _ => None
      }
    }
    // Unlifts `Some(...)`/`None` option expressions.
    implicit def UnliftableOption[T: Unliftable: Type]: Unliftable[Option[T]] = new {
      def apply(expr: Expr[Option[T]])(using QuoteContext): Option[Option[T]] = expr match {
        case '{ Some[T]($x) } => for (v <- x.unlift) yield Some(v)
        case '{ None } => Some(None)
        case _ => None
      }
    }
    // Literal expressions unlift to Some(value); expressions containing
    // side effects (the println blocks) unlift to None.
    println(('{0}).unlift)
    println(('{1}).unlift)
    println(('{ println(); 1 }).unlift)
    println(('{true}).unlift)
    println(('{false}).unlift)
    println(('{ println(); false }).unlift)
    println(('{ Nil }: Expr[List[String]]).unlift)
    println(('{ "a" :: "b" :: "c" :: Nil }: Expr[List[String]]).unlift)
    println(('{ None }: Expr[Option[Int]]).unlift)
    println(('{ Some("abc") }: Expr[Option[String]]).unlift)
  }
}
| som-snytt/dotty | tests/run-staging/quote-valueof-list.scala | Scala | apache-2.0 | 1,874 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xerial.silk.core.util
import java.util.concurrent.locks.ReentrantLock
/**
 * Helper trait for implementing multi-threaded code guarded by a single
 * [[java.util.concurrent.locks.ReentrantLock]], with support for condition
 * variables created against that same lock.
 *
 * @author Taro L. Saito
 */
trait Guard {
  private[this] val lock = new ReentrantLock

  /** Creates a new condition variable bound to this guard's lock. */
  protected def newCondition = lock.newCondition

  /**
   * Evaluates `f` while holding the lock, releasing the lock afterwards even
   * if `f` throws.
   *
   * @return the value produced by `f`
   */
  protected def guard[U](f: => U): U = {
    // Acquire the lock *before* entering the try block: in the original code
    // lock() sat inside the try, so a failed acquisition would reach the
    // finally clause and call unlock() on a lock this thread never held
    // (IllegalMonitorStateException), masking the original failure.
    lock.lock()
    try {
      f
    }
    finally {
      lock.unlock()
    }
  }
}
| xerial/silk | silk-core/src/main/scala/xerial/silk/core/util/Guard.scala | Scala | apache-2.0 | 987 |
package blended.updater.tools.configbuilder
import org.scalatest.FreeSpec
import blended.updater.config.FeatureConfig
import scala.collection.immutable._
/** Placeholder spec for the feature/fragment resolver; the assertion body is currently disabled. */
class FragmentResolverTest extends FreeSpec {
  "test" in {
    // NOTE(review): this test currently asserts nothing — the fixture below is
    // commented out; presumably it should feed a resolver call. TODO confirm.
    // val features = Seq(FeatureConfig(name = "a", version = "1"))
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.plans.util
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, AttributeReference, AttributeSet, BitwiseAnd, Cast, Expression, Grouping, GroupingID, Literal, NamedExpression, ShiftRight}
import org.apache.spark.sql.catalyst.plans.JoinType
import org.apache.spark.sql.types.{ByteType, IntegerType}
import org.apache.carbondata.mv.plans.modular.{MatchCastExpression, ModularPlan}
import org.apache.carbondata.mv.plans.modular
import org.apache.carbondata.mv.plans.modular.Flags._
/**
 * DSL that turns a [[ModularPlan]] tree into `Fragment`s — an intermediate,
 * SQL-clause-oriented representation from which SQL text is later assembled.
 * Extraction distinguishes plan subtrees that have been rewritten (by MV
 * matching) from those that have not.
 */
trait SQLBuildDSL {
  /**
   * A node of the SQL construction tree. Concrete fragments carry either a
   * full SELECT shape (`SPJGFragment`), a base table (`TABLEFragment`) or a
   * UNION of fragments (`UNIONFragment`).
   */
  abstract class Fragment extends Product {
    // this class corresponding to TreeNode of SparkSQL or Tree of Scala compiler
    def canEqual(that: Any): Boolean = {
      throw new UnsupportedOperationException
    }
    def productArity: Int = throw new UnsupportedOperationException
    def productElement(n: Int): Any = throw new UnsupportedOperationException
    // TODO: add library needed for SQL building
    // True when this fragment (or any child) cannot be rendered as SQL.
    def isUnsupported: Boolean
    def Supported: Boolean
    // def pos: Position
    def alias: Option[String]
  }
  /**
   * Select-Project-Join-GroupBy fragment: one clause field per piece of a
   * single (possibly grouped) SELECT statement. `from` pairs each child
   * fragment with an optional join type and join conditions; `groupby` is
   * (group-by expressions, grouping sets).
   */
  case class SPJGFragment(
      select: Seq[NamedExpression] = Nil, // TODO: more general, change to Seq[Either[Fragment,
      // NamedExpression]]
      from: Seq[(Fragment, Option[JoinType], Seq[Expression])] = Nil,
      where: Seq[Expression] = Nil,
      groupby: (Seq[Expression], Seq[Seq[Expression]]) = (Nil, Nil),
      having: Seq[Expression] = Nil,
      alias: Option[String] = None,
      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
    // Unsupported when SELECT or FROM is empty, or any child is unsupported.
    override def isUnsupported: Boolean = {
      select == Nil || from == Nil ||
      from.map(_._1.isUnsupported).foldLeft(false)(_ || _)
    }
    override def Supported: Boolean = !isUnsupported
  }
  // TODO: find a scheme to break up fragmentExtract() using unapply
  //
  //  object SPJGFragment {
  //    type ReturnType = (Seq[NamedExpression],Seq[(Fragment, Option[JoinType],Seq[Expression])
  //    ],Seq[Expression],Seq[Expression],Seq[Expression],Option[String])
  //
  //    def unapply(plan: ModularPlan): Option[ReturnType] = fragmentExtract(plan, None)
  //  }
  /** A base relation, identified by its (database, table) name parts. */
  case class TABLEFragment(
      table: Seq[String] = Nil,
      alias: Option[String] = None,
      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
    override def isUnsupported: Boolean = table == Nil
    override def Supported: Boolean = !isUnsupported
  }
  /** A UNION over child fragments; unsupported if empty or any child is. */
  case class UNIONFragment(
      union: Seq[Fragment] = Nil,
      alias: Option[String] = None,
      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
    override def isUnsupported: Boolean = {
      union == Nil ||
      union.map(_.isUnsupported).foldLeft(false)(_ || _)
    }
    override def Supported: Boolean = !isUnsupported
  }
  // Sentinel returned for plan shapes the extractor does not recognize.
  val UnsupportedFragment: Fragment = new Fragment {
    def isUnsupported = true
    def Supported = false
    def alias = None
  }
  /**
   * Turns a bunch of string segments into a single string and separate each segment by a space.
   * The segments are trimmed so only a single space appears in the separation.
   * For example, `build("a", " b ", " c")` becomes "a b c".
   */
  protected def build(segments: String*): String = {
    segments.map(_.trim).filter(_.nonEmpty).mkString(" ")
  }
  /**
   * Entry point: extracts a fragment from `plan`, dispatching on whether the
   * subtree has been rewritten by MV matching.
   */
  def fragmentExtract(plan: ModularPlan, alias: Option[String]): Fragment = {
    if (plan.rewritten) {
      fragmentExtract_Rewritten(plan, alias)
    } else {
      fragmentExtract_NonRewritten(plan, alias)
    }
  }
  // Rewritten portion of query plan
  // Case order matters: the skip flags on the Select/GroupBy/Select sandwich
  // decide which specialized extractor handles the shape.
  private def fragmentExtract_Rewritten(
      plan: ModularPlan,
      alias: Option[String]): Fragment = {
    plan match {
      case s1@modular.Select(_, _, _, _, _,
        Seq(g@modular.GroupBy(_, _, _, _,
          s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
        if (!s1.skip && !g.skip && !s2.skip) =>
        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
      case s1@modular.Select(_, _, _, _, _,
        Seq(g@modular.GroupBy(_, _, _, _,
          s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
        if (s1.skip && g.skip && s2.skip) =>
        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
      case s1@modular.Select(_, _, _, _, _, Seq(g@modular.GroupBy(_, _, _, _,
        s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
        if (!s1.skip && !g.skip && s2.skip) =>
        extractRewrittenSelectGroupBy(s1, g, alias)
      case s1@modular.Select(_, _, _, _, _, Seq(s2@modular.Select(_, _, _, _, _, _, _, _, _, _)),
        _, _, _, _) if (!s1.skip && s2.skip) =>
        extractRewrittenSelect(s1, alias)
      case other => extractSimpleOperator(other, alias)
    }
  }
  // Non-rewritten portion of query plan
  private def fragmentExtract_NonRewritten(
      plan: ModularPlan,
      alias: Option[String]): Fragment = {
    plan match {
      case s1@modular.Select(_, _, _, _, _,
        Seq(g@modular.GroupBy(_, _, _, _,
          s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
        if (s1.aliasMap.isEmpty && !g.rewritten) =>
        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
      case g@modular.GroupBy(_, _, _, _, s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)
        if g.alias.isEmpty =>
        // Inline the inner Select: build the FROM list from its children,
        // attaching join type/conditions recorded on its join edges.
        val fragmentList = s2.children.zipWithIndex
          .map { case (child, index) => fragmentExtract(child, s2.aliasMap.get(index)) }
        val fList = s2.joinEdges.map {
          e => {
            (e.right, (fragmentList(e.right), Some(e.joinType), s2
              .extractRightEvaluableConditions(s2.children(e.left), s2.children(e.right))))
          }
        }.toMap
        val from = (0 to fragmentList.length - 1)
          .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
        // Join conditions already placed in FROM must not re-appear in WHERE.
        val excludesPredicate = from.flatMap(_._3).toSet
        val (select, (groupByExprs, groupingSet)) = addGroupingSetIfNeeded(g, s2)
        SPJGFragment(
          select,
          from,
          s2.predicateList.filter { p => !excludesPredicate(p) },
          (groupByExprs, groupingSet),
          Nil,
          alias,
          (g.flags, g.flagSpec))
      case g@modular.GroupBy(_, _, _, _, _, _, _, _) if (g.alias.nonEmpty) =>
        // Aliased GroupBy: its child becomes a sub-query in the FROM clause.
        val from = Seq((fragmentExtract(g.child, g.alias), None, Nil))
        SPJGFragment(
          g.outputList,
          from,
          Nil,
          (g.predicateList, Seq.empty),
          Nil,
          alias,
          (g.flags, g.flagSpec))
      case other => extractSimpleOperator(other, alias)
    }
  }
  // used in both rewritten and non-rewritten cases
  // currently in rewritten cases we don't consider grouping set
  /**
   * Handles the Select(s1) / GroupBy(g) / Select(s2) sandwich: s2 supplies
   * FROM and WHERE, g supplies the grouped SELECT list and GROUP BY, and
   * s1's predicates become HAVING (with aliases resolved back to the
   * underlying expressions).
   */
  private def extractRewrittenOrNonRewrittenSelectGroupBySelect(
      s1: modular.Select,
      g: modular.GroupBy,
      s2: modular.Select,
      alias: Option[String]): Fragment = {
    val fragmentList = s2.children.zipWithIndex
      .map { case (child, index) => fragmentExtract(child, s2.aliasMap.get(index)) }
    val fList = s2.joinEdges.map {
      e => {
        (e.right, (fragmentList(e.right), Some(e.joinType), s2
          .extractRightEvaluableConditions(s2.children(e.left), s2.children(e.right))))
      }
    }.toMap
    val from = (0 to fragmentList.length - 1)
      .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
    val excludesPredicate = from.flatMap(_._3).toSet
    // Map each aliased aggregate attribute back to its defining Alias so
    // HAVING/SELECT can reference the original expression.
    val aliasMap = AttributeMap(g.outputList.collect { case a: Alias => (a.toAttribute, a) })
    val windowExprs = s1.windowSpec
      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
    val having = s1.predicateList
      .map { case attr: Attribute => aliasMap.get(attr).map(_.child)
        .getOrElse(attr);
        case expr: Expression => expr.transform { case a: Alias => a.child };
        case other => other
      }
    val (select_g, (groupByExprs, groupingSet)) = addGroupingSetIfNeeded(g, s2)
    val gSet = AttributeSet(g.outputList.map(_.toAttribute))
    val sSet = AttributeSet(s1.outputList.map(_.toAttribute))
    // With grouping sets the outer select must pass the grouped output
    // through unchanged; anything else cannot be rendered.
    val select = if (groupingSet.nonEmpty) {
      if (gSet.equals(sSet) && windowExprs.isEmpty) {
        select_g
      } else {
        throw new UnsupportedOperationException
      }
    } else
    // TODO: how to handle alias of attribute in MV
    {
      s1.outputList.map { attr => aliasMap.get(attr.toAttribute).getOrElse(attr) } ++ windowExprs
    }
    SPJGFragment(
      select, // select
      from, // from
      s2.predicateList.filter { p => !excludesPredicate(p) }, // where
      (groupByExprs, groupingSet), // group by
      having, // having
      alias,
      (s1.flags, s1.flagSpec))
  }
  // used in rewritten cases only -- don't consider grouping set
  /**
   * Select(s1) over GroupBy(g) where the inner select was already rewritten:
   * the GroupBy's child becomes a FROM sub-query and s1's predicates become
   * HAVING.
   */
  private def extractRewrittenSelectGroupBy(
      s1: modular.Select,
      g: modular.GroupBy,
      alias: Option[String]): Fragment = {
    val fragment = fragmentExtract(g.child, g.alias)
    val from = Seq((fragment, None, Nil))
    val aliasMap = AttributeMap(g.outputList.collect { case a: Alias => (a.toAttribute, a) })
    val windowExprs = s1.windowSpec
      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
    val having = s1.predicateList
      .map { case attr: Attribute => aliasMap.get(attr).map(_.child)
        .getOrElse(attr);
        case expr: Expression => expr.transform { case a: Alias => a.child };
        case other => other
      }
    val (select_g, (groupByExprs, groupingSet)) = (g.outputList, (g.predicateList, Seq.empty))
    // TODO: how to handle alias of attribute in MV
    val select = s1.outputList.map { attr => aliasMap.get(attr.toAttribute).getOrElse(attr) } ++
                 windowExprs
    SPJGFragment(
      select, // select
      from, // from
      Nil, // where
      (groupByExprs, groupingSet), // group by
      having, // having
      alias,
      (s1.flags, s1.flagSpec))
  }
  /** A plain Select whose single child was already rewritten: wrap it as a FROM sub-query. */
  private def extractRewrittenSelect(s1: modular.Select, alias: Option[String]): Fragment = {
    val fragment = fragmentExtract(s1.children(0), s1.aliasMap.get(0))
    val from = Seq((fragment, None, Nil))
    val windowExprs = s1.windowSpec
      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
    val select = s1.outputList.map(_.toAttribute)
    SPJGFragment(
      select, // select
      from, // from
      s1.predicateList, // where
      (Nil, Nil), // group by
      Nil, // having
      alias,
      (s1.flags, s1.flagSpec))
  }
  /**
   * Fallback extractor for single operators: GroupBy-over-Select, bare
   * Select, Union, base relations and harmonized relations. Anything else
   * maps to [[UnsupportedFragment]].
   */
  private def extractSimpleOperator(
      operator: ModularPlan,
      alias: Option[String]): Fragment = {
    operator match {
      case g@modular.GroupBy(_, _, _, _, s@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _) =>
        val fragmentList = s.children.zipWithIndex
          .map { case (child, index) => fragmentExtract(child, s.aliasMap.get(index)) }
        val fList = s.joinEdges.map {
          e => {
            (e.right, (fragmentList(e.right), Some(e.joinType), s
              .extractRightEvaluableConditions(s.children(e.left), s.children(e.right))))
          }
        }.toMap
        val from = (0 to fragmentList.length - 1)
          .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
        val excludesPredicate = from.flatMap(_._3).toSet
        val windowExprs = s.windowSpec
          .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
          .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
        val select = s.outputList ++ windowExprs
        SPJGFragment(
          select, // select
          from, // from
          s.predicateList.filter { p => !excludesPredicate(p) }, // where
          (Nil, Nil), // group by
          Nil, // having
          alias,
          (s.flags, s.flagSpec))
      case s@modular.Select(_, _, _, _, _, _, _, _, _, _) =>
        val fragmentList = s.children.zipWithIndex
          .map { case (child, index) => fragmentExtract(child, s.aliasMap.get(index)) }
        val fList = s.joinEdges.map {
          e => {
            (e.right, (fragmentList(e.right), Some(e.joinType), s
              .extractRightEvaluableConditions(s.children(e.left), s.children(e.right))))
          }
        }.toMap
        val from = (0 to fragmentList.length - 1)
          .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
        val excludesPredicate = from.flatMap(_._3).toSet
        val windowExprs = s.windowSpec
          .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
          .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
        val select = s.outputList ++ windowExprs
        SPJGFragment(
          select, // select
          from, // from
          s.predicateList.filter { p => !excludesPredicate(p) }, // where
          (Nil, Nil), // group by
          Nil, // having
          alias,
          (s.flags, s.flagSpec))
      case u@modular.Union(_, _, _) =>
        UNIONFragment(
          u.children.zipWithIndex.map { case (child, index) => fragmentExtract(child, None) },
          alias,
          (u.flags, u.flagSpec))
      case d@modular.ModularRelation(_, _, _, _, _) =>
        if (d.databaseName != null && d.tableName != null) {
          TABLEFragment(
            Seq(d.databaseName, d.tableName), alias, (d.flags, d.rest))
        } else {
          TABLEFragment(Seq((d.output).toString()), alias, (d.flags, d.rest))
        }
      case h@modular.HarmonizedRelation(_) =>
        fragmentExtract(h.source, alias)
      case _ => UnsupportedFragment
    }
  }
  /**
   * When the GroupBy carries the EXPAND flag (grouping sets / cube / rollup),
   * reconstructs the original grouping-set expressions and aggregate
   * expressions from the Expand metadata stored in the flag spec; otherwise
   * returns the GroupBy's own output and predicates unchanged.
   */
  private def addGroupingSetIfNeeded(g: modular.GroupBy, s: modular.Select) = {
    if (g.flags.hasFlag(EXPAND)) {
      assert(g.predicateList.length > 1)
      val flagsNeedExprs =
        for {flag <- pickledListOrder if (g.flags.hasFlag(flag))} yield {
          flag
        }
      flagsNeedExprs.zip(g.flagSpec).collect {
        case (EXPAND, Seq(projections_, output_, numOriginalOutput_)) =>
          val output = output_.asInstanceOf[Seq[Attribute]]
          val projections = projections_.asInstanceOf[Seq[Seq[Expression]]]
          val numOriginalOutput = numOriginalOutput_.asInstanceOf[Int]
          // The last column of Expand is always grouping ID
          val gid = output.last
          val groupByAttributes = g.predicateList.dropRight(1).map(_.asInstanceOf[Attribute])
          // Assumption: project's projectList is composed of
          // 1) the original output (Project's child.output),
          // 2) the aliased group by expressions.
          val expandedAttributes = s.output.drop(numOriginalOutput)
          val groupByExprs = s.outputList.drop(numOriginalOutput).map(_.asInstanceOf[Alias].child)
          // a map from group by attributes to the original group by expressions.
          val groupByAttrMap = AttributeMap(groupByAttributes.zip(groupByExprs))
          // a map from expanded attributes to the original group by expressions.
          val expandedAttrMap = AttributeMap(expandedAttributes.zip(groupByExprs))
          val groupingSet: Seq[Seq[Expression]] = projections.map { project =>
            // Assumption: expand.projections is composed of
            // 1) the original output (Project's child.output),
            // 2) expanded attributes(or null literal)
            // 3) gid, which is always the last one in each project in Expand
            project.drop(numOriginalOutput).dropRight(1).collect {
              case attr: Attribute if expandedAttrMap.contains(attr) => expandedAttrMap(attr)
            }
          }
          val aggExprs = g.outputList.map { case aggExpr =>
            val originalAggExpr = aggExpr.transformDown {
              // grouping_id() is converted to VirtualColumn.groupingIdName by Analyzer. Revert
              // it back.
              case ar: AttributeReference if ar == gid => GroupingID(Nil)
              case ar: AttributeReference if groupByAttrMap.contains(ar) => groupByAttrMap(ar)
              case a@MatchCastExpression(
              BitwiseAnd(
              ShiftRight(ar: AttributeReference, Literal(value: Any, IntegerType)),
              Literal(1, IntegerType)), ByteType) if ar == gid =>
                // for converting an expression to its original SQL format grouping(col)
                val idx = groupByExprs.length - 1 - value.asInstanceOf[Int]
                groupByExprs.lift(idx).map(Grouping).getOrElse(a)
            }
            originalAggExpr match {
              // Ancestor operators may reference the output of this grouping set,
              // and we use exprId to generate a unique name for each attribute, so we should
              // make sure the transformed aggregate expression won't change the output,
              // i.e. exprId and alias name should remain the same.
              case ne: NamedExpression if ne.exprId == aggExpr.exprId => ne
              case e => Alias(e, aggExpr.name)(exprId = aggExpr.exprId)
            }
          }
          (aggExprs, (groupByExprs, groupingSet))
      }.head
    } else {
      (g.outputList, (g.predicateList, Seq.empty))
    }
  }
}
/** Default, stateless instance of the SQL-building DSL. */
object SQLBuildDSL extends SQLBuildDSL
| zzcclp/carbondata | mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuildDSL.scala | Scala | apache-2.0 | 18,195 |
package fr.inria.spirals.sigma.ttc14.fixml
import fr.unice.i3s.sigma.m2t.M2T
import fr.inria.spirals.sigma.ttc14.fixml.objlang.support.ObjLang
import fr.inria.spirals.sigma.ttc14.fixml.objlang.support.ObjLang._objlang._
/** C#-specific variant of the ObjLang model-to-text transformation. */
trait ObjLang2CSharp extends BaseObjLangMTT {
  /**
   * Maps an ObjLang data type to the corresponding C# type keyword.
   * NOTE(review): the match covers only the four listed types — any other
   * [[DataType]] throws a MatchError at runtime; confirm that is intended.
   */
  override def class2Code(p: DataType) = {
    import XMLMM2ObjLang._
    p match {
      case DTString => "string"
      case DTDouble => "double"
      case DTLong => "long"
      case DTInteger => "int"
    }
  }
}
package com.intel.ie.evaluation
import com.intel.ie.analytics.IntelConfig
import com.intel.ie.evaluation.preparation.crawl.ProxyConfig
import scala.io.Source
import scala.reflect.io.File
/**
* Created by xianyan on 8/2/16.
*/
/** Resolves file-system paths and crawl settings used by the evaluation pipeline. */
object EvalPaths {

  /** Path of the raw crawled page dump for `company`. */
  def rawPage(company: String) = IntelConfig.RAW_PAGE_PATH + s"raw-${company}.txt"

  // Location of the tab-separated "<label>\t<url>" list consumed by urlMap().
  var urlsPath: String = IntelConfig.URL_LIST

  /**
   * Path of the crawled page file for url `label` and page index `i`;
   * creates the per-label directory on first use.
   */
  def webContentPath(label: String, i: Int): String = {
    val dir = File(s"${IntelConfig.COMPANY_PAGE_PATH}${label}/")
    if (!dir.exists) dir.createDirectory()
    s"${dir}page-${label}_${i}.txt"
  }

  /** Path of the NER-labeled output file for `company`. */
  def labeledPath(company: String) = IntelConfig.NER_LABELED_PATH + s"labeled-${company}.txt"

  /**
   * @return a map that maps url label to url, read from [[urlsPath]];
   *         lines without a tab separator are skipped
   */
  def urlMap(): Map[String, String] = {
    val source = Source.fromFile(urlsPath)
    try {
      source.getLines
        .map(_.split("\t", 2))
        .collect { case Array(label, url) => label -> url }
        .toMap // materialize fully before the underlying stream is closed
    } finally {
      source.close() // fix: the original never closed the file handle
    }
  }

  /**
   * @return a proxy configuration when both proxy URL and port are set,
   *         `null` otherwise (existing callers check for null)
   */
  def proxy(): ProxyConfig = {
    if (IntelConfig.PROXY_URL != "" && IntelConfig.PROXY_PORT != "") {
      new ProxyConfig(IntelConfig.PROXY_URL, IntelConfig.PROXY_PORT, null, null)
    } else {
      // NOTE(review): Option[ProxyConfig] would be more idiomatic; null is
      // kept for backward compatibility with existing call sites.
      null
    }
  }
}
| intel-analytics/InformationExtraction | src/main/scala/com/intel/ie/evaluation/EvalPaths.scala | Scala | gpl-3.0 | 1,233 |
/*
* Copyright (C) 2011 Mathieu Mathieu Leclaire <mathieu.Mathieu Leclaire at openmole.org>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.task.template
import java.io.File
import monocle.macros.Lenses
import org.openmole.core.context.{ Context, Val }
import org.openmole.core.expansion.{ ExpandedString, FromContext }
import org.openmole.core.workflow.builder._
import org.openmole.core.workflow.dsl
import org.openmole.core.workflow.dsl._
import org.openmole.core.workflow.task._
object TemplateFileTask {
  // Builder instances so the OpenMOLE DSL `set` operators can rewrite the
  // task's input/output and info configurations.
  implicit def isTask: InputOutputBuilder[TemplateFileTask] = InputOutputBuilder(TemplateFileTask.config)
  implicit def isInfo = InfoBuilder(info)
  /** Builds a task expanding a template file known at workflow-authoring time. */
  def apply(
    template: File,
    output: Val[File]
  )(implicit name: sourcecode.Name, definitionScope: DefinitionScope) = new TemplateFileTask(template, output, InputOutputConfig(), InfoConfig()) set (dsl.outputs += output)
  /** Builds a task whose template file is supplied through an input prototype at run time. */
  def apply(
    template: Val[File],
    output: Val[File])(implicit name: sourcecode.Name, definitionScope: DefinitionScope) =
    new TemplateFileFromInputTask(template, output, InputOutputConfig(), InfoConfig()) set (
      dsl.inputs += template,
      dsl.outputs += output
    )
}
/**
 * Task that expands a fixed template file against the execution context and
 * exposes the result through the `output` prototype.
 */
@Lenses case class TemplateFileTask(
  template: File,
  output: Val[File],
  config: InputOutputConfig,
  info: InfoConfig
) extends Task {
  // Parsed lazily and marked @transient so the parsed template is not
  // serialized with the task; it is re-parsed after deserialization.
  @transient lazy val expanded = template.withInputStream { is ⇒
    ExpandedString(is)
  }
  override protected def process(executionContext: TaskExecutionContext) = FromContext { parameters ⇒
    import parameters._
    // Write the context-expanded template into a fresh temporary file.
    val file = executionContext.moleExecutionDirectory.newFile(template.getName, ".tmp")
    file.content = expanded.from(context)
    context + (output → file)
  }
}
object TemplateFileFromInputTask {
  // Builder instances so the OpenMOLE DSL `set` operators work on this task type.
  implicit def isTask: InputOutputBuilder[TemplateFileFromInputTask] = InputOutputBuilder(TemplateFileFromInputTask.config)
  implicit def isInfo = InfoBuilder(info)
}
/**
 * Task variant whose template file is resolved from the context at run time
 * (e.g. produced by an upstream task) rather than fixed at authoring time.
 */
@Lenses case class TemplateFileFromInputTask(
  template: FromContext[File],
  output: Val[File],
  config: InputOutputConfig,
  info: InfoConfig
) extends Task {
  override protected def process(executionContext: TaskExecutionContext) = FromContext { parameters ⇒
    import parameters._
    // The template must be re-parsed on every execution since the file
    // depends on the incoming context.
    val expanded = template.from(context).withInputStream { is ⇒ ExpandedString(is).from(context) }
    val file = executionContext.moleExecutionDirectory.newFile("template", ".tmp")
    file.content = expanded
    context + (output → file)
  }
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.task.template/src/main/scala/org/openmole/plugin/task/template/TemplateFileTask.scala | Scala | agpl-3.0 | 3,142 |
/*
* Copyright 2013 Marconi Lanna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
/** Named SQL statements for one table, loaded from "query/<table>.sql". */
final class Query(table: String) {
  // Lazily-keyed access to the statements bundled as a classpath resource.
  private val query = Resource("query/" + table + ".sql")
  // Returns the statement named `q`. The `.get` makes lookups fail fast —
  // presumably Resource#apply yields an Option; an unknown query name throws.
  def apply(q: String) = query(q).get
}
object Query {
  /** Factory method: `Query("users")` is equivalent to `new Query("users")`. */
  def apply(table: String) = new Query(table)
}
| ryantanner/ribbon | app/util/Query.scala | Scala | apache-2.0 | 806 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.util.concurrent.atomic.AtomicInteger
import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd, SparkListenerTaskStart}
import org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec
import org.apache.spark.sql.execution.streaming.StreamExecution
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.test.TestSparkSession
// Trait to configure StreamTest for kafka continuous execution tests.
trait KafkaContinuousTest extends KafkaSourceTest {
  // Run every streaming test in continuous mode with a 1s checkpoint interval.
  override val defaultTrigger = Trigger.Continuous(1000)
  override val defaultUseV2Sink = true
  // We need more than the default local[2] to be able to schedule all partitions simultaneously.
  override protected def createSparkSession = new TestSparkSession(
    new SparkContext(
      "local[10]",
      "continuous-stream-test-sql-context",
      sparkConf.set("spark.sql.testkey", "true")))
  // In addition to setting the partitions in Kafka, we have to wait until the query has
  // reconfigured to the new count so the test framework can hook in properly.
  override protected def setTopicPartitions(
      topic: String, newCount: Int, query: StreamExecution) = {
    testUtils.addPartitions(topic, newCount)
    eventually(timeout(streamingTimeout)) {
      assert(
        query.lastExecution.executedPlan.collectFirst {
          case scan: ContinuousScanExec
              if scan.readSupport.isInstanceOf[KafkaContinuousReadSupport] =>
            scan.scanConfig.asInstanceOf[KafkaContinuousScanConfig]
        }.exists(_.knownPartitions.size == newCount),
        s"query never reconfigured to $newCount partitions")
    }
  }
  // Continuous processing tasks end asynchronously, so test that they actually end.
  private class TasksEndedListener extends SparkListener {
    // Net count of tasks started but not yet ended.
    val activeTaskIdCount = new AtomicInteger(0)
    override def onTaskStart(start: SparkListenerTaskStart): Unit = {
      activeTaskIdCount.incrementAndGet()
    }
    override def onTaskEnd(end: SparkListenerTaskEnd): Unit = {
      activeTaskIdCount.decrementAndGet()
    }
  }
  private val tasksEndedListener = new TasksEndedListener()
  override def beforeEach(): Unit = {
    super.beforeEach()
    spark.sparkContext.addSparkListener(tasksEndedListener)
  }
  override def afterEach(): Unit = {
    // Wait for all continuous tasks to wind down before detaching the listener.
    eventually(timeout(streamingTimeout)) {
      assert(tasksEndedListener.activeTaskIdCount.get() == 0)
    }
    spark.sparkContext.removeSparkListener(tasksEndedListener)
    super.afterEach()
  }
  test("ensure continuous stream is being used") {
    val query = spark.readStream
      .format("rate")
      .option("numPartitions", "1")
      .option("rowsPerSecond", "1")
      .load()
    testStream(query)(
      Execute(q => assert(q.isInstanceOf[ContinuousExecution]))
    )
  }
}
| hhbyyh/spark | external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousTest.scala | Scala | apache-2.0 | 3,756 |
package org.bitcoins.db
import org.bitcoins.core.config.NetworkParameters
import org.bitcoins.core.protocol.blockchain.ChainParams
import java.nio.file.Path
import java.nio.file.Paths
import org.bitcoins.core.config.MainNet
import org.bitcoins.core.config.TestNet3
import org.bitcoins.core.config.RegTest
import com.typesafe.config._
import org.bitcoins.core.util.BitcoinSLogger
import slick.jdbc.SQLiteProfile
import slick.jdbc.SQLiteProfile.api._
import scala.util.Try
import scala.util.Success
import scala.util.Failure
import slick.basic.DatabaseConfig
import org.bitcoins.core.protocol.blockchain.MainNetChainParams
import org.bitcoins.core.protocol.blockchain.TestNetChainParams
import org.bitcoins.core.protocol.blockchain.RegTestNetChainParams
import java.nio.file.Files
import scala.util.Properties
import scala.util.matching.Regex
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import ch.qos.logback.classic.Level
/**
* Everything needed to configure functionality
* of bitcoin-s applications is found in here.
*
* @see [[https://github.com/bitcoin-s/bitcoin-s-core/blob/master/doc/configuration.md `configuration.md`]]
* for more information.
*/
abstract class AppConfig extends BitcoinSLogger {
/**
* Initializes this project.
* After this future resolves, all operations should be
* able to be performed correctly.
*
* Initializing may include creating database tables,
* making directories or files needed latern or
* something else entirely.
*/
def initialize()(implicit ec: ExecutionContext): Future[Unit]
/** Sub members of AppConfig should override this type with
* the type of themselves, ensuring `withOverrides` return
* the correct type
*/
protected[bitcoins] type ConfigType <: AppConfig
/** Constructor to make a new instance of this config type */
protected[bitcoins] def newConfigOfType(
configOverrides: Seq[Config]): ConfigType
/** List of user-provided configs that should
* override defaults
*/
protected[bitcoins] def configOverrides: List[Config] = List.empty
/**
* This method returns a new `AppConfig`, where every
* key under `bitcoin-s` overrides the configuration
* picked up by other means (the `reference.conf`
* provided by bitcoin-s and the `application.conf`
* provided by the user). If you pass in configs with
* overlapping keys (e.g. several configs with the key
* `bitcoin-s.network`), the latter config overrides the
* first.
*/
def withOverrides(config: Config, configs: Config*): ConfigType = {
// the two val assignments below are workarounds
// for awkward name resolution in the block below
val firstOverride = config
val numOverrides = configs.length + 1
if (logger.isDebugEnabled()) {
// force lazy evaluation before we print
// our lines
val oldConfStr = this.config.asReadableJson
logger.debug(s"Creating AppConfig with $numOverrides override(s) ")
logger.debug(s"Old config:")
logger.debug(oldConfStr)
}
val configOverrides = firstOverride +: configs
if (logger.isTraceEnabled()) {
configOverrides.zipWithIndex.foreach {
case (c, idx) => logger.trace(s"Override no. $idx: ${c.asReadableJson}")
}
}
val newConf = {
// the idea here is that after resolving the configuration,
// we extract the value under the 'bitcoin-s' key and use
// that as our config. here we have to do the reverse, to
// get the keys to resolve correctly
val reconstructedStr = s"""
bitcoin-s: ${this.config.asReadableJson}
"""
val reconstructed = ConfigFactory.parseString(reconstructedStr)
newConfigOfType(reconstructed +: configOverrides)
}
// to avoid non-necessary lazy load
if (logger.isDebugEnabled()) {
// force lazy load before we print
val newConfStr = newConf.config.asReadableJson
logger.debug("New config:")
logger.debug(newConfStr)
}
newConf
}
/**
* Name of the module. `chain`, `wallet`, `node` etc.
*/
protected[bitcoins] def moduleName: String
/**
* The configuration details for connecting/using the database for our projects
* that require datbase connections
*/
lazy val dbConfig: DatabaseConfig[SQLiteProfile] = {
val dbConfig = {
Try {
DatabaseConfig.forConfig[SQLiteProfile](path = moduleName, config)
} match {
case Success(value) =>
value
case Failure(exception) =>
logger.error(s"Error when loading database from config: $exception")
logger.error(s"Configuration: ${config.asReadableJson}")
throw exception
}
}
logger.debug(s"Resolved DB config: ${dbConfig.config}")
val _ = createDbFileIfDNE()
dbConfig
}
/** The database we are connecting to */
lazy val database: Database = {
dbConfig.db
}
/** The path where our DB is located */
// todo: what happens to this if we
// dont use SQLite?
lazy val dbPath: Path = {
val pathStr = config.getString(s"$moduleName.db.path")
val path = Paths.get(pathStr)
logger.debug(s"DB path: $path")
path
}
/** The name of our database */
// todo: what happens to this if we
// dont use SQLite?
lazy val dbName: String = {
config.getString(s"$moduleName.db.name")
}
private def createDbFileIfDNE(): Unit = {
//should add a check in here that we are using sqlite
if (!Files.exists(dbPath)) {
val _ = {
logger.debug(s"Creating database directory=$dbPath")
Files.createDirectories(dbPath)
val dbFilePath = dbPath.resolve(dbName)
logger.debug(s"Creating database file=$dbFilePath")
Files.createFile(dbFilePath)
}
()
}
}
/** Chain parameters for the blockchain we're on */
lazy val chain: ChainParams = {
val networkStr = config.getString("network")
networkStr match {
case "mainnet" => MainNetChainParams
case "testnet3" => TestNetChainParams
case "regtest" => RegTestNetChainParams
case other: String =>
throw new IllegalArgumentException(
s"'$other' is not a recognized network! Available options: mainnet, testnet3, regtest")
}
}
/** The blockchain network we're on */
lazy val network: NetworkParameters = chain.network
  /**
   * The underlying Typesafe `Config` that the rest of the fields in this
   * class are derived from.
   *
   * Configuration sources are merged with the following precedence
   * (highest first):
   *   1. programmatic `configOverrides` (later entries win over earlier ones)
   *   2. `bitcoin-s.conf` in the data directory, plus a forced
   *      `bitcoin-s.datadir` entry
   *   3. bundled classpath configs (`application.conf` over `reference.conf`)
   *   4. `db.conf` from the classpath
   *
   * Substitutions are only resolved at the very end, once every source has
   * been merged; the returned config is scoped to the `bitcoin-s` section.
   */
  private[bitcoins] lazy val config: Config = {
    // config from the data directory, with the datadir path itself forced in
    val datadirConfig = {
      val file = baseDatadir.resolve("bitcoin-s.conf")
      val config = if (Files.isReadable(file)) {
        ConfigFactory.parseFile(file.toFile())
      } else {
        ConfigFactory.empty()
      }
      val withDatadir =
        ConfigFactory.parseString(s"bitcoin-s.datadir = $baseDatadir")
      withDatadir.withFallback(config)
    }
    logger.trace(s"Data directory config:")
    if (datadirConfig.hasPath("bitcoin-s")) {
      logger.trace(datadirConfig.getConfig("bitcoin-s").asReadableJson)
    } else {
      logger.trace(ConfigFactory.empty().asReadableJson)
    }
    // `load` tries to resolve substitutions,
    // `parseResources` does not
    val dbConfig = ConfigFactory
      .parseResources("db.conf")
    logger.trace(
      s"DB config: ${dbConfig.getConfig("bitcoin-s").asReadableJson}")
    // we want to NOT resolve substitutions in the configuration until the user
    // provided configs also have been loaded. .parseResources() does not do that
    // whereas .load() does
    val classPathConfig = {
      val applicationConf = ConfigFactory.parseResources("application.conf")
      val referenceConf = ConfigFactory.parseResources("reference.conf")
      applicationConf.withFallback(referenceConf)
    }
    logger.trace(
      s"Classpath config: ${classPathConfig.getConfig("bitcoin-s").asReadableJson}")
    // we want the data directory configuration
    // to take preference over any bundled (classpath)
    // configurations
    // loads reference.conf (provided by Bitcoin-S)
    val unresolvedConfig = datadirConfig
      .withFallback(classPathConfig)
      .withFallback(dbConfig)
    logger.trace(s"Unresolved bitcoin-s config:")
    logger.trace(unresolvedConfig.getConfig("bitcoin-s").asReadableJson)
    val withOverrides =
      if (configOverrides.nonEmpty) {
        val overrides =
          configOverrides
          // we reverse to make the configs specified last take precedence
            .reverse
            .reduce(_.withFallback(_))
        val interestingOverrides = overrides.getConfig("bitcoin-s")
        logger.trace(
          s"${configOverrides.length} user-overrides for bitcoin-s config:")
        logger.trace(interestingOverrides.asReadableJson)
        // to make the overrides actually override
        // the default settings we have to do it
        // in this order
        overrides.withFallback(unresolvedConfig)
      } else {
        logger.trace(s"No user-provided overrides")
        unresolvedConfig
      }
    // only now is it safe to resolve substitutions: every source is merged
    val finalConfig = withOverrides
      .resolve()
      .getConfig("bitcoin-s")
    logger.debug(s"Resolved bitcoin-s config:")
    logger.debug(finalConfig.asReadableJson)
    finalConfig
  }
  /** The base data directory. This is where we look for a configuration file
    * (`bitcoin-s.conf`, see [[config]]). Supplied by each concrete subclass.
    */
  protected[bitcoins] def baseDatadir: Path
/** The network specific data directory. */
val datadir: Path = {
val lastDirname = network match {
case MainNet => "mainnet"
case TestNet3 => "testnet3"
case RegTest => "regtest"
}
baseDatadir.resolve(lastDirname)
}
private def stringToLogLevel(str: String): Option[Level] =
str.toLowerCase() match {
case "trace" => Some(Level.TRACE)
case "debug" => Some(Level.DEBUG)
case "info" => Some(Level.INFO)
case "warn" => Some(Level.WARN)
case "error" => Some(Level.ERROR)
case "off" => Some(Level.OFF)
case _: String => None
}
/** The default logging level */
lazy val logLevel: Level = {
val levelString = config.getString("logging.level")
stringToLogLevel(levelString).getOrElse(
throw new ConfigException.WrongType(
config.origin(),
s"logging.level ($levelString) is not a valid logging level"))
}
/** Whether or not we should log to file */
lazy val disableFileLogging = config.getBoolean("logging.disable-file")
/** Whether or not we should log to stdout */
lazy val disableConsoleLogging = config.getBoolean("logging.disable-console")
private def levelOrDefault(key: String): Level =
config
.getStringOrNone(key) match {
case None => logLevel
case Some(levelStr) =>
stringToLogLevel(levelStr).getOrElse {
throw new ConfigException.WrongType(
config.origin(),
s"$key ($levelStr) is not a valid logging level")
}
}
  /** The logging level for our P2P logger (`logging.p2p`) */
  lazy val p2pLogLevel: Level = levelOrDefault("logging.p2p")
  /** The logging level for our chain verification logger
    * (`logging.chain-verification`)
    */
  lazy val verificationLogLevel: Level =
    levelOrDefault("logging.chain-verification")
  /** The logging level for our key handling logger (`logging.key-handling`) */
  lazy val keyHandlingLogLevel: Level =
    levelOrDefault("logging.key-handling")
  /** Logging level for wallet (`logging.wallet`) */
  // NOTE(review): the trailing capital 'L' in `walletLogLeveL` looks like a
  // typo, but renaming this public member would break callers; left as-is.
  lazy val walletLogLeveL: Level =
    levelOrDefault("logging.wallet")
  /** Logging level for HTTP RPC server (`logging.http`) */
  lazy val httpLogLevel: Level = levelOrDefault("logging.http")
  /** Logging level for database interactions (`logging.database`) */
  lazy val databaseLogLevel: Level = levelOrDefault("logging.database")
}
object AppConfig extends BitcoinSLogger {

  /** The default data directory
    *
    * TODO: use different directories on Windows and Mac,
    * should probably mimic what Bitcoin Core does
    */
  private[bitcoins] val DEFAULT_BITCOIN_S_DATADIR: Path =
    Paths.get(Properties.userHome, ".bitcoin-s")

  /**
   * Matches the default data directory location
   * with a network appended,
   * both with and without a trailing `/`
   */
  private val defaultDatadirRegex: Regex = {
    // Quote the literal prefix: the user's home directory and the '.' in
    // ".bitcoin-s" would otherwise be interpreted as regex metacharacters
    // ('.' matching any character), causing false positives/negatives.
    val quotedPrefix =
      java.util.regex.Pattern.quote(Properties.userHome + "/.bitcoin-s/")
    (quotedPrefix + "(testnet3|mainnet|regtest)/?$").r
  }

  /**
   * Throws if the encountered datadir is the default one. Useful
   * in tests, to make sure you don't blow up important data.
   */
  private[bitcoins] def throwIfDefaultDatadir(config: AppConfig): Unit = {
    val datadirStr = config.datadir.toString()
    defaultDatadirRegex.findFirstMatchIn(datadirStr) match {
      case None => () // pass
      case Some(_) =>
        val errMsg =
          List(
            "It looks like you haven't changed the data directory in your test configuration.",
            s"Your data directory is $datadirStr. This would cause tests to potentially",
            "overwrite your existing data, which you probably don't want."
          ).mkString(" ")
        logger.error(errMsg)
        logger.error(s"Configuration: ${config.config.asReadableJson}")
        throw new RuntimeException(errMsg)
    }
  }
}
| bitcoin-s/bitcoin-s-core | db-commons/src/main/scala/org/bitcoins/db/AppConfig.scala | Scala | mit | 13,132 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xerial.silk.core
import xerial.core.util.CName
//--------------------------------------
//
// SilkException.scala
// Since: 2012/11/19 2:08 PM
//
//--------------------------------------
/** Helpers that throw Silk exceptions annotated with the caller's location. */
object SilkException {

  /** Throws a [[SilkExceptionBase]] whose message has the calling
    * method/file/line appended.
    *
    * NOTE(review): this reads stack-trace element 2, while [[pending]] reads
    * element 1 for the same "who called me" purpose. Since the Throwable is
    * created inside this method, element 1 should already be the direct
    * caller — confirm which index is intended against real call sites.
    */
  def error(m:String) = {
    val t = new Throwable
    val caller = t.getStackTrace()(2)
    throw new SilkExceptionBase(s"$m in ${caller.getMethodName}(${caller.getFileName}:${caller.getLineNumber})") {
    }
  }

  /** Re-throws an existing Throwable's message wrapped in a
    * [[SilkExceptionBase]], annotated with a frame from `e`'s own stack
    * trace.
    *
    * NOTE(review): element 2 here is two frames above where `e` was
    * created — presumably intentional, but verify.
    */
  def error(e:Throwable) = {
    val caller = e.getStackTrace()(2)
    throw new SilkExceptionBase(s"${e.getMessage} in ${caller.getMethodName}(${caller.getFileName}:${caller.getLineNumber})") {}
  }

  /** Throws [[Pending]] naming the method that invoked `pending`. */
  def pending : Nothing = {
    val t = new Throwable
    val caller = t.getStackTrace()(1)
    throw Pending(caller.getMethodName)
  }
}
/** Mixin giving Silk exceptions an error code derived from the concrete
  * class name and a "[CODE] message" string rendering.
  */
trait SilkException {
  def errorCode = CName.toNaturalName(this.getClass.getSimpleName).toUpperCase
  override def toString = s"[$errorCode] ${super.toString}"
}
/**
 * Base class for Silk exceptions; carries the message and inherits the
 * error-code/`toString` formatting from [[SilkException]].
 *
 * @author leo
 */
abstract class SilkExceptionBase(private val message:String) extends Exception(message) with SilkException {
}
/** Base class for Silk errors; extends `Error` rather than `Exception`,
  * with the same [[SilkException]] formatting.
  */
abstract class SilkError(private val message:String) extends Error(message) with SilkException {
}
// Thrown by SilkException.pending for not-yet-implemented methods.
case class Pending(method:String) extends SilkExceptionBase(s"the implementation of $method")
// NOTE(review): message is identical to Pending's — likely a copy-paste;
// presumably it was meant to say the method is not available. The text is a
// runtime string, so it is left unchanged pending confirmation.
case class NotAvailable(method:String) extends SilkExceptionBase(s"the implementation of $method")
| xerial/silk | silk-core/src/main/scala/xerial/silk/core/SilkException.scala | Scala | apache-2.0 | 1,958 |
package org.jetbrains.plugins.scala.lang.completion.postfix.templates
import com.intellij.codeInsight.template.postfix.templates.ParenthesizedPostfixTemplate
import org.jetbrains.plugins.scala.lang.completion.postfix.templates.selector.{SelectorConditions, AncestorSelector, ScalaPostfixTemplatePsiInfo}
import org.jetbrains.plugins.scala.lang.completion.postfix.templates.selector.SelectorType._
/**
 * Scala flavor of IntelliJ's parenthesize postfix template: wires
 * [[ScalaPostfixTemplatePsiInfo]] and an [[AncestorSelector]] that accepts
 * any expression into the platform's [[ParenthesizedPostfixTemplate]].
 *
 * @author Roman.Shein
 * @since 10.09.2015.
 */
class ScalaParenthesizedExpressionPostfixTemplate extends ParenthesizedPostfixTemplate(ScalaPostfixTemplatePsiInfo,
  new AncestorSelector(SelectorConditions.ANY_EXPR, All)) {
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/completion/postfix/templates/ScalaParenthesizedExpressionPostfixTemplate.scala | Scala | apache-2.0 | 632 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tools.data.utils.akka.stats
import akka.actor.Cancellable
import akka.stream._
import akka.stream.stage._
import cmwell.tools.data.ingester.Ingester._
import cmwell.tools.data.utils.logging.DataToolsLogging
import cmwell.tools.data.utils.text.Files.toHumanReadable
import nl.grons.metrics.scala.InstrumentedBuilder
import org.apache.commons.lang3.time.DurationFormatUtils
import scala.concurrent.duration._
/** Factory for [[IngesterStats]] stages; defaults mirror the class
  * constructor's defaults.
  */
object IngesterStats {
  def apply(isStderr: Boolean = false,
            initDelay: FiniteDuration = 1.second,
            interval: FiniteDuration = 1.second,
            label: Option[String] = None) = new IngesterStats(isStderr, initDelay, interval, label)
}
/**
 * Pass-through Akka Streams stage that aggregates [[IngestEvent]]
 * statistics and periodically reports them via the logger (and optionally
 * to stderr). Elements flow through unchanged.
 *
 * @param isStderr  when true, a progress line is also printed to stderr
 * @param initDelay delay before the first periodic report
 * @param interval  time between periodic reports
 * @param label     optional name prefixed to logged messages
 */
class IngesterStats(isStderr: Boolean,
                    initDelay: FiniteDuration = 1.second,
                    interval: FiniteDuration = 1.second,
                    label: Option[String] = None) extends GraphStage[FlowShape[IngestEvent, IngestEvent]] with DataToolsLogging {
  val in = Inlet[IngestEvent]("ingest-stats.in")
  val out = Outlet[IngestEvent]("ingest-stats.out")
  override val shape = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = {
    new GraphStageLogic(shape) with InstrumentedBuilder {
      import scala.util.control.NonFatal

      val start = System.currentTimeMillis()
      val metricRegistry = new com.codahale.metrics.MetricRegistry()

      // meters are internally mutable but the references never change, so
      // these are vals (previously declared as vars); removed the unused
      // lastTime / nextPrint / windowSizeMillis fields
      val totalIngestedBytes = metrics.meter("total-ingested-bytes")
      val totalIngestedInfotons = metrics.meter("total-ingested-infotons")
      val totalFailedInfotons = metrics.meter("total-failed-infotons")

      // bytes ingested since the last periodic report (reset each tick)
      var ingestedBytesInWindow = 0L
      // length of the last progress line, used to overwrite it on stderr
      var lastMessageSize = 0
      var eventPoller: Option[Cancellable] = None

      // optional "[label]" prefix for log lines
      val name = label.fold("")(name => s"[$name]")

      // bridges the scheduler thread back into the stage's execution context
      private var asyncCB: AsyncCallback[Unit] = _

      val formatter = java.text.NumberFormat.getNumberInstance

      override def preStart(): Unit = {
        asyncCB = getAsyncCallback { _ =>
          displayStats()
          resetStats()
        }

        eventPoller = Some(
          materializer.schedulePeriodically(initDelay, interval, new Runnable() { def run() = asyncCB.invoke(()) }))

        pull(in)
      }

      def resetStats(): Unit = {
        ingestedBytesInWindow = 0
      }

      /** Renders the current statistics line and emits it. */
      def displayStats(): Unit = {
        try {
          val now = System.currentTimeMillis()

          // print output message
          val message =
            s"[ingested: ${toHumanReadable(totalIngestedBytes.count)}] " +
              s"[ingested infotons: ${formatter.format(totalIngestedInfotons.count)} " +
              s"${formatter.format(totalIngestedInfotons.oneMinuteRate)}/sec] " +
              s"[failed infotons: ${formatter.format(totalFailedInfotons.count)}] ".padTo(25, ' ') +
              s"[rate=${toHumanReadable(totalIngestedBytes.oneMinuteRate)}/sec ".padTo(20, ' ') +
              s"average rate=${toHumanReadable(totalIngestedBytes.meanRate)}/sec] ".padTo(30, ' ') +
              s"[${DurationFormatUtils.formatDurationWords(now - start, true, true)}] "

          if (isStderr) System.err.print("\\r" * lastMessageSize + message)

          logger.debug(s"$name $message")

          lastMessageSize = message.size
        } catch {
          // was `case x =>`, which also caught fatal errors (OOM etc.);
          // NonFatal lets those propagate
          case NonFatal(x) => logger.error(s"error: $x", x)
        }
      }

      /** Folds a single ingest event into the counters. */
      def aggregateStats(ingestEvent: IngestEvent): Unit = ingestEvent match {
        case IngestSuccessEvent(sizeInBytes, numInfotons) =>
          totalIngestedBytes mark sizeInBytes
          totalIngestedInfotons mark numInfotons
          ingestedBytesInWindow += sizeInBytes
        case IngestFailEvent(numInfotons) =>
          totalFailedInfotons mark numInfotons
      }

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          val element = grab(in)
          aggregateStats(element)
          pull(in)
        }

        override def onUpstreamFailure(ex: Throwable): Unit = {
          failStage(ex)
          eventPoller.foreach(_.cancel())
        }

        override def onUpstreamFinish(): Unit = {
          val now = System.currentTimeMillis()

          // final summary, printed once on completion
          val message =
            s"ingested: ${toHumanReadable(totalIngestedBytes.count)} " +
              s"ingested infotons: ${formatter.format(totalIngestedInfotons.count)}".padTo(30, ' ') +
              s"failed infotons: ${formatter.format(totalFailedInfotons.count)}".padTo(25, ' ') +
              s" average rate=${totalIngestedBytes.meanRate}/sec".padTo(30, ' ') +
              s"[${DurationFormatUtils.formatDurationWords(now - start, true, true)}] "

          System.err.println("")
          System.err.println(message)
          completeStage()
          eventPoller.foreach(_.cancel())
        }
      })

      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          if (!hasBeenPulled(in)) pull(in)
        }
      })
    }
  }
}
| nruppin/CM-Well | server/cmwell-data-tools/src/main/scala/cmwell/tools/data/utils/akka/stats/IngesterStats.scala | Scala | apache-2.0 | 5,586 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.