code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.vitrivr.adampro.shared.catalog.catalogs
import org.vitrivr.adampro.shared.catalog.CatalogManager
import slick.driver.H2Driver.api._
/**
* ADAMpro
*
* Ivan Giangreco
* November 2016
*/
private[catalog] class OptionsCatalog(tag: Tag) extends Table[(String, Array[Byte])](tag, Some(CatalogManager.SCHEMA), "ap_options") {
  // Option name; unique per row.
  def key = column[String]("key", O.PrimaryKey)
  // Serialized option value; the byte format is decided by the callers (opaque here).
  def value = column[Array[Byte]]("value")
  /**
    * Default projection: maps a row to a (key, value) tuple.
    */
  override def * = (key, value)
  // Secondary index on key (key is already the primary key; presumably kept for lookups — TODO confirm it is needed).
  def idx = index("idx_options_key", key)
} | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/shared/catalog/catalogs/OptionsCatalog.scala | Scala | mit | 547 |
package scala.slick.jdbc
import java.sql.ResultSet
/** Represents a result set holdability mode. */
sealed abstract class ResultSetHoldability(val intValue: Int) { self =>
  /** Run a block of code on top of a JDBC session with this holdability mode */
  def apply[T](base: JdbcBackend#Session)(f: JdbcBackend#Session => T): T = f(base.forParameters(rsHoldability = self))

  /** Run a block of code on top of the dynamic, thread-local JDBC session with this holdability mode */
  def apply[T](f: => T)(implicit base: JdbcBackend#Session): T = apply(base)(_.asDynamicSession(f))

  /** Return this `ResultSetHoldability`, unless it is `Auto` in which case
    * the specified holdability mode is returned instead. */
  def withDefault(r: ResultSetHoldability) = this
}
object ResultSetHoldability {
  /** The current holdability mode of the JDBC driver.
    * Note: shares `intValue` 0 with [[Default]]; it is distinguished by overriding
    * `withDefault` to defer to the supplied fallback mode. */
  case object Auto extends ResultSetHoldability(0) {
    override def withDefault(r: ResultSetHoldability) = r
  }
  /** The default holdability mode of the JDBC driver */
  case object Default extends ResultSetHoldability(0)
  /** The holdability mode which indicates that result sets remain open when the
    * current transaction is committed. */
  case object HoldCursorsOverCommit extends ResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)
  /** The holdability mode which indicates that result sets are closed when the
    * current transaction is committed. */
  case object CloseCursorsAtCommit extends ResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT)
}
| dvinokurov/slick | src/main/scala/scala/slick/jdbc/ResultSetHoldability.scala | Scala | bsd-2-clause | 1,537 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.sdk
/**
 * Enumeration of the write operations supported when persisting aggregated values.
 * The "Acc"-prefixed entries are presumably the accumulating variants of their
 * counterparts (e.g. AccAvg vs Avg) — TODO confirm against the writers that consume them.
 */
object WriteOp extends Enumeration {
  type WriteOp = Value
  val FullText, Inc, IncBig, Set, Range, AccSet, Max, Min, Avg, AccAvg,
    Median, AccMedian, Variance, AccVariance, Stddev, AccStddev, Mode, EntityCount, WordCount = Value
}
| danielcsant/sparta | sdk/src/main/scala/com/stratio/sparta/sdk/WriteOp.scala | Scala | apache-2.0 | 887 |
package coursier.core
import scala.scalajs.js
import js.Dynamic.{global => g}
import org.scalajs.dom.raw.NodeList
import coursier.util.{SaxHandler, Xml}
package object compatibility {

  /** Returns the value cast to `A` wrapped in `Some`, or `None` if it is JS `undefined`. */
  def option[A](a: js.Dynamic): Option[A] =
    if (js.typeOf(a) == "undefined") None
    else Some(a.asInstanceOf[A])

  /** Like [[option]], but keeps the value as an untyped `js.Dynamic`. */
  def dynOption(a: js.Dynamic): Option[js.Dynamic] =
    if (js.typeOf(a) == "undefined") None
    else Some(a)

  private def between(c: Char, lower: Char, upper: Char) = lower <= c && c <= upper

  /** ASCII-only character predicates (plain range checks, no java.lang.Character). */
  implicit class RichChar(val c: Char) extends AnyVal {
    def letterOrDigit: Boolean = {
      between(c, '0', '9') || letter
    }
    def letter: Boolean = between(c, 'a', 'z') || between(c, 'A', 'Z')
  }

  /** Browser-provided `DOMParser` when available, otherwise the one from the Node.js "xmldom" module. */
  lazy val DOMParser = {
    val defn =
      if (js.typeOf(g.DOMParser) == "undefined") g.require("xmldom").DOMParser
      else g.DOMParser
    js.Dynamic.newInstance(defn)()
  }

  /** Browser-provided `XMLSerializer` when available, otherwise the one from the Node.js "xmldom" module. */
  lazy val XMLSerializer = {
    val defn =
      if (js.typeOf(g.XMLSerializer) == "undefined") g.require("xmldom").XMLSerializer
      else g.XMLSerializer
    js.Dynamic.newInstance(defn)()
  }

  /** The "sax" module, either globally provided or loaded via `require` on Node.js. */
  lazy val sax =
    if (js.typeOf(g.sax) == "undefined")
      g.require("sax")
    else
      g.sax

  // Can't find these from node (mirror the org.scalajs.dom.raw.Node constants)
  val ELEMENT_NODE = 1 // org.scalajs.dom.raw.Node.ELEMENT_NODE
  val TEXT_NODE = 3 // org.scalajs.dom.raw.Node.TEXT_NODE

  /** Wraps a DOM node in coursier's platform-independent [[Xml.Node]] interface. */
  def fromNode(node: org.scalajs.dom.raw.Node): Xml.Node = {

    val node0 = node.asInstanceOf[js.Dynamic]

    new Xml.Node {
      def label =
        option[String](node0.nodeName)
          .getOrElse("")
      def children =
        option[NodeList](node0.childNodes)
          .map(l => List.tabulate(l.length)(l.item).map(fromNode))
          .getOrElse(Nil)
      def attributes = {
        val attr = node.attributes
        (0 until attr.length)
          .map(idx => attr(idx))
          .map { a =>
            // (namespace URI or "", local name, value)
            (Option(node.lookupNamespaceURI(a.prefix)).getOrElse(""), a.localName, a.value)
          }
      }
      def isText =
        option[Int](node0.nodeType)
          .contains(TEXT_NODE)
      def textContent =
        option(node0.textContent)
          .getOrElse("")
      def isElement =
        option[Int](node0.nodeType)
          .contains(ELEMENT_NODE)
      override def toString =
        XMLSerializer.serializeToString(node).asInstanceOf[String]
    }
  }

  /**
   * Streams `str` through a strict SAX parser, forwarding open-tag, text and close-tag
   * events to `handler`, and returns the handler for chaining.
   */
  def xmlParseSax(str: String, handler: SaxHandler): handler.type = {

    val parser = sax.parser(true)

    // TODO surface parse errors to the caller instead of throwing NotImplementedError
    parser.onerror = { (e: js.Dynamic) =>
      ???
    }: js.Function1[js.Dynamic, Unit]

    parser.onopentag = { (node: js.Dynamic) =>
      handler.startElement(node.name.asInstanceOf[String])
    }: js.Function1[js.Dynamic, Unit]

    parser.ontext = { (t: String) =>
      val a = t.toCharArray
      handler.characters(a, 0, a.length)
    }: js.Function1[String, Unit]

    parser.onclosetag = { (tagName: String) =>
      handler.endElement(tagName)
    }: js.Function1[String, Unit]

    parser.write(str).close()

    handler
  }

  /**
   * Parses an XML document and returns its root element, skipping any leading
   * non-element nodes (e.g. comments). An empty input yields `Xml.Node.empty`.
   */
  def xmlParseDom(s: String): Either[String, Xml.Node] = {
    val doc = {
      if (s.isEmpty) None
      else {
        for {
          xmlDoc <- dynOption(DOMParser.parseFromString(s, "text/xml"))
          rootNodes <- dynOption(xmlDoc.childNodes)
          // From node, rootNodes.head is sometimes just a comment instead of the main root node
          // (tested with org.ow2.asm:asm-commons in CentralTests)
          rootNode <- rootNodes.asInstanceOf[js.Array[js.Dynamic]]
            .flatMap(option[org.scalajs.dom.raw.Node])
            .dropWhile(_.nodeType != ELEMENT_NODE)
            .headOption
        } yield rootNode
      }
    }
    Right(doc.fold(Xml.Node.empty)(fromNode))
  }

  /** Percent-encodes `s` via the JS global `encodeURIComponent`. */
  def encodeURIComponent(s: String): String =
    g.encodeURIComponent(s).asInstanceOf[String]

  // Marker used instead of a regex lookbehind — presumably because lookbehind support
  // is not portable across JS engines; TODO confirm against the JVM counterpart.
  def regexLookbehind: String = ":"

  def coloredOutput: Boolean =
    // most CIs support colored output now…
    true

  @deprecated("Unused internally, likely to be removed in the future", "2.0.0-RC6-19")
  def hasConsole: Boolean =
    true // no System.console in Scala.JS
}
| alexarchambault/coursier | modules/core/js/src/main/scala/coursier/core/compatibility/package.scala | Scala | apache-2.0 | 4,161 |
package ecommerce.shipping
import ecommerce.shipping.view.{ShipmentDao, ShipmentProjection}
import pl.newicom.dddd.messaging.event.EventStoreProvider
import pl.newicom.dddd.view.sql.{SqlViewStore, SqlViewUpdateConfig, SqlViewUpdateService}
import pl.newicom.eventstore.EventSourceProvider
import slick.dbio.DBIO
import slick.jdbc.JdbcProfile
class ShippingViewUpdateService(viewStore: SqlViewStore)(override implicit val profile: JdbcProfile)
  extends SqlViewUpdateService(viewStore) with EventStoreProvider with EventSourceProvider {

  // DAO backing the shipments read-side view; lazily instantiated.
  lazy val shipmentDao: ShipmentDao = new ShipmentDao()

  // Single view-update pipeline: project events from the shipping office stream
  // into the shipments view via ShipmentProjection.
  override def vuConfigs: Seq[SqlViewUpdateConfig] = {
    List(
      SqlViewUpdateConfig("shipping-shipments", ShippingOfficeId, new ShipmentProjection(shipmentDao))
    )
  }

  // Run the parent initialization action, then ensure the shipments schema exists
  // (>> sequences the two DBIO actions, discarding the first result).
  override def viewUpdateInitAction: DBIO[Unit] = {
    super.viewUpdateInitAction >>
      shipmentDao.ensureSchemaCreated
  }
} | pawelkaczor/ddd-leaven-akka-v2 | shipping/read-back/src/main/scala/ecommerce/shipping/ShippingViewUpdateService.scala | Scala | mit | 908 |
package cromwell
import akka.testkit._
import wdl4s.types.{WdlMapType, WdlStringType, WdlArrayType}
import wdl4s.values.{WdlMap, WdlArray, WdlString}
import cromwell.CromwellSpec.DockerTest
import cromwell.util.SampleWdl
import scala.language.postfixOps
class WdlFunctionsAtWorkflowLevelSpec extends CromwellTestkitSpec {
  // Expected value of the read_map() output: a String -> String WDL map.
  val outputMap = WdlMap(WdlMapType(WdlStringType, WdlStringType), Map(
    WdlString("k1") -> WdlString("v1"),
    WdlString("k2") -> WdlString("v2"),
    WdlString("k3") -> WdlString("v3")
  ))

  "A workflow with a read_lines() and read_map() at the workflow level" should {
    "execute those functions properly" in {
      // Runs the sample workflow and asserts both call outputs:
      // w.a.x comes from read_lines(), w.a.y from read_map().
      runWdlAndAssertOutputs(
        sampleWdl = SampleWdl.WdlFunctionsAtWorkflowLevel,
        eventFilter = EventFilter.info(pattern = s"starting calls: w.a", occurrences = 1),
        expectedOutputs = Map(
          "w.a.x" -> WdlString("one two three four five"),
          "w.a.y" -> outputMap
        )
      )
    }
  }
}
| dgtester/cromwell | src/test/scala/cromwell/WdlFunctionsAtWorkflowLevelSpec.scala | Scala | bsd-3-clause | 978 |
package com.tritondigital.consul.http.client
import java.util.concurrent.atomic.AtomicInteger
import org.scalatest.{Matchers, WordSpec}
class RoundRobinSelectorTest extends WordSpec with Matchers {

  val list = List(1, 2, 3)

  "Node Selector" when {
    "starting with a counter of 0" should {
      "return the nodes in order" in {
        val selector = new RoundRobinSelector[Int]
        selector.select(list) should be (Some(1))
        selector.select(list) should be (Some(2))
        selector.select(list) should be (Some(3))
        // Wraps around after the last element.
        selector.select(list) should be (Some(1))
      }
    }
    "starting with a counter of Integer.MAX_VALUE" should {
      "reverse the order after reset" in {
        // Pins the behavior at counter overflow: the observed sequence
        // changes direction once the counter wraps past Integer.MAX_VALUE.
        val selector = new RoundRobinSelector[Int](new AtomicInteger(Integer.MAX_VALUE))
        selector.select(list) should be (Some(2))
        selector.select(list) should be (Some(3))
        selector.select(list) should be (Some(2))
        selector.select(list) should be (Some(1))
      }
    }
    "starting with a counter of -1" should {
      "reverse the order after 0" in {
        // Pins the behavior for negative counters crossing zero.
        val selector = new RoundRobinSelector[Int](new AtomicInteger(-1))
        selector.select(list) should be (Some(2))
        selector.select(list) should be (Some(1))
        selector.select(list) should be (Some(2))
        selector.select(list) should be (Some(3))
      }
    }
    "given an empty list" should {
      "always return None" in {
        val selector = new RoundRobinSelector[Int]
        selector.select(List.empty) should be (None)
        selector.select(List.empty) should be (None)
      }
    }
  }
}
| tritondigital/ConsulHttpClient | src/test/scala/com/tritondigital/consul/http/client/RoundRobinSelectorTest.scala | Scala | mit | 1,629 |
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair.payment
import akka.actor.ActorRef
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.DeterministicWallet.ExtendedPrivateKey
import fr.acinq.bitcoin.{Block, ByteVector32, Crypto, DeterministicWallet, OutPoint, Satoshi, SatoshiLong, TxOut}
import fr.acinq.eclair.FeatureSupport.{Mandatory, Optional}
import fr.acinq.eclair.Features._
import fr.acinq.eclair.channel._
import fr.acinq.eclair.crypto.Sphinx
import fr.acinq.eclair.payment.IncomingPaymentPacket.{ChannelRelayPacket, FinalPacket, NodeRelayPacket, decrypt}
import fr.acinq.eclair.payment.OutgoingPaymentPacket._
import fr.acinq.eclair.router.Router.{ChannelHop, NodeHop}
import fr.acinq.eclair.transactions.Transactions.InputInfo
import fr.acinq.eclair.wire.protocol.OnionPaymentPayloadTlv.{AmountToForward, OutgoingCltv, PaymentData}
import fr.acinq.eclair.wire.protocol.PaymentOnion.{ChannelRelayTlvPayload, FinalTlvPayload}
import fr.acinq.eclair.wire.protocol._
import fr.acinq.eclair.{CltvExpiry, CltvExpiryDelta, Features, InvoiceFeature, MilliSatoshi, MilliSatoshiLong, ShortChannelId, TestConstants, TimestampSecondLong, nodeFee, randomBytes32, randomKey}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
import scodec.Attempt
import scodec.bits.{ByteVector, HexStringSyntax}
import java.util.UUID
import scala.util.Success
/**
* Created by PM on 31/05/2016.
*/
class PaymentPacketSpec extends AnyFunSuite with BeforeAndAfterAll {

  import PaymentPacketSpec._

  implicit val log: akka.event.LoggingAdapter = akka.event.NoLogging

  test("compute fees") {
    val feeBaseMsat = 150000 msat
    val feeProportionalMillionth = 4L
    val htlcAmountMsat = 42000000 msat
    // spec: fee-base-msat + htlc-amount-msat * fee-proportional-millionths / 1000000
    val ref = feeBaseMsat + htlcAmountMsat * feeProportionalMillionth / 1000000
    val fee = nodeFee(feeBaseMsat, feeProportionalMillionth, htlcAmountMsat)
    assert(ref === fee)
  }

  // Builds a full a->...->e payment onion and checks the first-hop amount/expiry
  // before handing it to testPeelOnion for hop-by-hop validation.
  def testBuildOnion(): Unit = {
    val finalPayload = FinalTlvPayload(TlvStream(AmountToForward(finalAmount), OutgoingCltv(finalExpiry), PaymentData(paymentSecret, 0 msat)))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, hops, finalPayload)
    assert(firstAmount === amount_ab)
    assert(firstExpiry === expiry_ab)
    assert(onion.packet.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)
    // let's peel the onion
    testPeelOnion(onion.packet)
  }

  // Decrypts the onion at each hop (b, c, d) and the final node (e), asserting the
  // per-hop amounts, expiries, channel ids, fees and deltas along the a->b->c->d->e route.
  def testPeelOnion(packet_b: OnionRoutingPacket): Unit = {
    val add_b = UpdateAddHtlc(randomBytes32(), 0, amount_ab, paymentHash, expiry_ab, packet_b)
    val Right(relay_b@ChannelRelayPacket(add_b2, payload_b, packet_c)) = decrypt(add_b, priv_b.privateKey)
    assert(add_b2 === add_b)
    assert(packet_c.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)
    assert(payload_b.amountToForward === amount_bc)
    assert(payload_b.outgoingCltv === expiry_bc)
    assert(payload_b.outgoingChannelId === channelUpdate_bc.shortChannelId)
    assert(relay_b.relayFeeMsat === fee_b)
    assert(relay_b.expiryDelta === channelUpdate_bc.cltvExpiryDelta)

    val add_c = UpdateAddHtlc(randomBytes32(), 1, amount_bc, paymentHash, expiry_bc, packet_c)
    val Right(relay_c@ChannelRelayPacket(add_c2, payload_c, packet_d)) = decrypt(add_c, priv_c.privateKey)
    assert(add_c2 === add_c)
    assert(packet_d.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)
    assert(payload_c.amountToForward === amount_cd)
    assert(payload_c.outgoingCltv === expiry_cd)
    assert(payload_c.outgoingChannelId === channelUpdate_cd.shortChannelId)
    assert(relay_c.relayFeeMsat === fee_c)
    assert(relay_c.expiryDelta === channelUpdate_cd.cltvExpiryDelta)

    val add_d = UpdateAddHtlc(randomBytes32(), 2, amount_cd, paymentHash, expiry_cd, packet_d)
    val Right(relay_d@ChannelRelayPacket(add_d2, payload_d, packet_e)) = decrypt(add_d, priv_d.privateKey)
    assert(add_d2 === add_d)
    assert(packet_e.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)
    assert(payload_d.amountToForward === amount_de)
    assert(payload_d.outgoingCltv === expiry_de)
    assert(payload_d.outgoingChannelId === channelUpdate_de.shortChannelId)
    assert(relay_d.relayFeeMsat === fee_d)
    assert(relay_d.expiryDelta === channelUpdate_de.cltvExpiryDelta)

    val add_e = UpdateAddHtlc(randomBytes32(), 2, amount_de, paymentHash, expiry_de, packet_e)
    val Right(FinalPacket(add_e2, payload_e)) = decrypt(add_e, priv_e.privateKey)
    assert(add_e2 === add_e)
    assert(payload_e.amount === finalAmount)
    assert(payload_e.totalAmount === finalAmount)
    assert(payload_e.expiry === finalExpiry)
    assert(payload_e.paymentSecret === paymentSecret)
  }

  test("build onion with final payload") {
    testBuildOnion()
  }

  test("build a command including the onion") {
    val Success((add, _)) = buildCommand(ActorRef.noSender, Upstream.Local(UUID.randomUUID), paymentHash, hops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    assert(add.amount > finalAmount)
    assert(add.cltvExpiry === finalExpiry + channelUpdate_de.cltvExpiryDelta + channelUpdate_cd.cltvExpiryDelta + channelUpdate_bc.cltvExpiryDelta)
    assert(add.paymentHash === paymentHash)
    assert(add.onion.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)
    // let's peel the onion
    testPeelOnion(add.onion)
  }

  test("build a command with no hops") {
    // Single-hop route: the final payload (including payment metadata) goes directly to b.
    val Success((add, _)) = buildCommand(ActorRef.noSender, Upstream.Local(UUID.randomUUID()), paymentHash, hops.take(1), PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, Some(paymentMetadata)))
    assert(add.amount === finalAmount)
    assert(add.cltvExpiry === finalExpiry)
    assert(add.paymentHash === paymentHash)
    assert(add.onion.payload.length === PaymentOnionCodecs.paymentOnionPayloadLength)

    // let's peel the onion
    val add_b = UpdateAddHtlc(randomBytes32(), 0, finalAmount, paymentHash, finalExpiry, add.onion)
    val Right(FinalPacket(add_b2, payload_b)) = decrypt(add_b, priv_b.privateKey)
    assert(add_b2 === add_b)
    assert(payload_b.amount === finalAmount)
    assert(payload_b.totalAmount === finalAmount)
    assert(payload_b.expiry === finalExpiry)
    assert(payload_b.paymentSecret === paymentSecret)
    assert(payload_b.paymentMetadata === Some(paymentMetadata))
  }

  test("build a trampoline payment") {
    // simple trampoline route to e:
    //             .--.   .--.
    //            /    \ /    \
    // a -> b -> c      d      e

    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createMultiPartPayload(finalAmount, finalAmount * 3, finalExpiry, paymentSecret, Some(hex"010203")))
    assert(amount_ac === amount_bc)
    assert(expiry_ac === expiry_bc)

    // The trampoline onion is carried inside a regular payment onion for the a -> b -> c channel route.
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    assert(firstAmount === amount_ab)
    assert(firstExpiry === expiry_ab)

    val add_b = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet)
    val Right(ChannelRelayPacket(add_b2, payload_b, packet_c)) = decrypt(add_b, priv_b.privateKey)
    assert(add_b2 === add_b)
    assert(payload_b === ChannelRelayTlvPayload(channelUpdate_bc.shortChannelId, amount_bc, expiry_bc))

    val add_c = UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc, packet_c)
    val Right(NodeRelayPacket(add_c2, outer_c, inner_c, packet_d)) = decrypt(add_c, priv_c.privateKey)
    assert(add_c2 === add_c)
    assert(outer_c.amount === amount_bc)
    assert(outer_c.totalAmount === amount_bc)
    assert(outer_c.expiry === expiry_bc)
    assert(inner_c.amountToForward === amount_cd)
    assert(inner_c.outgoingCltv === expiry_cd)
    assert(inner_c.outgoingNodeId === d)
    assert(inner_c.invoiceRoutingInfo === None)
    assert(inner_c.invoiceFeatures === None)
    assert(inner_c.paymentSecret === None)
    assert(inner_c.paymentMetadata === None)

    // c forwards the trampoline payment to d.
    val Success((amount_d, expiry_d, onion_d)) = buildPaymentPacket(paymentHash, ChannelHop(c, d, channelUpdate_cd) :: Nil, PaymentOnion.createTrampolinePayload(amount_cd, amount_cd, expiry_cd, randomBytes32(), packet_d))
    assert(amount_d === amount_cd)
    assert(expiry_d === expiry_cd)
    val add_d = UpdateAddHtlc(randomBytes32(), 3, amount_d, paymentHash, expiry_d, onion_d.packet)
    val Right(NodeRelayPacket(add_d2, outer_d, inner_d, packet_e)) = decrypt(add_d, priv_d.privateKey)
    assert(add_d2 === add_d)
    assert(outer_d.amount === amount_cd)
    assert(outer_d.totalAmount === amount_cd)
    assert(outer_d.expiry === expiry_cd)
    assert(inner_d.amountToForward === amount_de)
    assert(inner_d.outgoingCltv === expiry_de)
    assert(inner_d.outgoingNodeId === e)
    assert(inner_d.invoiceRoutingInfo === None)
    assert(inner_d.invoiceFeatures === None)
    assert(inner_d.paymentSecret === None)
    assert(inner_d.paymentMetadata === None)

    // d forwards the trampoline payment to e.
    val Success((amount_e, expiry_e, onion_e)) = buildPaymentPacket(paymentHash, ChannelHop(d, e, channelUpdate_de) :: Nil, PaymentOnion.createTrampolinePayload(amount_de, amount_de, expiry_de, randomBytes32(), packet_e))
    assert(amount_e === amount_de)
    assert(expiry_e === expiry_de)
    val add_e = UpdateAddHtlc(randomBytes32(), 4, amount_e, paymentHash, expiry_e, onion_e.packet)
    val Right(FinalPacket(add_e2, payload_e)) = decrypt(add_e, priv_e.privateKey)
    assert(add_e2 === add_e)
    assert(payload_e === FinalTlvPayload(TlvStream(AmountToForward(finalAmount), OutgoingCltv(finalExpiry), PaymentData(paymentSecret, finalAmount * 3), OnionPaymentPayloadTlv.PaymentMetadata(hex"010203"))))
  }

  test("build a trampoline payment with non-trampoline recipient") {
    // simple trampoline route to e where e doesn't support trampoline:
    //             .--.
    //            /    \
    // a -> b -> c      d -> e

    val routingHints = List(List(Bolt11Invoice.ExtraHop(randomKey().publicKey, ShortChannelId(42), 10 msat, 100, CltvExpiryDelta(144))))
    val invoiceFeatures = Features[InvoiceFeature](VariableLengthOnion -> Mandatory, PaymentSecret -> Mandatory, BasicMultiPartPayment -> Optional)
    val invoice = Bolt11Invoice(Block.RegtestGenesisBlock.hash, Some(finalAmount), paymentHash, priv_a.privateKey, Left("#reckless"), CltvExpiryDelta(18), None, None, routingHints, features = invoiceFeatures, paymentMetadata = Some(hex"010203"))
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolineToLegacyPacket(invoice, trampolineHops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, invoice.paymentSecret.get, None))
    assert(amount_ac === amount_bc)
    assert(expiry_ac === expiry_bc)

    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    assert(firstAmount === amount_ab)
    assert(firstExpiry === expiry_ab)

    val add_b = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet)
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(add_b, priv_b.privateKey)

    val add_c = UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc, packet_c)
    val Right(NodeRelayPacket(_, outer_c, inner_c, packet_d)) = decrypt(add_c, priv_c.privateKey)
    assert(outer_c.amount === amount_bc)
    assert(outer_c.totalAmount === amount_bc)
    assert(outer_c.expiry === expiry_bc)
    // the outer payment secret is freshly generated, not the invoice's
    assert(outer_c.paymentSecret !== invoice.paymentSecret)
    assert(inner_c.amountToForward === amount_cd)
    assert(inner_c.outgoingCltv === expiry_cd)
    assert(inner_c.outgoingNodeId === d)
    assert(inner_c.invoiceRoutingInfo === None)
    assert(inner_c.invoiceFeatures === None)
    assert(inner_c.paymentSecret === None)

    // c forwards the trampoline payment to d.
    val Success((amount_d, expiry_d, onion_d)) = buildPaymentPacket(paymentHash, ChannelHop(c, d, channelUpdate_cd) :: Nil, PaymentOnion.createTrampolinePayload(amount_cd, amount_cd, expiry_cd, randomBytes32(), packet_d))
    assert(amount_d === amount_cd)
    assert(expiry_d === expiry_cd)
    val add_d = UpdateAddHtlc(randomBytes32(), 3, amount_d, paymentHash, expiry_d, onion_d.packet)
    val Right(NodeRelayPacket(_, outer_d, inner_d, _)) = decrypt(add_d, priv_d.privateKey)
    assert(outer_d.amount === amount_cd)
    assert(outer_d.totalAmount === amount_cd)
    assert(outer_d.expiry === expiry_cd)
    assert(outer_d.paymentSecret !== invoice.paymentSecret)
    // d is the last trampoline node: it receives the invoice details needed to reach the legacy recipient e.
    assert(inner_d.amountToForward === finalAmount)
    assert(inner_d.outgoingCltv === expiry_de)
    assert(inner_d.outgoingNodeId === e)
    assert(inner_d.totalAmount === finalAmount)
    assert(inner_d.paymentSecret === invoice.paymentSecret)
    assert(inner_d.paymentMetadata === Some(hex"010203"))
    assert(inner_d.invoiceFeatures === Some(hex"024100")) // var_onion_optin, payment_secret, basic_mpp
    assert(inner_d.invoiceRoutingInfo === Some(routingHints))
  }

  test("fail to build a trampoline payment when too much invoice data is provided") {
    // 7 extra hops exceed what fits in the trampoline onion payload.
    val routingHintOverflow = List(List.fill(7)(Bolt11Invoice.ExtraHop(randomKey().publicKey, ShortChannelId(1), 10 msat, 100, CltvExpiryDelta(12))))
    val invoice = Bolt11Invoice(Block.RegtestGenesisBlock.hash, Some(finalAmount), paymentHash, priv_a.privateKey, Left("#reckless"), CltvExpiryDelta(18), None, None, routingHintOverflow)
    assert(buildTrampolineToLegacyPacket(invoice, trampolineHops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, invoice.paymentSecret.get, invoice.paymentMetadata)).isFailure)
  }

  test("fail to decrypt when the onion is invalid") {
    // Corrupt the onion payload (reversed bytes) so the HMAC check fails at the first hop.
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, hops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val add = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet.copy(payload = onion.packet.payload.reverse))
    val Left(failure) = decrypt(add, priv_b.privateKey)
    assert(failure.isInstanceOf[InvalidOnionHmac])
  }

  test("fail to decrypt when the trampoline onion is invalid") {
    // Corrupt only the inner (trampoline) onion: b relays fine, c's decryption fails.
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createMultiPartPayload(finalAmount, finalAmount * 2, finalExpiry, paymentSecret, None))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet.copy(payload = trampolineOnion.packet.payload.reverse)))
    val add_b = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet)
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(add_b, priv_b.privateKey)
    val add_c = UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc, packet_c)
    val Left(failure) = decrypt(add_c, priv_c.privateKey)
    assert(failure.isInstanceOf[InvalidOnionHmac])
  }

  test("fail to decrypt when payment hash doesn't match associated data") {
    // The payment hash is the onion's associated data: a mismatch invalidates the HMAC.
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash.reverse, hops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val add = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet)
    val Left(failure) = decrypt(add, priv_b.privateKey)
    assert(failure.isInstanceOf[InvalidOnionHmac])
  }

  test("fail to decrypt at the final node when amount has been modified by next-to-last node") {
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, hops.take(1), PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val add = UpdateAddHtlc(randomBytes32(), 1, firstAmount - 100.msat, paymentHash, firstExpiry, onion.packet)
    val Left(failure) = decrypt(add, priv_b.privateKey)
    assert(failure === FinalIncorrectHtlcAmount(firstAmount - 100.msat))
  }

  test("fail to decrypt at the final node when expiry has been modified by next-to-last node") {
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, hops.take(1), PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val add = UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry - CltvExpiryDelta(12), onion.packet)
    val Left(failure) = decrypt(add, priv_b.privateKey)
    assert(failure === FinalIncorrectCltvExpiry(firstExpiry - CltvExpiryDelta(12)))
  }

  test("fail to decrypt at the final trampoline node when amount has been modified by next-to-last trampoline") {
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createMultiPartPayload(finalAmount, finalAmount, finalExpiry, paymentSecret, None))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet), priv_b.privateKey)
    val Right(NodeRelayPacket(_, _, _, packet_d)) = decrypt(UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc, packet_c), priv_c.privateKey)
    // c forwards the trampoline payment to d.
    val Success((amount_d, expiry_d, onion_d)) = buildPaymentPacket(paymentHash, ChannelHop(c, d, channelUpdate_cd) :: Nil, PaymentOnion.createTrampolinePayload(amount_cd, amount_cd, expiry_cd, randomBytes32(), packet_d))
    val Right(NodeRelayPacket(_, _, _, packet_e)) = decrypt(UpdateAddHtlc(randomBytes32(), 3, amount_d, paymentHash, expiry_d, onion_d.packet), priv_d.privateKey)
    // d forwards an invalid amount to e (the outer total amount doesn't match the inner amount).
    val invalidTotalAmount = amount_de + 100.msat
    val Success((amount_e, expiry_e, onion_e)) = buildPaymentPacket(paymentHash, ChannelHop(d, e, channelUpdate_de) :: Nil, PaymentOnion.createTrampolinePayload(amount_de, invalidTotalAmount, expiry_de, randomBytes32(), packet_e))
    val Left(failure) = decrypt(UpdateAddHtlc(randomBytes32(), 4, amount_e, paymentHash, expiry_e, onion_e.packet), priv_e.privateKey)
    assert(failure === FinalIncorrectHtlcAmount(invalidTotalAmount))
  }

  test("fail to decrypt at the final trampoline node when expiry has been modified by next-to-last trampoline") {
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createMultiPartPayload(finalAmount, finalAmount, finalExpiry, paymentSecret, None))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet), priv_b.privateKey)
    val Right(NodeRelayPacket(_, _, _, packet_d)) = decrypt(UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc, packet_c), priv_c.privateKey)
    // c forwards the trampoline payment to d.
    val Success((amount_d, expiry_d, onion_d)) = buildPaymentPacket(paymentHash, ChannelHop(c, d, channelUpdate_cd) :: Nil, PaymentOnion.createTrampolinePayload(amount_cd, amount_cd, expiry_cd, randomBytes32(), packet_d))
    val Right(NodeRelayPacket(_, _, _, packet_e)) = decrypt(UpdateAddHtlc(randomBytes32(), 3, amount_d, paymentHash, expiry_d, onion_d.packet), priv_d.privateKey)
    // d forwards an invalid expiry to e (the outer expiry doesn't match the inner expiry).
    val invalidExpiry = expiry_de - CltvExpiryDelta(12)
    val Success((amount_e, expiry_e, onion_e)) = buildPaymentPacket(paymentHash, ChannelHop(d, e, channelUpdate_de) :: Nil, PaymentOnion.createTrampolinePayload(amount_de, amount_de, invalidExpiry, randomBytes32(), packet_e))
    val Left(failure) = decrypt(UpdateAddHtlc(randomBytes32(), 4, amount_e, paymentHash, expiry_e, onion_e.packet), priv_e.privateKey)
    assert(failure === FinalIncorrectCltvExpiry(invalidExpiry))
  }

  test("fail to decrypt at intermediate trampoline node when amount is invalid") {
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet), priv_b.privateKey)
    // A trampoline relay is very similar to a final node: it can validate that the HTLC amount matches the onion outer amount.
    val Left(failure) = decrypt(UpdateAddHtlc(randomBytes32(), 2, amount_bc - 100.msat, paymentHash, expiry_bc, packet_c), priv_c.privateKey)
    assert(failure === FinalIncorrectHtlcAmount(amount_bc - 100.msat))
  }

  test("fail to decrypt at intermediate trampoline node when expiry is invalid") {
    val Success((amount_ac, expiry_ac, trampolineOnion)) = buildTrampolinePacket(paymentHash, trampolineHops, PaymentOnion.createSinglePartPayload(finalAmount, finalExpiry, paymentSecret, None))
    val Success((firstAmount, firstExpiry, onion)) = buildPaymentPacket(paymentHash, trampolineChannelHops, PaymentOnion.createTrampolinePayload(amount_ac, amount_ac, expiry_ac, randomBytes32(), trampolineOnion.packet))
    val Right(ChannelRelayPacket(_, _, packet_c)) = decrypt(UpdateAddHtlc(randomBytes32(), 1, firstAmount, paymentHash, firstExpiry, onion.packet), priv_b.privateKey)
    // A trampoline relay is very similar to a final node: it can validate that the HTLC expiry matches the onion outer expiry.
    val Left(failure) = decrypt(UpdateAddHtlc(randomBytes32(), 2, amount_bc, paymentHash, expiry_bc - CltvExpiryDelta(12), packet_c), priv_c.privateKey)
    assert(failure === FinalIncorrectCltvExpiry(expiry_bc - CltvExpiryDelta(12)))
  }

}
/**
 * Shared fixtures for the payment packet tests: node keys, channel updates,
 * a simple 4-hop channel route and a trampoline route, plus helpers to build
 * onions and stub commitments.
 */
object PaymentPacketSpec {

  /** Build onion from arbitrary tlv stream (potentially invalid). */
  def buildTlvOnion(packetPayloadLength: Int, nodes: Seq[PublicKey], payloads: Seq[TlvStream[OnionPaymentPayloadTlv]], associatedData: ByteVector32): OnionRoutingPacket = {
    require(nodes.size == payloads.size)
    val sessionKey = randomKey()
    // Serialize each per-hop tlv payload; a serialization failure aborts the test.
    val payloadsBin: Seq[ByteVector] = payloads.map(PaymentOnionCodecs.tlvPerHopPayloadCodec.encode)
      .map {
        case Attempt.Successful(bitVector) => bitVector.bytes
        case Attempt.Failure(cause) => throw new RuntimeException(s"serialization error: $cause")
      }
    Sphinx.create(sessionKey, packetPayloadLength, nodes, payloadsBin, Some(associatedData)).get.packet
  }

  /**
   * Creates a stub [[Commitments]] whose available balances and capacity are pinned
   * to the given test values (negative balances are clamped to 0 msat). Most channel
   * parameters are nulled out: only the overridden members are meant to be used.
   */
  def makeCommitments(channelId: ByteVector32, testAvailableBalanceForSend: MilliSatoshi = 50000000 msat, testAvailableBalanceForReceive: MilliSatoshi = 50000000 msat, testCapacity: Satoshi = 100000 sat): Commitments = {
    val params = LocalParams(null, null, null, null, null, null, null, 0, isFunder = true, null, None, null)
    val remoteParams = RemoteParams(randomKey().publicKey, null, null, null, null, null, maxAcceptedHtlcs = 0, null, null, null, null, null, null, None)
    val commitInput = InputInfo(OutPoint(randomBytes32(), 1), TxOut(testCapacity, Nil), Nil)
    val channelFlags = ChannelFlags.Private
    new Commitments(channelId, ChannelConfig.standard, ChannelFeatures(), params, remoteParams, channelFlags, null, null, null, null, 0, 0, Map.empty, null, commitInput, null) {
      override lazy val availableBalanceForSend: MilliSatoshi = testAvailableBalanceForSend.max(0 msat)
      override lazy val availableBalanceForReceive: MilliSatoshi = testAvailableBalanceForReceive.max(0 msat)
    }
  }

  // Fresh random extended private key (new value on each call).
  def randomExtendedPrivateKey: ExtendedPrivateKey = DeterministicWallet.generate(randomBytes32())

  // Node keys: a and b reuse the standard test constants, c/d/e are random.
  val (priv_a, priv_b, priv_c, priv_d, priv_e) = (TestConstants.Alice.nodeKeyManager.nodeKey, TestConstants.Bob.nodeKeyManager.nodeKey, randomExtendedPrivateKey, randomExtendedPrivateKey, randomExtendedPrivateKey)
  val (a, b, c, d, e) = (priv_a.publicKey, priv_b.publicKey, priv_c.publicKey, priv_d.publicKey, priv_e.publicKey)

  // Dummy signature so channel updates are well-formed; it is never verified here.
  val sig = Crypto.sign(Crypto.sha256(ByteVector.empty), priv_a.privateKey)
  val defaultChannelUpdate = ChannelUpdate(sig, Block.RegtestGenesisBlock.hash, ShortChannelId(0), 0 unixsec, ChannelUpdate.ChannelFlags.DUMMY, CltvExpiryDelta(0), 42000 msat, 0 msat, 0, Some(500000000 msat))
  // Per-channel relay policies (expiry delta + fees), one per hop.
  val channelUpdate_ab = defaultChannelUpdate.copy(shortChannelId = ShortChannelId(1), cltvExpiryDelta = CltvExpiryDelta(4), feeBaseMsat = 642000 msat, feeProportionalMillionths = 7)
  val channelUpdate_bc = defaultChannelUpdate.copy(shortChannelId = ShortChannelId(2), cltvExpiryDelta = CltvExpiryDelta(5), feeBaseMsat = 153000 msat, feeProportionalMillionths = 4)
  val channelUpdate_cd = defaultChannelUpdate.copy(shortChannelId = ShortChannelId(3), cltvExpiryDelta = CltvExpiryDelta(10), feeBaseMsat = 60000 msat, feeProportionalMillionths = 1)
  val channelUpdate_de = defaultChannelUpdate.copy(shortChannelId = ShortChannelId(4), cltvExpiryDelta = CltvExpiryDelta(7), feeBaseMsat = 766000 msat, feeProportionalMillionths = 10)

  // simple route a -> b -> c -> d -> e
  val hops =
    ChannelHop(a, b, channelUpdate_ab) ::
      ChannelHop(b, c, channelUpdate_bc) ::
      ChannelHop(c, d, channelUpdate_cd) ::
      ChannelHop(d, e, channelUpdate_de) :: Nil

  val finalAmount = 42000000 msat
  val currentBlockCount = 400000
  val finalExpiry = CltvExpiry(currentBlockCount) + Channel.MIN_CLTV_EXPIRY_DELTA
  val paymentPreimage = randomBytes32()
  val paymentHash = Crypto.sha256(paymentPreimage)
  val paymentSecret = randomBytes32()
  val paymentMetadata = randomBytes32().bytes

  // Amounts and expiries are accumulated backwards, from the recipient e to the
  // sender a, adding each intermediate channel's fee and expiry delta.
  val expiry_de = finalExpiry
  val amount_de = finalAmount
  val fee_d = nodeFee(channelUpdate_de, amount_de)
  val expiry_cd = expiry_de + channelUpdate_de.cltvExpiryDelta
  val amount_cd = amount_de + fee_d
  val fee_c = nodeFee(channelUpdate_cd, amount_cd)
  val expiry_bc = expiry_cd + channelUpdate_cd.cltvExpiryDelta
  val amount_bc = amount_cd + fee_c
  val fee_b = nodeFee(channelUpdate_bc, amount_bc)
  val expiry_ab = expiry_bc + channelUpdate_bc.cltvExpiryDelta
  val amount_ab = amount_bc + fee_b

  // simple trampoline route to e:
  //             .--.   .--.
  //            /    \ /    \
  // a -> b -> c      d      e
  val trampolineHops =
    NodeHop(a, c, channelUpdate_ab.cltvExpiryDelta + channelUpdate_bc.cltvExpiryDelta, fee_b) ::
      NodeHop(c, d, channelUpdate_cd.cltvExpiryDelta, fee_c) ::
      NodeHop(d, e, channelUpdate_de.cltvExpiryDelta, fee_d) :: Nil

  // Channel route used to reach the first trampoline node c.
  val trampolineChannelHops =
    ChannelHop(a, b, channelUpdate_ab) ::
      ChannelHop(b, c, channelUpdate_bc) :: Nil
}
| ACINQ/eclair | eclair-core/src/test/scala/fr/acinq/eclair/payment/PaymentPacketSpec.scala | Scala | apache-2.0 | 28,176 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tools.data.downloader
import java.nio.file.{Files, Paths}
import akka.stream.scaladsl.Sink
import cmwell.tools.data.downloader.consumer.Downloader
import cmwell.tools.data.utils.ArgsManipulations._
import cmwell.tools.data.utils.akka.Implicits._
import cmwell.tools.data.utils.akka._
import cmwell.tools.data.utils.akka.stats.DownloaderStats
import cmwell.tools.data.utils.chunkers.GroupChunker
import cmwell.tools.data.utils.ops._
import nl.grons.metrics.scala.InstrumentedBuilder
import org.rogach.scallop.ScallopConf
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * Command-line entry point that consumes data from a CM-Well host and prints the
 * downloaded infotons to stdout. The consumption position token is persisted to an
 * optional state file so runs can be resumed; statistics are reported to stderr.
 */
object ConsumerMain extends App with InstrumentedBuilder{

  /** Command-line options parsed with Scallop. */
  object Opts extends ScallopConf(args) {
    version(s"cm-well downloader ${getVersionFromManifest()} (c) 2015")
    banner(""" |usage: --host <HOST> """.stripMargin)
    footer("..........................................")

    val host = opt[String]("host", descr = "cm-well host name server", required = true)
    val path = opt[String]("path", short = 'p', descr = "path in cm-well", default = Some("/"))
    val params = opt[String]("params", descr = "params string in cm-well", default = Some(""))
    val qp = opt[String]("qp", descr = "query params in cm-well", default = Some(""))
    val recursive = opt[Boolean]("recursive", short = 'r', descr = "flag to get download data recursively", default = Some(false))
    val format = opt[String]("format", descr = "desired record format (i.e., json, jsonld, jsonldq, n3, ntriples, nquads, trig, rdfxml)", default = Some("trig"))
    val state = opt[String]("state", short = 's', descr = "position state file")
    val follow = opt[String]("follow", short = 'f', descr = "continue consumption data after given update frequency (i.e., 5.seconds, 10.minutes etc.)")
    val bulk = opt[Boolean]("bulk", default = Some(false), descr = "use bulk consumer mode in download")
    val numConnections = opt[Int]("num-connections", descr = "number of http connections to open")
    val indexTime = opt[Long]("index-time", descr = "index-time lower bound", default = Some(0))
    verify()
  }

  // Optional file where the current consumption token is persisted between runs.
  val stateFilePath = Opts.state.toOption.map(Paths.get(_))

  // resize akka http connection pool
  Opts.numConnections.toOption.map { numConnections =>
    System.setProperty("akka.http.host-connection-pool.max-connections", numConnections.toString)
  }

  val metricRegistry = new com.codahale.metrics.MetricRegistry()
  val metricDownloading = metrics.timer("consuming")
  val totalDownloadedBytes = metrics.counter("received-bytes")
  // NOTE(review): the following mutable counters are declared but never updated
  // anywhere in this object — they look like leftovers; confirm before removing.
  var bytesPerToken = 0L
  val bytesPerTokenMeter = metrics.meter("bytes-per-token")
  var bytesInWindow = 0L
  val metricRateBytes = metrics.meter("rate-bytes")
  var nextTimeToReport = 0L
  var lastTime = 0L
  var lastMessageSize = 0

  // check if input contains a valid state file which contains initial token
  val initToken = if (stateFilePath.isEmpty || !stateFilePath.get.toFile.exists()) {
    None
  } else {
    Option(scala.io.Source.fromFile(stateFilePath.get.toString).mkString)
  }

  // Last token observed from the stream; updated as data flows below.
  var lastToken: Option[String] = None
  val start = System.currentTimeMillis()

  // NOTE(review): lastToken is always None at this point (it is only assigned
  // inside the stream below), so this always resolves to initToken.
  val tokenToQuery = lastToken match {
    case Some(t) => lastToken
    case None => initToken
  }

  // extract update frequency if was requested to follow
  val updateFreq = Opts.follow.toOption.map { duration =>
    val d = Duration(duration)
    FiniteDuration(d.length, d.unit)
  }

  // Akka-stream source emitting (token, data) pairs from the CM-Well consumer API.
  val graph = Downloader.createDataSource(
    baseUrl = formatHost( Opts.host() ),
    path = formatPath( Opts.path() ),
    params = Opts.params(),
    qp = Opts.qp(),
    recursive = Opts.recursive(),
    format = Opts.format(),
    isBulk = Opts.bulk(),
    token = tokenToQuery,
    updateFreq = updateFreq,
    indexTime = Opts.indexTime()
  )

  // Pipeline: persist each new token, group records, print them, report stats.
  val result = graph
    .map {case (token, data) =>
      if (Some(token) != lastToken) {
        // save new token in state file
        stateFilePath.foreach { path => Files.write(path, token.getBytes("UTF-8")) }
        lastToken = Some(token)
      }
      data}
    .via(GroupChunker(GroupChunker.formatToGroupExtractor(Opts.format())))
    .map(concatByteStrings(_, endl))
    .map { infoton => println(infoton.utf8String); infoton} // print to stdout
    .map(_ -> None)
    .via (DownloaderStats(format = Opts.format(), isStderr = true))
    .runWith(Sink.ignore)

  // Shut down cleanly once the stream completes (successfully or with failure).
  result.onComplete { x =>
    System.err.println(s"finished: $x")
    cleanup()
  }
}
| nruppin/CM-Well | server/cmwell-data-tools-app/src/main/scala/cmwell/tools/data/downloader/ConsumerMain.scala | Scala | apache-2.0 | 5,139 |
package org.backuity.puppet
import org.backuity.ansi.AnsiFormatter.FormattedHelper
/**
 * A puppet module together with its direct dependencies.
 * The accessors below simply forward to the underlying [[Module.Description]].
 */
case class Module(description: Module.Description, dependencies: Module.Graph) {
  def name = description.name
  def version = description.version
  def tag = description.tag
  def uri = description.uri
}
object Module {

  /** A module's dependency graph is the set of its direct dependencies. */
  type Graph = Set[Module]

  /** Convenience factory building a [[Module]] from its raw description fields. */
  def apply(name: String, tag: Option[String], uri: String, dependencies: Set[Module] = Set.empty) : Module = {
    Module(Module.Description(name,tag,uri),dependencies)
  }

  /** Identifying information of a module; `version` is derived lazily from the tag. */
  case class Description(name: String, tag: Option[String], uri: String) {
    lazy val version = Version(tag)
  }

  /**
   * Renders the dependency graph as an ASCII tree, rooted at a synthetic
   * "Puppetfile" node; children are sorted by module name.
   *
   * @param withUri when true, each module line also shows its source URI
   */
  def showGraph(graph: Graph, withUri: Boolean = true) : String = {
    val root = Module("Puppetfile", None, "", graph)
    def showModule(m: Module) = {
      // Identify the synthetic root by reference rather than by name, so that a
      // real module that happens to be called "Puppetfile" is still rendered
      // with its version (and URI when requested).
      if( m eq root ) {
        "Puppetfile"
      } else {
        val uri = if( withUri ) ansi"%blue{ @ ${m.uri}}" else ""
        ansi"${m.name}(%bold{${m.version}})$uri"
      }
    }
    Graph.toAscii[Module](root, _.dependencies.toList.sortBy(_.name), showModule)
  }
} | backuity/puppet-module-installer | src/main/scala/org/backuity/puppet/Module.scala | Scala | apache-2.0 | 1,069 |
package controllers.threadbuilding
/** Holds the user-facing flash message shown after a thread was built successfully. */
object ThreadBuildingSuccess {
  // Japanese: "The thread was created correctly." (runtime string — do not translate)
  val message = "スレッドは正しく建てられました。"
}
| windymelt/p2p2ch | app/controllers/threadbuilding/ThreadBuildingSuccess.scala | Scala | bsd-3-clause | 136 |
package com.cloudera.spark.hbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
import org.apache.spark._
import org.apache.hadoop.hbase.HBaseTestingUtility
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.HConnectionManager
import org.apache.hadoop.hbase.client.Increment
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Result
import com.cloudera.spark.hbase.HBaseContext
/**
 * Integration tests for [[HBaseContext]] bulk operations (put/get/increment/delete
 * and distributed scan), running against an in-process HBase mini-cluster
 * (one master, one region server) plus a local SparkContext, both created in
 * beforeAll and torn down in afterAll.
 */
class HBaseContextSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll { // with LocalSparkContext {

  // Mini-cluster utility shared by all tests; initialized in beforeAll.
  var htu: HBaseTestingUtility = null
  val tableName = "t1"
  val columnFamily = "c"

  // Local SparkContext shared across tests.
  var sc:SparkContext = null;

  override def beforeAll() {
    htu = HBaseTestingUtility.createLocalHTU()

    htu.cleanupTestDir()
    println("starting minicluster")
    htu.startMiniZKCluster();
    htu.startMiniHBaseCluster(1, 1);
    println(" - minicluster started")

    // Drop a leftover table from a previous run, if any.
    try {
      htu.deleteTable(Bytes.toBytes(tableName))
    } catch {
      case e: Exception => {
        println(" - no table " + tableName + " found")
      }
    }
    println(" - creating table " + tableName)
    htu.createTable(Bytes.toBytes(tableName), Bytes.toBytes(columnFamily))
    println(" - created table")

    val sparkConfig = new SparkConf();
    // Broadcast compression is disabled so the broadcast HBase configuration
    // round-trips without requiring a compression codec.
    sparkConfig.set("spark.broadcast.compress", "false");
    sc = new SparkContext("local", "test", sparkConfig)
  }

  override def afterAll() {
    htu.deleteTable(Bytes.toBytes(tableName))
    println("shuting down minicluster")
    htu.shutdownMiniHBaseCluster()
    htu.shutdownMiniZKCluster()
    println(" - minicluster shut down")
    htu.cleanupTestDir()
    sc.stop();
  }

  // Writes five rows via HBaseContext.bulkPut and verifies them with direct gets.
  // NOTE(review): the HConnection/HTable opened below are never closed — a
  // resource leak, tolerable in tests but worth fixing.
  test("bulkput to test HBase client") {
    val config = htu.getConfiguration
    val rdd = sc.parallelize(Array(
      (Bytes.toBytes("1"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo1")))),
      (Bytes.toBytes("2"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("b"), Bytes.toBytes("foo2")))),
      (Bytes.toBytes("3"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("c"), Bytes.toBytes("foo3")))),
      (Bytes.toBytes("4"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("d"), Bytes.toBytes("foo")))),
      (Bytes.toBytes("5"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("e"), Bytes.toBytes("bar"))))))

    val hbaseContext = new HBaseContext(sc, config);

    hbaseContext.bulkPut[(Array[Byte], Array[(Array[Byte], Array[Byte], Array[Byte])])](rdd,
      tableName,
      (putRecord) => {
        val put = new Put(putRecord._1)
        putRecord._2.foreach((putValue) => put.add(putValue._1, putValue._2, putValue._3))
        put
      },
      true);

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("1"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("a")).
      getValue()).equals("foo1"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("2"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("b")).
      getValue()).equals("foo2"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("3"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("c")).
      getValue()).equals("foo3"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("4"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("d")).
      getValue()).equals("foo"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("5"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("e")).
      getValue()).equals("bar"))
  }

  // Same as the bulkPut test, but the HBaseContext persists its configuration
  // to a filesystem path ("tmp/HBaseConfig") instead of broadcasting it.
  test("bulkput to test HBase client fs storage of Config") {
    val config = htu.getConfiguration
    val rdd = sc.parallelize(Array(
      (Bytes.toBytes("1x"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo1")))),
      (Bytes.toBytes("2x"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("b"), Bytes.toBytes("foo2")))),
      (Bytes.toBytes("3x"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("c"), Bytes.toBytes("foo3")))),
      (Bytes.toBytes("4x"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("d"), Bytes.toBytes("foo")))),
      (Bytes.toBytes("5x"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("e"), Bytes.toBytes("bar"))))))

    // Remove any stale config file from a previous run before handing the path over.
    val tmpPath = "tmp/HBaseConfig"
    val fs = FileSystem.newInstance(new Configuration())

    if (fs.exists(new Path(tmpPath))) {
      fs.delete(new Path(tmpPath), false)
    }

    val hbaseContext = new HBaseContext(sc, config, tmpPath);

    hbaseContext.bulkPut[(Array[Byte], Array[(Array[Byte], Array[Byte], Array[Byte])])](rdd,
      tableName,
      (putRecord) => {
        val put = new Put(putRecord._1)
        putRecord._2.foreach((putValue) => put.add(putValue._1, putValue._2, putValue._3))
        put
      },
      true);

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("1x"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("a")).
      getValue()).equals("foo1"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("2x"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("b")).
      getValue()).equals("foo2"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("3x"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("c")).
      getValue()).equals("foo3"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("4x"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("d")).
      getValue()).equals("foo"))
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("5x"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("e")).
      getValue()).equals("bar"))
  }

  // Runs the same batch of increments twice (batch size 4), so each counter ends
  // up at twice its per-pass increment value.
  test("bulkIncrement to test HBase client") {
    val config = htu.getConfiguration
    val rdd = sc.parallelize(Array(
      (Bytes.toBytes("1"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("counter"), 1L))),
      (Bytes.toBytes("2"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("counter"), 2L))),
      (Bytes.toBytes("3"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("counter"), 3L))),
      (Bytes.toBytes("4"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("counter"), 4L))),
      (Bytes.toBytes("5"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("counter"), 5L)))))

    val hbaseContext = new HBaseContext(sc, config);

    hbaseContext.bulkIncrement[(Array[Byte], Array[(Array[Byte], Array[Byte], Long)])](rdd,
      tableName,
      (incrementRecord) => {
        val increment = new Increment(incrementRecord._1)
        incrementRecord._2.foreach((incrementValue) =>
          increment.addColumn(incrementValue._1, incrementValue._2, incrementValue._3))
        increment
      },
      4);

    hbaseContext.bulkIncrement[(Array[Byte], Array[(Array[Byte], Array[Byte], Long)])](rdd,
      tableName,
      (incrementRecord) => {
        val increment = new Increment(incrementRecord._1)
        incrementRecord._2.foreach((incrementValue) =>
          increment.addColumn(incrementValue._1, incrementValue._2, incrementValue._3))
        increment
      },
      4);

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    assert(Bytes.toLong(htable.get(new Get(Bytes.toBytes("1"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("counter")).
      getValue()) == 2L)
    assert(Bytes.toLong(htable.get(new Get(Bytes.toBytes("2"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("counter")).
      getValue()) == 4L)
    assert(Bytes.toLong(htable.get(new Get(Bytes.toBytes("3"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("counter")).
      getValue()) == 6L)
    assert(Bytes.toLong(htable.get(new Get(Bytes.toBytes("4"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("counter")).
      getValue()) == 8L)
    assert(Bytes.toLong(htable.get(new Get(Bytes.toBytes("5"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("counter")).
      getValue()) == 10L)
  }

  // Seeds three rows directly, bulk-deletes two of them, and verifies that only
  // the non-deleted row survives.
  test("bulkDelete to test HBase client") {
    val config = htu.getConfiguration

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    var put = new Put(Bytes.toBytes("delete1"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo1"))
    htable.put(put)
    put = new Put(Bytes.toBytes("delete2"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo2"))
    htable.put(put)
    put = new Put(Bytes.toBytes("delete3"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
    htable.put(put)

    val rdd = sc.parallelize(Array(
      (Bytes.toBytes("delete1")),
      (Bytes.toBytes("delete3"))))

    val hbaseContext = new HBaseContext(sc, config);
    hbaseContext.bulkDelete[Array[Byte]](rdd,
      tableName,
      putRecord => new Delete(putRecord),
      4);

    assert(htable.get(new Get(Bytes.toBytes("delete1"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("a")) == null)
    assert(htable.get(new Get(Bytes.toBytes("delete3"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("a")) == null)
    assert(Bytes.toString(htable.get(new Get(Bytes.toBytes("delete2"))).
      getColumnLatest(Bytes.toBytes(columnFamily), Bytes.toBytes("a")).
      getValue()).equals("foo2"))
  }

  // Seeds three rows, then bulk-gets four keys (one missing) and formats each
  // result as "row:(qualifier,value)"; the missing key yields an empty string.
  test("bulkGet to test HBase client") {
    val config = htu.getConfiguration

    config.set("spark.broadcast.compress", "false");

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    var put = new Put(Bytes.toBytes("get1"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo1"))
    htable.put(put)
    put = new Put(Bytes.toBytes("get2"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo2"))
    htable.put(put)
    put = new Put(Bytes.toBytes("get3"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
    htable.put(put)

    val rdd = sc.parallelize(Array(
      (Bytes.toBytes("get1")),
      (Bytes.toBytes("get2")),
      (Bytes.toBytes("get3")),
      (Bytes.toBytes("get4"))))

    val hbaseContext = new HBaseContext(sc, config);
    val getRdd = hbaseContext.bulkGet[Array[Byte], Object](
      tableName,
      2,
      rdd,
      record => {
        new Get(record)
      },
      (result: Result) => {
        // Render the row as "row:(qualifier,value)…"; "counter" qualifiers hold
        // longs, everything else is rendered as a string.
        if (result.list() != null) {
          val it = result.list().iterator()
          val B = new StringBuilder

          B.append(Bytes.toString(result.getRow()) + ":")

          while (it.hasNext()) {
            val kv = it.next()
            val q = Bytes.toString(kv.getQualifier())
            if (q.equals("counter")) {
              B.append("(" + Bytes.toString(kv.getQualifier()) + "," + Bytes.toLong(kv.getValue()) + ")")
            } else {
              B.append("(" + Bytes.toString(kv.getQualifier()) + "," + Bytes.toString(kv.getValue()) + ")")
            }
          }
          "" + B.toString
        } else {
          ""
        }
      })

    val getArray = getRdd.collect

    getArray.foreach(f => println(f));

    assert(getArray.length == 4)
    assert(getArray.contains("get1:(a,foo1)"))
    assert(getArray.contains("get2:(a,foo2)"))
    assert(getArray.contains("get3:(a,foo3)"))
  }

  // Seeds five rows and scans the key range [scan2, scan4_) through hbaseRDD.
  // NOTE(review): the final assertion is commented out, so this test only
  // verifies that the scan runs without throwing.
  test("distributedScan to test HBase client") {
    val config = htu.getConfiguration

    config.set("spark.broadcast.compress", "false");

    val connection = HConnectionManager.createConnection(config)
    val htable = connection.getTable(Bytes.toBytes("t1"))

    var put = new Put(Bytes.toBytes("scan1"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo1"))
    htable.put(put)
    put = new Put(Bytes.toBytes("scan2"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo2"))
    htable.put(put)
    put = new Put(Bytes.toBytes("scan3"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
    htable.put(put)
    put = new Put(Bytes.toBytes("scan4"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
    htable.put(put)
    put = new Put(Bytes.toBytes("scan5"))
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
    htable.put(put)

    var scan = new Scan()
    scan.setCaching(100)
    scan.setStartRow(Bytes.toBytes("scan2"))
    scan.setStopRow(Bytes.toBytes("scan4_"))

    val hbaseContext = new HBaseContext(sc, config);

    val scanRdd = hbaseContext.hbaseRDD(tableName, scan)

    val scanList = scanRdd.collect

    //assert(scanList.length == 3)
  }
} | blademainer/SparkOnHBase | src/test/scala/com/cloudera/spark/hbase/HBaseContextSuite.scala | Scala | apache-2.0 | 13,343 |
/**
 * Exercises `flatten` and `transpose` over collections mixing sequential and
 * parallel members, printing each result to stdout.
 */
object Test {
  def main(args: Array[String]): Unit = {
    // Alternate sequential / parallel single-element lists based on index parity.
    val groups = (1 to 5).map { n =>
      if (n % 2 == 0) List(1).seq else List(1).par
    }
    println(groups.flatten)
    println(groups.transpose)

    // The same operations on a lazy Stream of parallel vectors.
    val stream = Stream(Vector(1).par, Vector(2).par)
    println(stream.flatten.toList)
    println(stream.transpose.map(_.toList).toList)
  }
}
| yusuke2255/dotty | tests/run/t4761.scala | Scala | bsd-3-clause | 326 |
package at.fh.swengb.android_resifo
import android.app.Activity
import android.content.Intent
import android.os.Bundle
import android.view.View
import android.widget._
import scala.util.matching.Regex
/**
* Created by Martin on 15.01.2017.
*/
/**
 * Registration ("Anmeldung") form activity: collects the new address, saves or
 * updates it in the local database, and navigates to the next screen. The
 * preceding activity passes the person's database id via the "person_id" extra.
 */
class AnmeldungActivity extends Activity{

  // Database handle, opened in onCreate.
  var db: Db = _
  // Database id of the person being registered (from the launching intent).
  var person_id = 0
  val d = new Data

  override protected def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    setContentView(R.layout.anmeldung)
    db = Db(getApplicationContext())
    fillAllSpinner()
    person_id = getIntent.getExtras.get("person_id").asInstanceOf[Int]
    /*
    val dataMap = d.fillAnmeldeDaten(db, person_id)
    fillDataInTextView(dataMap, person_id)
    */
  }

  /*
  def fillDataInTextView(anmeldungData: Map[Int, Any], person_id: Int) : Unit = {
    val bundesland = anmeldungData(person_id).asInstanceOf[Anmeldung].getBundesland()
    val anAusland = anmeldungData(person_id).asInstanceOf[Anmeldung].getZuzugAusAusland()
    val hauptwohnsitzCheckbox = anmeldungData(person_id).asInstanceOf[Anmeldung].getHauptwohnsitz()
    findViewById(R.id.eT_anStraße).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getStrasse())
    findViewById(R.id.eT_anHausNr).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getHausnr())
    findViewById(R.id.eT_anStiege).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getStiege())
    findViewById(R.id.eT_anTuer).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getTuer())
    findViewById(R.id.eT_anPLZ).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getPlz())
    findViewById(R.id.eT_anOrt).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getOrt())
    if(bundesland == "Steiermark") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(0)
    } else if(bundesland == "Kärnten") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(1)
    } else if(bundesland == "Burgenland") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(2)
    } else if(bundesland == "Tirol") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(3)
    } else if(bundesland == "Vorarlberg") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(4)
    } else if(bundesland == "Salzburg") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(5)
    } else if(bundesland == "Niederösterreich") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(6)
    } else if(bundesland == "Oberösterreich") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(7)
    } else if(bundesland == "Wien") {
      findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].setSelection(8)
    }
    if(anAusland == "ja") {
      findViewById(R.id.rB_anAuslandJa).asInstanceOf[RadioButton].setChecked(true)
      findViewById(R.id.rB_anAuslandNein).asInstanceOf[RadioButton].setChecked(false)
    } else {
      findViewById(R.id.rB_anAuslandJa).asInstanceOf[RadioButton].setChecked(false)
      findViewById(R.id.rB_anAuslandNein).asInstanceOf[RadioButton].setChecked(true)
    }
    if(hauptwohnsitzCheckbox == "ja") {
      findViewById(R.id.rB_anHWSJa).asInstanceOf[RadioButton].setChecked(true)
      findViewById(R.id.rB_anHWSNein).asInstanceOf[RadioButton].setChecked(false)
    } else {
      findViewById(R.id.rB_anHWSJa).asInstanceOf[RadioButton].setChecked(false)
      findViewById(R.id.rB_anHWSNein).asInstanceOf[RadioButton].setChecked(true)
    }
    findViewById(R.id.eT_anNameUG).asInstanceOf[TextView].setText(anmeldungData(person_id).asInstanceOf[Anmeldung].getUnterkunftgeber())
  }
  */

  // Reads the form fields and inserts a new registration record; when the new
  // address is marked as main residence ("Hauptwohnsitz"), a residence record
  // is inserted as well.
  def saveData(view: View): Unit = {
    val strasse = findViewById(R.id.eT_anStraße).asInstanceOf[EditText].getText.toString
    val hausnummer = findViewById(R.id.eT_anHausNr).asInstanceOf[EditText].getText.toString
    val stiege = findViewById(R.id.eT_anStiege).asInstanceOf[EditText].getText.toString
    val tuer = findViewById(R.id.eT_anTuer).asInstanceOf[EditText].getText.toString
    val plz = findViewById(R.id.eT_anPLZ).asInstanceOf[EditText].getText.toString
    val ort = findViewById(R.id.eT_anOrt).asInstanceOf[EditText].getText.toString
    val bundesland = findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].getSelectedItem().toString()
    val rb_auslandJa = findViewById(R.id.rB_anAuslandJa).asInstanceOf[RadioButton]
    val rb_HWSJa = findViewById(R.id.rB_anHWSJa).asInstanceOf[RadioButton]
    // Radio buttons are stored as "ja"/"nein" strings in the database.
    val ausland = if (rb_auslandJa.isChecked == true) "ja" else "nein"
    val hws = if (rb_HWSJa.isChecked == true) "ja" else "nein"
    val nameUG = findViewById(R.id.eT_anNameUG).asInstanceOf[EditText].getText.toString
    val anmeldeDaten: AnmeldeDaten = AnmeldeDaten(person_id, strasse, hausnummer, stiege, tuer, plz, ort, bundesland, ausland, hws, nameUG)
    val anmDao = db.mkAnmDao()
    anmDao.insert(anmeldeDaten)
    if (hws == "ja"){
      val hwsDaten: HauptwohnsitzDaten = HauptwohnsitzDaten(person_id, strasse, hausnummer, stiege, tuer, plz, ort, bundesland)
      val hwsDao = db.mkHwsDao()
      hwsDao.insert(hwsDaten)
    }
  }

  // Same field collection as saveData, but used on the update path.
  // NOTE(review): this still calls anmDao.insert (not update) for the
  // registration record, while the residence record uses hwsDao.update —
  // confirm whether the insert is intentional.
  def updateData(view: View) = {
    val strasse = findViewById(R.id.eT_anStraße).asInstanceOf[EditText].getText.toString
    val hausnummer = findViewById(R.id.eT_anHausNr).asInstanceOf[EditText].getText.toString
    val stiege = findViewById(R.id.eT_anStiege).asInstanceOf[EditText].getText.toString
    val tuer = findViewById(R.id.eT_anTuer).asInstanceOf[EditText].getText.toString
    val plz = findViewById(R.id.eT_anPLZ).asInstanceOf[EditText].getText.toString
    val ort = findViewById(R.id.eT_anOrt).asInstanceOf[EditText].getText.toString
    val bundesland = findViewById(R.id.s_anBundesland).asInstanceOf[Spinner].getSelectedItem().toString()
    val rb_auslandJa = findViewById(R.id.rB_anAuslandJa).asInstanceOf[RadioButton]
    val rb_HWSJa = findViewById(R.id.rB_anHWSJa).asInstanceOf[RadioButton]
    val ausland = if (rb_auslandJa.isChecked == true) "ja" else "nein"
    val hws = if (rb_HWSJa.isChecked == true) "ja" else "nein"
    val nameUG = findViewById(R.id.eT_anNameUG).asInstanceOf[EditText].getText.toString
    val anmeldeDaten: AnmeldeDaten = AnmeldeDaten(person_id, strasse, hausnummer, stiege, tuer, plz, ort, bundesland, ausland, hws, nameUG)
    val anmDao = db.mkAnmDao()
    anmDao.insert(anmeldeDaten)
    if (hws == "ja") {
      val hwsDaten: HauptwohnsitzDaten = HauptwohnsitzDaten(person_id, strasse, hausnummer, stiege, tuer, plz, ort, bundesland)
      val hwsDao = db.mkHwsDao()
      hwsDao.update(hwsDaten, person_id)
    }
  }

  // Persists the form (insert or update, depending on the "update" extra) and
  // continues either to the main-residence screen (when HWS is "no") or straight
  // to the success screen.
  def gotoNext(view:View): Unit ={
    val check: String = getIntent.getExtras.get("update").asInstanceOf[String]
    val rb_HWSja = findViewById(R.id.rB_anHWSJa).asInstanceOf[RadioButton]
    val i = if (rb_HWSja.isChecked() == false) new Intent(this, classOf[HauptwohnsitzActivity]) else new Intent(this, classOf[ErfolgreichActivity])
    i.putExtra("person_id", person_id)
    if (check == "update") updateData(view) else saveData(view)
    finish()
    startActivity(i)
  }

  // Navigates back to the decision screen, forwarding the person id.
  def goBack(view:View): Unit ={
    val i = new Intent(this, classOf[EntscheidungActivity])
    i.putExtra("person_id", person_id)
    finish()
    startActivity(i)
  }

  // Populates the federal-state ("Bundesland") spinner with the fixed list of
  // Austrian states.
  def fillAllSpinner(): Unit ={
    fillSpinner(findViewById(R.id.s_anBundesland).asInstanceOf[Spinner], Array("Steiermark", "Kärnten", "Burgenland", "Tirol", "Vorarlberg", "Salzburg", "Niederösterreich", "Oberösterreich", "Wien"))
    def fillSpinner(spinner: Spinner, content: Array[String]): Unit ={
      val adapter = new ArrayAdapter(this, android.R.layout.simple_spinner_item, content)
      spinner.setAdapter(adapter)
    }
  }

  /*
  def checkText(name: String): String = {
    val check: Regex = ".*\\d.*".r
    name match {
      case `check` => name.replace("1","i").replace("2","z").replace("3","e").replace("4","a").replace("5","s").replace("6","g").replace("7","t").replace("8","b").replace("9","p").replace("0","o")
      case _ => name
    }
  }
  def checkNumber(number: String): String = {
    val check: Regex = ".*\\s.*".r
    number match {
      case `check` => ""
      case _ => number
    }
  }
  def checkPlz(plz: String) = {
    val check: Regex = "\\d\\d\\d\\d".r
    plz match {
      case `check` => plz
      case _ => ""
    }
  }
  */
} | obsidion15/resifo-android | app/src/main/scala/at.fh.swengb.android_resifo/AnmeldungActivity.scala | Scala | mit | 8,521 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import monix.execution.atomic.Atomic
import monix.execution.exceptions.DummyException
import scala.concurrent.duration.{Duration, _}
/** Operator tests for `Observable.onErrorRestartIf`, which resubscribes to the
  * source for as long as the given predicate returns `true` for the error.
  *
  * In the samples below the source emits `0 until sourceCount` and then fails;
  * the predicate allows exactly 3 restarts, so the sequence plays 4 times in
  * total (1 original run + 3 restarts). Hence the `* 4` factors, with each
  * pass contributing `0 + 1 + ... + (n-1) = n * (n-1) / 2` to the sum.
  */
object OnErrorRetryIfSuite extends BaseOperatorSuite {
  def createObservable(sourceCount: Int) = Some {
    val retriesCount = Atomic(0)
    val ex = DummyException("expected")
    val o = Observable
      .range(0L, sourceCount.toLong)
      .endWithError(ex)
      .onErrorRestartIf {
        // Restart only for the expected error, and at most 3 times.
        case DummyException("expected") => retriesCount.incrementAndGet() <= 3
        case _ => false
      }
      .onErrorHandle(_ => 10L)
    // 4 full passes plus the final `10L` emitted by onErrorHandle.
    val count = sourceCount * 4 + 1
    val sum = 1L * sourceCount * (sourceCount - 1) / 2 * 4 + 10
    Sample(o, count, sum.toLong, Duration.Zero, Duration.Zero)
  }
  def observableInError(sourceCount: Int, ex: Throwable) =
    if (sourceCount == 1) {
      // Predicate never allows a restart, so the error surfaces after one item.
      val o = Observable.now(1L).endWithError(ex).onErrorRestartIf(_ => false)
      Some(Sample(o, 1, 1, Duration.Zero, Duration.Zero))
    } else {
      val retriesCount = Atomic(0)
      val o = Observable
        .range(0L, sourceCount.toLong)
        .endWithError(ex)
        .onErrorRestartIf(_ => retriesCount.incrementAndGet() <= 3)
      // 4 passes, then the error propagates (no onErrorHandle in this sample).
      val count = sourceCount * 4
      val sum = 1L * sourceCount * (sourceCount - 1) / 2 * 4
      Some(Sample(o, count, sum.toLong, Duration.Zero, Duration.Zero))
    }
  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
    val retriesCount = Atomic(0)
    // The predicate itself throws `ex` on the 4th error, simulating broken user code.
    val o = Observable.range(0L, sourceCount.toLong).endWithError(DummyException("unexpected")).onErrorRestartIf { _ =>
      if (retriesCount.incrementAndGet() <= 3)
        true
      else
        throw ex
    }
    val count = sourceCount * 4
    val sum = 1L * sourceCount * (sourceCount - 1) / 2 * 4
    Sample(o, count, sum.toLong, Duration.Zero, Duration.Zero)
  }
  override def cancelableObservables() = {
    val dummy = DummyException("dummy")
    // delayExecution postpones the first emission so the runner can cancel mid-stream.
    val sample = Observable
      .range(0, 20)
      .map(_ => 1L)
      .endWithError(dummy)
      .delayExecution(1.second)
      .onErrorRestartIf(ex => true)
    Seq(
      Sample(sample, 0, 0, 0.seconds, 0.seconds),
      Sample(sample, 20, 20, 1.seconds, 0.seconds),
      Sample(sample, 40, 40, 2.seconds, 0.seconds)
    )
  }
}
| monifu/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/OnErrorRetryIfSuite.scala | Scala | apache-2.0 | 2,991 |
package spray.shapeless
package object marshalling {
  import akka.util.Timeout
  import scala.language.implicitConversions
  import scala.concurrent.duration._
  import scala.util._
  import shapeless._
  import ops.hlist._
  import HList._
  import spray._
  import http._
  import httpx.marshalling._
  /** A value paired with the (type-erased) [[Marshaller]] able to render it. */
  type ValueAndMarshaller = (Any, Marshaller[Any])
  /** Used in materializing a [[List]] of values associated with their [[Marshaller]]. */
  type ValueAndMarshallerFolder[L <: HList] = RightFolder.Aux[L, List[ValueAndMarshaller], marshallers.type, List[Any]]
  //object marshallers extends Poly {
  //  implicit def caseMarshaller[A, B <: List[Any]](implicit marshaller: Marshaller[A]) =
  //    use((a: A, lst: B) => {
  //      (a, marshaller) :: lst
  //    })
  //}
  /** Polymorphic fold case: prepends each element paired with its implicitly resolved [[Marshaller]]. */
  private[spray] object marshallers extends Poly2 {
    implicit def caseMarshaller[A, B <: List[Any]](implicit marshaller: Marshaller[A]) =
      at[A, B] { (a, lst) =>
        (a, marshaller) :: lst
      }
  }
  /** Folds an HList into a plain list pairing every element with its marshaller.
    * The cast is required because the folder's result type is only recorded as `List[Any]`.
    */
  def zipValuesWithMarshallers[L <: HList](l: L)(implicit folder: ValueAndMarshallerFolder[L]): List[ValueAndMarshaller] =
    l.foldRight(List.empty[ValueAndMarshaller])(marshallers).asInstanceOf[List[ValueAndMarshaller]]
  /** Marshals each zipped value with its own marshaller and folds the results into `z`.
    * NOTE(review): every `marshal` call uses a hard-coded 1-minute timeout.
    */
  def renderValuesWithMarshallers[B](zipped: List[ValueAndMarshaller], z: B)(f: (B, Try[HttpData]) => B): B =
    zipped.foldLeft(z) {
      case (accum, (value, marshaller)) =>
        val marshalled = marshal(value)(marshaller = marshaller, timeout = Timeout(1.minute))
        f(accum, marshalled.map(_.data))
    }
  /** Bridges `marshal`'s `Either[Throwable, T]` result into a [[scala.util.Try]]
    * (this conversion is what lets `marshalled.map(_.data)` above compile).
    */
  private[spray] implicit def eitherToTry[T](either: Either[Throwable, T]): Try[T] =
    either.fold(Failure.apply, Success.apply)
}
| davidhoyt/spray-shapeless | src/main/scala/spray/shapeless/marshalling/package.scala | Scala | mit | 1,678 |
package org.scalaide.core.internal.hyperlink
import org.eclipse.jface.text.IRegion
import org.eclipse.jface.text.hyperlink.IHyperlink
import org.scalaide.logging.HasLogger
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits._
/** Computes "Open Declaration" hyperlinks for the symbol(s) found under a
  * region of a Scala source file, via the presentation compiler.
  */
class ScalaDeclarationHyperlinkComputer extends HasLogger {

  /** Finds hyperlinks for `wordRegion`, using it both for symbol resolution and as the link region. */
  def findHyperlinks(icu: InteractiveCompilationUnit, wordRegion: IRegion): Option[List[IHyperlink]] = {
    findHyperlinks(icu, wordRegion, wordRegion)
  }

  /** Finds hyperlinks for the symbol at `mappedRegion` (the region mapped into
    * the compilation unit's source), attaching them to `wordRegion` in the editor.
    * Returns `None` when no source/compiler is available or the region is empty.
    */
  def findHyperlinks(icu: InteractiveCompilationUnit, wordRegion: IRegion, mappedRegion: IRegion): Option[List[IHyperlink]] = {
    // Per-request tracing demoted from `info` to `debug`: this runs on every
    // hyperlink detection (e.g. Ctrl+hover) and was flooding the log at info level.
    logger.debug("detectHyperlinks: wordRegion = " + mappedRegion)
    icu.withSourceFile({ (sourceFile, compiler) =>
      if (mappedRegion == null || mappedRegion.getLength == 0)
        None
      else {
        val start = mappedRegion.getOffset
        val regionEnd = mappedRegion.getOffset + mappedRegion.getLength
        // removing 1 handles correctly hyperlinking requests @ EOF
        val end = if (sourceFile.length == regionEnd) regionEnd - 1 else regionEnd
        val pos = compiler.rangePos(sourceFile, start, start, end)
        import compiler.{ log => _, _ }
        val typed = askTypeAt(pos).getOption()
        // Resolve the tree at `pos` to the symbol(s) a hyperlink should target.
        val symsOpt: Option[List[(Symbol,String)]] = compiler.asyncExec {
          val targetsOpt = typed map {
            case Import(expr, sels) =>
              if (expr.pos.includes(pos)) {
                // Position is inside the import qualifier: walk down the Selects
                // to the narrowest qualifier that still contains the position.
                @annotation.tailrec
                def locate(p: Position, inExpr: Tree): Symbol = inExpr match {
                  case Select(qualifier, _) =>
                    if (qualifier.pos.includes(p)) locate(p, qualifier)
                    else inExpr.symbol
                  case tree => tree.symbol
                }
                List(locate(pos, expr))
              } else {
                // Position is on an import selector: link both the term and type members.
                sels find (selPos => selPos.namePos >= pos.start && selPos.namePos <= pos.end) map { sel =>
                  val tpe = stabilizedType(expr)
                  List(tpe.member(sel.name), tpe.member(sel.name.toTypeName))
                } getOrElse Nil
              }
            case Annotated(atp, _) => List(atp.symbol)
            case Literal(const) if const.tag == compiler.ClazzTag => List(const.typeValue.typeSymbol)
            case ap @ Select(qual, nme.apply) => List(qual.symbol, ap.symbol)
            case st if st.symbol ne null => List(st.symbol)
            case _ => List()
          } map (_.filterNot{ sym => sym == NoSymbol || sym.hasPackageFlag || sym.isJavaDefined })
          for {
            targets <- targetsOpt.toList
            target <- targets
          } yield (target -> target.toString)
        }.getOption()
        symsOpt map { syms =>
          syms flatMap {
            case (sym, symName) => compiler.mkHyperlink(sym, s"Open Declaration (${symName})", wordRegion, icu.scalaProject.javaProject).toList
          }
        }
      }
    }).flatten
  }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/hyperlink/ScalaDeclarationHyperlinkComputer.scala | Scala | bsd-3-clause | 3,116 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.reader
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoCursorBase
import com.stratio.datasource.mongodb.client.MongodbClientFactory
import com.stratio.datasource.mongodb.config.{MongodbConfig, MongodbCredentials, MongodbSSLOptions}
import com.stratio.datasource.mongodb.partitioner.MongodbPartition
import com.stratio.datasource.mongodb.query.FilterSection
import com.stratio.datasource.util.Config
import org.apache.spark.Partition
import scala.util.Try
/**
*
* @param config Configuration object.
* @param requiredColumns Pruning fields
* @param filters Added query filters
*/
class MongodbReader(config: Config,
                    requiredColumns: Array[String],
                    filters: FilterSection) {

  // Connection state: created lazily by init(), released by close().
  private var mongoClient: Option[MongodbClientFactory.Client] = None
  private var mongoClientKey: Option[String] = None
  private var dbCursor: Option[MongoCursorBase] = None

  // Number of documents the cursor fetches per server round-trip.
  private val batchSize = config.getOrElse[Int](MongodbConfig.CursorBatchSize, MongodbConfig.DefaultCursorBatchSize)

  // NOTE(review): parsed from configuration but never referenced in this class — confirm whether it can be removed.
  private val connectionsTime = config.get[String](MongodbConfig.ConnectionsTime).map(_.toLong)

  /** Closes the open cursor (if any), then releases the client connection,
    * preferring release by key when one was recorded during init().
    */
  def close(): Unit = {
    dbCursor.fold(ifEmpty = ()) { cursor =>
      cursor.close()
      dbCursor = None
    }
    mongoClient.fold(ifEmpty = ()) { client =>
      mongoClientKey.fold({
        MongodbClientFactory.closeByClient(client)
      }) {key =>
        MongodbClientFactory.closeByKey(key)
      }
      mongoClient = None
    }
  }

  /** True when an initialized cursor has more documents; false before init(). */
  def hasNext: Boolean = {
    dbCursor.fold(ifEmpty = false)(cursor => cursor.hasNext)
  }

  /** Returns the next document.
    * @throws IllegalStateException if init() has not been called successfully
    */
  def next(): DBObject = {
    dbCursor.fold(ifEmpty = throw new IllegalStateException("DbCursor is not initialized"))(cursor => cursor.next())
  }

  /**
   * Initialize MongoDB reader
   * @param partition Where to read from
   * @throws MongodbReadException if connecting or setting up the query fails
   */
  def init(partition: Partition): Unit = {
    Try {
      val mongoPartition = partition.asInstanceOf[MongodbPartition]
      val hosts = mongoPartition.hosts.map(add => new ServerAddress(add)).toList
      val credentials = config.getOrElse[List[MongodbCredentials]](MongodbConfig.Credentials, MongodbConfig.DefaultCredentials).map {
        case MongodbCredentials(user, database, password) =>
          MongoCredential.createCredential(user, database, password)
      }
      val sslOptions = config.get[MongodbSSLOptions](MongodbConfig.SSLOptions)
      val clientOptions = config.properties.filterKeys(_.contains(MongodbConfig.ListMongoClientOptions))
      val mongoClientResponse = MongodbClientFactory.getClient(hosts, credentials, sslOptions, clientOptions)
      mongoClient = Option(mongoClientResponse.clientConnection)
      mongoClientKey = Option(mongoClientResponse.key)
      val emptyFilter = MongoDBObject(List())
      // Fall back to a match-all filter when filter translation fails.
      val filter = Try(queryPartition(filters)).getOrElse(emptyFilter)
      dbCursor = (for {
        client <- mongoClient
        collection <- Option(client(config(MongodbConfig.Database))(config(MongodbConfig.Collection)))
        dbCursor <- Option(collection.find(filter, selectFields(requiredColumns)))
      } yield {
        // Constrain the cursor to this partition's key range, when bounds exist.
        mongoPartition.partitionRange.minKey.foreach(min => dbCursor.addSpecial("$min", min))
        mongoPartition.partitionRange.maxKey.foreach(max => dbCursor.addSpecial("$max", max))
        dbCursor.batchSize(batchSize)
      }).headOption
    }.recover {
      case throwable =>
        throw MongodbReadException(throwable.getMessage, throwable)
    }.get
    // BUG FIX: `recover`'s partial function throwing produces a *Failure*, not an
    // exception; without `.get` that Failure was silently discarded (init returns
    // Unit) and callers saw an empty partition instead of the intended
    // MongodbReadException. `.get` rethrows it; on success it is a no-op.
  }

  /**
   * Create query partition using given filters.
   *
   * @param filters the Spark filters to be converted to Mongo filters
   * @return the dB object
   */
  private def queryPartition(filters: FilterSection): DBObject = {
    implicit val c: Config = config
    filters.filtersToDBObject()
  }

  /**
   *
   * Prepared DBObject used to specify required fields in mongodb 'find'
   * @param fields Required fields
   * @return A mongodb object that represents required fields.
   */
  private def selectFields(fields: Array[String]): DBObject =
    MongoDBObject(
      if (fields.isEmpty) List()
      else fields.toList.filterNot(_ == "_id").map(_ -> 1) ::: {
        List("_id" -> fields.find(_ == "_id").fold(0)(_ => 1))
      })
}
/** Wraps any failure raised while initializing or driving a MongoDB read,
  * preserving the original throwable as the cause.
  */
case class MongodbReadException(msg: String, causedBy: Throwable)
  extends RuntimeException(msg, causedBy)
| darroyocazorla/spark-mongodb | spark-mongodb/src/main/scala/com/stratio/datasource/mongodb/reader/MongodbReader.scala | Scala | apache-2.0 | 5,024 |
package net.mrkeks.clave.game
import net.mrkeks.clave.map.GameMap
import org.denigma.threejs.Vector3
import net.mrkeks.clave.util.markovIf
import net.mrkeks.clave.map.MapData
object PositionedObjectData {

  /** The four cardinal movement directions in the x-z plane. */
  object Direction extends Enumeration {
    val Up, Down, Left, Right = Value

    /** converts vector to four-way direction (in x-z-plane)
      * prioritizes up-down movement; defaults to Down.
      * Concretely: any `z > 0` maps to Down regardless of `x`; Left/Right are
      * only chosen when `z == 0`; the zero vector also yields Down.
      */
    def fromVec(v: Vector3) = {
      if (v.z < 0) {
        Up
      } else if (v.z == 0 && v.x > 0) {
        Right
      } else if (v.z == 0 && v.x < 0) {
        Left
      } else {
        Down
      }
    }

    /** Maps a direction to a unit step vector in the x-z plane (y is always 0). */
    def toVec(d: Direction.Value) = d match {
      case Up => new Vector3( 0, 0,-1)
      case Down => new Vector3( 0, 0, 1)
      case Left => new Vector3(-1, 0, 0)
      case Right => new Vector3( 1, 0, 0)
    }
//
//  def fromRnd(rnd: Double) = {
//    if (rnd < .25) {
//      Up
//    } else if (rnd < .5) {
//      Down
//    } else if (rnd < .75) {
//      Left
//    } else {
//      Right
//    }
//  }

    /** Picks one of the four directions at random.
      * NOTE(review): presumably uniform — each branch carries weight .25 via
      * `markovIf` — confirm against markovIf's semantics.
      */
    def randomDirection() = {
      //fromRnd(Math.random())
      markovIf (.25) {
        Up
      }.markovElseIf (.25) {
        Down
      }.markovElseIf (.25) {
        Left
      } markovElse {
        Right
      }
    }
  }

  // Alias so `Direction` can be used directly as a type.
  type Direction = Direction.Value
}
trait PositionedObjectData {

  // Grid position; MapData.notOnMap is the "not placed" sentinel
  // (getPositionOnMap below treats any negative coordinate as off-map).
  protected var positionOnMap = MapData.notOnMap
  // World-space position, mutated in place by implementors.
  protected val position = new Vector3()

  /** returns a copy of the internal position information */
  def getPosition = position.clone()

  /** returns the position on the map if the entity is placed on the map */
  def getPositionOnMap = {
    if (positionOnMap._1 >= 0 && positionOnMap._2 >= 0) {
      Some(positionOnMap)
    } else {
      None
    }
  }
} | benkeks/clave | src/main/scala/net/mrkeks/clave/game/PositionedObjectData.scala | Scala | gpl-3.0 | 1,801 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalactic.Prettifier
import org.scalatest.Resources
/**
* The result of a match operation, such as one performed by a <a href="Matcher.html"><code>Matcher</code></a> or
* <a href="BeMatcher.html"><code>BeMatcher</code></a>, which
* contains one field that indicates whether the match succeeded, four fields that provide
* raw failure messages to report under different circumstances, four fields providing
* arguments used to construct the final failure messages using raw failure messages
* and a <a href="../../Prettifier.html"><code>Prettifier</code></a>. Using the default constructor,
* failure messages will be constructed lazily (when required).
*
* <p>
* A <code>MatchResult</code>'s <code>matches</code> field indicates whether a match succeeded. If it succeeded,
* <code>matches</code> will be <code>true</code>.
* There are four methods, <code>failureMessage</code>, <code>negatedfailureMessage</code>, <code>midSentenceFailureMessage</code>
* and <code>negatedMidSentenceFailureMessage</code> that can be called to get final failure message strings, one of which will be
* presented to the user in case of a match failure. If a match succeeds, none of these strings will be used, because no failure
* message will be reported (<em>i.e.</em>, because there was no failure to report). If a match fails (<code>matches</code> is <code>false</code>),
* the <code>failureMessage</code> (or <code>midSentenceFailure</code>—more on that below) will be reported to help the user understand what went wrong.
* </p>
*
* <h2>Understanding <code>negatedFailureMessage</code></h2>
*
* <p>
* The <code>negatedFailureMessage</code> exists so that it can become the <code>failureMessage</code> if the matcher is <em>inverted</em>,
* which happens, for instance, if it is passed to <code>not</code>. Here's an example:
* </p>
*
* <pre class="stHighlight">
* val equalSeven = equal (7)
* val notEqualSeven = not (equalSeven)
* </pre>
*
* <p>
* The <code>Matcher[Int]</code> that results from passing 7 to <code>equal</code>, which is assigned to the <code>equalSeven</code>
* variable, will compare <code>Int</code>s passed to its
* <code>apply</code> method with 7. If 7 is passed, the <code>equalSeven</code> match will succeed. If anything other than 7 is passed, it
* will fail. By contrast, the <code>notEqualSeven</code> matcher, which results from passing <code>equalSeven</code> to <code>not</code>, does
* just the opposite. If 7 is passed, the <code>notEqualSeven</code> match will fail. If anything other than 7 is passed, it will succeed.
* </p>
*
* <p>
* For example, if 8 is passed, <code>equalSeven</code>'s <code>MatchResult</code> will contain:
* </p>
*
* <pre class="stExamples">
* expression: equalSeven(8)
* matches: false
* failureMessage: 8 did not equal 7
* negatedFailureMessage: 8 equaled 7
* </pre>
*
* <p>
* Although the <code>negatedFailureMessage</code> is nonsensical, it will not be reported to the user. Only the <code>failureMessage</code>,
* which does actually explain what caused the failure, will be reported by the user. If you pass 8 to <code>notEqualSeven</code>'s <code>apply</code>
* method, by contrast, the <code>failureMessage</code> and <code>negatedFailureMessage</code> will be:
* </p>
*
* <pre class="stExamples">
* expression: notEqualSeven(8)
* matches: true
* failureMessage: 8 equaled 7
* negatedFailureMessage: 8 did not equal 7
* </pre>
*
* <p>
* Note that the messages are swapped from the <code>equalSeven</code> messages. This swapping was effectively performed by the <code>not</code> matcher,
* which in addition to swapping the <code>failureMessage</code> and <code>negatedFailureMessage</code>, also inverted the
* <code>matches</code> value. Thus when you pass the same value to both <code>equalSeven</code> and <code>notEqualSeven</code> the <code>matches</code>
* field of one <code>MatchResult</code> will be <code>true</code> and the other <code>false</code>. Because the
* <code>matches</code> field of the <code>MatchResult</code> returned by <code>notEqualSeven(8)</code> is <code>true</code>,
* the nonsensical <code>failureMessage</code>, "<code>8 equaled 7</code>", will <em>not</em> be reported to the user.
* </p>
*
* <p>
* If 7 is passed, by contrast, the <code>failureMessage</code> and <code>negatedFailureMessage</code> of <code>equalSeven</code>
* will be:
* </p>
*
* <pre class="stExamples">
* expression: equalSeven(7)
* matches: true
* failureMessage: 7 did not equal 7
* negatedFailureMessage: 7 equaled 7
* </pre>
*
* <p>
* In this case <code>equalSeven</code>'s <code>failureMessage</code> is nonsensical, but because the match succeeded, the nonsensical message will
* not be reported to the user.
* If you pass 7 to <code>notEqualSeven</code>'s <code>apply</code>
* method, you'll get:
* </p>
*
* <pre class="stExamples">
* expression: notEqualSeven(7)
* matches: false
* failureMessage: 7 equaled 7
* negatedFailureMessage: 7 did not equal 7
* </pre>
*
* <p>
* Again the messages are swapped from the <code>equalSeven</code> messages, but this time, the <code>failureMessage</code> makes sense
* and explains what went wrong: the <code>notEqualSeven</code> match failed because the number passed did in fact equal 7. Since
* the match failed, this failure message, "<code>7 equaled 7</code>", will be reported to the user.
* </p>
*
* <h2>Understanding the "<code>midSentence</code>" messages</h2>
*
* <p>
* When a ScalaTest matcher expression that involves <code>and</code> or <code>or</code> fails, the failure message that
 * results is composed from the failure messages of the left and right matcher operands to <code>and</code> or <code>or</code>.
* For example:
* </p>
*
* <pre class="stExamples">
* 8 should (equal (7) or equal (9))
* </pre>
*
* <p>
* This above expression would fail with the following failure message reported to the user:
* </p>
*
* <pre class="stExamples">
* 8 did not equal 7, and 8 did not equal 9
* </pre>
*
* <p>
* This works fine, but what if the failure messages being combined begin with a capital letter, such as:
* </p>
*
* <pre class="stExamples">
* The name property did not equal "Ricky"
* </pre>
*
* <p>
* A combination of two such failure messages might result in an abomination of English punctuation, such as:
* </p>
*
* <pre class="stExamples">
* The name property did not equal "Ricky", and The name property did not equal "Bobby"
* </pre>
*
* <p>
* Because ScalaTest is an internationalized application, taking all of its strings from a property file
* enabling it to be localized, it isn't a good idea to force the first character to lower case. Besides,
* it might actually represent a String value which should stay upper case. The <code>midSentenceFailureMessage</code>
* exists for this situation. If the failure message is used at the beginning of the sentence, <code>failureMessage</code>
* will be used. But if it appears mid-sentence, or at the end of the sentence, <code>midSentenceFailureMessage</code>
* will be used. Given these failure message strings:
* </p>
*
* <pre class="stExamples">
* failureMessage: The name property did not equal "Bobby"
* midSentenceFailureMessage: the name property did not equal "Bobby"
* </pre>
*
* <p>
 * The resulting failure of the <code>or</code> expression involving two matchers would make any English teacher proud:
* </p>
*
* <pre class="stExamples">
* The name property did not equal "Ricky", and the name property did not equal "Bobby"
* </pre>
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param rawMidSentenceFailureMessage raw failure message suitable for appearing mid-sentence
* @param rawMidSentenceNegatedFailureMessage raw negated failure message suitable for appearing mid-sentence
* @param failureMessageArgs arguments for constructing failure message to report if a match fails
* @param negatedFailureMessageArgs arguments for constructing message with a meaning opposite to that of the failure message
* @param midSentenceFailureMessageArgs arguments for constructing failure message suitable for appearing mid-sentence
* @param midSentenceNegatedFailureMessageArgs arguments for constructing negated failure message suitable for appearing mid-sentence
* @param prettifier a <code>Prettifier</code> to prettify arguments before constructing the messages
*
* @author Bill Venners
* @author Chee Seng
*/
final case class MatchResult(
  matches: Boolean,
  rawFailureMessage: String,
  rawNegatedFailureMessage: String,
  rawMidSentenceFailureMessage: String,
  rawMidSentenceNegatedFailureMessage: String,
  failureMessageArgs: IndexedSeq[Any],
  negatedFailureMessageArgs: IndexedSeq[Any],
  midSentenceFailureMessageArgs: IndexedSeq[Any],
  midSentenceNegatedFailureMessageArgs: IndexedSeq[Any],
  prettifier: Prettifier = Prettifier.default
) {

  /**
   * Constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>rawFailureMessage</code>, and
   * <code>rawNegativeFailureMessage</code> fields. The mid-sentence messages default to the corresponding
   * plain messages, all argument lists are empty, and <code>Prettifier.default</code> is used.
   *
   * @param matches indicates whether or not the matcher matched
   * @param rawFailureMessage raw failure message to report if a match fails
   * @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
   */
  def this(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String) =
    this(
      matches,
      rawFailureMessage,
      rawNegatedFailureMessage,
      rawFailureMessage,
      rawNegatedFailureMessage,
      Vector.empty,
      Vector.empty,
      Vector.empty,
      Vector.empty,
      Prettifier.default
    )

  // Shared formatting step: a raw message is returned untouched when it has no
  // arguments; otherwise its placeholders are filled with prettified arguments.
  private def compose(rawMessage: String, args: IndexedSeq[Any]): String =
    if (args.isEmpty) rawMessage
    else Resources.formatString(rawMessage, args.map(prettifier).toArray)

  /** Failure message to report if a match fails, built from <code>rawFailureMessage</code> and <code>failureMessageArgs</code>. */
  def failureMessage: String = compose(rawFailureMessage, failureMessageArgs)

  /** Message with a meaning opposite to that of the failure message, built from <code>rawNegatedFailureMessage</code> and <code>negatedFailureMessageArgs</code>. */
  def negatedFailureMessage: String = compose(rawNegatedFailureMessage, negatedFailureMessageArgs)

  /** Failure message suitable for appearing mid-sentence, built from <code>rawMidSentenceFailureMessage</code> and <code>midSentenceFailureMessageArgs</code>. */
  def midSentenceFailureMessage: String = compose(rawMidSentenceFailureMessage, midSentenceFailureMessageArgs)

  /** Negated failure message suitable for appearing mid-sentence, built from <code>rawMidSentenceNegatedFailureMessage</code> and <code>midSentenceNegatedFailureMessageArgs</code>. */
  def midSentenceNegatedFailureMessage: String = compose(rawMidSentenceNegatedFailureMessage, midSentenceNegatedFailureMessageArgs)

  /**
   * A negated version of this <code>MatchResult</code>: <code>matches</code> is inverted and every
   * message/argument field is swapped with its counterpart.
   * (Note: the copy is built through the companion's 9-argument <code>apply</code>,
   * so it carries the default <code>Prettifier</code> — same as the original behavior.)
   */
  def negated: MatchResult =
    MatchResult(
      !matches,
      rawNegatedFailureMessage,
      rawFailureMessage,
      rawMidSentenceNegatedFailureMessage,
      rawMidSentenceFailureMessage,
      negatedFailureMessageArgs,
      failureMessageArgs,
      midSentenceNegatedFailureMessageArgs,
      midSentenceFailureMessageArgs
    )
}
/**
* Companion object for the <code>MatchResult</code> case class.
*
* @author Bill Venners
*/
object MatchResult {
/**
* Factory method that constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>failureMessage</code>,
* <code>negativeFailureMessage</code>, <code>midSentenceFailureMessage</code>,
* <code>midSentenceNegatedFailureMessage</code>, <code>failureMessageArgs</code>, and <code>negatedFailureMessageArgs</code> fields.
* <code>failureMessageArgs</code>, and <code>negatedFailureMessageArgs</code> will be used in place of <code>midSentenceFailureMessageArgs</code>
* and <code>midSentenceNegatedFailureMessageArgs</code>.
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param rawMidSentenceFailureMessage raw failure message to report if a match fails
* @param rawMidSentenceNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param failureMessageArgs arguments for constructing failure message to report if a match fails
* @param negatedFailureMessageArgs arguments for constructing message with a meaning opposite to that of the failure message
* @return a <code>MatchResult</code> instance
*/
def apply(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String, rawMidSentenceFailureMessage: String,
rawMidSentenceNegatedFailureMessage: String, failureMessageArgs: IndexedSeq[Any], negatedFailureMessageArgs: IndexedSeq[Any]): MatchResult =
new MatchResult(matches, rawFailureMessage, rawNegatedFailureMessage, rawMidSentenceFailureMessage, rawMidSentenceNegatedFailureMessage, failureMessageArgs, negatedFailureMessageArgs, failureMessageArgs, negatedFailureMessageArgs, Prettifier.default)
/**
* Factory method that constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>rawFailureMessage</code>,
* <code>rawNegativeFailureMessage</code>, <code>rawMidSentenceFailureMessage</code>, and
* <code>rawMidSentenceNegatedFailureMessage</code> fields. All argument fields will have <code>Vector.empty</code> values.
* This is suitable to create MatchResult with eager error messages, and its mid-sentence messages need to be different.
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param rawMidSentenceFailureMessage raw failure message to report if a match fails
* @param rawMidSentenceNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @return a <code>MatchResult</code> instance
*/
def apply(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String, rawMidSentenceFailureMessage: String,
rawMidSentenceNegatedFailureMessage: String): MatchResult =
new MatchResult(matches, rawFailureMessage, rawNegatedFailureMessage, rawMidSentenceFailureMessage, rawMidSentenceNegatedFailureMessage, Vector.empty, Vector.empty, Vector.empty, Vector.empty, Prettifier.default)
/**
* Factory method that constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>rawFailureMessage</code>, and
* <code>rawNegativeFailureMessage</code> fields. The <code>rawMidSentenceFailureMessage</code> will return the same
* string as <code>rawFailureMessage</code>, and the <code>rawMidSentenceNegatedFailureMessage</code> will return the
* same string as <code>rawNegatedFailureMessage</code>. All argument fields will have <code>Vector.empty</code> values.
* This is suitable to create MatchResult with eager error messages that have same mid-sentence messages.
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @return a <code>MatchResult</code> instance
*/
def apply(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String): MatchResult =
new MatchResult(matches, rawFailureMessage, rawNegatedFailureMessage, rawFailureMessage, rawNegatedFailureMessage, Vector.empty, Vector.empty, Vector.empty, Vector.empty, Prettifier.default)
/**
* Factory method that constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>rawFailureMessage</code>,
* <code>rawNegativeFailureMessage</code> and <code>args</code> fields. The <code>rawMidSentenceFailureMessage</code> will return the same
* string as <code>rawFailureMessage</code>, and the <code>rawMidSentenceNegatedFailureMessage</code> will return the
* same string as <code>rawNegatedFailureMessage</code>. All argument fields will use <code>args</code> as arguments.
* This is suitable to create MatchResult with lazy error messages that have same mid-sentence messages and arguments.
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param args arguments for error messages construction
* @return a <code>MatchResult</code> instance
*/
def apply(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String, args: IndexedSeq[Any]) =
new MatchResult(
matches,
rawFailureMessage,
rawNegatedFailureMessage,
rawFailureMessage,
rawNegatedFailureMessage,
args,
args,
args,
args,
Prettifier.default
)
/**
* Factory method that constructs a new <code>MatchResult</code> with passed <code>matches</code>, <code>rawFailureMessage</code>,
* <code>rawNegativeFailureMessage</code>, <code>failureMessageArgs</code> and <code>negatedFailureMessageArgs</code> fields.
* The <code>rawMidSentenceFailureMessage</code> will return the same string as <code>rawFailureMessage</code>, and the
* <code>rawMidSentenceNegatedFailureMessage</code> will return the same string as <code>rawNegatedFailureMessage</code>.
* The <code>midSentenceFailureMessageArgs</code> will return the same as <code>failureMessageArgs</code>, and the
* <code>midSentenceNegatedFailureMessageArgs</code> will return the same as <code>negatedFailureMessageArgs</code>.
* This is suitable to create MatchResult with lazy error messages that have same mid-sentence and use different arguments for
* negated messages.
*
* @param matches indicates whether or not the matcher matched
* @param rawFailureMessage raw failure message to report if a match fails
* @param rawNegatedFailureMessage raw message with a meaning opposite to that of the failure message
* @param failureMessageArgs arguments for constructing failure message to report if a match fails
* @param negatedFailureMessageArgs arguments for constructing message with a meaning opposite to that of the failure message
* @return a <code>MatchResult</code> instance
*/
  def apply(matches: Boolean, rawFailureMessage: String, rawNegatedFailureMessage: String, failureMessageArgs: IndexedSeq[Any], negatedFailureMessageArgs: IndexedSeq[Any]) =
    new MatchResult(
      matches,
      rawFailureMessage,
      rawNegatedFailureMessage,
      rawFailureMessage,            // rawMidSentenceFailureMessage: same string as rawFailureMessage (see scaladoc above)
      rawNegatedFailureMessage,     // rawMidSentenceNegatedFailureMessage: same string as rawNegatedFailureMessage
      failureMessageArgs,
      negatedFailureMessageArgs,
      failureMessageArgs,           // midSentenceFailureMessageArgs: reuses failureMessageArgs
      negatedFailureMessageArgs,    // midSentenceNegatedFailureMessageArgs: reuses negatedFailureMessageArgs
      Prettifier.default
    )
}
| cheeseng/scalatest | scalatest/src/main/scala/org/scalatest/matchers/MatchResult.scala | Scala | apache-2.0 | 21,488 |
package sbt
import DependencyFilter._
/**
 * Configuration describing how dependency version conflicts are reported.
 *
 * @param label          name used in log output to identify what is being checked
 * @param filter         selects which modules participate in conflict detection
 * @param group          maps a module to the key its versions are grouped under
 * @param level          log level used when reporting detected conflicts
 * @param failOnConflict if true, detected conflicts abort with an error instead of only logging
 */
final case class ConflictWarning(label: String, filter: ModuleFilter, group: ModuleID => String, level: Level.Value, failOnConflict: Boolean)
object ConflictWarning
{
	private[this] def org = (_: ModuleID).organization

	/** Matches no modules, so conflict checking is effectively turned off. */
	def disable: ConflictWarning = ConflictWarning("", (_: ModuleID) => false, org, Level.Warn, false)

	/** Warns (without failing) about version conflicts among sbt and Scala organization modules. */
	def default(label: String): ConflictWarning = ConflictWarning(label, moduleFilter(organization = GlobFilter(SbtArtifacts.Organization) | GlobFilter(ScalaArtifacts.Organization)), org, Level.Warn, false)

	/** Checks every module, grouping versions by organization:name, and fails the build on any conflict. */
	def strict(label: String): ConflictWarning = ConflictWarning(label, (id: ModuleID) => true, (id: ModuleID) => id.organization + ":" + id.name, Level.Error, true)

	/** Logs grouped version conflicts found in `report` and, when configured, aborts the build. */
	def apply(config: ConflictWarning, report: UpdateReport, log: Logger)
	{
		val conflicts = IvyActions.groupedConflicts(config.filter, config.group)(report)
		val hasConflicts = !conflicts.isEmpty
		if(hasConflicts)
		{
			val prefix = if(config.failOnConflict) "Incompatible" else "Potentially incompatible"
			val msg = prefix + " versions of dependencies of " + config.label + ":\\n "
			// One line per conflicting group: "<group>: v1, v2, ..."
			val conflictMsgs = conflicts.map { case (label, versions) => label + ": " + versions.mkString(", ") }
			log.log(config.level, conflictMsgs.mkString(msg, "\\n ", ""))
		}
		if(config.failOnConflict && hasConflicts)
			error("Conflicts in " + conflicts.map(_._1).mkString(", ") )
	}
}
| harrah/xsbt | ivy/src/main/scala/sbt/ConflictWarning.scala | Scala | bsd-3-clause | 1,389 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, Date, Timestamp}
import java.util.{Properties, TimeZone}
import java.math.BigDecimal
import org.apache.spark.sql.{DataFrame, QueryTest, Row, SaveMode}
import org.apache.spark.sql.execution.{RowDataSourceScanExec, WholeStageCodegenExec}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.tags.DockerTest
/**
* This patch was tested using the Oracle docker. Created this integration suite for the same.
* The ojdbc6-11.2.0.2.0.jar was to be downloaded from the maven repository. Since there was
* no jdbc jar available in the maven repository, the jar was downloaded from oracle site
* manually and installed in the local; thus tested. So, for SparkQA test case run, the
* ojdbc jar might be manually placed in the local maven repository(com/oracle/ojdbc6/11.2.0.2.0)
* while Spark QA test run.
*
* The following would be the steps to test this
* 1. Pull oracle 11g image - docker pull wnameless/oracle-xe-11g
* 2. Start docker - sudo service docker start
* 3. Download oracle 11g driver jar and put it in maven local repo:
* (com/oracle/ojdbc6/11.2.0.2.0/ojdbc6-11.2.0.2.0.jar)
* 4. The timeout and interval parameter to be increased from 60,1 to a high value for oracle test
* in DockerJDBCIntegrationSuite.scala (Locally tested with 200,200 and executed successfully).
* 5. Run spark test - ./build/sbt "test-only org.apache.spark.sql.jdbc.OracleIntegrationSuite"
*
* All tests in this suite are ignored because of the dependency with the oracle jar from maven
* repository.
*/
@DockerTest
class OracleIntegrationSuite extends DockerJDBCIntegrationSuite with SharedSQLContext {
  import testImplicits._
  // Describes the dockerized Oracle XE instance this suite connects to.
  override val db = new DatabaseOnDocker {
    override val imageName = "wnameless/oracle-xe-11g:16.04"
    override val env = Map(
      "ORACLE_ROOT_PASSWORD" -> "oracle"
    )
    override val usesIpc = false
    override val jdbcPort: Int = 1521
    override def getJdbcUrl(ip: String, port: Int): String =
      s"jdbc:oracle:thin:system/oracle@//$ip:$port/xe"
    override def getStartupProcessName: Option[String] = None
  }
  // Creates and populates every table and temporary view used by the tests below.
  // Commits after each logical group so later reads observe the data.
  override def dataPreparation(conn: Connection): Unit = {
    conn.prepareStatement("CREATE TABLE datetime (id NUMBER(10), d DATE, t TIMESTAMP)")
      .executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetime VALUES
        |(1, {d '1991-11-09'}, {ts '1996-01-01 01:23:45'})
      """.stripMargin.replaceAll("\\n", " ")).executeUpdate()
    conn.commit()
    conn.prepareStatement(
      "CREATE TABLE ts_with_timezone (id NUMBER(10), t TIMESTAMP WITH TIME ZONE)").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO ts_with_timezone VALUES " +
        "(1, to_timestamp_tz('1999-12-01 11:00:00 UTC','YYYY-MM-DD HH:MI:SS TZR'))").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO ts_with_timezone VALUES " +
        "(2, to_timestamp_tz('1999-12-01 12:00:00 PST','YYYY-MM-DD HH:MI:SS TZR'))").executeUpdate()
    conn.commit()
    conn.prepareStatement(
      "CREATE TABLE tableWithCustomSchema (id NUMBER, n1 NUMBER(1), n2 NUMBER(1))").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO tableWithCustomSchema values(12312321321321312312312312123, 1, 0)").executeUpdate()
    conn.commit()
    // mapDateToTimestamp=false keeps Oracle DATE columns as DateType rather than TimestampType.
    sql(
      s"""
        |CREATE TEMPORARY VIEW datetime
        |USING org.apache.spark.sql.jdbc
        |OPTIONS (url '$jdbcUrl', dbTable 'datetime', oracle.jdbc.mapDateToTimestamp 'false')
      """.stripMargin.replaceAll("\\n", " "))
    conn.prepareStatement("CREATE TABLE datetime1 (id NUMBER(10), d DATE, t TIMESTAMP)")
      .executeUpdate()
    conn.commit()
    sql(
      s"""
        |CREATE TEMPORARY VIEW datetime1
        |USING org.apache.spark.sql.jdbc
        |OPTIONS (url '$jdbcUrl', dbTable 'datetime1', oracle.jdbc.mapDateToTimestamp 'false')
      """.stripMargin.replaceAll("\\n", " "))
    conn.prepareStatement("CREATE TABLE numerics (b DECIMAL(1), f DECIMAL(3, 2), i DECIMAL(10))").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO numerics VALUES (4, 1.23, 9999999999)").executeUpdate()
    conn.commit()
    conn.prepareStatement("CREATE TABLE oracle_types (d BINARY_DOUBLE, f BINARY_FLOAT)").executeUpdate()
    conn.commit()
  }
  test("SPARK-16625 : Importing Oracle numeric types") {
    val df = sqlContext.read.jdbc(jdbcUrl, "numerics", new Properties)
    val rows = df.collect()
    assert(rows.size == 1)
    val row = rows(0)
    // The main point of the below assertions is not to make sure that these Oracle types are
    // mapped to decimal types, but to make sure that the returned values are correct.
    // A value > 1 from DECIMAL(1) is correct:
    assert(row.getDecimal(0).compareTo(BigDecimal.valueOf(4)) == 0)
    // A value with fractions from DECIMAL(3, 2) is correct:
    assert(row.getDecimal(1).compareTo(BigDecimal.valueOf(1.23)) == 0)
    // A value > Int.MaxValue from DECIMAL(10) is correct:
    assert(row.getDecimal(2).compareTo(BigDecimal.valueOf(9999999999l)) == 0)
  }
  // Round-trips a string column through Oracle and checks it survives as java.lang.String.
  test("SPARK-12941: String datatypes to be mapped to Varchar in Oracle") {
    // create a sample dataframe with string type
    val df1 = sparkContext.parallelize(Seq(("foo"))).toDF("x")
    // write the dataframe to the oracle table tbl
    df1.write.jdbc(jdbcUrl, "tbl2", new Properties)
    // read the table from the oracle
    val dfRead = sqlContext.read.jdbc(jdbcUrl, "tbl2", new Properties)
    // get the rows
    val rows = dfRead.collect()
    // verify the data type is inserted
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(0).equals("class java.lang.String"))
    // verify the value is the inserted correct or not
    assert(rows(0).getString(0).equals("foo"))
  }
  // Writes one row of every common Spark SQL type and checks the Oracle read-back schema
  // (numeric types come back as DecimalType) and values.
  test("SPARK-16625: General data types to be mapped to Oracle") {
    val props = new Properties()
    props.put("oracle.jdbc.mapDateToTimestamp", "false")
    val schema = StructType(Seq(
      StructField("boolean_type", BooleanType, true),
      StructField("integer_type", IntegerType, true),
      StructField("long_type", LongType, true),
      StructField("float_Type", FloatType, true),
      StructField("double_type", DoubleType, true),
      StructField("byte_type", ByteType, true),
      StructField("short_type", ShortType, true),
      StructField("string_type", StringType, true),
      StructField("binary_type", BinaryType, true),
      StructField("date_type", DateType, true),
      StructField("timestamp_type", TimestampType, true)
    ))
    val tableName = "test_oracle_general_types"
    val booleanVal = true
    val integerVal = 1
    val longVal = 2L
    val floatVal = 3.0f
    val doubleVal = 4.0
    val byteVal = 2.toByte
    val shortVal = 5.toShort
    val stringVal = "string"
    val binaryVal = Array[Byte](6, 7, 8)
    val dateVal = Date.valueOf("2016-07-26")
    val timestampVal = Timestamp.valueOf("2016-07-26 11:49:45")
    val data = spark.sparkContext.parallelize(Seq(
      Row(
        booleanVal, integerVal, longVal, floatVal, doubleVal, byteVal, shortVal, stringVal,
        binaryVal, dateVal, timestampVal
      )))
    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.jdbc(jdbcUrl, tableName, props)
    val dfRead = spark.read.jdbc(jdbcUrl, tableName, props)
    val rows = dfRead.collect()
    // verify the data type is inserted
    val types = dfRead.schema.map(field => field.dataType)
    assert(types(0).equals(DecimalType(1, 0)))
    assert(types(1).equals(DecimalType(10, 0)))
    assert(types(2).equals(DecimalType(19, 0)))
    assert(types(3).equals(DecimalType(19, 4)))
    assert(types(4).equals(DecimalType(19, 4)))
    assert(types(5).equals(DecimalType(3, 0)))
    assert(types(6).equals(DecimalType(5, 0)))
    assert(types(7).equals(StringType))
    assert(types(8).equals(BinaryType))
    assert(types(9).equals(DateType))
    assert(types(10).equals(TimestampType))
    // verify the value is the inserted correct or not
    val values = rows(0)
    assert(values.getDecimal(0).compareTo(BigDecimal.valueOf(1)) == 0)
    assert(values.getDecimal(1).compareTo(BigDecimal.valueOf(integerVal)) == 0)
    assert(values.getDecimal(2).compareTo(BigDecimal.valueOf(longVal)) == 0)
    assert(values.getDecimal(3).compareTo(BigDecimal.valueOf(floatVal)) == 0)
    assert(values.getDecimal(4).compareTo(BigDecimal.valueOf(doubleVal)) == 0)
    assert(values.getDecimal(5).compareTo(BigDecimal.valueOf(byteVal)) == 0)
    assert(values.getDecimal(6).compareTo(BigDecimal.valueOf(shortVal)) == 0)
    assert(values.getString(7).equals(stringVal))
    assert(values.getAs[Array[Byte]](8).mkString.equals("678"))
    assert(values.getDate(9).equals(dateVal))
    assert(values.getTimestamp(10).equals(timestampVal))
  }
  test("SPARK-19318: connection property keys should be case-sensitive") {
    def checkRow(row: Row): Unit = {
      assert(row.getDecimal(0).equals(BigDecimal.valueOf(1)))
      assert(row.getDate(1).equals(Date.valueOf("1991-11-09")))
      assert(row.getTimestamp(2).equals(Timestamp.valueOf("1996-01-01 01:23:45")))
    }
    // Both views were created with the case-sensitive key 'oracle.jdbc.mapDateToTimestamp',
    // so DATE columns must come back as DateType for these asserts to pass.
    checkRow(sql("SELECT * FROM datetime where id = 1").head())
    sql("INSERT INTO TABLE datetime1 SELECT * FROM datetime where id = 1")
    checkRow(sql("SELECT * FROM datetime1 where id = 1").head())
  }
  test("SPARK-20557: column type TIMESTAMP with TIME ZONE should be recognized") {
    val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
    val rows = dfRead.collect()
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(1).equals("class java.sql.Timestamp"))
  }
  test("Column type TIMESTAMP with SESSION_LOCAL_TIMEZONE is different from default") {
    val defaultJVMTimeZone = TimeZone.getDefault
    // Pick the timezone different from the current default time zone of JVM
    val sofiaTimeZone = TimeZone.getTimeZone("Europe/Sofia")
    val shanghaiTimeZone = TimeZone.getTimeZone("Asia/Shanghai")
    val localSessionTimeZone =
      if (defaultJVMTimeZone == shanghaiTimeZone) sofiaTimeZone else shanghaiTimeZone
    withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> localSessionTimeZone.getID) {
      // Reading TIMESTAMP WITH TIME ZONE under a non-default session timezone is expected
      // to fail with Oracle's unrecognized SQL type -101.
      val e = intercept[java.sql.SQLException] {
        val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
        dfRead.collect()
      }.getMessage
      assert(e.contains("Unrecognized SQL type -101"))
    }
  }
  /**
   * Change the Time Zone `timeZoneId` of JVM before executing `f`, then switches back to the
   * original after `f` returns.
   * @param timeZoneId the ID for a TimeZone, either an abbreviation such as "PST", a full name such
   *                   as "America/Los_Angeles", or a custom ID such as "GMT-8:00".
   */
  private def withTimeZone(timeZoneId: String)(f: => Unit): Unit = {
    val originalLocale = TimeZone.getDefault
    try {
      // Add Locale setting
      TimeZone.setDefault(TimeZone.getTimeZone(timeZoneId))
      f
    } finally {
      TimeZone.setDefault(originalLocale)
    }
  }
  // Verifies that the same stored instants render differently under PST vs UTC JVM timezones.
  test("Column TIMESTAMP with TIME ZONE(JVM timezone)") {
    def checkRow(row: Row, ts: String): Unit = {
      assert(row.getTimestamp(1).equals(Timestamp.valueOf(ts)))
    }
    withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> TimeZone.getDefault.getID) {
      val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
      withTimeZone("PST") {
        assert(dfRead.collect().toSet ===
          Set(
            Row(BigDecimal.valueOf(1), java.sql.Timestamp.valueOf("1999-12-01 03:00:00")),
            Row(BigDecimal.valueOf(2), java.sql.Timestamp.valueOf("1999-12-01 12:00:00"))))
      }
      withTimeZone("UTC") {
        assert(dfRead.collect().toSet ===
          Set(
            Row(BigDecimal.valueOf(1), java.sql.Timestamp.valueOf("1999-12-01 11:00:00")),
            Row(BigDecimal.valueOf(2), java.sql.Timestamp.valueOf("1999-12-01 20:00:00"))))
      }
    }
  }
  test("SPARK-18004: Make sure date or timestamp related predicate is pushed down correctly") {
    val props = new Properties()
    props.put("oracle.jdbc.mapDateToTimestamp", "false")
    val schema = StructType(Seq(
      StructField("date_type", DateType, true),
      StructField("timestamp_type", TimestampType, true)
    ))
    val tableName = "test_date_timestamp_pushdown"
    val dateVal = Date.valueOf("2017-06-22")
    val timestampVal = Timestamp.valueOf("2017-06-22 21:30:07")
    val data = spark.sparkContext.parallelize(Seq(
      Row(dateVal, timestampVal)
    ))
    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.jdbc(jdbcUrl, tableName, props)
    val dfRead = spark.read.jdbc(jdbcUrl, tableName, props)
    val millis = System.currentTimeMillis()
    val dt = new java.sql.Date(millis)
    val ts = new java.sql.Timestamp(millis)
    // Query Oracle table with date and timestamp predicates
    // which should be pushed down to Oracle.
    val df = dfRead.filter(dfRead.col("date_type").lt(dt))
      .filter(dfRead.col("timestamp_type").lt(ts))
    val parentPlan = df.queryExecution.executedPlan
    assert(parentPlan.isInstanceOf[WholeStageCodegenExec])
    val node = parentPlan.asInstanceOf[WholeStageCodegenExec]
    val metadata = node.child.asInstanceOf[RowDataSourceScanExec].metadata
    // The "PushedFilters" part should exist in Dataframe's
    // physical plan and the existence of right literals in
    // "PushedFilters" is used to prove that the predicates
    // pushing down have been effective.
    assert(metadata.get("PushedFilters").isDefined)
    assert(metadata("PushedFilters").contains(dt.toString))
    assert(metadata("PushedFilters").contains(ts.toString))
    val row = df.collect()(0)
    assert(row.getDate(0).equals(dateVal))
    assert(row.getTimestamp(1).equals(timestampVal))
  }
  test("SPARK-20427/SPARK-20921: read table use custom schema by jdbc api") {
    // default will throw IllegalArgumentException
    val e = intercept[org.apache.spark.SparkException] {
      spark.read.jdbc(jdbcUrl, "tableWithCustomSchema", new Properties()).collect()
    }
    assert(e.getMessage.contains(
      "requirement failed: Decimal precision 39 exceeds max precision 38"))
    // custom schema can read data
    val props = new Properties()
    props.put("customSchema",
      s"ID DECIMAL(${DecimalType.MAX_PRECISION}, 0), N1 INT, N2 BOOLEAN")
    val dfRead = spark.read.jdbc(jdbcUrl, "tableWithCustomSchema", props)
    val rows = dfRead.collect()
    // verify the data type
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(0).equals("class java.math.BigDecimal"))
    assert(types(1).equals("class java.lang.Integer"))
    assert(types(2).equals("class java.lang.Boolean"))
    // verify the value
    val values = rows(0)
    assert(values.getDecimal(0).equals(new java.math.BigDecimal("12312321321321312312312312123")))
    assert(values.getInt(1).equals(1))
    assert(values.getBoolean(2).equals(false))
  }
  test("SPARK-22303: handle BINARY_DOUBLE and BINARY_FLOAT as DoubleType and FloatType") {
    val tableName = "oracle_types"
    val schema = StructType(Seq(
      StructField("d", DoubleType, true),
      StructField("f", FloatType, true)))
    val props = new Properties()
    // write it back to the table (append mode)
    val data = spark.sparkContext.parallelize(Seq(Row(1.1, 2.2f)))
    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.mode(SaveMode.Append).jdbc(jdbcUrl, tableName, props)
    // read records from oracle_types
    val dfRead = sqlContext.read.jdbc(jdbcUrl, tableName, new Properties)
    val rows = dfRead.collect()
    assert(rows.size == 1)
    // check data types
    val types = dfRead.schema.map(field => field.dataType)
    assert(types(0).equals(DoubleType))
    assert(types(1).equals(FloatType))
    // check values
    val values = rows(0)
    assert(values.getDouble(0) === 1.1)
    assert(values.getFloat(1) === 2.2f)
  }
}
| lxsmnv/spark | external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/OracleIntegrationSuite.scala | Scala | apache-2.0 | 16,839 |
package alignment
/** A triplet of [[Base]]s (a codon), with the three bases in reading order. */
case class Codon(first:Base,second:Base,third:Base)
object Codon{
def apply(s:String):Codon = {
require(s.length == 3)
new Codon(Base fromChar s(0),Base fromChar s(1),Base fromChar s(2))
}
} | izziiyt/biutil | src/main/scala/alignment/Codon.scala | Scala | mit | 224 |
package eu.phisikus.plotka.network.talker
import java.util.UUID
import java.util.concurrent.TimeUnit
import eu.phisikus.plotka.conf.model.BasicNodeConfiguration
import eu.phisikus.plotka.model.{NetworkMessage, NetworkPeer}
import eu.phisikus.plotka.network.listener.NetworkListener
import eu.phisikus.plotka.network.listener.dto.TestMessage
import eu.phisikus.plotka.network.listener.handlers.ListMessageHandler
import org.apache.commons.lang3.RandomUtils
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FunSuite, Matchers}
import scala.concurrent.Future
import scala.concurrent.duration.Duration
/**
 * Integration-style tests for [[NetworkTalker]]: messages are sent to a real
 * [[NetworkListener]] bound on localhost port 3034 and received messages are
 * collected by a [[ListMessageHandler]]. Assertions poll with `eventually`
 * because delivery is asynchronous.
 */
class NetworkTalkerTest extends FunSuite with Eventually with Matchers {
  // Listener configuration with no peers; the talker sends to this same node.
  private val testNodeConfiguration = BasicNodeConfiguration(peers = Nil, port = 3034)
  // The local node acts as both sender and recipient in every test.
  private val localPeer = NetworkPeer(
    testNodeConfiguration.id, testNodeConfiguration.address,
    testNodeConfiguration.port)
  test("Should send message using NetworkTalker") {
    val testMessageConsumer = new ListMessageHandler()
    val testTalker = new NetworkTalker(localPeer)
    val testListener = new NetworkListener(testNodeConfiguration, testMessageConsumer)
    val testMessage = NetworkMessage(localPeer, localPeer, getRandomTestMessageBody)
    testListener.start()
    // Synchronous send: the returned Try reports transport success, delivery is
    // still verified separately through the consumer below.
    val result = testTalker.send(testMessage.recipient.asInstanceOf[NetworkPeer], testMessage.message)
    assert(result.isSuccess)
    eventually(timeout(Span(10, Seconds)), interval(Span(300, Millis))) {
      testMessageConsumer.receivedMessages should contain(testMessage)
    }
    testListener.stop()
  }
  test("Should send message asynchronously using NetworkTalker") {
    val testMessageConsumer = new ListMessageHandler()
    val testTalker = new NetworkTalker(localPeer)
    val testListener = new NetworkListener(testNodeConfiguration, testMessageConsumer)
    val testMessage = NetworkMessage(localPeer, localPeer, getRandomTestMessageBody)
    testListener.start()
    // The callback variant reports the result through this flag instead of a return value.
    var wasCallbackSuccessful = false
    testTalker.send(testMessage.recipient.asInstanceOf[NetworkPeer],
      testMessage.message,
      sendResult => wasCallbackSuccessful = sendResult.isSuccess)
    eventually(timeout(Span(10, Seconds)), interval(Span(300, Millis))) {
      testMessageConsumer.receivedMessages should contain(testMessage)
      wasCallbackSuccessful shouldBe true
    }
    testListener.stop()
  }
  test("Should send multiple messages using NetworkTalker") {
    val testMessageConsumer = new ListMessageHandler()
    val testTalker = new NetworkTalker(localPeer)
    val testListener = new NetworkListener(testNodeConfiguration, testMessageConsumer)
    val testMessages = getMultipleRandomTestMessages(10000)
    testListener.start()
    // Sends concurrently (.par) to exercise the talker under parallel load;
    // every individual send must still succeed.
    testMessages.par.map(testMessage => {
      testTalker.send(testMessage.recipient.asInstanceOf[NetworkPeer], testMessage.message)
    }).foreach(sendResult => assert(sendResult.isSuccess))
    eventually(timeout(Span(10, Seconds)), interval(Span(300, Millis))) {
      testMessageConsumer.receivedMessages should contain allElementsOf testMessages
    }
    testListener.stop()
  }
  test("Should fail on sending message to peer that is not listening") {
    val testTalker = new NetworkTalker(localPeer)
    val testMessage = NetworkMessage(localPeer, localPeer, getRandomTestMessageBody)
    // No listener on 127.0.0.2:9090, so the send must fail.
    val sendResult = testTalker.send(NetworkPeer("fake", "127.0.0.2", 9090), testMessage)
    sendResult.isFailure shouldBe true
  }
  test("Should fail on sending message to peer that disconnected") {
    val testMessageConsumer = new ListMessageHandler()
    val testTalker = new NetworkTalker(localPeer)
    val testMessage = NetworkMessage(localPeer, localPeer, getRandomTestMessageBody)
    val testListener = new NetworkListener(testNodeConfiguration, testMessageConsumer)
    testListener.start()
    // First send succeeds while listening, then the listener is stopped and a
    // later send to the same peer must eventually fail.
    testTalker.send(localPeer, testMessage)
    testListener.stop()
    eventually(timeout(Span(10, Seconds)), interval(Span(300, Millis))) {
      testTalker.send(localPeer, testMessage).isFailure shouldBe true
    }
  }
  // Builds `count` messages addressed from/to the local peer, each with a random body.
  private def getMultipleRandomTestMessages(count: Int): List[NetworkMessage] = {
    Range(0, count).map(i => NetworkMessage(
      localPeer,
      localPeer,
      getRandomTestMessageBody)).toList
  }
  // Random payload so messages are distinguishable in the received-message list.
  private def getRandomTestMessageBody: TestMessage = {
    TestMessage(UUID.randomUUID().toString, RandomUtils.nextInt())
  }
}
| phisikus/plotka | networking/src/test/scala/eu/phisikus/plotka/network/talker/NetworkTalkerTest.scala | Scala | bsd-3-clause | 4,428 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.core.service
import com.twitter.util.{Future, Time, Duration, JavaTimer}
import com.typesafe.scalalogging.LazyLogging
import java.net.{InetAddress, InetSocketAddress}
import java.util.UUID
import java.util.concurrent.locks.ReentrantLock
import scala.collection.mutable.HashMap
import scray.service.qmodel.thrifscala.ScrayUUID
import scray.service.qservice.thrifscala.{ScrayTServiceEndpoint, ScrayMetaTService}
// For alternative concurrent map implementation
// import java.util.concurrent.ConcurrentHashMap
// import scala.collection.JavaConversions._
/**
 * Thrift meta-service implementation that maintains a registry of scray service
 * endpoints. All access to the mutable `endpoints` map is guarded by a single
 * ReentrantLock; entries expire after the package-level EXPIRATION period unless
 * refreshed.
 */
object ScrayMetaTServiceImpl extends ScrayMetaTService[Future] with LazyLogging {
  val REQUESTLOGPREFIX = "Received meta service request."
  // Internal endpoint representation: socket address plus absolute expiration time.
  case class ScrayServiceEndpoint(addr: InetSocketAddress, expires: Time)
  // Guards every read and write of `endpoints`; ReentrantLock allows the nested
  // lock acquisition in getServiceEndpoints -> removeExpiredEndpoints.
  private val lock = new ReentrantLock
  private val endpoints =
    // Alternative concurrent map implementation: ConcurrentHashMap[ScrayUUID, ScrayTServiceEndpoint]
    new HashMap[UUID, ScrayServiceEndpoint]
  /**
   * Remove expired endpoints.
   * The function is being triggered by a timer thread and applies CAS semantics.
   */
  def removeExpiredEndpoints = {
    lock.lock()
    try {
      // Iterate over a snapshot so removals do not mutate the collection being traversed.
      val localHashMap = new HashMap ++ endpoints
      localHashMap.filter{ case (id, ep) => ep.expires < Time.now }.foreach {
        case (id, ep) =>
          endpoints.remove(id).map(_ => logger.warn(s"Removed expired endpoint ${ep.addr}"))
      }
    } finally {
      lock.unlock()
    }
  }
  // Converts an internal (UUID, endpoint) entry to its Thrift representation,
  // carrying the expiration as epoch milliseconds.
  implicit def sep2tep(ep: (UUID, ScrayServiceEndpoint)): ScrayTServiceEndpoint = {
    ScrayTServiceEndpoint(
      ep._2.addr.getHostString,
      ep._2.addr.getPort,
      Some(ep._1),
      Some(ep._2.expires.sinceEpoch.inMillis))
  }
  /**
   * Fetch a list of service endpoints.
   * Each endpoint provides ScrayStatelessTService and ScrayStatefulTService alternatives.
   * Queries can address different endpoints for load distribution.
   */
  def getServiceEndpoints(): Future[Seq[ScrayTServiceEndpoint]] = {
    lock.lock
    try {
      logger.trace(REQUESTLOGPREFIX + " Operation='getServiceEndpoints'")
      // Purge stale entries first so callers never see expired endpoints.
      removeExpiredEndpoints
      Future.value(endpoints.iterator.map[ScrayTServiceEndpoint] { ep => ep } toSeq)
    } finally {
      lock.unlock()
    }
  }
  // Converts a Thrift endpoint to the internal form, assigning a fresh expiration deadline.
  implicit def tep2sep(tep: ScrayTServiceEndpoint): ScrayServiceEndpoint = new ScrayServiceEndpoint(
    new InetSocketAddress(InetAddress.getByName(tep.host), tep.port), EXPIRATION.fromNow)
  /**
   * Add new service endpoint.
   * The endpoint will be removed after a default expiration period.
   */
  def addServiceEndpoint(tEndpoint: ScrayTServiceEndpoint): Future[ScrayTServiceEndpoint] = {
    val ep: (UUID, ScrayServiceEndpoint) = (UUID.randomUUID() -> tEndpoint)
    lock.lock()
    try {
      logger.trace(REQUESTLOGPREFIX + s" Operation='addServiceEndpoint' with address=${ep._2.addr} expiring at ${ep._2.expires}.")
      // we'll always add the endpoint regardless of redundancy (since we have an 'auto clean' feature)
      // Matches an existing registration by host name; `tEndpoint._1` is presumably the
      // thrift struct's host field (cf. tep2sep above) — TODO confirm.
      endpoints.iterator.find { p => p._2.addr.getHostString == tEndpoint._1 }.map{ p =>
        endpoints.put(p._1, p._2.copy(expires = EXPIRATION.fromNow))
        // NOTE(review): the response is built from the pre-refresh entry `p`, so the
        // expiration it reports is the old deadline, not the refreshed one — confirm intended.
        Future.value(sep2tep(p))
      }.getOrElse {
        endpoints.put(ep._1, ep._2)
        Future.value(ep)
      }
    } finally {
      lock.unlock()
    }
  }
  /**
   * Restore the default expiration period of an endpoint.
   */
  def refreshServiceEndpoint(endpointID: ScrayUUID): Future[Unit] = {
    lock.lock()
    try {
      logger.trace(REQUESTLOGPREFIX + s" Operation='refreshServiceEndpoint' with endpointID=$endpointID")
      endpoints.get(endpointID) match {
        // refresh w/ CAS semantics
        case Some(_ep) => endpoints.put(endpointID, _ep.copy(expires = EXPIRATION.fromNow))
        case None =>
      }
      Future.value()
    } finally {
      lock.unlock()
    }
  }
  /**
   * Return vital sign.
   */
  def ping(): Future[Boolean] = { logger.debug(REQUESTLOGPREFIX + " Operation='ping'"); Future.value(true) }
  /**
   * Shutdown the server.
   */
  def shutdown(waitNanos: Option[Long]): Future[Unit] = {
    val DEFAULT_SHUTDOWN_TIMEOUT = Duration.fromSeconds(10).fromNow
    logger.warn(s"Meta service request: 'shutdown' with waitNanos=$waitNanos")
    // Exit after the requested delay (default 10s); the timer fires System.exit on a
    // non-daemon JavaTimer thread so the JVM terminates even if other work is pending.
    val waitUntil = waitNanos.map(Duration.fromNanoseconds(_).fromNow).getOrElse(DEFAULT_SHUTDOWN_TIMEOUT)
    new JavaTimer(false).schedule(waitUntil)(System.exit(0))
    Future.value()
  }
}
| scray/scray | scray-service/src/main/scala/scray/core/service/ScrayMetaTServiceImpl.scala | Scala | apache-2.0 | 5,152 |
package com.tapad.docker
import java.io.{ File, FileWriter }
import java.util
import java.util.regex.{ Matcher, Pattern }
import com.tapad.docker.DockerComposeKeys._
import org.yaml.snakeyaml.Yaml
import sbt.Keys._
import sbt._
import scala.collection.JavaConverters._
import scala.collection.JavaConversions._
import scala.collection.{ Iterable, Seq }
import scala.io.Source._
import scala.util.{ Failure, Success, Try }
trait ComposeFile extends SettingsHelper with ComposeCustomTagHelpers with PrintFormatting {
// Compose file Yaml keys
val imageKey = "image"
val environmentKey = "environment"
val portsKey = "ports"
val servicesKey = "services"
val envFileKey = "env_file"
val volumesKey = "volumes"
val networksKey = "networks"
//Set of values representing the source location of a Docker Compose image
val cachedImageSource = "cache"
val definedImageSource = "defined"
val buildImageSource = "build"
//Custom tags
val useLocalBuildTag = "<localbuild>"
val skipPullTag = "<skippull>"
val environmentDebugKey = "JAVA_TOOL_OPTIONS"
//List of docker-compose fields that are currently unsupported by the plugin
val unsupportedFields = List("build", "container_name", "extends")
type yamlData = Map[String, java.util.LinkedHashMap[String, Any]]
val useStaticPortsArg = "-useStaticPorts"
val dynamicPortIdentifier = "0"
/**
* processCustomTags performs any pre-processing of Custom Tags in the Compose File before the Compose file is used
* by Docker. This function will also determine any debug ports and rename any 'env_file' defined files to use their
* fully qualified paths so that they can be accessed from the tmp location the docker-compose.yml is launched from
* This function can be overridden in derived plug-ins to add additional custom tags to process
*
* @param state The sbt state
* @param args Args passed to sbt command
* @return The collection of ServiceInfo objects. The Compose Yaml passed in is also modified in-place so the calling
* function will have the updates performed here
*/
def processCustomTags(implicit state: State, args: Seq[String], composeYaml: yamlData): Iterable[ServiceInfo] = {
val useExistingImages = getSetting(composeNoBuild)
val localService = getSetting(composeServiceName)
val usedStaticPorts = scala.collection.mutable.Set[String]()
getComposeFileServices(composeYaml).map { service =>
val (serviceName, serviceData) = service
for (field <- unsupportedFields if serviceData.containsKey(field)) {
throw ComposeFileFormatException(getUnsupportedFieldErrorMsg(field))
}
val imageName = serviceData.get(imageKey).toString
//Update Compose yaml with any images built as part of dockerComposeUp regardless of how it's defined in the
//compose file
val (updatedImageName, imageSource) = if (!useExistingImages && serviceName == localService) {
//If the image does not contain a tag or has the tag "latest" it will not be replaced
(replaceDefinedVersionTag(imageName, getComposeServiceVersion(state)), buildImageSource)
} else if (imageName.toLowerCase.contains(useLocalBuildTag)) {
(processImageTag(state, args, imageName), buildImageSource)
} else if (imageName.toLowerCase.contains(skipPullTag) || containsArg(DockerComposePlugin.skipPullArg, args)) {
(processImageTag(state, args, imageName), cachedImageSource)
} else {
(imageName, definedImageSource)
}
//Update env_file files to use the fully qualified path so that it can still be accessed from the tmp location
if (serviceData.containsKey(envFileKey)) {
val composeFileFullPath = new File(getSetting(composeFile)).getAbsolutePath
val composeFileDir = composeFileFullPath.substring(0, composeFileFullPath.lastIndexOf(File.separator))
val entry = serviceData.get(envFileKey)
entry match {
case e: String =>
val updated = getFullyQualifiedPath(e, composeFileDir)
serviceData.put(envFileKey, updated)
case e: util.ArrayList[_] =>
val updated = e.asScala.map(file => getFullyQualifiedPath(file.asInstanceOf[String], composeFileDir))
serviceData.put(envFileKey, updated.asJava)
}
}
//Update relative volumes to use the fully qualified path so they can still be accessed from the tmp location
if (serviceData.containsKey(volumesKey)) {
val composeFileFullPath = new File(getSetting(composeFile)).getAbsolutePath
val composeFileDir = composeFileFullPath.substring(0, composeFileFullPath.lastIndexOf(File.separator))
val volumes = serviceData.get(volumesKey).asInstanceOf[util.List[String]].asScala
val updated = volumes.map { volume =>
volume match {
case relativeVolume if relativeVolume.startsWith(".") =>
val Array(relativeLocalPath, mountPath) = relativeVolume.split(":", 2)
val fullyQualifiedLocalPath = getFullyQualifiedPath(relativeLocalPath, composeFileDir)
s"$fullyQualifiedLocalPath:$mountPath"
case nonRelativeVolume =>
nonRelativeVolume
}
}
serviceData.put(volumesKey, updated.asJava)
}
serviceData.put(imageKey, updatedImageName)
val useStatic = args.contains(useStaticPortsArg)
val (updatedPortInfo, updatedPortList) = getPortInfo(serviceData, useStatic).zipped.map { (portInfo, portMapping) =>
if (useStatic) {
if (usedStaticPorts.add(portMapping)) {
(portInfo, portMapping)
} else {
val containerPort = portMapping.split(":").last
printWarning(s"Could not define a static host port '$containerPort' for service '$serviceName' " +
s"because port '$containerPort' was already in use. A dynamically assigned port will be used instead.", getSetting(suppressColorFormatting)(state))
(PortInfo(dynamicPortIdentifier, portInfo.containerPort, portInfo.isDebug), s"$dynamicPortIdentifier:$containerPort")
}
} else
(portInfo, portMapping)
}.unzip
serviceData.put(portsKey, new util.ArrayList[String](updatedPortList))
ServiceInfo(serviceName, updatedImageName, imageSource, updatedPortInfo)
}
}
/** Determines the version to use for local image tagging in docker compose.
 *
 * @param state The sbt state
 * @return The version string produced by the `composeServiceVersionTask`
 */
def getComposeServiceVersion(implicit state: State): String =
  Project.extract(state).runTask(composeServiceVersionTask, state)._2
/** Builds the standard error message for a docker-compose field this plugin does not handle.
 *
 * @param fieldName the unsupported top-level Compose field (without the trailing colon)
 * @return the full, user-facing error message
 */
def getUnsupportedFieldErrorMsg(fieldName: String): String = {
  val readmePointer = "Please see the README for more information on the set of unsupported fields."
  s"Docker Compose field '$fieldName:' is currently not supported by sbt-docker-compose. $readmePointer"
}
/** Attempt to get the fully qualified path to a file. The file is first looked up at the path
 * provided as-is; failing that, it is resolved relative to the docker-compose yml location.
 * If it exists at neither location an exception describing the failed lookup is thrown.
 *
 * @param fileName The file name to find
 * @param composePath The path to the directory of the docker-compose yml file being used
 * @return The fully qualified (canonical) path to the file
 * @throws IllegalStateException if the file exists at neither location
 */
def getFullyQualifiedPath(fileName: String, composePath: String): String = {
  val direct = new File(fileName)
  val relativeToCompose = new File(s"$composePath/$fileName")
  if (direct.exists)
    direct.getCanonicalFile.getAbsolutePath
  else if (relativeToCompose.exists)
    relativeToCompose.getCanonicalFile.getAbsolutePath
  else
    throw new IllegalStateException(s"Could not find file: '$fileName' either at the specified path or in the '$composePath' directory.")
}
/** If the Yaml is in the Docker 1.6 format, which nests service definitions under a top-level
 * "services" key, return that sub-section converted to a Scala map. Otherwise (legacy format)
 * the original Yaml already maps service names directly and is returned unchanged.
 *
 * @param composeYaml Docker Compose yaml to process
 * @return The 'services' section of the Yaml file
 */
def getComposeFileServices(composeYaml: yamlData): yamlData =
  composeYaml.get(servicesKey).fold(composeYaml) { services =>
    services.asInstanceOf[java.util.Map[String, java.util.LinkedHashMap[String, Any]]].asScala.toMap
  }
/** Determines the docker-compose file format version: 2 when a top-level 'services'
 * key is present, 1 (legacy format) otherwise.
 */
def getComposeVersion(composeYaml: yamlData): Int =
  if (composeYaml.contains(servicesKey)) 2 else 1
/**
 * Get all non-external network names defined under the 'networks' key in the docker-compose file.
 * Networks whose definition carries an "external" key are filtered out, as they are managed
 * outside of this compose file.
 *
 * @param composeYaml Docker Compose yaml to process
 * @return The keys for the internal 'networks' section of the Yaml file
 */
def composeInternalNetworkNames(composeYaml: yamlData): Seq[String] = {
  composeYaml.get(networksKey) match {
    case Some(networks) => networks.filterNot { network =>
      val (_, networkData) = network
      // networkData is null when a network is declared with no settings (e.g. "mynet:");
      // Option(null) == None, so such networks are kept as internal.
      Option(networkData).exists(_.asInstanceOf[java.util.Map[String, Any]].containsKey("external"))
    }.keys.toSeq
    case None => Seq.empty
  }
}
/** Get all named volumes defined under the 'volumes' key in the docker-compose file.
 *
 * @param composeYaml Docker Compose yaml to process
 * @return The keys for the 'volumes' section of the Yaml file (empty when absent)
 */
def composeNamedVolumes(composeYaml: yamlData): Seq[String] =
  composeYaml.get(volumesKey).map(_.keys.toSeq).getOrElse(Seq.empty)
/** Function that reads plug-in defined "<customTag>" fields from the Docker Compose file and performs some
 * transformation on the Docker File based on the tag. The file after transformations are applied is what is used by
 * Docker Compose to launch the instance. This function can be overridden in derived plug-ins to add additional tags
 * pre-processing features.
 *
 * @param state The sbt state
 * @param args Args passed to sbt command
 * @param imageName The image name and tag to be processed, e.g. "testimage:1.0.0<skipPull>". This plugin just
 *                  removes the custom tags (case-insensitively) from the image name.
 * @return The updated image value after any processing indicated by the custom tags
 */
def processImageTag(implicit state: State, args: Seq[String], imageName: String): String =
  Seq(useLocalBuildTag, skipPullTag).foldLeft(imageName) { (name, tag) =>
    name.replaceAll(s"(?i)$tag", "")
  }
/**
 * Parses the Port information from the Yaml content for a service. It will also report any ports that are exposed as
 * Debugging ports and expand any defined port ranges. Static ports will be used rather than the Docker dynamically
 * assigned ports when the '-useStaticPorts' argument is supplied.
 *
 * Side effect: the expanded (and possibly statified) port list is written back into `serviceKeys`
 * under the ports key.
 *
 * @param serviceKeys The Docker Compose Yaml representing a service
 * @param useStatic The flag used to indicate whether the '-useStaticPorts' argument is supplied
 * @return PortInfo collection and port mapping collection for all defined ports
 */
def getPortInfo(serviceKeys: java.util.LinkedHashMap[String, Any], useStatic: Boolean): (List[PortInfo], List[String]) = {
  if (serviceKeys.containsKey(portsKey)) {
    //Determine if there is a debug port set on the service
    val debugPort = if (serviceKeys.containsKey(environmentKey)) {
      val debugAddress = {
        serviceKeys.get(environmentKey) match {
          case key: util.LinkedHashMap[_, _] =>
            // 'environment:' given in map form (KEY: value)
            val env = key.asInstanceOf[java.util.LinkedHashMap[String, String]].asScala
            val debugOptions = env.filter(_._1 == environmentDebugKey)
            debugOptions.flatMap(_._2.split(','))
          case key: util.ArrayList[_] =>
            // 'environment:' given in list form ("KEY=value")
            val env = key.asInstanceOf[util.ArrayList[String]].asScala
            val debugOptions = env.filter(_.startsWith(environmentDebugKey))
            debugOptions.flatMap(_.split(','))
        }
      }.filter(_.contains("address")).mkString.split("=")
      if (debugAddress.size == 2) debugAddress(1) else "none"
    }
    // NOTE(review): the `if` above has no `else` branch, so when no 'environment:' key exists
    // `debugPort` is `()` (type widened to Any) and `portArray.contains(debugPort)` below can
    // never match a String entry — confirm this fallthrough is intentional.

    //If any port ranges are defined expand them into individual ports
    val portRangeChar = "-"
    val (needsExpansion, noExpansion) = serviceKeys.get(portsKey).asInstanceOf[java.util.ArrayList[String]].asScala.partition(_.contains(portRangeChar))
    val expandedPorts: Seq[String] = needsExpansion.flatMap { p =>
      // Strip a leading "0:" (dynamic host marker) before splitting host:container parts
      val portParts = p.replaceFirst("^0:", "").split(':')
      val portSplitL = portParts(0).split(portRangeChar)
      val (rangeStartL, rangeEndL) = (portSplitL(0), portSplitL(1))
      val startL = rangeStartL.toInt
      val endL = rangeEndL.toInt
      val rangeL = endL - startL

      if (portParts.length == 1) {
        // Single-sided range like "8000-8005": expand to bare container ports
        for (i <- 0 to rangeL)
          yield s"${startL + i}"
      } else {
        // Two-sided range like "8000-8005:9000-9005": both sides must span the same count
        val portSplitR = portParts(1).split(portRangeChar)
        val (rangeStartR, rangeEndR) = (portSplitR(0), portSplitR(1))
        val startR = rangeStartR.toInt
        val endR = rangeEndR.toInt
        val rangeR = endR - startR

        if (rangeL != rangeR)
          throw new IllegalStateException(s"Invalid port range mapping specified for $p")

        for (i <- 0 to rangeR)
          yield s"${startL + i}:${startR + i}"
      }
    }

    val ports = expandedPorts ++ noExpansion
    val list = {
      if (useStatic)
        getStaticPortMappings(ports)
      else
        new java.util.ArrayList[String](ports)
    }
    // Persist the normalized list back into the service definition
    serviceKeys.put(portsKey, list)

    (serviceKeys.get(portsKey).asInstanceOf[java.util.ArrayList[String]].asScala.map(port => {
      val portArray = port.split(':')
      // A bare entry maps a container port to itself for reporting purposes
      val (hostPort, containerPort) = if (portArray.length == 2) (portArray(0), portArray(1)) else (portArray(0), portArray(0))
      val debugMatch = portArray.contains(debugPort)
      PortInfo(hostPort, containerPort, debugMatch)
    }).toList, list.toList)
  } else {
    (List.empty, List.empty)
  }
}
/** Rewrites each port mapping so that the host port equals the container port, producing
 * deterministic (static) host ports. Both dynamic-placeholder mappings and bare container
 * ports are rewritten; any entry matching neither pattern is passed through unchanged.
 */
def getStaticPortMappings(ports: Seq[String]): java.util.ArrayList[String] = {
  val dynamicMapping = (dynamicPortIdentifier + """:(\d+)(\D*)""").r
  val bareContainerPort = """(\d+)(\D*)""".r
  val staticPorts = ports.map {
    case dynamicMapping(port, protocol) => s"$port:$port$protocol"
    case bareContainerPort(port, protocol) => s"$port:$port$protocol"
    case otherwise => otherwise
  }
  new java.util.ArrayList[String](staticPorts)
}
/** Reads and parses a docker-compose file, applying variable substitution to the raw text first.
 *
 * The underlying `Source` is explicitly closed after reading; the previous implementation
 * leaked the file handle.
 *
 * @param composePath Path to the docker-compose yml file
 * @param variables Substitution variables to apply to the raw yml text
 * @return The parsed Yaml as a Scala map
 */
def readComposeFile(composePath: String, variables: Vector[(String, String)] = Vector.empty): yamlData = {
  val source = fromFile(composePath)
  val yamlString =
    try source.getLines().mkString("\n")
    finally source.close()
  val yamlUpdated = processVariableSubstitution(yamlString, variables)
  new Yaml().load(yamlUpdated).asInstanceOf[java.util.Map[String, java.util.LinkedHashMap[String, Any]]].asScala.toMap
}
/**
 * Substitute all docker-compose variables in the YAML file. This is traditionally done by docker-compose itself,
 * but is being performed by the plugin to support other functionality.
 *
 * The variable patterns use `[^}]*` (rather than a greedy `.*`) so that each match stays inside
 * a single `${...}` reference; with the greedy form, two references on one line were merged
 * into a single match, which then crashed the single-case `split` match below.
 *
 * @param yamlString Stringified docker-compose file.
 * @param variables Substitution variables.
 * @return An updated stringified docker-compile file.
 */
def processVariableSubstitution(yamlString: String, variables: Vector[(String, String)]): String = {
  //Substitute all defined environment variables allowing for the optional default value syntax ':-'
  val substitutedCompose = variables.foldLeft(yamlString) {
    case (y, (key, value)) => y.replaceAll("\\$\\{" + key + "(:-[^}]*)?\\}", Matcher.quoteReplacement(value))
  }

  //Find all remaining undefined environment variables which have a corresponding default value
  val defaultEnvRegex = "\\$\\{[^}]*:-[^}]*\\}".r
  val envToReplace = defaultEnvRegex.findAllIn(substitutedCompose).map { env =>
    // Split on the first ':-' only so a default value containing ':-' is preserved intact
    env.split(":-", 2) match {
      case Array(_, default) => env -> default.replace("}", "")
    }
  }

  //Replace all undefined environment variables with the corresponding default value.
  //'$' is doubled in the output ('$$' is the docker-compose escape for a literal '$').
  envToReplace.foldLeft(substitutedCompose) {
    case (y, (key, value)) => y.replaceAll(Pattern.quote(key), Matcher.quoteReplacement(value.replace("$", "$$")))
  }
}
/** Attempts to delete the compose file at the given path.
 *
 * Returns false only when `File.delete` throws (e.g. a SecurityException); a delete
 * that simply returns false (file missing) still yields true, matching prior behavior.
 */
def deleteComposeFile(composePath: String): Boolean =
  Try(new File(composePath).delete()).isSuccess
/** Saves the supplied Docker Compose Yaml data to a freshly created temporary file.
 *
 * @param finalYaml Compose Yaml to save
 * @return The path to the temporary Compose File
 */
def saveComposeFile(finalYaml: yamlData): String = {
  val tempComposeFile = File.createTempFile("compose-updated", ".yml")
  val writer = new FileWriter(tempComposeFile)
  try {
    new Yaml().dump(finalYaml.asJava, writer)
  } finally {
    writer.close()
  }
  tempComposeFile.getPath
}
}
| Tapad/sbt-docker-compose | src/main/scala/com/tapad/docker/ComposeFile.scala | Scala | bsd-3-clause | 17,084 |
// Fixture for the explicit-nulls tests below: `x` is non-nullable, while `y` and
// `child` are explicitly nullable (union types with `Null`).
class C {
  var x: String = ""
  var y: String | Null = null
  var child: C | Null = null
}
// Negative compiler test: each `// error` marker is checked by the test harness,
// so the marked lines must fail to type-check and the markers must stay on their lines.
class S {
  val c: C = new C
  val d: C | Null = c

  // Reading: a nullable member only types as its nullable type.
  def test1 = {
    val x1: String = c.x
    val x2: String | Null = c.x
    val y1: String = c.y // error
    val y2: String | Null = c.y
    val c1: C = c.child // error
    val c2: C | Null = c.child
    val yy: String = c.child.child.y // error
  }

  // Writing: null may only be assigned to members typed `| Null`.
  def test2 = {
    c.x = ""
    c.x = null // error
    c.y = ""
    c.y = null
    c.child = c
    c.child = null
  }

  // Selecting on a nullable receiver is an error regardless of the assigned value.
  def test3 = {
    d.x = "" // error
    d.y = "" // error
    d.child = c // error
  }
}
package dotty.tools.dotc
package transform
package sjs
import scala.collection.mutable
import ast.tpd
import core._
import typer.Checking
import util.SrcPos
import Annotations._
import Constants._
import Contexts._
import Decorators._
import DenotTransformers._
import Flags._
import NameKinds.{DefaultGetterName, ModuleClassName}
import NameOps._
import StdNames._
import Symbols._
import SymUtils._
import Types._
import JSSymUtils._
import org.scalajs.ir.Trees.JSGlobalRef
import dotty.tools.backend.sjs.JSDefinitions.jsdefn
/** A macro transform that runs after typer and before pickler to perform
* additional Scala.js-specific checks and transformations necessary for
* interoperability with JavaScript.
*
* It performs the following functions:
*
* - Sanity checks for the js.Any hierarchy
* - Annotate subclasses of js.Any to be treated specially
* - Create JSExport methods: Dummy methods that are propagated
* through the whole compiler chain to mark exports. This allows
* exports to have the same semantics as methods. This allows
*
* This is the equivalent of `PrepJSInterop` in Scala 2, minus the handling
* of `scala.Enumeration`.
*
* The reason for making this a macro transform is that some functions (in particular
* all the checks that behave differently depending on properties of classes in
* the enclosing class chain) are naturally top-down and don't lend themselves to the
* bottom-up approach of a mini phase.
*
* In addition, the addition of export forwarders must be done before pickling to
* be signature-compatible with scalac, and there are only macro transforms before
* pickling.
*/
class PrepJSInterop extends MacroTransform with IdentityDenotTransformer { thisPhase =>
import PrepJSInterop._
import tpd._
/** Compiler-facing identity of this phase. */
override def phaseName: String = PrepJSInterop.name

override def description: String = PrepJSInterop.description

// The phase only runs when compiling for Scala.js (the -scalajs setting).
override def isEnabled(using Context): Boolean =
  ctx.settings.scalajs.value

override def changesMembers: Boolean = true // the phase adds export forwarders

protected def newTransformer(using Context): Transformer =
  new ScalaJSPrepJSInteropTransformer
class ScalaJSPrepJSInteropTransformer extends Transformer with Checking {
import PrepJSExports._
/** Kind of the directly enclosing (most nested) owner. */
private var enclosingOwner: OwnerKind = OwnerKind.None

/** Cumulative kinds of all enclosing owners (union over the owner chain). */
private var allEnclosingOwners: OwnerKind = OwnerKind.None

/** Nicer syntax for `allEnclosingOwners is kind`. */
private def anyEnclosingOwner: OwnerKind = allEnclosingOwners
/** Evaluates `body` with `kind` registered as the enclosing owner kind, restoring
 *  both owner-kind fields afterwards even when `body` throws.
 */
private def enterOwner[A](kind: OwnerKind)(body: => A): A = {
  require(kind.isBaseKind, kind)
  val savedEnclosing = enclosingOwner
  val savedAll = allEnclosingOwners
  enclosingOwner = kind
  allEnclosingOwners |= kind
  try body
  finally {
    enclosingOwner = savedEnclosing
    allEnclosingOwners = savedAll
  }
}
/** DefDefs in class templates that export methods to JavaScript, keyed by the
 *  class symbol whose template will receive the forwarders (see transformTemplate).
 */
private val exporters = mutable.Map.empty[Symbol, mutable.ListBuffer[Tree]]
/** Dispatches each tree to the appropriate specialized transformation. */
override def transform(tree: Tree)(using Context): Tree = {
  tree match {
    case tree: ValDef if tree.symbol.is(Module) =>
      /* Never apply this transformation on the term definition of modules.
       * Instead, all relevant checks are performed on the module class definition.
       * We still need to mark exposed if required, since that needs to be done
       * on the module symbol, not its module class.
       */
      markExposedIfRequired(tree.symbol)
      super.transform(tree)

    case tree: MemberDef => transformMemberDef(tree)
    case tree: Template => transformTemplate(tree)
    case _ => transformStatOrExpr(tree)
  }
}
/** Performs the member-level checks (internal annotations, export preparation,
 *  @js.native validation) and dispatches to the class-, type- or val/def-specific
 *  transformation.
 */
private def transformMemberDef(tree: MemberDef)(using Context): Tree = {
  val sym = tree.symbol

  checkInternalAnnotations(sym)
  stripJSAnnotsOnExported(sym)

  /* Checks related to @js.native:
   * - if @js.native, verify that it is allowed in this context, and if
   *   yes, compute and store the JS native load spec
   * - if not @js.native, verify that we do not use any other annotation
   *   reserved for @js.native members (namely, JS native load spec annots)
   */
  val isJSNative = sym.getAnnotation(jsdefn.JSNativeAnnot) match {
    case Some(annot) =>
      checkJSNativeDefinition(tree, annot.tree, sym)
      true
    case None =>
      checkJSNativeSpecificAnnotsOnNonJSNative(tree)
      false
  }

  checkJSNameAnnots(sym)
  constFoldJSExportTopLevelAndStaticAnnotations(sym)

  markExposedIfRequired(tree.symbol)

  tree match {
    case tree: TypeDef if tree.isClassDef =>
      checkClassOrModuleExports(sym)

      if (isJSAny(sym))
        transformJSClassDef(tree)
      else
        transformScalaClassDef(tree)

    case _: TypeDef =>
      super.transform(tree)

    case tree: ValOrDefDef =>
      // Prepare exports: the generated forwarders are added to the owner's
      // template later, in transformTemplate.
      exporters.getOrElseUpdate(sym.owner, mutable.ListBuffer.empty) ++= genExportMember(sym)

      if (sym.isLocalToBlock)
        super.transform(tree)
      else if (isJSNative)
        transformJSNativeValOrDefDef(tree)
      else if (enclosingOwner is OwnerKind.JSType)
        transformValOrDefDefInJSType(tree)
      else
        super.transform(tree) // There is nothing special to do for a Scala val or def
  }
}
/** Transforms a Scala (non-js.Any) class/trait/object definition: forbids it inside
 *  native JS types and records the owner kind for the nested members' checks.
 */
private def transformScalaClassDef(tree: TypeDef)(using Context): Tree = {
  val sym = tree.symbol

  // In native JS things, only js.Any stuff is allowed
  if (enclosingOwner is OwnerKind.JSNative) {
    /* We have to allow synthetic companion objects here, as they get
     * generated when a nested native JS class has default arguments in
     * its constructor (see #1891).
     */
    if (!sym.is(Synthetic)) {
      report.error(
          "Native JS traits, classes and objects cannot contain inner Scala traits, classes or objects (i.e., not extending js.Any)",
          tree)
    }
  }

  // The magic PseudoUnion class is treated as a JS type even though it does not extend js.Any.
  if (sym == jsdefn.PseudoUnionClass)
    sym.addAnnotation(jsdefn.JSTypeAnnot)

  val kind =
    if (sym.is(Module)) OwnerKind.ScalaMod
    else OwnerKind.ScalaClass
  enterOwner(kind) {
    super.transform(tree)
  }
}
/** Transforms a class template: verifies @JSExportStatic field placement and appends
 *  any export forwarders collected for this class in transformMemberDef.
 */
private def transformTemplate(tree: Template)(using Context): Template = {
  // First, recursively transform the template
  val transformedTree = super.transform(tree).asInstanceOf[Template]

  val clsSym = ctx.owner

  // Check that @JSExportStatic fields come first
  if (clsSym.is(ModuleClass)) { // quick check to avoid useless work
    var foundStatOrNonStaticVal: Boolean = false
    for (tree <- transformedTree.body) {
      tree match {
        case vd: ValDef if vd.symbol.hasAnnotation(jsdefn.JSExportStaticAnnot) =>
          if (foundStatOrNonStaticVal) {
            report.error(
                "@JSExportStatic vals and vars must be defined before any other val/var, and before any constructor statement.",
                vd)
          }
        case vd: ValDef if !vd.symbol.is(Lazy) =>
          // A non-lazy val ends the allowed prefix of static exports
          foundStatOrNonStaticVal = true
        case _: MemberDef =>
        case _ =>
          // Any other statement (constructor code) also ends the allowed prefix
          foundStatOrNonStaticVal = true
      }
    }
  }

  // Add exports to the template, if there are any
  exporters.get(clsSym).fold {
    transformedTree
  } { exports =>
    checkNoDoubleDeclaration(clsSym)

    cpy.Template(transformedTree)(
        transformedTree.constr,
        transformedTree.parents,
        Nil,
        transformedTree.self,
        transformedTree.body ::: exports.toList
    )
  }
}
/** Transforms statements and expressions: validates SAM closures for JS types,
 *  `js.constructorOf[T]`, `js.ConstructorTag.materialize[T]` (which gets rewritten),
 *  `js.Dynamic.literal` calls, and `export` clauses inside JS types.
 */
private def transformStatOrExpr(tree: Tree)(using Context): Tree = {
  tree match {
    case Closure(env, call, functionalInterface) =>
      val tpeSym = functionalInterface.tpe.typeSymbol
      if (tpeSym.isJSType) {
        def reportError(reasonAndExplanation: String): Unit = {
          report.error(
              "Using an anonymous function as a SAM for the JavaScript type " +
              i"${tpeSym.fullName} is not allowed because " +
              reasonAndExplanation,
              tree)
        }
        if (!tpeSym.is(Trait) || tpeSym.asClass.superClass != jsdefn.JSFunctionClass) {
          reportError(
              "it is not a trait extending js.Function. " +
              "Use an anonymous class instead.")
        } else if (tpeSym.hasAnnotation(jsdefn.JSNativeAnnot)) {
          reportError(
              "it is a native JS type. " +
              "It is not possible to directly implement it.")
        } else if (!tree.tpe.possibleSamMethods.exists(_.symbol.hasJSCallCallingConvention)) {
          reportError(
              "its single abstract method is not named `apply`. " +
              "Use an anonymous class instead.")
        }
      }
      super.transform(tree)

    // Validate js.constructorOf[T]
    case TypeApply(ctorOfTree, List(tpeArg))
        if ctorOfTree.symbol == jsdefn.JSPackage_constructorOf =>
      validateJSConstructorOf(tree, tpeArg)
      super.transform(tree)

    /* Rewrite js.ConstructorTag.materialize[T] into
     * runtime.newConstructorTag[T](js.constructorOf[T])
     */
    case TypeApply(ctorOfTree, List(tpeArg))
        if ctorOfTree.symbol == jsdefn.JSConstructorTag_materialize =>
      validateJSConstructorOf(tree, tpeArg)
      val ctorOf = ref(jsdefn.JSPackage_constructorOf).appliedToTypeTree(tpeArg)
      ref(jsdefn.Runtime_newConstructorTag).appliedToType(tpeArg.tpe).appliedTo(ctorOf)

    // Compile-time errors and warnings for js.Dynamic.literal
    case Apply(Apply(fun, nameArgs), args)
        if fun.symbol == jsdefn.JSDynamicLiteral_applyDynamic ||
           fun.symbol == jsdefn.JSDynamicLiteral_applyDynamicNamed =>
      // Check that the first argument list is a constant string "apply"
      nameArgs match {
        case List(Literal(Constant(s: String))) =>
          if (s != "apply")
            report.error(i"js.Dynamic.literal does not have a method named $s", tree)
        case _ =>
          report.error(i"js.Dynamic.literal.${tree.symbol.name} may not be called directly", tree)
      }

      // TODO Warn for known duplicate property names

      super.transform(tree)

    case _: Export =>
      if enclosingOwner is OwnerKind.JSNative then
        report.error("Native JS traits, classes and objects cannot contain exported definitions.", tree)
      else if enclosingOwner is OwnerKind.JSTrait then
        report.error("Non-native JS traits cannot contain exported definitions.", tree)

      super.transform(tree)

    case _ =>
      super.transform(tree)
  }
}
/** Validates the type argument of `js.constructorOf[T]` / `js.ConstructorTag.materialize[T]`:
 *  it must be a class type that is neither a trait nor a module class.
 */
private def validateJSConstructorOf(tree: Tree, tpeArg: Tree)(using Context): Unit = {
  val tpe = checkClassType(tpeArg.tpe, tpeArg.srcPos, traitReq = false, stablePrefixReq = false)

  tpe.underlyingClassRef(refinementOK = false) match {
    case typeRef: TypeRef if typeRef.symbol.isOneOf(Trait | ModuleClass) =>
      report.error(i"non-trait class type required but $tpe found", tpeArg)
    case _ =>
      // an error was already reported above
  }
}
/** Performs checks and rewrites specific to classes / objects extending `js.Any`:
 *  annotation sanity, parent-class validity, non-native restrictions, and
 *  consistency of JS semantics across overriding pairs.
 */
private def transformJSClassDef(classDef: TypeDef)(using Context): Tree = {
  val sym = classDef.symbol.asClass
  val isJSNative = sym.hasAnnotation(jsdefn.JSNativeAnnot)

  // Mark the class as a JS type so later phases treat it specially
  sym.addAnnotation(jsdefn.JSTypeAnnot)

  // Forbid @EnableReflectiveInstantiation on JS types
  sym.getAnnotation(jsdefn.EnableReflectiveInstantiationAnnot).foreach { annot =>
    report.error(
        "@EnableReflectiveInstantiation cannot be used on types extending js.Any.",
        annot.tree)
  }

  // Forbid package objects that extends js.Any
  if (sym.isPackageObject)
    report.error("Package objects may not extend js.Any.", classDef)

  // Check that we do not have a case modifier
  if (sym.is(Case)) {
    report.error(
        "Classes and objects extending js.Any may not have a case modifier",
        classDef)
  }

  // Check the parents
  for (parentSym <- sym.parentSyms) {
    parentSym match {
      case parentSym if parentSym == defn.ObjectClass =>
        // AnyRef is valid, except for non-native JS classes and objects
        if (!isJSNative && !sym.is(Trait)) {
          report.error(
              "Non-native JS classes and objects cannot directly extend AnyRef. They must extend a JS class (native or not).",
              classDef)
        }
      case parentSym if isJSAny(parentSym) =>
        // A non-native JS type cannot extend a native JS trait
        // Otherwise, extending a JS type is valid
        if (!isJSNative && parentSym.is(Trait) && parentSym.hasAnnotation(jsdefn.JSNativeAnnot)) {
          report.error(
              "Non-native JS types cannot directly extend native JS traits.",
              classDef)
        }
      case parentSym if parentSym == defn.DynamicClass =>
        /* We have to allow scala.Dynamic to be able to define js.Dynamic
         * and similar constructs.
         * This causes the unsoundness filed as scala-js/scala-js#1385.
         */
      case parentSym =>
        /* This is a Scala class or trait other than AnyRef and Dynamic,
         * which is never valid.
         */
        report.error(
            i"${sym.name} extends ${parentSym.fullName} which does not extend js.Any.",
            classDef)
    }
  }

  // Checks for non-native JS stuff
  if (!isJSNative) {
    // It cannot be in a native JS class or trait
    if (enclosingOwner is OwnerKind.JSNativeClass) {
      report.error(
          "Native JS classes and traits cannot contain non-native JS classes, traits or objects",
          classDef)
    }

    // Unless it is a trait, it cannot be in a native JS object
    if (!sym.is(Trait) && (enclosingOwner is OwnerKind.JSNativeMod)) {
      report.error(
          "Native JS objects cannot contain inner non-native JS classes or objects",
          classDef)
    }

    // Local JS classes cannot be abstract (implementation restriction)
    if (sym.is(Abstract, butNot = Trait) && sym.isLocalToBlock) {
      report.error(
          "Implementation restriction: local JS classes cannot be abstract",
          classDef)
    }
  }

  // Check for consistency of JS semantics across overriding
  val overridingPairsCursor = new OverridingPairs.Cursor(sym)
  while (overridingPairsCursor.hasNext) {
    val overriding = overridingPairsCursor.overriding
    val overridden = overridingPairsCursor.overridden
    overridingPairsCursor.next() // prepare for next turn

    val clsSym = sym

    if (overriding.isTerm) {
      // Report at the member defined in this class when possible, else at the class itself
      def errorPos = {
        if (clsSym == overriding.owner) overriding.srcPos
        else if (clsSym == overridden.owner) overridden.srcPos
        else clsSym.srcPos
      }

      // Some utils inspired by RefChecks

      def infoString0(sym: Symbol, showLocation: Boolean): String = {
        val sym1 = sym.underlyingSymbol
        def info = clsSym.thisType.memberInfo(sym1)
        val infoStr =
          if (sym1.is(Module)) ""
          else i" of type $info"
        val ccStr = s" called from JS as '${sym.jsCallingConvention.displayName}'"
        i"${if (showLocation) sym1.showLocated else sym1}$infoStr$ccStr"
      }

      def infoString(sym: Symbol): String = infoString0(sym, sym.owner != clsSym)
      def infoStringWithLocation(sym: Symbol): String = infoString0(sym, true)

      def emitOverrideError(msg: String): Unit = {
        report.error(
            "error overriding %s;\n  %s %s".format(
                infoStringWithLocation(overridden), infoString(overriding), msg),
            errorPos)
      }

      // Check for overrides with different JS names - issue scala-js/scala-js#1983
      if (overriding.jsCallingConvention != overridden.jsCallingConvention)
        emitOverrideError("has a different JS calling convention")

      /* Cannot override a non-@JSOptional with an @JSOptional. Unfortunately
       * at this point the symbols do not have @JSOptional yet, so we need
       * to detect whether it would be applied.
       */
      if (!isJSNative) {
        def isJSOptional(sym: Symbol): Boolean = {
          sym.owner.is(Trait) && !sym.is(Deferred) && !sym.isConstructor &&
          !sym.owner.hasAnnotation(jsdefn.JSNativeAnnot)
        }

        if (isJSOptional(overriding) && !(overridden.is(Deferred) || isJSOptional(overridden)))
          emitOverrideError("cannot override a concrete member in a non-native JS trait")
      }
    }
  }

  // Owner kind for the nested members' checks
  val kind = {
    if (!isJSNative) {
      if (sym.is(ModuleClass)) OwnerKind.JSMod
      else if (sym.is(Trait)) OwnerKind.JSTrait
      else OwnerKind.JSNonTraitClass
    } else {
      if (sym.is(ModuleClass)) OwnerKind.JSNativeMod
      else OwnerKind.JSNativeClass
    }
  }
  enterOwner(kind) {
    super.transform(classDef)
  }
}
/** Validates a definition carrying `@js.native`: checks that the annotation is allowed
 *  in the current context, then (for non-traits) validates its JS native loading spec.
 */
private def checkJSNativeDefinition(treePos: SrcPos, annotPos: SrcPos, sym: Symbol)(using Context): Unit = {
  // Check if we may have a JS native here
  if (sym.isLocalToBlock) {
    report.error("@js.native is not allowed on local definitions", annotPos)
  } else if (!sym.isClass && (anyEnclosingOwner is (OwnerKind.ScalaClass | OwnerKind.JSType))) {
    report.error("@js.native vals and defs can only appear in static Scala objects", annotPos)
  } else if (sym.isClass && !isJSAny(sym)) {
    report.error("Classes, traits and objects not extending js.Any may not have an @js.native annotation", annotPos)
  } else if (anyEnclosingOwner is OwnerKind.ScalaClass) {
    report.error("Scala traits and classes may not have native JS members", annotPos)
  } else if (enclosingOwner is OwnerKind.JSNonNative) {
    report.error("non-native JS classes, traits and objects may not have native JS members", annotPos)
  } else {
    // The symbol can be annotated with @js.native. Now check its JS native loading spec.
    if (sym.is(Trait)) {
      // Traits take no loading spec at all
      for (annot <- sym.annotations) {
        val annotSym = annot.symbol
        if (isJSNativeLoadingSpecAnnot(annotSym))
          report.error(i"Traits may not have an @${annotSym.name} annotation.", annot.tree)
      }
    } else {
      checkJSNativeLoadSpecOf(treePos, sym)
    }
  }
}
/** Validates the JS native loading spec (@JSGlobal / @JSGlobalScope / @JSImport)
 *  of a non-trait `@js.native` definition, including the validity of any referenced
 *  JS global variable name.
 */
private def checkJSNativeLoadSpecOf(pos: SrcPos, sym: Symbol)(using Context): Unit = {

  def checkGlobalRefName(globalRef: String): Unit = {
    if (!JSGlobalRef.isValidJSGlobalRefName(globalRef))
      report.error(s"The name of a JS global variable must be a valid JS identifier (got '$globalRef')", pos)
  }

  if (enclosingOwner is OwnerKind.JSNative) {
    /* We cannot get here for @js.native vals and defs. That would mean we
     * have an @js.native val/def inside a JavaScript type, which is not
     * allowed and already caught in checkJSNativeDefinition().
     */
    assert(sym.isClass,
        s"undetected @js.native val or def ${sym.fullName} inside JS type at $pos")

    // Nested native JS classes/objects may not carry a loading-spec annotation themselves
    for (annot <- sym.annotations) {
      val annotSym = annot.symbol
      if (isJSNativeLoadingSpecAnnot(annotSym))
        report.error(i"Nested JS classes and objects cannot have an @${annotSym.name} annotation.", annot.tree)
    }

    if (sym.owner.isStaticOwner) {
      for (annot <- sym.annotations) {
        if (annot.symbol == jsdefn.JSNameAnnot && !(annot.arguments.head.tpe.derivesFrom(defn.StringClass))) {
          report.error(
              "Implementation restriction: " +
              "@JSName with a js.Symbol is not supported on nested native classes and objects",
              annot.tree)
        }
      }

      if (sym.owner.hasAnnotation(jsdefn.JSGlobalScopeAnnot)) {
        // The result of this match is unused; it is evaluated only for the
        // checkGlobalRefName side effect in the Literal case.
        val jsName = sym.jsName match {
          case JSName.Literal(jsName) =>
            checkGlobalRefName(jsName)
          case JSName.Computed(_) =>
            () // compile error above or in `checkJSNameArgument`
        }
      }
    }
  } else {
    // Only the first path segment (before the first '.') must be a valid JS global ref name
    def checkGlobalRefPath(pathName: String): Unit = {
      val dotIndex = pathName.indexOf('.')
      val globalRef =
        if (dotIndex < 0) pathName
        else pathName.substring(0, dotIndex).nn
      checkGlobalRefName(globalRef)
    }

    checkAndGetJSNativeLoadingSpecAnnotOf(pos, sym) match {
      case Some(annot) if annot.symbol == jsdefn.JSGlobalScopeAnnot =>
        if (!sym.is(Module)) {
          report.error(
              "@JSGlobalScope can only be used on native JS objects (with @js.native).",
              annot.tree)
        }

      case Some(annot) if annot.symbol == jsdefn.JSGlobalAnnot =>
        checkJSGlobalLiteral(annot)
        val pathName = annot.argumentConstantString(0).getOrElse {
          // No explicit name given: derive the default, rejecting names that would
          // map to 'apply' or a setter
          val symTermName = sym.name.exclude(NameKinds.ModuleClassName).toTermName
          if (symTermName == nme.apply) {
            report.error(
                "Native JS definitions named 'apply' must have an explicit name in @JSGlobal",
                annot.tree)
          } else if (symTermName.isSetterName) {
            report.error(
                "Native JS definitions with a name ending in '_=' must have an explicit name in @JSGlobal",
                annot.tree)
          }
          sym.defaultJSName
        }
        checkGlobalRefPath(pathName)

      case Some(annot) if annot.symbol == jsdefn.JSImportAnnot =>
        checkJSImportLiteral(annot)
        if (annot.arguments.sizeIs < 2) {
          val symTermName = sym.name.exclude(NameKinds.ModuleClassName).toTermName
          if (symTermName == nme.apply) {
            report.error(
                "Native JS definitions named 'apply' must have an explicit name in @JSImport",
                annot.tree)
          } else if (symTermName.isSetterName) {
            report.error(
                "Native JS definitions with a name ending in '_=' must have an explicit name in @JSImport",
                annot.tree)
          }
        }
        annot.argumentConstantString(2).foreach { globalPathName =>
          checkGlobalRefPath(globalPathName)
        }

      case _ =>
        // We already emitted an error in checkAndGetJSNativeLoadingSpecAnnotOf
        ()
    }
  }
}
/** Verify a ValOrDefDef that is annotated with `@js.native`: rejects disallowed member
 *  kinds and annotations, checks the right-hand side, and forbids overriding.
 */
private def transformJSNativeValOrDefDef(tree: ValOrDefDef)(using Context): ValOrDefDef = {
  val sym = tree.symbol

  // Position of a given annotation on this symbol, for precise error reporting
  def annotPos(annotSym: Symbol): SrcPos =
    sym.getAnnotation(annotSym).get.tree

  if (sym.is(Lazy) || sym.isJSSetter)
    report.error("@js.native is not allowed on vars, lazy vals and setter defs", annotPos(jsdefn.JSNativeAnnot))
  else if (sym.isJSBracketAccess)
    report.error("@JSBracketAccess is not allowed on @js.native vals and defs", annotPos(jsdefn.JSBracketAccessAnnot))
  else if (sym.isJSBracketCall)
    report.error("@JSBracketCall is not allowed on @js.native vals and defs", annotPos(jsdefn.JSBracketCallAnnot))

  checkRHSCallsJSNative(tree, "@js.native members")

  // Check that we do not override or implement anything from a superclass
  val overriddenSymbols = sym.allOverriddenSymbols
  if (overriddenSymbols.hasNext) {
    val overridden = overriddenSymbols.next()
    val verb = if (overridden.is(Deferred)) "implement" else "override"
    report.error(i"An @js.native member cannot $verb the inherited member ${overridden.fullName}", tree)
  }

  tree
}
/** Verify a ValOrDefDef inside a js.Any */
private def transformValOrDefDefInJSType(tree: ValOrDefDef)(using Context): Tree = {
val sym = tree.symbol
assert(!sym.isLocalToBlock, i"$tree at ${tree.span}")
sym.name match {
case nme.apply if !sym.hasAnnotation(jsdefn.JSNameAnnot) && (!sym.is(Method) || sym.isJSGetter) =>
report.error(
"A member named apply represents function application in JavaScript. " +
"A parameterless member should be exported as a property. " +
"You must add @JSName(\\"apply\\")",
sym)
case nme.equals_ if sym.info.matches(defn.Any_equals.info) =>
report.error(
"error overriding method equals(that: Any): Boolean in a JS class;\\n" +
" method equals(that: Any): Boolean is considered final in trait js.Any;\\n" +
" if you want to define a method named \\"equals\\" in JavaScript, use a different name and add @JSName(\\"equals\\").",
sym)
case nme.hashCode_ if sym.info.matches(defn.Any_hashCode.info) =>
report.error(
"error overriding method hashCode(): Int in a JS class;\\n" +
" method hashCode(): Int is considered final in trait js.Any;\\n" +
" if you want to define a method named \\"hashCode\\" in JavaScript, use a different name and add @JSName(\\"hashCode\\").",
sym)
case _ =>
}
if (sym.isJSSetter)
checkSetterSignature(sym, tree, exported = false)
if (enclosingOwner is OwnerKind.JSNonNative) {
JSCallingConvention.of(sym) match {
case JSCallingConvention.Property(_) => // checked above
case JSCallingConvention.Method(_) => // no checks needed
case JSCallingConvention.Call if !sym.is(Deferred) =>
report.error("A non-native JS class cannot declare a concrete method named `apply` without `@JSName`", tree)
case JSCallingConvention.Call => // if sym.isDeferred
/* Allow an abstract `def apply` only if the owner is a plausible
* JS function SAM trait.
*/
val owner = sym.owner
val isPlausibleJSFunctionType = {
owner.is(Trait) &&
owner.asClass.superClass == jsdefn.JSFunctionClass &&
owner.typeRef.possibleSamMethods.map(_.symbol) == Seq(sym) &&
!sym.info.isInstanceOf[PolyType]
}
if (!isPlausibleJSFunctionType) {
report.error(
"A non-native JS type can only declare an abstract method named `apply` without `@JSName` " +
"if it is the SAM of a trait that extends js.Function",
tree)
}
case JSCallingConvention.BracketAccess =>
report.error("@JSBracketAccess is not allowed in non-native JS classes", tree)
case JSCallingConvention.BracketCall =>
report.error("@JSBracketCall is not allowed in non-native JS classes", tree)
case JSCallingConvention.UnaryOp(_) =>
report.error("A non-native JS class cannot declare a method named like a unary operation without `@JSName`", tree)
case JSCallingConvention.BinaryOp(_) =>
report.error("A non-native JS class cannot declare a method named like a binary operation without `@JSName`", tree)
}
} else {
def checkNoDefaultOrRepeated(subject: String) = {
if (sym.info.paramInfoss.flatten.exists(_.isRepeatedParam))
report.error(s"$subject may not have repeated parameters", tree)
if (sym.hasDefaultParams)
report.error(s"$subject may not have default parameters", tree)
}
JSCallingConvention.of(sym) match {
case JSCallingConvention.Property(_) => // checked above
case JSCallingConvention.Method(_) => // no checks needed
case JSCallingConvention.Call => // no checks needed
case JSCallingConvention.UnaryOp(_) => // no checks needed
case JSCallingConvention.BinaryOp(_) =>
checkNoDefaultOrRepeated("methods representing binary operations")
case JSCallingConvention.BracketAccess =>
val paramCount = sym.info.paramNamess.map(_.size).sum
if (paramCount != 1 && paramCount != 2)
report.error("@JSBracketAccess methods must have one or two parameters", tree)
else if (paramCount == 2 && !sym.info.finalResultType.isRef(defn.UnitClass))
report.error("@JSBracketAccess methods with two parameters must return Unit", tree)
checkNoDefaultOrRepeated("@JSBracketAccess methods")
case JSCallingConvention.BracketCall =>
// JS bracket calls must have at least one non-repeated parameter
sym.info.stripPoly match {
case mt: MethodType if mt.paramInfos.nonEmpty && !mt.paramInfos.head.isRepeatedParam =>
// ok
case _ =>
report.error("@JSBracketCall methods must have at least one non-repeated parameter", tree)
}
}
}
if (sym.hasAnnotation(defn.NativeAnnot)) {
// Native methods are not allowed
report.error("Methods in a js.Any may not be @native", tree)
}
/* In native JS types, there should not be any private member, except
* private[this] constructors.
*/
if ((enclosingOwner is OwnerKind.JSNative) && isPrivateMaybeWithin(sym)) {
if (sym.isClassConstructor) {
if (!sym.isAllOf(PrivateLocal)) {
report.error(
"Native JS classes may not have private constructors. " +
"Use `private[this]` to declare an internal constructor.",
sym)
}
} else if (!sym.is(ParamAccessor)) {
report.error(
"Native JS classes may not have private members. " +
"Use a public member in a private facade instead.",
tree)
}
}
if (enclosingOwner is OwnerKind.JSNonNative) {
// Private methods cannot be overloaded
if (sym.is(Method) && isPrivateMaybeWithin(sym)) {
val alts = sym.owner.info.memberBasedOnFlags(sym.name, required = Method)
if (alts.isOverloaded) {
report.error(
"Private methods in non-native JS classes cannot be overloaded. Use different names instead.",
tree)
}
}
// private[Scope] methods must be final
if (!sym.isOneOf(Final | Protected) && sym.privateWithin.exists && !sym.isClassConstructor)
report.error("Qualified private members in non-native JS classes must be final", tree)
// Traits must be pure interfaces, except for js.undefined members
if (sym.owner.is(Trait) && sym.isTerm && !sym.isConstructor) {
if (sym.is(Method) && isPrivateMaybeWithin(sym)) {
report.error("A non-native JS trait cannot contain private members", tree)
} else if (sym.is(Lazy)) {
report.error("A non-native JS trait cannot contain lazy vals", tree)
} else if (!sym.is(Deferred)) {
/* Tell the back-end not to emit this thing. In fact, this only
* matters for mixed-in members created from this member.
*/
sym.addAnnotation(jsdefn.JSOptionalAnnot)
if (!sym.isSetter) {
// Check that methods do not have parens
if (sym.is(Method, butNot = Accessor) && sym.info.stripPoly.isInstanceOf[MethodType])
report.error("In non-native JS traits, defs with parentheses must be abstract.", tree.rhs)
// Check that the rhs is `js.undefined`
tree.rhs match {
case sel: Select if sel.symbol == jsdefn.JSPackage_undefined =>
// ok
case Apply(Apply(TypeApply(fromTypeConstructorFun, _), (sel: Select) :: Nil), _)
if sel.symbol == jsdefn.JSPackage_undefined
&& fromTypeConstructorFun.symbol == jsdefn.PseudoUnion_fromTypeConstructor =>
// ok: js.|.fromTypeConstructor(js.undefined)(...)
case _ =>
report.error(
"Members of non-native JS traits must either be abstract, or their right-hand-side must be `js.undefined`.",
tree)
}
}
}
}
} else { // enclosingOwner isnt OwnerKind.JSNonNative
// Check that the rhs is valid
if (sym.isPrimaryConstructor || sym.isOneOf(Param | ParamAccessor | Deferred | Synthetic)
|| sym.name.is(DefaultGetterName) || sym.isSetter) {
/* Ignore, i.e., allow:
* - primary constructor
* - all kinds of parameters
* - setters
* - default parameter getters (i.e., the default value of parameters)
* - abstract members
* - synthetic members (to avoid double errors with case classes, e.g. generated copy method)
*/
} else if (sym.isConstructor) {
// Force secondary ctor to have only a call to the primary ctor inside
tree.rhs match {
case Block(List(Apply(trg, _)), Literal(Constant(())))
if trg.symbol.isPrimaryConstructor && trg.symbol.owner == sym.owner =>
// everything is fine here
case _ =>
report.error(
"A secondary constructor of a native JS class may only call the primary constructor",
tree.rhs)
}
} else {
// Check that the tree's rhs is exactly `= js.native`
checkRHSCallsJSNative(tree, "Concrete members of JS native types")
}
}
super.transform(tree)
}
    /** Removes annotations from exported definitions (e.g. `export foo.bar`):
     *  - `js.native`
     *  - `js.annotation.*`
     *
     *  Such annotations only make sense on the original definition; keeping
     *  them on the exported forwarder would trigger spurious checks.
     */
    private def stripJSAnnotsOnExported(sym: Symbol)(using Context): Unit =
      if !sym.is(Exported) then
        return // only remove annotations from exported definitions
      val JSNativeAnnot = jsdefn.JSNativeAnnot
      val JSAnnotPackage = jsdefn.JSAnnotPackage
      // An annotation is JS-specific if it is `js.native` itself or lives in
      // the `js.annotation` package.
      extension (sym: Symbol) def isJSAnnot =
        (sym eq JSNativeAnnot) || (sym.owner eq JSAnnotPackage)
      // filterConserve returns the same list instance when nothing was
      // removed, so we only write back when there was an actual change.
      val newAnnots = sym.annotations.filterConserve(!_.symbol.isJSAnnot)
      if newAnnots ne sym.annotations then
        sym.annotations = newAnnots
    end stripJSAnnotsOnExported
    /** Checks that the right-hand-side of a JS native member is exactly
     *  `= js.native` and that its result type is explicitly ascribed.
     *
     *  @param longKindStr human-readable description of the member kind,
     *                     interpolated into the error message.
     */
    private def checkRHSCallsJSNative(tree: ValOrDefDef, longKindStr: String)(using Context): Unit = {
      if tree.symbol.is(Exported) then
        return // we already report an error that exports are not allowed here, this prevents extra errors.
      // Check that the rhs is exactly `= js.native`
      tree.rhs match {
        case sel: Select if sel.symbol == jsdefn.JSPackage_native =>
          // ok
        case _ =>
          // Point at the rhs when there is one, otherwise at the whole tree.
          val pos = if (tree.rhs != EmptyTree) tree.rhs.srcPos else tree.srcPos
          report.error(s"$longKindStr may only call js.native.", pos)
      }
      // Check that the result type was explicitly specified
      // (This is stronger than Scala 2, which only warns, and only if it was inferred as Nothing.)
      if (tree.tpt.span.isSynthetic)
        report.error(i"The type of ${tree.name} must be explicitly specified because it is JS native.", tree)
    }
    /** Reports an error for each JS-native-only annotation (`@JSGlobal`,
     *  `@JSImport`, `@JSGlobalScope`) present on a member that is not a
     *  native JS definition.
     */
    private def checkJSNativeSpecificAnnotsOnNonJSNative(memberDef: MemberDef)(using Context): Unit = {
      for (annot <- memberDef.symbol.annotations) {
        annot.symbol match {
          case annotSym if annotSym == jsdefn.JSGlobalAnnot =>
            report.error("@JSGlobal can only be used on native JS definitions (with @js.native).", annot.tree)
          case annotSym if annotSym == jsdefn.JSImportAnnot =>
            report.error("@JSImport can only be used on native JS definitions (with @js.native).", annot.tree)
          case annotSym if annotSym == jsdefn.JSGlobalScopeAnnot =>
            report.error("@JSGlobalScope can only be used on native JS objects (with @js.native).", annot.tree)
          case _ =>
            // ok
        }
      }
    }
    /** Checks the `@JSName` annotations of `sym`: the first-written one must
     *  be valid in this position, and there must not be more than one.
     */
    private def checkJSNameAnnots(sym: Symbol)(using Context): Unit = {
      // Annotations are stored in reverse source order; reversing puts the
      // first-written @JSName at the head.
      val allJSNameAnnots = sym.annotations.filter(_.symbol == jsdefn.JSNameAnnot).reverse
      for (annot <- allJSNameAnnots.headOption) {
        // Check everything about the first @JSName annotation
        if (sym.isLocalToBlock || (enclosingOwner isnt OwnerKind.JSType))
          report.error("@JSName can only be used on members of JS types.", annot.tree)
        else if (sym.is(Trait))
          report.error("@JSName cannot be used on traits.", annot.tree)
        else if (isPrivateMaybeWithin(sym))
          report.error("@JSName cannot be used on private members.", annot.tree)
        else
          checkJSNameArgument(sym, annot)
        // Check that there is at most one @JSName annotation.
        for (duplicate <- allJSNameAnnots.tail)
          report.error("Duplicate @JSName annotation.", duplicate.tree)
      }
    }
    /** Checks that the argument to `@JSName` annotations on `memberSym` is legal.
     *
     *  Reports an error on each annotation where this is not the case.
     *  Only one `@JSName` annotation is allowed, but that is handled somewhere else.
     */
    private def checkJSNameArgument(memberSym: Symbol, annot: Annotation)(using Context): Unit = {
      val argTree = annot.arguments.head
      if (argTree.tpe.derivesFrom(defn.StringClass)) {
        // We have a String. It must be a literal.
        if (!annot.argumentConstantString(0).isDefined)
          report.error("A String argument to JSName must be a literal string", argTree)
      } else {
        // We have a js.Symbol. It must be a stable reference.
        val sym = argTree.symbol
        if (!sym.isStatic || !sym.isStableMember) {
          report.error("A js.Symbol argument to JSName must be a static, stable identifier", argTree)
        } else if ((enclosingOwner is OwnerKind.JSNonNative) && sym.owner == memberSym.owner) {
          report.warning(
            "This symbol is defined in the same object as the annotation's target. " +
            "This will cause a stackoverflow at runtime",
            argTree)
        }
      }
    }
    /** Constant-folds arguments to `@JSExportTopLevel` and `@JSExportStatic`.
     *
     * Unlike scalac, dotc does not constant-fold expressions in annotations.
     * Our back-end needs to have access to the arguments to those two
     * annotations as literal strings, so we specifically constant-fold them
     * here.
     */
    private def constFoldJSExportTopLevelAndStaticAnnotations(sym: Symbol)(using Context): Unit = {
      val annots = sym.annotations
      val newAnnots = annots.mapConserve { annot =>
        if (annot.symbol == jsdefn.JSExportTopLevelAnnot || annot.symbol == jsdefn.JSExportStaticAnnot) {
          annot.tree match {
            case app @ Apply(fun, args) =>
              val newArgs = args.mapConserve { arg =>
                arg match {
                  case _: Literal =>
                    // Already a literal; nothing to fold.
                    arg
                  case _ =>
                    // Fold constant/singleton-typed expressions down to their
                    // literal value, keeping the original source span.
                    arg.tpe.widenTermRefExpr.normalized match {
                      case ConstantType(c) => Literal(c).withSpan(arg.span)
                      case _ => arg // PrepJSExports will emit an error for those cases
                    }
                }
              }
              // Rebuild the annotation only if an argument actually changed.
              if (newArgs eq args)
                annot
              else
                Annotation(cpy.Apply(app)(fun, newArgs))
            case _ =>
              annot
          }
        } else {
          annot
        }
      }
      // Write back only on change.
      if (newAnnots ne annots)
        sym.annotations = newAnnots
    }
    /** Mark the symbol as exposed if it is a non-private term member of a
     * non-native JS class.
     *
     * `@ExposedJSMember` is a compiler-internal marker annotation (see
     * `checkInternalAnnotations` in the companion object).
     *
     * @param sym
     *   The symbol, which must be the module symbol for a module, not its
     *   module class symbol.
     */
    private def markExposedIfRequired(sym: Symbol)(using Context): Unit = {
      val shouldBeExposed: Boolean = {
        // it is a term member
        sym.isTerm &&
        // it is a member of a non-native JS class
        (enclosingOwner is OwnerKind.JSNonNative) && !sym.isLocalToBlock &&
        // it is not synthetic
        !sym.isOneOf(Synthetic) &&
        // it is not private
        !isPrivateMaybeWithin(sym) &&
        // it is not a constructor
        !sym.isConstructor &&
        // it is not a default getter
        !sym.name.is(DefaultGetterName)
      }
      if (shouldBeExposed)
        sym.addAnnotation(jsdefn.ExposedJSMemberAnnot)
    }
}
}
object PrepJSInterop {
  val name: String = "prepjsinterop"
  val description: String = "additional checks and transformations for Scala.js"
  /** A set of kinds of enclosing owners, represented as a bit set inside an
   *  `Int`-backed value class so that combining and testing kinds does not
   *  allocate.
   */
  private final class OwnerKind private (private val baseKinds: Int) extends AnyVal {
    /** True iff this set contains exactly one base kind. */
    inline def isBaseKind: Boolean =
      Integer.lowestOneBit(baseKinds) == baseKinds && baseKinds != 0 // exactly 1 bit on
    // cannot be `inline` because it accesses the private constructor
    @inline def |(that: OwnerKind): OwnerKind =
      new OwnerKind(this.baseKinds | that.baseKinds)
    /** True iff `this` and `that` have at least one base kind in common. */
    inline def is(that: OwnerKind): Boolean =
      (this.baseKinds & that.baseKinds) != 0
    /** Negation of `is`. */
    inline def isnt(that: OwnerKind): Boolean =
      !this.is(that)
  }
  private object OwnerKind {
    /** No owner, i.e., we are at the top-level. */
    val None = new OwnerKind(0x00)
    // Base kinds - those form a partition of all possible enclosing owners
    /** A Scala class/trait. */
    val ScalaClass = new OwnerKind(0x01)
    /** A Scala object. */
    val ScalaMod = new OwnerKind(0x02)
    /** A native JS class/trait, which extends js.Any. */
    val JSNativeClass = new OwnerKind(0x04)
    /** A native JS object, which extends js.Any. */
    val JSNativeMod = new OwnerKind(0x08)
    /** A non-native JS class (not a trait). */
    val JSNonTraitClass = new OwnerKind(0x10)
    /** A non-native JS trait. */
    val JSTrait = new OwnerKind(0x20)
    /** A non-native JS object. */
    val JSMod = new OwnerKind(0x40)
    // Compound kinds - unions of the base kinds above
    /** A Scala class, trait or object, i.e., anything not extending js.Any. */
    val ScalaType = ScalaClass | ScalaMod
    /** A native JS class/trait/object. */
    val JSNative = JSNativeClass | JSNativeMod
    /** A non-native JS class/trait/object. */
    val JSNonNative = JSNonTraitClass | JSTrait | JSMod
    /** A JS type, i.e., something extending js.Any. */
    val JSType = JSNative | JSNonNative
    /** Any kind of class/trait, i.e., a Scala or JS class/trait. */
    val AnyClass = ScalaClass | JSNativeClass | JSNonTraitClass | JSTrait
  }
  /** Tests if the symbol extends `js.Any`.
   *
   * This is different from `sym.isJSType` because it returns `false` for the
   * pseudo-union type.
   */
  def isJSAny(sym: Symbol)(using Context): Boolean =
    sym.isSubClass(jsdefn.JSAnyClass)
  /** Checks that a setter has the right signature.
   *
   * Reports error messages otherwise.
   */
  def checkSetterSignature(sym: Symbol, pos: SrcPos, exported: Boolean)(using Context): Unit = {
    val typeStr = if (exported) "Exported" else "JS"
    val tpe = sym.info
    // The result type must be Unit
    if (!tpe.resultType.isRef(defn.UnitClass))
      report.error(s"$typeStr setters must return Unit", pos)
    // There must be exactly one non-varargs, non-default parameter
    tpe.paramInfoss match {
      case List(List(argInfo)) =>
        // Arg list is OK. Do additional checks.
        if (tpe.isVarArgsMethod)
          report.error(s"$typeStr setters may not have repeated params", pos)
        if (sym.hasDefaultParams)
          report.error(s"$typeStr setters may not have default params", pos)
      case _ =>
        report.error(s"$typeStr setters must have exactly one argument", pos)
    }
  }
  /** Tests whether the symbol has `private` in any form, either `private`,
   * `private[this]` or `private[Enclosing]`.
   */
  def isPrivateMaybeWithin(sym: Symbol)(using Context): Boolean =
    sym.is(Private) || (sym.privateWithin.exists && !sym.is(Protected))
  /** Checks that the optional argument to an `@JSGlobal` annotation is a
   * literal.
   *
   * Reports an error on the annotation if it is not the case.
   */
  private def checkJSGlobalLiteral(annot: Annotation)(using Context): Unit = {
    if (annot.arguments.nonEmpty) {
      assert(annot.arguments.size == 1,
        s"@JSGlobal annotation $annot has more than 1 argument")
      val argIsValid = annot.argumentConstantString(0).isDefined
      if (!argIsValid)
        report.error("The argument to @JSGlobal must be a literal string.", annot.arguments.head)
    }
  }
  /** Checks that arguments to an `@JSImport` annotation are literals.
   *
   * The second argument can also be the singleton `JSImport.Namespace`
   * object.
   *
   * Reports an error on the annotation if it is not the case.
   */
  private def checkJSImportLiteral(annot: Annotation)(using Context): Unit = {
    val args = annot.arguments
    val argCount = args.size
    assert(argCount >= 1 && argCount <= 3,
      i"@JSImport annotation $annot does not have between 1 and 3 arguments")
    // First argument: the module to import from; must be a literal string.
    val firstArgIsValid = annot.argumentConstantString(0).isDefined
    if (!firstArgIsValid)
      report.error("The first argument to @JSImport must be a literal string.", args.head)
    // Second argument: literal string or the JSImport.Namespace singleton.
    val secondArgIsValid = argCount < 2 || annot.argumentConstantString(1).isDefined || args(1).symbol == jsdefn.JSImportNamespaceModule
    if (!secondArgIsValid)
      report.error("The second argument to @JSImport must be literal string or the JSImport.Namespace object.", args(1))
    // Optional third argument (global fallback): must be a literal string.
    val thirdArgIsValid = argCount < 3 || annot.argumentConstantString(2).isDefined
    if (!thirdArgIsValid)
      report.error("The third argument to @JSImport, when present, must be a literal string.", args(2))
  }
  /** Checks that `sym` has no `@JSName` annotation and exactly one JS native
   *  loading spec annotation, and returns that annotation when present.
   */
  private def checkAndGetJSNativeLoadingSpecAnnotOf(pos: SrcPos, sym: Symbol)(
      using Context): Option[Annotation] = {
    // Must not have @JSName
    for (annot <- sym.getAnnotation(jsdefn.JSNameAnnot))
      report.error("@JSName can only be used on members of JS types.", annot.tree)
    // Must have exactly one JS native load spec annotation
    val annots = sym.annotations.filter(annot => isJSNativeLoadingSpecAnnot(annot.symbol))
    val badAnnotCountMsg =
      if (sym.is(Module)) "Native JS objects must have exactly one annotation among @JSGlobal, @JSImport and @JSGlobalScope."
      else "Native JS classes, vals and defs must have exactly one annotation among @JSGlobal and @JSImport."
    annots match {
      case Nil =>
        report.error(badAnnotCountMsg, pos)
        None
      case result :: Nil =>
        Some(result)
      case _ =>
        // Annotations are stored in reverse order, which we re-reverse now
        // (safe: this branch only runs when `annots` has at least two elements)
        val result :: duplicates = annots.reverse
        for (annot <- duplicates)
          report.error(badAnnotCountMsg, annot.tree)
        Some(result)
    }
  }
  /* Note that we consider @JSGlobalScope as a JS native loading spec because
   * it's convenient for the purposes of PrepJSInterop. Actually @JSGlobalScope
   * objects do not receive a JS loading spec in their IR.
   */
  private def isJSNativeLoadingSpecAnnot(sym: Symbol)(using Context): Boolean = {
    sym == jsdefn.JSGlobalAnnot
    || sym == jsdefn.JSImportAnnot
    || sym == jsdefn.JSGlobalScopeAnnot
  }
  /** Reports an error for every compiler-internal annotation found on `sym`;
   *  those are added by the compiler itself and must not be hand-written.
   */
  private def checkInternalAnnotations(sym: Symbol)(using Context): Unit = {
    /** Returns true iff it is a compiler annotation. */
    def isCompilerAnnotation(annotation: Annotation): Boolean = {
      annotation.symbol == jsdefn.ExposedJSMemberAnnot
      || annotation.symbol == jsdefn.JSTypeAnnot
      || annotation.symbol == jsdefn.JSOptionalAnnot
    }
    for (annotation <- sym.annotations) {
      if (isCompilerAnnotation(annotation)) {
        report.error(
          i"@${annotation.symbol.fullName} is for compiler internal use only. Do not use it yourself.",
          annotation.tree)
      }
    }
  }
}
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/transform/sjs/PrepJSInterop.scala | Scala | apache-2.0 | 49,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.infer
import org.apache.mxnet.{Context, DataDesc, NDArray}
import java.io.File
import org.slf4j.LoggerFactory
import scala.io
import scala.collection.mutable.ListBuffer
/**
 * Base trait for classification tasks: maps raw model outputs to
 * (label, score) pairs.
 */
trait ClassifierBase {
  /**
   * Takes an array of floats and returns corresponding (Label, Score) tuples
   * @param input Indexed sequence one-dimensional array of floats
   * @param topK (Optional) How many result (sorting based on the last axis)
   *             elements to return. Default returns unsorted output.
   * @return Indexed sequence of (Label, Score) tuples
   */
  def classify(input: IndexedSeq[Array[Float]],
               topK: Option[Int] = None): IndexedSeq[(String, Float)]
  /**
   * Takes a sequence of NDArrays and returns (Label, Score) tuples
   * @param input Indexed sequence of NDArrays
   * @param topK (Optional) How many result (sorting based on the last axis)
   *             elements to return. Default returns unsorted output.
   * @return Traversable sequence of (Label, Score) tuple
   */
  def classifyWithNDArray(input: IndexedSeq[NDArray],
                          topK: Option[Int] = None): IndexedSeq[IndexedSeq[(String, Float)]]
}
/**
 * A class for classifier tasks
 * @param modelPathPrefix Path prefix from where to load the model artifacts
 *                        These include the symbol, parameters, and synset.txt
 *                        Example: file://model-dir/resnet-152 (containing
 *                        resnet-152-symbol.json, resnet-152-0000.params, and synset.txt)
 * @param inputDescriptors Descriptors defining the input node names, shape,
 *                         layout and type parameters
 * @param contexts Device contexts on which you want to run inference; defaults to CPU
 * @param epoch Model epoch to load; defaults to 0
 */
class Classifier(modelPathPrefix: String,
                 protected val inputDescriptors: IndexedSeq[DataDesc],
                 protected val contexts: Array[Context] = Context.cpu(),
                 protected val epoch: Option[Int] = Some(0))
                 extends ClassifierBase {
  private val logger = LoggerFactory.getLogger(classOf[Classifier])
  // All of the following are initialized eagerly at construction time.
  protected[infer] val predictor: PredictBase = getPredictor()
  protected[infer] val synsetFilePath = getSynsetFilePath(modelPathPrefix)
  // One label per line of synset.txt; index i labels output class i.
  protected[infer] val synset = readSynsetFile(synsetFilePath)
  protected[infer] val handler = MXNetHandler()
  /**
   * Takes flat arrays as input and returns (Label, Score) tuples.
   * @param input Indexed sequence one-dimensional array of floats
   * @param topK (Optional) How many result (sorting based on the last axis)
   *             elements to return. Default returns unsorted output.
   * @return Indexed sequence of (Label, Score) tuples
   */
  override def classify(input: IndexedSeq[Array[Float]],
                        topK: Option[Int] = None): IndexedSeq[(String, Float)] = {
    // considering only the first output
    val predictResult = predictor.predict(input)(0)
    var result: IndexedSeq[(String, Float)] = IndexedSeq.empty
    if (topK.isDefined) {
      // Indices of the topK highest scores, in descending score order.
      val sortedIndex = predictResult.zipWithIndex.sortBy(-_._1).map(_._2).take(topK.get)
      result = sortedIndex.map(i => (synset(i), predictResult(i))).toIndexedSeq
    } else {
      // Unsorted: labels paired with scores in output order.
      result = synset.zip(predictResult).toIndexedSeq
    }
    result
  }
  /**
   * Perform multiple classification operations on NDArrays.
   * Also works with batched input.
   * @param input Indexed sequence of NDArrays
   * @param topK (Optional) How many result (sorting based on the last axis)
   *             elements to return. Default returns unsorted output.
   * @return Traversable sequence of (Label, Score) tuples
   */
  override def classifyWithNDArray(input: IndexedSeq[NDArray], topK: Option[Int] = None)
  : IndexedSeq[IndexedSeq[(String, Float)]] = {
    // considering only the first output
    val predictResultND: NDArray = predictor.predictWithNDArray(input)(0)
    val predictResult: ListBuffer[Array[Float]] = ListBuffer[Array[Float]]()
    // iterating over the individual items(batch size is in axis 0)
    for (i <- 0 until predictResultND.shape(0)) {
      val r = predictResultND.at(i)
      predictResult += r.toArray
      // Free the native memory of the row slice once it has been copied out.
      r.dispose()
    }
    var result: ListBuffer[IndexedSeq[(String, Float)]] =
      ListBuffer.empty[IndexedSeq[(String, Float)]]
    if (topK.isDefined) {
      // Per row: indices of the topK highest scores, in descending score order.
      val sortedIndices = predictResult.map(r =>
        r.zipWithIndex.sortBy(-_._1).map(_._2).take(topK.get)
      )
      for (i <- sortedIndices.indices) {
        result += sortedIndices(i).map(sIndx =>
          (synset(sIndx), predictResult(i)(sIndx))).toIndexedSeq
      }
    } else {
      for (i <- predictResult.indices) {
        result += synset.zip(predictResult(i)).toIndexedSeq
      }
    }
    // NOTE(review): disposal of the full result is routed through MXNetHandler,
    // presumably so the native free runs on the MXNet executor thread -- confirm.
    handler.execute(predictResultND.dispose())
    result.toIndexedSeq
  }
  // Locates synset.txt in the same directory as the model prefix.
  // NOTE(review): a prefix with no path separator yields dirPath == "" and the
  // first require below fails; callers must pass a path-like prefix.
  private[infer] def getSynsetFilePath(modelPathPrefix: String): String = {
    val dirPath = modelPathPrefix.substring(0, 1 + modelPathPrefix.lastIndexOf(File.separator))
    val d = new File(dirPath)
    require(d.exists && d.isDirectory, s"directory: $dirPath not found")
    val s = new File(dirPath + "synset.txt")
    require(s.exists() && s.isFile,
      s"File synset.txt should exist inside modelPath: ${dirPath + "synset.txt"}")
    s.getCanonicalPath
  }
  // Reads all labels from synset.txt, one label per line.
  private[infer] def readSynsetFile(synsetFilePath: String): IndexedSeq[String] = {
    val f = io.Source.fromFile(synsetFilePath)
    try {
      // Materialize before closing: getLines is lazy.
      f.getLines().toIndexedSeq
    } finally {
      f.close
    }
  }
  // Factory hook; overridden in tests to supply a mock predictor.
  // NOTE(review): "overridden in tests" is an assumption based on the
  // protected[infer] visibility -- confirm.
  private[infer] def getPredictor(): PredictBase = {
    new Predictor(modelPathPrefix, inputDescriptors, contexts, epoch)
  }
}
| mbaijal/incubator-mxnet | scala-package/infer/src/main/scala/org/apache/mxnet/infer/Classifier.scala | Scala | apache-2.0 | 6,754 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.runtime.fragment
package user.join
import com.asakusafw.runtime.flow.{ ArrayListBuffer, ListBuffer }
import com.asakusafw.runtime.model.DataModel
trait MasterCheck[M <: DataModel[M], T <: DataModel[T]] extends Join[M, T] {
  /** Fragment receiving transactions for which no master record exists. */
  def missed: Fragment[T]
  /** Fragment receiving transactions for which a master record exists. */
  def found: Fragment[T]
  /** Routes `tx` to `found` when a master is present, to `missed` otherwise. */
  override def join(master: M, tx: T): Unit = {
    val downstream = if (master == null) missed else found // scalastyle:ignore
    downstream.add(tx)
  }
}
| ashigeru/asakusafw-spark | runtime/src/main/scala/com/asakusafw/spark/runtime/fragment/user/join/MasterCheck.scala | Scala | apache-2.0 | 1,046 |
package com.twitter.finagle.tracing
/**
* This is a tracing system similar to Dapper:
*
* “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”,
* Benjamin H. Sigelman, Luiz André Barroso, Mike Burrows, Pat
* Stephenson, Manoj Plakal, Donald Beaver, Saul Jaspan, Chandan
* Shanbhag, 2010.
*
* It is meant to be independent of whatever underlying RPC mechanism
* is being used, and it is up to the underlying codec to implement
* the transport.
*/
import scala.util.Random
import java.nio.ByteBuffer
import java.net.InetSocketAddress
import com.twitter.util.{Time, Local}
/**
 * `Trace` maintains an interleaved stack of `TraceId`s and `Tracer`s.
 * The semantics are as follows: when reporting, we always report the
 * topmost `TraceId`. That action is reported to all the `Tracer`s
 * that are _below_ that point in the stack.
 */
object Trace {
  // Left(id): a pushed trace identifier; Right(tracer): a pushed tracer.
  private[this] type Stack = List[Either[TraceId, Tracer]]
  private[this] val rng = new Random
  // Shared fallback id; its span id is chosen randomly once at load time.
  private[this] val defaultId = TraceId(None, None, SpanId(rng.nextLong()), None)
  // Per-request (Local-scoped) stack of ids and tracers.
  private[this] val local = new Local[Stack]
  // Process-wide recording switch; volatile so toggles are seen by all threads.
  @volatile private[this] var tracingEnabled = true
  /**
   * Get the current trace identifier. If no identifiers have been
   * pushed, a default one is provided.
   */
  def id: TraceId = idOption getOrElse defaultId
  /**
   * Get the current identifier, if it exists.
   */
  def idOption: Option[TraceId] =
    local() flatMap { stack =>
      // The topmost Left entry, if any (postfix headOption call).
      stack collect { case Left(id) => id } headOption
    }
  /**
   * Completely clear the trace stack.
   */
  def clear() {
    local.clear()
  }
  /**
   * Turn trace recording on.
   */
  def enable() = tracingEnabled = true
  /**
   * Turn trace recording off.
   */
  def disable() = tracingEnabled = false
  /**
   * Create a derivative TraceId. If there isn't a
   * current ID, this becomes the root id.
   */
  def nextId: TraceId = {
    val currentId = idOption
    // The current span becomes the parent of the new span.
    TraceId(currentId map { _.traceId },
            currentId map { _.spanId },
            SpanId(rng.nextLong()),
            None)
  }
  /**
   * Create a derivative TraceId and push it. If there isn't a
   * current ID, this becomes the root id.
   */
  def pushId(): TraceId = {
    pushId(nextId)
  }
  /**
   * Push a new trace id.
   */
  def pushId(traceId: TraceId): TraceId = {
    // todo: should this to parent/trace management?
    local() = Left(traceId) :: (local() getOrElse Nil)
    traceId
  }
  /**
   * Pop the topmost trace id and return it.
   * Tracer entries above the topmost id are dropped along the way.
   */
  def popId(): Option[TraceId] = {
    local() match {
      case None | Some(Nil) => None
      case Some(Left(topmost@_) :: rest) =>
        local() = rest
        Some(topmost)
      case Some(Right(_) :: rest) =>
        local() = rest
        popId()
    }
  }
  /**
   * Push the given tracer.
   */
  def pushTracer(tracer: Tracer) {
    local() = Right(tracer) :: (local() getOrElse Nil)
  }
  /**
   * Invoke `f` and then unwind the stack to the starting point.
   */
  def unwind[T](f: => T): T = {
    val saved = local()
    try f finally local.set(saved)
  }
  /*
   * Recording methods report the topmost trace id to every tracer
   * lower in the stack.
   */
  /**
   * Find the set of tracers appropriate for the given ID.
   *
   * Walks the stack as (remaining, idToFind, tracersSoFar). When the sought
   * id is found as a Left entry, the tracers collected so far are discarded
   * and collection restarts, so only tracers *below* that entry are kept.
   * The final `Set() ++ ts` removes duplicate tracers.
   */
  private[this] def tracers: (Stack, Option[TraceId], List[Tracer]) => Seq[Tracer] = {
    case (Nil, _, ts) => (Set() ++ ts).toSeq
    case (Left(stackId) :: rest, Some(lookId), _) if stackId == lookId => tracers(rest, None, Nil)
    case (Left(_) :: rest, id, ts) => tracers(rest, id, ts)
    case (Right(t) :: rest, id, ts) => tracers(rest, id, t :: ts)
  }
  /**
   * Record a raw ''Record''. This will record to a _unique_ set of
   * tracers:
   *
   * 1. if the ID specified is in the stack, the record will be
   * recorded to those traces _below_ the first ID entry with that
   * value in the stack.
   *
   * 2. if the ID is *not* in the stack, we report it to all of the
   * tracers in the stack.
   */
  def record(rec: Record) {
    if (tracingEnabled)
      tracers(local() getOrElse Nil, Some(rec.traceId), Nil) foreach { _.record(rec) }
  }
  /*
   * Convenience methods that construct records of different kinds.
   */
  def record(ann: Annotation) {
    record(Record(id, Time.now, ann))
  }
  def record(message: String) {
    record(Annotation.Message(message))
  }
  def recordRpcname(service: String, rpc: String) {
    record(Annotation.Rpcname(service, rpc))
  }
  def recordClientAddr(ia: InetSocketAddress) {
    record(Annotation.ClientAddr(ia))
  }
  def recordServerAddr(ia: InetSocketAddress) {
    record(Annotation.ServerAddr(ia))
  }
  def recordBinary(key: String, value: ByteBuffer) {
    record(Annotation.BinaryAnnotation(key, value))
  }
  def recordBinary(key: String, value: String) {
    record(Annotation.BinaryAnnotation(key, ByteBuffer.wrap(value.getBytes)))
  }
}
| enachb/finagle_2.9_durgh | finagle-core/src/main/scala/com/twitter/finagle/tracing/Trace.scala | Scala | apache-2.0 | 4,903 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.models.citizen
import play.api.libs.json.Json
/** A citizen's address; only the country is relevant here. */
case class Address(country: Option[String])
object Address {
  // Explicit result type on the implicit: avoids type-inference surprises at
  // use sites, helps incremental compilation, and is required style for
  // implicit definitions (mandatory in Scala 3).
  implicit val formats: play.api.libs.json.OFormat[Address] = Json.format[Address]
}
| hmrc/nisp-frontend | app/uk/gov/hmrc/nisp/models/citizen/Address.scala | Scala | apache-2.0 | 788 |
/* Utility: Miscellaneous Scala utilities */
package utility
import scala.annotation.tailrec
import scala.collection.immutable.Set
import scala.collection.mutable
import org.apache.commons.lang.StringEscapeUtils.escapeJava
object Utility {
  // Placeholder for unfinished code paths: always throws NotImplementedError.
  def notImplemented = throw new NotImplementedError("not implemented")
  // Marks branches the author believes unreachable: always throws InternalError.
  def impossible = throw new InternalError("impossible")
// Binary search a sequence, returning Some(k) if s(k) == key, otherwise None.
def binarySearch[A](s: Seq[A], key: A)(implicit ord: math.Ordering[A]): Option[Int] = {
var left: Int = 0
var right: Int = s.length - 1
while (right >= left) {
val mid = left + (right - left) / 2
val comp = ord.compare(s(mid), key)
if (comp == 0) // s(mid) == key
return Some(mid)
else if (comp > 0) // s(mid) > key
right = mid - 1
else if (comp < 0) // s(mid) < key
left = mid + 1
}
None
}
// mapOrElse(x)(f,y) = x map f getOrElse y
def mapOrElse[A,B](x: Option[A])(f: A => B, y: B): B = x match {
case None => y
case Some(x) => f(x)
}
// Turn a list of pairs into a map to lists, preserving duplicates
def toMapList[A,B](c: Iterable[(A,B)]): Map[A,List[B]] = {
val m = mutable.Map[A,List[B]]()
c.foreach { case (a,b) => m.update(a, b :: m.getOrElse(a,Nil)) }
m.toMap
}
// Update a map of lists. Fairly slow.
def addToMapList[A,B](m0: Map[A,List[B]], bs: Iterable[(A,B)]): Map[A,List[B]] = {
val m = mutable.Map[A,List[B]]()
m0.foreach { case (a,bs) => m.update(a, bs ::: m.getOrElse(a,Nil)) }
bs.foreach { case (a,b) => m.update(a, b :: m.getOrElse(a,Nil)) }
m.toMap
}
// Turn a list of pairs into a map to sets
def toMapSet[A,B](c: Iterable[(A,B)]): Map[A,Set[B]] =
toMapList(c) mapValues (_.toSet)
// Given two maps to sets, union their values pointwise
def mergeMapSets[A,B](t: Map[A,Set[B]], t2: Map[A,Set[B]]): Map[A,Set[B]] =
((t.keySet ++ t2.keySet) map { s => (s, t.getOrElse(s,Set()) ++ t2.getOrElse(s,Set())) } ).toMap
// Call f until it returns false
def doWhile(f: => Boolean): Unit =
if (f) doWhile(f)
// Chop a string into pieces at whitespace
def splitWhitespace(s: String): List[String] =
s.split("""\\s+""").toList match {
case "" :: x => x
case x => x
}
// Place s between each adjacent pair in xs. E.g., abcd => asbscsd.
def intersperse[A](s: A, xs: List[A]): List[A] = xs match {
case Nil => Nil
case List(_) => xs
case x::xs => x :: s :: intersperse(s,xs)
}
// Do f, and turn any null pointer exception into a null result
def silenceNulls[A >: Null](f: => A): A =
try f catch { case _:NullPointerException => null }
// xs.reverse append ys, but tail recursive
@tailrec def revAppend[A](xs: List[A], ys: List[A]): List[A] = xs match {
case Nil => ys
case x::xs => revAppend(xs,x::ys)
}
// Run-length encode a list
def runs[A](xs: List[A]): List[(A,Int)] = {
@tailrec def loop(next: List[A], prev: List[(A,Int)]): List[(A,Int)] = next match {
case Nil => prev.reverse
case n::ns => loop(ns,prev match {
case (p,i)::ps if p==n => (p,i+1)::ps
case ps => (n,1)::ps
})
}
loop(xs,Nil)
}
// Expand a run-length encoded list. unruns(runs(xs)) == xs
def unruns[A](xs: List[(A,Int)]): List[A] = xs flatMap {case (a,n) => List.fill(n)(a)}
// Chop a list up into segments equal according to a predicate
def segmentBy[A](xs: List[A])(f: (A,A) => Boolean): List[List[A]] = xs match {
case Nil => Nil
case x::xs =>
@tailrec def loop(done: List[List[A]], cur: List[A], x: A, rest: List[A]): List[List[A]] = rest match {
case Nil => (cur.reverse :: done).reverse
case y::ys if f(x,y) => loop(done,y::cur,y,ys)
case y::ys => loop(cur::done,List(y),y,ys)
}
loop(Nil,List(x),x,xs)
}
// Escape a string according to Java string literal syntax
def escape(raw: String): String =
escapeJava(raw)
// Apply a partial function as much as we can to the front of a list
def takeCollect[A,B](xs: List[A])(f: PartialFunction[A,B]): (List[B],List[A]) = {
@tailrec def loop(xs: List[A], bs: List[B]): (List[B],List[A]) = xs match {
case x::xs if f.isDefinedAt(x) => loop(xs,f(x)::bs)
case xs => (bs.reverse,xs)
}
loop(xs,Nil)
}
// Iterate a function until referential equality fixpoint is reached
def fixRef[A <: AnyRef](x: A)(f: A => A): A = {
val fx = f(x)
if (x eq fx) x else fixRef(fx)(f)
}
// Transpose the list and option monads.
// If xs == ys map Some, Some(ys), else None.
def allSome[A](xs: List[Option[A]]): Option[List[A]] = xs match {
case Nil => Some(Nil)
case None::_ => None
case Some(x)::xs => allSome(xs) match {
case None => None
case Some(xs) => Some(x::xs)
}
}
// Transpose the set and option monads.
// If xs = ys map Some, Some(ys), else None.
def allSome[A](xs: Set[Option[A]]): Option[Set[A]] = allSome(xs.toList) map (_.toSet)
// (xs collect f).headOption, but faster
def collectOne[A,B](xs: List[A])(f: PartialFunction[A,B]): Option[B] = xs match {
case Nil => None
case x::_ if f.isDefinedAt(x) => Some(f(x))
case _::xs => collectOne(xs)(f)
}
def collectOne[A,B](xs: Set[A])(f: PartialFunction[A,B]): Option[B] = collectOne(xs.toList)(f)
// Memoize the fixpoint of a recursive function. Usage:
// lazy val f = fixpoint(base, a => b) // where b refers to f
def fixpoint[A,B](base: B, f: A => B): A => B = {
val done = mutable.Map[A,B]()
val next = mutable.Map[A,B]()
val active = mutable.Set[A]()
var changed = false
var outer = true
def fix(a: A): B = done.getOrElse(a, {
def inner = next get a match {
case None =>
changed = true
active += a
next(a) = base
val b = f(a)
if (b != base)
next(a) = b
b
case Some(b) =>
if (active contains a)
b
else {
active += a
val c = f(a)
if (b != c) {
changed = true
next(a) = c
}
c
}
}
if (!outer) inner
else {
outer = false
def loop: B = {
val b = inner
if (changed) {
changed = false
active.clear()
loop
} else {
outer = true
done ++= next
next.clear()
active.clear()
b
}
}
loop
}
})
fix
}
def fixpoint[A,B,C](base: C, f: (A,B) => C): (A,B) => C = {
lazy val g: ((A,B)) => C = fixpoint(base, x => f(x._1,x._2))
(a,b) => g((a,b))
}
// Memoization. Usage:
// val f = memoize(a => b) // b doesn't depend on f
// lazy val f = memoize(a => b) // b depends on f, but with laziness on all cycles
def memoize[A,B](f: A => B): A => B = {
val cache = mutable.Map[A,B]()
def mem(a: A): B = cache.getOrElse(a,{
val b = f(a)
cache(a) = b
b
})
mem
}
def memoize[A,B,C](f: (A,B) => C): (A,B) => C = {
val cache = mutable.Map[(A,B),C]()
def mem(a: A, b: B): C = cache.getOrElse((a,b),{
val c = f(a,b)
cache((a,b)) = c
c
})
mem
}
// Write to a file. From http://stackoverflow.com/questions/4604237/how-to-write-to-a-file-in-scala
def writeTo(f: java.io.File)(op: java.io.PrintWriter => Unit) {
val p = new java.io.PrintWriter(f)
try { op(p) } finally { p.close() }
}
// Create and then destroy a temporary file
def withTemp[A](prefix: String, suffix: String, delete: Boolean = true)(f: java.io.File => A): A = {
val file = java.io.File.createTempFile(prefix,suffix)
try { f(file) } finally { if (delete) file.delete }
}
// Trait for comparison by referential equality
trait RefEq extends AnyRef {
override def hashCode = System.identityHashCode(this)
override def equals(x: Any) = x match {
case x:AnyRef => this eq x
case _ => false
}
}
// Run f inside timing scope name
def scoped[A](name: String, f: => A): A = {
JavaUtils.pushScope(name)
try f finally JavaUtils.popScope()
}
// Run f with inner timing scopes suppressed
def silenced[A](f: => A): A = {
val s = JavaUtils.skipScopes
JavaUtils.skipScopes = true
try f finally JavaUtils.skipScopes = s
}
// Check if an error was generated by the Scala plugin
def fromScalaPlugin(e: Throwable): Boolean =
e.getStackTrace.exists(_.getClassName contains "org.jetbrains.plugin.scala.")
// Dodge checked exception compile errors for a block of code
abstract class Unchecked[A] { @throws(classOf[Exception]) def apply: A }
def unchecked[A](f: Unchecked[A]): A = f.apply
// Tuple construction from Java
def tuple[A,B] (a: A, b: B): (A,B) = (a,b)
def tuple[A,B,C] (a: A, b: B, c: C): (A,B,C) = (a,b,c)
def tuple[A,B,C,D](a: A, b: B, c: C, d: D): (A,B,C,D) = (a,b,c,d)
def capitalize(s: Array[Char]): Array[Char] = if (s.length == 0) s else s.updated(0,s(0).toUpper)
// For low effort debug logging
def appender(path: String): String => Unit = {
val f = new java.io.FileWriter(path,true)
(s: String) => {
f.write(s+"\\n")
f.flush()
}
}
} | eddysystems/eddy | utility/src/utility/Utility.scala | Scala | bsd-2-clause | 9,384 |
package com.metamx.tranquility.test
/**
* Created by dkhera on 11/23/15.
*/
/*
* Tranquility.
* Copyright 2013, 2014, 2015 Metamarkets Group, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.metamx.common.scala.Predef._
import com.metamx.common.scala.timekeeper.TestingTimekeeper
import com.metamx.tranquility.beam.Beam
import com.metamx.tranquility.spark.BeamFactory
import com.metamx.tranquility.spark.DruidRDD.addDruidFunctionsToRDD
import com.metamx.tranquility.test.common.{JulUtils, CuratorRequiringSuite, DruidIntegrationSuite}
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.runner.RunWith
import org.scala_tools.time.Imports._
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.scalatest.junit.JUnitRunner
import com.metamx.common.scala.Logging
import scala.collection.mutable
@RunWith(classOf[JUnitRunner])
class SparkDruidTest
  extends FunSuite with DruidIntegrationSuite with CuratorRequiringSuite with Logging with BeforeAndAfterAll
{
  private var sparkContext: SparkContext = null
  private var checkpointDir: String = _
  private var ssc: StreamingContext = null

  override def beforeAll(): Unit = {
    // Local Spark master; tranquility reads the zookeeper address from the Spark conf.
    sparkContext = new SparkContext(
      new SparkConf().setMaster("local").setAppName("SparkDruidTest").set("tranquility.zk.connect", "localhost:2181"))
    checkpointDir = "./checkpoint" // TODO: use a temporary directory rather than the working directory
    ssc = new StreamingContext(sparkContext, Seconds(3))
    //ssc.checkpoint(checkpointDir) //queue stream does not support checkpointing
  }

  override def afterAll(): Unit = {
    // Stop the StreamingContext first: StreamingContext.stop() also stops the
    // underlying SparkContext by default.  (Stopping the SparkContext first would
    // shut down streaming against an already-dead context.)
    if (ssc != null) {
      ssc.stop()
    } else if (sparkContext != null) {
      sparkContext.stop()
    }
  }

  JulUtils.routeJulThroughSlf4j()

  test("Spark to Druid") {
    withDruidStack {
      (curator, broker, overlord) =>
        val now = new DateTime().hourOfDay().roundFloorCopy()
        val inputs = DirectDruidTest.generateEventsQ(now)
        // Feed the generated events through a queue-backed DStream and write each RDD to Druid.
        val lines = mutable.Queue[RDD[SimpleEvent]]()
        val dstream = ssc.queueStream(lines)
        lines += sparkContext.makeRDD(inputs)
        dstream.foreachRDD(rdd => rdd.writeToDruid(new SimpleEventBeam))
        ssc.start()
        runTestQueriesAndAssertions(
          broker, new TestingTimekeeper withEffect {
            timekeeper =>
              timekeeper.now = now
          }
        )
    }
  }
}
class SimpleEventBeam extends BeamFactory[SimpleEvent] {
  /**
   * Builds a Beam for SimpleEvents backed by a freshly started curator client.
   *
   * NOTE(review): the curator client is started here but never closed; it must
   * outlive the beam, but nothing releases it afterwards — confirm this is
   * acceptable for the test lifecycle.
   */
  override def makeBeam: Beam[SimpleEvent] = {
    val aDifferentCurator = CuratorFrameworkFactory.newClient(
      "localhost:2181",
      new BoundedExponentialBackoffRetry(100, 500, 5)
    )
    aDifferentCurator.start()
    val builder = DirectDruidTest.newBuilder(
      aDifferentCurator, new TestingTimekeeper withEffect {
        timekeeper =>
          timekeeper.now = DateTime.now
      }
    )
    // (Removed leftover debug print that polluted stdout on every beam creation.)
    builder.buildBeam()
  }

  // Convenience entry point for manually exercising beam construction.
  def main(args: Array[String]) {
    makeBeam
  }
}
| deepikakhera/spark-tranquility | spark/src/test/scala/com/metamx/tranquility/test/SparkDruidTest.scala | Scala | apache-2.0 | 3,953 |
package coursier.test.util
import coursier.util.Task
import scala.concurrent.{ExecutionContext, Future}
/** Type class for converting a value in an effect type `F` into a standard Scala [[Future]]. */
trait ToFuture[F[_]] {
  // Converts f to a Future, using ec as the execution context for the conversion.
  def toFuture[T](ec: ExecutionContext, f: F[T]): Future[T]
}
object ToFuture {

  /** Summons the implicit [[ToFuture]] instance for `F` (like `implicitly`). */
  def apply[F[_]](implicit toFuture: ToFuture[F]): ToFuture[F] =
    toFuture

  // Instance for coursier's Task: runs the task's future on the supplied context.
  implicit val taskToFuture: ToFuture[Task] =
    new ToFuture[Task] {
      def toFuture[T](ec: ExecutionContext, f: Task[T]) =
        f.future()(ec)
    }
}
| alexarchambault/coursier | modules/tests/shared/src/test/scala/coursier/test/util/ToFuture.scala | Scala | apache-2.0 | 452 |
package info.andreaswolf.roadhopper.road
import com.graphhopper.util.shapes.GHPoint
/** A stop sign road sign, identified by `id` and located at `coordinates`. */
class StopSign(override val id: Int, override val coordinates: GHPoint) extends RoadSign {
  // Human-readable discriminator naming this sign type.
  val typeInfo = "StopSign"
}
| andreaswolf/roadhopper | src/main/scala/info/andreaswolf/roadhopper/road/StopSign.scala | Scala | mit | 209 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.filters.gzip
import java.util.function.BiFunction
import javax.inject.{ Inject, Provider, Singleton }
import akka.stream.scaladsl._
import akka.stream.{ FlowShape, Materializer, OverflowStrategy }
import akka.util.ByteString
import com.typesafe.config.ConfigMemorySize
import play.api.Configuration
import play.api.http._
import play.api.inject._
import play.api.libs.streams.GzipFlow
import play.api.mvc.RequestHeader.acceptHeader
import play.api.mvc._
import play.core.j
import scala.compat.java8.FunctionConverters._
import scala.concurrent.{ ExecutionContext, Future }
/**
* A gzip filter.
*
* This filter may gzip the responses for any requests that aren't HEAD requests and specify an accept encoding of gzip.
*
* It won't gzip under the following conditions:
*
* - The response code is 204 or 304 (these codes MUST NOT contain a body, and an empty gzipped response is 20 bytes
* long)
* - The response already defines a Content-Encoding header
* - A custom shouldGzip function is supplied and it returns false
*
* Since gzipping changes the content length of the response, this filter may do some buffering - it will buffer any
* streamed responses that define a content length less than the configured chunked threshold. Responses that are
* greater in length, or that don't define a content length, will not be buffered, but will be sent as chunked
* responses.
*/
@Singleton
class GzipFilter @Inject() (config: GzipFilterConfig)(implicit mat: Materializer) extends EssentialFilter {

  import play.api.http.HeaderNames._

  // Auxiliary constructor for creating the filter without dependency injection.
  def this(bufferSize: Int = 8192, chunkedThreshold: Int = 102400,
    shouldGzip: (RequestHeader, Result) => Boolean = (_, _) => true)(implicit mat: Materializer) =
    this(GzipFilterConfig(bufferSize, chunkedThreshold, shouldGzip))

  def apply(next: EssentialAction) = new EssentialAction {
    implicit val ec = mat.executionContext
    def apply(request: RequestHeader) = {
      // Only attempt compression if the request method and Accept-Encoding permit it.
      if (mayCompress(request)) {
        next(request).mapFuture(result => handleResult(request, result))
      } else {
        next(request)
      }
    }
  }

  // Gzips the result body when both this filter's checks and the configured
  // shouldGzip predicate agree; the strategy depends on the kind of HttpEntity.
  private def handleResult(request: RequestHeader, result: Result): Future[Result] = {
    implicit val ec = mat.executionContext
    if (shouldCompress(result) && config.shouldGzip(request, result)) {

      // Response header with Content-Encoding: gzip and Vary: Accept-Encoding applied.
      val header = result.header.copy(headers = setupHeader(result.header))

      result.body match {

        case HttpEntity.Strict(data, contentType) =>
          // Strict bodies are gzipped in memory and remain strict.
          compressStrictEntity(Source.single(data), contentType).map(entity =>
            result.copy(header = header, body = entity)
          )

        case entity @ HttpEntity.Streamed(_, Some(contentLength), contentType) if contentLength <= config.chunkedThreshold =>
          // It's below the chunked threshold, so buffer then compress and send
          compressStrictEntity(entity.data, contentType).map(strictEntity =>
            result.copy(header = header, body = strictEntity)
          )

        case HttpEntity.Streamed(data, _, contentType) if request.version == HttpProtocol.HTTP_1_0 =>
          // It's above the chunked threshold, but we can't chunk it because we're using HTTP 1.0.
          // Instead, we use a close delimited body (ie, regular body with no content length)
          val gzipped = data via GzipFlow.gzip(config.bufferSize)
          Future.successful(
            result.copy(header = header, body = HttpEntity.Streamed(gzipped, None, contentType))
          )

        case HttpEntity.Streamed(data, _, contentType) =>
          // It's above the chunked threshold, compress through the gzip flow, and send as chunked
          val gzipped = data via GzipFlow.gzip(config.bufferSize) map (d => HttpChunk.Chunk(d))
          Future.successful(
            result.copy(header = header, body = HttpEntity.Chunked(gzipped, contentType))
          )

        case HttpEntity.Chunked(chunks, contentType) =>
          // Gzip the data chunks while letting the trailing LastChunk pass through untouched.
          val gzipFlow = Flow.fromGraph(GraphDSL.create[FlowShape[HttpChunk, HttpChunk]]() { implicit builder =>
            import GraphDSL.Implicits._

            val extractChunks = Flow[HttpChunk] collect { case HttpChunk.Chunk(data) => data }
            val createChunks = Flow[ByteString].map[HttpChunk](HttpChunk.Chunk.apply)
            val filterLastChunk = Flow[HttpChunk]
              .filter(_.isInstanceOf[HttpChunk.LastChunk])
              // Since we're doing a merge by concatenating, the filter last chunk won't receive demand until the gzip
              // flow is finished. But the broadcast won't start broadcasting until both flows start demanding. So we
              // put a buffer of one in to ensure the filter last chunk flow demands from the broadcast.
              .buffer(1, OverflowStrategy.backpressure)

            val broadcast = builder.add(Broadcast[HttpChunk](2))
            val concat = builder.add(Concat[HttpChunk]())

            // Broadcast the stream through two separate flows, one that collects chunks and turns them into
            // ByteStrings, sends those ByteStrings through the Gzip flow, and then turns them back into chunks,
            // the other that just allows the last chunk through. Then concat those two flows together.
            broadcast.out(0) ~> extractChunks ~> GzipFlow.gzip(config.bufferSize) ~> createChunks ~> concat.in(0)
            broadcast.out(1) ~> filterLastChunk ~> concat.in(1)

            new FlowShape(broadcast.in, concat.out)
          })
          Future.successful(
            result.copy(header = header, body = HttpEntity.Chunked(chunks via gzipFlow, contentType))
          )
      }
    } else {
      Future.successful(result)
    }
  }

  // Buffers the whole source in memory, gzips it, and wraps it as a strict entity.
  private def compressStrictEntity(source: Source[ByteString, Any], contentType: Option[String])(implicit ec: ExecutionContext) = {
    val compressed = source.via(GzipFlow.gzip(config.bufferSize)).runFold(ByteString.empty)(_ ++ _)
    compressed.map(data => HttpEntity.Strict(data, contentType))
  }

  /**
   * Whether this request may be compressed.
   */
  private def mayCompress(request: RequestHeader) =
    request.method != "HEAD" && gzipIsAcceptedAndPreferredBy(request)

  // Checks the Accept-Encoding q-values: gzip must be acceptable (q > 0) and at
  // least as preferred as identity (uncompressed).
  private def gzipIsAcceptedAndPreferredBy(request: RequestHeader) = {
    val codings = acceptHeader(request.headers, ACCEPT_ENCODING)
    def explicitQValue(coding: String) = codings collectFirst { case (q, c) if c equalsIgnoreCase coding => q }
    // identity gets a small positive default so it stays acceptable unless explicitly refused
    def defaultQValue(coding: String) = if (coding == "identity") 0.001d else 0d
    def qvalue(coding: String) = explicitQValue(coding) orElse explicitQValue("*") getOrElse defaultQValue(coding)

    qvalue("gzip") > 0d && qvalue("gzip") >= qvalue("identity")
  }

  /**
   * Whether this response should be compressed. Responses that may not contain content won't be compressed, nor will
   * responses that already define a content encoding. Empty responses also shouldn't be compressed, as they will
   * actually always get bigger.
   */
  private def shouldCompress(result: Result) = isAllowedContent(result.header) &&
    isNotAlreadyCompressed(result.header) &&
    !result.body.isKnownEmpty

  /**
   * Certain response codes are forbidden by the HTTP spec to contain content, but a gzipped response always contains
   * a minimum of 20 bytes, even for empty responses.
   */
  private def isAllowedContent(header: ResponseHeader) = header.status != Status.NO_CONTENT && header.status != Status.NOT_MODIFIED

  /**
   * Of course, we don't want to double compress responses
   */
  private def isNotAlreadyCompressed(header: ResponseHeader) = header.headers.get(CONTENT_ENCODING).isEmpty

  // Adds Content-Encoding: gzip and makes sure Vary includes Accept-Encoding.
  private def setupHeader(rh: ResponseHeader): Map[String, String] = {
    rh.headers + (CONTENT_ENCODING -> "gzip") + rh.varyWith(ACCEPT_ENCODING)
  }
}
/**
* Configuration for the gzip filter
*
* @param bufferSize The size of the buffer to use for gzipping.
* @param chunkedThreshold The content length threshold, after which the filter will switch to chunking the result.
* @param shouldGzip Whether the given request/result should be gzipped. This can be used, for example, to implement
* black/white lists for gzipping by content type.
*/
case class GzipFilterConfig(
    bufferSize: Int = 8192,
    chunkedThreshold: Int = 102400,
    shouldGzip: (RequestHeader, Result) => Boolean = (_, _) => true) {

  // alternate constructor and builder methods for Java
  def this() = this(shouldGzip = (_, _) => true)

  /** Returns a copy using the given predicate to decide whether a response gets gzipped. */
  def withShouldGzip(shouldGzip: (RequestHeader, Result) => Boolean): GzipFilterConfig = copy(shouldGzip = shouldGzip)

  /** Java-friendly variant of [[withShouldGzip]], adapting the Java request/result types. */
  def withShouldGzip(shouldGzip: BiFunction[play.mvc.Http.RequestHeader, play.mvc.Result, Boolean]): GzipFilterConfig =
    withShouldGzip((req: RequestHeader, res: Result) => shouldGzip.asScala(req.asJava, res.asJava))

  /** Returns a copy with the given chunking threshold (bytes). */
  def withChunkedThreshold(threshold: Int): GzipFilterConfig = copy(chunkedThreshold = threshold)

  /** Returns a copy with the given gzip buffer size (bytes). */
  def withBufferSize(size: Int): GzipFilterConfig = copy(bufferSize = size)
}
object GzipFilterConfig {

  /** Builds a [[GzipFilterConfig]] from the `play.filters.gzip` configuration block. */
  def fromConfiguration(conf: Configuration) = {
    val config = conf.get[Configuration]("play.filters.gzip")

    // Configured media ranges, lower-cased for case-insensitive matching below.
    val whiteList = MediaRange.parse(config.get[Seq[String]]("contentType.whiteList").mkString(", ").toLowerCase)
    val blackList = MediaRange.parse(config.get[Seq[String]]("contentType.blackList").mkString(", ").toLowerCase)

    GzipFilterConfig(
      bufferSize = config.get[ConfigMemorySize]("bufferSize").toBytes.toInt,
      chunkedThreshold = config.get[ConfigMemorySize]("chunkedThreshold").toBytes.toInt,
      shouldGzip = (req, res) => {

        res.body.contentType match {
          case Some(MediaType.parse(mt)) =>
            val mimeType = s"${mt.mediaType}/${mt.mediaSubType}".toLowerCase
            // A non-empty white list takes precedence; the black list is only
            // consulted when no white list is configured.
            if (whiteList.nonEmpty) {
              whiteList.exists(_.accepts(mimeType))
            } else {
              blackList.forall(!_.accepts(mimeType))
            }
          case _ => whiteList.isEmpty // absent/unparseable content type: gzip only when no white list is set
        }
      })
  }
}
/**
* The gzip filter configuration provider.
*/
@Singleton
class GzipFilterConfigProvider @Inject() (config: Configuration) extends Provider[GzipFilterConfig] {
  // Parsed from configuration once, on first access.
  lazy val get = GzipFilterConfig.fromConfiguration(config)
}
/**
* The gzip filter module.
*/
class GzipFilterModule extends SimpleModule(
  // Make the filter and its configuration available for runtime dependency injection.
  bind[GzipFilterConfig].toProvider[GzipFilterConfigProvider],
  bind[GzipFilter].toSelf
)
/**
* The gzip filter components.
*/
trait GzipFilterComponents {
  // Supplied by the implementing application components trait.
  def configuration: Configuration
  def materializer: Materializer

  // Built lazily so implementors can provide configuration/materializer later.
  lazy val gzipFilterConfig: GzipFilterConfig = GzipFilterConfig.fromConfiguration(configuration)
  lazy val gzipFilter: GzipFilter = new GzipFilter(gzipFilterConfig)(materializer)
}
| Shruti9520/playframework | framework/src/play-filters-helpers/src/main/scala/play/filters/gzip/GzipFilter.scala | Scala | apache-2.0 | 10,716 |
package org.apache.camel.component.cassandra
import org.scalatest.matchers.ShouldMatchers
import java.lang.String
import org.scalatest.FunSuite
import org.apache.camel.scala.dsl.builder.RouteBuilder
import java.util.{Set => JSet, HashMap => JMap}
import org.apache.camel.{Message, Exchange, ExchangePattern, CamelContext}
import org.apache.camel.component.cassandra.ProducerSuite._
import org.apache.camel.component.cassandra.CassandraComponent._
import com.shorrockin.cascal.utils.Conversions._
import org.apache.camel.builder.ExpressionBuilder
import org.springframework.context.support.ClassPathXmlApplicationContext
import org.apache.camel.impl._
import java.util.Collections
import java.io.ByteArrayInputStream
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class ProducerSuite extends FunSuite with CassandraSuite with ShouldMatchers {

  /** Creates a camel context with the producer test routes, started. */
  private def startContext(): DefaultCamelContext = {
    val context = new DefaultCamelContext
    context.addRoutes(new ProducerTestRouteBuilder)
    context.start
    context
  }

  /** Creates a started camel context whose registry contains the given bindings
    * (registered before the routes are added, so endpoints can look them up). */
  private def startContextWithRegistry(entries: (String, AnyRef)*): DefaultCamelContext = {
    val context = new DefaultCamelContext
    val reg = new SimpleRegistry
    context.setRegistry(reg)
    entries.foreach { case (name, bean) => reg.put(name, bean) }
    context.addRoutes(new ProducerTestRouteBuilder)
    context.start
    context
  }

  /** Builds a header map containing the given entries. */
  private def headersOf(entries: (String, AnyRef)*): JMap[String, java.lang.Object] = {
    val headers = new JMap[String, java.lang.Object]
    entries.foreach { case (k, v) => asMap(headers) += k -> v }
    headers
  }

  /** Builds an InOut exchange carrying `headers` and `body`, sends it to `endpoint`
    * through a fresh producer template, and returns the completed exchange. */
  private def sendExchange(context: CamelContext, endpoint: String,
                           headers: JMap[String, java.lang.Object], body: AnyRef): Exchange = {
    val exchange: Exchange = new DefaultExchange(context)
    exchange.getIn.setHeaders(headers)
    exchange.getIn.setBody(body)
    exchange.setPattern(ExchangePattern.InOut)
    val template = new DefaultProducerTemplate(context)
    template.start
    template.send(endpoint, exchange)
  }

  /** Asserts the standard cassandra headers on the OUT message.
    * Pass superColumn = null to assert that no supercolumn header was set. */
  private def assertOutHeaders(out: Message, columnFamily: String, key: String, superColumn: String) {
    out.getHeader(keyspaceHeader, classOf[String]) should be("camel-cassandra")
    out.getHeader(columnFamilyHeader, classOf[String]) should be(columnFamily)
    out.getHeader(columnHeader, classOf[String]) should be("testcolumn")
    out.getHeader(keyHeader, classOf[String]) should be(key)
    if (superColumn == null)
      out.getHeader(superColumnHeader) should be(null)
    else
      out.getHeader(superColumnHeader, classOf[String]) should be(superColumn)
  }

  /** Verifies that "TEST123" was inserted under testcolumn in a standard column family. */
  private def assertInserted(columnFamily: String, key: String) {
    withSession {
      session =>
        session.get("camel-cassandra" \\ columnFamily \\ key \\ "testcolumn") match {
          case Some(x) => string(x.value) should be("TEST123")
          case None => fail
        }
    }
  }

  /** Verifies that "TEST123" was inserted under testcolumn in a super column family. */
  private def assertInsertedSuper(columnFamily: String, key: String, superColumn: String) {
    withSession {
      session =>
        session.get("camel-cassandra" \\\\ columnFamily \\ key \\ superColumn \\ "testcolumn") match {
          case Some(x) => string(x.value) should be("TEST123")
          case None => fail
        }
    }
  }

  test("sending to an endpoint with keyspace columnfamily and column results in an insert to cassandra") {
    val context = startContext()
    val exchange = sendExchange(context, inboundSpaceFamilyColumn, headersOf(keyHeader -> "theKey"), "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "stringCols", "theKey", null)
    out.getBody(classOf[String]) should be("TEST123")
    assertInserted("stringCols", "theKey")
  }

  test("sending to an endpoint with keyspace columnfamily and column and Value DataFormat results in an insert to cassandra") {
    val format = new SerializationDataFormat
    val context = startContextWithRegistry(dataFormatOption -> format)
    val exchange = sendExchange(context, inboundSpaceFamilyColumn, headersOf(keyHeader -> "theFormatKey"),
      Collections.singleton("TEST123"))
    val out: Message = exchange.getOut
    assertOutHeaders(out, "stringCols", "theFormatKey", null)
    // The stored bytes must round-trip back through the configured data format.
    withSession {
      session =>
        session.get("camel-cassandra" \\ "stringCols" \\ "theFormatKey" \\ "testcolumn") match {
          case Some(x) =>
            val bodyOut = format.unmarshal(exchange, new ByteArrayInputStream(x.value))
            bodyOut.asInstanceOf[JSet[String]].contains("TEST123") should be(true)
            bodyOut.asInstanceOf[JSet[String]].size should be(1)
          case None => fail
        }
    }
  }

  test("sending to an endpoint with keyspace columnfamily supercolumn and column results in an insert to cassandra") {
    val context = startContext()
    val exchange = sendExchange(context, inboundSpaceFamilySupercolumnColumn,
      headersOf(keyHeader -> "theSupercolKey"), "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "superStringCols", "theSupercolKey", "superduper")
    out.getBody(classOf[String]) should be("TEST123")
    assertInsertedSuper("superStringCols", "theSupercolKey", "superduper")
  }

  test("sending to an endpoint with keyspace columnfamily key and supercolumn results in an insert to cassandra") {
    val context = startContext()
    // The key ("testUrlKey") and supercolumn come from the endpoint URI; only the column is a header.
    val exchange = sendExchange(context, inboundSpaceFamilyKeySupercolumn,
      headersOf(columnHeader -> "testcolumn"), "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "superStringCols", "testUrlKey", "superduper")
    out.getBody(classOf[String]) should be("TEST123")
    assertInsertedSuper("superStringCols", "testUrlKey", "superduper")
  }

  test("sending to an endpoint with nothing but extractors results in an insert to cassandra") {
    val context = startContextWithRegistry(
      "keyspaceEx" -> ExpressionBuilder.constantExpression("camel-cassandra"),
      "cfEx" -> ExpressionBuilder.constantExpression("stringCols"),
      "colEx" -> ExpressionBuilder.constantExpression("testcolumn"),
      "keyEx" -> ExpressionBuilder.headerExpression(keyHeader),
      "valEx" -> ExpressionBuilder.bodyExpression)
    val exchange = sendExchange(context, inboundExtractSpaceFamilyColumn,
      headersOf(keyHeader -> "theExtractorKey"), "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "stringCols", "theExtractorKey", null)
    out.getBody(classOf[String]) should be("TEST123")
    assertInserted("stringCols", "theExtractorKey")
  }

  test("sending to an endpoint with nothing but extractors,including supercolumn results in an insert to cassandra") {
    val context = startContextWithRegistry(
      "keyspaceEx" -> ExpressionBuilder.constantExpression("camel-cassandra"),
      "cfEx" -> ExpressionBuilder.constantExpression("superStringCols"),
      "colEx" -> ExpressionBuilder.constantExpression("testcolumn"),
      "supEx" -> ExpressionBuilder.constantExpression("superduper"),
      "keyEx" -> ExpressionBuilder.headerExpression(keyHeader),
      "valEx" -> ExpressionBuilder.bodyExpression)
    val exchange = sendExchange(context, inboundExtractSpaceFamilySuperColumn,
      headersOf(keyHeader -> "theSuperExtractorKey"), "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "superStringCols", "theSuperExtractorKey", "superduper")
    out.getBody(classOf[String]) should be("TEST123")
    assertInsertedSuper("superStringCols", "theSuperExtractorKey", "superduper")
  }

  test("loading routes and extractors via spring and specifying extractor beans with options and sending to an endpoint results in an insert to cassandra") {
    val spring = new ClassPathXmlApplicationContext("classpath:producer-suite-context-1.xml")
    val context = spring.getBean("camel").asInstanceOf[CamelContext]
    val exchange = sendExchange(context, inboundSpring,
      headersOf(keyHeader -> "theSpringKey", columnFamilyHeader -> "stringCols", columnHeader -> "testcolumn"),
      "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "stringCols", "theSpringKey", null)
    out.getBody(classOf[String]) should be("TEST123")
    assertInserted("stringCols", "theSpringKey")
  }

  test("loading routes and extractors via spring using default extractor bean names instead of options and sending to an endpoint results in an insert to cassandra") {
    val spring = new ClassPathXmlApplicationContext("classpath:producer-suite-context-2.xml")
    val context = spring.getBean("camel").asInstanceOf[CamelContext]
    val exchange = sendExchange(context, inboundSpring,
      headersOf(keyHeader -> "theSpringDefaultKey", columnFamilyHeader -> "stringCols", columnHeader -> "testcolumn"),
      "TEST123")
    val out: Message = exchange.getOut
    assertOutHeaders(out, "stringCols", "theSpringDefaultKey", null)
    out.getBody(classOf[String]) should be("TEST123")
    assertInserted("stringCols", "theSpringDefaultKey")
  }
}
// Wires each direct: inbound endpoint to its corresponding cassandra: outbound
// endpoint (URIs defined in ProducerSuite), so tests can feed exchanges in via
// the direct endpoints and exercise the cassandra producer.
class ProducerTestRouteBuilder extends RouteBuilder {
  from(inboundSpaceFamilyColumn) --> outboundSpaceFamilyColumn
  from(inboundExtractSpaceFamilyColumn) --> outboundExtractSpaceFamilyColumn
  from(inboundSpaceFamilySupercolumnColumn) --> outboundSpaceFamilySupercolumnColumn
  from(inboundExtractSpaceFamilySuperColumn) --> outboundExtractSpaceFamilySuperColumn
  from(inboundSpaceFamilyKeySupercolumn) --> outboundSpaceFamilyKeySupercolumn
}
// Endpoint URIs shared between the test cases and ProducerTestRouteBuilder.
// NOTE(review): the '!' and '~' path-segment prefixes presumably mark a
// super-column and a fixed key respectively, and '?...Extractor=' options name
// Spring beans — verify against the cassandra component's URI parsing.
object ProducerSuite {
  val inboundSpaceFamilyColumn = "direct:inboundSpaceFamilyColumn"
  val outboundSpaceFamilyColumn = "cassandra:/camel-cassandra/stringCols/testcolumn"
  val inboundSpaceFamilySupercolumnColumn = "direct:inboundSpaceFamilySupercolumnColumn"
  val outboundSpaceFamilySupercolumnColumn = "cassandra:/camel-cassandra/superStringCols/!superduper/testcolumn"
  val inboundSpaceFamilyKeySupercolumn = "direct:inboundSpaceFamilyKeySupercolumnColumn"
  val outboundSpaceFamilyKeySupercolumn = "cassandra:/camel-cassandra/superStringCols/~testUrlKey/!superduper"
  val inboundSpring = "direct:spring"
  val outboundSpring = "cassandra:/camel-cassandra?columnFamilyExtractor=cfEx&columnExtractor=colEx"
  val inboundExtractSpaceFamilyColumn = "direct:inboundExtractSpaceFamilyColumn"
  val outboundExtractSpaceFamilyColumn = "cassandra:/?keyspaceExtractor=keyspaceEx&columnFamilyExtractor=cfEx&columnExtractor=colEx&keyExtractor=keyEx&valueExtractor=valEx"
  val inboundExtractSpaceFamilySuperColumn = "direct:inboundExtractSpaceFamilySuperColumn"
  val outboundExtractSpaceFamilySuperColumn = "cassandra:/?keyspaceExtractor=keyspaceEx&columnFamilyExtractor=cfEx&columnExtractor=colEx&keyExtractor=keyEx&valueExtractor=valEx&superColumnExtractor=supEx"
}
| ticktock/camel-cassandra | src/test/scala/org/apache/camel/component/cassandra/ProducerSuite.scala | Scala | apache-2.0 | 15,481 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.net.URLEncoder
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.HashSet
import scala.xml.{Elem, Node, Unparsed}
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.SparkConf
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo, TaskLocality}
import org.apache.spark.ui._
import org.apache.spark.ui.exec.ExecutorsListener
import org.apache.spark.ui.jobs.UIData._
import org.apache.spark.util.{Distribution, Utils}
/** Page showing statistics and task list for a given stage */
private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") {
  import StagePage._

  // Listeners owned by the parent tab; all stage/task state is read from these.
  private val progressListener = parent.progressListener
  private val operationGraphListener = parent.operationGraphListener
  private val executorsListener = parent.executorsListener

  // Static SVG legend rendered under the task-assignment timeline, mapping each
  // CSS class used on the per-task duration bars to a human-readable label.
  private val TIMELINE_LEGEND = {
    <div class="legend-area">
      <svg>
        {
          val legendPairs = List(("scheduler-delay-proportion", "Scheduler Delay"),
            ("deserialization-time-proportion", "Task Deserialization Time"),
            ("shuffle-read-time-proportion", "Shuffle Read Time"),
            ("executor-runtime-proportion", "Executor Computing Time"),
            ("shuffle-write-time-proportion", "Shuffle Write Time"),
            ("serialization-time-proportion", "Result Serialization Time"),
            ("getting-result-time-proportion", "Getting Result Time"))

          legendPairs.zipWithIndex.map {
            case ((classAttr, name), index) =>
              // Entries are laid out in a 3-row grid, 210px per column.
              <rect x={5 + (index / 3) * 210 + "px"} y={10 + (index % 3) * 15 + "px"}
                width="10px" height="10px" class={classAttr}></rect>
                <text x={25 + (index / 3) * 210 + "px"}
                  y={20 + (index % 3) * 15 + "px"}>{name}</text>
          }
        }
      </svg>
    </div>
  }

  // TODO: We should consider increasing the number of this parameter over time
  // if we find that it's okay.
  private val MAX_TIMELINE_TASKS = parent.conf.getInt("spark.ui.timeline.tasks.maximum", 1000)

  /**
   * Renders a "Locality Level Summary" string such as
   * "Node local: 3; Process local: 12" by counting each task's locality level.
   */
  private def getLocalitySummaryString(stageData: StageUIData): String = {
    val localities = stageData.taskData.values.map(_.taskInfo.taskLocality)
    val localityCounts = localities.groupBy(identity).mapValues(_.size)
    val localityNamesAndCounts = localityCounts.toSeq.map { case (locality, count) =>
      val localityName = locality match {
        case TaskLocality.PROCESS_LOCAL => "Process local"
        case TaskLocality.NODE_LOCAL => "Node local"
        case TaskLocality.RACK_LOCAL => "Rack local"
        case TaskLocality.ANY => "Any"
      }
      s"$localityName: $count"
    }
    localityNamesAndCounts.sorted.mkString("; ")
  }

  /**
   * Builds the full stage-detail page: summary, optional DAG visualization,
   * additional-metrics checkboxes, task timeline, summary-metrics quantile
   * table, per-executor aggregates, accumulators and the paged task table.
   * The whole render is done while holding the progressListener lock so the
   * listener's maps are not mutated mid-read.
   */
  def render(request: HttpServletRequest): Seq[Node] = {
    progressListener.synchronized {
      // Required identifiers for the stage attempt being displayed.
      val parameterId = request.getParameter("id")
      require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")

      val parameterAttempt = request.getParameter("attempt")
      require(parameterAttempt != null && parameterAttempt.nonEmpty, "Missing attempt parameter")

      // Optional paging/sorting state for the task table.
      val parameterTaskPage = request.getParameter("task.page")
      val parameterTaskSortColumn = request.getParameter("task.sort")
      val parameterTaskSortDesc = request.getParameter("task.desc")
      val parameterTaskPageSize = request.getParameter("task.pageSize")
      val parameterTaskPrevPageSize = request.getParameter("task.prevPageSize")

      val taskPage = Option(parameterTaskPage).map(_.toInt).getOrElse(1)
      val taskSortColumn = Option(parameterTaskSortColumn).map { sortColumn =>
        UIUtils.decodeURLParameter(sortColumn)
      }.getOrElse("Index")
      val taskSortDesc = Option(parameterTaskSortDesc).map(_.toBoolean).getOrElse(false)
      val taskPageSize = Option(parameterTaskPageSize).map(_.toInt).getOrElse(100)
      val taskPrevPageSize = Option(parameterTaskPrevPageSize).map(_.toInt).getOrElse(taskPageSize)

      val stageId = parameterId.toInt
      val stageAttemptId = parameterAttempt.toInt
      val stageDataOption = progressListener.stageIdToData.get((stageId, stageAttemptId))

      val stageHeader = s"Details for Stage $stageId (Attempt $stageAttemptId)"
      // Early exit: stage unknown to the listener (e.g. evicted or bogus id).
      if (stageDataOption.isEmpty) {
        val content =
          <div id="no-info">
            <p>No information to display for Stage {stageId} (Attempt {stageAttemptId})</p>
          </div>
        return UIUtils.headerSparkPage(stageHeader, content, parent)
      }
      // Early exit: stage known but no task has been launched yet.
      if (stageDataOption.get.taskData.isEmpty) {
        val content =
          <div>
            <h4>Summary Metrics</h4> No tasks have started yet
            <h4>Tasks</h4> No tasks have started yet
          </div>
        return UIUtils.headerSparkPage(stageHeader, content, parent)
      }

      val stageData = stageDataOption.get
      val tasks = stageData.taskData.values.toSeq.sortBy(_.taskInfo.launchTime)
      val numCompleted = stageData.numCompleteTasks

      // The listener may retain fewer task rows than the true task count, so
      // show "N, showing M" when they differ.
      val totalTasks = stageData.numActiveTasks +
        stageData.numCompleteTasks + stageData.numFailedTasks
      val totalTasksNumStr = if (totalTasks == tasks.size) {
        s"$totalTasks"
      } else {
        s"$totalTasks, showing ${tasks.size}"
      }

      // Only user-visible (non-internal) accumulators are displayed.
      val allAccumulables = progressListener.stageIdToData((stageId, stageAttemptId)).accumulables
      val externalAccumulables = allAccumulables.values.filter { acc => !acc.internal }
      val hasAccumulators = externalAccumulables.size > 0

      val summary =
        <div>
          <ul class="unstyled">
            <li>
              <strong>Total Time Across All Tasks: </strong>
              {UIUtils.formatDuration(stageData.executorRunTime)}
            </li>
            <li>
              <strong>Locality Level Summary: </strong>
              {getLocalitySummaryString(stageData)}
            </li>
            {if (stageData.hasInput) {
              <li>
                <strong>Input Size / Records: </strong>
                {s"${Utils.bytesToString(stageData.inputBytes)} / ${stageData.inputRecords}"}
              </li>
            }}
            {if (stageData.hasOutput) {
              <li>
                <strong>Output: </strong>
                {s"${Utils.bytesToString(stageData.outputBytes)} / ${stageData.outputRecords}"}
              </li>
            }}
            {if (stageData.hasShuffleRead) {
              <li>
                <strong>Shuffle Read: </strong>
                {s"${Utils.bytesToString(stageData.shuffleReadTotalBytes)} / " +
                s"${stageData.shuffleReadRecords}"}
              </li>
            }}
            {if (stageData.hasShuffleWrite) {
              <li>
                <strong>Shuffle Write: </strong>
                {s"${Utils.bytesToString(stageData.shuffleWriteBytes)} / " +
                s"${stageData.shuffleWriteRecords}"}
              </li>
            }}
            {if (stageData.hasBytesSpilled) {
              <li>
                <strong>Shuffle Spill (Memory): </strong>
                {Utils.bytesToString(stageData.memoryBytesSpilled)}
              </li>
              <li>
                <strong>Shuffle Spill (Disk): </strong>
                {Utils.bytesToString(stageData.diskBytesSpilled)}
              </li>
            }}
          </ul>
        </div>

      // Checkbox panel toggling optional columns; the name attributes are CSS
      // classes matched by the UI JavaScript to show/hide the columns.
      val showAdditionalMetrics =
        <div>
          <span class="expand-additional-metrics">
            <span class="expand-additional-metrics-arrow arrow-closed"></span>
            <a>Show Additional Metrics</a>
          </span>
          <div class="additional-metrics collapsed">
            <ul>
              <li>
                <input type="checkbox" id="select-all-metrics"/>
                <span class="additional-metric-title"><em>(De)select All</em></span>
              </li>
              <li>
                <span data-toggle="tooltip"
                      title={ToolTips.SCHEDULER_DELAY} data-placement="right">
                  <input type="checkbox" name={TaskDetailsClassNames.SCHEDULER_DELAY}/>
                  <span class="additional-metric-title">Scheduler Delay</span>
                </span>
              </li>
              <li>
                <span data-toggle="tooltip"
                      title={ToolTips.TASK_DESERIALIZATION_TIME} data-placement="right">
                  <input type="checkbox" name={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}/>
                  <span class="additional-metric-title">Task Deserialization Time</span>
                </span>
              </li>
              {if (stageData.hasShuffleRead) {
                <li>
                  <span data-toggle="tooltip"
                        title={ToolTips.SHUFFLE_READ_BLOCKED_TIME} data-placement="right">
                    <input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}/>
                    <span class="additional-metric-title">Shuffle Read Blocked Time</span>
                  </span>
                </li>
                <li>
                  <span data-toggle="tooltip"
                        title={ToolTips.SHUFFLE_READ_REMOTE_SIZE} data-placement="right">
                    <input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}/>
                    <span class="additional-metric-title">Shuffle Remote Reads</span>
                  </span>
                </li>
              }}
              <li>
                <span data-toggle="tooltip"
                      title={ToolTips.RESULT_SERIALIZATION_TIME} data-placement="right">
                  <input type="checkbox" name={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}/>
                  <span class="additional-metric-title">Result Serialization Time</span>
                </span>
              </li>
              <li>
                <span data-toggle="tooltip"
                      title={ToolTips.GETTING_RESULT_TIME} data-placement="right">
                  <input type="checkbox" name={TaskDetailsClassNames.GETTING_RESULT_TIME}/>
                  <span class="additional-metric-title">Getting Result Time</span>
                </span>
              </li>
              <li>
                <span data-toggle="tooltip"
                      title={ToolTips.PEAK_EXECUTION_MEMORY} data-placement="right">
                  <input type="checkbox" name={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}/>
                  <span class="additional-metric-title">Peak Execution Memory</span>
                </span>
              </li>
            </ul>
          </div>
        </div>

      val dagViz = UIUtils.showDagVizForStage(
        stageId, operationGraphListener.getOperationGraphForStage(stageId))

      val accumulableHeaders: Seq[String] = Seq("Accumulable", "Value")
      // Only accumulators with both a name and a value are rendered.
      def accumulableRow(acc: AccumulableInfo): Seq[Node] = {
        (acc.name, acc.value) match {
          case (Some(name), Some(value)) => <tr><td>{name}</td><td>{value}</td></tr>
          case _ => Seq.empty[Node]
        }
      }
      val accumulableTable = UIUtils.listingTable(
        accumulableHeaders,
        accumulableRow,
        externalAccumulables.toSeq)

      val page: Int = {
        // If the user has changed to a larger page size, then go to page 1 in order to avoid
        // IndexOutOfBoundsException.
        if (taskPageSize <= taskPrevPageSize) {
          taskPage
        } else {
          1
        }
      }
      val currentTime = System.currentTimeMillis()
      // Rendering the task table can fail on bad paging/sorting parameters; in
      // that case show the error inline instead of a 500 page.
      val (taskTable, taskTableHTML) = try {
        val _taskTable = new TaskPagedTable(
          parent.conf,
          UIUtils.prependBaseUri(parent.basePath) +
            s"/stages/stage?id=${stageId}&attempt=${stageAttemptId}",
          tasks,
          hasAccumulators,
          stageData.hasInput,
          stageData.hasOutput,
          stageData.hasShuffleRead,
          stageData.hasShuffleWrite,
          stageData.hasBytesSpilled,
          currentTime,
          pageSize = taskPageSize,
          sortColumn = taskSortColumn,
          desc = taskSortDesc,
          executorsListener = executorsListener
        )
        (_taskTable, _taskTable.table(page))
      } catch {
        case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
          val errorMessage =
            <div class="alert alert-error">
              <p>Error while rendering stage table:</p>
              <pre>
                {Utils.exceptionString(e)}
              </pre>
            </div>
          (null, errorMessage)
      }

      // After a sort request, scroll the browser down to the task table.
      val jsForScrollingDownToTaskTable =
        <script>
          {Unparsed {
            """
              |$(function() {
              |  if (/.*&task.sort=.*$/.test(location.search)) {
              |    var topOffset = $("#tasks-section").offset().top;
              |    $("html,body").animate({scrollTop: topOffset}, 200);
              |  }
              |});
            """.stripMargin
          }
          }
        </script>

      val taskIdsInPage = if (taskTable == null) Set.empty[Long]
        else taskTable.dataSource.slicedTaskIds

      // Excludes tasks which failed and have incomplete metrics
      val validTasks = tasks.filter(t => t.taskInfo.status == "SUCCESS" && t.metrics.isDefined)

      // Quantile table (min / 25% / median / 75% / max) over all succeeded tasks.
      val summaryTable: Option[Seq[Node]] =
        if (validTasks.size == 0) {
          None
        }
        else {
          def getDistributionQuantiles(data: Seq[Double]): IndexedSeq[Double] =
            Distribution(data).get.getQuantiles()
          def getFormattedTimeQuantiles(times: Seq[Double]): Seq[Node] = {
            getDistributionQuantiles(times).map { millis =>
              <td>{UIUtils.formatDuration(millis.toLong)}</td>
            }
          }
          def getFormattedSizeQuantiles(data: Seq[Double]): Seq[Elem] = {
            getDistributionQuantiles(data).map(d => <td>{Utils.bytesToString(d.toLong)}</td>)
          }

          val deserializationTimes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.executorDeserializeTime.toDouble
          }
          val deserializationQuantiles =
            <td>
              <span data-toggle="tooltip" title={ToolTips.TASK_DESERIALIZATION_TIME}
                    data-placement="right">
                Task Deserialization Time
              </span>
            </td> +: getFormattedTimeQuantiles(deserializationTimes)

          val serviceTimes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.executorRunTime.toDouble
          }
          val serviceQuantiles = <td>Duration</td> +: getFormattedTimeQuantiles(serviceTimes)

          val gcTimes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.jvmGCTime.toDouble
          }
          val gcQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.GC_TIME} data-placement="right">GC Time
              </span>
            </td> +: getFormattedTimeQuantiles(gcTimes)

          val serializationTimes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.resultSerializationTime.toDouble
          }
          val serializationQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.RESULT_SERIALIZATION_TIME} data-placement="right">
                Result Serialization Time
              </span>
            </td> +: getFormattedTimeQuantiles(serializationTimes)

          val gettingResultTimes = validTasks.map { taskUIData: TaskUIData =>
            getGettingResultTime(taskUIData.taskInfo, currentTime).toDouble
          }
          val gettingResultQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.GETTING_RESULT_TIME} data-placement="right">
                Getting Result Time
              </span>
            </td> +:
            getFormattedTimeQuantiles(gettingResultTimes)

          val peakExecutionMemory = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.peakExecutionMemory.toDouble
          }
          val peakExecutionMemoryQuantiles = {
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.PEAK_EXECUTION_MEMORY} data-placement="right">
                Peak Execution Memory
              </span>
            </td> +: getFormattedSizeQuantiles(peakExecutionMemory)
          }

          // The scheduler delay includes the network delay to send the task to the worker
          // machine and to send back the result (but not the time to fetch the task result,
          // if it needed to be fetched from the block manager on the worker).
          val schedulerDelays = validTasks.map { taskUIData: TaskUIData =>
            getSchedulerDelay(taskUIData.taskInfo, taskUIData.metrics.get, currentTime).toDouble
          }
          val schedulerDelayTitle = <td><span data-toggle="tooltip"
            title={ToolTips.SCHEDULER_DELAY} data-placement="right">Scheduler Delay</span></td>
          val schedulerDelayQuantiles = schedulerDelayTitle +:
            getFormattedTimeQuantiles(schedulerDelays)

          // Renders "size / record-count" cells by walking the two quantile
          // sequences in lockstep.
          def getFormattedSizeQuantilesWithRecords(data: Seq[Double], records: Seq[Double])
            : Seq[Elem] = {
            val recordDist = getDistributionQuantiles(records).iterator
            getDistributionQuantiles(data).map(d =>
              <td>{s"${Utils.bytesToString(d.toLong)} / ${recordDist.next().toLong}"}</td>
            )
          }

          val inputSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.inputMetrics.bytesRead.toDouble
          }
          val inputRecords = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.inputMetrics.recordsRead.toDouble
          }
          val inputQuantiles = <td>Input Size / Records</td> +:
            getFormattedSizeQuantilesWithRecords(inputSizes, inputRecords)

          val outputSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.outputMetrics.bytesWritten.toDouble
          }
          val outputRecords = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.outputMetrics.recordsWritten.toDouble
          }
          val outputQuantiles = <td>Output Size / Records</td> +:
            getFormattedSizeQuantilesWithRecords(outputSizes, outputRecords)

          val shuffleReadBlockedTimes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleReadMetrics.fetchWaitTime.toDouble
          }
          val shuffleReadBlockedQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.SHUFFLE_READ_BLOCKED_TIME} data-placement="right">
                Shuffle Read Blocked Time
              </span>
            </td> +:
            getFormattedTimeQuantiles(shuffleReadBlockedTimes)

          val shuffleReadTotalSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleReadMetrics.totalBytesRead.toDouble
          }
          val shuffleReadTotalRecords = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleReadMetrics.recordsRead.toDouble
          }
          val shuffleReadTotalQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.SHUFFLE_READ} data-placement="right">
                Shuffle Read Size / Records
              </span>
            </td> +:
            getFormattedSizeQuantilesWithRecords(shuffleReadTotalSizes, shuffleReadTotalRecords)

          val shuffleReadRemoteSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleReadMetrics.remoteBytesRead.toDouble
          }
          val shuffleReadRemoteQuantiles =
            <td>
              <span data-toggle="tooltip"
                    title={ToolTips.SHUFFLE_READ_REMOTE_SIZE} data-placement="right">
                Shuffle Remote Reads
              </span>
            </td> +:
            getFormattedSizeQuantiles(shuffleReadRemoteSizes)

          val shuffleWriteSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleWriteMetrics.bytesWritten.toDouble
          }
          val shuffleWriteRecords = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.shuffleWriteMetrics.recordsWritten.toDouble
          }
          val shuffleWriteQuantiles = <td>Shuffle Write Size / Records</td> +:
            getFormattedSizeQuantilesWithRecords(shuffleWriteSizes, shuffleWriteRecords)

          val memoryBytesSpilledSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.memoryBytesSpilled.toDouble
          }
          val memoryBytesSpilledQuantiles = <td>Shuffle spill (memory)</td> +:
            getFormattedSizeQuantiles(memoryBytesSpilledSizes)

          val diskBytesSpilledSizes = validTasks.map { taskUIData: TaskUIData =>
            taskUIData.metrics.get.diskBytesSpilled.toDouble
          }
          val diskBytesSpilledQuantiles = <td>Shuffle spill (disk)</td> +:
            getFormattedSizeQuantiles(diskBytesSpilledSizes)

          val listings: Seq[Seq[Node]] = Seq(
            <tr>{serviceQuantiles}</tr>,
            <tr class={TaskDetailsClassNames.SCHEDULER_DELAY}>{schedulerDelayQuantiles}</tr>,
            <tr class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}>
              {deserializationQuantiles}
            </tr>
            // NOTE(review): there is no comma after the </tr> above, so the
            // deserialization row and the GC row below concatenate into a single
            // Seq element (adjacent XML literals form one NodeBuffer). Verify
            // whether this is intentional or a missing ','.
            <tr>{gcQuantiles}</tr>,
            <tr class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}>
              {serializationQuantiles}
            </tr>,
            <tr class={TaskDetailsClassNames.GETTING_RESULT_TIME}>{gettingResultQuantiles}</tr>,
            <tr class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}>
              {peakExecutionMemoryQuantiles}
            </tr>,
            if (stageData.hasInput) <tr>{inputQuantiles}</tr> else Nil,
            if (stageData.hasOutput) <tr>{outputQuantiles}</tr> else Nil,
            if (stageData.hasShuffleRead) {
              <tr class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}>
                {shuffleReadBlockedQuantiles}
              </tr>
              <tr>{shuffleReadTotalQuantiles}</tr>
              <tr class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}>
                {shuffleReadRemoteQuantiles}
              </tr>
            } else {
              Nil
            },
            if (stageData.hasShuffleWrite) <tr>{shuffleWriteQuantiles}</tr> else Nil,
            if (stageData.hasBytesSpilled) <tr>{memoryBytesSpilledQuantiles}</tr> else Nil,
            if (stageData.hasBytesSpilled) <tr>{diskBytesSpilledQuantiles}</tr> else Nil)

          val quantileHeaders = Seq("Metric", "Min", "25th percentile",
            "Median", "75th percentile", "Max")
          // The summary table does not use CSS to stripe rows, which doesn't work with hidden
          // rows (instead, JavaScript in table.js is used to stripe the non-hidden rows).
          Some(UIUtils.listingTable(
            quantileHeaders,
            identity[Seq[Node]],
            listings,
            fixedWidth = true,
            id = Some("task-summary-table"),
            stripeRowsWithCss = false))
        }

      val executorTable = new ExecutorTable(stageId, stageAttemptId, parent)

      val maybeAccumulableTable: Seq[Node] =
        if (hasAccumulators) { <h4>Accumulators</h4> ++ accumulableTable } else Seq()

      val aggMetrics =
        <span class="collapse-aggregated-metrics collapse-table"
              onClick="collapseTable('collapse-aggregated-metrics','aggregated-metrics')">
          <h4>
            <span class="collapse-table-arrow arrow-open"></span>
            <a>Aggregated Metrics by Executor</a>
          </h4>
        </span>
        <div class="aggregated-metrics collapsible-table">
          {executorTable.toNodeSeq}
        </div>

      // Assemble the page top to bottom.
      val content =
        summary ++
        dagViz ++
        showAdditionalMetrics ++
        makeTimeline(
          // Only show the tasks in the table
          stageData.taskData.values.toSeq.filter(t => taskIdsInPage.contains(t.taskInfo.taskId)),
          currentTime) ++
        <h4>Summary Metrics for <a href="#tasks-section">{numCompleted} Completed Tasks</a></h4> ++
        <div>{summaryTable.getOrElse("No tasks have reported metrics yet.")}</div> ++
        aggMetrics ++
        maybeAccumulableTable ++
        <h4 id="tasks-section">Tasks ({totalTasksNumStr})</h4> ++
        taskTableHTML ++ jsForScrollingDownToTaskTable
      UIUtils.headerSparkPage(stageHeader, content, parent, showVisualization = true)
    }
  }

  /**
   * Builds the collapsible task-assignment timeline: one horizontal bar per task
   * (capped at MAX_TIMELINE_TASKS most-recently-launched), grouped by executor,
   * with each bar subdivided into per-phase proportions and a tooltip of the
   * raw numbers. The data is emitted as JSON-ish strings consumed by
   * drawTaskAssignmentTimeline in the page's JavaScript.
   */
  def makeTimeline(tasks: Seq[TaskUIData], currentTime: Long): Seq[Node] = {
    val executorsSet = new HashSet[(String, String)]
    var minLaunchTime = Long.MaxValue
    var maxFinishTime = Long.MinValue

    val executorsArrayStr =
      tasks.sortBy(-_.taskInfo.launchTime).take(MAX_TIMELINE_TASKS).map { taskUIData =>
        val taskInfo = taskUIData.taskInfo
        val executorId = taskInfo.executorId
        val host = taskInfo.host
        executorsSet += ((executorId, host))

        // For still-running tasks, treat "now" as the finish time so the bar
        // extends to the right edge.
        val launchTime = taskInfo.launchTime
        val finishTime = if (!taskInfo.running) taskInfo.finishTime else currentTime
        val totalExecutionTime = finishTime - launchTime
        minLaunchTime = launchTime.min(minLaunchTime)
        maxFinishTime = finishTime.max(maxFinishTime)

        def toProportion(time: Long) = time.toDouble / totalExecutionTime * 100

        val metricsOpt = taskUIData.metrics
        val shuffleReadTime =
          metricsOpt.map(_.shuffleReadMetrics.fetchWaitTime).getOrElse(0L)
        val shuffleReadTimeProportion = toProportion(shuffleReadTime)
        // writeTime is recorded in nanoseconds; convert to milliseconds.
        val shuffleWriteTime =
          (metricsOpt.map(_.shuffleWriteMetrics.writeTime).getOrElse(0L) / 1e6).toLong
        val shuffleWriteTimeProportion = toProportion(shuffleWriteTime)

        val serializationTime = metricsOpt.map(_.resultSerializationTime).getOrElse(0L)
        val serializationTimeProportion = toProportion(serializationTime)
        val deserializationTime = metricsOpt.map(_.executorDeserializeTime).getOrElse(0L)
        val deserializationTimeProportion = toProportion(deserializationTime)
        val gettingResultTime = getGettingResultTime(taskUIData.taskInfo, currentTime)
        val gettingResultTimeProportion = toProportion(gettingResultTime)
        val schedulerDelay =
          metricsOpt.map(getSchedulerDelay(taskInfo, _, currentTime)).getOrElse(0L)
        val schedulerDelayProportion = toProportion(schedulerDelay)

        val executorOverhead = serializationTime + deserializationTime
        val executorRunTime = if (taskInfo.running) {
          totalExecutionTime - executorOverhead - gettingResultTime
        } else {
          metricsOpt.map(_.executorRunTime).getOrElse(
            totalExecutionTime - executorOverhead - gettingResultTime)
        }
        val executorComputingTime = executorRunTime - shuffleReadTime - shuffleWriteTime
        // Computed as the remainder so all segments sum to (at most) 100%.
        val executorComputingTimeProportion =
          math.max(100 - schedulerDelayProportion - shuffleReadTimeProportion -
            shuffleWriteTimeProportion - serializationTimeProportion -
            deserializationTimeProportion - gettingResultTimeProportion, 0)

        // Left edge (x offset, in %) of each segment of the stacked bar.
        val schedulerDelayProportionPos = 0
        val deserializationTimeProportionPos =
          schedulerDelayProportionPos + schedulerDelayProportion
        val shuffleReadTimeProportionPos =
          deserializationTimeProportionPos + deserializationTimeProportion
        val executorRuntimeProportionPos =
          shuffleReadTimeProportionPos + shuffleReadTimeProportion
        val shuffleWriteTimeProportionPos =
          executorRuntimeProportionPos + executorComputingTimeProportion
        val serializationTimeProportionPos =
          shuffleWriteTimeProportionPos + shuffleWriteTimeProportion
        val gettingResultTimeProportionPos =
          serializationTimeProportionPos + serializationTimeProportion

        val index = taskInfo.index
        val attempt = taskInfo.attemptNumber

        val svgTag =
          if (totalExecutionTime == 0) {
            // SPARK-8705: Avoid invalid attribute error in JavaScript if execution time is 0
            """<svg class="task-assignment-timeline-duration-bar"></svg>"""
          } else {
           s"""<svg class="task-assignment-timeline-duration-bar">
                 |<rect class="scheduler-delay-proportion"
                   |x="$schedulerDelayProportionPos%" y="0px" height="26px"
                   |width="$schedulerDelayProportion%"></rect>
                 |<rect class="deserialization-time-proportion"
                   |x="$deserializationTimeProportionPos%" y="0px" height="26px"
                   |width="$deserializationTimeProportion%"></rect>
                 |<rect class="shuffle-read-time-proportion"
                   |x="$shuffleReadTimeProportionPos%" y="0px" height="26px"
                   |width="$shuffleReadTimeProportion%"></rect>
                 |<rect class="executor-runtime-proportion"
                   |x="$executorRuntimeProportionPos%" y="0px" height="26px"
                   |width="$executorComputingTimeProportion%"></rect>
                 |<rect class="shuffle-write-time-proportion"
                   |x="$shuffleWriteTimeProportionPos%" y="0px" height="26px"
                   |width="$shuffleWriteTimeProportion%"></rect>
                 |<rect class="serialization-time-proportion"
                   |x="$serializationTimeProportionPos%" y="0px" height="26px"
                   |width="$serializationTimeProportion%"></rect>
                 |<rect class="getting-result-time-proportion"
                   |x="$gettingResultTimeProportionPos%" y="0px" height="26px"
                   |width="$gettingResultTimeProportion%"></rect></svg>""".stripMargin
          }
        val timelineObject =
          s"""
             |{
               |'className': 'task task-assignment-timeline-object',
               |'group': '$executorId',
               |'content': '<div class="task-assignment-timeline-content"
                 |data-toggle="tooltip" data-placement="top"
                 |data-html="true" data-container="body"
                 |data-title="${s"Task " + index + " (attempt " + attempt + ")"}<br>
                 |Status: ${taskInfo.status}<br>
                 |Launch Time: ${UIUtils.formatDate(new Date(launchTime))}
                 |${
                     if (!taskInfo.running) {
                       s"""<br>Finish Time: ${UIUtils.formatDate(new Date(finishTime))}"""
                     } else {
                        ""
                      }
                   }
                 |<br>Scheduler Delay: $schedulerDelay ms
                 |<br>Task Deserialization Time: ${UIUtils.formatDuration(deserializationTime)}
                 |<br>Shuffle Read Time: ${UIUtils.formatDuration(shuffleReadTime)}
                 |<br>Executor Computing Time: ${UIUtils.formatDuration(executorComputingTime)}
                 |<br>Shuffle Write Time: ${UIUtils.formatDuration(shuffleWriteTime)}
                 |<br>Result Serialization Time: ${UIUtils.formatDuration(serializationTime)}
                 |<br>Getting Result Time: ${UIUtils.formatDuration(gettingResultTime)}">
                 |$svgTag',
               |'start': new Date($launchTime),
               |'end': new Date($finishTime)
             |}
           |""".stripMargin.replaceAll("""[\\r\\n]+""", " ")
        timelineObject
      }.mkString("[", ",", "]")

    // One timeline group per (executorId, host) pair seen above.
    val groupArrayStr = executorsSet.map {
      case (executorId, host) =>
        s"""
          {
            'id': '$executorId',
            'content': '$executorId / $host',
          }
        """
    }.mkString("[", ",", "]")

    <span class="expand-task-assignment-timeline">
      <span class="expand-task-assignment-timeline-arrow arrow-closed"></span>
      <a>Event Timeline</a>
    </span> ++
    <div id="task-assignment-timeline" class="collapsed">
      {
        if (MAX_TIMELINE_TASKS < tasks.size) {
          <strong>
            This stage has more than the maximum number of tasks that can be shown in the
            visualization! Only the most recent {MAX_TIMELINE_TASKS} tasks
            (of {tasks.size} total) are shown.
          </strong>
        } else {
          Seq.empty
        }
      }
      <div class="control-panel">
        <div id="task-assignment-timeline-zoom-lock">
          <input type="checkbox"></input>
          <span>Enable zooming</span>
        </div>
      </div>
      {TIMELINE_LEGEND}
    </div> ++
    <script type="text/javascript">
      {Unparsed(s"drawTaskAssignmentTimeline(" +
      s"$groupArrayStr, $executorsArrayStr, $minLaunchTime, $maxFinishTime, " +
      s"${UIUtils.getTimeZoneOffset()})")}
    </script>
  }
}
private[ui] object StagePage {

  /**
   * Time the driver spent fetching this task's result, in ms. Returns 0 when
   * result-fetching never started; for a fetch still in progress the elapsed
   * time up to `currentTime` is reported.
   */
  private[ui] def getGettingResultTime(info: TaskInfo, currentTime: Long): Long = {
    if (!info.gettingResult) {
      0L
    } else if (info.finished) {
      info.finishTime - info.gettingResultTime
    } else {
      // Fetch still in progress: measure up to "now".
      currentTime - info.gettingResultTime
    }
  }

  /**
   * Scheduler delay for a finished task: wall-clock duration minus run time,
   * (de)serialization overhead and result-fetch time, floored at 0. Returns 0
   * for a task that is still running, since its metrics are not yet available.
   */
  private[ui] def getSchedulerDelay(
      info: TaskInfo, metrics: TaskMetricsUIData, currentTime: Long): Long = {
    if (!info.finished) {
      0L
    } else {
      val wallClock = info.finishTime - info.launchTime
      val overhead = metrics.executorDeserializeTime + metrics.resultSerializationTime
      val delay = wallClock - metrics.executorRunTime - overhead -
        getGettingResultTime(info, currentTime)
      math.max(0, delay)
    }
  }
}
// Input-metrics cell data: raw byte count for sorting, formatted string for display.
private[ui] case class TaskTableRowInputData(inputSortable: Long, inputReadable: String)
// Output-metrics cell data: raw byte count for sorting, formatted string for display.
private[ui] case class TaskTableRowOutputData(outputSortable: Long, outputReadable: String)
// Shuffle-read cell data; each metric carries a sortable raw value plus a
// pre-formatted display string.
private[ui] case class TaskTableRowShuffleReadData(
    shuffleReadBlockedTimeSortable: Long,
    shuffleReadBlockedTimeReadable: String,
    shuffleReadSortable: Long,
    shuffleReadReadable: String,
    shuffleReadRemoteSortable: Long,
    shuffleReadRemoteReadable: String)
// Shuffle-write cell data; sortable raw values paired with display strings.
private[ui] case class TaskTableRowShuffleWriteData(
    writeTimeSortable: Long,
    writeTimeReadable: String,
    shuffleWriteSortable: Long,
    shuffleWriteReadable: String)
// Spill cell data (memory and disk); sortable raw values paired with display strings.
private[ui] case class TaskTableRowBytesSpilledData(
    memoryBytesSpilledSortable: Long,
    memoryBytesSpilledReadable: String,
    diskBytesSpilledSortable: Long,
    diskBytesSpilledReadable: String)
/**
 * Contains all of the data needed for sorting and for generating the HTML. Using this class
 * rather than TaskUIData avoids creating duplicate content while sorting the data.
 */
// One fully-materialized row of the task table. Optional groups (input, output,
// shuffle read/write, spill) are None when the stage has no such metrics.
private[ui] class TaskTableRowData(
    val index: Int,
    val taskId: Long,
    val attempt: Int,
    val speculative: Boolean,
    val status: String,
    val taskLocality: String,
    val executorIdAndHost: String,
    val launchTime: Long,
    val duration: Long,
    val formatDuration: String,
    val schedulerDelay: Long,
    val taskDeserializationTime: Long,
    val gcTime: Long,
    val serializationTime: Long,
    val gettingResultTime: Long,
    val peakExecutionMemoryUsed: Long,
    val accumulators: Option[String], // HTML
    val input: Option[TaskTableRowInputData],
    val output: Option[TaskTableRowOutputData],
    val shuffleRead: Option[TaskTableRowShuffleReadData],
    val shuffleWrite: Option[TaskTableRowShuffleWriteData],
    val bytesSpilled: Option[TaskTableRowBytesSpilledData],
    val error: String,
    // presumably log name -> URL for the task's executor — TODO confirm against taskRow
    val logs: Map[String, String])
/**
 * Page-able data source backing the task table. Every TaskUIData is eagerly
 * converted into a pre-rendered TaskTableRowData and the whole sequence is
 * sorted once at construction time, so paging only slices the sorted data.
 */
private[ui] class TaskDataSource(
    tasks: Seq[TaskUIData],
    hasAccumulators: Boolean,
    hasInput: Boolean,
    hasOutput: Boolean,
    hasShuffleRead: Boolean,
    hasShuffleWrite: Boolean,
    hasBytesSpilled: Boolean,
    currentTime: Long,
    pageSize: Int,
    sortColumn: String,
    desc: Boolean,
    executorsListener: ExecutorsListener) extends PagedDataSource[TaskTableRowData](pageSize) {
  import StagePage._
  // Convert TaskUIData to TaskTableRowData which contains the final contents to show in the table
  // so that we can avoid creating duplicate contents during sorting the data
  private val data = tasks.map(taskRow).sorted(ordering(sortColumn, desc))
  // Task ids of the most recently sliced page; null until sliceData is first called.
  private var _slicedTaskIds: Set[Long] = null
  override def dataSize: Int = data.size
  override def sliceData(from: Int, to: Int): Seq[TaskTableRowData] = {
    val r = data.slice(from, to)
    // Remember which tasks are on the current page (side effect of slicing).
    _slicedTaskIds = r.map(_.taskId).toSet
    r
  }
  // NOTE(review): returns null if called before sliceData — confirm callers
  // always slice first.
  def slicedTaskIds: Set[Long] = _slicedTaskIds
  /** Builds one pre-rendered table row from a task's info and metrics. */
  private def taskRow(taskData: TaskUIData): TaskTableRowData = {
    val info = taskData.taskInfo
    val metrics = taskData.metrics
    // NOTE(review): duration falls back to 1L (not 0L) when unavailable —
    // confirm this default is intentional.
    val duration = taskData.taskDuration.getOrElse(1L)
    val formatDuration = taskData.taskDuration.map(d => UIUtils.formatDuration(d)).getOrElse("")
    // Scalar timing metrics; 0 when the task has no metrics yet.
    val schedulerDelay = metrics.map(getSchedulerDelay(info, _, currentTime)).getOrElse(0L)
    val gcTime = metrics.map(_.jvmGCTime).getOrElse(0L)
    val taskDeserializationTime = metrics.map(_.executorDeserializeTime).getOrElse(0L)
    val serializationTime = metrics.map(_.resultSerializationTime).getOrElse(0L)
    val gettingResultTime = getGettingResultTime(info, currentTime)
    // Non-internal accumulators, HTML-escaped for safe rendering.
    val externalAccumulableReadable = info.accumulables
      .filterNot(_.internal)
      .flatMap { a =>
        (a.name, a.update) match {
          case (Some(name), Some(update)) => Some(StringEscapeUtils.escapeHtml4(s"$name: $update"))
          case _ => None
        }
      }
    val peakExecutionMemoryUsed = metrics.map(_.peakExecutionMemory).getOrElse(0L)
    // Input metrics: sortable raw bytes plus human-readable byte/record strings.
    val maybeInput = metrics.map(_.inputMetrics)
    val inputSortable = maybeInput.map(_.bytesRead).getOrElse(0L)
    val inputReadable = maybeInput
      .map(m => s"${Utils.bytesToString(m.bytesRead)}")
      .getOrElse("")
    val inputRecords = maybeInput.map(_.recordsRead.toString).getOrElse("")
    // Output metrics.
    val maybeOutput = metrics.map(_.outputMetrics)
    val outputSortable = maybeOutput.map(_.bytesWritten).getOrElse(0L)
    val outputReadable = maybeOutput
      .map(m => s"${Utils.bytesToString(m.bytesWritten)}")
      .getOrElse("")
    val outputRecords = maybeOutput.map(_.recordsWritten.toString).getOrElse("")
    // Shuffle-read metrics: fetch-wait (blocked) time, total and remote bytes.
    val maybeShuffleRead = metrics.map(_.shuffleReadMetrics)
    val shuffleReadBlockedTimeSortable = maybeShuffleRead.map(_.fetchWaitTime).getOrElse(0L)
    val shuffleReadBlockedTimeReadable =
      maybeShuffleRead.map(ms => UIUtils.formatDuration(ms.fetchWaitTime)).getOrElse("")
    val totalShuffleBytes = maybeShuffleRead.map(_.totalBytesRead)
    val shuffleReadSortable = totalShuffleBytes.getOrElse(0L)
    val shuffleReadReadable = totalShuffleBytes.map(Utils.bytesToString).getOrElse("")
    val shuffleReadRecords = maybeShuffleRead.map(_.recordsRead.toString).getOrElse("")
    val remoteShuffleBytes = maybeShuffleRead.map(_.remoteBytesRead)
    val shuffleReadRemoteSortable = remoteShuffleBytes.getOrElse(0L)
    val shuffleReadRemoteReadable = remoteShuffleBytes.map(Utils.bytesToString).getOrElse("")
    // Shuffle-write metrics; write time is converted from ns to ms for display.
    val maybeShuffleWrite = metrics.map(_.shuffleWriteMetrics)
    val shuffleWriteSortable = maybeShuffleWrite.map(_.bytesWritten).getOrElse(0L)
    val shuffleWriteReadable = maybeShuffleWrite
      .map(m => s"${Utils.bytesToString(m.bytesWritten)}").getOrElse("")
    val shuffleWriteRecords = maybeShuffleWrite
      .map(_.recordsWritten.toString).getOrElse("")
    val maybeWriteTime = metrics.map(_.shuffleWriteMetrics.writeTime)
    val writeTimeSortable = maybeWriteTime.getOrElse(0L)
    val writeTimeReadable = maybeWriteTime.map(t => t / (1000 * 1000)).map { ms =>
      if (ms == 0) "" else UIUtils.formatDuration(ms)
    }.getOrElse("")
    // Spill metrics.
    val maybeMemoryBytesSpilled = metrics.map(_.memoryBytesSpilled)
    val memoryBytesSpilledSortable = maybeMemoryBytesSpilled.getOrElse(0L)
    val memoryBytesSpilledReadable =
      maybeMemoryBytesSpilled.map(Utils.bytesToString).getOrElse("")
    val maybeDiskBytesSpilled = metrics.map(_.diskBytesSpilled)
    val diskBytesSpilledSortable = maybeDiskBytesSpilled.getOrElse(0L)
    val diskBytesSpilledReadable = maybeDiskBytesSpilled.map(Utils.bytesToString).getOrElse("")
    // Only materialize the optional column groups that are actually shown.
    val input =
      if (hasInput) {
        Some(TaskTableRowInputData(inputSortable, s"$inputReadable / $inputRecords"))
      } else {
        None
      }
    val output =
      if (hasOutput) {
        Some(TaskTableRowOutputData(outputSortable, s"$outputReadable / $outputRecords"))
      } else {
        None
      }
    val shuffleRead =
      if (hasShuffleRead) {
        Some(TaskTableRowShuffleReadData(
          shuffleReadBlockedTimeSortable,
          shuffleReadBlockedTimeReadable,
          shuffleReadSortable,
          s"$shuffleReadReadable / $shuffleReadRecords",
          shuffleReadRemoteSortable,
          shuffleReadRemoteReadable
        ))
      } else {
        None
      }
    val shuffleWrite =
      if (hasShuffleWrite) {
        Some(TaskTableRowShuffleWriteData(
          writeTimeSortable,
          writeTimeReadable,
          shuffleWriteSortable,
          s"$shuffleWriteReadable / $shuffleWriteRecords"
        ))
      } else {
        None
      }
    val bytesSpilled =
      if (hasBytesSpilled) {
        Some(TaskTableRowBytesSpilledData(
          memoryBytesSpilledSortable,
          memoryBytesSpilledReadable,
          diskBytesSpilledSortable,
          diskBytesSpilledReadable
        ))
      } else {
        None
      }
    // Executor log links, if the executors listener knows about this executor.
    val logs = executorsListener.executorToTaskSummary.get(info.executorId)
      .map(_.executorLogs).getOrElse(Map.empty)
    new TaskTableRowData(
      info.index,
      info.taskId,
      info.attemptNumber,
      info.speculative,
      info.status,
      info.taskLocality.toString,
      s"${info.executorId} / ${info.host}",
      info.launchTime,
      duration,
      formatDuration,
      schedulerDelay,
      taskDeserializationTime,
      gcTime,
      serializationTime,
      gettingResultTime,
      peakExecutionMemoryUsed,
      if (hasAccumulators) Some(externalAccumulableReadable.mkString("<br/>")) else None,
      input,
      output,
      shuffleRead,
      shuffleWrite,
      bytesSpilled,
      taskData.errorMessage.getOrElse(""),
      logs)
  }
  /**
   * Return Ordering according to sortColumn and desc.
   * Throws IllegalArgumentException for unknown columns or columns whose
   * metric group is not present in this table.
   */
  private def ordering(sortColumn: String, desc: Boolean): Ordering[TaskTableRowData] = {
    val ordering: Ordering[TaskTableRowData] = sortColumn match {
      case "Index" => Ordering.by(_.index)
      case "ID" => Ordering.by(_.taskId)
      case "Attempt" => Ordering.by(_.attempt)
      case "Status" => Ordering.by(_.status)
      case "Locality Level" => Ordering.by(_.taskLocality)
      case "Executor ID / Host" => Ordering.by(_.executorIdAndHost)
      case "Launch Time" => Ordering.by(_.launchTime)
      case "Duration" => Ordering.by(_.duration)
      case "Scheduler Delay" => Ordering.by(_.schedulerDelay)
      case "Task Deserialization Time" => Ordering.by(_.taskDeserializationTime)
      case "GC Time" => Ordering.by(_.gcTime)
      case "Result Serialization Time" => Ordering.by(_.serializationTime)
      case "Getting Result Time" => Ordering.by(_.gettingResultTime)
      case "Peak Execution Memory" => Ordering.by(_.peakExecutionMemoryUsed)
      case "Accumulators" =>
        if (hasAccumulators) {
          Ordering.by(_.accumulators.get)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Accumulators because of no accumulators")
        }
      case "Input Size / Records" =>
        if (hasInput) {
          Ordering.by(_.input.get.inputSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Input Size / Records because of no inputs")
        }
      case "Output Size / Records" =>
        if (hasOutput) {
          Ordering.by(_.output.get.outputSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Output Size / Records because of no outputs")
        }
      // ShuffleRead
      case "Shuffle Read Blocked Time" =>
        if (hasShuffleRead) {
          Ordering.by(_.shuffleRead.get.shuffleReadBlockedTimeSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Read Blocked Time because of no shuffle reads")
        }
      case "Shuffle Read Size / Records" =>
        if (hasShuffleRead) {
          Ordering.by(_.shuffleRead.get.shuffleReadSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Read Size / Records because of no shuffle reads")
        }
      case "Shuffle Remote Reads" =>
        if (hasShuffleRead) {
          Ordering.by(_.shuffleRead.get.shuffleReadRemoteSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Remote Reads because of no shuffle reads")
        }
      // ShuffleWrite
      case "Write Time" =>
        if (hasShuffleWrite) {
          Ordering.by(_.shuffleWrite.get.writeTimeSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Write Time because of no shuffle writes")
        }
      case "Shuffle Write Size / Records" =>
        if (hasShuffleWrite) {
          Ordering.by(_.shuffleWrite.get.shuffleWriteSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Write Size / Records because of no shuffle writes")
        }
      // BytesSpilled
      case "Shuffle Spill (Memory)" =>
        if (hasBytesSpilled) {
          Ordering.by(_.bytesSpilled.get.memoryBytesSpilledSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Spill (Memory) because of no spills")
        }
      case "Shuffle Spill (Disk)" =>
        if (hasBytesSpilled) {
          Ordering.by(_.bytesSpilled.get.diskBytesSpilledSortable)
        } else {
          throw new IllegalArgumentException(
            "Cannot sort by Shuffle Spill (Disk) because of no spills")
        }
      case "Errors" => Ordering.by(_.error)
      case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
    }
    if (desc) {
      ordering.reverse
    } else {
      ordering
    }
  }
}
/**
 * Paged HTML table of tasks for a stage. Optional metric column groups
 * (accumulators, input, output, shuffle read/write, spills) are rendered only
 * when the corresponding has* flag is set; headers and row cells are built
 * from the same flags and must stay in sync.
 */
private[ui] class TaskPagedTable(
    conf: SparkConf,
    basePath: String,
    data: Seq[TaskUIData],
    hasAccumulators: Boolean,
    hasInput: Boolean,
    hasOutput: Boolean,
    hasShuffleRead: Boolean,
    hasShuffleWrite: Boolean,
    hasBytesSpilled: Boolean,
    currentTime: Long,
    pageSize: Int,
    sortColumn: String,
    desc: Boolean,
    executorsListener: ExecutorsListener) extends PagedTable[TaskTableRowData] {
  override def tableId: String = "task-table"
  override def tableCssClass: String =
    "table table-bordered table-condensed table-striped table-head-clickable"
  override def pageSizeFormField: String = "task.pageSize"
  override def prevPageSizeFormField: String = "task.prevPageSize"
  override def pageNumberFormField: String = "task.page"
  override val dataSource: TaskDataSource = new TaskDataSource(
    data,
    hasAccumulators,
    hasInput,
    hasOutput,
    hasShuffleRead,
    hasShuffleWrite,
    hasBytesSpilled,
    currentTime,
    pageSize,
    sortColumn,
    desc,
    executorsListener)
  // Link to a specific page, preserving the current sort column/direction.
  override def pageLink(page: Int): String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    basePath +
      s"&$pageNumberFormField=$page" +
      s"&task.sort=$encodedSortColumn" +
      s"&task.desc=$desc" +
      s"&$pageSizeFormField=$pageSize"
  }
  override def goButtonFormPath: String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    s"$basePath&task.sort=$encodedSortColumn&task.desc=$desc"
  }
  /** Builds the header row; each header is a link toggling sort on that column. */
  def headers: Seq[Node] = {
    // (column title, css class) pairs; the optional groups mirror the row cells.
    val taskHeadersAndCssClasses: Seq[(String, String)] =
      Seq(
        ("Index", ""), ("ID", ""), ("Attempt", ""), ("Status", ""), ("Locality Level", ""),
        ("Executor ID / Host", ""), ("Launch Time", ""), ("Duration", ""),
        ("Scheduler Delay", TaskDetailsClassNames.SCHEDULER_DELAY),
        ("Task Deserialization Time", TaskDetailsClassNames.TASK_DESERIALIZATION_TIME),
        ("GC Time", ""),
        ("Result Serialization Time", TaskDetailsClassNames.RESULT_SERIALIZATION_TIME),
        ("Getting Result Time", TaskDetailsClassNames.GETTING_RESULT_TIME),
        ("Peak Execution Memory", TaskDetailsClassNames.PEAK_EXECUTION_MEMORY)) ++
        {if (hasAccumulators) Seq(("Accumulators", "")) else Nil} ++
        {if (hasInput) Seq(("Input Size / Records", "")) else Nil} ++
        {if (hasOutput) Seq(("Output Size / Records", "")) else Nil} ++
        {if (hasShuffleRead) {
          Seq(("Shuffle Read Blocked Time", TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME),
            ("Shuffle Read Size / Records", ""),
            ("Shuffle Remote Reads", TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE))
        } else {
          Nil
        }} ++
        {if (hasShuffleWrite) {
          Seq(("Write Time", ""), ("Shuffle Write Size / Records", ""))
        } else {
          Nil
        }} ++
        {if (hasBytesSpilled) {
          Seq(("Shuffle Spill (Memory)", ""), ("Shuffle Spill (Disk)", ""))
        } else {
          Nil
        }} ++
        Seq(("Errors", ""))
    // Reject sort requests for columns that are not rendered in this table.
    if (!taskHeadersAndCssClasses.map(_._1).contains(sortColumn)) {
      throw new IllegalArgumentException(s"Unknown column: $sortColumn")
    }
    val headerRow: Seq[Node] = {
      taskHeadersAndCssClasses.map { case (header, cssClass) =>
        if (header == sortColumn) {
          // Current sort column: clicking toggles direction; show an arrow.
          val headerLink = Unparsed(
            basePath +
              s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" +
              s"&task.desc=${!desc}" +
              s"&task.pageSize=$pageSize")
          val arrow = if (desc) "&#x25BE;" else "&#x25B4;" // UP or DOWN
          <th class={cssClass}>
            <a href={headerLink}>
              {header}
              <span>&nbsp;{Unparsed(arrow)}</span>
            </a>
          </th>
        } else {
          val headerLink = Unparsed(
            basePath +
              s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" +
              s"&task.pageSize=$pageSize")
          <th class={cssClass}>
            <a href={headerLink}>
              {header}
            </a>
          </th>
        }
      }
    }
    <thead>{headerRow}</thead>
  }
  /** Renders a single task row; optional cells mirror the optional headers. */
  def row(task: TaskTableRowData): Seq[Node] = {
    <tr>
      <td>{task.index}</td>
      <td>{task.taskId}</td>
      <td>{if (task.speculative) s"${task.attempt} (speculative)" else task.attempt.toString}</td>
      <td>{task.status}</td>
      <td>{task.taskLocality}</td>
      <td>
        <div style="float: left">{task.executorIdAndHost}</div>
        <div style="float: right">
        {
          task.logs.map {
            case (logName, logUrl) => <div><a href={logUrl}>{logName}</a></div>
          }
        }
        </div>
      </td>
      <td>{UIUtils.formatDate(new Date(task.launchTime))}</td>
      <td>{task.formatDuration}</td>
      <td class={TaskDetailsClassNames.SCHEDULER_DELAY}>
        {UIUtils.formatDuration(task.schedulerDelay)}
      </td>
      <td class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}>
        {UIUtils.formatDuration(task.taskDeserializationTime)}
      </td>
      <td>
        {if (task.gcTime > 0) UIUtils.formatDuration(task.gcTime) else ""}
      </td>
      <td class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}>
        {UIUtils.formatDuration(task.serializationTime)}
      </td>
      <td class={TaskDetailsClassNames.GETTING_RESULT_TIME}>
        {UIUtils.formatDuration(task.gettingResultTime)}
      </td>
      <td class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}>
        {Utils.bytesToString(task.peakExecutionMemoryUsed)}
      </td>
      {if (task.accumulators.nonEmpty) {
        <td>{Unparsed(task.accumulators.get)}</td>
      }}
      {if (task.input.nonEmpty) {
        <td>{task.input.get.inputReadable}</td>
      }}
      {if (task.output.nonEmpty) {
        <td>{task.output.get.outputReadable}</td>
      }}
      {if (task.shuffleRead.nonEmpty) {
        <td class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}>
          {task.shuffleRead.get.shuffleReadBlockedTimeReadable}
        </td>
        <td>{task.shuffleRead.get.shuffleReadReadable}</td>
        <td class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}>
          {task.shuffleRead.get.shuffleReadRemoteReadable}
        </td>
      }}
      {if (task.shuffleWrite.nonEmpty) {
        <td>{task.shuffleWrite.get.writeTimeReadable}</td>
        <td>{task.shuffleWrite.get.shuffleWriteReadable}</td>
      }}
      {if (task.bytesSpilled.nonEmpty) {
        <td>{task.bytesSpilled.get.memoryBytesSpilledReadable}</td>
        <td>{task.bytesSpilled.get.diskBytesSpilledReadable}</td>
      }}
      {errorMessageCell(task.error)}
    </tr>
  }
  /** Renders the error cell; multi-line errors get a collapsible details block. */
  private def errorMessageCell(error: String): Seq[Node] = {
    val isMultiline = error.indexOf('\\n') >= 0
    // Display the first line by default
    val errorSummary = StringEscapeUtils.escapeHtml4(
      if (isMultiline) {
        error.substring(0, error.indexOf('\\n'))
      } else {
        error
      })
    val details = if (isMultiline) {
      // scalastyle:off
      <span onclick="this.parentNode.querySelector('.stacktrace-details').classList.toggle('collapsed')"
            class="expand-details">
        +details
      </span> ++
        <div class="stacktrace-details collapsed">
          <pre>{error}</pre>
        </div>
      // scalastyle:on
    } else {
      ""
    }
    <td>{errorSummary}{details}</td>
  }
}
| jianran/spark | core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala | Scala | apache-2.0 | 54,941 |
package ems
import unfiltered.request.{HttpRequest, Accepts}
/** Accept-header matcher for Collection+JSON requests.
  *
  * Matches a plain JSON Accept header first, then falls back to the
  * Collection+JSON content type from [[CollectionJsonResponse]]; the
  * extension used by the `Accepting` machinery is "json".
  */
object AcceptCollectionJson extends Accepts.Accepting {
  val contentType = CollectionJsonResponse.contentType
  val ext = "json"

  override def unapply[T](r: HttpRequest[T]) =
    Accepts.Json.unapply(r).orElse(super.unapply(r))
}
| javaBin/ems-redux | src/main/scala/ems/AcceptCollectionJson.scala | Scala | apache-2.0 | 292 |
package com.richard.test.spark.initialize
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
/** Minimal Spark Streaming word-count example.
  *
  * Reads whitespace-separated text from a TCP socket at hadoop-master:9999,
  * counts words within each 1-second batch and prints the counts.
  *
  * Uses an explicit `main` instead of `extends App`: the App trait's
  * DelayedInit initialization interacts badly with closure serialization in
  * Spark jobs (Spark's docs recommend a plain main method for applications).
  */
object SparkStreaming {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("UseScalaDealSocketStreaming")
    // 1-second micro-batch interval.
    val ssc = new StreamingContext(conf, Seconds(1))

    val lines = ssc.socketTextStream("hadoop-master", 9999)
    val words = lines.flatMap(_.split(" "))
    val pairs = words.map(word => (word, 1))
    val counts = pairs.reduceByKey(_ + _)
    counts.print()

    ssc.start()
    // Block until the streaming context is stopped externally.
    ssc.awaitTermination()
  }
}
/*
* Copyright (C) 2015-2015 Paulo Angelo Alves Resende <pa@pauloangelo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License Version 2 as
* published by the Free Software Foundation. You may not use, modify or
* distribute this program under any other version of the GNU General
* Public License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package org.hogzilla
/**
* @author pa
*/
/** Package object for `org.hogzilla.dns`. Currently defines no members;
  * it is a placeholder for package-level definitions. */
package object dns {
}
/*
* Copyright © 2016 Schlichtherle IT Services
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package global.namespace.neuron.di.scala.test
import java.util.Date
import global.namespace.neuron.di.scala.sample.FixedClockModule
import org.scalatest.matchers.should.Matchers._
import org.scalatest.wordspec.AnyWordSpec
/** Checks that [[FixedClockModule]] caches the clock instance while `now`
  * still yields a fresh `Date` per call, always fixed at the epoch. */
class FixedClockModuleSpec extends AnyWordSpec {

  "Make a fixed clock" in {
    val firstLookup = FixedClockModule.clock
    val secondLookup = FixedClockModule.clock
    // The clock itself is memoized: both lookups return the same object ...
    secondLookup should be theSameInstanceAs firstLookup
    // ... while each `now` call produces a distinct Date instance ...
    val nowA = firstLookup.now
    val nowB = firstLookup.now
    nowA should not be theSameInstanceAs(nowB)
    // ... whose value is pinned to the epoch.
    nowA shouldBe new Date(0)
  }
}
| christian-schlichtherle/neuron-di | core-scala/src/test/scala/global/namespace/neuron/di/scala/test/FixedClockModuleSpec.scala | Scala | apache-2.0 | 1,114 |
package lila.hub
import scala.concurrent.duration._
import scala.concurrent.Promise
import scala.util.Try
import akka.actor._
/** Actor that executes submitted [[Sequencer.Work]] strictly one at a time.
  *
  * While idle, the first incoming message flips the actor to `busy` and is
  * processed immediately; messages arriving while busy are buffered in
  * `queue` and drained in FIFO order, one per internal `Done` signal.
  *
  * @param receiveTimeout   if set, the actor poison-pills itself after this
  *                         period of inactivity (via ReceiveTimeout)
  * @param executionTimeout default per-work timeout, overridable per Work
  */
final class Sequencer(
    receiveTimeout: Option[FiniteDuration],
    executionTimeout: Option[FiniteDuration] = None,
    logger: lila.log.Logger) extends Actor {
  receiveTimeout.foreach(context.setReceiveTimeout)
  private def idle: Receive = {
    case msg =>
      context become busy
      processThenDone(msg)
  }
  private def busy: Receive = {
    // Done: current work finished; pick the next queued item or go idle.
    case Done => dequeue match {
      case None => context become idle
      case Some(work) => processThenDone(work)
    }
    case msg => queue enqueue msg
  }
  def receive = idle
  private val queue = collection.mutable.Queue[Any]()
  // Try guards against dequeueing from an empty queue.
  private def dequeue: Option[Any] = Try(queue.dequeue).toOption
  private case object Done
  private def processThenDone(work: Any) {
    work match {
      // receiveTimeout elapsed with no traffic: shut ourselves down.
      case ReceiveTimeout => self ! PoisonPill
      case Sequencer.Work(run, promiseOption, timeoutOption) =>
        // Per-work timeout wins over the sequencer-wide default; no timeout
        // if neither is set. Done is signalled whether the future succeeds
        // or fails, so the queue keeps draining.
        val future = timeoutOption.orElse(executionTimeout).fold(run()) { timeout =>
          run().withTimeout(
            duration = timeout,
            error = lila.common.LilaException(s"Sequencer timed out after $timeout")
          )(context.system)
        } andThenAnyway {
          self ! Done
        }
        promiseOption foreach (_ completeWith future)
      case x => logger.branch("Sequencer").warn(s"Unsupported message $x")
    }
  }
}
object Sequencer {

  /** One unit of sequenced work.
    *
    * @param run     produces the future to execute (invoked lazily, when scheduled)
    * @param promise if set, completed with the outcome of `run`
    * @param timeout per-work execution timeout, overriding the sequencer default
    */
  case class Work(
    run: () => Funit,
    promise: Option[Promise[Unit]] = None,
    timeout: Option[FiniteDuration] = None)

  /** Convenience factory capturing a by-name computation as a [[Work]]. */
  def work(
    run: => Funit,
    promise: Option[Promise[Unit]] = None,
    timeout: Option[FiniteDuration] = None): Work = Work(() => run, promise, timeout)
}
| clarkerubber/lila | modules/hub/src/main/Sequencer.scala | Scala | agpl-3.0 | 1,770 |
/*
* Copyright 2015 Avira Operations GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.avira.ds.sparser.samples
import com.avira.ds.MacroUtils
import com.avira.ds.sparser._
import scala.util.{Failure, Success, Try}
/** Nested object sample returned as output by [[SampleNestedObjectParser]].
  *
  * @param a first TSV column, verbatim
  * @param b second TSV column, verbatim
  * @param c third TSV column parsed from an "x:y" string into a [[Ratio]]
  */
case class NestedObject(
    a: String,
    b: String,
    c: Ratio)
/** Integer ratio `x:y`, the nested part of [[NestedObject]].
  *
  * @see [[NestedObject]]
  */
case class Ratio(
    x: Int,
    y: Int)
/** Sample parser implemented to show how to test a
  * [[com.avira.ds.sparser.Parser]] implementation which outputs a nested
  * object.
  *
  * Input lines are expected to be TSV with three columns: two verbatim
  * strings and an "x:y" ratio of two integers.
  *
  * Check the tests to see how this kind of parsers can be tested.
  */
class SampleNestedObjectParser extends Parser[String, NestedObject] {

  import SampleNestedObjectParser._

  override def parse(
      inputResult: ParseResult[String, String]): ParseResult[String, NestedObject] = {
    // Stage 1: split the TSV line. Exactly 3 columns succeed; extra columns
    // produce a warning, fewer produce a failure.
    val columnsResult = inputResult.transform { line: String =>
      val cols = line.split("\t")
      cols match {
        case Array(a, b, c) => TransformSuccess((a, b, c))
        case Array(a, b, c, _*) => TransformWarning((a, b, c),
          TooManyColumnsParseError(cols.length))
        case _ => TransformFailure(
          NotEnoughColumnsParseError(cols.length))
      }
    }

    // Stage 2: split the components of the Ratio object and parse the numbers.
    columnsResult.transform { case (a, b, cRaw) =>
      cRaw.split(":") match {
        case Array(xRaw, yRaw) => Try(Ratio(xRaw.toInt, yRaw.toInt)) match {
          case Success(c) => TransformSuccess(NestedObject(a, b, c))
          case Failure(e: NumberFormatException) => TransformFailure(InvalidNumbersParseError(e))
          case Failure(e) => throw new RuntimeException(e)
        }
        // Fix: the original match was not exhaustive, so malformed ratios such
        // as "abc" or "1:2:3" escaped as a scala.MatchError instead of being
        // reported as a parse failure.
        case _ => TransformFailure(InvalidNumbersParseError(
          new NumberFormatException(s"'$cRaw' is not a valid 'x:y' ratio")))
      }
    }
  }
}
/** Companion of [[SampleNestedObjectParser]] which defines the
  * [[com.avira.ds.sparser.ParseError]]s that can be reported by the parser.
  */
object SampleNestedObjectParser {

  /** Common base for this parser's errors; sealed so instances can be
    * enumerated reflectively by [[parseErrorClasses]]. */
  sealed abstract class SampleNestedObjectParseError(
      override val message: Option[String],
      override val args: Seq[Any]) extends ParseError

  // Reported (as a warning) when a line has more than 3 TSV columns.
  case class TooManyColumnsParseError(colsCount: Int)
    extends SampleNestedObjectParseError(Some("Too many columns"), Seq(colsCount))

  // Reported when a line has fewer than 3 TSV columns.
  case class NotEnoughColumnsParseError(colsCount: Int)
    extends SampleNestedObjectParseError(Some("Insufficient columns"), Seq(colsCount))

  // Reported when the third column is not a parseable "x:y" integer ratio.
  case class InvalidNumbersParseError(e: NumberFormatException)
    extends SampleNestedObjectParseError(
      Some("At least one of the numbers in Ratio is invalid"), Seq(e))

  /** Returns all [[ParseError]]s which could be reported by
    * [[SampleNestedObjectParser]].
    */
  def parseErrorClasses: Set[Class[_ <: ParseError]] =
    MacroUtils.getSealedClassChildren[SampleNestedObjectParseError]
      .asInstanceOf[Set[Class[_ <: ParseError]]]
}
| Avira/sparser | core/src/main/scala/com/avira/ds/sparser/samples/SampleNestedObjectParser.scala | Scala | apache-2.0 | 3,424 |
package mavenscala.railroad
/**
* Created by Administrator on 2016/4/23 0023.
*/
/** Edge of the railroad graph: leads to `toStation` at the given `distance`. */
case class Edge(toStation: Station, distance: Int) {

  /** Renders as "[stationName,distance]". */
  override def toString: String = s"[${toStation.name},$distance]"
}
| wjingyao2008/homework-tran | myapp/src/main/scala/mavenscala/railroad/Edge.scala | Scala | apache-2.0 | 231 |
// Regression test for runtime `asInstanceOf` semantics on boxed values;
// each val records either the cast result or a marker for the expected
// ClassCastException. Kept verbatim — rewriting would change what is tested.
object Test {
  // Casting a String to Int is expected to throw, yielding "cce1".
  val x1 = try { "aaa".asInstanceOf[Int] } catch { case _: Throwable => "cce1" }
  // An Int widened to Any casts back to Int successfully (yields 5).
  val x2 = try { (5: Any).asInstanceOf[Int] } catch { case _: Throwable => "cce2" }
  // NOTE(review): presumably a boxed java.lang.Short does not unbox as Int,
  // so this should yield "cce3" — confirm against the test's checkfile.
  val x3 = try { (new java.lang.Short(100.toShort).asInstanceOf[Int]) } catch { case _: Throwable => "cce3" }

  def main(args: Array[String]): Unit = {
    List(x1, x2, x3) foreach println
  }
}
| yusuke2255/dotty | tests/run/t4148.scala | Scala | bsd-3-clause | 375 |
package org.scalameter
package collections
package fast
import collection._
import Key._
/** ScalaMeter regression benchmark comparing Traversable operations
  * (foreach, reduce, filter, groupBy, map, flatMap) across Array,
  * ArrayBuffer, Vector, List and Range. Per-curve `config` blocks tune
  * warmup/run counts and GC behavior for each collection; results are
  * persisted via serialization for regression comparison.
  */
class TraversableBenchmarks extends Bench.Regression with Collections {
  def persistor = new persistence.SerializationPersistor()
  /* traversable collections */
  performance of "Traversable" in {
    // Side-effecting traversal summing elements; sizes 200k..1M.
    measure method "foreach" config (
      exec.minWarmupRuns -> 150,
      exec.maxWarmupRuns -> 450,
      exec.benchRuns -> 36,
      exec.independentSamples -> 3,
      reports.regression.significance -> 1e-13,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 200000
      val to = 1000000
      val by = 200000
      using(arrays(from, to, by)) curve("Array") in { xs =>
        var sum = 0
        xs.foreach(sum += _)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in { xs =>
        var sum = 0
        xs.foreach(sum += _)
      }
      using(vectors(from, to, by)) curve("Vector") in { xs =>
        var sum = 0
        xs.foreach(sum += _)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 32,
        exec.independentSamples -> 4,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 5
      ) in { xs =>
        var sum = 0
        xs.foreach(sum += _)
      }
      // NOTE(review): this curve traverses twice, unlike the other curves in
      // this method — confirm whether the duplicated foreach is intentional.
      using(ranges(from, to, by)) curve("Range") in { xs =>
        var sum = 0
        xs.foreach(sum += _)
        xs.foreach(sum += _)
      }
    }
    // Pairwise reduction (sum) over sizes 100k..600k.
    measure method "reduce" config (
      exec.minWarmupRuns -> 120,
      exec.maxWarmupRuns -> 240,
      exec.benchRuns -> 36,
      exec.independentSamples -> 3,
      reports.regression.significance -> 1e-13,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 100000
      val to = 600000
      val by = 150000
      using(arrays(from, to, by)) curve("Array") config (
        exec.minWarmupRuns -> 150,
        exec.maxWarmupRuns -> 320
      ) in {
        _.reduce(_ + _)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in {
        _.reduce(_ + _)
      }
      using(vectors(from, to, by)) curve("Vector") in {
        _.reduce(_ + _)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 30,
        exec.independentSamples -> 4,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 5
      ) in {
        _.reduce(_ + _)
      }
      using(ranges(from, to, by)) curve("Range") in {
        _.reduce(_ + _)
      }
    }
    // Keep even elements; allocates a new collection per run; sizes 100k..400k.
    measure method "filter" config (
      exec.minWarmupRuns -> 100,
      exec.maxWarmupRuns -> 200,
      exec.benchRuns -> 36,
      exec.independentSamples -> 4,
      reports.regression.significance -> 1e-13,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 100000
      val to = 400000
      val by = 100000
      using(arrays(from, to, by)) curve("Array") in {
        _.filter(_ % 2 == 0)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") config (
        exec.minWarmupRuns -> 120,
        exec.maxWarmupRuns -> 240,
        exec.reinstantiation.frequency -> 4
      ) in {
        _.filter(_ % 2 == 0)
      }
      using(vectors(from, to, by)) curve("Vector") in {
        _.filter(_ % 2 == 0)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.minWarmupRuns -> 120,
        exec.maxWarmupRuns -> 240,
        exec.benchRuns -> 64,
        exec.independentSamples -> 6,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 6
      ) in {
        _.filter(_ % 2 == 0)
      }
      using(ranges(from, to, by)) curve("Range") in {
        _.filter(_ % 2 == 0)
      }
    }
    // Grouping into 10 buckets by modulus; smaller sizes (50k..200k) since
    // groupBy is the most allocation-heavy operation here.
    measure method "groupBy" config (
      exec.minWarmupRuns -> 80,
      exec.maxWarmupRuns -> 160,
      exec.benchRuns -> 36,
      exec.independentSamples -> 4,
      reports.regression.significance -> 1e-13,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 50000
      val to = 200000
      val by = 50000
      using(arrays(from, to, by)) curve("Array") in {
        _.groupBy(_ % 10)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in {
        _.groupBy(_ % 10)
      }
      using(vectors(from, to, by)) curve("Vector") in {
        _.groupBy(_ % 10)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 24,
        exec.independentSamples -> 4,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 4,
        exec.outliers.suspectPercent -> 50,
        exec.outliers.covMultiplier -> 2.0
      ) in {
        _.groupBy(_ % 10)
      }
      using(ranges(from, to, by)) curve("Range") in {
        _.groupBy(_ % 10)
      }
    }
    // Element-wise doubling; sizes 100k..400k.
    measure method "map" config (
      exec.minWarmupRuns -> 100,
      exec.maxWarmupRuns -> 200,
      exec.benchRuns -> 36,
      reports.regression.significance -> 1e-13,
      exec.independentSamples -> 4,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 100000
      val to = 400000
      val by = 100000
      using(arrays(from, to, by)) curve("Array") in {
        _.map(_ * 2)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in {
        _.map(_ * 2)
      }
      using(vectors(from, to, by)) curve("Vector") in {
        _.map(_ * 2)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 48,
        exec.independentSamples -> 4,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 6,
        exec.noise.magnitude -> 1.0
      ) in {
        _.map(_ * 2)
      }
      using(ranges(from, to, by)) curve("Range") in {
        _.map(_ * 2)
      }
    }
    // Each element expands to two; output is twice the input size; 50k..200k.
    measure method "flatMap" config (
      exec.minWarmupRuns -> 100,
      exec.maxWarmupRuns -> 200,
      exec.benchRuns -> 36,
      reports.regression.significance -> 1e-13,
      exec.independentSamples -> 4,
      reports.regression.noiseMagnitude -> 0.2
    ) in {
      val from = 50000
      val to = 200000
      val by = 50000
      using(arrays(from, to, by)) curve("Array") in {
        _.flatMap(x => 0 until 2)
      }
      using(arraybuffers(from, to, by)) curve("ArrayBuffer") config (
      ) in {
        _.flatMap(x => 0 until 2)
      }
      using(vectors(from, to, by)) curve("Vector") config (
        exec.minWarmupRuns -> 240,
        exec.maxWarmupRuns -> 480
      ) in {
        _.flatMap(x => 0 until 2)
      }
      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 64,
        exec.independentSamples -> 10,
        exec.reinstantiation.fullGC -> true,
        exec.reinstantiation.frequency -> 6
      ) in {
        _.flatMap(x => 0 until 2)
      }
      using(ranges(from, to, by)) curve("Range") in {
        _.flatMap(x => 0 until 2)
      }
    }
  }
}
| kjanosz/scalameter | src/test/scala/org/scalameter/collections/fast/TraversableBenchmarks.scala | Scala | bsd-3-clause | 6,933 |
package org.jetbrains.plugins.scala
package compiler
import java.io.{BufferedReader, File, InputStreamReader, Reader}
import java.util.concurrent.Future
import com.intellij.execution.TaskExecutor
import com.intellij.execution.process._
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.util.Consumer
import com.intellij.util.io.BaseDataReader
import _root_.scala.collection.JavaConverters._
/**
* User: Dmitry Naydanov
* Date: 2/11/14
*/
/** Runs the Scala compile-server `Main` class as a short-lived child process
  * (instead of connecting to a running compile server), streaming its
  * encoded stdout messages to a listener. */
class NonServerRunner(project: Project, errorHandler: Option[ErrorHandler] = None) {
  private val SERVER_CLASS_NAME = "org.jetbrains.jps.incremental.scala.remote.Main"
  // Classpath = JDK tools jar + compiler jars, joined with the platform separator.
  private def classPath(jdk: JDK) = (jdk.tools +: CompileServerLauncher.compilerJars).map(
    file => FileUtil toCanonicalPath file.getPath).mkString(File.pathSeparator)
  private val jvmParameters = CompileServerLauncher.jvmParameters
  /** Prepares (but does not start) the child process. `listener` receives each
    * decoded message chunk from the process' stdout.
    * NOTE(review): returns null when no compile-server JDK is configured —
    * callers must null-check. */
  def buildProcess(args: Seq[String], listener: String => Unit): CompilationProcess = {
    CompileServerLauncher.compilerJars.foreach(p => assert(p.exists(), p.getPath))
    CompileServerLauncher.compileServerJdk(project) match {
      case None =>
        null
      case Some(jdk) =>
        // java -cp <classpath> <jvm params> Main <args>
        val commands = ((FileUtil toCanonicalPath jdk.executable.getPath) +: "-cp" +: classPath(jdk) +: jvmParameters :+
          SERVER_CLASS_NAME).++(args)
        val builder = new ProcessBuilder(commands.asJava)
        new CompilationProcess {
          var myProcess: Option[Process] = None
          var myCallbacks: Seq[() => Unit] = Seq.empty
          override def addTerminationCallback(callback: => Unit) {
            myCallbacks = myCallbacks :+ (() => callback)
          }
          override def run() {
            val p = builder.start()
            myProcess = Some(p)
            // Reader thread delivering decoded chunks to the listener.
            val reader = new BufferedReader(new InputStreamReader(p.getInputStream))
            new MyBase64StreamReader(reader, listener)
            // Fire all registered callbacks once the process exits.
            val processWaitFor = new ProcessWaitFor(p, new TaskExecutor {
              override def executeTask(task: Runnable): Future[_] = BaseOSProcessHandler.ExecutorServiceHolder.submit(task)
            })
            processWaitFor.setTerminationCallback(new Consumer[Integer] {
              override def consume(t: Integer) {
                myCallbacks.foreach(c => c())
              }
            })
          }
          override def stop() {
            myProcess foreach (_.destroy())
            myProcess = None
          }
        }
    }
  }
  // NOTE(review): not referenced in this class — presumably kept for future
  // use; errorHandler is otherwise unused.
  private def error(message: String) {
    errorHandler.foreach(_.error(message))
  }
  /** Reads the child's stdout, which apparently carries base64 text framed by
    * '=' characters, and forwards each completed frame to `listener`. */
  private class MyBase64StreamReader(private val reader: Reader, listener: String => Unit) extends BaseDataReader(null) {
    start(project.getName)
    private val charBuffer = new Array[Char](8192)
    private val text = new StringBuilder
    def executeOnPooledThread(runnable: Runnable): Future[_] =
      BaseOSProcessHandler.ExecutorServiceHolder.submit(runnable)
    def onTextAvailable(text: String) {
      // Listener exceptions are deliberately swallowed so a faulty listener
      // cannot kill the reader thread.
      try {
        listener(text)
      }
      catch {
        case _: Exception =>
      }
    }
    override def close() {
      reader.close()
    }
    override def readAvailable(): Boolean = {
      var read = false
      while (reader.ready()) {
        val n = reader.read(charBuffer)
        if (n > 0) {
          read = true
          for (i <- 0 until n) {
            charBuffer(i) match {
              // Stray '=' with nothing buffered: skip.
              case '=' if i == 0 && text.isEmpty =>
              // '=' not followed by another '=': end of a frame. Re-append the
              // base64 padding needed to make the length a multiple of 4, then
              // deliver the frame.
              case '=' if i == n - 1 || charBuffer.charAt(i + 1) != '=' =>
                if ( (text.length +1) % 4 == 0 ) text.append('=') else if ( (text.length + 2) % 4 == 0 ) text.append("==")
                onTextAvailable(text.toString())
                text.clear()
              // Drop the server's plain-text "Listening..." greeting line.
              case '\n' if text.nonEmpty && text.startsWith("Listening") =>
                text.clear()
              case c => text.append(c)
            }
          }
        }
      }
      read
    }
  }
}
/** Receives error messages produced while running an external compilation process. */
trait ErrorHandler {
  def error(message: String): Unit
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/compiler/NonServerRunner.scala | Scala | apache-2.0 | 4,064 |
package slick.driver
import scala.language.{existentials, higherKinds}
import java.sql.{PreparedStatement, Statement}
import scala.collection.mutable.Builder
import scala.concurrent.Future
import scala.util.Try
import scala.util.control.NonFatal
import slick.SlickException
import slick.dbio._
import slick.ast._
import slick.ast.Util._
import slick.ast.TypeUtil.:@
import slick.backend.DatabaseComponent
import slick.jdbc._
import slick.lifted.{CompiledStreamingExecutable, Query, FlatShapeLevel, Shape}
import slick.profile.{FixedSqlStreamingAction, FixedSqlAction, SqlActionComponent}
import slick.relational.{ResultConverter, CompiledMapping}
import slick.util.{CloseableIterator, DumpInfo, SQLBuilder, ignoreFollowOnError}
trait JdbcActionComponent extends SqlActionComponent { driver: JdbcDriver =>
type DriverAction[+R, +S <: NoStream, -E <: Effect] = FixedSqlAction[R, S, E]
type StreamingDriverAction[+R, +T, -E <: Effect] = FixedSqlStreamingAction[R, T, E]
  /** A synchronous [[DriverAction]] defined by a fixed vector of SQL statements.
    * Subclasses implement `run(ctx, sql)`; `overrideStatements` yields a copy that
    * reports (and executes with) different SQL while reusing this action's run logic. */
  abstract class SimpleJdbcDriverAction[+R](_name: String, val statements: Vector[String]) extends SynchronousDatabaseAction[R, NoStream, Backend, Effect] with DriverAction[R, NoStream, Effect] { self =>
    def run(ctx: Backend#Context, sql: Vector[String]): R
    final override def getDumpInfo = super.getDumpInfo.copy(name = _name)
    final def run(ctx: Backend#Context): R = run(ctx, statements)
    final def overrideStatements(_statements: Iterable[String]): DriverAction[R, NoStream, Effect] = new SimpleJdbcDriverAction[R](_name, _statements.toVector) {
      // `statements` here resolves to the new instance's (overridden) statements,
      // so `self`'s run logic executes the replacement SQL.
      def run(ctx: Backend#Context, sql: Vector[String]): R = self.run(ctx, statements)
    }
  }
  /** Pins the session to the context and opens a JDBC transaction on it. */
  protected object StartTransaction extends SynchronousDatabaseAction[Unit, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Unit = {
      // Pin first so the same session is used for all actions until Commit/Rollback unpins.
      ctx.pin
      ctx.session.startInTransaction
    }
    def getDumpInfo = DumpInfo(name = "StartTransaction")
  }
  /** Commits the current transaction; always unpins the session, even on failure. */
  protected object Commit extends SynchronousDatabaseAction[Unit, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Unit =
      try ctx.session.endInTransaction(ctx.session.conn.commit()) finally ctx.unpin
    def getDumpInfo = DumpInfo(name = "Commit")
  }
  /** Rolls back the current transaction; always unpins the session, even on failure. */
  protected object Rollback extends SynchronousDatabaseAction[Unit, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Unit =
      try ctx.session.endInTransaction(ctx.session.conn.rollback()) finally ctx.unpin
    def getDumpInfo = DumpInfo(name = "Rollback")
  }
  /** Pushes a set of JDBC statement parameters onto the context's parameter stack;
    * paired with [[PopStatementParameters]]. */
  protected class PushStatementParameters(p: JdbcBackend.StatementParameters) extends SynchronousDatabaseAction[Unit, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Unit = ctx.pushStatementParameters(p)
    def getDumpInfo = DumpInfo(name = "PushStatementParameters", mainInfo = p.toString)
  }
  /** Pops the top set of JDBC statement parameters from the context's parameter stack. */
  protected object PopStatementParameters extends SynchronousDatabaseAction[Unit, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Unit = ctx.popStatementParameters
    def getDumpInfo = DumpInfo(name = "PopStatementParameters")
  }
  /** Sets the connection's transaction isolation level and returns the previous level
    * so the caller can restore it afterwards. */
  protected class SetTransactionIsolation(ti: Int) extends SynchronousDatabaseAction[Int, NoStream, Backend, Effect] {
    def run(ctx: Backend#Context): Int = {
      val c = ctx.session.conn
      val old = c.getTransactionIsolation
      c.setTransactionIsolation(ti)
      old
    }
    def getDumpInfo = DumpInfo(name = "SetTransactionIsolation")
  }
  /** JDBC-specific extension methods for any [[DBIOAction]]: transactional execution,
    * transaction isolation and per-run JDBC statement parameters. */
  class JdbcActionExtensionMethods[E <: Effect, R, S <: NoStream](a: DBIOAction[R, S, E]) {

    /** Run this Action transactionally. This does not guarantee failures to be atomic in the
      * presence of error handling combinators. If multiple `transactionally` combinators are
      * nested, only the outermost one will be backed by an actual database transaction. Depending
      * on the outcome of running the Action it surrounds, the transaction is committed if the
      * wrapped Action succeeds, or rolled back if the wrapped Action fails. When called on a
      * [[slick.dbio.SynchronousDatabaseAction]], this combinator gets fused into the
      * action. */
    def transactionally: DBIOAction[R, S, E with Effect.Transactional] = SynchronousDatabaseAction.fuseUnsafe(
      StartTransaction.andThen(a).cleanUp(eo => if(eo.isEmpty) Commit else Rollback)(DBIO.sameThreadExecutionContext)
        .asInstanceOf[DBIOAction[R, S, E with Effect.Transactional]]
    )

    /** Run this Action with the specified transaction isolation level. This should be used around
      * the outermost `transactionally` Action. The semantics of using it inside a transaction are
      * database-dependent. It does not create a transaction by itself but it pins the session. */
    def withTransactionIsolation(ti: TransactionIsolation): DBIOAction[R, S, E] = {
      // Set the new level, run `a`, then always restore the previous level.
      val isolated =
        (new SetTransactionIsolation(ti.intValue)).flatMap(old => a.andFinally(new SetTransactionIsolation(old)))(DBIO.sameThreadExecutionContext)
      val fused =
        if(a.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) SynchronousDatabaseAction.fuseUnsafe(isolated)
        else isolated
      fused.withPinnedSession
    }

    /** Run this Action with the given statement parameters. Any unset parameter will use the
      * current value. The following parameters can be set:
      *
      * @param rsType The JDBC `ResultSetType`
      * @param rsConcurrency The JDBC `ResultSetConcurrency`
      * @param rsHoldability The JDBC `ResultSetHoldability`
      * @param statementInit A function which is run on every `Statement` or `PreparedStatement`
      *                      directly after creating it. This can be used to set additional
      *                      statement parameters (e.g. `setQueryTimeout`). When multiple
      *                      `withStatementParameters` Actions are nested, all init functions
      *                      are run, starting with the outermost one.
      * @param fetchSize The fetch size for all statements or 0 for the default. */
    def withStatementParameters(rsType: ResultSetType = null,
                                rsConcurrency: ResultSetConcurrency = null,
                                rsHoldability: ResultSetHoldability = null,
                                statementInit: Statement => Unit = null,
                                fetchSize: Int = 0): DBIOAction[R, S, E] =
      (new PushStatementParameters(JdbcBackend.StatementParameters(rsType, rsConcurrency, rsHoldability, statementInit, fetchSize))).
        andThen(a).andFinally(PopStatementParameters)
  }
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Query Actions
///////////////////////////////////////////////////////////////////////////////////////////////
  // Driver-overridable instantiation hooks for the query action extension methods.
  type QueryActionExtensionMethods[R, S <: NoStream] = QueryActionExtensionMethodsImpl[R, S]
  type StreamingQueryActionExtensionMethods[R, T] = StreamingQueryActionExtensionMethodsImpl[R, T]
  def createQueryActionExtensionMethods[R, S <: NoStream](tree: Node, param: Any): QueryActionExtensionMethods[R, S] =
    new QueryActionExtensionMethods[R, S](tree, param)
  def createStreamingQueryActionExtensionMethods[R, T](tree: Node, param: Any): StreamingQueryActionExtensionMethods[R, T] =
    new StreamingQueryActionExtensionMethods[R, T](tree, param)
  /** A streaming action that opens an updatable result set and repeatedly emits the same
    * [[ResultSetMutator]] instance (in a new state each time), allowing rows to be read,
    * updated, inserted and deleted in place. With `sendEndMarker` an extra event is emitted
    * after the last row so that additional rows can still be inserted. */
  class MutatingResultAction[T](rsm: ResultSetMapping, elemType: Type, collectionType: CollectionType, sql: String, param: Any, sendEndMarker: Boolean) extends SynchronousDatabaseAction[Nothing, Streaming[ResultSetMutator[T]], Backend, Effect] with DriverAction[Nothing, Streaming[ResultSetMutator[T]], Effect] { streamingAction =>
    class Mutator(val prit: PositionedResultIterator[T], val bufferNext: Boolean, val inv: QueryInvokerImpl[T]) extends ResultSetMutator[T] {
      val pr = prit.pr
      val rs = pr.rs
      var current: T = _
      /** The state of the stream. 0 = in result set, 1 = before end marker, 2 = after end marker. */
      var state = 0
      def row = if(state > 0) throw new SlickException("After end of result set") else current
      def row_=(value: T): Unit = {
        if(state > 0) throw new SlickException("After end of result set")
        pr.restart
        inv.updateRowValues(pr, value)
        rs.updateRow()
      }
      def += (value: T): Unit = {
        rs.moveToInsertRow()
        pr.restart
        inv.updateRowValues(pr, value)
        rs.insertRow()
        // While still positioned inside the result set, return from the insert row.
        if(state == 0) rs.moveToCurrentRow()
      }
      def delete: Unit = {
        if(state > 0) throw new SlickException("After end of result set")
        rs.deleteRow()
        if(invokerPreviousAfterDelete) rs.previous()
      }
      /** Emit up to `limit` events; returns `this` while the stream is unfinished, else null.
        * On any non-fatal error the underlying iterator is closed before rethrowing. */
      def emitStream(ctx: Backend#StreamingContext, limit: Long): this.type = {
        var count = 0L
        try {
          while(count < limit && state == 0) {
            if(!pr.nextRow) state = if(sendEndMarker) 1 else 2
            if(state == 0) {
              current = inv.extractValue(pr)
              count += 1
              ctx.emit(this)
            }
          }
          if(count < limit && state == 1) {
            ctx.emit(this)
            state = 2
          }
        } catch {
          case NonFatal(ex) =>
            try prit.close() catch ignoreFollowOnError
            throw ex
        }
        if(state < 2) this else null
      }
      def end = if(state > 1) throw new SlickException("After end of result set") else state > 0
      override def toString = s"Mutator(state = $state, current = $current)"
    }
    type StreamState = Mutator
    def statements = List(sql)
    // This action has no non-streaming interpretation.
    def run(ctx: Backend#Context) =
      throw new SlickException("The result of .mutate can only be used in a streaming way")
    override def emitStream(ctx: Backend#StreamingContext, limit: Long, state: StreamState): StreamState = {
      // Lazily create the mutator on the first emit; subsequent calls resume its state.
      val mu = if(state ne null) state else {
        val inv = createQueryInvoker[T](rsm, param, sql)
        new Mutator(
          inv.results(0, defaultConcurrency = invokerMutateConcurrency, defaultType = invokerMutateType)(ctx.session).right.get,
          ctx.bufferNext,
          inv)
      }
      mu.emitStream(ctx, limit)
    }
    override def cancelStream(ctx: Backend#StreamingContext, state: StreamState): Unit = state.prit.close()
    override def getDumpInfo = super.getDumpInfo.copy(name = "mutate")
    def overrideStatements(_statements: Iterable[String]): MutatingResultAction[T] =
      new MutatingResultAction[T](rsm, elemType, collectionType, _statements.head, param, sendEndMarker)
  }
  class QueryActionExtensionMethodsImpl[R, S <: NoStream](tree: Node, param: Any) extends super.QueryActionExtensionMethodsImpl[R, S] {
    /** An action that runs the compiled query: a streaming/collection-building invoker for
      * collection-typed trees, or a single-value fetch for `First` trees. */
    def result: DriverAction[R, S, Effect.Read] = {
      // Extract the SQL from the compiled tree, resolving compile-time parameter
      // switches against the current `param`.
      def findSql(n: Node): String = n match {
        case c: CompiledStatement => c.extra.asInstanceOf[SQLBuilder.Result].sql
        case ParameterSwitch(cases, default) =>
          findSql(cases.find { case (f, n) => f(param) }.map(_._2).getOrElse(default))
      }
      (tree match {
        case (rsm @ ResultSetMapping(_, compiled, CompiledMapping(_, elemType))) :@ (ct: CollectionType) =>
          val sql = findSql(compiled)
          new StreamingInvokerAction[R, Any, Effect] { streamingAction =>
            protected[this] def createInvoker(sql: Iterable[String]) = createQueryInvoker(rsm, param, sql.head)
            protected[this] def createBuilder = ct.cons.createBuilder(ct.elementType.classTag).asInstanceOf[Builder[Any, R]]
            def statements = List(sql)
            override def getDumpInfo = super.getDumpInfo.copy(name = "result")
          }
        case First(rsm @ ResultSetMapping(_, compiled, _)) =>
          val sql = findSql(compiled)
          new SimpleJdbcDriverAction[R]("result", Vector(sql)) {
            def run(ctx: Backend#Context, sql: Vector[String]): R =
              createQueryInvoker[R](rsm, param, sql.head).first(ctx.session)
          }
      }).asInstanceOf[DriverAction[R, S, Effect.Read]]
    }
  }
  class StreamingQueryActionExtensionMethodsImpl[R, T](tree: Node, param: Any) extends QueryActionExtensionMethodsImpl[R, Streaming[T]](tree, param) with super.StreamingQueryActionExtensionMethodsImpl[R, T] {
    override def result: StreamingDriverAction[R, T, Effect.Read] = super.result.asInstanceOf[StreamingDriverAction[R, T, Effect.Read]]

    /** Same as `mutate(sendEndMarker = false)`. */
    def mutate: DriverAction[Nothing, Streaming[ResultSetMutator[T]], Effect.Read with Effect.Write] = mutate(false)

    /** Create an Action that can be streamed in order to modify a mutable result set. All stream
      * elements will be the same [[slick.jdbc.ResultSetMutator]] object but it is in a different state each
      * time. The resulting stream is always non-buffered and events can be processed either
      * synchronously or asynchronously (but all processing must happen in sequence).
      *
      * @param sendEndMarker If set to true, an extra event is sent after the end of the result
      *                      set, providing you with a chance to insert additional rows after
      *                      seeing all results. Only `end` (to check for this special event) and
      *                      `insert` may be called in the ResultSetMutator in this case. */
    def mutate(sendEndMarker: Boolean = false): DriverAction[Nothing, Streaming[ResultSetMutator[T]], Effect.Read with Effect.Write] = {
      val sql = tree.findNode(_.isInstanceOf[CompiledStatement]).get
        .asInstanceOf[CompiledStatement].extra.asInstanceOf[SQLBuilder.Result].sql
      val (rsm @ ResultSetMapping(_, _, CompiledMapping(_, elemType))) :@ (ct: CollectionType) = tree
      new MutatingResultAction[T](rsm, elemType, ct, sql, param, sendEndMarker)
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Delete Actions
///////////////////////////////////////////////////////////////////////////////////////////////
  // Driver-overridable instantiation hook for the delete action extension methods.
  type DeleteActionExtensionMethods = DeleteActionExtensionMethodsImpl
  def createDeleteActionExtensionMethods(tree: Node, param: Any): DeleteActionExtensionMethods =
    new DeleteActionExtensionMethods(tree, param)
  class DeleteActionExtensionMethodsImpl(tree: Node, param: Any) {
    /** An Action that deletes the data selected by this query.
      * Returns the JDBC update count (number of deleted rows). */
    def delete: DriverAction[Int, NoStream, Effect.Write] = {
      val ResultSetMapping(_, CompiledStatement(_, sres: SQLBuilder.Result, _), _) = tree
      new SimpleJdbcDriverAction[Int]("delete", Vector(sres.sql)) {
        def run(ctx: Backend#Context, sql: Vector[String]): Int = ctx.session.withPreparedStatement(sql.head) { st =>
          // Bind the query parameters starting at JDBC index 1.
          sres.setter(st, 1, param)
          st.executeUpdate
        }
      }
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Schema Actions
///////////////////////////////////////////////////////////////////////////////////////////////
  // Driver-overridable instantiation hook for the schema (DDL) action extension methods.
  type SchemaActionExtensionMethods = SchemaActionExtensionMethodsImpl
  def createSchemaActionExtensionMethods(schema: SchemaDescription): SchemaActionExtensionMethods =
    new SchemaActionExtensionMethodsImpl(schema)
  class SchemaActionExtensionMethodsImpl(schema: SchemaDescription) extends super.SchemaActionExtensionMethodsImpl {
    /** Executes all CREATE statements of the schema, in order. */
    def create: DriverAction[Unit, NoStream, Effect.Schema] = new SimpleJdbcDriverAction[Unit]("schema.create", schema.createStatements.toVector) {
      def run(ctx: Backend#Context, sql: Vector[String]): Unit =
        for(s <- sql) ctx.session.withPreparedStatement(s)(_.execute)
    }
    /** Executes all DROP statements of the schema, in order. */
    def drop: DriverAction[Unit, NoStream, Effect.Schema] = new SimpleJdbcDriverAction[Unit]("schema.drop", schema.dropStatements.toVector) {
      def run(ctx: Backend#Context, sql: Vector[String]): Unit =
        for(s <- sql) ctx.session.withPreparedStatement(s)(_.execute)
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Update Actions
///////////////////////////////////////////////////////////////////////////////////////////////
  // Driver-overridable instantiation hook for the update action extension methods.
  type UpdateActionExtensionMethods[T] = UpdateActionExtensionMethodsImpl[T]
  def createUpdateActionExtensionMethods[T](tree: Node, param: Any): UpdateActionExtensionMethods[T] =
    new UpdateActionExtensionMethodsImpl[T](tree, param)
  class UpdateActionExtensionMethodsImpl[T](tree: Node, param: Any) {
    protected[this] val ResultSetMapping(_,
      CompiledStatement(_, sres: SQLBuilder.Result, _),
      CompiledMapping(_converter, _)) = tree
    protected[this] val converter = _converter.asInstanceOf[ResultConverter[JdbcResultConverterDomain, T]]
    /** An Action that updates the data selected by this query.
      * Returns the JDBC update count (number of updated rows). */
    def update(value: T): DriverAction[Int, NoStream, Effect.Write] = {
      new SimpleJdbcDriverAction[Int]("update", Vector(sres.sql)) {
        def run(ctx: Backend#Context, sql: Vector[String]): Int = ctx.session.withPreparedStatement(sql.head) { st =>
          st.clearParameters
          // SET clause values come first, then the WHERE parameters after them.
          converter.set(value, st)
          sres.setter(st, converter.width+1, param)
          st.executeUpdate
        }
      }
    }
    /** Get the statement used by `update` */
    def updateStatement: String = sres.sql
  }
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Insert Actions
///////////////////////////////////////////////////////////////////////////////////////////////
  // Driver-overridable instantiation hooks for the insert action composers.
  type InsertActionExtensionMethods[T] = CountingInsertActionComposer[T]
  def createInsertActionExtensionMethods[T](compiled: CompiledInsert): InsertActionExtensionMethods[T] =
    new CountingInsertActionComposerImpl[T](compiled)
  def createReturningInsertActionComposer[U, QR, RU](compiled: CompiledInsert, keys: Node, mux: (U, QR) => RU): ReturningInsertActionComposer[U, RU] =
    new ReturningInsertActionComposerImpl[U, QR, RU](compiled, keys, mux)
  // Upsert strategy: use the database's native insert-or-update when the driver declares
  // the capability, otherwise emulate it client-side (wrapped in a transaction).
  protected lazy val useServerSideUpsert = capabilities contains JdbcProfile.capabilities.insertOrUpdate
  protected lazy val useTransactionForUpsert = !useServerSideUpsert
  protected lazy val useServerSideUpsertReturning = useServerSideUpsert
  protected lazy val useTransactionForUpsertReturning = !useServerSideUpsertReturning
//////////////////////////////////////////////////////////// InsertActionComposer Traits
/** Extension methods to generate the JDBC-specific insert actions. */
  /** Extension methods to generate the JDBC-specific insert actions. */
  trait SimpleInsertActionComposer[U] extends super.InsertActionExtensionMethodsImpl[U] {

    /** The return type for `insertOrUpdate` operations */
    type SingleInsertOrUpdateResult

    /** Get the SQL statement for a standard (soft) insert */
    def insertStatement: String

    /** Get the SQL statement for a forced insert */
    def forceInsertStatement: String

    /** Insert a single row, skipping AutoInc columns. */
    def += (value: U): DriverAction[SingleInsertResult, NoStream, Effect.Write]

    /** Insert a single row, including AutoInc columns. This is not supported
      * by all database engines (see
      * [[slick.driver.JdbcProfile.capabilities.forceInsert]]). */
    def forceInsert(value: U): DriverAction[SingleInsertResult, NoStream, Effect.Write]

    /** Insert multiple rows, skipping AutoInc columns.
      * Uses JDBC's batch update feature if supported by the JDBC driver.
      * Returns Some(rowsAffected), or None if the database returned no row
      * count for some part of the batch. If any part of the batch fails, an
      * exception is thrown. */
    def ++= (values: Iterable[U]): DriverAction[MultiInsertResult, NoStream, Effect.Write]

    /** Insert multiple rows, including AutoInc columns.
      * This is not supported by all database engines (see
      * [[slick.driver.JdbcProfile.capabilities.forceInsert]]).
      * Uses JDBC's batch update feature if supported by the JDBC driver.
      * Returns Some(rowsAffected), or None if the database returned no row
      * count for some part of the batch. If any part of the batch fails, an
      * exception is thrown. */
    def forceInsertAll(values: Iterable[U]): DriverAction[MultiInsertResult, NoStream, Effect.Write]

    /** Insert a single row if its primary key does not exist in the table,
      * otherwise update the existing record. */
    def insertOrUpdate(value: U): DriverAction[SingleInsertOrUpdateResult, NoStream, Effect.Write]
  }
/** Extension methods to generate the JDBC-specific insert actions. */
  /** Extension methods to generate the JDBC-specific insert actions,
    * including inserts of data produced by another query. */
  trait InsertActionComposer[U] extends SimpleInsertActionComposer[U] {

    /** The result type of operations that insert data produced by another query */
    type QueryInsertResult

    /** Get the SQL statement for inserting a single row from a scalar expression */
    def forceInsertStatementFor[TT](c: TT)(implicit shape: Shape[_ <: FlatShapeLevel, TT, U, _]): String

    /** Get the SQL statement for inserting data produced by another query */
    def forceInsertStatementFor[TT, C[_]](query: Query[TT, U, C]): String

    /** Get the SQL statement for inserting data produced by another query */
    def forceInsertStatementFor[TT, C[_]](compiledQuery: CompiledStreamingExecutable[Query[TT, U, C], _, _]): String

    /** Insert a single row from a scalar expression */
    def forceInsertExpr[TT](c: TT)(implicit shape: Shape[_ <: FlatShapeLevel, TT, U, _]): DriverAction[QueryInsertResult, NoStream, Effect.Write]

    /** Insert data produced by another query */
    def forceInsertQuery[TT, C[_]](query: Query[TT, U, C]): DriverAction[QueryInsertResult, NoStream, Effect.Write]

    /** Insert data produced by another query */
    def forceInsertQuery[TT, C[_]](compiledQuery: CompiledStreamingExecutable[Query[TT, U, C], _, _]): DriverAction[QueryInsertResult, NoStream, Effect.Write]
  }
/** An InsertInvoker that returns the number of affected rows. */
  /** An InsertInvoker that returns the number of affected rows. */
  trait CountingInsertActionComposer[U] extends InsertActionComposer[U] {
    type SingleInsertResult = Int
    type MultiInsertResult = Option[Int]
    type SingleInsertOrUpdateResult = Int
    type QueryInsertResult = Int

    /** Add a mapping from the inserted values and the generated key to compute a new return value. */
    def returning[RT, RU, C[_]](value: Query[RT, RU, C]): ReturningInsertActionComposer[U, RU]
  }
/** An InsertActionComposer that returns generated keys or other columns. */
  /** An InsertActionComposer that returns generated keys or other columns. */
  trait ReturningInsertActionComposer[U, RU] extends InsertActionComposer[U] with IntoInsertActionComposer[U, RU] { self =>

    /** Specifies a mapping from inserted values and generated keys to a desired value.
      * @param f Function that maps inserted values and generated keys to a desired value.
      * @tparam R target type of the mapping */
    def into[R](f: (U, RU) => R): IntoInsertActionComposer[U, R]
  }
/** An InsertActionComposer that returns a mapping of the inserted and generated data. */
  /** An InsertActionComposer that returns a mapping of the inserted and generated data. */
  trait IntoInsertActionComposer[U, RU] extends SimpleInsertActionComposer[U] { self =>
    type SingleInsertResult = RU
    type MultiInsertResult = Seq[RU]
    type SingleInsertOrUpdateResult = Option[RU]
    type QueryInsertResult = Seq[RU]
  }
//////////////////////////////////////////////////////////// InsertActionComposer Implementations
  /** Shared implementation of the insert action composers. Subclasses define the
    * `ret*` hooks, which turn raw JDBC update counts (and, for the returning variant,
    * generated keys) into the composer's result types. */
  protected abstract class InsertActionComposerImpl[U](val compiled: CompiledInsert) extends InsertActionComposer[U] {
    // Build an INSERT ... SELECT statement from a (compiled) source query.
    protected[this] def buildQueryBasedInsert[TT, C[_]](query: Query[TT, U, C]): SQLBuilder.Result =
      compiled.forceInsert.ibr.buildInsert(queryCompiler.run(query.toNode).tree)
    protected[this] def buildQueryBasedInsert[TT, C[_]](compiledQuery: CompiledStreamingExecutable[Query[TT, U, C], _, _]): SQLBuilder.Result =
      compiled.forceInsert.ibr.buildInsert(compiledQuery.compiledQuery)
    def insertStatement = compiled.standardInsert.sql
    def forceInsertStatement = compiled.forceInsert.sql
    def += (value: U): DriverAction[SingleInsertResult, NoStream, Effect.Write] =
      new SingleInsertAction(compiled.standardInsert, value)
    def forceInsert(value: U): DriverAction[SingleInsertResult, NoStream, Effect.Write] =
      new SingleInsertAction(compiled.forceInsert, value)
    def ++= (values: Iterable[U]): DriverAction[MultiInsertResult, NoStream, Effect.Write] =
      new MultiInsertAction(compiled.standardInsert, values)
    def forceInsertAll(values: Iterable[U]): DriverAction[MultiInsertResult, NoStream, Effect.Write] =
      new MultiInsertAction(compiled.forceInsert, values)
    def insertOrUpdate(value: U): DriverAction[SingleInsertOrUpdateResult, NoStream, Effect.Write] =
      new InsertOrUpdateAction(value)
    def forceInsertStatementFor[TT](c: TT)(implicit shape: Shape[_ <: FlatShapeLevel, TT, U, _]) =
      buildQueryBasedInsert(Query(c)(shape)).sql
    def forceInsertStatementFor[TT, C[_]](query: Query[TT, U, C]) =
      buildQueryBasedInsert(query).sql
    def forceInsertStatementFor[TT, C[_]](compiledQuery: CompiledStreamingExecutable[Query[TT, U, C], _, _]) =
      buildQueryBasedInsert(compiledQuery).sql
    def forceInsertExpr[TT](c: TT)(implicit shape: Shape[_ <: FlatShapeLevel, TT, U, _]): DriverAction[QueryInsertResult, NoStream, Effect.Write] =
      new InsertQueryAction(buildQueryBasedInsert((Query(c)(shape))), null)
    def forceInsertQuery[TT, C[_]](query: Query[TT, U, C]): DriverAction[QueryInsertResult, NoStream, Effect.Write] =
      new InsertQueryAction(buildQueryBasedInsert(query), null)
    def forceInsertQuery[TT, C[_]](compiledQuery: CompiledStreamingExecutable[Query[TT, U, C], _, _]): DriverAction[QueryInsertResult, NoStream, Effect.Write] =
      new InsertQueryAction(buildQueryBasedInsert(compiledQuery), compiledQuery.param)
    // Overridable upsert strategy; defaults come from the driver-level flags.
    protected def useServerSideUpsert = driver.useServerSideUpsert
    protected def useTransactionForUpsert = driver.useTransactionForUpsert
    protected def useBatchUpdates(implicit session: Backend#Session) = session.capabilities.supportsBatchUpdates
    // Result-mapping hooks implemented by the counting / returning subclasses.
    protected def retOne(st: Statement, value: U, updateCount: Int): SingleInsertResult
    protected def retMany(values: Iterable[U], individual: Seq[SingleInsertResult]): MultiInsertResult
    protected def retManyBatch(st: Statement, values: Iterable[U], updateCounts: Array[Int]): MultiInsertResult
    protected def retOneInsertOrUpdate(st: Statement, value: U, updateCount: Int): SingleInsertOrUpdateResult
    protected def retOneInsertOrUpdateFromInsert(st: Statement, value: U, updateCount: Int): SingleInsertOrUpdateResult
    protected def retOneInsertOrUpdateFromUpdate: SingleInsertOrUpdateResult
    protected def retQuery(st: Statement, updateCount: Int): QueryInsertResult
    // Statement-preparation hooks; the returning subclass overrides preparedInsert
    // to request generated keys.
    protected def preparedInsert[T](sql: String, session: Backend#Session)(f: PreparedStatement => T) =
      session.withPreparedStatement(sql)(f)
    protected def preparedOther[T](sql: String, session: Backend#Session)(f: PreparedStatement => T) =
      session.withPreparedStatement(sql)(f)
    /** Inserts a single row with the given compiled insert artifacts. */
    class SingleInsertAction(a: compiled.Artifacts, value: U) extends SimpleJdbcDriverAction[SingleInsertResult]("SingleInsertAction", Vector(a.sql)) {
      def run(ctx: Backend#Context, sql: Vector[String]) = preparedInsert(a.sql, ctx.session) { st =>
        st.clearParameters()
        a.converter.set(value, st)
        val count = st.executeUpdate()
        retOne(st, value, count)
      }
    }
    /** Inserts many rows, via JDBC batching when supported and worthwhile,
      * otherwise one statement execution per row. */
    class MultiInsertAction(a: compiled.Artifacts, values: Iterable[U]) extends SimpleJdbcDriverAction[MultiInsertResult]("MultiInsertAction", Vector(a.sql)) {
      def run(ctx: Backend#Context, sql: Vector[String]) = {
        val sql1 = sql.head
        // Skip batching when unsupported, or for 0/1 rows of an IndexedSeq (no benefit).
        if(!useBatchUpdates(ctx.session) || (values.isInstanceOf[IndexedSeq[_]] && values.asInstanceOf[IndexedSeq[_]].length < 2))
          retMany(values, values.map { v =>
            preparedInsert(sql1, ctx.session) { st =>
              st.clearParameters()
              a.converter.set(v, st)
              retOne(st, v, st.executeUpdate())
            }
          }(collection.breakOut): Vector[SingleInsertResult])
        else preparedInsert(a.sql, ctx.session) { st =>
          st.clearParameters()
          for(value <- values) {
            a.converter.set(value, st)
            st.addBatch()
          }
          val counts = st.executeBatch()
          retManyBatch(st, values, counts)
        }
      }
    }
    /** Insert-or-update of a single row: native upsert when available, otherwise
      * emulated via SELECT-check + UPDATE/INSERT (optionally inside a transaction). */
    class InsertOrUpdateAction(value: U) extends SimpleJdbcDriverAction[SingleInsertOrUpdateResult]("InsertOrUpdateAction",
      if(useServerSideUpsert) Vector(compiled.upsert.sql) else Vector(compiled.checkInsert.sql, compiled.updateInsert.sql, compiled.standardInsert.sql)) {
      def run(ctx: Backend#Context, sql: Vector[String]) = {
        def f: SingleInsertOrUpdateResult =
          if(useServerSideUpsert) nativeUpsert(value, sql.head)(ctx.session) else emulate(value, sql(0), sql(1), sql(2))(ctx.session)
        if(useTransactionForUpsert) ctx.session.withTransaction(f) else f
      }
      protected def nativeUpsert(value: U, sql: String)(implicit session: Backend#Session): SingleInsertOrUpdateResult =
        preparedInsert(sql, session) { st =>
          st.clearParameters()
          compiled.upsert.converter.set(value, st)
          val count = st.executeUpdate()
          retOneInsertOrUpdate(st, value, count)
        }
      protected def emulate(value: U, checkSql: String, updateSql: String, insertSql: String)(implicit session: Backend#Session): SingleInsertOrUpdateResult = {
        // Probe for an existing row by key, then update or insert accordingly.
        val found = preparedOther(checkSql, session) { st =>
          st.clearParameters()
          compiled.checkInsert.converter.set(value, st)
          val rs = st.executeQuery()
          try rs.next() finally rs.close()
        }
        if(found) preparedOther(updateSql, session) { st =>
          st.clearParameters()
          compiled.updateInsert.converter.set(value, st)
          st.executeUpdate()
          retOneInsertOrUpdateFromUpdate
        } else preparedInsert(insertSql, session) { st =>
          st.clearParameters()
          compiled.standardInsert.converter.set(value, st)
          val count = st.executeUpdate()
          retOneInsertOrUpdateFromInsert(st, value, count)
        }
      }
    }
    /** Executes an INSERT ... SELECT built from another query. */
    class InsertQueryAction(sbr: SQLBuilder.Result, param: Any) extends SimpleJdbcDriverAction[QueryInsertResult]("InsertQueryAction", Vector(sbr.sql)) {
      def run(ctx: Backend#Context, sql: Vector[String]) = preparedInsert(sql.head, ctx.session) { st =>
        st.clearParameters()
        sbr.setter(st, 1, param)
        retQuery(st, st.executeUpdate())
      }
    }
  }
  /** Insert composer whose results are plain JDBC update counts. */
  protected class CountingInsertActionComposerImpl[U](compiled: CompiledInsert) extends InsertActionComposerImpl[U](compiled) with CountingInsertActionComposer[U] {
    def returning[RT, RU, C[_]](value: Query[RT, RU, C]): ReturningInsertActionComposer[U, RU] =
      createReturningInsertActionComposer[U, RU, RU](compiled, value.toNode, (_, r) => r)
    protected def retOne(st: Statement, value: U, updateCount: Int) = updateCount
    // insertOrUpdate always affects exactly one logical row.
    protected def retOneInsertOrUpdate(st: Statement, value: U, updateCount: Int) = 1
    protected def retOneInsertOrUpdateFromInsert(st: Statement, value: U, updateCount: Int) = 1
    protected def retOneInsertOrUpdateFromUpdate = 1
    protected def retQuery(st: Statement, updateCount: Int) = updateCount
    protected def retMany(values: Iterable[U], individual: Seq[SingleInsertResult]) = Some(individual.sum)
    /** Sum the batch update counts; None if the driver reported an unknown count for
      * any element, an exception if any element failed. */
    protected def retManyBatch(st: Statement, values: Iterable[U], updateCounts: Array[Int]) = {
      var unknown = false
      var count = 0
      for((res, idx) <- updateCounts.zipWithIndex) res match {
        case Statement.SUCCESS_NO_INFO => unknown = true
        case Statement.EXECUTE_FAILED => throw new SlickException("Failed to insert row #" + (idx+1))
        case i => count += i
      }
      if(unknown) None else Some(count)
    }
  }
  /** Insert composer that reads generated keys back from the JDBC statement and maps
    * them (together with the inserted value) through `mux` to the final result. */
  protected class ReturningInsertActionComposerImpl[U, QR, RU](compiled: CompiledInsert, val keys: Node, val mux: (U, QR) => RU) extends InsertActionComposerImpl[U](compiled) with ReturningInsertActionComposer[U, RU] {
    def into[R](f: (U, RU) => R): IntoInsertActionComposer[U, R] =
      createReturningInsertActionComposer[U, QR, R](compiled, keys, (v, r) => f(v, mux(v, r)))
    override protected def useServerSideUpsert = driver.useServerSideUpsertReturning
    override protected def useTransactionForUpsert = driver.useTransactionForUpsertReturning
    protected def checkInsertOrUpdateKeys: Unit =
      if(keyReturnOther) throw new SlickException("Only a single AutoInc column may be returned from an insertOrUpdate call")
    // Wraps the statement's generated-keys result set in an Invoker over the key type.
    protected def buildKeysResult(st: Statement): Invoker[QR] =
      ResultSetInvoker[QR](_ => st.getGeneratedKeys)(pr => keyConverter.read(pr.rs).asInstanceOf[QR])
    // Returning keys from batch inserts is generally not supported
    override protected def useBatchUpdates(implicit session: Backend#Session) = false
    protected lazy val (keyColumns, keyConverter, keyReturnOther) = compiled.buildReturnColumns(keys)
    // Prepare inserts so the driver reports the generated key columns.
    override protected def preparedInsert[T](sql: String, session: Backend#Session)(f: PreparedStatement => T) =
      session.withPreparedInsertStatement(sql, keyColumns.toArray)(f)
    protected def retOne(st: Statement, value: U, updateCount: Int) = mux(value, buildKeysResult(st).first(null))
    protected def retMany(values: Iterable[U], individual: Seq[SingleInsertResult]) = individual
    protected def retManyBatch(st: Statement, values: Iterable[U], updateCounts: Array[Int]) =
      (values, buildKeysResult(st).buildColl[Vector](null, implicitly)).zipped.map(mux)(collection.breakOut)
    protected def retQuery(st: Statement, updateCount: Int) =
      buildKeysResult(st).buildColl[Vector](null, implicitly).asInstanceOf[QueryInsertResult] // Not used with "into"
    protected def retOneInsertOrUpdate(st: Statement, value: U, updateCount: Int): SingleInsertOrUpdateResult =
      if(updateCount != 1) None else buildKeysResult(st).firstOption(null).map(r => mux(value, r))
    protected def retOneInsertOrUpdateFromInsert(st: Statement, value: U, updateCount: Int): SingleInsertOrUpdateResult =
      Some(mux(value, buildKeysResult(st).first(null)))
    protected def retOneInsertOrUpdateFromUpdate: SingleInsertOrUpdateResult = None
  }
}
| dotta/slick | slick/src/main/scala/slick/driver/JdbcActionComponent.scala | Scala | bsd-2-clause | 34,489 |
object Test extends App {
  /** Invokes the supplied thunk (for its effects), then always yields 5. */
  def test(f: () => Int) = {
    val ignored = f()
    5
  }

  println(test { () =>
    println("hi there")
    0
  })
}
| som-snytt/dotty | tests/run/t3726.scala | Scala | apache-2.0 | 134 |
package com.kostassoid.materialist
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Runnable that pumps batches from `route.source` into `route.target` until
 * `shutdown()` is requested, flushing the target and committing the source
 * every `checkpointInterval` milliseconds.
 */
class RouteWorker(route: Route, checkpointInterval: Long) extends Runnable with Logging {
  // Cooperative stop flag, checked between pulled batches.
  val mustShutdown = new AtomicBoolean(false)
  /** Asks the run loop to stop after the batch currently being pulled. */
  def shutdown() = {
    mustShutdown.set(true)
  }
  def run() = {
    log.info(s"Starting route worker for ${route.source} -> ${route.target}")
    try {
      log.info(s"Starting source ${route.source}")
      route.source.start()
      log.info(s"Starting target ${route.target}")
      route.target.start()
      var checkpointTime = System.currentTimeMillis()
      // Pull forever, stop once shutdown is requested, and skip empty batches.
      Iterator.continually(route.source.pull())
        .takeWhile(_ ⇒ !mustShutdown.get())
        .filter(_.nonEmpty)
        .foreach { batch ⇒
          batch filter { route.operationPredicate } foreach { route.target.push }
          // Checkpoint: flush the target before committing the source
          // (presumably so the committed position never outruns flushed data).
          if (System.currentTimeMillis() - checkpointTime > checkpointInterval) {
            log.trace(s"Checkpoint for ${route.source} -> ${route.target}")
            route.target.flush()
            route.source.commit()
            checkpointTime = System.currentTimeMillis()
          }
        }
      // Final checkpoint on an orderly shutdown.
      route.target.flush()
      route.source.commit()
    } catch {
      case _: InterruptedException ⇒ // ok
      case e: Throwable ⇒
        log.error("Unexpected exception. Closing.", e)
    } finally {
      // Always release source/target resources, even after a failure.
      log.info(s"Stopping source ${route.source}")
      route.source.stop()
      log.info(s"Stopping target ${route.target}")
      route.target.stop()
    }
  }
}
| Kostassoid/materialist | src/main/scala/com/kostassoid/materialist/RouteWorker.scala | Scala | apache-2.0 | 1,515 |
package core.db
import java.util.UUID
import scalikejdbc.ParameterBinderFactory
trait PostgreSQLExtensions {

  /** Binds `java.util.UUID` parameters by delegating to `PreparedStatement#setObject`. */
  implicit val uuidParameterBinderFactory: ParameterBinderFactory[UUID] =
    ParameterBinderFactory { uuid => (statement, position) =>
      statement.setObject(position, uuid)
    }
}
| lymr/fun-chat | fun-chat-server/src/main/scala/core/db/PostgreSQLExtensions.scala | Scala | mit | 281 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark
import com.ibm.spark.interpreter.Interpreter
import com.typesafe.config.Config
import org.apache.spark.{SparkContext, SparkConf}
import org.mockito.ArgumentCaptor
import org.scalatest.{Matchers, FunSpec}
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.mockito.Matchers._
/**
 * Verifies SparkContext initialization in SparkKernelBootstrap: the configured
 * spark.master is propagated into the SparkConf, and the kernel's own jar is
 * added only when the master is not local.
 */
class SparkKernelBootstrapSpec extends FunSpec with Matchers with MockitoSugar {
  describe("SparkKernelBootstrap") {
    describe("when spark.master is set in config") {
      it("should set spark.master in SparkConf") {
        val config = mock[Config]
        val expectedVal: String = "expected val"
        val bootstrap = spy(new SparkKernelBootstrap(config))
        val captor = ArgumentCaptor.forClass(classOf[SparkConf])
        // Mocking
        when(config.getString("spark.master")).thenReturn(expectedVal)
        bootstrap.interpreter = mock[Interpreter]
        bootstrap.sparkContext = mock[SparkContext] // Stub out addJar call
        // Verification: the SparkConf passed to reallyInitializeSparkContext
        // must carry the configured master value.
        bootstrap.initializeSparkContext()
        verify(bootstrap).reallyInitializeSparkContext(captor.capture())
        captor.getValue().get("spark.master") should be(expectedVal)
      }
      // Description fixed: this case uses a *local* master ("local[*]") and
      // expects that the kernel jar is NOT added.
      it("should not add ourselves as a jar if spark.master is local") {
        val config = mock[Config]
        val sparkMaster: String = "local[*]"
        val bootstrap = spy(new SparkKernelBootstrap(config))
        val captor = ArgumentCaptor.forClass(classOf[SparkConf])
        // Mocking
        val mockSparkContext = mock[SparkContext]
        when(config.getString("spark.master")).thenReturn(sparkMaster)
        bootstrap.interpreter = mock[Interpreter]
        bootstrap.sparkContext = mockSparkContext
        // Verification
        bootstrap.initializeSparkContext()
        bootstrap.reallyInitializeSparkContext(captor.capture())
        verify(mockSparkContext, never()).addJar(anyString())
      }
      it("should add ourselves as a jar if spark.master is not local") {
        val config = mock[Config]
        val sparkMaster: String = "notlocal"
        val bootstrap = spy(new SparkKernelBootstrap(config))
        val captor = ArgumentCaptor.forClass(classOf[SparkConf])
        // Mocking
        val mockSparkContext = mock[SparkContext]
        when(config.getString("spark.master")).thenReturn(sparkMaster)
        bootstrap.interpreter = mock[Interpreter]
        bootstrap.sparkContext = mockSparkContext
        // Verification: the jar path is the kernel's own code location; addJar
        // fires once per initialization call below, hence times(2).
        val expected =
          com.ibm.spark.SparkKernel.getClass.getProtectionDomain
            .getCodeSource.getLocation.getPath
        bootstrap.initializeSparkContext()
        bootstrap.reallyInitializeSparkContext(captor.capture())
        verify(mockSparkContext, times(2)).addJar(expected)
      }
    }
  }
}
| bpburns/spark-kernel | kernel/src/test/scala/com/ibm/spark/SparkKernelBootstrapSpec.scala | Scala | apache-2.0 | 3,390 |
package concurrent
import java.util.concurrent.{Callable, CountDownLatch, ExecutorService, Executors}
import java.util.concurrent.atomic.AtomicReference
/**
* Created by ariwaranosai on 16/7/12.
*
*/
object NonBlocking {
  /** A computation that delivers its result to a registered callback instead of blocking. */
  sealed trait Future[A] {
    private[concurrent] def apply(a: A => Unit): Unit
  }
  // A Par is a description of a computation relative to an ExecutorService;
  // nothing executes until the resulting Future is handed a callback.
  type Par[A] = ExecutorService => Future[A]
  /**
   * Executes `a` and blocks the calling thread on a latch until the callback
   * delivers the result.
   */
  def run[A](es: ExecutorService)(a: Par[A]): A = {
    val ref = new AtomicReference[A]
    val latch = new CountDownLatch(1)
    a(es) {
      x => ref.set(x); latch.countDown()
    }
    latch.await()
    ref.get
  }
  /** Lifts an already-computed value; the callback runs on the caller's thread. */
  def unit[A](a: A): Par[A] =
    es => new Future[A] {
      override private[concurrent] def apply(ax: (A) => Unit): Unit = ax(a)
    }
  /** Defers evaluation of `a` and shifts its execution onto the executor. */
  def fork[A](a: => Par[A]): Par[A] =
    es => new Future[A] {
      def apply(ax: A => Unit): Unit =
        eval(es)(a(es)(ax))
    }
  /** Submits the by-name action `r` for asynchronous execution on `es`. */
  def eval(es: ExecutorService)(r: => Unit): Unit =
    es.submit(new Callable[Unit] { def call = r})
  /**
   * Combines two parallel computations. The `combinator` actor receives each
   * result as a Left/Right message; whichever side arrives second triggers `f`.
   * The mutable `ar`/`br` slots are only touched from inside the actor
   * (assumes Actor processes messages one at a time — see `concurrent.Actor`).
   */
  def map2[A, B, C](pa: Par[A], pb: Par[B])(f: (A, B) => C): Par[C] =
    es => new Future[C] {
      def apply(ax: C => Unit): Unit = {
        var ar: Option[A] = None
        var br: Option[B] = None
        val combinator = Actor[Either[A, B]](es) {
          case Left(a) => br match {
            case None => ar = Some(a)
            case Some(b) => eval(es)(ax(f(a, b)))
          }
          case Right(b) => ar match {
            case None => br = Some(b)
            case Some(a) => eval(es)(ax(f(a, b)))
          }
        }
        pa(es) (a => combinator ! Left(a))
        pb(es) (b => combinator ! Right(b))
      }
    }
  /** Transforms the result of `pa`, running `f` on the executor. */
  def map[A, B](pa: Par[A])(f: A => B): Par[B] =
    es => new Future[B] {
      def apply(cb: B => Unit): Unit =
        pa(es) { x => eval(es)(cb(f(x)))}
    }
  // Divide-and-conquer sequence: split, sequence both halves, and merge the
  // halves in parallel via map2.
  def sequence[A](ps: IndexedSeq[Par[A]]): Par[IndexedSeq[A]] = {
    if (ps.isEmpty) unit(IndexedSeq[A]())
    else if (ps.length == 1) map(ps.head)(x => IndexedSeq[A](x))
    else {
      val (l, r) = ps.splitAt(ps.length / 2)
      map2(sequence(l), sequence(r))(_ ++ _)
    }
  }
  def sequence[A](ps: List[Par[A]]): Par[List[A]] =
    map(sequence(ps.toIndexedSeq))(_.toList)
  /** Like unit, but the evaluation of `a` is forked onto the executor. */
  def lazyUnit[A](a: => A): Par[A] = fork[A](unit[A](a))
  /** Lifts an ordinary function into one that evaluates asynchronously. */
  def asyncF[A, B](f: A => B): A => Par[B] = (a: A) => lazyUnit(f(a))
  /** Applies `f` to every element in parallel. */
  def parMap[A, B](ps: List[A])(f: A => B): Par[List[B]] =
    sequence(ps.map(asyncF[A, B](f)))
  /** Runs `t` or `f` depending on the result of `cond`. */
  def choice[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
    es => new Future[A] {
      def apply(cb: A => Unit): Unit =
        cond(es) { b =>
          if (b) eval(es) { t(es)(cb) }
          else eval(es) { f(es)(cb) }
        }
    }
  /** Runs the `b`-th computation of `t`, where `b` comes from `n`. */
  def choiceN[A](n: Par[Int])(t: List[Par[A]]): Par[A] =
    es => new Future[A] {
      def apply(cb: A => Unit): Unit =
        n(es) {
          b => eval(es) {t(b)(es)(cb)}
        }
    }
  /** Generalized choice: select the next computation from the first result. */
  def chooser[I, A](p: Par[I])(t: I => Par[A]): Par[A] =
    es => new Future[A] {
      def apply(cb: A => Unit): Unit = p(es) {
        a => eval(es) { t(a)(es)(cb)}
      }
    }
  /** Flattens a nested Par by forwarding the inner computation's callback. */
  def join[A](p: Par[Par[A]]): Par[A] = es => new Future[A] {
    def apply(cb: A => Unit): Unit =
      p(es) {a => eval(es) {a(es)(cb)}}
  }
  def flatMap[A, B](a: Par[A])(f: A => Par[B]): Par[B] =
    join(map(a)(f))
}
object NoBlockingTest {
  import NonBlocking._

  /** Demo entry point: computes 1..99999 square roots in parallel and prints them. */
  def main(args: Array[String]): Unit = {
    // Build the description first; nothing runs until `run` supplies a pool.
    val parallelSqrts = parMap(List.range(1, 100000))(math.sqrt(_))
    val pool = Executors.newFixedThreadPool(4)
    val result = run(pool)(parallelSqrts)
    println(result)
    pool.shutdown()
  }
}
| ariwaranosai/FPinScala | src/main/scala/concurrent/NonBlocking.scala | Scala | mit | 3,499 |
/**
* Copyright 2009 Jorge Ortiz
* Copyright 2009 Barry Kaplan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.time
import java.util.Locale
import org.joda.time._
import org.joda.time.base.{AbstractDateTime, AbstractInstant, AbstractPartial}
import org.joda.time.format.DateTimeFormatter
import org.joda.time.field.AbstractReadableInstantFieldProperty
// Ready-made instances of the conversion traits below; import e.g.
// `org.scala_tools.time.Implicits._` to bring the conversions into scope.
object Implicits extends Implicits
object BuilderImplicits extends Implicits
object IntImplicits extends IntImplicits
object JodaImplicits extends JodaImplicits
// Aggregates all three groups of implicit conversions.
trait Implicits extends BuilderImplicits with IntImplicits with JodaImplicits
trait BuilderImplicits {
  // Implicitly finish a DurationBuilder as a Joda Period...
  implicit def forcePeriod(builder: DurationBuilder): Period =
    builder.underlying
  // ...or as a Duration (via Period#toStandardDuration).
  implicit def forceDuration(builder: DurationBuilder): Duration =
    builder.underlying.toStandardDuration
}
trait IntImplicits {
  // Wraps Int/Long values in this library's Rich* types.
  implicit def RichInt(n: Int): RichInt = new org.scala_tools.time.RichInt(n)
  implicit def RichLong(n: Long): RichLong = new org.scala_tools.time.RichLong(n)
}
// One Rich* wrapper conversion per Joda-Time type.
trait JodaImplicits {
  implicit def RichAbstractDateTime(dt: AbstractDateTime): RichAbstractDateTime = new RichAbstractDateTime(dt)
  implicit def RichAbstractInstant(in: AbstractInstant): RichAbstractInstant = new RichAbstractInstant(in)
  implicit def RichAbstractPartial(pt: AbstractPartial): RichAbstractPartial = new RichAbstractPartial(pt)
  implicit def RichAbstractReadableInstantFieldProperty(pty: AbstractReadableInstantFieldProperty): RichAbstractReadableInstantFieldProperty =
    new RichAbstractReadableInstantFieldProperty(pty)
  implicit def RichChronology(ch: Chronology): RichChronology = new RichChronology(ch)
  implicit def RichDateMidnight(dm: DateMidnight): RichDateMidnight = new RichDateMidnight(dm)
  implicit def RichDateTime(dt: DateTime): RichDateTime = new RichDateTime(dt)
  implicit def RichDateTimeFormatter(fmt: DateTimeFormatter): RichDateTimeFormatter = new RichDateTimeFormatter(fmt)
  implicit def RichDateTimeProperty(pty: DateTime.Property): RichDateTimeProperty = new RichDateTimeProperty(pty)
  implicit def RichDateTimeZone(zone: DateTimeZone): RichDateTimeZone = new RichDateTimeZone(zone)
  implicit def RichDuration(dur: Duration): RichDuration = new RichDuration(dur)
  implicit def RichInstant(in: Instant): RichInstant = new RichInstant(in)
  implicit def RichLocalDate(ld: LocalDate): RichLocalDate = new RichLocalDate(ld)
  implicit def RichLocalDateProperty(pty: LocalDate.Property): RichLocalDateProperty = new RichLocalDateProperty(pty)
  implicit def RichLocalDateTime(dt: LocalDateTime): RichLocalDateTime = new RichLocalDateTime(dt)
  implicit def RichLocalDateTimeProperty(pty: LocalDateTime.Property): RichLocalDateTimeProperty = new RichLocalDateTimeProperty(pty)
  implicit def RichLocalTime(lt: LocalTime): RichLocalTime = new RichLocalTime(lt)
  implicit def RichLocalTimeProperty(pty: LocalTime.Property): RichLocalTimeProperty = new RichLocalTimeProperty(pty)
  implicit def RichPartial(pt: Partial): RichPartial = new RichPartial(pt)
  implicit def RichPartialProperty(pty: Partial.Property): RichPartialProperty = new RichPartialProperty(pty)
  implicit def RichPeriod(per: Period): RichPeriod = new RichPeriod(per)
  implicit def RichReadableDateTime(dt: ReadableDateTime): RichReadableDateTime = new RichReadableDateTime(dt)
  implicit def RichReadableDuration(dur: ReadableDuration): RichReadableDuration = new RichReadableDuration(dur)
  implicit def RichReadableInstant(in: ReadableInstant): RichReadableInstant = new RichReadableInstant(in)
  implicit def RichReadableInterval(in: ReadableInterval): RichReadableInterval = new RichReadableInterval(in)
  implicit def RichReadablePartial(rp: ReadablePartial): RichReadablePartial = new RichReadablePartial(rp)
  implicit def RichReadablePeriod(per: ReadablePeriod): RichReadablePeriod = new RichReadablePeriod(per)
}
| scalaj/scalaj-time | src/main/scala/org/scala_tools/time/Implicits.scala | Scala | apache-2.0 | 4,385 |
package io.youi.server.rest
import io.youi.http.HttpStatus
/** A REST response payload paired with the HTTP status it should be sent with. */
case class RestfulResponse[Response](response: Response, status: HttpStatus)
| outr/youi | server/src/main/scala/io/youi/server/rest/RestfulResponse.scala | Scala | mit | 138 |
package arimitsu.sf.test.undertowspring.service
import org.springframework.stereotype.Component
/**
* User: sxend
* Date: 14/02/26
* Time: 22:55
*/
@Component
class ExampleService {

  /** Builds a greeting for the supplied value, e.g. hello("x") == "Hello, x.". */
  def hello(value: String): String = s"Hello, $value."
}
| sxend/Undertow-with-Spring | src/main/scala/arimitsu/sf/test/undertowspring/service/ExampleService.scala | Scala | mit | 260 |
/*
* Copyright 2012-2013 Eligotech BV.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eligosource.eventsourced.journal.leveldb
import org.eligosource.eventsourced.journal.common.PersistentReplaySpec
// Runs the shared PersistentReplaySpec against four LevelDB journal
// configurations: default vs. sequence structure, each with the native
// library and with the Java port (withNative(false)).
class LeveldbReplayPSNativeSpec extends PersistentReplaySpec with LeveldbCleanup {
  def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir)
}

class LeveldbReplaySSNativeSpec extends PersistentReplaySpec with LeveldbCleanup {
  def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withSequenceStructure
}

class LeveldbReplayPSJavaSpec extends PersistentReplaySpec with LeveldbCleanup {
  def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withNative(false)
}

class LeveldbReplaySSJavaSpec extends PersistentReplaySpec with LeveldbCleanup {
  def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withSequenceStructure.withNative(false)
}
| CoderPaulK/eventsourced | es-journal/es-journal-leveldb/src/test/scala/org/eligosource/eventsourced/journal/leveldb/LeveldbReplaySpec.scala | Scala | apache-2.0 | 1,432 |
package io.latent.resilience.cache
import scala.concurrent.Future
import scala.concurrent.duration.Duration
/** Minimal asynchronous cache abstraction: keys of type K map to Future-valued entries. */
trait Cache[K, V] {
  /** Add to cache */
  def put(key: K, value: Future[V])
  /** Get if exists */
  def get(key: K): Option[Future[V]]
  /** Invalidate cache entry for key if exists */
  def invalidate(key: K): Unit
}
object Cache {
  /**
   * Builds an in-memory cache backed by SimpleCache. All parameters are
   * forwarded to SimpleCache — see that class for the exact eviction
   * semantics. Duration.Inf presumably disables the corresponding expiry
   * (TODO confirm against SimpleCache).
   */
  def simple[K, V](maxSize: Int = 100,
                   timeToLive: Duration = Duration.Inf,
                   timeToIdle: Duration = Duration.Inf,
                   softValues: Boolean = false,
                   weakValues: Boolean = false,
                   weakKeys: Boolean = false): Cache[K, V] = {
    new SimpleCache[K, V](maxSize, timeToLive, timeToIdle, softValues, weakValues, weakKeys)
  }
}
package amailp.intellij.robot.psi
import amailp.intellij.robot.ast
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import scala.jdk.CollectionConverters._
import amailp.intellij.robot.findUsage.UsageFindable
import amailp.intellij.robot.structureView.InStructureView
import amailp.intellij.robot.file.Icons
/**
 * PSI element for a Robot Framework keyword definition. Its name is taken
 * from the child KeywordName node; renaming swaps that child node in place.
 */
class KeywordDefinition(node: ASTNode)
    extends RobotPsiElement(node)
    with PsiNamedElement
    with UsageFindable
    with InStructureView
    with PsiTarget {
  // The child node holding this keyword's name.
  private def keywordName = getNode.findChildByType(ast.KeywordName).getPsi(classOf[KeywordName])
  override def getNodeText(useFullName: Boolean) = getName
  override def getName: String = keywordName.getText
  // Rename by building a dummy definition and grafting its name node in.
  override def setName(name: String): PsiElement = {
    val dummyKeyword = createKeywordDefinition(name)
    this.getNode.replaceChild(keywordName.getNode, dummyKeyword.keywordName.getNode)
    this
  }
  def getType: String = "keyword"
  def getDescriptiveName: String = getName
  // Structure-view presentation: keyword name, keyword icon, no children.
  def structureViewText = getName
  def structureViewIcon = Icons.keyword
  def structureViewChildrenTokenTypes = Nil
}
object KeywordDefinition {
  /** Keyword definitions in `files` whose name matches `reference` (KeywordName#matches). */
  def findMatchingInFiles(files: LazyList[RobotPsiFile], reference: String) = {
    for {
      keywordDefinition <- findInFiles(files)
      if keywordDefinition.keywordName matches reference
    } yield keywordDefinition
  }
  /** All keyword definitions across the given files. */
  def findInFiles(files: LazyList[RobotPsiFile]) = {
    for {
      file <- files
      keywordDefinition <- findInFile(file)
    } yield keywordDefinition
  }
  /** All keyword definitions declared in a single file. */
  def findInFile(file: RobotPsiFile) =
    PsiTreeUtil.findChildrenOfType(file.getNode.getPsi, classOf[KeywordDefinition]).asScala.toSet
}
| AmailP/robot-plugin | src/main/scala/amailp/intellij/robot/psi/KeywordDefinition.scala | Scala | gpl-3.0 | 1,698 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.store.atomic
import com.treode.async.stubs.StubScheduler
import com.treode.cluster.{Cluster, HostId, Peer}
import com.treode.cluster.stubs.StubCluster
import com.treode.store.{Atlas, Bytes, Cohort, StoreTestTools, TxClock}
import org.scalatest.{FreeSpec, ShouldMatchers}
import AtomicMover.{Point, Range, Targets, Tracker}
import Cohort.{issuing, moving, settled}
import StoreTestTools._
/**
 * Tests AtomicMover's rebalancing bookkeeping: deriving move targets from an
 * atlas of cohorts, ordering of progress Points, and the Tracker's behavior
 * when a rebalance is started, continued, or restarted mid-way.
 */
class AtomicMoverSpec extends FreeSpec with ShouldMatchers {
  // Build a Targets from an atlas of the given cohorts (version 1).
  private def targets (cohorts: Cohort*) (implicit cluster: Cluster): Targets =
    Targets (Atlas (cohorts.toArray, 1))
  // Range from the start of table `start` through the end of the keyspace.
  private def begin (start: Int): Range =
    Range (Point.Middle (start, Bytes.MinValue, TxClock.MaxValue), Point.End)
  // Range from the start of table `start` to the start of table `end`.
  private def range (start: Int, end: Int): Range =
    Range (
      Point.Middle (start, Bytes.MinValue, TxClock.MaxValue),
      Point.Middle (end, Bytes.MinValue, TxClock.MaxValue))
  def assertPeers (expected: HostId*) (actual: Set [Peer]): Unit =
    assertResult (expected.toSet) (actual map (_.id))
  def assertPeers (expected: Map [Int, Set [HostId]]) (actual: Map [Int, Set [Peer]]): Unit =
    assertResult (expected) (actual mapValues (_ map (_.id)))
  // Assert the tracker dequeued a task covering `range` aimed at `targets`.
  def assertTask (range: Range, targets: (Int, Set [HostId])*) (actual: Option [(Range, Targets)]) {
    assert (actual.isDefined)
    assertResult (range) (actual.get._1)
    assertPeers (targets.toMap) (actual.get._2.targets)
  }
  def assertNone (actual: Option [Any]): Unit =
    assertResult (None) (actual)
  // Thin wrapper around Tracker with convenience overloads for tests.
  private class RichTracker {
    val tracker = new Tracker
    def deque(): Option [(Range, Targets)] =
      tracker.deque()
    def continue (point: Point): Unit =
      tracker.continue (point)
    // Continue from the beginning of the given table.
    def continue (table: Int): Unit =
      tracker.continue (Point.Middle (table, Bytes.MinValue, TxClock.MaxValue))
    def start (cohorts: Cohort*) (implicit cluster: Cluster): Unit =
      tracker.start (targets (cohorts: _*))
  }
  "Deriving targets from cohorts should" - {
    def setup() = {
      implicit val (random, scheduler, network) = newKit()
      implicit val cluster = new StubCluster (1)
      cluster
    }
    "find no targets when no cohorts are moving" in {
      implicit val cluster = setup()
      val ts = targets (
          settled (1, 2, 3),
          settled (4, 5, 6),
          settled (7, 8, 9),
          settled (10, 11, 12))
      assert (ts.isEmpty)
    }
    "find targets only from the cohorts that are moving" in {
      implicit val cluster = setup()
      val ts = targets (
          settled (1, 2, 3),
          issuing (1, 2, 3) (1, 2, 4),
          moving (1, 2, 3) (1, 2, 5),
          moving (1, 2, 3) (1, 6, 7))
      assert (!ts.isEmpty)
      assert (!(ts contains 0))
      assert (!(ts contains 1))
      assertPeers (2, 5) (ts (2))
      assertPeers (6, 7) (ts (3))
    }}
  "Points should" - {
    import Point.{End, Middle}
    "order properly" in {
      // Note: a later TxClock sorts first within the same key.
      assert (Middle (0, 0, 1) < Middle (0, 0, 0))
      assert (Middle (0, 0, 0) < Middle (0, 1, 0))
      assert (Middle (0, 0, 0) < Middle (1, 0, 0))
      assert (Middle (0, 0, 0) < End)
    }}
  "When the tracker" - {
    "no points of completed work and" - {
      "no work underway and" - {
        def setup() = {
          implicit val (random, scheduler, network) = newKit()
          implicit val cluster = new StubCluster (1)
          val tracker = new Tracker
          (cluster, tracker)
        }
        "rebalance is not started, it should yield no work" in {
          implicit val (_, t) = setup()
          assertNone (t.deque())
        }
        "rebalance is started with all cohorts settled, it should yield no work" in {
          implicit val (cluster, t) = setup()
          t.start (settled (1, 2, 3))
          assertNone (t.deque())
          assertNone (t.deque())
        }
        "rebalance is started with a cohort moving, it should start work" in {
          implicit val (cluster, t) = setup()
          t.start (moving (1, 2, 3) (1, 2, 4))
          assertTask (begin (0), 0 -> Set (2, 4)) (t.deque())
          intercept [AssertionError] (t.deque())
          t.continue (Point.End)
          assertNone (t.deque())
        }}}
    "one point of completed work and" - {
      "no work underway and" - {
        def setup() = {
          implicit val (random, scheduler, network) = newKit()
          implicit val cluster = new StubCluster (1)
          val t = new RichTracker
          t.start (moving (1, 2, 3) (1, 2, 4))
          assertTask (begin (0), 0 -> Set (2, 4)) (t.deque())
          t.continue (7)
          (cluster, t)
        }
        "rebalance is not restarted, it should continue work" in {
          implicit val (_, t) = setup()
          assertTask (begin (7), 0 -> Set (2, 4)) (t.deque())
          t.continue (Point.End)
          assertNone (t.deque())
        }
        "rebalance is restarted" - {
          "with all cohorts settled, it should yield no work" in {
            implicit val (cluster, t) = setup()
            t.start (settled (1, 2, 3))
            assertNone (t.deque())
          }
          "with the same cohort moving the same way, it should continue work" in {
            implicit val (cluster, t) = setup()
            t.start (moving (1, 2, 3) (1, 2, 4))
            assertTask (begin (7), 0 -> Set (2, 4)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }
          "with the same cohort moving the same way and more, it should restart work" in {
            implicit val (cluster, t) = setup()
            t.start (moving (1, 2, 3) (1, 4, 5))
            assertTask (range (0, 7), 0 -> Set (5)) (t.deque())
            intercept [AssertionError] (t.deque())
            t.continue (7)
            assertTask (begin (7), 0 -> Set (4, 5)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }
          "with the same cohort moving a different way, it should restart work" in {
            implicit val (cluster, t) = setup()
            t.start (moving (1, 2, 3) (1, 5, 6))
            assertTask (begin (0), 0 -> Set (5, 6)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }
          "with a different cohort moving, it should restart work" in {
            implicit val (cluster, t) = setup()
            t.start (
                settled (1, 2, 3),
                moving (1, 2, 3) (1, 2, 4))
            assertTask (begin (0), 1 -> Set (2, 4)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }}}}
    "two points of completed work and" - {
      "no work underway and" - {
        def setup() = {
          implicit val (random, scheduler, network) = newKit()
          implicit val cluster = new StubCluster (1)
          val t = new RichTracker
          t.start (moving (1, 2, 3) (1, 2, 4))
          assertTask (begin (0), 0 -> Set (2, 4)) (t.deque())
          t.start (moving (1, 2, 3) (1, 4, 5))
          t.continue (7)
          assertTask (range (0, 7), 0 -> Set (5)) (t.deque())
          t.continue (3)
          (cluster, t)
        }
        "rebalance is not restarted, it should continue work" in {
          implicit val (_, t) = setup()
          assertTask (range (3, 7), 0 -> Set (5)) (t.deque())
          t.continue (7)
          assertTask (begin (7), 0 -> Set (4, 5)) (t.deque())
          t.continue (Point.End)
          assertNone (t.deque())
        }
        "rebalance is restarted" - {
          "with all cohorts settled, it should yield no work" in {
            implicit val (cluster, t) = setup()
            t.start (settled (1, 2, 3))
            assertNone (t.deque())
          }
          "with the same cohort moving the same way, it should continue work" in {
            implicit val (cluster, t) = setup()
            t.start (moving (1, 2, 3) (1, 4, 5))
            assertTask (range (3, 7), 0 -> Set (5)) (t.deque())
            t.continue (7)
            assertTask (begin (7), 0 -> Set (4, 5)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }
          "with the same cohort moving a different way, it should restart work" in {
            implicit val (cluster, t) = setup()
            t.start (moving (1, 2, 3) (1, 6, 7))
            assertTask (begin (0), 0 -> Set (6, 7)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }
          "with a different cohort moving, it should restart work" in {
            implicit val (cluster, t) = setup()
            t.start (
                settled (1, 2, 3),
                moving (1, 2, 3) (1, 2, 4))
            assertTask (begin (0), 1 -> Set (2, 4)) (t.deque())
            t.continue (Point.End)
            assertNone (t.deque())
          }}}}}}
| Treode/store | store/test/com/treode/store/atomic/AtomicMoverSpec.scala | Scala | apache-2.0 | 9,451 |
package org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.extensions.ExtensionPointName
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import scala.collection.mutable.ArrayBuffer
/**
* @author Mikhail.Mutcianko
* @since 26.12.14
*/
/** Extension base class: plugins subclass this to inject synthetic members into Scala classes. */
class SyntheticMembersInjector {
  /**
   * This method allows to add custom functions to any class, object or trait.
   * This includes synthetic companion object.
   *
   * Context for this method will be class. So inner types and imports of this class
   * will not be available. But you can use anything declared outside of this class.
   * @param source class to inject functions
   * @return sequence of functions text
   */
  def injectFunctions(source: ScTypeDefinition): Seq[String] = Seq.empty

  /**
   * Use this method to mark class or trait, that it requires companion object.
   * Note that object as source is not possible.
   * @param source class or trait
   * @return if this source requires companion object
   */
  def needsCompanionObject(source: ScTypeDefinition): Boolean = false
}
object SyntheticMembersInjector {
  val LOG = Logger.getInstance(getClass)

  // Extension point through which plugins register their injectors.
  val EP_NAME: ExtensionPointName[SyntheticMembersInjector] =
    ExtensionPointName.create("org.intellij.scala.syntheticMemberInjector")

  /**
   * Collects function templates from every registered injector, parses each
   * in the context of `source` (or the real companion class for a synthetic
   * object), and returns the resulting synthetic ScFunctions. A failing
   * injector is logged and skipped rather than aborting the whole pass.
   */
  def inject(source: ScTypeDefinition): Seq[ScFunction] = {
    val buffer = new ArrayBuffer[ScFunction]()
    for {
      injector <- EP_NAME.getExtensions
      template <- injector.injectFunctions(source)
    } try {
      val context = source match {
        case o: ScObject if o.isSyntheticObject => o.fakeCompanionClassOrCompanionClass
        case _ => source
      }
      val function = ScalaPsiElementFactory.createMethodWithContext(template, context, source)
      function.setSynthetic(context)
      function.syntheticContainingClass = Some(source)
      buffer += function
    } catch {
      case e: Throwable =>
        LOG.error(s"Error during parsing template from injector: ${injector.getClass.getName}", e)
    }
    buffer
  }

  /** True if any registered injector demands a companion object for `source`. */
  def needsCompanion(source: ScTypeDefinition): Boolean = EP_NAME.getExtensions.exists(_.needsCompanionObject(source))
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/typedef/SyntheticMembersInjector.scala | Scala | apache-2.0 | 2,383 |
package unfiltered.netty.request
import org.specs2.mutable.Specification
import unfiltered.netty.cycle
import unfiltered.request.{ Path => UFPath, POST, & }
import unfiltered.response.{ NotFound, ResponseString }
import unfiltered.specs2.netty.Served
import java.io.{ File => JFile,FileInputStream => FIS }
import java.util.Arrays
import org.apache.commons.io.{ IOUtils => IOU }
/**
 * Exercises unfiltered's Netty cycle.MultiPartDecoder against disk, streamed,
 * and in-memory multipart handling, including writing uploads back out and
 * verifying the written bytes round-trip.
 */
object CycleUploadSpec extends Specification
  with Served {

  // Server under test: one endpoint per MultiPartParams strategy, plus a
  // catch-all 404 plan.
  def setup = {
    val plan = cycle.MultiPartDecoder({
      // Disk-backed parsing: report the received file's name and content type.
      case POST(UFPath("/disk-upload") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Disk(binding).files("f") match {
            case Seq(f, _*) => ResponseString(
              "disk read file f named %s with content type %s" format(
                f.name, f.contentType))
            case f => ResponseString("what's f?")
          }
      }
      // Disk-backed parsing, then write out and compare against f.bytes.
      case POST(UFPath("/disk-upload/write") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Disk(binding).files("f") match {
            case Seq(f, _*) =>
              f.write(new JFile("upload-test-out.txt")) match {
                case Some(outFile) =>
                  if (Arrays.equals(IOU.toByteArray(new FIS(outFile)), f.bytes)) ResponseString(
                    "wrote disk read file f named %s with content type %s with correct contents" format(
                      f.name, f.contentType)
                  )
                  else ResponseString(
                    "wrote disk read file f named %s with content type %s, with differing contents" format(
                      f.name, f.contentType))
                case None => ResponseString(
                  "did not write disk read file f named %s with content type %s" format(
                    f.name, f.contentType))
              }
            case _ => ResponseString("what's f?")
          }
      }
      // Streamed parsing: report the received file's name and content type.
      case POST(UFPath("/stream-upload") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Streamed(binding).files("f") match {
            case Seq(f, _*) => ResponseString(
              "stream read file f is named %s with content type %s" format(
                f.name, f.contentType))
            case _ => ResponseString("what's f?")
          }
      }
      // Streamed parsing, then write out and compare against the classpath source.
      case POST(UFPath("/stream-upload/write") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Streamed(binding).files("f") match {
            case Seq(f, _*) =>
              val src = IOU.toByteArray(getClass.getResourceAsStream("/netty-upload-big-text-test.txt"))
              f.write(new JFile("upload-test-out.txt")) match {
                case Some(outFile) =>
                  if (Arrays.equals(IOU.toByteArray(new FIS(outFile)), src)) ResponseString(
                    "wrote stream read file f named %s with content type %s with correct contents" format(
                      f.name, f.contentType)
                  )
                  else ResponseString(
                    "wrote stream read file f named %s with content type %s, with differing contents" format(
                      f.name, f.contentType))
                case None => ResponseString(
                  "did not write stream read file f named %s with content type %s" format(
                    f.name, f.contentType))
              }
            case _ => ResponseString("what's f?")
          }
      }
      // In-memory parsing: report the received file's name and content type.
      case POST(UFPath("/mem-upload") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Memory(binding).files("f") match {
            case Seq(f, _*) => ResponseString(
              "memory read file f is named %s with content type %s" format(
                f.name, f.contentType))
            case _ => ResponseString("what's f?")
          }
      }
      // In-memory parsing: write is expected to return None (nothing persisted).
      case POST(UFPath("/mem-upload/write") & MultiPart(req)) => {
        case Decode(binding) =>
          MultiPartParams.Memory(binding).files("f") match {
            case Seq(f, _*) =>
              f.write(new JFile("upload-test-out.txt")) match {
                case Some(outFile) => ResponseString(
                  "wrote memory read file f is named %s with content type %s" format(
                    f.name, f.contentType))
                case None => ResponseString(
                  "did not write memory read file f is named %s with content type %s" format(
                    f.name, f.contentType))
              }
            case _ => ResponseString("what's f?")
          }
      }
    })
    _.plan(plan).plan(cycle.Planify {
      case _ => NotFound
    })
  }

  "Netty cycle.MultiPartDecoder" should {
    // Clean up any leftover output file from a previous run.
    step {
      val out = new JFile("netty-upload-test-out.txt")
      if (out.exists) out.delete
    }
    "handle file uploads written to disk" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      http(req(host / "disk-upload") <<* ("f", file, "text/plain")).as_string must_== "disk read file f named netty-upload-big-text-test.txt with content type text/plain"
    }
    "handle file uploads streamed" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      http(req(host / "stream-upload") <<* ("f", file, "text/plain")).as_string must_== "stream read file f is named netty-upload-big-text-test.txt with content type text/plain"
    }
    "handle writing file uploads streamed" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      http(req(host / "stream-upload" / "write") <<* ("f", file, "text/plain")).as_string must_== "wrote stream read file f named netty-upload-big-text-test.txt with content type text/plain with correct contents"
    }
    "handle file uploads all in memory" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      http(req(host / "mem-upload") <<* ("f", file, "text/plain")).as_string must_== "memory read file f is named netty-upload-big-text-test.txt with content type text/plain"
    }
    "not write memory read files" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      http(req(host / "mem-upload" / "write") <<* ("f", file, "text/plain")).as_string must_== "did not write memory read file f is named netty-upload-big-text-test.txt with content type text/plain"
    }
    "respond with a 404" in {
      val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
      file.exists must_==true
      val resp = httpx(req(host / "notfound") <<* ("f", file, "text/plain"))
      resp.code must_== 404
    }
  }
}
| jarin/unfiltered | netty-uploads/src/test/scala/CycleUploadSpec.scala | Scala | mit | 6,835 |
package pl.abankowski.musicbrainz.client.service
import scala.concurrent.Future
import pl.abankowski.musicbrainz.client.dto.ArtistId
import pl.abankowski.musicbrainz.client.dto.ArtistInfo
import pl.abankowski.musicbrainz.client.dto.ResourceResult
import pl.abankowski.musicbrainz.client.query._
/** Read-side access to MusicBrainz artist resources. */
trait ArtistService {
  /** Fetch a single artist by id; presumably None when the id is unknown — confirm against the implementation. */
  def get(id: ArtistId): Future[Option[ArtistInfo]]
  /** Execute a search query and return the matching artists wrapped in a ResourceResult. */
  def search(query: Query): Future[ResourceResult[ArtistInfo]]
} | abankowski/musicbrainz-scala-client | src/main/scala/pl/abankowski/musicbrainz/client/service/ArtistService.scala | Scala | mit | 435 |
package org.dohrm.auth0
import akka.actor.{ActorRef, Props}
import com.typesafe.config.Config
import org.dohrm.auth0.actors.Auth0Verifier
import org.dohrm.auth0.directives.Auth0
/**
* @author michaeldohr
* @since 04/06/16
*/
/** Wires the Auth0 directives to a concrete verifier actor built from application config. */
trait Auth0Context extends Auth0 {
  // Must provide the auth0.clientId / auth0.clientSecret / auth0.domain keys.
  implicit val config: Config
  // NOTE(review): config keys are read eagerly when the trait is initialized, so a
  // missing key fails at construction time with a ConfigException.
  // `as` is presumably the ActorSystem supplied by the Auth0 trait — confirm there.
  override val auth0VerifierActor: ActorRef = as.actorOf(
    Props(
      new Auth0Verifier(
        config.getString("auth0.clientId"),
        config.getString("auth0.clientSecret"),
        config.getString("auth0.domain")
      )
    )
  )
}
| dohr-michael/storyline | src/main/scala/org/dohrm/auth0/Auth0Context.scala | Scala | mit | 546 |
package edu.mit.csail.sdg
import scala.xml.{Node, XML}
import java.sql.{Timestamp, DriverManager}
import net.liftweb.json.JsonDSL._
import net.liftweb.json._
import java.io.FileWriter
/**
* Created by IntelliJ IDEA.
* User: Dwayne
* Date: 7/14/11
* Time: 12:00 PM
* To change this template use File | Settings | File Templates.
*/
/**
* Used to convert the contents of the derby database 20110104_SeqSched/lowrate to a text file
* of observations using the JSON format. Used so that a connection to a derby database isn't
* needed when testing the monitor
*/
object SampleDatabaseToJson extends App {
  // Location of the embedded Derby database holding TFDM feature snapshots.
  //Connect to the derby database
  val db: String = "20110104_SeqSched/lowrate"
  val conn = DriverManager.getConnection("jdbc:derby:" + db)
  // Pull every stored feature collection in validtime order and convert each
  // snapshot to one JSON line in observations.json (empty conversions are skipped).
  val stmt = conn.createStatement
  stmt.execute("SELECT validtime, data FROM tfdmfeaturecollection ORDER BY validtime")
  val rs = stmt.getResultSet()
  val fileWriter = new FileWriter("observations.json")
  while (rs.next) {
    val validtime = rs.getTimestamp("validtime")
    val data = rs.getClob("data")
    val xml: Node = XML.load(data.getCharacterStream)
    data.free()
    val json = convert(validtime, xml)
    if (!json.isEmpty) fileWriter.write(json + "\\n")
  }
  fileWriter.close()
  rs.close()
  stmt.close()
  conn.close()

  /**
   * Convert one feature-collection snapshot into a JSON observation line.
   * Returns "" when no child feature produced a signature.
   */
  def convert(timestamp: Timestamp, xml: Node) = {
    val sigs = xml.child flatMap {feature =>
      feature.label match {
        case "ArrivalProgressFeature" => Some(handleArrivalProgressFeature(feature))
        case "DefaultFlightStripFeature" => None
        case "DepartureProgressFeature" => Some(handleDepartureProgressFeature(feature))
        case "FlightDisplayDataFeature" => Some(handleFlightDisplayDataFeature(feature))
        case "FlightModelFeature" => None
        case "NasUpdateFeature" => Some(handleNasUpdateFeature(feature))
        case "RunwayAssignmentFeature" => None
        case "SequenceSchedulingFeature" => None
        case "TargetCorrelationFeature" => None
        case "TargetDropFeature" => None
        case "TargetMergeFeature" => None
        case "TaxiRoutingFeature" => None
        case "#PCDATA" => None
        case otherwise => println(otherwise); println(xml); throw new RuntimeException()
      }
    }
    if (sigs.isEmpty)
      ""
    else
      compact(render(("time" -> timestamp.toString) ~ ("sigs" -> sigs)))
  }

  /** JSON signature marking aircraft `aircraftId` with a single-valued intent field. */
  private def intentFeature(aircraftId: String, intentName: String): JObject = {
    val intentValue = ("name" -> intentName) ~ ("id" -> intentName)
    val intent =
      ("name" -> "intent") ~
      ("arity" -> 1) ~
      ("set" -> ("value" -> List(intentValue)))
    ("name" -> "Aircraft") ~
    ("id" -> aircraftId) ~
    ("fields" -> List(intent))
  }

  /** JSON signature adding aircraft `aircraftId` to TFDM's flightData set. */
  private def flightDataFeature(aircraftId: String): JObject = {
    val aircraft = ("name" -> "Aircraft") ~ ("id" -> aircraftId)
    val value = ("value" -> List(aircraft))
    val flightData =
      ("name" -> "flightData") ~
      ("arity" -> 1) ~
      ("add" -> List(value))
    ("name" -> "TFDM") ~
    ("id" -> "TFDM") ~
    ("fields" -> List(flightData))
  }

  /** Arrival progress: tag the aircraft with intent "Arrive". */
  def handleArrivalProgressFeature(xml: Node): JObject =
    intentFeature((xml \\ "flightIdentifiers" \\ "acid").text, "Arrive")

  /** Departure progress: tag the aircraft with intent "Depart". */
  def handleDepartureProgressFeature(xml: Node): JObject =
    intentFeature((xml \\ "flightIdentifiers" \\ "acid").text, "Depart")

  /** Flight display data: register the aircraft in TFDM's flightData (acid under flightDisplayData). */
  def handleFlightDisplayDataFeature(xml: Node): JObject =
    flightDataFeature((xml \\ "flightDisplayData" \\ "acid").text)

  /** NAS update: register the aircraft in TFDM's flightData (acid under flightIdentifiers). */
  def handleNasUpdateFeature(xml: Node): JObject =
    flightDataFeature((xml \\ "flightIdentifiers" \\ "acid").text)
} | dlreeves/ormolu | src/edu/mit/csail/sdg/SampleDatabaseToJson.scala | Scala | mit | 4,232 |
/*
* Copyright (C) 2010-2011 Mikhail Vorozhtsov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mvv.routineer.tests
import com.github.mvv.routineer.{Args, Dispatch, Route, Routes}
import com.github.mvv.routineer.syntax._
import org.specs2.mutable.Specification
/** Specs for the Routes dispatcher: handlers with and without an environment. */
class RoutesSpec extends Specification {
  // A single wildcard segment dispatched to a handler that echoes the segment back.
  "One-segment echo route" >> {
    val rs = Routes.forHandler[Route.Handler.Apply[String]#H](Route.handle(*) { segment =>
      segment
    })
    rs.dispatch(Seq("foo")) must beLike {
      case r: Dispatch.Handler[_, Route.Handler.Apply[String]#H] =>
        Args.apply(r.handler, r.args) mustEqual "foo"
    }
  }
  // Same as above, but the handler also receives an Int environment value that
  // must be prepended to the captured args before invocation.
  "One-segment echo route with env" >> {
    val rs = Routes.forHandler[Route.WithEnv.Apply[Int, String]#H](Route.withEnv[Int](*) { (env, segment) =>
      s"$env:$segment"
    })
    rs.dispatch(Seq("foo")) must beLike {
      case r: Dispatch.Handler[_, Route.WithEnv.Apply[Int, String]#H] =>
        Args.apply(r.handler.handler, r.handler.growable(r.args).prepend(123)) mustEqual "123:foo"
    }
  }
  // Legacy tests for an earlier Routes API, kept for reference until ported.
  /*
  "Empty route set" in {
    val rs = Routes[Any, Any]()
    rs((), "") must_== None
    rs((), "/") must_== None
    rs((), "aaaa") must_== None
    rs((), "/aaaa") must_== None
    rs((), "/aaaa/bbbb") must_== None
    rs((), "/aaaa/bbbb/") must_== None
  }
  "Empty path" in {
    val rs = Routes[Any, Any](PathSpec.empty when (r => r))
    rs(1, "").map(_.apply) must_== Some(1)
    rs(2, "/").map(_.apply) must_== Some(2)
    rs((), "/aaaa") must_== None
    rs((), "/aaaa/bbbb") must_== None
  }
  "Const path" in {
    val rs = Routes[Any, Any]("a" whenDo (r => r))
    rs((), "") must_== None
    rs((), "a").isDefined must_== true
    rs((), "/a").isDefined must_== true
    rs((), "/a/").isDefined must_== true
    rs((), "/a/b").isDefined must_== false
  }
  "Star pattern" in {
    val rs1 = Routes[Any, String]("a" /> * when ((_: Any, s) => s))
    rs1((), "") must_== None
    rs1((), "/") must_== None
    rs1((), "/a") must_== None
    rs1((), "/aaaa") must_== None
    rs1((), "/a/bbbb").map(_.apply) must_== Some("bbbb")
    val rs2 = Routes[Any, String]("a" /> * /> * when ((_: Any, s1, s2) => s1 + s2))
    rs2((), "") must_== None
    rs2((), "/") must_== None
    rs2((), "/a/bbbb") must_== None
    rs2((), "/a/bbbb/") must_== None
    rs2((), "/a/bbbb/cccc").map(_.apply) must_== Some("bbbbcccc")
    rs2((), "/a/bbbb/cccc/d") must_== None
    val rs3 = Routes[Any, String](* when ((_: Any, s) => s))
    rs3((), "") must_== None
    rs3((), "aaaa").map(_.apply) must_== Some("aaaa")
    rs3((), "/aaaa").map(_.apply) must_== Some("aaaa")
    rs3((), "/aaaa/").map(_.apply) must_== Some("aaaa")
    rs3((), "/aaaa/bbbb") must_== None
    val rs4 = Routes[Any, String](* /> "b" when ((_: Any, s) => s))
    rs4((), "") must_== None
    rs4((), "/") must_== None
    rs4((), "/b") must_== None
    rs4((), "/aaaa") must_== None
    rs4((), "/aaaa/b").map(_.apply) must_== Some("aaaa")
  }
  "Maximum path spec length" in {
    val rs0 = Routes[Any, String](PathSpec.empty when (_ => ""))
    rs0((), "").map(_.apply) must_== Some("")
    val rs1 = Routes[Any, String](* when ((_: Any, s1) => s1))
    rs1((), "a").map(_.apply) must_== Some("a")
    val rs2 = Routes[Any, String](* /> * when ((_: Any, s1, s2) => s1 + s2))
    rs2((), "a/b").map(_.apply) must_== Some("ab")
    val rs3 = Routes[Any, String](* /> * /> * when ((_: Any, s1, s2, s3) => s1 + s2 + s3))
    rs3((), "a/b/c").map(_.apply) must_== Some("abc")
    val rs4 = Routes[Any, String](* /> * /> * /> * when { (_: Any, s1, s2, s3, s4) =>
      s1 + s2 + s3 + s4
    })
    rs4((), "a/b/c/d").map(_.apply) must_== Some("abcd")
    val rs5 = Routes[Any, String](* /> * /> * /> * /> * when { (_: Any, s1, s2, s3, s4, s5) =>
      s1 + s2 + s3 + s4 + s5
    })
    rs5((), "a/b/c/d/e").map(_.apply) must_== Some("abcde")
    val rs6 = Routes[Any, String](* /> * /> * /> * /> * /> * when { (_: Any, s1, s2, s3, s4, s5, s6) =>
      s1 + s2 + s3 + s4 + s5 + s6
    })
    rs6((), "a/b/c/d/e/f").map(_.apply) must_== Some("abcdef")
    val rs7 = Routes[Any, String](* /> * /> * /> * /> * /> * /> * when { (_: Any, s1, s2, s3, s4, s5, s6, s7) =>
      s1 + s2 + s3 + s4 + s5 + s6 + s7
    })
    rs7((), "a/b/c/d/e/f/g").map(_.apply) must_== Some("abcdefg")
  }
  */
}
| mvv/routineer | core/src/test/scala/com/github/mvv/routineer/tests/RoutesSpec.scala | Scala | apache-2.0 | 4,820 |
package BIDMat
import scala.math.Numeric._
import scala.reflect._
import java.util.Arrays
import java.util.Comparator
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
class DenseMat[@specialized(Double,Float,Int,Byte,Long) T]
(nr: Int, nc: Int, val data:Array[T])(implicit manifest:ClassTag[T]) extends Mat(nr, nc) {
  /** Allocates an uninitialized nr x nc matrix backed by a fresh flat array. */
  def this(nr:Int, nc:Int)(implicit manifest:ClassTag[T]) = this(nr, nc, new Array[T](nr*nc))
/** Return the (0,0) value as a scalar. */
def v:T =
if (nrows > 1 || ncols > 1) {
throw new RuntimeException("Matrix should be 1x1 to extract value")
} else {
data(0)
}
  /** Returns the type name of this matrix class ("DenseMat"). */
  override def mytype = "DenseMat"
/** Returns true if this matrix is a row or column vector, false otherwise. */
def isvector(): Boolean = {
if (nrows == 1 || ncols == 1) {
true
} else {
false
}
}
/** Bounds-checked matrix access, 0- or 1-based. */
def apply(r0:Int, c0:Int):T = {
val off = Mat.oneBased
val r = r0 - off
val c = c0 - off
if (r < 0 || r >= nrows || c < 0 || c >= ncols) {
throw new IndexOutOfBoundsException("("+(r+off)+","+(c+off)+") vs ("+nrows+","+ncols+")");
} else {
data(r+c*nrows)
}
}
/** Bounds-checked linear access, 0- or 1-based. */
def apply(i0:Int):T = {
val off = Mat.oneBased
val i = i0 - off
if (i < 0 || i >= length) {
throw new IndexOutOfBoundsException(""+(i+off)+" >= ("+length+")");
} else {
data(i)
}
}
  /** Unchecked 0-based access of element (r,c); no bounds validation, caller beware. */
  def get_(r:Int, c:Int):T = {
    data(r+c*nrows)
  }
/**
* Returns a single index using linear access (i.e., column-major order) of first occurrence of element '''a''',
* or -1 if it does not exist.
*
* Note that matrices can be 0- or 1-based; the latter occurs in languages like MATLAB. In that case, an
* element not present in the matrix gets an index of 0 instead of 1.
*
* In the following examples, the matrix is 0-based:
*
* {{{
* scala> val a = 3\\4 on 7\\4
* a: BIDMat.IMat =
* 3 4
* 7 4
*
* scala> a.indexOf(7)
* res20: Int = 1
*
* scala> a.indexOf(4)
* res21: Int = 2
* }}}
*/
def indexOf(a:T):Int = {
data.indexOf(a) + Mat.oneBased
}
/**
* Returns a tuple representing the (row, column) index of element '''a''' in this matrix, or (-1,0) if it
* does not exist and if the matrix is 0-based. 1-based arrays get (0,1) for a non-existent element.
*
* In the following examples, the matrix is 0-based:
*
* {{{
* scala> val a = 3\\4 on 7\\4
* a: BIDMat.IMat =
* 3 4
* 7 4
*
* scala> a.indexOf2(7)
* res22: (Int, Int) = (1,0)
*
* scala> a.indexOf2(4)
* res23: (Int, Int) = (0,1)
* }}}
*/
def indexOf2(a:T):(Int, Int) = {
val off = Mat.oneBased
val v = data.indexOf(a)
(v % nrows + off, v / nrows + off)
}
/** Update a matrix value, m(r,c) = v, 0- or 1-based. */
def _update(r0:Int, c0:Int, v:T):T = {
val off = Mat.oneBased
val r = r0 - off
val c = c0 - off
if (r < 0 || r >= nrows || c < 0 || c >= ncols) {
throw new IndexOutOfBoundsException("("+(r+off)+","+(c+off)+") vs ("+nrows+","+ncols+")");
} else {
data(r+c*nrows) = v
}
v
}
/** Update a matrix value with linear access, m(i) = v. */
def _update(i0:Int, v:T):T = {
val off = Mat.oneBased
val i = i0 - off
if (i < 0 || i >= length) {
throw new IndexOutOfBoundsException(""+(i+off)+" vs ("+length+")");
} else {
data(i) = v
}
v
}
  /** Unchecked 0-based assignment m(r,c) = v; no bounds validation. Returns v. */
  def set_(r:Int, c:Int, v:T):T = {
    data(r+c*nrows) = v
    v
  }
/** Returns the transpose of this matrix. */
def gt(oldmat:Mat):DenseMat[T] = {
var out:DenseMat[T] = DenseMat.newOrCheck(ncols, nrows, oldmat, GUID, "gt".hashCode)
var i = 0
while (i < nrows) {
var j = 0
while (j < ncols) {
out.data(j+i*ncols) = data(i+j*nrows)
j += 1
}
i += 1
}
out
}
/**
* Stack matrices vertically.
*
* Throws a RuntimeException if the number of columns does not match.
*
* Note: As is usual with DenseMat methods, it will return a "correct" matrix in that arithmetic, etc.
* should work, but it is not visible on the command line, so wrap it around with (for instance) a
* DMat to "see" the results on the command line.
*
* Example:
* {{{
* scala> val b = DMat(1\\2 on 3\\4)
* b: BIDMat.DMat =
* 1 2
* 3 4
*
* scala> b.gvertcat(b)
* res12: BIDMat.DenseMat[Double] =
*
*
*
*
*
* scala> DMat(b.gvertcat(b))
* res13: BIDMat.DMat =
* 1 2
* 3 4
* 1 2
* 3 4
*
* scala>
* }}}
*/
def gvertcat(a:DenseMat[T]):DenseMat[T] =
if (ncols != a.ncols) {
throw new RuntimeException("ncols must match")
} else {
var out = DenseMat.newOrCheck(nrows+a.nrows, ncols, null, GUID, a.GUID, "on".hashCode)
var i = 0
while (i < ncols) {
System.arraycopy(data, i*nrows, out.data, i*(nrows+a.nrows), nrows)
System.arraycopy(a.data, i*a.nrows, out.data, nrows+i*(nrows+a.nrows), a.nrows)
i += 1
}
out
}
/**
* Stack matrices horizontally.
*
* Throws a RuntimeException if the number of rows does not match.
*
* Note: As is usual with DenseMat methods, it will return a "correct" matrix in that arithmetic, etc.
* should work, but it is not visible on the command line, so wrap it around with (for instance) a
* DMat to "see" the results on the command line.
*
* Example:
* {{{
* scala> val b = DMat(1\\2 on 3\\4)
* b: BIDMat.DMat =
* 1 2
* 3 4
*
* scala> b.ghorzcat(b)
* res10: BIDMat.DenseMat[Double] =
*
*
*
* scala> DMat(b.ghorzcat(b))
* res11: BIDMat.DMat =
* 1 2 1 2
* 3 4 3 4
* }}}
*/
def ghorzcat(a:DenseMat[T]):DenseMat[T]=
if (nrows != a.nrows) {
throw new RuntimeException("nrows must match")
} else {
var out = DenseMat.newOrCheck(nrows, ncols+a.ncols, null, GUID, a.GUID, "\\\\".hashCode)
System.arraycopy(data, 0, out.data, 0, nrows*ncols)
System.arraycopy(a.data, 0, out.data, nrows*ncols, nrows*a.ncols)
out
}
/** Count number of non-zero entries. */
override def nnz:Int = {
var count:Int = 0
var i = 0
while (i < length) {
if (data(i) != 0) {
count += 1
}
i += 1
}
count
}
/** Helper function for find functions. */
def findInds(out:IMat, off:Int):IMat = {
var count = 0
var i = off
while (i < length+off) {
if (data(i) != 0) {
out.data(count) = i
count += 1
}
i += 1
}
out
}
  /** Linear indices of all non-zero elements, shifted by Mat.oneBased. */
  def find:IMat = {
    var out = IMat.newOrCheckIMat(nnz, 1, null, GUID, "find1".hashCode)
    findInds(out, Mat.oneBased)
  }
/** Find indices (i,j) for non-zero elements. */
def find2:(IMat, IMat) = {
val iout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "find2_1".hashCode)
val jout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "find2_2".hashCode)
findInds(iout, 0)
val off = Mat.oneBased
var i = 0
while (i < iout.length) {
val ival:Int = iout.data(i)
jout.data(i) = (ival / nrows) + off
iout.data(i) = (ival % nrows) + off
i += 1
}
(iout, jout)
}
/** Find tuples (i,j,v) for non-zero elements. */
def gfind3:(IMat, IMat, DenseMat[T]) = {
val iout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "gfind3_1".hashCode)
val jout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "gfind3_2".hashCode)
val vout = DenseMat.newOrCheck(nnz, 1, null, GUID, "gfind3_3".hashCode)
findInds(iout, 0)
val off = Mat.oneBased
var i = 0
while (i < iout.length) {
val ival:Int = iout.data(i)
vout.data(i) = data(ival)
jout.data(i) = (ival / nrows) + off
iout.data(i) = (ival % nrows) + off
i += 1
}
(iout, jout, vout)
}
/** Return a(im) where im is a matrix of indices. */
def gapply(im:IMat):DenseMat[T] =
im match {
case aa:MatrixWildcard => {
val out = DenseMat.newOrCheck(length, 1, null, GUID, im.GUID, "gapply1dx".hashCode)
System.arraycopy(data, 0, out.data, 0, out.length)
out
}
case _ => {
val out = DenseMat.newOrCheck(im.nrows, im.ncols, null, GUID, im.GUID, "gapply1d".hashCode)
var i = 0
val off = Mat.oneBased
while (i < out.length) {
val ind = im.data(i) - off
if (ind < 0 || ind >= length) {
throw new RuntimeException("bad linear index "+(ind+off)+" vs "+length)
} else {
out.data(i) = data(ind)
}
i += 1
}
out
}
}
  /** Scatter-assign a(im) = b; im and b must be same-sized (wildcard requires b to be a full-length column). Returns b. */
  def _update(im:IMat, b:DenseMat[T]):DenseMat[T] =
    im match {
      case aaa:MatrixWildcard => {
        // wildcard: b replaces the entire contents, viewed as one column
        if (length != b.length || b.ncols != 1) {
          throw new RuntimeException("dims mismatch")
        } else {
          System.arraycopy(b.data, 0, data, 0, length)
        }
        b
      }
      case _ => {
        if (im.nrows != b.nrows || im.ncols != b.ncols) {
          throw new RuntimeException("dims mismatch")
        } else {
          val off = Mat.oneBased
          var i = 0
          while (i < im.length) {
            // each index is validated after removing the oneBased offset
            val ind = im.data(i) - off
            if (ind < 0 || ind >= length) {
              throw new RuntimeException("bad linear index "+(ind+off)+" vs "+length)
            } else {
              data(ind) = b.data(i)
            }
            i += 1
          }
        }
        b
      }
    }
/** Implement a(im) = b where im is a matrix of indices to a, and b is a constant. */
def _update(inds:IMat, b:T):DenseMat[T] = {
inds match {
case aaa:MatrixWildcard => {
var i = 0
while (i < length) {
data(i) = b
i += 1
}
}
case _ => {
var i = 0
val off = Mat.oneBased
while (i < inds.length) {
val ind = inds.data(i) - off
if (ind < 0 || ind >= length) {
throw new RuntimeException("bad linear index "+(ind+off)+" vs "+length)
} else {
data(ind) = b
}
i += 1
}
}
}
this
}
/** Throws exception if a string is within a limited index range in a string matrix. */
def checkInds(inds:IMat, limit:Int, typ:String) = {
val off = Mat.oneBased
var i = 0
while (i < inds.length) {
val r = inds.data(i)-off
if (r >= limit) throw new RuntimeException(typ+ " index out of range %d %d" format (r, limit))
i += 1
}
}
  /** 2-D slicing a(rowinds, colinds); a MatrixWildcard (?) selects all rows or all columns. */
  def gapply(rowinds:IMat, colinds:IMat):DenseMat[T] = {
    var out:DenseMat[T] = null
    val off = Mat.oneBased
    rowinds match {
      case dummy:MatrixWildcard => {
        colinds match {
          case dummy2:MatrixWildcard => {
            // (?, ?): whole-matrix copy
            out = DenseMat.newOrCheck(nrows, ncols, null, GUID, rowinds.GUID, colinds.GUID, "gapply2d".hashCode)
            System.arraycopy(data, 0, out.data, 0, length)
          }
          case _ => {
            // (?, cols): copy the selected columns wholesale
            out = DenseMat.newOrCheck(nrows, colinds.length, null, GUID, rowinds.GUID, colinds.GUID, "gapply2d".hashCode)
            var i = 0
            while (i < colinds.length) {
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              System.arraycopy(data, c*nrows, out.data, i*nrows, nrows)
              i += 1
            }
          }
        }
      }
      case _ => {
        checkInds(rowinds, nrows, "row")
        colinds match {
          case dummy2:MatrixWildcard => {
            // (rows, ?): gather the selected rows from every column
            out = DenseMat.newOrCheck(rowinds.length, ncols, null, GUID, rowinds.GUID, colinds.GUID, "gapply2d".hashCode)
            var i = 0
            while (i < ncols) {
              var j = 0
              while (j < out.nrows) {
                val r = rowinds.data(j)-off
                out.data(j+i*out.nrows) = data(r+i*nrows)
                j += 1
              }
              i += 1
            }
          }
          case _ => {
            // (rows, cols): general element-wise gather
            out = DenseMat.newOrCheck(rowinds.length, colinds.length, null, GUID, rowinds.GUID, colinds.GUID, "gapply2d".hashCode)
            var i = 0
            while (i < out.ncols) {
              var j = 0
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              while (j < out.nrows) {
                val r = rowinds.data(j)-off
                out.data(j+i*out.nrows) = data(r+nrows*c)
                j += 1
              }
              i += 1
            }
          }
        }
      }
    }
    out
  }
  /** Copies columns [a, b) into omat starting at column c (all indices honor Mat.oneBased); omat is reallocated if too small. */
  def gcolslice(a:Int, b:Int, omat:Mat, c:Int):DenseMat[T] = {
    val off = Mat.oneBased
    val out = DenseMat.newOrCheck[T](nrows, b-a+c-off, omat, GUID, nrows, b-a+c-off, "gcolslice".##)
    if (a-off < 0) throw new RuntimeException("colslice index out of range %d" format (a))
    if (b-off > ncols) throw new RuntimeException("colslice index out of range %d %d" format (b, ncols))
    // columns are contiguous in column-major storage, so one arraycopy suffices
    System.arraycopy(data, (a-off)*nrows, out.data, (c-off)*nrows, (b-a)*nrows)
    out
  }
  /** Copies rows [a, b) into omat starting at row c (all indices honor Mat.oneBased); omat is reallocated if too small. */
  def growslice(a:Int, b:Int, omat:Mat, c:Int):DenseMat[T] = {
    val off = Mat.oneBased
    val out = DenseMat.newOrCheck[T](b-a+c-off, ncols, omat, GUID, b-a+c-off, ncols, "growslice".##)
    if (a-off < 0) throw new RuntimeException("rowslice index out of range %d" format (a))
    if (b-off > nrows) throw new RuntimeException("rowslice index out of range %d %d" format (b, nrows))
    // rows are strided, so copy the row band column by column
    var i = 0
    while (i < ncols) {
      System.arraycopy(data, (a-off)+i*nrows, out.data, (c-off)+i*out.nrows, (b-a))
      i += 1
    }
    out
  }
  /** Slicing a(iv, j): single-column case, delegates to the vector-vector form. */
  def gapply(iv:IMat, jv:Int):DenseMat[T] = {
    gapply(iv, IMat.ielem(jv))
  }
  /** Slicing a(i, jv): single-row case, delegates to the vector-vector form. */
  def gapply(i:Int, jv:IMat):DenseMat[T] = {
    gapply(IMat.ielem(i), jv)
  }
  /** Sliced assignment a(rowinds, colinds) = b; a MatrixWildcard (?) selects all rows or all columns. Returns this. */
  def _update(rowinds:IMat, colinds:IMat, b:DenseMat[T]):DenseMat[T] = {
    val off = Mat.oneBased
    rowinds match {
      case dummy:MatrixWildcard => {
        colinds match {
          case dummy2:MatrixWildcard => {
            // (?, ?): b must match this matrix exactly
            if (nrows != b.nrows || ncols != b.ncols) {
              throw new RuntimeException("dims mismatch in assignment")
            }
            System.arraycopy(b.data, 0, data, 0, length)
          }
          case _ => {
            // (?, cols): replace the selected columns wholesale
            if (nrows != b.nrows || colinds.length != b.ncols) {
              throw new RuntimeException("dims mismatch in assignment")
            }
            var i = 0
            while (i < colinds.length) {
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              System.arraycopy(b.data, i*nrows, data, c*nrows, nrows)
              i += 1
            }
          }
        }
      }
      case _ => {
        checkInds(rowinds, nrows, "row")
        colinds match {
          case dummy2:MatrixWildcard => {
            // (rows, ?): scatter b's rows into the selected rows of every column
            if (rowinds.length != b.nrows || ncols != b.ncols) {
              throw new RuntimeException("dims mismatch in assignment")
            }
            var i = 0
            while (i < ncols) {
              var j = 0
              while (j < b.nrows) {
                val r = rowinds.data(j)-off
                data(r+i*nrows) = b.data(j+i*b.nrows)
                j += 1
              }
              i += 1
            }
          }
          case _ => {
            // (rows, cols): general element-wise scatter
            if (rowinds.length != b.nrows || colinds.length != b.ncols) {
              throw new RuntimeException("dims mismatch in assignment")
            }
            var i = 0
            while (i < b.ncols) {
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              var j = 0
              while (j < b.nrows) {
                val r = rowinds.data(j)-off
                data(r+nrows*c) = b.data(j+i*b.nrows)
                j += 1
              }
              i += 1
            }
          }
        }
      }
    }
    this
  }
/** Sliced assignment, where m(iv,jv) = b. Varies depending on type of matrices involved. */
override def update(iv:IMat, jv:IMat, b:Mat):Mat = {
(this, b) match {
case (me:FMat, bb:FMat) => me.update(iv, jv, bb):FMat
case (me:DMat, bb:DMat) => me.update(iv, jv, bb):DMat
case (me:IMat, bb:IMat) => me.update(iv, jv, bb):IMat
case (me:CMat, bb:CMat) => me.update(iv, jv, bb):CMat
}
}
  /** Sliced constant assignment a(rowinds, colinds) = b; a MatrixWildcard (?) selects all rows or all columns. Returns this. */
  def _update(rowinds:IMat, colinds:IMat, b:T):DenseMat[T] = {
    val off = Mat.oneBased
    rowinds match {
      case dummy:MatrixWildcard => {
        colinds match {
          case dummy2:MatrixWildcard => {
            // (?, ?): fill the entire matrix
            var i = 0
            while (i < length) {
              data(i) = b
              i += 1
            }
          }
          case _ => {
            // (?, cols): fill the selected columns
            var i = 0
            while (i < colinds.length) {
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              var j = 0
              while (j < nrows) {
                data(j + c*nrows) = b
                j += 1
              }
              i += 1
            }
          }
        }
      }
      case _ => {
        checkInds(rowinds, nrows, "row")
        colinds match {
          case dummy2:MatrixWildcard => {
            // (rows, ?): fill the selected rows in every column
            var i = 0
            while (i < ncols) {
              var j = 0
              while (j < rowinds.length) {
                val r = rowinds.data(j)-off
                data(r+i*nrows) = b
                j += 1
              }
              i += 1
            }
          }
          case _ => {
            // (rows, cols): fill the selected row/column cross product
            var i = 0
            while (i < colinds.length) {
              val c = colinds.data(i) - off
              if (c >= ncols) throw new RuntimeException("col index out of range %d %d" format (c, ncols))
              var j = 0
              while (j < rowinds.length) {
                val r = rowinds.data(j)-off
                data(r+nrows*c) = b
                j += 1
              }
              i += 1
            }
          }
        }
      }
    }
    this
  }
  /** Constant assignment a(iv, j) = b: single-column case, delegates to the vector-vector form. */
  def _update(iv:IMat, j:Int, b:T):DenseMat[T] = {
    _update(iv, IMat.ielem(j), b)
  }
  /** Constant assignment a(i, jv) = b: single-row case, delegates to the vector-vector form. */
  def _update(i:Int, jv:IMat, b:T):DenseMat[T] = {
    _update(IMat.ielem(i), jv, b)
  }
  /** Renders element i for toString; this generic base emits a blank (concrete subclasses presumably override — confirm). */
  def printOne(i:Int):String = " "
  /** Renders the matrix for display, truncating to the terminal width and a bounded row count. */
  override def toString:String = {
    val sb:StringBuilder = new StringBuilder
    if (nrows == 1) {
      // row vector: comma-separated, capped at 20000 entries
      if (ncols > 0) sb.append(printOne(0))
      var i = 1
      while (i < math.min(20000, ncols)) {
        sb.append(",")
        sb.append(printOne(i))
        i += 1
      }
    } else {
      val nChars = Mat.terminalWidth-4
      val maxRows = 640/nChars
      var maxCols = nChars
      var fieldWidth = 4
      var icols = 0
      // grow fieldWidth to fit the widest visible element, and shrink the
      // number of printable columns (icols) until a row fits in nChars
      while (icols < math.min(ncols, maxCols)) {
        var newWidth = fieldWidth
        for (j <- 0 until math.min(nrows,maxRows)) newWidth = math.max(newWidth, 2+(printOne(j+nrows*icols).length))
        if ((icols+1)*newWidth < nChars) {
          fieldWidth = newWidth
          icols += 1
        } else {
          maxCols = icols
        }
      }
      val somespaces = " "
      for (i <- 0 until math.min(nrows, maxRows)) {
        for (j <- 0 until math.min(ncols, icols)) {
          // right-align each element within fieldWidth
          val str = printOne(i+j*nrows)
          sb.append(somespaces.substring(0,fieldWidth-str.length)+str)
        }
        if (ncols > icols) {
          sb.append("...")
        }
        sb.append("\\n")
      }
      // trailing ".." row marks vertically-truncated output
      if (nrows > maxRows) {
        for (j <- 0 until math.min(ncols, maxCols)) {
          sb.append(somespaces.substring(0, fieldWidth-2)+"..")
        }
        sb.append("\\n")
      }
    }
    sb.toString()
  }
/** Clears the elements of a matrix by filling in 0s and nulls. */
override def clear:DenseMat[T] ={
if (length == 0) {
this
} else {
val v = data(0)
v match {
case a:Float => Arrays.fill(data.asInstanceOf[Array[Float]], 0, length, 0)
case a:Double => Arrays.fill(data.asInstanceOf[Array[Double]], 0, length, 0)
case a:Int => Arrays.fill(data.asInstanceOf[Array[Int]], 0, length, 0)
case _ => Arrays.fill(data.asInstanceOf[Array[AnyRef]], 0, length, null)
}
}
this
}
/**
* Sets some upper right triangular parts of a matrix to be '''v'''; actual elements depend on '''off'''.
*
* Examples:
*
* {{{
* scala> val a = ones(3,3)
* a: BIDMat.FMat =
* 1 1 1
* 1 1 1
* 1 1 1
*
* scala> a.setUpper(2,1)
* res52: BIDMat.DenseMat[Float] =
* 2 2 2
* 1 2 2
* 1 1 2
*
* scala> val b = ones(3,3)
* b: BIDMat.FMat =
* 1 1 1
* 1 1 1
* 1 1 1
* scala> b.setUpper(2,0)
* res53: BIDMat.DenseMat[Float] =
* 1 2 2
* 1 1 2
* 1 1 1
* }}}
*
* @param v The element which we assign to certain positions of the matrix's elements.
* @param off Determines the "first" diagonal of the matrix to which we assign v, and all diagonals
* above that will also be assigned v.
*/
def setUpper(v:T, off:Int) = {
var i = 0
while (i < ncols) {
var j = 0
while (j < i+off) {
data(j + i*nrows) = v
j += 1
}
i += 1
}
this
}
/**
* Similar to setUpper(v,off), except we set the lower left part of the matrix.
*
* Examples:
* {{{
* scala> val a = ones(3,3)
* a: BIDMat.FMat =
* 1 1 1
* 1 1 1
* 1 1 1
*
* scala> a.setLower(2,2)
* res56: BIDMat.DenseMat[Float] =
* 1 1 1
* 1 1 1
* 1 1 1
*
* scala> a.setLower(2,1)
* res57: BIDMat.DenseMat[Float] =
* 1 1 1
* 1 1 1
* 2 1 1
* }}}
*
* @param v The element which we assign to certain positions of the matrix's elements.
* @param off Determines the "first" diagonal of the matrix to which we assign v, and all diagonals
* below that will also be assigned v.
*/
def setLower(v:T, off:Int) = {
var i = 0
while (i < ncols) {
var j = math.max(0,i+1+off)
while (j < nrows) {
data(j + i*nrows) = v
j += 1
}
i += 1
}
this
}
  /** Element-wise binary op with broadcasting: a row/column vector on either side is expanded across the other operand. */
  def ggMatOp(aa:DenseMat[T], op2:(T,T) => T, oldmat:Mat):DenseMat[T] = {
    if (nrows==aa.nrows && ncols==1) {
      // this is a column vector: broadcast it across aa's columns
      val out = DenseMat.newOrCheck(nrows, aa.ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += aa.length
      var i = 0
      while (i < aa.ncols) {
        var j = 0
        while (j < nrows) {
          out.data(j+i*nrows) = op2(data(j), aa.data(j+i*aa.nrows))
          j += 1
        }
        i += 1
      }
      out
    } else if (ncols==aa.ncols && nrows==1) {
      // this is a row vector: broadcast it down aa's rows
      val out = DenseMat.newOrCheck[T](aa.nrows, ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += aa.length
      var i = 0
      while (i < ncols) {
        var j = 0
        while (j < aa.nrows) {
          out.data(j+i*aa.nrows) = op2(data(i), aa.data(j+i*aa.nrows))
          j += 1
        }
        i += 1
      }
      out
    } else if (nrows==aa.nrows && aa.ncols==1) {
      // aa is a column vector: broadcast it across this matrix's columns
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        var j = 0
        while (j < nrows) {
          out.data(j+i*nrows) = op2(data(j+i*nrows), aa.data(j))
          j += 1
        }
        i += 1
      }
      out
    } else if (ncols==aa.ncols && aa.nrows==1) {
      // aa is a row vector: broadcast it down this matrix's rows
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        var j = 0
        while (j < nrows) {
          out.data(j+i*nrows) = op2(data(j+i*nrows), aa.data(i))
          j += 1
        }
        i += 1
      }
      out
    } else ggMatOpStrict(aa, op2, oldmat)
  }
  /**
   * Element-wise binary op with strict dimension checking: dims must match
   * exactly, or one of the two operands must be a 1x1 scalar.
   */
  def ggMatOpStrict(aa:DenseMat[T], op2:(T,T) => T, oldmat:Mat):DenseMat[T] =
    if (nrows==aa.nrows && ncols==aa.ncols) {
      // same shape: straight element-wise application
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < aa.length) {
        out.data(i) = op2(data(i), aa.data(i))
        i += 1
      }
      out
    } else if (aa.nrows == 1 && aa.ncols == 1) {
      // aa is scalar: apply it on the right of every element
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += length
      val aval = aa.data(0)
      var i = 0
      while (i < length) {
        out.data(i) = op2(data(i), aval)
        i += 1
      }
      out
    } else if (nrows == 1 && ncols == 1) {
      // this is scalar: apply it on the left of every aa element
      val out = DenseMat.newOrCheck[T](aa.nrows, aa.ncols, oldmat, GUID, aa.GUID, op2.hashCode)
      Mat.nflops += aa.length
      val aval = data(0)
      var i = 0
      while (i < aa.length) {
        out.data(i) = op2(aval, aa.data(i))
        i += 1
      }
      out
    } else throw new RuntimeException("dims incompatible");
/** Apply the binary operation op2 to the matrix and a scalar argument. */
def ggMatOpScalar(a:T, op2:(T,T) => T, oldmat:Mat):DenseMat[T] = {
val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, a.hashCode, op2.hashCode)
Mat.nflops += length
var i = 0
while (i < length) {
out.data(i) = op2(data(i), a)
i += 1
}
out
}
  /**
   * Element-wise binary op with broadcasting, implemented with vectorized
   * primitives: opv processes a whole strided run per call (stride 0 pins a
   * broadcast operand in place).
   */
  def ggMatOpv(aa:DenseMat[T], opv:(Array[T],Int,Int,Array[T],Int,Int,Array[T],Int,Int,Int) => T, oldmat:Mat):DenseMat[T] =
    if (nrows==aa.nrows && ncols==1 && aa.ncols > 1) {
      // this is a column vector: reuse it (offset 0) against each column of aa
      val out = DenseMat.newOrCheck[T](nrows, aa.ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      Mat.nflops += aa.length
      var i = 0
      while (i < aa.ncols) {
        opv(data, 0, 1, aa.data, i*aa.nrows, 1, out.data, i*nrows, 1, nrows)
        i += 1
      }
      out
    } else if (ncols==aa.ncols && nrows==1 && aa.nrows > 1) {
      // this is a row vector: stride 0 repeats element i down each column
      val out = DenseMat.newOrCheck[T](aa.nrows, ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      Mat.nflops += aa.length
      var i = 0
      while (i < ncols) {
        opv(data, i, 0, aa.data, i*aa.nrows, 1, out.data, i*aa.nrows, 1, aa.nrows)
        i += 1
      }
      out
    } else if (nrows==aa.nrows && aa.ncols==1 && ncols > 1) {
      // aa is a column vector: reuse it against each of this matrix's columns
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        opv(data, i*nrows, 1, aa.data, 0, 1, out.data, i*nrows, 1, nrows)
        i += 1
      }
      out
    } else if (ncols==aa.ncols && aa.nrows==1 && nrows > 1) {
      // aa is a row vector: stride 0 repeats aa(i) down each column
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        opv(data, i*nrows, 1, aa.data, i, 0, out.data, i*nrows, 1, nrows)
        i += 1
      }
      out
    } else ggMatOpStrictv(aa, opv, oldmat);
  /** Elementwise operation via vector primitive `opv` for same-shape operands, or where
   *  one side is a 1x1 scalar matrix (broadcast with stride 0). Work over ~100k elements
   *  is split across Mat.numThreads futures, each handling a contiguous slice.
   *  Throws RuntimeException for any other shape combination. */
  def ggMatOpStrictv(aa:DenseMat[T], opv:(Array[T],Int,Int,Array[T],Int,Int,Array[T],Int,Int,Int) => T, oldmat:Mat):DenseMat[T] = {
    var out:DenseMat[T] = null
    var mylen = 0
    if ((nrows==aa.nrows && ncols==aa.ncols) || (aa.nrows == 1 && aa.ncols == 1)) {
      out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      mylen = length
    } else if (nrows == 1 && ncols == 1) {
      out = DenseMat.newOrCheck[T](aa.nrows, aa.ncols, oldmat, GUID, aa.GUID, opv.hashCode)
      mylen = aa.length
    } else throw new RuntimeException("dims incompatible")
    if (mylen > 100000 && Mat.numThreads > 1) {
      // one "done" flag per worker; the long arithmetic below avoids Int overflow
      // when computing slice boundaries on very large matrices
      val done = IMat(1, Mat.numThreads)
      for (ithread<- 0 until Mat.numThreads) {
        val istart = (1L*ithread*mylen/Mat.numThreads).toInt
        val len = (1L*(ithread+1)*mylen/Mat.numThreads).toInt - istart
        // NOTE(review): executor comes from an ExecutionContext imported at file top
        // (not visible here) — confirm it is a bounded pool suitable for CPU work
        Future {
          if (nrows==aa.nrows && ncols==aa.ncols) {
            opv(data, istart, 1, aa.data, istart, 1, out.data, istart, 1, len)
          } else if (aa.nrows == 1 && aa.ncols == 1) {
            opv(data, istart, 1, aa.data, 0, 0, out.data, istart, 1, len)
          } else {
            opv(data, 0, 0, aa.data, istart, 1, out.data, istart, 1, len)
          }
          done(ithread) = 1
        }
      }
      // busy-wait until every worker has flagged completion
      // NOTE(review): done is plain shared memory with no barrier — relies on the
      // JMM behavior of the executor's task hand-off; verify visibility is guaranteed
      while (SciFunctions.sum(done).v < Mat.numThreads) {Thread.`yield`()}
    } else if (nrows==aa.nrows && ncols==aa.ncols) {
      opv(data, 0, 1, aa.data, 0, 1, out.data, 0, 1, aa.length)
    } else if (aa.nrows == 1 && aa.ncols == 1) {
      opv(data, 0, 1, aa.data, 0, 0, out.data, 0, 1, length)
    } else if (nrows == 1 && ncols == 1) {
      opv(data, 0, 0, aa.data, 0, 1, out.data, 0, 1, aa.length)
    }
    Mat.nflops += mylen
    out
  }
// TODO
def ggMatOpScalarv(a:T, opv:(Array[T],Int,Int,Array[T],Int,Int,Array[T],Int,Int,Int) => T, oldmat:Mat):DenseMat[T] = {
val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, a.hashCode, opv.##)
Mat.nflops += length
val aa = new Array[T](1)
aa(0) = a
opv(data, 0, 1, aa, 0, 0, out.data, 0, 1, length)
out
}
// TODO
def ggReduceOp(dim0:Int, op1:(T) => T, op2:(T,T) => T, oldmat:Mat):DenseMat[T] = {
var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0)
if (dim == 1) {
val out = DenseMat.newOrCheck[T](1, ncols, oldmat, GUID, 1, op2.##)
Mat.nflops += length
var i = 0
while (i < ncols) {
var j = 1
var acc = op1(data(i*nrows))
while (j < nrows) {
acc = op2(acc, data(j+i*nrows))
j += 1
}
out.data(i) = acc
i += 1
}
out
} else if (dim == 2) {
val out = DenseMat.newOrCheck[T](nrows, 1, oldmat, GUID, 2, op2.##)
Mat.nflops += length
var j = 0
while (j < nrows) {
out.data(j) = op1(data(j))
j += 1
}
var i = 1
while (i < ncols) {
var j = 0
while (j < nrows) {
out.data(j) = op2(out.data(j), data(j+i*nrows))
j += 1
}
i += 1
}
out
} else
throw new RuntimeException("index must 1 or 2");
}
  /** Arg-reduction along one dimension: `op2(candidate, best)` is a strict "is better"
   *  predicate (e.g. &lt; for argmin, &gt; for argmax). Returns both the winning values and
   *  their (zero-based) indices. dim0==1 scans each column; dim0==2 scans each row;
   *  dim0==0 picks dimension 2 for row vectors and dimension 1 otherwise. */
  def ggOpt2(dim0:Int, op2:(T,T) => Boolean):(DenseMat[T],IMat) = {
    var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0)
    if (dim == 1) {
      val out = DenseMat.newOrCheck[T](1, ncols, null, GUID, op2.hashCode)
      val iout = IMat(1, ncols)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        var j = 1
        // seed with the first element of the column, index 0
        var acc = data(i*nrows)
        var iacc = 0
        while (j < nrows) {
          val v = data(j+i*nrows)
          // strict comparison keeps the FIRST winner on ties
          if (op2(v, acc)) {
            acc = v
            iacc = j
          }
          j += 1
        }
        out.data(i) = acc
        iout.data(i) = iacc
        i += 1
      }
      (out, iout)
    } else if (dim == 2) {
      val out = DenseMat.newOrCheck[T](nrows, 1, null, GUID, op2.hashCode)
      val iout = IMat(nrows, 1)
      Mat.nflops += length
      // seed every row's winner from column 0
      var j = 0
      while (j < nrows) {
        out.data(j) = data(j)
        iout.data(j) = 0
        j += 1
      }
      var i = 1
      while (i < ncols) {
        var j = 0
        while (j < nrows) {
          val v = data(j+i*nrows)
          if (op2(v, out.data(j))) {
            out.data(j) = v
            iout.data(j) = i
          }
          j += 1
        }
        i += 1
      }
      (out, iout)
    } else
      throw new RuntimeException("index must 1 or 2");
  }
  /** Reduce along one dimension using the vector primitive `opv` for the bulk of the
   *  fold; `op1` seeds the accumulator from the first element. dim0==1 reduces columns
   *  (1 x ncols result), dim0==2 reduces rows (nrows x 1 result), dim0==0 picks
   *  dimension 2 for row vectors and dimension 1 otherwise. */
  def ggReduceOpv(dim0:Int, op1:(T) => T, opv:(Array[T],Int,Int,Array[T],Int,Int,Array[T],Int,Int,Int) => T, oldmat:Mat):DenseMat[T] = {
    var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0)
    if (dim == 1) {
      val out = DenseMat.newOrCheck[T](1, ncols, oldmat, GUID, 1, opv.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        out.data(i) = op1(data(i*nrows))
        // stride 0 on both accumulator slots folds the remaining nrows-1 column
        // elements into out.data(i) in place
        opv(data, i*nrows+1, 1, out.data, i, 0, out.data, i, 0, nrows-1)
        i += 1
      }
      out
    } else if (dim == 2) {
      val out = DenseMat.newOrCheck[T](nrows, 1, oldmat, GUID, 2, opv.hashCode)
      Mat.nflops += length
      // seed row accumulators from column 0
      var j = 0
      while (j < nrows) {
        out.data(j) = op1(data(j))
        j += 1
      }
      // fold each further column into the accumulator vector with one primitive call
      var i = 1
      while (i < ncols) {
        opv(data, i*nrows, 1, out.data, 0, 1, out.data, 0, 1, nrows)
        i += 1
      }
      out
    } else
      throw new RuntimeException("index must 1 or 2");
  }
  /** Cumulative (scan) reduction: like ggReduceOp but keeps every partial result, so the
   *  output has the same shape as this matrix. dim0==1 scans down each column; dim0==2
   *  scans across each row; dim0==0 picks dimension 2 for row vectors, else 1.
   *  `op1` transforms the first element; `op2` combines the running value with the next. */
  def ggReduceAll(dim0:Int, op1:(T) => T, op2:(T,T) => T, oldmat:Mat):DenseMat[T] = {
    var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0)
    if (dim == 1) {
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, 1, op2.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        val i0 = i*nrows
        var j = 1
        var acc = op1(data(i0))
        out.data(i0) = acc
        // running fold down the column, storing each intermediate
        while (j < nrows) {
          acc = op2(acc, data(j+i0))
          out.data(j+i0) = acc
          j += 1
        }
        i += 1
      }
      out
    } else if (dim == 2) {
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, 2, op2.hashCode)
      Mat.nflops += length
      // first column is just op1 applied elementwise
      var j = 0
      while (j < nrows) {
        out.data(j) = op1(data(j))
        j += 1
      }
      var i = 1
      while (i < ncols) {
        val i0 = i*nrows
        var j = 0
        while (j < nrows) {
          // combine the previous column's running value with this column's element
          out.data(j+i0) = op2(out.data(j+i0-nrows), data(j+i0))
          j += 1
        }
        i += 1
      }
      out
    } else
      throw new RuntimeException("index must 1 or 2")
  }
  /** Cumulative (scan) reduction implemented with the vector primitive `opv`; same
   *  semantics as ggReduceAll but without the op1 seed transform (the first element or
   *  column is copied through unchanged). Output has the same shape as this matrix. */
  def ggReduceAllv(dim0:Int, opv:(Array[T],Int,Int,Array[T],Int,Int,Array[T],Int,Int,Int) => T, oldmat:Mat):DenseMat[T] = {
    var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0)
    if (dim == 1) {
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, 1, opv.hashCode)
      Mat.nflops += length
      var i = 0
      while (i < ncols) {
        val i0 = i*nrows
        out.data(i0) = data(i0)
        // scan down the column: each output reads the previous output (offset by one)
        opv(data, i0+1, 1, out.data, i0, 1, out.data, i0+1, 1, nrows-1)
        i += 1
      }
      out
    } else if (dim == 2) {
      val out = DenseMat.newOrCheck[T](nrows, ncols, oldmat, GUID, 2, opv.hashCode)
      Mat.nflops += length
      // first column passes through unchanged
      var j = 0
      while (j < nrows) {
        out.data(j) = data(j)
        j += 1
      }
      var i = 1
      while (i < ncols) {
        val i0 = i*nrows
        // combine this column with the previous output column in one primitive call
        opv(data, i0, 1, out.data, i0-nrows, 1, out.data, i0, 1, nrows)
        i += 1
      }
      out
    } else
      throw new RuntimeException("index must 1 or 2")
  }
/**
* Performs the Hadamard (element-wise) product between this matrix and '''a''', then adds the elements
* together into a single double.
*
* Throws a RuntimeException if the dimensions are incompatible.
*
* Example:
* {{{
* scala> a
* res8: BIDMat.IMat =
* 1 2 3
* 4 5 6
* 7 8 9
*
* scala> val b = IMat(ones(3,3))
* b: BIDMat.IMat =
* 1 1 1
* 1 1 1
* 1 1 1
*
* scala> a.ddot(b)
* res10: Double = 45.0
* }}}
*
* @param a A matrix with the same dimensions and compatible type as this matrix.
*/
def ddot (a : DenseMat[T])(implicit numeric:Numeric[T]):Double =
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("dot dims not compatible")
} else {
Mat.nflops += 2 * length
var v = 0.0
var i = 0
while (i < length){
v += numeric.toDouble(numeric.times(data(i),a.data(i)))
i += 1
}
v
}
// TODO
def gdot (a : DenseMat[T], oldmat:Mat)(implicit numeric:Numeric[T]):DenseMat[T] = {
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("dot dims not compatible")
} else {
val out = DenseMat.newOrCheck[T](1, ncols, oldmat, GUID, a.GUID, "gdot".hashCode)
Mat.nflops += 2 * length
var i = 0
while (i < ncols){
val ix = i*nrows
var j = 0
var sum = numeric.zero
while (j < nrows) {
sum = numeric.plus(sum, numeric.times(data(j+ix),a.data(j+ix)))
j += 1
}
out.data(i) = sum
i += 1
}
out
}
}
// TODO
def gdotr (a : DenseMat[T], oldmat:Mat)(implicit numeric:Numeric[T]):DenseMat[T] =
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("dotr dims not compatible")
} else {
val out = DenseMat.newOrCheck[T](nrows, 1, oldmat, GUID, a.GUID, "gdotr".##)
Mat.nflops += 2 * length
var i = 0
while (i < ncols){
val ix = i*nrows
var j = 0
while (j < nrows) {
out.data(j) = numeric.plus(out.data(j), numeric.times(data(j+ix),a.data(j+ix)))
j += 1
}
i += 1
}
out
}
/**
* Creates a diagonal, square matrix with this vector's elements in the diagonal. Note that while this will
* return a correct matrix, but will be printed as an empty matrix on the command line, so it's recommended
* to use mkdiag(m) instead of this (which is m.mkdiag).
*
* Throws exception if applied to a non-vector matrix.
*
* Example:
* {{{
* scala> val a = (1 on 2 on 3).mkdiag
* a: BIDMat.DenseMat[Int] =
*
*
*
*
* scala> a(1,1)
* res2: Int = 2
*
* scala> a(1,2)
* res3: Int = 0
* }}}
*
* TODO I suggest making this package-protected. ~Daniel Seita
*/
def mkdiag = {
if (math.min(nrows, ncols) > 1) {
throw new RuntimeException("mkdiag needs a vector input")
}
val n = math.max(nrows, ncols)
val out = DenseMat.newOrCheck[T](n, n, null, GUID, "mkdiag".hashCode)
var i = 0
while (i < n) {
out.data(i*(n+1)) = data(i)
i += 1
}
out
}
/**
* Gets the leading diagonal of this matrix as a vector. Again, like m.mkdiag, m.getdiag will return a
* seemingly empty matrix on the command line, but it holds the correct elements. Use getdiag(m) for most
* purposes instead of this method.
*
* Example:
* {{{
* scala> val a = 1\\2\\3 on 4\\5\\6 on 7\\8\\9
* a: BIDMat.IMat =
* 1 2 3
* 4 5 6
* 7 8 9
*
* scala> a.getdiag
* res4: BIDMat.DenseMat[Int] =
*
*
*
*
* scala> a(0)
* res5: Int = 1
*
* scala> a(1)
* res6: Int = 4
*
* scala> a(2)
* res7: Int = 7
* }}}
*
* TODO I suggest making this package-protected. ~Daniel Seita
*/
def getdiag = {
val n = math.min(nrows, ncols)
val out = DenseMat.newOrCheck[T](n, 1, null, GUID, "getdiag".hashCode)
var i = 0
while (i < n) {
out.data(i) = data(i*(nrows+1))
i += 1
}
out
}
}
object DenseMat {
  /** Vector primitive: three-way compare of slices of `a` and `b`, writing
   *  `xmap(cmp+1)` into `c` (xmap holds the outputs for &lt;, ==, &gt; in slots 0,1,2).
   *  Strides of 0 broadcast a single element. Returns numeric.zero. */
  def vecCmp[@specialized(Double, Float, Int, Byte, Long) T](xmap:Array[T])(a:Array[T], a0:Int, ainc:Int, b:Array[T], b0:Int, binc:Int, c:Array[T], c0:Int, cinc:Int, n:Int)
  (implicit numeric:Numeric[T]):T = {
    var ai = a0; var bi = b0; var ci = c0; val cend = c0 + n
    while (ci < cend) {
      val indx = numeric.compare(a(ai), b(bi)); c(ci) = xmap(indx+1); ai += ainc; bi += binc; ci += cinc
    }
    numeric.zero
  }
  /** Build a checked index-lookup function over [0, n) from an index matrix.
   *  A MatrixWildcard yields the identity mapping (select everything); any other IMat
   *  yields a function that converts from the configured index base (Mat.oneBased)
   *  and bounds-checks each lookup. */
  def getInds(ii:IMat, n:Int):(Int)=>Int = {
    val off = Mat.oneBased
    ii match {
      case aaa:MatrixWildcard => {
        (x:Int)=>x
      }
      case _ => {
        (i:Int) => {
          val ind = ii.data(i) - off
          if (ind < 0 || ind >= n) {
            throw new RuntimeException("index out of range "+(ind+off)+" vs "+n)
          }
          ind
        }
      }
    }
  }
  /** Convert a sequence of (possibly one-based, per Mat.oneBased) indices into a
   *  zero-based, bounds-checked Array[Int].
   *  Bug fix: the original allocated the result but never stored the converted indices
   *  (always returning zeros), and sized the array min(in.length, n) which could not
   *  hold one entry per input. It now returns one checked index per input element. */
  def getSInds(in:Seq[Int], n:Int):Array[Int] = {
    val inds:Array[Int] = new Array[Int](in.length)
    val off = Mat.oneBased
    var i = 0
    while (i < in.length) {
      val ind = in(i) - off
      if (ind < 0 || ind >= n) {
        throw new RuntimeException("index out of range "+(ind+off)+" vs "+n)
      }
      inds(i) = ind
      i += 1
    }
    inds
  }
  /** Sort the slice [from, to) of a primitive array in ascending order using
   *  java.util.Arrays.sort.
   *  NOTE(review): Short and Char arrays fall through to a MatchError — confirm
   *  callers only pass the specialized element types listed here. */
  def genSort[@specialized(Double, Float, Int, Byte, Long) T](a:Array[T],from:Int,to:Int):Unit = {
    a match {
      case aa:Array[Double] => {
        Arrays.sort(aa, from, to)
      }
      case aa:Array[Float] => {
        Arrays.sort(aa, from, to)
      }
      case aa:Array[Int] => {
        Arrays.sort(aa, from, to)
      }
      case aa:Array[Long] => {
        Arrays.sort(aa, from, to)
      }
      case aa:Array[Byte] => {
        Arrays.sort(aa, from, to)
      }
    }
  }
  /** Sort the whole primitive array in ascending order. */
  def genSort[@specialized(Double, Float, Int, Byte, Long) T](a:Array[T]):Unit = {
    genSort(a, 0, a.size)
  }
  /** Reverse the slice [from, to) of an array in place. */
  def reverse[@specialized(Double, Float, Int, Byte, Long) T](a:Array[T],from:Int,to:Int) = {
    var i = 0
    val n = to - from
    while (2*i < n-1) {
      val tmp = a(i+from)
      a(i+from) = a(to-i-1)
      a(to-i-1) = tmp
      i += 1
    }
  }
  /** Reverse the whole array in place. */
  def reverse[@specialized(Double, Float, Int, Byte, Long) T](a:Array[T]):Unit = {
    reverse(a, 0, a.size)
  }
  /** Return a sorted copy of `a`. ik0 selects the axis: 1 sorts within each column,
   *  2 sorts within each row, 0 picks automatically (rows for row vectors, else
   *  columns). Vectors are sorted as a whole. asc=false reverses the order. */
  def sort[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], ik0:Int, asc:Boolean)
  (implicit classTag:ClassTag[T], ordering:Ordering[T]):DenseMat[T] = {
    import BIDMat.Sorting._
    val out = DenseMat.newOrCheck(a.nrows, a.ncols, null, a.GUID, ik0, "DenseMat.sort".hashCode)
    var ik = ik0
    if (ik0 == 0) {
      if (a.nrows == 1) {
        ik = 2
      } else {
        ik = 1
      }
    }
    if (a.nrows == 1 || a.ncols == 1) {
      // vector: sort the flat data directly
      System.arraycopy(a.data, 0, out.data, 0, a.length)
      genSort(out.data)
      if (!asc) {
        reverse(out.data)
      }
      out
    } else if (ik == 1) {
      // sort each column independently through a scratch buffer
      val thiscol = new Array[T](a.nrows)
      var i = 0
      while (i < a.ncols) {
        var j = 0
        while (j < a.nrows) {
          thiscol(j) = a.data(j+i*a.nrows)
          j += 1
        }
        genSort(thiscol)
        j = 0
        if (asc) {
          while (j < a.nrows) {
            out.data(j+i*a.nrows) = thiscol(j)
            j += 1
          }
        } else {
          while (j < a.nrows) {
            out.data(j+i*a.nrows) = thiscol(a.nrows-j-1)
            j += 1
          }
        }
        i += 1
      }
      out
    } else {
      // sort each row independently through a scratch buffer
      val thisrow = new Array[T](a.ncols)
      var i = 0
      while (i < a.nrows) {
        var j = 0
        while (j < a.ncols) {
          thisrow(j) = a.data(i+j*a.nrows)
          j += 1
        }
        genSort(thisrow)
        j = 0
        if (asc) {
          while (j < a.ncols) {
            out.data(i+j*out.nrows) = thisrow(j)
            j += 1
          }
        } else {
          while (j < a.ncols) {
            out.data(i+j*out.nrows) = thisrow(a.ncols-j-1)
            j += 1
          }
        }
        i += 1
      }
      out
    }
  }
  /** Comparator over indices into `a`, ordering by element value and breaking ties by
   *  index so the resulting sort is stable. */
  class MyComparator[@specialized(Double, Float, Int, Byte, Long) T](a:Array[T])
  (implicit ordering:Ordering[T]) extends java.util.Comparator[Int] {
    def compare(ii:Int, jj:Int):Int = {
      val c0 = ordering.compare(a(ii), a(jj))
      if (c0 != 0) {
        c0
      } else {
        ii compare jj
      }
    }
  }
  /** Sort `a` returning both sorted values and the permutation indices; sorts rows for
   *  row vectors, columns otherwise. */
  def sort2[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], asc:Boolean)
  (implicit classTag:ClassTag[T], ord:Ordering[T]): (DenseMat[T], IMat) =
    if (a.nrows == 1) {
      sort2(a, 2, asc, null, null)
    } else {
      sort2(a, 1, asc, null, null)
    }
  /** Sort along axis ik (1 = within columns, 2 = within rows) returning sorted values
   *  and permutation indices, allocating fresh output matrices. */
  def sort2[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], ik:Int, asc:Boolean)
  (implicit classTag:ClassTag[T], ord:Ordering[T]):(DenseMat[T], IMat) = sort2(a, ik, asc, null, null)
  /** Sort along axis ik (1 = within columns, 2 = within rows) returning sorted values
   *  and the (zero-based, per-axis) permutation indices, optionally reusing the
   *  supplied output matrices. */
  def sort2[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], ik:Int, asc:Boolean, odmat:Mat, oimat:Mat)
  (implicit classTag:ClassTag[T], ord:Ordering[T]):(DenseMat[T], IMat) = {
    import BIDMat.Sorting._
    val out = DenseMat.newOrCheck[T](a.nrows, a.ncols, odmat, a.GUID, ik, "sort2_1".hashCode)
    val iout = IMat.newOrCheckIMat(a.nrows, a.ncols, oimat, a.GUID, ik, "sort2_2".hashCode)
    if (ik == 1) {
      // copy values and seed each column's index range with 0..nrows-1
      var i = 0
      while (i < a.ncols) {
        var j = 0
        while (j < a.nrows) {
          iout.data(j+i*a.nrows) = j
          out.data(j+i*a.nrows) = a.data(j+i*a.nrows)
          j += 1
        }
        i += 1
      }
      // sort each column in place, permuting the index array alongside the values
      i = 0
      while (i < a.ncols) {
        if (asc) {
          quickSort2(out.data, iout.data, i*a.nrows, (i+1)*a.nrows, 1)
        } else {
          quickSort2(out.data, iout.data, (i+1)*a.nrows-1, i*a.nrows-1, -1)
        }
        i += 1
      }
      (out, iout)
    } else {
      // row-wise: gather each row into scratch buffers, sort, and scatter back
      val vcols = new Array[T](a.ncols)
      val icols = new Array[Int](a.ncols)
      var i = 0
      while (i < a.nrows) {
        var j = 0
        while (j < a.ncols) {
          vcols(j) = a.data(i + j*a.nrows)
          icols(j) = j
          j += 1
        }
        if (asc) {
          quickSort2(vcols, icols, 0, icols.length, 1)
        } else {
          quickSort2(vcols, icols, icols.length-1, -1, -1)
        }
        j = 0
        while (j < a.ncols) {
          out.data(i+j*out.nrows) = vcols(j)
          iout.data(i+j*iout.nrows) = icols(j)
          j += 1
        }
        i += 1
      }
      (out, iout)
    }
  }
  /** Build a lexicographic row comparator for isortlex: rows are compared column by
   *  column through the permutation held in `out`, with ties broken by row index so
   *  the sort is stable. */
  def lexcomp[T](a:DenseMat[T], out:IMat)(implicit ordering:Ordering[T]):(Int, Int) => Int = {
    val aa = a.data
    val nr = a.nrows
    val ii = out.data
    (i:Int, j:Int) => {
      val ip = ii(i)
      val jp = ii(j)
      var c0 = 0
      var k = 0
      while (k < a.ncols && c0 == 0) {
        c0 = ordering.compare(aa(ip+k*nr), aa(jp+k*nr))
        k += 1
      }
      if (c0 != 0) {
        c0
      } else {
        ip compare jp
      }
    }
  }
  /** Return the row permutation that sorts the rows of `a` lexicographically. */
  def isortlex[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], asc:Boolean)(implicit ordering:Ordering[T]):IMat = {
    val out = IMat.newOrCheckIMat(a.nrows, 1, null, a.GUID, "sortlex".hashCode)
    val compp = lexcomp(a, out)
    _isortlex(a, asc, out, compp)
  }
  /** Worker for isortlex: fills `out` with the identity permutation and sorts it with
   *  the supplied comparator (reversed when asc is false). */
  def _isortlex[@specialized(Double, Float, Int, Byte, Long) T](a:DenseMat[T], asc:Boolean, out:IMat, compp:(Int, Int)=>Int)(implicit ordering:Ordering[T]):IMat = {
    import BIDMat.Sorting._
    val ii = out.data
    var i = 0
    while (i < a.nrows) {
      out.data(i) = i
      i += 1
    }
    // swap entries of the permutation rather than the data itself
    def swap(i:Int, j:Int):Unit = {
      val tmp = ii(i)
      ii(i) = ii(j)
      ii(j) = tmp
    }
    if (asc) {
      quickSort(compp, swap, 0, a.nrows)
    } else {
      quickSort((i:Int,j:Int)=>compp(j,i), swap, 0, a.nrows)
    }
    out
  }
  /** Deduplicate the elements of `a`. Returns (bptrs, iptrs) where iptrs maps each
   *  original position to its unique-value id (ids numbered in ascending value order)
   *  and bptrs maps each unique id back to one representative original position. */
  def unique2[@specialized(Double, Float, Int, Long) T](a:DenseMat[T])
  (implicit manifest:Manifest[T], numeric:Numeric[T], ord:Ordering[T]):(IMat, IMat) = {
    val (vss, iss) = sort2(a, true)
    val iptrs = IMat.newOrCheckIMat(a.length, 1, null, a.GUID, "unique2".hashCode)
    var lastpos = 0
    iptrs.data(iss.data(0)) = lastpos
    var i = 1
    while (i < iss.length) {
      // a new unique id starts whenever consecutive sorted values differ
      if (vss.data(i-1) != vss.data(i)) {
        lastpos += 1
      }
      iptrs.data(iss.data(i)) = lastpos
      i += 1
    }
    val bptrs = IMat.newOrCheckIMat(lastpos+1, 1, null, a.GUID, "unique2_2".hashCode)
    // walk backwards so each id keeps the smallest matching original position
    i = iss.length
    while (i > 0) {
      bptrs.data(iptrs.data(i-1)) = i-1
      i = i - 1
    }
    (bptrs, iptrs)
  }
  /** Deduplicate the rows of `a`. Returns (bptrs, iptrs) where iptrs maps each row to
   *  its unique-row id (numbered in lexicographic order) and bptrs maps each id back
   *  to one representative row index. */
  def uniquerows2[@specialized(Double, Float, Int, Long) T](a:DenseMat[T])(implicit ordering:Ordering[T]):(IMat, IMat) = {
    val iss = isortlex(a, true)
    // true when rows i and j are elementwise equivalent
    def compeq(i:Int, j:Int):Boolean = {
      var k:Int = 0;
      while (k < a.ncols && ordering.equiv(a(i,k):T, a(j,k):T)) {
        k += 1
      }
      if (k == a.ncols) true
      else false
    }
    val iptrs = IMat.newOrCheckIMat(a.nrows, 1, null, a.GUID, "uniquerows2".hashCode)
    var lastpos = 0
    iptrs.data(iss.data(0)) = lastpos
    var i = 1
    while (i < iss.length) {
      if (!compeq(iss.data(i-1), iss.data(i))) {
        lastpos += 1
      }
      iptrs.data(iss.data(i)) = lastpos
      i += 1
    }
    val bptrs = IMat.newOrCheckIMat(lastpos+1, 1, null, a.GUID, "uniquerows2_2".hashCode)
    i = iss.length
    while (i > 0) {
      bptrs.data(iptrs.data(i-1)) = i-1
      i = i - 1
    }
    (bptrs, iptrs)
  }
  /** Max element of `ii` plus one, i.e. the array size needed to index by ii's values. */
  def maxelem(ii:IMat):Int = {
    var max0 = 0;
    var i = 0;
    while (i < ii.length) {
      max0 = math.max(max0, ii.data(i));
      i += 1;
    }
    max0+1;
  }
  /** Max element of column icol of `ii` plus one. */
  def maxcol(ii:IMat, icol:Int):Int = {
    var max0 = 0;
    var i = 0;
    val coloff = icol * ii.nrows;
    while (i < ii.nrows) {
      max0 = math.max(max0, ii.data(i + coloff));
      i += 1;
    }
    max0+1;
  }
  /** Accumulate `vals` into an nr0 x nc0 output at positions given by `inds`.
   *  When inds is a vector it holds flat indices (output sized by nr0/nc0 or by the
   *  largest index when they are &lt;= 0); otherwise inds is a two-column (row, col)
   *  matrix. A single-element vals is broadcast; duplicate indices sum. */
  def accum[@specialized(Double, Float, Int, Long) T](inds:IMat, vals:DenseMat[T], nr0:Int, nc0:Int)
  (implicit numeric:Numeric[T], classTag:ClassTag[T]):DenseMat[T] = {
    if (math.min(inds.nrows, inds.ncols) == 1) { // vector case
      if (vals.length > 1 && (inds.ncols != vals.ncols || inds.nrows != vals.nrows)) {
        throw new RuntimeException("accum: mismatch in array dimensions")
      }
      val colvec = (inds.nrows > inds.ncols);
      val nr = if (!colvec) 1 else if (nr0 > 0) nr0 else maxelem(inds);
      val nc = if (colvec) 1 else if (nc0 > 0) nc0 else maxelem(inds);
      val out = DenseMat.newOrCheck(nr, nc, null, inds.GUID, vals.GUID, "accum".hashCode)
      out.clear
      Mat.nflops += inds.length
      var i = 0;
      if (vals.length > 1) {
        while (i < inds.length) {
          out.data(inds.data(i)) = numeric.plus(out.data(inds.data(i)), vals.data(i));
          i += 1;
        }
      } else {
        // broadcast a single value across all target positions
        val v = vals.data(0);
        while (i < inds.length) {
          out.data(inds.data(i)) = numeric.plus(out.data(inds.data(i)), v);
          i += 1;
        }
      }
      out
    } else {
      // two-column (row, col) index matrix
      Mat.nflops += 3L*inds.nrows;
      val nr = if (nr0 > 0) nr0 else maxcol(inds, 0);
      val nc = if (nc0 > 0) nc0 else maxcol(inds, 1);
      val out = DenseMat.newOrCheck(nr, nc, null, inds.GUID, vals.GUID, "accum".hashCode);
      out.clear
      var i = 0;
      if (vals.length > 1) { // Non-scalar case
        if (inds.nrows != vals.nrows) {
          throw new RuntimeException("accum: mismatch in array dimensions")
        }
        while (i < inds.nrows) {
          if (inds.data(i) >= nr || inds.data(i+inds.nrows) >= nc)
            throw new RuntimeException("indices out of bounds "+inds.data(i)+" "+inds.data(i+inds.nrows))
          val indx = inds.data(i) + nr*inds.data(i+inds.nrows)
          out.data(indx) = numeric.plus(out.data(indx), vals.data(i))
          i += 1
        }
      } else {
        while (i < inds.nrows) {
          if (inds.data(i) >= nr || inds.data(i+inds.nrows) >= nc)
            throw new RuntimeException("indices out of bounds "+inds.data(i)+" "+inds.data(i+inds.nrows))
          val v = vals.data(0);
          val indx = inds.data(i) + nr*inds.data(i+inds.nrows)
          out.data(indx) = numeric.plus(out.data(indx), v)
          i += 1
        }
      }
      out
    }
  }
  /** Return a usable nr x nc matrix: reuse oldmat when its shape matches, reuse its
   *  backing array when large enough, otherwise allocate fresh storage. */
  def newOrCheck[T](nr:Int, nc:Int, oldmat:Mat)
  (implicit classTag:ClassTag[T]):DenseMat[T] = {
    if (oldmat.asInstanceOf[AnyRef] == null || (oldmat.nrows == 0 && oldmat.ncols == 0)) {
      new DenseMat[T](nr, nc)
    } else {
      val omat = oldmat.asInstanceOf[DenseMat[T]]
      if (oldmat.nrows != nr || oldmat.ncols != nc) {
        if (nr*nc <= omat.data.size) {
          // reshape over the existing backing array to avoid allocation
          new DenseMat[T](nr, nc, omat.data)
        } else {
          new DenseMat[T](nr, nc)
        }
      } else {
        omat
      }
    }
  }
  /** As newOrCheck(nr, nc, outmat), but when no output is supplied and caching is on,
   *  recycle the matrix cached under (matGuid, opHash). */
  def newOrCheck[T](nr:Int, nc:Int, outmat:Mat, matGuid:Long, opHash:Int)
  (implicit classTag:ClassTag[T]):DenseMat[T] = {
    if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
      newOrCheck(nr, nc, outmat)
    } else {
      val key = (matGuid, opHash)
      val res = Mat.cache2(key)
      if (res != null) {
        newOrCheck(nr, nc, res)
      } else {
        val omat = newOrCheck(nr, nc, null)
        Mat.cache2put(key, omat)
        omat
      }
    }
  }
  /** Cache-aware newOrCheck keyed by two source GUIDs and an operation hash. */
  def newOrCheck[T](nr:Int, nc:Int, outmat:Mat, guid1:Long, guid2:Long, opHash:Int)
  (implicit classTag:ClassTag[T]):DenseMat[T] = {
    if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
      newOrCheck(nr, nc, outmat)
    } else {
      val key = (guid1, guid2, opHash)
      val res = Mat.cache3(key)
      if (res != null) {
        newOrCheck(nr, nc, res)
      } else {
        val omat = newOrCheck(nr, nc, null)
        Mat.cache3put(key, omat)
        omat
      }
    }
  }
  /** Cache-aware newOrCheck keyed by three source GUIDs and an operation hash. */
  def newOrCheck[T](nr:Int, nc:Int, outmat:Mat, guid1:Long, guid2:Long, guid3:Long, opHash:Int)
  (implicit classTag:ClassTag[T]):DenseMat[T] = {
    if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
      newOrCheck(nr, nc, outmat)
    } else {
      val key = (guid1, guid2, guid3, opHash)
      val res = Mat.cache4(key)
      if (res != null) {
        newOrCheck(nr, nc, res)
      } else {
        val omat = newOrCheck(nr, nc, null)
        Mat.cache4put(key, omat)
        omat
      }
    }
  }
}
/** Marker trait for wildcard index objects: when used as an index (see
 *  DenseMat.getInds) it selects every position instead of a specific list. */
trait MatrixWildcard extends Mat
| codeaudit/BIDMat | src/main/scala/BIDMat/DenseMat.scala | Scala | bsd-3-clause | 56,318 |
/*
Copyright (c) 2013, Noel Raymond Cower
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.spifftastic.spastic.app
import android.app.Fragment
/** Mixin exposing the concrete android.app.Fragment subclass a component provides. */
trait FragmentClass {
  val fragmentClass: Class[_ <: Fragment]
}
/** Variant of FragmentClass that pins the exposed fragment class to the specific type T. */
trait TypedFragmentClass[T <: Fragment] extends FragmentClass {
  val fragmentClass: Class[T]
}
| nilium/spastic | src/main/scala/app/FragmentClass.scala | Scala | bsd-2-clause | 1,583 |
package com.softwaremill.macwire.internals
import scala.annotation.tailrec
import scala.quoted.*
/** Walks the owner chain of the macro expansion site and collects every val/def that
 *  could satisfy a wire[] request, grouped by how "far" (Scope) from the splice it was
 *  declared. Scala 3 implementation using the quotes reflection API. */
private[macwire] class EligibleValuesFinder[Q <: Quotes](log: Logger)(using val q: Q) {
  import q.reflect.*
  import EligibleValuesFinder.*
  private val typeCheckUtil = new TypeCheckUtil[q.type](log)
  import typeCheckUtil._
  /** Collect eligible values by walking owners outward from the splice position. */
  def find(): EligibleValues = {
    // NOTE(review): wiredDef/wiredOwner are computed but unused below — candidates for removal
    val wiredDef = Symbol.spliceOwner.owner
    val wiredOwner = wiredDef.owner
    // Recursive walk: methods contribute their parameters at the current scope,
    // enclosing classes contribute members at a widened scope.
    def doFind(symbol: Symbol, scope: Scope): Map[Scope, List[EligibleValue]] = {
      // Members of an enclosing class/trait: vals and parameterless defs,
      // excluding the universal Object/Any members.
      def handleClassDef(s: Symbol): List[EligibleValue] =
        (s.declaredMethods ::: s.declaredFields)
          .filter(m => !m.fullName.startsWith("java.lang.Object") && !m.fullName.startsWith("scala.Any"))
          .map(_.tree)
          .collect {
            case m: ValDef => EligibleValue(m.rhs.map(_.tpe).getOrElse(m.tpt.tpe), m)
            case m: DefDef if m.termParamss.flatMap(_.params).isEmpty =>
              EligibleValue(m.rhs.map(_.tpe).getOrElse(m.returnTpt.tpe), m)
          }
      // Parameters (and parameterless defs) of an enclosing method
      def handleDefDef(s: Symbol): List[EligibleValue] =
        s.tree match {
          case DefDef(_, _, _, Some(Match(_, cases))) =>
            report.throwError(s"Wire for deconstructed case is not supported yet") //TODO
          case DefDef(s, lpc, tt, ot) =>
            lpc.flatMap(_.params).collect {
              case m: ValDef => EligibleValue(m.rhs.map(_.tpe).getOrElse(m.tpt.tpe), m)
              case m: DefDef if m.termParamss.flatMap(_.params).isEmpty =>
                EligibleValue(m.rhs.map(_.tpe).getOrElse(m.returnTpt.tpe), m)
            }
        }
      if symbol.isNoSymbol then Map.empty[Scope, List[EligibleValue]]
      else if symbol.isDefDef then merge(Map((scope, handleDefDef(symbol))), doFind(symbol.maybeOwner, scope))
      // class members live one scope further out; stop widening past the class itself
      else if symbol.isClassDef && !symbol.isPackageDef then Map((scope.widen, handleClassDef(symbol)))
      else if symbol == defn.RootPackage then Map.empty
      else if symbol == defn.RootClass then Map.empty
      else doFind(symbol.maybeOwner, scope.widen)
    }
    EligibleValues(doFind(Symbol.spliceOwner, Scope.init))
  }
  /** Merge two scope maps, concatenating the value lists per scope. */
  private def merge(
      m1: Map[Scope, List[EligibleValue]],
      m2: Map[Scope, List[EligibleValue]]
  ): Map[Scope, List[EligibleValue]] =
    (m1.toSeq ++ m2.toSeq).groupBy(_._1).view.mapValues(_.flatMap(_._2).toList).toMap
  /** A candidate value (its type plus the tree that produces it). */
  case class EligibleValue(tpe: TypeRepr, expr: Tree) {
    // equal trees should have equal hash codes; if trees are equal structurally they should have the same toString?
    override def hashCode() = expr.toString().hashCode
    override def equals(obj: scala.Any) = obj match {
      case EligibleValue(_, e) => expr == e //FIXME not sure if `equalsStructure` -> `==`
      case _ => false
    }
  }
  /** Lookup over the collected candidates, by type and scope. */
  class EligibleValues(val values: Map[Scope, List[EligibleValue]]) {
    private lazy val maxScope = values.keys.maxOption.getOrElse(Scope.init)
    extension (scope: Scope) def isMax = scope == maxScope
    // All trees in the given scope whose type is assignable to tpe (may contain duplicates)
    private def doFindInScope(tpe: TypeRepr, scope: Scope): List[Tree] = {
      for (scopedValue <- values.getOrElse(scope, Nil) if checkCandidate(target = tpe, tpt = scopedValue.tpe)) yield {
        scopedValue.expr
      }
    }
    private def uniqueTrees(trees: List[Tree]): Iterable[Tree] = {
      // the only reliable method to compare trees is using structural equality, but there shouldn't be a lot of
      // trees with a given type, so the n^2 complexity shouldn't hurt
      def addIfUnique(addTo: List[Tree], t: Tree): List[Tree] = {
        addTo.find(_ == t).fold(t :: addTo)(_ => addTo)
      }
      trees.foldLeft(List.empty[Tree])(addIfUnique)
    }
    /** Distinct candidates of the given type declared exactly in `scope`. */
    def findInScope(tpe: TypeRepr, scope: Scope): Iterable[Tree] = {
      uniqueTrees(doFindInScope(tpe, scope))
    }
    /** Candidates from the nearest scope (at or beyond `startingWith`) that has any. */
    def findInFirstScope(tpe: TypeRepr, startingWith: Scope = Scope.init): Iterable[Tree] = {
      @tailrec
      def forScope(scope: Scope): Iterable[Tree] = {
        findInScope(tpe, scope) match {
          case coll if coll.isEmpty && !scope.isMax => forScope(scope.widen)
          case coll if coll.isEmpty => log(s"Could not find $tpe in any scope"); Nil
          case exprs =>
            log(s"Found [${exprs.mkString(", ")}] of type [$tpe] in scope $scope")
            exprs
        }
      }
      forScope(startingWith)
    }
    /** Distinct candidates of the given type from every scope. */
    def findInAllScope(tpe: TypeRepr): Iterable[Tree] = {
      @tailrec
      def accInScope(scope: Scope, acc: List[Tree]): List[Tree] = {
        val newAcc = doFindInScope(tpe, scope) ++ acc
        if (!scope.isMax) accInScope(scope.widen, newAcc) else newAcc
      }
      uniqueTrees(accInScope(Scope.init, Nil))
    }
  }
  object EligibleValues {
    val empty: EligibleValues = new EligibleValues(Map.empty)
  }
}
object EligibleValuesFinder {
  /** Distance of a declaration from the macro splice point; smaller is closer.
   *  Ordered so lookups can widen outward one step at a time. */
  case class Scope(val value: Int) extends Ordered[Scope] {
    /** @return the next Scope until Max */
    def widen: Scope = copy(value = this.value + 1)
    override def compare(that: Scope): Int = this.value.compare(that.value)
    override def equals(other: Any): Boolean = other match {
      case otherScope: Scope => this.value == otherScope.value
      case _ => false
    }
    override def hashCode = value.hashCode
  }
  object Scope extends Ordering[Scope] {
    /** The smallest Scope */
    val init = Scope(1)
    override def compare(a: Scope, b: Scope): Int = a.compare(b)
  }
}
| adamw/macwire | macros/src/main/scala-3/com/softwaremill/macwire/internals/EligibleValuesFinder.scala | Scala | apache-2.0 | 5,418 |
package com.twitter.algebird
import org.scalacheck.{Arbitrary, Gen}
import scala.collection.mutable.{Map => MMap}
import scala.collection.{Map => ScMap}
import org.scalacheck.Prop._
class CollectionSpecification extends CheckProperties {
import com.twitter.algebird.BaseProperties._
import com.twitter.algebird.scalacheck.arbitrary._
property("OrValMonoid is a commutative monoid") {
commutativeMonoidLaws[OrVal]
}
property("AndValMonoid is a commutative monoid") {
commutativeMonoidLaws[AndVal]
}
property("Either is a Semigroup") {
semigroupLaws[Either[String, Int]]
}
property("Either is a Semigroup, with a Right non-monoid semigroup") {
semigroupLaws[Either[String, Max[Int]]]
}
property("Option Monoid laws") {
monoidLaws[Option[Int]] && monoidLaws[Option[String]]
}
property("Option Group laws") {
implicit val equiv: Equiv[Map[String, Option[Int]]] =
Equiv.fromFunction { (a, b) =>
val keys: Set[String] = a.keySet | b.keySet
keys.forall { key: String =>
val v1: Int = a.getOrElse(key, None).getOrElse(0)
val v2: Int = b.getOrElse(key, None).getOrElse(0)
v1 == v2
}
}
groupLaws[Option[Int]] && groupLaws[Map[String, Option[Int]]]
}
property("List plus") {
forAll { (a: List[Int], b: List[Int]) =>
val mon = implicitly[Monoid[List[Int]]]
((a ++ b == mon.plus(a, b)) && (mon.zero == List[Int]()))
}
}
property("List Monoid laws") {
monoidLaws[List[Int]]
}
implicit def arbSeq[T: Arbitrary]: Arbitrary[Seq[T]] =
Arbitrary { implicitly[Arbitrary[List[T]]].arbitrary.map { _.toSeq } }
property("Seq plus") {
forAll { (a: Seq[Int], b: Seq[Int]) =>
val mon = implicitly[Monoid[Seq[Int]]]
((a ++ b == mon.plus(a, b)) && (mon.zero == Seq[Int]()))
}
}
property("Seq Monoid laws") {
monoidLaws[Seq[Int]]
}
property("Array Monoid laws") {
implicit val equiv: Equiv[Array[Int]] = Equiv.by(_.deep)
monoidLaws[Array[Int]]
}
property("Set plus") {
forAll { (a: Set[Int], b: Set[Int]) =>
val mon = implicitly[Monoid[Set[Int]]]
((a ++ b == mon.plus(a, b)) && (mon.zero == Set[Int]()))
}
}
property("Set Monoid laws") {
monoidLaws[Set[Int]]
}
implicit def mapArb[K: Arbitrary, V: Arbitrary: Monoid] = Arbitrary {
val mv = implicitly[Monoid[V]]
implicitly[Arbitrary[Map[K, V]]].arbitrary
.map {
_.filter { kv =>
mv.isNonZero(kv._2)
}
}
}
implicit def scMapArb[K: Arbitrary, V: Arbitrary: Monoid] = Arbitrary {
mapArb[K, V].arbitrary
.map { map: Map[K, V] =>
map: ScMap[K, V]
}
}
implicit def mMapArb[K: Arbitrary, V: Arbitrary: Monoid] = Arbitrary {
mapArb[K, V].arbitrary
.map { map: Map[K, V] =>
MMap(map.toSeq: _*): MMap[K, V]
}
}
def mapPlusTimesKeys[M <: ScMap[Int, Int]](implicit rng: Ring[ScMap[Int, Int]], arbMap: Arbitrary[M]) =
forAll { (a: M, b: M) =>
// Subsets because zeros are removed from the times/plus values
((rng.times(a, b)).keys.toSet.subsetOf((a.keys.toSet & b.keys.toSet)) &&
(rng.plus(a, b)).keys.toSet.subsetOf((a.keys.toSet | b.keys.toSet)) &&
(rng.plus(a, a).keys == (a.filter { kv =>
(kv._2 + kv._2) != 0
}).keys))
}
property("Map plus/times keys") {
mapPlusTimesKeys[Map[Int, Int]]
}
property("ScMap plus/times keys") {
mapPlusTimesKeys[ScMap[Int, Int]]
}
property("MMap plus/times keys") {
mapPlusTimesKeys[MMap[Int, Int]]
}
property("Map[Int,Int] Monoid laws") {
isAssociative[Map[Int, Int]] && weakZero[Map[Int, Int]]
}
property("ScMap[Int,Int] Monoid laws") {
isAssociative[ScMap[Int, Int]] && weakZero[ScMap[Int, Int]]
}
property("MMap[Int,Int] Monoid laws") {
isAssociativeDifferentTypes[ScMap[Int, Int], MMap[Int, Int]] && weakZeroDifferentTypes[
ScMap[Int, Int],
MMap[Int, Int]]
}
property("Map[Int,Int] has -") {
hasAdditiveInverses[Map[Int, Int]]
}
property("ScMap[Int,Int] has -") {
hasAdditiveInverses[ScMap[Int, Int]]
}
property("MMap[Int,Int] has -") {
hasAdditiveInversesDifferentTypes[ScMap[Int, Int], MMap[Int, Int]]
}
property("Map[Int,String] Monoid laws") {
isAssociative[Map[Int, String]] && weakZero[Map[Int, String]]
}
property("ScMap[Int,String] Monoid laws") {
isAssociative[ScMap[Int, String]] && weakZero[ScMap[Int, String]]
}
property("MMap[Int,String] Monoid laws") {
isAssociativeDifferentTypes[ScMap[Int, Int], MMap[Int, Int]] && weakZeroDifferentTypes[
ScMap[Int, Int],
MMap[Int, Int]]
}
// Ring law: times distributes over plus for all three map flavors.
// We haven't implemented ring.one yet for the Map, so skip the one property
property("Map is distributive") {
  isDistributive[Map[Int, Int]]
}
property("ScMap is distributive") {
  isDistributive[ScMap[Int, Int]]
}
property("MMap is distributive") {
  isDistributiveDifferentTypes[ScMap[Int, Int], MMap[Int, Int]]
}
// IndexedSeq generator derived from the standard List generator.
implicit def arbIndexedSeq[T: Arbitrary]: Arbitrary[IndexedSeq[T]] =
  Arbitrary {
    implicitly[Arbitrary[List[T]]].arbitrary.map { _.toIndexedSeq }
  }
// Element-wise combination of sequences of semigroup values.
property("IndexedSeq (of a Semigroup) is a semigroup") {
  semigroupLaws[IndexedSeq[Max[Int]]]
}
// TODO: this test fails sometimes due to the equiv not doing the right thing.
// Fix by defining an Equiv.
property("IndexedSeq is a pseudoRing") {
  pseudoRingLaws[IndexedSeq[Int]]
}
property("Either is a Monoid") {
  monoidLaws[Either[String, Int]]
}
// removeZeros must never leave an explicitly-stored zero value behind.
property("MapAlgebra.removeZeros works") {
  forAll { (m: Map[Int, Int]) =>
    (MapAlgebra.removeZeros(m).values.toSet.contains(0) == false)
  }
}
// Summing a map is insensitive to explicitly-stored zeros.
property("Monoid.sum performs w/ or w/o MapAlgebra.removeZeros") {
  forAll { (m: Map[Int, Int]) =>
    (Monoid.sum(m) == Monoid.sum(MapAlgebra.removeZeros(m)))
  }
}
// sumByKey groups pairs by key, sums the values, and drops keys summing to
// zero; the Operators enrichment (tupList.sumByKey) must agree.
property("MapAlgebra.sumByKey works") {
  forAll { (keys: List[Int], values: List[Int]) =>
    import com.twitter.algebird.Operators._
    val tupList = keys.zip(values)
    val expected = tupList
      .groupBy { _._1 }
      .mapValues { v =>
        v.map { _._2 }.sum
      }
      .filter { _._2 != 0 }
    MapAlgebra.sumByKey(tupList) == expected && tupList.sumByKey == expected
  }
}
// group collects values per key, preserving duplicates and order.
property("MapAlgebra.group works") {
  forAll { (keys: List[Int], values: List[Int]) =>
    import com.twitter.algebird.Operators._
    val tupList = keys.zip(values)
    val expected = tupList.groupBy(_._1).mapValues(_.map(_._2).toList)
    MapAlgebra.group(tupList) == expected && tupList.group == expected
  }
}
// dot is the inner product over the union of key sets (missing keys = 0).
property("MapAlgebra.dot works") {
  forAll { (m1: Map[Int, Int], m2: Map[Int, Int]) =>
    // .toList below is to make sure we don't remove duplicate values
    (MapAlgebra.dot(m1, m2) ==
      (m1.keySet ++ m2.keySet).toList.map { k =>
        m1.getOrElse(k, 0) * m2.getOrElse(k, 0)
      }.sum)
  }
}
// toGraph(edges) followed by flattening the adjacency sets must round-trip.
property("MapAlgebra.toGraph is correct") {
  forAll { (l: Set[(Int, Int)]) =>
    (MapAlgebra
      .toGraph(l)
      .toIterable
      .flatMap {
        case (k, sv) =>
          sv.map { v =>
            (k, v)
          }
      }
      .toSet == l)
  }
}
// sparseEquiv treats an absent key and a monoid-zero value ("" for String)
// as equivalent, but must still distinguish genuinely different values.
property("MapAlgebra.sparseEquiv is correct") {
  forAll { (l: Map[Int, String], empties: Set[Int]) =>
    (!empties.isEmpty) ==> {
      val mapEq = MapAlgebra.sparseEquiv[Int, String]
      mapEq.equiv(l -- empties, l ++ empties.map(_ -> "").toMap) && !mapEq
        .equiv(l -- empties, l ++ empties.map(_ -> "not empty").toMap)
    }
  }
}
// invert maps each value to the set of keys carrying it; expanding the
// inverse back into singleton maps and summing must rebuild the original.
property("MapAlgebra.invert works") {
  forAll { (m: Map[Int, Int]) =>
    val m2 = MapAlgebra.invert(m)
    val m3 = Monoid.sum(for {
      (v, ks) <- m2.toIterable
      k <- ks.toIterable
    } yield Map(k -> v))
    (m3 == m)
  }
}
// invertExact round-trips, modulo dropping entries under the None key.
property("MapAlgebra.invertExact works") {
  forAll { (m: Map[Option[Int], Set[Int]]) =>
    (MapAlgebra.invertExact(MapAlgebra.invertExact(m)) == m.filterKeys(_.isDefined))
  }
}
// Full outer join: the result covers the union of both key sets, with
// Options marking presence; projecting each side back recovers the inputs.
property("MapAlgebra.join works") {
  forAll { (m1: Map[Int, Int], m2: Map[Int, Int]) =>
    val m3 = MapAlgebra.join(m1, m2)
    val m1after = m3
      .mapValues { vw =>
        vw._1
      }
      .filter { _._2.isDefined }
      .mapValues { _.get }
    val m2after = m3
      .mapValues { vw =>
        vw._2
      }
      .filter { _._2.isDefined }
      .mapValues { _.get }
    val m1Orm2 = (m1.keySet | m2.keySet)
    ((m1after == m1) && (m2after == m2) && (m3.keySet == m1Orm2))
  }
}
// Helper: Some(x*x) for even x, None otherwise.
def square(x: Int) = if (x % 2 == 0) Some(x * x) else None
// Sparse equivalence used to compare mergeLookup results (zeros == absent).
def mapEq[K] = MapAlgebra.sparseEquiv[K, Int]
// mergeLookup sums the evaluated values grouped by the looked-up key: a
// constant-None lookup collapses everything under None; an identity lookup
// must match plain sumByKey.
property("MapAlgebra.mergeLookup works") {
  forAll { (items: Set[Int]) =>
    (mapEq.equiv(
      MapAlgebra.mergeLookup[Int, Option[Int], Int](items)(square)(_ => None),
      Map((None: Option[Int]) -> Monoid.sum(items.map(x => square(x).getOrElse(0))))) && mapEq.equiv(
      MapAlgebra.mergeLookup[Int, Int, Int](items)(square)(identity),
      MapAlgebra.sumByKey(items.map(x => x -> square(x).getOrElse(0)))))
  }
}
// Generator for AdaptiveVector[T] with the given `sparse` default value:
// either a dense vector built from a random list, a sparse vector from a
// random map restricted to indices in [0, 1000), or an all-sparse vector
// of random positive size.
def arbAV[T: Arbitrary](sparse: T): Gen[AdaptiveVector[T]] =
  Gen.oneOf(
    for {
      l <- Arbitrary.arbitrary[List[T]]
    } yield AdaptiveVector.fromVector(Vector(l: _*), sparse),
    for {
      m <- Arbitrary.arbitrary[Map[Int, T]]
    } yield
      AdaptiveVector.fromMap(m.filter {
        case (k, _) => (k < 1000) && (k >= 0)
      }, sparse, 1000),
    for {
      size <- Gen.posNum[Int]
    } yield AdaptiveVector.fromMap(Map.empty, sparse, size)
  )
// Algebra laws for AdaptiveVector under various sparse defaults.
property("AdaptiveVector[Int] has a semigroup") {
  implicit val arb = Arbitrary(arbAV(2))
  semigroupLaws[AdaptiveVector[Int]]
}
property("AdaptiveVector[Int] has a monoid") {
  // TODO: remove this equiv instance once #583 is resolved.
  implicit val equiv = AdaptiveVector.denseEquiv[Int]
  implicit val arb = Arbitrary(arbAV(0))
  monoidLaws[AdaptiveVector[Int]]
}
property("AdaptiveVector[Int] has a group") {
  implicit val arb = Arbitrary(arbAV(1))
  groupLaws[AdaptiveVector[Int]]
}
property("AdaptiveVector[String] has a monoid") {
  // TODO: remove this equiv instance once #583 is resolved.
  implicit val equiv = AdaptiveVector.denseEquiv[String]
  implicit val arb = Arbitrary(arbAV(""))
  monoidLaws[AdaptiveVector[String]]
}
}
| nevillelyh/algebird | algebird-test/src/test/scala/com/twitter/algebird/CollectionSpecification.scala | Scala | apache-2.0 | 10,442 |
package models
import java.util.regex.Pattern
import org.w3.banana._
import org.w3.banana.io.{RDFWriter, Turtle}
import play.{Logger, Play}
import scala.util.Try
/** Bundles the banana-rdf modules (RDF model + operations) that ResourceBuilder needs. */
trait ResourceBuilderDependencies
  extends RDFModule
  with RDFOpsModule
/**
* Created by jorge on 6/10/15.
*/
/**
 * Builds a displayable `Resource` from an RDF graph: rewrites dataset URIs
 * to their dereferenceable (host-based) form and collects the labels plus
 * the direct and inverse properties of the requested subject.
 */
trait ResourceBuilder extends ResourceBuilderDependencies {

  import ops._

  // Replaces the configured dataset base prefix of `uri` with the public
  // host, yielding the URI under which the resource is actually served.
  def rewrite(uri: Rdf#URI) = {
    val host = Play.application().configuration().getString("wesby.host")
    val datasetBase = Play.application().configuration().getString("wesby.datasetBase")
    val dereferencedUri = uri.toString.replaceFirst(Pattern.quote(datasetBase), host)
    URI(dereferencedUri)
  }

  // Outgoing properties of `uri`, grouped by (rewritten) predicate.
  // URI objects are rewritten too; literal objects are kept as-is.
  def getProperties(graph: Rdf#Graph, uri: Rdf#URI): Map[Rdf#URI, Iterable[Rdf#Node]] = {
    import ops._ // NOTE(review): redundant, ops._ is already imported at trait level
    val triples = graph.triples.filter(_._1.equals(uri))
    val l = for(Triple(s, p, o) <- triples) yield {
      if (o.isURI)
        (rewrite(p), rewrite(o.asInstanceOf[Rdf#URI]).asInstanceOf[Rdf#Node])
      else (rewrite(p), o)
    }
    l.groupBy(e => e._1).mapValues(e => e.map(x => x._2))
  }

  // (subject, predicate) pairs of triples pointing *at* `uri`.
  // NOTE(review): assumes inverse subjects are URIs — a blank node would be
  // stringified via URI(s.toString); confirm the datasets contain none.
  def getInverseProperties(graph: Rdf#Graph, uri: Rdf#URI): Iterable[(Rdf#URI, Rdf#URI)] = {
    val inverseTriples = graph.triples.filter(_._3.equals(uri))
    for(Triple(s, p, o) <- inverseTriples) yield {
      (rewrite(URI(s.toString)), rewrite(p))
    }
  }

  // Assembles the Resource for `uriString`: labels (configured alt-label
  // property first, then rdfs:label), properties, inverse properties, and
  // the matched shapes.
  def build(uriString: String, graph: Rdf#Graph, shapes: List[String]) = {
    val mainLabelProp = Play.application().configuration().getString("wesby.altLabelProperty")
    val uri = URI(uriString)
    val pg = PointedGraph(uri, graph)
    val ncname = uri.lastPathSegment // NOTE(review): appears unused
    val rdfs: RDFSPrefix[Rdf] = RDFSPrefix[Rdf]
    val rdfsLabelsPg = pg / rdfs.label
    val mainLabelsPg = pg / URI(mainLabelProp)
    // Keep only literal label nodes (non-literals would fail the match).
    val rdfsLabels = for (label <- rdfsLabelsPg.map(_.pointer)) yield label match {
      case l: Rdf#Literal => l
    }
    val defaultLabels = for (label <- mainLabelsPg.map(_.pointer)) yield label match {
      case l: Rdf#Literal => l
    }
    val labels = defaultLabels ++ rdfsLabels
    val properties = getProperties(graph, uri)
    val inverseProperties = getInverseProperties(graph, uri)
    val resource = new Resource[Rdf](uri, labels, shapes, properties, inverseProperties)
    resource
  }
}
import org.w3.banana.jena.JenaModule
object ResourceBuilderWithJena extends ResourceBuilder with JenaModule | weso/Wesby | app/models/ResourceBuilder.scala | Scala | mit | 2,375 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence.multinode
import akka.actor.setup.ActorSystemSetup
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.BootstrapSetup
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.pattern.pipe
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import akka.util.Timeout
import com.lightbend.lagom.scaladsl.persistence._
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Environment
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future
import com.lightbend.lagom.internal.cluster.STMultiNodeSpec
/**
 * Three-node multi-JVM cluster configuration: node1 runs both persistent
 * entities and read-side processors, node2 only entities, node3 only
 * read-side processors (see the per-node role configs below).
 */
abstract class AbstractClusteredPersistentEntityConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")

  // Port of the externally-started test database, passed in by the harness.
  val databasePort = System.getProperty("scaladsl.database.port").toInt
  val environment = Environment.simple()

  commonConfig(
    additionalCommonConfig(databasePort).withFallback(
      ConfigFactory
        .parseString(
          """
      akka.loglevel = INFO
      lagom.persistence.run-entities-on-role = "backend"
      lagom.persistence.read-side.run-on-role = "read-side"
      terminate-system-after-member-removed = 60s
      # increase default timeouts to leave wider margin for Travis.
      akka.testconductor.barrier-timeout=90s
      ## use coprime values for the timeouts below because it'll be easier to spot interferences.
      ## Also, make Akka's `single-expect-default` timeout higher since this test often `expect`'s over an ask operation.
      ## NOTE: these values used to be '9s' and '11s' but '9s' triggered timeouts quite often in Travis. If '13s'
      ## also triggers timeouts in Travis it's possible there's something worth reviewing on this test.
      lagom.persistence.ask-timeout = 13s
      akka.test.single-expect-default = 15s
      lagom.persistence.read-side.offset-timeout = 17s
      # Don't terminate the actor system when doing a coordinated shutdown
      # See http://doc.akka.io/docs/akka/2.6/project/migration-guide-2.4.x-2.5.x.html#Coordinated_Shutdown
      akka.coordinated-shutdown.terminate-actor-system = off
      akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
      akka.cluster.run-coordinated-shutdown-when-down = off
      # multi-jvm tests forms the cluster programmatically
      # therefore we disable Akka Cluster Bootstrap
      lagom.cluster.bootstrap.enabled = off
      # no jvm exit on tests
      lagom.cluster.exit-jvm-when-system-terminated = off
      akka.cluster.sharding.waiting-for-state-timeout = 5s
      # make sure ensure active kicks in fast enough on tests
      lagom.persistence.cluster.distribution.ensure-active-interval = 2s
          """
        )
        .withFallback(ConfigFactory.parseResources("play/reference-overrides.conf"))
    )
  )

  // Database-specific settings, supplied by the concrete test config.
  def additionalCommonConfig(databasePort: Int): Config

  nodeConfig(node1) {
    ConfigFactory.parseString("""akka.cluster.roles = ["backend", "read-side"]""")
  }

  nodeConfig(node2) {
    ConfigFactory.parseString("""akka.cluster.roles = ["backend"]""")
  }

  nodeConfig(node3) {
    ConfigFactory.parseString("""akka.cluster.roles = ["read-side"]""")
  }
}
object AbstractClusteredPersistentEntitySpec {
  // Copied from MultiNodeSpec
  // Derives a stable actor-system name from the calling test class: walk the
  // stack past the MultiNodeSpec frames, take the caller's class name, and
  // sanitize it into a legal actor-system name.
  private def getCallerName(clazz: Class[_]): String = {
    val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(".*MultiNodeSpec.?$"))
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 => s
      case z => s.drop(z + 1)
    }
    reduced.head.replaceFirst(""".*\\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }

  // Builds the test ActorSystem with the given Lagom JSON serializer
  // registry installed via ActorSystemSetup.
  def createActorSystem(jsonSerializerRegistry: JsonSerializerRegistry): (Config) => ActorSystem = { config =>
    val setup = ActorSystemSetup(
      BootstrapSetup(ConfigFactory.load(config)),
      JsonSerializerRegistry.serializationSetupFor(jsonSerializerRegistry)
    )
    ActorSystem(getCallerName(classOf[MultiNodeSpec]), setup)
  }
}
/**
 * Multi-JVM test driving TestEntity commands from all three cluster nodes
 * and asserting command routing, read-side event counts, role-constrained
 * entity placement, and graceful node departure.
 */
abstract class AbstractClusteredPersistentEntitySpec(config: AbstractClusteredPersistentEntityConfig)
    extends MultiNodeSpec(config, AbstractClusteredPersistentEntitySpec.createActorSystem(TestEntitySerializerRegistry))
    with STMultiNodeSpec
    with ImplicitSender {

  import config._

  // implicit EC needed for pipeTo
  import system.dispatcher

  override def initialParticipants = roles.size

  // Joins `from` to the cluster seeded at `to`, then synchronizes all nodes
  // on a barrier so later steps see a consistent membership.
  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrierWithDilatedTimeout(from.name + "-joined")
  }

  // Resolves a possibly local-scoped actor address to the node's full
  // cluster address so addresses can be compared across nodes.
  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    // Initialize components
    registry.register(new TestEntity(system))
    components.readSide.register(readSideProcessor())

    // Form the cluster (all nodes join node1) and wait until everyone is Up.
    roles.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(3))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }
    enterBarrierWithDilatedTimeout("startup")
  }

  // Supplied by the concrete (e.g. Cassandra/JDBC) test implementation.
  def components: PersistenceComponents

  def registry: PersistentEntityRegistry = components.persistentEntityRegistry

  // Read-side processor under test, supplied by the concrete implementation.
  protected def readSideProcessor: () => ReadSideProcessor[TestEntity.Evt]

  // Queries the read-side store for the number of appended events of `id`.
  protected def getAppendCount(id: String): Future[Long]

  /**
   * uses overridden {{getAppendCount}} to assert a given entity {{id}} emitted the {{expected}} number of events. The
   * implementation uses polling from only node1 so nodes 2 and 3 will skip this code.
   */
  def expectAppendCount(id: String, expected: Long) = {
    runOn(node1) {
      within(20.seconds) {
        awaitAssert {
          val count = Await.result(getAppendCount(id), 5.seconds)
          count should ===(expected)
        }
      }
    }
  }

  // Barrier whose timeout is dilated by the configured test time factor.
  def enterBarrierWithDilatedTimeout(name: String) = {
    import akka.testkit._
    testConductor.enter(
      Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration.dilated)),
      scala.collection.immutable.Seq(name)
    )
  }

  "A PersistentEntity in a Cluster" must {

    "send commands to target entity" in within(75.seconds) {
      // this barrier at the beginning of the test will be run on all nodes and should be at the
      // beginning of the test to ensure it's run.
      enterBarrierWithDilatedTimeout("before 'send commands to target entity'")
      val ref1 = registry.refFor[TestEntity]("entity-1")
      val ref2 = registry.refFor[TestEntity]("entity-2")

      // STEP 1: send some commands from all nodes of the test to ref1 and ref2
      // note that this is done on node1, node2 and node 3 !!
      val r1 = ref1.ask(TestEntity.Add("a"))
      r1.pipeTo(testActor)
      expectMsg(TestEntity.Appended("A"))
      enterBarrier("appended-A")

      val r2 = ref2.ask(TestEntity.Add("b"))
      r2.pipeTo(testActor)
      expectMsg(TestEntity.Appended("B"))
      enterBarrier("appended-B")

      val r3: Future[TestEntity.Evt] = ref2.ask(TestEntity.Add("c"))
      r3.pipeTo(testActor)
      expectMsg(TestEntity.Appended("C"))
      enterBarrier("appended-C")

      // STEP 2: assert both ref's stored all the commands in their respective state.
      val r4: Future[TestEntity.State] = ref1.ask(TestEntity.Get)
      r4.pipeTo(testActor)
      // There are three events of each because the Commands above are executed on all 3 nodes of the multi-jvm test
      expectMsgType[TestEntity.State].elements should ===(List("A", "A", "A"))

      val r5 = ref2.ask(TestEntity.Get)
      r5.pipeTo(testActor)
      expectMsgType[TestEntity.State].elements should ===(List("B", "B", "B", "C", "C", "C"))

      // STEP 3: assert the number of events consumed in the read-side processors equals the number of expected events.
      // NOTE: in nodes node2 and node3 {{expectAppendCount}} is a noop
      expectAppendCount("entity-1", 3)
      expectAppendCount("entity-2", 6)
    }

    "run entities on specific node roles" in {
      // this barrier at the beginning of the test will be run on all nodes and should be at the
      // beginning of the test to ensure it's run.
      enterBarrierWithDilatedTimeout("before 'run entities on specific node roles'")
      // node1 and node2 are configured with "backend" role
      // and lagom.persistence.run-entities-on-role = backend
      // i.e. no entities on node3
      val entities = for (n <- 10 to 29) yield registry.refFor[TestEntity](n.toString)
      val addresses = entities.map { ent =>
        val r = ent.ask(TestEntity.GetAddress)
        val _: Future[String] = r.map(_.hostPort) // compile check that the reply type is inferred correctly
        r.pipeTo(testActor)
        expectMsgType[Address]
      }.toSet

      addresses should not contain node(node3).address
    }

    "have support for graceful leaving" in {
      // this barrier at the beginning of the test will be run on all nodes and should be at the
      // beginning of the test to ensure it's run.
      enterBarrierWithDilatedTimeout("before 'have support for graceful leaving'")
      // NOTE(review): node2's actual departure is expected to happen on the
      // node2 JVM before this barrier is passed — confirm in the concrete spec.
      enterBarrierWithDilatedTimeout("node2-left")

      // After node2 leaves, node1 must still be able to reach both entities.
      runOn(node1) {
        within(35.seconds) {
          val ref1 = registry.refFor[TestEntity]("entity-1")
          val r1: Future[TestEntity.Evt] = ref1.ask(TestEntity.Add("a"))
          r1.pipeTo(testActor)
          expectMsg(TestEntity.Appended("A"))

          val ref2 = registry.refFor[TestEntity]("entity-2")
          val r2: Future[TestEntity.Evt] = ref2.ask(TestEntity.Add("b"))
          r2.pipeTo(testActor)
          expectMsg(TestEntity.Appended("B"))

          val r3: Future[TestEntity.Evt] = ref2.ask(TestEntity.Add("c"))
          r3.pipeTo(testActor)
          expectMsg(TestEntity.Appended("C"))
        }
      }

      enterBarrierWithDilatedTimeout("node1-working")
    }
  }
}
| rcavalcanti/lagom | persistence/scaladsl/src/multi-jvm/scala/com/lightbend/lagom/scaladsl/persistence/multinode/AbstractClusteredPersistentEntitySpec.scala | Scala | apache-2.0 | 10,392 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeConformance.TypeConformanceTestBase
import org.junit.experimental.categories.Category
/**
* @author mucianm
* @since 25.03.16.
*/
@Category(Array(classOf[PerfCycleTests]))
class PrimitivesConformanceTest extends TypeConformanceTestBase{
  // SCL-5358: a `final val` initialized with a literal should keep its
  // constant type so it conforms to Byte; `/* True */` is the expected verdict.
  def testSCL5358() = doTest(
    """
      |final val x = 0
      |val y: Byte = x
      |/* True */
    """.stripMargin)
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/failed/typeInference/PrimitivesConformanceTest.scala | Scala | apache-2.0 | 532 |
package linkchecker.persistence
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.SupervisorStrategy
import akka.actor.Terminated
import akka.persistence.Processor
import akka.persistence.Persistent
object Receptionist {
  /** Internal queue entry: the requesting client and the URL it asked for. */
  private case class Job(client: ActorRef, url: String)
  /** Request: crawl `url`; the sender receives a Result or a Failed reply. */
  case class Get(url: String)
  /** Successful reply: the links found starting at `url`. */
  case class Result(url: String, links: Set[String])
  /** Failure reply: the crawl of `url` could not be completed. */
  case class Failed(url: String)
  def props = Props[Receptionist]
}
/**
 * Entry point of the link checker: accepts Get(url) requests, runs one
 * Controller per crawl (sequentially, FIFO), and replies with Result or
 * Failed. As a persistence Processor, persisted payloads are unwrapped and
 * re-delivered to self so they flow through the normal Get handling.
 */
class Receptionist extends Actor with ActorLogging with Processor {
  import Receptionist._

  // Stop a crashing Controller; its Terminated signal marks the job failed.
  override def supervisorStrategy = SupervisorStrategy.stoppingStrategy

  // Monotonic counter used to give each Controller a unique actor name.
  var reqNo = 0

  def controllerProps = Controller.props

  def receive = waiting

  // Idle state: no crawl currently in progress.
  val waiting: Receive = {
    case Persistent(payload, sequenceNr) =>
      log.debug("receive persistente get for {}", payload)
      // Unwrap the persisted payload and handle it like a live message.
      self ! payload
    case Get(url) =>
      log.debug("receive get for {}", url)
      context.become(runNext(Vector(Job(sender, url))))
  }

  // Starts the next queued job, or falls back to waiting if the queue is empty.
  def runNext(queue: Vector[Job]): Receive = {
    reqNo += 1
    if (queue.isEmpty) waiting
    else {
      val controller = context.actorOf(controllerProps, s"c$reqNo")
      context.watch(controller)
      // Depth 2: follow links up to two hops from the starting URL.
      controller ! Controller.Check(queue.head.url, 2)
      running(queue)
    }
  }

  // A crawl is in progress for queue.head.
  def running(queue: Vector[Job]): Receive = {
    case Controller.Result(links) =>
      val job = queue.head
      job.client ! Result(job.url, links)
      // Unwatch before stopping so no spurious Terminated is delivered.
      context.stop(context.unwatch(sender))
      context.become(runNext(queue.tail))
    case Terminated(_) =>
      // The Controller crashed and was stopped by the supervisor strategy.
      val job = queue.head
      job.client ! Failed(job.url)
      context.become(runNext(queue.tail))
    case Get(url) =>
      log.debug("receive get for {}", url)
      context.become(enqueueJob(queue, Job(sender, url)))
  }

  // Accepts the new job unless the backlog already holds more than 3 entries.
  def enqueueJob(queue: Vector[Job], job: Job): Receive = {
    if (queue.size > 3) {
      sender ! Failed(job.url)
      running(queue)
    } else running(queue :+ job)
  }
} | fabiofumarola/akka-tutorial | src/main/scala/linkchecker/persistence/Receptionist.scala | Scala | cc0-1.0 | 2,029 |
import com.typesafe.config.ConfigFactory
import play.api.{Configuration, GlobalSettings}
import play.api.mvc.WithFilters
import play.filters.csrf.CSRFFilter
import java.io.File
import play.api.Mode
import play.Logger
/**
 * Application-wide Play settings: enables the CSRF filter and, at load time,
 * layers a mode-specific configuration file (e.g. application.prod.conf)
 * on top of the base configuration.
 */
object Global extends WithFilters(CSRFFilter()) with GlobalSettings {

  override def onLoadConfig(config: Configuration, path: File, classloader: ClassLoader, mode: Mode.Mode): Configuration = {
    val fileName = "application." + mode.toString.toLowerCase + ".conf"
    Logger.debug("Adding more config files!: " + fileName)
    val merged = config ++ Configuration(ConfigFactory.load(fileName))
    super.onLoadConfig(merged, path, classloader, mode)
  }
}
| manvesh/politickle | app/Global.scala | Scala | apache-2.0 | 704 |
/*
* HttpSolrServerConnectionProvider.scala
*
* Updated: Oct 3, 2014
*
* Copyright (c) 2014, CodeMettle
*/
package com.codemettle.akkasolr.ext
import org.apache.solr.client.solrj.impl.{HttpClientUtil, HttpSolrClient}
import org.apache.solr.common.params.ModifiableSolrParams
import com.codemettle.akkasolr.client.SolrServerClientConnection
import akka.actor.{ExtendedActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.model.Uri
import akka.stream.Materializer
/**
* @author steven
*
*/
/**
 * [[ConnectionProvider]] backed by SolrJ's `HttpSolrClient`, wrapped in a
 * [[SolrServerClientConnection]] actor. When both a username and a password
 * are supplied, the client is built over an authenticated Apache HttpClient.
 */
class HttpSolrServerConnectionProvider extends ConnectionProvider {
  override def connectionActorProps(uri: Uri, username: Option[String], password: Option[String],
                                    system: ExtendedActorSystem)(implicit mat: Materializer): Props = {
    // Evaluated lazily by the actor; builds a fresh client per invocation.
    def createClient = {
      // Only build a preconfigured HttpClient when both credentials exist.
      val authenticatedClient = username.flatMap { user =>
        password.map { pass =>
          val params = new ModifiableSolrParams
          params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128)
          params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32)
          params.set(HttpClientUtil.PROP_FOLLOW_REDIRECTS, false)
          params.set(HttpClientUtil.PROP_BASIC_AUTH_USER, user)
          params.set(HttpClientUtil.PROP_BASIC_AUTH_PASS, pass)

          Logging(system, getClass).info(s"Creating new http client, config: $params")

          HttpClientUtil.createClient(params)
        }
      }

      val builder = new HttpSolrClient.Builder().withBaseSolrUrl(uri.toString())
      authenticatedClient.map(builder.withHttpClient).getOrElse(builder).build()
    }

    SolrServerClientConnection props createClient
  }
}
| CodeMettle/akka-solr | src/test/scala/com/codemettle/akkasolr/ext/HttpSolrServerConnectionProvider.scala | Scala | apache-2.0 | 1,711 |
package vggames.scala.specs.valvar
import vggames.scala.specs.GameSpecification
import vggames.scala.code.RestrictedFunction0
import vggames.scala.specs.TestRun
/**
 * Scala game challenge: the player must declare a constant named `valor`
 * holding the string "val". The submitted code is compiled with `valor`
 * appended (see afterCode) so its value becomes the tested result.
 */
class DefineValString extends GameSpecification[RestrictedFunction0[String]] {

  // Signature fragments used to wrap and compile the submitted code.
  def runSignature = ":String"

  def extendsType = "RestrictedFunction0[String]"

  // Expression evaluated after the player's code: the value of `valor`.
  override def afterCode = "valor"

  def challenge = """Defina a constante chamada <code>valor</code> com o valor <code>"val"</code> """

  // Asserts the compiled submission evaluates to "val".
  def run(code : Code, submittedCode : String)(implicit cases : TestRun) =
    "O seu código" should {
      """ definir a constante "val" chamada valor """ in {
        code() must_== "val"
      }
    }
} | rustaeja/rustaeja-project-8273 | games/scala/src/main/scala/vggames/scala/specs/valvar/DefineValString.scala | Scala | gpl-3.0 | 673 |
package todomvc.example
import play.api.db.slick.DatabaseConfigProvider
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile
/** Mixin giving access to the Slick JDBC configuration via Play's provider. */
trait DatabaseSupport {

  // Injected by the concrete component (e.g. via Guice).
  val dbConfigProvider: DatabaseConfigProvider

  // Resolved profile config; `ensuring` fails fast on a missing binding.
  val dbConfig: DatabaseConfig[JdbcProfile] =
    dbConfigProvider.get[JdbcProfile] ensuring (_ != null)
}
| greencatsoft/scalajs-angular-todomvc | jvm/app/todomvc/example/DatabaseSupport.scala | Scala | apache-2.0 | 319 |
import org.specs2._
import form.Form
/** Example specs2 specification embedding a Form (street/number) in an s2 spec. */
class SpecificationWithForms extends Specification with specification.Forms { def is = s2"""

The address must be retrieved from the database with the proper street and number
  ${Form("Address").
     tr(prop("street", actualStreet(123), "Oxford St")).
     tr(prop("number", actualNumber(123), 20))}
"""

  // Stubbed lookups standing in for real database access.
  def actualStreet(no: Int) = "Oxford St"
  def actualNumber(no: Int) = 20
} | jaceklaskowski/specs2-sandbox | src/test/scala/SpecificationWithForms.scala | Scala | apache-2.0 | 433 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala
import rx.annotations.Experimental
import rx.lang.scala.subjects.SerializedSubject
/**
* A Subject is an Observable and an Observer at the same time.
*
* @define experimental
* <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
*
* @define beta
* <span class="badge badge-red" style="float: right;">BETA</span>
*/
trait Subject[T] extends Observable[T] with Observer[T] {
  // Underlying RxJava subject; both views below simply delegate to it.
  private [scala] val asJavaSubject: rx.subjects.Subject[_ >: T, _<: T]

  // Observable and Observer facades over the same Java subject.
  val asJavaObservable: rx.Observable[_ <: T] = asJavaSubject
  override val asJavaObserver: rx.Observer[_ >: T] = asJavaSubject

  // Observer side: forward each notification to the wrapped Java subject.
  override def onNext(value: T): Unit = { asJavaObserver.onNext(value)}
  override def onError(error: Throwable): Unit = { asJavaObserver.onError(error) }
  override def onCompleted() { asJavaObserver.onCompleted() }

  /**
   * Indicates whether the [[Subject]] has [[Observer]]s subscribed to it.
   * @return `true` if there is at least one [[Observer]] subscribed to this [[Subject]], `false` otherwise
   */
  def hasObservers: Boolean = asJavaSubject.hasObservers

  /**
   * Wraps a [[Subject]] so that it is safe to call its various `on` methods from different threads.
   *
   * When you use an ordinary [[Subject]] as a [[Subscriber]], you must take care not to call its
   * [[Subscriber.onNext]] method (or its other `on` methods) from multiple threads, as this could
   * lead to non-serialized calls, which violates the [[Observable]] contract and creates an ambiguity
   * in the resulting [[Subject]].
   *
   * To protect a [[Subject]] from this danger, you can convert it into a [[rx.lang.scala.subjects.SerializedSubject SerializedSubject]]
   * with code like the following:
   * {{{
   * mySafeSubject = myUnsafeSubject.toSerialized
   * }}}
   *
   * @return [[rx.lang.scala.subjects.SerializedSubject SerializedSubject]] wrapping the current [[Subject]]
   */
  def toSerialized: SerializedSubject[T] = this match {
    // Already serialized: avoid double wrapping.
    case s: SerializedSubject[T] => s
    case s => SerializedSubject(s)
  }
}
/**
* Subject that, once an `Observer` has subscribed, emits all subsequently observed items to the
* subscriber.
* <p>
* <img width="640" height="405" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/S.PublishSubject.png" alt="" />
* <p>
* @example
{{{
val subject = Subject[String]()
// observer1 will receive all onNext and onCompleted events
subject.subscribe(observer1)
subject.onNext("one")
subject.onNext("two")
// observer2 will only receive "three" and onCompleted
subject.subscribe(observer2)
subject.onNext("three")
subject.onCompleted()
}}}
*/
object Subject {
  /**
   * Creates and returns a new `Subject`.
   *
   * @return the new `Subject`
   */
  def apply[T](): Subject[T] = {
    val wrapped = rx.subjects.PublishSubject.create[T]()
    new rx.lang.scala.subjects.PublishSubject[T](wrapped)
  }
}
| joohnnie/RxScala | src/main/scala/rx/lang/scala/Subject.scala | Scala | apache-2.0 | 3,483 |
package scala.meta.internal.scalacp
import scala.meta.internal.{semanticdb => s}
/** Conversion of scalasig annotation references into SemanticDB annotations. */
trait AnnotationOps { self: Scalacp =>
  implicit class XtensionAnnotation(ann: Int) {
    /** Converts this annotation reference to its SemanticDB representation. */
    def toSemantic: s.Annotation = {
      // FIXME: https://github.com/scalameta/scalameta/issues/1292
      // Annotation payloads are not modeled yet, so emit an empty annotation.
      s.Annotation()
    }
  }
}
| MasseGuillaume/scalameta | semanticdb/metacp/src/main/scala/scala/meta/internal/scalacp/AnnotationOps.scala | Scala | bsd-3-clause | 307 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control.controls
import org.orbeon.oxf.xforms.xbl.XBLContainer
import org.orbeon.dom.Element
import org.orbeon.oxf.xforms.event.XFormsEventHandler
import org.orbeon.oxf.xforms.BindingContext
import org.orbeon.oxf.xforms.analysis.ElementAnalysis
import collection.Seq
import org.orbeon.oxf.xforms.control.{XFormsContainerControl, XFormsControl}
/**
 * Control counterpart of an XForms action / event handler in the view.
 * Actions carry no UI state: they get a copied (not re-evaluated) binding,
 * build no child controls, and never produce Ajax updates.
 */
class XFormsActionControl(container: XBLContainer, parent: XFormsControl, element: Element, effectiveId: String)
    extends XFormsControl(container, parent, element, effectiveId) with XFormsEventHandler {

  // Tell the parent about us if the parent is not a container
  Option(parent) foreach {
    case _: XFormsContainerControl =>
    case nonContainer => nonContainer.addChildAction(this)
  }

  // Don't push the actual binding for actions because it's unnecessary at build/refresh time and the binding needs to
  // be re-evaluated when the action runs anyway.
  override def computeBinding(parentContext: BindingContext) = computeBindingCopy(parentContext)

  // Don't build any children, as in the view we don't support event handlers nested within event handlers, and nested
  // actions are evaluated dynamically.
  override def buildChildren(buildTree: (XBLContainer, BindingContext, ElementAnalysis, Seq[Int]) => Option[XFormsControl], idSuffix: Seq[Int]) = ()

  // Actions are invisible to the client, so they never need Ajax diffs and
  // can serve as their own comparison copy.
  override def supportAjaxUpdates = false

  override def getBackCopy: AnyRef = this
}
| orbeon/orbeon-forms | xforms-runtime/shared/src/main/scala/org/orbeon/oxf/xforms/control/controls/XFormsActionControl.scala | Scala | lgpl-2.1 | 2,091 |
package lila.oauth
import io.lemonlabs.uri.AbsoluteUrl
import org.joda.time.DateTime
import play.api.data._
import play.api.data.Forms._
import reactivemongo.api.bson.BSONObjectID
import lila.common.Bearer
import lila.common.Form.{ absoluteUrl, cleanText }
import lila.user.User
/** Play forms for creating OAuth personal access tokens. */
object OAuthTokenForm {

  // Each requested scope must be one of the known OAuth scope keys.
  private val scopesField = list(nonEmptyText.verifying(OAuthScope.byKey.contains _))

  private val descriptionField = cleanText(minLength = 3, maxLength = 140)

  /** Form for creating a single personal access token. */
  def create = Form(
    mapping(
      "description" -> descriptionField,
      "scopes" -> scopesField
    )(Data.apply)(Data.unapply)
  )

  case class Data(description: String, scopes: List[String])

  /** Admin form for bulk-creating challenge tokens (comma-separated users, max 500). */
  def adminChallengeTokens = Form(
    mapping(
      "description" -> descriptionField,
      "users" -> cleanText
        .verifying("No more than 500 users", _.split(',').size <= 500)
    )(AdminChallengeTokensData.apply)(AdminChallengeTokensData.unapply _)
  )

  case class AdminChallengeTokensData(description: String, usersStr: String) {
    // Parsed, trimmed, deduplicated usernames; invalid-looking entries dropped.
    def usernames = usersStr.split(',').map(_.trim).distinct.filter(User.couldBeUsername).toList
  }
}
| luanlv/lila | modules/oauth/src/main/OAuthTokenForm.scala | Scala | mit | 1,128 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core
import com.typesafe.config.{ Config, ConfigFactory }
import org.scalatest.Inside.inside
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.reflect.ClassTag
import scala.util.{ Failure, Success }
/**
 * Exercises ConfigurableTypeFactory resolution paths: a symbolic `type` alias,
 * and a fully-qualified `class` resolved as a singleton object, via a
 * companion factory method, via a Config constructor, or via a default
 * constructor — plus both failure paths.
 */
class ConfigurableTypeFactorySpec extends AnyWordSpec with Matchers {
  "ConfigurableTypeFactorySpec" should {
    "creates a known type" in {
      // "known" is registered in ServiceFactory.knownTypes below.
      val config = ConfigFactory.parseString(
        """
          | type = "known"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Success(s: KnownService) =>
          // The instance is handed the same config it was created from.
          s.config shouldBe config
      }
    }
    "creates a singleton class" in {
      // A Scala `object` resolves to its single instance rather than a new one.
      val config = ConfigFactory.parseString(
        s"""
          | class = "com.comcast.money.core.SingletonService"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      result shouldBe Success(SingletonService)
    }
    "creates a class with a factory method" in {
      // FactoryService's companion apply(Config) reads "value" from the config.
      val config = ConfigFactory.parseString(
        s"""
          | class = "${classOf[FactoryService].getCanonicalName}"
          | value = "value"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Success(s: FactoryService) =>
          s.value shouldBe "value"
      }
    }
    "creates a class with a config constructor" in {
      val config = ConfigFactory.parseString(
        s"""
          | class = "${classOf[ConfigConstructorService].getCanonicalName}"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Success(s: ConfigConstructorService) =>
          s.config shouldBe config
      }
    }
    "creates a class with a default constructor" in {
      val config = ConfigFactory.parseString(
        s"""
          | class = "${classOf[DefaultConstructorService].getCanonicalName}"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Success(_: DefaultConstructorService) =>
      }
    }
    "returns the default value for an unknown type" in {
      // A type alias missing from knownTypes fails with a descriptive error.
      val config = ConfigFactory.parseString(
        s"""
          | type = "unknown"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Failure(exception: FactoryException) =>
          exception.getMessage shouldBe "Could not resolve known Service type 'unknown'."
      }
    }
    "returns the default value for an unknown class" in {
      // A class name that cannot be loaded also fails with a FactoryException.
      val config = ConfigFactory.parseString(
        s"""
          | class = "com.comcast.money.core.UnknownService"
          |""".stripMargin)
      val result = ServiceFactory.create(config)
      inside(result) {
        case Failure(exception: FactoryException) =>
          exception.getMessage shouldBe "Could not create instance of Service class 'com.comcast.money.core.UnknownService'."
      }
    }
  }
}
/** Test factory that builds [[Service]] instances from Typesafe config. */
object ServiceFactory extends ConfigurableTypeFactory[Service] {
  override protected val tag: ClassTag[Service] = ClassTag(classOf[Service])

  // "known" is the only symbolic type name this factory resolves.
  override protected val knownTypes: PartialFunction[String, Config => Service] = {
    case "known" => new KnownService(_)
  }
}
/** Marker trait for the product type created by ServiceFactory. */
trait Service {}
/** Companion exposing the factory method the resolution logic can discover. */
object FactoryService {
  def apply(config: Config): FactoryService = new FactoryService(config.getString("value"))
}
// Built via the companion's apply(Config); carries the configured "value".
class FactoryService(val value: String) extends Service {}
// Built via its single Config-argument constructor.
class ConfigConstructorService(val config: Config) extends Service {}
// Built via its no-argument constructor.
class DefaultConstructorService extends Service {}
// Registered under the symbolic type name "known" in ServiceFactory.
class KnownService(val config: Config) extends Service {}
// Scala singleton — presumably resolved to its single instance; confirm in ConfigurableTypeFactory.
object SingletonService extends Service {}
object DefaultService extends Service {}
package lib
import com.madgag.git._
import lib.ConfigFinder._
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.revwalk.RevWalk
import org.scalatestplus.play._
class ConfigFinderSpec extends PlaySpec {

  /**
   * Unpacks the zipped test repository at `repoPath` and returns the set of
   * folder paths on its master branch for which Prout config was found.
   */
  def configFilesIn(repoPath: String): Set[String] = {
    val repo: Repository = test.unpackRepo(repoPath)
    implicit val threadLocalResources = repo.getObjectDatabase.threadLocalResources
    val masterCommit = repo.resolve("master").asRevCommit(new RevWalk(threadLocalResources.reader()))
    configIdMapFrom(masterCommit).keySet
  }

  "Config finder" must {
    "find config in the root directory" in {
      configFilesIn("/simple.git.zip") mustEqual Set("/")
    }
    "find config in sub folders" in {
      configFilesIn("/multi-folder.git.zip") mustEqual Set("/foo/", "/bar/", "/baz/")
    }
  }
}
package com.nhlreplay.parser.playbyplay
import GameEventType._
/**
 * Canned play-by-play XHTML rows — one fixture per NHL game-event variant —
 * used as inputs for the play-by-play parser tests. Each fixture wraps an
 * event-type code and a description cell in the full table-row markup of the
 * NHL report pages, including the shared "players on ice" fragment.
 */
object TestGameEventXhtml
{
  // Face-off and goal variants (goals with zero, one, and two assists).
  val faceoffXhtml = gameEvent(faceoff, "PHI won Off. Zone - PHI #43 KADRI vs MTL #8 PRUST")
  val goalWithoutAssistsXhtml = gameEvent(goal, "PHI #10 SCHENN(1), Wrist, Off. Zone, 10 ft.")
  val goalWithOneAssistXhtml = gameEvent(goal, """PHI #10 SCHENN(1), Wrist, Off. Zone, 10 ft.
                                                |<br clear="none"/>Assist: #40 LECAVALIER(33)""".stripMargin)
  val goalWithTwoAssistsXhtml = gameEvent(goal, """PHI #10 SCHENN(1), Wrist, Off. Zone, 10 ft.
                                                 |<br clear="none"/>Assists: #40 LECAVALIER(33); #32 STREIT(10)""".stripMargin)
  // Hits and the many penalty variants (drawn, major, undrawn, served-by,
  // penalty shot, bench/team penalty, and one with no zone given).
  val hitXhtml = gameEvent(hit, "PHI #76 SUBBAN HIT TOR #42 BOZAK, Off. Zone")
  val penaltyDrawnXhtml = gameEvent(penalty, "PHI #81 ELLER Tripping(2 min), Off. Zone Drawn By: TOR #19 LUPUL")
  val penaltyMajorXhtml = gameEvent(penalty, "PHI #2 FRASER Fighting (maj)(5 min), Def. Zone Drawn By: MTL #32 MOEN")
  val penaltyUndrawnXhtml = gameEvent(penalty, "PHI #2 FRASER Unsportsmanlike conduct(2 min), Def. Zone")
  val penaltyServedByXhtml = gameEvent(penalty, "PHI #2 FRASER Interference(2 min) Served By: #81 KESSEL, Def. Zone Drawn By: MTL #8 PRUST")
  val penaltyShotXhtml = gameEvent(penalty, "ANA #33 SILFVERBERG PS-Holding on breakaway(0 min), Def. Zone Drawn By: PHI #21 VAN RIEMSDYK")
  val penaltyTeamXhtml = gameEvent(penalty, "PHI TEAM Too many men/ice - bench(2 min) Served By: #48 BRIERE, Neu. Zone")
  val penaltyWithoutZoneXhtml = gameEvent(penalty, "PHI #71 CLARKSON Hooking(2 min) Drawn By: CBJ #71 FOLIGNO")
  // Period boundaries and the shot variants (blocked, missed, on goal).
  val periodEndXhtml = gameEvent(periodEnd, "Period End- Local time: 8:07 EDT")
  val periodStartXhtml = gameEvent(periodStart, "Period Start- Local time: 7:27 EDT")
  val shotBlockedXhtml = gameEvent(block, "PHI #51 GARDINER BLOCKED BY MTL #26 GORGES, Wrap-around, Off. Zone")
  val shotMissedXhtml = gameEvent(miss, "PHI #67 PACIORETTY, Slap, Wide of Net, Off. Zone, 30 ft.")
  val shotMissedPenaltyShotXhtml = gameEvent(miss, "PHI #36 GUNNARSSON, Penalty Shot, Backhand, Wide of Net, Off. Zone, 15 ft.")
  val shotOnGoalXhtml = gameEvent(shot, "PHI ONGOAL - #11 MCCLEMENT, Tip-In, Def. Zone, 152 ft.")
  val shotOnGoalPenaltyShotXhtml = gameEvent(shot, "PHI ONGOAL - #17 SIMMONDS, Penalty Shot, Wrist, Off. Zone, 17 ft.")
  /**
   * Builds a complete event row: event number, period, strength, elapsed/game
   * time, the given event-type code, the description cell, and the shared
   * on-ice fragment.
   */
  private def gameEvent(eventType: String, description: String) =
    <tr class="evenColor"><td class="goal + bborder" rowspan="1" colspan="1" align="center">124</td>
      <td class="goal + bborder" rowspan="1" colspan="1" align="center">1</td>
      <td class="goal + bborder" rowspan="1" colspan="1" align="center">PP</td>
      <td class="goal + bborder" rowspan="1" colspan="1" align="center">19:53<br clear="none"/>0:07</td>
      <td class="goal + bborder" rowspan="1" colspan="1" align="center">{eventType}</td>
      <td class="goal + bborder" rowspan="1" colspan="1">{description}</td>
      {onIce}
    </tr>
  /**
   * Shared "players on ice" fragment: one cell per team, each a nested table
   * of jersey number over position, appended to every generated event row.
   */
  private lazy val onIce =
    <td class="bold + bborder + rborder" rowspan="1" colspan="1">
      <table cellspacing="0" cellpadding="0" border="0">
        <tr>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Center - DAVE BOLLAND" style="cursor:hand;">63</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">C</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Left Wing - MASON RAYMOND" style="cursor:hand;">12</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">L</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Defense - MARK FRASER" style="cursor:hand;">2</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">D</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Defense - CODY FRANSON" style="cursor:hand;">4</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">D</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Goalie - JONATHAN BERNIER" style="cursor:hand;">45</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">G</td></tr>
            </table>
          </td>
        </tr>
      </table>
    </td>
    <td class="bold + bborder" rowspan="1" colspan="1">
      <table cellspacing="0" cellpadding="0" border="0">
        <tr>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Center - BRAYDEN SCHENN" style="cursor:hand;">10</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">C</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Center - VINCENT LECAVALIER" style="cursor:hand;">40</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">C</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Right Wing - MATT READ" style="cursor:hand;">24</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">R</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Right Wing - CLAUDE GIROUX" style="cursor:hand;">28</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">R</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Defense - MARK STREIT" style="cursor:hand;">32</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">D</td></tr>
            </table>
          </td>
          <td rowspan="1" colspan="1" align="center">&nbsp;</td>
          <td rowspan="1" colspan="1" align="center">
            <table cellspacing="0" cellpadding="0" border="0">
              <tr><td rowspan="1" colspan="1" align="center"><font title="Goalie - STEVE MASON" style="cursor:hand;">35</font></td></tr>
              <tr><td rowspan="1" colspan="1" align="center">G</td></tr>
            </table>
          </td>
        </tr>
      </table>
    </td>
}
| peruukki/NHLReplay | test/com/nhlreplay/parser/playbyplay/TestGameEventXhtml.scala | Scala | mit | 7,875 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.table.api.internal.TableEnvironmentImpl
import org.apache.flink.table.api.java.StreamTableEnvironment
import org.apache.flink.table.planner.utils.TestTableSourceSinks
import org.apache.flink.types.Row
import org.apache.flink.util.TestLogger
import org.apache.flink.shaded.guava18.com.google.common.collect.Lists
import org.hamcrest.Matchers.containsString
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.rules.{ExpectedException, TemporaryFolder}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Rule, Test}
import _root_.java.util
/**
 * Integration tests for Table.execute(), run against each parameterized
 * combination of table-environment implementation and streaming/batch mode
 * (see the companion object's parameter matrix).
 */
@RunWith(classOf[Parameterized])
class TableITCase(tableEnvName: String, isStreaming: Boolean) extends TestLogger {
  // used for accurate exception information checking.
  val expectedException: ExpectedException = ExpectedException.none()
  @Rule
  def thrown: ExpectedException = expectedException
  private val _tempFolder = new TemporaryFolder()
  @Rule
  def tempFolder: TemporaryFolder = _tempFolder
  var tEnv: TableEnvironment = _
  // Build the environment settings matching the requested execution mode.
  private val settings = if (isStreaming) {
    EnvironmentSettings.newInstance().inStreamingMode().build()
  } else {
    EnvironmentSettings.newInstance().inBatchMode().build()
  }
  @Before
  def setup(): Unit = {
    // Instantiate the table environment named by the test parameter and
    // register the CSV-backed "MyTable" source used by all tests.
    tableEnvName match {
      case "TableEnvironment" =>
        tEnv = TableEnvironmentImpl.create(settings)
      case "StreamTableEnvironment" =>
        tEnv = StreamTableEnvironment.create(
          StreamExecutionEnvironment.getExecutionEnvironment, settings)
      case _ => throw new UnsupportedOperationException("unsupported tableEnvName: " + tableEnvName)
    }
    TestTableSourceSinks.createPersonCsvTemporaryTable(tEnv, "MyTable")
  }
  @Test
  def testExecute(): Unit = {
    val query =
      """
        |select id, concat(concat(`first`, ' '), `last`) as `full name`
        |from MyTable where mod(id, 2) = 0
      """.stripMargin
    val table = tEnv.sqlQuery(query)
    val tableResult = table.execute()
    assertTrue(tableResult.getJobClient.isPresent)
    assertEquals(ResultKind.SUCCESS_WITH_CONTENT, tableResult.getResultKind)
    assertEquals(
      TableSchema.builder()
        .field("id", DataTypes.INT())
        .field("full name", DataTypes.STRING())
        .build(),
      tableResult.getTableSchema)
    val expected = util.Arrays.asList(
      Row.of(Integer.valueOf(2), "Bob Taylor"),
      Row.of(Integer.valueOf(4), "Peter Smith"),
      Row.of(Integer.valueOf(6), "Sally Miller"),
      Row.of(Integer.valueOf(8), "Kelly Williams"))
    val actual = Lists.newArrayList(tableResult.collect())
    // Result order is not guaranteed; sort by id to make the comparison deterministic.
    actual.sort(new util.Comparator[Row]() {
      override def compare(o1: Row, o2: Row): Int = {
        o1.getField(0).asInstanceOf[Int].compareTo(o2.getField(0).asInstanceOf[Int])
      }
    })
    assertEquals(expected, actual)
  }
  @Test
  def testExecuteWithUpdateChanges(): Unit = {
    // Only meaningful in streaming mode; batch runs exit early.
    if (!isStreaming) {
      return
    }
    // TODO Once FLINK-16998 is finished, all kinds of changes will be supported.
    thrown.expect(classOf[TableException])
    thrown.expectMessage(containsString(
      "AppendStreamTableSink doesn't support consuming update changes"))
    tEnv.sqlQuery("select count(*) from MyTable").execute()
  }
}
object TableITCase {

  /** Parameter matrix: (table-environment implementation, streaming mode). */
  @Parameterized.Parameters(name = "{0}:isStream={1}")
  def parameters(): util.Collection[Array[_]] = {
    val cases: Seq[Array[_]] = Seq(
      Array("TableEnvironment", true),
      Array("TableEnvironment", false),
      Array("StreamTableEnvironment", true))
    util.Arrays.asList(cases: _*)
  }
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableITCase.scala | Scala | apache-2.0 | 4,478 |
/*
* Copyright (c) 2016, Groupon, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of GROUPON nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.groupon.metrics
import java.util.concurrent.TimeUnit
import org.apache.spark.groupon.metrics.util.{TestMetricsRpcEndpoint, SparkContextSetup}
import org.apache.spark.rpc.RpcEndpointRef
import org.scalatest.concurrent.Eventually
import org.scalatest.{Matchers, BeforeAndAfter, FlatSpec}
import scala.util.Random
/**
 * Verifies that each SparkMetric type sends the expected messages to the
 * metrics RPC endpoint. Metric delivery is asynchronous over Spark RPC, so
 * all assertions are wrapped in `eventually`.
 */
class SparkMetricTest extends FlatSpec with Matchers with BeforeAndAfter with Eventually with SparkContextSetup {
  var metricsEndpoint: TestMetricsRpcEndpoint = _
  var metricsEndpointRef: RpcEndpointRef = _
  before {
    // Register a fresh endpoint under a random name so tests don't collide.
    metricsEndpoint = new TestMetricsRpcEndpoint(sc.env.rpcEnv)
    metricsEndpointRef = sc.env.rpcEnv.setupEndpoint(Random.alphanumeric.take(32).mkString(""), metricsEndpoint)
  }
  after {
    metricsEndpoint.stop()
    metricsEndpoint = null
    metricsEndpointRef = null
  }
  "Counter" should "increment correctly" in {
    val counterName = "counter"
    val counter = new SparkCounter(metricsEndpointRef, counterName)
    val incrementVals = Seq(1, 2, 3, -1, -2, -3)
    incrementVals.foreach(i => counter.inc(i))
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq.fill(incrementVals.length)(counterName)
      metricsEndpoint.getMetricValues shouldBe incrementVals
    }
  }
  it should "decrement correctly" in {
    val counterName = "counter"
    val counter = new SparkCounter(metricsEndpointRef, counterName)
    val decrementVals = Seq(1, 2, 3, -1, -2, -3)
    decrementVals.foreach(i => counter.dec(i))
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq.fill(decrementVals.length)(counterName)
      // dec(i) should arrive at the endpoint as the negated value.
      metricsEndpoint.getMetricValues shouldBe decrementVals.map(i => -i)
    }
  }
  "Gauge" should "set its value correctly" in {
    val gaugeName = "gauge"
    val gauge = new SparkGauge(metricsEndpointRef, gaugeName)
    val gaugeVal = 1
    gauge.set(gaugeVal)
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq(gaugeName)
      metricsEndpoint.getMetricValues shouldBe Seq(gaugeVal)
    }
  }
  "Histogram" should "set its value correctly" in {
    val histogramName = "histogram"
    val histogram = new SparkHistogram(metricsEndpointRef, histogramName, ReservoirClass.ExponentiallyDecaying)
    val histogramVals = Seq(1, 2, 3, -1, -2, -3)
    histogramVals.foreach(i => histogram.update(i))
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq.fill(histogramVals.length)(histogramName)
      metricsEndpoint.getMetricValues shouldBe histogramVals
    }
  }
  it should "use the correct Reservoir class" in {
    // Each supported reservoir class must be propagated in the message.
    val reservoirClasses = Seq(
      ReservoirClass.ExponentiallyDecaying,
      ReservoirClass.SlidingTimeWindow,
      ReservoirClass.SlidingWindow,
      ReservoirClass.Uniform
    )
    reservoirClasses.foreach(reservoirClass => {
      new SparkHistogram(metricsEndpointRef, "histogram", reservoirClass).update(0)
    })
    eventually {
      metricsEndpoint.metricStore.map(metricMessage => {
        metricMessage.asInstanceOf[HistogramMessage].reservoirClass
      }) shouldBe reservoirClasses
    }
  }
  "Meter" should "set its value correctly" in {
    val meterName = "meter"
    val meter = new SparkMeter(metricsEndpointRef, meterName)
    val meterVals = Seq(1, 2, 3, -1, -2, -3)
    meterVals.foreach(i => meter.mark(i))
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq.fill(meterVals.length)(meterName)
      metricsEndpoint.getMetricValues shouldBe meterVals
    }
  }
  "Timer" should "set its value correctly with 'update'" in {
    val timerName = "timer"
    val timer = new SparkTimer(metricsEndpointRef, timerName, ReservoirClass.ExponentiallyDecaying, ClockClass.UserTime)
    val timerVal = 1000
    timer.update(timerVal, TimeUnit.NANOSECONDS)
    eventually {
      metricsEndpoint.getMetricNames shouldBe Seq(timerName)
      metricsEndpoint.getMetricValues shouldBe Seq(timerVal)
    }
  }
  it should "convert time units correctly with 'update'" in {
    val timer = new SparkTimer(metricsEndpointRef, "timer", ReservoirClass.ExponentiallyDecaying, ClockClass.UserTime)
    val timeUnits = Seq(
      TimeUnit.DAYS,
      TimeUnit.HOURS,
      TimeUnit.MICROSECONDS,
      TimeUnit.MILLISECONDS,
      TimeUnit.MINUTES,
      TimeUnit.NANOSECONDS,
      TimeUnit.SECONDS
    )
    val timerVal = 1
    timeUnits.foreach(timeUnit => timer.update(timerVal, timeUnit))
    eventually {
      // Every unit should be normalized to nanoseconds on the endpoint side.
      metricsEndpoint.getMetricValues shouldBe timeUnits.map(timeUnit => timeUnit.toNanos(timerVal))
    }
  }
  it should "time a function correctly" in {
    val timer = new SparkTimer(metricsEndpointRef, "timer", ReservoirClass.ExponentiallyDecaying, ClockClass.UserTime)
    // Function should take about 500 milliseconds
    val (duration, timeUnit) = (500, TimeUnit.MILLISECONDS)
    // 100 ms tolerance for scheduling jitter; a tighter bound would be flaky.
    val toleranceRange = timeUnit.toNanos(duration) +- timeUnit.toNanos(100)
    timer.time({
      timeUnit.sleep(duration)
    })
    eventually {
      metricsEndpoint.getMetricValues.head.asInstanceOf[Long] shouldBe toleranceRange
    }
  }
  it should "time using a Context correctly" in {
    val timer = new SparkTimer(metricsEndpointRef, "timer", ReservoirClass.ExponentiallyDecaying, ClockClass.UserTime)
    // Timer should measure about 500 milliseconds
    val (duration, timeUnit) = (500, TimeUnit.MILLISECONDS)
    val toleranceRange = timeUnit.toNanos(duration) +- timeUnit.toNanos(100)
    val timerContext = timer.time()
    timeUnit.sleep(duration)
    timerContext.stop()
    eventually {
      metricsEndpoint.getMetricValues.head.asInstanceOf[Long] shouldBe toleranceRange
    }
  }
  it should "use the correct Reservoir class" in {
    val reservoirClasses = Seq(
      ReservoirClass.ExponentiallyDecaying,
      ReservoirClass.SlidingTimeWindow,
      ReservoirClass.SlidingWindow,
      ReservoirClass.Uniform
    )
    reservoirClasses.foreach(reservoirClass => {
      new SparkTimer(metricsEndpointRef, "timer", reservoirClass, ClockClass.UserTime).update(0, TimeUnit.SECONDS)
    })
    eventually {
      metricsEndpoint.metricStore.map(metricMessage => {
        metricMessage.asInstanceOf[TimerMessage].reservoirClass
      }) shouldBe reservoirClasses
    }
  }
  it should "use the correct Clock class" in {
    val clockClasses = Seq(
      ClockClass.UserTime,
      ClockClass.CpuTime
    )
    clockClasses.foreach(clockClass => {
      new SparkTimer(metricsEndpointRef, "timer", ReservoirClass.ExponentiallyDecaying, clockClass)
        .update(0, TimeUnit.SECONDS)
    })
    eventually {
      metricsEndpoint.metricStore.map(metricMessage => {
        metricMessage.asInstanceOf[TimerMessage].clockClass
      }) shouldBe clockClasses
    }
  }
}
| groupon/spark-metrics | src/test/scala/org/apache/spark/groupon/metrics/SparkMetricTest.scala | Scala | bsd-3-clause | 8,238 |
package is.hail.utils
import org.apache.log4j.{LogManager, Logger}
/**
 * Mixin providing console and file logging. Console messages go to the
 * "Hail" logger; untruncated (full-length) messages go to the root logger.
 */
trait Logging {
  // Loggers are @transient vars (not lazy vals) so serialized instances
  // (e.g. in Spark closures) re-acquire loggers on the remote side.
  @transient private var logger: Logger = _
  @transient private var consoleLogger: Logger = _

  /** Root logger; receives untruncated messages destined for the log file. */
  def log: Logger = {
    if (logger == null)
      logger = LogManager.getRootLogger
    logger
  }

  /** "Hail" logger; messages here are shown on the user's console. */
  def consoleLog: Logger = {
    if (consoleLogger == null)
      consoleLogger = LogManager.getLogger("Hail")
    consoleLogger
  }

  def info(msg: String): Unit = consoleLog.info(msg)

  /** Logs the truncated form to the console and, if different, the full form to the root logger. */
  def info(msg: String, t: Truncatable): Unit =
    logTruncatable(msg, t)(s => consoleLog.info(s), s => log.info(s))

  def warn(msg: String): Unit = consoleLog.warn(msg)

  /** Logs the truncated form to the console and, if different, the full form to the root logger. */
  def warn(msg: String, t: Truncatable): Unit =
    logTruncatable(msg, t)(s => consoleLog.warn(s), s => log.warn(s))

  def error(msg: String): Unit = consoleLog.error(msg)

  // Shared implementation for the Truncatable overloads of info/warn (was
  // duplicated verbatim in both). When the screen and full forms differ the
  // message ends up twice in the log file (once truncated via the console
  // logger, once in full) — this mirrors the original behavior, which noted
  // the duplication is not a big problem.
  private def logTruncatable(msg: String, t: Truncatable)(toConsole: String => Unit, toFile: String => Unit): Unit = {
    val (screen, logged) = t.strings
    toConsole(format(msg, screen))
    if (screen != logged)
      toFile(format(msg, logged))
  }
}
| cseed/hail | hail/src/main/scala/is/hail/utils/Logging.scala | Scala | mit | 1,222 |
/*
* Copyright ActionML, LLC under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* ActionML licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actionml.core.search.elasticsearch
import com.actionml.core.{HealthCheckResponse, HealthCheckStatus}
import com.actionml.core.HealthCheckStatus.HealthCheckStatus
import java.io.{BufferedReader, IOException, InputStreamReader, UnsupportedEncodingException}
import java.net.{URI, URLEncoder}
import java.time.Instant
import com.actionml.core.model.Comment
import com.actionml.core.search.Filter.{Conditions, Types}
import com.actionml.core.search._
import com.actionml.core.search.elasticsearch.ElasticSearchSupport.{EsDocument, mkClient}
import com.actionml.core.validate.JsonSupport
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.LazyLogging
import org.apache.http.HttpHost
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.impl.client.BasicCredentialsProvider
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import org.apache.http.util.EntityUtils
import org.apache.spark.rdd.RDD
import org.elasticsearch.client.RestClientBuilder.HttpClientConfigCallback
import org.elasticsearch.client.{Request, Response, ResponseListener, RestClient}
import org.json4s.DefaultReaders._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, JValue, _}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}
import scala.language.postfixOps
import scala.reflect.ManifestFactory
import scala.util.control.NonFatal
import scala.util.{Properties, Success, Try}
/** Mixin wiring [[SearchSupport]] to Elasticsearch-backed clients keyed by index alias. */
trait ElasticSearchSupport extends SearchSupport[Hit, EsDocument] {
  override def createSearchClient(aliasName: String): SearchClient[Hit, EsDocument] = ElasticSearchClient(aliasName)
}
object ElasticSearchSupport {
  // A document is its id plus a map of field name -> list of string values.
  type EsDocument = (String, Map[String, List[String]])
  /**
   * Asynchronously pings the first configured node: green on HTTP 200,
   * red on any other status or on failure. Never fails the returned future.
   */
  def healthCheck(): Future[HealthCheckStatus] = {
    val p = Promise[HealthCheckStatus]()
    // NOTE(review): the endpoint passed here is the host's absolute URI rather
    // than a relative path — confirm the low-level client accepts this form.
    healthCheckClient.performRequestAsync(new Request("GET", healthCheckClient.getNodes.get(0).getHost.toURI), new ResponseListener {
      override def onSuccess(response: Response): Unit = response.getStatusLine.getStatusCode match {
        case 200 => p.success(HealthCheckStatus.green)
        case _ => p.success(HealthCheckStatus.red)
      }
      override def onFailure(e: Exception): Unit = p.success(HealthCheckStatus.red)
    })
    p.future
  }
  private lazy val config: Config = ConfigFactory.load()
  /**
   * Builds a REST client from the ELASTICSEARCH_URI environment variable
   * (default http://localhost:9200), with optional basic auth taken from
   * the `elasticsearch.auth` config section.
   */
  private[elasticsearch] def mkClient: RestClient = {
    val uri = new URI(Properties.envOrElse("ELASTICSEARCH_URI", "http://localhost:9200" ))
    val builder = RestClient.builder(
      new HttpHost(
        uri.getHost,
        uri.getPort,
        uri.getScheme))
    if (config.hasPath("elasticsearch.auth")) {
      val authConfig = config.getConfig("elasticsearch.auth")
      builder.setHttpClientConfigCallback(new BasicAuthProvider(
        authConfig.getString("username"),
        authConfig.getString("password")
      ))
    }
    builder.build
  }
  // Dedicated client reused for all health checks.
  private lazy val healthCheckClient = mkClient
  /** Installs username/password credentials on the underlying async HTTP client. */
  private class BasicAuthProvider(username: String, password: String) extends HttpClientConfigCallback {
    private val credentialsProvider = new BasicCredentialsProvider()
    credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password))
    override def customizeHttpClient(httpClientBuilder: HttpAsyncClientBuilder): HttpAsyncClientBuilder = {
      httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider)
    }
  }
}
/** Converts a raw JSON search response into typed results of T. */
trait JsonSearchResultTransformation[T] {
  // json4s reader used to materialize individual results.
  implicit def reader: Reader[T]
  implicit def manifest: Manifest[T]
  /** Extracts all results from the full response body. */
  def transform(j: JValue): Seq[T]
}
/** Turns an Elasticsearch search response into [[Hit]]s ("_id" plus "_score"). */
trait ElasticSearchResultTransformation extends JsonSearchResultTransformation[Hit] {
  override implicit val manifest: Manifest[Hit] = ManifestFactory.classType(classOf[Hit])

  // Reads one hit object; both "_id" and "_score" must be present.
  override implicit val reader: Reader[Hit] = new Reader[Hit] {
    def read(value: JValue): Hit = value match {
      case JObject(fields) =>
        val idField = fields.collectFirst { case ("_id", v) => v }
        val scoreField = fields.collectFirst { case ("_score", v) => v }
        (idField, scoreField) match {
          case (Some(id), Some(score)) => Hit(id.as[String], score.as[Float])
          case _ => throw new MappingException("Can't convert %s to Hit." format value)
        }
      case other =>
        throw new MappingException("Can't convert %s to Hit." format other)
    }
  }

  override def transform(j: JValue): Seq[Hit] = (j \\ "hits" \\ "hits").as[Seq[Hit]]
}
class ElasticSearchClient private (alias: String, client: RestClient)(implicit w: Writer[EsDocument])
extends SearchClient[Hit, EsDocument] with LazyLogging with WriteToEsSupport with JsonSupport {
this: JsonSearchResultTransformation[Hit] =>
import ElasticSearchClient.ESVersions._
import ElasticSearchClient._
implicit val _ = DefaultFormats
  // Probes the cluster root endpoint once at construction to detect the major
  // Elasticsearch version; falls back to v7 on any error or unknown version.
  private val esVersion: ESVersions.Value = {
    import ESVersions._
    val ver = try {
      val response = client.performRequest(new Request("GET", "/"))
      val version = (JsonMethods.parse(response.getEntity.getContent) \\ "version" \\ "number").as[String]
      if (version.startsWith("5.")) v5
      else if (version.startsWith("6.")) v6
      else if (version.startsWith("7.")) v7
      else {
        // Unrecognized (presumably newer) version: treat it like v7.
        logger.debug(s"Elastic Search version: $version")
        v7
      }
    } catch {
      case NonFatal(e) =>
        logger.error("Elasticsearch version can't be determined. v7 will be used.", e)
        v7
    }
    logger.info(s"Detected Elasticsearch version $ver")
    ver
  }
  // ES 7 removed mapping types; earlier versions use the custom "items" type.
  private val indexType = if (esVersion != v7) "items" else "_doc"
  /** Releases the underlying low-level REST client. */
  override def close: Unit = client.close()

  /**
   * Creates a fresh index (named via createIndexName from this client's alias).
   * @param fieldNames   fields to include in the index mapping
   * @param typeMappings field name -> (type name, flag) overrides — presumably
   *                     (ES type, indexed?); confirm against createIndexByName
   * @param refresh      whether to refresh the index after creation
   */
  override def createIndex(
    fieldNames: List[String],
    typeMappings: Map[String, (String, Boolean)],
    refresh: Boolean): Boolean = {
    val indexName = createIndexName(alias)
    createIndexByName(indexName, indexType, fieldNames, typeMappings, refresh)
  }
  /**
   * Upserts `doc` under `id` via the `_update` API with `doc_as_upsert`.
   * First verifies that the alias exists; when it does not (index not
   * trained yet) the future completes successfully with an explanatory
   * Comment rather than failing. Only unexpected HTTP codes or transport
   * errors fail the returned future.
   */
  def saveOneByIdAsync(id: String, doc: EsDocument): Future[Comment] = {
    val promise = Promise[Comment]()
    val msg404 = "The Elasticsearch index does not exist, have you trained yet?"
    // ES 7 dropped the type segment from the update endpoint path.
    val updateUri = if (esVersion != ESVersions.v7) s"/$alias/$indexType/${encodeURIFragment(id)}/_update"
    else s"/$alias/_update/${encodeURIFragment(id)}"
    // Issues the actual upsert once the alias is known to exist.
    def sendUpdate = {
      val updateRequest = new Request("POST", updateUri)
      updateRequest.setJsonEntity(
        JsonMethods.compact(JObject(
          "doc" -> JsonMethods.asJValue(doc),
          "doc_as_upsert" -> JBool(true)
        )))
      val updateListener = new ResponseListener {
        override def onSuccess(response: Response): Unit = {
          response.getStatusLine.getStatusCode match {
            case 200 | 201 =>
              val responseJValue = parse(EntityUtils.toString(response.getEntity))
              // NOTE(review): this result check is computed but unused — the
              // promise succeeds regardless of whether "result" == "updated".
              (responseJValue \\ "result").getAs[String].contains("updated")
              promise.success(Comment("Upserted successfully"))
            case 404 =>
              logger.warn(msg404)
              promise.success(Comment(msg404))
            case code =>
              logger.error(s"Elasticsearch update error. Got response code $code. ${new BufferedReader(new InputStreamReader(response.getEntity.getContent))}")
              promise.failure(new RuntimeException(s"Elasticsearch index update error: staus code $code"))
          }
        }
        override def onFailure(exception: Exception): Unit = exception match {
          case NonFatal(e) => promise.failure(e)
        }
      }
      client.performRequestAsync(updateRequest, updateListener)
    }
    try {
      // Check alias existence first so a missing index yields a friendly
      // Comment instead of a failed update call.
      val indexExistsRequest = new Request("GET", s"/_alias/$alias")
      val indexExistsListerner = new ResponseListener {
        override def onSuccess(response: Response): Unit = {
          response.getStatusLine.getStatusCode match {
            case 200 =>
              // The alias response is keyed by concrete index name; any key
              // means at least one index is attached to the alias.
              Try(parse(EntityUtils.toString(response.getEntity))
                .extract[Map[String, JValue]]
                .keys
                .headOption).toOption.flatten match {
                case None => promise.success(Comment("The Elasticsearch index does not exist, have you trained yet?"))
                case Some(_) => sendUpdate
              }
            case 404 =>
              logger.warn(msg404)
              promise.success(Comment(msg404))
            case code =>
              logger.error(s"Elasticsearch update error. Response code $code. Alias $alias, id $id, document $doc")
              promise.failure(new RuntimeException("Unexpected HTTP response code from Elasticsearch"))
          }
        }
        override def onFailure(exception: Exception): Unit = {
          // Treat transport failure on the existence check as "no index yet".
          logger.warn(msg404)
          promise.success(Comment(msg404))
        }
      }
      client.performRequestAsync(indexExistsRequest, indexExistsListerner)
    } catch {
      case e: IOException =>
        logger.error(s"Can't upsert $doc with id $id", e)
        promise.failure(new RuntimeException("Can't connect to Elasticsearch nodes"))
      case NonFatal(e) =>
        logger.error(s"Can't upsert $doc with id $id", e)
        promise.failure(new RuntimeException("Elasticsearch error", e))
    }
    promise.future
  }
  /**
   * Deletes every index currently attached to this client's alias.
   * Returns true only if all deletions succeed; false when the alias
   * does not exist.
   */
  override def deleteIndex(refresh: Boolean): Boolean = {
    // todo: Andrey, this is a deprecated API, also this throws an exception when the Elasticsearch server is not running
    // it should give a more friendly error message by testing to see if Elasticsearch is running or maybe we should test
    // the required services when an engine is created or updated. This would be more efficient for frequent client requests
    client.performRequest(new Request("HEAD", s"/_alias/$alias")) // Does the alias exist?
      .getStatusLine
      .getStatusCode match {
      case 200 =>
        val aliasResponse = client.performRequest(new Request("GET", s"/_alias/$alias"))
        val responseJValue = parse(EntityUtils.toString(aliasResponse.getEntity))
        // The GET /_alias response is keyed by concrete index name.
        val indexSet = responseJValue.extract[Map[String, JValue]].keys
        indexSet.forall(deleteIndexByName(_, refresh))
      case _ => false
    }
  }
/** Runs a synchronous search against the single index behind the alias.
  *
  * @param query the search to execute
  * @return matching hits; empty when the alias or index is missing or the query fails
  */
override def search(query: SearchQuery): Seq[Hit] = {
  val aliasStatus = client
    .performRequest(new Request("HEAD", s"/_alias/$alias")) // Does the alias exist?
    .getStatusLine
    .getStatusCode
  if (aliasStatus != 200) {
    Seq.empty[Hit]
  } else {
    val aliasResponse = client.performRequest(new Request("GET", s"/_alias/$alias"))
    val aliasBody = parse(EntityUtils.toString(aliasResponse.getEntity))
    val indexNames = aliasBody.extract[Map[String, JValue]].keys
    indexNames.headOption.fold(Seq.empty[Hit]) { actualIndexName =>
      logger.debug(s"Query for alias $alias and index $actualIndexName:\\n$query")
      val searchRequest = new Request("POST", s"/$actualIndexName/_search")
      searchRequest.setJsonEntity(mkQueryString(query))
      val searchResponse = client.performRequest(searchRequest)
      if (searchResponse.getStatusLine.getStatusCode == 200) {
        logger.trace(s"Got source from query: $query")
        transform(parse(EntityUtils.toString(searchResponse.getEntity)))
      } else {
        logger.trace(s"Query: $query\\nproduced status code: ${searchResponse.getStatusLine.getStatusCode}")
        Seq.empty[Hit]
      }
    }
  }
}
/** Runs a search asynchronously against the alias.
  *
  * @param query the search to execute
  * @return future of matching hits; empty on non-200 responses, failed on
  *         connection errors or listener failures
  */
override def searchAsync(query: SearchQuery): Future[Seq[Hit]] = {
  val promise = Promise[Seq[Hit]]()
  try {
    val actualIndexName = alias
    logger.debug(s"Query for alias $alias and index $actualIndexName:\\n$query")
    val searchRequest = new Request("POST", s"/$actualIndexName/_search")
    searchRequest.setJsonEntity(mkQueryString(query))
    client.performRequestAsync(searchRequest, new ResponseListener {
      override def onSuccess(response: Response): Unit = {
        if (response.getStatusLine.getStatusCode == 200) {
          logger.debug(s"Got source from query: $query")
          promise.complete(Try(transform(parse(EntityUtils.toString(response.getEntity)))))
        } else {
          logger.trace(s"Query: $query\\nproduced status code: ${response.getStatusLine.getStatusCode}")
          promise.complete(Success(Seq.empty[Hit]))
        }
      }
      override def onFailure(exception: Exception): Unit = promise.failure(exception)
    })
  } catch {
    case e: IOException =>
      logger.error(s"Can't make a search $query", e)
      promise.failure(new RuntimeException("Can't connect to Elasticsearch nodes"))
    case NonFatal(e) =>
      logger.error(s"Can't make a search $query", e)
      promise.failure(new RuntimeException("Elasticsearch error", e))
  }
  promise.future
}
/** Blocking variant of findDocByIdAsync; waits indefinitely (Duration.Inf) for the result. */
def findDocById(id: String): EsDocument = Await.result(findDocByIdAsync(id), Duration.Inf)
/** Fetches the document with the given id from the aliased index.
  *
  * The returned future never fails: 404s, unexpected status codes, listener
  * failures and malformed "_source" bodies all resolve to (id, empty map), so
  * callers can treat any result as "the fields currently known for this id".
  *
  * @param id document id; URI-encoded before use in the request path
  * @return future of (id, field name -> list of string values)
  */
def findDocByIdAsync(id: String): Future[EsDocument] = {
  val promise = Promise[EsDocument]()
  // pre-ES7 style document path: /<alias>/<type>/<encoded id>
  val url = s"/$alias/$indexType/${encodeURIFragment(id)}"
  val request = new Request("GET", url)
  val listener = new ResponseListener {
    override def onSuccess(response: Response): Unit = {
      logger.trace(s"Got response: $response")
      // Some(json) when the body is usable; None means "treat as empty document"
      val rjv: Option[JValue] = response.getStatusLine.getStatusCode match {
        case 200 =>
          val entity = EntityUtils.toString(response.getEntity)
          logger.trace(s"Got status code: 200\\nentity: $entity")
          if (entity.isEmpty) {
            Option.empty[JValue]
          } else {
            logger.trace(s"About to parse: $entity")
            val result = parse(entity)
            logger.trace(s"GetSource for $url result: $result")
            Some(result)
          }
        case 404 =>
          // marker object without "_source"; presumably the extraction below then
          // lands in the ClassCastException handler and yields the empty map — TODO confirm
          logger.trace(s"Got status code: 404")
          Some(parse("""{"notFound": "true"}"""))
        case _ =>
          logger.trace(s"Got status code: ${response.getStatusLine.getStatusCode}\\nentity: ${EntityUtils.toString(response.getEntity)}")
          Option.empty[JValue]
      }
      promise.success(if (rjv.nonEmpty) {
        try {
          // keep only list-valued fields, and within those only string elements
          // NOTE(review): rjv.get is guarded by the nonEmpty check just above
          val jobj = (rjv.get \\ "_source").values.asInstanceOf[Map[String, Any]]
          id -> jobj.collect {
            case (name, value) if value.isInstanceOf[List[Any]] =>
              name -> value.asInstanceOf[List[Any]].collect {
                case v: String => v
              }
          }
        } catch {
          case e: ClassCastException =>
            logger.error("Wrong format of ES doc", e)
            id -> Map.empty[String, List[String]]
        }
      } else {
        logger.debug(s"Non-existent item-id: $id, creating new item.")
        id -> Map.empty[String, List[String]]
      })
    }
    override def onFailure(exception: Exception): Unit = {
      exception match {
        case e: org.elasticsearch.client.ResponseException =>
          if (e.getResponse.getStatusLine.getStatusCode == 404) logger.debug(s"got no data for the item because of $e - ${e.getResponse.getStatusLine.getReasonPhrase}")
          else logger.error("Find doc by id error", e)
        case NonFatal(e) =>
          logger.error("got unknown exception and so no data for the item", e)
      }
      // even transport-level failures resolve successfully with an empty document
      promise.success(id -> Map.empty[String, List[String]])
    }
  }
  client.performRequestAsync(request, listener)
  promise.future
}
/** Deletes all documents matching the query via the alias' _delete_by_query endpoint.
  *
  * @param query selects the documents to remove
  * @return future completing on HTTP 200 or 202, failing on any other status
  *         or on a transport error
  */
override def deleteDoc(query: SearchQuery): Future[Unit] = {
  val request = new Request("DELETE", s"/$alias/_delete_by_query")
  request.setJsonEntity(mkQueryString(query))
  val promise = Promise[Unit]()
  client.performRequestAsync(request, new ResponseListener {
    override def onSuccess(response: Response): Unit = response.getStatusLine.getStatusCode match {
      // 200: completed synchronously; 202: accepted for asynchronous processing
      case 200 | 202 => promise.success(())
      // fixed: the message was a plain string literal, so "$query" was never interpolated
      case _ => promise.failure(new RuntimeException(s"Delete error. Query: $query"))
    }
    override def onFailure(e: Exception): Unit = promise.failure(e)
  })
  promise.future
}
/** Rebuilds the data behind the alias with minimal downtime: writes indexRDD
  * into a fresh timestamped index, then swaps the alias over to it in a single
  * _aliases call and deletes the previously aliased index(es).
  *
  * @param indexRDD documents to write; es.mapping.id is set to "id", so each
  *                 map is expected to carry an "id" entry — TODO confirm callers guarantee this
  * @param fieldNames fields to declare in the new index's mapping
  * @param typeMappings optional (type, useNorms) per field name
  * @param numESWriteConnections optional cap on the Spark partitions writing to ES
  *                              (roughly one bulk-write connection per ES core)
  */
override def hotSwap(
  indexRDD: RDD[Map[String, Any]],
  fieldNames: List[String],
  typeMappings: Map[String, (String, Boolean)],
  numESWriteConnections: Option[Int] = None): Unit = {
  import org.elasticsearch.spark._
  val newIndex = createIndexName(alias)
  logger.info(s"Create new index: $newIndex, $fieldNames, $typeMappings")
  // todo: this should have a typeMappings that is Map[String, (String, Boolean)] with the Boolean saying to use norms
  // taken out for now since there is no client.admin in the REST client. Have to construct REST call directly
  // create the new index unaliased; the alias is attached only after the data load below
  createIndexByName(newIndex, indexType, fieldNames, typeMappings, refresh = false, doNotLinkAlias = true)
  // throttle writing to the max bulk-write connections, which is one per ES core.
  // todo: can we find this from the cluster itself?
  val repartitionedIndexRDD = if (numESWriteConnections.nonEmpty && indexRDD.context.defaultParallelism >
    numESWriteConnections.get) {
    logger.info(s"defaultParallelism: ${indexRDD.context.defaultParallelism}")
    logger.info(s"Coalesce to: ${numESWriteConnections.get} to reduce number of ES connections for saveToEs")
    indexRDD.coalesce(numESWriteConnections.get)
  } else {
    logger.info(s"Number of ES connections for saveToEs: ${indexRDD.context.defaultParallelism}")
    indexRDD
  }
  // ES7 dropped mapping types from document URIs, so the save path differs by version
  val newIndexURI = if (esVersion != v7) s"/$newIndex/$indexType"
  else s"/$newIndex"
  val esConfig = Map("es.mapping.id" -> "id")
  repartitionedIndexRDD.saveToEs(newIndexURI, esConfig)
  // discover which index(es) are currently behind the alias so the swap can detach them
  val (oldIndexSet, deleteOldIndexQuery) = client.performRequest(new Request("HEAD", s"/_alias/$alias")) // Does the alias exist?
    .getStatusLine.getStatusCode match {
    case 200 => {
      val response = client.performRequest(new Request("GET", s"/_alias/$alias"))
      val responseJValue = parse(EntityUtils.toString(response.getEntity))
      val oldIndexSet = responseJValue.extract[Map[String, JValue]].keys
      // NOTE(review): .head assumes a 200 alias response always lists at least
      // one index — TODO confirm this holds for dangling aliases
      val oldIndexName = oldIndexSet.head
      client.performRequest(new Request("HEAD", s"/$oldIndexName")) // Does the old index exist?
        .getStatusLine.getStatusCode match {
        case 200 => {
          val deleteOldIndexQuery = s""",{ "remove_index": { "index": "$oldIndexName"}}"""
          (oldIndexSet, deleteOldIndexQuery)
        }
        case _ => (Set(), "")
      }
    }
    case _ => (Set(), "")
  }
  // one _aliases call: attach the alias to the new index and remove the old index
  val aliasQuery =
    s"""
    |{
    | "actions" : [
    | { "add": { "index": "$newIndex", "alias": "$alias" } }
    | $deleteOldIndexQuery
    | ]
    |}
    """.stripMargin.replace("\\n", "")
  val request = new Request( "POST", "/_aliases")
  request.setJsonEntity(aliasQuery)
  client.performRequest(request)
  // finally drop the previously aliased indexes entirely
  oldIndexSet.foreach(deleteIndexByName(_, refresh = false))
}
/** Encodes a string for safe use in a URI path, mimicking JavaScript's
  * encodeURIComponent: spaces become "%20" (never '+') and the characters
  * ! ' ( ) ~ are left unescaped. Falls back to the raw string if the UTF-8
  * charset is somehow unsupported.
  *
  * @param s raw fragment (e.g. a document id)
  * @return the encoded fragment
  */
private def encodeURIFragment(s: String): String =
  try {
    URLEncoder.encode(s, "UTF-8")
      .replaceAll("\\+", "%20")
      .replaceAll("\\%21", "!")
      .replaceAll("\\%27", "'")
      .replaceAll("\\%28", "(")
      .replaceAll("\\%29", ")")
      // fixed: previously replaced "%7E" with itself (a no-op); encodeURIComponent
      // leaves '~' unescaped, so decode it back to the literal character
      .replaceAll("\\%7E", "~")
  } catch {
    case _: UnsupportedEncodingException => s
  }
/** Creates a new index declaring each field in the mapping; fields absent from
  * typeMappings default to type "keyword".
  *
  * Unless doNotLinkAlias is set, the alias is attached at creation time (the
  * JSON shape differs between ES7 and earlier versions).
  *
  * @param indexName unique name for the new index; creation is refused if it exists
  * @param indexType mapping type name (pre-ES7 style)
  * @param fieldNames fields to declare in the mapping
  * @param typeMappings optional (type, useNorms) per field; only the type is used here
  * @param refresh when true, refresh the index right after creation
  * @param doNotLinkAlias when true, do not attach the alias (used by hotSwap)
  * @return true when the index was created, false when it already existed
  * @throws IllegalStateException on an unexpected status from the existence check
  */
private def createIndexByName(
  indexName: String,
  indexType: String,
  fieldNames: List[String],
  typeMappings: Map[String, (String, Boolean)],
  refresh: Boolean,
  doNotLinkAlias: Boolean = false): Boolean = {
  // alias wiring for the new index
  // NOTE(review): the v7 branch emits a literal key "alias" — verify against the
  // ES7 create-index "aliases" schema ({"aliases": {"<name>": {}}})
  def mkAliasField =
    if (doNotLinkAlias) Nil
    else if (esVersion == v7) List(JField("aliases", JObject("alias" -> JString(alias))))
    else List(JField("aliases", JObject(alias -> JObject())))
  Try(client.performRequest(new Request("HEAD", s"/$indexName")).getStatusLine.getStatusCode)
    .getOrElse(404) match {
    case 404 => { // should always be a unique index name so fail unless we get a 404
      val body = JsonMethods.compact(JObject(
        JField("mappings",
          JObject(indexType ->
            JObject("properties" -> {
              fieldNames.map { fieldName =>
                if (typeMappings.contains(fieldName))
                  JObject(fieldName -> JObject("type" -> JString(typeMappings(fieldName)._1)))
                else // unspecified fields are treated as not_analyzed strings
                  JObject(fieldName -> JObject("type" -> JString("keyword")))
              }.foldLeft(JObject())(_ ~ _) // fixed: was reduce, which throws on an empty fieldNames list
            })
          )
        ) :: mkAliasField
      ))
      val request = new Request("PUT", s"/$indexName")
      if (esVersion == v7) request.addParameter("include_type_name", "true")
      request.setJsonEntity(body)
      client.performRequest(request)
        .getStatusLine.getStatusCode match {
        case 200 =>
          // now refresh to get it 'committed'
          // todo: should do this after the new index is created so no index downtime
          if (refresh) refreshIndexByName(indexName)
        case _ =>
          logger.warn(s"Index $indexName wasn't created, but may have quietly failed.")
      }
      true
    }
    case 200 =>
      logger.warn(s"Elasticsearch index: $indexName wasn't created because it already exists. " +
        s"This may be an error. Leaving the old index active.")
      false
    case _ =>
      // fixed: removed the unreachable `false` that followed this throw
      throw new IllegalStateException(s"/$indexName is invalid.")
  }
}
/** Deletes a concrete index by name.
  *
  * @param indexName the index to delete
  * @param refresh when true, refresh after a successful deletion
  * @return false when the index does not exist (404); true when the DELETE was issued
  *         (a non-200 DELETE response is logged but still reported as true)
  * @throws IllegalStateException on an unexpected status from the existence check
  */
private def deleteIndexByName(indexName: String, refresh: Boolean): Boolean = {
  client.performRequest(new Request("HEAD", s"/$indexName"))
    .getStatusLine.getStatusCode match {
    case 404 => false
    case 200 =>
      client.performRequest(new Request("DELETE", s"/$indexName"))
        .getStatusLine.getStatusCode match {
        case 200 =>
          if (refresh) refreshIndexByName(indexName)
        case _ =>
          logger.warn(s"Index $indexName wasn't deleted, but may have quietly failed.")
      }
      true
    case _ =>
      // fixed: removed the unreachable `false` that followed this throw,
      // and gave the exception a diagnostic message
      throw new IllegalStateException(s"Unexpected status checking existence of index /$indexName")
  }
}
/** Forces a refresh of the given index so recent writes become visible to search. */
private def refreshIndexByName(indexName: String): Unit = {
  client.performRequest(new Request("POST", s"/$indexName/_refresh"))
}
/** Renders a SearchQuery as the JSON body of an Elasticsearch bool query.
  *
  * The clause shape differs between ES 5 (terms-style matchers) and ES 6+
  * (term clauses). Results are sorted by _score and then by the query's
  * sortBy field, both descending.
  *
  * @param query the query to render
  * @return compact JSON string for the _search request body
  */
private[elasticsearch] def mkQueryString(query: SearchQuery): String = {
  import org.json4s.jackson.JsonMethods._
  val noMustValues = query.must.flatMap(_.values).isEmpty
  val hasShouldValues = query.should.flatMap(_.values).nonEmpty
  val boolClauses: JObject =
    if (esVersion == ESVersions.v5) {
      val base = JObject() ~
        ("must" -> matcherToJson(Map("terms" -> query.must))) ~
        ("must_not" -> matcherToJson(Map("terms" -> query.mustNot))) ~
        ("filter" -> filterToJson(query.filters)) ~
        ("should" -> matcherToJson(Map("terms" -> query.should), "constant_score" -> JObject("filter" -> ("match_all" -> JObject()), "boost" -> 0)))
      // without any must clauses, at least one should clause has to match
      if (noMustValues) base ~ ("minimum_should_match" -> 1) else base
    } else {
      val base = JObject() ~
        ("must" -> mustToJson(query.must)) ~
        ("must_not" -> clausesToJson(Map("term" -> query.mustNot))) ~
        ("filter" -> filterToJson(query.filters)) ~
        ("should" -> clausesToJson(Map("term" -> query.should)))
      if (noMustValues && hasShouldValues) base ~ ("minimum_should_match" -> JInt(1)) else base
    }
  val esQuery =
    ("size" -> query.size) ~
      ("from" -> query.from) ~
      ("query" -> JObject("bool" -> boolClauses)) ~
      ("sort" -> Seq(
        "_score" -> JObject("order" -> JString("desc")),
        query.sortBy -> (("unmapped_type" -> "double") ~ ("order" -> "desc"))
      ))
  val rendered = compact(render(esQuery))
  logger.debug(s"Query for Elasticsearch: $rendered")
  rendered
}
/** Builds the "must" clause list. An empty matcher set degenerates to a
  * zero-boost match_all so the should clauses alone can drive scoring.
  */
private def mustToJson: Seq[Matcher] => JValue = { matchers =>
  if (matchers.nonEmpty) clausesToJson(Map("term" -> matchers), mkAND = false)
  else JObject("match_all" -> JObject("boost" -> JDouble(0)))
}
/** Renders matchers as term-style clause objects for a bool query.
  *
  * With mkAND = true (the default) every matcher value becomes its own clause,
  * which the enclosing bool context combines conjunctively. With mkAND = false
  * a multi-valued matcher is wrapped in a nested bool/should, turning its
  * values into a disjunction.
  *
  * @param clauses clause name (e.g. "term") -> matchers to render under it
  * @param mkAND when false, multi-valued matchers become nested bool/should groups
  */
private def clausesToJson(clauses: Map[String, Seq[Matcher]], mkAND: Boolean = true): JArray = {
  // one {clause: {name: {value: v [, boost: b]}}} entry
  def mkClause(clause: String, m: Matcher, v: String): (String, JObject) = {
    clause -> JObject {
      m.name ->
        ("value" -> JString(v)) ~
        m.boost.fold[JObject](JObject())(b => JObject("boost" -> JDouble(b)))
    }
  }
  clauses.view.flatMap { case (clause, matchers) =>
    matchers.flatMap {
      // AND mode, or zero/one values: emit one clause per value
      case m@Matcher(_, values, _) if mkAND || values.isEmpty || values.size == 1 =>
        m.values.map { v =>
          mkClause(clause, m, v)
        }
      // OR mode with multiple values: nest the clauses under bool/should
      case m => Seq("bool" -> JObject("should" -> JArray(
        m.values.map { v => JObject(
          mkClause(clause, m, v)
        )}.toList
      )))
    }
  }
}
/** Renders matchers as simple {clause: {name: values [, boost]}} entries,
  * appending any pre-built extra clauses (e.g. a constant_score filter).
  *
  * @param clauses clause name -> matchers to render under it
  * @param others additional ready-made clause objects appended verbatim
  */
private def matcherToJson(clauses: Map[String, Seq[Matcher]], others: (String, JObject)*): JArray = {
  val rendered = for {
    (clause, matchers) <- clauses.toList
    m <- matchers
  } yield clause -> (m.name -> m.values) ~ m.boost.fold(JObject())("boost" -> _)
  rendered ++ others.toList
}
/** Groups filters by (type, name), merging multiple conditions on the same
  * field into one JSON object, and renders each group as {type: {name: cond}}.
  * An eq condition is emitted as the bare value; any other condition as
  * {conditionName: value} (e.g. range operators).
  *
  * @param filters filters to render
  * @return one JSON object per (type, name) group
  */
private[elasticsearch] def filterToJson(filters: Seq[Filter]): JArray = {
  // fixed: was `implicit val _ = ...`; `_` as an identifier is deprecated in newer Scala
  implicit val decomposeFormats = CustomFormats
  // renders a single filter's condition (previously duplicated in both fold branches)
  def conditionJson(f: Filter): JObject = JObject(
    f.name -> (if (f.condition == Conditions.eq) Extraction.decompose(f.value)
    else JObject(f.condition.toString -> Extraction.decompose(f.value)))
  )
  val grouped = filters.foldLeft(Map.empty[(Types.Type, String), JObject]) { case (acc, f) =>
    val key = f.`type` -> f.name
    val entry = acc.get(key).fold(conditionJson(f))(existing => existing.merge(conditionJson(f)))
    acc + (key -> entry)
  }
  grouped.toList.map {
    case ((t, _), j) => JObject(t.toString -> j)
  }
}
}
/** Companion/factory for ElasticSearchClient instances. */
object ElasticSearchClient extends LazyLogging with JsonSupport {
  // JSON writer for EsDocument field maps: each field becomes an array of its
  // string values; the foldLeft with prepend (::) emits fields in the reverse
  // of the map's iteration order
  // NOTE(review): `_` as an identifier is deprecated in newer Scala — consider renaming
  private implicit val _: Writer[EsDocument] = new Writer[EsDocument] {
    override def write(doc: EsDocument): JValue = JObject(
      doc._2.foldLeft[List[JField]](List.empty[JField]) { case (acc, (key, values)) =>
        JField(key, JArray(values.map(JString))) :: acc
      }
    )
  }
  /** Builds a client bound to the given alias, mixing in the default result transformation. */
  def apply(aliasName: String): ElasticSearchClient = {
    new ElasticSearchClient(aliasName, mkClient) with ElasticSearchResultTransformation
  }
  // index names are the alias plus a millisecond timestamp so hotSwap can always
  // create a fresh, unique index behind the same alias
  private def createIndexName(alias: String) = alias + "_" + Instant.now().toEpochMilli.toString
  // major Elasticsearch versions with differing query/mapping syntax
  private object ESVersions extends Enumeration {
    val v5 = Value(5)
    val v6 = Value(6)
    val v7 = Value(7)
  }
}
| actionml/harness | rest-server/core/src/main/scala/com/actionml/core/search/elasticsearch/ElasticSearchSupport.scala | Scala | apache-2.0 | 27,620 |
package org.nisshiee.toban.util
import org.nisshiee.toban.model._
object ViewHelper {
implicit def color2rich(color: Member.Color): RichColor = RichColor(color)
implicit def member2rich(member: Member): RichMember = RichMember(member)
val colors = List(
Member.Gold
,Member.Silver
,Member.Green
,Member.Blue
,Member.Yellow
,Member.Red
,Member.Dark
)
case class RichColor(base: Member.Color) {
def btnClass = base match {
case Member.Green => "btn-success"
case Member.Yellow => "btn-warning"
case Member.Red => "btn-danger"
case Member.Dark => "btn-dark"
case Member.Silver => "btn-silver"
case Member.Gold => "btn-gold"
case _ => "btn-info"
}
def changeFormId = base match {
case Member.Green => "change-green"
case Member.Yellow => "change-yellow"
case Member.Red => "change-red"
case Member.Dark => "change-dark"
case Member.Silver => "change-silver"
case Member.Gold => "change-gold"
case _ => "change-blue"
}
}
case class RichMember(base: Member) {
def labelClass = (base.status, base.color) match {
case (Member.Deleted, _) => "label-inverse"
case (_, Member.Green) => "label-success"
case (_, Member.Yellow) => "label-warning"
case (_, Member.Red) => "label-important"
case (_, Member.Dark) => "label-dark"
case (_, Member.Silver) => "label-silver"
case (_, Member.Gold) => "label-gold"
case _ => "label-info"
}
def btnClass = base.status match {
case Member.Deleted => "btn-inverse"
case _ => base.color.btnClass
}
}
}
| nisshiee/to-ban | app/utils/ViewHelper.scala | Scala | mit | 1,658 |
package com.github.mijicd.waes
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, ShouldMatchers}
trait TestSpec extends FlatSpecLike with ShouldMatchers with BeforeAndAfterAll
| mijicd/spray-json-diff | src/test/scala/com/github/mijicd/waes/TestSpec.scala | Scala | mit | 183 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.api.expr
import com.intellij.psi.PsiReferenceExpression
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
trait JawaReferenceExpression extends JawaExpression with PsiReferenceExpression {
} | arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/psi/api/expr/JawaReferenceExpression.scala | Scala | epl-1.0 | 630 |
package com.campudus.tableaux.database.model
import com.campudus.tableaux.database._
import com.campudus.tableaux.database.domain._
import com.campudus.tableaux.database.model.ServiceModel.ServiceId
import com.campudus.tableaux.database.model.TableauxModel.Ordering
import com.campudus.tableaux.helper.JsonUtils
import com.campudus.tableaux.helper.ResultChecker._
import com.campudus.tableaux.router.auth.permission.RoleModel
import com.campudus.tableaux.{RequestContext, ShouldBeUniqueException}
import org.vertx.scala.core.json.{Json, JsonArray, JsonObject}
import scala.concurrent.Future
object ServiceModel {
type ServiceId = Long
type Ordering = Long
def apply(connection: DatabaseConnection)(
implicit requestContext: RequestContext,
roleModel: RoleModel
): ServiceModel = {
new ServiceModel(connection)
}
}
class ServiceModel(override protected[this] val connection: DatabaseConnection)(
implicit requestContext: RequestContext,
roleModel: RoleModel
) extends DatabaseQuery {
val table: String = "system_services"
def update(
serviceId: ServiceId,
name: Option[String],
serviceType: Option[ServiceType],
ordering: Option[Ordering],
displayName: Option[MultiLanguageValue[String]],
description: Option[MultiLanguageValue[String]],
active: Option[Boolean],
config: Option[JsonObject],
scope: Option[JsonObject]
): Future[Unit] = {
val updateParamOpts = Map(
"name" -> name,
"type" -> serviceType,
"ordering" -> ordering,
"displayName" -> displayName,
"description" -> description,
"active" -> active,
"config" -> config,
"scope" -> scope
)
val paramsToUpdate = updateParamOpts
.filter({ case (_, v) => v.isDefined })
.map({ case (k, v) => (k, v.get) })
val columnString2valueString: Map[String, String] = paramsToUpdate.map({
case (columnName, value) =>
val columnString = s"$columnName = ?"
val valueString = value match {
case m: MultiLanguageValue[_] => m.getJson.toString
case a => a.toString
}
columnString -> valueString
})
val columnsString = columnString2valueString.keys.mkString(", ")
val update = s"UPDATE $table SET $columnsString, updated_at = CURRENT_TIMESTAMP WHERE id = ?"
val binds = Json.arr(columnString2valueString.values.toSeq: _*).add(serviceId.toString)
for {
_ <- name.map(checkUniqueName).getOrElse(Future.successful(()))
_ <- connection.query(update, binds)
} yield ()
}
private def selectStatement(condition: Option[String]): String = {
val where = condition.map(cond => s"WHERE $cond").getOrElse("")
s"""SELECT
| id,
| type,
| name,
| ordering,
| displayname,
| description,
| active,
| config,
| scope,
| created_at,
| updated_at
|FROM $table $where
|ORDER BY ordering""".stripMargin
}
def retrieve(id: ServiceId): Future[Service] = {
for {
result <- connection.query(selectStatement(Some("id = ?")), Json.arr(id.toString))
resultArr <- Future(selectNotNull(result))
} yield {
convertJsonArrayToService(resultArr.head)
}
}
def delete(id: ServiceId): Future[Unit] = {
val delete = s"DELETE FROM $table WHERE id = ?"
for {
result <- connection.query(delete, Json.arr(id))
_ <- Future(deleteNotNull(result))
} yield ()
}
def create(
name: String,
serviceType: ServiceType,
ordering: Option[Long],
displayName: MultiLanguageValue[String],
description: MultiLanguageValue[String],
active: Boolean,
config: Option[JsonObject],
scope: Option[JsonObject]
): Future[ServiceId] = {
val insert = s"""INSERT INTO $table (
| name,
| type,
| ordering,
| displayname,
| description,
| active,
| config,
| scope)
|VALUES
| (?, ?, ?, ?, ?, ?, ?, ?) RETURNING id""".stripMargin
for {
_ <- checkUniqueName(name)
result <- connection.query(
insert,
Json
.arr(
name,
serviceType.toString,
ordering.orNull,
displayName.getJson.toString,
description.getJson.toString,
active,
config.map(_.toString).orNull,
scope.map(_.toString).orNull
)
)
serviceId = insertNotNull(result).head.get[ServiceId](0)
} yield serviceId
}
def retrieveAll(): Future[Seq[Service]] = {
for {
result <- connection.query(selectStatement(None))
resultArr <- Future(resultObjectToJsonArray(result))
} yield {
resultArr.map(convertJsonArrayToService)
}
}
private def convertJsonArrayToService(arr: JsonArray): Service = {
val config = JsonUtils.parseJson(arr.get[String](7))
val scope = JsonUtils.parseJson(arr.get[String](8))
Service(
arr.get[ServiceId](0), // id
ServiceType(Option(arr.get[String](1))), // type
arr.get[String](2), // name
arr.get[Ordering](3), // ordering
MultiLanguageValue.fromString(arr.get[String](4)), // displayname
MultiLanguageValue.fromString(arr.get[String](5)), // description
arr.get[Boolean](6), // active
config, // config
scope, // scope
convertStringToDateTime(arr.get[String](9)), // created_at
convertStringToDateTime(arr.get[String](10)) // updated_at
)
}
private def checkUniqueName(name: String): Future[Unit] = {
val sql = s"SELECT COUNT(*) = 0 FROM $table WHERE name = ?"
connection
.selectSingleValue[Boolean](sql, Json.arr(name))
.flatMap({
case true => Future.successful(())
case false => Future.failed(ShouldBeUniqueException(s"Name of service should be unique $name.", "service"))
})
}
}
| campudus/tableaux | src/main/scala/com/campudus/tableaux/database/model/ServiceModel.scala | Scala | apache-2.0 | 6,087 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.sql.{Date, Timestamp}
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, _}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
/**
* Test basic expression parsing.
* If the type of an expression is supported it should be tested here.
*
* Please note that some of the expressions test don't have to be sound expressions, only their
* structure needs to be valid. Unsound expressions should be caught by the Analyzer or
* CheckAnalysis classes.
*/
class ExpressionParserSuite extends PlanTest {
import CatalystSqlParser._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
val defaultParser = CatalystSqlParser
def assertEqual(
sqlCommand: String,
e: Expression,
parser: ParserInterface = defaultParser): Unit = {
compareExpressions(parser.parseExpression(sqlCommand), e)
}
def intercept(sqlCommand: String, messages: String*): Unit = {
val e = intercept[ParseException](defaultParser.parseExpression(sqlCommand))
messages.foreach { message =>
assert(e.message.contains(message))
}
}
test("star expressions") {
// Global Star
assertEqual("*", UnresolvedStar(None))
// Targeted Star
assertEqual("a.b.*", UnresolvedStar(Option(Seq("a", "b"))))
}
// NamedExpression (Alias/Multialias)
test("named expressions") {
// No Alias
val r0 = 'a
assertEqual("a", r0)
// Single Alias.
val r1 = 'a as "b"
assertEqual("a as b", r1)
assertEqual("a b", r1)
// Multi-Alias
assertEqual("a as (b, c)", MultiAlias('a, Seq("b", "c")))
assertEqual("a() (b, c)", MultiAlias('a.function(), Seq("b", "c")))
// Numeric literals without a space between the literal qualifier and the alias, should not be
// interpreted as such. An unresolved reference should be returned instead.
// TODO add the JIRA-ticket number.
assertEqual("1SL", Symbol("1SL"))
// Aliased star is allowed.
assertEqual("a.* b", UnresolvedStar(Option(Seq("a"))) as 'b)
}
test("binary logical expressions") {
// And
assertEqual("a and b", 'a && 'b)
// Or
assertEqual("a or b", 'a || 'b)
// Combination And/Or check precedence
assertEqual("a and b or c and d", ('a && 'b) || ('c && 'd))
assertEqual("a or b or c and d", 'a || 'b || ('c && 'd))
// Multiple AND/OR get converted into a balanced tree
assertEqual("a or b or c or d or e or f", (('a || 'b) || 'c) || (('d || 'e) || 'f))
assertEqual("a and b and c and d and e and f", (('a && 'b) && 'c) && (('d && 'e) && 'f))
}
test("long binary logical expressions") {
def testVeryBinaryExpression(op: String, clazz: Class[_]): Unit = {
val sql = (1 to 1000).map(x => s"$x == $x").mkString(op)
val e = defaultParser.parseExpression(sql)
assert(e.collect { case _: EqualTo => true }.size === 1000)
assert(e.collect { case x if clazz.isInstance(x) => true }.size === 999)
}
testVeryBinaryExpression(" AND ", classOf[And])
testVeryBinaryExpression(" OR ", classOf[Or])
}
test("not expressions") {
assertEqual("not a", !'a)
assertEqual("!a", !'a)
assertEqual("not true > true", Not(GreaterThan(true, true)))
}
test("exists expression") {
assertEqual(
"exists (select 1 from b where b.x = a.x)",
Exists(table("b").where(Symbol("b.x") === Symbol("a.x")).select(1)))
}
test("comparison expressions") {
assertEqual("a = b", 'a === 'b)
assertEqual("a == b", 'a === 'b)
assertEqual("a <=> b", 'a <=> 'b)
assertEqual("a <> b", 'a =!= 'b)
assertEqual("a != b", 'a =!= 'b)
assertEqual("a < b", 'a < 'b)
assertEqual("a <= b", 'a <= 'b)
assertEqual("a !> b", 'a <= 'b)
assertEqual("a > b", 'a > 'b)
assertEqual("a >= b", 'a >= 'b)
assertEqual("a !< b", 'a >= 'b)
}
test("between expressions") {
assertEqual("a between b and c", 'a >= 'b && 'a <= 'c)
assertEqual("a not between b and c", !('a >= 'b && 'a <= 'c))
}
test("in expressions") {
assertEqual("a in (b, c, d)", 'a in ('b, 'c, 'd))
assertEqual("a not in (b, c, d)", !('a in ('b, 'c, 'd)))
}
test("in sub-query") {
assertEqual(
"a in (select b from c)",
In('a, Seq(ListQuery(table("c").select('b)))))
}
test("like expressions") {
assertEqual("a like 'pattern%'", 'a like "pattern%")
assertEqual("a not like 'pattern%'", !('a like "pattern%"))
assertEqual("a rlike 'pattern%'", 'a rlike "pattern%")
assertEqual("a not rlike 'pattern%'", !('a rlike "pattern%"))
assertEqual("a regexp 'pattern%'", 'a rlike "pattern%")
assertEqual("a not regexp 'pattern%'", !('a rlike "pattern%"))
}
test("like expressions with ESCAPED_STRING_LITERALS = true") {
val conf = new SQLConf()
conf.setConfString(SQLConf.ESCAPED_STRING_LITERALS.key, "true")
val parser = new CatalystSqlParser(conf)
assertEqual("a rlike '^\\\\x20[\\\\x20-\\\\x23]+$'", 'a rlike "^\\\\x20[\\\\x20-\\\\x23]+$", parser)
assertEqual("a rlike 'pattern\\\\\\\\'", 'a rlike "pattern\\\\\\\\", parser)
assertEqual("a rlike 'pattern\\\\t\\\\n'", 'a rlike "pattern\\\\t\\\\n", parser)
}
test("is null expressions") {
assertEqual("a is null", 'a.isNull)
assertEqual("a is not null", 'a.isNotNull)
assertEqual("a = b is null", ('a === 'b).isNull)
assertEqual("a = b is not null", ('a === 'b).isNotNull)
}
test("is distinct expressions") {
assertEqual("a is distinct from b", !('a <=> 'b))
assertEqual("a is not distinct from b", 'a <=> 'b)
}
test("binary arithmetic expressions") {
// Simple operations
assertEqual("a * b", 'a * 'b)
assertEqual("a / b", 'a / 'b)
assertEqual("a DIV b", ('a / 'b).cast(LongType))
assertEqual("a % b", 'a % 'b)
assertEqual("a + b", 'a + 'b)
assertEqual("a - b", 'a - 'b)
assertEqual("a & b", 'a & 'b)
assertEqual("a ^ b", 'a ^ 'b)
assertEqual("a | b", 'a | 'b)
// Check precedences
assertEqual(
"a * t | b ^ c & d - e + f % g DIV h / i * k",
'a * 't | ('b ^ ('c & ('d - 'e + (('f % 'g / 'h).cast(LongType) / 'i * 'k)))))
}
test("unary arithmetic expressions") {
assertEqual("+a", 'a)
assertEqual("-a", -'a)
assertEqual("~a", ~'a)
assertEqual("-+~~a", -(~(~'a)))
}
test("cast expressions") {
// Note that DataType parsing is tested elsewhere.
assertEqual("cast(a as int)", 'a.cast(IntegerType))
assertEqual("cast(a as timestamp)", 'a.cast(TimestampType))
assertEqual("cast(a as array<int>)", 'a.cast(ArrayType(IntegerType)))
assertEqual("cast(cast(a as int) as long)", 'a.cast(IntegerType).cast(LongType))
}
test("function expressions") {
assertEqual("foo()", 'foo.function())
assertEqual("foo.bar()",
UnresolvedFunction(FunctionIdentifier("bar", Some("foo")), Seq.empty, isDistinct = false))
assertEqual("foo(*)", 'foo.function(star()))
assertEqual("count(*)", 'count.function(1))
assertEqual("foo(a, b)", 'foo.function('a, 'b))
assertEqual("foo(all a, b)", 'foo.function('a, 'b))
assertEqual("foo(distinct a, b)", 'foo.distinctFunction('a, 'b))
assertEqual("grouping(distinct a, b)", 'grouping.distinctFunction('a, 'b))
assertEqual("`select`(all a, b)", 'select.function('a, 'b))
intercept("foo(a x)", "extraneous input 'x'")
}
  // Exercises parsing of window function calls: named and inline window specs,
  // partitioning/ordering clauses (including their Hive-style aliases), and
  // row/range frame boundary syntax.
  test("window function expressions") {
    val func = 'foo.function(star())
    // Helper building the expected WindowExpression around foo(*).
    def windowed(
        partitioning: Seq[Expression] = Seq.empty,
        ordering: Seq[SortOrder] = Seq.empty,
        frame: WindowFrame = UnspecifiedFrame): Expression = {
      WindowExpression(func, WindowSpecDefinition(partitioning, ordering, frame))
    }

    // Basic window testing.
    assertEqual("foo(*) over w1", UnresolvedWindowExpression(func, WindowSpecReference("w1")))
    assertEqual("foo(*) over ()", windowed())
    assertEqual("foo(*) over (partition by a, b)", windowed(Seq('a, 'b)))
    // 'distribute by' and 'cluster by' are Hive aliases for partitioning.
    assertEqual("foo(*) over (distribute by a, b)", windowed(Seq('a, 'b)))
    assertEqual("foo(*) over (cluster by a, b)", windowed(Seq('a, 'b)))
    assertEqual("foo(*) over (order by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc )))
    assertEqual("foo(*) over (sort by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc )))
    assertEqual("foo(*) over (partition by a, b order by c)", windowed(Seq('a, 'b), Seq('c.asc)))
    assertEqual("foo(*) over (distribute by a, b sort by c)", windowed(Seq('a, 'b), Seq('c.asc)))

    // Test use of expressions in window functions.
    assertEqual(
      "sum(product + 1) over (partition by ((product) + (1)) order by 2)",
      WindowExpression('sum.function('product + 1),
        WindowSpecDefinition(Seq('product + 1), Seq(Literal(2).asc), UnspecifiedFrame)))
    assertEqual(
      "sum(product + 1) over (partition by ((product / 2) + 1) order by 2)",
      WindowExpression('sum.function('product + 1),
        WindowSpecDefinition(Seq('product / 2 + 1), Seq(Literal(2).asc), UnspecifiedFrame)))

    // Range/Row
    val frameTypes = Seq(("rows", RowFrame), ("range", RangeFrame))
    val boundaries = Seq(
      ("10 preceding", -Literal(10), CurrentRow),
      ("2147483648 preceding", -Literal(2147483648L), CurrentRow),
      ("3 + 1 following", Add(Literal(3), Literal(1)), CurrentRow),
      ("unbounded preceding", UnboundedPreceding, CurrentRow),
      ("unbounded following", UnboundedFollowing, CurrentRow), // Will fail during analysis
      ("between unbounded preceding and current row", UnboundedPreceding, CurrentRow),
      ("between unbounded preceding and unbounded following",
        UnboundedPreceding, UnboundedFollowing),
      ("between 10 preceding and current row", -Literal(10), CurrentRow),
      ("between current row and 5 following", CurrentRow, Literal(5)),
      ("between 10 preceding and 5 following", -Literal(10), Literal(5))
    )
    // Check every (frame type, boundary) combination parses to the matching frame spec.
    frameTypes.foreach {
      case (frameTypeSql, frameType) =>
        boundaries.foreach {
          case (boundarySql, begin, end) =>
            val query = s"foo(*) over (partition by a order by b $frameTypeSql $boundarySql)"
            val expr = windowed(Seq('a), Seq('b.asc), SpecifiedWindowFrame(frameType, begin, end))
            assertEqual(query, expr)
        }
    }

    // We cannot use an arbitrary expression.
    intercept("foo(*) over (partition by a order by b rows exp(b) preceding)",
      "Frame bound value must be a literal.")
  }
  test("row constructor") {
    // Note that '(a)' will be interpreted as a nested expression.
    assertEqual("(a, b)", CreateStruct(Seq('a, 'b)))
    assertEqual("(a, b, c)", CreateStruct(Seq('a, 'b, 'c)))
    assertEqual("(a as b, b as c)", CreateStruct(Seq('a as 'b, 'b as 'c)))
  }

  test("scalar sub-query") {
    // A parenthesised SELECT used as an expression parses to a ScalarSubquery.
    assertEqual(
      "(select max(val) from tbl) > current",
      ScalarSubquery(table("tbl").select('max.function('val))) > 'current)
    assertEqual(
      "a = (select b from s)",
      'a === ScalarSubquery(table("s").select('b)))
  }

  test("case when") {
    // Keyed form: CASE <key> WHEN ... parses to CaseKeyWhen.
    assertEqual("case a when 1 then b when 2 then c else d end",
      CaseKeyWhen('a, Seq(1, 'b, 2, 'c, 'd)))
    assertEqual("case (a or b) when true then c when false then d else e end",
      CaseKeyWhen('a || 'b, Seq(true, 'c, false, 'd, 'e)))
    assertEqual("case 'a'='a' when true then 1 end",
      CaseKeyWhen("a" === "a", Seq(true, 1)))
    // Searched form: CASE WHEN <cond> ... parses to CaseWhen.
    assertEqual("case when a = 1 then b when a = 2 then c else d end",
      CaseWhen(Seq(('a === 1, 'b.expr), ('a === 2, 'c.expr)), 'd))
    // CASE expressions may be nested inside a WHEN condition.
    assertEqual("case when (1) + case when a > b then c else d end then f else g end",
      CaseWhen(Seq((Literal(1) + CaseWhen(Seq(('a > 'b, 'c.expr)), 'd.expr), 'f.expr)), 'g))
  }

  test("dereference") {
    assertEqual("a.b", UnresolvedAttribute("a.b"))
    assertEqual("`select`.b", UnresolvedAttribute("select.b"))
    assertEqual("(a + b).b", ('a + 'b).getField("b")) // This will fail analysis.
    assertEqual(
      "struct(a, b).b",
      namedStruct(NamePlaceholder, 'a, NamePlaceholder, 'b).getField("b"))
  }

  test("reference") {
    // Regular
    assertEqual("a", 'a)
    // Starting with a digit.
    assertEqual("1a", Symbol("1a"))
    // Quoted using a keyword.
    assertEqual("`select`", 'select)
    // Unquoted using an unreserved keyword.
    assertEqual("columns", 'columns)
  }

  test("subscript") {
    assertEqual("a[b]", 'a.getItem('b))
    assertEqual("a[1 + 1]", 'a.getItem(Literal(1) + 1))
    assertEqual("`c`.a[b]", UnresolvedAttribute("c.a").getItem('b))
  }

  test("parenthesis") {
    assertEqual("(a)", 'a)
    assertEqual("r * (a + b)", 'r * ('a + 'b))
  }
  // Typed literal constructors: DATE/TIMESTAMP/X'...' prefixes are case-insensitive.
  test("type constructors") {
    // Dates.
    assertEqual("dAte '2016-03-11'", Literal(Date.valueOf("2016-03-11")))
    intercept("DAtE 'mar 11 2016'")

    // Timestamps.
    assertEqual("tImEstAmp '2016-03-11 20:54:00.000'",
      Literal(Timestamp.valueOf("2016-03-11 20:54:00.000")))
    intercept("timestamP '2016-33-11 20:54:00.000'")

    // Binary.
    assertEqual("X'A'", Literal(Array(0x0a).map(_.toByte)))
    assertEqual("x'A10C'", Literal(Array(0xa1, 0x0c).map(_.toByte)))
    intercept("x'A1OC'")

    // Unsupported datatype.
    intercept("GEO '(10,-6)'", "Literals of type 'GEO' are currently not supported.")
  }

  test("literals") {
    // Values expected to parse to a BigDecimal literal (including integrals too
    // wide for Long and scientific notation).
    def testDecimal(value: String): Unit = {
      assertEqual(value, Literal(BigDecimal(value).underlying))
    }

    // NULL
    assertEqual("null", Literal(null))

    // Boolean
    assertEqual("trUe", Literal(true))
    assertEqual("False", Literal(false))

    // Integral should have the narrowest possible type
    assertEqual("787324", Literal(787324))
    assertEqual("7873247234798249234", Literal(7873247234798249234L))
    testDecimal("78732472347982492793712334")

    // Decimal
    testDecimal("7873247234798249279371.2334")

    // Scientific Decimal
    testDecimal("9.0e1")
    testDecimal(".9e+2")
    testDecimal("0.9e+2")
    testDecimal("900e-1")
    testDecimal("900.0E-1")
    testDecimal("9.e+1")
    intercept(".e3")

    // Tiny Int Literal
    assertEqual("10Y", Literal(10.toByte))
    intercept("-1000Y", s"does not fit in range [${Byte.MinValue}, ${Byte.MaxValue}]")

    // Small Int Literal
    assertEqual("10S", Literal(10.toShort))
    intercept("40000S", s"does not fit in range [${Short.MinValue}, ${Short.MaxValue}]")

    // Long Int Literal
    assertEqual("10L", Literal(10L))
    intercept("78732472347982492793712334L",
      s"does not fit in range [${Long.MinValue}, ${Long.MaxValue}]")

    // Double Literal
    assertEqual("10.0D", Literal(10.0D))
    intercept("-1.8E308D", s"does not fit in range")
    intercept("1.8E308D", s"does not fit in range")

    // BigDecimal Literal
    assertEqual("90912830918230182310293801923652346786BD",
      Literal(BigDecimal("90912830918230182310293801923652346786").underlying()))
    assertEqual("123.0E-28BD", Literal(BigDecimal("123.0E-28").underlying()))
    assertEqual("123.08BD", Literal(BigDecimal("123.08").underlying()))
    intercept("1.20E-38BD", "DecimalType can only support precision up to 38")
  }
  // String literal parsing under both settings of ESCAPED_STRING_LITERALS:
  // when enabled (Spark 1.6 fallback) escapes are kept literally, when disabled
  // (default) MySQL-style escape sequences are unescaped.
  test("strings") {
    Seq(true, false).foreach { escape =>
      val conf = new SQLConf()
      conf.setConfString(SQLConf.ESCAPED_STRING_LITERALS.key, escape.toString)
      val parser = new CatalystSqlParser(conf)

      // tests that have same result whatever the conf is
      // Single Strings.
      assertEqual("\\"hello\\"", "hello", parser)
      assertEqual("'hello'", "hello", parser)

      // Multi-Strings: adjacent literals are concatenated.
      assertEqual("\\"hello\\" 'world'", "helloworld", parser)
      assertEqual("'hello' \\" \\" 'world'", "hello world", parser)

      // 'LIKE' string literals. Notice that an escaped '%' is the same as an escaped '\\' and a
      // regular '%'; to get the correct result you need to add another escaped '\\'.
      // TODO figure out if we shouldn't change the ParseUtils.unescapeSQLString method?
      assertEqual("'pattern%'", "pattern%", parser)
      assertEqual("'no-pattern\\\\%'", "no-pattern\\\\%", parser)

      // tests that have different result regarding the conf
      if (escape) {
        // When SQLConf.ESCAPED_STRING_LITERALS is enabled, string literal parsing fallbacks to
        // Spark 1.6 behavior.

        // 'LIKE' string literals.
        assertEqual("'pattern\\\\\\\\%'", "pattern\\\\\\\\%", parser)
        assertEqual("'pattern\\\\\\\\\\\\%'", "pattern\\\\\\\\\\\\%", parser)

        // Escaped characters.
        // Unescape string literal "'\\\\0'" for ASCII NUL (X'00') doesn't work
        // when ESCAPED_STRING_LITERALS is enabled.
        // It is parsed literally.
        assertEqual("'\\\\0'", "\\\\0", parser)

        // Note: Single quote follows 1.6 parsing behavior when ESCAPED_STRING_LITERALS is enabled.
        val e = intercept[ParseException](parser.parseExpression("'\\''"))
        assert(e.message.contains("extraneous input '''"))

        // The unescape special characters (e.g., "\\\\t") for 2.0+ don't work
        // when ESCAPED_STRING_LITERALS is enabled. They are parsed literally.
        assertEqual("'\\\\\\"'", "\\\\\\"", parser) // Double quote
        assertEqual("'\\\\b'", "\\\\b", parser) // Backspace
        assertEqual("'\\\\n'", "\\\\n", parser) // Newline
        assertEqual("'\\\\r'", "\\\\r", parser) // Carriage return
        assertEqual("'\\\\t'", "\\\\t", parser) // Tab character

        // The unescape Octals for 2.0+ don't work when ESCAPED_STRING_LITERALS is enabled.
        // They are parsed literally.
        assertEqual("'\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041'", "\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041", parser)
        // The unescape Unicode for 2.0+ doesn't work when ESCAPED_STRING_LITERALS is enabled.
        // They are parsed literally.
        assertEqual("'\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029'",
          "\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029", parser)
      } else {
        // Default behavior

        // 'LIKE' string literals.
        assertEqual("'pattern\\\\\\\\%'", "pattern\\\\%", parser)
        assertEqual("'pattern\\\\\\\\\\\\%'", "pattern\\\\\\\\%", parser)

        // Escaped characters.
        // See: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
        assertEqual("'\\\\0'", "\\u0000", parser) // ASCII NUL (X'00')
        assertEqual("'\\\\''", "\\'", parser) // Single quote
        assertEqual("'\\\\\\"'", "\\"", parser) // Double quote
        assertEqual("'\\\\b'", "\\b", parser) // Backspace
        assertEqual("'\\\\n'", "\\n", parser) // Newline
        assertEqual("'\\\\r'", "\\r", parser) // Carriage return
        assertEqual("'\\\\t'", "\\t", parser) // Tab character
        assertEqual("'\\\\Z'", "\\u001A", parser) // ASCII 26 - CTRL + Z (EOF on windows)

        // Octals
        assertEqual("'\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041'", "Hello!", parser)

        // Unicode
        assertEqual("'\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029'", "World :)",
          parser)
      }
    }
  }
  // Interval literal parsing: single units, year-month / day-time ranges, and
  // composed (multi-unit) intervals.
  test("intervals") {
    // Expected literal for a single-unit interval such as "interval 10 days".
    def intervalLiteral(u: String, s: String): Literal = {
      Literal(CalendarInterval.fromSingleUnitString(u, s))
    }

    // Empty interval statement
    intercept("interval", "at least one time unit should be given for interval literal")

    // Single Intervals.
    val units = Seq(
      "year",
      "month",
      "week",
      "day",
      "hour",
      "minute",
      "second",
      "millisecond",
      "microsecond")
    val forms = Seq("", "s")
    val values = Seq("0", "10", "-7", "21")
    // Every unit accepts singular/plural forms and quoted/unquoted values.
    units.foreach { unit =>
      forms.foreach { form =>
        values.foreach { value =>
          val expected = intervalLiteral(unit, value)
          assertEqual(s"interval $value $unit$form", expected)
          assertEqual(s"interval '$value' $unit$form", expected)
        }
      }
    }

    // Hive nanosecond notation.
    assertEqual("interval 13.123456789 seconds", intervalLiteral("second", "13.123456789"))
    assertEqual("interval -13.123456789 second", intervalLiteral("second", "-13.123456789"))

    // Non Existing unit
    intercept("interval 10 nanoseconds", "No interval can be constructed")

    // Year-Month intervals.
    val yearMonthValues = Seq("123-10", "496-0", "-2-3", "-123-0")
    yearMonthValues.foreach { value =>
      val result = Literal(CalendarInterval.fromYearMonthString(value))
      assertEqual(s"interval '$value' year to month", result)
    }

    // Day-Time intervals.
    val datTimeValues = Seq(
      "99 11:22:33.123456789",
      "-99 11:22:33.123456789",
      "10 9:8:7.123456789",
      "1 0:0:0",
      "-1 0:0:0",
      "1 0:0:1")
    datTimeValues.foreach { value =>
      val result = Literal(CalendarInterval.fromDayTimeString(value))
      assertEqual(s"interval '$value' day to second", result)
    }

    // Unknown FROM TO intervals
    intercept("interval 10 month to second", "Intervals FROM month TO second are not supported.")

    // Composed intervals.
    assertEqual(
      "interval 3 months 22 seconds 1 millisecond",
      Literal(new CalendarInterval(3, 22001000L)))
    assertEqual(
      "interval 3 years '-1-10' year to month 3 weeks '1 0:0:2' day to second",
      Literal(new CalendarInterval(14,
        22 * CalendarInterval.MICROS_PER_DAY + 2 * CalendarInterval.MICROS_PER_SECOND)))
  }
  test("composed expressions") {
    assertEqual("1 + r.r As q", (Literal(1) + UnresolvedAttribute("r.r")).as("q"))
    assertEqual("1 - f('o', o(bar))", Literal(1) - 'f.function("o", 'o.function('bar)))
    intercept("1 - f('o', o(bar)) hello * world", "mismatched input '*'")
  }

  test("SPARK-17364, fully qualified column name which starts with number") {
    assertEqual("123_", UnresolvedAttribute("123_"))
    assertEqual("1a.123_", UnresolvedAttribute("1a.123_"))
    // ".123" should not be treated as token of type DECIMAL_VALUE
    assertEqual("a.123A", UnresolvedAttribute("a.123A"))
    // ".123E3" should not be treated as token of type SCIENTIFIC_DECIMAL_VALUE
    assertEqual("a.123E3_column", UnresolvedAttribute("a.123E3_column"))
    // ".123D" should not be treated as token of type DOUBLE_LITERAL
    assertEqual("a.123D_column", UnresolvedAttribute("a.123D_column"))
    // ".123BD" should not be treated as token of type BIGDECIMAL_LITERAL
    assertEqual("a.123BD_column", UnresolvedAttribute("a.123BD_column"))
  }

  test("SPARK-17832 function identifier contains backtick") {
    val complexName = FunctionIdentifier("`ba`r", Some("`fo`o"))
    assertEqual(complexName.quotedString, UnresolvedAttribute("`fo`o.`ba`r"))
    intercept(complexName.unquotedString, "mismatched input")
    // Function identifiers containing contiguous backticks should be treated correctly.
    val complexName2 = FunctionIdentifier("ba``r", Some("fo``o"))
    assertEqual(complexName2.quotedString, UnresolvedAttribute("fo``o.ba``r"))
  }

  test("SPARK-19526 Support ignore nulls keywords for first and last") {
    // The second argument to First/Last is the ignoreNulls flag.
    assertEqual("first(a ignore nulls)", First('a, Literal(true)).toAggregateExpression())
    assertEqual("first(a)", First('a, Literal(false)).toAggregateExpression())
    assertEqual("last(a ignore nulls)", Last('a, Literal(true)).toAggregateExpression())
    assertEqual("last(a)", Last('a, Literal(false)).toAggregateExpression())
  }
}
| ron8hu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala | Scala | apache-2.0 | 24,361 |
package app.restlike.gtd
import im.mange.little.json.{LittleJodaSerialisers, LittleSerialisers}
object Json {
  import org.json4s._
  import org.json4s.native.JsonMethods._
  import org.json4s.native.Serialization.write
  import org.json4s.native.{JsonParser, Serialization}

  // Shared json4s formats (no type hints plus the Joda date/time serialisers),
  // declared implicit once instead of being re-bound inside every method.
  private implicit val theFormats: Formats =
    Serialization.formats(NoTypeHints) ++ LittleJodaSerialisers.all

  /** Parses the given JSON string and extracts a [[Universe]] from it. */
  def deserialise(json: String): Universe = parse(json).extract[Universe]

  /** Serialises a [[Universe]] to JSON text and re-parses it into a json4s AST. */
  def serialise(response: Universe): JValue = JsonParser.parse(write(response))

  /** Serialises an optional [[Model]] to JSON text and re-parses it into a json4s AST. */
  def serialise(response: Option[Model]): JValue = JsonParser.parse(write(response))
}
| alltonp/reprobate | src/main/scala/app/restlike/gtd/Json.scala | Scala | apache-2.0 | 805 |
/*
* Copyright 2013 agwlvssainokuni
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.common
import java.sql.Connection
import play.api.Play.current
import play.api.db._
import play.api.mvc._
trait ActionBuilder {
  self: Controller =>

  /** Wraps an action so it only runs for an authenticated user, passing the user id. */
  def Authenticated(action: Long => EssentialAction): EssentialAction

  /**
   * Builds an action whose handler runs inside a database transaction; the open
   * connection is handed to the handler and the transaction spans the request.
   */
  def CustomAction(block: Connection => Request[AnyContent] => Result): EssentialAction =
    Action { request =>
      DB.withTransaction(connection => block(connection)(request))
    }

  /** Combines authentication and a transactional action: the handler gets the user id first. */
  def AuthnCustomAction(block: Long => Connection => Request[AnyContent] => Result): EssentialAction =
    Authenticated(id => CustomAction(block(id)))
}
| agwlvssainokuni/lifelog | lifelog-common/app/controllers/common/ActionBuilder.scala | Scala | apache-2.0 | 1,208 |
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.internal
import akka.actor.ActorRef
import akka.annotation.InternalApi
import akka.kafka.ManualSubscription
import akka.stream.SourceShape
import scala.concurrent.Future
/**
* Internal API.
*
* Single source logic for externally provided [[KafkaConsumerActor]].
*/
@InternalApi private abstract class ExternalSingleSourceLogic[K, V, Msg](
    shape: SourceShape[Msg],
    _consumerActor: ActorRef,
    val subscription: ManualSubscription
) extends BaseSingleSourceLogic[K, V, Msg](shape) {

  final override protected def logSource: Class[_] = classOf[ExternalSingleSourceLogic[K, V, Msg]]

  // The consumer actor is supplied externally, so it is available immediately.
  final val consumerFuture: Future[ActorRef] = Future.successful(_consumerActor)

  // Returns the externally provided actor instead of spawning one.
  final def createConsumerActor(): ActorRef = _consumerActor

  final def configureSubscription(): Unit =
    configureManualSubscription(subscription)

  // NOTE(review): this does not stop the consumer actor itself — presumably its
  // lifecycle is owned by the external creator; confirm against the base class.
  final override def performShutdown(): Unit = {
    super.performShutdown()
    completeStage()
  }
}
| softwaremill/reactive-kafka | core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala | Scala | apache-2.0 | 1,104 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.