| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
package com.datastax.spark.connector.writer
import java.io.IOException
import java.net.InetAddress
import com.datastax.driver.core._
import com.datastax.spark.connector.ColumnSelector
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.util.Quote._
import org.apache.spark.Logging
import scala.collection.JavaConversions._
import scala.collection._
/**
* A utility class for determining the replica set (IP addresses) of a particular Cassandra row. Used
* by the [[com.datastax.spark.connector.RDDFunctions.keyByCassandraReplica]] method. Uses the Java
* Driver to obtain replica information.
*/
class ReplicaLocator[T] private(
connector: CassandraConnector,
tableDef: TableDef,
rowWriter: RowWriter[T]) extends Serializable with Logging {
val keyspaceName = tableDef.keyspaceName
val tableName = tableDef.tableName
val columnNames = rowWriter.columnNames
/**
* This query is only used to build a prepared statement so we can more easily extract
* partition tokens from tables. We prepare a statement of the form SELECT * FROM keyspace.table
* WHERE pk1 = :pk1 AND ... This statement is never executed.
*/
private lazy val querySelectUsingOnlyPartitionKeys: String = {
val partitionKeys = tableDef.partitionKey
def quotedColumnNames(columns: Seq[ColumnDef]) = columns.map(_.columnName).map(quote)
val whereClause = quotedColumnNames(partitionKeys).map(c => s"$c = :$c").mkString(" AND ")
s"SELECT * FROM ${quote(keyspaceName)}.${quote(tableName)} WHERE $whereClause"
}
private def prepareDummyStatement(session: Session): PreparedStatement = {
try {
session.prepare(querySelectUsingOnlyPartitionKeys)
}
catch {
case t: Throwable =>
throw new IOException(s"Failed to prepare statement $querySelectUsingOnlyPartitionKeys: " + t.getMessage, t)
}
}
/**
* Pairs each piece of data with the Cassandra replicas on which that data would be found.
* @param data A source of data which can be bound to a statement by BatchStatementBuilder
* @return an Iterator over the same data keyed by the replicas' IP addresses
*/
def keyByReplicas(data: Iterator[T]): Iterator[(scala.collection.immutable.Set[InetAddress], T)] = {
connector.withSessionDo { session =>
val stmt = prepareDummyStatement(session)
val routingKeyGenerator = new RoutingKeyGenerator(tableDef, columnNames)
val boundStmtBuilder = new BoundStatementBuilder(rowWriter, stmt)
val clusterMetadata = session.getCluster.getMetadata
data.map { row =>
val hosts = clusterMetadata
.getReplicas(Metadata.quote(keyspaceName), routingKeyGenerator.apply(boundStmtBuilder.bind(row)))
.map(_.getAddress)
.toSet[InetAddress]
(hosts, row)
}
}
}
}
/**
* Helper methods for mapping a set of data to their relative locations in a Cassandra Cluster.
*/
object ReplicaLocator {
def apply[T: RowWriterFactory](
connector: CassandraConnector,
keyspaceName: String,
tableName: String,
partitionKeyMapper: ColumnSelector): ReplicaLocator[T] = {
val schema = Schema.fromCassandra(connector, Some(keyspaceName), Some(tableName))
val tableDef = schema.tables.headOption
.getOrElse(throw new IOException(s"Table not found: $keyspaceName.$tableName"))
val rowWriter = implicitly[RowWriterFactory[T]].rowWriter(
tableDef,
partitionKeyMapper.selectFrom(tableDef)
)
new ReplicaLocator[T](connector, tableDef, rowWriter)
}
}
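/*
 * A minimal usage sketch (not part of the original file). The keyspace "test_ks", table "kv", its
 * partition key column "key", and the KV case class are illustrative assumptions; the implicit
 * RowWriterFactory is normally provided by the connector's implicits.
 */
object ReplicaLocatorExample {
  import com.datastax.spark.connector._

  case class KV(key: Int, value: String)

  def keyRows(connector: CassandraConnector, rows: Iterator[KV])
             (implicit rwf: RowWriterFactory[KV]): Iterator[(immutable.Set[InetAddress], KV)] = {
    // Build a locator over the partition key column and pair every row with its replica addresses.
    val locator = ReplicaLocator[KV](connector, "test_ks", "kv", SomeColumns("key"))
    locator.keyByReplicas(rows)
  }
}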
| debasish83/cassandra-driver-spark | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/writer/ReplicaLocator.scala | Scala | apache-2.0 | 3,574 |
package spark.streaming.dstream
import spark.Logging
import spark.storage.StorageLevel
import spark.streaming.StreamingContext
import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{ReadableByteChannel, SocketChannel}
import java.io.EOFException
import java.util.concurrent.ArrayBlockingQueue
/**
* An input stream that reads blocks of serialized objects from a given network address.
* The blocks will be inserted directly into the block store. This is the fastest way to get
* data into Spark Streaming, though it requires the sender to batch data and serialize it
* in the format that the system is configured with.
*/
private[streaming]
class RawInputDStream[T: ClassManifest](
@transient ssc_ : StreamingContext,
host: String,
port: Int,
storageLevel: StorageLevel
) extends NetworkInputDStream[T](ssc_ ) with Logging {
def getReceiver(): NetworkReceiver[T] = {
new RawNetworkReceiver(host, port, storageLevel).asInstanceOf[NetworkReceiver[T]]
}
}
private[streaming]
class RawNetworkReceiver(host: String, port: Int, storageLevel: StorageLevel)
extends NetworkReceiver[Any] {
var blockPushingThread: Thread = null
override def getLocationPreference = None
def onStart() {
// Open a socket to the target address and keep reading from it
logInfo("Connecting to " + host + ":" + port)
val channel = SocketChannel.open()
channel.configureBlocking(true)
channel.connect(new InetSocketAddress(host, port))
logInfo("Connected to " + host + ":" + port)
val queue = new ArrayBlockingQueue[ByteBuffer](2)
blockPushingThread = new Thread {
setDaemon(true)
override def run() {
var nextBlockNumber = 0
while (true) {
val buffer = queue.take()
val blockId = "input-" + streamId + "-" + nextBlockNumber
nextBlockNumber += 1
pushBlock(blockId, buffer, null, storageLevel)
}
}
}
blockPushingThread.start()
val lengthBuffer = ByteBuffer.allocate(4)
while (true) {
lengthBuffer.clear()
readFully(channel, lengthBuffer)
lengthBuffer.flip()
val length = lengthBuffer.getInt()
val dataBuffer = ByteBuffer.allocate(length)
readFully(channel, dataBuffer)
dataBuffer.flip()
logInfo("Read a block with " + length + " bytes")
queue.put(dataBuffer)
}
}
def onStop() {
if (blockPushingThread != null) blockPushingThread.interrupt()
}
/** Read a buffer fully from a given Channel */
private def readFully(channel: ReadableByteChannel, dest: ByteBuffer) {
while (dest.position < dest.limit) {
if (channel.read(dest) == -1) {
throw new EOFException("End of channel")
}
}
}
}
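/*
 * A minimal sender-side sketch (not part of the original file). The receiver above expects every
 * block as a 4-byte big-endian length prefix followed by that many bytes of serialized data; the
 * host, port and payload below are illustrative assumptions.
 */
private[streaming] object RawBlockSender {
  def sendBlock(host: String, port: Int, payload: Array[Byte]) {
    val channel = SocketChannel.open(new InetSocketAddress(host, port))
    try {
      // Length prefix first, then the payload, matching the readFully loop in RawNetworkReceiver.
      val buffer = ByteBuffer.allocate(4 + payload.length)
      buffer.putInt(payload.length)
      buffer.put(payload)
      buffer.flip()
      while (buffer.hasRemaining) channel.write(buffer)
    } finally {
      channel.close()
    }
  }
}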
| koeninger/spark | streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala | Scala | bsd-3-clause | 2,766 |
package geotrellis.config
import geotrellis.spark.etl.config._
import geotrellis.spark.etl.config.json._
import geotrellis.config.json._
import org.apache.spark.SparkContext
import com.github.fge.jackson.JsonLoader
import spray.json._
object TestsEtlConf extends TestsEtlConf
trait TestsEtlConf extends ConfigParse {
val help = s"""
|${Info.name} ${Info.version}
|
|Usage: ${Info.name} [options]
|
| --datasets <value>
| datasets is a non-empty String property
| --backend-profiles <value>
| backend-profiles is a non-empty String property
| --help
| prints this usage text
""".stripMargin
val requiredFields = Set('datasets, 'backendProfiles)
val backendProfilesSchema = schemaFactory.getJsonSchema(JsonLoader.fromResource("/backend-profiles-schema.json"))
val datasetsSchema = schemaFactory.getJsonSchema(JsonLoader.fromResource("/datasets-schema.json"))
def nextOption(map: Map[Symbol, String], list: Seq[String]): Map[Symbol, String] =
list.toList match {
case Nil => map
case "--datasets" :: value :: tail =>
nextOption(map ++ Map('datasets -> value), tail)
case "--backend-profiles" :: value :: tail =>
nextOption(map ++ Map('backendProfiles -> value), tail)
case "--help" :: tail => {
println(help)
sys.exit(1)
}
case option :: tail => {
println(s"Unknown option ${option}")
println(help)
sys.exit(1)
}
}
def apply(args: Seq[String])(implicit sc: SparkContext): List[Dataset] = {
val m = parse(args)
if(m.keySet != requiredFields) {
println(s"missing required field(s): ${(requiredFields -- m.keySet).mkString(", ")}, use --help command to get additional information about input options.")
sys.exit(1)
}
val(backendProfiles, datasets) = m('backendProfiles) -> m('datasets)
val backendProfilesValidation = backendProfilesSchema.validate(JsonLoader.fromString(backendProfiles), true)
val datasetsValidation = datasetsSchema.validate(JsonLoader.fromString(datasets), true)
if(!backendProfilesValidation.isSuccess || !datasetsValidation.isSuccess) {
if(!backendProfilesValidation.isSuccess) {
println("backendProfiles validation error:")
println(backendProfilesValidation)
}
if(!datasetsValidation.isSuccess) {
println("datasets validation error:")
println(datasetsValidation)
}
sys.exit(1)
}
DatasetsFormat(backendProfiles.parseJson.convertTo[Map[String, BackendProfile]]).read(datasets.parseJson)
}
}
| geotrellis/geotrellis-integration-tests-tool | src/main/scala/geotrellis/config/TestsEtlConf.scala | Scala | apache-2.0 | 2,750 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.pages.withdraw
import org.jsoup.Jsoup
import testHelpers.ViewSpecHelpers.CommonViewSpecHelper
import testHelpers.ViewSpecHelpers.withdraw.WithdrawConfirmationSpecMessages
import views.html.pages.withdraw.withdrawConfirmation
class WithdrawConfirmationViewSpec extends CommonViewSpecHelper with WithdrawConfirmationSpecMessages {
"Withdraw Confirmation view" when {
lazy val view = application.injector.instanceOf[withdrawConfirmation]
lazy val doc = Jsoup.parse(view("IP2014").body)
s"have a title ${"pla.withdraw.confirmation.message"}" in {
doc.title() shouldBe plaWithdrawConfirmationMessage(plaWithdrawProtectionIP2014label)
}
s"have a question of ${"pla.withdraw.confirmation.message"}" in {
doc.select("h1").text() shouldBe plaWithdrawConfirmationMessage(plaWithdrawProtectionIP2014label)
}
"have a div tag that" should {
lazy val grid = doc.select("div.grid-row > div.grid")
s"has the first paragraph of ${"pla.withdraw.confirmation.contact.you.if.needed"}" in {
grid.select("p").get(0).text shouldBe plaWithdrawConfirmationContactYouIfNeeded
}
s"have a question of ${"pla.withdraw.confirmation.other.protections.link"}" in {
doc.select("div.grid > p").get(1).text shouldBe plaWithdrawConfirmationOtherProtections
}
"Other protections link " in {
doc.select("div.grid > p a").text() shouldBe plaWithdrawConfirmationOtherProtectionsLink
}
"Other protections link href" in {
doc.select("div.grid > p a").attr("href") shouldBe plaWithdrawConfirmationOtherProtectionsUrl
}
"have a div tag size" in {
grid.size() shouldBe 1
}
}
s"have a message of ${"pla.withdraw.confirm.feedback-heading"}" in {
doc.select("div.grid-row > h2").text shouldBe plaWithdrawConfirmFeedbackHeading
}
s"feedback message of ${"pla.withdraw.confirm.feedback-text"}" in {
doc.select("div.grid-row > p").text shouldBe plaWithdrawConfirmFeedbackText
}
"feedback link " in {
doc.select("div.grid-row > p a").text() shouldBe plaWithdrawConfirmFeedbackLink
}
"feedback link href" in {
doc.select("div.grid-row > p a").attr("href") shouldBe plaWithdrawConfirmFeedbackUrl
}
}
}
| hmrc/pensions-lifetime-allowance-frontend | test/views/pages/withdraw/WithdrawConfirmationViewSpec.scala | Scala | apache-2.0 | 2,895 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.persistence.slick
import akka.Done
import akka.NotUsed
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetDao
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence.slick.SlickReadSide
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import org.slf4j.LoggerFactory
import slick.dbio.DBIOAction
import slick.dbio.NoStream
import slick.jdbc.JdbcProfile
import slick.jdbc.JdbcBackend.Database
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* INTERNAL API
*/
private[lagom] class SlickReadSideImpl(slick: SlickProvider, offsetStore: SlickOffsetStore)(
implicit val executionContext: ExecutionContext
) extends SlickReadSide {
private val log = LoggerFactory.getLogger(this.getClass)
override def builder[Event <: AggregateEvent[Event]](readSideId: String): ReadSideHandlerBuilder[Event] =
new ReadSideHandlerBuilder[Event] {
var globalPrepare: DBIOAction[Any, _, _] = DBIOAction.successful(())
var prepare: (AggregateEventTag[Event]) => DBIOAction[Any, NoStream, Nothing] = (_) => DBIOAction.successful(())
var eventHandlers =
Map.empty[Class[_ <: Event], (EventStreamElement[_ <: Event]) => DBIOAction[Any, NoStream, Nothing]]
override def setGlobalPrepare(callback: DBIOAction[Any, _, _]): ReadSideHandlerBuilder[Event] = {
globalPrepare = callback
this
}
override def setPrepare(
callback: (AggregateEventTag[Event]) => DBIOAction[Any, NoStream, Nothing]
): ReadSideHandlerBuilder[Event] = {
prepare = callback
this
}
override def setEventHandler[E <: Event: ClassTag](
handler: (EventStreamElement[E]) => DBIOAction[Any, NoStream, Nothing]
): ReadSideHandlerBuilder[Event] = {
val eventClass = implicitly[ClassTag[E]].runtimeClass.asInstanceOf[Class[Event]]
eventHandlers += (eventClass -> handler
.asInstanceOf[(EventStreamElement[_ <: Event]) => DBIOAction[Any, NoStream, Nothing]])
this
}
override def build(): ReadSideHandler[Event] =
new SlickReadSideHandler[Event](readSideId, globalPrepare, prepare, eventHandlers)
}
private class SlickReadSideHandler[Event <: AggregateEvent[Event]](
readSideId: String,
globalPrepareCallback: DBIOAction[Any, _, _],
prepareCallback: (AggregateEventTag[Event]) => DBIOAction[Any, NoStream, Nothing],
eventHandlers: Map[Class[_ <: Event], (EventStreamElement[_ <: Event]) => DBIOAction[Any, NoStream, Nothing]]
) extends ReadSideHandler[Event] {
import slick.profile.api._
@volatile
private var offsetDao: SlickOffsetDao = _
override def globalPrepare(): Future[Done] =
slick.ensureTablesCreated().flatMap { _ =>
slick.db.run {
globalPrepareCallback.map(_ => Done.getInstance())
}
}
override def prepare(tag: AggregateEventTag[Event]): Future[Offset] =
for {
_ <- slick.db.run { prepareCallback(tag) }
dao <- offsetStore.prepare(readSideId, tag.tag)
} yield {
offsetDao = dao
dao.loadedOffset
}
override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
Flow[EventStreamElement[Event]]
.mapAsync(parallelism = 1) { element =>
val dbAction = eventHandlers
.get(element.event.getClass)
.map { handler =>
// apply handler if found
handler(element)
}
.getOrElse {
// fallback to empty action if no handler is found
if (log.isDebugEnabled) log.debug("Unhandled event [{}]", element.event.getClass.getName)
DBIO.successful(())
}
.flatMap { _ =>
            // whatever happens, we save the offset
offsetDao.updateOffsetQuery(element.offset)
}
.map(_ => Done)
slick.db.run(dbAction.transactionally)
}
}
}
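/*
 * A minimal usage sketch (not part of the original file): wiring one event handler into the builder
 * above. The CounterEvent hierarchy, the "counters" table, the H2 profile import and the readSideId
 * are illustrative assumptions; in a real service the SlickReadSide instance comes from Lagom's
 * SlickPersistenceComponents.
 */
object SlickReadSideExample {
  import slick.jdbc.H2Profile.api._

  sealed trait CounterEvent extends AggregateEvent[CounterEvent] {
    override def aggregateTag: AggregateEventTag[CounterEvent] = CounterEvent.Tag
  }
  object CounterEvent {
    val Tag: AggregateEventTag[CounterEvent] = AggregateEventTag[CounterEvent]
  }
  final case class Incremented(amount: Int) extends CounterEvent

  class Counters(tag: Tag) extends Table[(String, Int)](tag, "counters") {
    def id    = column[String]("id", O.PrimaryKey)
    def value = column[Int]("value")
    def *     = (id, value)
  }
  val counters = TableQuery[Counters]

  def buildHandler(readSide: SlickReadSide): ReadSideHandler[CounterEvent] =
    readSide
      .builder[CounterEvent]("counter-read-side")
      // Create the table once, before any events are processed.
      .setGlobalPrepare(counters.schema.createIfNotExists)
      // Apply each event inside the same transaction that stores the offset.
      .setEventHandler[Incremented] { element =>
        sqlu"UPDATE counters SET value = value + ${element.event.amount} WHERE id = 'total'"
      }
      .build()
}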
| lagom/lagom | persistence-jdbc/scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/persistence/slick/SlickReadSideImpl.scala | Scala | apache-2.0 | 4,579 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
private[spark] object ApplicationState
extends Enumeration("WAITING", "RUNNING", "FINISHED", "FAILED", "UNKNOWN") {
type ApplicationState = Value
val WAITING, RUNNING, FINISHED, FAILED, UNKNOWN = Value
val MAX_NUM_RETRY = 10
}
| windeye/spark | core/src/main/scala/org/apache/spark/deploy/master/ApplicationState.scala | Scala | apache-2.0 | 1,080 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query
import com.twitter.logging.Logger
import com.twitter.ostrich.admin.RuntimeEnvironment
import com.twitter.zipkin.BuildProperties
object Main {
val log = Logger.get(getClass.getName)
def main(args: Array[String]) {
log.info("Loading configuration")
val runtime = RuntimeEnvironment(BuildProperties, args)
val server = runtime.loadRuntimeConfig[ZipkinQuery]()
try {
server.start()
} catch {
case e: Exception =>
e.printStackTrace()
log.error(e, "Unexpected exception: %s", e.getMessage)
        System.exit(1)
}
}
}
| capttwinky/zipkin | zipkin-server/src/main/scala/com/twitter/zipkin/query/Main.scala | Scala | apache-2.0 | 1,213 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package api
import FlinkMutableBag.State
import flink._
import org.apache.flink.api.scala.DataSet
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.util.Collector
class FlinkMutableBag[K: Meta, V: Meta] private
(
/* Distributed state modeled naively as a mutable reference to an immutable bag. */
var ss: DataBag[State[K, V]]
)(
implicit flink: ExecutionEnvironment
) extends MutableBag[K, V] with Serializable {
import FlinkDataSet.typeInfoForType
def update[M: Meta](ms: DataBag[Group[K, M]])(f: UpdateFunction[M]): DataBag[(K, V)] = {
val conv = implicitly[DataSet[State[K, V]] => DataBag[State[K, V]]]
ss = FlinkOps.cache(conv((ss.as[DataSet] fullOuterJoin ms.as[DataSet]).where(0).equalTo(0)(
(s: State[K, V], m: Group[K, M], out: Collector[State[K, V]]) => {
val rs = Option(m) match {
case Some(m) =>
val vOld = Option(s)
val vNew = f(m.key, vOld.map(_.v), m.values)
vNew.map(State(m.key, _, true)) orElse vOld.map(_.copy(changed = false))
case None =>
Option(s).map(_.copy(changed = false))
}
rs.foreach(out.collect)
})))
for (s <- ss if s.changed) yield s.k -> s.v
}
def bag(): DataBag[(K, V)] =
for (s <- ss) yield s.k -> s.v
def copy(): MutableBag[K, V] =
new FlinkMutableBag[K, V](ss)
}
object FlinkMutableBag {
case class State[K, V](k: K, v: V, changed: Boolean = true)
def apply[K: Meta, V: Meta](
init: DataBag[(K, V)]
)(
implicit flink: ExecutionEnvironment
): MutableBag[K, V] = new FlinkMutableBag(FlinkOps.cache(for {
(k, v) <- init
} yield State(k, v)))
private[api] val tempNames = Stream.iterate(0)(_ + 1)
.map(i => f"stateful$i%03d")
.toIterator
}
| emmalanguage/emma | emma-flink/src/main/scala/org/emmalanguage/api/FlinkMutableBag.scala | Scala | apache-2.0 | 2,430 |
/*
Copyright (c) 2009-2012, The Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.cdlib.was.weari;
import java.io.File;
import com.typesafe.config.{ ConfigFactory, Config => TSConfig };
class Config (confRoot : TSConfig) {
def this() = this (ConfigFactory.load());
confRoot.checkValid(ConfigFactory.defaultReference(), "weari");
val conf = confRoot.getConfig("weari");
val port = conf.getInt("port");
/* number of threads to use for ConcurrentUpdateSolrServer */
val threadCount = conf.getInt("threadCount");
/* size of queue for ConcurrentUpdateSolrServer */
val queueSize = conf.getInt("queueSize");
val jsonBaseDir = conf.getString("jsonBaseDir");
/* threshold of tracked merged documents at which we should commit */
val trackCommitThreshold = conf.getInt("trackCommitThreshold");
/* maximum number of id queries to send to the server at once (id:A OR id:B ... )*/
val maxIdQuerySize = conf.getInt("maxIdQuerySize");
/* size of groups to send to the batch merge at once */
val batchMergeGroupSize = conf.getInt("batchMergeGroupSize")
/* number of docs to load at once */
val numDocsPerRequest = conf.getInt("numDocsPerRequest");
/* number of doc ids to load at once */
val numDocIdsPerRequest = conf.getInt("numDocIdsPerRequest");
val commitBetweenArcs = conf.getBoolean("commitBetweenArcs");
val commitThreshold = conf.getInt("commitThreshold");
val useHadoop = conf.getBoolean("useHadoop");
val useRealTimeGet = conf.getBoolean("useRealTimeGet");
val useAtomicUpdates = conf.getBoolean("useAtomicUpdates");
val batchArcParseSize = conf.getInt("batchArcParseSize");
val solrServer = conf.getString("solrServer");
val solrZkHost = conf.getString("solrZkHost");
val solrCollection = conf.getString("solrCollection");
}
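/*
 * Illustrative only (not part of the original file): the fields above expect a configuration block
 * of roughly this shape; the values shown are made-up examples, not the project's reference.conf.
 *
 *   weari {
 *     port                  = 9090
 *     threadCount           = 4
 *     queueSize             = 1000
 *     jsonBaseDir           = "/tmp/weari/json"
 *     trackCommitThreshold  = 10000
 *     maxIdQuerySize        = 500
 *     batchMergeGroupSize   = 100
 *     numDocsPerRequest     = 500
 *     numDocIdsPerRequest   = 1000
 *     commitBetweenArcs     = false
 *     commitThreshold       = 10000
 *     useHadoop             = false
 *     useRealTimeGet        = true
 *     useAtomicUpdates      = true
 *     batchArcParseSize     = 10
 *     solrServer            = "http://localhost:8983/solr"
 *     solrZkHost            = "localhost:2181"
 *     solrCollection        = "weari"
 *   }
 */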
| cdlib/weari | src/main/scala/org/cdlib/was/weari/Config.scala | Scala | bsd-3-clause | 3,267 |
package me.heaton.functions
object PartialFunctions extends App {
val one: PartialFunction[Int, String] = {
case 1 => "one"
case 2 => "two"
}
println(one isDefinedAt 1)
println(one isDefinedAt 3)
}
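/*
 * A small companion sketch (not part of the original file): composing partial functions with
 * `orElse` and turning one into a total function with `lift`.
 */
object PartialFunctionsMore extends App {
  val one: PartialFunction[Int, String] = { case 1 => "one" }
  val two: PartialFunction[Int, String] = { case 2 => "two" }

  val oneOrTwo = one orElse two            // defined at 1 and 2
  val lifted: Int => Option[String] = one.lift

  println(oneOrTwo(2))   // two
  println(lifted(1))     // Some(one)
  println(lifted(3))     // None
}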
| heaton/hello-scala | src/main/scala/me/heaton/functions/PartialFunctions.scala | Scala | mit | 217 |
package com.twitter.finagle.builder
import java.net.{InetSocketAddress, InetAddress}
import com.twitter.finagle._
import com.twitter.finagle.integration.{StringCodec, IntegrationBase}
import com.twitter.util._
import com.twitter.util.registry.{Entry, GlobalRegistry, SimpleRegistry}
import org.jboss.netty.channel.ChannelPipelineFactory
import org.junit.runner.RunWith
import org.mockito.Mockito.{verify, when}
import org.mockito.Matchers
import org.mockito.Matchers._
import org.scalatest.FunSuite
import org.scalatest.mock.MockitoSugar
import org.scalatest.junit.JUnitRunner
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
@RunWith(classOf[JUnitRunner])
class ServerBuilderTest extends FunSuite
with Eventually
with IntegrationPatience
with MockitoSugar
with IntegrationBase {
trait ServerBuilderHelper {
val preparedFactory = mock[ServiceFactory[String, String]]
val preparedServicePromise = new Promise[Service[String, String]]
when(preparedFactory.status) thenReturn Status.Open
when(preparedFactory()) thenReturn preparedServicePromise
when(preparedFactory.close(any[Time])) thenReturn Future.Done
when(preparedFactory.map(Matchers.any())) thenReturn
preparedFactory.asInstanceOf[ServiceFactory[Any, Nothing]]
val m = new MockChannel
when(m.codec.prepareConnFactory(any[ServiceFactory[String, String]])) thenReturn preparedFactory
}
val svc: Service[String, String] = Service.const(Future.value("hi"))
def verifyProtocolRegistry(name: String, expected: String)(build: => Server) = {
test(name + " registers protocol library") {
val simple = new SimpleRegistry()
GlobalRegistry.withRegistry(simple) {
val server = build
val entries = GlobalRegistry.get.toSet
val unspecified = entries.count(_.key.startsWith(Seq("server", "not-specified")))
assert(unspecified == 0, "saw registry keys with 'not-specified' protocol")
val specified = entries.count(_.key.startsWith(Seq("server", expected)))
assert(specified > 0, "did not see expected protocol registry keys")
server.close()
}
}
}
def verifyServerBoundAddress(name: String, expected: String)(build: => Server) = {
test(s"$name registers server with bound address") {
val simple = new SimpleRegistry()
GlobalRegistry.withRegistry(simple) {
val server = build
val entries = GlobalRegistry.get.toSet
val specified = entries.filter(_.key.startsWith(Seq("server", expected)))
// Entries are in the form: Entry(List(server, fancy, test, /127.0.0.1:58904, RequestStats, unit),MILLISECONDS)
val entry = specified.head // data is repeated as entry.key, just take the first
val hostAndPort = entry.key.filter(_.contains("127.0.0.1")).head
assert(!hostAndPort.contains(":0"), "unbounded address in server registry")
server.close()
}
}
}
def loopback = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
verifyProtocolRegistry("#codec(Codec)", expected = "fancy") {
val ctx = new ServerBuilderHelper {}
when(ctx.m.codec.protocolLibraryName).thenReturn("fancy")
ServerBuilder()
.name("test")
.codec(ctx.m.codec)
.bindTo(loopback)
.build(svc)
}
verifyProtocolRegistry("#codec(CodecFactory)", expected = "fancy") {
val ctx = new ServerBuilderHelper {}
val cf = new CodecFactory[String, String] {
def client: Client = ???
def server: Server = (_: ServerCodecConfig) => ctx.m.codec
override def protocolLibraryName = "fancy"
}
ServerBuilder()
.name("test")
.codec(cf)
.bindTo(loopback)
.build(svc)
}
verifyProtocolRegistry("#codec(CodecFactory#Server)", expected = "fancy") {
val ctx = new ServerBuilderHelper {}
when(ctx.m.codec.protocolLibraryName).thenReturn("fancy")
val cfServer: CodecFactory[String, String]#Server =
{ (_: ServerCodecConfig) => ctx.m.codec }
ServerBuilder()
.name("test")
.codec(cfServer)
.bindTo(loopback)
.build(svc)
}
verifyProtocolRegistry("#codec(CodecFactory#Server)FancyCodec", expected = "fancy") {
class FancyCodec extends CodecFactory[String, String] {
def client = { config =>
new com.twitter.finagle.Codec[String, String] {
def pipelineFactory = null
}
}
def server = { config =>
new com.twitter.finagle.Codec[String, String] {
def pipelineFactory = null
}
}
override val protocolLibraryName: String = "fancy"
}
ServerBuilder()
.codec(new FancyCodec)
.bindTo(loopback)
.name("test")
.build(svc)
}
verifyServerBoundAddress("#codec(CodecFactory#Server)FancyCodec", expected = "fancy") {
class FancyCodec extends CodecFactory[String, String] {
def client = { config =>
new com.twitter.finagle.Codec[String, String] {
def pipelineFactory = null
}
}
def server = { config =>
new com.twitter.finagle.Codec[String, String] {
def pipelineFactory = null
}
}
override val protocolLibraryName: String = "fancy"
}
ServerBuilder()
.codec(new FancyCodec)
.bindTo(loopback) // loopback is configured to port 0
.name("test")
.build(svc)
}
}
| adriancole/finagle | finagle-core/src/test/scala/com/twitter/finagle/builder/ServerBuilderTest.scala | Scala | apache-2.0 | 5,363 |
def split(x: M): (M, M) = (f(x), g(x)) | hmemcpy/milewski-ctfp-pdf | src/content/3.7/code/scala/snippet27.scala | Scala | gpl-3.0 | 38 |
package dhg.util
/**
* Enhancement methods for numbers
*
* @author Dan Garrette (dhgarrette@gmail.com)
*/
object NumberUtil {
implicit class Enriched_Int(val self: Int) extends AnyVal {
/**
* Shorthand for a range from this Int to the max integer value.
*/
def up: Range = self to Int.MaxValue
def upi: Iterator[Int] = Iterator.from(self)
/**
* Shorthand for a range from this to n by -1
*/
def downto(n: Int): Range = self to n by -1
}
/**
* A mutable number-holding object
*/
class MutableNumber[N](private[this] var i: N)(implicit num: Numeric[N]) {
def this()(implicit num: Numeric[N]) = this(num.zero)
def +=(o: N) = { i = num.plus(i, o); this }
def get = i
}
}
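/*
 * A minimal usage sketch (not part of the original file) for the enrichments above.
 */
object NumberUtilExample extends App {
  import NumberUtil._

  println((5 downto 1).toList)   // List(5, 4, 3, 2, 1)
  println(3.up.take(4).toList)   // List(3, 4, 5, 6)

  val n = new MutableNumber[Int]()
  n += 2
  n += 40
  println(n.get)                 // 42
}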
| dhgarrette/low-resource-pos-tagging-2014 | src/main/scala/dhg/util/NumberUtil.scala | Scala | apache-2.0 | 745 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.SparkFunSuite
class DistributionSuite extends SparkFunSuite with Matchers {
test("summary") {
val d = new Distribution((1 to 100).toArray.map{_.toDouble})
val stats = d.statCounter
stats.count should be (100)
stats.mean should be (50.5)
stats.sum should be (50 * 101)
val quantiles = d.getQuantiles()
quantiles(0) should be (1)
quantiles(1) should be (26)
quantiles(2) should be (51)
quantiles(3) should be (76)
quantiles(4) should be (100)
}
}
| hvanhovell/spark | core/src/test/scala/org/apache/spark/util/DistributionSuite.scala | Scala | apache-2.0 | 1,445 |
package org.typedsolutions.aws.util
/**
* Created by matt on 19/07/15.
*/
class Context {
}
| mattroberts297/akka-kinesis | src/test/scala/org/typedsolutions/aws/util/Context.scala | Scala | mit | 96 |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package uk.org.nbn.nbnv.importer.ingestion
import uk.org.nbn.nbnv.jpa.nbncore._
import javax.persistence.EntityManager
import uk.org.nbn.nbnv.importer.data.{Database, Repository}
import uk.org.nbn.nbnv.importer.data.Implicits._
import com.google.inject.Inject
class SampleIngester @Inject()(db: Database) {
def upsertSample(sampleKey: Option[String], survey: Survey): Sample = {
val key = sampleKey getOrElse "1"
val sample = db.repo.getSample(key, survey)
sample match {
case Some(s) => s
case None => {
val s = new Sample()
s.setProviderKey(key)
s.setSurvey(survey)
db.em.persist(s)
s
}
}
}
}
| JNCC-dev-team/nbn-importer | importer/src/main/scala/uk/org/nbn/nbnv/importer/ingestion/SampleIngester.scala | Scala | apache-2.0 | 812 |
package org.talkingpuffin.ui
import javax.swing.{SwingUtilities, SwingWorker}
/**
* Simplifies running code on the Swing event dispatch thread and doing background work with a SwingWorker
*/
object SwingInvoke {
def later(f: => Unit) {
SwingUtilities.invokeLater(new Runnable{ def run{f} })
}
def execSwingWorker[T,V](inBackGround: => T, whenDone: (T) => Unit){
new SwingWorker[T, V] {
override def doInBackground = inBackGround
override def done = whenDone(get)
}.execute
}
}
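/*
 * A minimal usage sketch (not part of the original file): run a slow computation off the event
 * dispatch thread and apply its result to a Swing component back on the EDT. The JLabel and the
 * one-second sleep are illustrative assumptions.
 */
object SwingInvokeExample {
  import javax.swing.JLabel

  def loadInto(label: JLabel) {
    SwingInvoke.execSwingWorker[String, Unit]({
      // Runs on a background worker thread.
      Thread.sleep(1000)
      "done"
    }, { result =>
      // Runs on the event dispatch thread once the background work completes.
      label.setText(result)
    })
  }
}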
| dcbriccetti/talking-puffin | desktop/src/main/scala/org/talkingpuffin/ui/SwingInvoke.scala | Scala | mit | 459 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import scala.language.implicitConversions
import scala.util.matching.Regex
import org.apache.spark.sql.execution.command._
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.LogServiceFactory
/**
 * TODO remove the duplicate code and add the common methods to a common class.
 * Parser for all Carbon DDL cases
*/
abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
protected val AGGREGATE = carbonKeyWord("AGGREGATE")
protected val AS = carbonKeyWord("AS")
protected val AGGREGATION = carbonKeyWord("AGGREGATION")
protected val ALL = carbonKeyWord("ALL")
protected val HIGH_CARDINALITY_DIMS = carbonKeyWord("NO_DICTIONARY")
protected val BEFORE = carbonKeyWord("BEFORE")
protected val BY = carbonKeyWord("BY")
protected val CASCADE = carbonKeyWord("CASCADE")
protected val CLASS = carbonKeyWord("CLASS")
protected val CLEAN = carbonKeyWord("CLEAN")
protected val COLS = carbonKeyWord("COLS")
protected val COLUMNS = carbonKeyWord("COLUMNS")
protected val COMPACT = carbonKeyWord("COMPACT")
protected val FINISH = carbonKeyWord("FINISH")
protected val STREAMING = carbonKeyWord("STREAMING")
protected val CREATE = carbonKeyWord("CREATE")
protected val CUBE = carbonKeyWord("CUBE")
protected val CUBES = carbonKeyWord("CUBES")
protected val DATA = carbonKeyWord("DATA")
protected val DATABASE = carbonKeyWord("DATABASE")
protected val DATABASES = carbonKeyWord("DATABASES")
protected val DELETE = carbonKeyWord("DELETE")
protected val DELIMITER = carbonKeyWord("DELIMITER")
protected val DESCRIBE = carbonKeyWord("DESCRIBE")
protected val DESC = carbonKeyWord("DESC")
protected val DETAIL = carbonKeyWord("DETAIL")
protected val DIMENSIONS = carbonKeyWord("DIMENSIONS")
protected val DIMFOLDERPATH = carbonKeyWord("DIMFOLDERPATH")
protected val DROP = carbonKeyWord("DROP")
protected val ESCAPECHAR = carbonKeyWord("ESCAPECHAR")
protected val EXCLUDE = carbonKeyWord("EXCLUDE")
protected val EXPLAIN = carbonKeyWord("EXPLAIN")
protected val EXTENDED = carbonKeyWord("EXTENDED")
protected val FORMATTED = carbonKeyWord("FORMATTED")
protected val FACT = carbonKeyWord("FACT")
protected val FIELDS = carbonKeyWord("FIELDS")
protected val FILEHEADER = carbonKeyWord("FILEHEADER")
protected val SERIALIZATION_NULL_FORMAT = carbonKeyWord("SERIALIZATION_NULL_FORMAT")
protected val BAD_RECORDS_LOGGER_ENABLE = carbonKeyWord("BAD_RECORDS_LOGGER_ENABLE")
protected val BAD_RECORDS_ACTION = carbonKeyWord("BAD_RECORDS_ACTION")
protected val IS_EMPTY_DATA_BAD_RECORD = carbonKeyWord("IS_EMPTY_DATA_BAD_RECORD")
protected val IS_EMPTY_COMMA_DATA_BAD_RECORD = carbonKeyWord("IS_NULL_DATA_BAD_RECORD")
protected val SKIP_EMPTY_LINE = carbonKeyWord("SKIP_EMPTY_LINE")
protected val FILES = carbonKeyWord("FILES")
protected val FROM = carbonKeyWord("FROM")
protected val HIERARCHIES = carbonKeyWord("HIERARCHIES")
protected val IN = carbonKeyWord("IN")
protected val INCLUDE = carbonKeyWord("INCLUDE")
protected val INPATH = carbonKeyWord("INPATH")
protected val INTO = carbonKeyWord("INTO")
protected val LEVELS = carbonKeyWord("LEVELS")
protected val LIKE = carbonKeyWord("LIKE")
protected val LOAD = carbonKeyWord("LOAD")
protected val LOCAL = carbonKeyWord("LOCAL")
protected val MAPPED = carbonKeyWord("MAPPED")
protected val MEASURES = carbonKeyWord("MEASURES")
protected val MERGE = carbonKeyWord("MERGE")
protected val MULTILINE = carbonKeyWord("MULTILINE")
protected val COMPLEX_DELIMITER_LEVEL_1 = carbonKeyWord("COMPLEX_DELIMITER_LEVEL_1")
protected val COMPLEX_DELIMITER_LEVEL_2 = carbonKeyWord("COMPLEX_DELIMITER_LEVEL_2")
protected val COMPLEX_DELIMITER_LEVEL_3 = carbonKeyWord("COMPLEX_DELIMITER_LEVEL_3")
protected val OPTIONS = carbonKeyWord("OPTIONS")
protected val OUTPATH = carbonKeyWord("OUTPATH")
protected val OVERWRITE = carbonKeyWord("OVERWRITE")
protected val PARTITION = carbonKeyWord("PARTITION")
protected val PARTITION_COUNT = carbonKeyWord("PARTITION_COUNT")
protected val PARTITIONDATA = carbonKeyWord("PARTITIONDATA")
protected val PARTITIONER = carbonKeyWord("PARTITIONER")
protected val PARTITIONS = carbonKeyWord("PARTITIONS")
protected val QUOTECHAR = carbonKeyWord("QUOTECHAR")
protected val RELATION = carbonKeyWord("RELATION")
protected val SCHEMA = carbonKeyWord("SCHEMA")
protected val SCHEMAS = carbonKeyWord("SCHEMAS")
protected val SET = Keyword("SET")
protected val SHOW = carbonKeyWord("SHOW")
protected val SPLIT = carbonKeyWord("SPLIT")
protected val TABLES = carbonKeyWord("TABLES")
protected val TABLE = carbonKeyWord("TABLE")
protected val TERMINATED = carbonKeyWord("TERMINATED")
protected val TYPE = carbonKeyWord("TYPE")
protected val UPDATE = carbonKeyWord("UPDATE")
protected val USE = carbonKeyWord("USE")
protected val WHERE = Keyword("WHERE")
protected val WITH = carbonKeyWord("WITH")
protected val AGGREGATETABLE = carbonKeyWord("AGGREGATETABLE")
protected val ABS = carbonKeyWord("abs")
protected val EXECUTOR = carbonKeyWord("EXECUTOR")
protected val FOR = carbonKeyWord("FOR")
protected val SCRIPTS = carbonKeyWord("SCRIPTS")
protected val USING = carbonKeyWord("USING")
protected val LIMIT = carbonKeyWord("LIMIT")
protected val DEFAULTS = carbonKeyWord("DEFAULTS")
protected val ALTER = carbonKeyWord("ALTER")
protected val ADD = carbonKeyWord("ADD")
protected val IF = carbonKeyWord("IF")
protected val NOT = carbonKeyWord("NOT")
protected val EXISTS = carbonKeyWord("EXISTS")
protected val DIMENSION = carbonKeyWord("DIMENSION")
protected val STARTTIME = carbonKeyWord("STARTTIME")
protected val HISTORY = carbonKeyWord("HISTORY")
protected val SEGMENTS = carbonKeyWord("SEGMENTS")
protected val SEGMENT = carbonKeyWord("SEGMENT")
protected val METACACHE = carbonKeyWord("METACACHE")
protected val STRING = carbonKeyWord("STRING")
protected val INTEGER = carbonKeyWord("INTEGER")
protected val TIMESTAMP = carbonKeyWord("TIMESTAMP")
protected val DATE = carbonKeyWord("DATE")
protected val CHAR = carbonKeyWord("CHAR")
protected val VARCHAR = carbonKeyWord("VARCHAR")
protected val NUMERIC = carbonKeyWord("NUMERIC")
protected val DECIMAL = carbonKeyWord("DECIMAL")
protected val DOUBLE = carbonKeyWord("DOUBLE")
protected val FLOAT = carbonKeyWord("FLOAT")
protected val SHORT = carbonKeyWord("SHORT")
protected val INT = carbonKeyWord("INT")
protected val BOOLEAN = carbonKeyWord("BOOLEAN")
protected val LONG = carbonKeyWord("LONG")
protected val BIGINT = carbonKeyWord("BIGINT")
protected val BINARY = carbonKeyWord("BINARY")
protected val ARRAY = carbonKeyWord("ARRAY")
protected val STRUCT = carbonKeyWord("STRUCT")
protected val MAP = carbonKeyWord("MAP")
protected val SMALLINT = carbonKeyWord("SMALLINT")
protected val CHANGE = carbonKeyWord("CHANGE")
protected val TBLPROPERTIES = carbonKeyWord("TBLPROPERTIES")
protected val ID = carbonKeyWord("ID")
protected val DATAMAP = carbonKeyWord("DATAMAP")
protected val ON = carbonKeyWord("ON")
protected val DMPROPERTIES = carbonKeyWord("DMPROPERTIES")
protected val SELECT = carbonKeyWord("SELECT")
protected val REBUILD = carbonKeyWord("REBUILD")
protected val DEFERRED = carbonKeyWord("DEFERRED")
protected val STREAM = carbonKeyWord("STREAM")
protected val STREAMS = carbonKeyWord("STREAMS")
protected val STMPROPERTIES = carbonKeyWord("STMPROPERTIES")
protected val CARBONCLI = carbonKeyWord("CARBONCLI")
protected val PATH = carbonKeyWord("PATH")
protected val INSERT = carbonKeyWord("INSERT")
protected val STAGE = carbonKeyWord("STAGE")
protected val INDEX = carbonKeyWord("INDEX")
protected val INDEXES = carbonKeyWord("INDEXES")
protected val REGISTER = carbonKeyWord("REGISTER")
protected val newReservedWords =
this.getClass
.getMethods
.filter(_.getReturnType == classOf[Keyword])
.map(_.invoke(this).asInstanceOf[Keyword].str)
override val lexical = {
val sqllex = new SqlLexical()
sqllex.initialize(newReservedWords)
sqllex
}
import lexical.Identifier
implicit def regexToParser(regex: Regex): Parser[String] = {
acceptMatch(
s"identifier matching regex ${ regex }",
{ case Identifier(str) if regex.unapplySeq(str).isDefined => str }
)
}
/**
   * Converts a keyword into a case-insensitive regular expression.
   *
   * @param keys the keyword text
   * @return a regex that matches the keyword regardless of case
*/
def carbonKeyWord(keys: String): Regex = {
("(?i)" + keys).r
}
protected lazy val dbTableIdentifier: Parser[Seq[String]] =
(ident <~ ".").? ~ ident ^^ {
case databaseName ~ tableName =>
if (databaseName.isDefined) {
Seq(databaseName.get, tableName)
} else {
Seq(tableName)
}
}
protected lazy val options: Parser[(String, String)] =
(stringLit <~ "=") ~ stringLit ^^ {
case opt ~ optvalue => (opt.trim.toLowerCase(), optvalue)
case _ => ("", "")
}
protected lazy val commandOptions: Parser[String] =
stringLit ^^ {
case optValue => optValue
case _ => ""
}
protected lazy val partitions: Parser[(String, Option[String])] =
(ident <~ "=".?) ~ stringLit.? ^^ {
case opt ~ optvalue => (opt.trim, optvalue)
case _ => ("", None)
}
protected lazy val valueOptions: Parser[(Int, Int)] =
(numericLit <~ ",") ~ numericLit ^^ {
case opt ~ optvalue => (opt.toInt, optvalue.toInt)
case _ => (0, 0)
}
protected lazy val columnOptions: Parser[(String, String)] =
(stringLit <~ ",") ~ stringLit ^^ {
case opt ~ optvalue => (opt, optvalue)
case _ =>
throw new MalformedCarbonCommandException(s"value cannot be empty")
}
protected lazy val dimCol: Parser[Field] = anyFieldDef
protected lazy val primitiveTypes =
    STRING ^^^ "string" | BOOLEAN ^^^ "boolean" | INTEGER ^^^ "integer" |
TIMESTAMP ^^^ "timestamp" | NUMERIC ^^^ "numeric" |
(LONG | BIGINT) ^^^ "bigint" | (SHORT | SMALLINT) ^^^ "smallint" |
INT ^^^ "int" | DOUBLE ^^^ "double" | FLOAT ^^^ "double" | decimalType |
DATE ^^^ "date" | charType
protected lazy val miscType = BINARY ^^^ "binary"
/**
* Matching the char data type and returning the same.
*/
private lazy val charType =
    (CHAR | VARCHAR) ~ opt("(" ~> numericLit <~ ")") ^^ {
case (char ~ _) =>
s"$char"
}
/**
* Matching the decimal(10,0) data type and returning the same.
*/
private lazy val decimalType =
DECIMAL ~ (("(" ~> numericLit <~ ",") ~ (numericLit <~ ")")).? ^^ {
case decimal ~ precisionAndScale => if (precisionAndScale.isDefined) {
s"decimal(${ precisionAndScale.get._1 }, ${ precisionAndScale.get._2 })"
} else {
s"decimal(10,0)"
}
}
protected lazy val nestedType: Parser[Field] = structFieldType | arrayFieldType | mapFieldType |
primitiveFieldType | miscFieldType
lazy val anyFieldDef: Parser[Field] =
(ident | stringLit) ~ (":".? ~> nestedType) ~ (IN ~> (ident | stringLit)).? ^^ {
case e1 ~ e2 ~ e3 =>
Field(e1, e2.dataType, Some(e1), e2.children, null, e3)
}
protected lazy val primitiveFieldType: Parser[Field] =
primitiveTypes ^^ {
case e1 =>
Field("unknown", Some(e1), Some("unknown"), Some(null))
}
protected lazy val miscFieldType: Parser[Field] =
miscType ^^ {
case e1 =>
Field("unknown", Some(e1), Some("unknown"), Some(null))
}
protected lazy val arrayFieldType: Parser[Field] =
((ARRAY ^^^ "array") ~> "<" ~> nestedType <~ ">") ^^ {
case e1 =>
Field("unknown", Some("array"), Some("unknown"),
Some(List(Field("val", e1.dataType, Some("val"),
e1.children))))
}
protected lazy val structFieldType: Parser[Field] =
((STRUCT ^^^ "struct") ~> "<" ~> repsep(anyFieldDef, ",") <~ ">") ^^ {
case e1 =>
Field("unknown", Some("struct"), Some("unknown"), Some(e1))
}
// Map<Key,Value> is represented as Map<Struct<Key,Value>>
protected lazy val mapFieldType: Parser[Field] =
(MAP ^^^ "map") ~> "<" ~> primitiveFieldType ~ ("," ~> nestedType) <~ ">" ^^ {
case key ~ value =>
Field("unknown", Some("map"), Some("unknown"),
Some(List(
Field("val", Some("struct"), Some("unknown"),
Some(List(
Field("key", key.dataType, Some("key"), key.children),
Field("value", value.dataType, Some("value"), value.children)))))))
}
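  /*
   * Illustrative only (not part of the original parser): for a column declared as map<string, int>,
   * the rule above yields a Field tree of the shape
   *   Field("unknown", Some("map"), children = List(
   *     Field("val", Some("struct"), children = List(
   *       Field("key", Some("string"), ...), Field("value", Some("int"), ...)))))
   * i.e. each map entry is stored as a key/value struct, matching the comment above.
   */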
protected lazy val measureCol: Parser[Field] =
(ident | stringLit) ~ (INTEGER ^^^ "integer" | NUMERIC ^^^ "numeric" | SHORT ^^^ "smallint" |
BIGINT ^^^ "bigint" | DECIMAL ^^^ "decimal").? ~
(AS ~> (ident | stringLit)).? ~ (IN ~> (ident | stringLit)).? ^^ {
case e1 ~ e2 ~ e3 ~ e4 => Field(e1, e2, e3, Some(null))
}
protected lazy val segmentId: Parser[String] =
numericLit ^^ { u => u } |
elem("decimal", p => {
p.getClass.getSimpleName.equals("FloatLit") ||
p.getClass.getSimpleName.equals("DecimalLit")
}) ^^ (_.chars)
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala | Scala | apache-2.0 | 14,168 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.ml.tensorflow
import com.johnsnowlabs.nlp.annotators.common.{Sentence, SentenceSplit}
import com.johnsnowlabs.nlp.{Annotation, AnnotatorType}
import com.johnsnowlabs.nlp.annotators.tokenizer.bpe.Gpt2Tokenizer
import org.tensorflow.Session
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.math.exp
class TensorflowGPT2(val tensorflow: TensorflowWrapper,
val bpeTokenizer: Gpt2Tokenizer,
configProtoBytes: Option[Array[Byte]] = None
) extends Serializable {
// keys representing the input and output tensors of the GPT2 model
private val inputIdsKey = "serving1_serving1_input_ids:0"
private val attentionMaskKey = "serving1_serving1_attention_mask:0"
private val outputLogitsKey = "StatefulPartitionedCall:0"
private val paddingTokenId = 50256
private val eosTokenId = 50256
private def sessionWarmup(): Unit = {
val dummyInput = Array.fill(128)(0) ++ Array(eosTokenId)
tag(Seq(dummyInput),
minOutputLength = 1,
maxOutputLength = 5,
doSample = false,
temperature = 0f,
topK = 0,
topP = 0f,
repetitionPenalty = 0f,
noRepeatNgramSize = 0,
randomSeed = Option(0),
ignoreTokenIds = Array(paddingTokenId)
)
}
sessionWarmup()
def predict(sentences: Seq[Annotation],
batchSize: Int,
minOutputLength: Int,
maxOutputLength: Int,
doSample: Boolean,
temperature: Double,
topK: Int,
topP: Double,
repetitionPenalty: Double,
noRepeatNgramSize: Int,
task: String,
randomSeed: Option[Int] = None,
ignoreTokenIds: Array[Int] = Array()
): Seq[Annotation] = {
val batchDecoder = sentences.grouped(batchSize).toArray.flatMap { batch =>
val batchSP = encode(batch, task)
val spIds = tag(
batchSP,
minOutputLength,
maxOutputLength,
doSample,
temperature,
topK,
topP,
repetitionPenalty,
noRepeatNgramSize,
randomSeed,
ignoreTokenIds)
decode(spIds)
}
var sentBegin, nextSentEnd = 0
batchDecoder.zip(sentences).map {
case (content, sent) =>
nextSentEnd += content.length - 1
val annots = new Annotation(
annotatorType = AnnotatorType.DOCUMENT,
begin = sentBegin,
end = nextSentEnd,
result = content,
metadata = sent.metadata)
sentBegin += nextSentEnd + 1
annots
}
}
def tag(batch: Seq[Array[Int]],
minOutputLength: Int,
maxOutputLength: Int,
doSample: Boolean,
temperature: Double,
topK: Int,
topP: Double,
repetitionPenalty: Double,
noRepeatNgramSize: Int,
randomSeed: Option[Int],
ignoreTokenIds: Array[Int] = Array()): Array[Array[Int]] = {
val numReturn_sequences = 1
//from config
val vocab_size = 50257
var effectiveBatch_size = 1
// set effective batch size and effective batch multiplier according to do_sample
if (doSample) {
effectiveBatch_size = batch.length * numReturn_sequences
}
else {
effectiveBatch_size = batch.length
}
val session = tensorflow.getTFSessionWithSignature(configProtoBytes = configProtoBytes)
val maxSentenceLength = batch.map(_.length).max
val paddedBatch = batch.map { tokenIds =>
val diff = maxSentenceLength - tokenIds.length
Array.fill[Int](diff)(this.paddingTokenId) ++ tokenIds.take(maxSentenceLength)
}
generateNoBeamSearch(
paddedBatch, maxOutputLength, minOutputLength, doSample, temperature, topK, topP, repetitionPenalty,
noRepeatNgramSize, effectiveBatch_size, vocab_size, randomSeed, session, ignoreTokenIds)
}
def generateNoBeamSearch(inputIds: Seq[Array[Int]],
maxOutputLength: Int,
minOutputLength: Int,
doSample: Boolean,
temperature: Double,
topK: Int,
topP: Double,
repetitionPenalty: Double,
noRepeatNgramSize: Int,
batch_size: Int,
vocab_size: Int,
randomSeed: Option[Int],
session: Session,
ignoreTokenIds: Array[Int] = Array()
): Array[Array[Int]] = {
/**
     * Generates sequences for each example without beam search (numBeams == 1). All returned
     * sequences are generated independently.
* */
var decoderInputs = inputIds.toArray
var curLen = decoderInputs(0).length
var stopDecoder = false
// length of generated sentences / unfinished sentences
var unfinishedSents = List.fill(decoderInputs.length)(1)
var sentLengths = List.fill(decoderInputs.length)(maxOutputLength)
while (!stopDecoder) {
val decoderInputLength = decoderInputs.head.length
val tensorDecoder = new TensorResources()
val decoderInputBuffers = tensorDecoder.createIntBuffer(decoderInputs.length * decoderInputLength)
val decoderAttentionBuffers = tensorDecoder.createIntBuffer(decoderInputs.length * decoderInputLength)
decoderInputs.zipWithIndex.foreach { case (pieceIds, idx) =>
val offset = idx * decoderInputLength
decoderInputBuffers.offset(offset).write(pieceIds)
val paddingMasks = pieceIds.map(_ => 1)
decoderAttentionBuffers.offset(offset).write(paddingMasks)
}
val inputIdTensors = tensorDecoder.createIntBufferTensor(
Array(decoderInputs.length.toLong, decoderInputLength), decoderInputBuffers)
val attentionMaskTensors = tensorDecoder.createIntBufferTensor(
Array(decoderInputs.length.toLong, decoderInputLength), decoderAttentionBuffers)
val runner = session.runner
// TODO add past to the model and use cache
runner
.feed(inputIdsKey, inputIdTensors)
.feed(attentionMaskKey, attentionMaskTensors)
.fetch(outputLogitsKey)
val decoderOuts = runner.run().asScala
val decoderOutputs = TensorResources.extractFloats(decoderOuts.head).grouped(vocab_size).toArray.grouped(decoderInputLength).toArray
var nextTokenLogits = for (decoderOutput <- decoderOutputs) yield decoderOutput.last
nextTokenLogits = nextTokenLogits.map(logits => {
logits.indices.map(i => {
if (ignoreTokenIds.contains(i)) Float.MinValue else logits(i)
}).toArray
})
// repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
if (repetitionPenalty != 1.0) {
nextTokenLogits = createNextTokenLogitsPenalties(
decoderInputs, nextTokenLogits, repetitionPenalty
)
}
if (noRepeatNgramSize > 0) {
// calculate a list of banned tokens to prevent repetitively generating the same ngrams
// from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
val bannedTokens = calcBannedNgramTokens(decoderInputs, batch_size, noRepeatNgramSize, curLen)
// create bannedTokens boolean mask
var bannedTokensIndicesMask = Array.empty[IndexedSeq[Boolean]]
for (bannedTokensSlice <- bannedTokens) {
bannedTokensIndicesMask = bannedTokensIndicesMask :+
(for (token <- 0 until vocab_size) yield if (bannedTokensSlice.contains(token)) true else false)
}
if (!bannedTokensIndicesMask.isEmpty) {
nextTokenLogits = for ((nextTokenLogit, bannedTokensIndexMask) <- nextTokenLogits.zip(bannedTokensIndicesMask)) yield setTensorByIndicesToValue(
nextTokenLogit, bannedTokensIndexMask, Float.NegativeInfinity
)
}
}
// set eos token prob to zero if minLength is not reached
if (!eosTokenId.isNaN && curLen < minOutputLength) {
// create eosTokenId boolean mask
val isTokenLogit_eosToken = for (token <- 0 until vocab_size) yield if (token == eosTokenId) true else false
val eosTokenIndices_mask = Array.fill(batch_size)(isTokenLogit_eosToken)
nextTokenLogits = for ((nextTokenLogit, bannedTokensIndex_mask) <- nextTokenLogits.zip(eosTokenIndices_mask)) yield setTensorByIndicesToValue(
nextTokenLogit, bannedTokensIndex_mask, Float.NegativeInfinity
)
}
var nextToken = Array.ofDim[Int](decoderInputs.length)
if (doSample) {
// Temperature (higher temperature => more likely to sample low probability tokens)
if (temperature != 1.0)
nextTokenLogits = for (nextTokenLogit <- nextTokenLogits) yield nextTokenLogit.map(_ / temperature.toFloat)
// Top-p/top-k filtering
nextTokenLogits = topKTopPFiltering(nextTokenLogits, topK, topP)
// Sample
nextToken = nextTokenLogits.map(input => categoricalSample(input, randomSeed))
}
else {
// Greedy decoding
nextToken = nextTokenLogits.map(input => input.indexOf(input.max))
}
var tokensToAdd = Array.ofDim[Int](decoderInputs.length)
// update generations and finished sentences
if (!eosTokenId.isNaN)
// pad finished sentences if eos_token_id exist
tokensToAdd = nextToken.zip(unfinishedSents).map(x => x._1 * x._2 + paddingTokenId * (1 - x._2))
else
tokensToAdd = nextToken
decoderInputs = decoderInputs.zip(tokensToAdd).map(x => {
x._1 ++ Array(x._2)
})
decoderOuts.foreach(_.close())
curLen += 1
if (!eosTokenId.isNaN) {
val eosInSents = tokensToAdd.map(x => if (x == eosTokenId) 1 else 0)
// if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
val isSentsUnfinishedAndTokenToAddIsEos = unfinishedSents.zip(eosInSents).map(x => x._1 * x._2)
sentLengths = sentLengths.zip(isSentsUnfinishedAndTokenToAddIsEos).map(x => x._1 * (1 - x._2) + curLen * x._2)
// unfinishedSents is set to zero if eos in sentence
unfinishedSents = unfinishedSents.zip(isSentsUnfinishedAndTokenToAddIsEos).map(x => x._1 - x._2)
}
tensorDecoder.clearTensors()
tensorDecoder.clearSession(decoderOuts)
inputIdTensors.close()
// stop when there is a eos in each sentence, or if we exceed the maximum length
// stopDecoder = curLen < maxOutputLength || unfinishedSents.max == 0
stopDecoder = (
!decoderInputs.exists(o => o.last != this.eosTokenId)
|| (decoderInputs.head.length > maxOutputLength))
}
decoderInputs
}
def createNextTokenLogitsPenalties(inputIds: Seq[Array[Int]], logits: Array[Array[Float]], repetitionPenalty: Double): Array[Array[Float]] = {
// create logit penalties for already seen inputIds
val nextTokenLogits = Array.ofDim[Array[Float]](logits.length)
for (i <- logits.indices) {
var nextTokenLogit = logits(i)
      val prevInputIds = inputIds(i).distinct
for ((prevInputId, j) <- prevInputIds.zipWithIndex) {
var logitPenalty = 1.0
if (logits(i)(prevInputId.toInt) < 0) {
logitPenalty = repetitionPenalty
}
else {
logitPenalty = 1 / repetitionPenalty
}
nextTokenLogit = nextTokenLogit.updated(prevInputId, (logitPenalty * nextTokenLogit(prevInputId)).toFloat)
}
nextTokenLogits(i) = nextTokenLogit
}
nextTokenLogits
}
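  /*
   * Illustrative only: with repetitionPenalty = 1.2, a previously generated token with logit 2.0 is
   * rescaled to 2.0 / 1.2 ≈ 1.67 and one with logit -1.0 to -1.0 * 1.2 = -1.2; either way the token
   * becomes less likely to be generated again (CTRL, https://arxiv.org/abs/1909.05858).
   */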
private def calcBannedNgramTokens(prevInputIds: Seq[Array[Int]], numHypos: Int, noRepeatNgramSize: Int, curLen: Int): Array[Array[Int]] = {
// based on fairseq for noRepeatNgram in beam_search
if (curLen + 1 < noRepeatNgramSize)
// return no banned tokens if we haven't generated noRepeatNgram_size tokens yet
return Array.ofDim[Int](numHypos, 0)
val generatedNgrams = Array.tabulate(numHypos)(_ => mutable.Map.empty[IndexedSeq[Int], List[Int]])
for (idx <- 0 until numHypos) {
val genTokens = prevInputIds(idx)
val generatedNgram = generatedNgrams(idx)
val ngramArrays = for (e <- 0 until noRepeatNgramSize) yield genTokens.drop(e)
for (ngramInd <- ngramArrays.last.indices) {
val ngram = for (e <- ngramArrays) yield e(ngramInd)
val prevNgramTuple = ngram.dropRight(1)
generatedNgram(prevNgramTuple) = generatedNgram.getOrElse(prevNgramTuple, List.empty[Int]) :+ ngram.last
}
}
(for (hypoIdx <- 0 until numHypos) yield getGeneratedNgrams(prevInputIds, generatedNgrams, hypoIdx, curLen, noRepeatNgramSize)).toArray
}
def getGeneratedNgrams(prevInputIds: Seq[Array[Int]], generatedNgrams: Array[mutable.Map[IndexedSeq[Int], List[Int]]], hypoIdx: Int, curLen: Int, noRepeatNgramSize: Int): Array[Int] = {
// Before decoding the next token, prevent decoding of ngrams that have already appeared
val startIdx = curLen + 1 - noRepeatNgramSize
val ngramIdx = prevInputIds(hypoIdx).slice(startIdx, curLen)
generatedNgrams(hypoIdx).getOrElse(ngramIdx, List.empty[Int]).toArray
}
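/*
* Worked example (hypothetical values): with noRepeatNgramSize = 2 and a hypothesis whose tokens so
* far are [5, 3, 5], the recorded bigrams are 5 -> 3 and 3 -> 5. The current suffix of length 1 is
* [5], so token 3 is returned as banned, preventing the bigram (5, 3) from being generated twice.
*/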
private def topKTopPFiltering(logits: Array[Array[Float]], topK: Int, topP: Double, filterValue: Float = Float.NegativeInfinity, minTokensToKeep: Int = 1): Array[Array[Float]] = {
/**
* Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
* *
* Args:
* logits: logits distribution shape (batch size, vocabulary size)
* if topK > 0: keep only top k tokens with highest probability (top-k filtering).
* if topP < 1.0: keep the top tokens with cumulative probability >= topP (nucleus filtering).
* Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
* Make sure we keep at least minTokensToKeep per batch example in the output
* From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
* */
var logitsUpd = logits
val logitsShape = Array(logits.length, logits(0).length)
if (topK > 0) {
val topKup = topK.max(minTokensToKeep).min(logitsShape.last) // Safety check
/** Remove all tokens with a probability less than the last token of the top-k */
val removeLimit = logits(0).sortWith(_ > _).take(topKup).min
val indicesToRemove = for (logit <- logits) yield for (elem <- logit) yield if (elem < removeLimit) true else false
logitsUpd = for ((nextTokenLogit, indexToRemove) <- logits.zip(indicesToRemove)) yield setTensorByIndicesToValue(
nextTokenLogit, indexToRemove, Float.NegativeInfinity
)
}
if (topP < 1.0) {
val (sortedLogits, sortedIndices) = logits(0).zipWithIndex.sorted.reverse.unzip
val cumulativeProbs = scanLeft(softmax(sortedLogits))(0.0)(_ + _).drop(1)
/** Remove tokens with cumulative probability above the threshold (tokens with 0 are kept) */
var sortedIndicesToRemove = for (prob <- cumulativeProbs) yield if (prob > topP) true else false
if (minTokensToKeep > 1) {
/** Keep at least minTokensToKeep (set to minTokensToKeep-1 because we add the first one below) */
sortedIndicesToRemove = List.fill(sortedIndicesToRemove.take(minTokensToKeep).length)(false) ++ sortedIndicesToRemove.drop(minTokensToKeep)
}
/** Shift the indices to the right to keep also the first token above the threshold */
sortedIndicesToRemove = sortedIndicesToRemove.takeRight(1) ++ sortedIndicesToRemove.dropRight(1)
sortedIndicesToRemove = List.fill(sortedIndicesToRemove.take(1).length)(false) ++ sortedIndicesToRemove.drop(1)
/** scatter sorted tensors to original indexing */
val indicesToRemove = scatterValuesOnBatchIndices(sortedIndicesToRemove, sortedIndices)
logitsUpd = for ((nextTokenLogit, indexToRemove) <- logits.zip(IndexedSeq.fill(logits.length)(indicesToRemove))) yield setTensorByIndicesToValue(
nextTokenLogit, indexToRemove.toIndexedSeq, Float.NegativeInfinity
)
}
logitsUpd
}
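/*
* Worked example (hypothetical values): for a 4-token distribution with softmax probabilities
* (0.5, 0.3, 0.15, 0.05), topK = 2 masks everything except the two highest logits, and topP = 0.8
* masks tokens once the cumulative probability exceeds 0.8 while always keeping the first token
* above the threshold, so the 0.5 and 0.3 tokens survive and the rest are set to -infinity.
*/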
private def scanLeft[a, b](xs: Iterable[a])(s: b)(f: (b, a) => b) =
xs.foldLeft(List(s))((acc, x) => f(acc.head, x) :: acc).reverse
private def scatterValuesOnBatchIndices(values: List[Boolean], batchIndices: Array[Int]): List[Boolean] = {
// scatter values to pair indices
val (_, initArray) = batchIndices.zip(values).sorted.unzip
initArray.toList
}
private def softmax(values: Array[Float]): Array[Float] = {
val expElem = values.map(exp(_))
val total = expElem.sum
expElem.map(_ / total).map(_.toFloat)
}
private def setTensorByIndicesToValue(prevInputIds: Array[Float], indices: IndexedSeq[Boolean], value: Float): Array[Float] = {
for ((inputId, index) <- prevInputIds.zip(indices)) yield if (index) value else inputId
}
private def categoricalSample(dist: Array[Float], randomSeed: Option[Int]): Int = {
val (distFiltered, indices) = dist.zipWithIndex.filter { case (elem, index) => !elem.isInfinite }.sorted.unzip
if (distFiltered.length == 1)
return indices(0)
// val distMinValue = distFiltered.min
// val distRange = distFiltered.max - distMinValue
// val normalized = distFiltered.map(i => (i - distMinValue)/distRange)
val normalized = softmax(distFiltered)
var randomDouble = 0.0
if (randomSeed.isDefined)
randomDouble = new scala.util.Random(randomSeed.get).nextDouble()
else
randomDouble = scala.util.Random.nextDouble()
var accum = 0.0
for ((itemProb, i) <- normalized.zip(indices)) {
accum += itemProb
if (accum >= randomDouble) {
return i
}
}
indices(0)
}
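/*
* Worked example (hypothetical values): suppose only two tokens survive filtering, indices 7 and 12,
* with softmax-normalised probabilities 0.3 and 0.7 (candidates are visited in ascending logit
* order). The cumulative sums are 0.3 and 1.0, so a uniform draw of 0.2 returns index 7 and a draw
* of 0.42 returns index 12.
*/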
def decode(sentences: Array[Array[Int]]): Seq[String] = {
sentences.map(s => bpeTokenizer.decodeTokens(s))
}
def encode(sentences: Seq[Annotation], task: String): Seq[Array[Int]] = {
SentenceSplit.unpack(sentences).map(
s => {
val sentWithTask = if (task.nonEmpty)
new Sentence(
content = task.concat(" ").concat(s.content),
start = s.start,
end = s.end + task.length + 1,
index = s.index,
metadata = s.metadata
)
else s
bpeTokenizer.tokenize(sentWithTask).map(bpeTokenizer.encode).flatMap(_.map(_.pieceId))
})
}
}
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/ml/tensorflow/TensorflowGPT2.scala | Scala | apache-2.0 | 19,163 |
package edu.rice.habanero.benchmarks.radixsort
import java.util.Random
import edu.rice.habanero.actors.{LiftActor, LiftActorState, LiftPool}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object RadixSortLiftActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new RadixSortLiftActorBenchmark)
}
private final class RadixSortLiftActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
RadixSortConfig.parseArgs(args)
}
def printArgInfo() {
RadixSortConfig.printArgs()
}
def runIteration() {
val validationActor = new ValidationActor(RadixSortConfig.N)
validationActor.start()
val sourceActor = new IntSourceActor(RadixSortConfig.N, RadixSortConfig.M, RadixSortConfig.S)
sourceActor.start()
var radix = RadixSortConfig.M / 2
var nextActor: LiftActor[AnyRef] = validationActor
while (radix > 0) {
val sortActor = new SortActor(RadixSortConfig.N, radix, nextActor)
sortActor.start()
radix /= 2
nextActor = sortActor
}
sourceActor.send(NextActorMessage(nextActor))
LiftActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
if (lastIteration) {
LiftPool.shutdown()
}
}
}
private case class NextActorMessage(actor: LiftActor[AnyRef])
private case class ValueMessage(value: Long)
private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long) extends LiftActor[AnyRef] {
val random = new Random(seed)
override def process(msg: AnyRef) {
msg match {
case nm: NextActorMessage =>
var i = 0
while (i < numValues) {
val candidate = Math.abs(random.nextLong()) % maxValue
val message = new ValueMessage(candidate)
nm.actor.send(message)
i += 1
}
exit()
}
}
}
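/**
* One stage of a least-significant-bit radix sort: values whose `radix` bit is unset are forwarded
* to `nextActor` immediately, values with the bit set are buffered and forwarded once all
* `numValues` messages have been seen, preserving the relative order established by lower-bit stages.
*/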
private class SortActor(numValues: Int, radix: Long, nextActor: LiftActor[AnyRef]) extends LiftActor[AnyRef] {
private val orderingArray = Array.ofDim[ValueMessage](numValues)
private var valuesSoFar = 0
private var j = 0
override def process(msg: AnyRef): Unit = {
msg match {
case vm: ValueMessage =>
valuesSoFar += 1
val current = vm.value
if ((current & radix) == 0) {
nextActor.send(vm)
} else {
orderingArray(j) = vm
j += 1
}
if (valuesSoFar == numValues) {
var i = 0
while (i < j) {
nextActor.send(orderingArray(i))
i += 1
}
exit()
}
}
}
}
private class ValidationActor(numValues: Int) extends LiftActor[AnyRef] {
private var sumSoFar = 0.0
private var valuesSoFar = 0
private var prevValue = 0L
private var errorValue = (-1L, -1)
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
valuesSoFar += 1
if (vm.value < prevValue && errorValue._1 < 0) {
errorValue = (vm.value, valuesSoFar - 1)
}
prevValue = vm.value
sumSoFar += prevValue
if (valuesSoFar == numValues) {
if (errorValue._1 >= 0) {
println("ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
} else {
println("Elements sum: " + sumSoFar)
}
exit()
}
}
}
}
}
| smarr/savina | src/main/scala/edu/rice/habanero/benchmarks/radixsort/RadixSortLiftActorBenchmark.scala | Scala | gpl-2.0 | 3,709 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.cosmosdb
import io.netty.util.ResourceLeakDetector
import io.netty.util.ResourceLeakDetector.Level
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.apache.openwhisk.core.entity.size._
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreBehavior
@RunWith(classOf[JUnitRunner])
class CosmosDBArtifactStoreTests extends FlatSpec with CosmosDBStoreBehaviorBase with ArtifactStoreBehavior {
override protected def maxAttachmentSizeWithoutAttachmentStore = 1.MB
private var initialLevel: Level = _
override protected def beforeAll(): Unit = {
RecordingLeakDetectorFactory.register()
initialLevel = ResourceLeakDetector.getLevel
ResourceLeakDetector.setLevel(Level.PARANOID)
super.beforeAll()
}
override def afterAll(): Unit = {
super.afterAll()
ResourceLeakDetector.setLevel(initialLevel)
//Try triggering GC which may trigger leak detection logic
System.gc()
withClue("Recorded leak count should be zero") {
RecordingLeakDetectorFactory.counter.cur shouldBe 0
}
}
behavior of "CosmosDB Setup"
it should "be configured with default throughput" in {
//Trigger loading of the db
val stores = Seq(entityStore, authStore, activationStore)
stores.foreach { s =>
val doc = s.asInstanceOf[CosmosDBArtifactStore[_]].documentCollection()
val offer = client
.queryOffers(s"SELECT * from c where c.offerResourceId = '${doc.getResourceId}'", null)
.blockingOnlyResult()
.get
withClue(s"Collection ${doc.getId} : ") {
offer.getThroughput shouldBe storeConfig.throughput
}
}
}
}
| starpit/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/cosmosdb/CosmosDBArtifactStoreTests.scala | Scala | apache-2.0 | 2,530 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io._
import java.nio.ByteBuffer
import scala.reflect.ClassTag
import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.ByteBufferInputStream
import org.apache.spark.util.Utils
private[spark] class JavaSerializationStream(
out: OutputStream, counterReset: Int, extraDebugInfo: Boolean)
extends SerializationStream {
private val objOut = new ObjectOutputStream(out)
private var counter = 0
/**
* Calling reset to avoid memory leak:
* http://stackoverflow.com/questions/1281549/memory-leak-traps-in-the-java-standard-api
* But only call it every 100th time to avoid bloated serialization streams (when
* the stream 'resets', object class descriptions have to be re-written)
*/
def writeObject[T: ClassTag](t: T): SerializationStream = {
try {
objOut.writeObject(t)
} catch {
case e: NotSerializableException if extraDebugInfo =>
throw SerializationDebugger.improveException(t, e)
}
counter += 1
if (counterReset > 0 && counter >= counterReset) {
objOut.reset()
counter = 0
}
this
}
def flush() { objOut.flush() }
def close() { objOut.close() }
}
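// The reset interval above comes from the "spark.serializer.objectStreamReset" setting read by
// JavaSerializer below; an illustrative override (value hypothetical):
//   new SparkConf().set("spark.serializer.objectStreamReset", "50")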
private[spark] class JavaDeserializationStream(in: InputStream, loader: ClassLoader)
extends DeserializationStream {
private val objIn = new ObjectInputStream(in) {
override def resolveClass(desc: ObjectStreamClass): Class[_] =
Class.forName(desc.getName, false, loader)
}
def readObject[T: ClassTag](): T = objIn.readObject().asInstanceOf[T]
def close() { objIn.close() }
}
private[spark] class JavaSerializerInstance(
counterReset: Int, extraDebugInfo: Boolean, defaultClassLoader: ClassLoader)
extends SerializerInstance {
override def serialize[T: ClassTag](t: T): ByteBuffer = {
val bos = new ByteArrayOutputStream()
val out = serializeStream(bos)
out.writeObject(t)
out.close()
ByteBuffer.wrap(bos.toByteArray)
}
override def deserialize[T: ClassTag](bytes: ByteBuffer): T = {
val bis = new ByteBufferInputStream(bytes)
val in = deserializeStream(bis)
in.readObject()
}
override def deserialize[T: ClassTag](bytes: ByteBuffer, loader: ClassLoader): T = {
val bis = new ByteBufferInputStream(bytes)
val in = deserializeStream(bis, loader)
in.readObject()
}
override def serializeStream(s: OutputStream): SerializationStream = {
new JavaSerializationStream(s, counterReset, extraDebugInfo)
}
override def deserializeStream(s: InputStream): DeserializationStream = {
new JavaDeserializationStream(s, defaultClassLoader)
}
def deserializeStream(s: InputStream, loader: ClassLoader): DeserializationStream = {
new JavaDeserializationStream(s, loader)
}
}
/**
* :: DeveloperApi ::
* A Spark serializer that uses Java's built-in serialization.
*
* Note that this serializer is not guaranteed to be wire-compatible across different versions of
* Spark. It is intended to be used to serialize/de-serialize data within a single
* Spark application.
*/
@DeveloperApi
class JavaSerializer(conf: SparkConf) extends Serializer with Externalizable {
private var counterReset = conf.getInt("spark.serializer.objectStreamReset", 100)
private var extraDebugInfo = conf.getBoolean("spark.serializer.extraDebugInfo", true)
protected def this() = this(new SparkConf()) // For deserialization only
override def newInstance(): SerializerInstance = {
val classLoader = defaultClassLoader.getOrElse(Thread.currentThread.getContextClassLoader)
new JavaSerializerInstance(counterReset, extraDebugInfo, classLoader)
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
out.writeInt(counterReset)
out.writeBoolean(extraDebugInfo)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
counterReset = in.readInt()
extraDebugInfo = in.readBoolean()
}
}
| andrewor14/iolap | core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala | Scala | apache-2.0 | 4,798 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import java.util.{Date, Properties}
import com.vividsolutions.jts.geom.Envelope
import org.geotools.data.Query
import org.geotools.filter.text.ecql.ECQL
import org.geotools.filter.visitor.ExtractBoundsFilterVisitor
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.joda.time.{DateTime, DateTimeZone}
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.index.RecordIndex
import org.locationtech.geomesa.accumulo.{AccumuloFeatureIndexType, TestWithMultipleSfts}
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.iterators.DensityScan
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class DensityIteratorTest extends Specification with TestWithMultipleSfts {
sequential
val testData : Map[String,String] = {
val dataFile = new Properties
dataFile.load(getClass.getClassLoader.getResourceAsStream("data/density-iterator.properties"))
dataFile.toMap
}
val date = new DateTime("2012-01-01T19:00:00", DateTimeZone.UTC).toDate.getTime
def spec(binding: String) = s"an_id:java.lang.Integer,attr:java.lang.Double,dtg:Date,*geom:$binding:srid=4326"
def getDensity(sftName: String,
query: String,
envelope: Option[Envelope] = None,
strategy: Option[AccumuloFeatureIndexType] = None): List[(Double, Double, Double)] = {
val q = new Query(sftName, ECQL.toFilter(query))
val geom = envelope.getOrElse(q.getFilter.accept(ExtractBoundsFilterVisitor.BOUNDS_VISITOR, null).asInstanceOf[Envelope])
q.getHints.put(QueryHints.DENSITY_BBOX, new ReferencedEnvelope(geom, DefaultGeographicCRS.WGS84))
q.getHints.put(QueryHints.DENSITY_WIDTH, 500)
q.getHints.put(QueryHints.DENSITY_HEIGHT, 500)
strategy.foreach(s => q.getHints.put(QueryHints.QUERY_INDEX, s.identifier))
val decode = DensityScan.decodeResult(geom, 500, 500)
SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(q).features).flatMap(decode).toList
}
"DensityIterator" should {
"do density calc on points" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("Point"))
val features =
addFeatures(sft, (0 until 150).map { i =>
// space out the points very slightly around 5 primary longitudes 1 degree apart
val lon = (i / 30) + 1 + (Random.nextDouble() - 0.5) / 1000.0
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, s"POINT($lon 37)")
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, -1, 33, 6, 40)"
val density = getDensity(sft.getTypeName, q)
density.length must beLessThan(150)
density.map(_._3).sum mustEqual 150
val compiled = density.groupBy(d => (d._1, d._2)).map { case (pt, group) => group.map(_._3).sum }
// should be 5 bins of 30
compiled must haveLength(5)
forall(compiled)(_ mustEqual 30)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -1, 33, 6, 40) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.length must beLessThan(150)
density.map(_._3).sum mustEqual 150
val compiled = density.groupBy(d => (d._1, d._2)).map { case (pt, group) => group.map(_._3).sum }
// should be 5 bins of 30
compiled must haveLength(5)
forall(compiled)(_ mustEqual 30)
}
"with record index" >> {
val q = "INCLUDE"
val density = getDensity(sft.getTypeName, q, Some(new Envelope(-180, 180, -90, 90)), Some(RecordIndex))
density.length must beLessThan(150)
density.map(_._3).sum mustEqual 150
val compiled = density.groupBy(d => (d._1, d._2)).map { case (pt, group) => group.map(_._3).sum }
// should be 5 bins of 30
compiled must haveLength(5)
forall(compiled)(_ mustEqual 30)
}
}
"do density calc on a realistic polygon" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("Polygon"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[POLYGON] Charlottesville"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, -78.598118, 37.992204, -78.337364, 38.091238)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -78.598118, 37.992204, -78.337364, 38.091238) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
"do density calc on a realistic multilinestring" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("MultiLineString"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[MULTILINE] Cherry Avenue entirety"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, -78.511236, 38.019947, -78.485830, 38.030265)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -78.511236, 38.019947, -78.485830, 38.030265) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
"do density calc on a realistic linestring" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("LineString"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[LINE] Cherry Avenue segment"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, -78.511236, 38.019947, -78.485830, 38.030265)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -78.511236, 38.019947, -78.485830, 38.030265) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
"do density calc on a linestring with multiLine intersect" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("LineString"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[LINE] Line to MultiLine segment"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, -78.541236, 38.019947, -78.485830, 38.060265)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -78.511236, 38.019947, -78.485830, 38.030265) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
"do density calc on a simplistic multi polygon" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("MultiPolygon"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[MULTIPOLYGON] test box"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, 0.0, 0.0, 10.0, 10.0)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -1.0, -1.0, 11.0, 11.0) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
"do density calc on a simplistic linestring" >> {
var sft: SimpleFeatureType = null
"add features" >> {
sft = createNewSchema(spec("LineString"))
addFeatures(sft, (0 until 15).toArray.map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, i.toString)
sf.setAttribute(1, "1.0")
sf.setAttribute(2, new Date(date + i * 60000))
sf.setAttribute(3, testData("[LINE] test line"))
sf
})
ok
}
"with spatial index" >> {
val q = "BBOX(geom, 0.0, 0.0, 10.0, 10.0)"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
"with spatio-temporal index" >> {
val q = "BBOX(geom, -1.0, -1.0, 11.0, 11.0) AND " +
"dtg between '2012-01-01T18:00:00.000Z' AND '2012-01-01T23:00:00.000Z'"
val density = getDensity(sft.getTypeName, q)
density.map(_._3).sum must beGreaterThan(0.0)
}
}
}
}
| ronq/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/iterators/DensityIteratorTest.scala | Scala | apache-2.0 | 11,501 |
package com.foxmail.ruifengz.optimization
import breeze.linalg.{norm, Vector, DenseVector}
/**
* Created by zrf on 12/4/15.
*/
abstract class Regularizer extends Serializable {
/**
*
* @param weights current weights vector
* @param grad gradient WITHOUT regularization; updated in place with the regularization's gradient contribution
* @return regularization value
*/
def compute(weights: Vector[Double], grad: Vector[Double]): Double
}
class EmptyRegularizer extends Regularizer {
/**
*
* @param weights current weights vector
* @param grad gradient WITHOUT regularization; updated in place with the regularization's gradient contribution
* @return regularization value
*/
override def compute(weights: Vector[Double], grad: Vector[Double]): Double = 0.0
}
class L2Regularizer(val regParam: Double) extends Regularizer {
/**
*
* @param weights current weights vector
* @param grad gradient WITHOUT regularization; updated in place with the regularization's gradient contribution
* @return regularization value
*/
override def compute(weights: Vector[Double], grad: Vector[Double]): Double = {
grad :-= weights * regParam
weights.dot(weights) * regParam / 2.0
}
}
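/**
* Illustrative sketch, not part of the original source: an L1 regularizer that follows the same
* in-place update convention as L2Regularizer above, using sign(w) as the (sub)gradient of |w|.
*/
class L1Regularizer(val regParam: Double) extends Regularizer {
override def compute(weights: Vector[Double], grad: Vector[Double]): Double = {
grad :-= weights.map(w => math.signum(w)) * regParam
norm(weights, 1) * regParam
}
}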
| zhengruifeng/spark-optimization | src/main/scala/com/foxmail/ruifengz/optimization/Regularizer.scala | Scala | apache-2.0 | 1,198 |
package wow.auth
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}
import wow.Application
import wow.auth.AccountsState.{AccountIdentifier, IsOnline, NotifyAccountOnline}
import scala.collection.mutable
/**
* Tracks online state for every account
*/
class AccountsState extends Actor with ActorLogging {
/**
* Map of actor ref representing account (NetworkWorker) by account identifier (login -> NetworkWorker ref)
*/
private val accountRefById = new mutable.HashMap[AccountIdentifier, ActorRef]()
/**
* Map of account identifier by actor ref (NetworkWorker ref -> login)
*/
private val accountByActor = new mutable.HashMap[ActorRef, AccountIdentifier]()
override def receive: Receive = {
case NotifyAccountOnline(id, networkWorker) =>
accountRefById(id) = networkWorker
accountByActor(networkWorker) = id
context.watch(networkWorker)
case Terminated(subject) =>
accountByActor.remove(subject).foreach(id => accountRefById.remove(id))
case IsOnline(id) =>
sender ! accountRefById.contains(id)
}
}
object AccountsState {
type AccountIdentifier = String
def props: Props = Props(new AccountsState)
val PreferredName = "AccountsState"
val ActorPath = s"${Application.ActorPath}/$PreferredName"
/**
* Marks an account as online and ties its online state to the NetworkWorker
*
* @param id account id
* @param networkWorker associated network worker
*/
case class NotifyAccountOnline(id: AccountIdentifier, networkWorker: ActorRef)
/**
* Asks if an account is online.
*
* @param id account identifier
*/
case class IsOnline(id: AccountIdentifier)
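// Illustrative usage from a requester (actor reference, timeout and login value are hypothetical):
//   import akka.pattern.ask
//   val online: Future[Boolean] = (accountsState ? IsOnline("someLogin")).mapTo[Boolean]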
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/auth/AccountsState.scala | Scala | mit | 1,714 |
package uk.gov.gds.ier.transaction.forces.statement
import uk.gov.gds.ier.validation._
import play.api.data.Forms._
import uk.gov.gds.ier.model._
import scala.Some
import play.api.data.validation.{Invalid, Valid, Constraint}
import uk.gov.gds.ier.transaction.forces.InprogressForces
trait StatementForms extends StatementConstraints {
self: FormKeys
with ErrorMessages =>
lazy val statementMapping = mapping(
keys.forcesMember.key -> optional(boolean),
keys.partnerForcesMember.key -> optional(boolean)
) (
Statement.apply
) (
Statement.unapply
)
val statementForm = ErrorTransformForm(
mapping(
keys.statement.key -> optional(statementMapping)
) (
statement => InprogressForces(statement = statement)
) (
inprogress => Some(inprogress.statement)
).verifying (atLeastOneStatementSelected)
)
}
trait StatementConstraints {
self: ErrorMessages
with FormKeys =>
lazy val atLeastOneStatementSelected = Constraint[InprogressForces](keys.statement.key) {
application =>
application.statement match {
case None => Invalid("Please answer this question", keys.statement)
case _ => Valid
}
}
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/forces/statement/StatementForms.scala | Scala | mit | 1,202 |
package io.buoyant.router
import com.twitter.finagle.buoyant.h2.{H2FailureAccrualFactory, Headers, Request, Response, param => h2param, Stream}
import com.twitter.finagle.buoyant.{H2 => FinagleH2}
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.{param, _}
import com.twitter.finagle.server.StackServer
import com.twitter.util.Future
import java.net.SocketAddress
import com.twitter.finagle.liveness.FailureAccrualFactory
import com.twitter.finagle.service.StatsFilter
import io.buoyant.router.context.ResponseClassifierCtx
import io.buoyant.router.context.h2.H2ClassifierCtx
import io.buoyant.router.h2.{ClassifiedRetries => H2ClassifiedRetries, _}
import io.buoyant.router.http.{ForwardClientCertFilter, MaxCallDepthFilter}
import io.buoyant.router.H2Instances._
import io.buoyant.router.DiscardingFactoryToService.RequestDiscarder
object H2 extends Router[Request, Response]
with Client[Request, Response]
with Server[Request, Response] {
/*
* Router
*/
case class Identifier(mk: Stack.Params => RoutingFactory.Identifier[Request])
implicit private[buoyant] object Identifier extends Stack.Param[Identifier] {
private[this] val nilF =
Future.value(new RoutingFactory.UnidentifiedRequest[Request]("no request identifier"))
private[this] val nil = (_req: Request) => nilF
val default = Identifier(params => nil)
}
object Router {
val requestDiscarder = RequestDiscarder[Request](x => {
Stream.readToEnd(x.stream)
()
})
implicit val discarderParam = RequestDiscarder.param[Request]
val pathStack: Stack[ServiceFactory[Request, Response]] = {
val stk = h2.ViaHeaderFilter.module +: h2.ClassifierFilter.module +:
StackRouter.newPathStack[Request, Response]
stk.replace(
ResponseClassifierCtx.Setter.role,
H2ClassifierCtx.Setter.module[Request, Response]
).replace(ClassifiedRetries.role, H2ClassifiedRetries.module)
.replace(StatsFilter.role, StreamStatsFilter.module)
}
val boundStack: Stack[ServiceFactory[Request, Response]] =
StackRouter.newBoundStack
val clientStack: Stack[ServiceFactory[Request, Response]] = {
val stk = FinagleH2.Client.newStack
(ForwardClientCertFilter.module[Request, Headers, Response] +: StackRouter.Client.mkStack(stk))
.replace(PerDstPathStatsFilter.role, PerDstPathStreamStatsFilter.module)
.replace(LocalClassifierStatsFilter.role, LocalClassifierStreamStatsFilter.module)
.replace(FailureAccrualFactory.role, H2FailureAccrualFactory.module)
}
val defaultParams = StackRouter.defaultParams +
param.ProtocolLibrary("h2") + requestDiscarder
}
case class Router(
pathStack: Stack[ServiceFactory[Request, Response]] = Router.pathStack,
boundStack: Stack[ServiceFactory[Request, Response]] = Router.boundStack,
client: StackClient[Request, Response] = FinagleH2.Client(Router.clientStack),
params: Stack.Params = Router.defaultParams
) extends StdStackRouter[Request, Response, Router] {
protected def copy1(
pathStack: Stack[ServiceFactory[Request, Response]] = this.pathStack,
boundStack: Stack[ServiceFactory[Request, Response]] = this.boundStack,
client: StackClient[Request, Response] = this.client,
params: Stack.Params = this.params
): Router = copy(pathStack, boundStack, client, params)
protected def newIdentifier() = params[Identifier].mk(params)
}
val router = Router()
def factory(): ServiceFactory[Request, Response] =
router.factory()
val client = FinagleH2.client
def newService(dest: Name, label: String): Service[Request, Response] =
client.newService(dest, label)
def newClient(dest: Name, label: String): ServiceFactory[Request, Response] =
client.newClient(dest, label)
object Server {
val newStack: Stack[ServiceFactory[Request, Response]] = FinagleH2.Server.newStack
.insertAfter(StackServer.Role.protoTracing, h2.ProxyRewriteFilter.module)
.insertAfter(StackServer.Role.protoTracing, H2AddForwardedHeader.module)
.insertAfter(StackServer.Role.protoTracing, MaxCallDepthFilter.module[Request, Headers, Response](Headers.Via))
.replace(StatsFilter.role, StreamStatsFilter.module)
private val serverResponseClassifier =
ClassifierFilter.SuccessClassClassifier
val defaultParams: Stack.Params =
StackServer.defaultParams + h2param.H2Classifier(serverResponseClassifier)
}
val server = FinagleH2.Server(Server.newStack, Server.defaultParams)
def serve(addr: SocketAddress, service: ServiceFactory[Request, Response]): ListeningServer =
server.serve(addr, service)
}
| linkerd/linkerd | router/h2/src/main/scala/io/buoyant/router/H2.scala | Scala | apache-2.0 | 4,685 |
/*
* Copyright 2010 Michael Fortin <mike@brzy.org>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.calsta
/**
* Object Column Mapping. Classes in this package are for treating objects as a set of columns.
* An example of a mapped class would look like:
* {{{
* case class Entity(id:Long, name:String) extends KeyedEntity[Long]
* object Entity extends Dao[Entity] {
* def mapping = ColumnMapping[Entity]...
* }
*
* }}}
*
* @author Michael Fortin
*/
package object ocm | m410/calista | src/main/scala/org/brzy/calista/ocm/package.scala | Scala | apache-2.0 | 1,008 |
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni
import java.io.{IOException, FileInputStream, InputStreamReader, File, BufferedWriter, FileWriter}
import djinni.generatorTools._
object Main {
def main(args: Array[String]) {
var idlFile: File = null
var cppOutFolder: Option[File] = None
var cppNamespace: String = ""
var cppIncludePrefix: String = ""
var cppExtendedRecordIncludePrefix: String = ""
var cppFileIdentStyle: IdentConverter = IdentStyle.underLower
var cppOptionalTemplate: String = "std::optional"
var cppOptionalHeader: String = "<optional>"
var cppEnumHashWorkaround : Boolean = true
var cppNnHeader: Option[String] = None
var cppNnType: Option[String] = None
var cppNnCheckExpression: Option[String] = None
var javaOutFolder: Option[File] = None
var javaPackage: Option[String] = None
var javaCppException: Option[String] = None
var javaAnnotation: Option[String] = None
var javaNullableAnnotation: Option[String] = None
var javaNonnullAnnotation: Option[String] = None
var jniOutFolder: Option[File] = None
var jniHeaderOutFolderOptional: Option[File] = None
var jniNamespace: String = "djinni_generated"
var jniClassIdentStyleOptional: Option[IdentConverter] = None
var jniIncludePrefix: String = ""
var jniIncludeCppPrefix: String = ""
var jniFileIdentStyleOptional: Option[IdentConverter] = None
var jniBaseLibClassIdentStyleOptional: Option[IdentConverter] = None
var jniBaseLibIncludePrefix: String = ""
var cppHeaderOutFolderOptional: Option[File] = None
var cppExt: String = "cpp"
var cppHeaderExt: String = "hpp"
var javaIdentStyle = IdentStyle.javaDefault
var cppIdentStyle = IdentStyle.cppDefault
var cppTypeEnumIdentStyle: IdentConverter = null
var objcOutFolder: Option[File] = None
var objcppOutFolder: Option[File] = None
var objcppExt: String = "mm"
var objcHeaderExt: String = "h"
var objcIdentStyle = IdentStyle.objcDefault
var objcTypePrefix: String = ""
var objcIncludePrefix: String = ""
var objcExtendedRecordIncludePrefix: String = ""
var objcppIncludePrefix: String = ""
var objcppIncludeCppPrefix: String = ""
var objcppIncludeObjcPrefixOptional: Option[String] = None
var objcFileIdentStyleOptional: Option[IdentConverter] = None
var objcppNamespace: String = "djinni_generated"
var objcBaseLibIncludePrefix: String = ""
var inFileListPath: Option[File] = None
var outFileListPath: Option[File] = None
var skipGeneration: Boolean = false
var yamlOutFolder: Option[File] = None
var yamlOutFile: Option[String] = None
var yamlPrefix: String = ""
val argParser = new scopt.OptionParser[Unit]("djinni") {
def identStyle(optionName: String, update: IdentConverter => Unit) = {
opt[String](optionName).valueName("...").foreach(spec =>
IdentStyle.infer(spec) match {
case None => failure("invalid ident spec: \"" + spec + "\"")
case Some(func) => update(func)
}
)
}
override def showUsageOnError = false
help("help")
opt[File]("idl").valueName("<in-file>").required().foreach(idlFile = _)
.text("The IDL file with the type definitions, typically with extension \\".djinni\\".")
note("")
opt[File]("java-out").valueName("<out-folder>").foreach(x => javaOutFolder = Some(x))
.text("The output for the Java files (Generator disabled if unspecified).")
opt[String]("java-package").valueName("...").foreach(x => javaPackage = Some(x))
.text("The package name to use for generated Java classes.")
opt[String]("java-cpp-exception").valueName("<exception-class>").foreach(x => javaCppException = Some(x))
.text("The type for translated C++ exceptions in Java (default: java.lang.RuntimeException that is not checked)")
opt[String]("java-annotation").valueName("<annotation-class>").foreach(x => javaAnnotation = Some(x))
.text("Java annotation (@Foo) to place on all generated Java classes")
opt[String]("java-nullable-annotation").valueName("<nullable-annotation-class>").foreach(x => javaNullableAnnotation = Some(x))
.text("Java annotation (@Nullable) to place on all fields and return values that are optional")
opt[String]("java-nonnull-annotation").valueName("<nonnull-annotation-class>").foreach(x => javaNonnullAnnotation = Some(x))
.text("Java annotation (@Nonnull) to place on all fields and return values that are not optional")
note("")
opt[File]("cpp-out").valueName("<out-folder>").foreach(x => cppOutFolder = Some(x))
.text("The output folder for C++ files (Generator disabled if unspecified).")
opt[File]("cpp-header-out").valueName("<out-folder>").foreach(x => cppHeaderOutFolderOptional = Some(x))
.text("The output folder for C++ header files (default: the same as --cpp-out).")
opt[String]("cpp-include-prefix").valueName("<prefix>").foreach(cppIncludePrefix = _)
.text("The prefix for #includes of header files from C++ files.")
opt[String]("cpp-namespace").valueName("...").foreach(x => cppNamespace = x)
.text("The namespace name to use for generated C++ classes.")
opt[String]("cpp-ext").valueName("<ext>").foreach(cppExt = _)
.text("The filename extension for C++ files (default: \\"cpp\\").")
opt[String]("hpp-ext").valueName("<ext>").foreach(cppHeaderExt = _)
.text("The filename extension for C++ header files (default: \\"hpp\\").")
opt[String]("cpp-optional-template").valueName("<template>").foreach(x => cppOptionalTemplate = x)
.text("The template to use for optional values (default: \\"std::optional\\")")
opt[String]("cpp-optional-header").valueName("<header>").foreach(x => cppOptionalHeader = x)
.text("The header to use for optional values (default: \\"<optional>\\")")
opt[Boolean]("cpp-enum-hash-workaround").valueName("<true/false>").foreach(x => cppEnumHashWorkaround = x)
.text("Work around LWG-2148 by generating std::hash specializations for C++ enums (default: true)")
opt[String]("cpp-nn-header").valueName("<header>").foreach(x => cppNnHeader = Some(x))
.text("The header to use for non-nullable pointers")
opt[String]("cpp-nn-type").valueName("<header>").foreach(x => cppNnType = Some(x))
.text("The type to use for non-nullable pointers (as a substitute for std::shared_ptr)")
opt[String]("cpp-nn-check-expression").valueName("<header>").foreach(x => cppNnCheckExpression = Some(x))
.text("The expression to use for building non-nullable pointers")
note("")
opt[File]("jni-out").valueName("<out-folder>").foreach(x => jniOutFolder = Some(x))
.text("The folder for the JNI C++ output files (Generator disabled if unspecified).")
opt[File]("jni-header-out").valueName("<out-folder>").foreach(x => jniHeaderOutFolderOptional = Some(x))
.text("The folder for the JNI C++ header files (default: the same as --jni-out).")
opt[String]("jni-include-prefix").valueName("<prefix>").foreach(jniIncludePrefix = _)
.text("The prefix for #includes of JNI header files from JNI C++ files.")
opt[String]("jni-include-cpp-prefix").valueName("<prefix>").foreach(jniIncludeCppPrefix = _)
.text("The prefix for #includes of the main header files from JNI C++ files.")
opt[String]("jni-namespace").valueName("...").foreach(x => jniNamespace = x)
.text("The namespace name to use for generated JNI C++ classes.")
opt[String]("jni-base-lib-include-prefix").valueName("...").foreach(x => jniBaseLibIncludePrefix = x)
.text("The JNI base library's include path, relative to the JNI C++ classes.")
note("")
opt[File]("objc-out").valueName("<out-folder>").foreach(x => objcOutFolder = Some(x))
.text("The output folder for Objective-C files (Generator disabled if unspecified).")
opt[String]("objc-h-ext").valueName("<ext>").foreach(objcHeaderExt = _)
.text("The filename extension for Objective-C[++] header files (default: \\"h\\")")
opt[String]("objc-type-prefix").valueName("<pre>").foreach(objcTypePrefix = _)
.text("The prefix for Objective-C data types (usually two or three letters)")
opt[String]("objc-include-prefix").valueName("<prefix>").foreach(objcIncludePrefix = _)
.text("The prefix for #import of header files from Objective-C files.")
note("")
opt[File]("objcpp-out").valueName("<out-folder>").foreach(x => objcppOutFolder = Some(x))
.text("The output folder for private Objective-C++ files (Generator disabled if unspecified).")
opt[String]("objcpp-ext").valueName("<ext>").foreach(objcppExt = _)
.text("The filename extension for Objective-C++ files (default: \\"mm\\")")
opt[String]("objcpp-include-prefix").valueName("<prefix>").foreach(objcppIncludePrefix = _)
.text("The prefix for #import of Objective-C++ header files from Objective-C++ files.")
opt[String]("objcpp-include-cpp-prefix").valueName("<prefix>").foreach(objcppIncludeCppPrefix = _)
.text("The prefix for #include of the main C++ header files from Objective-C++ files.")
opt[String]("objcpp-include-objc-prefix").valueName("<prefix>").foreach(x => objcppIncludeObjcPrefixOptional = Some(x))
.text("The prefix for #import of the Objective-C header files from Objective-C++ files (default: the same as --objcpp-include-prefix)")
opt[String]("cpp-extended-record-include-prefix").valueName("<prefix>").foreach(cppExtendedRecordIncludePrefix = _)
.text("The prefix path for #include of the extended record C++ header (.hpp) files")
opt[String]("objc-extended-record-include-prefix").valueName("<prefix>").foreach(objcExtendedRecordIncludePrefix = _)
.text("The prefix path for #import of the extended record Objective-C header (.h) files")
opt[String]("objcpp-namespace").valueName("<prefix>").foreach(objcppNamespace = _)
.text("The namespace name to use for generated Objective-C++ classes.")
opt[String]("objc-base-lib-include-prefix").valueName("...").foreach(x => objcBaseLibIncludePrefix = x)
.text("The Objective-C++ base library's include path, relative to the Objective-C++ classes.")
note("")
opt[File]("yaml-out").valueName("<out-folder>").foreach(x => yamlOutFolder = Some(x))
.text("The output folder for YAML files (Generator disabled if unspecified).")
opt[String]("yaml-out-file").valueName("<out-file>").foreach(x => yamlOutFile = Some(x))
.text("If specified all types are merged into a single YAML file instead of generating one file per type (relative to --yaml-out).")
opt[String]("yaml-prefix").valueName("<pre>").foreach(yamlPrefix = _)
.text("The prefix to add to type names stored in YAML files (default: \\"\\").")
note("")
opt[File]("list-in-files").valueName("<list-in-files>").foreach(x => inFileListPath = Some(x))
.text("Optional file in which to write the list of input files parsed.")
opt[File]("list-out-files").valueName("<list-out-files>").foreach(x => outFileListPath = Some(x))
.text("Optional file in which to write the list of output files produced.")
opt[Boolean]("skip-generation").valueName("<true/false>").foreach(x => skipGeneration = x)
.text("Way of specifying if file generation should be skipped (default: false)")
note("\\nIdentifier styles (ex: \\"FooBar\\", \\"fooBar\\", \\"foo_bar\\", \\"FOO_BAR\\", \\"m_fooBar\\")\\n")
identStyle("ident-java-enum", c => { javaIdentStyle = javaIdentStyle.copy(enum = c) })
identStyle("ident-java-field", c => { javaIdentStyle = javaIdentStyle.copy(field = c) })
identStyle("ident-cpp-enum", c => { cppIdentStyle = cppIdentStyle.copy(enum = c) })
identStyle("ident-cpp-field", c => { cppIdentStyle = cppIdentStyle.copy(field = c) })
identStyle("ident-cpp-method", c => { cppIdentStyle = cppIdentStyle.copy(method = c) })
identStyle("ident-cpp-type", c => { cppIdentStyle = cppIdentStyle.copy(ty = c) })
identStyle("ident-cpp-enum-type", c => { cppTypeEnumIdentStyle = c })
identStyle("ident-cpp-type-param", c => { cppIdentStyle = cppIdentStyle.copy(typeParam = c) })
identStyle("ident-cpp-local", c => { cppIdentStyle = cppIdentStyle.copy(local = c) })
identStyle("ident-cpp-file", c => { cppFileIdentStyle = c })
identStyle("ident-jni-class", c => {jniClassIdentStyleOptional = Some(c)})
identStyle("ident-jni-file", c => {jniFileIdentStyleOptional = Some(c)})
identStyle("ident-objc-enum", c => { objcIdentStyle = objcIdentStyle.copy(enum = c) })
identStyle("ident-objc-field", c => { objcIdentStyle = objcIdentStyle.copy(field = c) })
identStyle("ident-objc-method", c => { objcIdentStyle = objcIdentStyle.copy(method = c) })
identStyle("ident-objc-type", c => { objcIdentStyle = objcIdentStyle.copy(ty = c) })
identStyle("ident-objc-type-param", c => { objcIdentStyle = objcIdentStyle.copy(typeParam = c) })
identStyle("ident-objc-local", c => { objcIdentStyle = objcIdentStyle.copy(local = c) })
identStyle("ident-objc-file", c => { objcFileIdentStyleOptional = Some(c) })
}
if (!argParser.parse(args)) {
System.exit(1); return
}
val cppHeaderOutFolder = if (cppHeaderOutFolderOptional.isDefined) cppHeaderOutFolderOptional else cppOutFolder
val jniHeaderOutFolder = if (jniHeaderOutFolderOptional.isDefined) jniHeaderOutFolderOptional else jniOutFolder
val jniClassIdentStyle = jniClassIdentStyleOptional.getOrElse(cppIdentStyle.ty)
val jniBaseLibClassIdentStyle = jniBaseLibClassIdentStyleOptional.getOrElse(jniClassIdentStyle)
val jniFileIdentStyle = jniFileIdentStyleOptional.getOrElse(cppFileIdentStyle)
var objcFileIdentStyle = objcFileIdentStyleOptional.getOrElse(objcIdentStyle.ty)
val objcppIncludeObjcPrefix = objcppIncludeObjcPrefixOptional.getOrElse(objcppIncludePrefix)
// Add ObjC prefix to identstyle
objcIdentStyle = objcIdentStyle.copy(ty = IdentStyle.prefix(objcTypePrefix,objcIdentStyle.ty))
objcFileIdentStyle = IdentStyle.prefix(objcTypePrefix, objcFileIdentStyle)
if (cppTypeEnumIdentStyle != null) {
cppIdentStyle = cppIdentStyle.copy(enumType = cppTypeEnumIdentStyle)
}
// Parse IDL file.
System.out.println("Parsing...")
val inFileListWriter = if (inFileListPath.isDefined) {
createFolder("input file list", inFileListPath.get.getParentFile)
Some(new BufferedWriter(new FileWriter(inFileListPath.get)))
} else {
None
}
val idl = try {
(new Parser).parseFile(idlFile, inFileListWriter)
}
catch {
case ex: IOException =>
System.err.println("Error reading from --idl file: " + ex.getMessage)
System.exit(1); return
}
finally {
if (inFileListWriter.isDefined) {
inFileListWriter.get.close()
}
}
// Resolve names in IDL file, check types.
System.out.println("Resolving...")
resolver.resolve(meta.defaults, idl) match {
case Some(err) =>
System.err.println(err)
System.exit(1); return
case _ =>
}
System.out.println("Generating...")
val outFileListWriter = if (outFileListPath.isDefined) {
createFolder("output file list", outFileListPath.get.getParentFile)
Some(new BufferedWriter(new FileWriter(outFileListPath.get)))
} else {
None
}
val outSpec = Spec(
javaOutFolder,
javaPackage,
javaIdentStyle,
javaCppException,
javaAnnotation,
javaNullableAnnotation,
javaNonnullAnnotation,
cppOutFolder,
cppHeaderOutFolder,
cppIncludePrefix,
cppExtendedRecordIncludePrefix,
cppNamespace,
cppIdentStyle,
cppFileIdentStyle,
cppOptionalTemplate,
cppOptionalHeader,
cppEnumHashWorkaround,
cppNnHeader,
cppNnType,
cppNnCheckExpression,
jniOutFolder,
jniHeaderOutFolder,
jniIncludePrefix,
jniIncludeCppPrefix,
jniNamespace,
jniClassIdentStyle,
jniFileIdentStyle,
jniBaseLibIncludePrefix,
cppExt,
cppHeaderExt,
objcOutFolder,
objcppOutFolder,
objcIdentStyle,
objcFileIdentStyle,
objcppExt,
objcHeaderExt,
objcIncludePrefix,
objcExtendedRecordIncludePrefix,
objcppIncludePrefix,
objcppIncludeCppPrefix,
objcppIncludeObjcPrefix,
objcppNamespace,
objcBaseLibIncludePrefix,
outFileListWriter,
skipGeneration,
yamlOutFolder,
yamlOutFile,
yamlPrefix)
try {
val r = generate(idl, outSpec)
r.foreach(e => System.err.println("Error generating output: " + e))
}
finally {
if (outFileListWriter.isDefined) {
outFileListWriter.get.close()
}
}
}
}
| jrogers/djinni | src/source/Main.scala | Scala | apache-2.0 | 17,715 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.util.Utils
class BisectingKMeansSuite extends SparkFunSuite with MLlibTestSparkContext {
test("default values") {
val bkm0 = new BisectingKMeans()
assert(bkm0.getK === 4)
assert(bkm0.getMaxIterations === 20)
assert(bkm0.getMinDivisibleClusterSize === 1.0)
val bkm1 = new BisectingKMeans()
assert(bkm0.getSeed === bkm1.getSeed, "The default seed should be constant.")
}
test("setter/getter") {
val bkm = new BisectingKMeans()
val k = 10
assert(bkm.getK !== k)
assert(bkm.setK(k).getK === k)
val maxIter = 100
assert(bkm.getMaxIterations !== maxIter)
assert(bkm.setMaxIterations(maxIter).getMaxIterations === maxIter)
val minSize = 2.0
assert(bkm.getMinDivisibleClusterSize !== minSize)
assert(bkm.setMinDivisibleClusterSize(minSize).getMinDivisibleClusterSize === minSize)
val seed = 10L
assert(bkm.getSeed !== seed)
assert(bkm.setSeed(seed).getSeed === seed)
intercept[IllegalArgumentException] {
bkm.setK(0)
}
intercept[IllegalArgumentException] {
bkm.setMaxIterations(0)
}
intercept[IllegalArgumentException] {
bkm.setMinDivisibleClusterSize(0.0)
}
}
test("1D data") {
val points = Vectors.sparse(1, Array.empty, Array.empty) +:
(1 until 8).map(i => Vectors.dense(i))
val data = sc.parallelize(points, 2)
val bkm = new BisectingKMeans()
.setK(4)
.setMaxIterations(1)
.setSeed(1L)
// The clusters should be
// (0, 1, 2, 3, 4, 5, 6, 7)
// - (0, 1, 2, 3)
// - (0, 1)
// - (2, 3)
// - (4, 5, 6, 7)
// - (4, 5)
// - (6, 7)
val model = bkm.run(data)
assert(model.k === 4)
// The total cost should be 8 * 0.5 * 0.5 = 2.0.
assert(model.computeCost(data) ~== 2.0 relTol 1e-12)
val predictions = data.map(v => (v(0), model.predict(v))).collectAsMap()
Range(0, 8, 2).foreach { i =>
assert(predictions(i) === predictions(i + 1),
s"$i and ${i + 1} should belong to the same cluster.")
}
val root = model.root
assert(root.center(0) ~== 3.5 relTol 1e-12)
assert(root.height ~== 2.0 relTol 1e-12)
assert(root.children.length === 2)
assert(root.children(0).height ~== 1.0 relTol 1e-12)
assert(root.children(1).height ~== 1.0 relTol 1e-12)
}
test("points are the same") {
val data = sc.parallelize(Seq.fill(8)(Vectors.dense(1.0, 1.0)), 2)
val bkm = new BisectingKMeans()
.setK(2)
.setMaxIterations(1)
.setSeed(1L)
val model = bkm.run(data)
assert(model.k === 1)
}
test("more desired clusters than points") {
val data = sc.parallelize(Seq.tabulate(4)(i => Vectors.dense(i)), 2)
val bkm = new BisectingKMeans()
.setK(8)
.setMaxIterations(2)
.setSeed(1L)
val model = bkm.run(data)
assert(model.k === 4)
}
test("min divisible cluster") {
val data = sc.parallelize(
Seq.tabulate(16)(i => Vectors.dense(i)) ++ Seq.tabulate(4)(i => Vectors.dense(-100.0 - i)),
2)
val bkm = new BisectingKMeans()
.setK(4)
.setMinDivisibleClusterSize(10)
.setMaxIterations(1)
.setSeed(1L)
val model = bkm.run(data)
assert(model.k === 3)
assert(model.predict(Vectors.dense(-100)) === model.predict(Vectors.dense(-97)))
assert(model.predict(Vectors.dense(7)) !== model.predict(Vectors.dense(8)))
bkm.setMinDivisibleClusterSize(0.5)
val sameModel = bkm.run(data)
assert(sameModel.k === 3)
}
test("larger clusters get selected first") {
val data = sc.parallelize(
Seq.tabulate(16)(i => Vectors.dense(i)) ++ Seq.tabulate(4)(i => Vectors.dense(-100.0 - i)),
2)
val bkm = new BisectingKMeans()
.setK(3)
.setMaxIterations(1)
.setSeed(1L)
val model = bkm.run(data)
assert(model.k === 3)
assert(model.predict(Vectors.dense(-100)) === model.predict(Vectors.dense(-97)))
assert(model.predict(Vectors.dense(7)) !== model.predict(Vectors.dense(8)))
}
test("2D data") {
val points = Seq(
(11, 10), (9, 10), (10, 9), (10, 11),
(11, -10), (9, -10), (10, -9), (10, -11),
(0, 1), (0, -1)
).map { case (x, y) =>
if (x == 0) {
Vectors.sparse(2, Array(1), Array(y))
} else {
Vectors.dense(x, y)
}
}
val data = sc.parallelize(points, 2)
val bkm = new BisectingKMeans()
.setK(3)
.setMaxIterations(4)
.setSeed(1L)
val model = bkm.run(data)
assert(model.k === 3)
assert(model.root.center ~== Vectors.dense(8, 0) relTol 1e-12)
model.root.leafNodes.foreach { node =>
if (node.center(0) < 5) {
assert(node.size === 2)
assert(node.center ~== Vectors.dense(0, 0) relTol 1e-12)
} else if (node.center(1) > 0) {
assert(node.size === 4)
assert(node.center ~== Vectors.dense(10, 10) relTol 1e-12)
} else {
assert(node.size === 4)
assert(node.center ~== Vectors.dense(10, -10) relTol 1e-12)
}
}
}
test("BisectingKMeans model save/load") {
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
val points = (1 until 8).map(i => Vectors.dense(i))
val data = sc.parallelize(points, 2)
val model = new BisectingKMeans().setDistanceMeasure(DistanceMeasure.COSINE).run(data)
try {
model.save(sc, path)
val sameModel = BisectingKMeansModel.load(sc, path)
assert(model.k === sameModel.k)
assert(model.distanceMeasure === sameModel.distanceMeasure)
      model.clusterCenters.zip(sameModel.clusterCenters).foreach { case (c1, c2) => assert(c1 === c2) }
assert(model.trainingCost == sameModel.trainingCost)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
| maropu/spark | mllib/src/test/scala/org/apache/spark/mllib/clustering/BisectingKMeansSuite.scala | Scala | apache-2.0 | 6,792 |
package com.blinkboxbooks.mimir.export
import com.typesafe.config.Config
import java.sql.Connection
import java.sql.PreparedStatement
import java.sql.ResultSet
import java.sql.Statement
import javax.sql.DataSource
import org.apache.commons.dbcp.BasicDataSource
import org.squeryl.Session
import org.squeryl.adapters.MySQLAdapter
import scala.util.control.NonFatal
object DbUtils {
/**
* Create a datasource for the given MySQL DB.
*/
def createDatasource(config: Config) = {
val datasource = new BasicDataSource
datasource.setUrl(config.getString("url"))
datasource.setUsername(config.getString("username"))
datasource.setPassword(config.getString("password"))
datasource.setDriverClassName(config.getString("driver"))
datasource.setValidationQuery("SELECT 1")
datasource
}
/**
* Run the given action, providing it a session for the database, and treating any
* changes as a single transaction. On error, any changes will be rolled back.
*/
def withSession(datasource: DataSource)(fn: (Session) => Unit) = {
val connection = datasource.getConnection()
try {
connection.setAutoCommit(false)
val session: Session = Session.create(connection, new StreamingMySqlAdapter(None))
fn(session)
connection.commit()
} catch {
case NonFatal(e) => {
connection.rollback()
throw e
}
} finally {
connection.close()
}
}
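  // Illustrative usage sketch (not part of the original file): given a Typesafe Config
  // `dbConfig` holding url/username/password/driver keys, a caller can run several
  // statements as one transaction. The `dbConfig` value and the body of the block are
  // assumptions for illustration only.
  //
  //   val datasource = DbUtils.createDatasource(dbConfig)
  //   DbUtils.withSession(datasource) { implicit session =>
  //     // run Squeryl inserts/updates here; they commit together or roll back on error
  //   }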
/**
* Run the given action, providing it a session for the database.
*/
def withReadOnlySession(datasource: DataSource, fetchSize: Option[Int])(fn: (Session) => Unit) = {
val connection = datasource.getConnection()
try {
connection.setAutoCommit(false)
val session: Session = Session.create(connection, new StreamingMySqlAdapter(fetchSize))
fn(session)
} finally {
connection.close()
}
}
class StreamingMySqlAdapter(fetchSize: Option[Int]) extends MySQLAdapter {
override def prepareStatement(conn: Connection, statement: String): PreparedStatement = {
val stmt = conn.prepareStatement(statement, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
fetchSize.foreach(s => stmt.setFetchSize(s))
stmt
}
override def createStatement(conn: Connection): Statement = {
val stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
fetchSize.foreach(s => stmt.setFetchSize(s))
stmt
}
}
} | blinkboxbooks/data-exporter-service.scala | src/main/scala/com/blinkboxbooks/mimir/export/DbUtils.scala | Scala | mit | 2,467 |
package org.ensime.client
//"-XX:MaxPermSize=384m", "-XX:ReservedCodeCacheSize=192m", "-Xms1536m", "-Xmx1536m", "-Xss3m",
case class MemoryConfig(minMemMb: Int = 1536,
                        maxMemMb: Int = 1536,
                        maxPermSizeMb: Int = 384,
                        reservedCodeCacheSizeMb: Int = 192,
                        stackSizeMb: Int = 3) {
}
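// Hedged illustration (not from the original source): the comment above suggests these
// fields mirror JVM flags. A hypothetical helper that renders a MemoryConfig into the
// corresponding argument strings might look like the following; the method name
// `toJvmArgs` is an assumption, not part of the real client API.
//
//   def toJvmArgs(c: MemoryConfig): Seq[String] = Seq(
//     s"-XX:MaxPermSize=${c.maxPermSizeMb}m",
//     s"-XX:ReservedCodeCacheSize=${c.reservedCodeCacheSizeMb}m",
//     s"-Xms${c.minMemMb}m",
//     s"-Xmx${c.maxMemMb}m",
//     s"-Xss${c.stackSizeMb}m")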
| rorygraves/ensime-client | src/main/scala/org/ensime/client/MemoryConfig.scala | Scala | apache-2.0 | 371 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.data
import org.geotools.data.{FeatureListener, Transaction}
import org.locationtech.geomesa.index.geotools.GeoMesaFeatureStore
import org.locationtech.geomesa.index.planning.QueryRunner
import org.locationtech.geomesa.kafka.data.KafkaFeatureWriter.AppendKafkaFeatureWriter
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
class KafkaFeatureStore(ds: KafkaDataStore, sft: SimpleFeatureType, runner: QueryRunner, cache: KafkaCacheLoader)
extends GeoMesaFeatureStore(ds, sft, runner) {
override def removeFeatures(filter: Filter): Unit = filter match {
case Filter.INCLUDE => clearFeatures()
case _ => super.removeFeatures(filter)
}
override def addFeatureListener(listener: FeatureListener): Unit = cache.addListener(this, listener)
override def removeFeatureListener(listener: FeatureListener): Unit = cache.removeListener(this, listener)
private def clearFeatures(): Unit = {
WithClose(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT)) { writer =>
writer.asInstanceOf[AppendKafkaFeatureWriter].clear()
}
}
}
| elahrvivaz/geomesa | geomesa-kafka/geomesa-kafka-datastore/src/main/scala/org/locationtech/geomesa/kafka/data/KafkaFeatureStore.scala | Scala | apache-2.0 | 1,663 |
package sss.db
import java.sql.SQLException
import javax.sql.DataSource
import sss.ancillary.Logging
import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success, Try}
object DbInitialSqlExecutor extends Logging {
def apply(dbConfig: DbConfig, executeSql: String => FutureTx[Int])(implicit syncRunContext: SyncRunContext): Unit = {
dbConfig.deleteSqlOpt foreach { deleteSqlAry =>
deleteSqlAry.asScala.filter(_.nonEmpty) foreach { deleteSql =>
Try(executeSql(deleteSql).runSync) match {
case Failure(e: SQLException) => log.warn(s"${deleteSql} failed, maybe object doesn't exist?!", e)
case Failure(e) => throw e
case Success(deleted) => log.info(s"${deleteSql} Deleted count ${deleted}")
}
}
}
dbConfig.createSqlOpt foreach { createSqlAry =>
createSqlAry.asScala.filter(_.nonEmpty) foreach { createSql =>
Try(executeSql(createSql).runSync) match {
          case Failure(e: SQLException) => log.warn(s"Failed to create ${createSql}", e)
case Failure(e) => throw e //fail fast
case Success(created) => log.info(s"${createSql} Created count ${created}")
}
}
}
}
}
| mcsherrylabs/sss.db | src/main/scala/sss/db/DbInitialSqlExecutor.scala | Scala | gpl-3.0 | 1,288 |
package im.actor.api.rpc.raw
import akka.actor.ActorSystem
import cats.data.Xor
import im.actor.api.rpc.collections.ApiRawValue
import im.actor.api.rpc.{ ClientData, RpcError }
import scala.concurrent.Future
object RawApiRpcErrors {
val InvalidParams = RpcError(400, "INVALID_PARAMS", "", canTryAgain = true, None)
}
/**
* Base class for raw service handlers.
*/
abstract class RawApiService(system: ActorSystem) {
type Response = RpcError Xor ApiRawValue
type Handler = ClientData ⇒ Option[ApiRawValue] ⇒ PartialFunction[String, Future[Response]]
def handleRequests: Handler
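  // Illustrative sketch (not part of the original file): a concrete raw service matches on
  // the raw method name and produces either an error or a raw value. `EchoService` and the
  // "echo" method below are hypothetical and exist only for this example.
  //
  //   class EchoService(system: ActorSystem) extends RawApiService(system) {
  //     override def handleRequests: Handler = clientData => optParams => {
  //       case "echo" =>
  //         val result = optParams match {
  //           case Some(value) => Xor.right(value)
  //           case None        => Xor.left(RawApiRpcErrors.InvalidParams)
  //         }
  //         Future.successful(result)
  //     }
  //   }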
} | EaglesoftZJ/actor-platform | actor-server/actor-core/src/main/scala/im/actor/api/rpc/raw/RawApiService.scala | Scala | agpl-3.0 | 599 |
package com.twitter.inject.app.internal
import com.google.inject.util.Modules
import com.google.inject.{Module => GuiceModule, _}
import com.twitter.app.Flag
import com.twitter.inject.{TwitterBaseModule, TwitterModuleLifecycle, Injector, Logging, TwitterModule}
import scala.collection.JavaConverters._
object InstalledModules {
/* Public */
def create(
flags: Seq[Flag[_]],
modules: Seq[GuiceModule],
overrideModules: Seq[GuiceModule],
stage: Stage = Stage.PRODUCTION): InstalledModules = {
val allNonOverrideModules = {
val frameworkModules = Seq(
FlagsModule.create(flags),
TwitterTypeConvertersModule)
val composedModules = modules flatMap findInstalledModules
modules ++ composedModules ++ frameworkModules
}
val allOverrideModules = {
val composedOverrideModules = overrideModules flatMap findInstalledModules
overrideModules ++ composedOverrideModules
}
val combinedModule =
Modules.`override`(
allNonOverrideModules.asJava).
`with`(allOverrideModules.asJava)
new InstalledModules(
injector = Injector(Guice.createInjector(stage, combinedModule)),
modules =
allNonOverrideModules ++ allOverrideModules)
}
/* Private */
/**
* Recursively capture all flags in the GuiceModule object hierarchy.
*
   * Note: We will not (cannot?) traverse through a normal Guice AbstractModule to find 'installed' GuiceModules
*/
private[inject] def findModuleFlags(modules: Seq[GuiceModule]): Seq[Flag[_]] = {
(modules collect {
case injectModule: TwitterBaseModule =>
injectModule.flags ++ findModuleFlags(injectModule.modules)
}).flatten.distinct
}
/** Recursively finds all 'composed' modules */
private def findInstalledModules(module: GuiceModule): Seq[GuiceModule] = module match {
case injectModule: TwitterBaseModule =>
injectModule.modules ++
(injectModule.modules flatMap findInstalledModules)
case _ =>
Seq()
}
}
case class InstalledModules(
injector: Injector,
modules: Seq[GuiceModule])
extends Logging {
def postStartup() {
modules foreach {
case injectModule: TwitterModuleLifecycle =>
try {
injectModule.singletonStartup(injector)
} catch {
case e: Throwable =>
error("Startup method error in " + injectModule, e)
throw e
}
case _ =>
}
}
  // Note: We don't rethrow so that all modules have a chance to shut down
def shutdown() {
modules foreach {
case injectModule: TwitterModuleLifecycle =>
try {
injectModule.singletonShutdown(injector)
} catch {
case e: Throwable =>
error("Shutdown method error in " + injectModule, e)
}
case _ =>
}
}
}
| tom-chan/finatra | inject/inject-app/src/main/scala/com/twitter/inject/app/internal/InstalledModules.scala | Scala | apache-2.0 | 2,846 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.{MockFullAccountsRetriever, AccountsPreviousPeriodValidationFixture, AccountsMoneyValidationFixture}
import uk.gov.hmrc.ct.accounts.frs102.retriever.FullAccountsBoxRetriever
class AC15Spec extends AccountsMoneyValidationFixture[FullAccountsBoxRetriever] with AccountsPreviousPeriodValidationFixture[FullAccountsBoxRetriever] with MockFullAccountsRetriever {
testAccountsMoneyValidationWithMin("AC15", 0, AC15.apply)
testAccountsPreviousPoAValidation("AC15", AC15.apply)
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC15Spec.scala | Scala | apache-2.0 | 1,161 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.mesos.scheduler
import java.util.{Collections, UUID}
import java.util.concurrent.atomic.AtomicReference
import akka.actor.FSM.StateTimeout
import akka.testkit._
import com.netflix.fenzo.TaskRequest.{AssignedResources, NamedResourceSetRequest}
import com.netflix.fenzo._
import com.netflix.fenzo.functions.{Action1, Action2}
import com.netflix.fenzo.plugins.VMLeaseObject
import org.apache.flink.api.java.tuple.{Tuple2=>FlinkTuple2}
import org.apache.flink.configuration.Configuration
import org.apache.flink.mesos.scheduler.LaunchCoordinator._
import org.apache.flink.mesos.scheduler.messages._
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.mesos.Protos.{SlaveID, TaskInfo}
import org.apache.mesos.{SchedulerDriver, Protos}
import org.junit.runner.RunWith
import org.mockito.Mockito.{verify, _}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.mockito.{Matchers => MM, Mockito}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.collection.JavaConverters._
import org.apache.flink.mesos.Utils.range
import org.apache.flink.mesos.Utils.ranges
import org.apache.flink.mesos.Utils.scalar
@RunWith(classOf[JUnitRunner])
class LaunchCoordinatorTest
extends TestKitBase
with ImplicitSender
with WordSpecLike
with Matchers
with BeforeAndAfterAll {
lazy val config = new Configuration()
implicit lazy val system = AkkaUtils.createLocalActorSystem(config)
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
def randomFramework = {
Protos.FrameworkID.newBuilder().setValue(UUID.randomUUID.toString).build
}
def randomTask = {
val taskID = Protos.TaskID.newBuilder.setValue(UUID.randomUUID.toString).build
def generateTaskRequest = {
new TaskRequest() {
private[mesos] val assignedResources = new AtomicReference[TaskRequest.AssignedResources]
override def getId: String = taskID.getValue
override def taskGroupName: String = ""
override def getCPUs: Double = 1.0
override def getMemory: Double = 1024.0
override def getNetworkMbps: Double = 0.0
override def getDisk: Double = 0.0
override def getPorts: Int = 1
override def getCustomNamedResources: java.util.Map[String, NamedResourceSetRequest] =
Collections.emptyMap[String, NamedResourceSetRequest]
override def getSoftConstraints: java.util.List[_ <: VMTaskFitnessCalculator] = null
override def getHardConstraints: java.util.List[_ <: ConstraintEvaluator] = null
override def getAssignedResources: AssignedResources = assignedResources.get()
override def setAssignedResources(assignedResources: AssignedResources): Unit = {
this.assignedResources.set(assignedResources)
}
}
}
val task: LaunchableTask = new LaunchableTask() {
override def taskRequest: TaskRequest = generateTaskRequest
override def launch(
slaveId: SlaveID,
taskAssignment: TaskAssignmentResult): Protos.TaskInfo = {
Protos.TaskInfo.newBuilder
.setTaskId(taskID).setName(taskID.getValue)
.setCommand(Protos.CommandInfo.newBuilder.setValue("whoami"))
.setSlaveId(slaveId)
.build()
}
override def toString = taskRequest.getId
}
(taskID, task)
}
def randomSlave = {
val slaveID = Protos.SlaveID.newBuilder.setValue(UUID.randomUUID.toString).build
val hostname = s"host-${slaveID.getValue}"
(slaveID, hostname)
}
def randomOffer(frameworkID: Protos.FrameworkID, slave: (Protos.SlaveID, String)) = {
val offerID = Protos.OfferID.newBuilder().setValue(UUID.randomUUID.toString)
Protos.Offer.newBuilder()
.setFrameworkId(frameworkID)
.setId(offerID)
.setSlaveId(slave._1)
.setHostname(slave._2)
.addResources(scalar("cpus", "*", 0.75))
.addResources(scalar("mem", "*", 4096.0))
.addResources(scalar("disk", "*", 1024.0))
.addResources(ranges("ports", "*", range(9000, 9001)))
.build()
}
def lease(offer: Protos.Offer) = {
new VMLeaseObject(offer)
}
/**
* Mock a successful task assignment result matching a task to an offer.
*/
def taskAssignmentResult(lease: VirtualMachineLease, task: TaskRequest): TaskAssignmentResult = {
val ports = lease.portRanges().get(0)
val assignedPorts = ports.getBeg to ports.getBeg + task.getPorts
val r = mock(classOf[TaskAssignmentResult])
when(r.getTaskId).thenReturn(task.getId)
when(r.getHostname).thenReturn(lease.hostname())
when(r.getAssignedPorts).thenReturn(
assignedPorts.toList.asJava.asInstanceOf[java.util.List[Integer]])
when(r.getRequest).thenReturn(task)
when(r.isSuccessful).thenReturn(true)
when(r.getFitness).thenReturn(1.0)
r
}
/**
* Mock a VM assignment result with the given leases and tasks.
*/
def vmAssignmentResult(hostname: String,
leasesUsed: Seq[VirtualMachineLease],
tasksAssigned: Set[TaskAssignmentResult]): VMAssignmentResult = {
new VMAssignmentResult(hostname, leasesUsed.asJava, tasksAssigned.asJava)
}
/**
* Mock a scheduling result with the given successes and failures.
*/
def schedulingResult(successes: Seq[VMAssignmentResult],
failures: Seq[TaskAssignmentResult] = Nil,
exceptions: Seq[Exception] = Nil,
leasesAdded: Int = 0,
leasesRejected: Int = 0): SchedulingResult = {
val r = mock(classOf[SchedulingResult])
when(r.getResultMap).thenReturn(successes.map(r => r.getHostname -> r).toMap.asJava)
when(r.getExceptions).thenReturn(exceptions.asJava)
val groupedFailures = failures.groupBy(_.getRequest).mapValues(_.asJava)
when(r.getFailures).thenReturn(groupedFailures.asJava)
when(r.getLeasesAdded).thenReturn(leasesAdded)
when(r.getLeasesRejected).thenReturn(leasesRejected)
when(r.getRuntime).thenReturn(0)
when(r.getNumAllocations).thenThrow(new NotImplementedError())
when(r.getTotalVMsCount).thenThrow(new NotImplementedError())
when(r.getIdleVMsCount).thenThrow(new NotImplementedError())
r
}
/**
* Mock a task scheduler.
* The task assigner/unassigner is pre-wired.
*/
def taskScheduler() = {
val optimizer = mock(classOf[TaskScheduler])
val taskAssigner = mock(classOf[Action2[TaskRequest, String]])
when[Action2[TaskRequest, String]](optimizer.getTaskAssigner).thenReturn(taskAssigner)
val taskUnassigner = mock(classOf[Action2[String, String]])
when[Action2[String, String]](optimizer.getTaskUnAssigner).thenReturn(taskUnassigner)
optimizer
}
/**
* Create a task scheduler builder.
*/
def taskSchedulerBuilder(optimizer: TaskScheduler) = new TaskSchedulerBuilder {
var leaseRejectAction: Action1[VirtualMachineLease] = null
override def withLeaseRejectAction(
action: Action1[VirtualMachineLease]): TaskSchedulerBuilder = {
leaseRejectAction = action
this
}
override def build(): TaskScheduler = optimizer
}
/**
* Process a call to scheduleOnce with the given function.
*/
def scheduleOnce(f: (Seq[TaskRequest],Seq[VirtualMachineLease]) => SchedulingResult) = {
new Answer[SchedulingResult] {
override def answer(invocationOnMock: InvocationOnMock): SchedulingResult = {
val args = invocationOnMock.getArguments
val requests = args(0).asInstanceOf[java.util.List[TaskRequest]]
val newLeases = args(1).asInstanceOf[java.util.List[VirtualMachineLease]]
f(requests.asScala, newLeases.asScala)
}
}
}
/**
* The context fixture.
*/
class Context {
val optimizer = taskScheduler()
val optimizerBuilder = taskSchedulerBuilder(optimizer)
val schedulerDriver = mock(classOf[SchedulerDriver])
val trace = Mockito.inOrder(schedulerDriver)
val fsm =
TestFSMRef(new LaunchCoordinator(testActor, config, schedulerDriver, optimizerBuilder))
val framework = randomFramework
val task1 = randomTask
val task2 = randomTask
val task3 = randomTask
val slave1 = {
val slave = randomSlave
(slave._1, slave._2,
randomOffer(framework, slave), randomOffer(framework, slave), randomOffer(framework, slave))
}
val slave2 = {
val slave = randomSlave
(slave._1, slave._2,
randomOffer(framework, slave), randomOffer(framework, slave), randomOffer(framework, slave))
}
}
def inState = afterWord("in state")
def handle = afterWord("handle")
def handlesAssignments(state: TaskState) = {
"Unassign" which {
s"stays in $state with updated optimizer state" in new Context {
optimizer.getTaskAssigner.call(task1._2.taskRequest, slave1._2)
fsm.setState(state)
fsm ! Unassign(task1._1, slave1._2)
verify(optimizer.getTaskUnAssigner).call(task1._1.getValue, slave1._2)
fsm.stateName should be (state)
}
}
"Assign" which {
s"stays in $state with updated optimizer state" in new Context {
fsm.setState(state)
fsm ! Assign(Seq(new FlinkTuple2(task1._2.taskRequest, slave1._2)).asJava)
verify(optimizer.getTaskAssigner).call(MM.any(), MM.any())
fsm.stateName should be (state)
}
}
}
"The LaunchCoordinator" when inState {
"Suspended" should handle {
"Connected" which {
"transitions to Idle when the task queue is empty" in new Context {
fsm.setState(Suspended)
fsm ! new Connected {}
fsm.stateName should be (Idle)
}
"transitions to GatheringOffers when the task queue is non-empty" in new Context {
fsm.setState(Suspended, GatherData(tasks = Seq(task1._2), newLeases = Nil))
fsm ! new Connected {}
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
}
}
"Launch" which {
"stays in Suspended with updated task queue" in new Context {
fsm.setState(Suspended, GatherData(tasks = Seq(task1._2), newLeases = Nil))
fsm ! Launch(Seq(task2._2).asJava)
fsm.stateName should be (Suspended)
fsm.stateData.tasks should contain only (task1._2, task2._2)
}
}
behave like handlesAssignments(Suspended)
}
"Idle" should handle {
"Disconnected" which {
"transitions to Suspended" in new Context {
fsm.setState(Idle)
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
}
}
"ResourceOffers" which {
"stays in Idle with offers declined" in new Context {
fsm.setState(Idle)
fsm ! new ResourceOffers(Seq(slave1._3, slave1._4).asJava)
verify(schedulerDriver).declineOffer(slave1._3.getId)
verify(schedulerDriver).declineOffer(slave1._4.getId)
fsm.stateName should be (Idle)
}
}
"Launch" which {
"transitions to GatheringOffers with updated task queue" in new Context {
fsm.setState(Idle)
fsm ! Launch(Seq(task1._2, task2._2).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2, task2._2)
}
}
behave like handlesAssignments(Idle)
}
"GatheringOffers" should handle {
"(enter)" which {
"revives offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
verify(schedulerDriver).reviveOffers()
}
}
"(exit)" which {
"suppresses offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
fsm ! new Disconnected()
verify(schedulerDriver).suppressOffers()
}
"declines any outstanding offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
fsm ! new Disconnected()
verify(optimizer).expireAllLeases()
verify(optimizer).scheduleOnce(MM.any(), MM.any())
}
}
"Disconnected" which {
"transitions to Suspended with task queue intact" in new Context {
fsm.setState(GatheringOffers, GatherData(tasks = Seq(task1._2)))
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
fsm.stateData.tasks should contain only (task1._2)
}
"transitions to Suspended with offer queue emptied" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
fsm.stateData.newLeases should be (empty)
}
}
"Launch" which {
"stays in GatheringOffers with updated task queue" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! Launch(Seq(task2._2).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2, task2._2)
fsm.stateData.newLeases.map(_.getOffer) should contain only (slave1._3)
}
}
"ResourceOffers" which {
"stays in GatheringOffers with offer queue updated" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new ResourceOffers(Seq(slave1._4, slave2._3).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases.map(_.getOffer) should contain only
(slave1._3, slave1._4, slave2._3)
}
}
"OfferRescinded" which {
"stays in GatheringOffers with offer queue updated" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new OfferRescinded(slave1._3.getId)
verify(optimizer).expireLease(slave1._3.getId.getValue)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases should be (empty)
}
}
"StateTimeout" which {
"sends AcceptOffers message for matched tasks" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
val (l, task) = (newLeases.head, requests.head)
val vm = vmAssignmentResult(l.hostname(), Seq(l), Set(taskAssignmentResult(l, task)))
schedulingResult(successes = Seq(vm))
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
val offers = expectMsgType[AcceptOffers]
offers.hostname() should be (slave1._2)
offers.offerIds() should contain only (slave1._3.getId)
}
"transitions to Idle when task queue is empty" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
val (l, task) = (newLeases.head, requests.head)
val vm = vmAssignmentResult(l.hostname(), Seq(l), Set(taskAssignmentResult(l, task)))
schedulingResult(successes = Seq(vm))
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
fsm.stateName should be (Idle)
fsm.stateData.tasks should be (empty)
fsm.stateData.newLeases should be (empty)
}
"stays in GatheringOffers when task queue is non-empty" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
schedulingResult(successes = Nil)
}
}
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases should be (empty)
}
"declines old offers" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
optimizerBuilder.leaseRejectAction.call(newLeases.head)
schedulingResult(successes = Nil)
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
verify(schedulerDriver).declineOffer(slave1._3.getId)
}
}
behave like handlesAssignments(GatheringOffers)
}
}
override def toString = s"LaunchCoordinatorTest()"
}
| zimmermatt/flink | flink-mesos/src/test/scala/org/apache/flink/mesos/scheduler/LaunchCoordinatorTest.scala | Scala | apache-2.0 | 18,257 |
package com.github.agourlay.cornichon.http.client
import java.security.SecureRandom
import java.security.cert.X509Certificate
import cats.Show
import cats.data.EitherT
import cats.syntax.either._
import cats.syntax.show._
import cats.effect.IO
import cats.effect.unsafe.IORuntime
import com.github.agourlay.cornichon.core.{ CornichonError, CornichonException, Done }
import com.github.agourlay.cornichon.http.HttpMethods._
import com.github.agourlay.cornichon.http._
import com.github.agourlay.cornichon.http.HttpService._
import com.github.agourlay.cornichon.http.HttpStreams.SSE
import com.github.agourlay.cornichon.util.Caching
import com.github.agourlay.cornichon.util.CirceUtil._
import io.circe.Json
import io.circe.generic.auto._
import io.circe.syntax._
import javax.net.ssl.{ SSLContext, TrustManager, X509TrustManager }
import org.http4s._
import org.http4s.blaze.client.BlazeClientBuilder
import org.http4s.client.middleware.{ FollowRedirect, GZip }
import org.typelevel.ci.CIString
import scala.concurrent.duration._
class Http4sClient(
addAcceptGzipByDefault: Boolean,
disableCertificateVerification: Boolean,
followRedirect: Boolean)(implicit ioRuntime: IORuntime)
extends HttpClient {
// Disable JDK built-in checks
private val sslContext = {
if (disableCertificateVerification) {
val ssl = SSLContext.getInstance("SSL")
val byPassTrustManagers = Array[TrustManager](new X509TrustManager() {
override def getAcceptedIssuers: Array[X509Certificate] = Array.empty
override def checkClientTrusted(x509Certificates: Array[X509Certificate], s: String) = ()
override def checkServerTrusted(x509Certificates: Array[X509Certificate], s: String) = ()
})
ssl.init(null, byPassTrustManagers, new SecureRandom)
ssl
} else SSLContext.getDefault
}
// Lives for the duration of the test run
private val uriCache = Caching.buildCache[String, Either[CornichonError, Uri]]()
// Timeouts are managed within the HttpService
private val defaultHighTimeout = Duration.Inf
private val (httpClient, safeShutdown) =
BlazeClientBuilder[IO]
.withSslContext(sslContext)
.withMaxTotalConnections(300)
.withMaxWaitQueueLimit(500)
.withIdleTimeout(2.minute)
.withResponseHeaderTimeout(defaultHighTimeout)
.withRequestTimeout(defaultHighTimeout)
.withRetries(0)
.allocated
.map {
case (client, shutdown) =>
val c1 = if (addAcceptGzipByDefault) GZip()(client) else client
val c2 = if (followRedirect) FollowRedirect(maxRedirects = 10)(client = c1) else c1
c2 -> shutdown
}.unsafeRunSync()
private def toHttp4sMethod(method: HttpMethod): Method = method match {
case GET => org.http4s.Method.GET
case POST => org.http4s.Method.POST
case DELETE => org.http4s.Method.DELETE
case PUT => org.http4s.Method.PUT
case HEAD => org.http4s.Method.HEAD
case OPTIONS => org.http4s.Method.OPTIONS
case PATCH => org.http4s.Method.PATCH
case other => throw CornichonException(s"unsupported HTTP method ${other.name}")
}
private def toHttp4sHeaders(headers: Seq[(String, String)]): List[Header.Raw] =
headers.iterator.map { case (n, v) => Header.Raw(CIString(n), v) }.toList
private def fromHttp4sHeaders(headers: Headers): Seq[(String, String)] =
headers.headers.map(h => (h.name.toString, h.value))
def addQueryParams(uri: Uri, moreParams: Seq[(String, String)]): Uri =
if (moreParams.isEmpty)
uri
else {
val q = Query.fromPairs(moreParams: _*)
// Not sure it is the most efficient way
uri.copy(query = Query.fromVector(uri.query.toVector ++ q.toVector))
}
override def runRequest[A: Show](cReq: HttpRequest[A], t: FiniteDuration)(implicit ee: EntityEncoder[IO, A]): EitherT[IO, CornichonError, HttpResponse] =
parseUri(cReq.url).fold(
e => EitherT.left[HttpResponse](IO.pure(e)),
uri => EitherT {
val req = Request[IO](toHttp4sMethod(cReq.method))
val completeRequest = cReq.body.fold(req)(b => req.withEntity(b))
.putHeaders(toHttp4sHeaders(cReq.headers)) // `withEntity` adds `Content-Type` so we set the headers afterwards to have the possibility to override it
.withUri(addQueryParams(uri, cReq.params))
val cornichonResponse = httpClient.run(completeRequest).use { http4sResp =>
http4sResp
.bodyText
.compile
.string
.map { decodedBody =>
HttpResponse(
status = http4sResp.status.code,
headers = fromHttp4sHeaders(http4sResp.headers),
body = decodedBody
).asRight[CornichonError]
}
}
val timeout = IO.delay(TimeoutErrorAfter(cReq, t).asLeft).delayBy(t)
IO.race(cornichonResponse, timeout)
.map(_.fold(identity, identity))
.handleError { t => RequestError(cReq, t).asLeft }
}
)
private val sseHeader = "text" -> "event-stream"
private def runSSE(streamReq: HttpStreamedRequest, t: FiniteDuration): EitherT[IO, CornichonError, HttpResponse] = {
parseUri(streamReq.url).fold(
e => EitherT.left[HttpResponse](IO.pure(e)),
uri => EitherT {
val req = Request[IO](org.http4s.Method.GET)
.withHeaders(Headers(toHttp4sHeaders(streamReq.addHeaders(sseHeader).headers)))
.withUri(addQueryParams(uri, streamReq.params))
val cornichonResponse = httpClient.run(req).use { http4sResp =>
http4sResp
.body
.through(ServerSentEvent.decoder)
.interruptAfter(streamReq.takeWithin)
.filter(_ != ServerSentEvent.empty) // filter out empty SSE
.compile
.toList
.map { events =>
HttpResponse(
status = http4sResp.status.code,
headers = fromHttp4sHeaders(http4sResp.headers),
body = Json.fromValues(events.iterator.map(_.asJson).toVector).show
).asRight[CornichonError]
}
}
val timeout = IO.delay(TimeoutErrorAfter(streamReq, t).asLeft).delayBy(t)
IO.race(cornichonResponse, timeout)
.map(_.fold(identity, identity))
.handleError { t => RequestError(streamReq, t).asLeft }
}
)
}
def openStream(req: HttpStreamedRequest, t: FiniteDuration): IO[Either[CornichonError, HttpResponse]] =
req.stream match {
case SSE => runSSE(req, t).value
case _ => ??? // TODO implement WS support
}
def shutdown(): IO[Done] =
safeShutdown.map { _ => uriCache.invalidateAll(); Done }
def paramsFromUrl(url: String): Either[CornichonError, List[(String, String)]] =
if (url.contains('?'))
parseUri(url).map(_.params.toList)
else
rightNil
def parseUri(uri: String): Either[CornichonError, Uri] =
uriCache.get(uri, u => Uri.fromString(u).leftMap(e => MalformedUriError(u, e.message)))
} | agourlay/cornichon | cornichon-core/src/main/scala/com/github/agourlay/cornichon/http/client/Http4sClient.scala | Scala | apache-2.0 | 7,045 |
package com.overviewdocs.searchindex
import com.google.common.util.concurrent.ThreadFactoryBuilder
import scala.collection.immutable
import scala.concurrent.{ExecutionContext,Future}
import com.overviewdocs.query.Query
import com.overviewdocs.models.Document
/** Index client that writes directly to Lucene.
*
* Internally, this client uses a MruLuceneIndexCache to keep recently-opened
* files open.
*/
trait LuceneIndexClient extends IndexClient {
/** Thread pool for blocking I/O.
*
* Lucene does synchronous blocking I/O, but a Scala caller wouldn't expect
* that. So you need to build this trait with an ExecutionContext where all
* Lucene operations will take place.
*/
protected implicit val ec: ExecutionContext
/** Open or create an index for a documentSetId we have not seen recently.
*
* A LuceneIndexClient will not call this method twice concurrently for the
* same DocumentSet, nor will it call `.close()` on a DocumentSetLuceneIndex
* while opening one with the same ID.
*
* We assume openIndex() will block. This method will be invoked in a
* `Future()`.
*/
protected def openIndex(documentSetId: Long): DocumentSetLuceneIndex
protected lazy val cache: MruLuceneIndexCache = {
new MruLuceneIndexCache(
loader=documentSetId => openIndex(documentSetId),
executionContext=ec
)
}
protected def getIndex(documentSetId: Long): Future[DocumentSetLuceneIndex] = cache.get(documentSetId)
/** Deletes all indices -- BE CAREFUL!
*
* Useful in unit tests.
*/
override def deleteAllIndices: Future[Unit] = ???
override def addDocumentSet(id: Long): Future[Unit] = {
getIndex(id).map(_ => ())
}
override def removeDocumentSet(id: Long): Future[Unit] = {
getIndex(id).map(_.delete)
}
override def addDocuments(id: Long, documents: immutable.Seq[Document]): Future[Unit] = {
getIndex(id).map(_.addDocuments(documents))
}
override def updateDocuments(id: Long, documents: immutable.Seq[Document]): Future[Unit] = {
getIndex(id).map(_.updateDocuments(documents))
}
override def searchForIds(id: Long, q: Query) = {
getIndex(id).map(_.searchForIds(q))
}
override def highlight(documentSetId: Long, documentId: Long, q: Query) = {
getIndex(documentSetId).map(_.highlight(documentId, q))
}
override def highlights(documentSetId: Long, documentIds: immutable.Seq[Long], q: Query) = {
getIndex(documentSetId).map(_.highlights(documentIds, q))
}
override def refresh(documentSetId: Long): Future[Unit] = {
getIndex(documentSetId).map(_.refresh)
}
}
object LuceneIndexClient {
def createInMemoryLuceneIndexClient(implicit executionContext: ExecutionContext): LuceneIndexClient = {
new LuceneIndexClient {
override protected implicit val ec: ExecutionContext = executionContext
override protected def openIndex(documentSetId: Long) = {
new DocumentSetLuceneIndex(documentSetId, new org.apache.lucene.store.RAMDirectory)
}
}
}
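  // Illustrative usage (not from the original file): a test can build the in-memory client
  // on its own execution context and index a document set. `documents` stands for an
  // immutable.Seq[Document] prepared by the test; it is assumed here.
  //
  //   implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
  //   val client = LuceneIndexClient.createInMemoryLuceneIndexClient
  //   for {
  //     _ <- client.addDocumentSet(1L)
  //     _ <- client.addDocuments(1L, documents)
  //     _ <- client.refresh(1L)
  //   } yield ()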
lazy val onDiskSingleton: LuceneIndexClient = new LuceneIndexClient {
import com.typesafe.config.ConfigFactory
import java.nio.file.Paths
private val baseDirectory = ConfigFactory.load().getString("search.baseDirectory")
private val threadPool = java.util.concurrent.Executors.newFixedThreadPool(
Seq(4, Runtime.getRuntime.availableProcessors).min,
new ThreadFactoryBuilder().setNameFormat("lucene-on-disk-pool-%d").build
)
override protected implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(threadPool)
override protected def openIndex(documentSetId: Long) = {
val rootPath = Paths.get(baseDirectory, documentSetId.toString)
val lockFactory = new org.apache.lucene.store.SingleInstanceLockFactory
val directory = org.apache.lucene.store.FSDirectory.open(rootPath, lockFactory)
val ret = new DocumentSetLuceneIndex(documentSetId, directory)
// If the directory does not exist, that's fine. We special-case empty
// indexes.
ret
}
}
}
| overview/overview-server | worker/src/main/scala/com/overviewdocs/searchindex/LuceneIndexClient.scala | Scala | agpl-3.0 | 4,095 |
package com.artclod.mathml.scalar.apply
import org.junit.runner.RunWith
import play.api.test._
import play.api.test.Helpers._
import org.specs2.mutable._
import com.artclod.mathml._
import com.artclod.mathml.scalar._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import math.E
// LATER try out http://rlegendi.github.io/specs2-runner/ and remove RunWith
@RunWith(classOf[JUnitRunner])
class ApplyRootSpec extends Specification {
"eval" should {
"do nth root" in {
ApplyRoot(3, 8).eval().get must beEqualTo(2)
}
}
"variables" should {
"be empty if elements are constant" in {
ApplyRoot(3, 8).variables must beEmpty
}
"be x if an element constains an x" in {
ApplyRoot(5, x).variables must beEqualTo(Set("x"))
}
}
"c" should {
"return correct log" in {
ApplyRoot(3, 8).c.get must beEqualTo(`2`)
}
"fail if not a constant " in {
ApplyRoot(5, x).c must beEmpty
}
}
"s" should {
"return constant if value is constant" in {
ApplyRoot(4, 16).s must beEqualTo(`2`)
}
"remain unchanged if nothing can be simplified" in {
ApplyRoot(5, x).s must beEqualTo(ApplyRoot(5, x))
}
}
"d" should {
"obey the derivative rule for sqrt" in {
ApplyRoot(2, F).dx must beEqualTo(Fdx / (2 * √(F)))
}
"obey the derivative rule for arbitrary degree" in {
ApplyRoot(5, F).dx must beEqualTo(Fdx / (5 * `n√`(5d / (5-1))(F)))
}
}
"toText" should {
"handle nthRoot" in {
ApplyRoot(3, 5).toMathJS must beEqualTo("nthRoot(5, 3)")
}
}
} | kristiankime/web-education-games | test/com/artclod/mathml/scalar/apply/ApplyRootSpec.scala | Scala | mit | 1,521 |
package repository.hashmap
import java.util.concurrent.ConcurrentHashMap
import model.Id
import model.Identifiable
import scala.collection.JavaConversions._
import model.Id.toEstateId
import model.Id.toLong
import repository.Repository
import scala.Option.option2Iterable
abstract class HashMapRepository[T <: Identifiable] extends Repository[T] {
val repository: ConcurrentHashMap[Long, Option[T]] = new ConcurrentHashMap
override def save(entity: T): T = {
def getNewId(): Long = repository.size + 1
val newId: Id = entity.id.getOrElse(getNewId)
val result = entity.updateId(newId).asInstanceOf[T]
repository.put(newId, Some(result))
result
}
override def clear() {
repository.clear
}
override def delete(entity: T) =
entity.id match {
case Some(id) => repository.put(id, None)
case None =>
}
override def deleteById(id: Id) = repository.remove(id.id)
override def findAll(offset: Int = 0, limit: Int = 0): List[T] = {
repository.flatMap(item => item._2).toList
}
} | tiborbotos/domino | domino-crawler/src/main/scala/repository/hashmap/HashMapRepository.scala | Scala | lgpl-3.0 | 1,045 |
package com.codility.lessons
import com.codility.SolutionTest
class CountDivTest extends SolutionTest(CountDiv, Seq(
(6, 11, 2) -> 3,
(1,4,3) -> 1,
(2,5,3) -> 1,
(3,6,3) -> 2,
(1,5,3) -> 1,
(2,6,3) -> 2,
(0,0,11) -> 1,
(11,11,11) -> 1,
(0, 2000000000, 1) -> 2000000001,
(0, 2000000000, 2000000000) -> 2
))
// 1, 2, 3 , 4, 5, 6, 7 | kastoestoramadus/codility-scala-helpers | src/test/scala/com/codility/lessons/CountDivTest.scala | Scala | gpl-3.0 | 351 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package xml
import scala.collection.Seq
/**
* Unprefixed attributes have the null namespace, and no prefix field
*
* @author Burak Emir
*/
class UnprefixedAttribute(
val key: String,
val value: Seq[Node],
next1: MetaData)
extends Attribute {
final val pre = null
val next = if (value ne null) next1 else next1.remove(key)
/** same as this(key, Text(value), next), or no attribute if value is null */
def this(key: String, value: String, next: MetaData) =
this(key, if (value ne null) Text(value) else null: NodeSeq, next)
/** same as this(key, value.get, next), or no attribute if value is None */
def this(key: String, value: Option[Seq[Node]], next: MetaData) =
this(key, value.orNull, next)
/** returns a copy of this unprefixed attribute with the given next field*/
def copy(next: MetaData) = new UnprefixedAttribute(key, value, next)
final def getNamespace(owner: Node): String = null
/**
* Gets value of unqualified (unprefixed) attribute with given key, null if not found
*
* @param key
* @return value as Seq[Node] if key is found, null otherwise
*/
def apply(key: String): Seq[Node] =
if (key == this.key) value else next(key)
/**
* Forwards the call to next (because caller looks for prefixed attribute).
*
* @param namespace
* @param scope
* @param key
   * @return the value found by the remaining attributes in the chain, or null if not found
*/
def apply(namespace: String, scope: NamespaceBinding, key: String): Seq[Node] =
next(namespace, scope, key)
}
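// Illustrative sketch (not part of the original file): attributes form a linked list via
// `next`, so chaining two unprefixed attributes and looking them up behaves like this:
//
//   val attrs = new UnprefixedAttribute("a", "1", new UnprefixedAttribute("b", "2", Null))
//   attrs("b")   // the Text node "2"
//   attrs("c")   // null, because no attribute in the chain has key "c"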
object UnprefixedAttribute {
def unapply(x: UnprefixedAttribute) = Some((x.key, x.value, x.next))
}
| scala/scala-xml | shared/src/main/scala/scala/xml/UnprefixedAttribute.scala | Scala | apache-2.0 | 1,899 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.utils.benchmark
import scala.util.{Failure, Success, Try}
/**
 * This is a service responsible for terminating the run process (which has been started during test execution).
 * It is called to terminate processes using the java.lang.System.exit() method, which terminates the currently
 * running Java virtual machine.
*
* @author Pavel Tomskikh
*/
object ProcessTerminator {
/**
* Terminates the current process after executing the method
*
* @param f method after which the current process will be terminated
*/
def terminateProcessAfter(f: () => Unit): Unit = {
val exitCode = Try(f()) match {
case Success(_) => 0
case Failure(e) =>
e.printStackTrace()
1
}
System.exit(exitCode)
}
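  // Illustrative usage (not part of the original file): a benchmark entry point could wrap
  // its body so the JVM exits with 0 on success and 1 on failure. `runBenchmark()` is a
  // hypothetical method used only for this sketch.
  //
  //   def main(args: Array[String]): Unit =
  //     ProcessTerminator.terminateProcessAfter(() => runBenchmark())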
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/utils/benchmark/ProcessTerminator.scala | Scala | apache-2.0 | 1,594 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.tree.impl.{GradientBoostedTrees, TreeTests}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.regression.{LabeledPoint => OldLabeledPoint}
import org.apache.spark.mllib.tree.{EnsembleTestHelper, GradientBoostedTrees => OldGBT}
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.functions.lit
import org.apache.spark.util.Utils
/**
* Test suite for [[GBTRegressor]].
*/
class GBTRegressorSuite extends MLTest with DefaultReadWriteTest {
import GBTRegressorSuite.compareAPIs
import testImplicits._
// Combinations for estimators, learning rates and subsamplingRate
private val testCombinations =
Array((10, 1.0, 1.0), (10, 0.1, 1.0), (10, 0.5, 0.75), (10, 0.1, 0.75))
private var data: RDD[LabeledPoint] = _
private var trainData: RDD[LabeledPoint] = _
private var validationData: RDD[LabeledPoint] = _
override def beforeAll() {
super.beforeAll()
data = sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 10, 100), 2)
.map(_.asML)
trainData =
sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 20, 120), 2)
.map(_.asML)
validationData =
sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 20, 80), 2)
.map(_.asML)
}
test("Regression with continuous features") {
val categoricalFeatures = Map.empty[Int, Int]
GBTRegressor.supportedLossTypes.foreach { loss =>
testCombinations.foreach {
case (maxIter, learningRate, subsamplingRate) =>
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setSubsamplingRate(subsamplingRate)
.setLossType(loss)
.setMaxIter(maxIter)
.setStepSize(learningRate)
.setSeed(123)
compareAPIs(data, None, gbt, categoricalFeatures)
}
}
}
test("GBTRegressor behaves reasonably on toy data") {
val df = Seq(
LabeledPoint(10, Vectors.dense(1, 2, 3, 4)),
LabeledPoint(-5, Vectors.dense(6, 3, 2, 1)),
LabeledPoint(11, Vectors.dense(2, 2, 3, 4)),
LabeledPoint(-6, Vectors.dense(6, 4, 2, 1)),
LabeledPoint(9, Vectors.dense(1, 2, 6, 4)),
LabeledPoint(-4, Vectors.dense(6, 3, 2, 2))
).toDF()
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setMaxIter(2)
val model = gbt.fit(df)
MLTestingUtils.checkCopyAndUids(gbt, model)
testTransformerByGlobalCheckFunc[(Double, Vector)](df, model, "prediction") {
case rows: Seq[Row] =>
val predictions = rows.map(_.getDouble(0))
// Checks based on SPARK-8736 (to ensure it is not doing classification)
assert(predictions.max > 2)
assert(predictions.min < -1)
}
}
test("prediction on single instance") {
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setMaxIter(2)
val model = gbt.fit(trainData.toDF())
testPredictionModelSinglePrediction(model, validationData.toDF)
}
test("Checkpointing") {
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
sc.setCheckpointDir(path)
val df = data.toDF()
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setMaxIter(5)
.setStepSize(0.1)
.setCheckpointInterval(2)
.setSeed(123)
val model = gbt.fit(df)
sc.checkpointDir = None
Utils.deleteRecursively(tempDir)
}
test("should support all NumericType labels and not support other types") {
val gbt = new GBTRegressor().setMaxDepth(1)
MLTestingUtils.checkNumericTypes[GBTRegressionModel, GBTRegressor](
gbt, spark, isClassification = false) { (expected, actual) =>
TreeTests.checkEqual(expected, actual)
}
}
// TODO: Reinstate test once runWithValidation is implemented SPARK-7132
/*
test("runWithValidation stops early and performs better on a validation dataset") {
val categoricalFeatures = Map.empty[Int, Int]
// Set maxIter large enough so that it stops early.
val maxIter = 20
GBTRegressor.supportedLossTypes.foreach { loss =>
val gbt = new GBTRegressor()
.setMaxIter(maxIter)
.setMaxDepth(2)
.setLossType(loss)
.setValidationTol(0.0)
compareAPIs(trainData, None, gbt, categoricalFeatures)
compareAPIs(trainData, Some(validationData), gbt, categoricalFeatures)
}
}
*/
/////////////////////////////////////////////////////////////////////////////
// Tests of feature importance
/////////////////////////////////////////////////////////////////////////////
test("Feature importance with toy data") {
val gbt = new GBTRegressor()
.setMaxDepth(3)
.setMaxIter(5)
.setSubsamplingRate(1.0)
.setStepSize(0.5)
.setSeed(123)
// In this data, feature 1 is very important.
val data: RDD[LabeledPoint] = TreeTests.featureImportanceData(sc)
val categoricalFeatures = Map.empty[Int, Int]
val df: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0)
val importances = gbt.fit(df).featureImportances
val mostImportantFeature = importances.argmax
assert(mostImportantFeature === 1)
assert(importances.toArray.sum === 1.0)
assert(importances.toArray.forall(_ >= 0.0))
}
/////////////////////////////////////////////////////////////////////////////
// Tests of feature subset strategy
/////////////////////////////////////////////////////////////////////////////
test("Tests of feature subset strategy") {
val numClasses = 2
val gbt = new GBTRegressor()
.setMaxDepth(3)
.setMaxIter(5)
.setSeed(123)
.setFeatureSubsetStrategy("all")
// In this data, feature 1 is very important.
val data: RDD[LabeledPoint] = TreeTests.featureImportanceData(sc)
val categoricalFeatures = Map.empty[Int, Int]
val df: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, numClasses)
val importances = gbt.fit(df).featureImportances
val mostImportantFeature = importances.argmax
assert(mostImportantFeature === 1)
// GBT with different featureSubsetStrategy
val gbtWithFeatureSubset = gbt.setFeatureSubsetStrategy("1")
val importanceFeatures = gbtWithFeatureSubset.fit(df).featureImportances
val mostIF = importanceFeatures.argmax
assert(mostImportantFeature !== mostIF)
}
test("model evaluateEachIteration") {
for (lossType <- GBTRegressor.supportedLossTypes) {
val gbt = new GBTRegressor()
.setSeed(1L)
.setMaxDepth(2)
.setMaxIter(3)
.setLossType(lossType)
val model3 = gbt.fit(trainData.toDF)
val model1 = new GBTRegressionModel("gbt-reg-model-test1",
model3.trees.take(1), model3.treeWeights.take(1), model3.numFeatures)
val model2 = new GBTRegressionModel("gbt-reg-model-test2",
model3.trees.take(2), model3.treeWeights.take(2), model3.numFeatures)
for (evalLossType <- GBTRegressor.supportedLossTypes) {
val evalArr = model3.evaluateEachIteration(validationData.toDF, evalLossType)
val lossErr1 = GradientBoostedTrees.computeError(validationData,
model1.trees, model1.treeWeights, model1.convertToOldLossType(evalLossType))
val lossErr2 = GradientBoostedTrees.computeError(validationData,
model2.trees, model2.treeWeights, model2.convertToOldLossType(evalLossType))
val lossErr3 = GradientBoostedTrees.computeError(validationData,
model3.trees, model3.treeWeights, model3.convertToOldLossType(evalLossType))
assert(evalArr(0) ~== lossErr1 relTol 1E-3)
assert(evalArr(1) ~== lossErr2 relTol 1E-3)
assert(evalArr(2) ~== lossErr3 relTol 1E-3)
}
}
}
test("runWithValidation stops early and performs better on a validation dataset") {
val validationIndicatorCol = "validationIndicator"
val trainDF = trainData.toDF().withColumn(validationIndicatorCol, lit(false))
val validationDF = validationData.toDF().withColumn(validationIndicatorCol, lit(true))
val numIter = 20
for (lossType <- GBTRegressor.supportedLossTypes) {
val gbt = new GBTRegressor()
.setSeed(123)
.setMaxDepth(2)
.setLossType(lossType)
.setMaxIter(numIter)
val modelWithoutValidation = gbt.fit(trainDF)
gbt.setValidationIndicatorCol(validationIndicatorCol)
val modelWithValidation = gbt.fit(trainDF.union(validationDF))
assert(modelWithoutValidation.numTrees === numIter)
// early stop
assert(modelWithValidation.numTrees < numIter)
val errorWithoutValidation = GradientBoostedTrees.computeError(validationData,
modelWithoutValidation.trees, modelWithoutValidation.treeWeights,
modelWithoutValidation.getOldLossType)
val errorWithValidation = GradientBoostedTrees.computeError(validationData,
modelWithValidation.trees, modelWithValidation.treeWeights,
modelWithValidation.getOldLossType)
assert(errorWithValidation < errorWithoutValidation)
val evaluationArray = GradientBoostedTrees
.evaluateEachIteration(validationData, modelWithoutValidation.trees,
modelWithoutValidation.treeWeights, modelWithoutValidation.getOldLossType,
OldAlgo.Regression)
assert(evaluationArray.length === numIter)
assert(evaluationArray(modelWithValidation.numTrees) >
evaluationArray(modelWithValidation.numTrees - 1))
var i = 1
while (i < modelWithValidation.numTrees) {
assert(evaluationArray(i) <= evaluationArray(i - 1))
i += 1
}
}
}
/////////////////////////////////////////////////////////////////////////////
// Tests of model save/load
/////////////////////////////////////////////////////////////////////////////
test("model save/load") {
def checkModelData(
model: GBTRegressionModel,
model2: GBTRegressionModel): Unit = {
TreeTests.checkEqual(model, model2)
assert(model.numFeatures === model2.numFeatures)
}
val gbt = new GBTRegressor()
val rdd = TreeTests.getTreeReadWriteData(sc)
val allParamSettings = TreeTests.allParamSettings ++ Map("lossType" -> "squared")
val continuousData: DataFrame =
TreeTests.setMetadata(rdd, Map.empty[Int, Int], numClasses = 0)
testEstimatorAndModelReadWrite(gbt, continuousData, allParamSettings,
allParamSettings, checkModelData)
}
}
private object GBTRegressorSuite extends SparkFunSuite {
/**
* Train 2 models on the given dataset, one using the old API and one using the new API.
* Convert the old model to the new format, compare them, and fail if they are not exactly equal.
*/
def compareAPIs(
data: RDD[LabeledPoint],
validationData: Option[RDD[LabeledPoint]],
gbt: GBTRegressor,
categoricalFeatures: Map[Int, Int]): Unit = {
val numFeatures = data.first().features.size
val oldBoostingStrategy = gbt.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Regression)
val oldGBT = new OldGBT(oldBoostingStrategy, gbt.getSeed.toInt)
val oldModel = oldGBT.run(data.map(OldLabeledPoint.fromML))
val newData: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, numClasses = 0)
val newModel = gbt.fit(newData)
// Use parent from newTree since this is not checked anyways.
val oldModelAsNew = GBTRegressionModel.fromOld(
oldModel, newModel.parent.asInstanceOf[GBTRegressor], categoricalFeatures, numFeatures)
TreeTests.checkEqual(oldModelAsNew, newModel)
assert(newModel.numFeatures === numFeatures)
assert(oldModelAsNew.numFeatures === numFeatures)
}
}
| tejasapatil/spark | mllib/src/test/scala/org/apache/spark/ml/regression/GBTRegressorSuite.scala | Scala | apache-2.0 | 12,808 |
/*
* Copyright 2017-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package higherkindness.mu.rpc
package server
import cats.~>
import cats.effect.{Async, Resource, Sync}
import cats.syntax.flatMap._
import cats.syntax.functor._
import io.grpc.{Server, ServerBuilder, ServerServiceDefinition}
import io.grpc.netty.NettyServerBuilder
import scala.concurrent.duration.TimeUnit
trait GrpcServer[F[_]] { self =>
def start(): F[Unit]
def getPort: F[Int]
def getServices: F[List[ServerServiceDefinition]]
def getImmutableServices: F[List[ServerServiceDefinition]]
def getMutableServices: F[List[ServerServiceDefinition]]
def shutdown(): F[Unit]
def shutdownNow(): F[Unit]
def isShutdown: F[Boolean]
def isTerminated: F[Boolean]
def awaitTerminationTimeout(timeout: Long, unit: TimeUnit): F[Boolean]
def awaitTermination(): F[Unit]
def mapK[G[_]](fk: F ~> G): GrpcServer[G] =
new GrpcServer[G] {
def start(): G[Unit] = fk(self.start())
def getPort: G[Int] = fk(self.getPort)
def getServices: G[List[ServerServiceDefinition]] = fk(self.getServices)
def getImmutableServices: G[List[ServerServiceDefinition]] = fk(self.getImmutableServices)
def getMutableServices: G[List[ServerServiceDefinition]] = fk(self.getMutableServices)
def shutdown(): G[Unit] = fk(self.shutdown())
def shutdownNow(): G[Unit] = fk(self.shutdownNow())
def isShutdown: G[Boolean] = fk(self.isShutdown)
def isTerminated: G[Boolean] = fk(self.isTerminated)
def awaitTerminationTimeout(timeout: Long, unit: TimeUnit): G[Boolean] =
fk(self.awaitTerminationTimeout(timeout, unit))
def awaitTermination(): G[Unit] = fk(self.awaitTermination())
}
}
object GrpcServer {
/**
* Build a Resource that starts the given [[GrpcServer]] before use,
* and shuts it down afterwards.
*/
def serverResource[F[_]](S: GrpcServer[F])(implicit F: Async[F]): Resource[F, Unit] =
Resource.make(S.start())(_ => S.shutdown() >> S.awaitTermination())
/**
* Start the given server and keep it running forever.
*/
def server[F[_]](S: GrpcServer[F])(implicit F: Async[F]): F[Unit] =
serverResource[F](S).use(_ => F.never[Unit])
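  // Illustrative usage sketch (not part of the library): assuming `services` is a
  // List[ServerServiceDefinition] and that the GrpcConfig ADT offers an `AddService`
  // wrapper for registering a service definition (not shown in this file), a
  // Netty-backed server could be built and run forever roughly like this:
  //
  //   import cats.effect.IO
  //
  //   val program: IO[Unit] =
  //     for {
  //       grpcServer <- GrpcServer.netty[IO](8080, services.map(AddService))
  //       _          <- GrpcServer.server[IO](grpcServer)
  //     } yield ()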
/**
* Build a [[GrpcServer]] that uses the default network transport layer.
*
* The transport layer will be Netty, unless you have written your own
* `io.grpc.ServerProvider` implementation and added it to the classpath.
*/
def default[F[_]](port: Int, configList: List[GrpcConfig])(implicit
F: Sync[F]
): F[GrpcServer[F]] =
F.delay(buildServer(ServerBuilder.forPort(port), configList)).map(fromServer[F])
/**
* Build a [[GrpcServer]] that uses the Netty network transport layer.
*/
def netty[F[_]](port: Int, configList: List[GrpcConfig])(implicit F: Sync[F]): F[GrpcServer[F]] =
netty(ChannelForPort(port), configList)
/**
* Build a [[GrpcServer]] that uses the Netty network transport layer.
*/
def netty[F[_]](channelFor: ChannelFor, configList: List[GrpcConfig])(implicit
F: Sync[F]
): F[GrpcServer[F]] =
for {
builder <- F.delay(nettyBuilder(channelFor))
server <- F.delay(buildNettyServer(builder, configList))
} yield fromServer[F](server)
/**
* Helper to convert an `io.grpc.Server` into a [[GrpcServer]].
*/
def fromServer[F[_]: Sync](server: Server): GrpcServer[F] =
handlers.GrpcServerHandler[F].mapK(λ[GrpcServerOps[F, *] ~> F](_.run(server)))
private[this] def buildServer(
bldr: ServerBuilder[SB] forSome { type SB <: ServerBuilder[SB] },
configList: List[GrpcConfig]
): Server = {
configList
.foldLeft(bldr)((bldr, cfg) => SBuilder(bldr)(cfg))
.build()
}
private[this] def buildNettyServer(
bldr: NettyServerBuilder,
configList: List[GrpcConfig]
): Server = {
configList
.foldLeft(bldr)((bldr, cfg) => (SBuilder(bldr) orElse NettySBuilder(bldr))(cfg))
.build()
}
private[this] def nettyBuilder(initConfig: ChannelFor): NettyServerBuilder =
initConfig match {
case ChannelForPort(port) => NettyServerBuilder.forPort(port)
case ChannelForSocketAddress(sa) => NettyServerBuilder.forAddress(sa)
case e =>
        throw new IllegalArgumentException(s"ChannelFor type not supported by the Netty server builder: $e")
}
}
| frees-io/freestyle-rpc | modules/server/src/main/scala/higherkindness/mu/rpc/server/GrpcServer.scala | Scala | apache-2.0 | 4,907 |
package domino
import java.util.{ Dictionary, Hashtable, Vector }
import scala.collection.JavaConverters._
import scala.reflect.runtime.universe._
import org.osgi.framework.BundleContext
import org.osgi.framework.Constants
/**
* Contains utility methods used throughout Domino.
*/
object DominoUtil {
/**
* The OSGi service property key for saving the generic types expression.
*/
val GenericsExpressionKey = "completeTypesExpression"
/**
* Converts the given Scala Map to a Java Dictionary.
*/
def convertToDictionary(map: Map[String, Any]): Dictionary[String, AnyRef] = {
val table = new Hashtable[String, AnyRef]
map.foreach {
case (key, value) =>
table.put(key, value.asInstanceOf[AnyRef])
}
table
}
/**
* Converts the given Java Dictionary to a Scala Map.
*/
def convertToMap(dictionary: Dictionary[_, _]): Map[String, Any] = {
val map = new collection.mutable.HashMap[String, Any]
dictionary.keys.asScala.foreach { key =>
val jValue = dictionary.get(key)
val value = jValue match {
case v: Vector[_] => v.asScala.toList
case a: Array[_] => a.toList
case _ => jValue
}
map(key.asInstanceOf[String]) = value.asInstanceOf[Any]
}
map.toMap
}
/**
* Creates an expression which describes the complete type information of the given types
* including generic type parameters. If none of the given types contains type parameters,
* it returns `None`.
*
* The resulting expression is supposed to be registered as OSGi service property so it can be
* used as a criteria in a service query.
*
* Example input:
* - `Map[String, Map[String, Integer]]`
* - `List[Number]`
* - `String`
*
* Example result: `";Map[String, Map[String, Integer]];List[Number];String;"` (package names omitted)
*
* @note A semicolon is used instead of a comma to separate the types.
*
* @param types Type objects which might contain information about generic type arguments
* @return types expression if generic type parameters are used
*/
def createGenericsExpression(types: Traversable[Type]): Option[String] = {
if (types exists { hasTypeArguments }) {
val sep = ";"
Some(types.mkString(sep, sep, sep))
} else {
None
}
}
/**
* Returns whether the given type tag has type parameters.
*/
def hasTypeArguments(tpe: Type): Boolean = {
!tpe.asInstanceOf[TypeRefApi].args.isEmpty
}
/**
* Returns the qualified name of the given type.
*/
def getFullTypeName(tpe: Type): String = {
tpe.asInstanceOf[TypeRefApi].typeSymbol.fullName
}
/**
* Creates a filter expression that would match the given type with its generic type parameters.
* Uses wildcards because the service can be registered under several types. That would result
* in several generic type expressions separated by semicolon.
*
* If no generic type is used in the type, returns `None`.
*/
def createGenericsFilter(tpe: Type): Option[String] = {
val expression = createGenericsExpression(List(tpe))
expression match {
case Some(e) =>
if (e.isEmpty) {
None
} else {
Some("(" + GenericsExpressionKey + "=*" + e + "*)")
}
case None =>
None
}
}
/**
* Creates a filter criteria expression which matches the given type and the given custom filter.
* Doesn't include the main `OBJECTCLASS` filter criteria. If no custom filter is given and
* generic types are not used, returns `None`.
*
* @param tpe Type information
* @param customFilter A custom filter expression
*/
def createGenericsAndCustomFilter(tpe: Type, customFilter: String): Option[String] = {
// Create the generic type filter criteria
val completeTypeExpressionFilter = createGenericsFilter(tpe)
// Link it with the custom filter
DominoUtil.linkFiltersWithAnd(completeTypeExpressionFilter, Option(customFilter))
}
/**
* Creates a filter criteria expression which matches the given main type, the generic type and the given custom filter.
* Thus, it includes the main `OBJECTCLASS` filter criteria.
*
* @param tpe Type information
* @param customFilter A custom filter expression
*/
def createCompleteFilter(tpe: Type, customFilter: String): String = {
// Create object class and generics and custom filter
val className = getFullTypeName(tpe)
val objectClassFilter = createObjectClassFilter(className)
val genericsAndCustomFilter = createGenericsAndCustomFilter(tpe, customFilter)
// Combine
DominoUtil.linkFiltersWithAnd(Some(objectClassFilter), genericsAndCustomFilter).get
}
/**
* Creates an `OBJECTCLASS` filter for the given class.
*/
def createObjectClassFilter(typeName: String): String = {
// FIXME: check if tpeName is correct. Shouldn't it be the class name?
s"(${Constants.OBJECTCLASS}=$typeName)"
}
/**
   * Links two filter expressions with a logical AND if both are given, otherwise returns just the one that is defined.
*
* @param filterOne First filter
* @param filterTwo Second filter
* @return result
*/
def linkFiltersWithAnd(filterOne: Option[String], filterTwo: Option[String]): Option[String] = {
// TODO Do this more elegantly
filterOne match {
case Some(f1) =>
filterTwo match {
case Some(f2) =>
Some("(&" + f1 + f2 + ")")
case None =>
filterOne
}
case None =>
filterTwo
}
}
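  // Illustrative behaviour of linkFiltersWithAnd, following directly from the code above:
  //
  //   linkFiltersWithAnd(Some("(a=1)"), Some("(b=2)")) == Some("(&(a=1)(b=2))")
  //   linkFiltersWithAnd(Some("(a=1)"), None)          == Some("(a=1)")
  //   linkFiltersWithAnd(None, None)                   == None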
def dumpBundle(context: BundleContext): String = {
val bundle = context.getBundle()
s"${bundle.getSymbolicName()}[${bundle.getBundleId()}]"
}
}
| domino-osgi/domino | src/main/scala/domino/DominoUtil.scala | Scala | mit | 5,726 |
package ai.verta.client.entities
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import ai.verta.client.entities.subobjects.Tags
import ai.verta.swagger._public.modeldb.model.{ModeldbAddExperimentTags, ModeldbCreateExperimentRun, ModeldbDeleteExperimentTags, ModeldbExperiment}
import ai.verta.swagger.client.ClientSet
import scala.concurrent.ExecutionContext
import scala.util.Try
class Experiment(val clientSet: ClientSet, val proj: Project, val expt: ModeldbExperiment) extends Taggable {
def getOrCreateExperimentRun(name: String = "")(implicit ec: ExecutionContext) = {
    val internalName = if (name == "") LocalDateTime.now.format(DateTimeFormatter.ofPattern("yyyy/MM/dd HH:mm:ss.SSSS")) else name // yyyy = calendar year; YYYY would be the week-based year
GetOrCreateEntity.getOrCreate[ExperimentRun](
get = () => {
clientSet.experimentRunService.ExperimentRunService_getExperimentRunByName(Some(internalName), expt.id)
.map(r => if (r.experiment_run.isEmpty) null else new ExperimentRun(clientSet, this, r.experiment_run.get))
},
create = () => {
clientSet.experimentRunService.ExperimentRunService_createExperimentRun(ModeldbCreateExperimentRun(
name = Some(internalName),
experiment_id = expt.id,
project_id = proj.proj.id // TODO: remove since we can get from the experiment
))
.map(r => if (r.experiment_run.isEmpty) null else new ExperimentRun(clientSet, this, r.experiment_run.get))
}
)
}
def tags()(implicit ec: ExecutionContext) = new Tags(clientSet, ec, this)
override def getTags()(implicit ec: ExecutionContext): Try[List[String]] = {
clientSet.experimentService.ExperimentService_getExperimentTags(expt.id)
.map(r => r.tags.getOrElse(Nil))
}
override def delTags(tags: List[String])(implicit ec: ExecutionContext): Try[Unit] = {
clientSet.experimentService.ExperimentService_deleteExperimentTags(ModeldbDeleteExperimentTags(
id = expt.id,
tags = Some(tags)
))
.map(_ => {})
}
override def addTags(tags: List[String])(implicit ec: ExecutionContext): Try[Unit] = {
clientSet.experimentService.ExperimentService_addExperimentTags(ModeldbAddExperimentTags(
id = expt.id,
tags = Some(tags)
))
.map(_ => {})
}
}
| mitdbg/modeldb | client/scala/src/main/scala/ai/verta/client/entities/Experiment.scala | Scala | mit | 2,289 |
package org.firedancer3d.scenegraph.geometry
class Pyramid {
} | cyberthinkers/FireDancer3D | firedancer3d_shared/src/main/scala/org/firedancer3d/scenegraph/geometry/Pyramid.scala | Scala | mit | 68 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.nio._
import java.nio.channels._
import java.util.concurrent.atomic._
import java.lang.management._
import java.util.zip.CRC32
import org.apache.log4j.Logger
import javax.management._
import java.util.Properties
import scala.collection._
import scala.collection.mutable
/**
* Helper functions!
*/
object Utils {
private val logger = Logger.getLogger(getClass())
/**
* Wrap the given function in a java.lang.Runnable
* @param fun A function
* @return A Runnable that just executes the function
*/
def runnable(fun: () => Unit): Runnable =
new Runnable() {
def run() = fun()
}
/**
* Wrap the given function in a java.lang.Runnable that logs any errors encountered
* @param fun A function
* @return A Runnable that just executes the function
*/
def loggedRunnable(fun: () => Unit): Runnable =
new Runnable() {
def run() = {
try {
fun()
}
catch {
case t =>
// log any error and the stack trace
logger.error(t, t)
logger.error(stackTrace(t), t)
}
}
}
/**
* Create a daemon thread
* @param name The name of the thread
* @param runnable The runnable to execute in the background
* @return The unstarted thread
*/
def daemonThread(name: String, runnable: Runnable): Thread =
newThread(name, runnable, true)
/**
* Create a daemon thread
* @param name The name of the thread
   * @param fun The function to execute in the thread
* @return The unstarted thread
*/
def daemonThread(name: String, fun: () => Unit): Thread =
daemonThread(name, runnable(fun))
/**
* Create a new thread
* @param name The name of the thread
* @param runnable The work for the thread to do
   * @param daemon Whether the thread should be a daemon thread (daemon threads do not block JVM shutdown)
* @return The unstarted thread
*/
def newThread(name: String, runnable: Runnable, daemon: Boolean): Thread = {
val thread = new Thread(runnable, name)
thread.setDaemon(daemon)
thread
}
/**
* Read a byte array from the given offset and size in the buffer
* TODO: Should use System.arraycopy
*/
def readBytes(buffer: ByteBuffer, offset: Int, size: Int): Array[Byte] = {
val bytes = new Array[Byte](size)
var i = 0
while(i < size) {
bytes(i) = buffer.get(offset + i)
i += 1
}
bytes
}
/**
* Read size prefixed string where the size is stored as a 2 byte short.
* @param buffer The buffer to read from
* @param encoding The encoding in which to read the string
*/
def readShortString(buffer: ByteBuffer, encoding: String): String = {
val size: Int = buffer.getShort()
if(size < 0)
return null
val bytes = new Array[Byte](size)
buffer.get(bytes)
new String(bytes, encoding)
}
/**
* Write a size prefixed string where the size is stored as a 2 byte short
* @param buffer The buffer to write to
* @param string The string to write
* @param encoding The encoding in which to write the string
*/
def writeShortString(buffer: ByteBuffer, string: String, encoding: String): Unit = {
if(string == null) {
buffer.putShort(-1)
} else if(string.length > Short.MaxValue) {
throw new IllegalArgumentException("String exceeds the maximum size of " + Short.MaxValue + ".")
} else {
buffer.putShort(string.length.asInstanceOf[Short])
buffer.put(string.getBytes(encoding))
}
}
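  // Illustrative round trip (a sketch, not part of the original file): a string written
  // with writeShortString can be read back with readShortString after rewinding the buffer.
  //
  //   val buf = ByteBuffer.allocate(2 + 5)
  //   writeShortString(buf, "hello", "UTF-8")
  //   buf.rewind()
  //   readShortString(buf, "UTF-8")   // == "hello"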
/**
* Read a properties file from the given path
* @param filename The path of the file to read
*/
def loadProps(filename: String): Properties = {
val propStream = new FileInputStream(filename)
val props = new Properties()
props.load(propStream)
props
}
/**
* Read a required integer property value or throw an exception if no such property is found
*/
def getInt(props: Properties, name: String): Int = {
if(props.containsKey(name))
return getInt(props, name, -1)
else
throw new IllegalArgumentException("Missing required property '" + name + "'")
}
/**
* Read an integer from the properties instance
* @param props The properties to read from
* @param name The property name
* @param default The default value to use if the property is not found
* @return the integer value
*/
def getInt(props: Properties, name: String, default: Int): Int =
getIntInRange(props, name, default, (Int.MinValue, Int.MaxValue))
/**
* Read an integer from the properties instance. Throw an exception
* if the value is not in the given range (inclusive)
* @param props The properties to read from
* @param name The property name
* @param default The default value to use if the property is not found
* @param range The range in which the value must fall (inclusive)
* @throws IllegalArgumentException If the value is not in the given range
* @return the integer value
*/
def getIntInRange(props: Properties, name: String, default: Int, range: (Int, Int)): Int = {
val v =
if(props.containsKey(name))
props.getProperty(name).toInt
else
default
if(v < range._1 || v > range._2)
throw new IllegalArgumentException(name + " has value " + v + " which is not in the range " + range + ".")
else
v
}
/**
* Read a boolean value from the properties instance
* @param props The properties to read from
* @param name The property name
* @param default The default value to use if the property is not found
* @return the boolean value
*/
def getBoolean(props: Properties, name: String, default: Boolean): Boolean = {
if(!props.containsKey(name))
default
else if("true" == props.getProperty(name))
true
else if("false" == props.getProperty(name))
false
else
      throw new IllegalArgumentException("Unacceptable value for property '" + name + "', boolean values must be either 'true' or 'false'")
}
/**
* Get a string property, or, if no such property is defined, return the given default value
*/
def getString(props: Properties, name: String, default: String): String = {
if(props.containsKey(name))
props.getProperty(name)
else
default
}
/**
   * Get a string property or throw an exception if no such property is defined.
*/
def getString(props: Properties, name: String): String = {
if(props.containsKey(name))
props.getProperty(name)
else
throw new IllegalArgumentException("Missing required property '" + name + "'")
}
/**
   * Get a property of type java.util.Properties or throw an exception if no such property is defined.
*/
def getProps(props: Properties, name: String): Properties = {
if(props.containsKey(name)) {
val propString = props.getProperty(name)
val propValues = propString.split(",")
val properties = new Properties
for(i <- 0 until propValues.length) {
val prop = propValues(i).split("=")
if(prop.length != 2)
throw new IllegalArgumentException("Illegal format of specifying properties '" + propValues(i) + "'")
properties.put(prop(0), prop(1))
}
properties
}
else
throw new IllegalArgumentException("Missing required property '" + name + "'")
}
/**
* Get a property of type java.util.Properties or return the default if no such property is defined
*/
def getProps(props: Properties, name: String, default: Properties): Properties = {
if(props.containsKey(name)) {
val propString = props.getProperty(name)
val propValues = propString.split(",")
if(propValues.length < 1)
throw new IllegalArgumentException("Illegal format of specifying properties '" + propString + "'")
val properties = new Properties
for(i <- 0 until propValues.length) {
val prop = propValues(i).split("=")
if(prop.length != 2)
throw new IllegalArgumentException("Illegal format of specifying properties '" + propValues(i) + "'")
properties.put(prop(0), prop(1))
}
properties
}
else
default
}
/**
* Open a channel for the given file
*/
def openChannel(file: File, mutable: Boolean): FileChannel = {
if(mutable)
new RandomAccessFile(file, "rw").getChannel()
else
new FileInputStream(file).getChannel()
}
/**
* Do the given action and log any exceptions thrown without rethrowing them
* @param log The log method to use for logging. E.g. logger.warn
* @param action The action to execute
*/
def swallow(log: (Object, Throwable) => Unit, action: => Unit) = {
try {
action
} catch {
case e: Throwable => log(e.getMessage(), e)
}
}
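  // Illustrative use (a sketch; `someCloseable` is a hypothetical resource): run a
  // cleanup step and log, but do not propagate, any failure.
  //
  //   swallow(logger.warn, someCloseable.close())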
/**
* Test if two byte buffers are equal. In this case equality means having
* the same bytes from the current position to the limit
*/
def equal(b1: ByteBuffer, b2: ByteBuffer): Boolean = {
// two byte buffers are equal if their position is the same,
// their remaining bytes are the same, and their contents are the same
if(b1.position != b2.position)
return false
if(b1.remaining != b2.remaining)
return false
for(i <- 0 until b1.remaining)
if(b1.get(i) != b2.get(i))
return false
return true
}
/**
* Translate the given buffer into a string
* @param buffer The buffer to translate
* @param encoding The encoding to use in translating bytes to characters
*/
def toString(buffer: ByteBuffer, encoding: String): String = {
val bytes = new Array[Byte](buffer.remaining)
buffer.get(bytes)
new String(bytes, encoding)
}
/**
* Print an error message and shutdown the JVM
* @param message The error message
*/
def croak(message: String) {
System.err.println(message)
System.exit(1)
}
/**
* Recursively delete the given file/directory and any subfiles (if any exist)
* @param file The root file at which to begin deleting
*/
def rm(file: String): Unit = rm(new File(file))
/**
* Recursively delete the given file/directory and any subfiles (if any exist)
* @param file The root file at which to begin deleting
*/
def rm(file: File): Unit = {
if(file == null) {
return
} else if(file.isDirectory) {
val files = file.listFiles()
if(files != null) {
for(f <- files)
rm(f)
}
file.delete()
} else {
file.delete()
}
}
/**
* Register the given mbean with the platform mbean server,
* unregistering any mbean that was there before
* @param mbean The object to register as an mbean
* @param name The name to register this mbean with
*/
def registerMBean(mbean: Object, name: String) {
val mbs = ManagementFactory.getPlatformMBeanServer()
mbs synchronized {
val objName = new ObjectName(name)
if(mbs.isRegistered(objName))
mbs.unregisterMBean(objName)
mbs.registerMBean(mbean, objName)
}
}
/**
* Unregister the mbean with the given name, if there is one registered
* @param name The mbean name to unregister
*/
def unregisterMBean(name: String) {
val mbs = ManagementFactory.getPlatformMBeanServer()
mbs synchronized {
val objName = new ObjectName(name)
if(mbs.isRegistered(objName))
mbs.unregisterMBean(objName)
}
}
/**
* Read an unsigned integer from the current position in the buffer,
* incrementing the position by 4 bytes
   * @param buffer The buffer to read from
* @return The integer read, as a long to avoid signedness
*/
def getUnsignedInt(buffer: ByteBuffer): Long =
buffer.getInt() & 0xffffffffL
/**
   * Read an unsigned integer from the given position without modifying the buffer's
   * position
   * @param buffer The buffer to read from
* @param index the index from which to read the integer
* @return The integer read, as a long to avoid signedness
*/
def getUnsignedInt(buffer: ByteBuffer, index: Int): Long =
buffer.getInt(index) & 0xffffffffL
/**
* Write the given long value as a 4 byte unsigned integer. Overflow is ignored.
* @param buffer The buffer to write to
* @param value The value to write
*/
def putUnsignedInt(buffer: ByteBuffer, value: Long): Unit =
buffer.putInt((value & 0xffffffffL).asInstanceOf[Int])
/**
* Write the given long value as a 4 byte unsigned integer. Overflow is ignored.
* @param buffer The buffer to write to
* @param index The position in the buffer at which to begin writing
* @param value The value to write
*/
def putUnsignedInt(buffer: ByteBuffer, index: Int, value: Long): Unit =
buffer.putInt(index, (value & 0xffffffffL).asInstanceOf[Int])
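  // Illustrative example: values above Int.MaxValue survive the round trip because only
  // the low 32 bits are stored and the read masks with 0xffffffffL.
  //
  //   val buf = ByteBuffer.allocate(4)
  //   putUnsignedInt(buf, 0xFFFFFFFFL)   // stored as the signed int -1
  //   getUnsignedInt(buf, 0)             // == 4294967295L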
/**
* Compute the CRC32 of the byte array
* @param bytes The array to compute the checksum for
* @return The CRC32
*/
def crc32(bytes: Array[Byte]): Long = crc32(bytes, 0, bytes.length)
/**
   * Compute the CRC32 of the segment of the byte array given by the specified size and offset
* @param bytes The bytes to checksum
* @param offset the offset at which to begin checksumming
* @param size the number of bytes to checksum
* @return The CRC32
*/
def crc32(bytes: Array[Byte], offset: Int, size: Int): Long = {
val crc = new CRC32()
crc.update(bytes, offset, size)
crc.getValue()
}
/**
* Compute the hash code for the given items
*/
def hashcode(as: Any*): Int = {
if(as == null)
return 0
var h = 1
var i = 0
    while(i < as.length) {
      if(as(i) != null)
        h = 31 * h + as(i).hashCode
      i += 1   // always advance, even past null elements, to avoid an infinite loop
    }
return h
}
/**
* Group the given values by keys extracted with the given function
*/
def groupby[K,V](vals: Iterable[V], f: V => K): Map[K,List[V]] = {
val m = new mutable.HashMap[K, List[V]]
for(v <- vals) {
val k = f(v)
m.get(k) match {
case Some(l: List[V]) => m.put(k, v :: l)
case None => m.put(k, List(v))
}
}
m
}
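  // Illustrative example: keys come from the grouping function; elements are prepended to
  // each group's list, so every list ends up in reverse iteration order.
  //
  //   groupby(Seq(1, 2, 3, 4), (x: Int) => x % 2)
  //   // == Map(1 -> List(3, 1), 0 -> List(4, 2))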
/**
* Read some bytes into the provided buffer, and return the number of bytes read. If the
* channel has been closed or we get -1 on the read for any reason, throw an EOFException
*/
def read(channel: ReadableByteChannel, buffer: ByteBuffer): Int = {
channel.read(buffer) match {
case -1 => throw new EOFException("Received -1 when reading from channel, socket has likely been closed.")
case n: Int => n
}
}
def notNull[V](v: V) = {
if(v == null)
throw new IllegalArgumentException("Value cannot be null.")
else
v
}
def getHostPort(hostport: String) : Tuple2[String, Int] = {
val splits = hostport.split(":")
(splits(0), splits(1).toInt)
}
def getTopicPartition(topicPartition: String) : Tuple2[String, Int] = {
val index = topicPartition.lastIndexOf('-')
(topicPartition.substring(0,index), topicPartition.substring(index+1).toInt)
}
def stackTrace(e: Throwable): String = {
    val sw = new StringWriter
    val pw = new PrintWriter(sw)
    e.printStackTrace(pw)
    sw.toString()
}
/**
   * This method takes comma-separated values containing key:value pairs and returns a map of
   * key-value pairs. The format of allCSVals is key1:val1,key2:val2,...
*/
private def getCSVMap[K, V](allCSVals: String, exceptionMsg:String, successMsg:String) :Map[K, V] = {
val map = new mutable.HashMap[K, V]
if("".equals(allCSVals))
return map
val csVals = allCSVals.split(",")
for(i <- 0 until csVals.length)
{
try{
val tempSplit = csVals(i).split(":")
logger.info(successMsg + tempSplit(0) + " : " + Integer.parseInt(tempSplit(1).trim))
map += tempSplit(0).asInstanceOf[K] -> Integer.parseInt(tempSplit(1).trim).asInstanceOf[V]
} catch {
case _ => logger.error(exceptionMsg + ": " + csVals(i))
}
}
map
}
def getTopicRentionHours(retentionHours: String) : Map[String, Int] = {
val exceptionMsg = "Malformed token for topic.log.retention.hours in server.properties: "
val successMsg = "The retention hour for "
getCSVMap(retentionHours, exceptionMsg, successMsg)
}
def getTopicFlushIntervals(allIntervals: String) : Map[String, Int] = {
val exceptionMsg = "Malformed token for topic.flush.Intervals.ms in server.properties: "
val successMsg = "The flush interval for "
getCSVMap(allIntervals, exceptionMsg, successMsg)
}
def getTopicPartitions(allPartitions: String) : Map[String, Int] = {
val exceptionMsg = "Malformed token for topic.partition.counts in server.properties: "
val successMsg = "The number of partitions for topic "
getCSVMap(allPartitions, exceptionMsg, successMsg)
}
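  // Illustrative example (values are trimmed but keys are not, so avoid spaces after the
  // commas):
  //
  //   getTopicPartitions("topic1:4,topic2:8")   // == Map("topic1" -> 4, "topic2" -> 8)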
def getConsumerTopicMap(consumerTopicString: String) : Map[String, Int] = {
val exceptionMsg = "Malformed token for embeddedconsumer.topics in consumer.properties: "
val successMsg = "The number of consumer thread for topic "
getCSVMap(consumerTopicString, exceptionMsg, successMsg)
}
def getObject[T<:AnyRef](className: String): T = {
className match {
case null => null.asInstanceOf[T]
case _ =>
val clazz = Class.forName(className)
val clazzT = clazz.asInstanceOf[Class[T]]
val constructors = clazzT.getConstructors
require(constructors.length == 1)
constructors.head.newInstance().asInstanceOf[T]
}
}
def propertyExists(prop: String): Boolean = {
if(prop == null)
false
else if(prop.compareTo("") == 0)
false
else true
}
}
class SnapshotStats(private val monitorDurationNs: Long = 600L * 1000L * 1000L * 1000L) {
private val time: Time = SystemTime
private val complete = new AtomicReference(new Stats())
private val current = new AtomicReference(new Stats())
private val numCumulatedRequests = new AtomicLong(0)
def recordRequestMetric(requestNs: Long) {
val stats = current.get
stats.add(requestNs)
numCumulatedRequests.getAndAdd(1)
val ageNs = time.nanoseconds - stats.start
// if the current stats are too old it is time to swap
if(ageNs >= monitorDurationNs) {
val swapped = current.compareAndSet(stats, new Stats())
if(swapped) {
complete.set(stats)
stats.end.set(time.nanoseconds)
}
}
}
def recordThroughputMetric(data: Long) {
val stats = current.get
stats.addData(data)
val ageNs = time.nanoseconds - stats.start
// if the current stats are too old it is time to swap
if(ageNs >= monitorDurationNs) {
val swapped = current.compareAndSet(stats, new Stats())
if(swapped) {
complete.set(stats)
stats.end.set(time.nanoseconds)
}
}
}
def getNumRequests(): Long = numCumulatedRequests.get
def getRequestsPerSecond: Double = {
val stats = complete.get
stats.numRequests / stats.durationSeconds
}
def getThroughput: Double = {
val stats = complete.get
stats.totalData / stats.durationSeconds
}
def getAvgMetric: Double = {
val stats = complete.get
if (stats.numRequests == 0) {
0
}
else {
stats.totalRequestMetric / stats.numRequests
}
}
def getMaxMetric: Double = complete.get.maxRequestMetric
class Stats {
val start = time.nanoseconds
var end = new AtomicLong(-1)
var numRequests = 0
var totalRequestMetric: Long = 0L
var maxRequestMetric: Long = 0L
var totalData: Long = 0L
private val lock = new Object()
def addData(data: Long) {
lock synchronized {
totalData += data
}
}
def add(requestNs: Long) {
lock synchronized {
numRequests +=1
totalRequestMetric += requestNs
maxRequestMetric = scala.math.max(maxRequestMetric, requestNs)
}
}
def durationSeconds: Double = (end.get - start) / (1000.0 * 1000.0 * 1000.0)
def durationMs: Double = (end.get - start) / (1000.0 * 1000.0)
}
}
| quipo/kafka | core/src/main/scala/kafka/utils/Utils.scala | Scala | apache-2.0 | 20,701 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.sql.SparkSession
object HdfsTest {
/** Usage: HdfsTest [file] */
def main(args: Array[String]) {
if (args.length < 1) {
System.err.println("Usage: HdfsTest <file>")
System.exit(1)
}
val spark = SparkSession
.builder
.appName("HdfsTest")
.getOrCreate()
val file = spark.read.text(args(0)).rdd
val mapped = file.map(s => s.length).cache()
for (iter <- 1 to 10) {
val start = System.currentTimeMillis()
for (x <- mapped) {
x + 2
}
val end = System.currentTimeMillis()
println("Iteration " + iter + " took " + (end - start) + " ms")
}
spark.stop()
}
}
// scalastyle:on println
| chgm1006/spark-app | src/main/scala/org/apache/spark/examples/HdfsTest.scala | Scala | apache-2.0 | 1,676 |
package org.jetbrains.plugins.scala.conversion.copy
import com.intellij.openapi.actionSystem.IdeActions
import com.intellij.testFramework.EditorTestUtil
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.util.TypeAnnotationSettings
abstract class CopyPasteTestBase extends ScalaLightCodeInsightFixtureTestAdapter {
protected val Start = EditorTestUtil.SELECTION_START_TAG
protected val End = EditorTestUtil.SELECTION_END_TAG
protected val Caret = EditorTestUtil.CARET_TAG
val fromLangExtension: String = ".scala"
private var oldSettings: ScalaCodeStyleSettings = _
override protected def setUp(): Unit = {
super.setUp()
val project = getProject
oldSettings = ScalaCodeStyleSettings.getInstance(project)
TypeAnnotationSettings.set(project, TypeAnnotationSettings.alwaysAddType(oldSettings))
}
override def tearDown(): Unit = {
TypeAnnotationSettings.set(getProject, oldSettings)
super.tearDown()
}
protected def doTest(from: String, to: String, after: String): Unit = {
    def normalize(s: String): String = s.replace("\r", "")
myFixture.configureByText(s"from.$fromLangExtension", normalize(from))
myFixture.performEditorAction(IdeActions.ACTION_COPY)
myFixture.configureByText("to.scala", normalize(to))
myFixture.performEditorAction(IdeActions.ACTION_PASTE)
myFixture.checkResult(normalize(after), true)
}
protected def doTestWithStrip(from: String, to: String, after: String): Unit = {
doTest(from.stripMargin, to.stripMargin, after.stripMargin)
}
protected def doTestToEmptyFile(fromText: String, expectedText: String): Unit = {
doTest(fromText, Caret, expectedText)
}
} | JetBrains/intellij-scala | scala/conversion/test/org/jetbrains/plugins/scala/conversion/copy/CopyPasteTestBase.scala | Scala | apache-2.0 | 1,817 |
package com.twitter.concurrent
import com.twitter.concurrent.Spool.{*::, seqToSpool}
import com.twitter.conversions.time.intToTimeableNumber
import com.twitter.util.{Await, Future, Promise, Return, Throw}
import java.io.EOFException
import org.junit.runner.RunWith
import org.scalacheck.Arbitrary
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import scala.collection.mutable.ArrayBuffer
@RunWith(classOf[JUnitRunner])
class SpoolTest extends WordSpec with GeneratorDrivenPropertyChecks {
"Empty Spool" should {
val s = Spool.empty[Int]
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
assert(xs.size == 0)
}
"map" in {
assert((s map { _ * 2 } ) == Spool.empty[Int])
}
"mapFuture" in {
val mapFuture = s mapFuture { Future.value(_) }
assert(mapFuture.poll == Some(Return(s)))
}
"deconstruct" in {
assert(s match {
case x *:: Future(rest) => false
case _ => true
})
}
"append via ++" in {
assert((s ++ Spool.empty[Int]) == Spool.empty[Int])
assert((Spool.empty[Int] ++ s) == Spool.empty[Int])
val s2 = s ++ (3 *:: Future.value(4 *:: Future.value(Spool.empty[Int])))
assert(Await.result(s2.toSeq) == Seq(3, 4))
}
"append via ++ with Future rhs" in {
assert(Await.result(s ++ Future(Spool.empty[Int])) == Spool.empty[Int])
assert(Await.result(Spool.empty[Int] ++ Future(s)) == Spool.empty[Int])
val s2 = s ++ Future(3 *:: Future.value(4 *:: Future.value(Spool.empty[Int])))
assert(Await.result(s2 flatMap (_.toSeq)) == Seq(3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(x.toString *:: Future.value((x * 2).toString *:: Future.value(Spool.empty[String])))
assert(Await.result(s flatMap f) == Spool.empty[Int])
}
"fold left" in {
val fold = s.foldLeft(0){(x, y) => x + y}
assert(Await.result(fold) == 0)
}
"reduce left" in {
val fold = s.reduceLeft{(x, y) => x + y}
intercept[UnsupportedOperationException] {
Await.result(fold)
}
}
"zip with empty" in {
val result = s.zip(Spool.empty[Int])
assert(Await.result(result.toSeq) == Nil)
}
"zip with non-empty" in {
val result = s.zip(Seq(1,2,3).toSpool)
assert(Await.result(result.toSeq) == Nil)
}
"take" in {
assert(s.take(10) == Spool.empty[Int])
}
}
"Simple resolved Spool" should {
val s = 1 *:: Future.value(2 *:: Future.value(Spool.empty[Int]))
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
assert(xs.toSeq == Seq(1,2))
}
"buffer to a sequence" in {
assert(Await.result(s.toSeq) == Seq(1, 2))
}
"map" in {
assert(Await.result(s.map { _ * 2 }.toSeq) == Seq(2, 4))
}
"mapFuture" in {
val f = s.mapFuture { Future.value(_) }.flatMap { _.toSeq }.poll
assert(f == Some(Return(Seq(1, 2))))
}
"deconstruct" in {
assert(s match {
case x *:: Future(Return(rest)) =>
assert(x == 1)
rest match {
case y *:: Future(Return(rest)) if y == 2 && rest.isEmpty => true
}
})
}
"append via ++" in {
assert(Await.result((s ++ Spool.empty[Int]).toSeq) == Seq(1, 2))
assert(Await.result((Spool.empty[Int] ++ s).toSeq) == Seq(1, 2))
val s2 = s ++ (3 *:: Future.value(4 *:: Future.value(Spool.empty[Int])))
assert(Await.result(s2.toSeq) == Seq(1, 2, 3, 4))
}
"append via ++ with Future rhs" in {
assert(Await.result(s ++ Future.value(Spool.empty[Int]) flatMap (_.toSeq)) == Seq(1, 2))
assert(Await.result(Spool.empty[Int] ++ Future.value(s) flatMap (_.toSeq)) == Seq(1, 2))
val s2 = s ++ Future.value(3 *:: Future.value(4 *:: Future.value(Spool.empty[Int])))
assert(Await.result(s2 flatMap (_.toSeq)) == Seq(1, 2, 3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(Seq(x.toString, (x * 2).toString).toSpool)
val s2 = s flatMap f
assert(Await.result(s2 flatMap (_.toSeq)) == Seq("1", "2", "2", "4"))
}
"fold left" in {
val fold = s.foldLeft(0){(x, y) => x + y}
assert(Await.result(fold) == 3)
}
"reduce left" in {
val fold = s.reduceLeft{(x, y) => x + y}
assert(Await.result(fold) == 3)
}
"zip with empty" in {
val zip = s.zip(Spool.empty[Int])
assert(Await.result(zip.toSeq) == Nil)
}
"zip with same size spool" in {
val zip = s.zip(Seq("a","b").toSpool)
assert(Await.result(zip.toSeq) == Seq((1, "a"), (2, "b")))
}
"zip with larger spool" in {
val zip = s.zip(Seq("a","b", "c", "d").toSpool)
assert(Await.result(zip.toSeq) == Seq((1, "a"), (2, "b")))
}
"be roundtrippable through toSeq/toSpool" in {
val seq = (0 to 10).toSeq
assert(Await.result(seq.toSpool.toSeq) == seq)
}
"flatten via flatMap of toSpool" in {
val spool = Seq(Seq(1, 2), Seq(3, 4)).toSpool
val seq = Await.result(spool.toSeq)
val flatSpool =
spool.flatMap { inner =>
Future.value(inner.toSpool)
}
assert(Await.result(flatSpool.flatMap(_.toSeq)) == seq.flatten)
}
"take" in {
val ls = (1 to 4).toSeq.toSpool
assert(Await.result(ls.take(2).toSeq) == Seq(1,2))
assert(Await.result(ls.take(1).toSeq) == Seq(1))
assert(Await.result(ls.take(0).toSeq) == Seq.empty)
assert(Await.result(ls.take(-2).toSeq) == Seq.empty)
}
}
"Simple resolved spool with EOFException" should {
val p = new Promise[Spool[Int]](Throw(new EOFException("sad panda")))
val s = 1 *:: Future.value(2 *:: p)
"EOF iteration on EOFException" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
assert(xs.toSeq == Seq(Some(1), Some(2), None))
}
}
"Simple resolved spool with error" should {
val p = new Promise[Spool[Int]](Throw(new Exception("sad panda")))
val s = 1 *:: Future.value(2 *:: p)
"return with exception on error" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
intercept[Exception] {
Await.result(s.toSeq)
}
}
"return with exception on error in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new Exception("sad panda") }
intercept[Exception] {
Await.result(f)
}
}
"return with exception on EOFException in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new EOFException("sad panda") }
intercept[EOFException] {
Await.result(f)
}
}
}
"Simple delayed Spool" should {
class SimpleDelayedSpoolHelper {
val p = new Promise[Spool[Int]]
val p1 = new Promise[Spool[Int]]
val p2 = new Promise[Spool[Int]]
val s = 1 *:: p
}
"iterate as results become available" in {
val h = new SimpleDelayedSpoolHelper
import h._
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
assert(xs.toSeq == Seq(1))
p() = Return(2 *:: p1)
assert(xs.toSeq == Seq(1, 2))
p1() = Return(Spool.empty)
assert(xs.toSeq == Seq(1, 2))
}
"EOF iteration on EOFException" in {
val h = new SimpleDelayedSpoolHelper
import h._
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
assert(xs.toSeq == Seq(Some(1)))
p() = Throw(new EOFException("sad panda"))
assert(xs.toSeq == Seq(Some(1), None))
}
"return with exception on error" in {
val h = new SimpleDelayedSpoolHelper
import h._
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
assert(xs.toSeq == Seq(Some(1)))
p() = Throw(new Exception("sad panda"))
intercept[Exception] {
Await.result(s.toSeq)
}
}
"return with exception on error in callback" in {
val h = new SimpleDelayedSpoolHelper
import h._
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new Exception("sad panda") }
p() = Return(2 *:: p1)
intercept[Exception] {
Await.result(f)
}
}
"return with exception on EOFException in callback" in {
val h = new SimpleDelayedSpoolHelper
import h._
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new EOFException("sad panda") }
p() = Return(2 *:: p1)
intercept[EOFException] {
Await.result(f)
}
}
"return a buffered seq when complete" in {
val h = new SimpleDelayedSpoolHelper
import h._
val f = s.toSeq
assert(f.isDefined == false)
p() = Return(2 *:: p1)
assert(f.isDefined == false)
p1() = Return(Spool.empty)
assert(f.isDefined == true)
assert(Await.result(f) == Seq(1,2))
}
"deconstruct" in {
val h = new SimpleDelayedSpoolHelper
import h._
assert(s match {
case fst *:: rest if fst == 1 && !rest.isDefined => true
case _ => false
})
}
"collect" in {
val h = new SimpleDelayedSpoolHelper
import h._
val f = s collect {
case x if x % 2 == 0 => x * 2
}
      assert(f.isDefined == false) // 1 % 2 != 0, so nothing has been collected yet
p() = Return(2 *:: p1)
assert(f.isDefined == true)
val s1 = Await.result(f)
assert(s1 match {
case x *:: rest if x == 4 && !rest.isDefined => true
case _ => false
})
p1() = Return(3 *:: p2)
assert(s1 match {
case x *:: rest if x == 4 && !rest.isDefined => true
case _ => false
})
p2() = Return(4 *:: Future.value(Spool.empty[Int]))
val s1s = s1.toSeq
assert(s1s.isDefined == true)
assert(Await.result(s1s) == Seq(4, 8))
}
"fold left" in {
val h = new SimpleDelayedSpoolHelper
import h._
val f = s.foldLeft(0){(x, y) => x + y}
assert(f.isDefined == false)
p() = Return(2 *:: p1)
assert(f.isDefined == false)
p1() = Return(Spool.empty)
assert(f.isDefined == true)
assert(Await.result(f) == 3)
}
"take while" in {
val h = new SimpleDelayedSpoolHelper
import h._
val taken = s.takeWhile(_ < 3)
assert(taken.isEmpty == false)
val f = taken.toSeq
assert(f.isDefined == false)
p() = Return(2 *:: p1)
assert(f.isDefined == false)
p1() = Return(3 *:: p2)
// despite the Spool having an unfulfilled tail, the takeWhile is satisfied
assert(f.isDefined == true)
assert(Await.result(f) == Seq(1, 2))
}
}
  // This set of tests asserts that any method called on Spool consumes only the
// head, and doesn't force the tail. An exception is collect, which consumes
// up to the first defined value. This is because collect takes a
// PartialFunction, which if not defined for head, recurses on the tail,
// forcing it.
"Lazily evaluated Spool" should {
"be constructed lazily" in {
applyLazily(Future.value _)
}
"collect lazily" in {
applyLazily { spool =>
spool.collect {
case x if x % 2 == 0 => x
}
}
}
"map lazily" in {
applyLazily { spool =>
Future.value(spool.map(_ + 1))
}
}
"mapFuture lazily" in {
applyLazily { spool =>
spool.mapFuture(Future.value(_))
}
}
"flatMap lazily" in {
applyLazily { spool =>
spool.flatMap { item =>
Future.value((item to (item + 5)).toSpool)
}
}
}
"takeWhile lazily" in {
applyLazily { spool =>
Future.value {
spool.takeWhile(_ < Int.MaxValue)
}
}
}
"take lazily" in {
applyLazily { spool =>
Future.value {
spool.take(2)
}
}
}
"zip lazily" in {
applyLazily { spool =>
Future.value(spool.zip(spool).map { case (a,b) => a+b })
}
}
"act eagerly when forced" in {
val (spool, tailReached) =
applyLazily { spool =>
Future.value(spool.map(_ + 1))
}
Await.ready { spool.map(_.force) }
assert(tailReached.isDefined)
}
/**
* Confirms that the given operation does not consume an entire Spool, and then
* returns the resulting Spool and tail check for further validation.
*/
def applyLazily(f: Spool[Int]=>Future[Spool[Int]]): (Future[Spool[Int]], Future[Spool[Int]]) = {
val tail = new Promise[Spool[Int]]
// A spool where only the head is valid.
def spool: Spool[Int] = 0 *:: { tail.setException(new Exception); tail }
// create, apply, poll
val s = f(spool)
assert(!tail.isDefined)
(s, tail)
}
}
// Note: ++ is different from the other methods because it doesn't force its
// argument.
"lazy ++" in {
val nil = Future.value(Spool.empty[Unit])
val a = () *:: nil
val p = new Promise[Unit]
def spool = {
p.setDone()
() *:: nil
}
def f = Future.value(spool)
assert(!p.isDefined)
val b = a ++ spool
assert(!p.isDefined)
val c = a ++ f
assert(!p.isDefined)
Await.result(c.flatMap(_.tail).select(b.tail))
assert(p.isDefined)
}
"Spool.merge should merge" in {
forAll(Arbitrary.arbitrary[List[List[Int]]]) { ss =>
val all = Spool.merge(ss.map(s => Future.value(s.toSpool)))
val interleaved = ss.flatMap(_.zipWithIndex).sortBy(_._2).map(_._1)
assert(Await.result(all.flatMap(_.toSeq)) == interleaved)
}
}
"Spool.merge should merge round robin" in {
val spools: Seq[Future[Spool[String]]] = Seq(
"a" *:: Future.value("b" *:: Future.value("c" *:: Future.value(Spool.empty[String]))),
"1" *:: Future.value("2" *:: Future.value("3" *:: Future.value(Spool.empty[String]))),
Spool.empty,
"foo" *:: Future.value("bar" *:: Future.value("baz" *:: Future.value(Spool.empty[String])))
).map(Future.value)
assert(
Await.result(Spool.merge(spools).flatMap(_.toSeq), 5.seconds) ==
Seq("a", "1", "foo", "b", "2", "bar", "c", "3", "baz")
)
}
"Spool.distinctBy should distinct" in {
forAll { (s: List[Char]) =>
val d = s.toSpool.distinctBy(x => x).toSeq
assert(Await.result(d).toSet == s.toSet)
}
}
"Spool.distinctBy should distinct by in order" in {
val spool: Spool[String] =
"ac" *:: Future.value("bbe" *:: Future.value("ab" *:: Future.value(Spool.empty[String])))
assert(Await.result(spool.distinctBy(_.length).toSeq, 5.seconds) == Seq("ac", "bbe"))
}
}
| BuoyantIO/twitter-util | util-core/src/test/scala/com/twitter/concurrent/SpoolTest.scala | Scala | apache-2.0 | 14,827 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import java.io.{IOException, ObjectInputStream}
import scala.collection.mutable
import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path, PathFilter}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark.rdd.{RDD, UnionRDD}
import org.apache.spark.streaming._
import org.apache.spark.streaming.scheduler.StreamInputInfo
import org.apache.spark.util.{SerializableConfiguration, TimeStampedHashMap, Utils}
/**
* This class represents an input stream that monitors a Hadoop-compatible filesystem for new
 * files and creates a stream out of them. The way it works is as follows.
*
* At each batch interval, the file system is queried for files in the given directory and
* detected new files are selected for that batch. In this case "new" means files that
* became visible to readers during that time period. Some extra care is needed to deal
* with the fact that files may become visible after they are created. For this purpose, this
* class remembers the information about the files selected in past batches for
* a certain duration (say, "remember window") as shown in the figure below.
*
* |<----- remember window ----->|
* ignore threshold --->| |<--- current batch time
* |____.____.____.____.____.____|
* | | | | | | |
* ---------------------|----|----|----|----|----|----|-----------------------> Time
* |____|____|____|____|____|____|
* remembered batches
*
* The trailing end of the window is the "ignore threshold" and all files whose mod times
* are less than this threshold are assumed to have already been selected and are therefore
* ignored. Files whose mod times are within the "remember window" are checked against files
* that have already been selected. At a high level, this is how new files are identified in
* each batch - files whose mod times are greater than the ignore threshold and
* have not been considered within the remember window. See the documentation on the method
* `isNewFile` for more details.
*
* This makes some assumptions from the underlying file system that the system is monitoring.
 * - The clock of the file system is assumed to be synchronized with the clock of the machine running
* the streaming app.
* - If a file is to be visible in the directory listings, it must be visible within a certain
* duration of the mod time of the file. This duration is the "remember window", which is set to
* 1 minute (see `FileInputDStream.minRememberDuration`). Otherwise, the file will never be
* selected as the mod time will be less than the ignore threshold when it becomes visible.
* - Once a file is visible, the mod time cannot change. If it does due to appends, then the
* processing semantics are undefined.
*/
private[streaming]
class FileInputDStream[K, V, F <: NewInputFormat[K, V]](
@transient ssc_ : StreamingContext,
directory: String,
filter: Path => Boolean = FileInputDStream.defaultFilter,
newFilesOnly: Boolean = true,
conf: Option[Configuration] = None)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F])
extends InputDStream[(K, V)](ssc_) {
private val serializableConfOpt = conf.map(new SerializableConfiguration(_))
/**
* Minimum duration of remembering the information of selected files. Defaults to 60 seconds.
* Files with mod times older than this "window" of remembering will be ignored. So if new
* files are visible within this window, then the file will get selected in the next batch.
*/
private val minRememberDurationS = {
Seconds(ssc.conf.getTimeAsSeconds("spark.streaming.fileStream.minRememberDuration",
ssc.conf.get("spark.streaming.minRememberDuration", "60s")))
}
// This is a def so that it works during checkpoint recovery:
private def clock = ssc.scheduler.clock
// Data to be saved as part of the streaming checkpoints
protected[streaming] override val checkpointData = new FileInputDStreamCheckpointData
// Initial ignore threshold based on which old, existing files in the directory (at the time of
// starting the streaming application) will be ignored or considered
private val initialModTimeIgnoreThreshold = if (newFilesOnly) clock.getTimeMillis() else 0L
/*
* Make sure that the information of files selected in the last few batches are remembered.
* This would allow us to filter away not-too-old files which have already been recently
* selected and processed.
*/
private val numBatchesToRemember = FileInputDStream
.calculateNumBatchesToRemember(slideDuration, minRememberDurationS)
private val durationToRemember = slideDuration * numBatchesToRemember
remember(durationToRemember)
// Map of batch-time to selected file info for the remembered batches
// This is a concurrent map because it's also accessed in unit tests
@transient private[streaming] var batchTimeToSelectedFiles =
new mutable.HashMap[Time, Array[String]] with mutable.SynchronizedMap[Time, Array[String]]
// Set of files that were selected in the remembered batches
@transient private var recentlySelectedFiles = new mutable.HashSet[String]()
// Read-through cache of file mod times, used to speed up mod time lookups
@transient private var fileToModTime = new TimeStampedHashMap[String, Long](true)
// Timestamp of the last round of finding files
@transient private var lastNewFileFindingTime = 0L
@transient private var path_ : Path = null
@transient private var fs_ : FileSystem = null
override def start() { }
override def stop() { }
/**
* Finds the files that were modified since the last time this method was called and makes
* a union RDD out of them. Note that this maintains the list of files that were processed
* in the latest modification time in the previous call to this method. This is because the
* modification time returned by the FileStatus API seems to return times only at the
* granularity of seconds. And new files may have the same modification time as the
* latest modification time in the previous call to this method yet was not reported in
* the previous call.
*/
override def compute(validTime: Time): Option[RDD[(K, V)]] = {
    // Find new files: use findNewFiles to pick up the files that appeared after validTime
val newFiles = findNewFiles(validTime.milliseconds)
    logInfo("New files at time " + validTime + ":\n" + newFiles.mkString("\n"))
batchTimeToSelectedFiles += ((validTime, newFiles))
recentlySelectedFiles ++= newFiles
    // With the new files found, build a single RDD covering all of them via filesToRDD
val rdds = Some(filesToRDD(newFiles))
    // Copy newFiles to an immutable.List to prevent it from being modified by the user
val metadata = Map(
"files" -> newFiles.toList,
      StreamInputInfo.METADATA_KEY_DESCRIPTION -> newFiles.mkString("\n"))
val inputInfo = StreamInputInfo(id, 0, metadata)
ssc.scheduler.inputInfoTracker.reportInfo(validTime, inputInfo)
rdds
}
/**
   * Clear the old time-to-files mappings along with old RDDs.
   */
protected[streaming] override def clearMetadata(time: Time) {
super.clearMetadata(time)
val oldFiles = batchTimeToSelectedFiles.filter(_._1 < (time - rememberDuration))
batchTimeToSelectedFiles --= oldFiles.keys
recentlySelectedFiles --= oldFiles.values.flatten
logInfo("Cleared " + oldFiles.size + " old files that were older than " +
(time - rememberDuration) + ": " + oldFiles.keys.mkString(", "))
    logDebug("Cleared files are:\n" +
      oldFiles.map(p => (p._1, p._2.mkString(", "))).mkString("\n"))
// Delete file mod times that weren't accessed in the last round of getting new files
fileToModTime.clearOldValues(lastNewFileFindingTime - 1)
}
/**
* Find new files for the batch of `currentTime`. This is done by first calculating the
* ignore threshold for file mod times, and then getting a list of files filtered based on
* the current batch time and the ignore threshold. The ignore threshold is the max of
* initial ignore threshold and the trailing end of the remember window (that is, which ever
* is later in time).
*/
private def findNewFiles(currentTime: Long): Array[String] = {
try {
lastNewFileFindingTime = clock.getTimeMillis()
// Calculate ignore threshold
val modTimeIgnoreThreshold = math.max(
initialModTimeIgnoreThreshold, // initial threshold based on newFilesOnly setting
currentTime - durationToRemember.milliseconds // trailing end of the remember window
)
logDebug(s"Getting new files for time $currentTime, " +
s"ignoring files older than $modTimeIgnoreThreshold")
val filter = new PathFilter {
def accept(path: Path): Boolean = isNewFile(path, currentTime, modTimeIgnoreThreshold)
}
val newFiles = fs.listStatus(directoryPath, filter).map(_.getPath.toString)
val timeTaken = clock.getTimeMillis() - lastNewFileFindingTime
logInfo("Finding new files took " + timeTaken + " ms")
logDebug("# cached file times = " + fileToModTime.size)
if (timeTaken > slideDuration.milliseconds) {
logWarning(
"Time taken to find new files exceeds the batch size. " +
"Consider increasing the batch size or reducing the number of " +
"files in the monitored directory."
)
}
newFiles
} catch {
case e: Exception =>
logWarning("Error finding new files", e)
reset()
Array.empty
}
}
/**
* Identify whether the given `path` is a new file for the batch of `currentTime`. For it to be
* accepted, it has to pass the following criteria.
* - It must pass the user-provided file filter.
* - It must be newer than the ignore threshold. It is assumed that files older than the ignore
* threshold have already been considered or are existing files before start
* (when newFileOnly = true).
* - It must not be present in the recently selected files that this class remembers.
* - It must not be newer than the time of the batch (i.e. `currentTime` for which this
* file is being tested. This can occur if the driver was recovered, and the missing batches
* (during downtime) are being generated. In that case, a batch of time T may be generated
* at time T+x. Say x = 5. If that batch T contains file of mod time T+5, then bad things can
* happen. Let's say the selected files are remembered for 60 seconds. At time t+61,
* the batch of time t is forgotten, and the ignore threshold is still T+1.
* The files with mod time T+5 are not remembered and cannot be ignored (since, t+5 > t+1).
* Hence they can get selected as new files again. To prevent this, files whose mod time is more
* than current batch time are not considered.
*/
private def isNewFile(path: Path, currentTime: Long, modTimeIgnoreThreshold: Long): Boolean = {
val pathStr = path.toString
// Reject file if it does not satisfy filter
if (!filter(path)) {
logDebug(s"$pathStr rejected by filter")
return false
}
// Reject file if it was created before the ignore time
val modTime = getFileModTime(path)
if (modTime <= modTimeIgnoreThreshold) {
// Use <= instead of < to avoid SPARK-4518
logDebug(s"$pathStr ignored as mod time $modTime <= ignore time $modTimeIgnoreThreshold")
return false
}
// Reject file if mod time > current batch time
if (modTime > currentTime) {
logDebug(s"$pathStr not selected as mod time $modTime > current time $currentTime")
return false
}
// Reject file if it was considered earlier
if (recentlySelectedFiles.contains(pathStr)) {
logDebug(s"$pathStr already considered")
return false
}
logDebug(s"$pathStr accepted with mod time $modTime")
return true
}
/**
* Generate one RDD from an array of files
   */
private def filesToRDD(files: Seq[String]): RDD[(K, V)] = {
val fileRDDs = files.map { file =>
val rdd = serializableConfOpt.map(_.value) match {
case Some(config) => context.sparkContext.newAPIHadoopFile(
file,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
config)
case None => context.sparkContext.newAPIHadoopFile[K, V, F](file)
}
if (rdd.partitions.size == 0) {
logError("File " + file + " has no data in it. Spark Streaming can only ingest " +
"files that have been \\"moved\\" to the directory assigned to the file stream. " +
"Refer to the streaming programming guide for more details.")
}
rdd
}
new UnionRDD(context.sparkContext, fileRDDs)
}
  /**
   * Get file mod time from cache or fetch it from the file system.
   */
private def getFileModTime(path: Path) = {
fileToModTime.getOrElseUpdate(path.toString, fs.getFileStatus(path).getModificationTime())
}
private def directoryPath: Path = {
if (path_ == null) path_ = new Path(directory)
path_
}
private def fs: FileSystem = {
if (fs_ == null) fs_ = directoryPath.getFileSystem(ssc.sparkContext.hadoopConfiguration)
fs_
}
private def reset() {
fs_ = null
}
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
logDebug(this.getClass().getSimpleName + ".readObject used")
ois.defaultReadObject()
generatedRDDs = new mutable.HashMap[Time, RDD[(K, V)]]()
batchTimeToSelectedFiles =
new mutable.HashMap[Time, Array[String]] with mutable.SynchronizedMap[Time, Array[String]]
recentlySelectedFiles = new mutable.HashSet[String]()
fileToModTime = new TimeStampedHashMap[String, Long](true)
}
  /**
   * A custom version of the DStreamCheckpointData that stores names of
   * Hadoop files as checkpoint data.
   */
private[streaming]
class FileInputDStreamCheckpointData extends DStreamCheckpointData(this) {
private def hadoopFiles = data.asInstanceOf[mutable.HashMap[Time, Array[String]]]
override def update(time: Time) {
hadoopFiles.clear()
hadoopFiles ++= batchTimeToSelectedFiles
}
override def cleanup(time: Time) { }
override def restore() {
hadoopFiles.toSeq.sortBy(_._1)(Time.ordering).foreach {
case (t, f) => {
          // Restore the metadata in both files and generatedRDDs
logInfo("Restoring files for time " + t + " - " +
f.mkString("[", ", ", "]") )
batchTimeToSelectedFiles += ((t, f))
recentlySelectedFiles ++= f
generatedRDDs += ((t, filesToRDD(f)))
}
}
}
override def toString: String = {
"[\\n" + hadoopFiles.size + " file sets\\n" +
hadoopFiles.map(p => (p._1, p._2.mkString(", "))).mkString("\\n") + "\\n]"
}
}
}
private[streaming]
object FileInputDStream {
def defaultFilter(path: Path): Boolean = !path.getName().startsWith(".")
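  // For example (illustrative only): defaultFilter rejects hidden entries such as ".tmp" or
  // "._COPYING_", while names like "part-00000" are accepted.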
/**
* Calculate the number of last batches to remember, such that all the files selected in
 * at least the last minRememberDurationS duration can be remembered.
*/
def calculateNumBatchesToRemember(batchDuration: Duration,
minRememberDurationS: Duration): Int = {
math.ceil(minRememberDurationS.milliseconds.toDouble / batchDuration.milliseconds).toInt
}
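  // Worked example (illustrative, values are hypothetical): with a batch duration of 2 seconds
  // and minRememberDurationS of 60 seconds, ceil(60000.0 / 2000) = 30 batches are remembered.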
}
| tophua/spark1.52 | streaming/src/main/scala/org/apache/spark/streaming/dstream/FileInputDStream.scala | Scala | apache-2.0 | 17,699 |
package com.overviewdocs.blobstorage
import java.nio.file.Path
import scala.concurrent.Future
class BlobStorageStrategySpec extends StrategySpecification {
trait BlankStrategy extends BlobStorageStrategy {
override def get(location: String) = ???
override def getBytes(location: String, maxNBytes: Int) = ???
override def getUrl(location: String, mimeType: String) = ???
override def getUrlOpt(location: String, mimeType: String) = ???
override def delete(location: String): Future[Unit] = ???
override def create(locationPrefix: String, dataPath: Path): Future[String] = ???
}
"#deleteMany" should {
"fail immediately if any #delete fails immediately" in new BaseScope {
val testException = new Throwable("boo")
object TestStrategy extends BlankStrategy {
override def delete(location: String): Future[Unit] = {
if (location == "foo:bar") {
Future.unit
} else {
throw testException // synchronous
}
}
}
TestStrategy.deleteMany(Seq("foo:bar", "bar:baz")) should throwA(testException)
}
"fail eventually if any #delete fails eventually" in new BaseScope {
val testException = new Throwable("boo")
object TestStrategy extends BlankStrategy {
override def delete(location: String): Future[Unit] = {
if (location == "foo:bar") {
Future.unit
} else {
Future.failed(testException) // asynchronous
}
}
}
val future = TestStrategy.deleteMany(Seq("foo:bar", "bar:baz")) // no exception
await(future) must throwA(testException)
}
"succeed if every #delete succeeds" in new BaseScope {
object TestStrategy extends BlankStrategy {
override def delete(location: String) = Future.unit
}
await(TestStrategy.deleteMany(Seq("foo:bar", "bar:baz"))) must beEqualTo(())
}
}
}
| overview/overview-server | common/src/test/scala/com/overviewdocs/blobstorage/BlobStorageStrategySpec.scala | Scala | agpl-3.0 | 1,937 |
package com.grandata.commons.files
import java.nio.file.{FileSystems, FileSystem}
/**
* Created by gustavo on 26/03/15.
*/
class Glob extends GlobImpl with FileSystemComponent {
def fileSystem: FileSystem = FileSystems.getDefault
} | GranData/grandata-commons | src/main/scala/com/grandata/commons/files/Glob.scala | Scala | mit | 237 |
package io.taig.android.concurrent
import java.util.concurrent.TimeUnit
import android.os.AsyncTask
import com.google.android.gms.tasks.TaskExecutors
import io.taig.android.log.Log
import io.taig.android.util._
import monix.execution.Scheduler
import scala.concurrent.ExecutionContext
object Executor {
/**
* Pool thread Scheduler
*/
implicit val Pool: Scheduler = Scheduler {
ExecutionContext.fromExecutor(
AsyncTask.THREAD_POOL_EXECUTOR,
report(_, Log.Tag(Pool.getClass.getCanonicalName))
)
}
/**
* Single thread Scheduler
*/
val Single: Scheduler = Scheduler {
ExecutionContext.fromExecutor(
AsyncTask.SERIAL_EXECUTOR,
report(_, Log.Tag(Single.getClass.getCanonicalName))
)
}
/**
* Ui thread Scheduler
*/
val Ui: Scheduler = Scheduler {
ExecutionContext.fromExecutor(
TaskExecutors.MAIN_THREAD,
report(_, Log.Tag(Ui.getClass.getCanonicalName))
)
}
/**
* Run on the Ui-Thread
*/
def Ui(body: ⇒ Unit): Unit = Ui.execute(() ⇒ body)
def Ui(body: ⇒ Unit, delay: Long): Unit = {
Ui.scheduleOnce(delay, TimeUnit.MILLISECONDS, () ⇒ body)
}
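  // Hypothetical usage sketch (identifiers below are illustrative, not from this codebase):
  //   Executor.Ui { progressBar.setVisibility(View.GONE) } // run on the main thread
  //   Executor.Ui(toast.show(), delay = 250L) // run on the main thread after ~250 ms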
private[concurrent] def report(exception: Throwable, tag: Log.Tag): Unit = {
Log.e("Failure during asynchronous operation", exception)(tag)
}
}
| Taig/Toolbelt | concurrent/src/main/scala/io/taig/android/concurrent/Executor.scala | Scala | mit | 1,327 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.mesos.scheduler
import java.util.concurrent.TimeUnit
import java.util.{Collections, UUID}
import java.util.concurrent.atomic.AtomicReference
import akka.actor.ActorSystem
import akka.actor.FSM.StateTimeout
import akka.testkit._
import com.netflix.fenzo.TaskRequest.{AssignedResources, NamedResourceSetRequest}
import com.netflix.fenzo._
import com.netflix.fenzo.functions.{Action1, Action2}
import org.apache.flink.api.java.tuple.{Tuple2=>FlinkTuple2}
import org.apache.flink.configuration.Configuration
import org.apache.flink.mesos.scheduler.LaunchCoordinator._
import org.apache.flink.mesos.scheduler.messages._
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.mesos.Protos.{SlaveID, TaskInfo}
import org.apache.mesos.{SchedulerDriver, Protos}
import org.junit.runner.RunWith
import org.mockito.Mockito.{verify, _}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.mockito.{ArgumentMatchers => MM, Mockito}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.collection.JavaConverters._
import org.apache.flink.mesos.Utils.range
import org.apache.flink.mesos.Utils.ranges
import org.apache.flink.mesos.Utils.scalar
import org.apache.flink.mesos.configuration.MesosOptions.DECLINED_OFFER_REFUSE_DURATION
import org.apache.flink.mesos.util.MesosResourceAllocation
import scala.concurrent.duration.Duration
@RunWith(classOf[JUnitRunner])
class LaunchCoordinatorTest
extends TestKitBase
with ImplicitSender
with WordSpecLike
with Matchers
with BeforeAndAfterAll {
lazy val config: Configuration = new Configuration()
implicit lazy val system: ActorSystem = AkkaUtils.createLocalActorSystem(config)
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
def randomFramework = {
Protos.FrameworkID.newBuilder().setValue(UUID.randomUUID.toString).build
}
def randomTask = {
val taskID = Protos.TaskID.newBuilder.setValue(UUID.randomUUID.toString).build
def generateTaskRequest = {
new TaskRequest() {
private[mesos] val assignedResources = new AtomicReference[TaskRequest.AssignedResources]
override def getId: String = taskID.getValue
override def taskGroupName: String = ""
override def getCPUs: Double = 1.0
override def getMemory: Double = 1024.0
override def getNetworkMbps: Double = 0.0
override def getDisk: Double = 0.0
override def getPorts: Int = 1
override def getScalarRequests = Collections.singletonMap("gpus", 1.0)
override def getCustomNamedResources: java.util.Map[String, NamedResourceSetRequest] =
Collections.emptyMap[String, NamedResourceSetRequest]
override def getSoftConstraints: java.util.List[_ <: VMTaskFitnessCalculator] = null
override def getHardConstraints: java.util.List[_ <: ConstraintEvaluator] = null
override def getAssignedResources: AssignedResources = assignedResources.get()
override def setAssignedResources(assignedResources: AssignedResources): Unit = {
this.assignedResources.set(assignedResources)
}
}
}
val task: LaunchableTask = new LaunchableTask() {
override def taskRequest: TaskRequest = generateTaskRequest
override def launch(
slaveId: SlaveID,
allocation: MesosResourceAllocation): Protos.TaskInfo = {
Protos.TaskInfo.newBuilder
.setTaskId(taskID).setName(taskID.getValue)
.setCommand(Protos.CommandInfo.newBuilder.setValue("whoami"))
.setSlaveId(slaveId)
.build()
}
override def toString = taskRequest.getId
}
(taskID, task)
}
def randomSlave = {
val slaveID = Protos.SlaveID.newBuilder.setValue(UUID.randomUUID.toString).build
val hostname = s"host-${slaveID.getValue}"
(slaveID, hostname)
}
def randomOffer(frameworkID: Protos.FrameworkID, slave: (Protos.SlaveID, String)) = {
val offerID = Protos.OfferID.newBuilder().setValue(UUID.randomUUID.toString)
Protos.Offer.newBuilder()
.setFrameworkId(frameworkID)
.setId(offerID)
.setSlaveId(slave._1)
.setHostname(slave._2)
.addResources(scalar("cpus", "*", 0.75))
.addResources(scalar("mem", "*", 4096.0))
.addResources(scalar("disk", "*", 1024.0))
.addResources(ranges("ports", "*", range(9000, 9001)))
.build()
}
def lease(offer: Protos.Offer) = {
new Offer(offer)
}
/**
* Mock a successful task assignment result matching a task to an offer.
*/
def taskAssignmentResult(lease: VirtualMachineLease, task: TaskRequest): TaskAssignmentResult = {
val ports = lease.portRanges().get(0)
val assignedPorts = ports.getBeg to ports.getBeg + task.getPorts
val r = mock(classOf[TaskAssignmentResult])
when(r.getTaskId).thenReturn(task.getId)
when(r.getHostname).thenReturn(lease.hostname())
when(r.getAssignedPorts).thenReturn(
assignedPorts.toList.asJava.asInstanceOf[java.util.List[Integer]])
when(r.getRequest).thenReturn(task)
when(r.isSuccessful).thenReturn(true)
when(r.getFitness).thenReturn(1.0)
r
}
/**
* Mock a VM assignment result with the given leases and tasks.
*/
def vmAssignmentResult(hostname: String,
leasesUsed: Seq[VirtualMachineLease],
tasksAssigned: Set[TaskAssignmentResult]): VMAssignmentResult = {
new VMAssignmentResult(hostname, leasesUsed.asJava, tasksAssigned.asJava)
}
/**
* Mock a scheduling result with the given successes and failures.
*/
def schedulingResult(successes: Seq[VMAssignmentResult],
failures: Seq[TaskAssignmentResult] = Nil,
exceptions: Seq[Exception] = Nil,
leasesAdded: Int = 0,
leasesRejected: Int = 0): SchedulingResult = {
val r = mock(classOf[SchedulingResult])
when(r.getResultMap).thenReturn(successes.map(r => r.getHostname -> r).toMap.asJava)
when(r.getExceptions).thenReturn(exceptions.asJava)
val groupedFailures = failures.groupBy(_.getRequest).mapValues(_.asJava)
when(r.getFailures).thenReturn(groupedFailures.asJava)
when(r.getLeasesAdded).thenReturn(leasesAdded)
when(r.getLeasesRejected).thenReturn(leasesRejected)
when(r.getRuntime).thenReturn(0)
when(r.getNumAllocations).thenThrow(new NotImplementedError())
when(r.getTotalVMsCount).thenThrow(new NotImplementedError())
when(r.getIdleVMsCount).thenThrow(new NotImplementedError())
r
}
/**
* Mock a task scheduler.
* The task assigner/unassigner is pre-wired.
*/
def taskScheduler() = {
val optimizer = mock(classOf[TaskScheduler])
val taskAssigner = mock(classOf[Action2[TaskRequest, String]])
when[Action2[TaskRequest, String]](optimizer.getTaskAssigner).thenReturn(taskAssigner)
val taskUnassigner = mock(classOf[Action2[String, String]])
when[Action2[String, String]](optimizer.getTaskUnAssigner).thenReturn(taskUnassigner)
optimizer
}
/**
* Create a task scheduler builder.
*/
def taskSchedulerBuilder(optimizer: TaskScheduler) = new TaskSchedulerBuilder {
var leaseRejectAction: Action1[VirtualMachineLease] = null
var rejectAllExpiredOffers: Boolean = false
var leaseOfferExpiry: Long = 0L
var offersToReject: Int = 0
override def withLeaseRejectAction(
action: Action1[VirtualMachineLease]): TaskSchedulerBuilder = {
leaseRejectAction = action
this
}
override def withRejectAllExpiredOffers(): TaskSchedulerBuilder = {
rejectAllExpiredOffers = true
this
}
override def withLeaseOfferExpirySecs(leaseOfferExpirySecs: Long): TaskSchedulerBuilder = {
leaseOfferExpiry = leaseOfferExpirySecs
this
}
override def build(): TaskScheduler = optimizer
}
/**
* Process a call to scheduleOnce with the given function.
*/
def scheduleOnce(f: (Seq[TaskRequest],Seq[VirtualMachineLease]) => SchedulingResult) = {
new Answer[SchedulingResult] {
override def answer(invocationOnMock: InvocationOnMock): SchedulingResult = {
val args = invocationOnMock.getArguments
val requests = args(0).asInstanceOf[java.util.List[TaskRequest]]
val newLeases = args(1).asInstanceOf[java.util.List[VirtualMachineLease]]
f(requests.asScala, newLeases.asScala)
}
}
}
/**
* The context fixture.
*/
class Context {
val optimizer = taskScheduler()
val optimizerBuilder = taskSchedulerBuilder(optimizer)
val schedulerDriver = mock(classOf[SchedulerDriver])
val trace = Mockito.inOrder(schedulerDriver)
val fsm =
TestFSMRef(new LaunchCoordinator(testActor, config, schedulerDriver, optimizerBuilder))
val refuseFilter =
Protos.Filters.newBuilder()
.setRefuseSeconds(
Duration(config.getLong(DECLINED_OFFER_REFUSE_DURATION), TimeUnit.MILLISECONDS).toSeconds)
.build()
val framework = randomFramework
val task1 = randomTask
val task2 = randomTask
val task3 = randomTask
val slave1 = {
val slave = randomSlave
(slave._1, slave._2,
randomOffer(framework, slave), randomOffer(framework, slave), randomOffer(framework, slave))
}
val slave2 = {
val slave = randomSlave
(slave._1, slave._2,
randomOffer(framework, slave), randomOffer(framework, slave), randomOffer(framework, slave))
}
}
def inState = afterWord("in state")
def handle = afterWord("handle")
def handlesAssignments(state: TaskState) = {
"Unassign" which {
s"stays in $state with updated optimizer state" in new Context {
optimizer.getTaskAssigner.call(task1._2.taskRequest, slave1._2)
fsm.setState(state)
fsm ! Unassign(task1._1, slave1._2)
verify(optimizer.getTaskUnAssigner).call(task1._1.getValue, slave1._2)
fsm.stateName should be (state)
}
}
"Assign" which {
s"stays in $state with updated optimizer state" in new Context {
fsm.setState(state)
fsm ! Assign(Seq(new FlinkTuple2(task1._2.taskRequest, slave1._2)).asJava)
verify(optimizer.getTaskAssigner).call(MM.any(), MM.any())
fsm.stateName should be (state)
}
}
}
"The LaunchCoordinator" when inState {
"Suspended" should handle {
"Connected" which {
"transitions to Idle when the task queue is empty" in new Context {
fsm.setState(Suspended)
fsm ! new Connected {}
fsm.stateName should be (Idle)
}
"transitions to GatheringOffers when the task queue is non-empty" in new Context {
fsm.setState(Suspended, GatherData(tasks = Seq(task1._2), newLeases = Nil))
fsm ! new Connected {}
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
}
}
"Launch" which {
"stays in Suspended with updated task queue" in new Context {
fsm.setState(Suspended, GatherData(tasks = Seq(task1._2), newLeases = Nil))
fsm ! Launch(Seq(task2._2).asJava)
fsm.stateName should be (Suspended)
fsm.stateData.tasks should contain only (task1._2, task2._2)
}
}
behave like handlesAssignments(Suspended)
}
"Idle" should handle {
"Disconnected" which {
"transitions to Suspended" in new Context {
fsm.setState(Idle)
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
}
}
"ResourceOffers" which {
"stays in Idle with offers declined" in new Context {
fsm.setState(Idle)
fsm ! new ResourceOffers(Seq(slave1._3, slave1._4).asJava)
verify(schedulerDriver).declineOffer(slave1._3.getId, refuseFilter)
verify(schedulerDriver).declineOffer(slave1._4.getId, refuseFilter)
fsm.stateName should be (Idle)
}
}
"Launch" which {
"transitions to GatheringOffers with updated task queue" in new Context {
fsm.setState(Idle)
fsm ! Launch(Seq(task1._2, task2._2).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2, task2._2)
}
}
behave like handlesAssignments(Idle)
}
"GatheringOffers" should handle {
"(enter)" which {
"revives offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
verify(schedulerDriver).reviveOffers()
}
}
"(exit)" which {
"suppresses offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
fsm ! new Disconnected()
verify(schedulerDriver).suppressOffers()
}
"declines any outstanding offers" in new Context {
fsm.setState(GatheringOffers, GatherData())
fsm ! new Disconnected()
verify(optimizer).expireAllLeases()
verify(optimizer).scheduleOnce(MM.any(), MM.any())
}
}
"Disconnected" which {
"transitions to Suspended with task queue intact" in new Context {
fsm.setState(GatheringOffers, GatherData(tasks = Seq(task1._2)))
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
fsm.stateData.tasks should contain only (task1._2)
}
"transitions to Suspended with offer queue emptied" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new Disconnected()
fsm.stateName should be (Suspended)
fsm.stateData.newLeases should be (empty)
}
}
"Launch" which {
"stays in GatheringOffers with updated task queue" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! Launch(Seq(task2._2).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2, task2._2)
fsm.stateData.newLeases.map(_.getOffer) should contain only (slave1._3)
}
}
"ResourceOffers" which {
"stays in GatheringOffers with offer queue updated" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new ResourceOffers(Seq(slave1._4, slave2._3).asJava)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases.map(_.getOffer) should contain only
(slave1._3, slave1._4, slave2._3)
}
}
"OfferRescinded" which {
"stays in GatheringOffers with offer queue updated" in new Context {
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! new OfferRescinded(slave1._3.getId)
verify(optimizer).expireLease(slave1._3.getId.getValue)
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases should be (empty)
}
}
"StateTimeout" which {
"sends AcceptOffers message for matched tasks" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
val (l, task) = (newLeases.head, requests.head)
val vm = vmAssignmentResult(l.hostname(), Seq(l), Set(taskAssignmentResult(l, task)))
schedulingResult(successes = Seq(vm))
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
val offers = expectMsgType[AcceptOffers]
offers.hostname() should be (slave1._2)
offers.offerIds() should contain only (slave1._3.getId)
}
"transitions to Idle when task queue is empty" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
val (l, task) = (newLeases.head, requests.head)
val vm = vmAssignmentResult(l.hostname(), Seq(l), Set(taskAssignmentResult(l, task)))
schedulingResult(successes = Seq(vm))
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
fsm.stateName should be (Idle)
fsm.stateData.tasks should be (empty)
fsm.stateData.newLeases should be (empty)
}
"stays in GatheringOffers when task queue is non-empty" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
schedulingResult(successes = Nil)
}
}
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
fsm.stateName should be (GatheringOffers)
fsm.stateData.tasks should contain only (task1._2)
fsm.stateData.newLeases should be (empty)
}
"declines old offers" in new Context {
when(optimizer.scheduleOnce(MM.any(), MM.any())) thenAnswer {
scheduleOnce { (requests, newLeases) =>
optimizerBuilder.leaseRejectAction.call(newLeases.head)
schedulingResult(successes = Nil)
}
} thenReturn(schedulingResult(successes = Nil))
fsm.setState(GatheringOffers,
GatherData(tasks = Seq(task1._2), newLeases = Seq(lease(slave1._3))))
fsm ! StateTimeout
verify(schedulerDriver).declineOffer(slave1._3.getId, refuseFilter)
}
}
behave like handlesAssignments(GatheringOffers)
}
}
override def toString = s"LaunchCoordinatorTest()"
}
| gyfora/flink | flink-mesos/src/test/scala/org/apache/flink/mesos/scheduler/LaunchCoordinatorTest.scala | Scala | apache-2.0 | 19,221 |
package breakybot.spec
import org.scalatest.{Spec,BeforeAndAfterAll,BeforeAndAfterEach}
import akka.actor.Actor.actorOf
import akka.testkit.TestParts
import akka.util.duration._
import akka.actor.ActorRef
import breakybot.entity
import breakybot.message.permission
class BreakyBotSpec extends Spec with BeforeAndAfterAll with BeforeAndAfterEach {
private var machine:ActorRef = _
private val nanny:TestParts = new TestParts()
private val diary:TestParts = new TestParts()
override def beforeEach() = {
machine = actorOf(new entity.BreakyBot(nanny.testActor, diary.testActor)).start
}
override def afterEach() = {
machine.stop
}
override def afterAll() = {
nanny.stopTestActor
diary.stopTestActor
}
describe("BreakyBot") {
it("should request permission to dance with a new partner") {
machine ! "Edgar Allen Poe"
nanny.expectMsg(50 millis, permission.Request)
nanny.expectNoMsg(10 millis)
}
it("should brag to its diary when it can dance with a new partner") {
machine ! "Oscar Wilde"
nanny.expectMsg(50 millis) {
case permission.Request => machine.!(permission.Granted)(Some(nanny.testActor))
case _ => fail
}
diary.expectMsg(500 millis, "Oscar Wilde wants to break dance with me!")
diary.expectNoMsg(40 millis)
}
it("should eventually lose its partner to the ravages of time") {
machine ! "Roger B. Myerson"
nanny.expectMsg(50 millis) {
case permission.Request => machine.!(permission.Granted)(Some(nanny.testActor))
case _ => fail
}
diary.expectMsg(5 millis, "Roger B. Myerson wants to break dance with me!")
diary.expectMsg(2 second, "Dancing with Roger B. Myerson was fun!")
diary.expectNoMsg(50 millis)
}
}
}
| blt/BreakyBot | src/test/scala/BreakyBotSpec.scala | Scala | mit | 1,797 |
package mimir.data
import com.typesafe.scalalogging.LazyLogging
import java.sql.SQLException
import sparsity.Name
import play.api.libs.json._
import mimir.Database
import mimir.algebra._
import mimir.data._
import mimir.views.{ ViewManager, TemporaryViewManager }
import mimir.util.ExperimentalOptions
import mimir.exec.spark.MimirSpark
import mimir.metadata.MetadataManyMany
import mimir.ctables.CoarseDependency
class SystemCatalog(db: Database)
extends LazyLogging
{
  // Schema providers are kept in a LinkedHashMap so that lookups follow registration order.
private val simpleSchemaProviders = scala.collection.mutable.LinkedHashMap[ID, SchemaProvider]()
private var preferredMaterializedTableProvider: ID = null
var coarseDependencies: MetadataManyMany = null
def init()
{
// The order in which the schema providers are registered is the order
// in which they're used to resolve table names.
// Existing restrictions / assumptions on this order include:
//
// --- ViewManager must come BEFORE LoadedTables ---
// db.loader.loadTable creates both a LoadedTable and a View with the
// same name so that users can access both the actual data, as well as
// the post-processed view. Generally, we want users to see the
// post-processed version by default.
//
// --- SparkSchemaProvider must come BEFORE LoadedTables ---
// This is necessary for Spark to take priority over LoadedTables
// for the preferredBulkSchemaProvider.
//
// --- CatalogSchemaProvider *should* come LAST ---
// Not strictly necessary, but the two tables defined by this
// provider use relatively common names. Really it shouldn't
// even be part of the normal search path, but eh?
//
registerSchemaProvider(TemporaryViewManager.SCHEMA, db.tempViews)
registerSchemaProvider(ViewManager.SCHEMA, db.views)
if(ExperimentalOptions.isEnabled("USE-DERBY") || MimirSpark.remoteSpark){
registerSchemaProvider(ID("SPARK"), new SparkSchemaProvider(db))
}
registerSchemaProvider(LoadedTables.SCHEMA, db.loader)
registerSchemaProvider(SystemCatalog.SCHEMA, this.CatalogSchemaProvider)
coarseDependencies = db.metadata.registerManyMany(
ID("MIMIR_COARSE_DEPENDENCIES")
)
}
def registerSchemaProvider(name: ID, provider: SchemaProvider)
{
simpleSchemaProviders.put(name, provider)
}
def getSchemaProvider(name: ID): SchemaProvider =
{
simpleSchemaProviders
.get(name)
.getOrElse {
db.adaptiveSchemas
.getProvider(name)
.getOrElse { throw new SQLException(s"Invalid schema $name") }
}
}
def allSchemaProviders: Seq[(ID, SchemaProvider)] = {
simpleSchemaProviders.toSeq ++ db.adaptiveSchemas.allProviders
}
def tableView: Operator =
{
val tableView =
OperatorUtils.makeUnion(
allSchemaProviders
.filter { _._2.isVisible }
.map { case (name, provider) =>
provider.listTablesQuery
.addColumns( "SCHEMA_NAME" -> StringPrimitive(name.id) )
}.toSeq
)
.projectByID( SystemCatalog.tableCatalogSchema.map { _._1 }:_* )
// sanity check:
db.typechecker.schemaOf(tableView)
logger.debug(s"Table View: \\n$tableView")
return tableView
}
def attrView: Operator =
{
val attrView =
OperatorUtils.makeUnion(
allSchemaProviders
.filter { _._2.isVisible }
.map { case (name, provider) =>
provider.listAttributesQuery
.addColumns( "SCHEMA_NAME" -> StringPrimitive(name.id) )
}.toSeq
)
.projectByID( SystemCatalog.attrCatalogSchema.map { _._1 }:_* )
logger.debug(s"Table View: \\n$attrView")
return attrView
}
// The tables themselves need to be defined lazily, since
// we want them read out at access time
private val hardcodedTables = Map[ID, (Seq[(ID, Type)], () => Operator)](
ID("TABLES") -> ((
SystemCatalog.tableCatalogSchema,
tableView _
)),
ID("ATTRIBUTES") -> ((
SystemCatalog.attrCatalogSchema,
attrView _
))
)
/**
* Source a specified table with a case-insensitive name search
*
* @param table The case-insensitive name of a table
* @return A triple of (providerID, tableID, provider) or None
* if the table doesn't exist.
*/
def resolveTableCaseInsensitive(table: String): Option[(ID, ID, SchemaProvider)] =
{
logger.debug(s"Resolve table (Case INsensitive): $table")
for( (schema, provider) <- allSchemaProviders ) {
logger.trace(s"Trying for $table in $schema")
provider.resolveTableCaseInsensitive(table) match {
case None => {}
case Some(table) => return Some((schema, table, provider))
}
}
return None
}
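  // Hedged example (illustrative; depends on the provider order registered in init()): a call like
  // resolveTableCaseInsensitive("tables") walks the providers in registration order and, if no
  // earlier provider matches, could resolve to the hardcoded catalog table as
  // Some((ID("SYSTEM"), ID("TABLES"), CatalogSchemaProvider)).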
/**
* Source a specified table with a case-sensitive name search
*
* @param table The case-sensitive name of a table
* @return A triple of (providerID, tableID, provider) or None
* if the table doesn't exist.
*/
def resolveTableCaseSensitive(table: String): Option[(ID, ID, SchemaProvider)] =
{
logger.debug(s"Resolve table (Case Sensitive): $table")
for( (schema, provider) <- allSchemaProviders ) {
logger.trace(s"Trying for $table in $schema")
if(provider.tableExists(ID(table))){
return Some( (schema, ID(table), provider) )
}
}
return None
}
def resolveProviderCaseSensitive(providerName: String): Option[(ID, SchemaProvider)] = {
simpleSchemaProviders
.get(ID(providerName))
.orElse { db.adaptiveSchemas.getProvider(ID(providerName)) }
.map { ( ID(providerName), _) }
}
def resolveProviderCaseInsensitive(providerName: String): Option[(ID, SchemaProvider)] = {
for( (schema, provider) <- allSchemaProviders ) {
if(schema.id.equalsIgnoreCase(providerName)){
return Some( ( schema, provider ) )
}
}
return None
}
def resolveTable(table: Name): Option[(ID, ID, SchemaProvider)] =
{
if(table.quoted) { return resolveTableCaseSensitive(table.name) }
else { return resolveTableCaseInsensitive(table.name) }
}
def resolveTable(table: ID): Option[(ID, ID, SchemaProvider)] =
{
resolveTableCaseSensitive(table.id)
}
def resolveTable(providerName: Name, table: Name): Option[(ID, ID, SchemaProvider)] =
{
val (providerID, provider) = (
if(providerName.quoted) { resolveProviderCaseSensitive(providerName.name) }
else { resolveProviderCaseInsensitive(providerName.name) }
).getOrElse { return None }
provider.resolveTableByName(table)
.map { (providerID, _, provider)}
}
def resolveTable(providerNameMaybe: Option[Name], table: Name): Option[(ID, ID, SchemaProvider)] =
{
providerNameMaybe match {
case None => resolveTable(table)
case Some(providerName) => resolveTable(providerName, table)
}
}
def resolveTable(providerID: ID, table: ID): Option[(ID, ID, SchemaProvider)] =
{
val (_, provider) =
resolveProviderCaseSensitive(providerID.id)
.getOrElse { return None }
if(provider.tableExists(table)){
return Some( (providerID, table, provider) )
} else { return None }
}
def tableExists(name: Name): Boolean =
resolveTable(name) != None
def tableExists(name: ID): Boolean =
resolveTable(name) != None
def tableExists(providerName: Name, name: Name): Boolean =
provider(providerName)
.getOrElse { return false }
._2
.resolveTableByName(name) != None
def tableExists(providerName: ID, name: ID): Boolean =
getSchemaProvider(providerName).tableExists(name)
def tableSchema(name: Name): Option[Seq[(ID, Type)]] =
resolveTable(name).flatMap { case (_, table, provider) => provider.tableSchema(table) }
def tableSchema(name: ID): Option[Seq[(ID, Type)]] =
resolveTable(name).flatMap { case (_, table, provider) => provider.tableSchema(table) }
def tableSchema(providerName: Name, name: Name): Option[Seq[(ID, Type)]] =
provider(providerName).flatMap { case (_, providerImpl) =>
providerImpl.resolveTableByName(name).flatMap { providerImpl.tableSchema(_)} }
def tableSchema(providerName: ID, name: ID): Option[Seq[(ID, Type)]] =
provider(providerName).flatMap { _._2.tableSchema(name) }
def tableOperator(defn: (ID, ID, SchemaProvider)): Operator =
{
val (providerName, tableName, provider) = defn
provider.tableOperator(
providerName,
tableName
)
}
def tableOperator(tableName: Name): Operator =
resolveTable(tableName)
.map { tableOperator(_:(ID, ID, SchemaProvider)) }
.getOrElse {
throw new SQLException(s"No such table or view '$tableName'")
}
def tableOperator(tableName: ID): Operator =
resolveTable(tableName)
.map { tableOperator(_) }
.getOrElse {
throw new SQLException(s"No such table or view '$tableName'")
}
def tableOperator(providerName: Name, tableName: Name): Operator =
tableOperator(
resolveTable(providerName, tableName).getOrElse {
throw new SQLException(s"No such table or view '$providerName.$tableName'")
})
def tableOperator(providerName: ID, tableName: ID): Operator =
tableOperator(
resolveTable(providerName, tableName).getOrElse {
throw new SQLException(s"No such table or view '$providerName.$tableName'")
} )
def provider(providerName: Name): Option[(ID, SchemaProvider)] =
if(providerName.quoted){ resolveProviderCaseSensitive(providerName.name) }
else { resolveProviderCaseInsensitive(providerName.name) }
def provider(providerName: ID): Option[(ID, SchemaProvider)] =
resolveProviderCaseSensitive(providerName.id)
def materializedTableProvider(providerName: ID = null): SchemaProvider with MaterializedTableProvider =
{
var targetProvider = providerName
if(targetProvider == null){ targetProvider = preferredMaterializedTableProvider }
if(targetProvider != null){
simpleSchemaProviders.get(targetProvider) match {
case None => throw new SQLException(s"'$targetProvider' is not a registered schema provider.")
case Some(s: SchemaProvider with MaterializedTableProvider) => return s
case _ => throw new SQLException(s"'$targetProvider' is not a valid bulk storage provider.")
}
}
simpleSchemaProviders.foreach {
case (validProviderName, provider: SchemaProvider with MaterializedTableProvider) => {
preferredMaterializedTableProvider = validProviderName
return provider
}
case _ => ()
}
throw new SQLException("No registered schema providers support bulk storage.")
}
def materializedTableProviderID: ID =
{
if(preferredMaterializedTableProvider != null) { return preferredMaterializedTableProvider }
materializedTableProvider()
return preferredMaterializedTableProvider;
}
/**
* Get all availale table names
*/
def getAllTables(): Set[ID] =
{
allSchemaProviders.flatMap { _._2.listTables }
.toSet
}
private def safeAssembleIdentifierPair(pair: (ID, ID)): ID =
{
ID(Seq(pair._1, pair._2).map {
_.id
.replaceAll("[\\\\\\\\]", "\\\\\\\\")
.replaceAll("[.]", "\\\\\\\\.")
}.mkString("."))
}
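  // Illustrative note (added commentary; exact escaping is defined by the regexes above): dots and
  // backslashes inside either identifier are escaped before the two parts are joined with ".",
  // so a pair like (ID("LOADED"), ID("my.table")) maps to a single key that can be split back
  // unambiguously.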
/**
* Register a coarse-grained provenance relationship.
* @param target The table affected by the provenance relationship
* @param source The schema/table from which [target] received input
*
* Support for coarse-grained provenance relationships between tables.
* In general, we want all inter-table relationships to be given explicitly
* through views. However, in some cases, it becomes necessary to pipe
* a table through an external process like a python script. See:
* - https://github.com/VizierDB/web-ui/issues/116
* - https://github.com/UBOdin/mimir/issues/319
* In these cases, we still want to preserve the relationship between
* the table(s) read by the external process and the tables written out
* by the external process to propagate VGTerm/DataWarnings through.
*
* This function registers such a relationship.
*/
def createDependency(target: (ID, ID), source: CoarseDependency)
{
coarseDependencies.add(
safeAssembleIdentifierPair(target),
ID(Json.stringify(Json.toJson(source)))
)
}
/**
* Deregister a coarse-grained provenance relationship.
* @param target The table affected by the provenance relationship
* @param source The schema/table from which [target] received input
*
* See the discussion of addProvenance
*/
def dropDependency(target: (ID, ID), source: CoarseDependency)
{
coarseDependencies.rm(
safeAssembleIdentifierPair(target),
ID(Json.stringify(Json.toJson(source)))
)
}
/**
* Retrieve all coarse-grained provenance relationships for a table.
* @param target The table affected by the provenance relationship
* @param source The schema/table from which [target] received input
*
* See the discussion of addProvenance
*/
def getDependencies(target: (ID, ID)): Seq[CoarseDependency] =
{
coarseDependencies.getByLHS(
safeAssembleIdentifierPair(target)
).map { dep =>
Json.parse(dep.id).as[CoarseDependency]
}
}
object CatalogSchemaProvider
extends ViewSchemaProvider
{
def listTables =
hardcodedTables.keys
def tableSchema(table: ID): Option[Seq[(ID, Type)]] =
hardcodedTables.get(table).map { _._1 }
def view(table: ID) =
hardcodedTables(table)._2()
}
}
object SystemCatalog
{
val tableCatalogSchema =
Seq(
ID("SCHEMA_NAME") -> TString(),
ID("TABLE_NAME") -> TString()
)
val attrCatalogSchema =
Seq(
ID("SCHEMA_NAME") -> TString(),
ID("TABLE_NAME") -> TString(),
ID("ATTR_NAME") -> TString(),
ID("ATTR_TYPE") -> TString(),
ID("IS_KEY") -> TBool()
)
val SCHEMA = ID("SYSTEM")
} | UBOdin/mimir | src/main/scala/mimir/data/SystemCatalog.scala | Scala | apache-2.0 | 14,199 |
package org.scalassh
import org.specs2.mutable.Specification
import java.util.Properties
import java.io.{File, FileInputStream}
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: 02/06/2011
* Time: 23:35
* To change this template use File | Settings | File Templates.
*/
class ConfigAndEnvironmentSetupForTestingTest extends Specification{
val directory = System.getProperty("user.home") + System.getProperty("file.separator") + ".scalassh" +
System.getProperty("file.separator")
var login: Login = null
val props = new Properties
props.load(new FileInputStream(new File(directory, "localhost.properties")))
"Retrieving a login configuration for 'password' for the host 'localhost'" should{
"find a LoginBuilder" in {
(Config.loginBuilders("password").isInstanceOf[UsernamePasswordLoginBuilder]) must beTrue
}
"return a Login when LoginBuilders.login is called" in{
login = Config.loginBuilders("password").build(props)
login must be_!=(null)
}
" give me a login with a username and password" in {
login.asInstanceOf[UsernamePasswordLogin].password must be_!=(null)
login.asInstanceOf[UsernamePasswordLogin].username must be_!=(null)
}
}
} | wfaler/ScalaSSH | src/test/scala/org/scalassh/ConfigAndEnvironmentSetupForTestingTest.scala | Scala | bsd-3-clause | 1,224 |
package com.codexica.s3crate.filetree.history.snapshotstore
import scala.concurrent.Future
import com.codexica.s3crate.filetree.{FilePath, WritableFileTree}
/**
* Interface for listing and reading snapshots in the storage system.
*
* @author Josh Albrecht (joshalbrecht@gmail.com)
*/
trait ReadableSnapshotStore {
def list(): Future[Set[RemoteFileSystemTypes.SnapshotId]]
def read(id: RemoteFileSystemTypes.SnapshotId): Future[FileSnapshot]
def download(id: RemoteFileSystemTypes.SnapshotId, path: FilePath, fileSystem: WritableFileTree): Future[Unit]
}
| joshalbrecht/s3crate | src/main/scala/com/codexica/s3crate/filetree/history/snapshotstore/ReadableSnapshotStore.scala | Scala | mit | 566 |
/*
* Deps.scala
*
* Updated: Dec 3, 2014
*
* Copyright (c) 2014, CodeMettle
*/
import sbt._
object Deps {
val akkaActor = "com.typesafe.akka" %% "akka-actor" % Versions.akka
val akkaStream = "com.typesafe.akka" %% "akka-stream" % Versions.akka
val akkaSlf = "com.typesafe.akka" %% "akka-slf4j" % Versions.akka
val akkaTest = "com.typesafe.akka" %% "akka-testkit" % Versions.akka
val logback = "ch.qos.logback" % "logback-classic" % Versions.logback
val scalaTest = "org.scalatest" %% "scalatest" % Versions.scalaTest
val snmp4j = "org.snmp4j" % "snmp4j" % Versions.snmp4j
val ficus = "com.iheart" %% "ficus" % Versions.ficus
}
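// Hypothetical usage in a build definition (illustrative; assumes the usual sbt settings):
//   libraryDependencies ++= Seq(Deps.akkaActor, Deps.akkaSlf, Deps.logback, Deps.scalaTest % Test)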
| CodeMettle/akka-snmp4j | project/Deps.scala | Scala | apache-2.0 | 668 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package xsbt.api
import xsbti.api._
import scala.collection.mutable
object TagTypeVariables
{
type TypeVars = collection.Map[Int, (Int, Int)]
def apply(s: SourceAPI): TypeVars = (new TagTypeVariables).tag(s)
}
import TagTypeVariables.TypeVars
private class TagTypeVariables
{
private val taggedStructures = new mutable.HashSet[Structure]
private val taggedClasses = new mutable.HashSet[ClassLike]
private val tags = new mutable.HashMap[Int, (Int, Int)]
private var level = 0
private var index = 0
def tag(s: SourceAPI): TypeVars =
{
s.definitions.foreach(tagDefinition)
tags
}
def tagDefinitions(ds: Seq[Definition]) = ds.foreach(tagDefinition)
def tagDefinition(d: Definition)
{
d match
{
case c: ClassLike => tagClass(c)
case f: FieldLike => tagField(f)
case d: Def => tagDef(d)
case t: TypeDeclaration => tagTypeDeclaration(t)
case t: TypeAlias => tagTypeAlias(t)
}
}
def tagClass(c: ClassLike): Unit = if(taggedClasses add c) tagClass0(c)
def tagClass0(c: ClassLike): Unit =
tagParameterizedDefinition(c) {
tagType(c.selfType)
tagStructure(c.structure)
}
def tagField(f: FieldLike)
{
tagType(f.tpe)
tagAnnotations(f.annotations)
}
def tagDef(d: Def): Unit =
tagParameterizedDefinition(d) {
tagValueParameters(d.valueParameters)
tagType(d.returnType)
}
def tagValueParameters(valueParameters: Seq[ParameterList]) = valueParameters.foreach(tagValueParameterList)
def tagValueParameterList(list: ParameterList) = list.parameters.foreach(tagValueParameter)
def tagValueParameter(parameter: MethodParameter) = tagType(parameter.tpe)
def tagParameterizedDefinition[T <: ParameterizedDefinition](d: T)(tagExtra: => Unit)
{
tagAnnotations(d.annotations)
scope {
tagTypeParameters(d.typeParameters)
tagExtra
}
}
def tagTypeDeclaration(d: TypeDeclaration): Unit =
tagParameterizedDefinition(d) {
tagType(d.lowerBound)
tagType(d.upperBound)
}
def tagTypeAlias(d: TypeAlias): Unit =
tagParameterizedDefinition(d) {
tagType(d.tpe)
}
def tagTypeParameters(parameters: Seq[TypeParameter]) = parameters.foreach(tagTypeParameter)
def tagTypeParameter(parameter: TypeParameter)
{
recordTypeParameter(parameter.id)
scope {
tagTypeParameters(parameter.typeParameters)
tagType(parameter.lowerBound)
tagType(parameter.upperBound)
}
}
def tagAnnotations(annotations: Seq[Annotation]) = tagTypes(annotations.map(_.base))
def tagTypes(ts: Seq[Type]) = ts.foreach(tagType)
def tagType(t: Type)
{
t match
{
case s: Structure => tagStructure(s)
case e: Existential => tagExistential(e)
case c: Constant => tagConstant(c)
case p: Polymorphic => tagPolymorphic(p)
case a: Annotated => tagAnnotated(a)
case p: Parameterized => tagParameterized(p)
case p: Projection => tagProjection(p)
case _: EmptyType | _: Singleton | _: ParameterRef => ()
}
}
def tagConstant(c: Constant) = tagType(c.baseType)
def tagExistential(e: Existential) = tagParameters(e.clause, e.baseType)
def tagPolymorphic(p: Polymorphic) = tagParameters(p.parameters, p.baseType)
def tagProjection(p: Projection) = tagType(p.prefix)
def tagParameterized(p: Parameterized)
{
tagType(p.baseType)
tagTypes(p.typeArguments)
}
def tagAnnotated(a: Annotated)
{
tagType(a.baseType)
tagAnnotations(a.annotations)
}
def tagStructure(structure: Structure): Unit = if(taggedStructures add structure) tagStructure0(structure)
def tagStructure0(structure: Structure)
{
tagTypes(structure.parents)
tagDefinitions(structure.declared)
tagDefinitions(structure.inherited)
}
def tagParameters(parameters: Seq[TypeParameter], base: Type): Unit =
scope {
tagTypeParameters(parameters)
tagType(base)
}
def scope(action: => Unit)
{
val saveIndex = index
index = 0
level += 1
action
level -= 1
index = saveIndex
}
def recordTypeParameter(id: Int)
{
tags(id) = (level, index)
index += 1
}
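	// Illustrative sketch (added commentary, not from the original source): for a definition like
	// `def m[A, B](x: A): B`, tagDef opens one scope, so A is tagged (level = 1, index = 0) and
	// B is tagged (level = 1, index = 1); type parameters nested under A would be tagged at level 2.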
} | kuochaoyi/xsbt | compile/api/TagTypeVariables.scala | Scala | bsd-3-clause | 4,013 |
package com.overviewdocs.ingest.process
import akka.actor.ActorRefFactory
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.{Directives,RequestContext,Route,RouteResult}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow,Keep,MergeHub,Sink}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext,Future}
import com.overviewdocs.ingest.File2Writer
import com.overviewdocs.ingest.model.{ConvertOutputElement,WrittenFile2}
trait Step {
val id: String
val progressWeight: Double
val flow: Flow[WrittenFile2, ConvertOutputElement, Route]
}
object Step {
case class SimpleStep(
override val id: String,
override val progressWeight: Double,
override val flow: Flow[WrittenFile2, ConvertOutputElement, Route]
) extends Step
case class ErrorStep(override val id: String, errorMessage: String, file2Writer: File2Writer)(implicit ec: ExecutionContext) extends Step {
override val progressWeight = 1.0
override val flow: Flow[WrittenFile2, ConvertOutputElement, Route] = {
Flow.apply[WrittenFile2]
.mapAsync(1) { writtenFile =>
for {
processedFile <- file2Writer.setProcessed(writtenFile, 0, Some(errorMessage))
} yield {
writtenFile.progressPiece.report(1.0)
ConvertOutputElement.ToIngest(processedFile)
}
}
.mapMaterializedValue(_ => Directives.reject)
}
}
case class StepSpec(stepId: String, progressWeight: Double)
class HttpSteps(
stepSpecs: Vector[StepSpec],
file2Writer: File2Writer,
maxNWorkers: Int,
workerIdleTimeout: FiniteDuration,
httpCreateIdleTimeout: FiniteDuration
) {
/** Builds all Steps that have HTTP server components.
*
* TODO avoid passing a Materializer here. We end up starting actors before
* materialization, which is wrong.
*/
def steps(implicit mat: Materializer): Vector[Step] = {
stepSpecs.map(spec => buildStep(spec, mat.system))
}
private def buildStep(spec: StepSpec, actorRefFactory: ActorRefFactory): Step = {
val fragmentCollector = new StepOutputFragmentCollector(file2Writer, spec.stepId, spec.progressWeight)
val taskServer = new HttpStepHandler(spec.stepId, file2Writer.blobStorage, fragmentCollector, maxNWorkers, workerIdleTimeout, httpCreateIdleTimeout)
    val flow = taskServer.flow(actorRefFactory)
    SimpleStep(spec.stepId, spec.progressWeight, flow)
}
}
def all(
file2Writer: File2Writer,
maxNHttpWorkers: Int,
workerIdleTimeout: FiniteDuration,
httpCreateIdleTimeout: FiniteDuration
)(implicit mat: Materializer): Vector[Step] = {
new HttpSteps(
Vector(
StepSpec("Archive", 0.1),
StepSpec("Email", 0.1),
StepSpec("Html", 0.75), // 1.0 if !wantSplitByPage, 0.5 otherwise
StepSpec("Image", 1.0),
StepSpec("Office", 0.75),
StepSpec("Pdf", 1.0),
StepSpec("PdfOcr", 0.75),
StepSpec("Pst", 0.1),
StepSpec("Text", 0.75) // 1.0 if !wantSplitByPage, 0.5 otherwise
),
file2Writer,
maxNHttpWorkers,
workerIdleTimeout,
httpCreateIdleTimeout
).steps ++ Vector(
ErrorStep("Unhandled", "unhandled", file2Writer)(mat.executionContext),
ErrorStep("Canceled", "canceled", file2Writer)(mat.executionContext)
)
}
}
| overview/overview-server | worker/src/main/scala/com/overviewdocs/ingest/process/Step.scala | Scala | agpl-3.0 | 3,432 |
package knot.core.stream.ops
import knot.core.stream.MatelializableOps
import knot.core.stream.plugs.{Input, OutputPort, SinkPlug}
trait SinkOps[I, M]
extends MatelializableOps[SinkPlug[I], M]
with LinearStream
with Downstream {
override val plug = SinkPlug(Input[I](opsId, 0, s"${this.toDebugStringWithId}.in"))
override protected final def aroundOnNext[In](in: Input[In], element: In): Unit = {
onNext(in.asInput[I], element.asInstanceOf[I])
}
def in: Input[I] = plug.in
override protected def preStart(): Unit = requestIfNeeded(in)
protected def onNext(in: Input[I], element: I): Unit
override def requestLength: Long = 1
override protected def onRequest(out: OutputPort): Unit = ()
}
| defvar/knot | knot-core/src/main/scala/knot/core/stream/ops/SinkOps.scala | Scala | mit | 727 |
package lila.message
import akka.actor._
import com.typesafe.config.Config
import lila.hub.actorApi.message.LichessThread
final class Env(
config: Config,
db: lila.db.Env,
mongoCache: lila.memo.MongoCache.Builder,
shutup: ActorSelection,
blocks: (String, String) => Fu[Boolean],
follows: (String, String) => Fu[Boolean],
getPref: String => Fu[lila.pref.Pref],
system: ActorSystem) {
private val CollectionThread = config getString "collection.thread"
private val ThreadMaxPerPage = config getInt "thread.max_per_page"
private val ActorName = config getString "actor.name"
import scala.collection.JavaConversions._
val LichessSenders = (config getStringList "lichess_senders").toList
private[message] lazy val threadColl = db(CollectionThread)
private lazy val unreadCache = new UnreadCache(mongoCache)
lazy val forms = new DataForm(security = security)
lazy val api = new Api(
unreadCache = unreadCache,
shutup = shutup,
maxPerPage = ThreadMaxPerPage,
blocks = blocks,
bus = system.lilaBus)
lazy val security = new MessageSecurity(
follows = follows,
blocks = blocks,
getPref = getPref)
system.actorOf(Props(new Actor {
def receive = {
case thread: LichessThread => api.lichessThread(thread)
}
}), name = ActorName)
def cli = new lila.common.Cli {
import tube.threadTube
def process = {
case "message" :: "typecheck" :: Nil => lila.db.Typecheck.apply[Thread]
}
}
}
object Env {
lazy val current = "message" boot new Env(
config = lila.common.PlayApp loadConfig "message",
db = lila.db.Env.current,
shutup = lila.hub.Env.current.actor.shutup,
mongoCache = lila.memo.Env.current.mongoCache,
blocks = lila.relation.Env.current.api.fetchBlocks,
follows = lila.relation.Env.current.api.fetchFollows,
getPref = lila.pref.Env.current.api.getPref,
system = lila.common.PlayApp.system)
}
| JimmyMow/lila | modules/message/src/main/Env.scala | Scala | mit | 1,951 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.net.URI
import java.util.Locale
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, when}
import org.mockito.invocation.InvocationOnMock
import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.AnalysisTest
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.logical.{CreateTableAsSelect, CreateV2Table, DropTable, LogicalPlan}
import org.apache.spark.sql.connector.{InMemoryTableCatalog, InMemoryTableProvider}
import org.apache.spark.sql.connector.catalog.{CatalogManager, CatalogNotFoundException, Identifier, TableCatalog}
import org.apache.spark.sql.execution.datasources.{CreateTable, DataSourceResolution}
import org.apache.spark.sql.internal.SQLConf.DEFAULT_V2_CATALOG
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class PlanResolutionSuite extends AnalysisTest {
import CatalystSqlParser._
private val v2Format = classOf[InMemoryTableProvider].getName
private val testCat: TableCatalog = {
val newCatalog = new InMemoryTableCatalog
newCatalog.initialize("testcat", CaseInsensitiveStringMap.empty())
newCatalog
}
private val v2SessionCatalog = {
val newCatalog = new InMemoryTableCatalog
newCatalog.initialize("session", CaseInsensitiveStringMap.empty())
newCatalog
}
private val catalogManagerWithDefault = {
val manager = mock(classOf[CatalogManager])
when(manager.catalog(any())).thenAnswer((invocation: InvocationOnMock) => {
invocation.getArgument[String](0) match {
case "testcat" =>
testCat
case name =>
throw new CatalogNotFoundException(s"No such catalog: $name")
}
})
when(manager.defaultCatalog).thenReturn(Some(testCat))
when(manager.v2SessionCatalog).thenReturn(v2SessionCatalog)
manager
}
private val catalogManagerWithoutDefault = {
val manager = mock(classOf[CatalogManager])
when(manager.catalog(any())).thenAnswer((invocation: InvocationOnMock) => {
invocation.getArgument[String](0) match {
case "testcat" =>
testCat
case name =>
throw new CatalogNotFoundException(s"No such catalog: $name")
}
})
when(manager.defaultCatalog).thenReturn(None)
when(manager.v2SessionCatalog).thenReturn(v2SessionCatalog)
manager
}
def parseAndResolve(query: String, withDefault: Boolean = false): LogicalPlan = {
val newConf = conf.copy()
newConf.setConfString(DEFAULT_V2_CATALOG.key, "testcat")
val catalogManager = if (withDefault) {
catalogManagerWithDefault
} else {
catalogManagerWithoutDefault
}
DataSourceResolution(newConf, catalogManager).apply(parsePlan(query))
}
private def parseResolveCompare(query: String, expected: LogicalPlan): Unit =
comparePlans(parseAndResolve(query), expected, checkAnalysis = true)
private def extractTableDesc(sql: String): (CatalogTable, Boolean) = {
parseAndResolve(sql).collect {
case CreateTable(tableDesc, mode, _) => (tableDesc, mode == SaveMode.Ignore)
}.head
}
test("create table - with partitioned by") {
val query = "CREATE TABLE my_tab(a INT comment 'test', b STRING) " +
"USING parquet PARTITIONED BY (a)"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType()
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType),
provider = Some("parquet"),
partitionColumnNames = Seq("a")
)
parseAndResolve(query) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $query")
}
}
test("create table - partitioned by transforms") {
val transforms = Seq(
"bucket(16, b)", "years(ts)", "months(ts)", "days(ts)", "hours(ts)", "foo(a, 'bar', 34)",
"bucket(32, b), days(ts)")
transforms.foreach { transform =>
val query =
s"""
|CREATE TABLE my_tab(a INT, b STRING) USING parquet
|PARTITIONED BY ($transform)
""".stripMargin
val ae = intercept[AnalysisException] {
parseAndResolve(query)
}
assert(ae.message
.contains(s"Transforms cannot be converted to partition columns: $transform"))
}
}
test("create table - with bucket") {
val query = "CREATE TABLE my_tab(a INT, b STRING) USING parquet " +
"CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", IntegerType).add("b", StringType),
provider = Some("parquet"),
bucketSpec = Some(BucketSpec(5, Seq("a"), Seq("b")))
)
parseAndResolve(query) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $query")
}
}
test("create table - with comment") {
val sql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet COMMENT 'abc'"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", IntegerType).add("b", StringType),
provider = Some("parquet"),
comment = Some("abc"))
parseAndResolve(sql) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("create table - with table properties") {
val sql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet TBLPROPERTIES('test' = 'test')"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", IntegerType).add("b", StringType),
provider = Some("parquet"),
properties = Map("test" -> "test"))
parseAndResolve(sql) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("create table - with location") {
val v1 = "CREATE TABLE my_tab(a INT, b STRING) USING parquet LOCATION '/tmp/file'"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat.empty.copy(locationUri = Some(new URI("/tmp/file"))),
schema = new StructType().add("a", IntegerType).add("b", StringType),
provider = Some("parquet"))
parseAndResolve(v1) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $v1")
}
val v2 =
"""
|CREATE TABLE my_tab(a INT, b STRING)
|USING parquet
|OPTIONS (path '/tmp/file')
|LOCATION '/tmp/file'
""".stripMargin
val e = intercept[AnalysisException] {
parseAndResolve(v2)
}
assert(e.message.contains("you can only specify one of them."))
}
test("create table - byte length literal table name") {
val sql = "CREATE TABLE 1m.2g(a INT) USING parquet"
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("2g", Some("1m")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", IntegerType),
provider = Some("parquet"))
parseAndResolve(sql) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("support for other types in OPTIONS") {
val sql =
"""
|CREATE TABLE table_name USING json
|OPTIONS (a 1, b 0.1, c TRUE)
""".stripMargin
val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("table_name"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty.copy(
properties = Map("a" -> "1", "b" -> "0.1", "c" -> "true")
),
schema = new StructType,
provider = Some("json")
)
parseAndResolve(sql) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test CTAS against data source tables") {
val s1 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val s2 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|LOCATION '/user/external/page_view'
|COMMENT 'This is the staging page view table'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val s3 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
checkParsing(s1)
checkParsing(s2)
checkParsing(s3)
def checkParsing(sql: String): Unit = {
val (desc, exists) = extractTableDesc(sql)
assert(exists)
assert(desc.identifier.database.contains("mydb"))
assert(desc.identifier.table == "page_view")
assert(desc.storage.locationUri.contains(new URI("/user/external/page_view")))
assert(desc.schema.isEmpty) // will be populated later when the table is actually created
assert(desc.comment.contains("This is the staging page view table"))
assert(desc.viewText.isEmpty)
assert(desc.viewDefaultDatabase.isEmpty)
assert(desc.viewQueryColumnNames.isEmpty)
assert(desc.partitionColumnNames.isEmpty)
assert(desc.provider.contains("parquet"))
assert(desc.properties == Map("p1" -> "v1", "p2" -> "v2"))
}
}
test("Test v2 CreateTable with known catalog in identifier") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS testcat.mydb.table_name (
| id bigint,
| description string,
| point struct<x: double, y: double>)
|USING parquet
|COMMENT 'table comment'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|OPTIONS (path 's3://bucket/path/to/data', other 20)
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"other" -> "20",
"provider" -> "parquet",
"location" -> "s3://bucket/path/to/data",
"comment" -> "table comment")
parseAndResolve(sql) match {
case create: CreateV2Table =>
assert(create.catalog.name == "testcat")
assert(create.tableName == Identifier.of(Array("mydb"), "table_name"))
assert(create.tableSchema == new StructType()
.add("id", LongType)
.add("description", StringType)
.add("point", new StructType().add("x", DoubleType).add("y", DoubleType)))
assert(create.partitioning.isEmpty)
assert(create.properties == expectedProperties)
assert(create.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateV2Table].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test v2 CreateTable with default catalog") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS mydb.table_name (
| id bigint,
| description string,
| point struct<x: double, y: double>)
|USING parquet
|COMMENT 'table comment'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|OPTIONS (path 's3://bucket/path/to/data', other 20)
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"other" -> "20",
"provider" -> "parquet",
"location" -> "s3://bucket/path/to/data",
"comment" -> "table comment")
parseAndResolve(sql, withDefault = true) match {
case create: CreateV2Table =>
assert(create.catalog.name == "testcat")
assert(create.tableName == Identifier.of(Array("mydb"), "table_name"))
assert(create.tableSchema == new StructType()
.add("id", LongType)
.add("description", StringType)
.add("point", new StructType().add("x", DoubleType).add("y", DoubleType)))
assert(create.partitioning.isEmpty)
assert(create.properties == expectedProperties)
assert(create.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateV2Table].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test v2 CreateTable with data source v2 provider and no default") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS mydb.page_view (
| id bigint,
| description string,
| point struct<x: double, y: double>)
|USING $v2Format
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"provider" -> v2Format,
"location" -> "/user/external/page_view",
"comment" -> "This is the staging page view table")
parseAndResolve(sql) match {
case create: CreateV2Table =>
assert(create.catalog.name == "session")
assert(create.tableName == Identifier.of(Array("mydb"), "page_view"))
assert(create.tableSchema == new StructType()
.add("id", LongType)
.add("description", StringType)
.add("point", new StructType().add("x", DoubleType).add("y", DoubleType)))
assert(create.partitioning.isEmpty)
assert(create.properties == expectedProperties)
assert(create.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateV2Table].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test v2 CTAS with known catalog in identifier") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS testcat.mydb.table_name
|USING parquet
|COMMENT 'table comment'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|OPTIONS (path 's3://bucket/path/to/data', other 20)
|AS SELECT * FROM src
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"other" -> "20",
"provider" -> "parquet",
"location" -> "s3://bucket/path/to/data",
"comment" -> "table comment")
parseAndResolve(sql) match {
case ctas: CreateTableAsSelect =>
assert(ctas.catalog.name == "testcat")
assert(ctas.tableName == Identifier.of(Array("mydb"), "table_name"))
assert(ctas.properties == expectedProperties)
assert(ctas.writeOptions == Map("other" -> "20"))
assert(ctas.partitioning.isEmpty)
assert(ctas.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateTableAsSelect].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test v2 CTAS with default catalog") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS mydb.table_name
|USING parquet
|COMMENT 'table comment'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|OPTIONS (path 's3://bucket/path/to/data', other 20)
|AS SELECT * FROM src
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"other" -> "20",
"provider" -> "parquet",
"location" -> "s3://bucket/path/to/data",
"comment" -> "table comment")
parseAndResolve(sql, withDefault = true) match {
case ctas: CreateTableAsSelect =>
assert(ctas.catalog.name == "testcat")
assert(ctas.tableName == Identifier.of(Array("mydb"), "table_name"))
assert(ctas.properties == expectedProperties)
assert(ctas.writeOptions == Map("other" -> "20"))
assert(ctas.partitioning.isEmpty)
assert(ctas.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateTableAsSelect].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("Test v2 CTAS with data source v2 provider and no default") {
val sql =
s"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING $v2Format
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val expectedProperties = Map(
"p1" -> "v1",
"p2" -> "v2",
"provider" -> v2Format,
"location" -> "/user/external/page_view",
"comment" -> "This is the staging page view table")
parseAndResolve(sql) match {
case ctas: CreateTableAsSelect =>
assert(ctas.catalog.name == "session")
assert(ctas.tableName == Identifier.of(Array("mydb"), "page_view"))
assert(ctas.properties == expectedProperties)
assert(ctas.writeOptions.isEmpty)
assert(ctas.partitioning.isEmpty)
assert(ctas.ignoreIfExists)
case other =>
fail(s"Expected to parse ${classOf[CreateTableAsSelect].getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}
test("drop table") {
val tableName1 = "db.tab"
val tableIdent1 = TableIdentifier("tab", Option("db"))
val tableName2 = "tab"
val tableIdent2 = TableIdentifier("tab", None)
parseResolveCompare(s"DROP TABLE $tableName1",
DropTableCommand(tableIdent1, ifExists = false, isView = false, purge = false))
parseResolveCompare(s"DROP TABLE IF EXISTS $tableName1",
DropTableCommand(tableIdent1, ifExists = true, isView = false, purge = false))
parseResolveCompare(s"DROP TABLE $tableName2",
DropTableCommand(tableIdent2, ifExists = false, isView = false, purge = false))
parseResolveCompare(s"DROP TABLE IF EXISTS $tableName2",
DropTableCommand(tableIdent2, ifExists = true, isView = false, purge = false))
parseResolveCompare(s"DROP TABLE $tableName2 PURGE",
DropTableCommand(tableIdent2, ifExists = false, isView = false, purge = true))
parseResolveCompare(s"DROP TABLE IF EXISTS $tableName2 PURGE",
DropTableCommand(tableIdent2, ifExists = true, isView = false, purge = true))
}
test("drop table in v2 catalog") {
val tableName1 = "testcat.db.tab"
val tableIdent1 = Identifier.of(Array("db"), "tab")
val tableName2 = "testcat.tab"
val tableIdent2 = Identifier.of(Array.empty, "tab")
parseResolveCompare(s"DROP TABLE $tableName1",
DropTable(testCat, tableIdent1, ifExists = false))
parseResolveCompare(s"DROP TABLE IF EXISTS $tableName1",
DropTable(testCat, tableIdent1, ifExists = true))
parseResolveCompare(s"DROP TABLE $tableName2",
DropTable(testCat, tableIdent2, ifExists = false))
parseResolveCompare(s"DROP TABLE IF EXISTS $tableName2",
DropTable(testCat, tableIdent2, ifExists = true))
}
test("drop view") {
val viewName1 = "db.view"
val viewIdent1 = TableIdentifier("view", Option("db"))
val viewName2 = "view"
val viewIdent2 = TableIdentifier("view")
parseResolveCompare(s"DROP VIEW $viewName1",
DropTableCommand(viewIdent1, ifExists = false, isView = true, purge = false))
parseResolveCompare(s"DROP VIEW IF EXISTS $viewName1",
DropTableCommand(viewIdent1, ifExists = true, isView = true, purge = false))
parseResolveCompare(s"DROP VIEW $viewName2",
DropTableCommand(viewIdent2, ifExists = false, isView = true, purge = false))
parseResolveCompare(s"DROP VIEW IF EXISTS $viewName2",
DropTableCommand(viewIdent2, ifExists = true, isView = true, purge = false))
}
test("drop view in v2 catalog") {
intercept[AnalysisException] {
parseAndResolve("DROP VIEW testcat.db.view")
}.getMessage.toLowerCase(Locale.ROOT).contains(
"view support in catalog has not been implemented")
}
// ALTER VIEW view_name SET TBLPROPERTIES ('comment' = new_comment);
// ALTER VIEW view_name UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
test("alter view: alter view properties") {
val sql1_view = "ALTER VIEW table_name SET TBLPROPERTIES ('test' = 'test', " +
"'comment' = 'new_comment')"
val sql2_view = "ALTER VIEW table_name UNSET TBLPROPERTIES ('comment', 'test')"
val sql3_view = "ALTER VIEW table_name UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')"
val parsed1_view = parseAndResolve(sql1_view)
val parsed2_view = parseAndResolve(sql2_view)
val parsed3_view = parseAndResolve(sql3_view)
val tableIdent = TableIdentifier("table_name", None)
val expected1_view = AlterTableSetPropertiesCommand(
tableIdent, Map("test" -> "test", "comment" -> "new_comment"), isView = true)
val expected2_view = AlterTableUnsetPropertiesCommand(
tableIdent, Seq("comment", "test"), ifExists = false, isView = true)
val expected3_view = AlterTableUnsetPropertiesCommand(
tableIdent, Seq("comment", "test"), ifExists = true, isView = true)
comparePlans(parsed1_view, expected1_view)
comparePlans(parsed2_view, expected2_view)
comparePlans(parsed3_view, expected3_view)
}
// ALTER TABLE table_name SET TBLPROPERTIES ('comment' = new_comment);
// ALTER TABLE table_name UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
test("alter table: alter table properties") {
val sql1_table = "ALTER TABLE table_name SET TBLPROPERTIES ('test' = 'test', " +
"'comment' = 'new_comment')"
val sql2_table = "ALTER TABLE table_name UNSET TBLPROPERTIES ('comment', 'test')"
val sql3_table = "ALTER TABLE table_name UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')"
val parsed1_table = parseAndResolve(sql1_table)
val parsed2_table = parseAndResolve(sql2_table)
val parsed3_table = parseAndResolve(sql3_table)
val tableIdent = TableIdentifier("table_name", None)
val expected1_table = AlterTableSetPropertiesCommand(
tableIdent, Map("test" -> "test", "comment" -> "new_comment"), isView = false)
val expected2_table = AlterTableUnsetPropertiesCommand(
tableIdent, Seq("comment", "test"), ifExists = false, isView = false)
val expected3_table = AlterTableUnsetPropertiesCommand(
tableIdent, Seq("comment", "test"), ifExists = true, isView = false)
comparePlans(parsed1_table, expected1_table)
comparePlans(parsed2_table, expected2_table)
comparePlans(parsed3_table, expected3_table)
}
test("support for other types in TBLPROPERTIES") {
val sql =
"""
|ALTER TABLE table_name
|SET TBLPROPERTIES ('a' = 1, 'b' = 0.1, 'c' = TRUE)
""".stripMargin
val parsed = parseAndResolve(sql)
val expected = AlterTableSetPropertiesCommand(
TableIdentifier("table_name"),
Map("a" -> "1", "b" -> "0.1", "c" -> "true"),
isView = false)
comparePlans(parsed, expected)
}
test("alter table: set location") {
val sql1 = "ALTER TABLE table_name SET LOCATION 'new location'"
val parsed1 = parseAndResolve(sql1)
val tableIdent = TableIdentifier("table_name", None)
val expected1 = AlterTableSetLocationCommand(
tableIdent,
None,
"new location")
comparePlans(parsed1, expected1)
}
}
| bdrillard/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala | Scala | apache-2.0 | 26,307 |
package cloudbreak
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
class CloudbreakSimulation extends Simulation {
val r = scala.util.Random
val host = sys.env("CB_PERFTEST_HOST")
val mockHost = sys.env.getOrElse("CB_MOCK_HOST", "mockhosts")
val delayBeforeTermination = sys.env.get("CB_DELAY_BEFORE_TERM").filter(_.trim.nonEmpty).map(_.toInt).getOrElse(60)
val numberOfUsers = sys.env.getOrElse("CB_NUMBER_OF_USERS", "3").toInt
val rampupSeconds = sys.env.getOrElse("CB_RAMPUP_SECONDS", "5").toInt
val httpConf = http
.baseURL("https://" + host)
.userAgentHeader("curl/7.37.1")
  // CB_HOSTNAME_ALIASES format example: cloud.eng.hortonworks.com=13.64.248.244;cloud.qa.hortonworks.com=52.174.105.112
sys.env.get("CB_HOSTNAME_ALIASES").filter(_.trim.nonEmpty).foreach(s => httpConf.hostNameAliases(Utils.stringToMap(s)))
val getToken = http("uaa token request")
.post("/identity/oauth/authorize")
.header("accept", "application/x-www-form-urlencoded")
.header("Content-Type", "application/x-www-form-urlencoded")
.queryParam("response_type", "token")
.queryParam("client_id", "cloudbreak_shell")
.queryParam("scope.0", "openid")
.queryParam("source", "login")
.queryParam("redirect_uri", "http://cloudbreak.shell")
.body(StringBody("""credentials={"username":"${userName}","password":"${password}"}"""))
.disableFollowRedirect
.check(status.is(302), headerRegex("Location", """access_token=(.*?)&""").saveAs("token"))
val scn2 = scenario("cluster creation v2")
.feed(Feeders.userFeeder)
.exec(Utils.addVariableToSession(_, "mockHost", mockHost))
.exec(getToken)
.exec(InfoRequests.queryCbVersion)
//init
.exec(Utils.addVariableToSession(_, "imagecatalogName", "mock-catalog-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(ImageCatalogRequests.createMock)
.pause(1)
.exec(CredentialRequests.queryCredentials)
.exec(Utils.addVariableToSession(_, "blueprintName", "multinode-hdfs-yarn-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(BlueprintRequests.createBlueprint)
.pause(1)
.exec(Utils.addVariableToSession(_, "credentialName", "mock-credential-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(CredentialRequests.createMock)
.pause(1)
.exec(Utils.printSession(_))
//create cluster
.exec(Utils.addVariableToSession(_, "stackName", "perftest-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(StackRequests.createMockStackV2)
.pause(4)
.exec(Utils.printSession(_))
.pause(delayBeforeTermination)
//delete
.exec(StackRequests.deleteStack)
.exec(Utils.addVariableToSession(_, "stackStatus", ""))
.asLongAs(s => !"DELETE_COMPLETED".equals(s("stackStatus").as[String])) {
pause(10)
.exec(StackRequests.getStack)
}
.exec(BlueprintRequests.deleteBlueprint)
.exec(CredentialRequests.deleteMock)
.exec(ImageCatalogRequests.deleteMock)
val scn = scenario("cluster creation")
.feed(Feeders.userFeeder)
.exec(getToken)
//init
.exec(CredentialRequests.queryCredentials)
.exec(Utils.addVariableToSession(_, "blueprintName", "multinode-hdfs-yarn-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(BlueprintRequests.createBlueprint)
.exec(Utils.addVariableToSession(_, "credentialName", "mock-credential-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(CredentialRequests.createMock)
.exec(Utils.addVariableToSession(_, "networkName", "mock-network-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(Utils.addVariableToSession(_, "securitygroupName", "mock-securitygroup-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(Utils.addVariableToSession(_, "templateName", "mock-template-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(Utils.printSession(_))
//create cluster
.exec(Utils.addVariableToSession(_, "stackName", "perftest-" + r.alphanumeric.take(10).mkString.toLowerCase))
.exec(StackRequests.createMockStack)
.exitHereIfFailed
.exec(StackRequests.createMockCluster)
.exec(Utils.printSession(_))
.pause(delayBeforeTermination)
//delete
.exec(StackRequests.deleteStack)
.exec(Utils.addVariableToSession(_, "stackStatus", ""))
.asLongAs(s => !"DELETE_COMPLETED".equals(s("stackStatus").as[String])) {
pause(10)
.exec(StackRequests.getStack)
}
.exec(BlueprintRequests.deleteBlueprint)
.exec(CredentialRequests.deleteMock)
setUp(scn2.inject(rampUsers(numberOfUsers) over (rampupSeconds seconds)).protocols(httpConf))
}
| hortonworks/cloudbreak | performance-test/user-files/simulations/cloudbreak/CloudbreakSimulation.scala | Scala | apache-2.0 | 4,660 |
package quizleague.web.maintain
import quizleague.web.core._
import quizleague.web.core.RouteConfig
import quizleague.web.maintain.user.UserModule
import quizleague.web.maintain.venue.VenueModule
import quizleague.web.maintain.text.TextModule
import quizleague.web.maintain.team.TeamModule
import quizleague.web.maintain.applicationcontext.ApplicationContextModule
import quizleague.web.maintain.globaltext.GlobalTextModule
import quizleague.web.maintain.season.SeasonModule
import quizleague.web.maintain.database.DatabaseModule
import quizleague.web.maintain.stats.StatsModule
import quizleague.web.service.notification.NotificationGetService
import java.time.LocalDateTime
import quizleague.web.maintain.competitionstatistics.CompetitionStatisticsModule
import quizleague.web.model.MaintainMessagePayload
import quizleague.web.shared.SharedModule
import rxscalajs.Observable
import scalajs.js
import js.JSConverters._
object MaintainModule extends Module {
override val modules = @@(
UserModule,
VenueModule,
TextModule,
TeamModule,
ApplicationContextModule,
GlobalTextModule,
SeasonModule,
DatabaseModule,
StatsModule,
CompetitionStatisticsModule,
SharedModule)
override val routes = @@(
RouteConfig(path = "/maintain/*", components = Map("sidenav" -> MaintainMenuComponent)))
}
object NotificationService extends NotificationGetService {
def messages(threshold: LocalDateTime): Observable[String] = super.messages("maintain", threshold)
.map(_.map(m => {
m.payload match {
case p: MaintainMessagePayload => p
case _ => throw new Exception("invalid payload")
}
}).foldLeft("")((s, p) => s + p.message))
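
  // Illustrative usage sketch (not part of the original file): print the aggregated maintenance
  // messages emitted in the last hour. It assumes the rxscalajs Observable returned above and its
  // subscribe(onNext) overload; the one-hour threshold is an arbitrary example value.
  def printRecentMessages(): Unit =
    messages(LocalDateTime.now.minusHours(1)).subscribe(text => println(text))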
} | gumdrop/quizleague-maintain | js/src/main/scala/quizleague/web/maintain/MaintainModule.scala | Scala | mit | 1,706 |
package com.rasterfoundry.database
import com.rasterfoundry.common.Generators.Implicits._
import com.rasterfoundry.datamodel.Metric
import cats.implicits._
import doobie.implicits._
import org.scalacheck.Prop.forAll
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.Checkers
import scala.util.Random
class MetricDaoSpec
extends AnyFunSuite
with Matchers
with Checkers
with DBTestConfig
with PropTestHelpers {
  // The 0.8 cutoff means it's about 50/50 whether we get at least three repetitions;
  // three seemed like a nice number to see on a typical run
def getRepetitionAttempts(init: Int): Int =
if (Random.nextFloat > 0.8) init else getRepetitionAttempts(init + 1)
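
  // Worked numbers for the helper above (illustrative): P(at least k recursive calls) = 0.8^k,
  // so P(>= 3) = 0.8^3 ≈ 0.51, and the expected number of repetitions is 0.8 / (1 - 0.8) = 4.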
test("insert and update a metric") {
check {
forAll { (metric: Metric) =>
{
val repetitions = getRepetitionAttempts(0)
val metricIO = for {
_ <- MetricDao.insert(metric)
countOnce <- MetricDao.unsafeGetMetric(metric)
_ <- List.fill(repetitions)(()) traverse { _ =>
MetricDao.insert(metric)
}
countAgain <- MetricDao.unsafeGetMetric(metric)
} yield { (countOnce, countAgain) }
val (initial, afterUpdate) = metricIO.transact(xa).unsafeRunSync
assert(initial.value == 1,
"On insert, the count for this metric should be 1")
assert(
afterUpdate.value == 1 + repetitions,
"After updating, the count for this metric should be 1 + the number of updates")
true
}
}
}
}
}
| raster-foundry/raster-foundry | app-backend/db/src/test/scala/com/azavea/rf/database/MetricDaoSpec.scala | Scala | apache-2.0 | 1,641 |
case class MayErr[+Eee, +Aaa](e: Either[Eee, Aaa]) {
def flatMap[B, EE >: Eee](f: Aaa => MayErr[EE, B]): MayErr[EE, B] = {
MayErr(e.right.flatMap(a => f(a).e))
}
def get(x: Eee): Eee = x
}
object MayErr {
implicit def eitherToError[Ey, EE >: Ey, Ay, AA >: Ay](e: Either[Ey, Ay]): MayErr[EE, AA] = MayErr[Ey, Ay](e)
}
class A
class C
class B extends C
val x: Option[A] = Some(new A)
val z: Product with Either[B, A] with Serializable = x.toRight(new B)
import MayErr._
z.flatMap(meta => {
/*start*/meta/*end*/
MayErr(z)
})
//A | triggerNZ/intellij-scala | testdata/typeInference/bugs5/SCL4095C.scala | Scala | apache-2.0 | 543 |
/*******************************************************************************
Copyright (c) 2012-2013, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing
import scala.collection.mutable.{ LinkedHashSet => MLinkedHashSet }
import kr.ac.kaist.jsaf.analysis.lib.WorkTreeSet
class WorklistFIFO extends Worklist {
private val worklist = new MLinkedHashSet[ControlPoint]
def head: ControlPoint = this.synchronized { worklist.last }
def isEmpty: Boolean = this.synchronized { worklist.isEmpty }
def getSize: Int = this.synchronized { worklist.size }
def getWorkList: WorkTreeSet = WorkTreeSet.Empty // TODO: return type does not match
override def toString: String = {
val str = new StringBuilder
    for(w <- worklist.toList.reverse) str.append(w + "\n")
str.toString()
}
protected def insertWork(work: OrderEntry): Unit = worklist.add(work._2)
protected def removeHead: ControlPoint = {
val cp = worklist.last
worklist.remove(cp)
cp
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/WorklistFIFO.scala | Scala | bsd-3-clause | 1,203 |
package nl.grons.thriftstream.decoders
import nl.grons.thriftstream.decoders.DecodeResult._
import uk.co.real_logic.agrona.DirectBuffer
import nl.grons.thriftstream.decoders.BytesDecoder._
/**
* Bytes encoder.
*
* Protocol: just the bytes (no length prefix).
* @param length expected number of bytes
*/
case class BytesDecoder(length: Int) extends Decoder[Array[Byte]] {
require(length >= 0)
override def decode(buffer: DirectBuffer, readOffset: Int): DecodeResult[Array[Byte]] = {
val value = Array.ofDim[Byte](length)
doDecode(length, 0, value, buffer, readOffset)
}
}
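
// A minimal usage sketch (not part of the original file). It assumes agrona's UnsafeBuffer is on
// the classpath and that DecodeResult exposes the Decoded / DecodeInsufficientData cases used in
// doDecode below as extractable (case-class-like) constructors. The value is fed in two fragments
// to show how the continuation decoder carries partial state across buffers.
object BytesDecoderUsageSketch {
  import uk.co.real_logic.agrona.concurrent.UnsafeBuffer

  def main(args: Array[String]): Unit = {
    val decoder = BytesDecoder(4)
    decoder.decode(new UnsafeBuffer(Array[Byte](1, 2)), 0) match {
      case DecodeInsufficientData(continuation) =>
        continuation.decode(new UnsafeBuffer(Array[Byte](3, 4)), 0) match {
          case Decoded(value, _, _) => println(value.toSeq) // prints the four bytes 1, 2, 3, 4
          case other                => println(s"unexpected result: $other")
        }
      case other => println(s"unexpected result: $other")
    }
  }
}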
object BytesDecoder {
private class BinaryContinuationDecoder(length: Int, readCount: Int, value: Array[Byte]) extends Decoder[Array[Byte]] {
override def decode(buffer: DirectBuffer, readOffset: Int): DecodeResult[Array[Byte]] =
doDecode(length, readCount, value, buffer, readOffset)
}
  private def doDecode(length: Int, readCount: Int, data: Array[Byte], buffer: DirectBuffer, readOffset: Int): DecodeResult[Array[Byte]] = {
val availableByteCount = buffer.capacity() - readOffset
val copyCount = Math.min(length - readCount, availableByteCount)
buffer.getBytes(readOffset, data, readCount, copyCount)
val newReadCount = readCount + copyCount
if (newReadCount == length) {
Decoded(data, buffer, readOffset + copyCount)
} else {
DecodeInsufficientData(new BinaryContinuationDecoder(length, newReadCount, data))
}
}
} | erikvanoosten/thrift-stream | src/main/scala/nl/grons/thriftstream/decoders/BytesDecoder.scala | Scala | apache-2.0 | 1,505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import scala.util.Random
import org.apache.spark.SparkContext
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.rdd.RDD
/**
* :: DeveloperApi ::
* Generate test data for KMeans. This class first chooses k cluster centers
* from a d-dimensional Gaussian distribution scaled by factor r and then creates a Gaussian
* cluster with scale 1 around each center.
*/
@DeveloperApi
@Since("0.8.0")
object KMeansDataGenerator {
/**
* Generate an RDD containing test data for KMeans.
*
* @param sc SparkContext to use for creating the RDD
* @param numPoints Number of points that will be contained in the RDD
* @param k Number of clusters
* @param d Number of dimensions
* @param r Scaling factor for the distribution of the initial centers
* @param numPartitions Number of partitions of the generated RDD; default 2
*/
@Since("0.8.0")
def generateKMeansRDD(
sc: SparkContext,
numPoints: Int,
k: Int,
d: Int,
r: Double,
numPartitions: Int = 2)
: RDD[Array[Double]] =
{
// First, generate some centers
val rand = new Random(42)
val centers = Array.fill(k)(Array.fill(d)(rand.nextGaussian() * r))
// Then generate points around each center
sc.parallelize(0 until numPoints, numPartitions).map { idx =>
val center = centers(idx % k)
val rand2 = new Random(42 + idx)
Array.tabulate(d)(i => center(i) + rand2.nextGaussian())
}
}
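
  /**
   * Illustrative sketch (not part of the original API): generate a small dataset with a local
   * master and print a couple of points. The master URL and sizes are arbitrary example values.
   */
  private[util] def exampleLocalRun(): Unit = {
    val sc = new SparkContext("local[2]", "KMeansDataGeneratorExample")
    val data = generateKMeansRDD(sc, numPoints = 1000, k = 5, d = 3, r = 10.0)
    // scalastyle:off println
    data.take(2).foreach(point => println(point.mkString(" ")))
    // scalastyle:on println
    sc.stop()
  }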
@Since("0.8.0")
def main(args: Array[String]) {
if (args.length < 6) {
// scalastyle:off println
println("Usage: KMeansGenerator " +
"<master> <output_dir> <num_points> <k> <d> <r> [<num_partitions>]")
// scalastyle:on println
System.exit(1)
}
val sparkMaster = args(0)
val outputPath = args(1)
val numPoints = args(2).toInt
val k = args(3).toInt
val d = args(4).toInt
val r = args(5).toDouble
val parts = if (args.length >= 7) args(6).toInt else 2
val sc = new SparkContext(sparkMaster, "KMeansDataGenerator")
val data = generateKMeansRDD(sc, numPoints, k, d, r, parts)
data.map(_.mkString(" ")).saveAsTextFile(outputPath)
sc.stop()
System.exit(0)
}
}
| esi-mineset/spark | mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala | Scala | apache-2.0 | 3,062 |
import com.github.utaal.m68k._
import com.github.utaal.m68k.ast._
import org.specs2._
class MemorySpec extends mutable.Specification {
"Memory" should {
"create new immutable instances on set and match" in {
val mem: Memory = new LinearMemory(1000L)
val m1 = mem.set(Size.B, 100L, 0xfeL)
m1.get(Size.B, 100L) should_== 0xfeL
m1.get(Size.B, 101L) should_== 0x00L
val m2 = mem.set(Size.L, 100L, 0xdeadbeefL)
m2.get(Size.L, 100L) should_== 0xdeadbeefL
m2.get(Size.W, 100L) should_== 0xdeadL
m2.get(Size.W, 102L) should_== 0xbeefL
m2.get(Size.B, 101L) should_== 0xadL
m2.get(Size.B, 105L) should_== 0x00L
}
}
}
| utaal/m68k-interpreter | src/test/scala/MemorySpec.scala | Scala | mit | 681 |
package BIDMat
import ncsa.hdf.hdf5lib.structs._
import ncsa.hdf.hdf5lib.H5._
import ncsa.hdf.hdf5lib.HDF5Constants._
import scala.reflect._
object MatHDF5 {
var refcount:Long = -1;
def setCompressionPlist(dplist_id:Int, dims:Array[Long]) = {
if (Mat.compressType > 0) {
if (dims.length == 1) {
if (dims(0) > 1024) {
val cdims = new Array[Long](1);
cdims(0) = math.max(1, math.min(dims(0), Mat.chunkSize));
H5Pset_chunk(dplist_id, 1, cdims);
if (Mat.compressType == 1) {
H5Pset_deflate(dplist_id, Mat.compressionLevel)
} else {
H5Pset_szip(dplist_id, H5_SZIP_EC_OPTION_MASK, Mat.szipBlock);
}
}
} else {
if (dims(0)*dims(1) > 1024) {
val cdims = new Array[Long](2);
cdims(0) = math.max(1, math.min(dims(0), 1+Mat.chunkSize/dims(1)));
cdims(1) = math.max(1, math.min(Mat.chunkSize, dims(1)));
H5Pset_chunk(dplist_id, 2, cdims);
if (Mat.compressType == 1) {
H5Pset_deflate(dplist_id, Mat.compressionLevel);
} else {
H5Pset_szip(dplist_id, H5_SZIP_EC_OPTION_MASK, Mat.szipBlock);
}
}
}
}
}
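
  // Worked example of the chunk-size arithmetic above (illustrative; assumes Mat.chunkSize = 1 << 20):
  // for a dense 1000 x 2000 matrix, putDenseMat passes dims = (2000, 1000), so
  // cdims(0) = min(2000, 1 + (1 << 20) / 1000) = 1049 and cdims(1) = min(1 << 20, 1000) = 1000.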
def getStringAttr(id:Int, obj_name:String, attr_name:String):String = {
val attr_id = H5Aopen_by_name(id, obj_name, attr_name, H5P_DEFAULT, H5P_DEFAULT);
val attr_type_id = H5Aget_type(attr_id);
val attr_type_size = H5Tget_size(attr_type_id);
val sbuf = new Array[Byte](attr_type_size + 1);
H5Aread(attr_id, attr_type_id, sbuf);
H5Tclose(attr_type_id);
H5Aclose(attr_id);
new String(sbuf).trim();
}
def putStringAttr(id:Int, attr_name:String, attr_val:String) = {
val space_id = H5Screate(H5S_SCALAR);
val memtype_id = H5Tcopy(H5T_FORTRAN_S1);
H5Tset_size(memtype_id, attr_val.length());
val attr_id = H5Acreate(id, attr_name, memtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(attr_id, memtype_id, attr_val.getBytes());
H5Tclose(memtype_id);
H5Aclose(attr_id);
H5Sclose(space_id);
}
def getLongAttr(id:Int, obj_name:String, attr_name:String):Long = {
val attr_id = H5Aopen_by_name(id, obj_name, attr_name, H5P_DEFAULT, H5P_DEFAULT);
val attr_type_id = H5Aget_type(attr_id);
val attr_type_size = H5Tget_size(attr_type_id);
val sbuf = new Array[Long](attr_type_size/8);
H5Aread(attr_id, attr_type_id, sbuf);
H5Tclose(attr_type_id);
H5Aclose(attr_id);
sbuf(0)
}
def putIntAttr(id:Int, attr_name:String, attr_val:Int) = {
val space_id = H5Screate(H5S_SCALAR);
val attr_id = H5Acreate(id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT);
val lbuf = Array[Int](1);
lbuf(0) = attr_val;
H5Awrite(attr_id, H5T_NATIVE_INT, lbuf);
H5Aclose(attr_id);
H5Sclose(space_id);
}
def putLongAttr(id:Int, attr_name:String, attr_val:Long) = {
val space_id = H5Screate(H5S_SCALAR);
val attr_id = H5Acreate(id, attr_name, H5T_NATIVE_LLONG, space_id, H5P_DEFAULT, H5P_DEFAULT);
val lbuf = Array[Long](1);
lbuf(0) = attr_val;
H5Awrite(attr_id, H5T_NATIVE_LLONG, lbuf);
H5Aclose(attr_id);
H5Sclose(space_id)
}
def putByteAttr(id:Int, attr_name:String, attr_val:Byte) = {
val space_id = H5Screate(H5S_SCALAR);
val attr_id = H5Acreate(id, attr_name, H5T_NATIVE_UCHAR, space_id, H5P_DEFAULT, H5P_DEFAULT);
val lbuf = Array[Byte](1);
lbuf(0) = attr_val;
H5Awrite(attr_id, H5T_NATIVE_UCHAR, lbuf);
H5Aclose(attr_id);
H5Sclose(space_id)
}
def getMatDims(data_id:Int):Array[Long] = {
val space_id = H5Dget_space(data_id);
val ndims = H5Sget_simple_extent_ndims(space_id);
val dims = new Array[Long](ndims);
val ok = H5Sget_simple_extent_dims(space_id, dims, null);
H5Sclose(space_id);
dims;
}
def readMatDims(fid:Int, varname:String):Array[Long] = {
val data_id = H5Dopen(fid, varname, H5P_DEFAULT);
val dims = getMatDims(data_id);
H5Dclose(data_id);
dims;
}
def readMat2Dims(fname:String, varname:String):(Long, Long) = {
val fid = H5Fopen(fname,H5F_ACC_RDONLY,H5P_DEFAULT);
val dims = readMatDims(fid, varname);
H5Fclose(fid);
if (dims(1) == 0) {
(dims(0), dims(1));
} else {
(dims(1), dims(0));
}
}
def getDenseMat[T : ClassTag](fid:Int, varname:String, h5class:Int, dsize:Int):DenseMat[T] = {
val data_id = H5Dopen(fid, varname, H5P_DEFAULT);
val data_type_id = H5Dget_type(data_id);
val data_class = H5Tget_class(data_type_id);
val data_size = H5Tget_size(data_type_id);
val dims = getMatDims(data_id);
var mdata:DenseMat[T] = null;
if (data_class == h5class && data_size == dsize) {
mdata = new DenseMat[T](dims(1).intValue, dims(0).intValue);
H5Dread(data_id, data_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, mdata.data);
} else {
throw new RuntimeException("Bad class or data size "+data_class+" "+data_size);
}
H5Tclose(data_type_id);
H5Dclose(data_id);
mdata;
}
def getFND(fid:Int, varname:String, h5class:Int, dsize:Int):FND = {
val data_id = H5Dopen(fid, varname, H5P_DEFAULT);
val data_type_id = H5Dget_type(data_id);
val data_class = H5Tget_class(data_type_id);
val data_size = H5Tget_size(data_type_id);
val dims = getMatDims(data_id);
val idims = new Array[Int](dims.length);
for (i <- 0 until dims.length) {idims(i) = dims(dims.length - i - 1).toInt}
var mdata:FND = null;
if (data_class == h5class && data_size == dsize) {
mdata = FND(idims);
H5Dread(data_id, data_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, mdata.data);
} else {
throw new RuntimeException("Bad class or data size "+data_class+" "+data_size);
}
H5Tclose(data_type_id);
H5Dclose(data_id);
mdata;
}
def getCellMat(fid:Int, varname:String):CSMat = {
val data_id = H5Dopen(fid, varname, H5P_DEFAULT);
val data_type_id = H5Dget_type(data_id);
val data_class = H5Tget_class(data_type_id);
val data_size = H5Tget_size(data_type_id);
val dims = getMatDims(data_id);
var mdata:CSMat = null;
mdata = CSMat(dims(1).intValue, dims(0).intValue);
val bdata = new Array[Array[Byte]]((dims(0)*dims(1)).intValue);
for (i <- 0 until bdata.length) {
bdata(i) = new Array[Byte](data_size);
}
H5Dread(data_id, data_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, bdata);
val obj_type_out = new Array[Int](1);
obj_type_out(0) = 1;
for (i <- 0 until bdata.length) {
val item_id = H5Rdereference(fid, H5R_OBJECT, bdata(i));
mdata.data(i) = getMat(item_id, ".").asInstanceOf[String];
H5Oclose(item_id);
}
H5Tclose(data_type_id);
H5Dclose(data_id);
mdata
}
def getMatString(fid:Int, varname:String):String = {
val data_id = H5Dopen(fid, varname, H5P_DEFAULT);
val data_type_id = H5Dget_type(data_id);
val data_class = H5Tget_class(data_type_id);
val data_size = H5Tget_size(data_type_id);
val dims = getMatDims(data_id);
val nrows = dims(0).intValue;
val ncols = dims(1).intValue;
val sbuf = new Array[Byte](data_size*nrows*ncols);
H5Dread(data_id, data_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, sbuf);
H5Tclose(data_type_id);
H5Dclose(data_id);
new String(sbuf, "UTF_16LE").trim();
}
def getSparseMat[T](fid:Int, varname:String)(implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = {
val nrows = getLongAttr(fid, varname, "MATLAB_sparse").intValue;
val jc_id = H5Dopen(fid, varname+"/jc", H5P_DEFAULT);
val ncols = getMatDims(jc_id)(0).intValue - 1;
val data_id = H5Dopen(fid, varname+"/data", H5P_DEFAULT);
val data_type_id = H5Dget_type(data_id);
val nnz = getMatDims(data_id)(0).intValue;
var ir_id = -1;
try {
ir_id = H5Dopen(fid, varname+"/ir", H5P_DEFAULT);
} catch {
case _:Throwable => {}
}
val sdata = if (ir_id >= 0) {
SparseMat(nrows, ncols, nnz) ;
} else {
SparseMat.noRows(nrows, ncols, nnz);
}
val convert_ints = H5Tcopy(H5T_NATIVE_INT);
H5Dread_int(jc_id, convert_ints, H5S_ALL, H5S_ALL, H5P_DEFAULT, sdata.jc);
addOne(sdata.jc);
H5Dclose(jc_id);
if (ir_id >= 0) {
H5Dread_int(ir_id, convert_ints, H5S_ALL, H5S_ALL, H5P_DEFAULT, sdata.ir);
addOne(sdata.ir);
H5Dclose(ir_id);
}
H5Tclose(convert_ints);
H5Dread(data_id, data_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, sdata.data);
H5Tclose(data_type_id);
H5Dclose(data_id);
sdata
}
def getMat(fid:Int, varname:String):AnyRef = {
if (fid > 0 && H5Aexists_by_name(fid, varname, "MATLAB_class", H5P_DEFAULT)) {
val attr_class = getStringAttr(fid, varname, "MATLAB_class");
if (attr_class.equals("double")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
SDMat(getSparseMat[Double](fid, varname));
} else {
DMat(getDenseMat[Double](fid, varname, H5T_FLOAT, 8));
}
} else if (attr_class.equals("single")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
SMat(getSparseMat[Float](fid, varname));
} else {
if (readMatDims(fid, varname).length <= 2) {
FMat(getDenseMat[Float](fid, varname, H5T_FLOAT, 4));
} else {
getFND(fid, varname, H5T_FLOAT, 4);
}
}
} else if (attr_class.equals("int32")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
throw new RuntimeException("Sparse arrays of ints unsupported");
} else {
IMat(getDenseMat[Int](fid, varname, H5T_INTEGER, 4));
}
} else if (attr_class.equals("int8")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
SBMat(getSparseMat[Byte](fid, varname));
} else {
throw new RuntimeException("Dense arrays of bytes unsupported");
}
} else if (attr_class.equals("char")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
throw new RuntimeException("Sparse arrays of char unsupported");
} else {
getMatString(fid, varname);
}
} else if (attr_class.equals("cell")) {
if (H5Aexists_by_name(fid, varname, "MATLAB_sparse", H5P_DEFAULT)) {
throw new RuntimeException("Sparse cell arrays unsupported");
} else {
getCellMat(fid, varname);
}
} else throw new RuntimeException("Couldnt read storage class "+attr_class);
} else throw new RuntimeException("Couldnt find matlab var named "+varname);
}
def writeMatHeader(fname:String) = {
val ff = new java.io.RandomAccessFile(fname,"rws");
val sp = new scala.sys.SystemProperties();
val hstring = "MATLAB 7.3 MAT-file, Platform: "+sp.get("os.arch").get+" "+sp.get("os.name").get+" "+sp.get("os.version").get+ " "+
"Created by BIDMat on "+(new java.text.SimpleDateFormat("EEE MMM d HH:mm:ss yyyy")).format(new java.util.Date())+
" HDF5 Schema 1.0 .";
val hb = hstring.getBytes();
val hbytes = new Array[Byte](512);
for (i <- 0 until 116) hbytes(i) = 32;
System.arraycopy(hb, 0, hbytes, 0, math.min(hstring.length(), 116));
val version:Byte = 2;
hbytes(125) = version;
hbytes(126) = 0x49;
hbytes(127) = 0x4D;
ff.write(hbytes);
// ff.write(emptyHDF5file)
ff.close();
}
def putDenseMat[T](fid:Int, a:DenseMat[T], aname:String, h5class:Int, matclass:String):Array[Byte] = {
val dims = new Array[Long](2);
dims(0) = a.ncols;
dims(1) = a.nrows;
val filespace_id = H5Screate_simple(2, dims, null);
val dplist_id = H5Pcreate(H5P_DATASET_CREATE);
// setCompressionPlist(dplist_id, dims)
val dataset_id = H5Dcreate(fid, "/"+aname, h5class, filespace_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
H5Dwrite(dataset_id, h5class, H5S_ALL, H5S_ALL, H5P_DEFAULT, a.data);
H5Pclose(dplist_id);
putStringAttr(dataset_id, "MATLAB_class", matclass);
val ref = H5Rcreate(dataset_id, ".", H5R_OBJECT, -1);
H5Dclose(dataset_id);
H5Sclose(filespace_id);
ref
}
def putFND(fid:Int, a:FND, aname:String, h5class:Int, matclass:String):Array[Byte] = {
val dims = a.dims.data.map(_.toLong);
val filespace_id = H5Screate_simple(dims.length, dims, null);
val dplist_id = H5Pcreate(H5P_DATASET_CREATE);
// setCompressionPlist(dplist_id, dims);
val dataset_id = H5Dcreate(fid, "/"+aname, h5class, filespace_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
H5Dwrite(dataset_id, h5class, H5S_ALL, H5S_ALL, H5P_DEFAULT, a.data);
H5Pclose(dplist_id);
putStringAttr(dataset_id, "MATLAB_class", matclass);
val ref = H5Rcreate(dataset_id, ".", H5R_OBJECT, -1);
H5Dclose(dataset_id);
H5Sclose(filespace_id);
ref
}
def putEmptyRef(id:Int):Array[Byte] = {
val dims = new Array[Long](1);
dims(0) = 2;
val tmp = Array[Long](2);
val dmatspace_id = H5Screate_simple(1, dims, null);
val dmat_id = H5Dcreate(id, "0", H5T_NATIVE_ULLONG, dmatspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
H5Dwrite(dmat_id, H5T_NATIVE_ULLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp);
putStringAttr(dmat_id, "MATLAB_class", "canonical empty");
putByteAttr(dmat_id, "MATLAB_empty", 1);
val ref = H5Rcreate(dmat_id, ".", H5R_OBJECT, -1);
H5Dclose(dmat_id);
H5Sclose(dmatspace_id);
ref
}
def putCellMat(fid:Int, varname:String, a:CSMat) = {
var group_id = 0;
if (refcount < 0) {
group_id = H5Gcreate(fid, "/#refs#", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
putEmptyRef(group_id);
refcount = 1;
} else {
group_id = H5Gopen(fid, "/#refs#", H5P_DEFAULT);
}
val refIds = new Array[Array[Byte]](a.length);
for (i <- 0 until a.length) {
val newname = "%x" format refcount;
refcount += 1;
refIds(i) = putMat(group_id, a.data(i), newname);
}
val dims = new Array[Long](2);
dims(0) = a.ncols;
dims(1) = a.nrows;
val dplist_id = H5Pcreate(H5P_DATASET_CREATE);
setCompressionPlist(dplist_id, dims);
val refspace_id = H5Screate_simple(2, dims, null);
val refs_id = H5Dcreate(fid, varname, H5T_STD_REF_OBJ, refspace_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
H5Dwrite(refs_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, refIds);
putStringAttr(refs_id, "MATLAB_class", "cell");
val ref = H5Rcreate(refs_id, ".", H5R_OBJECT, -1);
H5Dclose(refs_id);
H5Sclose(refspace_id);
H5Pclose(dplist_id);
H5Gclose(group_id);
ref
}
def putSparseMat[T](fid:Int, a:SparseMat[T], varname:String, nativeClass:Int, className:String):Array[Byte] = {
val dims = new Array[Long](1);
val group_id = H5Gcreate(fid, "/"+varname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
putStringAttr(group_id, "MATLAB_class", className);
putLongAttr(group_id, "MATLAB_sparse", a.nrows);
val convert_ints = H5Tcopy(H5T_NATIVE_INT);
dims(0) = a.ncols + 1;
var dplist_id = H5Pcreate(H5P_DATASET_CREATE);
setCompressionPlist(dplist_id, dims);
val jcs_id = H5Screate_simple(1, dims, null);
val jc_id = H5Dcreate(group_id, "jc", H5T_NATIVE_LLONG, jcs_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
subOne(a.jc);
try {
H5Dwrite(jc_id, convert_ints, H5S_ALL, H5S_ALL, H5P_DEFAULT, a.jc);
} catch {
case e:Throwable => {
addOne(a.jc);
throw new RuntimeException("Error writing sparse mat "+e);
}
}
addOne(a.jc);
H5Dclose(jc_id);
H5Sclose(jcs_id);
H5Pclose(dplist_id);
dims(0) = a.nnz;
dplist_id = H5Pcreate(H5P_DATASET_CREATE);
setCompressionPlist(dplist_id, dims);
if (a.ir != null) {
val irs_id = H5Screate_simple(1, dims, null);
val ir_id = H5Dcreate(group_id, "ir", H5T_NATIVE_LLONG, irs_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
subOne(a.ir);
try {
H5Dwrite(ir_id, convert_ints, H5S_ALL, H5S_ALL, H5P_DEFAULT, a.ir);
} catch {
case e:Throwable => {
addOne(a.ir);
throw new RuntimeException("Error writing sparse mat "+e);
}
}
addOne(a.ir);
H5Dclose(ir_id);
H5Sclose(irs_id);
}
val dataspace_id = H5Screate_simple(1, dims, null);
val data_id = H5Dcreate(group_id, "data", nativeClass, dataspace_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
H5Dwrite(data_id, nativeClass, H5S_ALL, H5S_ALL, H5P_DEFAULT, a.data);
H5Dclose(data_id);
H5Sclose(dataspace_id);
H5Pclose(dplist_id);
H5Tclose(convert_ints);
val ref = H5Rcreate(group_id, ".", H5R_OBJECT, -1);
H5Gclose(group_id);
ref
}
def putMatString(id:Int, varname:String, str:String):Array[Byte] = {
val dims = new Array[Long](2);
dims(0) = str.length;
dims(1) = 1;
val dplist_id = H5Pcreate(H5P_DATASET_CREATE);
setCompressionPlist(dplist_id, dims);
val sbytes = str.getBytes("UTF_16LE");
val strspace_id = H5Screate_simple(2, dims, null);
val str_id = H5Dcreate(id, varname, H5T_NATIVE_USHORT, strspace_id, H5P_DEFAULT, dplist_id, H5P_DEFAULT);
putStringAttr(str_id, "MATLAB_class", "char");
putIntAttr(str_id, "MATLAB_int_decode", 2);
H5Dwrite(str_id, H5T_NATIVE_USHORT, H5S_ALL, H5S_ALL, H5P_DEFAULT, sbytes);
val ref = H5Rcreate(str_id, ".", H5R_OBJECT, -1);
H5Dclose(str_id);
H5Sclose(strspace_id);
H5Pclose(dplist_id);
ref
}
def putMat(fid:Int, a:AnyRef, aname:String):Array[Byte] = {
a match {
case aa:DMat => putDenseMat[Double](fid, aa, aname, H5T_NATIVE_DOUBLE, "double")
case aa:FMat => putDenseMat[Float](fid, aa, aname, H5T_NATIVE_FLOAT, "single")
case aa:IMat => putDenseMat[Int](fid, aa, aname, H5T_NATIVE_INT, "int32")
case aa:SBMat => putSparseMat[Byte](fid, aa, aname, H5T_NATIVE_CHAR, "int8")
case aa:SMat => putSparseMat[Float](fid, aa, aname, H5T_NATIVE_FLOAT, "single")
case aa:SDMat => putSparseMat[Double](fid, aa, aname, H5T_NATIVE_DOUBLE, "double")
case aa:CSMat => putCellMat(fid, aname, aa)
case aa:String => putMatString(fid, aname, aa)
case aa:FND => putFND(fid, aa, aname, H5T_NATIVE_FLOAT, "single")
case _ => throw new RuntimeException("unsupported matrix type to save")
}
}
def hload(fname:String, vname:String):AnyRef = {
val fapl = H5Pcreate(H5P_FILE_ACCESS);
// H5Pset_fapl_core(fapl, 16*1024*1024, false); println("core driver")
if (Mat.useStdio) H5Pset_fapl_stdio(fapl); //println("stdio driver"); // Not working on windows
val fid = H5Fopen(fname,H5F_ACC_RDONLY,fapl);
H5Pclose(fapl);
val mat = getMat(fid, vname);
H5Fclose(fid);
mat
}
def hload(fname:String, vnames:List[String]):List[AnyRef] = {
val fapl = H5Pcreate(H5P_FILE_ACCESS);
// H5Pset_fapl_core(fapl, 32*1024*1024, false); println("core driver")
if (Mat.useStdio) H5Pset_fapl_stdio(fapl); //println("stdio driver")
val fid = H5Fopen(fname,H5F_ACC_RDONLY,fapl);
H5Pclose(fapl);
val mats = vnames.map((vname) => getMat(fid, vname));
H5Fclose(fid);
mats
}
def hsaveAsHDF5(fname:String, args:List[AnyRef]) = {
refcount = -1;
val fapl_id = H5Pcreate (H5P_FILE_ACCESS);
if (Mat.useStdio) H5Pset_fapl_stdio(fapl_id);
val fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
H5Pclose(fapl_id);
saveAsImpl(fid, args);
H5Fclose(fid);
}
def hsaveAs(fname:String, args:List[AnyRef]) = {
refcount = -1;
val fapl_id = H5Pcreate (H5P_FILE_ACCESS);
if (Mat.useStdio) H5Pset_fapl_stdio(fapl_id);
val fcplist_id = H5Pcreate(H5P_FILE_CREATE);
H5Pset_userblock(fcplist_id, 512);
val fid = H5Fcreate(fname, H5F_ACC_TRUNC, fcplist_id, fapl_id);
H5Pclose(fcplist_id);
H5Pclose(fapl_id);
saveAsImpl(fid, args);
H5Fclose(fid);
writeMatHeader(fname);
}
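
  /** Illustrative round-trip sketch (not part of the original API): save two matrices with
   *  hsaveAs and read one back with hload. It assumes the usual BIDMat constructors
   *  FMat(nrows, ncols) and IMat(nrows, ncols); the default file path is just an example. */
  def exampleRoundTrip(fname:String = "/tmp/example.mat") = {
    val a = FMat(2, 3);
    val b = IMat(4, 1);
    hsaveAs(fname, List(a, "a", b, "b"));
    val a2 = hload(fname, "a");
    println(a2);
  }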
def saveAsImpl(fid:Int, argList:List[AnyRef]) = {
var i = 0;
while (i < argList.length) {
argList(i) match {
case a:Mat => {
argList(i+1) match {
case str:String => putMat(fid, a, str);
case _ => throw new RuntimeException("odd numbered args must be String variable names");
}
}
case _ => throw new RuntimeException("even numbered args must be Mat variables");
}
i += 2;
}
}
def addOne(ii:Array[Int]) = {
if (Mat.ioneBased == 1) {
var i = 0;
while (i < ii.length) {
ii(i) += 1;
i += 1;
}
}
}
def subOne(ii:Array[Int]) = {
if (Mat.ioneBased == 1) {
var i = 0;
while (i < ii.length) {
ii(i) = ii(i) - 1;
i += 1;
}
}
}
}
| codeaudit/BIDMat | src/main/scala/BIDMat/MatHDF5.scala | Scala | bsd-3-clause | 20,605 |
/*
* Copyright 2014 Cisco Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cisco.oss.foundation.orchestration.scope.utils
import com.cisco.oss.foundation.flowcontext.FlowContextFactory
import com.cisco.oss.foundation.orchestration.scope.model.Node
import org.apache.commons.net.util.SubnetUtils
import org.jclouds.scriptbuilder.domain.OsFamily
import org.jclouds.ssh.SshKeys
import org.junit.{Assert, Ignore, Test}
import scala.collection.JavaConversions._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
* Created with IntelliJ IDEA.
* User: igreenfi
* Date: 2/4/14
* Time: 12:57 PM
* To change this template use File | Settings | File Templates.
*/
class VMUtilsTest extends Slf4jLogger {
implicit val flowEC = new FlowContextExecutor(global)
val capture =
"""
|{
| "setupProvisioningEnv": true,
| "announceHostNames": true,
| "exposeAccessPoints": {
| "accessPoints": [
| {
| "url": "http://<naama-test-upm0>:6040/upm",
| "name": "upm-private"
| },
| {
| "url": "http://<dns-upm>:6040/upm",
| "name": "upm-public"
| },
| {
| "url": "https://<dns-upm>:5015/ndsconsole/app.html",
| "name": "console"
| }
| ]
| },
| "installModules": {
| "step1": {
| "modules": [
| {
| "nodes": [
| "naama-test-upm0"
| ],
| "version": "4.49.2-2-SNAPSHOT",
| "name": "nds_upm",
| "file": {
| "additionalValues": [
| {
| "value": "set passwordQuality.checkNumericCharacterIncluded false;;set passwordQuality.checkNonAlphaNumericIncluded false;;set passwordQuality.checkNoVowelsIncluded false;;set passwordQuality.checkMinimumPasswordLength false;;set passwordQuality.minimumPasswordLength 5;;set upm.cdplugin.defaults.countryToPopulation.SWE 5;;set upm.cdplugin.defaults.countryToPopulation.FIN 5;;set upm.defaults.household.deviceQuota.ALL.integer 4;;set upm.defaults.household.deviceQuota.IPAD.integer 2;;set upm.defaults.household.deviceQuota.COMPANION.integer 1;;set upm.defaults.household.deviceQuota.PC.integer 3;;set upm.defaults.household.deviceQuota.IOS.integer 3;;set clp.isEnabled true;;set clp.connections.1.host clpserver1;;set clp.connections.2.host clpserver2",
| "key": "upm::config_props"
| },
| {
| "value": "%{::ipaddress}",
| "key": "upm::host"
| },
| {
| "value": "6040",
| "key": "upm::web_port"
| },
| {
| "value": "UPM-CD",
| "key": "upm::topic"
| },
| {
| "value": "27017",
| "key": "mongodb::port"
| },
| {
| "value": "false",
| "key": "upm::hornetq::is_enabled"
| },
| {
| "value": "false",
| "key": "ccp::enabled"
| },
| {
| "value": "mongodb",
| "key": "upm::dbname"
| },
| {
| "value": "set log4j.rootLogger error,logfile,clpAppender;;set log4j.appender.clpAppender com.nds.cab.infra.appenders.CLPAppender;;set CABLogger.asyncAppenderReferences [clpAppender];;",
| "key": "upm::log4j_mongo"
| },
| {
| "value": "canal-digital",
| "key": "flavor"
| },
| {
| "value": "naama-test-mongo0",
| "key": "mongodb::host"
| }
| ],
| "baseConfigProperties": [
| "config.properties"
| ]
| }
| },
| {
| "nodes": [
| "naama-test-upm0"
| ],
| "version": "1.0.0.0",
| "name": "nds_upm_bento"
| },
| {
| "nodes": [
| "naama-test-upm0"
| ],
| "version": "3.49.1-0",
| "name": "nds_umsui",
| "file": {
| "additionalValues": [
| {
| "value": "3.45.1-SNAPSHOT",
| "key": "ndsconsole::version"
| },
| {
| "value": "naama-test-upm0",
| "key": "upm::host"
| }
| ],
| "baseConfigProperties": [
| "config.properties"
| ]
| }
| }
| ]
| },
| "step0": {
| "modules": [
| {
| "nodes": [
| "naama-test-mongo0",
| "naama-test-mongo1"
| ],
| "version": "2.4.5.1",
| "name": "mongodb_tar"
| },
| {
| "nodes": [
| "naama-test-mongo0",
| "naama-test-mongo1"
| ],
| "version": "3.51.0-2",
| "name": "nds_emm",
| "ccp": {
| "processName": "emm",
| "baseConfigProperties": [
| "config.properties"
| ],
| "additionalValues": [
| {
| "value": "naama-test-mongo0",
| "key": "generic.mongodb.host.1"
| },
| {
| "value": "naama-test-mongo1",
| "key": "generic.mongodb.host.2"
| }
| ]
| }
| }
| ]
| }
| },
| "schemaVersion": "0.1",
| "installNodes": {
| "nodes": [
| {
| "name": "naama-test-mongo0",
| "region": "US-EAST",
| "minDisk": "10",
| "minCores": "16",
| "osType": "RedHat",
| "minRam": "256",
| "osVersion": "6.0",
| "arch": "x86-64",
| "id": "",
| "network": [
| {
| "nicType": "internal",
| "nicAlias": "naama-test-mongo0"
| }
| ]
| },
| {
| "name": "naama-test-mongo1",
| "region": "US-EAST",
| "minDisk": "10",
| "minCores": "16",
| "osType": "RedHat",
| "minRam": "256",
| "osVersion": "6.0",
| "arch": "x86-64",
| "id": "",
| "network": [
| {
| "nicType": "internal",
| "nicAlias": "naama-test-mongo1"
| }
| ]
| },
| {
| "name": "naama-test-upm0",
| "region": "US-EAST",
| "minDisk": "10",
| "minCores": "16",
| "osType": "RedHat",
| "minRam": "256",
| "osVersion": "6.0",
| "arch": "x86-64",
| "id": "",
| "network": [
| {
| "nicType": "internal",
| "nicAlias": "naama-test-upm0"
| }
|
| ]
| }
| ]
| }
|}
""".stripMargin
@Test
def createInitScriptTest() {
val util = new VMUtils()
val script = new BootstrapStatements(List((new SubnetUtils("10.0.0.0/24"), "gateway")), "scopeIP", "osVersion", true, "nodename", List("5015", "6040", "6060"), "stub", "scopeMachineName", 6041, "instanceId")
val stringScript = script.render(OsFamily.UNIX)
val s1 = "iptables -A INPUT -i $dev -p tcp --dport 5015 -j ACCEPT"
val s2 = "iptables -A INPUT -i $dev -p tcp --dport 6040 -j ACCEPT"
val s3 = "iptables -A INPUT -i $dev -p tcp --dport 6060 -j ACCEPT"
Assert.assertTrue(s"Does NOT contains ( $s1 )", stringScript.contains(s1))
Assert.assertTrue(s"Does NOT contains ( $s2 )", stringScript.contains(s2))
Assert.assertTrue(s"Does NOT contains ( $s3 )", stringScript.contains(s2))
logInfo("script : {}", stringScript)
}
@Test
def createVmTemplateTest() {
val util = new VMUtils()
val nodeJson =
"""
|{
| "name": "naama-test-upm0",
| "region": "US-EAST",
| "minDisk": "10",
| "minCores": "1",
| "osType": "RedHat",
| "minRam": "256",
| "osVersion": "6.0",
| "arch": "x86-64",
| "id": "",
| "postConfiguration" : true,
| "network": [
| {
| "nicType": "internal",
| "nicAlias": "naama-test-upm0"
| },
| {
| "openPorts": [
| "6040",
| "6060",
| "5015"
| ],
| "dnsServices": [
| "upm"
| ],
| "nicType": "public",
| "nicAlias": "naama-test-upm0_public"
| }
| ]
|}
""".stripMargin
val node = ScopeUtils.mapper.readValue(nodeJson, classOf[Node])
val template = util.createVmTemplate(node, "test-test", SshKeys.generate().toMap, List(), List(), "scopeMachineName", 6041, "instanceId")
val stringScript = template.getOptions.getRunScript.render(OsFamily.UNIX)
val s1 = "iptables -A INPUT -i $dev -p tcp --dport 5015 -j ACCEPT"
val s2 = "iptables -A INPUT -i $dev -p tcp --dport 6040 -j ACCEPT"
val s3 = "iptables -A INPUT -i $dev -p tcp --dport 6060 -j ACCEPT"
Assert.assertTrue(s"Does NOT contains ( $s1 )", stringScript.contains(s1))
Assert.assertTrue(s"Does NOT contains ( $s2 )", stringScript.contains(s2))
Assert.assertTrue(s"Does NOT contains ( $s3 )", stringScript.contains(s2))
logInfo("script : {}", stringScript)
}
@Ignore
@Test
def createVmTest() {
FlowContextFactory.createFlowContext
ScopeUtils.configuration.setProperty("cloud.provider", "vsphere")
val util = new VMUtils()
val nodeJson = """
|{
| "name": "junit-test-test1",
| "region": "US-EAST",
| "minDisk": "30",
| "minCores": "1",
| "osType": "RedHat",
| "minRam": "256",
| "osVersion": "6.0",
| "arch": "x86-64",
| "id": "",
| "network": [
| {
| "nicType": "internal",
| "nicAlias": "naama-test-upm0"
| }
| ]
|}
""".stripMargin
val node = ScopeUtils.mapper.readValue(nodeJson, classOf[Node])
val vmFuture = util.createVM("junit", "test", "test", node, SshKeys.generate().toMap, "scopeMachineName", 6041, "instanceId")
vmFuture.onSuccess {
case vm => //util.deleteVM(vm)
}
    Await.result(vmFuture, 50 minutes)
}
@Ignore
@Test
def createDNSTest() {
try {
DnsUtilsFactory.instance.createDomain("naama", "test", "upm")
DnsUtilsFactory.instance.createARecord("naama", "test", "upm", "11.11.11.11")
DnsUtilsFactory.instance().deleteDns("upm.test.naama.vcs-foundation.com")
} catch {
case e: Exception => {
logInfo("Failed to create DNS : {}", Array(e, e))
}
}
}
}
| foundation-runtime/orchestration | src/test/scala/com/cisco/oss/foundation/orchestration/scope/utils/VMUtilsTest.scala | Scala | apache-2.0 | 12,995 |
package be.wegenenverkeer.atomium
import be.wegenenverkeer.atomium.format.Feed
import scala.util.Try
package object client {
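  /**
   * Type of functions that unmarshal the raw string payload of a feed page into a typed
   * `Feed[E]`, with parse failures captured in the `Try`.
   */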
type FeedEntryUnmarshaller[E] = (String) => Try[Feed[E]]
}
| joachimvda/atomium | modules/client-scala/src/main/scala/be/wegenenverkeer/atomium/client/package.scala | Scala | mit | 191 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.application.exemptions.partner
import iht.config.AppConfig
import iht.connector.{CachingConnector, IhtConnector}
import iht.controllers.application.EstateController
import iht.forms.ApplicationForms._
import iht.models._
import iht.models.application.ApplicationDetails
import iht.models.application.exemptions._
import iht.utils.CommonHelper._
import iht.utils.{CommonHelper, IhtFormValidator}
import iht.views.html.application.exemption.partner.partner_permanent_home_question
import javax.inject.Inject
import play.api.Logging
import play.api.i18n.Messages
import play.api.mvc.{Call, MessagesControllerComponents, Request, Result}
import uk.gov.hmrc.auth.core.AuthConnector
import uk.gov.hmrc.auth.core.retrieve.v2.Retrievals.{nino => ninoRetrieval}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
import iht.views.html.ihtHelpers.custom.name
import scala.concurrent.Future
class PartnerPermanentHomeQuestionControllerImpl @Inject()(val ihtConnector: IhtConnector,
val cachingConnector: CachingConnector,
val authConnector: AuthConnector,
val partnerPermanentHomeQuestionView: partner_permanent_home_question,
val nameView: name,
implicit val appConfig: AppConfig,
val cc: MessagesControllerComponents) extends FrontendController(cc) with PartnerPermanentHomeQuestionController {
}
trait PartnerPermanentHomeQuestionController extends EstateController with Logging {
lazy val partnerPermanentHomePage = routes.PartnerPermanentHomeQuestionController.onPageLoad()
lazy val exemptionsOverviewPage = addFragmentIdentifier(
iht.controllers.application.exemptions.routes.ExemptionsOverviewController.onPageLoad(), Some(appConfig.ExemptionsPartnerHomeID))
lazy val partnerOverviewPage = addFragmentIdentifier(routes.PartnerOverviewController.onPageLoad(), Some(appConfig.ExemptionsPartnerHomeID))
val partnerPermanentHomeQuestionView: partner_permanent_home_question
val nameView: name
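  // Loads the stored application details and renders the question, pre-filled with any
  // previously saved partner exemption answer; responds with a 500 if the details are missing.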
def onPageLoad = authorisedForIhtWithRetrievals(ninoRetrieval) { userNino =>
implicit request =>
withRegistrationDetails { registrationDetails =>
for {
applicationDetails <- ihtConnector.getApplication(getNino(userNino),
CommonHelper.getOrExceptionNoIHTRef(registrationDetails.ihtReference),
registrationDetails.acknowledgmentReference)
} yield {
applicationDetails match {
case Some(appDetails) => {
val filledForm = appDetails.allExemptions.flatMap(_.partner)
.fold(partnerPermanentHomeQuestionForm)(partnerPermanentHomeQuestionForm.fill)
Ok(partnerPermanentHomeQuestionView(filledForm,
registrationDetails,
returnLabel(registrationDetails, appDetails),
returnUrl(registrationDetails, appDetails)
))
}
case _ => {
logger.warn("Application Details not found")
InternalServerError("Application details not found")
}
}
}
}
}
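  // Binds the submitted form (adding the deceased's name to any errors), re-loads the stored
  // application details, and either re-renders the page with errors or delegates to saveApplication.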
def onSubmit = authorisedForIhtWithRetrievals(ninoRetrieval) { userNino =>
implicit request => {
withRegistrationDetails { regDetails =>
val boundForm = IhtFormValidator.addDeceasedNameToAllFormErrors(partnerPermanentHomeQuestionForm
.bindFromRequest, regDetails.deceasedDetails.fold("")(_.name))
val applicationDetailsFuture = ihtConnector.getApplication(getNino(userNino),
CommonHelper.getOrExceptionNoIHTRef(regDetails.ihtReference),
regDetails.acknowledgmentReference)
applicationDetailsFuture.flatMap {
case Some(appDetails) => {
IhtFormValidator.addDeceasedNameToAllFormErrors(boundForm, regDetails.deceasedDetails.fold("")(_.name))
.fold(
formWithErrors => {
Future.successful(BadRequest(partnerPermanentHomeQuestionView(formWithErrors,
regDetails,
returnLabel(regDetails, appDetails),
returnUrl(regDetails, appDetails))))
},
partnerExemption => {
saveApplication(getNino(userNino), partnerExemption, regDetails, appDetails)
}
)
}
case None => {
logger.warn("Application Details not found")
Future.successful(InternalServerError("Application details not found"))
}
}
}
}
}
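  /**
   * Persists the answer to the partner permanent-home question. Only the flag captured by the
   * submitted form is taken from `pe`; every other partner exemption field is copied across from
   * the stored application so it is not overwritten. Kickout rules are re-evaluated before saving
   * and the user is redirected to the partner overview or to the kickout page.
   */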
def saveApplication(nino: String,
pe: PartnerExemption,
regDetails: RegistrationDetails,
appDetails: ApplicationDetails)(implicit request: Request[_],
hc: HeaderCarrier): Future[Result] = {
val existingIsAssetForDeceasedPartner = appDetails.allExemptions.
flatMap(_.partner.flatMap(_.isAssetForDeceasedPartner))
val existingFirstName = appDetails.allExemptions.flatMap(_.partner.flatMap(_.firstName))
val existingLastName = appDetails.allExemptions.flatMap(_.partner.flatMap(_.lastName))
val existingDateOfBirth = appDetails.allExemptions.flatMap(_.partner.flatMap(_.dateOfBirth))
val existingNino = appDetails.allExemptions.flatMap(_.partner.flatMap(_.nino))
val existingTotalAssets = appDetails.allExemptions.flatMap(_.partner.flatMap(_.totalAssets))
val updatedPartnerExemption = pe.copy(isAssetForDeceasedPartner = existingIsAssetForDeceasedPartner,
firstName = existingFirstName,
lastName = existingLastName,
dateOfBirth = existingDateOfBirth,
nino = existingNino,
totalAssets = existingTotalAssets)
val applicationDetails = appKickoutUpdateKickout(checks = checksEstate,
prioritySection = applicationSection,
registrationDetails = regDetails,
applicationDetails = appDetails.copy(allExemptions = Some(appDetails.allExemptions.fold(new
AllExemptions(partner = Some(updatedPartnerExemption)))(_.copy(partner = Some(updatedPartnerExemption))))))
ihtConnector.saveApplication(nino, applicationDetails, regDetails.acknowledgmentReference).map(_ =>
Redirect(applicationDetails.kickoutReason.fold(partnerOverviewPage)
(_ => kickoutRedirectLocation)))
}
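  // Back-link text: once the permanent-home question has been answered, point back at the
  // "assets left to spouse or civil partner" section; otherwise back at the deceased's exemptions overview.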
private def returnLabel(regDetails: RegistrationDetails,
appDetails: ApplicationDetails)(implicit messages: Messages): String = {
val deceasedName = nameView(regDetails.deceasedDetails.map(_.name).getOrElse(""))
val partner = appDetails.allExemptions.flatMap(_.partner)
partner match {
case Some(x) => {
if (x.isPartnerHomeInUK.isDefined) {
messages("iht.estateReport.exemptions.partner.returnToAssetsLeftToSpouse")
} else {
messages("page.iht.application.return.to.exemptionsOf", deceasedName)
}
}
case None => {
messages("page.iht.application.return.to.exemptionsOf", deceasedName)
}
}
}
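  // Back-link target matching the label above: the partner overview once the question has been
  // answered, the exemptions overview page otherwise.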
private def returnUrl(regDetails: RegistrationDetails, appDetails: ApplicationDetails): Call = {
val partner = appDetails.allExemptions.flatMap(_.partner)
partner match {
case Some(x) => {
if (x.isPartnerHomeInUK.isDefined) {
routes.PartnerOverviewController.onPageLoad()
} else {
exemptionsOverviewPage
}
}
case None => exemptionsOverviewPage
}
}
}
| hmrc/iht-frontend | app/iht/controllers/application/exemptions/partner/PartnerPermanentHomeQuestionController.scala | Scala | apache-2.0 | 8,395 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.collection.GenTraversable
import scala.collection.mutable.Buffer
import scala.collection.mutable.ListBuffer
// SKIP-SCALATESTJS,NATIVE-START
import SharedHelpers.serializeRoundtrip
import org.scalatest.CompatParColls.Converters._
// SKIP-SCALATESTJS,NATIVE-END
class EverySpec extends UnitSpec {
"An Every" can "be constructed as a One" in {
val onesie = One(3)
onesie.length shouldBe 1
onesie(0) shouldBe 3
}
it can "be constructed as a Many" in {
val twosie = Many(2, 3)
twosie.length shouldBe 2
twosie(0) shouldBe 2
twosie(1) shouldBe 3
}
it can "be constructed as an Every" in {
val threesie = Every(1, 2, 3)
threesie.length shouldBe 3
threesie(0) shouldBe 1
threesie(1) shouldBe 2
threesie(2) shouldBe 3
}
it can "be constructed from a GenTraversable via the from method on Every singleton" in {
Every.from(List.empty[String]) shouldBe None
Every.from(List("1")) shouldBe Some(One("1"))
Every.from(List(1, 2, 3)) shouldBe Some(Many(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-START
Every.from(List.empty[String].par) shouldBe None
Every.from(List("1").par) shouldBe Some(One("1"))
Every.from(List(1, 2, 3).par) shouldBe Some(Many(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-END
}
it can "not be constructed with any null elements" is pending
it can "be deconstructed with One" in {
One(1) match {
case One(x) => x shouldEqual 1
case _ => fail()
}
One("hi") match {
case One(s) => s shouldEqual "hi"
case _ => fail()
}
}
it can "be deconstructed with Many" in {
Many(1, 2, 3) match {
case Many(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Many("hi", "there") match {
case Many(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Many(1, 2, 3) match {
case Many(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Many(1, 2, 3, 4, 5) match {
case Many(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
}
it can "be deconstructed with Every" in {
Many(1, 2, 3) match {
case Every(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Many("hi", "there") match {
case Every(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Many(1, 2, 3) match {
case Every(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Many(1, 2, 3, 4, 5) match {
case Every(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Many(1, 2, 3) match {
case Every(x, _*) => x shouldEqual 1
case _ => fail()
}
One("hi") match {
case Every(s) => s shouldEqual "hi"
case _ => fail()
}
Every(1, 2, 3) match {
case Every(x, y, z) => (x, y, z) shouldEqual (1, 2, 3)
case _ => fail()
}
Every("hi", "there") match {
case Every(s, t) => (s, t) shouldEqual ("hi", "there")
case _ => fail()
}
Every(1, 2, 3) match {
case Every(x, y, _) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Every(1, 2, 3, 4, 5) match {
case Every(x, y, _*) => (x, y) shouldEqual (1, 2)
case _ => fail()
}
Every(1, 2, 3) match {
case Every(x, _*) => x shouldEqual 1
case _ => fail()
}
}
it should "have an apply method" in {
Every(1, 2, 3)(0) shouldEqual 1
Every(1, 2, 3)(1) shouldEqual 2
One("hi")(0) shouldEqual "hi"
Many(7, 8, 9)(2) shouldEqual 9
val vectorOutOfBoundsException = intercept[IndexOutOfBoundsException] {
Vector(1, 2, 3)(3)
}
the [IndexOutOfBoundsException] thrownBy {
Every(1, 2, 3)(3)
} should have message vectorOutOfBoundsException.getMessage
}
it should "have an length method" in {
One(1).length shouldBe 1
Many(1, 2).length shouldBe 2
Many(1, 2, 3, 4, 5).length shouldBe 5
Every(1).length shouldBe 1
Every(1, 2).length shouldBe 2
Every(1, 2, 3, 4, 5).length shouldBe 5
}
it should "have a ++ method that takes another Every" in {
Every(1, 2, 3) ++ One(4) shouldEqual Every(1, 2, 3, 4)
Every(1, 2, 3) ++ Every(4) shouldEqual Every(1, 2, 3, 4)
Many(1, 2, 3) ++ Every(4, 5, 6) shouldEqual Every(1, 2, 3, 4, 5, 6)
Many(1, 2, 3) ++ One(4) shouldEqual Many(1, 2, 3, 4)
Every(1, 2, 3) ++ One(4) shouldEqual Many(1, 2, 3, 4)
Every(1, 2, 3) ++ Every(4) shouldEqual Many(1, 2, 3, 4)
Many(1, 2, 3) ++ Every(4) shouldEqual Many(1, 2, 3, 4)
Many(1, 2, 3) ++ One(4) shouldEqual Many(1, 2, 3, 4)
}
it should "have a ++ method that takes a GenTraversableOnce" in {
Every(1, 2, 3) ++ List(4) shouldEqual Every(1, 2, 3, 4)
Every(1, 2, 3) ++ Vector(4, 5, 6) shouldEqual Every(1, 2, 3, 4, 5, 6)
Many(1, 2, 3) ++ GenTraversable(4) shouldEqual Every(1, 2, 3, 4)
Many(1, 2, 3) ++ Set(4, 5) shouldEqual Many(1, 2, 3, 4, 5)
Many(1, 2, 3) ++ Set(4, 5).iterator shouldEqual Many(1, 2, 3, 4, 5)
}
it should "have a +: method" in {
0 +: One(1) shouldBe Many(0, 1)
0 +: Many(1, 2) shouldBe Many(0, 1, 2)
"zero" +: Every("one", "two") shouldBe Every("zero", "one", "two")
}
it should "implement PartialFunction[Int, T]" in {
val pf1: PartialFunction[Int, Int] = Every(1)
pf1.isDefinedAt(0) shouldBe true
pf1.isDefinedAt(1) shouldBe false
}
it should "have a /: method" in {
(0 /: One(1))(_ + _) shouldBe 1
(1 /: One(1))(_ + _) shouldBe 2
(0 /: Many(1, 2, 3))(_ + _) shouldBe 6
(1 /: Many(1, 2, 3))(_ + _) shouldBe 7
}
it should "have a :+ method" in {
One(1) :+ 2 shouldBe Many(1, 2)
Many(1, 2) :+ 3 shouldBe Many(1, 2, 3)
}
it should "have a :\\\\ method" in {
(One(1) :\\ 0)(_ + _) shouldBe 1
(One(1) :\\ 1)(_ + _) shouldBe 2
(Many(1, 2, 3) :\\ 0)(_ + _) shouldBe 6
(Many(1, 2, 3) :\\ 1)(_ + _) shouldBe 7
}
it should "have 3 addString methods" in {
One("hi").addString(new StringBuilder) shouldBe new StringBuilder("hi")
Many(1, 2, 3).addString(new StringBuilder) shouldBe new StringBuilder("123")
One("hi").addString(new StringBuilder, "#") shouldBe new StringBuilder("hi")
Many(1, 2, 3).addString(new StringBuilder, "#") shouldBe new StringBuilder("1#2#3")
Many(1, 2, 3).addString(new StringBuilder, ", ") shouldBe new StringBuilder("1, 2, 3")
One("hi").addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<hi>")
Many(1, 2, 3).addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<1#2#3>")
Many(1, 2, 3).addString(new StringBuilder, " ( ", ", ", " ) ") shouldBe new StringBuilder(" ( 1, 2, 3 ) ")
}
it should "have an andThen method (inherited from PartialFunction)" in {
val pf1 = One(1) andThen (_ + 1)
pf1(0) shouldEqual 2
val pf2 = Many(1, 2, 3) andThen (_ + 1)
pf2(0) shouldEqual 2
pf2(1) shouldEqual 3
pf2(2) shouldEqual 4
}
it should "have an applyOrElse method (inherited from PartialFunction)" in {
Every(1, 2, 3).applyOrElse(0, (_: Int) * -1) shouldEqual 1
Every(1, 2, 3).applyOrElse(1, (_: Int) * -1) shouldEqual 2
Every(1, 2, 3).applyOrElse(2, (_: Int) * -1) shouldEqual 3
Every(1, 2, 3).applyOrElse(3, (_: Int) * -1) shouldEqual -3
Every(1, 2, 3).applyOrElse(4, (_: Int) * -1) shouldEqual -4
}
it should "have an canEqual method" is pending
// it should "have an charAt method" is pending
// Could have an implicit conversion from Every[Char] to CharSequence like
// there is for Seq in Predef.
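  // A hypothetical sketch of such a conversion (not something Every actually provides):
  //   implicit def everyOfCharToCharSequence(chars: Every[Char]): CharSequence = chars.mkString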
/*
scala> Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).collect { case i if i > 10 == 0 => i / 2 }
res1: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an collectFirst method" in {
Every(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) collectFirst { case i if i > 10 => i / 2 } shouldBe None
Every(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) collectFirst { case i if i > 10 => i / 2 } shouldBe Some(5)
}
/*
scala> Vector(1).combinations(2).toVector
res2: Vector[scala.collection.immutable.Vector[Int]] = Vector()
*/
/*
companion method not relevant. Has an empty and other GenTraverable stuff.
*/
it should "have an compose method, inherited from PartialFunction" in {
val fn: Int => Int = Every(1, 2, 3).compose((_: Int) + 1)
fn(-1) shouldBe 1
fn(0) shouldBe 2
fn(1) shouldBe 3
}
it should "have an contains method" in {
val e = Every(1, 2, 3)
e.contains(-1) shouldBe false
e.contains(0) shouldBe false
e.contains(1) shouldBe true
e.contains(2) shouldBe true
e.contains(3) shouldBe true
e.contains(4) shouldBe false
}
  // Decided to just overload one for GenSeq and one for Every. Could have done
  // something with a Slicing nature, but that's a bit too fancy pants.
it should "have a containsSlice method that takes GenSeq" in {
val every = Every(1, 2, 3, 4, 5)
every.containsSlice(List(2, 3)) shouldBe true
every.containsSlice(List(2, 3, 5)) shouldBe false
every.containsSlice(List.empty) shouldBe true
every.containsSlice(Vector(2, 3)) shouldBe true
every.containsSlice(Vector(2, 3, 5)) shouldBe false
every.containsSlice(Vector.empty) shouldBe true
every.containsSlice(ListBuffer(2, 3)) shouldBe true
every.containsSlice(ListBuffer(2, 3, 5)) shouldBe false
every.containsSlice(ListBuffer.empty) shouldBe true
}
it should "have a containsSlice method that takes an Every" in {
val every = Every(1, 2, 3, 4, 5)
every.containsSlice(Every(2, 3)) shouldBe true
every.containsSlice(Every(2, 3, 5)) shouldBe false
every.containsSlice(Every(3)) shouldBe true
}
it should "have 3 copyToArray methods" in {
val arr1 = Array.fill(5)(-1)
Many(1, 2, 3, 4, 5).copyToArray(arr1)
arr1 shouldEqual Array(1, 2, 3, 4, 5)
val arr2 = Array.fill(5)(-1)
Many(1, 2, 3, 4, 5).copyToArray(arr2, 1)
arr2 shouldEqual Array(-1, 1, 2, 3, 4)
val arr3 = Array.fill(5)(-1)
Many(1, 2, 3, 4, 5).copyToArray(arr3, 1, 2)
arr3 shouldEqual Array(-1, 1, 2, -1, -1)
}
it should "have a copyToBuffer method" in {
val buf = ListBuffer.fill(3)(-1)
Many(1, 2, 3, 4, 5).copyToBuffer(buf)
buf shouldEqual Buffer(-1, -1, -1, 1, 2, 3, 4, 5)
}
it should "have a corresponds method that takes a GenSeq" in {
val every = Every(1, 2, 3, 4, 5)
every.corresponds(List(2, 4, 6, 8, 10))(_ * 2 == _) shouldBe true
every.corresponds(List(2, 4, 6, 8, 11))(_ * 2 == _) shouldBe false
every.corresponds(List(2, 4, 6, 8))(_ * 2 == _) shouldBe false
every.corresponds(List(2, 4, 6, 8, 10, 12))(_ * 2 == _) shouldBe false
}
it should "have a corresponds method that takes an Every" in {
val every = Every(1, 2, 3, 4, 5)
every.corresponds(Many(2, 4, 6, 8, 10))(_ * 2 == _) shouldBe true
every.corresponds(Many(2, 4, 6, 8, 11))(_ * 2 == _) shouldBe false
every.corresponds(Many(2, 4, 6, 8))(_ * 2 == _) shouldBe false
every.corresponds(Many(2, 4, 6, 8, 10, 12))(_ * 2 == _) shouldBe false
}
it should "have an count method" in {
val every = Every(1, 2, 3, 4, 5)
every.count(_ > 10) shouldBe 0
every.count(_ % 2 == 0) shouldBe 2
every.count(_ % 2 == 1) shouldBe 3
}
/*
it should not have a diff method
scala> Vector(1, 2, 3).diff(Vector(1, 2, 3))
res0: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an distinct method" in {
Every(1, 2, 3).distinct shouldBe Every(1, 2, 3)
Every(1).distinct shouldBe Every(1)
Every(1, 2, 1, 1).distinct shouldBe Every(1, 2)
Every(1, 1, 1).distinct shouldBe Every(1)
}
/*
it should not have an drop method
scala> Vector(1, 2, 3).drop(3)
res1: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropRight method
scala> Vector(1, 2, 3).dropRight(3)
res0: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropWhile method
scala> Vector(1, 2, 3).dropWhile(_ < 10)
res2: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an endsWith method that takes a GenSeq" in {
Every(1).endsWith(List(1)) shouldBe true
Every(1).endsWith(List(1, 2)) shouldBe false
Every(1, 2).endsWith(List(1, 2)) shouldBe true
Every(1, 2, 3, 4, 5).endsWith(List(1, 2)) shouldBe false
Every(1, 2, 3, 4, 5).endsWith(List(5)) shouldBe true
Every(1, 2, 3, 4, 5).endsWith(List(3, 4, 5)) shouldBe true
}
it should "have an endsWith method that takes an Every" in {
Every(1).endsWith(Every(1)) shouldBe true
Every(1).endsWith(Every(1, 2)) shouldBe false
Every(1, 2).endsWith(Every(1, 2)) shouldBe true
Every(1, 2, 3, 4, 5).endsWith(Every(1, 2)) shouldBe false
Every(1, 2, 3, 4, 5).endsWith(Every(5)) shouldBe true
Every(1, 2, 3, 4, 5).endsWith(Every(3, 4, 5)) shouldBe true
}
it should "have an equals method" in {
// This already worked, because the only concrete classes
// are case classes, and comparing One with Many should always
// be false.
One(1) should not equal Many(1, 2)
}
it should "have an exists method" in {
Every(1, 2, 3).exists(_ == 2) shouldBe true
Every(1, 2, 3).exists(_ == 5) shouldBe false
}
/*
it should not have a filter method
scala> Vector(1, 2, 3).filter(_ > 10)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a filterNot method
scala> Vector(1, 2, 3).filterNot(_ < 10)
res13: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a find method" in {
Every(1, 2, 3).find(_ == 5) shouldBe None
Every(1, 2, 3).find(_ == 2) shouldBe Some(2)
}
it should "have a flatMap method" in {
Every(1, 2, 3) flatMap (i => One(i + 1)) shouldBe Every(2, 3, 4)
val ss = Every("hi", "ho")
val is = Every(1, 2, 3)
(for (s <- ss; i <- is) yield (s, i)) shouldBe
Every(
("hi",1), ("hi",2), ("hi",3), ("ho",1), ("ho",2), ("ho",3)
)
One(5) flatMap (i => One(i + 3)) shouldBe One(8)
One(8) flatMap (i => Every(i.toString)) shouldBe One("8")
}
/*
Can only flatten Everys
scala> Vector(Set.empty[Int], Set.empty[Int]).flatten
res17: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a flatten method that works on nested Everys" in {
Every(Every(1, 2, 3), Every(1, 2, 3)).flatten shouldBe Every(1, 2, 3, 1, 2, 3)
Every(Every(1)).flatten shouldBe Every(1)
}
it can "be flattened when in a GenTraversableOnce" in {
// need to keep this commented out until finish implementing all methods
Vector(Every(1, 2, 3), Every(1, 2, 3)).flatten shouldBe Vector(1, 2, 3, 1, 2, 3)
List(Every(1, 2, 3), Every(1, 2, 3)).flatten shouldBe List(1, 2, 3, 1, 2, 3)
List(Every(1, 2, 3), Every(1, 2, 3)).toIterator.flatten.toStream shouldBe List(1, 2, 3, 1, 2, 3).toIterator.toStream
// SKIP-SCALATESTJS,NATIVE-START
List(Every(1, 2, 3), Every(1, 2, 3)).par.flatten shouldBe List(1, 2, 3, 1, 2, 3).par
// SKIP-SCALATESTJS,NATIVE-END
}
it should "have a fold method" in {
One(1).fold(0)(_ + _) shouldBe 1
One(1).fold(1)(_ * _) shouldBe 1
One(2).fold(0)(_ + _) shouldBe 2
One(2).fold(1)(_ * _) shouldBe 2
One(3).fold(0)(_ + _) shouldBe 3
One(3).fold(1)(_ * _) shouldBe 3
Many(1, 2, 3).fold(0)(_ + _) shouldBe 6
Many(1, 2, 3).fold(1)(_ * _) shouldBe 6
Many(1, 2, 3, 4, 5).fold(0)(_ + _) shouldBe 15
Many(1, 2, 3, 4, 5).fold(1)(_ * _) shouldBe 120
}
it should "have a foldLeft method" in {
One(1).foldLeft(0)(_ + _) shouldBe 1
One(1).foldLeft(1)(_ + _) shouldBe 2
Many(1, 2, 3).foldLeft(0)(_ + _) shouldBe 6
Many(1, 2, 3).foldLeft(1)(_ + _) shouldBe 7
}
it should "have a foldRight method" in {
One(1).foldRight(0)(_ + _) shouldBe 1
One(1).foldRight(1)(_ + _) shouldBe 2
Many(1, 2, 3).foldRight(0)(_ + _) shouldBe 6
Many(1, 2, 3).foldRight(1)(_ + _) shouldBe 7
}
it should "have a forall method" in {
Every(1, 2, 3, 4, 5).forall(_ > 0) shouldBe true
Every(1, 2, 3, 4, 5).forall(_ < 0) shouldBe false
}
it should "have a foreach method" in {
var num = 0
Every(1, 2, 3) foreach (num += _)
num shouldBe 6
for (i <- Every(1, 2, 3))
num += i
num shouldBe 12
One(5) foreach (num *= _)
num shouldBe 60
}
it should "have a groupBy method" in {
Every(1, 2, 3, 4, 5).groupBy(_ % 2) shouldBe Map(1 -> Every(1, 3, 5), 0 -> Every(2, 4))
Every(1, 2, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> Every(1, 3, 3, 3), 0 -> Every(2))
Every(1, 1, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> Every(1, 1, 3, 3, 3))
Every(1, 2, 3, 5, 7).groupBy(_ % 2) shouldBe Map(1 -> Every(1, 3, 5, 7), 0 -> Every(2))
}
it should "have a grouped method" in {
Every(1, 2, 3).grouped(2).toList shouldBe List(Every(1, 2), Every(3))
Every(1, 2, 3).grouped(1).toList shouldBe List(Every(1), Every(2), Every(3))
an [IllegalArgumentException] should be thrownBy { Every(1, 2, 3).grouped(0).toList }
Every(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(2).toList shouldBe List(Every(1, 2), Every(3, 4), Every(5, 6), Every(7, 8), Every(9, 10))
Every(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(3).toList shouldBe List(Every(1, 2, 3), Every(4, 5, 6), Every(7, 8, 9), Every(10))
Every(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(4).toList shouldBe List(Every(1, 2, 3, 4), Every(5, 6, 7, 8), Every(9, 10))
Every(1).grouped(2).toList shouldBe List(Every(1))
Every(1).grouped(1).toList shouldBe List(Every(1))
}
it should "have a hasDefiniteSize method" in {
Every(1).hasDefiniteSize shouldBe true
Every(1, 2).hasDefiniteSize shouldBe true
}
it should "have a hashCode method" in {
One(1).hashCode shouldEqual Every(1).hashCode
Many(1, 2).hashCode shouldEqual Every(1, 2).hashCode
}
it should "have a head method" in {
One("hi").head shouldBe "hi"
Many(1, 2, 3).head shouldBe 1
}
it should "have a headOption method" in {
One("hi").headOption shouldBe Some("hi")
Many(1, 2, 3).headOption shouldBe Some(1)
}
it should "have 2 indexOf methods" in {
Every(1, 2, 3, 4, 5).indexOf(3) shouldBe 2
Every(1, 2, 3, 4, 5).indexOf(1) shouldBe 0
Every(1, 2, 3, 4, 5).indexOf(1, 2) shouldBe -1
Every(1, 2, 3, 4, 5).indexOf(6) shouldBe -1
Every(1, 2, 3, 4, 5).indexOf(5, 3) shouldBe 4
}
it should "have 2 indexOfSlice methods that take a GenSeq" in {
Every(1, 2, 3, 4, 5).indexOfSlice(List(2, 3)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3))
Every(1, 2, 3, 4, 5).indexOfSlice(List(2, 3), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3), 3)
Every(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5), 3) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5), 3)
Every(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(2, 3, 5))
Every(1, 2, 3, 4, 5).indexOfSlice(List(5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(5))
Every(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5)) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5))
Every(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 0) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 0)
Every(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), 1)
Every(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), -1) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List(1, 2, 3, 4, 5), -1)
Every(1, 2, 3, 4, 5).indexOfSlice(List.empty) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty)
Every(1, 2, 3, 4, 5).indexOfSlice(List.empty, 6) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty, 6)
Every(1, 2, 3, 4, 5).indexOfSlice(List.empty, 4) shouldBe List(1, 2, 3, 4, 5).indexOfSlice(List.empty, 4)
}
it should "have 2 indexOfSlice methods that take an Every" in {
Every(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3)) shouldBe 1
Every(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3), 3) shouldBe -1
Every(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5), 3) shouldBe -1
Every(1, 2, 3, 4, 5).indexOfSlice(Every(2, 3, 5)) shouldBe -1
Every(1, 2, 3, 4, 5).indexOfSlice(Every(5)) shouldBe 4
Every(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5)) shouldBe 0
Every(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 0) shouldBe 0
Every(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), 1) shouldBe -1
Every(1, 2, 3, 4, 5).indexOfSlice(Every(1, 2, 3, 4, 5), -1) shouldBe 0
}
it should "have 2 indexWhere methods" in {
Every(1, 2, 3, 4, 5).indexWhere(_ == 3) shouldBe 2
Every(1, 2, 3, 4, 5).indexWhere(_ == 1) shouldBe 0
Every(1, 2, 3, 4, 5).indexWhere(_ == 1, 2) shouldBe -1
Every(1, 2, 3, 4, 5).indexWhere(_ == 6) shouldBe -1
Every(1, 2, 3, 4, 5).indexWhere(_ == 5, 3) shouldBe 4
}
it should "have an indices method" in {
Every(1).indices shouldBe Vector(1).indices
Every(1, 2, 3).indices shouldBe (0 to 2)
Every(1, 2, 3, 4, 5).indices shouldBe (0 to 4)
}
/*
it should not have an init method
scala> Vector(1).init
res30: scala.collection.immutable.Vector[Int] = Vector()
it should "have an inits method" is pending
scala> Vector(1).inits.toList
res32: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should "have an intersect method" is pending
scala> Vector(1, 2, 3) intersect Vector(4, 5)
res33: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an isDefinedAt method, inherited from PartialFunction" in {
Every(1).isDefinedAt(0) shouldBe true
Every(1).isDefinedAt(1) shouldBe false
Every(1, 2, 3).isDefinedAt(1) shouldBe true
Every(1, 2, 3).isDefinedAt(2) shouldBe true
Every(1, 2, 3).isDefinedAt(3) shouldBe false
Every(1, 2, 3).isDefinedAt(0) shouldBe true
Every(1, 2, 3).isDefinedAt(-1) shouldBe false
}
it should "have an isEmpty method" in {
One("hi").isEmpty shouldBe false
Many(1, 2, 3).isEmpty shouldBe false
}
it should "have an isTraversableAgain method" in {
One("hi").isTraversableAgain shouldBe true
Many(1, 2, 3).isTraversableAgain shouldBe true
}
it should "have an iterator method" in {
One("hi").iterator.toList shouldBe List("hi")
Many(1, 2, 3).iterator.toList shouldBe List(1, 2, 3)
}
it should "have a last method" in {
One("hi").last shouldBe "hi"
Many(1, 2, 3).last shouldBe 3
}
it should "have 2 lastIndexOf methods" in {
Every(1, 2, 3, 4, 5).lastIndexOf(2) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOf(0) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOf(5) shouldBe 4
Every(1, 2, 3, 3, 5).lastIndexOf(3) shouldBe 3
Every(1).lastIndexOf(1) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOf(2, 3) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOf(2, 0) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOf(2, 1) shouldBe 1
}
it should "have 2 lastIndexOfSlice methods that take a GenSeq" in {
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3)) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3), 3) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3, 5), 3) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(2, 3, 5)) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(5)) shouldBe 4
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5)) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), 0) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), 1) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List(1, 2, 3, 4, 5), -1) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty) shouldBe 5
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty, 6) shouldBe 5
Every(1, 2, 3, 4, 5).lastIndexOfSlice(List.empty, 4) shouldBe 4
}
it should "have 2 lastIndexOfSlice methods that take an Every" in {
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3)) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3), 3) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3, 5), 3) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(2, 3, 5)) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(5)) shouldBe 4
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5)) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), 0) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), 1) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexOfSlice(Every(1, 2, 3, 4, 5), -1) shouldBe -1
}
it should "have 2 lastIndexWhere methods" in {
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 2) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 0) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 5) shouldBe 4
Every(1, 2, 3, 3, 5).lastIndexWhere(_ == 3) shouldBe 3
Every(1).lastIndexWhere(_ == 1) shouldBe 0
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 3) shouldBe 1
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 0) shouldBe -1
Every(1, 2, 3, 4, 5).lastIndexWhere(_ == 2, 1) shouldBe 1
}
it should "have an lastOption method" in {
One("hi").lastOption shouldBe Some("hi")
Many(1, 2, 3).lastOption shouldBe Some(3)
}
it should "have an lengthCompare method" in {
One("hi").lengthCompare(0) should be > 0
One("hi").lengthCompare(1) shouldEqual 0
One("hi").lengthCompare(2) should be < 0
Many(1, 2, 3).lengthCompare(0) should be > 0
Many(1, 2, 3).lengthCompare(1) should be > 0
Many(1, 2, 3).lengthCompare(2) should be > 0
Many(1, 2, 3).lengthCompare(3) shouldEqual 0
Many(1, 2, 3).lengthCompare(4) should be < 0
}
it should "have an inherited lift method" in {
val liftedOne = One("hi").lift
liftedOne(0) shouldBe Some("hi")
liftedOne(1) shouldBe None
liftedOne(-1) shouldBe None
val liftedMany = Many(1, 2, 3).lift
liftedMany(0) shouldBe Some(1)
liftedMany(1) shouldBe Some(2)
liftedMany(2) shouldBe Some(3)
liftedMany(3) shouldBe None
liftedMany(-1) shouldBe None
}
it should "have a map method" in {
Every(1, 2, 3) map (_ + 1) shouldBe Every(2, 3, 4)
(for (ele <- Every(1, 2, 3)) yield ele * 2) shouldBe Every(2, 4, 6)
One(5) map (_ + 3) shouldBe One(8)
One(8) map (_.toString) shouldBe One("8")
}
it should "have a max method" in {
Every(1, 2, 3, 4, 5).max shouldBe 5
Every(1).max shouldBe 1
Every(-1).max shouldBe -1
Every("aaa", "ccc", "bbb").max shouldBe "ccc"
}
it should "have a maxBy method" in {
Every(1, 2, 3, 4, 5).maxBy(_.abs) shouldBe 5
Every(1, 2, 3, 4, -5).maxBy(_.abs) shouldBe -5
}
it should "have a min method" in {
Every(1, 2, 3, 4, 5).min shouldBe 1
Every(1).min shouldBe 1
Every(-1).min shouldBe -1
Every("aaa", "ccc", "bbb").min shouldBe "aaa"
}
it should "have a minBy method" in {
Every(1, 2, 3, 4, 5).minBy(_.abs) shouldBe 1
Every(-1, -2, 3, 4, 5).minBy(_.abs) shouldBe -1
}
it should "have a mkString method" in {
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6705
One("hi").mkString shouldBe "hi"
Many(1, 2, 3).mkString shouldBe "123"
// SKIP-DOTTY-END
One("hi").mkString("#") shouldBe "hi"
Many(1, 2, 3).mkString("#") shouldBe "1#2#3"
Many(1, 2, 3).mkString(", ") shouldBe "1, 2, 3"
One("hi").mkString("<", "#", ">") shouldBe "<hi>"
Many(1, 2, 3).mkString("<", "#", ">") shouldBe "<1#2#3>"
Many(1, 2, 3).mkString(" ( ", ", ", " ) ") shouldBe " ( 1, 2, 3 ) "
}
it should "have an nonEmpty method" in {
One("hi").nonEmpty shouldBe true
Many(1, 2, 3).nonEmpty shouldBe true
}
it should "have an orElse method, inherited from PartialFunction" in {
val pf: PartialFunction[Int, Int] = { case i => -i }
val f = Every(1, 2, 3) orElse pf
f(0) shouldBe 1
f(1) shouldBe 2
f(2) shouldBe 3
f(3) shouldBe -3
f(-1) shouldBe 1
}
it should "have a padTo method" in {
Every(1).padTo(0, -1) shouldBe Every(1)
Every(1).padTo(1, -1) shouldBe Every(1)
Every(1).padTo(2, -1) shouldBe Every(1, -1)
Every(1).padTo(3, -1) shouldBe Every(1, -1, -1)
Every(1, 2, 3).padTo(3, -1) shouldBe Every(1, 2, 3)
Every(1, 2, 3).padTo(4, -1) shouldBe Every(1, 2, 3, -1)
Every(1, 2, 3).padTo(5, -1) shouldBe Every(1, 2, 3, -1, -1)
}
// it should not have a par method, because I don't want to support that. If the user
// needs a parallel collection, they can use a parallel collection: every.toVector.par...
/*
it should not have an partition method
scala> Vector(1, 2, 3, 4, 5).partition(_ > 10)
res10: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have a patch method" in {
Every(1, 2, 3, 4, 5).patch(2, Every(-3, -4), 2) shouldBe Every(1, 2, -3, -4, 5)
Every(1, 2, 3, 4, 5).patch(2, Every(-3, -4), 5) shouldBe Every(1, 2, -3, -4)
Every(1, 2, 3, 4, 5).patch(2, Every(-3, -4), 1) shouldBe Every(1, 2, -3, -4, 4, 5)
Every(1, 2, 3, 4, 5).patch(4, Every(-3, -4), 2) shouldBe Every(1, 2, 3, 4, -3, -4)
Every(1, 2, 3, 4, 5).patch(5, Every(-3, -4), 2) shouldBe Every(1, 2, 3, 4, 5, -3, -4)
Every(1, 2, 3, 4, 5).patch(6, Every(-3, -4), 2) shouldBe Every(1, 2, 3, 4, 5, -3, -4)
Every(1, 2, 3, 4, 5).patch(0, Every(-3, -4), 2) shouldBe Every(-3, -4, 3, 4, 5)
Every(1, 2, 3, 4, 5).patch(0, Every(-3, -4), 3) shouldBe Every(-3, -4, 4, 5)
}
it should "have a permutations method" in {
Every(1, 2, 3).permutations.toStream shouldBe Stream(Every(1, 2, 3), Every(1, 3, 2), Every(2, 1, 3), Every(2, 3, 1), Every(3, 1, 2), Every(3, 2, 1))
Every(1).permutations.toStream shouldBe Stream(Every(1))
Every(1, 2).permutations.toStream shouldBe Stream(Every(1, 2), Every(2, 1))
}
it should "have a prefixLength method" in {
Every(1, 2, 3, 4, 5).prefixLength(_ == 1) shouldBe 1
Every(1, 2, 3, 4, 5).prefixLength(_ == 2) shouldBe 0
Every(1, 2, 3, 4, 5).prefixLength(_ <= 2) shouldBe 2
Every(1, 2, 3, 4, 5).prefixLength(_ <= 10) shouldBe 5
Every(1, 2, 3, 4, 5).prefixLength(_ <= 4) shouldBe 4
}
it should "have a product method" in {
Every(1, 2, 3).product shouldBe 6
Every(3).product shouldBe 3
Every(3, 4, 5).product shouldBe 60
Every(3, 4, 5).product shouldBe 60
    Every(3.1, 4.2, 5.3).product shouldBe 69.006 +- 0.000000000001
}
it should "have a reduce method" in {
Every(1, 2, 3, 4, 5).reduce(_ + _) shouldBe 15
Every(1, 2, 3, 4, 5).reduce(_ * _) shouldBe 120
Every(5).reduce(_ + _) shouldBe 5
Every(5).reduce(_ * _) shouldBe 5
}
it should "have a reduceLeft method" in {
One(1).reduceLeft(_ + _) shouldBe 1
One(1).reduceLeft(_ * _) shouldBe 1
Many(1, 2, 3).reduceLeft(_ + _) shouldBe 6
Many(1, 2, 3).reduceLeft(_ * _) shouldBe 6
Every(1, 2, 3, 4, 5).reduceLeft(_ * _) shouldBe 120
}
it should "have a reduceLeftOption method" in {
One(1).reduceLeftOption(_ + _) shouldBe Some(1)
One(1).reduceLeftOption(_ * _) shouldBe Some(1)
Many(1, 2, 3).reduceLeftOption(_ + _) shouldBe Some(6)
Many(1, 2, 3).reduceLeftOption(_ * _) shouldBe Some(6)
Every(1, 2, 3, 4, 5).reduceLeftOption(_ * _) shouldBe Some(120)
}
it should "have a reduceOption method" in {
Every(1, 2, 3, 4, 5).reduceOption(_ + _) shouldBe Some(15)
Every(1, 2, 3, 4, 5).reduceOption(_ * _) shouldBe Some(120)
Every(5).reduceOption(_ + _) shouldBe Some(5)
Every(5).reduceOption(_ * _) shouldBe Some(5)
}
it should "have a reduceRight method" in { One(1).reduceRight(_ + _) shouldBe 1
One(1).reduceRight(_ * _) shouldBe 1
Many(1, 2, 3).reduceRight(_ + _) shouldBe 6
Many(1, 2, 3).reduceRight(_ * _) shouldBe 6
Every(1, 2, 3, 4, 5).reduceRight(_ * _) shouldBe 120
}
it should "have a reduceRightOption method" in {
One(1).reduceRightOption(_ + _) shouldBe Some(1)
One(1).reduceRightOption(_ * _) shouldBe Some(1)
Many(1, 2, 3).reduceRightOption(_ + _) shouldBe Some(6)
Many(1, 2, 3).reduceRightOption(_ * _) shouldBe Some(6)
Every(1, 2, 3, 4, 5).reduceRightOption(_ * _) shouldBe Some(120)
}
it should "have a reverse method" in {
Every(33).reverse shouldBe Every(33)
Every(33, 34, 35).reverse shouldBe Every(35, 34, 33)
}
it should "have a reverseIterator method" in {
Every(3).reverseIterator.toStream shouldBe Stream(3)
Every(1, 2, 3).reverseIterator.toList shouldBe Stream(3, 2, 1)
}
it should "have a reverseMap method" in {
Every(3).reverseMap(_ + 1) shouldBe Every(4)
Every(1, 2, 3).reverseMap(_ + 1) shouldBe Every(4, 3, 2)
}
it should "have a runWith method, inherited from PartialFunction" in {
var x = 0
    val f = Every(1, 2, 3).runWith(x += _)
f(0) shouldBe true
x shouldBe 1
f(1) shouldBe true
x shouldBe 3
f(2) shouldBe true
x shouldBe 6
f(3) shouldBe false
var y = 0
    val g = Every(3).runWith(y += _)
g(0) shouldBe true
y shouldBe 3
g(0) shouldBe true
y shouldBe 6
}
it should "have a sameElements method that takes a GenIterable" in {
Every(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 5)) shouldBe true
Every(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4)) shouldBe false
Every(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 5, 6)) shouldBe false
Every(1, 2, 3, 4, 5).sameElements(List(1, 2, 3, 4, 4)) shouldBe false
Every(3).sameElements(List(1, 2, 3, 4, 5)) shouldBe false
Every(3).sameElements(List(1)) shouldBe false
Every(3).sameElements(List(3)) shouldBe true
}
it should "have a sameElements method that takes an Every" in {
Every(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 5)) shouldBe true
Every(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4)) shouldBe false
Every(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 5, 6)) shouldBe false
Every(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 4)) shouldBe false
Every(3).sameElements(Every(1, 2, 3, 4, 5)) shouldBe false
Every(3).sameElements(Every(1)) shouldBe false
Every(3).sameElements(Every(3)) shouldBe true
}
it should "have a scan method" in {
Every(1).scan(0)(_ + _) shouldBe Every(0, 1)
Every(1, 2, 3).scan(0)(_ + _) shouldBe Every(0, 1, 3, 6)
Every(1, 2, 3).scan("z")(_.toString + _.toString) shouldBe Every("z", "z1", "z12", "z123")
Every(0).scan("z")(_.toString + _.toString) shouldBe Every("z", "z0")
}
it should "have a scanLeft method" in {
Every(1).scanLeft(0)(_ + _) shouldBe Every(0, 1)
Every(1, 2, 3).scanLeft(0)(_ + _) shouldBe Every(0, 1, 3, 6)
Every(1, 2, 3).scanLeft("z")(_.toString + _.toString) shouldBe Every("z", "z1", "z12", "z123")
Every(0).scanLeft("z")(_.toString + _.toString) shouldBe Every("z", "z0")
}
it should "have a scanRight method" in {
Every(1).scanRight(0)(_ + _) shouldBe Every(1, 0)
Every(1, 2, 3).scanRight(0)(_ + _) shouldBe Every(6, 5, 3, 0)
Every(1, 2, 3).scanRight("z")(_.toString + _.toString) shouldBe Every("123z", "23z", "3z", "z")
Every(0).scanRight("z")(_.toString + _.toString) shouldBe Every("0z", "z")
}
it should "have a segmentLength method" in {
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 7, 0) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ == 7, 0) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 0) shouldBe 10
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 1, 0) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 10) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 0, 8) shouldBe 2
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ < 3, 0) shouldBe 2
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ < 5, 0) shouldBe 4
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 0) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 5) shouldBe 5
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 4) shouldBe 0
Every(1, 2, 3, 4, 5, 6, 6, 7, 8, 10).segmentLength(_ > 5, 6) shouldBe 4
}
// it should "have an seq method" is pending
it should "have a size method" in {
Every(5).size shouldBe 1
Every(1, 2, 3).size shouldBe 3
}
/*
it should not have a slice method
scala> Vector(3).slice(0, 0)
res83: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3, 4, 5).slice(2, 1)
res84: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have 2 sliding methods" in {
Every(1).sliding(1).toList shouldBe List(Every(1))
Every(1).sliding(2).toList shouldBe List(Every(1))
Every(1, 2, 3).sliding(2).toList shouldBe List(Every(1, 2), Every(2, 3))
Every(1, 2, 3).sliding(1).toList shouldBe List(Every(1), Every(2), Every(3))
Every(1, 2, 3).sliding(3).toList shouldBe List(Every(1, 2, 3))
Every(1, 2, 3, 4, 5).sliding(3).toList shouldBe List(Every(1, 2, 3), Every(2, 3, 4), Every(3, 4, 5))
Every(1, 2, 3, 4, 5).sliding(2).toList shouldBe List(Every(1, 2), Every(2, 3), Every(3, 4), Every(4, 5))
Every(1, 2, 3, 4, 5).sliding(1).toList shouldBe List(Every(1), Every(2), Every(3), Every(4), Every(5))
Every(1, 2, 3, 4, 5).sliding(4).toList shouldBe List(Every(1, 2, 3, 4), Every(2, 3, 4, 5))
Every(1, 2, 3, 4, 5).sliding(5).toList shouldBe List(Every(1, 2, 3, 4, 5))
Every(1).sliding(1, 1).toList shouldBe List(Every(1))
Every(1).sliding(1, 2).toList shouldBe List(Every(1))
Every(1, 2, 3).sliding(1, 1).toList shouldBe List(Every(1), Every(2), Every(3))
Every(1, 2, 3).sliding(2, 1).toList shouldBe List(Every(1, 2), Every(2, 3))
Every(1, 2, 3).sliding(2, 2).toList shouldBe List(Every(1, 2), Every(3))
Every(1, 2, 3).sliding(3, 2).toList shouldBe List(Every(1, 2, 3))
Every(1, 2, 3).sliding(3, 1).toList shouldBe List(Every(1, 2, 3))
Every(1, 2, 3, 4, 5).sliding(3, 1).toList shouldBe List(Every(1, 2, 3), Every(2, 3, 4), Every(3, 4, 5))
Every(1, 2, 3, 4, 5).sliding(2, 2).toList shouldBe List(Every(1, 2), Every(3, 4), Every(5))
Every(1, 2, 3, 4, 5).sliding(2, 3).toList shouldBe List(Every(1, 2), Every(4, 5))
Every(1, 2, 3, 4, 5).sliding(2, 4).toList shouldBe List(Every(1, 2), Every(5))
Every(1, 2, 3, 4, 5).sliding(3, 1).toList shouldBe List(Every(1, 2, 3), Every(2, 3, 4), Every(3, 4, 5))
Every(1, 2, 3, 4, 5).sliding(3, 2).toList shouldBe List(Every(1, 2, 3), Every(3, 4, 5))
Every(1, 2, 3, 4, 5).sliding(3, 3).toList shouldBe List(Every(1, 2, 3), Every(4, 5))
Every(1, 2, 3, 4, 5).sliding(3, 4).toList shouldBe List(Every(1, 2, 3), Every(5))
}
it should "have a sortBy method" in {
val regFun: String => Int = {
case "one" => 1
case "two" => 2
case "three" => 3
case "four" => 4
case "five" => 5
case "-one" => -1
case "-two" => -2
case "-three" => -3
case "-four" => -4
case "-five" => -5
}
val absFun: String => Int = {
case "one" => 1
case "two" => 2
case "three" => 3
case "four" => 4
case "five" => 5
case "-one" => 1
case "-two" => 2
case "-three" => 3
case "-four" => 4
case "-five" => 5
}
Every("five", "four", "three", "two", "one").sortBy(regFun) shouldBe Every("one", "two", "three", "four", "five")
Every("two", "one", "four", "five", "three").sortBy(regFun) shouldBe Every("one", "two", "three", "four", "five")
Every("two", "-one", "four", "-five", "-three").sortBy(regFun) shouldBe Every("-five", "-three", "-one", "two", "four")
Every("two", "-one", "four", "-five", "-three").sortBy(absFun) shouldBe Every("-one", "two", "-three", "four", "-five")
}
it should "have a sortWith method" in {
Every(1, 2, 3, 4, 5).sortWith(_ > _) shouldBe Every(5, 4, 3, 2, 1)
Every(2, 1, 4, 5, 3).sortWith(_ > _) shouldBe Every(5, 4, 3, 2, 1)
Every(2, -1, 4, -5, -3).sortWith(_.abs > _.abs) shouldBe Every(-5, 4, -3, 2, -1)
Every(2, -1, 4, -5, -3).sortWith(_.abs < _.abs) shouldBe Every(-1, 2, -3, 4, -5)
}
it should "have a sorted method" in {
Every(1, 2, 3, 4, 5).sorted shouldBe Every(1, 2, 3, 4, 5)
Every(5, 4, 3, 2, 1).sorted shouldBe Every(1, 2, 3, 4, 5)
Every(2, 1, 4, 5, 3).sorted shouldBe Every(1, 2, 3, 4, 5)
}
/*
it should not have a span method
scala> Vector(1, 2, 3, 4, 5).span(_ > 10)
res105: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
it should not have a splitAt method
scala> Vector(1, 2, 3, 4, 5).splitAt(0)
res106: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have 2 startsWith methods that take a GenSeq" in {
Every(1, 2, 3).startsWith(List(1)) shouldBe true
Every(1, 2, 3).startsWith(List(1, 2)) shouldBe true
Every(1, 2, 3).startsWith(List(1, 2, 3)) shouldBe true
Every(1, 2, 3).startsWith(List(1, 2, 3, 4)) shouldBe false
Every(1).startsWith(List(1, 2, 3, 4)) shouldBe false
Every(1).startsWith(List(1)) shouldBe true
Every(1).startsWith(List(2)) shouldBe false
Every(1).startsWith(List(1), 0) shouldBe true
Every(1).startsWith(List(1), 1) shouldBe false
Every(1, 2, 3).startsWith(List(1), 1) shouldBe false
Every(1, 2, 3).startsWith(List(1), 2) shouldBe false
Every(1, 2, 3).startsWith(List(2), 2) shouldBe false
Every(1, 2, 3).startsWith(List(2), 1) shouldBe true
Every(1, 2, 3).startsWith(List(2, 3), 1) shouldBe true
Every(1, 2, 3).startsWith(List(1, 2, 3), 1) shouldBe false
Every(1, 2, 3).startsWith(List(1, 2, 3), 0) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(List(3, 4), 2) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(List(3, 4, 5), 2) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(List(3, 4, 5, 6), 2) shouldBe false
}
it should "have 2 startsWith methods that take an Every" in {
Every(1, 2, 3).startsWith(Every(1)) shouldBe true
Every(1, 2, 3).startsWith(Every(1, 2)) shouldBe true
Every(1, 2, 3).startsWith(Every(1, 2, 3)) shouldBe true
Every(1, 2, 3).startsWith(Every(1, 2, 3, 4)) shouldBe false
Every(1).startsWith(Every(1, 2, 3, 4)) shouldBe false
Every(1).startsWith(Every(1)) shouldBe true
Every(1).startsWith(Every(2)) shouldBe false
Every(1).startsWith(Every(1), 0) shouldBe true
Every(1).startsWith(Every(1), 1) shouldBe false
Every(1, 2, 3).startsWith(Every(1), 1) shouldBe false
Every(1, 2, 3).startsWith(Every(1), 2) shouldBe false
Every(1, 2, 3).startsWith(Every(2), 2) shouldBe false
Every(1, 2, 3).startsWith(Every(2), 1) shouldBe true
Every(1, 2, 3).startsWith(Every(2, 3), 1) shouldBe true
Every(1, 2, 3).startsWith(Every(1, 2, 3), 1) shouldBe false
Every(1, 2, 3).startsWith(Every(1, 2, 3), 0) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(Every(3, 4), 2) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(Every(3, 4, 5), 2) shouldBe true
Every(1, 2, 3, 4, 5).startsWith(Every(3, 4, 5, 6), 2) shouldBe false
}
it should "have a stringPrefix method" in {
Every(1).stringPrefix shouldBe "One"
Every(1, 2, 3).stringPrefix shouldBe "Many"
}
it should "have a sum method" in {
Every(1).sum shouldBe 1
Every(5).sum shouldBe 5
Every(1, 2, 3).sum shouldBe 6
Every(1, 2, 3, 4, 5).sum shouldBe 15
    Every(1.1, 2.2, 3.3).sum shouldBe 6.6 +- 0.000000000001
}
/*
it should not have a tail method
scala> Vector(1).tail
res7: scala.collection.immutable.Vector[Int] = Vector()
it should not have a tails method
scala> Vector(1).tails.toList
res8: List[scala.collection.immutable.Vector[Int]] = List(Vector(1), Vector())
it should not have a take method
scala> Vector(1).take(0)
res10: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(0)
res11: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(-1)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeRight method
scala> Vector(1).takeRight(1)
res13: scala.collection.immutable.Vector[Int] = Vector(1)
scala> Vector(1).takeRight(0)
res14: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).takeRight(0)
res15: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeWhile method
scala> Vector(1, 2, 3).takeWhile(_ > 10)
res17: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1).takeWhile(_ > 10)
res18: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a to method" in {
import org.scalactic.ColCompatHelper.Factory._
Every(1).to(List) shouldBe List(1)
Every(1, 2, 3).to(List) shouldBe List(1, 2, 3)
Every(1, 2, 3).to(scala.collection.mutable.ListBuffer) shouldBe ListBuffer(1, 2, 3)
Every(1, 2, 3).to(Vector) shouldBe Vector(1, 2, 3)
}
it should "have a toArray method" in {
Every(1, 2, 3).toArray should === (Array(1, 2, 3))
Many("a", "b").toArray should === (Array("a", "b"))
One(1).toArray should === (Array(1))
}
it should "have a toBuffer method" in {
Every(1, 2, 3).toBuffer should === (Buffer(1, 2, 3))
Many("a", "b").toBuffer should === (Buffer("a", "b"))
One(1).toBuffer should === (Buffer(1))
}
it should "have a toIndexedSeq method" in {
Every(1, 2, 3).toIndexedSeq should === (IndexedSeq(1, 2, 3))
Many("a", "b").toIndexedSeq should === (IndexedSeq("a", "b"))
One(1).toIndexedSeq should === (IndexedSeq(1))
}
it should "have a toIterable method" in {
Every(1, 2, 3).toIterable should === (Iterable(1, 2, 3))
Many("a", "b").toIterable should === (Iterable("a", "b"))
One(1).toIterable should === (Iterable(1))
}
it should "have a toIterator method" in {
Every(1, 2, 3).toIterator.toList should === (Iterator(1, 2, 3).toList)
Many("a", "b").toIterator.toList should === (Iterator("a", "b").toList)
One(1).toIterator.toList should === (Iterator(1).toList)
Every(1, 2, 3).toIterator shouldBe an [Iterator[_]]
Many("a", "b").toIterator shouldBe an [Iterator[_]]
One(1).toIterator shouldBe an [Iterator[_]]
}
it should "have a toList method" in {
Every(1, 2, 3).toList should === (List(1, 2, 3))
Many("a", "b").toList should === (List("a", "b"))
One(1).toList should === (List(1))
}
it should "have a toMap method" in {
Every("1" -> 1, "2" -> 2, "3" -> 3).toMap should === (Map("1" -> 1, "2" -> 2, "3" -> 3))
Many('A' -> "a", 'B' -> "b").toMap should === (Map('A' -> "a", 'B' -> "b"))
One("1" -> 1).toMap should === (Map("1" -> 1))
}
it should "have a toSeq method" in {
Every(1, 2, 3).toSeq should === (Seq(1, 2, 3))
Many("a", "b").toSeq should === (Seq("a", "b"))
One(1).toSeq should === (Seq(1))
}
it should "have a toSet method" in {
Every(1, 2, 3).toSet should === (Set(1, 2, 3))
Many("a", "b").toSet should === (Set("a", "b"))
One(1).toSet should === (Set(1))
}
it should "have a toStream method" in {
Every(1, 2, 3).toStream should === (Stream(1, 2, 3))
Many("a", "b").toStream should === (Stream("a", "b"))
One(1).toStream should === (Stream(1))
}
it should "have a toString method" in {
Every(1, 2, 3).toString should === ("Many(1, 2, 3)")
Many(1, 2, 3).toString should === ("Many(1, 2, 3)")
One(1).toString should === ("One(1)")
}
it should "have a toVector method" in {
Every(1, 2, 3).toVector should === (Vector(1, 2, 3))
Many("a", "b").toVector should === (Vector("a", "b"))
One(1).toVector should === (Vector(1))
}
it should "have a transpose method" in {
Every(Every(1, 2, 3), Every(4, 5, 6), Every(7, 8, 9)).transpose shouldBe Every(Every(1, 4, 7), Every(2, 5, 8), Every(3, 6, 9))
Every(Every(1, 2), Every(3, 4), Every(5, 6), Every(7, 8)).transpose shouldBe Every(Every(1, 3, 5, 7), Every(2, 4, 6, 8))
Every(Every(1, 2), Every(3, 4), Every(5, 6), Every(7, 8)).transpose.transpose shouldBe Every(Every(1, 2), Every(3, 4), Every(5, 6), Every(7, 8))
Every(Every(1, 2, 3), Every(4, 5, 6), Every(7, 8, 9)).transpose.transpose shouldBe Every(Every(1, 2, 3), Every(4, 5, 6), Every(7, 8, 9))
}
it should "have a union method that takes a GenSeq" in {
Every(1) union List(1) shouldBe Every(1, 1)
Every(1) union List(1, 2) shouldBe Every(1, 1, 2)
Every(1, 2) union List(1, 2) shouldBe Every(1, 2, 1, 2)
Every(1, 2) union List(1) shouldBe Every(1, 2, 1)
Every(1, 2) union List(3, 4, 5) shouldBe Every(1, 2, 3, 4, 5)
Every(1, 2, 3) union List(3, 4, 5) shouldBe Every(1, 2, 3, 3, 4, 5)
}
it should "have a union method that takes an Every" in {
Every(1) union Every(1) shouldBe Every(1, 1)
Every(1) union Every(1, 2) shouldBe Every(1, 1, 2)
Every(1, 2) union Every(1, 2) shouldBe Every(1, 2, 1, 2)
Every(1, 2) union Every(1) shouldBe Every(1, 2, 1)
Every(1, 2) union Every(3, 4, 5) shouldBe Every(1, 2, 3, 4, 5)
Every(1, 2, 3) union Every(3, 4, 5) shouldBe Every(1, 2, 3, 3, 4, 5)
}
it should "have an unzip method" in {
Every((1, 2)).unzip shouldBe (Every(1),Every(2))
Every((1, 2), (3, 4)).unzip shouldBe (Every(1, 3), Every(2, 4))
Every((1, 2), (3, 4), (5, 6)).unzip shouldBe (Every(1, 3, 5), Every(2, 4, 6))
}
it should "have an unzip3 method" in {
Every((1, 2, 3)).unzip3 shouldBe (Every(1), Every(2), Every(3))
Every((1, 2, 3), (4, 5, 6)).unzip3 shouldBe (Every(1, 4), Every(2, 5), Every(3, 6))
Every((1, 2, 3), (4, 5, 6), (7, 8, 9)).unzip3 shouldBe (Every(1, 4, 7), Every(2, 5, 8), Every(3, 6, 9))
}
it should "have an updated method" in {
Every(1).updated(0, 2) shouldBe Every(2)
an [IndexOutOfBoundsException] should be thrownBy { Every(1).updated(1, 2) }
Every(1, 1, 1).updated(1, 2) shouldBe Every(1, 2, 1)
Every(1, 1, 1).updated(2, 2) shouldBe Every(1, 1, 2)
Every(1, 1, 1).updated(0, 2) shouldBe Every(2, 1, 1)
}
/*
it should not have 2 view methods, because I don't want to support views in Every
*/
/*
it should not have a zip method
scala> List(1) zip Nil
res0: List[(Int, Nothing)] = List()
*/
it should "have a zipAll method that takes an Iterable" in {
// Empty on right
One(1).zipAll(Nil, -1, -2) shouldBe One((1, -2))
Many(1, 2).zipAll(Nil, -1, -2) shouldBe Many((1, -2), (2, -2))
// Same length
One(1).zipAll(List(1), -1, -2) shouldBe One((1, 1))
Many(1, 2).zipAll(List(1, 2), -1, -2) shouldBe Many((1, 1), (2, 2))
// Non-empty, longer on right
One(1).zipAll(List(10, 20), -1, -2) shouldBe Many((1,10), (-1,20))
Many(1, 2).zipAll(List(10, 20, 30), -1, -2) shouldBe Many((1,10), (2,20), (-1,30))
// Non-empty, shorter on right
Many(1, 2, 3).zipAll(List(10, 20), -1, -2) shouldBe Many((1,10), (2,20), (3,-2))
Many(1, 2, 3, 4).zipAll(List(10, 20, 30), -1, -2) shouldBe Many((1,10), (2,20), (3,30), (4,-2))
}
it should "have a zipAll method that takes an Every" in {
// Same length
One(1).zipAll(Every(1), -1, -2) shouldBe One((1, 1))
Many(1, 2).zipAll(Every(1, 2), -1, -2) shouldBe Many((1, 1), (2, 2))
// Non-empty, longer on right
One(1).zipAll(Every(10, 20), -1, -2) shouldBe Many((1,10), (-1,20))
Many(1, 2).zipAll(Every(10, 20, 30), -1, -2) shouldBe Many((1,10), (2,20), (-1,30))
// Non-empty, shorter on right
Many(1, 2, 3).zipAll(Every(10, 20), -1, -2) shouldBe Many((1,10), (2,20), (3,-2))
Many(1, 2, 3, 4).zipAll(Every(10, 20, 30), -1, -2) shouldBe Many((1,10), (2,20), (3,30), (4,-2))
}
it should "have a zipWithIndex method" in {
Every(99).zipWithIndex shouldBe Every((99,0))
Every(1, 2, 3, 4, 5).zipWithIndex shouldBe Every((1,0), (2,1), (3,2), (4,3), (5,4))
}
// SKIP-SCALATESTJS,NATIVE-START
it should "be serializable" in {
serializeRoundtrip(Every(1)) shouldBe Every(1)
}
// SKIP-SCALATESTJS,NATIVE-END
"A One" can "be widened to an Every type via .asEvery" in {
One(1).asEvery shouldBe One(1)
}
// SKIP-SCALATESTJS,NATIVE-START
it should "be serializable" in {
serializeRoundtrip(One(1))
}
// SKIP-SCALATESTJS,NATIVE-END
"A Many" can "be widened to an Every type via .asEvery" in {
Many(1, 2, 3).asEvery shouldBe Many(1, 2, 3)
}
// SKIP-SCALATESTJS,NATIVE-START
it should "be serializable" in {
serializeRoundtrip(Many(1, 2, 3))
}
// SKIP-SCALATESTJS,NATIVE-END
}
| scalatest/scalatest | jvm/scalactic-test/src/test/scala/org/scalactic/EverySpec.scala | Scala | apache-2.0 | 52,958 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util
import org.apache.avro.io.{BinaryDecoder, DecoderFactory, Encoder, EncoderFactory}
import org.geotools.factory.Hints
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.SerializationOption.SerializationOptions
import org.locationtech.geomesa.features.avro.serde.Version2ASF
import org.locationtech.geomesa.features.avro.serialization.AvroUserDataSerialization
import org.locationtech.geomesa.features.serialization.HintKeySerialization
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AvroSimpleFeatureWriterTest extends Specification with Mockito with AbstractAvroSimpleFeatureTest {
sequential
"AvroSimpleFeatureWriter2" should {
"correctly serialize all the datatypes provided in AvroSimpleFeature" in {
val features = createComplicatedFeatures(10)
val oldBaos = new ByteArrayOutputStream()
def serializeOld(sf: SimpleFeature) = {
oldBaos.reset()
Version2ASF(sf).write(oldBaos)
oldBaos.toByteArray
}
val afw = new AvroSimpleFeatureWriter(features(0).getFeatureType)
val newBaos = new ByteArrayOutputStream()
val encoder = EncoderFactory.get().directBinaryEncoder(newBaos, null)
def serializeNew(sf: SimpleFeature) = {
newBaos.reset()
afw.write(sf, encoder)
encoder.flush()
newBaos.toByteArray
}
var decoder: BinaryDecoder = null
val fsr = new FeatureSpecificReader(features(0).getFeatureType)
def convert(bytes: Array[Byte]) = {
val bais = new ByteArrayInputStream(bytes)
decoder = DecoderFactory.get().directBinaryDecoder(bais, decoder)
fsr.read(null, decoder)
}
val oldFeatures = features.map(serializeOld).map(convert)
val newFeatures = features.map(serializeNew).map(convert)
import scala.collection.JavaConversions._
newFeatures.zip(oldFeatures).foreach { case (n, o) =>
n.getID mustEqual o.getID
n.getAttributeCount mustEqual o.getAttributeCount
n.getAttributeCount mustEqual 16
n.getAttributes.dropRight(1) mustEqual o.getAttributes.dropRight(1)
util.Arrays.equals(n.getAttributes.last.asInstanceOf[Array[Byte]], o.getAttributes.last.asInstanceOf[Array[Byte]]) must beTrue
}
newFeatures.zip(features).foreach { case (n, o) =>
n.getID mustEqual o.getID
n.getAttributeCount mustEqual o.getAttributeCount
n.getAttributeCount mustEqual 16
n.getAttributes.dropRight(1) mustEqual o.getAttributes.dropRight(1)
util.Arrays.equals(n.getAttributes.last.asInstanceOf[Array[Byte]], o.getAttributes.last.asInstanceOf[Array[Byte]]) must beTrue
}
success
}
"serialize user data when requested" >> {
import org.locationtech.geomesa.security._
val sf = createSimpleFeature
val vis = "private&groupA"
sf.visibility = vis
val userData = sf.getUserData
userData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
userData.put(java.lang.Integer.valueOf(5), null)
userData.put(null, "null key")
val afw = new AvroSimpleFeatureWriter(sf.getType, SerializationOptions.withUserData)
val encoder = mock[Encoder]
afw.write(sf, encoder)
there was one(encoder).writeArrayStart()
there was one(encoder).setItemCount(4)
there was 4.times(encoder).startItem()
// 1 key and 2 values have type String
there was three(encoder).writeString("java.lang.String")
// 1 key and 0 values have type Hints.Key
there was one(encoder).writeString(classOf[Hints.Key].getName)
// 0 keys and 1 value have type Boolean
there was one(encoder).writeString("java.lang.Boolean")
// 1 key and 0 values have type Integer
there was one(encoder).writeString("java.lang.Boolean")
// 1 key and 1 value are null
there was two(encoder).writeString(AvroUserDataSerialization.NullMarkerString)
// visibility data
there was one(encoder).writeString(SecurityUtils.FEATURE_VISIBILITY)
there was one(encoder).writeString(vis)
// hint data
there was one(encoder).writeString(HintKeySerialization.keyToId(Hints.USE_PROVIDED_FID))
there was one(encoder).writeBoolean(true)
// key = 5, value = null
there was one(encoder).writeInt(5)
// key = null, value = "null key"
there was one(encoder).writeString("null key")
there was one(encoder).writeArrayEnd()
}
"use unmangled names when requested" >> {
import org.locationtech.geomesa.security._
val sf = createSimpleFeature
val vis = "private&groupA"
sf.visibility = vis
val userData = sf.getUserData
userData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
userData.put(java.lang.Integer.valueOf(5), null)
userData.put(null, "null key")
val afw = new AvroSimpleFeatureWriter(sf.getType, SerializationOptions.withUserData)
val encoder = mock[Encoder]
afw.write(sf, encoder)
there was one(encoder).writeArrayStart()
}
}
}
| ddseapy/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/AvroSimpleFeatureWriterTest.scala | Scala | apache-2.0 | 5,806 |
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import cats.effect.IO
/**
* Represents a relationship to object A
* The [[com.qvantel.jsonapi.ToOne.Reference]] case class is used when A is not loaded but we know its id.
* If we do not know the id of the other end of the relationship, wrap the relation in an Option and fill it with None.
* [[com.qvantel.jsonapi.ToOne.Loaded]] case class is used when A is loaded
*
* @tparam A Type of the object the relationship points to
*/
sealed trait ToOne[A] {
def fold[B: Identifiable](fId: String => String, fEntity: A => B): ToOne[B]
def id: String
/** Loaded biased get method as a helper when you don't want to pattern match like crazy */
def get: Option[A]
def load(implicit jac: JsonApiClient, rt: ResourceType[A], pt: PathToId[A], reader: JsonApiReader[A]): IO[A]
}
object ToOne {
final case class Reference[A](id: String) extends ToOne[A] {
override def fold[B: Identifiable](fId: String => String, _fEntity: A => B): ToOne[B] =
ToOne.reference(fId(id))
override def get: Option[A] = None
override def load(implicit jac: JsonApiClient,
rt: ResourceType[A],
pt: PathToId[A],
reader: JsonApiReader[A]): IO[A] =
jac.one(id).flatMap {
case Some(x) => IO.pure(x)
case None => IO.raiseError(ApiError.NoEntityForId("id", rt.resourceType))
}
}
final case class Loaded[A: Identifiable](entity: A) extends ToOne[A] {
override val id: String = implicitly[Identifiable[A]].identify(entity)
override def fold[B: Identifiable](_fId: String => String, fEntity: A => B): ToOne[B] =
ToOne.loaded(fEntity(entity))
override def get: Option[A] = Some(entity)
override def load(implicit jac: JsonApiClient,
rt: ResourceType[A],
pt: PathToId[A],
reader: JsonApiReader[A]): IO[A] = IO.pure(entity)
}
def reference[A](id: String): ToOne[A] = Reference(id)
def loaded[A: Identifiable](entity: A): ToOne[A] = Loaded(entity)
}
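// Editor's note: a minimal usage sketch (not part of the original file). `Person` is a
// hypothetical entity type standing in for A; an Identifiable[Person] instance is assumed
// to be in scope before ToOne.loaded can be used.
//
//   val byId: ToOne[Person] = ToOne.reference("person-1")   // only the id is known
//   byId.get                                                 // None: a Reference carries no entity
//
//   val loaded: ToOne[Person] = ToOne.loaded(person)         // entity already fetched
//   loaded.get                                               // Some(person): Loaded-biased accessor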
| qvantel/jsonapi-scala | core/src/main/scala/com/qvantel/jsonapi/ToOne.scala | Scala | bsd-3-clause | 3,560 |
package com.arkxu.aaas
import akka.actor.ActorSystem
import akka.event.{LoggingAdapter, Logging}
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.json4s.DefaultFormats
/**
* Created by fangxu on 1/9/16.
*/
trait Implicits {
implicit val system = ActorSystem("aaas")
implicit val materializer = ActorMaterializer()
implicit val ec = system.dispatcher
implicit val log: LoggingAdapter = Logging(system, getClass)
implicit val formats = DefaultFormats ++ Marshallers.all
val aaasConfig = ConfigFactory.load()
}
| arkxu/aaas | src/main/scala/com/arkxu/aaas/Implicits.scala | Scala | mit | 567 |
package cas.analysis.subject.components
case class Attachments(kinds: List[String]) extends Component
| bk0606/CAS | src/main/scala/cas/analysis/subject/components/Attachments.scala | Scala | mit | 103 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.scaledaction.core.kafka
import com.scaledaction.core.config.{ AppConfig, HasAppConfig }
import com.typesafe.config.Config
import java.util.Properties
import org.apache.kafka.clients.producer.ProducerConfig
import scala.util.Try
/**
* Application settings. First attempts to acquire from the deploy environment.
* If they are not present there, they are read from -D java system properties, else from a default config.
*
* Settings in the environment, such as SPARK_HA_MASTER=local[10], are picked up first.
*
* Settings from the command line in -D will override settings in the deploy environment.
* For example: sbt -Dspark.master="local[12]" run
*
* If you have not yet used Typesafe Config before, you can pass in overrides like so:
*
* {{{
* new Settings(ConfigFactory.parseString("""
* spark.master = "some.ip"
* """))
* }}}
*
* Any of these can also be overridden by your own application.conf.
*
* @param conf Optional config for test
*/
// brokers is a comma-separated list
class KafkaConfig(
val brokers: String,
val topic: String,
val keySerializer: String,
val valueSerializer: String,
rootConfig: Config) extends AppConfig(rootConfig: Config) {
def toProducerProperties: Properties = {
val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer)
props
}
val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
val topics = topic.split(",").toSet
override def toString(): String = s"brokers: ${brokers}, topic: ${topic}, keySerializer: ${keySerializer}, valueSerializer: ${valueSerializer}"
}
trait HasKafkaConfig extends HasAppConfig {
def getKafkaConfig: KafkaConfig = getKafkaConfig(rootConfig.getConfig("kafka"))
def getKafkaConfig(rootName: String): KafkaConfig = getKafkaConfig(rootConfig.getConfig(rootName))
//kafka {
// brokers = ["127.0.0.1:9092"]
// brokers = [${?KAFKA_BROKERS}]
//
// topic = "killrweather.raw"
// topic = [${?KAFKA_TOPIC}]
//
// key_serializer = "org.apache.kafka.common.serialization.StringSerializer"
// value_serializer = "org.apache.kafka.common.serialization.StringSerializer"
//
// #ingest-rate = 1s
// #group.id = "killrweather.group"
// #batch.send.size = 100
//}
private def getKafkaConfig(kafka: Config): KafkaConfig = {
val brokers = getRequiredValue("KAFKA_BROKERS", (kafka, "brokers"), "127.0.0.1:9092")
val topic = getRequiredValue("KAFKA_TOPIC", (kafka, "topic"), "killrweather.raw")
val keySerializer = getRequiredValue((kafka, "key_serializer"), "org.apache.kafka.common.serialization.StringSerializer")
val valueSerializer = getRequiredValue((kafka, "value_serializer"), "org.apache.kafka.common.serialization.StringSerializer")
new KafkaConfig(brokers, topic, keySerializer, valueSerializer, kafka)
}
// val KafkaGroupId = kafka.getString("group.id")
// val KafkaTopicRaw = kafka.getString("topic.raw")
// val KafkaEncoderFqcn = kafka.getString("encoder.fqcn")
// val KafkaDecoderFqcn = kafka.getString("decoder.fqcn")
// val KafkaPartitioner = kafka.getString("partitioner.fqcn")
// val KafkaBatchSendSize = kafka.getInt("batch.send.size")
}
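// Editor's note: a minimal usage sketch (not part of the original file). `IngestApp` is a
// hypothetical object; `rootConfig` is assumed to be supplied by HasAppConfig from application.conf.
//
//   object IngestApp extends HasKafkaConfig {
//     val kafkaConfig = getKafkaConfig                    // reads the "kafka" block documented above
//     val producerProps = kafkaConfig.toProducerProperties
//     val (params, topics) = (kafkaConfig.kafkaParams, kafkaConfig.topics)
//   }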
| scaledaction/weather-service | core/src/main/scala/com/scaledaction/core/kafka/KafkaConfig.scala | Scala | apache-2.0 | 4,146 |
package es.weso.rdf.validator
import scala.util.{ Failure, Success, Try }
import org.rogach.scallop.Scallop
import org.rogach.scallop.exceptions.Help
import com.typesafe.config.ConfigFactory
import buildinfo.BuildInfo
import es.weso.rdf.{ PrefixMap, RDFReader }
import es.weso.rdf.jena.{ Endpoint, RDFAsJenaModel }
import es.weso.rdf.nodes.{ IRI, RDFNode }
import es.weso.shex.{ Label, PrefixMaps, Schema, ShEx, ShExMatcher }
import es.weso.utils.FileUtils
import es.weso.utils.IO.getContents
import es.weso.utils.PerformanceUtils.{ getTimeFrom, getTimeNow, showRuntimeMemory, showTime }
import es.weso.utils.Debugging2
object Main extends App with Debugging2 {
override def main(args: Array[String]): Unit = {
val conf = ConfigFactory.load()
val opts = new Opts(args, errorDriver)
opts.verify()
if (args.length == 0) {
opts.printHelp()
return
}
if (opts.version()) {
println("Show version")
showVersion()
}
setVerbose(opts.verbose())
setInteractive(opts.interactive())
debug("Interactive " + opts.interactive())
val cut = opts.cut()
val maybe_rdf: Option[RDFReader] = opts.data.get match {
case None => opts.endpoint.get match {
case None => None // RDFFromWeb()
case Some(endpoint) => Some(Endpoint(endpoint))
}
case Some(dataFile) => {
log.debug("Reading from file \'" + dataFile + "\'...")
val ts = getData(dataFile, opts.data_format()).get
Some(ts)
}
}
if (opts.showData() && maybe_rdf.isDefined) {
println(maybe_rdf.get.serialize(opts.outdata_format()))
}
val now = getTimeNow()
if (opts.schema.isDefined) {
if (maybe_rdf.isDefined) {
val rdf = maybe_rdf.get
validateSchema(rdf, opts)
} else {
convertSchema(opts)
}
val micros = getTimeFrom(now)
val runtime = Runtime.getRuntime()
if (opts.time()) { showTime(micros) }
if (opts.memory()) { showRuntimeMemory(runtime) }
} else { // If no schema...check to validate nodeShape declarations in RDF
if (maybe_rdf.isDefined) {
println("Validating only rdf...")
val attempts = ShEx.validateRDF(maybe_rdf.get)
showAttempts(attempts, opts.verbose(), cut, PrefixMaps.commonShacl)
}
}
}
private def showVersion(): Unit = {
println("** Version: " + BuildInfo.version)
println("** Scala version: " + BuildInfo.scalaVersion)
println("** SBT version: " + BuildInfo.sbtVersion)
}
def showAttempts(attempts: Try[Seq[ValidationAttempt[RDFNode, Label]]],
verbose: Boolean,
cut: Int,
pm: PrefixMap): Unit = {
attempts match {
case Failure(e) =>
println("Exception trying to validate RDF: " + e)
case Success(as) => {
println(ValidationAttempt.showAttempts(as, verbose, cut, pm))
}
}
}
/*
def showAttempt(attempt: ValidationAttempt[RDFNode, Label],
verbose: Boolean,
cut: Int,
pm: PrefixMap): Unit = {
attempt.show(verbose, cut, pm)
} */
private def getData(data: String, format: String): Try[RDFReader] = {
log.debug("reading from \'" + data + "\' with format " + format)
for {
cs <- getContents(data)
triples <- RDFAsJenaModel.fromChars(cs, format)
} yield { log.debug("After reading " + triples); triples }
}
private def errorDriver(e: Throwable, scallop: Scallop): Nothing = {
e match {
case Help(s) =>
println("Help: " + s)
scallop.printHelp
sys.exit(0)
case _ =>
println("Error: %s".format(e.getMessage))
scallop.printHelp
sys.exit(1)
}
}
def convertSchema(opts: Opts): Unit = { // Try[(Result[Typing], PrefixMap)] = {
debug("Converting schema")
val result = Schema.fromFile(opts.schema(), opts.schema_format())
result match {
case Success((schema, pm)) => {
val schemaFormat = opts.outschema_format()
val schemaOut = schema.serialize(schemaFormat)
if (opts.showSchema()) {
debug("Schema with format " + schemaFormat)
println(schemaOut)
}
if (opts.outschema_file.isDefined) {
FileUtils.writeFile(opts.outschema_file(), schemaOut)
}
}
case Failure(e) => {
println("Exception parsing schema: " + e.getMessage)
}
}
}
def validateSchema(rdf: RDFReader, opts: Opts): Unit = {
val schema = opts.schema()
val schema_format = opts.schema_format()
val trySchema = Schema.fromFile(schema, schema_format)
trySchema match {
case Failure(e) => println(s"Failure parsing schema $schema with format $schema_format: ${e}")
case Success((schema, pm)) => {
val cut = opts.cut()
if (opts.showSchema()) {
val schemaFormat = opts.outschema_format()
println("Schema with format " + schemaFormat)
println(schema.serialize(schemaFormat))
}
val validator: RDFValidator =
opts.validator() match {
case "SHEX3" => {
ShExMatcher(schema, rdf)
}
/* case "DERIV" => {
ShExMatcher(schema, rdf, opts.withIncoming(), opts.withAny(), ShapeValidatorWithDeriv)
}
case "BACK" => {
ShExMatcher(schema, rdf, opts.withIncoming(), opts.withAny(), ShapeValidatorBacktracking)
} */
case x => throw MainException(s"Unsupported validator type $x")
}
if (opts.node.isSupplied)
if (opts.shape_label.isSupplied) {
val r = validator.match_node_label(IRI(opts.node()))(validator.labelStr(opts.shape_label()))
println(validator.showResult(r,cut,pm))
} else {
val r = validator.match_node_AllLabels(IRI(opts.node()))
println(validator.showResult(r,cut,pm))
}
else if (opts.shape_label.isSupplied) {
val r = validator.matchAllNodes_Label(validator.labelStr(opts.shape_label()))
println(validator.showResult(r,cut,pm))
} else {
Modes.lookup(opts.mode()) match {
case Some(Modes.allNodes_allLabels) => {
println("Validating all nodes - all labels")
val r = validator.matchAllNodes_AllLabels
println(validator.showResult(r,cut,pm))
}
case Some(Modes.declared) => {
println("Validating scope declarations")
val r = validator.validate
println(validator.showResult(r,cut,pm))
// println(ValidationAttempt.showAttempts(attempts, opts.verbose(), cut, pm))
}
case Some(Modes.allNodes_start) => {
println("Not implemented yet validation with start")
}
case Some(m) => {
println(s"Unknown mode $m")
}
case None => {
throw MainException(s"Unsupported mode: ${opts.mode()}")
}
}
}
}
}
}
}
| labra/ShExcala | src/main/scala/es/weso/rdf/validator/Main.scala | Scala | mit | 7,071 |
package elevators.ui
import elevators.queue.RequestQueue
trait DrawsWaiting {
def loadRequests(requests: RequestQueue[Int]): Unit
def repaint(): Unit
}
| wohanley/elevators | src/main/scala/elevators/ui/DrawsWaiting.scala | Scala | agpl-3.0 | 158 |
/** A fully pipelined reduction circuit based on
* "Modular Design of fully pipelined reduction circuits" Huang, Andrews
*/
package chiselutils.math
import Chisel._
private class DataAndLast [ T <: Data ] ( genType : T ) extends Bundle {
val data = genType.cloneType
val last = Bool()
override def cloneType() = {
new DataAndLast(genType.cloneType).asInstanceOf[this.type]
}
}
private class DualFifoBlock[ T <: Data]( genType : T, depth : Int , opLatency : Int) extends Module {
val dataAndLastType = new DataAndLast(genType)
val io = new Bundle {
val dataIn = dataAndLastType.cloneType.asInput
val valid = Bool(INPUT)
val deq = Bool(INPUT)
val dataOutA = dataAndLastType.cloneType.asOutput
val dataOutB = dataAndLastType.cloneType.asOutput
val srOut = dataAndLastType.cloneType.asOutput
}
val fifoSelReg = RegInit(Bool(false))
val fifoA = Module(new Queue(dataAndLastType.cloneType, depth, true))
val fifoB = Module(new Queue(dataAndLastType.cloneType, depth, true))
val srData = dataAndLastType.cloneType
srData.data := fifoA.io.deq.bits.data
srData.last := fifoA.io.deq.bits.last && fifoA.io.deq.valid
val shiftReg = ShiftRegister(srData, opLatency)
when ( io.valid ) {
fifoSelReg := !fifoSelReg
when ( io.dataIn.last ) {
fifoSelReg := Bool(false)
}
fifoA.io.enq.valid := !fifoSelReg && io.valid
fifoB.io.enq.valid := fifoSelReg && io.valid
}
io.dataOutA := fifoA.io.deq.bits
io.dataOutB := fifoB.io.deq.bits
io.srOut := shiftReg
fifoA.io.deq.ready := io.deq | fifoA.io.deq.bits.last
fifoB.io.deq.ready := io.deq
}
private class AccBlock[ T <: Data ] ( genType : T ) extends Module {
val dataAndLastType = new DataAndLast(genType)
val io = new Bundle {
val toFifo = dataAndLastType.cloneType.asInput
val adderRes = dataAndLastType.cloneType.asInput
val adderResValid = Bool(INPUT)
val result = genType.cloneType
val resultValid = Bool(OUTPUT)
}
}
class Accumulator[T <: Data] ( genType : T, reductionOp : ( T, T) => T, opLatency : Int) extends Module {
  Predef.assert(opLatency > 0, "opLatency must be at least 1")
val io = new Bundle {
val dataIn = genType.cloneType.asInput
val valid = Bool(INPUT)
val last = Bool(INPUT)
val result = genType.cloneType.asOutput
val resultValid = Bool(OUTPUT)
}
val stages = log2Ceil(opLatency)
private val dataAndLastType = new DataAndLast(genType)
private val inputA = dataAndLastType.cloneType
private val inputB = dataAndLastType.cloneType
val opRes = reductionOp(inputA.data, inputB.data)
val submitLvl = UInt( width = stages + 1 )
val lvlSR = ShiftRegister( submitLvl, opLatency )
val lastSR = ShiftRegister( inputB.last, opLatency )
// create fifos according to depth bound in paper
private val fifos = (0 until stages).map( x => Module(new DualFifoBlock( genType , stages - x, opLatency)) )
// final queue
private val accFifo = Module( new Queue( dataAndLastType.cloneType, opLatency, true ) )
// final sr
val accSR = ShiftRegister(accFifo.io.deq.bits.data, opLatency)
val validSR = ShiftRegister(accFifo.io.deq.bits.last && accFifo.io.deq.valid, opLatency)
val initFifo = if ( stages > 0 ) {
fifos(0).io.valid := io.valid
fifos(0).io.dataIn.data := io.dataIn
fifos(0).io.dataIn.last := io.last
} else {
accFifo.io.enq.valid := io.valid
accFifo.io.enq.bits.data := io.dataIn
accFifo.io.enq.bits.last := io.last
}
(1 until stages).foreach( x => {
fifos(x).io.dataIn.data := opRes
fifos(x).io.dataIn.last := lastSR
fifos(x).io.valid := lvlSR(x - 1)
})
val prevWasLast = RegInit(Bool(true))
prevWasLast := accFifo.io.deq.bits.last && accFifo.io.deq.ready
val accValue = Reg(genType.cloneType)
val accReadyReg = RegInit(Bool(false))
when ( prevWasLast && accFifo.io.deq.valid ) {
accValue := accFifo.io.deq.bits.data
accReadyReg := Bool(true)
}
accFifo.io.deq.ready := ( prevWasLast && accFifo.io.deq.valid ) || ( accFifo.io.deq.bits.last && accFifo.io.deq.valid )
}
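// Editor's note: a minimal instantiation sketch (not part of the original file). The reduction
// operator is shown as a plain combinational `+` for brevity; in the intended design the operator
// is expected to take `opLatency` pipeline cycles. Signal names on the right-hand side are hypothetical.
//
//   val acc = Module(new Accumulator(UInt(width = 32), (a: UInt, b: UInt) => a + b, opLatency = 3))
//   acc.io.dataIn := streamData
//   acc.io.valid  := streamValid
//   acc.io.last   := lastElementOfGroup
//   // the reduced value for each group appears on acc.io.result while acc.io.resultValid is asserted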
| da-steve101/chisel-utils | src/main/scala/chiselutils/math/Accumulator.scala | Scala | lgpl-3.0 | 4,077 |
/**
* Created by Vignesh on 12/8/15.
*/
object ExerciseDriver {
def main(args: Array[String]) {
val n = 35
println(s"Fibonacci($n) = " + Chapter2.fibonacci(n))
}
}
| svigneshk/LearningScala | FunctionalProgrammingInScala/src/ExerciseDriver.scala | Scala | mit | 181 |
package puzzle2016.q
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import Pancake._
/**
*
*/
@RunWith(classOf[JUnitRunner])
class PancakeTest extends FunSuite {
test("Count Pankcake Jam is ok") {
assert( ln(("-" toList) reverse) === 1, "solution for '-' is KO")
assert( ln(("-+" toList) reverse) === 1, "solution for '-+' is KO")
assert( ln(("+-" toList) reverse) === 2, "solution for '+-' is KO")
assert( ln(("+++" toList) reverse) === 0, "solution for '+++' is KO")
assert( ln(("--+-" toList) reverse) === 3, "solution for '--+-' is KO")
}
}
| javathought/CodeJam | src/test/scala/puzzle2016/q/PancakeTest.scala | Scala | apache-2.0 | 654 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.io.IOException
import java.net.{InetAddress, Socket}
import java.util.concurrent._
import java.util.{Collections, Properties}
import kafka.server.{BaseRequestTest, KafkaConfig}
import kafka.utils.TestUtils
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig}
import org.apache.kafka.common.config.internals.QuotaConfigs
import org.apache.kafka.common.message.ProduceRequestData
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.quota.ClientQuotaEntity
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.{ProduceRequest, ProduceResponse}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.{KafkaException, requests}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import scala.jdk.CollectionConverters._
class DynamicConnectionQuotaTest extends BaseRequestTest {
override def brokerCount = 1
val topic = "test"
val listener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
val localAddress = InetAddress.getByName("127.0.0.1")
val unknownHost = "255.255.0.1"
val plaintextListenerDefaultQuota = 30
var executor: ExecutorService = _
override def brokerPropertyOverrides(properties: Properties): Unit = {
properties.put(KafkaConfig.NumQuotaSamplesProp, "2".toString)
properties.put("listener.name.plaintext.max.connection.creation.rate", plaintextListenerDefaultQuota.toString)
}
@BeforeEach
override def setUp(): Unit = {
super.setUp()
TestUtils.createTopic(zkClient, topic, brokerCount, brokerCount, servers)
}
@AfterEach
override def tearDown(): Unit = {
try {
if (executor != null) {
executor.shutdownNow()
assertTrue(executor.awaitTermination(10, TimeUnit.SECONDS))
}
} finally {
super.tearDown()
}
}
@Test
def testDynamicConnectionQuota(): Unit = {
val maxConnectionsPerIP = 5
def connectAndVerify(): Unit = {
val socket = connect()
try {
sendAndReceive[ProduceResponse](produceRequest, socket)
} finally {
socket.close()
}
}
val props = new Properties
props.put(KafkaConfig.MaxConnectionsPerIpProp, maxConnectionsPerIP.toString)
reconfigureServers(props, perBrokerConfig = false, (KafkaConfig.MaxConnectionsPerIpProp, maxConnectionsPerIP.toString))
verifyMaxConnections(maxConnectionsPerIP, connectAndVerify)
// Increase MaxConnectionsPerIpOverrides for localhost to 7
val maxConnectionsPerIPOverride = 7
props.put(KafkaConfig.MaxConnectionsPerIpOverridesProp, s"localhost:$maxConnectionsPerIPOverride")
reconfigureServers(props, perBrokerConfig = false, (KafkaConfig.MaxConnectionsPerIpOverridesProp, s"localhost:$maxConnectionsPerIPOverride"))
verifyMaxConnections(maxConnectionsPerIPOverride, connectAndVerify)
}
@Test
def testDynamicListenerConnectionQuota(): Unit = {
val initialConnectionCount = connectionCount
def connectAndVerify(): Unit = {
val socket = connect("PLAINTEXT")
socket.setSoTimeout(1000)
try {
sendAndReceive[ProduceResponse](produceRequest, socket)
} finally {
socket.close()
}
}
// Reduce total broker MaxConnections to 5 at the cluster level
val props = new Properties
props.put(KafkaConfig.MaxConnectionsProp, "5")
reconfigureServers(props, perBrokerConfig = false, (KafkaConfig.MaxConnectionsProp, "5"))
verifyMaxConnections(5, connectAndVerify)
// Create another listener and verify listener connection limit of 5 for each listener
val newListeners = "PLAINTEXT://localhost:0,INTERNAL://localhost:0"
props.put(KafkaConfig.ListenersProp, newListeners)
props.put(KafkaConfig.ListenerSecurityProtocolMapProp, "PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT")
props.put(KafkaConfig.MaxConnectionsProp, "10")
props.put("listener.name.internal.max.connections", "5")
props.put("listener.name.plaintext.max.connections", "5")
reconfigureServers(props, perBrokerConfig = true, (KafkaConfig.ListenersProp, newListeners))
waitForListener("INTERNAL")
var conns = (connectionCount until 5).map(_ => connect("PLAINTEXT"))
conns ++= (5 until 10).map(_ => connect("INTERNAL"))
conns.foreach(verifyConnection)
conns.foreach(_.close())
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount, "Connections not closed")
// Increase MaxConnections for PLAINTEXT listener to 7 at the broker level
val maxConnectionsPlaintext = 7
val listenerProp = s"${listener.configPrefix}${KafkaConfig.MaxConnectionsProp}"
props.put(listenerProp, maxConnectionsPlaintext.toString)
reconfigureServers(props, perBrokerConfig = true, (listenerProp, maxConnectionsPlaintext.toString))
verifyMaxConnections(maxConnectionsPlaintext, connectAndVerify)
// Verify that connection blocked on the limit connects successfully when an existing connection is closed
val plaintextConnections = (connectionCount until maxConnectionsPlaintext).map(_ => connect("PLAINTEXT"))
executor = Executors.newSingleThreadExecutor
val future = executor.submit((() => createAndVerifyConnection()): Runnable)
Thread.sleep(100)
assertFalse(future.isDone)
plaintextConnections.head.close()
future.get(30, TimeUnit.SECONDS)
plaintextConnections.foreach(_.close())
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount, "Connections not closed")
// Verify that connections on inter-broker listener succeed even if broker max connections has been
// reached by closing connections on another listener
var plaintextConns = (connectionCount until 5).map(_ => connect("PLAINTEXT"))
val internalConns = (5 until 10).map(_ => connect("INTERNAL"))
plaintextConns.foreach(verifyConnection)
internalConns.foreach(verifyConnection)
plaintextConns ++= (0 until 2).map(_ => connect("PLAINTEXT"))
TestUtils.waitUntilTrue(() => connectionCount <= 10, "Internal connections not closed")
plaintextConns.foreach(verifyConnection)
assertThrows(classOf[IOException], () => internalConns.foreach { socket =>
sendAndReceive[ProduceResponse](produceRequest, socket)
})
plaintextConns.foreach(_.close())
internalConns.foreach(_.close())
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount, "Connections not closed")
}
@Test
def testDynamicListenerConnectionCreationRateQuota(): Unit = {
// Create another listener. PLAINTEXT is an inter-broker listener
// keep default limits
val newListenerNames = Seq("PLAINTEXT", "EXTERNAL")
val newListeners = "PLAINTEXT://localhost:0,EXTERNAL://localhost:0"
val props = new Properties
props.put(KafkaConfig.ListenersProp, newListeners)
props.put(KafkaConfig.ListenerSecurityProtocolMapProp, "PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT")
reconfigureServers(props, perBrokerConfig = true, (KafkaConfig.ListenersProp, newListeners))
waitForListener("EXTERNAL")
// The expected connection count after each test run
val initialConnectionCount = connectionCount
// new broker-wide connection rate limit
val connRateLimit = 9
// before setting connection rate to 10, verify we can do at least double that by default (no limit)
verifyConnectionRate(2 * connRateLimit, plaintextListenerDefaultQuota, "PLAINTEXT", ignoreIOExceptions = false)
waitForConnectionCount(initialConnectionCount)
// Reduce total broker connection rate limit to 9 at the cluster level and verify the limit is enforced
props.clear() // so that we do not pass security protocol map which cannot be set at the cluster level
props.put(KafkaConfig.MaxConnectionCreationRateProp, connRateLimit.toString)
reconfigureServers(props, perBrokerConfig = false, (KafkaConfig.MaxConnectionCreationRateProp, connRateLimit.toString))
// verify EXTERNAL listener is capped by broker-wide quota (PLAINTEXT is not capped by broker-wide limit, since it
// has limited quota set and is a protected listener)
verifyConnectionRate(8, connRateLimit, "EXTERNAL", ignoreIOExceptions = false)
waitForConnectionCount(initialConnectionCount)
// Set 4 conn/sec rate limit for each listener and verify it gets enforced
val listenerConnRateLimit = 4
val plaintextListenerProp = s"${listener.configPrefix}${KafkaConfig.MaxConnectionCreationRateProp}"
props.put(s"listener.name.external.${KafkaConfig.MaxConnectionCreationRateProp}", listenerConnRateLimit.toString)
props.put(plaintextListenerProp, listenerConnRateLimit.toString)
reconfigureServers(props, perBrokerConfig = true, (plaintextListenerProp, listenerConnRateLimit.toString))
executor = Executors.newFixedThreadPool(newListenerNames.size)
val futures = newListenerNames.map { listener =>
executor.submit((() => verifyConnectionRate(3, listenerConnRateLimit, listener, ignoreIOExceptions = false)): Runnable)
}
futures.foreach(_.get(40, TimeUnit.SECONDS))
waitForConnectionCount(initialConnectionCount)
// increase connection rate limit on PLAINTEXT (inter-broker) listener to 12 and verify that it will be able to
// achieve this rate even though total connection rate may exceed broker-wide rate limit, while EXTERNAL listener
// should not exceed its listener limit
val newPlaintextRateLimit = 12
props.put(plaintextListenerProp, newPlaintextRateLimit.toString)
reconfigureServers(props, perBrokerConfig = true, (plaintextListenerProp, newPlaintextRateLimit.toString))
val plaintextFuture = executor.submit((() =>
verifyConnectionRate(10, newPlaintextRateLimit, "PLAINTEXT", ignoreIOExceptions = false)): Runnable)
val externalFuture = executor.submit((() =>
verifyConnectionRate(3, listenerConnRateLimit, "EXTERNAL", ignoreIOExceptions = false)): Runnable)
plaintextFuture.get(40, TimeUnit.SECONDS)
externalFuture.get(40, TimeUnit.SECONDS)
waitForConnectionCount(initialConnectionCount)
}
@Test
def testDynamicIpConnectionRateQuota(): Unit = {
val connRateLimit = 10
val initialConnectionCount = connectionCount
// before setting connection rate to 10, verify we can do at least double that by default (no limit)
verifyConnectionRate(2 * connRateLimit, plaintextListenerDefaultQuota, "PLAINTEXT", ignoreIOExceptions = false)
waitForConnectionCount(initialConnectionCount)
// set default IP connection rate quota, verify that we don't exceed the limit
updateIpConnectionRate(None, connRateLimit)
verifyConnectionRate(8, connRateLimit, "PLAINTEXT", ignoreIOExceptions = true)
waitForConnectionCount(initialConnectionCount)
// set a higher IP connection rate quota override, verify that the higher limit is now enforced
val newRateLimit = 18
updateIpConnectionRate(Some(localAddress.getHostAddress), newRateLimit)
verifyConnectionRate(14, newRateLimit, "PLAINTEXT", ignoreIOExceptions = true)
waitForConnectionCount(initialConnectionCount)
}
private def reconfigureServers(newProps: Properties, perBrokerConfig: Boolean, aPropToVerify: (String, String)): Unit = {
val initialConnectionCount = connectionCount
val adminClient = createAdminClient()
TestUtils.incrementalAlterConfigs(servers, adminClient, newProps, perBrokerConfig).all.get()
waitForConfigOnServer(aPropToVerify._1, aPropToVerify._2)
adminClient.close()
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount,
s"Admin client connection not closed (initial = $initialConnectionCount, current = $connectionCount)")
}
private def updateIpConnectionRate(ip: Option[String], updatedRate: Int): Unit = {
val initialConnectionCount = connectionCount
val adminClient = createAdminClient()
try {
val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.IP -> ip.orNull).asJava)
val request = Map(entity -> Map(QuotaConfigs.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(updatedRate.toDouble)))
TestUtils.alterClientQuotas(adminClient, request).all.get()
// use a random throwaway address if ip isn't specified to get the default value
TestUtils.waitUntilTrue(() => servers.head.socketServer.connectionQuotas.
connectionRateForIp(InetAddress.getByName(ip.getOrElse(unknownHost))) == updatedRate,
s"Timed out waiting for connection rate update to propagate"
)
} finally {
adminClient.close()
}
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount,
s"Admin client connection not closed (initial = $initialConnectionCount, current = $connectionCount)")
}
private def waitForListener(listenerName: String): Unit = {
TestUtils.retry(maxWaitMs = 10000) {
try {
assertTrue(servers.head.socketServer.boundPort(ListenerName.normalised(listenerName)) > 0)
} catch {
case e: KafkaException => throw new AssertionError(e)
}
}
}
private def createAdminClient(): Admin = {
val bootstrapServers = TestUtils.bootstrapServers(servers, new ListenerName(securityProtocol.name))
val config = new Properties()
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
config.put(AdminClientConfig.METADATA_MAX_AGE_CONFIG, "10")
val adminClient = Admin.create(config)
adminClient
}
private def waitForConfigOnServer(propName: String, propValue: String, maxWaitMs: Long = 10000): Unit = {
TestUtils.retry(maxWaitMs) {
assertEquals(propValue, servers.head.config.originals.get(propName))
}
}
private def produceRequest: ProduceRequest =
requests.ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection(
Collections.singletonList(new ProduceRequestData.TopicProduceData()
.setName(topic)
.setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
.setIndex(0)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(System.currentTimeMillis(), "key".getBytes, "value".getBytes))))))
.iterator))
.setAcks((-1).toShort)
.setTimeoutMs(3000)
.setTransactionalId(null))
.build()
def connectionCount: Int = servers.head.socketServer.connectionCount(localAddress)
def connect(listener: String): Socket = {
val listenerName = ListenerName.normalised(listener)
new Socket("localhost", servers.head.socketServer.boundPort(listenerName))
}
private def createAndVerifyConnection(listener: String = "PLAINTEXT"): Unit = {
val socket = connect(listener)
try {
verifyConnection(socket)
} finally {
socket.close()
}
}
private def verifyConnection(socket: Socket): Unit = {
val produceResponse = sendAndReceive[ProduceResponse](produceRequest, socket)
assertEquals(1, produceResponse.data.responses.size)
val topicProduceResponse = produceResponse.data.responses.asScala.head
assertEquals(1, topicProduceResponse.partitionResponses.size)
val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head
assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode))
}
private def verifyMaxConnections(maxConnections: Int, connectWithFailure: () => Unit): Unit = {
val initialConnectionCount = connectionCount
//create connections up to maxConnectionsPerIP - 1, leave space for one connection
var conns = (connectionCount until (maxConnections - 1)).map(_ => connect("PLAINTEXT"))
// produce should succeed on a new connection
createAndVerifyConnection()
TestUtils.waitUntilTrue(() => connectionCount == (maxConnections - 1), "produce request connection is not closed")
conns = conns :+ connect("PLAINTEXT")
// now try one more (should fail)
assertThrows(classOf[IOException], () => connectWithFailure.apply())
//close one connection
conns.head.close()
TestUtils.waitUntilTrue(() => connectionCount == (maxConnections - 1), "connection is not closed")
createAndVerifyConnection()
conns.foreach(_.close())
TestUtils.waitUntilTrue(() => initialConnectionCount == connectionCount, "Connections not closed")
}
private def connectAndVerify(listener: String, ignoreIOExceptions: Boolean): Unit = {
val socket = connect(listener)
try {
sendAndReceive[ProduceResponse](produceRequest, socket)
} catch {
// IP rate throttling can lead to disconnected sockets on client's end
case e: IOException => if (!ignoreIOExceptions) throw e
} finally {
socket.close()
}
}
private def waitForConnectionCount(expectedConnectionCount: Int): Unit = {
TestUtils.waitUntilTrue(() => expectedConnectionCount == connectionCount,
s"Connections not closed (expected = $expectedConnectionCount current = $connectionCount)")
}
/**
* this method simulates a workload that creates connection, sends produce request, closes connection,
* and verifies that rate does not exceed the given maximum limit `maxConnectionRate`
*
* Since producing a request and closing a connection also takes time, this method does not verify that the lower bound
* of actual rate is close to `maxConnectionRate`. Instead, use `minConnectionRate` parameter to verify that the rate
* is at least certain value. Note that throttling is tested and verified more accurately in ConnectionQuotasTest
*/
private def verifyConnectionRate(minConnectionRate: Int, maxConnectionRate: Int, listener: String, ignoreIOExceptions: Boolean): Unit = {
// duration such that the maximum rate should be at most 20% higher than the rate limit. Since all connections
// can fall in the beginning of quota window, it is OK to create extra 2 seconds (window size) worth of connections
val runTimeMs = TimeUnit.SECONDS.toMillis(13)
val startTimeMs = System.currentTimeMillis
val endTimeMs = startTimeMs + runTimeMs
var connCount = 0
while (System.currentTimeMillis < endTimeMs) {
connectAndVerify(listener, ignoreIOExceptions)
connCount += 1
}
val elapsedMs = System.currentTimeMillis - startTimeMs
val actualRate = (connCount.toDouble / elapsedMs) * 1000
val rateCap = if (maxConnectionRate < Int.MaxValue) 1.2 * maxConnectionRate.toDouble else Int.MaxValue.toDouble
assertTrue(actualRate <= rateCap, s"Listener $listener connection rate $actualRate must be below $rateCap")
assertTrue(actualRate >= minConnectionRate, s"Listener $listener connection rate $actualRate must be above $minConnectionRate")
}
}
| guozhangwang/kafka | core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala | Scala | apache-2.0 | 19,664 |
package org.scalacheck.time
/** Stub trait since Scala Native does not have native support for java.time types. */
private[scalacheck] trait JavaTimeChoose
| rickynils/scalacheck | native/src/main/scala/org/scalacheck/time/JavaTimeChoose.scala | Scala | bsd-3-clause | 157 |
package renesca
import com.github.httpmock.api.Stubbing
import com.github.httpmock.builder.RequestBuilder._
import com.github.httpmock.builder.ResponseBuilder._
import com.github.httpmock.dto.ResponseDto
import com.github.httpmock.specs.{HttpMock, HttpMockServer}
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import renesca.graph.Graph
import spray.json._
@RunWith(classOf[JUnitRunner])
class DbServiceWithMockDbSpec extends HttpMockSpecification {
"DbService" should {
"execute graph query" in new DbMock(this) {
val A = json.Node("1")
val B = json.Node("2")
val ArB = json.Relationship("3", "hopfen", A.id, B.id)
val jsonGraph: json.Graph = json.Graph(List(A, B), List(ArB))
when(Query("some statement")).thenRespond(jsonGraph)
val resultGraph = dbService.queryGraph(Query("some statement"))
resultGraph must equalTo(json.GraphFactory(jsonGraph))
}.pendingUntilFixed("stopped working after update to specs 3.6.3")
"for list of queries in graphQuery return list of graphs" in todo
"for list of queries in batchQuery return no data" in todo
}
}
abstract class HttpMockSpecification extends Specification with HttpMockServer
class DbMock(mockServer: HttpMockServer) extends HttpMock(mockServer) {
val dbService = new DbService
dbService.restService = new RestService(requestUrl)
override def requestUrl = s"${ mockServer.baseUri }${ super.requestUrl }"
def when(query: Query): Stubbing = {
val url = "/db/data/transaction/commit"
when(post(url).build())
}
def post(url: String) = request().post(url)
implicit def graphToJsonResponse(jsonGraph: json.Graph): ResponseDto = {
json.Response(results = List(json.Result(Nil, List(json.Data(None, Some(jsonGraph))))))
}
implicit def jsonResponseToHttpResponse(jsonResponse: json.Response): ResponseDto = {
import renesca.json.protocols.ResponseJsonProtocol._
response().contentType("application/json").payload(jsonResponse.toJson.prettyPrint).build()
}
}
| renesca/renesca | jvm/src/it/scala/renesca/DbServiceWithMockDbSpec.scala | Scala | apache-2.0 | 2,077 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.adt
/**
* Status flags for transformer operations.
*/
object TransformerStatus {
sealed trait TransformerStatusType
case object OK extends TransformerStatusType
case object ERROR extends TransformerStatusType
}
| Tensei-Data/tensei-agent | src/main/scala/com/wegtam/tensei/agent/adt/TransformerStatus.scala | Scala | agpl-3.0 | 1,010 |
package net.orfjackal.dimdwarf.actors.dummies
import net.orfjackal.dimdwarf.mq.MessageSender
import net.orfjackal.dimdwarf.controller._
import javax.inject.Inject
@ControllerScoped
class RelayController @Inject()(toActor: MessageSender[Any], spy: Spy) extends Controller {
def process(message: Any) {
message match {
case MessageToActor(m) => {
spy.log("controller forwarded " + m)
toActor.send(m)
}
case m => spy.log("controller got " + m)
}
}
}
| orfjackal/dimdwarf | dimdwarf-core/src/test/scala/net/orfjackal/dimdwarf/actors/dummies/RelayController.scala | Scala | apache-2.0 | 495 |
package com.arcusys.learn.models.request
import org.scalatra.ScalatraBase
import com.arcusys.learn.service.util.Parameter
import scala.util.Try
object LiferayRequest extends BaseCollectionFilteredRequest with BaseRequest {
def apply(scalatra: ScalatraBase) = new Model(scalatra)
class Model(scalatra: ScalatraBase) extends BaseSortableCollectionFilteredRequestModel(scalatra, s => s) {
implicit val httpRequest = scalatra.request
def action = Parameter(Action).required
def courseId = Parameter(CourseId).intRequired
}
}
| icacic/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/models/request/LiferayRequest.scala | Scala | gpl-3.0 | 543 |
/*
*************************************************************************************
* Copyright 2011-2013 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.snippet.node
import net.liftweb.http._
import net.liftweb.common._
import bootstrap.liftweb.RudderConfig
import scala.xml.NodeSeq
import net.liftweb.http.js.JsCmds._
class Nodes extends StatefulSnippet with Loggable {
private[this] val nodeInfoService = RudderConfig.nodeInfoService
val srvGrid = RudderConfig.srvGrid
val dispatch : DispatchIt = {
case "table" => table _
case "loadData" => loadData _
}
private[this] def getNodes() = {
nodeInfoService.getAll match {
case Full(infos) => infos.values.toSeq
case eb:EmptyBox => val fail = eb?~ s"could not find Nodes "
logger.error(fail.msg)
Seq()
}
}
def loadData(xml:NodeSeq) = {
Script(OnLoad(srvGrid.refreshData(() => getNodes, None, "nodes").applied))
}
def table(html:NodeSeq)= {
srvGrid.displayAndInit(Seq(), "nodes", None, Some(() => getNodes()))
}
}
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/snippet/node/Nodes.scala | Scala | gpl-3.0 | 2,622 |
package io.github.rollenholt.scala.email.sender
/**
* @author rollenholt
*/
case class EmailMessage(title:String, content:String, from:String, to:List[String], cc:List[String], bcc:List[String]) {}
| rollenholt/scala-email-sender | src/main/scala/io/github/rollenholt/scala/email/sender/EmailMessage.scala | Scala | mit | 202 |