code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.sksamuel.elastic4s.http.search
import java.util
import com.sksamuel.elastic4s.http.search.queries.QueryBuilderFn
import com.sksamuel.elastic4s.searches.HighlightFieldDefinition
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory, XContentType}
import scala.collection.JavaConverters._
/**
 * Builds the JSON body for the `highlight` section of an Elasticsearch search
 * request from a collection of [[HighlightFieldDefinition]]s.
 */
object HighlightFieldBuilderFn {

  /**
   * Renders a `{ "fields": { <fieldName>: { ...options... }, ... } }` object,
   * emitting only the options that were explicitly set on each definition.
   */
  def apply(fields: Iterable[HighlightFieldDefinition]): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.startObject()
    builder.startObject("fields")
    fields.foreach { field =>
      builder.startObject(field.field)
      field.boundaryChars.foreach(chars => builder.field("boundary_chars", String.valueOf(chars)))
      field.boundaryMaxScan.foreach(builder.field("boundary_max_scan", _))
      field.forceSource.foreach(builder.field("force_source", _))
      field.fragmentOffset.foreach(builder.field("fragment_offset", _))
      field.fragmentSize.foreach(builder.field("fragment_size", _))
      // An optional highlight query is serialised via QueryBuilderFn and embedded as raw JSON.
      field.highlightQuery.map(QueryBuilderFn.apply).map(_.bytes()).foreach { highlight =>
        builder.rawField("highlight_query", highlight, XContentType.JSON)
      }
      if (field.matchedFields.nonEmpty) {
        builder.field("matched_fields", field.matchedFields.asJava)
      }
      field.highlighterType.foreach(builder.field("type", _))
      field.noMatchSize.foreach(builder.field("no_match_size", _))
      field.numOfFragments.foreach(builder.field("number_of_fragments", _))
      field.order.foreach(builder.field("order", _))
      field.phraseLimit.foreach(builder.field("phrase_limit", _))
      // If either tag list is set, emit both, defaulting the missing one to
      // <em>/</em> so the pre/post pair is always consistent.
      if (field.postTags.nonEmpty || field.preTags.nonEmpty) {
        if (field.postTags.isEmpty)
          builder.field("post_tags", util.Arrays.asList("</em>"))
        else
          builder.field("post_tags", field.postTags.asJava)
        if (field.preTags.isEmpty)
          builder.field("pre_tags", util.Arrays.asList("<em>"))
        else
          builder.field("pre_tags", field.preTags.asJava)
      }
      field.requireFieldMatch.foreach(builder.field("require_field_match", _))
      builder.endObject()
    }
    builder.endObject()
    builder.endObject()
  }
}
| aroundus-inc/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/HighlightFieldBuilderFn.scala | Scala | apache-2.0 | 2,189 |
package net.sansa_stack.query.spark.graph.jena.expression
import net.sansa_stack.query.spark.graph.jena.model.SparkExecutionModel
import net.sansa_stack.query.spark.graph.jena.util.{BasicGraphPattern, Result}
import org.apache.jena.graph.Node
import org.apache.spark.rdd.RDD
/**
 * SPARQL NOT EXISTS filter expression evaluated over Spark RDDs: keeps only
 * the solutions from the input that do NOT match the given basic graph pattern.
 */
class NotExists(bgp: BasicGraphPattern) extends Pattern {

  private val tag = "Not Exists"
  // Solutions matching the NOT EXISTS pattern, computed eagerly at construction time.
  private val pattern = SparkExecutionModel.basicGraphPatternMatch(bgp)

  override def evaluate(result: RDD[Result[Node]]): RDD[Result[Node]] = {
    // Left join: rows that match the pattern acquire the pattern's extra bindings,
    // so matched rows end up with more bound fields than unmatched ones.
    val join = SparkExecutionModel.leftJoin(result, pattern)
    // NOTE(review): `reduce` throws on an empty RDD — this assumes `result` is
    // non-empty; confirm that callers guarantee this.
    val varSize = join.map(r => r.getField.size).reduce((i, j) => math.max(i, j))
    // Keep only rows that did NOT gain the joined pattern's bindings.
    join.filter(r => r.getField.size != varSize)
  }

  override def getTag: String = { tag }

  /** The pre-computed solutions of the NOT EXISTS graph pattern. */
  def getPattern: RDD[Result[Node]] = { pattern }
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-spark/src/main/scala/net/sansa_stack/query/spark/graph/jena/expression/NotExists.scala | Scala | apache-2.0 | 807 |
package com.twitter.finagle.buoyant.h2
import com.twitter.concurrent.AsyncQueue
import com.twitter.finagle.buoyant.h2.service.{H2Classifier, H2ReqRep, H2ReqRepFrame}
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.liveness.FailureAccrualPolicy
import com.twitter.finagle.service.ResponseClass
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.{FactoryToService, Service, ServiceFactory, Status => FStatus}
import com.twitter.util.{Duration, Future, MockTimer, Return}
import io.buoyant.router.DiscardingFactoryToService
import io.buoyant.router.DiscardingFactoryToService.RequestDiscarder
import io.buoyant.router.context.h2.H2ClassifierCtx
import io.buoyant.test.FunSuite
import java.util
import scala.{Stream => SStream}
/**
 * Verifies that [[H2FailureAccrualFactory]] transitions a service from Open to
 * Busy after five consecutive classified failures, both when failures are
 * detected at the response level and when they are detected at the stream
 * (trailers) level.
 */
class H2FailureAccrualFactoryTest extends FunSuite {
  val timer = new MockTimer()

  test("response failure accrual") {
    // Classifies any response whose :status is not 200 as a non-retryable failure.
    val classifier = new H2Classifier {
      override def responseClassifier: PartialFunction[H2ReqRep, ResponseClass] = {
        case H2ReqRep(_, Return(rep)) =>
          if (rep.headers.get(":status") == Some("200")) ResponseClass.Success
          else ResponseClass.NonRetryableFailure
        case _ =>
          ResponseClass.NonRetryableFailure
      }
    }

    val stats = new InMemoryStatsReceiver
    // Service that always fails with a 500, so every call counts toward accrual.
    val underlying = ServiceFactory.const(Service.mk { req: Request =>
      Future.value(Response(Status.InternalServerError, Stream.empty()))
    })
    val fa = new H2FailureAccrualFactory(
      underlying,
      FailureAccrualPolicy.consecutiveFailures(5, SStream.continually(Duration.Top)),
      timer,
      stats
    )
    val svc = new DiscardingFactoryToService(RequestDiscarder[Request](_ => ()), fa)

    Contexts.local.let(H2ClassifierCtx, param.H2Classifier(classifier)) {
      // First four failures leave the factory Open; the fifth trips it to Busy.
      for (_ <- 1 to 4) {
        await(svc(Request(Headers(":method" -> "GET"), Stream.empty())))
        assert(fa.status == FStatus.Open)
      }

      await(svc(Request(Headers(":method" -> "GET"), Stream.empty())))
      assert(fa.status == FStatus.Busy)
    }
  }

  test("stream failure accrual") {
    // Classifies a stream by its grpc-status trailer: anything but "0" is a failure.
    val classifier = new H2Classifier {
      override def streamClassifier: PartialFunction[H2ReqRepFrame, ResponseClass] = {
        case H2ReqRepFrame(_, Return((_, Some(Return(f: Frame.Trailers))))) =>
          if (f.get("grpc-status") == Some("0")) ResponseClass.Success
          else ResponseClass.NonRetryableFailure
        case _ =>
          ResponseClass.NonRetryableFailure
      }
    }

    val stats = new InMemoryStatsReceiver
    // One queue per request so the test can complete each stream individually.
    val serverLocalQs = new util.ArrayList[AsyncQueue[Frame]]()
    val underlying = ServiceFactory.const(
      Service.mk { req: Request =>
        val q = new AsyncQueue[Frame]()
        serverLocalQs.add(q)
        Future.value(Response(Status.Ok, Stream(q)))
      }
    )
    val fa = new H2FailureAccrualFactory(
      underlying,
      FailureAccrualPolicy.consecutiveFailures(5, SStream.continually(Duration.Top)),
      timer,
      stats
    )
    val svc = new FactoryToService(fa)

    Contexts.local.let(H2ClassifierCtx, param.H2Classifier(classifier)) {
      val rsps = for (_ <- 1 to 5) yield {
        await(svc(Request(Headers(":method" -> "GET"), Stream.empty())))
      }
      // Responses succeeded (200), so nothing has accrued yet.
      assert(fa.status == FStatus.Open)

      // Failing trailers on the first four streams keep the factory Open.
      for (i <- 0 until 4) {
        serverLocalQs.get(i).offer(Frame.Trailers("grpc-status" -> "1"))
        await(rsps(i).stream.read())
        assert(fa.status == FStatus.Open)
      }

      // Fifth failing stream trips failure accrual.
      serverLocalQs.get(4).offer(Frame.Trailers("grpc-status" -> "1"))
      await(rsps(4).stream.read())
      assert(fa.status == FStatus.Busy)
    }
  }
}
| BuoyantIO/linkerd | router/h2/src/test/scala/com/twitter/finagle/buoyant/h2/H2FailureAccrualFactoryTest.scala | Scala | apache-2.0 | 3,644 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.marklogic
import slamdata.Predef._
import monocle.Prism
import scalaz._
/** The kind of a MarkLogic document: JSON or XML. */
sealed abstract class DocType {
  /**
   * Catamorphism over the two document kinds: evaluates `json` when this is
   * [[DocType.JsonDoc]] and `xml` when this is [[DocType.XmlDoc]].  Both
   * arguments are by-name, so only the selected branch is evaluated.
   */
  def fold[A](json: => A, xml: => A): A =
    if (this == DocType.JsonDoc) json else xml
}
object DocType {
  case object JsonDoc extends DocType
  case object XmlDoc extends DocType

  // Singleton-type aliases, usable as phantom type parameters.
  type Json = JsonDoc.type
  type Xml = XmlDoc.type

  // Constructors widened to the base type.
  val json: DocType = JsonDoc
  val xml: DocType = XmlDoc

  /** Prism between the strings "json"/"xml" and the corresponding DocType. */
  val name: Prism[String, DocType] =
    Prism.partial[String, DocType] {
      case "json" => json
      case "xml" => xml
    } {
      case JsonDoc => "json"
      case XmlDoc => "xml"
    }

  // Structural equality is sufficient: both variants are case objects.
  implicit val equal: Equal[DocType] =
    Equal.equalA

  implicit val show: Show[DocType] =
    Show.showFromToString
}
| drostron/quasar | marklogic/src/main/scala/quasar/physical/marklogic/DocType.scala | Scala | apache-2.0 | 1,402 |
package org.talkingpuffin.ui
import java.awt.BorderLayout
import swing.{Frame, BorderPanel}
import util.Cancelable
import javax.swing.{JScrollPane, JTextPane}
import org.talkingpuffin.util.WordCounter
/**
 * A frame displaying the word frequencies of the supplied text as HTML,
 * one line per frequency bucket, highest frequency first.
 */
class WordFrequenciesFrame(text: String) extends Frame with Cancelable {
  title = "Word Frequencies"
  contents = new BorderPanel {
    peer.add(new JScrollPane(new JTextPane {
      setContentType("text/html")
      setEditable(false)
      setText(createDisplayText(WordCounter(text).frequencies))
      // Scroll back to the top after setText moved the caret to the end.
      setCaretPosition(0)
    }), BorderLayout.CENTER
    )
  }

  // Renders each frequency bucket as "<b>freq</b>: word, word, ...", skipping
  // buckets with frequency <= 2, in descending frequency order.
  private def createDisplayText(buckets: WordCounter.FreqToStringsMap): String =
    "<div style='font-family: sans-serif'>" +
    (for (freq <- buckets.keys.toList.sorted.reverse if freq > 2)
      yield "<b>" + freq + "</b>" + ": " + buckets.get(freq).get.sorted.mkString(", ") + "<br>"
    ).mkString + "</div>"
}
| dcbriccetti/talking-puffin | desktop/src/main/scala/org/talkingpuffin/ui/WordFrequenciesFrame.scala | Scala | mit | 889 |
/*
* Copyright 2014 Mark Goldenstein
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.markgoldenstein.bitcoin
import scala.language.postfixOps
import scala.language.implicitConversions
import scala.collection.mutable
import scala.collection.JavaConversions._
import scala.concurrent.{TimeoutException, Promise}
import scala.concurrent.duration._
import sext._
import java.net.URI
import java.security.KeyStore
import java.io.{FileInputStream, File}
import javax.net.ssl.{SSLContext, TrustManagerFactory}
import akka.actor.{Actor, ActorLogging, ReceiveTimeout, Status}
import akka.pattern.pipe
import play.api.libs.json._
import org.java_websocket.client.WebSocketClient
import org.java_websocket.handshake.ServerHandshake
import org.java_websocket.drafts.{Draft, Draft_17}
import com.markgoldenstein.bitcoin.messages.actor._
import com.markgoldenstein.bitcoin.messages.json._
import JsonImplicits._
/**
 * Actor that maintains a secure WebSocket connection to a btcwallet instance
 * and bridges typed actor request/notification messages to btcwallet's JSON-RPC
 * protocol.
 *
 * Lifecycle: starts in the `connecting` state, retrying the connection every
 * `timeoutDuration` until it succeeds, then switches to `active`.  On
 * disconnect it drops back to `connecting` and reconnects.
 *
 * @param websocketUri       wss:// endpoint of btcwallet
 * @param rpcUser            RPC basic-auth user name
 * @param rpcPass            RPC basic-auth password
 * @param keyStoreFile       JKS keystore holding btcwallet's certificate
 * @param keyStorePass       keystore password
 * @param onConnect          callback invoked each time a connection is established
 * @param handleNotification partial handler for [[NotificationMessage]]s
 * @param timeoutDuration    delay between reconnection attempts
 */
class BtcWalletActor(
    websocketUri: String,
    rpcUser: String,
    rpcPass: String,
    keyStoreFile: String,
    keyStorePass: String,
    onConnect: () => Unit,
    handleNotification: Actor.Receive,
    timeoutDuration: FiniteDuration)
  extends Actor
  with ActorLogging {

  implicit val executionContext = context.dispatcher

  // this HashMap maps JSON RPC request IDs to the corresponding response promises
  // and a function that converts the JSON RPC response to the final actor response
  val rpcRequests = mutable.HashMap.empty[String, (Promise[AnyRef], JsValue => AnyRef)]

  // initialize SSL stuff - we need this to open a btcwallet connection
  val uri = new URI(websocketUri)
  // HTTP basic auth header sent with the WebSocket handshake.
  val headers = Map(("Authorization", "Basic " + new sun.misc.BASE64Encoder().encode((rpcUser + ":" + rpcPass).getBytes)) :: Nil: _*)
  // SSL socket factory trusting only the certificates in the configured keystore.
  val factory = {
    val ks = KeyStore.getInstance("JKS")
    val kf = new File(keyStoreFile)
    ks.load(new FileInputStream(kf), keyStorePass.toCharArray)
    val tmf = TrustManagerFactory.getInstance("SunX509")
    tmf.init(ks)
    val sslContext = SSLContext.getInstance("TLS")
    sslContext.init(null, tmf.getTrustManagers, null)
    sslContext.getSocketFactory
  }

  var btcWalletClient = tryToConnect()

  // start actor in connecting mode
  override def receive = connecting

  // try to connect to btcwallet
  def connecting: Actor.Receive = {
    case Connected =>
      context.become(active)
      onConnect()
    case ReceiveTimeout =>
      // Periodic retry scheduled by tryToConnect.
      btcWalletClient = tryToConnect()
    case _: RequestMessage =>
      // Requests cannot be buffered while disconnected; fail them immediately.
      val message = "Cannot process request: no connection to btcwallet."
      sender ! Status.Failure(new IllegalStateException(message))
      log.error(message)
  }

  // connection established, handle requests
  def active: Actor.Receive = {
    case CompleteRequest(JsonResponse(resultOption, errorOption, id)) =>
      // only inspect expected messages
      rpcRequests.remove(id).foreach(req => {
        val (p: Promise[AnyRef], resultFunc) = req
        (resultOption, errorOption) match {
          case (Some(result), _) =>
            log.debug("Actor Response\\n{}", result.treeString)
            p trySuccess resultFunc(result)
          case (_, Some(error)) =>
            val prettyError = Json.prettyPrint(error)
            log.error("Json request returned Error\\n{}", prettyError)
            p tryFailure new RuntimeException(prettyError)
          case _ => // ignore
        }
      })
    case m: RequestMessage =>
      m match {
        case m: WalletPassPhrase =>
          // do not log wallet pass
          log.debug("Actor Request\\n{}", m.copy(walletPass = "hidden").treeString)
        case m =>
          log.debug("Actor Request\\n{}", m.treeString)
      }
      // Translate each typed request into the corresponding JSON-RPC call,
      // supplying the converter applied to the raw JSON result.
      m match {
        case CreateRawTransaction(inputs, receivers) =>
          val resultFunc = (result: JsValue) => result.as[String]
          request(JsonMessage.createRawTransaction(inputs, receivers), resultFunc)
        case GetNewAddress =>
          val resultFunc = (result: JsValue) => result.as[String]
          request(JsonMessage.getNewAddress, resultFunc)
        case GetRawTransaction(transactionHash) =>
          val resultFunc = (result: JsValue) => Json.fromJson[RawTransaction](result).get
          request(JsonMessage.getRawTransaction(transactionHash), resultFunc)
        case ListUnspentTransactions(minConfirmations, maxConfirmations) =>
          val resultFunc = (result: JsValue) => Json.fromJson[Seq[UnspentTransaction]](result).get
          request(JsonMessage.listUnspentTransactions(minConfirmations, maxConfirmations), resultFunc)
        case SendRawTransaction(signedTransaction) =>
          val resultFunc = (result: JsValue) => result.as[String]
          request(JsonMessage.sendRawTransaction(signedTransaction), resultFunc)
        case SignRawTransaction(transaction) =>
          val resultFunc = (result: JsValue) => Json.fromJson[SignedTransaction](result).get
          request(JsonMessage.signRawTransaction(transaction), resultFunc)
        case WalletPassPhrase(walletPass, timeout) =>
          request(JsonMessage.walletPassPhrase(walletPass, timeout))
      }
    case RemoveRequest(id) =>
      // Timed-out request: drop its bookkeeping entry.
      rpcRequests -= id
    case Disconnected =>
      log.info("Connection to btcwallet closed.")
      context.become(connecting)
      btcWalletClient = tryToConnect()
    case m: NotificationMessage =>
      log.debug("Actor Notification\\n{}", m.treeString)
      handleNotification.applyOrElse(m, unhandled)
  }

  // Translates raw JSON notifications from btcwallet into actor messages.
  def handleJsonNotification: PartialFunction[JsonNotification, Unit] = {
    // handle a new transaction notification: send ReceivedPayment message
    case JsonNotification(_, "newtx", params) =>
      Json.fromJson[TransactionNotification](params(1)).map(txNtfn =>
        if (txNtfn.category == "receive")
          self ! ReceivedPayment(txNtfn.txid, txNtfn.address, txNtfn.amount, txNtfn.confirmations))
    case _ => // ignore
  }

  // helper method for request handling, to be called from handleRequest
  // this variant is for commands without a response
  def request(request: JsonRequest): Unit = {
    // do not log wallet pass
    if (request.method == "walletpassphrase")
      log.info("Json Request\\n{}", Json.prettyPrint(Json.toJson(request.copy(params = Json.arr("hidden")))))
    else
      log.info("Json Request\\n{}", Json.prettyPrint(Json.toJson(request)))
    btcWalletClient.send(Json.toJson(request).toString())
  }

  // helper method for request handling, to be called from handleRequest
  // this variant is for commands with a response
  def request(request: JsonRequest, resultFunc: JsValue => AnyRef): Unit = {
    log.info("Json Request\\n{}", Json.prettyPrint(Json.toJson(request)))
    val p = Promise[AnyRef]()
    val f = p.future
    rpcRequests += request.id ->(p, resultFunc)
    btcWalletClient.send(Json.toJson(request).toString())
    // Fail the promise if btcwallet does not answer within 5 seconds, and
    // schedule removal of the stale bookkeeping entry.
    context.system.scheduler.scheduleOnce(5 seconds) {
      p tryFailure new TimeoutException("Timeout: btcwallet did not respond in time.")
      self ! RemoveRequest(request.id)
    }
    // Deliver the eventual result (or failure) back to the asking actor.
    pipe(f) to sender
  }

  // Opens a new WebSocket connection; on failure schedules a retry via ReceiveTimeout.
  def tryToConnect(): WebSocketBtcWalletClient = {
    // Pending requests belong to the previous connection and can never complete.
    rpcRequests.clear()
    val btcWalletClient = new WebSocketBtcWalletClient(uri, new Draft_17, headers, 0)
    btcWalletClient.setSocket(factory.createSocket())
    val connected = btcWalletClient.connectBlocking()
    if (connected) {
      log.info("Connection to btcwallet established.")
      self ! Connected
    } else {
      log.info(s"Btcwallet not available: $websocketUri")
      context.system.scheduler.scheduleOnce(timeoutDuration, self, ReceiveTimeout)
    }
    btcWalletClient
  }

  /**
   * WebSocket client that forwards parsed JSON messages and connection events
   * back to the enclosing actor.
   */
  class WebSocketBtcWalletClient(
      serverUri: URI,
      protocolDraft: Draft,
      httpHeaders: Map[String, String],
      connectTimeout: Int)
    extends WebSocketClient(serverUri, protocolDraft, httpHeaders, connectTimeout) {

    override def onMessage(jsonMessage: String) = Json.fromJson[JsonMessage](Json.parse(jsonMessage)) foreach {
      case notification: JsonNotification =>
        log.info("Json Notification\\n{}", Json.prettyPrint(Json.parse(jsonMessage)))
        handleJsonNotification.applyOrElse(notification, unhandled)
      case response: JsonResponse =>
        log.info("Json Response\\n{}", Json.prettyPrint(Json.parse(jsonMessage)))
        self ! CompleteRequest(response)
      case _ => // ignore
    }

    override def onOpen(handshakeData: ServerHandshake) = {}

    override def onClose(code: Int, reason: String, remote: Boolean) = self ! Disconnected

    override def onError(ex: Exception) = {}
  }
}
package com.twitter.finatra.http.jsonpatch
/**
 * JsonPatch represents a sequence of operations to apply to a JSON document.
 * The corresponding HTTP request should use application/json-patch+json as the Content-Type.
 * @see [[com.twitter.finagle.http.MediaType application/json-patch+json]]
 *
 * @param patches a Seq of Json Patch Operations
 * @see [[https://tools.ietf.org/html/rfc6902 RFC 6902]]
 */
case class JsonPatch(patches: Seq[PatchOperation])
| twitter/finatra | http-core/src/main/scala/com/twitter/finatra/http/jsonpatch/JsonPatch.scala | Scala | apache-2.0 | 460 |
package au.com.dius.pact.provider.scalasupport
import java.io.File
import au.com.dius.pact.core.model.{DefaultPactReader, RequestResponsePact}
import org.apache.commons.io.FileUtils
object PactFileSource {

  import scala.collection.JavaConverters._

  /**
   * Recursively scans `baseDir` for `.json` pact files and loads each one
   * as a [[RequestResponsePact]].
   */
  def loadFiles(baseDir: File): Seq[RequestResponsePact] = {
    val jsonFiles = FileUtils.listFiles(baseDir, Array("json"), true).asScala.toSeq
    jsonFiles.map { pactFile =>
      DefaultPactReader.INSTANCE.loadPact(pactFile).asInstanceOf[RequestResponsePact]
    }
  }
}
| DiUS/pact-jvm | provider/scalasupport/src/main/scala/au/com/dius/pact/provider/scalasupport/PactFileSource.scala | Scala | apache-2.0 | 481 |
package im.actor.server.session
import scala.annotation.tailrec
import scala.collection.immutable
import akka.actor._
import akka.stream.actor._
import im.actor.api.rpc.ClientData
import im.actor.server.mtproto.protocol._
private[session] object SessionMessagePublisher {
  /** Props factory for [[SessionMessagePublisher]]. */
  def props() = Props[SessionMessagePublisher]
}
/**
 * Reactive-streams publisher that converts incoming MTProto message boxes,
 * outgoing protocol messages, and subscribe commands into a stream of
 * [[SessionStreamMessage]]s, buffering while there is no downstream demand.
 */
private[session] class SessionMessagePublisher extends ActorPublisher[SessionStreamMessage] with ActorLogging {

  import ActorPublisherMessage._
  import SessionStreamMessage._

  // TODO: MaxQueueSize
  // Buffer for messages that arrive while downstream demand is zero.
  private[this] var messageQueue = immutable.Queue.empty[SessionStreamMessage]

  def receive = {
    case (mb: MessageBox, clientData: ClientData) ⇒
      log.info("MessageBox: {} clientData: {}", mb, clientData)

      // TODO: tail-recursive function for container unpacking
      mb.body match {
        case Container(bodies) ⇒
          // Ack every non-ack inner message plus the container itself, then
          // publish each inner message for handling.
          val ackMessage = SendProtoMessage(MessageAck.outgoing(bodies.view.filterNot(_.body.isInstanceOf[MessageAck]).map(_.messageId) :+ mb.messageId))
          val handleMessages = bodies.map(HandleMessageBox(_, clientData)).toList

          publishMessages(ackMessage :: handleMessages)
        case _ ⇒
          val handleMessage = HandleMessageBox(mb, clientData)

          mb.body match {
            // Acks themselves are not re-acked.
            case _: MessageAck ⇒ publishMessage(handleMessage)
            case _             ⇒ publishMessages(List(SendProtoMessage(MessageAck.outgoing(Seq(mb.messageId))), handleMessage))
          }
      }
    case msg: SendProtoMessage ⇒
      publishMessage(msg)
    case command: SubscribeCommand ⇒
      publishMessage(HandleSubscribe(command))
    case Request(_) ⇒
      // Downstream requested more elements: drain the buffer.
      deliverBuf()
    case Cancel ⇒
      context.stop(self)
    case unmatched ⇒
      log.debug("Unmatched {}", unmatched)
  }

  // Emits immediately when the buffer is empty and demand exists; otherwise enqueues.
  private def publishMessage(message: SessionStreamMessage): Unit = {
    log.debug("Publish message {}", message)
    if (messageQueue.isEmpty && totalDemand > 0)
      onNext(message)
    else {
      messageQueue = messageQueue.enqueue(message)
      deliverBuf()
    }
  }

  // Enqueues a batch in order, then drains as much as demand allows.
  private def publishMessages(messages: immutable.Iterable[SessionStreamMessage]): Unit = {
    messages foreach { message ⇒
      log.debug("Publish message {}", message)
    }
    messageQueue = messageQueue.enqueue(messages)
    deliverBuf()
  }

  // Delivers queued messages while the publisher is active and demand remains.
  @tailrec final def deliverBuf(): Unit =
    if (isActive && totalDemand > 0)
      messageQueue.dequeueOption match {
        case Some((el, queue)) ⇒
          messageQueue = queue
          onNext(el)
          deliverBuf()
        case None ⇒
      }

  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    log.error(reason, "Exception thrown, message: {}", message)
  }
}
| boneyao/actor-platform | actor-server/actor-session/src/main/scala/im/actor/server/session/SessionMessagePublisher.scala | Scala | mit | 2,728 |
/*
* Copyright (c) 2013 David Soergel <dev@davidsoergel.com>
* Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*/
package worldmake.cookingstrategy
import worldmake._
import scala.concurrent._
import scala.collection.mutable
import worldmake.storage.StoredProvenancesForRecipe
import scala.concurrent.duration.Duration
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import worldmake.storage.Identifier
import worldmake.storage.Identifier
import com.typesafe.scalalogging.slf4j.Logging
/**
* @author <a href="mailto:dev@davidsoergel.com">David Soergel</a>
*/
/** Delivers a future that completes when the given recipe has a successful provenance. */
trait Notifier {
  def request[T](id: Identifier[Recipe[T]]): Future[Successful[T]]

  /** Releases any resources held by the notifier.  No-op by default. */
  def shutdown() {}
}
/** A [[Notifier]] driven by explicit completion callbacks. */
trait CallbackNotifier extends Notifier {
  /** Called when a provenance for some recipe has completed successfully. */
  def announceDone(p: Successful[_])

  /** Called when a provenance for some recipe has failed. */
  def announceFailed(p: FailedProvenance[_])

  /** Called when a provenance for some recipe has been cancelled. */
  def announceCancelled(p: CancelledProvenance[_])

  // ** what to do on failure?
}
/**
 * A [[CallbackNotifier]] that keeps one promise per awaited recipe and
 * completes it when a success, unrecoverable failure, or cancellation is
 * announced for that recipe.
 */
class BasicCallbackNotifier extends CallbackNotifier with Logging {

  // Maps recipe ids to the promise that will be completed once a terminal
  // outcome for that recipe is known.
  val waitingFor: mutable.Map[Identifier[Recipe[_]], Promise[Successful[_]]] = new mutable.HashMap[Identifier[Recipe[_]], Promise[Successful[_]]] with mutable.SynchronizedMap[Identifier[Recipe[_]], Promise[Successful[_]]]

  /** Completes and removes the pending promise for the recipe that produced `pr`. */
  def announceDone(pr: Successful[_]) = {
    for (p <- waitingFor.get(pr.recipeId)) {
      waitingFor.remove(pr.recipeId)
      p success pr
    }
  }

  def announceFailed(p: FailedProvenance[_]) = announceFailedOrCancelled(p.recipeId)

  def announceCancelled(p: CancelledProvenance[_]) = announceFailedOrCancelled(p.recipeId)

  private def announceFailedOrCancelled(id: Identifier[Recipe[Any]]) = {
    // one provenance may have failed, but the downstream provenances could just use a different one
    // see if there are any other successes...
    val sp = StoredProvenancesForRecipe(id)
    if (sp.successes.isEmpty && sp.potentialSuccesses.isEmpty) {
      val t = FailedRecipeException("Failure detected: no potential success for recipe: " + id, id)
      for (p <- waitingFor.get(id)) {
        // BUG FIX: the condition was inverted — previously the promise was
        // failed only when it was *already* completed (`p failure t` on a
        // completed promise throws IllegalStateException), and a still-pending
        // promise was never failed, leaving waiters hanging forever.
        if (p.isCompleted) {
          logger.error("Promise was already completed; ignoring failure of recipe " + id)
        } else {
          waitingFor.remove(id)
          p failure t
        }
      }
    }
  }

  // the job must actually be started somewhere else. This just promises to notify.
  def request[T](id: Identifier[Recipe[T]]): Future[Successful[T]] = {
    val p = waitingFor.get(id).getOrElse({
      val np = promise[Successful[T]]()
      waitingFor.put(id, np.asInstanceOf[Promise[Successful[_]]])
      np
    })
    p.future.asInstanceOf[Future[Successful[T]]]
  }
}
/**
 * A [[BasicCallbackNotifier]] that discovers completions by polling the
 * provenance store on a fixed schedule, applying each [[PollingAction]] to the
 * stored provenances of every awaited recipe.
 */
class PollingNotifier(pollingActions: Seq[PollingAction]) extends BasicCallbackNotifier {
  val actorSystem = ActorSystem()
  val scheduler = actorSystem.scheduler

  val task = new Runnable {
    def run() {
      // figure out which derivations we're looking for, and see what provenances exist for those
      val sps = waitingFor.keys.map(StoredProvenancesForRecipe[Any])
      for (a <- pollingActions) {
        a(sps, PollingNotifier.this)
      }
    }
  }

  implicit val executor = actorSystem.dispatcher

  // Poll every 10 seconds after an initial 5-second delay.
  scheduler.schedule(
    initialDelay = Duration(5, TimeUnit.SECONDS),
    interval = Duration(10, TimeUnit.SECONDS),
    runnable = task)

  /** Stops the internal actor system and thus the polling schedule. */
  override def shutdown() {
    actorSystem.shutdown()
  }
}
/** One check applied on each polling tick to the provenances of awaited recipes. */
trait PollingAction {
  def apply(sps: Iterable[StoredProvenancesForRecipe[_]], notifier: CallbackNotifier)
}
/** Announces the first observed success (if any) for each awaited recipe. */
object DetectSuccessPollingAction extends PollingAction {
  def apply(sps: Iterable[StoredProvenancesForRecipe[_]], notifier: CallbackNotifier) {
    sps.foreach { sp =>
      sp.successes.toSeq.headOption.foreach(pr => notifier.announceDone(pr))
    }
  }
}
/**
 * Announces failure for each awaited recipe that has no success and no
 * remaining potential success.
 */
object DetectFailedPollingAction extends PollingAction {
  def apply(sps: Iterable[StoredProvenancesForRecipe[_]], notifier: CallbackNotifier) {
    sps.foreach { sp =>
      if (sp.successes.isEmpty && sp.potentialSuccesses.isEmpty) {
        // just pick one, it doesn't matter...
        notifier.announceFailed(sp.failures.head)
      }
    }
  }
}
| davidsoergel/worldmake | src/main/scala/worldmake/cookingstrategy/Notifier.scala | Scala | apache-2.0 | 4,003 |
package notebook
import java.util.concurrent.ConcurrentHashMap
import akka.actor.{Deploy, _}
import com.typesafe.config.Config
import notebook.kernel.remote.RemoteActorSystem
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.concurrent.duration._
/**
* A kernel is a remote VM with a set of sub-actors, each of which interacts with local resources (for example, WebSockets).
* The local resource must be fully initialized before we will let messages flow through to the remote actor. This is
* accomplished by blocking on actor startup
* to the remote (this is accomplished by blocking on startup waiting for
*/
class Kernel(config: Config, system: ActorSystem, kernelId: String, notebookPath_ : Option[String] = None, customArgs:Option[List[String]]) {
  // Mutable so the kernel can follow a notebook that is renamed/moved.
  private[this] var _notebookPath = notebookPath_

  /** Path of the notebook this kernel serves, if any. */
  def notebookPath = _notebookPath

  /** Records that the served notebook has moved to `to`. */
  def moveNotebook(to:String) {
    _notebookPath = Some(to)
  }

  implicit val executor = system.dispatcher

  // Local manager actor that spawns and supervises the remote kernel VM.
  val router = system.actorOf(Props(new ExecutionManager))

  // Completed with the remote Deploy once the remote actor system is up,
  // allowing callers to deploy actors into the remote VM.
  private val remoteDeployPromise = Promise[Deploy]()

  def remoteDeployFuture = remoteDeployPromise.future

  case object ShutdownNow

  /** Asks the manager actor to shut down the remote VM. */
  def shutdown() {
    router ! ShutdownNow
  }

  /**
   * Actor that blocks during preStart until the remote actor system is
   * spawned, guaranteeing the remote side is ready before any messages flow.
   */
  class ExecutionManager extends Actor with ActorLogging {
    // These get filled in before we ever receive messages
    var remoteInfo: RemoteActorSystem = null

    override def preStart() {
      // Blocks up to 1 minute waiting for the remote VM to come up.
      remoteInfo = Await.result(RemoteActorSystem.spawn(config, system, "kernel", kernelId, notebookPath, customArgs), 1 minutes)
      remoteDeployPromise.success(remoteInfo.deploy)
    }

    override def postStop() {
      if (remoteInfo != null)
        remoteInfo.shutdownRemote()
    }

    def receive = {
      case ShutdownNow =>
        if (remoteInfo != null) {
          remoteInfo.shutdownRemote()
        }
    }
  }
}
/** Registry of running [[Kernel]]s keyed by kernel id. */
object KernelManager {
  /** Shuts down every registered kernel without removing it from the registry. */
  def shutdown() {
    kernels.values foreach {
      _.shutdown()
    }
  }

  // Thread-safe map of kernel id -> kernel.
  val kernels = new ConcurrentHashMap[String, Kernel]().asScala

  def get(id: String) = kernels.get(id)

  def apply(id: String) = kernels(id)

  /** Finds the kernel serving the notebook at `path`, if any. */
  def atPath(path: String) = kernels.find { case (id, k) => k.notebookPath.exists(_ == path) }

  def add(id: String, kernel: Kernel) {
    kernels += id -> kernel
  }

  def remove(id: String) {
    kernels -= id
  }

  /** Shuts down every kernel and clears the registry. */
  def stopAll = {
    kernels.values.foreach(_.shutdown())
    // BUG FIX: `kernels -- kernels.keys` built a *copy* of the map with the
    // keys removed and discarded it, leaving the registry untouched.  `--=`
    // mutates the map in place (and returns it, preserving the old result type).
    kernels --= kernels.keys.toList
  }
}
| shankar-reddy/spark-notebook | modules/subprocess/src/main/scala/notebook/Kernel.scala | Scala | apache-2.0 | 2,425 |
package brainiak.samples.npuzzle.ui.controller
import javafx.concurrent.Task
import javafx.scene.input.KeyEvent
import brainiak.samples.npuzzle.ui.Board
import brainiak.samples.npuzzle.{NPuzzleNode, NPuzzleProblem}
import brainiak.search.IDAStar
import brainiak.search.strategies.BestFirst
import brainiak.search.types.GraphSearch
/**
* Created by thiago on 1/29/14.
*/
/**
 * Board controller that solves the puzzle with IDA* search on a background
 * thread and then animates the resulting move sequence.
 *
 * Created by thiago on 1/29/14.
 */
class IDAStarController(val b: Board) extends BasicController {

  // No keyboard commands are supported in automatic-solve mode.
  override def handleCommand(evt: KeyEvent) = {
  }

  override def startAction() = {
    // Run the search off the UI thread so the interface stays responsive.
    new Thread(new Task[Unit]() {
      override def call(): Unit = {
        val problem = NPuzzleProblem(board.puzzleState, board.numHoax)
        if (!problem.goalAchieved) {
          board.controls.solvingStatus()
          // Heuristic: distance from the current state to the goal state.
          val search = IDAStar(st => st - problem.goal)
          val path = search.find(problem).path
          board.controls.movingStatus()
          // Replay each non-trivial movement along the solution path.
          path.foreach(n => n match {
            case puzzle: NPuzzleNode => if (puzzle.movement != 0) move(puzzle.movement)
          })
        }
      }
    }).start()
  }

  override def board = b
}
| pintowar/brainiak | brainiak-samples/src/main/scala/brainiak/samples/npuzzle/ui/controller/IDAStarController.scala | Scala | apache-2.0 | 1,095 |
import sbt.Keys._
import sbt._
import sbtdocker.DockerPlugin
import BuildSettings._
/**
 * sbt multi-project build: a root aggregate over the core library and the
 * graphite/batch applications.  Commented-out blocks below are retired
 * sub-projects kept for reference.
 */
object SpotlightBuild extends Build {
  // Root aggregates the sub-projects but is itself never published.
  lazy val root = {
    ( project in file(".") )
    .settings( defaultBuildSettings ++ doNotPublishSettings )
    .aggregate( core, /*publisher,*/ graphite, batch )
  }

  //  lazy val publisher = {
  //    ( project in file("publisher") )
  //    .settings( defaultBuildSettings ++ publishSettings )
  //  }

  //  lazy val subscriber = {
  //    ( project in file("subscriber") )
  //    .settings( defaultBuildSettings ++ publishSettings )
  //    //    .enablePlugins( DockerPlugin )
  //  }

  // Shared library used by the applications.
  lazy val core = {
    ( project in file("core") )
    .settings( defaultBuildSettings ++ publishSettings )
  }

  // Graphite-backed application, packaged as a Docker image.
  lazy val graphite = {
    ( project in file("app-graphite") )
    .settings( defaultBuildSettings ++ publishSettings )
    .dependsOn( core )
    .enablePlugins( DockerPlugin )
  }

  // Batch application; built but not published.
  lazy val batch = {
    ( project in file("app-batch") )
    .settings( defaultBuildSettings ++ doNotPublishSettings )
    .dependsOn( core )
  }

  //  lazy val root = Project(
  //    id = "spotlight-root",
  //    base = file( "." ),
  //    settings = defaultBuildSettings ++ Seq(
  //      publish := { },
  //      publishTo := Some("bogus" at "http://nowhere.com"),
  //      publishLocal := { }
  //    )
  //  ).aggregate( core )

  //  lazy val core = Project(
  //    id = "core",
  //    base = file( "core" ),
  //    settings = defaultBuildSettings
  //  )

  //  lazy val sandbox = Project(
  //    id = "sandbox",
  //    base = file( "sandbox" ),
  //    settings = defaultBuildSettings
  //  ) dependsOn( core )

  //  lazy val cli = Project(
  //    id = "cli",
  //    base = file( "cli" ),
  //    settings = defaultBuildSettings
  //  ) dependsOn( model )

  //  lazy val webapp = Project(
  //    id = "webapp",
  //    base = file( "webapp" ),
  //    settings = defaultBuildSettings
  //  ) dependsOn( core )
}
| dmrolfs/lineup | project/Build.scala | Scala | mit | 2,201 |
/*
* Copyright (c) 2016 Mashin (http://mashin.io). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mashin.rich.spark
import org.apache.spark.SparkContext
import org.apache.spark.api.java.function.{Function, Function2}
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
/**
 * Entry point for the enrichment API: implicit conversions that add extra
 * operations to RDDs, pair RDDs, and SparkContext, plus adapters between
 * Scala functions and Spark's Java `Function`/`Function2` interfaces.
 */
object RichRDD {

  // A ClassTag that pretends any type is AnyRef; usable where the tag is only
  // needed to satisfy an API signature, not for reflection.
  def fakeClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]

  implicit def rddToRichRDDFunctions[T: ClassTag](rdd: RDD[T]): RichRDDFunctions[T] =
    new RichRDDFunctions[T](rdd)

  implicit def pairRDDToRichPairRDDFunctions[K: ClassTag, V: ClassTag](rdd: RDD[(K, V)])
    : RichPairRDDFunctions[K, V] = new RichPairRDDFunctions[K, V](rdd)

  implicit def sparkContextToRichSparkContextFunctions(sc: SparkContext)
    : RichSparkContextFunctions = new RichSparkContextFunctions(sc)

  // Java Function -> Scala function adapters.
  implicit def toScalaFunction[T, R](fun: Function[T, R]): T => R = {
    (x: T) => fun.call(x)
  }

  implicit def toScalaFunction2[T1, T2, R](fun: Function2[T1, T2, R]): (T1, T2) => R = {
    (x: T1, x1: T2) => fun.call(x, x1)
  }

  // Scala function -> Java Function adapters.
  implicit def toSparkFunction[T, R](fun: T => R): Function[T, R] = {
    new Function[T, R] {
      override def call(v1: T): R = fun(v1)
    }
  }

  implicit def toSparkFunction2[T1, T2, R](fun: (T1, T2) => R): Function2[T1, T2, R] = {
    new Function2[T1, T2, R] {
      override def call(v1: T1, v2: T2): R = fun(v1, v2)
    }
  }
}
| mashin-io/rich-spark | main/src/main/scala/io/mashin/rich/spark/RichRDD.scala | Scala | apache-2.0 | 1,924 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.io.File
import scala.util.Random
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, QueryTest}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.datasources.v2.orc.OrcScan
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
/**
* Test suite base for testing the redaction of DataSourceScanExec/BatchScanExec.
*/
abstract class DataSourceScanRedactionTest extends QueryTest with SharedSparkSession {

  // NOTE(review): this pattern looks over-escaped compared to upstream Spark,
  // which uses "file:/[^\\]\\s]+" — confirm the intended regex survives
  // string escaping before relying on this suite.
  override protected def sparkConf: SparkConf = super.sparkConf
    .set("spark.redaction.string.regex", "file:/[^\\\\]\\\\s]+")

  /** True iff `msg` appears in any of the query execution's string renderings. */
  final protected def isIncluded(queryExecution: QueryExecution, msg: String): Boolean = {
    queryExecution.toString.contains(msg) ||
    queryExecution.simpleString.contains(msg) ||
    queryExecution.stringWithStats.contains(msg)
  }

  /** Extracts the scan node's root path from the given DataFrame's physical plan. */
  protected def getRootPath(df: DataFrame): Path

  test("treeString is redacted") {
    withTempDir { dir =>
      val basePath = dir.getCanonicalPath
      spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString)
      val df = spark.read.orc(basePath)

      val rootPath = getRootPath(df)
      assert(rootPath.toString.contains(dir.toURI.getPath.stripSuffix("/")))

      // The raw file path must never leak into any plan rendering...
      assert(!df.queryExecution.sparkPlan.treeString(verbose = true).contains(rootPath.getName))
      assert(!df.queryExecution.executedPlan.treeString(verbose = true).contains(rootPath.getName))
      assert(!df.queryExecution.toString.contains(rootPath.getName))
      assert(!df.queryExecution.simpleString.contains(rootPath.getName))

      // ...and the redaction placeholder must appear in its place.
      val replacement = "*********"
      assert(df.queryExecution.sparkPlan.treeString(verbose = true).contains(replacement))
      assert(df.queryExecution.executedPlan.treeString(verbose = true).contains(replacement))
      assert(df.queryExecution.toString.contains(replacement))
      assert(df.queryExecution.simpleString.contains(replacement))
    }
  }
}
/**
* Suite that tests the redaction of DataSourceScanExec
*/
class DataSourceScanExecRedactionSuite extends DataSourceScanRedactionTest {
  // Force the V1 (FileSourceScanExec) code path for ORC.
  override protected def sparkConf: SparkConf = super.sparkConf
    .set(SQLConf.USE_V1_SOURCE_LIST.key, "orc")

  override protected def getRootPath(df: DataFrame): Path =
    df.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
      .asInstanceOf[FileSourceScanExec].relation.location.rootPaths.head

  test("explain is redacted using SQLConf") {
    withTempDir { dir =>
      val basePath = dir.getCanonicalPath
      spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString)
      val df = spark.read.orc(basePath)
      val replacement = "*********"

      // Respect SparkConf and replace file:/
      assert(isIncluded(df.queryExecution, replacement))
      assert(isIncluded(df.queryExecution, "FileScan"))
      assert(!isIncluded(df.queryExecution, "file:/"))

      withSQLConf(SQLConf.SQL_STRING_REDACTION_PATTERN.key -> "(?i)FileScan") {
        // Respect SQLConf and replace FileScan
        assert(isIncluded(df.queryExecution, replacement))
        assert(!isIncluded(df.queryExecution, "FileScan"))
        assert(isIncluded(df.queryExecution, "file:/"))
      }
    }
  }

  test("FileSourceScanExec metadata") {
    withTempPath { path =>
      val dir = path.getCanonicalPath
      spark.range(0, 10).write.orc(dir)
      val df = spark.read.orc(dir)

      // All of the standard scan-metadata keys should be rendered.
      assert(isIncluded(df.queryExecution, "Format"))
      assert(isIncluded(df.queryExecution, "ReadSchema"))
      assert(isIncluded(df.queryExecution, "Batched"))
      assert(isIncluded(df.queryExecution, "PartitionFilters"))
      assert(isIncluded(df.queryExecution, "PushedFilters"))
      assert(isIncluded(df.queryExecution, "DataFilters"))
      assert(isIncluded(df.queryExecution, "Location"))
    }
  }

  test("SPARK-31793: FileSourceScanExec metadata should contain limited file paths") {
    withTempPath { path =>
      // create a sub-directory with long name so that each root path will always exceed the limit
      // this is to ensure we always test the case for the path truncation
      val dataDirName = Random.alphanumeric.take(100).toList.mkString
      val dataDir = new File(path, dataDirName)
      dataDir.mkdir()

      val partitionCol = "partitionCol"
      spark.range(10)
        .select("id", "id")
        .toDF("value", partitionCol)
        .write
        .partitionBy(partitionCol)
        .orc(dataDir.getCanonicalPath)
      val paths = (0 to 9).map(i => new File(dataDir, s"$partitionCol=$i").getCanonicalPath)
      val plan = spark.read.orc(paths: _*).queryExecution.executedPlan
      val location = plan collectFirst {
        case f: FileSourceScanExec => f.metadata("Location")
      }
      assert(location.isDefined)
      // The location metadata should at least contain one path
      assert(location.get.contains(paths.head))

      // The location metadata should have the number of root paths
      assert(location.get.contains("(10 paths)"))

      // The location metadata should have bracket wrapping paths
      assert(location.get.indexOf('[') > -1)
      assert(location.get.indexOf(']') > -1)

      // extract paths in location metadata (removing classname, brackets, separators)
      val pathsInLocation = location.get.substring(
        location.get.indexOf('[') + 1, location.get.indexOf(']')).split(", ").toSeq

      // after truncation, only the first path plus the "..." indicator remain (2 entries)
      assert(pathsInLocation.size == 2)
      // indicator ("...") should be available
      assert(pathsInLocation.exists(_.contains("...")))
    }
  }
}
/**
* Suite that tests the redaction of BatchScanExec.
*/
class DataSourceV2ScanExecRedactionSuite extends DataSourceScanRedactionTest {
  // Empty V1 source list forces the V2 (BatchScanExec) code path.
  override protected def sparkConf: SparkConf = super.sparkConf
    .set(SQLConf.USE_V1_SOURCE_LIST.key, "")

  override protected def getRootPath(df: DataFrame): Path =
    df.queryExecution.sparkPlan.find(_.isInstanceOf[BatchScanExec]).get
      .asInstanceOf[BatchScanExec].scan.asInstanceOf[OrcScan].fileIndex.rootPaths.head

  test("explain is redacted using SQLConf") {
    withTempDir { dir =>
      val basePath = dir.getCanonicalPath
      spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString)
      val df = spark.read.orc(basePath)
      val replacement = "*********"

      // Respect SparkConf and replace file:/
      assert(isIncluded(df.queryExecution, replacement))
      assert(isIncluded(df.queryExecution, "BatchScan"))
      assert(!isIncluded(df.queryExecution, "file:/"))

      withSQLConf(SQLConf.SQL_STRING_REDACTION_PATTERN.key -> "(?i)BatchScan") {
        // Respect SQLConf and replace BatchScan
        assert(isIncluded(df.queryExecution, replacement))
        assert(!isIncluded(df.queryExecution, "BatchScan"))
        assert(isIncluded(df.queryExecution, "file:/"))
      }
    }
  }

  test("FileScan description") {
    Seq("json", "orc", "parquet").foreach { format =>
      withTempPath { path =>
        val dir = path.getCanonicalPath
        spark.range(0, 10).write.format(format).save(dir)
        val df = spark.read.format(format).load(dir)

        withClue(s"Source '$format':") {
          assert(isIncluded(df.queryExecution, "ReadSchema"))
          assert(isIncluded(df.queryExecution, "BatchScan"))
          // Only formats with filter pushdown render PushedFilters.
          if (Seq("orc", "parquet").contains(format)) {
            assert(isIncluded(df.queryExecution, "PushedFilters"))
          }
          assert(isIncluded(df.queryExecution, "Location"))
        }
      }
    }
  }
}
| ueshin/apache-spark | sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala | Scala | apache-2.0 | 8,462 |
package knot.data.buffers
import org.scalatest.FunSpec
import org.scalatest.Matchers._
/**
 * Unit tests for UnsafeArrayBuffer: primitive put/get round-trips at explicit
 * byte offsets, slicing (including nested slices and aliasing with the parent
 * buffer), and bulk byte transfers.
 */
class UnsafeArrayBufferSpec extends FunSpec {

  // Fresh zero-filled buffer of `size` bytes backed by a new array.
  private def getBuffer(size: Int): UnsafeArrayBuffer = {
    new UnsafeArrayBuffer(Array.ofDim[Byte](size), 0, size)
  }

  describe("UnsafeArrayBuffer") {
    it("byte") {
      val buffer = getBuffer(2)
      buffer.putByte(0, 1.toByte)
      buffer.putByte(1, 2.toByte)
      buffer.getByte(0) should be(1.toByte)
      buffer.getByte(1) should be(2.toByte)
      buffer.size should be(2)
    }

    it("int") {
      val buffer = getBuffer(8)
      buffer.putInt(0, Int.MinValue)
      buffer.putInt(4, Int.MaxValue)
      buffer.getInt(0) should be(Int.MinValue)
      buffer.getInt(4) should be(Int.MaxValue)
      buffer.size should be(8)
    }

    it("short") {
      val buffer = getBuffer(4)
      buffer.putShort(0, Short.MinValue)
      buffer.putShort(2, Short.MaxValue)
      buffer.getShort(0) should be(Short.MinValue)
      buffer.getShort(2) should be(Short.MaxValue)
      buffer.size should be(4)
    }

    it("long") {
      val buffer = getBuffer(16)
      buffer.putLong(0, Long.MinValue)
      buffer.putLong(8, Long.MaxValue)
      buffer.getLong(0) should be(Long.MinValue)
      buffer.getLong(8) should be(Long.MaxValue)
      buffer.size should be(16)
    }

    it("float") {
      val buffer = getBuffer(8)
      buffer.putFloat(0, Float.MinValue)
      buffer.putFloat(4, Float.MaxValue)
      buffer.getFloat(0) should be(Float.MinValue)
      buffer.getFloat(4) should be(Float.MaxValue)
      buffer.size should be(8)
    }

    it("double") {
      val buffer = getBuffer(16)
      buffer.putDouble(0, Double.MinValue)
      buffer.putDouble(8, Double.MaxValue)
      buffer.getDouble(0) should be(Double.MinValue)
      buffer.getDouble(8) should be(Double.MaxValue)
      buffer.size should be(16)
    }

    it("slice") {
      // A slice re-bases offsets: offset 0 of the slice is `start` of the parent.
      val buffer = getBuffer(8)
      buffer.putInt(0, 1)
      buffer.putInt(4, 2)
      val slice1 = buffer.slice(0, 4)
      slice1.getInt(0) should be(1)
      slice1.size should be(4)
      val slice2 = buffer.slice(4, 4)
      slice2.getInt(0) should be(2)
      slice2.size should be(4)
    }

    it("slice and slice") {
      // Slicing a slice composes offsets relative to the original buffer.
      val buffer = getBuffer(16)
      buffer.putInt(0, 1)
      buffer.putInt(4, 2)
      buffer.putInt(8, 3)
      buffer.putInt(12, 4)
      val slice = buffer.slice(8, 8).slice(4, 4)
      slice.getInt(0) should be(4)
      slice.size should be(4)
    }

    it("slice share original buffer") {
      // Slices are views, not copies: writes through the parent are visible.
      val buffer = getBuffer(8)
      buffer.putInt(0, 1)
      buffer.putInt(4, 2)
      val slice = buffer.slice(4, 4)
      buffer.putInt(0, 10)
      buffer.putInt(4, 20)
      slice.getInt(0) should be(20)
    }

    it("getBytes") {
      // Ints are laid out big-endian in these expectations.
      val buffer = getBuffer(8)
      buffer.putInt(0, 1)
      buffer.putInt(4, 2)
      val dst = Array.ofDim[Byte](8)
      buffer.getBytes(0, dst, 0, 4)
      dst should be(Array(0.toByte, 0.toByte, 0.toByte, 1.toByte, 0.toByte, 0.toByte, 0.toByte, 0.toByte))
      buffer.getBytes(4, dst, 4, 4)
      dst should be(Array(0.toByte, 0.toByte, 0.toByte, 1.toByte, 0.toByte, 0.toByte, 0.toByte, 2.toByte))
    }

    it("putBytes") {
      val buffer = getBuffer(4)
      val src = Array(0.toByte, 0.toByte, 0.toByte, 1.toByte, 0.toByte, 0.toByte, 0.toByte, 2.toByte)
      val r = Array.ofDim[Byte](8)
      buffer.putBytes(0, src, 0, 4)
      buffer.getBytes(0, r, 0, 4)
      r should be(Array(0.toByte, 0.toByte, 0.toByte, 1.toByte, 0.toByte, 0.toByte, 0.toByte, 0.toByte))
      buffer.putBytes(4, src, 4, 4)
      buffer.getBytes(4, r, 4, 4)
      r should be(Array(0.toByte, 0.toByte, 0.toByte, 1.toByte, 0.toByte, 0.toByte, 0.toByte, 2.toByte))
    }
  }
}
| defvar/knot | knot-data/src/test/scala/knot/data/buffers/UnsafeArrayBufferSpec.scala | Scala | mit | 3,740 |
package com.tierline.scala.activemodel.multitenant.domain
/**
* Created by tomokazu on 17/05/22.
*/
// Minimal domain-entity fixture for the multi-tenant ActiveModel tests.
// Intentionally empty: presumably mapped by convention elsewhere — TODO confirm.
class User {
}
| tierline/scala-activemodel | src/test/scala/com/tierline/scala/activemodel/multitenant/domain/User.scala | Scala | mit | 121 |
/**
* Copyright (C) 2010-2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.limiting
/** Factory for the default token-bucket implementation. */
object TokenBucket {
  def apply(params: TokenBucketParameters, clock: Clock) =
    new FixedIntervalTokenBucket(params, clock)
}
/** A rate-limiting token bucket: consume one token per action, refill over time. */
trait TokenBucket {
  // Attempts to take one token; true iff a token was available.
  def tryConsume: Boolean
  // Tops up the bucket according to elapsed time.
  def refill()
}
/**
 * Token bucket that refills `params.refillAmount` tokens every
 * `params.refillInterval` milliseconds, capped at `params.capacity`.
 *
 * Fixes relative to the previous version:
 *  - now extends [[TokenBucket]], so the companion factory actually produces
 *    values of the trait type;
 *  - `refill()` advances `lastDrip` by whole multiples of
 *    `params.refillInterval` instead of a hard-coded 1000 ms, which previously
 *    desynchronised buckets configured with a non-1-second interval.
 *
 * Thread-safe: consumption and refill are guarded by this instance's monitor.
 */
class FixedIntervalTokenBucket(params: TokenBucketParameters, clock: Clock) extends TokenBucket {
  // Timestamp (ms) of the last refill boundary accounted for.
  private var lastDrip = clock.currentTimeMillis
  // Tokens currently available for consumption.
  private var volume: Long = params.initialVolume

  /** Takes one token if available, refilling first; true iff a token was taken. */
  def tryConsume: Boolean = {
    synchronized {
      refill()
      if (volume > 0) {
        volume -= 1
        true
      } else {
        false
      }
    }
  }

  /** Adds tokens for each whole refill interval elapsed since `lastDrip`, up to capacity. */
  def refill() {
    val now = clock.currentTimeMillis
    volume = math.min(
      volume + params.refillAmount * floor((now - lastDrip) / params.refillInterval),
      params.capacity)
    if (now - lastDrip >= params.refillInterval) {
      // Advance by a whole number of intervals; the remainder carries over so
      // partial intervals are not lost.
      lastDrip = lastDrip + (now - lastDrip) / params.refillInterval * params.refillInterval
    }
  }

  private def floor(d: Double): Long = math.round(math.floor(d))
}
/** Tunable parameters for a token bucket. Defaults: start empty, +1 token per second. */
trait TokenBucketParameters {
  // Maximum number of tokens the bucket can hold.
  def capacity: Long
  // Tokens present at construction time.
  def initialVolume: Long = 0L
  def refillInterval: Long = 1000L // milliseconds
  // Tokens added per refill interval.
  def refillAmount: Long = 1L
}
| 0x6e6562/diffa | kernel/src/main/scala/net/lshift/diffa/kernel/limiting/TokenBucket.scala | Scala | apache-2.0 | 1,734 |
package lagcomp
/**
 * A fixed-size, time-indexed memo cache. Slot `time % size` holds at most one
 * (time, key, value) triple; storing a new triple evicts whatever occupied the
 * slot. Not thread-safe.
 *
 * Fix: on a cache miss the by-name `value` argument was previously evaluated
 * twice (once when building the stored tuple and once as the return
 * expression); it is now forced exactly once.
 */
class WeakMap[K, V](size: Int) {

  // Backing store; unoccupied slots are null.
  val array = new Array[(Int, K, V)](size)

  /**
   * Returns the cached value for (time, key) if slot `time % size` holds that
   * exact pair; otherwise evaluates `value` once, caches it (evicting the
   * slot's previous occupant) and returns it.
   *
   * @throws IndexOutOfBoundsException if `time` is negative
   */
  def getOrElseUpdate(time: Int, key: K, value: => V): V = {
    if (time < 0) throw new IndexOutOfBoundsException(time.toString)
    val index = time % size
    val element = array(index)
    if (element != null && element._1 == time && element._2 == key) {
      element._3
    } else {
      val computed = value // force the by-name argument exactly once
      array(index) = (time, key, computed)
      computed
    }
  }

  /** Lookup without update is not implemented: always None (interface kept for compatibility). */
  def get(time: Int, key: K): Option[V] = None
}
| OlivierBlanvillain/scala-lag-comp | lag-comp/src/main/scala/WeakMap.scala | Scala | mit | 497 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2016, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
/**
* `ValueOf[T]` provides the unique value of the type `T` where `T` is a type which has a
* single inhabitant. Eligible types are singleton types of the form `stablePath.type`,
* Unit and singleton types corresponding to value literals.
*
* Instances of `ValueOf[T]` are provided implicitly for all eligible types. Typically
* an instance would be required where a runtime value corresponding to a type level
* computation is needed.
* For example, we might define a type `Residue[M <: Int]` corresponding to the group of
* integers modulo `M`. We could then mandate that residues can be summed only when they
* are parameterized by the same modulus,
*
* {{{
* case class Residue[M <: Int](n: Int) extends AnyVal {
* def +(rhs: Residue[M])(implicit m: ValueOf[M]): Residue[M] =
* Residue((this.n + rhs.n) % valueOf[M])
* }
*
* val fiveModTen = Residue[10](5)
* val nineModTen = Residue[10](9)
*
* fiveModTen + nineModTen // OK == Residue[10](4)
*
* val fourModEleven = Residue[11](4)
*
* fiveModTen + fourModEleven // compiler error: type mismatch;
* // found : Residue[11]
* // required: Residue[10]
* }}}
*
* Notice that here the modulus is encoded in the type of the values and so does not
* incur any additional per-value storage cost. When a runtime value of the modulus
* is required in the implementation of `+` it is provided at the call site via the
* implicit argument `m` of type `ValueOf[M]`.
*/
// Value class (extends AnyVal): wrapping the unique inhabitant of T normally
// incurs no runtime allocation.
@scala.annotation.implicitNotFound(msg = "No singleton value available for ${T}.")
final class ValueOf[T](val value: T) extends AnyVal
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/ValueOf.scala | Scala | apache-2.0 | 2,191 |
package controllers.submission
import play.api.Play._
import play.api.mvc.{Call, Controller}
import models.view.{CachedChangeOfCircs, CachedClaim}
import services.ClaimTransactionComponent
import services.submission.{ClaimSubmissionService, AsyncClaimSubmissionService}
import play.api.Logger
import models.domain.Claim
import AsyncClaimSubmissionService._
import ClaimSubmissionService._
import play.api.i18n._
/**
 * Routing helpers shared between the claim and change-of-circumstances
 * submission flows: each method picks the full-claim or CofC route based on
 * the claim's type.
 */
object StatusRoutingController {

  // Single place for the full-claim-vs-CofC decision used by every redirect.
  private def isFullClaim(claim: Claim): Boolean = claimType(claim) == FULL_CLAIM

  /** Thank-you page shown once the submission has been accepted. */
  def redirectThankYou(implicit claim: Claim): Call =
    if (isFullClaim(claim)) controllers.routes.ClaimEnding.thankyou
    else controllers.routes.CircsEnding.thankyou

  /** Polling page shown while the submission is still in flight. */
  def redirectSubmitting(implicit claim: Claim): Call =
    if (isFullClaim(claim)) controllers.submission.routes.ClaimStatusRoutingController.present
    else controllers.submission.routes.CofCStatusRoutingController.present

  /** Error page offering the user another attempt. */
  def redirectErrorRetry(implicit claim: Claim): Call =
    if (isFullClaim(claim)) controllers.submission.routes.ClaimStatusRoutingController.errorRetry
    else controllers.submission.routes.CofCStatusRoutingController.errorRetry

  /** Terminal error page. */
  def redirectError(implicit claim: Claim): Call =
    if (isFullClaim(claim)) controllers.submission.routes.ClaimStatusRoutingController.error
    else controllers.submission.routes.CofCStatusRoutingController.error

  /** Page shown when the transaction status could not be found in time. */
  def redirectTimeout(implicit claim: Claim): Call =
    if (isFullClaim(claim)) controllers.routes.ClaimEnding.timeout()
    else controllers.routes.CircsEnding.timeout()
}
/**
 * Controller that polls the submission transaction status and routes the user
 * to the thank-you, still-submitting, retry, error or timeout page accordingly.
 */
class StatusRoutingController extends Controller with CachedClaim with ClaimTransactionComponent with I18nSupport {
  override val messagesApi: MessagesApi = current.injector.instanceOf[MMessages]
  val claimTransaction = new ClaimTransaction

  // Renders the async "submitting" holding page.
  def present = claiming{ implicit claim => implicit request => implicit request2lang =>
    Logger.debug(s"Showing async submitting ${claim.key} ${claim.uuid}")
    Ok(views.html.common.asyncSubmitting(request2lang))
  }

  // Looks up the transaction status and redirects based on its value; a
  // missing status is treated as a timeout.
  def submit = claiming { implicit claim => implicit request => implicit request2lang =>
    import StatusRoutingController._
    val transactionStatus = claimTransaction.getTransactionStatusById(claim.transactionId.getOrElse(""))
    Logger.info(s"Checking transaction status: $transactionStatus for ${claim.key} ${claim.uuid}")
    transactionStatus match {
      case Some(ts) if ts.status == SUCCESS || ts.status == ACKNOWLEDGED => Redirect(redirectThankYou)
      case Some(ts) if ts.status == GENERATED || ts.status == SUBMITTED => Redirect(redirectSubmitting)
      case Some(ts) if ts.status == ClaimSubmissionService.SERVICE_UNAVAILABLE => Redirect(redirectErrorRetry)
      case None => Redirect(redirectTimeout)
      case _ => Redirect(redirectError)
    }
  }

  // Terminal error page; ends the claim session.
  def error = endingOnError { implicit claim => implicit request => implicit request2lang =>
    Ok(views.html.common.error(startPage))
  }

  // Retry page pointing back at the appropriate declaration step.
  def errorRetry = claiming { implicit claim => implicit request => implicit request2lang =>
    if (claimType(claim) == FULL_CLAIM){
      Ok(views.html.common.error_retry(controllers.s_consent_and_declaration.routes.GDeclaration.present().url))
    }else{
      Ok(views.html.common.error_retry(controllers.circs.consent_and_declaration.routes.GCircsDeclaration.present().url))
    }
  }
}
// Concrete routing endpoints: one bound to the full-claim cache, one to the
// change-of-circumstances cache.
object ClaimStatusRoutingController extends StatusRoutingController with CachedClaim
object CofCStatusRoutingController extends StatusRoutingController with CachedChangeOfCircs
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/controllers/submission/StatusRoutingController.scala | Scala | mit | 3,680 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.pmml.udfs
import java.util.UUID
import org.joda.time.{LocalDate, DateTime}
import com.ligadata.pmml.udfs._
import com.ligadata.pmml.udfs.Udfs._
import scala.reflect.runtime.universe._
import org.joda.time.LocalDate
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import org.joda.time.format.DateTimeFormatter
import org.apache.logging.log4j.{ Logger, LogManager }
import com.ligadata.pmml.runtime._
/**
* Sample udfs .. com.ligadata.pmml.udfs.CustomUdfs
This is a sample udf library to illustrate how one would add their own library of
functions to the Kamanja system such that they could be used in the PMML models run
there.
The udfs used in Kamanja must be declared in an object (i.e., they are static methods
for those of you familiar with java).
NOTE: If you want to invoke functions in the core library, make your UDF project that
builds your UDFs dependent on PmmlUdfs:
lazy val CustomUdfLib = project.in(file("SampleApplication/CustomUdfLib")) dependsOn(PmmlUdfs)
Include the following two lines in your imports:
import com.ligadata.pmml.udfs._
import com.ligadata.pmml.udfs.Udfs._
The sample udf, ISO8601DateFmt, does use a function from the core udfs, so we include it here.
Once it is built, you can run the extractUDFLibMetadata.scala script on it that
will produce the metadata from it.
Assuming you have the sbtProjDependencies.scala and extractUDFLibMetadata.scala
scripts in a folder on your PATH, you can use this command from 'trunk' to compile it:
extractUdfLibMetadata.scala --sbtProject PmmlUdfs
--fullObjectPath com.mycompany.pmml.udfs.SomeCustomUdfs
--namespace Pmml
--versionNumber 1000000
--typeDefsPath /tmp/typesforSomeCustomUdfs.json
--fcnDefsPath /tmp/fcnsforSomCustomUdfs.json
The version number supplied should be greater than any prior version used for the same
Udfs. This is currently not checked. It will complain when you try to load the metadata instead.
The 1000000 will produce a "000000.000001.000000" version number for each of your udf functions.
The last two arguments are paths to the json that is produced by the script looking
into the udf jar for PmmlUdfs at the functions on the 'fullObjectPath'.
As written, the types do not really need to be loaded into the MetadataAPI, as they
have all been defined in the Kamanja metadata bootstrap. The udfs json file must be loaded
however. The types would be needed if you introduced a type that has not been previously declared
in the bootstrap. If you are not sure there is no harm loading the types file. If one of
the types is already present, an error will be logged to that effect. This is probably ok,
however, you should inspect the types that are duplicate to verify they reflect what
your intention is.
To use this udf library in one of your models, you need to reference it in the model
itself. In the DataDictionary section, put the following:
<DataField name="UDFSearchPath" displayName="UDFSearchPath" dataType="container">
<Value value="com.mycompany.pmml.udfs.CustomUdfs" property="valid"/>
</DataField>
Important: Don't forget to upload your library to the Kamanja cluster. There is an upload jar
protocol for this purpose.
*/
object CustomUdfs extends LogTrait {

  /**
   * Returns a random UUID string
   * NOTE: idGen() is available in the core udf library now.. this one is deprecated
   */
  def ID_GEN() : String = {
    UUID.randomUUID().toString;
  }

  /**
   * Formats the last of the supplied iso8601 date integers with the given
   * joda-time pattern (see org.joda.time.format.DateTimeFormat for the
   * symbol table: y=year, M=month, d=day, H=hour, m=minute, s=second, ...).
   *
   * @param fmtStr a joda-time DateTimeFormat pattern
   * @param yyyymmdds one or more iso8601 dates... only the last will print
   * @return string rep of this date
   * NOTE: iso8601DateFmt(String, Int) is available in the core udf library now.. this one is deprecated
   */
  def dateBlock(fmtStr : String, yyyymmdds : Any*): String = {
    val dateTime : DateTime = toDateTime(yyyymmdds.toList.last.asInstanceOf[Int])
    val fmt : DateTimeFormatter = DateTimeFormat.forPattern(fmtStr);
    val str : String = fmt.print(dateTime);
    str
  }

  /**
   * Logs contextMsg/eventMsg at the requested severity and passes `bool`
   * through unchanged (so it can be threaded into PMML logic).
   *
   * @param severity one of {error, warn, info, debug, trace}; anything else logs at trace
   * @param contextMsg a string describing the context of the message
   * @param eventMsg a string describing what actually happened
   * @param bool returned as this function's result
   * @return bool
   * NOTE: logMsg(String,String,String,Boolean) is available in the core udf library now.. this one is deprecated
   */
  def LogMsg(severity : String, contextMsg : String, eventMsg : String, bool : Boolean) : Boolean = {
    if (severity != null && contextMsg != null && eventMsg != null) {
      val sev : String = severity.toLowerCase
      sev match {
        case "error" => logger.error(s"$contextMsg...$eventMsg")
        case "warn" => logger.warn(s"$contextMsg...$eventMsg")
        case "info" => logger.info(s"$contextMsg...$eventMsg")
        case "debug" => logger.debug(s"$contextMsg...$eventMsg")
        case "trace" => logger.trace(s"$contextMsg...$eventMsg")
        case _ => logger.trace(s"$contextMsg...$eventMsg")
      }
    } else {
      logger.error("LogMsg called with bogus arguments")
    }
    bool
  }

  /**
   * Concatenates the string representations of the supplied arguments;
   * null arguments are skipped.
   * NOTE: concat(args : Any*) is available in the core udf library now.. this one is deprecated
   */
  def Concat(args : Any*) : String = {
    val buffer : StringBuilder = new StringBuilder
    args.foreach(arg => if (arg != null) buffer.append(arg.toString))
    buffer.toString
  }

  /**
   * Counts case-insensitive occurrences of `word` in `lowerInput` (which must
   * already be lower-cased). A token counts when it appears surrounded by
   * spaces, at the start or end of the input, or equals the whole input.
   * NOTE: the token is interpolated into a regex unescaped, so regex
   * metacharacters in tokens behave as regex syntax (preserved behavior).
   */
  private def countTokenOccurrences(lowerInput: String, word: String): Int = {
    val lcWord = word.toLowerCase
    var count = (" " + lcWord + " ").r.findAllIn(lowerInput).length
    if (lowerInput.endsWith(" " + lcWord)) count += 1
    if (lowerInput.startsWith(lcWord + " ")) count += 1
    if (lowerInput.equalsIgnoreCase(word)) count += 1
    count
  }

  /**
   * matchTermsetCount - Analyzes a string for the presence of specified tokens and returns an
   * array of integers where each element is the number of times the token at
   * that position of `context` appears in `inputString`. CASE INSENSITIVE.
   *
   * Fix: a leftover debug println per token has been removed.
   *
   * @param inputString String to analyze
   * @param context the list of tokens to compare the inputString to
   * @return per-token occurrence counts, positionally aligned with `context`
   */
  def matchTermsetCount (inputString: String, context: Array[String]): Array[Integer] = {
    val lcIS = inputString.toLowerCase
    val outArray: Array[Integer] = new Array[Integer](context.size)
    var pos = 0
    context.foreach { word =>
      outArray(pos) = countTokenOccurrences(lcIS, word)
      pos += 1
    }
    outArray
  }

  /**
   * Returns the tokens from `context` that occur in `inputString`, joined
   * with a leading '.' before each matching token (e.g. ".foo.bar").
   * Null input or context yields "". CASE INSENSITIVE.
   */
  def getMatchingTokens (inputString: String, context: Array[String]): String = {
    if (inputString == null || context == null) ""
    else {
      val lcIS = inputString.toLowerCase
      val buffer = new StringBuilder
      context.foreach { word =>
        if (countTokenOccurrences(lcIS, word) > 0) buffer.append('.').append(word)
      }
      buffer.toString
    }
  }

  /**
   * matchTermsetBoolean - True when the total number of occurrences of the
   * `context` tokens within `inputString` is at least `degree`.
   * Null input or context yields false. CASE INSENSITIVE.
   *
   * @param inputString String to analyze
   * @param context the list of tokens to compare the inputString to
   * @param degree the threshold
   * @return Boolean
   */
  def matchTermsetBoolean (inputString: String, context: Array[String], degree: Integer): Boolean = {
    if (inputString == null || context == null) false
    else {
      var total: Int = 0
      matchTermsetCount(inputString, context).foreach(v => total += v)
      total >= degree
    }
  }

  /** Null-safe string length: 0 for null. */
  def Length(str : String) : Int = {
    if (str != null) str.size else 0
  }
}
} | traytonwhite/Kamanja | trunk/Pmml/PmmlUdfs/src/main/scala/com/ligadata/pmml/udfs/CustomUdfs.scala | Scala | apache-2.0 | 12,452 |
package is.hail.types.physical
import is.hail.annotations.{Annotation, Region, UnsafeRow, UnsafeUtils}
import is.hail.asm4s._
import is.hail.expr.ir.{EmitCode, EmitCodeBuilder}
import is.hail.types.BaseStruct
import is.hail.types.physical.stypes.SValue
import is.hail.types.physical.stypes.concrete.{SBaseStructPointer, SBaseStructPointerValue}
import is.hail.utils._
import org.apache.spark.sql.Row
abstract class PCanonicalBaseStruct(val types: Array[PType]) extends PBaseStruct {
if (!types.forall(_.isRealizable)) {
throw new AssertionError(
s"found non realizable type(s) ${ types.filter(!_.isRealizable).mkString(", ") } in ${ types.mkString(", ") }")
}
  // For each field: the index of its missing-bit (only meaningful for optional
  // fields), plus the total number of optional fields.
  override val (missingIdx: Array[Int], nMissing: Int) = BaseStruct.getMissingIndexAndCount(types.map(_.required))
  // Missing bits are packed into whole bytes at the start of the struct.
  val nMissingBytes: Int = UnsafeUtils.packBitsToBytes(nMissing)
  // Byte offset of each field relative to the struct's base address; filled in
  // by getByteSizeAndOffsets below.
  val byteOffsets: Array[Long] = new Array[Long](size)
  override val byteSize: Long = getByteSizeAndOffsets(types.map(_.byteSize), types.map(_.alignment), nMissingBytes, byteOffsets)
  override val alignment: Long = PBaseStruct.alignment(types)
  /** Allocates (uninitialised) storage for one struct value in `region`. */
  override def allocate(region: Region): Long = {
    region.allocate(alignment, byteSize)
  }

  /** Staged variant of allocate. */
  override def allocate(region: Code[Region]): Code[Long] =
    region.allocate(alignment, byteSize)
  /**
   * Clears (or, with setMissing=true, sets) all missing bits of a freshly
   * allocated struct. No-op when every field is required (no missing bytes).
   */
  override def initialize(structAddress: Long, setMissing: Boolean = false): Unit = {
    if (allFieldsRequired) {
      return
    }
    Region.setMemory(structAddress, nMissingBytes.toLong, if (setMissing) 0xFF.toByte else 0.toByte)
  }

  /** Staged variant of initialize. */
  override def stagedInitialize(cb: EmitCodeBuilder, structAddress: Code[Long], setMissing: Boolean = false): Unit = {
    if (!allFieldsRequired) {
      cb += Region.setMemory(structAddress, const(nMissingBytes.toLong), const(if (setMissing) 0xFF.toByte else 0.toByte))
    }
  }
  // A required field is always defined; an optional field is defined iff its
  // missing bit is clear.
  override def isFieldDefined(offset: Long, fieldIdx: Int): Boolean =
    fieldRequired(fieldIdx) || !Region.loadBit(offset, missingIdx(fieldIdx))

  /** Staged missing check; constant-folds to false for required fields. */
  override def isFieldMissing(cb: EmitCodeBuilder, offset: Code[Long], fieldIdx: Int): Value[Boolean] =
    if (fieldRequired(fieldIdx))
      false
    else
      cb.memoize(Region.loadBit(offset, missingIdx(fieldIdx).toLong))

  /** Marks an optional field missing; asserts the field is not required. */
  override def setFieldMissing(offset: Long, fieldIdx: Int) {
    assert(!fieldRequired(fieldIdx))
    Region.setBit(offset, missingIdx(fieldIdx))
  }

  /** Staged variant; emits a runtime fatal for required fields. */
  override def setFieldMissing(cb: EmitCodeBuilder, offset: Code[Long], fieldIdx: Int): Unit = {
    if (!fieldRequired(fieldIdx))
      cb += Region.setBit(offset, missingIdx(fieldIdx).toLong)
    else {
      cb._fatal(s"Required field cannot be missing.")
    }
  }

  /** Clears the missing bit; no-op for required fields (always present). */
  override def setFieldPresent(offset: Long, fieldIdx: Int) {
    if (!fieldRequired(fieldIdx))
      Region.clearBit(offset, missingIdx(fieldIdx))
  }

  /** Staged variant of setFieldPresent. */
  override def setFieldPresent(cb: EmitCodeBuilder, offset: Code[Long], fieldIdx: Int): Unit = {
    if (!fieldRequired(fieldIdx))
      cb += Region.clearBit(offset, missingIdx(fieldIdx).toLong)
  }
  /** Address of field `fieldIdx` within the struct at `structAddress`. */
  override def fieldOffset(structAddress: Long, fieldIdx: Int): Long =
    structAddress + byteOffsets(fieldIdx)

  /** Staged variant of fieldOffset. */
  override def fieldOffset(structAddress: Code[Long], fieldIdx: Int): Code[Long] =
    structAddress + byteOffsets(fieldIdx)

  /**
   * Loads field `fieldIdx` of the struct at `offset`, dereferencing through
   * the field type's nested representation where applicable.
   */
  override def loadField(offset: Long, fieldIdx: Int): Long = {
    val off = fieldOffset(offset, fieldIdx)
    types(fieldIdx).unstagedLoadFromNested(off)
  }

  /** Staged variant of loadField. */
  override def loadField(offset: Code[Long], fieldIdx: Int): Code[Long] = loadField(fieldOffset(offset, fieldIdx), types(fieldIdx))

  private def loadField(fieldOffset: Code[Long], fieldType: PType): Code[Long] = {
    fieldType.loadFromNested(fieldOffset)
  }
  /** Staged deep copy fix-up: after a flat byte copy of the struct into
    * `dstStructAddress`, rewrites every defined pointer-containing field in place so
    * that the pointed-to data is owned by `region`. */
  def deepPointerCopy(cb: EmitCodeBuilder, region: Value[Region], dstStructAddress: Code[Long]): Unit = {
    val dstAddr = cb.newLocal[Long]("pcbs_dpcopy_dst", dstStructAddress)
    fields.foreach { f =>
      val dstFieldType = f.typ
      if (dstFieldType.containsPointers) {
        cb.ifx(isFieldDefined(cb, dstAddr, f.index),
          {
            val fieldAddr = cb.newLocal[Long]("pcbs_dpcopy_field", fieldOffset(dstAddr, f.index))
            // storeAtAddress with deepCopy=true re-stores the field's current value,
            // copying the referenced memory into `region`.
            dstFieldType.storeAtAddress(cb, fieldAddr, region, dstFieldType.loadCheapSCode(cb, dstFieldType.loadFromNested(fieldAddr)), deepCopy = true)
          })
      }
    }
  }
def deepPointerCopy(region: Region, dstStructAddress: Long) {
var i = 0
while (i < this.size) {
val dstFieldType = this.fields(i).typ
if (dstFieldType.containsPointers && this.isFieldDefined(dstStructAddress, i)) {
val dstFieldAddress = this.fieldOffset(dstStructAddress, i)
val dstFieldAddressFromNested = dstFieldType.unstagedLoadFromNested(dstFieldAddress)
dstFieldType.unstagedStoreAtAddress(dstFieldAddress, region, dstFieldType, dstFieldAddressFromNested, true)
}
i += 1
}
}
  /** Copies a struct from `srcAddress` into `region`, returning the new address.
    * Shallow copies between layout-identical types (modulo requiredness) reuse the
    * source address without allocating. */
  override def _copyFromAddress(region: Region, srcPType: PType, srcAddress: Long, deepCopy: Boolean): Long = {
    if (equalModuloRequired(srcPType) && !deepCopy)
      return srcAddress
    val newAddr = allocate(region)
    unstagedStoreAtAddress(newAddr, region, srcPType.asInstanceOf[PBaseStruct], srcAddress, deepCopy)
    newAddr
  }
  /** Unstaged store of the struct at `srcAddress` (laid out as `srcPType`, which must
    * be a PBaseStruct) into the pre-allocated destination `addr`.
    *
    * Fast path: identical layouts modulo requiredness allow a flat byte copy, with an
    * optional pointer fix-up pass for deep copies. Otherwise the destination starts
    * all-missing and each defined field is stored individually. */
  override def unstagedStoreAtAddress(addr: Long, region: Region, srcPType: PType, srcAddress: Long, deepCopy: Boolean): Unit = {
    val srcStruct = srcPType.asInstanceOf[PBaseStruct]
    if (equalModuloRequired(srcStruct)) {
      Region.copyFrom(srcAddress, addr, byteSize)
      if (deepCopy)
        deepPointerCopy(region, addr)
    } else {
      initialize(addr, setMissing = true)
      var idx = 0
      while (idx < types.length) {
        if (srcStruct.isFieldDefined(srcAddress, idx)) {
          setFieldPresent(addr, idx)
          types(idx).unstagedStoreAtAddress(
            fieldOffset(addr, idx), region, srcStruct.types(idx), srcStruct.loadField(srcAddress, idx), deepCopy)
        } else
          // A missing source field may only map onto an optional destination field.
          assert(!fieldRequired(idx))
        idx += 1
      }
    }
  }
  // SType counterpart; requiredness is dropped since SValues track presence separately.
  override def sType: SBaseStructPointer = SBaseStructPointer(setRequired(false).asInstanceOf[PCanonicalBaseStruct])
  /** Wraps an existing struct address as an SValue without copying. */
  override def loadCheapSCode(cb: EmitCodeBuilder, addr: Code[Long]): SBaseStructPointerValue =
    new SBaseStructPointerValue(sType, cb.memoize(addr))
  /** Stores `value` into `region`, returning the stored struct's address.
    * When the source is already a pointer-backed struct of identical layout (modulo
    * requiredness) and no deep copy is requested, its address is reused unchanged. */
  override def store(cb: EmitCodeBuilder, region: Value[Region], value: SValue, deepCopy: Boolean): Value[Long] = {
    value.st match {
      case SBaseStructPointer(t) if t.equalModuloRequired(this) && !deepCopy =>
        value.asInstanceOf[SBaseStructPointerValue].a
      case _ =>
        val newAddr = cb.memoize(allocate(region))
        storeAtAddress(cb, newAddr, region, value, deepCopy)
        newAddr
    }
  }
  /** Staged store of `value` into the pre-allocated destination `addr`.
    * Layout-compatible pointer-backed sources take a flat byte-copy fast path
    * (plus pointer fix-up for deep copies); anything else is stored field by field. */
  override def storeAtAddress(cb: EmitCodeBuilder, addr: Code[Long], region: Value[Region], value: SValue, deepCopy: Boolean): Unit = {
    value.st match {
      case SBaseStructPointer(t) if t.equalModuloRequired(this) =>
        val pcs = value.asInstanceOf[SBaseStructPointerValue]
        val addrVar = cb.newLocal[Long]("pcbasestruct_store_dest_addr1", addr)
        cb += Region.copyFrom(pcs.a, addrVar, byteSize)
        if (deepCopy)
          deepPointerCopy(cb, region, addrVar)
      case _ =>
        val addrVar = cb.newLocal[Long]("pcbasestruct_store_dest_addr2", addr)
        val pcs = value.asBaseStruct
        // Start all-present; missing source fields flip their bits below.
        stagedInitialize(cb, addrVar, setMissing = false)
        fields.foreach { f =>
          pcs.loadField(cb, f.index)
            .consume(cb,
              {
                setFieldMissing(cb, addrVar, f.index)
              },
              { sv =>
                f.typ.storeAtAddress(cb, fieldOffset(addrVar, f.index), region, sv, deepCopy)
              })
        }
    }
  }
  /** Allocates a new struct in `region` and fills it from `emitFields`, one EmitCode
    * per field (positional; length must equal the struct's field count). Missing
    * field values set the corresponding missing bit. */
  def constructFromFields(cb: EmitCodeBuilder, region: Value[Region], emitFields: IndexedSeq[EmitCode], deepCopy: Boolean): SBaseStructPointerValue = {
    require(emitFields.length == size)
    val addr = cb.newLocal[Long]("pcbs_construct_fields", allocate(region))
    stagedInitialize(cb, addr, setMissing = false)
    emitFields.zipWithIndex.foreach { case (ev, i) =>
      ev.toI(cb)
        .consume(cb,
          setFieldMissing(cb, addr, i),
          { sc =>
            types(i).storeAtAddress(cb, fieldOffset(addr, i), region, sc, deepCopy = deepCopy)
          }
        )
    }
    new SBaseStructPointerValue(sType, addr)
  }
  /** Allocates space in `region` and stores the JVM-side `annotation` (a Row) there,
    * returning the new address. */
  override def unstagedStoreJavaObject(annotation: Annotation, region: Region): Long = {
    val addr = allocate(region)
    unstagedStoreJavaObjectAtAddress(addr, annotation, region)
    addr
  }
  /** Stores a JVM `Row` into the pre-allocated struct at `addr`.
    * An UnsafeRow is copied wholesale from its off-heap backing (deep-copying when it
    * lives in a different region); any other Row is stored field by field, with null
    * entries mapped to missing fields. */
  override def unstagedStoreJavaObjectAtAddress(addr: Long, annotation: Annotation, region: Region): Unit = {
    initialize(addr)
    val row = annotation.asInstanceOf[Row]
    row match {
      case ur: UnsafeRow => {
        this.unstagedStoreAtAddress(addr, region, ur.t, ur.offset, region.ne(ur.region))
      }
      case sr: Row => { // NOTE(review): `sr` binder is unused; the body reads `row` directly.
        this.types.zipWithIndex.foreach { case (fieldPt, fieldIdx) =>
          if (row(fieldIdx) == null) {
            setFieldMissing(addr, fieldIdx)
          }
          else {
            val fieldAddress = fieldOffset(addr, fieldIdx)
            fieldPt.unstagedStoreJavaObjectAtAddress(fieldAddress, row(fieldIdx), region)
          }
        }
      }
    }
  }
  // Structs are stored inline in their parent, so a nested load is the identity.
  override def loadFromNested(addr: Code[Long]): Code[Long] = addr
  override def unstagedLoadFromNested(addr: Long): Long = addr
}
| hail-is/hail | hail/src/main/scala/is/hail/types/physical/PCanonicalBaseStruct.scala | Scala | mit | 9,344 |
import java.io.{BufferedReader, FileReader}
/** Two-pass Hack assembler.
  *
  * Pass 1 records label (L-command) targets in the symbol table, drops comments and
  * keeps the remaining commands with their ROM addresses. Pass 2 resolves symbols
  * (allocating RAM addresses from 16 upward for new ones) and emits 16-bit binary
  * strings for A- and C-commands. */
class Assembler {
  // Commands surviving pass 1, paired with their ROM address.
  type ICode = Vector[(Command, Int)]
  type HackMachineCode = Vector[String]
  private val st = SymbolTable()
  private val codeGenerator = CodeGenerator()
  def assemble(asmFile: String): HackMachineCode = secondPass(firstPass(asmFile))
  private def firstPass(asmFile: String): ICode = {
    val parser = Parser(new BufferedReader(new FileReader(asmFile)))
    var intermediateCode = Vector.empty[(Command, Int)]
    var romCounter = 0
    while (parser.hasMoreCommands) {
      parser.advance
      val command = parser.commandType
      command match {
        case LCommand(sym) =>
          // Labels name the NEXT instruction; they occupy no ROM themselves.
          st(sym) = Utils.to16bitBinaryString(romCounter)
        case Comment() => ()
        case _ => {
          intermediateCode :+= (command, romCounter)
          romCounter += 1
        }
      }
    }
    intermediateCode foreach println // NOTE(review): leftover debug output
    intermediateCode
  }
  private def secondPass(iCode: ICode): HackMachineCode = {
    // Variables get fresh RAM addresses starting at 16 (0-15 are predefined).
    var nextAvailRamAddr = 16
    for {
      (command, _) <- iCode
    }
      yield {
        // Only A- and C-commands reach this point: pass 1 filters labels and comments,
        // so the match below is exhaustive by construction.
        command match {
          case ACommand(data) => data match {
            case Right(dec) => Utils.to16bitBinaryString(dec)
            case Left(sym) => {
              if (!st.contains(sym)) {
                st(sym) = Utils.to16bitBinaryString(nextAvailRamAddr)
                nextAvailRamAddr += 1
              }
              st(sym).get
            }
          }
          case CCommand(dest, comp, jump) => {
            // The a-bit selects the M register variant of the computation.
            val abit = {
              if (comp.contains("M")) "1"
              else "0"
            }
            "111" + abit + codeGenerator.comp(comp) + codeGenerator.dest(dest.getOrElse("")) + codeGenerator.jump(jump.getOrElse(""))
          }
        }
      }
  }
}
/** Factory for [[Assembler]] instances. */
object Assembler {
  /** Creates a fresh assembler with its own symbol table and code generator. */
  def apply(): Assembler = new Assembler
}
/** Command-line entry point: assembles an .asm file and prints the machine code.
  * The input path may be given as the first program argument; the historical
  * hard-coded developer path remains as a fallback so old invocations still work. */
object Main extends App {
  private val defaultAsm = "/Users/mlukovic/Documents/nand2tetris/projects/04/mult/Mult.asm"
  val asm = Assembler()
  val code = asm.assemble(args.headOption.getOrElse(defaultAsm))
  code foreach(println)
}
| matija94/show-me-the-code | nand2tetrisAssembler/src/main/scala/Assembler.scala | Scala | mit | 2,011 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.transactor
import forms.TransactorEmailAddressForm
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import views.VatRegViewSpec
import views.html.transactor.TransactorCaptureEmailAddress
/** View tests for the transactor "capture email address" page: renders the template
  * once and checks the title, heading, explanatory paragraph and submit button. */
class TransactorCaptureEmailAddressViewSpec extends VatRegViewSpec {
  val title = "What is your email address?"
  val heading = "What is your email address?"
  val paragraph = "We will send you a confirmation code"
  val buttonText = "Save and continue"
  "Capture Email Address Page" should {
    val form = TransactorEmailAddressForm.form
    // Render the view once; every assertion below inspects the same parsed document.
    val view = app.injector.instanceOf[TransactorCaptureEmailAddress].apply(testCall, form)
    implicit val doc: Document = Jsoup.parse(view.body)
    "have the correct title" in new ViewSetup {
      doc.title must include(title)
    }
    "have the correct heading" in new ViewSetup {
      doc.heading mustBe Some(heading)
    }
    "have the correct paragraph" in new ViewSetup {
      doc.para(1) mustBe Some(paragraph)
    }
    "have the correct continue button" in new ViewSetup {
      doc.submitButton mustBe Some(buttonText)
    }
  }
}
| hmrc/vat-registration-frontend | test/views/transactor/TransactorCaptureEmailAddressViewSpec.scala | Scala | apache-2.0 | 1,714 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.apache.org/licenses/LICENSE-2.0
package org.ensime.api
import java.io.File
/** Shared fixture values for API serialization tests.
  *
  * NOTE: vals in a trait initialize strictly in declaration order; forward references
  * compile but observe the field's default value (null). `completionInfoCList` refers
  * to `completionInfo`, which is declared further down, so it is `lazy` to defer its
  * construction until after trait initialization completes. */
trait EnsimeTestData {
  // duplicating utils to minimise dependencies
  private def canon(s: String): RawFile = {
    val file = new File(s)
    // Fall back to the absolute path when canonicalization fails (e.g. on I/O error).
    val canonised = try file.getCanonicalFile
    catch {
      case t: Throwable => file.getAbsoluteFile
    }
    RawFile(canonised.toPath)
  }
  val typeInfo =
    BasicTypeInfo("type1", DeclaredAs.Method, "FOO.type1", Nil, Nil, None, Nil)
  val interfaceInfo = new InterfaceInfo(typeInfo, Some("DEF"))
  val paramSectionInfo = new ParamSectionInfo(List(("ABC", typeInfo)), false)
  val symFile = canon("/abc")
  val symbolDesignations = SymbolDesignations(
    symFile,
    List(
      SymbolDesignation(7, 9, ObjectSymbol),
      SymbolDesignation(11, 22, TraitSymbol)
    )
  )
  val symbolInfo = new SymbolInfo("name", "localName", None, typeInfo)
  val implicitInfos = List(
    ImplicitConversionInfo(5, 6, symbolInfo),
    ImplicitParamInfo(7, 8, symbolInfo, List(symbolInfo, symbolInfo), true)
  )
  val batchSourceFile = "/abc"
  val packageInfo = new PackageInfo("name", "fullName", Nil)
  val refactorFailure = RefactorFailure(7, "message")
  val file1 = canon("/abc/def")
  val file2 = canon("/test/test/")
  val file3 = canon("/foo/abc")
  val file4 = canon("/foo/def")
  val file5 = canon("/foo/hij")
  val refactorDiffEffect =
    new RefactorDiffEffect(9, RefactorType.AddImport, file2.file.toFile)
  val sourcePos1 = new LineSourcePosition(file1, 57)
  val sourcePos2 = new LineSourcePosition(file1, 59)
  val sourcePos3 = new EmptySourcePosition()
  val sourcePos4 = new OffsetSourcePosition(file1, 456)
  val breakPoint1 = new Breakpoint(RawFile(file1.file), sourcePos1.line)
  val breakPoint2 = new Breakpoint(RawFile(file1.file), sourcePos2.line)
  val breakpointList = BreakpointList(List(breakPoint1), List(breakPoint2))
  val debugStackLocal1 = DebugStackLocal(3, "name1", "summary1", "type1")
  val debugStackLocal2 = DebugStackLocal(4, "name2", "summary2", "type2")
  val debugStackFrame = DebugStackFrame(
    7,
    List(debugStackLocal1, debugStackLocal2),
    4,
    "class1",
    "method1",
    sourcePos1,
    DebugObjectId(7)
  )
  val debugBacktrace =
    DebugBacktrace(List(debugStackFrame), DebugThreadId(17), "thread1")
  val analyzerFile = canon("Analyzer.scala")
  val fooFile = canon("Foo.scala")
  val abd = canon("/abd")
  val methodSearchRes = MethodSearchResult("abc",
                                           "a",
                                           DeclaredAs.Method,
                                           Some(LineSourcePosition(abd, 10)),
                                           "ownerStr")
  val typeSearchRes = TypeSearchResult("abc",
                                       "a",
                                       DeclaredAs.Trait,
                                       Some(LineSourcePosition(abd, 10)))
  val importSuggestions = new ImportSuggestions(
    List(List(methodSearchRes, typeSearchRes))
  )
  val symbolSearchResults = new SymbolSearchResults(
    List(methodSearchRes, typeSearchRes)
  )
  // lazy: `completionInfo` is declared below; an eager val here would capture null
  // during trait initialization (vals initialize in declaration order).
  lazy val completionInfoCList = CompletionInfoList("fooBar", List(completionInfo))
  val fileRange = FileRange("/abc", 7, 9)
  val debugLocObjectRef: DebugLocation = DebugObjectReference(57L)
  val debugNullValue = DebugNullValue("typeNameStr")
  val debugArrayInstValue =
    DebugArrayInstance(3, "typeName", "elementType", DebugObjectId(5L))
  val debugPrimitiveValue = DebugPrimitiveValue("summaryStr", "typeNameStr")
  val debugClassField =
    DebugClassField(19, "nameStr", "typeNameStr", "summaryStr")
  val debugStringValue = DebugStringInstance("summaryStr",
                                             List(debugClassField),
                                             "typeNameStr",
                                             DebugObjectId(6L))
  val note1 = new Note("file1", "note1", NoteError, 23, 33, 19, 8)
  val note2 = new Note("file1", "note2", NoteWarn, 23, 33, 19, 8)
  val noteList = NewScalaNotesEvent(isFull = true, List(note1, note2))
  val entityInfo: TypeInfo = new ArrowTypeInfo("Arrow1",
                                               "example.Arrow1",
                                               typeInfo,
                                               List(paramSectionInfo),
                                               Nil)
  val typeParamA =
    BasicTypeInfo("A", DeclaredAs.Nil, "example.Arrow1.A", Nil, Nil, None, Nil)
  val typeParamB =
    BasicTypeInfo("B", DeclaredAs.Nil, "example.Arrow1.B", Nil, Nil, None, Nil)
  val entityInfoTypeParams: TypeInfo = new ArrowTypeInfo(
    "Arrow1",
    "example.Arrow1",
    typeInfo,
    List(paramSectionInfo),
    List(typeParamA, typeParamB)
  )
  val completionInfo = CompletionInfo(Some(typeInfo), "name", 90, Some("BAZ"))
  val completionInfo2 = CompletionInfo(None, "nam", 91, None, true)
  val completionInfoList = List(completionInfo, completionInfo2)
  val sourceFileInfo =
    SourceFileInfo(file1, Some("{/* code here */}"), Some(file2.file.toFile))
  val sourceFileInfo2 = SourceFileInfo(file1)
  val dtid = DebugThreadId(13)
  val debugLocationArray = DebugArrayElement(DebugObjectId(13), 14)
  val structureView = StructureView(
    List(
      StructureViewMember(
        keyword = "class",
        name = "StructureView",
        position = sourcePos1,
        members = Nil
      ),
      StructureViewMember(
        keyword = "object",
        name = "StructureView",
        position = sourcePos2,
        members = List(
          StructureViewMember(
            keyword = "type",
            name = "BasicType",
            position = sourcePos4,
            members = Nil
          )
        )
      )
    )
  )
  val classInfo =
    ClassInfo(Some("def.foo"), "def$foo", DeclaredAs.Class, Some(sourcePos2))
  val classInfo2 = ClassInfo(None, "java.lang.object", DeclaredAs.Class, None)
  val hierarchyInfo = HierarchyInfo(classInfo2 :: Nil, classInfo :: Nil)
}
| yyadavalli/ensime-server | api/src/test/scala/org/ensime/api/EnsimeTestData.scala | Scala | gpl-3.0 | 6,130 |
package demo
import scalanative.native._, stdlib._, stdio._
import java.lang.Math.{PI, sin, cos, abs, pow, sqrt}
@struct
/** Immutable 3-component vector, also used for RGB colors. Annotated @struct for
  * Scala Native value-type layout; every operation allocates a new Vec. */
@struct
class Vec(val x: Double = 0, val y: Double = 0, val z: Double = 0) {
  @inline def +(v: Vec) = new Vec(x + v.x, y + v.y, z + v.z)
  @inline def -(v: Vec) = new Vec(x - v.x, y - v.y, z - v.z)
  @inline def *(v: Double) = new Vec(x * v, y * v, z * v)
  // Component-wise (Hadamard) product, used for filtering colors.
  @inline def mult(v: Vec) = new Vec(x * v.x, y * v.y, z * v.z)
  @inline def norm() = this * (1d/sqrt(x*x + y*y + z*z))
  @inline def dot(v: Vec) = x * v.x + y * v.y + z * v.z
  // Cross product.
  @inline def %(v: Vec) = new Vec(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x)
}
/** Ray with origin `o` and direction `d` (callers normalize `d` — see main/radiance). */
@struct
class Ray(val o: Vec, val d: Vec)
/** Surface reflection kinds, encoded as Ints (a lightweight enum). */
object Refl {
  type Type = Int
  final val DIFF: Refl.Type = 1 // diffuse
  final val SPEC: Refl.Type = 2 // specular (mirror)
  final val REFR: Refl.Type = 3 // refractive (glass)
}
/** Sphere with radius `rad`, center `p`, emission `e`, color `c` and material `refl`. */
@struct
class Sphere(val rad: Double, val p: Vec, val e: Vec,
             val c: Vec, val refl: Refl.Type) {
  /** Ray-sphere intersection: solves the quadratic t^2*d.d + 2*t*(o-p).d + (o-p).(o-p) - rad^2 = 0.
    * Returns the smallest hit distance greater than eps, or 0 on a miss. */
  def intersect(r: Ray): Double = {
    val op = p - r.o
    var t = 0.0d
    val eps = 1e-4d // tolerance to avoid self-intersection at the surface
    val b = op.dot(r.d)
    var det = b * b - op.dot(op) + rad * rad
    if (det < 0) return 0
    else det = sqrt(det)
    t = b - det
    if (t > eps) t
    else {
      t = b + det
      if (t > eps) t
      else 0
    }
  }
}
/** Binding to the C library's erand48: uniform double in [0, 1), driven by a 48-bit
  * seed held in the 3-element Short buffer `xsubi` (mutated in place on each call). */
@extern object Erand48 {
  def erand48(xsubi: Ptr[Short]): Double = extern
}
import Erand48._
/** smallpt-style Monte Carlo path tracer (port of Kevin Beason's smallpt).
  * Renders a Cornell-box-like scene of 9 spheres to "image0.ppm". */
object Main {
  final val SPHERES = 9
  // Scene: walls are huge spheres; two small spheres (mirror, glass) and one light.
  val spheres = malloc(sizeof[Sphere] * SPHERES).cast[Ptr[Sphere]]
  spheres(0) = new Sphere(1e5, new Vec( 1e5+1,40.8,81.6), new Vec(), new Vec(.75,.25,.25), Refl.DIFF)
  spheres(1) = new Sphere(1e5, new Vec(-1e5+99,40.8,81.6), new Vec(), new Vec(.25,.25,.75), Refl.DIFF)
  spheres(2) = new Sphere(1e5, new Vec(50,40.8, 1e5), new Vec(), new Vec(.75,.75,.75), Refl.DIFF)
  spheres(3) = new Sphere(1e5, new Vec(50,40.8,-1e5+170), new Vec(), new Vec(), Refl.DIFF)
  spheres(4) = new Sphere(1e5, new Vec(50, 1e5, 81.6), new Vec(), new Vec(.75,.75,.75), Refl.DIFF)
  spheres(5) = new Sphere(1e5, new Vec(50,-1e5+81.6,81.6), new Vec(), new Vec(.75,.75,.75), Refl.DIFF)
  spheres(6) = new Sphere(16.5, new Vec(27,16.5,47), new Vec(), new Vec(1,1,1)*.999, Refl.SPEC)
  spheres(7) = new Sphere(16.5, new Vec(73,16.5,78), new Vec(), new Vec(1,1,1)*.999, Refl.REFR)
  spheres(8) = new Sphere(600, new Vec(50,681.6-.27,81.6), new Vec(12,12,12), new Vec(), Refl.DIFF)
  @inline def clamp(x: Double): Double =
    if (x < 0) 0
    else if (x > 1) 1
    else x
  // Gamma-correct (2.2) and map [0,1] to [0,255].
  @inline def toInt(x: Double): Int =
    (pow(clamp(x), 1/2.2) * 255 + .5).toInt
  final val inf = 1e20
  /** Finds the nearest sphere hit by `r`; writes distance to `t` and index to `id`. */
  @inline def intersect(r: Ray, t: Ptr[Double], id: Ptr[Int]): Boolean = {
    !t = inf
    var d = 0.0d
    var i = SPHERES
    while (i != 0) {
      i -= 1
      d = spheres(i).intersect(r)
      if ((d != 0) && d < !t) {
        !t = d
        !id = i
      }
    }
    return !t < inf
  }
  /** Estimates the radiance along ray `r`. `Xi` is the erand48 RNG state (mutated). */
  def radiance(r: Ray, _depth: Int, Xi: Ptr[Short]): Vec = {
    var depth = _depth
    val t = stackalloc[Double]
    val id = stackalloc[Int]
    !id = 0
    if (!intersect(r, t, id)) return new Vec()
    val obj = spheres(!id)
    val x = r.o + r.d * !t
    val n = (x - obj.p).norm
    val nl = if (n.dot(r.d) < 0) n else n * -1 // normal oriented against the ray
    var f = obj.c
    // Russian roulette survival probability: max color component.
    val p =
      if (f.x > f.y && f.x > f.z) f.x
      else if (f.y > f.z) f.y
      else f.z
    depth += 1
    if (depth > 5) {
      if (erand48(Xi) < p) f = f * (1/ p)
      else return obj.e
    }
    if (obj.refl == Refl.DIFF) {
      // Diffuse: sample a cosine-weighted direction in the hemisphere around nl.
      val r1 = 2 * PI * erand48(Xi)
      val r2 = erand48(Xi)
      val r2s = sqrt(r2)
      val w = nl
      val u = ((if (abs(w.x) > .1) new Vec(0, 1) else new Vec(1)) % w).norm()
      val v = w % u
      val d = (u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm()
      return obj.e + f.mult(radiance(new Ray(x, d), depth, Xi))
    } else if (obj.refl == Refl.SPEC) {
      // Perfect mirror reflection.
      return obj.e + f.mult(radiance(new Ray(x, r.d - n * 2 * n.dot(r.d)), depth, Xi))
    }
    // Dielectric (glass): reflect, refract, or split by Fresnel approximation.
    val reflRay = new Ray(x, r.d - n * 2 * n.dot(r.d))
    val into = n.dot(nl) > 0
    val nc = 1d
    val nt = 1.5d
    val nnt = if (into) nc/nt else nt/nc
    val ddn = r.d.dot(nl)
    val cos2t = 1 - nnt * nnt * (1 - ddn * ddn)
    if (cos2t < 0) // total internal reflection
      return obj.e + f.mult(radiance(reflRay, depth, Xi))
    val tdir = (r.d*nnt - n*((if (into) 1 else -1)*(ddn*nnt+sqrt(cos2t)))).norm();
    // Schlick's approximation of the Fresnel reflectance.
    val a = nt - nc
    val b = nt + nc
    val R0 = (a * a) / (b * b)
    val c = 1 - (if (into) -ddn else tdir.dot(n))
    val Re = R0 + (1 - R0) * c * c * c * c * c
    val Tr = 1 - Re
    val P = .25d + .5d * Re
    val RP = Re/P
    val TP = Tr/(1 - P)
    return obj.e + f.mult(
      if (depth > 2)
        (if (erand48(Xi) < P) radiance(reflRay, depth, Xi)*RP
        else radiance(new Ray(x, tdir), depth, Xi)*TP)
      else
        radiance(reflRay, depth, Xi) * Re + radiance(new Ray(x, tdir), depth, Xi) * Tr
    )
  }
  final val W = 800
  final val H = 600
  final val SAMPLES = 2
  def main(args: Array[String]): Unit = {
    val cam = new Ray(new Vec(50d, 52d, 295.6),
      new Vec(0d,-0.042612d,-1d).norm())
    val cx = new Vec(W * .5135d/H)
    val cy = (cx % cam.d).norm() * .5135d
    var r = new Vec()
    val c = malloc(sizeof[Vec] * W * H).cast[Ptr[Vec]]
    val Xi = malloc(sizeof[Short] * 3).cast[Ptr[Short]]
    var y = 0
    while (y < H) {
      fprintf(stderr, c"\\rRendering (%d spp) %5.2f%%", SAMPLES * 4, 100.0 * y/(H-1))
      // Re-seed the RNG per scanline, as in the original smallpt.
      Xi(0) = 0.toShort
      Xi(1) = 0.toShort
      Xi(2) = (y * y * y).toShort
      var x = 0
      while (x < W) {
        val i = (H - y - 1) * W + x
        // 2x2 subpixel grid with tent-filtered jitter, SAMPLES rays per subpixel.
        var sy = 0
        while (sy < 2) {
          var sx = 0
          while (sx < 2) {
            var s = 0
            while (s < SAMPLES) {
              val r1 = 2 * erand48(Xi)
              val r2 = 2 * erand48(Xi)
              val dx = if (r1 < 1d) sqrt(r1) - 1d else 1d - sqrt(2d - r1)
              val dy = if (r2 < 1d) sqrt(r2) - 1d else 1d - sqrt(2d - r2)
              val d = cx * (((sx + .5d + dx)/2d + x)/W - .5d) +
                cy * (((sy + .5d + dy)/2d + y)/H - .5d) + cam.d
              r = r + radiance(new Ray(cam.o+d*140, d.norm()), 0, Xi) * (1.0d/SAMPLES)
              s += 1
            }
            c(i) = c(i) + new Vec(clamp(r.x), clamp(r.y), clamp(r.z)) * .25d
            r = new Vec()
            sx += 1
          }
          sy += 1
        }
        x += 1
      }
      y += 1
    }
    val f = fopen(c"image0.ppm", c"w")
    fprintf(f, c"P3\\n%d %d\\n%d\\n", W, H, 255)
    var i = 0
    while (i < W * H) {
      fprintf(f, c"%d %d %d ", toInt(c(i).x), toInt(c(i).y), toInt(c(i).z))
      i += 1
    }
    // Close (and thereby flush) the output file; previously the FILE* was never
    // closed, risking loss of buffered pixel data if stdio isn't flushed at exit.
    fclose(f)
  }
}
| phdoerfler/scala-native | demo/native/smallpt.scala | Scala | bsd-3-clause | 6,624 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.sumobot.plugins.advice
import akka.actor.{ActorSystem, Props}
import com.sumologic.sumobot.plugins.BotPlugin.InitializePlugin
import com.sumologic.sumobot.test.annotated.BotPluginTestKit
import scala.concurrent.duration._
/** Tests for the Advice plugin: checks its trigger regexes and that it responds to an
  * advice request with an actual piece of advice. */
class AdviceTest extends BotPluginTestKit(ActorSystem("AdviceTest")) {
  val adviceRef = system.actorOf(Props[Advice], "advice")
  // NOTE(review): nulls suggest the plugin needs no state/brain/config for these tests.
  adviceRef ! InitializePlugin(null, null, null)
  "advice" should {
    "match regexes" in {
      "what should I do about beer" should fullyMatch regex Advice.AdviceAbout
      "what should I do about beer and chips" should fullyMatch regex Advice.AdviceAbout
      "what do you think about beer and chips" should fullyMatch regex Advice.AdviceAbout
      "how do you handle cocktails" should fullyMatch regex Advice.AdviceAbout
    }
    "retrieve advice" in {
      adviceRef ! instantMessage("I need some advice")
      confirmOutgoingMessage({
        msg =>
          println(s"ADVICE: ${msg.text}")
          msg.text should not include("No advice")
      }, 5.seconds)
    }
  }
}
| SumoLogic/sumobot | src/test/scala/com/sumologic/sumobot/plugins/advice/AdviceTest.scala | Scala | apache-2.0 | 1,877 |
package abeel.genometools.sort
import java.io.File
import abeel.genometools.Main
import atk.util.NaturalOrderComparator
import net.sf.jannot.source.FileSource
import net.sf.jannot.parser.FastaParser
import scala.collection.JavaConversions._
import java.io.PrintWriter
/** Tool to sort the entries of a multi-fasta (mfa) file in natural alphanumeric
  * order (so e.g. "chr2" precedes "chr10") and re-wrap sequences to a fixed width. */
object MFA extends Main {
  override val description = """Tool to sort mfa files"""

  /** Command-line configuration.
    * @param inputFile  input mfa file (required)
    * @param outputFile destination for the sorted mfa (required)
    * @param n          line width used when re-wrapping sequences */
  case class Config(val inputFile: File = null, val outputFile: File = null, val n: Int = 80)

  override def main(args: Array[String]): Unit = {
    val parser = new scopt.OptionParser[Config]("java -jar genometools.jar sort_mfa") {
      opt[File]('i', "input") required () action { (x, c) => c.copy(inputFile = x) } text ("Input mfa formatted file.")
      opt[File]('o', "output") required () action { (x, c) => c.copy(outputFile = x) } text ("Output mfa formatted file with entries sorted in alphanumeric order.")
      opt[Int]('n', "n") action { (x, c) => c.copy(n = x) } text ("Characters per line in mfa")
    }
    // `foreach`, not `map`: the parse result is consumed purely for its side effects.
    parser.parse(args, Config()) foreach { config =>
      assume(config.inputFile != null)
      assume(config.outputFile != null)
      FastaParser.forceEntries = true
      val es = new FileSource(config.inputFile).read()
      val map = es.map { e =>
        (e.getID() -> e.sequence().stringRepresentation())
      }.toMap
      val pw = new PrintWriter(config.outputFile)
      try {
        val sorted = map.keySet.toList.sorted(naturalOrdering)
        sorted.foreach { k =>
          pw.println(">" + k)
          pw.println(map(k).grouped(config.n).mkString("\n"))
        }
      } finally pw.close() // release the handle even if writing fails
    }
  }
} | AbeelLab/genometools | genomeview/genomeview/genometools/MFA.scala | Scala | gpl-3.0 | 1,690 |
import sbt._
import sbt.Keys._
import com.teambytes.sbt.dynamodb.DynamoDBLocal
import com.teambytes.sbt.dynamodb.DynamoDBLocal.Keys._
/** sbt settings that run a local (in-memory) DynamoDB instance before tests. */
object localdynamodb {
  val settings: Seq[Setting[_]] = DynamoDBLocal.settings ++ Seq(
    dynamoDBLocalDownloadDirectory := file("dynamodb-local"),
    // sbt 0.13-style `<<=`: make `test` depend on starting DynamoDB Local first.
    test in Test <<= (test in Test).dependsOn(startDynamoDBLocal),
    dynamoDBLocalInMemory := true,
    // Plugin currently downloads "latest" every single time it runs, unless you specify a version.
    // We need this PR merged: https://github.com/grahamar/sbt-dynamodb/pull/4
    dynamoDBLocalVersion := "2015-07-16_1.0"
  )
}
| cjwebb/dynamo-mapper | project/localdynamodb.scala | Scala | apache-2.0 | 620 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.examples.control
import java.util.concurrent.TimeoutException
import wvlet.airframe.control.Retry
import wvlet.log.LogSupport
/**
*/
/** Demonstrates a customized retry policy: jittered backoff, with a classifier that
  * treats IllegalArgumentException as non-retryable and adds 50ms of extra wait after
  * a TimeoutException. The built retryer is reusable across independent runs. */
object Control_03_CustomRetry extends App with LogSupport {
  val withRetry =
    Retry
      .withJitter()
      .retryOn {
        case e: IllegalArgumentException =>
          // Bad input won't get better by retrying — fail immediately.
          Retry.nonRetryableFailure(e)
        case e: TimeoutException =>
          Retry
            .retryableFailure(e)
            // Add extra wait millis
            .withExtraWaitMillis(50)
      }
  withRetry.run {
    debug("Hello Retry!")
  }
  withRetry.run {
    debug("Retryer can be reused for other runs")
  }
}
| wvlet/airframe | examples/src/main/scala/wvlet/airframe/examples/control/Control_03_CustomRetry.scala | Scala | apache-2.0 | 1,221 |
package com.twitter.finagle.liveness
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.util.Future
import org.scalatest.FunSuite
/** Tests FailureDetector construction: how the `sessionFailureDetector` flag and the
  * configured Param select between the threshold detector and the null detector. */
class FailureDetectorTest extends FunSuite
{
  def ping = () => Future.Done
  val statsReceiver = NullStatsReceiver
  test("default settings with flag override") {
    sessionFailureDetector.let("threshold") {
      val FailureDetector.Param(failDetectorConfig) = FailureDetector.Param.param.default
      assert(FailureDetector(failDetectorConfig, ping, statsReceiver).isInstanceOf[ThresholdFailureDetector])
    }
  }
  test("flag settings with flag set to none") {
    sessionFailureDetector.let("none") {
      assert(NullFailureDetector == FailureDetector(FailureDetector.GlobalFlagConfig, ping,statsReceiver))
    }
  }
  test("flag settings with invalid string") {
    // Unrecognized flag values fall back to the null detector.
    sessionFailureDetector.let("tacos") {
      assert(NullFailureDetector == FailureDetector(FailureDetector.GlobalFlagConfig, ping, statsReceiver))
    }
  }
  test("flag settings with valid string") {
    sessionFailureDetector.let("threshold") {
      assert(FailureDetector(FailureDetector.GlobalFlagConfig, ping, statsReceiver).isInstanceOf[ThresholdFailureDetector])
    }
  }
  test("request null gets null") {
    assert(NullFailureDetector == FailureDetector(FailureDetector.NullConfig, ping, statsReceiver))
  }
  test("explicit threshold used") {
    assert(FailureDetector(FailureDetector.ThresholdConfig(), ping, statsReceiver).isInstanceOf[ThresholdFailureDetector])
  }
}
| koshelev/finagle | finagle-core/src/test/scala/com/twitter/finagle/liveness/FailureDetectorTest.scala | Scala | apache-2.0 | 1,517 |
/**
* Copyright (C) 2016 Nicola Justus <nicola.justus@mni.thm.de>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package de.thm.move.views.shapes
import javafx.beans.property.SimpleDoubleProperty
import javafx.geometry.Bounds
import javafx.scene.shape.Rectangle
import de.thm.move.implicits.FxHandlerImplicits._
/** A rectangle that highlights the underlying shape by drawing a dotted-line around the underlying shape. */
/** A rectangle that highlights the underlying shape by drawing a dotted-line around the underlying shape. */
class SelectionRectangle(selectedShape:ResizableShape) extends Rectangle {
  import SelectionRectangle._
  /* If underlying element (selectedShape) gets adjusted,
   * adjust the selection too.
   */
  // Mirror the selected shape's current bounds; updated from the listener below.
  val xProp = new SimpleDoubleProperty(selectedShape.getBoundsInLocal().getMinX)
  val yProp = new SimpleDoubleProperty(selectedShape.getBoundsInLocal().getMinY)
  val widthProp = new SimpleDoubleProperty(selectedShape.getBoundsInLocal().getWidth)
  val heightProp = new SimpleDoubleProperty(selectedShape.getBoundsInLocal().getHeight)
  selectedShape.boundsInParentProperty().addListener { (_:Bounds, newBounds:Bounds) =>
    xProp.set( newBounds.getMinX )
    yProp.set( newBounds.getMinY )
    widthProp.set( newBounds.getWidth )
    heightProp.set( newBounds.getHeight )
  }
  this.getStyleClass.addAll("selection-rectangle")
  // Bind our geometry to the mirrored bounds, expanded by a small margin.
  this.xProperty().bind(xProp.subtract(distanceToShape))
  this.yProperty().bind(yProp.subtract(distanceToShape))
  this.widthProperty().bind(widthProp.add(distanceToShape).add(aditionalSpace))
  this.heightProperty().bind(heightProp.add(distanceToShape).add(aditionalSpace))
}
/** Layout constants for [[SelectionRectangle]]. */
object SelectionRectangle {
  // Margin between the selection outline and the selected shape, in pixels.
  val distanceToShape = 5
  //needed for width, height because of some strange boundsInLocal behaviour
  val aditionalSpace = 6
}
| THM-MoTE/MoVE | src/main/scala/de/thm/move/views/shapes/SelectionRectangle.scala | Scala | mpl-2.0 | 1,861 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.content
import android.content.Context
import com.waz.log.BasicLogging.LogTag
import com.waz.model.ConversationMemberData.ConversationMemberDataDao
import com.waz.model._
import com.waz.threading.SerialDispatchQueue
import com.waz.utils.TrimmingLruCache.Fixed
import com.waz.utils.events.{AggregatingSignal, Signal}
import com.waz.utils.{CachedStorage, CachedStorageImpl, TrimmingLruCache}
import scala.concurrent.Future
/** Storage of conversation membership, keyed by (user id, conversation id). */
trait MembersStorage extends CachedStorage[(UserId, ConvId), ConversationMemberData] {
  // Lookups by conversation / by user.
  def getByConv(conv: ConvId): Future[IndexedSeq[ConversationMemberData]]
  def getByConvs(conv: Set[ConvId]): Future[IndexedSeq[ConversationMemberData]]
  // Add returns the created/updated entries.
  def add(conv: ConvId, users: Iterable[UserId]): Future[Set[ConversationMemberData]]
  def add(conv: ConvId, user: UserId): Future[Option[ConversationMemberData]]
  def isActiveMember(conv: ConvId, user: UserId): Future[Boolean]
  // Remove returns the entries that were removed.
  def remove(conv: ConvId, users: Iterable[UserId]): Future[Set[ConversationMemberData]]
  def remove(conv: ConvId, user: UserId): Future[Option[ConversationMemberData]]
  def getByUsers(users: Set[UserId]): Future[IndexedSeq[ConversationMemberData]]
  def getActiveUsers(conv: ConvId): Future[Seq[UserId]]
  def getActiveUsers2(conv: Set[ConvId]): Future[Map[ConvId, Set[UserId]]]
  def getActiveConvs(user: UserId): Future[Seq[ConvId]]
  // Reactive view of a conversation's member set.
  def activeMembers(conv: ConvId): Signal[Set[UserId]]
  // `set*` replace membership (add missing, remove absent); `addAll` only inserts.
  def set(conv: ConvId, users: Set[UserId]): Future[Unit]
  def setAll(members: Map[ConvId, Set[UserId]]): Future[Unit]
  def addAll(members: Map[ConvId, Set[UserId]]): Future[Unit]
  def delete(conv: ConvId): Future[Unit]
}
/** Database-backed, LRU-cached implementation of [[MembersStorage]]. */
class MembersStorageImpl(context: Context, storage: ZmsDatabase)
  extends CachedStorageImpl[(UserId, ConvId), ConversationMemberData](new TrimmingLruCache(context, Fixed(1024)), storage)(ConversationMemberDataDao, LogTag("MembersStorage_Cached"))
    with MembersStorage {
  private implicit val dispatcher = new SerialDispatchQueue(name = "MembersStorage")
  def getByConv(conv: ConvId) = find(_.convId == conv, ConversationMemberDataDao.findForConv(conv)(_), identity)
  def getByUser(user: UserId) = find(_.userId == user, ConversationMemberDataDao.findForUser(user)(_), identity)
  /** Signal of `conv`'s member set, seeded from storage and updated incrementally
    * from add/delete events. */
  override def activeMembers(conv: ConvId): Signal[Set[UserId]] =
    new AggregatingSignal[Seq[(UserId, Boolean)], Set[UserId]](onConvMemberChanged(conv),
      getActiveUsers(conv).map(_.toSet), { (current, changes) =>
      val (active, inactive) = changes.partition(_._2)
      current -- inactive.map(_._1) ++ active.map(_._1)
    })
  // Additions arrive as (user, true), deletions as (user, false).
  private def onConvMemberChanged(conv: ConvId) = onAdded.map(_.filter(_.convId == conv).map(_.userId -> true)).union(onDeleted.map(_.filter(_._2 == conv).map(_._1 -> false)))
  override def getActiveUsers(conv: ConvId) = getByConv(conv) map { _.map(_.userId) }
  override def getActiveConvs(user: UserId) = getByUser(user) map { _.map(_.convId) }
  // Note: conversations with no stored members are absent from the returned map.
  override def getActiveUsers2(convs: Set[ConvId]): Future[Map[ConvId, Set[UserId]]] =
    getByConvs(convs).map(_.groupBy(_.convId).map {
      case (cId, members) => cId -> members.map(_.userId).toSet
    })
  def add(conv: ConvId, users: Iterable[UserId]) =
    updateOrCreateAll2(users.map((_, conv)), { (k, v) =>
      v match {
        case Some(m) => m
        case None => ConversationMemberData(k._1, conv)
      }
    })
  def add(conv: ConvId, user: UserId) =
    add(conv, Set(user)).map(_.headOption)
  override def remove(conv: ConvId, users: Iterable[UserId]) = {
    getAll(users.map(_ -> conv)).flatMap(toBeRemoved => removeAll(users.map(_ -> conv)).map(_ => toBeRemoved.flatten.toSet))
  }
  override def remove(conv: ConvId, user: UserId) =
    remove(conv, Set(user)).map(_.headOption)
  /** Replaces `conv`'s membership with exactly `users`. */
  def set(conv: ConvId, users: Set[UserId]): Future[Unit] = getActiveUsers(conv) flatMap { active =>
    val toRemove = active.filterNot(users)
    val toAdd = users -- toRemove
    remove(conv, toRemove).zip(add(conv, toAdd)).map(_ => ())
  }
  /** Replaces membership for every conversation in `members`. */
  def setAll(members: Map[ConvId, Set[UserId]]): Future[Unit] = getActiveUsers2(members.keySet).flatMap { active =>
    // Active members absent from the new member set must be removed. Iterate
    // `members` here: the previous code iterated `active` and filtered each
    // conversation's active set against itself, which always produced empty
    // removal sets, so departed members were never deleted.
    val toRemove = members.map {
      case (convId, users) => convId -> active.get(convId).map(_.filterNot(users)).getOrElse(Set())
    }
    val toAdd = members.map {
      case (convId, users) => convId -> (users -- toRemove.getOrElse(convId, Set()))
    }
    val removeList = toRemove.toSeq.flatMap {
      case (convId, users) => users.map((_, convId))
    }
    val addList = toAdd.flatMap {
      case (convId, users) => users.map(ConversationMemberData(_, convId))
    }
    removeAll(removeList).zip(insertAll(addList)).map(_ => ())
  }
  /** Inserts the given members without removing anyone. */
  def addAll(members: Map[ConvId, Set[UserId]]): Future[Unit] = {
    val addList =
      members.flatMap { case (convId, users) => users.map(ConversationMemberData(_, convId)) }
    insertAll(addList).map(_ => ())
  }
  override def isActiveMember(conv: ConvId, user: UserId) = get(user -> conv).map(_.nonEmpty)
  /** Removes every membership entry of the given conversation. */
  def delete(conv: ConvId) = getByConv(conv) flatMap { users => removeAll(users.map(_.userId -> conv)) }
  override def getByUsers(users: Set[UserId]) = find(mem => users.contains(mem.userId), ConversationMemberDataDao.findForUsers(users)(_), identity)
  override def getByConvs(convs: Set[ConvId]) = find(mem => convs.contains(mem.convId), ConversationMemberDataDao.findForConvs(convs)(_), identity)
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/content/MembersStorage.scala | Scala | gpl-3.0 | 6,102 |
package ujson
import upickle.core.Visitor
/**
 * Type class for input types that can be traversed by driving a upickle-core
 * [[Visitor]] over their structure.
 */
trait Transformer[I] {
  /** Walks the input `j`, feeding its structure into `f`, and returns the visitor's result. */
  def transform[T](j: I, f: Visitor[_, T]): T
  /** Wraps `j` as a `Readable` backed by this transformer, for deferred transformation. */
  def transformable[T](j: I) = Readable.fromTransformer(j, this)
}
| lihaoyi/upickle-pprint | ujson/src/ujson/Transformer.scala | Scala | mit | 179 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.net.InetAddress
import java.nio.file.Files
import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.stream.IntStream
import java.util.{Collections, Optional, Properties}
import kafka.api._
import kafka.cluster.{BrokerEndPoint, Partition}
import kafka.log._
import kafka.server.QuotaFactory.{QuotaManagers, UnboundedQuota}
import kafka.server.checkpoints.{LazyOffsetCheckpoints, OffsetCheckpointFile}
import kafka.server.epoch.util.ReplicaFetcherMockBlockingSend
import kafka.utils.timer.MockTimer
import kafka.utils.{MockScheduler, MockTime, TestUtils}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.LeaderAndIsrRequestData
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState
import org.apache.kafka.common.metadata.{PartitionRecord, RemoveTopicRecord, TopicRecord}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record._
import org.apache.kafka.common.replica.ClientMetadata
import org.apache.kafka.common.replica.ClientMetadata.DefaultClientMetadata
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.{Time, Utils}
import org.apache.kafka.common.{IsolationLevel, Node, TopicPartition, Uuid}
import org.apache.kafka.image.{ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, TopicImage, TopicsDelta, TopicsImage }
import org.apache.kafka.metadata.{PartitionRegistration, Replicas}
import org.easymock.EasyMock
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.mockito.{ArgumentMatchers, Mockito}
import scala.collection.{Map, Seq, mutable}
import scala.jdk.CollectionConverters._
class ReplicaManagerTest {
  // Topic fixture shared by most tests, plus its id mappings in both directions.
  val topic = "test-topic"
  val topicId = Uuid.randomUuid()
  val topicIds = scala.Predef.Map("test-topic" -> topicId)
  val topicNames = scala.Predef.Map(topicId -> "test-topic")
  // Deterministic clock/scheduler so tests can drive delayed operations explicitly.
  val time = new MockTime
  val scheduler = new MockScheduler(time)
  val metrics = new Metrics
  // Re-initialized for every test in setUp().
  var alterIsrManager: AlterIsrManager = _
  var config: KafkaConfig = _
  var quotaManager: QuotaManagers = _
  // Constants defined for readability
  val zkVersion = 0
  val correlationId = 0
  var controllerEpoch = 0
  val brokerEpoch = 0L
  /** Creates a fresh broker config, a mocked AlterIsrManager and quota managers before each test. */
  @BeforeEach
  def setUp(): Unit = {
    val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect)
    config = KafkaConfig.fromProps(props)
    alterIsrManager = EasyMock.createMock(classOf[AlterIsrManager])
    quotaManager = QuotaFactory.instantiate(config, metrics, time, "")
  }
  /** Releases the metrics and quota-manager resources created in setUp. */
  @AfterEach
  def tearDown(): Unit = {
    TestUtils.clearYammerMetrics()
    // quotaManager may be null if setUp failed before instantiating it.
    Option(quotaManager).foreach(_.shutdown())
    metrics.close()
  }
@Test
def testHighWaterMarkDirectoryMapping(): Unit = {
val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)))
val rm = new ReplicaManager(config, metrics, time, None, new MockScheduler(time), mockLogMgr,
new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
MetadataCache.zkMetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size), alterIsrManager)
try {
val partition = rm.createPartition(new TopicPartition(topic, 1))
partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints), None)
rm.checkpointHighWatermarks()
} finally {
// shutdown the replica manager upon test completion
rm.shutdown(false)
}
}
@Test
def testHighwaterMarkRelativeDirectoryMapping(): Unit = {
val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect)
props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath)
val config = KafkaConfig.fromProps(props)
val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)))
val rm = new ReplicaManager(config, metrics, time, None, new MockScheduler(time), mockLogMgr,
new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
MetadataCache.zkMetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size), alterIsrManager)
try {
val partition = rm.createPartition(new TopicPartition(topic, 1))
partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints), None)
rm.checkpointHighWatermarks()
} finally {
// shutdown the replica manager upon test completion
rm.shutdown(checkpointHW = false)
}
}
@Test
def testIllegalRequiredAcks(): Unit = {
val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)))
val rm = new ReplicaManager(config, metrics, time, None, new MockScheduler(time), mockLogMgr,
new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
MetadataCache.zkMetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size), alterIsrManager, Option(this.getClass.getName))
try {
def callback(responseStatus: Map[TopicPartition, PartitionResponse]) = {
assert(responseStatus.values.head.error == Errors.INVALID_REQUIRED_ACKS)
}
rm.appendRecords(
timeout = 0,
requiredAcks = 3,
internalTopicsAllowed = false,
origin = AppendOrigin.Client,
entriesPerPartition = Map(new TopicPartition("test1", 0) -> MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord("first message".getBytes))),
responseCallback = callback)
} finally {
rm.shutdown(checkpointHW = false)
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
private def mockGetAliveBrokerFunctions(cache: MetadataCache, aliveBrokers: Seq[Node]): Unit = {
Mockito.when(cache.hasAliveBroker(ArgumentMatchers.anyInt())).thenAnswer(new Answer[Boolean]() {
override def answer(invocation: InvocationOnMock): Boolean = {
aliveBrokers.map(_.id()).contains(invocation.getArguments()(0).asInstanceOf[Int])
}
})
Mockito.when(cache.getAliveBrokerNode(ArgumentMatchers.anyInt(), ArgumentMatchers.any[ListenerName])).
thenAnswer(new Answer[Option[Node]]() {
override def answer(invocation: InvocationOnMock): Option[Node] = {
aliveBrokers.find(node => node.id == invocation.getArguments()(0).asInstanceOf[Integer])
}
})
Mockito.when(cache.getAliveBrokerNodes(ArgumentMatchers.any[ListenerName])).thenReturn(aliveBrokers)
}
  /**
   * Verifies that a pending delayed produce is completed (with NOT_LEADER_OR_FOLLOWER)
   * when the replica transitions from leader to follower, i.e. the produce purgatory
   * is cleared on the leadership change.
   */
  @Test
  def testClearPurgatoryOnBecomingFollower(): Unit = {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
    props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath)
    val config = KafkaConfig.fromProps(props)
    val logProps = new Properties()
    val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), LogConfig(logProps))
    val aliveBrokers = Seq(new Node(0, "host0", 0), new Node(1, "host1", 1))
    val metadataCache: MetadataCache = Mockito.mock(classOf[MetadataCache])
    mockGetAliveBrokerFunctions(metadataCache, aliveBrokers)
    val rm = new ReplicaManager(config, metrics, time, None, new MockScheduler(time), mockLogMgr,
      new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
      metadataCache, new LogDirFailureChannel(config.logDirs.size), alterIsrManager)
    try {
      val brokerList = Seq[Integer](0, 1).asJava
      val topicIds = Collections.singletonMap(topic, Uuid.randomUuid())
      val partition = rm.createPartition(new TopicPartition(topic, 0))
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
        new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints), None)
      // Make this replica the leader.
      val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(0)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(false)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ())
      rm.getPartitionOrException(new TopicPartition(topic, 0))
        .localLogOrException
      // The append does not fire immediately (the helper's onFire callback only runs
      // once completed); it is expected to complete with an error when leadership changes.
      val records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("first message".getBytes()))
      val appendResult = appendRecords(rm, new TopicPartition(topic, 0), records).onFire { response =>
        assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, response.error)
      }
      // Make this replica the follower
      val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(1)
          .setLeaderEpoch(1)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(false)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      rm.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ())
      // Becoming follower must have completed the pending append.
      assertTrue(appendResult.isFired)
    } finally {
      rm.shutdown(checkpointHW = false)
    }
  }
@Test
def testFencedErrorCausedByBecomeLeader(): Unit = {
testFencedErrorCausedByBecomeLeader(0)
testFencedErrorCausedByBecomeLeader(1)
testFencedErrorCausedByBecomeLeader(10)
}
  /**
   * Starts a replica move to another log dir, bumps the leader epoch `loopEpochChange`
   * times while the move is in flight, and verifies the move still completes: the
   * fenced errors hit by the ReplicaAlterLogDirsThread must be recoverable.
   */
  private[this] def testFencedErrorCausedByBecomeLeader(loopEpochChange: Int): Unit = {
    val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
    try {
      val brokerList = Seq[Integer](0, 1).asJava
      val topicPartition = new TopicPartition(topic, 0)
      replicaManager.createPartition(topicPartition)
        .createLogIfNotExists(isNew = false, isFutureReplica = false,
          new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints), None)
      // Builds a become-leader request for this broker at the given leader epoch.
      def leaderAndIsrRequest(epoch: Int): LeaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(epoch)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds.asJava,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0), (_, _) => ())
      val partition = replicaManager.getPartitionOrException(new TopicPartition(topic, 0))
      // There must be exactly one other live log dir to move the replica to.
      assertEquals(1, replicaManager.logManager.liveLogDirs.filterNot(_ == partition.log.get.dir.getParentFile).size)
      val previousReplicaFolder = partition.log.get.dir.getParentFile
      // find the live and different folder
      val newReplicaFolder = replicaManager.logManager.liveLogDirs.filterNot(_ == partition.log.get.dir.getParentFile).head
      assertEquals(0, replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.size)
      replicaManager.alterReplicaLogDirs(Map(topicPartition -> newReplicaFolder.getAbsolutePath))
      // make sure the future log is created
      replicaManager.futureLocalLogOrException(topicPartition)
      assertEquals(1, replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.size)
      // Bump the leader epoch repeatedly while the dir move is in progress.
      (1 to loopEpochChange).foreach(epoch => replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(epoch), (_, _) => ()))
      // wait for the ReplicaAlterLogDirsThread to complete
      TestUtils.waitUntilTrue(() => {
        replicaManager.replicaAlterLogDirsManager.shutdownIdleFetcherThreads()
        replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.isEmpty
      }, s"ReplicaAlterLogDirsThread should be gone")
      // the fenced error should be recoverable
      assertEquals(0, replicaManager.replicaAlterLogDirsManager.failedPartitions.size)
      // the replica change is completed after retrying
      assertTrue(partition.futureLog.isEmpty)
      assertEquals(newReplicaFolder.getAbsolutePath, partition.log.get.dir.getParent)
      // change the replica folder again
      val response = replicaManager.alterReplicaLogDirs(Map(topicPartition -> previousReplicaFolder.getAbsolutePath))
      assertNotEquals(0, response.size)
      response.values.foreach(assertEquals(Errors.NONE, _))
      // should succeed to invoke ReplicaAlterLogDirsThread again
      assertEquals(1, replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.size)
    } finally replicaManager.shutdown(checkpointHW = false)
  }
  /**
   * Appending a record with an out-of-range producer sequence must fail with
   * OUT_OF_ORDER_SEQUENCE_NUMBER and report the current log start offset in the response.
   */
  @Test
  def testReceiveOutOfOrderSequenceExceptionWithLogStartOffset(): Unit = {
    val timer = new MockTimer(time)
    val replicaManager = setupReplicaManagerWithMockedPurgatories(timer)
    try {
      val brokerList = Seq[Integer](0, 1).asJava
      val partition = replicaManager.createPartition(new TopicPartition(topic, 0))
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
        new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints), None)
      // Make this replica the leader.
      val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(0)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        Collections.singletonMap(topic, Uuid.randomUuid()),
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ())
      replicaManager.getPartitionOrException(new TopicPartition(topic, 0))
        .localLogOrException
      val producerId = 234L
      val epoch = 5.toShort
      // write a few batches as part of a transaction
      val numRecords = 3
      for (sequence <- 0 until numRecords) {
        val records = MemoryRecords.withIdempotentRecords(CompressionType.NONE, producerId, epoch, sequence,
          new SimpleRecord(s"message $sequence".getBytes))
        appendRecords(replicaManager, new TopicPartition(topic, 0), records).onFire { response =>
          assertEquals(Errors.NONE, response.error)
        }
      }
      assertEquals(0, partition.logStartOffset)
      // Append a record with an out of range sequence. We should get the OutOfOrderSequence error code with the log
      // start offset set.
      val outOfRangeSequence = numRecords + 10
      val record = MemoryRecords.withIdempotentRecords(CompressionType.NONE, producerId, epoch, outOfRangeSequence,
        new SimpleRecord(s"message: $outOfRangeSequence".getBytes))
      appendRecords(replicaManager, new TopicPartition(topic, 0), record).onFire { response =>
        assertEquals(Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, response.error)
        assertEquals(0, response.logStartOffset)
      }
    } finally {
      replicaManager.shutdown(checkpointHW = false)
    }
  }
  /**
   * READ_COMMITTED consumer fetches must be bounded by the last stable offset:
   * while a transaction is open nothing is returned; once it commits and the high
   * watermark advances, all records (including the commit marker) become fetchable.
   */
  @Test
  def testReadCommittedFetchLimitedAtLSO(): Unit = {
    val timer = new MockTimer(time)
    val replicaManager = setupReplicaManagerWithMockedPurgatories(timer)
    try {
      val brokerList = Seq[Integer](0, 1).asJava
      val partition = replicaManager.createPartition(new TopicPartition(topic, 0))
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
        new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints), None)
      // Make this replica the leader.
      val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(0)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds.asJava,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ())
      replicaManager.getPartitionOrException(new TopicPartition(topic, 0))
        .localLogOrException
      val producerId = 234L
      val epoch = 5.toShort
      // write a few batches as part of a transaction
      val numRecords = 3
      for (sequence <- 0 until numRecords) {
        val records = MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, epoch, sequence,
          new SimpleRecord(s"message $sequence".getBytes))
        appendRecords(replicaManager, new TopicPartition(topic, 0), records).onFire { response =>
          assertEquals(Errors.NONE, response.error)
        }
      }
      // fetch as follower to advance the high watermark
      fetchAsFollower(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(numRecords, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_UNCOMMITTED)
      // fetch should return empty since LSO should be stuck at 0
      var consumerFetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(0, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_COMMITTED)
      var fetchData = consumerFetchResult.assertFired
      assertEquals(Errors.NONE, fetchData.error)
      assertTrue(fetchData.records.batches.asScala.isEmpty)
      assertEquals(Some(0), fetchData.lastStableOffset)
      assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
      // delayed fetch should timeout and return nothing
      consumerFetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(0, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_COMMITTED, minBytes = 1000)
      assertFalse(consumerFetchResult.isFired)
      // Drive the mock timer past the fetch timeout to complete the delayed fetch.
      timer.advanceClock(1001)
      fetchData = consumerFetchResult.assertFired
      assertEquals(Errors.NONE, fetchData.error)
      assertTrue(fetchData.records.batches.asScala.isEmpty)
      assertEquals(Some(0), fetchData.lastStableOffset)
      assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
      // now commit the transaction
      val endTxnMarker = new EndTransactionMarker(ControlRecordType.COMMIT, 0)
      val commitRecordBatch = MemoryRecords.withEndTransactionMarker(producerId, epoch, endTxnMarker)
      appendRecords(replicaManager, new TopicPartition(topic, 0), commitRecordBatch,
        origin = AppendOrigin.Coordinator)
        .onFire { response => assertEquals(Errors.NONE, response.error) }
      // the LSO has advanced, but the appended commit marker has not been replicated, so
      // none of the data from the transaction should be visible yet
      consumerFetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(0, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_COMMITTED)
      fetchData = consumerFetchResult.assertFired
      assertEquals(Errors.NONE, fetchData.error)
      assertTrue(fetchData.records.batches.asScala.isEmpty)
      // fetch as follower to advance the high watermark
      fetchAsFollower(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(numRecords + 1, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_UNCOMMITTED)
      // now all of the records should be fetchable
      consumerFetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(0, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_COMMITTED)
      fetchData = consumerFetchResult.assertFired
      assertEquals(Errors.NONE, fetchData.error)
      assertEquals(Some(numRecords + 1), fetchData.lastStableOffset)
      assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
      assertEquals(numRecords + 1, fetchData.records.batches.asScala.size)
    } finally {
      replicaManager.shutdown(checkpointHW = false)
    }
  }
  /**
   * A READ_COMMITTED fetch that sits in the fetch purgatory (due to minBytes) must,
   * when it completes, include the aborted-transactions metadata for a transaction
   * that was aborted after the fetch was parked.
   */
  @Test
  def testDelayedFetchIncludesAbortedTransactions(): Unit = {
    val timer = new MockTimer(time)
    val replicaManager = setupReplicaManagerWithMockedPurgatories(timer)
    try {
      val brokerList = Seq[Integer](0, 1).asJava
      val partition = replicaManager.createPartition(new TopicPartition(topic, 0))
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
        new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints), None)
      // Make this replica the leader.
      val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(0)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds.asJava,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ())
      replicaManager.getPartitionOrException(new TopicPartition(topic, 0))
        .localLogOrException
      val producerId = 234L
      val epoch = 5.toShort
      // write a few batches as part of a transaction
      val numRecords = 3
      for (sequence <- 0 until numRecords) {
        val records = MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, epoch, sequence,
          new SimpleRecord(s"message $sequence".getBytes))
        appendRecords(replicaManager, new TopicPartition(topic, 0), records).onFire { response =>
          assertEquals(Errors.NONE, response.error)
        }
      }
      // now abort the transaction
      val endTxnMarker = new EndTransactionMarker(ControlRecordType.ABORT, 0)
      val abortRecordBatch = MemoryRecords.withEndTransactionMarker(producerId, epoch, endTxnMarker)
      appendRecords(replicaManager, new TopicPartition(topic, 0), abortRecordBatch,
        origin = AppendOrigin.Coordinator)
        .onFire { response => assertEquals(Errors.NONE, response.error) }
      // fetch as follower to advance the high watermark
      fetchAsFollower(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(numRecords + 1, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_UNCOMMITTED)
      // Set the minBytes in order force this request to enter purgatory. When it returns, we should still
      // see the newly aborted transaction.
      val fetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
        new PartitionData(0, 0, 100000, Optional.empty()),
        isolationLevel = IsolationLevel.READ_COMMITTED, minBytes = 10000)
      assertFalse(fetchResult.isFired)
      // Drive the mock timer past the fetch timeout to complete the delayed fetch.
      timer.advanceClock(1001)
      val fetchData = fetchResult.assertFired
      assertEquals(Errors.NONE, fetchData.error)
      assertEquals(Some(numRecords + 1), fetchData.lastStableOffset)
      assertEquals(numRecords + 1, fetchData.records.records.asScala.size)
      assertTrue(fetchData.abortedTransactions.isDefined)
      assertEquals(1, fetchData.abortedTransactions.get.size)
      val abortedTransaction = fetchData.abortedTransactions.get.head
      assertEquals(0L, abortedTransaction.firstOffset)
      assertEquals(producerId, abortedTransaction.producerId)
    } finally {
      replicaManager.shutdown(checkpointHW = false)
    }
  }
  /**
   * Fetching beyond the high watermark: followers may read past it and get data,
   * while consumers get an empty record set (not an out-of-range error).
   */
  @Test
  def testFetchBeyondHighWatermark(): Unit = {
    val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2))
    try {
      val brokerList = Seq[Integer](0, 1, 2).asJava
      val partition = rm.createPartition(new TopicPartition(topic, 0))
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false,
        new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints), None)
      // Make this replica the leader.
      val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(0)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(false)).asJava,
        topicIds.asJava,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1), new Node(2, "host2", 2)).asJava).build()
      rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ())
      rm.getPartitionOrException(new TopicPartition(topic, 0))
        .localLogOrException
      // Append a couple of messages.
      for(i <- 1 to 2) {
        val records = TestUtils.singletonRecords(s"message $i".getBytes)
        appendRecords(rm, new TopicPartition(topic, 0), records).onFire { response =>
          assertEquals(Errors.NONE, response.error)
        }
      }
      // Followers are always allowed to fetch above the high watermark
      val followerFetchResult = fetchAsFollower(rm, new TopicPartition(topic, 0),
        new PartitionData(1, 0, 100000, Optional.empty()))
      val followerFetchData = followerFetchResult.assertFired
      assertEquals(Errors.NONE, followerFetchData.error, "Should not give an exception")
      assertTrue(followerFetchData.records.batches.iterator.hasNext, "Should return some data")
      // Consumers are not allowed to consume above the high watermark. However, since the
      // high watermark could be stale at the time of the request, we do not return an out of
      // range error and instead return an empty record set.
      val consumerFetchResult = fetchAsConsumer(rm, new TopicPartition(topic, 0),
        new PartitionData(1, 0, 100000, Optional.empty()))
      val consumerFetchData = consumerFetchResult.assertFired
      assertEquals(Errors.NONE, consumerFetchData.error, "Should not give an exception")
      assertEquals(MemoryRecords.EMPTY, consumerFetchData.records, "Should return empty response")
    } finally {
      rm.shutdown(checkpointHW = false)
    }
  }
  /**
   * Follower fetch state (log start/end offsets) must only advance for valid fetch
   * requests: fetches with a stale epoch or a diverging epoch leave the follower's
   * replica state unchanged.
   */
  @Test
  def testFollowerStateNotUpdatedIfLogReadFails(): Unit = {
    val maxFetchBytes = 1024 * 1024
    val aliveBrokersIds = Seq(0, 1)
    val leaderEpoch = 5
    val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time),
      brokerId = 0, aliveBrokersIds)
    try {
      val tp = new TopicPartition(topic, 0)
      val replicas = aliveBrokersIds.toList.map(Int.box).asJava
      // Broker 0 becomes leader of the partition
      val leaderAndIsrPartitionState = new LeaderAndIsrPartitionState()
        .setTopicName(topic)
        .setPartitionIndex(0)
        .setControllerEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(leaderEpoch)
        .setIsr(replicas)
        .setZkVersion(0)
        .setReplicas(replicas)
        .setIsNew(true)
      val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(leaderAndIsrPartitionState).asJava,
        Collections.singletonMap(topic, topicId),
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
      val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ())
      assertEquals(Errors.NONE, leaderAndIsrResponse.error)
      // Follower replica state is initialized, but initial state is not known
      assertTrue(replicaManager.onlinePartition(tp).isDefined)
      val partition = replicaManager.onlinePartition(tp).get
      assertTrue(partition.getReplica(1).isDefined)
      val followerReplica = partition.getReplica(1).get
      assertEquals(-1L, followerReplica.logStartOffset)
      assertEquals(-1L, followerReplica.logEndOffset)
      // Leader appends some data
      for (i <- 1 to 5) {
        appendRecords(replicaManager, tp, TestUtils.singletonRecords(s"message $i".getBytes)).onFire { response =>
          assertEquals(Errors.NONE, response.error)
        }
      }
      // We receive one valid request from the follower and replica state is updated
      var successfulFetch: Option[FetchPartitionData] = None
      def callback(response: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
        successfulFetch = response.headOption.filter(_._1 == tp).map(_._2)
      }
      val validFetchPartitionData = new FetchRequest.PartitionData(0L, 0L, maxFetchBytes,
        Optional.of(leaderEpoch))
      replicaManager.fetchMessages(
        timeout = 0L,
        replicaId = 1,
        fetchMinBytes = 1,
        fetchMaxBytes = maxFetchBytes,
        hardMaxBytesLimit = false,
        fetchInfos = Seq(tp -> validFetchPartitionData),
        topicIds = topicIds.asJava,
        quota = UnboundedQuota,
        isolationLevel = IsolationLevel.READ_UNCOMMITTED,
        responseCallback = callback,
        clientMetadata = None
      )
      assertTrue(successfulFetch.isDefined)
      assertEquals(0L, followerReplica.logStartOffset)
      assertEquals(0L, followerReplica.logEndOffset)
      // Next we receive an invalid request with a higher fetch offset, but an old epoch.
      // We expect that the replica state does not get updated.
      val invalidFetchPartitionData = new FetchRequest.PartitionData(3L, 0L, maxFetchBytes,
        Optional.of(leaderEpoch - 1))
      replicaManager.fetchMessages(
        timeout = 0L,
        replicaId = 1,
        fetchMinBytes = 1,
        fetchMaxBytes = maxFetchBytes,
        hardMaxBytesLimit = false,
        fetchInfos = Seq(tp -> invalidFetchPartitionData),
        topicIds = topicIds.asJava,
        quota = UnboundedQuota,
        isolationLevel = IsolationLevel.READ_UNCOMMITTED,
        responseCallback = callback,
        clientMetadata = None
      )
      assertTrue(successfulFetch.isDefined)
      assertEquals(0L, followerReplica.logStartOffset)
      assertEquals(0L, followerReplica.logEndOffset)
      // Next we receive an invalid request with a higher fetch offset, but a diverging epoch.
      // We expect that the replica state does not get updated.
      val divergingFetchPartitionData = new FetchRequest.PartitionData(3L, 0L, maxFetchBytes,
        Optional.of(leaderEpoch), Optional.of(leaderEpoch - 1))
      replicaManager.fetchMessages(
        timeout = 0L,
        replicaId = 1,
        fetchMinBytes = 1,
        fetchMaxBytes = maxFetchBytes,
        hardMaxBytesLimit = false,
        fetchInfos = Seq(tp -> divergingFetchPartitionData),
        topicIds = topicIds.asJava,
        quota = UnboundedQuota,
        isolationLevel = IsolationLevel.READ_UNCOMMITTED,
        responseCallback = callback,
        clientMetadata = None
      )
      assertTrue(successfulFetch.isDefined)
      assertEquals(0L, followerReplica.logStartOffset)
      assertEquals(0L, followerReplica.logEndOffset)
    } finally {
      replicaManager.shutdown(checkpointHW = false)
    }
  }
@Test
def testFetchMessagesWithInconsistentTopicId(): Unit = {
val maxFetchBytes = 1024 * 1024
val aliveBrokersIds = Seq(0, 1)
val leaderEpoch = 5
val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time),
brokerId = 0, aliveBrokersIds)
try {
val tp = new TopicPartition(topic, 0)
val replicas = aliveBrokersIds.toList.map(Int.box).asJava
// Broker 0 becomes leader of the partition
val leaderAndIsrPartitionState = new LeaderAndIsrPartitionState()
.setTopicName(topic)
.setPartitionIndex(0)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(leaderEpoch)
.setIsr(replicas)
.setZkVersion(0)
.setReplicas(replicas)
.setIsNew(true)
val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(leaderAndIsrPartitionState).asJava,
Collections.singletonMap(topic, topicId),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ())
assertEquals(Errors.NONE, leaderAndIsrResponse.error)
assertEquals(Some(topicId), replicaManager.getPartitionOrException(tp).topicId)
// We receive one valid request from the follower and replica state is updated
var successfulFetch: Option[FetchPartitionData] = None
def callback(response: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
successfulFetch = response.headOption.filter { case (topicPartition, _) => topicPartition == tp }.map { case (_, data) => data }
}
val validFetchPartitionData = new FetchRequest.PartitionData(0L, 0L, maxFetchBytes,
Optional.of(leaderEpoch))
// Fetch messages simulating a different ID than the one in the log.
replicaManager.fetchMessages(
timeout = 0L,
replicaId = 1,
fetchMinBytes = 1,
fetchMaxBytes = maxFetchBytes,
hardMaxBytesLimit = false,
fetchInfos = Seq(tp -> validFetchPartitionData),
topicIds = Collections.singletonMap(topic, Uuid.randomUuid()),
quota = UnboundedQuota,
isolationLevel = IsolationLevel.READ_UNCOMMITTED,
responseCallback = callback,
clientMetadata = None
)
assertTrue(successfulFetch.isDefined)
assertEquals(Errors.INCONSISTENT_TOPIC_ID, successfulFetch.get.error)
// Simulate where the fetch request did not use topic IDs
// Fetch messages simulating an ID in the log.
// We should not see topic ID errors.
replicaManager.fetchMessages(
timeout = 0L,
replicaId = 1,
fetchMinBytes = 1,
fetchMaxBytes = maxFetchBytes,
hardMaxBytesLimit = false,
fetchInfos = Seq(tp -> validFetchPartitionData),
topicIds = Collections.emptyMap(),
quota = UnboundedQuota,
isolationLevel = IsolationLevel.READ_UNCOMMITTED,
responseCallback = callback,
clientMetadata = None
)
assertTrue(successfulFetch.isDefined)
assertEquals(Errors.NONE, successfulFetch.get.error)
// Next create a topic without a topic ID written in the log.
val tp2 = new TopicPartition("noIdTopic", 0)
// Broker 0 becomes leader of the partition
val leaderAndIsrPartitionState2 = new LeaderAndIsrPartitionState()
.setTopicName("noIdTopic")
.setPartitionIndex(0)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(leaderEpoch)
.setIsr(replicas)
.setZkVersion(0)
.setReplicas(replicas)
.setIsNew(true)
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(leaderAndIsrPartitionState2).asJava,
Collections.emptyMap(),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
val leaderAndIsrResponse2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest2, (_, _) => ())
assertEquals(Errors.NONE, leaderAndIsrResponse2.error)
assertEquals(None, replicaManager.getPartitionOrException(tp2).topicId)
// Fetch messages simulating the request containing a topic ID. We should not have an error.
replicaManager.fetchMessages(
timeout = 0L,
replicaId = 1,
fetchMinBytes = 1,
fetchMaxBytes = maxFetchBytes,
hardMaxBytesLimit = false,
fetchInfos = Seq(tp -> validFetchPartitionData),
topicIds = Collections.singletonMap("noIdTopic", Uuid.randomUuid()),
quota = UnboundedQuota,
isolationLevel = IsolationLevel.READ_UNCOMMITTED,
responseCallback = callback,
clientMetadata = None
)
assertTrue(successfulFetch.isDefined)
assertEquals(Errors.NONE, successfulFetch.get.error)
// Fetch messages simulating the request not containing a topic ID. We should not have an error.
replicaManager.fetchMessages(
timeout = 0L,
replicaId = 1,
fetchMinBytes = 1,
fetchMaxBytes = maxFetchBytes,
hardMaxBytesLimit = false,
fetchInfos = Seq(tp -> validFetchPartitionData),
topicIds = Collections.emptyMap(),
quota = UnboundedQuota,
isolationLevel = IsolationLevel.READ_UNCOMMITTED,
responseCallback = callback,
clientMetadata = None
)
assertTrue(successfulFetch.isDefined)
assertEquals(Errors.NONE, successfulFetch.get.error)
} finally {
replicaManager.shutdown(checkpointHW = false)
}
}
/**
* If a follower sends a fetch request for 2 partitions and it's no longer the follower for one of them, the other
* partition should not be affected.
*/
@Test
def testFetchMessagesWhenNotFollowerForOnePartition(): Unit = {
val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2))
try {
// Create 2 partitions, assign replica 0 as the leader for both a different follower (1 and 2) for each
val tp0 = new TopicPartition(topic, 0)
val tp1 = new TopicPartition(topic, 1)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
replicaManager.createPartition(tp1).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val partition1Replicas = Seq[Integer](0, 2).asJava
val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava
val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(
new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(0)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true),
new LeaderAndIsrPartitionState()
.setTopicName(tp1.topic)
.setPartitionIndex(tp1.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(0)
.setIsr(partition1Replicas)
.setZkVersion(0)
.setReplicas(partition1Replicas)
.setIsNew(true)
).asJava,
topicIds,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ())
// Append a couple of messages.
for (i <- 1 to 2) {
appendRecords(replicaManager, tp0, TestUtils.singletonRecords(s"message $i".getBytes)).onFire { response =>
assertEquals(Errors.NONE, response.error)
}
appendRecords(replicaManager, tp1, TestUtils.singletonRecords(s"message $i".getBytes)).onFire { response =>
assertEquals(Errors.NONE, response.error)
}
}
def fetchCallback(responseStatus: Seq[(TopicPartition, FetchPartitionData)]) = {
val responseStatusMap = responseStatus.toMap
assertEquals(2, responseStatus.size)
assertEquals(Set(tp0, tp1), responseStatusMap.keySet)
val tp0Status = responseStatusMap.get(tp0)
assertTrue(tp0Status.isDefined)
// the response contains high watermark on the leader before it is updated based
// on this fetch request
assertEquals(0, tp0Status.get.highWatermark)
assertEquals(Some(0), tp0Status.get.lastStableOffset)
assertEquals(Errors.NONE, tp0Status.get.error)
assertTrue(tp0Status.get.records.batches.iterator.hasNext)
val tp1Status = responseStatusMap.get(tp1)
assertTrue(tp1Status.isDefined)
assertEquals(0, tp1Status.get.highWatermark)
assertEquals(Some(0), tp0Status.get.lastStableOffset)
assertEquals(Errors.NONE, tp1Status.get.error)
assertFalse(tp1Status.get.records.batches.iterator.hasNext)
}
replicaManager.fetchMessages(
timeout = 1000,
replicaId = 1,
fetchMinBytes = 0,
fetchMaxBytes = Int.MaxValue,
hardMaxBytesLimit = false,
fetchInfos = Seq(
tp0 -> new PartitionData(1, 0, 100000, Optional.empty()),
tp1 -> new PartitionData(1, 0, 100000, Optional.empty())),
topicIds = topicIds,
quota = UnboundedQuota,
responseCallback = fetchCallback,
isolationLevel = IsolationLevel.READ_UNCOMMITTED,
clientMetadata = None
)
val tp0Log = replicaManager.localLog(tp0)
assertTrue(tp0Log.isDefined)
assertEquals(1, tp0Log.get.highWatermark, "hw should be incremented")
val tp1Replica = replicaManager.localLog(tp1)
assertTrue(tp1Replica.isDefined)
assertEquals(0, tp1Replica.get.highWatermark, "hw should not be incremented")
} finally {
replicaManager.shutdown(checkpointHW = false)
}
}
@Test
def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(): Unit = {
verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(new Properties, expectTruncation = false)
}
@Test
def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdateIbp26(): Unit = {
val extraProps = new Properties
extraProps.put(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_2_6_IV0.version)
verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps, expectTruncation = true)
}
  /**
   * If a partition becomes a follower and the leader is unchanged it should check for truncation
   * if the epoch has increased by more than one (which suggests it has missed an update). For
   * IBP version 2.7 onwards, we don't require this since we can truncate at any time based
   * on diverging epochs returned in fetch responses.
   *
   * @param extraProps       broker config overrides (e.g. to pin an older IBP)
   * @param expectTruncation whether the mocked log manager should see a truncation call
   */
  private def verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps: Properties,
                                                                             expectTruncation: Boolean): Unit = {
    val topicPartition = 0
    val topicId = Uuid.randomUuid()
    val followerBrokerId = 0
    val leaderBrokerId = 1
    val controllerId = 0
    val controllerEpoch = 0
    // var: bumped by leaderEpochIncrement below to simulate a missed leader update.
    var leaderEpoch = 1
    val leaderEpochIncrement = 2
    val aliveBrokerIds = Seq[Integer] (followerBrokerId, leaderBrokerId)
    val countDownLatch = new CountDownLatch(1)

    // Prepare the mocked components for the test
    val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time),
      topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch,
      expectTruncation = expectTruncation, localLogOffset = Some(10), extraProps = extraProps, topicId = Some(topicId))

    try {
      // Initialize partition state to follower, with leader = 1, leaderEpoch = 1
      val tp = new TopicPartition(topic, topicPartition)
      val partition = replicaManager.createPartition(tp)
      val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
      partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
      partition.makeFollower(
        leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds),
        offsetCheckpoints,
        None)

      // Make local partition a follower - because epoch increased by more than 1, truncation should
      // trigger even though leader does not change
      leaderEpoch += leaderEpochIncrement
      val leaderAndIsrRequest0 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion,
        controllerId, controllerEpoch, brokerEpoch,
        Seq(leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds)).asJava,
        Collections.singletonMap(topic, topicId),
        Set(new Node(followerBrokerId, "host1", 0),
          new Node(leaderBrokerId, "host2", 1)).asJava).build()
      replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest0,
        (_, followers) => assertEquals(followerBrokerId, followers.head.partitionId))
      // NOTE(review): the latch appears to be released by the mocked fetcher/truncation
      // path configured in prepareReplicaManagerAndLogManager — confirm against the helper.
      assertTrue(countDownLatch.await(1000L, TimeUnit.MILLISECONDS))

      // Truncation should have happened once
      EasyMock.verify(mockLogMgr)
    } finally {
      replicaManager.shutdown()
    }

    TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
  }
@Test
def testReplicaSelector(): Unit = {
val topicPartition = 0
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val aliveBrokerIds = Seq[Integer] (followerBrokerId, leaderBrokerId)
val countDownLatch = new CountDownLatch(1)
// Prepare the mocked components for the test
val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time),
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true)
val tp = new TopicPartition(topic, topicPartition)
val partition = replicaManager.createPartition(tp)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
partition.makeLeader(
leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds),
offsetCheckpoints,
None)
val metadata: ClientMetadata = new DefaultClientMetadata("rack-a", "client-id",
InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default")
// We expect to select the leader, which means we return None
val preferredReadReplica: Option[Int] = replicaManager.findPreferredReadReplica(
partition, metadata, Request.OrdinaryConsumerId, 1L, System.currentTimeMillis)
assertFalse(preferredReadReplica.isDefined)
}
@Test
def testPreferredReplicaAsFollower(): Unit = {
val topicPartition = 0
val topicId = Uuid.randomUuid()
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val countDownLatch = new CountDownLatch(1)
// Prepare the mocked components for the test
val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time),
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId))
try {
val brokerList = Seq[Integer](0, 1).asJava
val tp0 = new TopicPartition(topic, 0)
initializeLogAndTopicId(replicaManager, tp0, topicId)
// Make this replica the follower
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(topic)
.setPartitionIndex(0)
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(1)
.setIsr(brokerList)
.setZkVersion(0)
.setReplicas(brokerList)
.setIsNew(false)).asJava,
Collections.singletonMap(topic, topicId),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ())
val metadata: ClientMetadata = new DefaultClientMetadata("rack-a", "client-id",
InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default")
val consumerResult = fetchAsConsumer(replicaManager, tp0,
new PartitionData(0, 0, 100000, Optional.empty()),
clientMetadata = Some(metadata))
// Fetch from follower succeeds
assertTrue(consumerResult.isFired)
// But only leader will compute preferred replica
assertTrue(consumerResult.assertFired.preferredReadReplica.isEmpty)
} finally {
replicaManager.shutdown()
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
@Test
def testPreferredReplicaAsLeader(): Unit = {
val topicPartition = 0
val topicId = Uuid.randomUuid()
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val countDownLatch = new CountDownLatch(1)
// Prepare the mocked components for the test
val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time),
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId))
try {
val brokerList = Seq[Integer](0, 1).asJava
val tp0 = new TopicPartition(topic, 0)
initializeLogAndTopicId(replicaManager, tp0, topicId)
// Make this replica the follower
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(topic)
.setPartitionIndex(0)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(brokerList)
.setZkVersion(0)
.setReplicas(brokerList)
.setIsNew(false)).asJava,
Collections.singletonMap(topic, topicId),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ())
val metadata: ClientMetadata = new DefaultClientMetadata("rack-a", "client-id",
InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default")
val consumerResult = fetchAsConsumer(replicaManager, tp0,
new PartitionData(0, 0, 100000, Optional.empty()),
clientMetadata = Some(metadata))
// Fetch from follower succeeds
assertTrue(consumerResult.isFired)
// Returns a preferred replica (should just be the leader, which is None)
assertFalse(consumerResult.assertFired.preferredReadReplica.isDefined)
} finally {
replicaManager.shutdown()
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
@Test
def testFollowerFetchWithDefaultSelectorNoForcedHwPropagation(): Unit = {
val topicPartition = 0
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val countDownLatch = new CountDownLatch(1)
val timer = new MockTimer(time)
// Prepare the mocked components for the test
val (replicaManager, _) = prepareReplicaManagerAndLogManager(timer,
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId))
val brokerList = Seq[Integer](0, 1).asJava
val tp0 = new TopicPartition(topic, 0)
initializeLogAndTopicId(replicaManager, tp0, topicId)
// Make this replica the follower
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(topic)
.setPartitionIndex(0)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(brokerList)
.setZkVersion(0)
.setReplicas(brokerList)
.setIsNew(false)).asJava,
Collections.singletonMap(topic, topicId),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ())
val simpleRecords = Seq(new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))
val appendResult = appendRecords(replicaManager, tp0,
MemoryRecords.withRecords(CompressionType.NONE, simpleRecords.toSeq: _*), AppendOrigin.Client)
// Increment the hw in the leader by fetching from the last offset
val fetchOffset = simpleRecords.size
var followerResult = fetchAsFollower(replicaManager, tp0,
new PartitionData(fetchOffset, 0, 100000, Optional.empty()),
clientMetadata = None)
assertTrue(followerResult.isFired)
assertEquals(0, followerResult.assertFired.highWatermark)
assertTrue(appendResult.isFired, "Expected producer request to be acked")
// Fetch from the same offset, no new data is expected and hence the fetch request should
// go to the purgatory
followerResult = fetchAsFollower(replicaManager, tp0,
new PartitionData(fetchOffset, 0, 100000, Optional.empty()),
clientMetadata = None, minBytes = 1000)
assertFalse(followerResult.isFired, "Request completed immediately unexpectedly")
// Complete the request in the purgatory by advancing the clock
timer.advanceClock(1001)
assertTrue(followerResult.isFired)
assertEquals(fetchOffset, followerResult.assertFired.highWatermark)
}
@Test
def testUnknownReplicaSelector(): Unit = {
val topicPartition = 0
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val countDownLatch = new CountDownLatch(1)
val props = new Properties()
props.put(KafkaConfig.ReplicaSelectorClassProp, "non-a-class")
assertThrows(classOf[ClassNotFoundException], () => prepareReplicaManagerAndLogManager(new MockTimer(time),
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true, extraProps = props))
}
// Due to some limitations to EasyMock, we need to create the log so that the Partition.topicId does not call
// LogManager.getLog with a default argument
// TODO: convert tests to using Mockito to avoid this issue.
private def initializeLogAndTopicId(replicaManager: ReplicaManager, topicPartition: TopicPartition, topicId: Uuid): Unit = {
val partition = replicaManager.createPartition(new TopicPartition(topic, 0))
val log = replicaManager.logManager.getOrCreateLog(topicPartition, false, false, Some(topicId))
partition.log = Some(log)
}
@Test
def testDefaultReplicaSelector(): Unit = {
val topicPartition = 0
val followerBrokerId = 0
val leaderBrokerId = 1
val leaderEpoch = 1
val leaderEpochIncrement = 2
val countDownLatch = new CountDownLatch(1)
val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time),
topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId,
leaderBrokerId, countDownLatch, expectTruncation = true)
assertFalse(replicaManager.replicaSelectorOpt.isDefined)
}
@Test
def testFetchFollowerNotAllowedForOlderClients(): Unit = {
val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1))
try {
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(0)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ())
// Fetch from follower, with non-empty ClientMetadata (FetchRequest v11+)
val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "")
var partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.of(0))
var fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, Some(clientMetadata))
assertNotNull(fetchResult.get)
assertEquals(Errors.NONE, fetchResult.get.error)
// Fetch from follower, with empty ClientMetadata (which implies an older version)
partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.of(0))
fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, None)
assertNotNull(fetchResult.get)
assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchResult.get.error)
} finally {
replicaManager.shutdown()
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
@Test
def testFetchRequestRateMetrics(): Unit = {
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
def assertMetricCount(expected: Int): Unit = {
assertEquals(expected, replicaManager.brokerTopicStats.allTopicsStats.totalFetchRequestRate.count)
assertEquals(expected, replicaManager.brokerTopicStats.topicStats(topic).totalFetchRequestRate.count)
}
val partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.empty())
val nonPurgatoryFetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, None, timeout = 0)
assertNotNull(nonPurgatoryFetchResult.get)
assertEquals(Errors.NONE, nonPurgatoryFetchResult.get.error)
assertMetricCount(1)
val purgatoryFetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, None, timeout = 10)
assertNull(purgatoryFetchResult.get)
mockTimer.advanceClock(11)
assertNotNull(purgatoryFetchResult.get)
assertEquals(Errors.NONE, purgatoryFetchResult.get.error)
assertMetricCount(2)
}
@Test
def testBecomeFollowerWhileOldClientFetchInPurgatory(): Unit = {
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
try {
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
val partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.empty())
val fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, None, timeout = 10)
assertNull(fetchResult.get)
// Become a follower and ensure that the delayed fetch returns immediately
val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(2)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ())
assertNotNull(fetchResult.get)
assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchResult.get.error)
} finally {
replicaManager.shutdown()
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
@Test
def testBecomeFollowerWhileNewClientFetchInPurgatory(): Unit = {
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
try {
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "")
val partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.of(1))
val fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, Some(clientMetadata), timeout = 10)
assertNull(fetchResult.get)
// Become a follower and ensure that the delayed fetch returns immediately
val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(2)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ())
assertNotNull(fetchResult.get)
assertEquals(Errors.FENCED_LEADER_EPOCH, fetchResult.get.error)
} finally {
replicaManager.shutdown()
}
TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
@Test
def testFetchFromLeaderAlwaysAllowed(): Unit = {
val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1))
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "")
var partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.of(1))
var fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, Some(clientMetadata))
assertNotNull(fetchResult.get)
assertEquals(Errors.NONE, fetchResult.get.error)
partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.empty())
fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, Some(clientMetadata))
assertNotNull(fetchResult.get)
assertEquals(Errors.NONE, fetchResult.get.error)
}
@Test
def testClearFetchPurgatoryOnStopReplica(): Unit = {
// As part of a reassignment, we may send StopReplica to the old leader.
// In this case, we should ensure that pending purgatory operations are cancelled
// immediately rather than sitting around to timeout.
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val partition0Replicas = Seq[Integer](0, 1).asJava
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(0)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true)).asJava,
topicIds.asJava,
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
val partitionData = new FetchRequest.PartitionData(0L, 0L, 100,
Optional.of(1))
val fetchResult = sendConsumerFetch(replicaManager, tp0, partitionData, None, timeout = 10)
assertNull(fetchResult.get)
Mockito.when(replicaManager.metadataCache.contains(ArgumentMatchers.eq(tp0))).thenReturn(true)
// We have a fetch in purgatory, now receive a stop replica request and
// assert that the fetch returns with a NOT_LEADER error
replicaManager.stopReplicas(2, 0, 0,
mutable.Map(tp0 -> new StopReplicaPartitionState()
.setPartitionIndex(tp0.partition)
.setDeletePartition(true)
.setLeaderEpoch(LeaderAndIsr.EpochDuringDelete)))
assertNotNull(fetchResult.get)
assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchResult.get.error)
}
@Test
def testClearProducePurgatoryOnStopReplica(): Unit = {
  // An acks=-1 produce parks in the produce purgatory; a StopReplica for the
  // partition must complete it immediately with NOT_LEADER_OR_FOLLOWER instead
  // of letting it sit until the timeout fires.
  val timer = new MockTimer(time)
  val rm = setupReplicaManagerWithMockedPurgatories(timer, aliveBrokerIds = Seq(0, 1))
  val partition = new TopicPartition(topic, 0)
  val checkpoints = new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints)
  rm.createPartition(partition).createLogIfNotExists(isNew = false, isFutureReplica = false, checkpoints, None)

  // Make this broker (0) the leader of the partition.
  val replicas = Seq[Integer](0, 1).asJava
  val partitionState = new LeaderAndIsrPartitionState()
    .setTopicName(partition.topic)
    .setPartitionIndex(partition.partition)
    .setControllerEpoch(0)
    .setLeader(0)
    .setLeaderEpoch(1)
    .setIsr(replicas)
    .setZkVersion(0)
    .setReplicas(replicas)
    .setIsNew(true)
  val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
    Seq(partitionState).asJava,
    topicIds.asJava,
    Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()
  rm.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())

  // The produce cannot complete (follower never acks), so the result stays unset.
  val produceResult = sendProducerAppend(rm, partition, 3)
  assertNull(produceResult.get)

  Mockito.when(rm.metadataCache.contains(partition)).thenReturn(true)

  // Receiving StopReplica must clear the produce purgatory right away.
  rm.stopReplicas(2, 0, 0,
    mutable.Map(partition -> new StopReplicaPartitionState()
      .setPartitionIndex(partition.partition)
      .setDeletePartition(true)
      .setLeaderEpoch(LeaderAndIsr.EpochDuringDelete)))

  assertNotNull(produceResult.get)
  assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, produceResult.get.error)
}
/**
 * Appends `numOfRecords` simple records ("0", "1", ...) to `topicPartition`
 * with acks=-1 and a short timeout, returning a holder that is populated with
 * the partition's response once the produce callback fires.
 */
private def sendProducerAppend(
  replicaManager: ReplicaManager,
  topicPartition: TopicPartition,
  numOfRecords: Int
): AtomicReference[PartitionResponse] = {
  val produceResult = new AtomicReference[PartitionResponse]()

  // Only this partition's response is of interest.
  def callback(response: Map[TopicPartition, PartitionResponse]): Unit =
    produceResult.set(response(topicPartition))

  // Build the record batch with a Scala range rather than a Java IntStream.
  val records = MemoryRecords.withRecords(
    CompressionType.NONE,
    (0 until numOfRecords).map(i => new SimpleRecord(i.toString.getBytes)): _*
  )

  replicaManager.appendRecords(
    timeout = 10,
    requiredAcks = -1,
    internalTopicsAllowed = false,
    origin = AppendOrigin.Client,
    entriesPerPartition = Map(topicPartition -> records),
    responseCallback = callback
  )
  produceResult
}
/**
 * Issues a consumer fetch for a single partition and returns a holder that is
 * populated with that partition's data once the fetch callback fires (it stays
 * null while the fetch sits in purgatory).
 */
private def sendConsumerFetch(replicaManager: ReplicaManager,
                              topicPartition: TopicPartition,
                              partitionData: FetchRequest.PartitionData,
                              clientMetadataOpt: Option[ClientMetadata],
                              timeout: Long = 0L): AtomicReference[FetchPartitionData] = {
  val fetchResult = new AtomicReference[FetchPartitionData]()

  // Index the response by partition and record the one we asked for; a missing
  // entry fails the test via the thrown NoSuchElementException.
  def callback(response: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
    val byPartition = response.toMap
    fetchResult.set(byPartition(topicPartition))
  }

  replicaManager.fetchMessages(
    timeout = timeout,
    replicaId = Request.OrdinaryConsumerId,
    fetchMinBytes = 1,
    fetchMaxBytes = 100,
    hardMaxBytesLimit = false,
    fetchInfos = Seq(topicPartition -> partitionData),
    topicIds = topicIds.asJava,
    quota = UnboundedQuota,
    isolationLevel = IsolationLevel.READ_UNCOMMITTED,
    responseCallback = callback,
    clientMetadata = clientMetadataOpt
  )
  fetchResult
}
/**
 * This method assumes that the test using the created ReplicaManager calls
 * ReplicaManager.becomeLeaderOrFollower() once, with a LeaderAndIsrRequest containing
 * leader epoch 'leaderEpochInLeaderAndIsr' for partition 'topicPartition'.
 */
private def prepareReplicaManagerAndLogManager(timer: MockTimer,
topicPartition: Int,
leaderEpochInLeaderAndIsr: Int,
followerBrokerId: Int,
leaderBrokerId: Int,
countDownLatch: CountDownLatch,
expectTruncation: Boolean,
localLogOffset: Option[Long] = None,
offsetFromLeader: Long = 5,
leaderEpochFromLeader: Int = 3,
extraProps: Properties = new Properties(),
topicId: Option[Uuid] = None) : (ReplicaManager, LogManager) = {
// Broker 0 config with an isolated relative log dir; extraProps may override defaults.
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath)
props.asScala ++= extraProps.asScala
val config = KafkaConfig.fromProps(props)
val logConfig = LogConfig()
val logDir = new File(new File(config.logDirs.head), s"$topic-$topicPartition")
Files.createDirectories(logDir.toPath)
val mockScheduler = new MockScheduler(time)
val mockBrokerTopicStats = new BrokerTopicStats
val mockLogDirFailureChannel = new LogDirFailureChannel(config.logDirs.size)
val tp = new TopicPartition(topic, topicPartition)
val maxProducerIdExpirationMs = 30000
// Build a real on-disk UnifiedLog, then override its epoch/end-offset accessors
// below so the follower observes the values dictated by the test parameters.
val segments = new LogSegments(tp)
val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, tp, mockLogDirFailureChannel, logConfig.recordVersion, "")
val producerStateManager = new ProducerStateManager(tp, logDir, maxProducerIdExpirationMs, time)
val offsets = LogLoader.load(LoadLogParams(
logDir,
tp,
logConfig,
mockScheduler,
time,
mockLogDirFailureChannel,
hadCleanShutdown = true,
segments,
0L,
0L,
maxProducerIdExpirationMs,
leaderEpochCache,
producerStateManager))
val localLog = new LocalLog(logDir, logConfig, segments, offsets.recoveryPoint,
offsets.nextOffsetMetadata, mockScheduler, time, tp, mockLogDirFailureChannel)
val mockLog = new UnifiedLog(
logStartOffset = offsets.logStartOffset,
localLog = localLog,
brokerTopicStats = mockBrokerTopicStats,
producerIdExpirationCheckIntervalMs = 30000,
leaderEpochCache = leaderEpochCache,
producerStateManager = producerStateManager,
_topicId = topicId,
keepPartitionMetadataFile = true) {
// Verifies the follower only ever asks about the leader's epoch; when
// localLogOffset is given it is reported as this log's end offset for that epoch.
override def endOffsetForEpoch(leaderEpoch: Int): Option[OffsetAndEpoch] = {
assertEquals(leaderEpoch, leaderEpochFromLeader)
localLogOffset.map { logOffset =>
Some(OffsetAndEpoch(logOffset, leaderEpochFromLeader))
}.getOrElse(super.endOffsetForEpoch(leaderEpoch))
}
override def latestEpoch: Option[Int] = Some(leaderEpochFromLeader)
override def logEndOffsetMetadata: LogOffsetMetadata =
localLogOffset.map(LogOffsetMetadata(_)).getOrElse(super.logEndOffsetMetadata)
override def logEndOffset: Long = localLogOffset.getOrElse(super.logEndOffset)
}
// Expect to call LogManager.truncateTo exactly once
val topicPartitionObj = new TopicPartition(topic, topicPartition)
val mockLogMgr: LogManager = EasyMock.createMock(classOf[LogManager])
EasyMock.expect(mockLogMgr.liveLogDirs).andReturn(config.logDirs.map(new File(_).getAbsoluteFile)).anyTimes
EasyMock.expect(mockLogMgr.getOrCreateLog(EasyMock.eq(topicPartitionObj),
isNew = EasyMock.eq(false), isFuture = EasyMock.eq(false), EasyMock.anyObject())).andReturn(mockLog).anyTimes
if (expectTruncation) {
EasyMock.expect(mockLogMgr.truncateTo(Map(topicPartitionObj -> offsetFromLeader),
isFuture = false)).once
}
EasyMock.expect(mockLogMgr.initializingLog(topicPartitionObj)).anyTimes
EasyMock.expect(mockLogMgr.getLog(topicPartitionObj, isFuture = true)).andReturn(None)
EasyMock.expect(mockLogMgr.finishedInitializingLog(
EasyMock.eq(topicPartitionObj), EasyMock.anyObject())).anyTimes
EasyMock.replay(mockLogMgr)
// Metadata cache knows both brokers and maps the partition's replicas to endpoints.
val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId)
val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId))
val metadataCache: MetadataCache = Mockito.mock(classOf[MetadataCache])
mockGetAliveBrokerFunctions(metadataCache, aliveBrokers)
Mockito.when(metadataCache.getPartitionReplicaEndpoints(
ArgumentMatchers.any[TopicPartition], ArgumentMatchers.any[ListenerName])).
thenReturn(Map(leaderBrokerId -> new Node(leaderBrokerId, "host1", 9092, "rack-a"),
followerBrokerId -> new Node(followerBrokerId, "host2", 9092, "rack-b")).toMap)
// Purgatories are driven by the supplied timer with the reaper disabled, so the
// test controls expiration deterministically.
val mockProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", timer, reaperEnabled = false)
val mockFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", timer, reaperEnabled = false)
val mockDeleteRecordsPurgatory = new DelayedOperationPurgatory[DelayedDeleteRecords](
purgatoryName = "DeleteRecords", timer, reaperEnabled = false)
val mockElectLeaderPurgatory = new DelayedOperationPurgatory[DelayedElectLeader](
purgatoryName = "ElectLeader", timer, reaperEnabled = false)
// Mock network client to show leader offset of 5
val blockingSend = new ReplicaFetcherMockBlockingSend(
Map(topicPartitionObj -> new EpochEndOffset()
.setPartition(topicPartitionObj.partition)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(leaderEpochFromLeader)
.setEndOffset(offsetFromLeader)).asJava,
BrokerEndPoint(1, "host1" ,1), time)
// ReplicaManager whose fetcher thread runs exactly one iteration against the
// mocked leader, then shuts itself down and counts down `countDownLatch`.
val replicaManager = new ReplicaManager(config, metrics, time, None, mockScheduler, mockLogMgr,
new AtomicBoolean(false), quotaManager, mockBrokerTopicStats,
metadataCache, mockLogDirFailureChannel, mockProducePurgatory, mockFetchPurgatory,
mockDeleteRecordsPurgatory, mockElectLeaderPurgatory, Option(this.getClass.getName),
alterIsrManager) {
override protected def createReplicaFetcherManager(metrics: Metrics,
time: Time,
threadNamePrefix: Option[String],
replicationQuotaManager: ReplicationQuotaManager): ReplicaFetcherManager = {
new ReplicaFetcherManager(config, this, metrics, time, threadNamePrefix, replicationQuotaManager) {
override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = {
new ReplicaFetcherThread(s"ReplicaFetcherThread-$fetcherId", fetcherId,
sourceBroker, config, failedPartitions, replicaManager, metrics, time, quotaManager.follower, Some(blockingSend)) {
override def doWork() = {
// In case the thread starts before the partition is added by AbstractFetcherManager,
// add it here (it's a no-op if already added)
val initialOffset = InitialFetchState(
leader = new BrokerEndPoint(0, "localhost", 9092),
initOffset = 0L, currentLeaderEpoch = leaderEpochInLeaderAndIsr)
addPartitions(Map(new TopicPartition(topic, topicPartition) -> initialOffset))
super.doWork()
// Shut the thread down after one iteration to avoid double-counting truncations
initiateShutdown()
countDownLatch.countDown()
}
}
}
}
}
}
(replicaManager, mockLogMgr)
}
/**
 * Builds a LeaderAndIsrPartitionState for `topicPartition` where the ISR and
 * the replica set are both the given alive brokers.
 */
private def leaderAndIsrPartitionState(topicPartition: TopicPartition,
                                       leaderEpoch: Int,
                                       leaderBrokerId: Int,
                                       aliveBrokerIds: Seq[Integer],
                                       isNew: Boolean = false): LeaderAndIsrPartitionState = {
  val brokerList = aliveBrokerIds.asJava
  new LeaderAndIsrPartitionState()
    .setTopicName(topic)
    .setPartitionIndex(topicPartition.partition)
    .setControllerEpoch(controllerEpoch)
    .setLeader(leaderBrokerId)
    .setLeaderEpoch(leaderEpoch)
    .setIsr(brokerList)
    .setZkVersion(zkVersion)
    .setReplicas(brokerList)
    .setIsNew(isNew)
}
/**
 * Captures a single asynchronous callback value. A listener registered via
 * `onFire` is invoked with the value, even when registration happens after the
 * value has already been fired.
 */
private class CallbackResult[T] {
  private var result: Option[T] = None
  private var listener: Option[T => Unit] = None

  // True once `fire` has delivered a value.
  def isFired: Boolean = result.isDefined

  // Returns the fired value, failing the test if nothing has fired yet.
  def assertFired: T = {
    assertTrue(isFired, "Callback has not been fired")
    result.get
  }

  def fire(value: T): Unit = {
    result = Some(value)
    listener.foreach(f => f(value))
  }

  def onFire(fun: T => Unit): CallbackResult[T] = {
    listener = Some(fun)
    // Replay the value for late registration.
    if (isFired) fire(result.get)
    this
  }
}
/**
 * Appends `records` to `partition` and returns a CallbackResult that fires with
 * this partition's PartitionResponse when the append callback runs.
 */
private def appendRecords(replicaManager: ReplicaManager,
                          partition: TopicPartition,
                          records: MemoryRecords,
                          origin: AppendOrigin = AppendOrigin.Client,
                          requiredAcks: Short = -1): CallbackResult[PartitionResponse] = {
  val result = new CallbackResult[PartitionResponse]()

  // The response map must contain the partition we appended to.
  def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = {
    val maybeResponse = responses.get(partition)
    assertTrue(maybeResponse.isDefined)
    maybeResponse.foreach(result.fire)
  }

  replicaManager.appendRecords(
    timeout = 1000,
    requiredAcks = requiredAcks,
    internalTopicsAllowed = false,
    origin = origin,
    entriesPerPartition = Map(partition -> records),
    responseCallback = appendCallback)
  result
}
/** Fetches as an ordinary consumer (replicaId = -1). */
private def fetchAsConsumer(replicaManager: ReplicaManager,
                            partition: TopicPartition,
                            partitionData: PartitionData,
                            minBytes: Int = 0,
                            isolationLevel: IsolationLevel = IsolationLevel.READ_UNCOMMITTED,
                            clientMetadata: Option[ClientMetadata] = None): CallbackResult[FetchPartitionData] = {
  fetchMessages(replicaManager, replicaId = -1, partition = partition, partitionData = partitionData,
    minBytes = minBytes, isolationLevel = isolationLevel, clientMetadata = clientMetadata)
}
/** Fetches as the follower broker (replicaId = 1). */
private def fetchAsFollower(replicaManager: ReplicaManager,
                            partition: TopicPartition,
                            partitionData: PartitionData,
                            minBytes: Int = 0,
                            isolationLevel: IsolationLevel = IsolationLevel.READ_UNCOMMITTED,
                            clientMetadata: Option[ClientMetadata] = None): CallbackResult[FetchPartitionData] = {
  fetchMessages(replicaManager, replicaId = 1, partition = partition, partitionData = partitionData,
    minBytes = minBytes, isolationLevel = isolationLevel, clientMetadata = clientMetadata)
}
/**
 * Issues a single-partition fetch on behalf of `replicaId` and returns a
 * CallbackResult that fires with the partition's data. The callback asserts the
 * response contains exactly the requested partition.
 */
private def fetchMessages(replicaManager: ReplicaManager,
                          replicaId: Int,
                          partition: TopicPartition,
                          partitionData: PartitionData,
                          minBytes: Int,
                          isolationLevel: IsolationLevel,
                          clientMetadata: Option[ClientMetadata]): CallbackResult[FetchPartitionData] = {
  val result = new CallbackResult[FetchPartitionData]()

  def fetchCallback(responseStatus: Seq[(TopicPartition, FetchPartitionData)]) = {
    assertEquals(1, responseStatus.size)
    responseStatus.headOption.foreach { case (tp, fetchData) =>
      assertEquals(partition, tp)
      result.fire(fetchData)
    }
  }

  replicaManager.fetchMessages(
    timeout = 1000,
    replicaId = replicaId,
    fetchMinBytes = minBytes,
    fetchMaxBytes = Int.MaxValue,
    hardMaxBytesLimit = false,
    fetchInfos = Seq(partition -> partitionData),
    topicIds = topicIds.asJava,
    quota = UnboundedQuota,
    responseCallback = fetchCallback,
    isolationLevel = isolationLevel,
    clientMetadata = clientMetadata
  )
  result
}
private def setupReplicaManagerWithMockedPurgatories(
timer: MockTimer,
brokerId: Int = 0,
aliveBrokerIds: Seq[Int] = Seq(0, 1),
propsModifier: Properties => Unit = _ => {}
): ReplicaManager = {
// Builds a ReplicaManager whose delayed-operation purgatories are driven by the
// supplied MockTimer (reaper disabled), so tests control expiration explicitly.
val props = TestUtils.createBrokerConfig(brokerId, TestUtils.MockZkConnect)
// Two log dirs, so tests can exercise alter-log-dir / future-replica paths.
props.put("log.dirs", TestUtils.tempRelativeDir("data").getAbsolutePath + "," + TestUtils.tempRelativeDir("data2").getAbsolutePath)
propsModifier.apply(props)
val config = KafkaConfig.fromProps(props)
val logProps = new Properties()
val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), LogConfig(logProps))
val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId))
// Metadata cache is mocked to resolve the class-level topicIds/topicNames maps.
val metadataCache: MetadataCache = Mockito.mock(classOf[MetadataCache])
Mockito.when(metadataCache.topicIdInfo()).thenReturn((topicIds.asJava, topicNames.asJava))
Mockito.when(metadataCache.topicNamesToIds()).thenReturn(topicIds.asJava)
Mockito.when(metadataCache.topicIdsToNames()).thenReturn(topicNames.asJava)
mockGetAliveBrokerFunctions(metadataCache, aliveBrokers)
val mockProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", timer, reaperEnabled = false)
val mockFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", timer, reaperEnabled = false)
val mockDeleteRecordsPurgatory = new DelayedOperationPurgatory[DelayedDeleteRecords](
purgatoryName = "DeleteRecords", timer, reaperEnabled = false)
val mockDelayedElectLeaderPurgatory = new DelayedOperationPurgatory[DelayedElectLeader](
purgatoryName = "DelayedElectLeader", timer, reaperEnabled = false)
new ReplicaManager(config, metrics, time, None, scheduler, mockLogMgr,
new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
metadataCache, new LogDirFailureChannel(config.logDirs.size), mockProducePurgatory, mockFetchPurgatory,
mockDeleteRecordsPurgatory, mockDelayedElectLeaderPurgatory, Option(this.getClass.getName),
alterIsrManager)
}
@Test
def testOldLeaderLosesMetricsWhenReassignPartitions(): Unit = {
// Broker 1's BrokerTopicStats is mocked to require exactly one
// removeOldLeaderMetrics(topic) call when broker 1 is demoted from leader.
val controllerEpoch = 0
val leaderEpoch = 0
val leaderEpochIncrement = 1
val correlationId = 0
val controllerId = 0
val mockTopicStats1: BrokerTopicStats = EasyMock.mock(classOf[BrokerTopicStats])
val (rm0, rm1) = prepareDifferentReplicaManagers(EasyMock.mock(classOf[BrokerTopicStats]), mockTopicStats1)
EasyMock.expect(mockTopicStats1.removeOldLeaderMetrics(topic)).andVoid.once
EasyMock.replay(mockTopicStats1)
try {
// make broker 0 the leader of partition 0 and
// make broker 1 the leader of partition 1
val tp0 = new TopicPartition(topic, 0)
val tp1 = new TopicPartition(topic, 1)
val partition0Replicas = Seq[Integer](0, 1).asJava
val partition1Replicas = Seq[Integer](1, 0).asJava
val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava
val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion,
controllerId, 0, brokerEpoch,
Seq(
new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(0)
.setLeaderEpoch(leaderEpoch)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true),
new LeaderAndIsrPartitionState()
.setTopicName(tp1.topic)
.setPartitionIndex(tp1.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(1)
.setLeaderEpoch(leaderEpoch)
.setIsr(partition1Replicas)
.setZkVersion(0)
.setReplicas(partition1Replicas)
.setIsNew(true)
).asJava,
topicIds,
Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build()
rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ())
rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ())
// make broker 0 the leader of partition 1 so broker 1 loses its leadership position
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, controllerId,
controllerEpoch, brokerEpoch,
Seq(
new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(0)
.setLeaderEpoch(leaderEpoch + leaderEpochIncrement)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true),
new LeaderAndIsrPartitionState()
.setTopicName(tp1.topic)
.setPartitionIndex(tp1.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(0)
.setLeaderEpoch(leaderEpoch + leaderEpochIncrement)
.setIsr(partition1Replicas)
.setZkVersion(0)
.setReplicas(partition1Replicas)
.setIsNew(true)
).asJava,
topicIds,
Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build()
rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ())
rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ())
} finally {
rm0.shutdown()
rm1.shutdown()
}
// verify that broker 1 did remove its metrics when no longer being the leader of partition 1
EasyMock.verify(mockTopicStats1)
}
@Test
def testOldFollowerLosesMetricsWhenReassignPartitions(): Unit = {
// Broker 1's BrokerTopicStats is mocked to require exactly one
// removeOldLeaderMetrics(topic) and one removeOldFollowerMetrics(topic) call
// across the reassignment below.
val controllerEpoch = 0
val leaderEpoch = 0
val leaderEpochIncrement = 1
val correlationId = 0
val controllerId = 0
val mockTopicStats1: BrokerTopicStats = EasyMock.mock(classOf[BrokerTopicStats])
val (rm0, rm1) = prepareDifferentReplicaManagers(EasyMock.mock(classOf[BrokerTopicStats]), mockTopicStats1)
EasyMock.expect(mockTopicStats1.removeOldLeaderMetrics(topic)).andVoid.once
EasyMock.expect(mockTopicStats1.removeOldFollowerMetrics(topic)).andVoid.once
EasyMock.replay(mockTopicStats1)
try {
// make broker 1 the leader of both partitions (leader = 1 for tp0 and tp1),
// so broker 0 follows both
val tp0 = new TopicPartition(topic, 0)
val tp1 = new TopicPartition(topic, 1)
val partition0Replicas = Seq[Integer](1, 0).asJava
val partition1Replicas = Seq[Integer](1, 0).asJava
val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava
val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion,
controllerId, 0, brokerEpoch,
Seq(
new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(1)
.setLeaderEpoch(leaderEpoch)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true),
new LeaderAndIsrPartitionState()
.setTopicName(tp1.topic)
.setPartitionIndex(tp1.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(1)
.setLeaderEpoch(leaderEpoch)
.setIsr(partition1Replicas)
.setZkVersion(0)
.setReplicas(partition1Replicas)
.setIsNew(true)
).asJava,
topicIds,
Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build()
rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ())
rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ())
// make broker 0 the leader of both partitions so broker 1 loses its leadership
val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, controllerId,
controllerEpoch, brokerEpoch,
Seq(
new LeaderAndIsrPartitionState()
.setTopicName(tp0.topic)
.setPartitionIndex(tp0.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(0)
.setLeaderEpoch(leaderEpoch + leaderEpochIncrement)
.setIsr(partition0Replicas)
.setZkVersion(0)
.setReplicas(partition0Replicas)
.setIsNew(true),
new LeaderAndIsrPartitionState()
.setTopicName(tp1.topic)
.setPartitionIndex(tp1.partition)
.setControllerEpoch(controllerEpoch)
.setLeader(0)
.setLeaderEpoch(leaderEpoch + leaderEpochIncrement)
.setIsr(partition1Replicas)
.setZkVersion(0)
.setReplicas(partition1Replicas)
.setIsNew(true)
).asJava,
topicIds,
Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build()
rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ())
rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ())
} finally {
rm0.shutdown()
rm1.shutdown()
}
// verify that broker 1 removed its old leader and follower metrics for the topic
EasyMock.verify(mockTopicStats1)
}
/**
 * Creates two ReplicaManagers — one for broker 0 and one for broker 1 — backed
 * by real log managers, each wired with the supplied BrokerTopicStats so tests
 * can verify metric-removal expectations on either broker.
 */
private def prepareDifferentReplicaManagers(brokerTopicStats1: BrokerTopicStats,
                                            brokerTopicStats2: BrokerTopicStats): (ReplicaManager, ReplicaManager) = {
  val props0 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
  val props1 = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect)
  // Fix: the previous keys "log0.dir"/"log1.dir" are not recognized broker
  // configs, so those overrides were silently ignored dead statements. Use the
  // real "log.dir" property so each broker gets its own temporary log directory
  // as intended (TestUtils.tempRelativeDir yields a fresh dir per call).
  props0.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath)
  props1.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath)
  val config0 = KafkaConfig.fromProps(props0)
  val config1 = KafkaConfig.fromProps(props1)
  val mockLogMgr0 = TestUtils.createLogManager(config0.logDirs.map(new File(_)))
  val mockLogMgr1 = TestUtils.createLogManager(config1.logDirs.map(new File(_)))
  val metadataCache0: MetadataCache = Mockito.mock(classOf[MetadataCache])
  val metadataCache1: MetadataCache = Mockito.mock(classOf[MetadataCache])
  val aliveBrokers = Seq(new Node(0, "host0", 0), new Node(1, "host1", 1))
  mockGetAliveBrokerFunctions(metadataCache0, aliveBrokers)
  mockGetAliveBrokerFunctions(metadataCache1, aliveBrokers)
  // each replica manager is for a broker
  val rm0 = new ReplicaManager(config0, metrics, time, None, new MockScheduler(time), mockLogMgr0,
    new AtomicBoolean(false), quotaManager,
    brokerTopicStats1, metadataCache0, new LogDirFailureChannel(config0.logDirs.size), alterIsrManager)
  val rm1 = new ReplicaManager(config1, metrics, time, None, new MockScheduler(time), mockLogMgr1,
    new AtomicBoolean(false), quotaManager,
    brokerTopicStats2, metadataCache1, new LogDirFailureChannel(config1.logDirs.size), alterIsrManager)
  (rm0, rm1)
}
@Test
def testStopReplicaWithStaleControllerEpoch(): Unit = {
  // The broker learns controller epoch 10 from the LeaderAndIsr request; a
  // StopReplica carrying the older epoch 0 must be rejected wholesale.
  val timer = new MockTimer(time)
  val rm = setupReplicaManagerWithMockedPurgatories(timer, aliveBrokerIds = Seq(0, 1))
  val tp0 = new TopicPartition(topic, 0)
  val checkpoints = new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints)
  rm.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, checkpoints, None)

  val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 10, brokerEpoch,
    Seq(leaderAndIsrPartitionState(tp0, 1, 0, Seq(0, 1), true)).asJava,
    Collections.singletonMap(topic, Uuid.randomUuid()),
    Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava
  ).build()
  rm.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())

  val stopReplicaStates = Map(tp0 -> new StopReplicaPartitionState()
    .setPartitionIndex(tp0.partition)
    .setLeaderEpoch(1)
    .setDeletePartition(false)
  )
  val (_, error) = rm.stopReplicas(1, 0, 0, stopReplicaStates)
  assertEquals(Errors.STALE_CONTROLLER_EPOCH, error)
}
@Test
def testStopReplicaWithOfflinePartition(): Unit = {
  // A StopReplica targeting a partition that was marked offline should surface
  // KAFKA_STORAGE_ERROR for that partition while the request itself succeeds.
  val timer = new MockTimer(time)
  val rm = setupReplicaManagerWithMockedPurgatories(timer, aliveBrokerIds = Seq(0, 1))
  val tp0 = new TopicPartition(topic, 0)
  val checkpoints = new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints)
  rm.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, checkpoints, None)

  val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
    Seq(leaderAndIsrPartitionState(tp0, 1, 0, Seq(0, 1), true)).asJava,
    Collections.singletonMap(topic, Uuid.randomUuid()),
    Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava
  ).build()
  rm.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
  rm.markPartitionOffline(tp0)

  val stopReplicaStates = Map(tp0 -> new StopReplicaPartitionState()
    .setPartitionIndex(tp0.partition)
    .setLeaderEpoch(1)
    .setDeletePartition(false)
  )
  val (result, error) = rm.stopReplicas(1, 0, 0, stopReplicaStates)
  assertEquals(Errors.NONE, error)
  assertEquals(Map(tp0 -> Errors.KAFKA_STORAGE_ERROR), result)
}
@Test
def testStopReplicaWithInexistentPartition(): Unit = {
  // No delete, no storage error expected.
  testStopReplicaWithInexistentPartition(deletePartitions = false, throwIOException = false)
}
@Test
def testStopReplicaWithInexistentPartitionAndPartitionsDelete(): Unit = {
  // Delete requested, no storage error injected.
  testStopReplicaWithInexistentPartition(deletePartitions = true, throwIOException = false)
}
@Test
def testStopReplicaWithInexistentPartitionAndPartitionsDeleteAndIOException(): Unit = {
  // Delete requested and an IO failure injected on the log directory.
  testStopReplicaWithInexistentPartition(deletePartitions = true, throwIOException = true)
}
private def testStopReplicaWithInexistentPartition(deletePartitions: Boolean, throwIOException: Boolean): Unit = {
// Exercises StopReplica for a partition the ReplicaManager itself does not host
// (only the LogManager has a log for it); the request must succeed either way.
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
val tp0 = new TopicPartition(topic, 0)
val log = replicaManager.logManager.getOrCreateLog(tp0, true, topicId = None)
if (throwIOException) {
// Delete the underlying directory to trigger an KafkaStorageException
val dir = log.dir.getParentFile
Utils.delete(dir)
// Replace the directory with a plain file so directory operations fail with IO errors.
dir.createNewFile()
}
val partitionStates = Map(tp0 -> new StopReplicaPartitionState()
.setPartitionIndex(tp0.partition)
.setLeaderEpoch(1)
.setDeletePartition(deletePartitions)
)
val (result, error) = replicaManager.stopReplicas(1, 0, 0, partitionStates)
assertEquals(Errors.NONE, error)
if (throwIOException && deletePartitions) {
// Deletion hit the storage error, but the log is still dropped from the manager.
assertEquals(Map(tp0 -> Errors.KAFKA_STORAGE_ERROR), result)
assertTrue(replicaManager.logManager.getLog(tp0).isEmpty)
} else if (deletePartitions) {
assertEquals(Map(tp0 -> Errors.NONE), result)
assertTrue(replicaManager.logManager.getLog(tp0).isEmpty)
} else {
// Without delete, the log remains registered in the log manager.
assertEquals(Map(tp0 -> Errors.NONE), result)
assertTrue(replicaManager.logManager.getLog(tp0).isDefined)
}
}
@Test
def testStopReplicaWithExistingPartitionAndNewerLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 2, deletePartition = false,
    throwIOException = false, expectedOutput = Errors.NONE)
}
@Test
def testStopReplicaWithExistingPartitionAndOlderLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 0, deletePartition = false,
    throwIOException = false, expectedOutput = Errors.FENCED_LEADER_EPOCH)
}
@Test
def testStopReplicaWithExistingPartitionAndEqualLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 1, deletePartition = false,
    throwIOException = false, expectedOutput = Errors.FENCED_LEADER_EPOCH)
}
@Test
def testStopReplicaWithExistingPartitionAndDeleteSentinel(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = LeaderAndIsr.EpochDuringDelete,
    deletePartition = false, throwIOException = false, expectedOutput = Errors.NONE)
}
@Test
def testStopReplicaWithExistingPartitionAndLeaderEpochNotProvided(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = LeaderAndIsr.NoEpoch,
    deletePartition = false, throwIOException = false, expectedOutput = Errors.NONE)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndNewerLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 2, deletePartition = true,
    throwIOException = false, expectedOutput = Errors.NONE)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndNewerLeaderEpochAndIOException(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 2, deletePartition = true,
    throwIOException = true, expectedOutput = Errors.KAFKA_STORAGE_ERROR)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndOlderLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 0, deletePartition = true,
    throwIOException = false, expectedOutput = Errors.FENCED_LEADER_EPOCH)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndEqualLeaderEpoch(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = 1, deletePartition = true,
    throwIOException = false, expectedOutput = Errors.FENCED_LEADER_EPOCH)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndDeleteSentinel(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = LeaderAndIsr.EpochDuringDelete,
    deletePartition = true, throwIOException = false, expectedOutput = Errors.NONE)
}
@Test
def testStopReplicaWithDeletePartitionAndExistingPartitionAndLeaderEpochNotProvided(): Unit = {
  testStopReplicaWithExistingPartition(leaderEpoch = LeaderAndIsr.NoEpoch,
    deletePartition = true, throwIOException = false, expectedOutput = Errors.NONE)
}
private def testStopReplicaWithExistingPartition(leaderEpoch: Int,
deletePartition: Boolean,
throwIOException: Boolean,
expectedOutput: Errors): Unit = {
// Sets up a hosted partition (local leader epoch 1), appends data, checkpoints
// recovery/log-start offsets, then issues StopReplica with the given epoch and
// delete flag, asserting the per-partition error and checkpoint cleanup.
val mockTimer = new MockTimer(time)
val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1))
val tp0 = new TopicPartition(topic, 0)
val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints)
val partition = replicaManager.createPartition(tp0)
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val logDirFailureChannel = new LogDirFailureChannel(replicaManager.config.logDirs.size)
val logDir = partition.log.get.parentDirFile
// Helpers that re-read the checkpoint files from disk after each step.
def readRecoveryPointCheckpoint(): Map[TopicPartition, Long] = {
new OffsetCheckpointFile(new File(logDir, LogManager.RecoveryPointCheckpointFile),
logDirFailureChannel).read()
}
def readLogStartOffsetCheckpoint(): Map[TopicPartition, Long] = {
new OffsetCheckpointFile(new File(logDir, LogManager.LogStartOffsetCheckpointFile),
logDirFailureChannel).read()
}
val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
Seq(leaderAndIsrPartitionState(tp0, 1, 0, Seq(0, 1), true)).asJava,
Collections.singletonMap(tp0.topic(), Uuid.randomUuid()),
Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava
).build()
replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ())
// Append two records, advance the high watermark and log start offset, and
// checkpoint so both files record offset 1 for tp0.
val batch = TestUtils.records(records = List(
new SimpleRecord(10, "k1".getBytes, "v1".getBytes),
new SimpleRecord(11, "k2".getBytes, "v2".getBytes)))
partition.appendRecordsToLeader(batch, AppendOrigin.Client, requiredAcks = 0, RequestLocal.withThreadConfinedCaching)
partition.log.get.updateHighWatermark(2L)
partition.log.get.maybeIncrementLogStartOffset(1L, LeaderOffsetIncremented)
replicaManager.logManager.checkpointLogRecoveryOffsets()
replicaManager.logManager.checkpointLogStartOffsets()
assertEquals(Some(1L), readRecoveryPointCheckpoint().get(tp0))
assertEquals(Some(1L), readLogStartOffsetCheckpoint().get(tp0))
if (throwIOException) {
// Delete the underlying directory to trigger an KafkaStorageException
val dir = partition.log.get.dir
Utils.delete(dir)
// Replace the directory with a plain file so directory operations fail with IO errors.
dir.createNewFile()
}
val partitionStates = Map(tp0 -> new StopReplicaPartitionState()
.setPartitionIndex(tp0.partition)
.setLeaderEpoch(leaderEpoch)
.setDeletePartition(deletePartition)
)
val (result, error) = replicaManager.stopReplicas(1, 0, 0, partitionStates)
assertEquals(Errors.NONE, error)
assertEquals(Map(tp0 -> expectedOutput), result)
if (expectedOutput == Errors.NONE && deletePartition) {
// A successful delete removes the hosted partition and purges it from both checkpoints.
assertEquals(HostedPartition.None, replicaManager.getPartition(tp0))
assertFalse(readRecoveryPointCheckpoint().contains(tp0))
assertFalse(readLogStartOffsetCheckpoint().contains(tp0))
}
}
@Test
def testReplicaNotAvailable(): Unit = {
  // alterReplicaLogDirs should map a NOT_LEADER_OR_FOLLOWER thrown by
  // getPartitionOrException to REPLICA_NOT_AVAILABLE for the caller.
  def createReplicaManager(): ReplicaManager = {
    val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect)
    val config = KafkaConfig.fromProps(props)
    val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)))
    new ReplicaManager(config, metrics, time, None, new MockScheduler(time), mockLogMgr,
      new AtomicBoolean(false), quotaManager, new BrokerTopicStats,
      MetadataCache.zkMetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size), alterIsrManager) {
      // Force the error path regardless of partition state.
      override def getPartitionOrException(topicPartition: TopicPartition): Partition = {
        throw Errors.NOT_LEADER_OR_FOLLOWER.exception()
      }
    }
  }

  val replicaManager = createReplicaManager()
  try {
    val tp = new TopicPartition(topic, 0)
    val targetDir = replicaManager.logManager.liveLogDirs.head.getAbsolutePath
    val errors = replicaManager.alterReplicaLogDirs(Map(tp -> targetDir))
    assertEquals(Errors.REPLICA_NOT_AVAILABLE, errors(tp))
  } finally {
    replicaManager.shutdown(false)
  }
}
/**
 * After a LeaderAndIsr request that carries a topic ID, the local log's
 * partition metadata file must exist and contain that topic ID (version 0).
 */
@Test
def testPartitionMetadataFile(): Unit = {
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
  try {
    val brokerList = Seq[Integer](0, 1).asJava
    val topicPartition = new TopicPartition(topic, 0)
    val topicIds = Collections.singletonMap(topic, Uuid.randomUuid())
    val topicNames = topicIds.asScala.map(_.swap).asJava

    // Builds a make-leader request for partition 0 at the given leader epoch
    // with the supplied topic-ID map.
    def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest =
      new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(epoch)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()

    val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ())
    assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition))
    assertFalse(replicaManager.localLog(topicPartition).isEmpty)
    val id = topicIds.get(topicPartition.topic())
    val log = replicaManager.localLog(topicPartition).get
    // Becoming leader with a topic ID must have written the metadata file.
    assertTrue(log.partitionMetadataFile.exists())
    val partitionMetadata = log.partitionMetadataFile.read()

    // Current version of PartitionMetadataFile is 0.
    assertEquals(0, partitionMetadata.version)
    assertEquals(id, partitionMetadata.topicId)
  } finally replicaManager.shutdown(checkpointHW = false)
}
/**
 * If a log already exists without a topic ID, a subsequent LeaderAndIsr
 * request carrying an ID must create the partition metadata file and
 * associate the ID with the existing log.
 */
@Test
def testPartitionMetadataFileCreatedWithExistingLog(): Unit = {
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
  try {
    val brokerList = Seq[Integer](0, 1).asJava
    val topicPartition = new TopicPartition(topic, 0)

    // Pre-create the log with no topic ID: no metadata file yet.
    replicaManager.logManager.getOrCreateLog(topicPartition, isNew = true, topicId = None)
    assertTrue(replicaManager.getLog(topicPartition).isDefined)
    var log = replicaManager.getLog(topicPartition).get
    assertEquals(None, log.topicId)
    assertFalse(log.partitionMetadataFile.exists())

    val topicIds = Collections.singletonMap(topic, Uuid.randomUuid())
    val topicNames = topicIds.asScala.map(_.swap).asJava

    def leaderAndIsrRequest(epoch: Int): LeaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
      Seq(new LeaderAndIsrPartitionState()
        .setTopicName(topic)
        .setPartitionIndex(0)
        .setControllerEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(epoch)
        .setIsr(brokerList)
        .setZkVersion(0)
        .setReplicas(brokerList)
        .setIsNew(true)).asJava,
      topicIds,
      Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()

    val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0), (_, _) => ())
    assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition))
    assertFalse(replicaManager.localLog(topicPartition).isEmpty)
    val id = topicIds.get(topicPartition.topic())
    // Re-read the log: it must now carry the topic ID and metadata file.
    log = replicaManager.localLog(topicPartition).get
    assertTrue(log.partitionMetadataFile.exists())
    val partitionMetadata = log.partitionMetadataFile.read()

    // Current version of PartitionMetadataFile is 0.
    assertEquals(0, partitionMetadata.version)
    assertEquals(id, partitionMetadata.topicId)
  } finally replicaManager.shutdown(checkpointHW = false)
}
/**
 * A LeaderAndIsr request without topic IDs (old protocol version) leaves the
 * log without an ID; a later request with an ID must then write the metadata
 * file. Exercised twice: once with the same leader epoch and once with a
 * bumped epoch (partition 1).
 * NOTE(review): `topicIds`, `topicNames` and `topicId` here refer to
 * class-level fixtures defined earlier in this file.
 */
@Test
def testPartitionMetadataFileCreatedAfterPreviousRequestWithoutIds(): Unit = {
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
  try {
    val brokerList = Seq[Integer](0, 1).asJava
    val topicPartition = new TopicPartition(topic, 0)
    val topicPartition2 = new TopicPartition(topic, 1)

    def leaderAndIsrRequest(topicIds: util.Map[String, Uuid], version: Short, partition: Int = 0, leaderEpoch: Int = 0): LeaderAndIsrRequest =
      new LeaderAndIsrRequest.Builder(version, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(partition)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(leaderEpoch)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()

    // Send a request without a topic ID so that we have a log without a topic ID associated to the partition.
    val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(Collections.emptyMap(), 4), (_, _) => ())
    assertEquals(Errors.NONE, response.partitionErrors(Collections.emptyMap()).get(topicPartition))
    assertTrue(replicaManager.localLog(topicPartition).isDefined)
    val log = replicaManager.localLog(topicPartition).get
    assertFalse(log.partitionMetadataFile.exists())
    assertTrue(log.topicId.isEmpty)

    // Resend at the latest version with IDs: the file must now be created.
    val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(topicIds.asJava, ApiKeys.LEADER_AND_ISR.latestVersion), (_, _) => ())
    assertEquals(Errors.NONE, response2.partitionErrors(topicNames.asJava).get(topicPartition))
    assertTrue(replicaManager.localLog(topicPartition).isDefined)
    assertTrue(log.partitionMetadataFile.exists())
    assertTrue(log.topicId.isDefined)
    assertEquals(topicId, log.topicId.get)

    // Repeat with partition 2, but in this case, update the leader epoch
    // Send a request without a topic ID so that we have a log without a topic ID associated to the partition.
    val response3 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(Collections.emptyMap(), 4, 1), (_, _) => ())
    assertEquals(Errors.NONE, response3.partitionErrors(Collections.emptyMap()).get(topicPartition2))
    assertTrue(replicaManager.localLog(topicPartition2).isDefined)
    val log2 = replicaManager.localLog(topicPartition2).get
    assertFalse(log2.partitionMetadataFile.exists())
    assertTrue(log2.topicId.isEmpty)

    val response4 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(topicIds.asJava, ApiKeys.LEADER_AND_ISR.latestVersion, 1, 1), (_, _) => ())
    assertEquals(Errors.NONE, response4.partitionErrors(topicNames.asJava).get(topicPartition2))
    assertTrue(replicaManager.localLog(topicPartition2).isDefined)
    assertTrue(log2.partitionMetadataFile.exists())
    assertTrue(log2.topicId.isDefined)
    assertEquals(topicId, log2.topicId.get)

    // Both metadata files must contain the same (class-level) topic ID.
    assertEquals(topicId, log.partitionMetadataFile.read().topicId)
    assertEquals(topicId, log2.partitionMetadataFile.read().topicId)
  } finally replicaManager.shutdown(checkpointHW = false)
}
/**
 * Once a topic ID has been associated with a partition, LeaderAndIsr
 * requests carrying a different ID must fail with INCONSISTENT_TOPIC_ID,
 * regardless of whether the leader epoch is bumped.
 */
@Test
def testInconsistentIdReturnsError(): Unit = {
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
  try {
    val brokerList = Seq[Integer](0, 1).asJava
    val topicPartition = new TopicPartition(topic, 0)
    val topicIds = Collections.singletonMap(topic, Uuid.randomUuid())
    val topicNames = topicIds.asScala.map(_.swap).asJava
    // A second, random ID for the same topic name, guaranteed to conflict.
    val invalidTopicIds = Collections.singletonMap(topic, Uuid.randomUuid())
    val invalidTopicNames = invalidTopicIds.asScala.map(_.swap).asJava

    def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest =
      new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(topic)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(epoch)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build()

    // Establish the valid ID over two epochs.
    val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ())
    assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition))

    val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, topicIds), (_, _) => ())
    assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition))

    // Send request with inconsistent ID.
    val response3 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, invalidTopicIds), (_, _) => ())
    assertEquals(Errors.INCONSISTENT_TOPIC_ID, response3.partitionErrors(invalidTopicNames).get(topicPartition))

    val response4 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(2, invalidTopicIds), (_, _) => ())
    assertEquals(Errors.INCONSISTENT_TOPIC_ID, response4.partitionErrors(invalidTopicNames).get(topicPartition))
  } finally replicaManager.shutdown(checkpointHW = false)
}
/**
 * The partition metadata file must NOT be created when: the topic has no
 * associated ID, the ID is the sentinel ZERO_UUID, or the LeaderAndIsr
 * request uses a protocol version that predates topic IDs.
 */
@Test
def testPartitionMetadataFileNotCreated(): Unit = {
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time))
  try {
    val brokerList = Seq[Integer](0, 1).asJava
    val topicPartition = new TopicPartition(topic, 0)
    val topicPartitionFoo = new TopicPartition("foo", 0)
    val topicPartitionFake = new TopicPartition("fakeTopic", 0)
    val topicIds = Map(topic -> Uuid.ZERO_UUID, "foo" -> Uuid.randomUuid()).asJava
    val topicNames = topicIds.asScala.map(_.swap).asJava

    // Serialize and re-parse at the requested version so the request really
    // goes through the on-the-wire format of that version.
    def leaderAndIsrRequest(epoch: Int, name: String, version: Short): LeaderAndIsrRequest = LeaderAndIsrRequest.parse(
      new LeaderAndIsrRequest.Builder(version, 0, 0, brokerEpoch,
        Seq(new LeaderAndIsrPartitionState()
          .setTopicName(name)
          .setPartitionIndex(0)
          .setControllerEpoch(0)
          .setLeader(0)
          .setLeaderEpoch(epoch)
          .setIsr(brokerList)
          .setZkVersion(0)
          .setReplicas(brokerList)
          .setIsNew(true)).asJava,
        topicIds,
        Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build().serialize(), version)

    // There is no file if the topic does not have an associated topic ID.
    val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, "fakeTopic", ApiKeys.LEADER_AND_ISR.latestVersion), (_, _) => ())
    assertTrue(replicaManager.localLog(topicPartitionFake).isDefined)
    val log = replicaManager.localLog(topicPartitionFake).get
    assertFalse(log.partitionMetadataFile.exists())
    assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition))

    // There is no file if the topic has the default UUID.
    val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topic, ApiKeys.LEADER_AND_ISR.latestVersion), (_, _) => ())
    assertTrue(replicaManager.localLog(topicPartition).isDefined)
    val log2 = replicaManager.localLog(topicPartition).get
    assertFalse(log2.partitionMetadataFile.exists())
    assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition))

    // There is no file if the request is an older version (version 0).
    val response3 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, "foo", 0), (_, _) => ())
    assertTrue(replicaManager.localLog(topicPartitionFoo).isDefined)
    val log3 = replicaManager.localLog(topicPartitionFoo).get
    assertFalse(log3.partitionMetadataFile.exists())
    assertEquals(Errors.NONE, response3.partitionErrors(topicNames).get(topicPartitionFoo))

    // There is no file if the request is an older version (version 4).
    val response4 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, "foo", 4), (_, _) => ())
    assertTrue(replicaManager.localLog(topicPartitionFoo).isDefined)
    val log4 = replicaManager.localLog(topicPartitionFoo).get
    assertFalse(log4.partitionMetadataFile.exists())
    assertEquals(Errors.NONE, response4.partitionErrors(topicNames).get(topicPartitionFoo))
  } finally replicaManager.shutdown(checkpointHW = false)
}
/**
 * Convenience builder for a LeaderAndIsrRequest describing a single
 * partition. The live-broker set covers the controller plus every replica,
 * each on host "host-&lt;id&gt;" port 9092.
 */
private def leaderAndIsrRequest(
  topicId: Uuid,
  topicPartition: TopicPartition,
  replicas: Seq[Int],
  leaderAndIsr: LeaderAndIsr,
  isNew: Boolean = true,
  brokerEpoch: Int = 0,
  controllerId: Int = 0,
  controllerEpoch: Int = 0,
  version: Short = LeaderAndIsrRequestData.HIGHEST_SUPPORTED_VERSION
): LeaderAndIsrRequest = {
  // Single partition state derived from the supplied LeaderAndIsr.
  val state = new LeaderAndIsrPartitionState()
    .setTopicName(topicPartition.topic)
    .setPartitionIndex(topicPartition.partition)
    .setControllerEpoch(controllerEpoch)
    .setLeader(leaderAndIsr.leader)
    .setLeaderEpoch(leaderAndIsr.leaderEpoch)
    .setIsr(leaderAndIsr.isr.map(Int.box).asJava)
    .setZkVersion(leaderAndIsr.zkVersion)
    .setReplicas(replicas.map(Int.box).asJava)
    .setIsNew(isNew)

  // Deduplicated broker set: the controller together with all replicas.
  val liveNodes = (controllerId +: replicas).toSet.map { id: Int =>
    new Node(id, s"host-$id", 9092)
  }

  new LeaderAndIsrRequest.Builder(
    version,
    controllerId,
    controllerEpoch,
    brokerEpoch,
    Seq(state).asJava,
    Map(topicPartition.topic -> topicId).asJava,
    liveNodes.asJava
  ).build()
}
/**
 * activeProducerState error mapping: unknown topic -> UNKNOWN_TOPIC_OR_PARTITION,
 * known but unhosted partition -> NOT_LEADER_OR_FOLLOWER, while both leaders
 * and followers of a hosted partition answer with NONE.
 */
@Test
def testActiveProducerState(): Unit = {
  val brokerId = 0
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), brokerId)
  try {
    // Topic absent from the metadata cache entirely.
    val fooPartition = new TopicPartition("foo", 0)
    Mockito.when(replicaManager.metadataCache.contains(fooPartition)).thenReturn(false)
    val fooProducerState = replicaManager.activeProducerState(fooPartition)
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.forCode(fooProducerState.errorCode))

    // Topic known to the cluster but not hosted by this broker.
    val oofPartition = new TopicPartition("oof", 0)
    Mockito.when(replicaManager.metadataCache.contains(oofPartition)).thenReturn(true)
    val oofProducerState = replicaManager.activeProducerState(oofPartition)
    assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, Errors.forCode(oofProducerState.errorCode))

    // This API is supported by both leaders and followers

    // Local broker is the leader of "bar".
    val barPartition = new TopicPartition("bar", 0)
    val barLeaderAndIsrRequest = leaderAndIsrRequest(
      topicId = Uuid.randomUuid(),
      topicPartition = barPartition,
      replicas = Seq(brokerId),
      leaderAndIsr = LeaderAndIsr(brokerId, List(brokerId))
    )
    replicaManager.becomeLeaderOrFollower(0, barLeaderAndIsrRequest, (_, _) => ())
    val barProducerState = replicaManager.activeProducerState(barPartition)
    assertEquals(Errors.NONE, Errors.forCode(barProducerState.errorCode))

    // Local broker is a follower of "baz" (the other broker leads).
    val otherBrokerId = 1
    val bazPartition = new TopicPartition("baz", 0)
    val bazLeaderAndIsrRequest = leaderAndIsrRequest(
      topicId = Uuid.randomUuid(),
      topicPartition = bazPartition,
      replicas = Seq(brokerId, otherBrokerId),
      leaderAndIsr = LeaderAndIsr(otherBrokerId, List(brokerId, otherBrokerId))
    )
    replicaManager.becomeLeaderOrFollower(0, bazLeaderAndIsrRequest, (_, _) => ())
    val bazProducerState = replicaManager.activeProducerState(bazPartition)
    assertEquals(Errors.NONE, Errors.forCode(bazProducerState.errorCode))
  } finally {
    replicaManager.shutdown(checkpointHW = false)
  }
}
// Fixed topic IDs shared by the KRaft image/delta tests below so that
// expected values can be asserted deterministically.
val FOO_UUID = Uuid.fromString("fFJBx0OmQG-UqeaT6YaSwA")
val BAR_UUID = Uuid.fromString("vApAP6y7Qx23VOfKBzbOBQ")
val BAZ_UUID = Uuid.fromString("7wVsX2aaTk-bdGcOxLRyVQ")
/**
 * getOrCreatePartition semantics: the first call creates the partition and
 * reports it as new; a second call returns the same instance; an offline
 * partition cannot be (re-)created and yields None.
 */
@Test
def testGetOrCreatePartition(): Unit = {
  val brokerId = 0
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), brokerId)
  try {
    val foo0 = new TopicPartition("foo", 0)
    val emptyDelta = new TopicsDelta(TopicsImage.EMPTY)

    // First call creates the partition.
    val (fooPart, fooNew) = replicaManager.getOrCreatePartition(foo0, emptyDelta, FOO_UUID).get
    assertTrue(fooNew)
    assertEquals(foo0, fooPart.topicPartition)

    // Second call returns the identical instance and is not "new".
    val (fooPart2, fooNew2) = replicaManager.getOrCreatePartition(foo0, emptyDelta, FOO_UUID).get
    assertFalse(fooNew2)
    assertTrue(fooPart eq fooPart2)

    // An offline partition must not be resurrected.
    val bar1 = new TopicPartition("bar", 1)
    replicaManager.markPartitionOffline(bar1)
    assertEquals(None, replicaManager.getOrCreatePartition(bar1, emptyDelta, BAR_UUID))
  } finally {
    // Previously leaked the ReplicaManager; shut it down like the other tests.
    replicaManager.shutdown(checkpointHW = false)
  }
}
/**
 * Base TopicsImage for the delta tests: topic "foo" with two partitions and
 * topic "bar" with one, registered under the fixed FOO_UUID / BAR_UUID.
 */
val TEST_IMAGE = {
  // foo: partition 0 led by broker 1, partition 1 led by broker 5.
  val fooParts = new util.HashMap[Integer, PartitionRegistration]()
  fooParts.put(0,
    new PartitionRegistration(Array(1, 2, 3), Array(1, 2, 3), Replicas.NONE, Replicas.NONE, 1, 100, 200))
  fooParts.put(1,
    new PartitionRegistration(Array(4, 5, 6), Array(4, 5), Replicas.NONE, Replicas.NONE, 5, 300, 400))
  val fooImage = new TopicImage("foo", FOO_UUID, fooParts)

  // bar: single partition led by broker 3.
  val barParts = new util.HashMap[Integer, PartitionRegistration]()
  barParts.put(0,
    new PartitionRegistration(Array(2, 3, 4), Array(2, 3, 4), Replicas.NONE, Replicas.NONE, 3, 100, 200))
  val barImage = new TopicImage("bar", BAR_UUID, barParts)

  val byId = new util.HashMap[Uuid, TopicImage]()
  val byName = new util.HashMap[String, TopicImage]()
  byId.put(FOO_UUID, fooImage)
  byName.put("foo", fooImage)
  byId.put(BAR_UUID, barImage)
  byName.put("bar", barImage)
  new TopicsImage(byId, byName)
}
/**
 * Delta over TEST_IMAGE: removes topic "foo" and creates topic "baz" with
 * three partitions (leaders 1, 2 and 3 respectively). Replay order matters:
 * the TopicRecord must precede its PartitionRecords.
 */
val TEST_DELTA = {
  val delta = new TopicsDelta(TEST_IMAGE)
  delta.replay(new RemoveTopicRecord().setTopicId(FOO_UUID))
  delta.replay(new TopicRecord().setName("baz").setTopicId(BAZ_UUID))
  // baz-0: replicas {1,2,4}, leader 1.
  delta.replay(new PartitionRecord().setPartitionId(0).
    setTopicId(BAZ_UUID).
    setReplicas(util.Arrays.asList(1, 2, 4)).
    setIsr(util.Arrays.asList(1, 2, 4)).
    setRemovingReplicas(Collections.emptyList()).
    setAddingReplicas(Collections.emptyList()).
    setLeader(1).
    setLeaderEpoch(123).
    setPartitionEpoch(456))
  // baz-1: replicas {2,4,1}, leader 2.
  delta.replay(new PartitionRecord().setPartitionId(1).
    setTopicId(BAZ_UUID).
    setReplicas(util.Arrays.asList(2, 4, 1)).
    setIsr(util.Arrays.asList(2, 4, 1)).
    setRemovingReplicas(Collections.emptyList()).
    setAddingReplicas(Collections.emptyList()).
    setLeader(2).
    setLeaderEpoch(123).
    setPartitionEpoch(456))
  // baz-2: replicas {3,5,2}, leader 3 — broker 1 is not a replica here.
  delta.replay(new PartitionRecord().setPartitionId(2).
    setTopicId(BAZ_UUID).
    setReplicas(util.Arrays.asList(3, 5, 2)).
    setIsr(util.Arrays.asList(3, 5, 2)).
    setRemovingReplicas(Collections.emptyList()).
    setAddingReplicas(Collections.emptyList()).
    setLeader(3).
    setLeaderEpoch(456).
    setPartitionEpoch(789))
  delta
}
/**
 * Applying TEST_DELTA on broker 1 must yield: both foo partitions deleted,
 * leadership of baz-0 (its leader is broker 1) and followership of baz-1.
 * baz-2 is absent because broker 1 is not among its replicas.
 */
@Test
def testCalculateDeltaChanges(): Unit = {
  val brokerId = 1
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), brokerId)
  try {
    assertEquals((
      Map(new TopicPartition("foo", 0) -> true,
        new TopicPartition("foo", 1) -> true),
      Map(new TopicPartition("baz", 0) -> LocalLeaderInfo(BAZ_UUID,
        new PartitionRegistration(Array(1, 2, 4), Array(1, 2, 4),
          Replicas.NONE, Replicas.NONE, 1, 123, 456))),
      Map(new TopicPartition("baz", 1) -> LocalLeaderInfo(BAZ_UUID,
        new PartitionRegistration(Array(2, 4, 1), Array(2, 4, 1),
          Replicas.NONE, Replicas.NONE, 2, 123, 456)))),
      replicaManager.calculateDeltaChanges(TEST_DELTA))
  } finally {
    // Previously leaked the ReplicaManager; shut it down like the other tests.
    replicaManager.shutdown(checkpointHW = false)
  }
}
/**
 * KRaft metadata-delta path: a broker that is leader of foo-0 accepts a
 * produce, then a delta demotes it to follower — subsequent appends must be
 * rejected with NOT_LEADER_OR_FOLLOWER and a fetcher to the new leader must
 * be installed.
 */
@Test
def testDeltaFromLeaderToFollower(): Unit = {
  val localId = 1
  val otherId = localId + 1
  val numOfRecords = 3
  val epoch = 100
  val topicPartition = new TopicPartition("foo", 0)
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId)

  try {
    // Make the local replica the leader
    val leaderMetadataImage = imageFromTopics(topicsImage(localId, true, epoch))
    replicaManager.applyDelta(leaderMetadataImage, topicsDelta(localId, true, epoch))

    // Check the state of that partition and fetcher
    val HostedPartition.Online(leaderPartition) = replicaManager.getPartition(topicPartition)
    assertTrue(leaderPartition.isLeader)
    assertEquals(Set(localId, otherId), leaderPartition.inSyncReplicaIds)
    assertEquals(epoch, leaderPartition.getLeaderEpoch)
    // Leaders do not run a replica fetcher for their own partition.
    assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition))

    // Send a produce request and advance the highwatermark
    val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords)
    fetchMessages(
      replicaManager,
      otherId,
      topicPartition,
      new PartitionData(numOfRecords, 0, Int.MaxValue, Optional.empty()),
      Int.MaxValue,
      IsolationLevel.READ_UNCOMMITTED,
      None
    )
    assertEquals(Errors.NONE, leaderResponse.get.error)

    // Change the local replica to follower
    val followerMetadataImage = imageFromTopics(topicsImage(localId, false, epoch + 1))
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch + 1))

    // Append on a follower should fail
    val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords)
    assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error)

    // Check the state of that partition and fetcher
    val HostedPartition.Online(followerPartition) = replicaManager.getPartition(topicPartition)
    assertFalse(followerPartition.isLeader)
    assertEquals(epoch + 1, followerPartition.getLeaderEpoch)

    // A fetcher pointed at the new leader must now exist.
    val fetcher = replicaManager.replicaFetcherManager.getFetcher(topicPartition)
    assertEquals(Some(BrokerEndPoint(otherId, "localhost", 9093)), fetcher.map(_.sourceBroker))
  } finally {
    replicaManager.shutdown()
  }

  TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
/**
 * Mirror of testDeltaFromLeaderToFollower: a broker that is follower of
 * foo-0 (appends rejected, fetcher running) is promoted to leader by a delta
 * and must then accept produces and tear down its fetcher.
 */
@Test
def testDeltaFromFollowerToLeader(): Unit = {
  val localId = 1
  val otherId = localId + 1
  val numOfRecords = 3
  val epoch = 100
  val topicPartition = new TopicPartition("foo", 0)
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId)

  try {
    // Make the local replica the follower
    val followerMetadataImage = imageFromTopics(topicsImage(localId, false, epoch))
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch))

    // Check the state of that partition and fetcher
    val HostedPartition.Online(followerPartition) = replicaManager.getPartition(topicPartition)
    assertFalse(followerPartition.isLeader)
    assertEquals(epoch, followerPartition.getLeaderEpoch)
    val fetcher = replicaManager.replicaFetcherManager.getFetcher(topicPartition)
    assertEquals(Some(BrokerEndPoint(otherId, "localhost", 9093)), fetcher.map(_.sourceBroker))

    // Append on a follower should fail
    val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords)
    assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error)

    // Change the local replica to leader
    val leaderMetadataImage = imageFromTopics(topicsImage(localId, true, epoch + 1))
    replicaManager.applyDelta(leaderMetadataImage, topicsDelta(localId, true, epoch + 1))

    // Send a produce request and advance the highwatermark
    val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords)
    fetchMessages(
      replicaManager,
      otherId,
      topicPartition,
      new PartitionData(numOfRecords, 0, Int.MaxValue, Optional.empty()),
      Int.MaxValue,
      IsolationLevel.READ_UNCOMMITTED,
      None
    )
    assertEquals(Errors.NONE, leaderResponse.get.error)

    // After promotion: leader state, full ISR, bumped epoch, no fetcher.
    val HostedPartition.Online(leaderPartition) = replicaManager.getPartition(topicPartition)
    assertTrue(leaderPartition.isLeader)
    assertEquals(Set(localId, otherId), leaderPartition.inSyncReplicaIds)
    assertEquals(epoch + 1, leaderPartition.getLeaderEpoch)
    assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition))
  } finally {
    replicaManager.shutdown()
  }

  TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
/**
 * Re-applying an identical follower delta must be a no-op: partition state
 * and the fetcher to the leader are unchanged.
 */
@Test
def testDeltaFollowerWithNoChange(): Unit = {
  val localId = 1
  val otherId = localId + 1
  val epoch = 100
  val topicPartition = new TopicPartition("foo", 0)
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId)

  try {
    // Make the local replica the follower
    val followerMetadataImage = imageFromTopics(topicsImage(localId, false, epoch))
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch))

    // Check the state of that partition and fetcher
    val HostedPartition.Online(followerPartition) = replicaManager.getPartition(topicPartition)
    assertFalse(followerPartition.isLeader)
    assertEquals(epoch, followerPartition.getLeaderEpoch)
    val fetcher = replicaManager.replicaFetcherManager.getFetcher(topicPartition)
    assertEquals(Some(BrokerEndPoint(otherId, "localhost", 9093)), fetcher.map(_.sourceBroker))

    // Apply the same delta again
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch))

    // Check that the state stays the same
    val HostedPartition.Online(noChangePartition) = replicaManager.getPartition(topicPartition)
    assertFalse(noChangePartition.isLeader)
    assertEquals(epoch, noChangePartition.getLeaderEpoch)
    val noChangeFetcher = replicaManager.replicaFetcherManager.getFetcher(topicPartition)
    assertEquals(Some(BrokerEndPoint(otherId, "localhost", 9093)), noChangeFetcher.map(_.sourceBroker))
  } finally {
    replicaManager.shutdown()
  }

  TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
/**
 * A pending produce on the leader must be completed (with
 * NOT_LEADER_OR_FOLLOWER) when a delta demotes the broker to follower
 * before the records are fully replicated.
 */
@Test
def testDeltaToFollowerCompletesProduce(): Unit = {
  val localId = 1
  val otherId = localId + 1
  val numOfRecords = 3
  val epoch = 100
  val topicPartition = new TopicPartition("foo", 0)
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId)

  try {
    // Make the local replica the leader
    val leaderMetadataImage = imageFromTopics(topicsImage(localId, true, epoch))
    replicaManager.applyDelta(leaderMetadataImage, topicsDelta(localId, true, epoch))

    // Check the state of that partition and fetcher
    val HostedPartition.Online(leaderPartition) = replicaManager.getPartition(topicPartition)
    assertTrue(leaderPartition.isLeader)
    assertEquals(Set(localId, otherId), leaderPartition.inSyncReplicaIds)
    assertEquals(epoch, leaderPartition.getLeaderEpoch)
    assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition))

    // Send a produce request; it stays pending until replicated or aborted.
    val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords)

    // Change the local replica to follower
    val followerMetadataImage = imageFromTopics(topicsImage(localId, false, epoch + 1))
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch + 1))

    // Check that the produce failed because it changed to follower before replicating
    assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, leaderResponse.get.error)
  } finally {
    replicaManager.shutdown()
  }

  TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
/**
 * A pending fetch on the leader must be completed (with
 * NOT_LEADER_OR_FOLLOWER) when a delta demotes the broker to follower while
 * the fetch is still waiting in purgatory.
 */
@Test
def testDeltaToFollowerCompletesFetch(): Unit = {
  val localId = 1
  val otherId = localId + 1
  val epoch = 100
  val topicPartition = new TopicPartition("foo", 0)
  val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId)

  try {
    // Make the local replica the leader
    val leaderMetadataImage = imageFromTopics(topicsImage(localId, true, epoch))
    replicaManager.applyDelta(leaderMetadataImage, topicsDelta(localId, true, epoch))

    // Check the state of that partition and fetcher
    val HostedPartition.Online(leaderPartition) = replicaManager.getPartition(topicPartition)
    assertTrue(leaderPartition.isLeader)
    assertEquals(Set(localId, otherId), leaderPartition.inSyncReplicaIds)
    assertEquals(epoch, leaderPartition.getLeaderEpoch)
    assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition))

    // Send a fetch request
    val fetchCallback = fetchMessages(
      replicaManager,
      otherId,
      topicPartition,
      new PartitionData(0, 0, Int.MaxValue, Optional.empty()),
      Int.MaxValue,
      IsolationLevel.READ_UNCOMMITTED,
      None
    )

    // Change the local replica to follower
    val followerMetadataImage = imageFromTopics(topicsImage(localId, false, epoch + 1))
    replicaManager.applyDelta(followerMetadataImage, topicsDelta(localId, false, epoch + 1))

    // Check that the fetch failed because it changed to follower before completing
    assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchCallback.assertFired.error)
  } finally {
    replicaManager.shutdown()
  }

  TestUtils.assertNoNonDaemonThreads(this.getClass.getName)
}
/**
 * Builds a single-topic TopicsImage for "foo" with one partition whose
 * replicas are {replica, replica + 1}. The leader is the local replica when
 * isLeader, otherwise the other one; both leader and partition epoch are
 * `epoch`.
 */
private def topicsImage(replica: Int, isLeader: Boolean, epoch: Int): TopicsImage = {
  val leaderId = if (isLeader) replica else replica + 1
  val partitions = new util.HashMap[Integer, PartitionRegistration]()
  partitions.put(0,
    new PartitionRegistration(Array(replica, replica + 1), Array(replica, replica + 1),
      Replicas.NONE, Replicas.NONE, leaderId, epoch, epoch))
  val fooTopic = new TopicImage("foo", FOO_UUID, partitions)

  val byId = new util.HashMap[Uuid, TopicImage]()
  byId.put(FOO_UUID, fooTopic)
  val byName = new util.HashMap[String, TopicImage]()
  byName.put("foo", fooTopic)
  new TopicsImage(byId, byName)
}
/**
 * Builds a TopicsDelta over the empty image that creates topic "foo" with a
 * single partition matching topicsImage(replica, isLeader, epoch).
 */
private def topicsDelta(replica: Int, isLeader: Boolean, epoch: Int): TopicsDelta = {
  val leaderId = if (isLeader) replica else replica + 1
  val d = new TopicsDelta(TopicsImage.EMPTY)
  // The topic record must be replayed before its partition record.
  d.replay(new TopicRecord().setName("foo").setTopicId(FOO_UUID))
  d.replay(
    new PartitionRecord()
      .setPartitionId(0)
      .setTopicId(FOO_UUID)
      .setReplicas(util.Arrays.asList(replica, replica + 1))
      .setIsr(util.Arrays.asList(replica, replica + 1))
      .setRemovingReplicas(Collections.emptyList())
      .setAddingReplicas(Collections.emptyList())
      .setLeader(leaderId)
      .setLeaderEpoch(epoch)
      .setPartitionEpoch(epoch))
  d
}
/** Wraps the given topics image in an otherwise-default MetadataImage. */
private def imageFromTopics(topicsImage: TopicsImage): MetadataImage = {
  val image = new MetadataImage(
    FeaturesImage.EMPTY,
    ClusterImageTest.IMAGE1,
    topicsImage,
    ConfigurationsImage.EMPTY,
    ClientQuotasImage.EMPTY)
  image
}
}
| lindong28/kafka | core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala | Scala | apache-2.0 | 139,517 |
package varys.examples
import varys.util.AkkaUtils
import varys.{Logging, Utils}
import varys.framework.client._
import varys.framework._
/**
 * Minimal Varys example client: connects to a master, issues a fake "get"
 * for a named piece of data within a coflow, and prints the elapsed time.
 *
 * Usage: ReceiverClientFake &lt;masterUrl&gt; &lt;coflowId&gt; [dataName]
 */
private[varys] object ReceiverClientFake extends Logging {
  // Listener that logs lifecycle events; the process exits on disconnect.
  class TestListener extends ClientListener with Logging {
    def connected(id: String) {
      logInfo("Connected to master, got client ID " + id)
    }

    def disconnected() {
      logInfo("Disconnected from master")
      System.exit(0)
    }
  }

  def main(args: Array[String]) {
    if (args.length < 2) {
      println("USAGE: ReceiverClientFake <masterUrl> <coflowId> [dataName]")
      System.exit(1)
    }

    val url = args(0)
    val coflowId = args(1)
    // Optional third argument selects the data name; defaults to "DATA".
    val DATA_NAME = if (args.length > 2) args(2) else "DATA"

    val listener = new TestListener
    val client = new VarysClient("ReceiverClientFake", url, listener)
    client.start()

    //DNBD start for test
    //client.startDNBD(5678, "p3p1")
    //Thread.sleep(20000)

    println("Trying to retrieve " + DATA_NAME)
    // Time the fake fetch to report end-to-end request latency.
    val st = System.currentTimeMillis()
    client.getFake(DATA_NAME, coflowId)
    val interval = System.currentTimeMillis() - st
    println("Got " + DATA_NAME + ". Now waiting to die. It takes " + interval + " ms")
    //logInfo("Got " + DATA_NAME + ". Now waiting to die. It takes " + interval + " ms")
    client.awaitTermination()
  }
}
| frankfzw/varys | examples/src/main/scala/varys/examples/ReceiverClientFake.scala | Scala | apache-2.0 | 1,355 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package sitemap
import scala.xml.NodeSeq
/**
* The beginning of an experiment to provide a capability to define
* the sitemap menu in xml. Currently pretty limited.
* menu elements have a name attribute, and contain text and link
 * elements, and optionally multiple menu elements.
* The contents of the text element is the menu display x(ht)ml,
* and the contents of the link element is an array of
* path components in JSON array syntax.
*
* @author nafg
*/
/*
object XmlMenu {
def apply(xml: NodeSeq): Seq[Menu] = for(node<-xml) yield node match {
case m @ <menu>{ children @ _* }</menu> =>
val name = m \ "@name" text
val text = NodeSeq.fromSeq((m \ "text" iterator) flatMap {_.child.iterator} toSeq)
val link = util.JSONParser.parse(m \ "link" text).get.asInstanceOf[List[Any]].map(_.asInstanceOf[String])
Menu(Loc(name, link, text), apply(m \ "menu") : _*)
}
}*/
| lzpfmh/framework-2 | web/webkit/src/main/scala/net/liftweb/sitemap/XmlMenu.scala | Scala | apache-2.0 | 1,551 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.epoch
import java.io.{File, RandomAccessFile}
import java.util.Properties
import kafka.api.KAFKA_0_11_0_IV1
import kafka.log.Log
import kafka.server.KafkaConfig._
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.tools.DumpLogSegments
import kafka.utils.{CoreUtils, Logging, TestUtils}
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.common.serialization.Deserializer
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.{After, Before, Test}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ListBuffer => Buffer}
import scala.collection.Seq
/**
* These tests were written to assert the addition of leader epochs to the replication protocol fix the problems
* described in KIP-101. There is a boolean KIP_101_ENABLED which can be toggled to demonstrate the tests failing in the pre-KIP-101 case
*
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-101+-+Alter+Replication+Protocol+to+use+Leader+Epoch+rather+than+High+Watermark+for+Truncation
*
* A test which validates the end to end workflow is also included.
*/
class EpochDrivenReplicationProtocolAcceptanceTest extends ZooKeeperTestHarness with Logging {
  val topic = "topic1"
  val msg = new Array[Byte](1000)
  val msgBigger = new Array[Byte](10000)
  var brokers: Seq[KafkaServer] = null
  var producer: KafkaProducer[Array[Byte], Array[Byte]] = null
  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = null
  val KIP_101_ENABLED = true
  @Before
  override def setUp() {
    super.setUp()
  }
  @After
  override def tearDown() {
    // Guard against NPEs: a test that fails before creating these resources
    // (e.g. in setUp or topic creation) would otherwise mask the original
    // failure with a NullPointerException from tearDown. Also close the
    // consumer created by startConsumer(), which previously leaked.
    if (producer != null)
      producer.close()
    if (consumer != null)
      consumer.close()
    if (brokers != null)
      TestUtils.shutdownServers(brokers)
    super.tearDown()
  }
  @Test
  def shouldFollowLeaderEpochBasicWorkflow(): Unit = {
    //Given 2 brokers
    brokers = (100 to 101).map(createBroker(_))
    //A single partition topic with 2 replicas
    TestUtils.createTopic(zkClient, topic, Map(0 -> Seq(100, 101)), brokers)
    producer = createProducer
    val tp = new TopicPartition(topic, 0)
    //When one record is written to the leader
    producer.send(new ProducerRecord(topic, 0, null, msg)).get
    //The message should have epoch 0 stamped onto it in both leader and follower
    assertEquals(0, latestRecord(leader).partitionLeaderEpoch())
    assertEquals(0, latestRecord(follower).partitionLeaderEpoch())
    //Both leader and follower should have recorded Epoch 0 at Offset 0
    assertEquals(Buffer(EpochEntry(0, 0)), epochCache(leader).epochEntries())
    assertEquals(Buffer(EpochEntry(0, 0)), epochCache(follower).epochEntries())
    //Bounce the follower
    bounce(follower)
    awaitISR(tp)
    //Nothing happens yet as we haven't sent any new messages.
    assertEquals(Buffer(EpochEntry(0, 0)), epochCache(leader).epochEntries())
    assertEquals(Buffer(EpochEntry(0, 0)), epochCache(follower).epochEntries())
    //Send a message
    producer.send(new ProducerRecord(topic, 0, null, msg)).get
    //Epoch1 should now propagate to the follower with the written message
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1)), epochCache(leader).epochEntries())
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1)), epochCache(follower).epochEntries())
    //The new message should have epoch 1 stamped
    assertEquals(1, latestRecord(leader).partitionLeaderEpoch())
    assertEquals(1, latestRecord(follower).partitionLeaderEpoch())
    //Bounce the leader. Epoch -> 2
    bounce(leader)
    awaitISR(tp)
    //Epochs 2 should be added to the leader, but not on the follower (yet), as there has been no replication.
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1)), epochCache(leader).epochEntries())
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1)), epochCache(follower).epochEntries())
    //Send a message
    producer.send(new ProducerRecord(topic, 0, null, msg)).get
    //This should cause epoch 2 to propagate to the follower
    assertEquals(2, latestRecord(leader).partitionLeaderEpoch())
    assertEquals(2, latestRecord(follower).partitionLeaderEpoch())
    //The leader epoch files should now match on leader and follower
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1), EpochEntry(2, 2)), epochCache(leader).epochEntries())
    assertEquals(Buffer(EpochEntry(0, 0), EpochEntry(1, 1), EpochEntry(2, 2)), epochCache(follower).epochEntries())
  }
  @Test
  def shouldNotAllowDivergentLogs(): Unit = {
    //Given two brokers
    brokers = (100 to 101).map { id => createServer(fromProps(createBrokerConfig(id, zkConnect))) }
    //A single partition topic with 2 replicas
    TestUtils.createTopic(zkClient, topic, Map(0 -> Seq(100, 101)), brokers)
    producer = createProducer
    //Write 10 messages
    (0 until 10).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
      producer.flush()
    }
    //Stop the brokers
    brokers.foreach { b => b.shutdown() }
    //Delete the clean shutdown file to simulate crash
    new File(brokers(0).config.logDirs(0), Log.CleanShutdownFile).delete()
    //Delete 5 messages from the leader's log on 100
    deleteMessagesFromLogFile(5 * msg.length, brokers(0), 0)
    //Restart broker 100
    brokers(0).startup()
    //Bounce the producer (this is required, although I'm unsure as to why?)
    producer.close()
    producer = createProducer
    //Write ten larger messages (so we can easily distinguish between messages written in the two phases)
    (0 until 10).foreach { _ =>
      producer.send(new ProducerRecord(topic, 0, null, msgBigger))
      producer.flush()
    }
    //Start broker 101
    brokers(1).startup()
    //Wait for replication to resync
    waitForLogsToMatch(brokers(0), brokers(1))
    assertEquals("Log files should match Broker0 vs Broker 1", getLogFile(brokers(0), 0).length, getLogFile(brokers(1), 0).length)
  }
  //We can reproduce the pre-KIP-101 failure of this test by setting KafkaConfig.InterBrokerProtocolVersionProp = KAFKA_0_11_0_IV1
  @Test
  def offsetsShouldNotGoBackwards(): Unit = {
    //Given two brokers
    brokers = (100 to 101).map(createBroker(_))
    //A single partition topic with 2 replicas
    TestUtils.createTopic(zkClient, topic, Map(0 -> Seq(100, 101)), brokers)
    producer = createBufferingProducer
    //Write 100 messages
    (0 until 100).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
      producer.flush()
    }
    //Stop the brokers
    brokers.foreach { b => b.shutdown() }
    //Delete the clean shutdown file to simulate crash
    new File(brokers(0).config.logDirs(0), Log.CleanShutdownFile).delete()
    //Delete half the messages from the log file
    deleteMessagesFromLogFile(getLogFile(brokers(0), 0).length() / 2, brokers(0), 0)
    //Start broker 100 again
    brokers(0).startup()
    //Bounce the producer (this is required, although I'm unsure as to why?)
    producer.close()
    producer = createBufferingProducer
    //Write two large batches of messages. This will ensure that the LeO of the follower's log aligns with the middle
    //of a compressed message set in the leader (which, when forwarded, will result in offsets going backwards)
    (0 until 77).foreach { _ =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
    }
    producer.flush()
    (0 until 77).foreach { _ =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
    }
    producer.flush()
    printSegments()
    //Start broker 101. When it comes up it should read a whole batch of messages from the leader.
    //As the chronology is lost we would end up with non-monotonic offsets (pre kip-101)
    brokers(1).startup()
    //Wait for replication to resync
    waitForLogsToMatch(brokers(0), brokers(1))
    printSegments()
    //Shut down broker 100, so we read from broker 101 which should have corrupted
    brokers(0).shutdown()
    //Search to see if we have non-monotonic offsets in the log
    startConsumer()
    val records = consumer.poll(1000).asScala
    var prevOffset = -1L
    records.foreach { r =>
      assertTrue(s"Offset $prevOffset came before ${r.offset} ", r.offset > prevOffset)
      prevOffset = r.offset
    }
    //Are the files identical?
    assertEquals("Log files should match Broker0 vs Broker 1", getLogFile(brokers(0), 0).length, getLogFile(brokers(1), 0).length)
  }
  /**
    * Unlike the tests above, this test doesn't fail prior to the Leader Epoch Change. I was unable to find a deterministic
    * method for recreating the fast leader change bug.
    */
  @Test
  def shouldSurviveFastLeaderChange(): Unit = {
    val tp = new TopicPartition(topic, 0)
    //Given 2 brokers
    brokers = (100 to 101).map(createBroker(_))
    //A single partition topic with 2 replicas
    TestUtils.createTopic(zkClient, topic, Map(0 -> Seq(100, 101)), brokers)
    producer = createProducer
    //Kick off with a single record
    producer.send(new ProducerRecord(topic, 0, null, msg)).get
    var messagesWritten = 1
    //Now invoke the fast leader change bug
    (0 until 5).foreach { i =>
      val leaderId = zkClient.getLeaderForPartition(new TopicPartition(topic, 0)).get
      val leader = brokers.filter(_.config.brokerId == leaderId)(0)
      val follower = brokers.filter(_.config.brokerId != leaderId)(0)
      producer.send(new ProducerRecord(topic, 0, null, msg)).get
      messagesWritten += 1
      //As soon as it replicates, bounce the follower
      bounce(follower)
      log(leader, follower)
      awaitISR(tp)
      //Then bounce the leader
      bounce(leader)
      log(leader, follower)
      awaitISR(tp)
      //Ensure no data was lost
      assertTrue(brokers.forall { broker => getLog(broker, 0).logEndOffset == messagesWritten })
    }
  }
  @Test
  def logsShouldNotDivergeOnUncleanLeaderElections(): Unit = {
    // Given two brokers, unclean leader election is enabled
    brokers = (100 to 101).map(createBroker(_, enableUncleanLeaderElection = true))
    // A single partition topic with 2 replicas, min.isr = 1
    TestUtils.createTopic(zkClient, topic, Map(0 -> Seq(100, 101)), brokers,
      CoreUtils.propsWith((KafkaConfig.MinInSyncReplicasProp, "1")))
    producer = TestUtils.createProducer(getBrokerListStrFromServers(brokers), acks = 1)
    // Write one message while both brokers are up
    (0 until 1).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
      producer.flush()}
    // Since we use producer with acks = 1, make sure that logs match for the first epoch
    waitForLogsToMatch(brokers(0), brokers(1))
    // shutdown broker 100
    brokers(0).shutdown()
    //Write 1 message
    (0 until 1).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
      producer.flush()}
    brokers(1).shutdown()
    brokers(0).startup()
    //Bounce the producer (this is required, probably because the broker port changes on restart?)
    producer.close()
    producer = TestUtils.createProducer(getBrokerListStrFromServers(brokers), acks = 1)
    //Write 3 messages
    (0 until 3).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msgBigger))
      producer.flush()}
    brokers(0).shutdown()
    brokers(1).startup()
    //Bounce the producer (this is required, probably because the broker port changes on restart?)
    producer.close()
    producer = TestUtils.createProducer(getBrokerListStrFromServers(brokers), acks = 1)
    //Write 1 message
    (0 until 1).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msg))
      producer.flush()}
    brokers(1).shutdown()
    brokers(0).startup()
    //Bounce the producer (this is required, probably because the broker port changes on restart?)
    producer.close()
    producer = TestUtils.createProducer(getBrokerListStrFromServers(brokers), acks = 1)
    //Write 2 messages
    (0 until 2).foreach { i =>
      producer.send(new ProducerRecord(topic, 0, null, msgBigger))
      producer.flush()}
    printSegments()
    brokers(1).startup()
    waitForLogsToMatch(brokers(0), brokers(1))
    printSegments()
    // Compare the CRCs of the batches in the active segments on both brokers.
    def crcSeq(broker: KafkaServer, partition: Int = 0): Seq[Long] = {
      val batches = getLog(broker, partition).activeSegment.read(0, None, Integer.MAX_VALUE)
        .records.batches().asScala.toSeq
      batches.map(_.checksum)
    }
    assertTrue(s"Logs on Broker 100 and Broker 101 should match",
      crcSeq(brokers(0)) == crcSeq(brokers(1)))
  }
  // Logs leader/follower log-end offsets and epoch caches for debugging.
  private def log(leader: KafkaServer, follower: KafkaServer): Unit = {
    info(s"Bounce complete for follower ${follower.config.brokerId}")
    info(s"Leader: leo${leader.config.brokerId}: " + getLog(leader, 0).logEndOffset + " cache: " + epochCache(leader).epochEntries())
    info(s"Follower: leo${follower.config.brokerId}: " + getLog(follower, 0).logEndOffset + " cache: " + epochCache(follower).epochEntries())
  }
  // Blocks until both brokers report the same log end offset for the partition.
  private def waitForLogsToMatch(b1: KafkaServer, b2: KafkaServer, partition: Int = 0): Unit = {
    TestUtils.waitUntilTrue(() => {getLog(b1, partition).logEndOffset == getLog(b2, partition).logEndOffset}, "Logs didn't match.")
  }
  // Dumps both brokers' log segments via DumpLogSegments for debugging.
  private def printSegments(): Unit = {
    info("Broker0:")
    DumpLogSegments.main(Seq("--files", getLogFile(brokers(0), 0).getCanonicalPath).toArray)
    info("Broker1:")
    DumpLogSegments.main(Seq("--files", getLogFile(brokers(1), 0).getCanonicalPath).toArray)
  }
  // Creates a consumer assigned to partition 0, sized to fetch the whole log in one poll.
  // The instance is stored in `consumer` so tearDown can close it.
  private def startConsumer(): KafkaConsumer[Array[Byte], Array[Byte]] = {
    val consumerConfig = new Properties()
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBrokerListStrFromServers(brokers))
    consumerConfig.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, String.valueOf(getLogFile(brokers(1), 0).length() * 2))
    consumerConfig.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(getLogFile(brokers(1), 0).length() * 2))
    consumer = new KafkaConsumer(consumerConfig, new StubDeserializer, new StubDeserializer)
    consumer.assign(List(new TopicPartition(topic, 0)).asJava)
    consumer.seek(new TopicPartition(topic, 0), 0)
    consumer
  }
  // Truncates `bytes` from the end of the partition's log file to simulate lost data.
  private def deleteMessagesFromLogFile(bytes: Long, broker: KafkaServer, partitionId: Int): Unit = {
    val logFile = getLogFile(broker, partitionId)
    val writable = new RandomAccessFile(logFile, "rwd")
    writable.setLength(logFile.length() - bytes)
    writable.close()
  }
  // Producer that buffers aggressively (large linger/batch + compression) so sends form big batches.
  private def createBufferingProducer: KafkaProducer[Array[Byte], Array[Byte]] = {
    TestUtils.createProducer(getBrokerListStrFromServers(brokers),
      acks = -1,
      lingerMs = 10000,
      batchSize = msg.length * 1000,
      compressionType = "snappy")
  }
  private def getLogFile(broker: KafkaServer, partition: Int): File = {
    val log: Log = getLog(broker, partition)
    log.flush()
    log.dir.listFiles.filter(_.getName.endsWith(".log"))(0)
  }
  private def getLog(broker: KafkaServer, partition: Int): Log = {
    broker.logManager.getLog(new TopicPartition(topic, partition)).orNull
  }
  // Restarts the given broker; the producer must be recreated afterwards.
  private def bounce(follower: KafkaServer): Unit = {
    follower.shutdown()
    follower.startup()
    producer.close()
    producer = createProducer //TODO not sure why we need to recreate the producer, but it doesn't reconnect if we don't
  }
  private def epochCache(broker: KafkaServer): LeaderEpochFileCache = {
    getLog(broker, 0).leaderEpochCache.asInstanceOf[LeaderEpochFileCache]
  }
  // Returns the last record batch in the active segment. NOTE: `offset` is currently unused.
  private def latestRecord(leader: KafkaServer, offset: Int = -1, partition: Int = 0): RecordBatch = {
    getLog(leader, partition).activeSegment.read(0, None, Integer.MAX_VALUE)
      .records.batches().asScala.toSeq.last
  }
  // Blocks until both replicas are in the ISR for the partition.
  private def awaitISR(tp: TopicPartition): Unit = {
    TestUtils.waitUntilTrue(() => {
      leader.replicaManager.getPartition(tp).get.inSyncReplicas.map(_.brokerId).size == 2
    }, "Timed out waiting for replicas to join ISR")
  }
  private def createProducer: KafkaProducer[Array[Byte], Array[Byte]] = {
    TestUtils.createProducer(getBrokerListStrFromServers(brokers), acks = -1)
  }
  // Looks up the current leader broker from ZooKeeper (assumes exactly 2 brokers).
  private def leader(): KafkaServer = {
    assertEquals(2, brokers.size)
    val leaderId = zkClient.getLeaderForPartition(new TopicPartition(topic, 0)).get
    brokers.filter(_.config.brokerId == leaderId)(0)
  }
  // Looks up the current follower broker from ZooKeeper (assumes exactly 2 brokers).
  private def follower(): KafkaServer = {
    assertEquals(2, brokers.size)
    val leader = zkClient.getLeaderForPartition(new TopicPartition(topic, 0)).get
    brokers.filter(_.config.brokerId != leader)(0)
  }
  // Creates a broker; with KIP_101_ENABLED=false the pre-KIP-101 protocol/format is used.
  private def createBroker(id: Int, enableUncleanLeaderElection: Boolean = false): KafkaServer = {
    val config = createBrokerConfig(id, zkConnect)
    if(!KIP_101_ENABLED) {
      config.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_0_11_0_IV1.version)
      config.setProperty(KafkaConfig.LogMessageFormatVersionProp, KAFKA_0_11_0_IV1.version)
    }
    config.setProperty(KafkaConfig.UncleanLeaderElectionEnableProp, enableUncleanLeaderElection.toString)
    createServer(fromProps(config))
  }
  // Pass-through deserializer so tests can read raw bytes.
  private class StubDeserializer extends Deserializer[Array[Byte]] {
    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = {}
    override def deserialize(topic: String, data: Array[Byte]): Array[Byte] = { data }
    override def close(): Unit = {}
  }
}
| ollie314/kafka | core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceTest.scala | Scala | apache-2.0 | 18,320 |
package com.seanshubin.detangler.scanner
import java.nio.file.Path
/** Abstraction over file discovery so implementations can be swapped (e.g. in tests). */
trait DirectoryScanner {
  /** Returns the files found by this scanner. */
  def findFiles(): Iterable[Path]
}
| SeanShubin/detangler | scanner/src/main/scala/com/seanshubin/detangler/scanner/DirectoryScanner.scala | Scala | unlicense | 130 |
package scala.tasty.internal
package convert
trait SymbolConverter {
self: API =>
import self.GlobalToTName._
import self.{ Symbols => t }
import dotc.util.{ Positions => tp }
import scala.collection.JavaConversions._
  // Cache mapping a Scala (global) symbol to the single Dotty symbol it converts to.
  val symCache = new java.util.IdentityHashMap[g.Symbol, t.Symbol]();
  // Cache for Scala symbols that convert to *several* Dotty symbols (e.g. class type params).
  val oneToManyCache = new java.util.IdentityHashMap[g.Symbol, scala.collection.mutable.Set[t.Symbol]]
  /** Converts a list of Scala symbols, reusing cached conversions where possible. */
  def convertSymbols(symbols: List[g.Symbol]): List[t.Symbol] = symbols map convertSymbol
  //If passed symbol is a type param of class or trait
  def isClassTypeParam(sym: g.Symbol): Boolean = sym.isTypeParameter && sym.owner.isClass
  // True when the current conversion scope stack contains a constructor of the symbol's owner.
  def isSymRefInsideConstructor(sym: g.Symbol): Boolean =
    scopeStack.exists { scope =>
      scope.isConstructor && scope.owner == sym.owner
    }
  //Expanded sym (type param) is for class/trait type params that are used outside of constructor
  def isExpandedSym(sym: g.Symbol) = isClassTypeParam(sym) && !isSymRefInsideConstructor(sym)
  /**
   * Converts a Scala symbol to its Dotty counterpart, memoizing results.
   * Class/trait type parameters may map to several Dotty symbols (one per
   * enclosing context) and are handled through oneToManyCache; all other
   * symbols go through symCache with an IncompleteSymbol sentinel to detect
   * cyclic conversions.
   */
  def convertSymbol(sym: g.Symbol): t.Symbol = {
    //TODO - add here other cases when one Scala symbol represents several Dotty symbols
    def isOneToManySym(sym: g.Symbol): Boolean = isClassTypeParam(sym)
    // Picks (or creates and caches) the Dotty symbol appropriate for the current scope.
    def processOneToManySyms(gSym: g.Symbol, tSyms: scala.collection.mutable.Set[t.Symbol]): t.Symbol = {
      tSyms match {
        case _ if isClassTypeParam(gSym) =>
          // If we are inside a constructor of the owner, the param belongs to that
          // constructor; otherwise it belongs to the class itself.
          val constructor = scopeStack.find { scope =>
            scope.isConstructor && scope.owner == gSym.owner
          }
          constructor match {
            case None =>
              val res = tSyms.find { _.owner.isClass } getOrElse { convertScalaClassTypeParameter(gSym, gSym.owner) }
              tSyms += res
              res
            case Some(constr) =>
              val convertedConstr = convertSymbol(constr)
              val res = tSyms.find { _.owner == convertedConstr } getOrElse { convertScalaClassTypeParameter(gSym, constr) }
              tSyms += res
              res
          }
        //TODO - add here other cases for one to many processing
        case _ => throw new Exception(s"Unintended invocation of oneToManyCache during convertion of $gSym")
      }
    }
    sym match {
      case _ if sym == null => null
      case _ if isOneToManySym(sym) =>
        val foundSymbol = mapAsScalaMap(oneToManyCache).get(sym) match {
          case None =>
            val newSet = collection.mutable.Set[t.Symbol]()
            oneToManyCache += (sym -> newSet)
            processOneToManySyms(sym, newSet)
          case Some(syms) =>
            processOneToManySyms(sym, syms)
        }
        foundSymbol
      case _ =>
        //if sym is null - return null
        //if sym is in the symCache map - just return the value from the map
        //if sym is not in the map - write IncompleteSymbol to the map, convert symbol, update the value in the map
        //if resSymbol is incomplete symbol throw new Exception
        symCache.getOrElse(sym,
          sym match {
            case _ =>
              symCache += (sym -> t.IncompleteSymbol)
              val convertedSym = convertSymImpl(sym)
              symCache += (sym -> convertedSym)
              convertedSym
          }) match {
          case t.IncompleteSymbol => throw new Exception(s"IncompleteSymbol is found while converting $sym")
          case res => res
        }
    }
  }
  // Converts a class/trait type parameter, deriving flags from the original modifiers.
  def convertScalaClassTypeParameter(sym: g.Symbol, owner: g.Symbol) = {
    val flags = convertModifiers(sym)
    convertTypeParameter(sym, owner, flags)
  }
  // Creates a Dotty type-parameter symbol under the converted owner.
  // Class-owned parameter names are expanded; method-owned names are kept as-is.
  def convertTypeParameter(sym: g.Symbol, owner: g.Symbol, flags: dotc.core.Flags.FlagSet): t.Symbol = {
    val tOwner = convertSymbol(owner)
    //TODO fix privateWithin
    val bufName = convertToTypeName(sym.name)
    val newName = if (owner.isMethod /*|| !isExpandedSym(sym)*/ ) bufName else expandedName(tOwner, bufName).toTypeName
    newTypeParamSymbol(tOwner, newName, flags, sym)
  }
  /**
   * Performs the actual (uncached) conversion of a Scala symbol, dispatching
   * on the symbol kind: root, packages, module classes, type parameters,
   * classes, constructors, primary-constructor parameters, and everything else.
   */
  def convertSymImpl(sym: g.Symbol, directParam: Boolean = false): t.Symbol = {
    //TODO - fix flags
    val flags = convertModifiers(sym)
    val pos: tp.Position = sym.pos
    val coord: tp.Coord = pos
    val resSym = sym match {
      case g.NoSymbol =>
        t.NoSymbol
      case _ if sym.isRoot => newClassSymbol(t.NoSymbol, dotc.core.StdNames.tpnme.ROOT, flags, sym)
      case _ if sym.hasPackageFlag && sym.isPackageClass =>
        val tOwner = convertSymbol(sym.owner)
        val tName = convertToTypeName(sym.name)
        newClassSymbol(tOwner, tName, dotc.core.Flags.PackageCreationFlags | flags, sym)
      case _ if sym.hasPackageFlag =>
        val tOwner = convertSymbol(sym.owner)
        val tName = convertToTermName(sym.name)
        newPackageSymbol(tOwner, tName, flags, sym)
      // if sym.isModuleClass its name should be changed: '$'originalName
      case _ if sym.isModuleClass =>
        val tOwner = convertSymbol(sym.owner)
        //TODO fix privateWithin
        import dotc.core.Flags
        newClassSymbol(tOwner, convertToTypeName(syntheticName(sym.name)), flags | Flags.Module, sym, privateWithin = t.NoSymbol, coord, sym.associatedFile)
      //This case is for def type parameters (except for constructors). Class, trait, constructor type params should be processed with processOneToManySyms
      case _ if sym.isTypeParameter =>
        convertTypeParameter(sym, sym.owner, flags)
      case _ if sym.isClass =>
        val tOwner = convertSymbol(sym.owner)
        //TODO fix privateWithin
        newClassSymbol(tOwner, convertToTypeName(sym.name), flags, sym, privateWithin = t.NoSymbol, coord, sym.associatedFile)
      case _ if sym.isConstructor =>
        val tOwner = convertSymbol(sym.owner)
        //TODO fix privateWithin
        newConstructor(tOwner, flags, sym, privateWithin = t.NoSymbol, coord)
      // In Dotty:
      // class X(x: Int) extends Y(x) - reference to x from Y(x) is param accessor symbol
      case _ if !directParam && isPrimaryConstrParameter(sym) =>
        val paramAccessorSymbol = sym.enclClass.info.decl(sym.name)
        assert(paramAccessorSymbol.isParamAccessor, s"Incorrect conversion for parameter of primary constructor: ${g.showRaw(sym, printKinds = true)}")
        convertSymbol(paramAccessorSymbol)
      case _ =>
        generalSymbolConversion(sym, flags, coord)
    }
    resSym
  }
  // True for value parameters of a primary constructor (converted via param accessors).
  def isPrimaryConstrParameter(sym: g.Symbol) = sym.isParameter && sym.owner.isPrimaryConstructor
  // Fallback conversion: plain Dotty symbol under the converted owner with the converted name.
  def generalSymbolConversion(sym: g.Symbol, flags: dotc.core.Flags.FlagSet, coord: tp.Coord) = {
    val tOwner = convertSymbol(sym.owner)
    //TODO fix privateWithin
    newSymbol(tOwner, convertToName(sym.name), flags, sym, privateWithin = t.NoSymbol, coord)
  }
} | VladimirNik/tasty | plugin/src/main/scala/scala/tasty/internal/convert/SymbolConverter.scala | Scala | bsd-3-clause | 6,677 |
package com.eclipsesource.schema
import com.eclipsesource.schema.drafts.Version4
import com.eclipsesource.schema.test.JsonSpec
import play.api.test.PlaySpecification
/** Regression cases imported from the Ajv JSON-schema validator's issue tracker. */
class AjvSpecs extends PlaySpecification with JsonSpec {
  import Version4._
  implicit val validator = SchemaValidator(Some(Version4))

  /** Registers the named draft-4 test case from the ajv_tests resource folder. */
  def validateAjv(testName: String) = validate(testName, "ajv_tests")

  // Registration order matches the historical order of the issues.
  Seq(
    "1_ids_in_refs",
    "2_root_ref_in_ref",
    "17_escaping_pattern_property",
    "19_required_many_properties",
    "20_failing_to_parse_schema",
    "27_recursive_reference",
    "27_1_recursive_raml_schema",
    "28_escaping_pattern_error",
    "33_json_schema_latest",
    "63_id_property_not_in_schema",
    "70_1_recursive_hash_ref_in_remote_ref",
    "70_swagger_schema",
    "87_$_property",
    "94_dependencies_fail",
    "170_ref_and_id_in_sibling",
    "226_json_with_control_chars"
  ).foreach(validateAjv)
}
| eclipsesource/play-json-schema-validator | src/test/scala/com/eclipsesource/schema/AjvSpecs.scala | Scala | apache-2.0 | 1,039 |
package org.http4s
package headers
import org.http4s.parser.HttpHeaderParser
import org.http4s.util.{Renderable, Writer}
/** Header-key companion for `User-Agent`; parsing delegates to HttpHeaderParser. */
object `User-Agent` extends HeaderKey.Internal[`User-Agent`] with HeaderKey.Singleton {
  override def parse(s: String): ParseResult[`User-Agent`] =
    HttpHeaderParser.USER_AGENT(s)
}
sealed trait AgentToken extends Renderable
/** A product token such as `name/version`; the version part is optional. */
final case class AgentProduct(name: String, version: Option[String] = None) extends AgentToken {
  override def render(writer: Writer): writer.type = {
    writer << name
    // Append "/version" only when a version is present.
    version match {
      case Some(v) => writer << '/' << v
      case None    => ()
    }
    writer
  }
}
/** A comment token; rendered verbatim here (parentheses are added by the header renderer). */
final case class AgentComment(comment: String) extends AgentToken {
  override def renderString: String = comment
  override def render(writer: Writer): writer.type = writer << comment
}
/** The `User-Agent` header: a primary product followed by further product/comment tokens. */
final case class `User-Agent`(product: AgentProduct, other: Seq[AgentToken] = Seq.empty) extends Header.Parsed {
  def key: `User-Agent`.type = `User-Agent`
  override def renderValue(writer: Writer): writer.type = {
    writer << product
    // Products are space-separated; comments are additionally parenthesized.
    for (token <- other) {
      token match {
        case p: AgentProduct => writer << ' ' << p
        case AgentComment(c) => writer << ' ' << '(' << c << ')'
      }
    }
    writer
  }
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/headers/User-Agent.scala | Scala | apache-2.0 | 1,173 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
// note that setting the logging level on this logger has no effect on its behavior, only
// on the behavior of the delegates.
class MultiLogger(delegates: List[AbstractLogger]) extends BasicLogger {
  /** True if at least one delegate can interpret ANSI escape sequences. */
  override lazy val ansiCodesSupported = delegates exists supportsCodes
  /** True only when every delegate handles ANSI codes, so stripping is never needed. */
  private[this] lazy val allSupportCodes = delegates forall supportsCodes
  private[this] def supportsCodes = (_: AbstractLogger).ansiCodesSupported

  override def setLevel(newLevel: Level.Value): Unit = {
    super.setLevel(newLevel)
    dispatch(new SetLevel(newLevel))
  }
  override def setTrace(level: Int): Unit = {
    super.setTrace(level)
    dispatch(new SetTrace(level))
  }
  override def setSuccessEnabled(flag: Boolean): Unit = {
    super.setSuccessEnabled(flag)
    dispatch(new SetSuccess(flag))
  }

  def trace(t: => Throwable): Unit = dispatch(new Trace(t))
  def log(level: Level.Value, message: => String): Unit = dispatch(new Log(level, message))
  def success(message: => String): Unit = dispatch(new Success(message))
  def logAll(events: Seq[LogEvent]): Unit = delegates foreach (_.logAll(events))
  def control(event: ControlEvent.Value, message: => String): Unit =
    delegates foreach (_.control(event, message))

  /** Sends the event to every delegate, stripping ANSI codes for those that need it. */
  private[this] def dispatch(event: LogEvent): Unit = {
    // Computed lazily: when all delegates support codes this is never evaluated.
    lazy val stripped = removeEscapes(event)
    delegates foreach { d =>
      if (d.ansiCodesSupported || allSupportCodes) d.log(event)
      else d.log(stripped)
    }
  }

  /** Returns a copy of the event with ANSI escape sequences removed from its message. */
  private[this] def removeEscapes(event: LogEvent): LogEvent = {
    import ConsoleLogger.{ removeEscapeSequences => strip }
    event match {
      case s: Success                                           => new Success(strip(s.msg))
      case l: Log                                               => new Log(l.level, strip(l.msg))
      case ce: ControlEvent                                     => new ControlEvent(ce.event, strip(ce.msg))
      case _: Trace | _: SetLevel | _: SetTrace | _: SetSuccess => event
    }
  }
}
| jasonchaffee/sbt | util/log/src/main/scala/sbt/MultiLogger.scala | Scala | bsd-3-clause | 1,949 |
/*
* Copyright 2011-2018 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.nta.ntarule.rules_2_02
import java.net.URI
import scala.collection.immutable
import eu.cdevreeze.nta.common.taxonomy.Taxonomy
import eu.cdevreeze.nta.common.validator.Result
import eu.cdevreeze.nta.common.validator.TaxonomyDocumentValidator
import eu.cdevreeze.nta.common.validator.TaxonomyValidatorFactory
import eu.cdevreeze.nta.common.validator.ValidationScope
import eu.cdevreeze.nta.ntarule.NtaRuleConfigWrapper
import eu.cdevreeze.nta.ntarule.NtaRules
import eu.cdevreeze.tqa.ENames
import eu.cdevreeze.tqa.base.dom.TaxonomyDocument
import eu.cdevreeze.tqa.base.dom.XsdSchema
/**
* Validator of rule 2.02.00.18. The rule says that in the schema document xs:include elements must not occur.
*
* @author Chris de Vreeze
*/
/** Validates NTA rule 2.02.00.18: xs:include elements must not occur in a schema document. */
final class Validator_2_02_00_18(val excludedDocumentUris: Set[URI]) extends TaxonomyDocumentValidator {
  def ruleName: String = NtaRules.extractRuleName(getClass)
  /** Returns an error result per document containing any xs:include element. */
  def validateDocument(
    doc: TaxonomyDocument,
    taxonomy: Taxonomy,
    validationScope: ValidationScope): immutable.IndexedSeq[Result] = {
    require(isTypeOfDocumentToValidate(doc, taxonomy), s"Document ${doc.uri} should not be validated")
    val invalidElems = doc.documentElement.filterElems(_.resolvedName == ENames.XsIncludeEName)
    if (invalidElems.isEmpty) {
      immutable.IndexedSeq()
    } else {
      immutable.IndexedSeq(Result.makeErrorResult(
        ruleName,
        "include-not-allowed",
        s"No xs:include allowed but found ${invalidElems.size} such elements in '${doc.uri}'"))
    }
  }
  /** Only XML Schema documents are in scope for this rule. */
  def isTypeOfDocumentToValidate(doc: TaxonomyDocument, taxonomy: Taxonomy): Boolean = {
    doc.documentElement.isInstanceOf[XsdSchema]
  }
}
/** Factory wiring rule configuration (excluded document URIs) into the validator. */
object Validator_2_02_00_18 extends TaxonomyValidatorFactory {
  type Validator = Validator_2_02_00_18
  type CfgWrapper = NtaRuleConfigWrapper

  def ruleName: String =
    NtaRules.extractRuleName(classOf[Validator_2_02_00_18])

  def create(configWrapper: NtaRuleConfigWrapper): Validator_2_02_00_18 =
    new Validator_2_02_00_18(configWrapper.excludedDocumentUrisForRule(ruleName))
}
| dvreeze/nta | src/main/scala/eu/cdevreeze/nta/ntarule/rules_2_02/Validator_2_02_00_18.scala | Scala | apache-2.0 | 2,708 |
package android
import java.util.Properties
import android.Dependencies.LibraryProject
import com.android.builder.model.SyncIssue
import com.android.ide.common.blame.Message
import com.android.ide.common.blame.Message.Kind
import com.android.ide.common.process.BaseProcessOutputHandler.BaseProcessOutput
import com.android.ide.common.process._
import com.android.tools.lint.LintCliFlags
import com.hanhuy.sbt.bintray.UpdateChecker
import sbt._
import sbt.Keys._
import com.android.builder.core.{ErrorReporter, LibraryRequest, AndroidBuilder}
import com.android.builder.sdk.DefaultSdkLoader
import com.android.sdklib.{SdkVersionInfo, AndroidTargetHash, IAndroidTarget, SdkManager}
import com.android.sdklib.repository.FullRevision
import com.android.SdkConstants
import com.android.utils.ILogger
import java.io.{PrintWriter, File}
import scala.collection.JavaConverters._
import scala.util.Try
import scala.xml.XML
import language.postfixOps
// because of JavaProcessExecutor
import language.existentials
import Keys._
import Keys.Internal._
import Tasks._
import Commands._
import Dependencies.LibrarySeqOps
object Plugin extends sbt.Plugin {
// android build steps
// * handle library dependencies (android.library.reference.N)
// * ndk
// * aidl
// * renderscript
// * BuildConfig.java
// * aapt
// * compile
// * obfuscate
// * dex
// * png crunch
// * package resources
// * package apk
// * sign
// * zipalign
  /**
   * create a new project flavor, build outputs will go in "id/android"
   * does not work in conjunction with AutoBuild, must use standard build.
   *
   * @param p the base project to derive the flavor from
   * @param id the flavor name; also used as the flavor's base directory
   * @param settings additional settings applied after the flavor defaults
   */
  def flavorOf(p: Project, id: String, settings: Setting[_]*): Project = {
    val base = p.base / id
    // clone the project under the new id/base and point layout + target at the flavor dir
    p.copy(id = id, base = base).settings(Seq(
      projectLayout := ProjectLayout(p.base.getCanonicalFile, Some(base.getCanonicalFile)),
      sbt.Keys.target := base) ++ settings:_*)
  }
  /**
   * Installs a global onLoad hook that selects the given buildType/flavor
   * variant for the named project when the build is loaded, unless a variant
   * has already been chosen for that project.
   */
  def withVariant(project: String,
                  buildType: Option[String] = None,
                  flavor: Option[String] = None): Setting[_] =
    sbt.Keys.onLoad in Global := (sbt.Keys.onLoad in Global).value andThen { s =>
      val ref = ProjectRef(Project.extract(s).structure.root, project)
      android.VariantSettings.withVariant(s) { variants =>
        // only apply the default variant if none was selected yet
        if (!variants.status.contains(ref))
          android.VariantSettings.setVariant(s, ref, buildType, flavor)
        else s
      }
    }
  /**
   * Overload of [[withVariant]] accepting a ProjectReference; only
   * ProjectRef and LocalProject references are supported.
   */
  def withVariant(p: ProjectReference,
                  buildType: Option[String],
                  flavor: Option[String]): Setting[_] = withVariant(
    p match {
      case ProjectRef(_, id) => id
      case LocalProject(id) => id
      case _ => Plugin.fail("withVariant: Unsupported ProjectReference: " + p)
    },
    buildType, flavor)
  /** The core settings enabling an Android application build for a project. */
  lazy val androidBuild: Seq[Setting[_]]= {
    // only set the property below if this plugin is actually used
    // this property is a workaround for bootclasspath messing things
    // up and causing full-recompiles
    System.setProperty("xsbt.skip.cp.lookup", "true")
    allPluginSettings
  }
@deprecated("Use Project.androidBuildWith(subprojects) instead", "1.3.3")
def androidBuild(projects: ProjectReference*): Seq[Setting[_]]=
androidBuild ++ buildWith(projects: _*)
def buildWith(projects: ProjectReference*): Seq[Setting[_]] = {
projects flatMap { p =>
Seq(
transitiveAars <++= aars in p,
collectResources <<=
collectResources dependsOn (compile in Compile in p),
compile in Compile <<= compile in Compile dependsOn(
packageT in Compile in p),
localProjects +=
LibraryProject((projectLayout in p).value)((outputLayout in p).value),
localProjects := {
(localProjects.value ++
(localProjects in p).value).distinctLibs
}
)
}
}
lazy val androidBuildJar: Seq[Setting[_]] = androidBuild ++ buildJar
lazy val androidBuildAar: Seq[Setting[_]] = androidBuildAar()
@deprecated("Use aar files instead", "gradle compatibility")
lazy val androidBuildApklib: Seq[Setting[_]] = androidBuildApklib()
def androidBuildAar(projects: ProjectReference*): Seq[Setting[_]] = {
androidBuild(projects:_*) ++ buildAar
}
@deprecated("Use aar files instead", "gradle compatibility")
def androidBuildApklib(projects: ProjectReference*): Seq[Setting[_]] = {
androidBuild(projects:_*) ++ buildApklib
}
/** Use the support library's vector drawable backport: keep vectors as-is and
 *  pass --no-version-vectors to aapt. */
def useSupportVectors = Seq(
  renderVectorDrawables := false,
  aaptAdditionalParams += "--no-version-vectors"
)
/** Settings for producing a plain jar from an android project: a placeholder
 *  manifest is generated, BuildConfig/R generation is disabled
 *  (buildConfigGenerator/rGenerator := Nil) and sources are published. */
def buildJar = Seq(
  manifest := <manifest package="com.hanhuy.sbt.placeholder">
    <application/>
  </manifest>,
  processManifest := {
    implicit val out = outputLayout.value
    val layout = projectLayout.value
    val manifestTarget = layout.processedManifest
    manifestTarget.getParentFile.mkdirs()
    // write the placeholder manifest where the build expects a processed one
    XML.save(manifestTarget.getAbsolutePath, manifest.value, "utf-8")
    manifestTarget
  },
  buildConfigGenerator := Nil,
  rGenerator := Nil,
  debugIncludesTests := false,
  libraryProject := true,
  publishArtifact in (Compile,packageBin) := true,
  publishArtifact in (Compile,packageSrc) := true,
  mappings in (Compile,packageSrc) ++= (managedSources in Compile).value map (s => (s,s.getName)),
  lintFlags := {
    val flags = lintFlags.value
    implicit val output = outputLayout.value
    val layout = projectLayout.value
    layout.bin.mkdirs()
    val config = layout.libraryLintConfig
    config.getParentFile.mkdirs()
    // write a lint config that suppresses ParserError for the manifest path,
    // then make it the default configuration for subsequent lint runs
    (layout.manifest relativeTo layout.base) foreach { path =>
      val lintconfig = <lint>
        <issue id="ParserError">
          <ignore path={path.getPath}/>
        </issue>
      </lint>
      scala.xml.XML.save(config.getAbsolutePath, lintconfig, "utf-8")
      flags.setDefaultConfiguration(config)
    }
    flags
  }
)
// mark as a library project and publish the packaged aar/apklib artifact
def buildAar = Seq(libraryProject := true) ++
  addArtifact(aarArtifact , packageAar)
def buildApklib = Seq(libraryProject := true) ++
  addArtifact(apklibArtifact, packageApklib)
private lazy val allPluginSettings: Seq[Setting[_]] = inConfig(Compile) (Seq(
compile <<= ( compile
, lintDetectors
, lintFlags
, lintEnabled
, lintStrict
, projectLayout
, outputLayout
, minSdkVersion
, targetSdkVersion
, streams) map { (c, ld, f, en, strict, layout, o, minSdk, tgtSdk, s) =>
dsl.checkVersion("minSdkVersion", minSdk)
dsl.checkVersion("targetSdkVersion", tgtSdk)
implicit val output = o
if (en)
AndroidLint(layout, f, ld, strict, minSdk, tgtSdk, s)
c
},
sourceManaged := projectLayout.value.gen,
unmanagedSourceDirectories <<= projectLayout (l =>
Set(l.sources, l.javaSource, l.scalaSource).toSeq),
// was necessary prior to 0.13.8 to squelch "No main class detected" warning
//packageOptions in packageBin := Package.JarManifest(new java.util.jar.Manifest) :: Nil,
packageConfiguration in packageBin <<= ( packageConfiguration in packageBin
, baseDirectory
, libraryProject
, projectLayout
, outputLayout
) map {
(c, b, l, p, o) =>
// remove R.java generated code from library projects
implicit val output = o
val sources = if (l) {
c.sources filter {
case (f,n) => !f.getName.matches("R\\\\W+.*class")
}
} else {
c.sources
}
new Package.Configuration(sources, p.classesJar, c.options)
},
publishArtifact in packageBin := false,
resourceDirectory := projectLayout.value.resources,
scalaSource := projectLayout.value.scalaSource,
javaSource := projectLayout.value.javaSource,
unmanagedJars <<= unmanagedJarsTaskDef,
// doesn't work properly yet, not for intellij integration
//managedClasspath <<= managedClasspathTaskDef,
classDirectory := {
implicit val output = outputLayout.value
projectLayout.value.classes
},
sourceGenerators := sourceGenerators.value ++ List(
rGenerator.taskValue,
typedResourcesGenerator.taskValue,
aidl.taskValue,
buildConfigGenerator.taskValue,
renderscript.taskValue,
debugTestsGenerator.taskValue,
cleanForR.taskValue,
Def.task {
(apklibs.value ++ autolibs.value flatMap { l =>
(l.layout.javaSource ** "*.java" get) ++
(l.layout.scalaSource ** "*.scala" get)
}) map (_.getAbsoluteFile)
}.taskValue
),
packageT <<= packageT dependsOn compile,
javacOptions <<= ( javacOptions
, bootClasspath
, apkbuildDebug
, retrolambdaEnabled) map {
(o,boot, debug, re) =>
// users will want to call clean before compiling if changing debug
val debugOptions = if (debug()) Seq("-g") else Seq.empty
val bcp = boot.map(_.data) mkString File.pathSeparator
// make sure javac doesn't create code that proguard won't process
// (e.g. people with java7) -- specifying 1.5 is fine for 1.6, too
o ++ (if (!re) Seq("-bootclasspath" , bcp) else
Seq("-Xbootclasspath/a:" + bcp)) ++ debugOptions
},
javacOptions in doc := {
(javacOptions in doc).value.flatMap { opt =>
if (opt.startsWith("-Xbootclasspath/a:"))
Seq("-bootclasspath", opt.substring(opt.indexOf(":") + 1))
else if (opt == "-g")
Seq.empty
else Seq(opt)
}.foldRight(List.empty[String]) {
(x, a) => if (x != "-target") x :: a else a.drop(1)
}
},
scalacOptions <<= (scalacOptions, bootClasspath) map { (o,boot) =>
// scalac has -g:vars by default
val bcp = boot.map(_.data) mkString File.pathSeparator
o ++ Seq("-bootclasspath", bcp, "-javabootclasspath", bcp)
}
)) ++ inConfig(Test) (Seq(
exportJars := false,
managedClasspath <++= platform map { t =>
t.getOptionalLibraries.asScala map { l =>
Attributed.blank(l.getJar)
}
},
scalacOptions in console := Seq.empty
)) ++ inConfig(Android) (Classpaths.configSettings ++ Seq(
// fix for sbt 0.13.11
artifactPath in packageBin := (artifactPath in (Compile,packageBin)).value,
flavors := Map.empty,
buildTypes := Map.empty,
pluginSettingsLoaded := {
if (pluginSettingsLoaded.?.value.isDefined)
fail(s"androidBuild has been applied to project ${thisProject.value.id} more than once")
true
},
lint := {
implicit val output = outputLayout.value
AndroidLint(projectLayout.value,
lintFlags.value, lintDetectors.value, lintStrict.value,
minSdkVersion.value, targetSdkVersion.value, streams.value)
},
lintFlags := {
val flags = new LintCliFlags
flags.setQuiet(true)
flags
},
lintStrict := false,
lintEnabled := true,
lintDetectors := AndroidLint.lintDetectorList,
// support for android:test
classDirectory := (classDirectory in Test).value,
sourceDirectory := projectLayout.value.testSources,
managedSources := Nil,
unmanagedSourceDirectories <<= projectLayout (l =>
Set(l.testSources, l.testJavaSource, l.testScalaSource).toSeq),
unmanagedSources <<= Defaults.collectFiles(
unmanagedSourceDirectories,
includeFilter in (Compile,unmanagedSources),
excludeFilter in (Compile,unmanagedSources)),
scalacOptions := (scalacOptions in Compile).value,
javacOptions := (javacOptions in Compile).value,
compile := {
def exported(w: PrintWriter, command: String): Seq[String] => Unit =
args => w.println((command +: args).mkString(" "))
val s = streams.value
val ci = (compileInputs in compile).value
val reporter = (TaskKey[Option[xsbti.Reporter]]("compilerReporter") in (Compile,compile)).value
lazy val x = s.text(CommandStrings.ExportStream)
def onArgs(cs: Compiler.Compilers) =
cs.copy(scalac = cs.scalac.onArgs(exported(x, "scalac")),
javac = cs.javac.onArgs(exported(x, "javac")))
val i = ci.copy(compilers = onArgs(ci.compilers))
try reporter match {
case Some(r) => Compiler(i, s.log, r)
case None => Compiler(i, s.log)
}
finally x.close() // workaround for #937
},
compileIncSetup := {
Compiler.IncSetup(
Defaults.analysisMap((dependencyClasspath in Test).value),
definesClass.value,
(skip in compile).value,
// TODO - this is kind of a bad way to grab the cache directory for streams...
streams.value.cacheDirectory / compileAnalysisFilename.value,
compilerCache.value,
incOptions.value)
},
compileInputs in compile := {
val cp = classDirectory.value +: Attributed.data((dependencyClasspath in Test).value)
Compiler.inputs(cp, sources.value, classDirectory.value, scalacOptions.value, javacOptions.value, maxErrors.value, sourcePositionMappers.value, compileOrder.value)(compilers.value, compileIncSetup.value, streams.value.log)
},
compileAnalysisFilename := {
// Here, if the user wants cross-scala-versioning, we also append it
// to the analysis cache, so we keep the scala versions separated.
val extra =
if (crossPaths.value) s"_${scalaBinaryVersion.value}"
else ""
s"inc_compile$extra"
},
sources <<= Classpaths.concat(unmanagedSources, managedSources),
// productX := Nil is a necessity to use Classpaths.configSettings
exportedProducts := Nil,
products := Nil,
classpathConfiguration := config("compile"),
// end for Classpaths.configSettings
// hack since it doesn't take in dependent project's libs
dependencyClasspath <<= ( dependencyClasspath in Compile
, libraryDependencies
, streams) map { (cp, d, s) =>
s.log.debug("Filtering compile:dependency-classpath from: " + cp)
val pvd = d filter { dep => dep.configurations exists (_ == "provided") }
cp foreach { a =>
s.log.debug("%s => %s: %s" format (a.data.getName,
a.get(configuration.key), a.get(moduleID.key)))
}
// try to filter out duplicate aar libraries as well
// it seems internal-dependency-classpath already filters out "provided"
// from other projects, now, just filter out our own "provided" lib deps
// do not filter out provided libs for scala, we do that later
val (withMID,withoutMID) = cp collect {
case x if x.get(moduleID.key).isDefined =>
(x,(x.get(moduleID.key),x.data.getName))
case x => (x,(None, x.data.getName))
} partition (_._2._1.isDefined)
(withMID.groupBy(_._2).values.map(_.head._1) ++ withoutMID.map(_._1)) filterNot { _.get(moduleID.key) exists { m =>
m.organization != "org.scala-lang" &&
(pvd exists (p => m.organization == p.organization &&
m.name == p.name))
}
} groupBy(_.data) map { case (k,v) => v.head } toList
},
updateCheck := {
val log = streams.value.log
UpdateChecker("pfn", "sbt-plugins", "android-sdk-plugin") {
case Left(t) =>
log.debug("Failed to load version info: " + t)
case Right((versions, current)) =>
log.debug("available versions: " + versions)
log.debug("current version: " + BuildInfo.version)
log.debug("latest version: " + current)
if (versions(BuildInfo.version)) {
if (BuildInfo.version != current) {
log.warn(
s"UPDATE: A newer android-sdk-plugin is available:" +
s" $current, currently running: ${BuildInfo.version}")
}
}
}
},
antLayoutDetector := {
val log = streams.value.log
val prj = thisProjectRef.value.project
projectLayout.value match {
case a: ProjectLayout.Ant if a.manifest.exists =>
log.warn(s"Detected an ant-style project layout in $prj;")
log.warn(" this format has been deprecated in favor of modern layouts")
log.warn(" If this is what you want, set 'antLayoutDetector in Android := ()'")
case _ =>
}
},
transitiveAndroidLibs := true,
transitiveAndroidWarning := true,
autolibs <<= autolibsTaskDef,
apklibs <<= apklibsTaskDef,
localAars := Nil,
aars <<= aarsTaskDef,
transitiveAars := Nil,
aarArtifact <<= normalizedName { n => Artifact(n, "aar", "aar") },
apklibArtifact <<= normalizedName { n => Artifact(n, "apklib", "apklib") },
packageAar <<= packageAarTaskDef,
mappings in packageAar <<= packageAarMappings,
packageApklib <<= packageApklibTaskDef,
mappings in packageApklib <<= packageApklibMappings,
allDevices := false,
install <<= installTaskDef,
uninstall <<= uninstallTaskDef,
clean <<= cleanTaskDef,
test <<= testTaskDef,
test <<= test dependsOn (compile in Android, install),
testOnly <<= testOnlyTaskDef,
debug <<= runTaskDef(true) dependsOn install,
run <<= runTaskDef(false) dependsOn install,
aaptAggregate <<= aaptAggregateTaskDef,
aaptAdditionalParams := Nil,
cleanForR <<= (rGenerator
, projectLayout
, outputLayout
, classDirectory in Compile
, streams
) map {
(_, l, o, d, s) =>
implicit val output = o
FileFunction.cached(s.cacheDirectory / "clean-for-r",
FilesInfo.hash, FilesInfo.exists) { in =>
if (in.nonEmpty) {
s.log.info("Rebuilding all classes because R.java has changed")
IO.delete(d)
}
in
}(Set(l.generatedSrc ** "R.java" get: _*))
Seq.empty[File]
},
buildConfigGenerator <<= buildConfigGeneratorTaskDef,
buildConfigOptions := {
val s = state.value
val prj = thisProjectRef.value
val pkg = applicationId.value
val (buildType,flavor) = VariantSettings.variant(s).status.getOrElse(
prj, (None,None))
List(
("String", "BUILD_TYPE", s""""${buildType getOrElse ""}""""),
("String", "FLAVOR", s""""${flavor getOrElse ""}""""),
("String", "APPLICATION_ID", s""""$pkg"""")
) ++
versionName.value.toList.map(n => ("String", "VERSION_NAME", s""""$n"""")) ++
versionCode.value.toList.map (c => ("int", "VERSION_CODE", c.toString)
)
},
resValues := Nil,
resValuesGenerator <<= resValuesGeneratorTaskDef,
rGenerator <<= rGeneratorTaskDef,
rGenerator <<= rGenerator dependsOn renderscript,
ndkJavah <<= ndkJavahTaskDef,
ndkAbiFilter := Nil,
ndkBuild <<= ndkBuildTaskDef,
aidl <<= aidlTaskDef,
rsTargetApi <<= (properties, minSdkVersion) map { (p, m) =>
Option(p.getProperty("renderscript.target")).getOrElse(m)
},
rsSupportMode <<= properties { p =>
Try(p.getProperty("renderscript.support.mode").toBoolean).getOrElse(false)
},
rsOptimLevel := 3,
renderscript <<= renderscriptTaskDef,
localProjects <<= (baseDirectory, properties, outputLayout) { (b,p,o) =>
loadLibraryReferences(b, p)(o)
},
libraryProjects := localProjects.value ++ apklibs.value ++ aars.value,
libraryProject <<= properties { p =>
Option(p.getProperty("android.library")) exists { _.equals("true") } },
checkAars <<= checkAarsTaskDef,
dexInputs <<= dexInputsTaskDef,
dexAggregate <<= dexAggregateTaskDef,
collectResourcesAggregate <<= collectResourcesAggregateTaskDef,
manifestAggregate <<= manifestAggregateTaskDef,
proguardAggregate <<= proguardAggregateTaskDef,
apkbuildAggregate <<= apkbuildAggregateTaskDef,
retrolambdaAggregate <<= retrolambdaAggregateTaskDef,
testAggregate <<= testAggregateTaskDef,
predex <<= predexTaskDef,
predexSkip := {
localProjects.value map (_.getJarFile)
},
dex <<= dexTaskDef,
dexShards := false,
dexLegacyMode := {
val minSdk = minSdkVersion.value
val minLevel = Try(minSdk.toInt).toOption getOrElse
SdkVersionInfo.getApiByBuildCode(minSdk, true)
minLevel < 21
},
dexMaxHeap := "1024m",
dexMulti := false,
dexMainClasses := Seq.empty,
dexMinimizeMain := false,
dexAdditionalParams := Seq.empty,
dexMainClassesConfig <<= dexMainClassesConfigTaskDef dependsOn (packageT in Compile),
platformJars <<= platform map { p =>
(p.getPath(IAndroidTarget.ANDROID_JAR),
p.getOptionalLibraries.asScala map (_.getJar.getAbsolutePath))
},
projectLayout := ProjectLayout(baseDirectory.value, Some(target.value)),
outputLayout := { layout => new BuildOutput.AndroidOutput(layout) },
manifestPath <<= projectLayout { l =>
l.manifest
},
properties <<= projectLayout (l => loadProperties(l.base)),
mergeManifests := true,
manifestPlaceholders := Map.empty,
manifestOverlays := Seq.empty,
processManifest <<= processManifestTaskDef,
manifest <<= manifestPath map { m =>
if (!m.exists)
fail("cannot find AndroidManifest.xml: " + m)
XML.loadFile(m)
},
versionCode := {
manifest.value.attribute(ANDROID_NS, "versionCode").map(_.head.text.toInt)
},
versionName := {
manifest.value.attribute(
ANDROID_NS, "versionName").map(_.head.text) orElse Some(version.value)
},
packageForR <<= manifest map { m =>
m.attribute("package") get 0 text
},
applicationId := {
packageName.?.value.fold(manifest.value.attribute("package").head.text) { p =>
streams.value.log.warn(
"'packageName in Android' is deprecated, use 'applicationId'")
p
}
},
targetSdkVersion := {
val m = manifest.value
val usesSdk = m \\ "uses-sdk"
val ldr = sdkLoader.value
val tgt = ldr.getTargetInfo(platformTarget.value, buildTools.value.getRevision, ilogger.value(streams.value.log))
val v = String.valueOf(tgt.getTarget.getVersion.getApiLevel)
if (usesSdk.isEmpty) v else
usesSdk(0).attribute(ANDROID_NS, "targetSdkVersion").fold(v) { _.head.text }
},
minSdkVersion := {
val m = manifest.value
val usesSdk = m \\ "uses-sdk"
if (usesSdk.isEmpty) "7" else
usesSdk(0).attribute(ANDROID_NS, "minSdkVersion").fold("7") { _.head.text }
},
proguardCache := "scala" :: Nil,
proguardLibraries := Seq.empty,
proguardOptions := Seq.empty,
proguardConfig <<= proguardConfigTaskDef,
proguardConfig <<= proguardConfig dependsOn packageResources,
proguard <<= proguardTaskDef,
proguardInputs <<= proguardInputsTaskDef,
proguardInputs <<= proguardInputs dependsOn (packageT in Compile),
proguardScala <<= autoScalaLibrary,
retrolambdaEnabled := false,
typedResources <<= autoScalaLibrary,
typedResourcesIgnores := Seq.empty,
typedResourcesGenerator <<= typedResourcesGeneratorTaskDef,
useProguard <<= proguardScala,
useSdkProguard <<= proguardScala (!_),
useProguardInDebug <<= proguardScala,
extraResDirectories := Nil,
extraAssetDirectories := Nil,
renderVectorDrawables := true,
collectResources <<= collectResourcesTaskDef,
collectResources <<= collectResources dependsOn renderscript,
collectResources <<= collectResources dependsOn resValuesGenerator,
collectResources <<= collectResources dependsOn checkAars,
shrinkResources := false,
resourceShrinker <<= resourceShrinkerTaskDef,
packageResources <<= packageResourcesTaskDef dependsOn rGenerator,
apkFile := {
implicit val output = outputLayout.value
projectLayout.value.integrationApkFile(name.value)
},
collectProjectJni <<= collectProjectJniTaskDef,
collectProjectJni <<= collectProjectJni dependsOn renderscript,
collectJni <<= collectJniTaskDef,
packagingOptions := PackagingOptions(Nil, Nil, Nil),
apkbuildDebug := MutableSetting(true),
apkbuild <<= apkbuildTaskDef,
apkbuild <<= apkbuild dependsOn (managedResources in Compile),
apkDebugSigningConfig := DebugSigningConfig(),
apkSigningConfig <<= properties { p =>
def makeSigningConfig(alias: String, store: String, passwd: String) = {
val c = PlainSigningConfig(file(store), passwd, alias)
val c2 = Option(p.getProperty("key.store.type")).fold(c) { t =>
c.copy(storeType = t)
}
Option(p.getProperty("key.alias.password")).fold(c2) { p =>
c2.copy(keyPass = Some(p))
}
}
for {
a <- Option(p.getProperty("key.alias"))
b <- Option(p.getProperty("key.store"))
c <- Option(p.getProperty("key.store.password"))
} yield makeSigningConfig(a,b,c)
},
signRelease <<= signReleaseTaskDef,
zipalign <<= zipalignTaskDef,
packageT <<= zipalign,
instrumentTestTimeout := 180000,
instrumentTestRunner := "android.test.InstrumentationTestRunner",
debugIncludesTests := true,
debugTestsGenerator <<= (debugIncludesTests,projectLayout) map {
(tests,layout) =>
if (tests)
(layout.testScalaSource ** "*.scala" get) ++
(layout.testJavaSource ** "*.java" get)
else Seq.empty
},
setDebug := { apkbuildDebug.value(true) },
setRelease := { apkbuildDebug.value(false) },
// I hope packageXXX dependsOn(setXXX) sets createDebug before package
packageDebug <<= packageT,
packageDebug <<= packageDebug dependsOn setDebug,
packageRelease <<= packageT,
packageRelease <<= packageRelease dependsOn setRelease,
sdkPath <<= properties { props =>
(Option(System getenv "ANDROID_HOME") orElse
Option(props getProperty "sdk.dir")) flatMap { p =>
val f = file(p + File.separator)
if (f.exists && f.isDirectory)
Some(p + File.separator)
else
None
} getOrElse fail(
"set the env variable ANDROID_HOME pointing to your Android SDK")
},
ndkPath <<= (thisProject,properties) { (p,props) =>
(Option(System getenv "ANDROID_NDK_HOME") orElse
Option(props get "ndk.dir")) flatMap { p =>
val f = file(p + File.separator)
if (f.exists && f.isDirectory)
Some(p + File.separator)
else
None
}
},
zipalignPath <<= ( sdkPath
, sdkManager
, buildTools
, streams) map { (p, m, bt, s) =>
import SdkConstants._
val pathInBt = bt.getLocation / FN_ZIPALIGN
s.log.debug("checking zipalign at: " + pathInBt)
if (pathInBt.exists)
pathInBt.getAbsolutePath
else {
val zipalign = file(p + OS_SDK_TOOLS_FOLDER + FN_ZIPALIGN)
if (!zipalign.exists)
fail("zipalign not found at either %s or %s" format (
pathInBt, zipalign))
zipalign.getAbsolutePath
}
},
ilogger := { l: Logger => SbtLogger(l) },
buildToolsVersion := None,
sdkLoader <<= sdkManager map { m =>
DefaultSdkLoader.getLoader(file(m.getLocation))
},
libraryRequests := Nil,
builder <<= ( sdkLoader
, sdkManager
, name
, ilogger
, buildTools
, platformTarget
, libraryRequests
, state) map {
(ldr, m, n, l, b, t, reqs, s) =>
val bldr = new AndroidBuilder(n, "android-sdk-plugin",
new DefaultProcessExecutor(l(s.log)),
SbtJavaProcessExecutor,
new ErrorReporter(ErrorReporter.EvaluationMode.STANDARD) {
override def receiveMessage(message: Message) = {
val errorStringBuilder = new StringBuilder
message.getSourceFilePositions.asScala.foreach { pos =>
errorStringBuilder.append(pos.toString)
errorStringBuilder.append(' ')
}
if (errorStringBuilder.nonEmpty)
errorStringBuilder.append(": ")
if (message.getToolName.isPresent) {
errorStringBuilder.append(message.getToolName.get).append(": ")
}
errorStringBuilder.append(message.getText).append("\\n")
val messageString = errorStringBuilder.toString
message.getKind match {
case Kind.ERROR =>
s.log.error(messageString)
case Kind.WARNING =>
s.log.warn(messageString)
case Kind.INFO =>
s.log.info(messageString)
case Kind.STATISTICS =>
s.log.debug(messageString)
case Kind.UNKNOWN =>
s.log.debug(messageString)
case Kind.SIMPLE =>
s.log.info(messageString)
}
}
override def handleSyncError(data: String, `type`: Int, msg: String) = {
s.log.error(s"android sync error: data=$data, type=${`type`}, msg=$msg")
new SyncIssue {
override def getType = `type`
override def getData = data
override def getMessage = msg
override def getSeverity = SyncIssue.SEVERITY_ERROR
}
}
},
l(s.log), false)
val sdkInfo = ldr.getSdkInfo(l(s.log))
val targetInfo = ldr.getTargetInfo(t, b.getRevision, l(s.log))
bldr.setTargetInfo(sdkInfo, targetInfo, reqs map { case ((nm, required)) => new LibraryRequest(nm, required) } asJava)
bldr
},
bootClasspath := builder.value.getBootClasspath(false).asScala map Attributed.blank,
sdkManager <<= (sdkPath,ilogger, streams) map { (p, l, s) =>
SdkManager.createManager(p, l(s.log))
},
buildTools := {
buildToolsVersion.value flatMap { version =>
Option(sdkManager.value.getBuildTool(FullRevision.parseRevision(version)))
} getOrElse {
val tools = sdkManager.value.getLatestBuildTool
if (tools == null) fail("Android SDK build-tools not found")
else streams.value.log.debug("Using Android build-tools: " + tools)
tools
}
},
platformTarget <<= (properties,thisProject) { (p,prj) =>
Option(p.getProperty("target")) getOrElse fail(
prj.id + ": configure project.properties or set 'platformTarget'")
},
platform <<= (sdkManager, platformTarget, thisProject) map {
(m, p, prj) =>
val plat = Option(m.getTargetFromHashString(p))
plat getOrElse fail("Platform %s unknown in %s" format (p, prj.base))
}
)) ++ Seq(
autoScalaLibrary := {
((scalaSource in Compile).value ** "*.scala").get.nonEmpty ||
(managedSourceDirectories in Compile).value.exists(d =>
(d ** "*.scala").get.nonEmpty)
},
crossPaths <<= autoScalaLibrary,
resolvers <++= sdkPath { p =>
Seq(SdkLayout.googleRepository(p), SdkLayout.androidRepository(p))
},
cleanFiles += projectLayout.value.bin,
exportJars := true,
unmanagedBase := projectLayout.value.libs,
watchSources <++= Def.task {
val filter = new SimpleFileFilter({ f =>
f.isFile && Character.isJavaIdentifierStart(f.getName.charAt(0))
})
val layout = projectLayout.value
val extras = extraResDirectories.value.map(_.getCanonicalFile).distinct
(layout.testSources +: layout.jni +: layout.res +: extras) flatMap { path =>
(path ** filter) get }
}
)
/** Console commands contributed by the plugin: adb helpers, logcat access,
 *  project generation, device selection and build-variant management. */
lazy val androidCommands: Seq[Setting[_]] = Seq(
  commands ++= Seq(genAndroid, genAndroidSbt, pidcat, logcat, adbLs, adbShell,
    devices, device, reboot, adbScreenOn, adbRunas, adbKill,
    adbWifi, adbPush, adbPull, adbCat, adbRm, variant, variantClear)
)
// -- adb file and shell helpers --------------------------------------------
private def adbCat = Command(
  "adb-cat", ("adb-cat", "Cat a file from device"),
  "Cat a file from device to stdout"
)(androidFileParser)(adbCatAction)
private def adbRm = Command(
  "adb-rm", ("adb-rm", "Remove a file from device"),
  "Remove a file from device"
)(androidFileParser)(adbRmAction)
private def adbPull = Command(
  "adb-pull", ("adb-pull", "pull a file from device"),
  "Pull a file from device to the local system"
)(adbPullParser)(adbPullAction)
private def adbPush = Command(
  "adb-push", ("adb-push", "push a file to device"),
  "Push a file to device from the local system"
)(adbPushParser)(adbPushAction)
private def adbShell = Command(
  "adb-shell", ("adb-shell", "execute shell commands on device"),
  "Run a command on a selected android device using adb"
)(stringParser)(shellAction)
private def adbRunas = Command(
  "adb-runas", ("adb-runas", "execute shell commands on device as a debuggable package user"),
  "Run a command on a selected android device using adb with the permissions of the current package"
)(projectAndStringParser)(runasAction)
private def adbKill = Command(
  "adb-kill", ("adb-kill", "kill the current/specified package"),
  "Kills the process if it is not currently in the foreground"
)(projectAndStringParser)(killAction)
private def adbLs = Command(
  "adb-ls", ("adb-ls", "list device files"),
  "List files located on the selected android device"
)(androidFileParser)(adbLsAction)
// -- logcat access ---------------------------------------------------------
private def logcat = Command(
  "logcat", ("logcat", "grab device logcat"),
  "Read logcat from device without blocking"
)(stringParser)(logcatAction)
private def pidcat = Command(
  "pidcat", ("pidcat", "grab device logcat for a package"),
  "Read logcat for a given package, defaults to project package if no arg"
)(projectAndStringParser)(pidcatAction)
// -- project generation ----------------------------------------------------
private def genAndroid = Command(
  "gen-android", ("gen-android", "Create an android project"),
  "Create a new android project built using SBT"
)(createProjectParser)(createProjectAction)
private def genAndroidSbt = Command.command(
  "gen-android-sbt", "Create SBT files for existing android project",
  "Creates build.properties, build.scala, etc for an existing android project"
)(createProjectSbtAction)
// -- device management -----------------------------------------------------
private def device = Command(
  "device", ("device", "Select a connected android device"),
  "Select a device (when there are multiple) to apply actions to"
)(deviceParser)(deviceAction)
private def adbScreenOn = Command.command(
  "adb-screenon", "Turn on screen and unlock (if not protected)",
  "Turn the screen on and unlock the keyguard if it is not pin-protected"
)(adbPowerAction)
private def adbWifi = Command.command(
  "adb-wifi", "Enable/disable ADB-over-wifi for selected device",
  "Toggle ADB-over-wifi for the selected device"
)(adbWifiAction)
private def reboot = Command(
  "adb-reboot", ("adb-reboot", "Reboot selected device"),
  "Reboot the selected device into the specified mode"
)(rebootParser)(rebootAction)
private def devices = Command.command(
  "devices", "List connected and online android devices",
  "List all connected and online android devices")(devicesAction)
// -- build variant selection -----------------------------------------------
private def variant = Command("variant",
  ("variant[/project] <buildType> <flavor>",
    "Load an Android build variant configuration (buildType + flavor)"),
  "Usage: variant[/project] <buildType> <flavor>")(variantParser)(variantAction)
private def variantClear = Command("variant-reset",
  ("variant-reset", "Clear loaded variant configuration from the project"),
  "Usage: variant-reset[/project]")(projectParser)(variantClearAction)
/** Abort the current task/command with `msg`. The by-name parameter means the
 *  message is only rendered when this is actually invoked; the return type A
 *  lets callers use it in any expression position (it always throws). */
def fail[A](msg: => String): A = throw new MessageOnlyException(msg)
}
/** Runs the Android tools' java subprocess requests through sbt's Fork API.
 *  NOTE(review): the processOutputHandler parameter is ignored in this body;
 *  output goes wherever Fork.java sends it by default. */
object SbtJavaProcessExecutor extends JavaProcessExecutor {
  override def execute(javaProcessInfo: JavaProcessInfo, processOutputHandler: ProcessOutputHandler) = {
    // translate the tool-supplied environment, JVM args and classpath into ForkOptions
    val options = ForkOptions(
      envVars = javaProcessInfo.getEnvironment.asScala map { case ((x, y)) => x -> y.toString } toMap,
      runJVMOptions = javaProcessInfo.getJvmArgs.asScala ++
        ("-cp" :: javaProcessInfo.getClasspath :: Nil))
    // r is the forked JVM's exit code
    val r = Fork.java(options, (javaProcessInfo.getMainClass :: Nil) ++ javaProcessInfo.getArgs.asScala)
    new ProcessResult {
      // any non-zero exit code is surfaced as a ProcessException
      override def assertNormalExitValue() = {
        if (r != 0) throw new ProcessException("error code: " + r)
        this
      }
      override def rethrowFailure() = this
      override def getExitValue = r
    }
  }
}
/** Routes a forked process's captured output into an sbt [[Logger]]:
 *  standard output at debug level, error output at warn level. */
case class SbtProcessOutputHandler(lg: Logger) extends BaseProcessOutputHandler {
  override def handleOutput(processOutput: ProcessOutput) = {
    processOutput match {
      case p: BaseProcessOutput =>
        val out = p.getStandardOutputAsString
        if (out.nonEmpty) lg.debug(out)
        val err = p.getErrorOutputAsString
        if (err.nonEmpty) lg.warn(err)
    }
  }
}
/** Adapts an sbt [[Logger]] to the Android tools' ILogger interface.
 *  Both verbose and info are mapped to sbt's debug level so the tools'
 *  chatter stays out of the normal console output. */
case class SbtLogger(lg: Logger) extends ILogger {
  override def verbose(fmt: java.lang.String, args: Object*): Unit =
    lg.debug(fmt.format(args: _*))
  override def info(fmt: java.lang.String, args: Object*): Unit =
    lg.debug(fmt.format(args: _*))
  override def warning(fmt: java.lang.String, args: Object*): Unit =
    lg.warn(fmt.format(args: _*))
  override def error(t: Throwable, fmt: java.lang.String, args: Object*): Unit = {
    lg.error(fmt.format(args: _*))
    // also emit the stack trace when a throwable accompanies the message
    if (t != null) lg.trace(t)
  }
}
/** An ILogger that silently discards every message; used where the Android
 *  tools require a logger but no output is wanted. */
object NullLogger extends ILogger {
  override def verbose(fmt: java.lang.String, args: Object*) = ()
  override def info(fmt: java.lang.String, args: Object*) = ()
  override def warning(fmt: java.lang.String, args: Object*) = ()
  override def error(t: Throwable, fmt: java.lang.String, args: Object*) = ()
}
trait AutoBuild extends Build {
  /** Recursively discover ant-style library projects declared via
   *  android.library.reference.N keys in project.properties, creating a
   *  Project (with androidBuild settings and libraryProject := true) for
   *  each, depth-first, and de-duplicating the result. */
  private def loadLibraryProjects(b: File, props: Properties): Seq[Project] = {
    val p = props.asScala
    (p.keys.collect {
      case k if k.startsWith("android.library.reference") => k
    }.toList.sortWith { (a,b) => a < b } flatMap { k =>
      val layout = ProjectLayout(b/p(k))
      val pkg = pkgFor(layout.manifest)
      (Project(id=pkg, base=b/p(k)) settings(Plugin.androidBuild ++
        Seq(platformTarget := target(b/p(k)),
          libraryProject := true): _*) enablePlugins
        AndroidPlugin) +:
        loadLibraryProjects(b/p(k), loadProperties(b/p(k)))
    }) distinct
  }
  /** Determine the platform target hash for a project directory: prefer the
   *  "target" entry in project.properties, otherwise fall back to the newest
   *  platform installed in the SDK (located via ANDROID_HOME or sdk.dir). */
  private def target(basedir: File): String = {
    val props = loadProperties(basedir)
    val path = (Option(System getenv "ANDROID_HOME") orElse
      Option(props get "sdk.dir")) flatMap { p =>
      val f = file(p + File.separator)
      if (f.exists && f.isDirectory)
        Some(p + File.separator)
      else
        None
    } getOrElse {
      fail("set ANDROID_HOME or run 'android update project -p %s'"
        format basedir.getCanonicalPath): String
    }
    Option(props getProperty "target") getOrElse {
      // no explicit target configured: pick the highest installed platform
      val manager = SdkManager.createManager(path, NullLogger)
      val versions = (manager.getTargets map {
        _.getVersion
      } sorted) reverse
      AndroidTargetHash.getPlatformHashString(versions(0))
    }
  }
  // derive a project id from the manifest's package, dots replaced by dashes
  private def pkgFor(manifest: File) =
    (XML.loadFile(manifest).attribute("package") get 0 text).replaceAll(
      "\\\\.", "-")
  /** If the build defines no projects, synthesize one from the current
   *  directory's layout (plus its library references); otherwise inject the
   *  android settings into each declared project that has a manifest. */
  override def projects = {
    val projects = super.projects
    if (projects.isEmpty) {
      // TODO search subdirectories to find more complex project structures
      // e.g. root(empty) -> { main-android, library-android }
      val basedir = file(".")
      val layout = ProjectLayout(basedir)
      if (layout.manifest.exists) {
        val props = loadProperties(basedir)
        val libProjects = loadLibraryProjects(basedir, props)
        val project = Project(id=pkgFor(layout.manifest),
          base=basedir).androidBuildWith(libProjects map(a ⇒ a: ProjectReference): _*).settings(
          platformTarget := target(basedir)) enablePlugins
          AndroidPlugin
        project +: libProjects
      } else Nil
    } else {
      // TODO automatically apply androidBuild with all library/sub projects
      // for now, all main projects have to specify androidBuild(deps) manually
      projects map { p =>
        val layout = ProjectLayout(p.base)
        if (layout.manifest.exists) {
          val settings: Seq[Def.Setting[_]] = p.settings
          // splice the android settings in before any Android-scoped settings
          // the user already declared, so user settings keep precedence
          val prefix = settings.takeWhile(
            _.key.scope.config.toOption exists (_.name != Android.name))
          val tail = settings.dropWhile(
            _.key.scope.config.toOption exists (_.name != Android.name))
          val platform = platformTarget := target(p.base)
          p.settings(prefix ++ Plugin.androidBuild ++ (platform +: tail): _*)
            .enablePlugins(AndroidPlugin)
        } else p
      }
    }
  }
}
| aafa/android-sdk-plugin | src/rules.scala | Scala | bsd-3-clause | 42,975 |
package com.airbnb.scheduler
import com.yammer.metrics.core.HealthCheck
import com.yammer.metrics.core.HealthCheck.Result
import com.google.inject.Inject
import jobs.JobScheduler
/**
 * Yammer metrics health check registered under "scheduler-health".
 * Currently always reports healthy; the injected JobScheduler is not yet
 * consulted (see TODO below).
 *
 * @author Florian Leibert (flo@leibert.de)
 */
class SchedulerHealthCheck @Inject()(val jobScheduler: JobScheduler) extends HealthCheck("scheduler-health") {
  //TODO(FL): Implement a real check against jobScheduler state
  override def check(): Result =
    Result.healthy
}
| snegi/chronos | src/main/scala/com/airbnb/scheduler/SchedulerHealthCheck.scala | Scala | apache-2.0 | 423 |
package dotGenerator
import org.graphstream.graph.implementations.MultiGraph
import spinal.core.{BaseType, Component, Node}
/**
 * Renders a SpinalHDL component hierarchy as a Graphviz dot file (and
 * optionally a PDF): one box per component, edges for IO connections.
 *
 * Created by snipy on 06.10.16.
 *
 * @param rootComponent   top-level component to walk
 * @param filename        name of the generated dot file (also the graph name)
 * @param targetDirectory directory the dot file is written to
 */
case class SpinalDotGenerator(rootComponent: Component, filename: String, targetDirectory: String)
{
    private type GNode = org.graphstream.graph.Node
    private type GEdge = org.graphstream.graph.Edge
    private val fileCompletePath = s"$targetDirectory/$filename"
    // In-memory graph used to deduplicate nodes/edges before emitting dot text.
    private val graph: org.graphstream.graph.Graph = new MultiGraph(filename)
    private val dotFileManager = DotFileManager(filename = filename, directoryPath = targetDirectory)
    // Synthetic node ids for the top-level inputs/outputs.
    private val rI = "RootComponentInput"
    private val rO = "RootComponentOutput"

    /** Walks the component tree and writes the complete dot file; returns
     *  `this` so calls can be chained with [[generatePdfFile]]. */
    def generateDotFile(): SpinalDotGenerator =
    {
        dotFileManager.write("digraph g {")
        dotFileManager.write("splines=ortho")
        // Add root component input
        val nRootInput: GNode = graph.addNode(rI)
        nRootInput.addAttribute("label", rI)
        nRootInput.addAttribute("name", rI)
        nRootInput.addAttribute("level", new Integer(0))
        // Add root component output
        val nRootOutput: GNode = graph.addNode(rO)
        nRootOutput.addAttribute("label", rO)
        nRootOutput.addAttribute("name", rO)
        nRootOutput.addAttribute("level", new Integer(0))
        // Nodes must exist before edges reference them.
        parseComponent(rootComponent)
        parseIO(rootComponent)
        parseRootIO(rootComponent)
        // Emit every collected node as a dot statement (edges were written
        // eagerly by addEdge).
        val totNode = graph.getNodeCount
        for (i <- 0 until totNode)
        {
            addDotNode(graph.getNode(i))
        }
        dotFileManager.write("}")
        dotFileManager.close()
        this
    }

    /*
        Assumes generateDotFile() has already been called.
     */
    def generatePdfFile(): SpinalDotGenerator =
    {
        Dot2PdfGenerator(fileCompletePath).generatePdfFile()
        this
    }

    /** Connects the synthetic top-level input/output nodes to the root. */
    private def parseRootIO(root: Component): Unit =
    {
        root.getAllIo.foreach
        { n =>
            if (n.isInput)
            {
                println(root.definitionName)
                addEdge(rI, root.definitionName)
            }
            else if (n.isOutput)
            {
                println(root.definitionName)
                addEdge(root.definitionName, rO)
            }
        }
    }

    /** Adds an edge per IO connection of `component`, then recurses into its
     *  children. */
    private def parseIO(component: Component): Unit =
    {
        println(s"parse io of ${component.definitionName}")
        component.getAllIo.foreach
        { n =>
            if (n.isInput)
            {
                // Edge from the driving component, if the input is connected.
                if (n.getInput(0) != null)
                {
                    addEdge(n.getInput(0).component.definitionName, component.definitionName)
                }
            }
            else if (n.isOutput)
            {
                // One edge per consumer of this output.
                n.consumers.foreach
                { n_consumers: spinal.core.Node =>
                    addEdge(n.component.definitionName, n_consumers.component.definitionName)
                }
            }
            else
            {
                System.err.println("IO not input or output")
            }
        }
        component.children.foreach(parseIO)
    }

    /** Registers a graph node per component (depth-first); `level` records
     *  the nesting depth for display. */
    private def parseComponent(component: Component, level: Int = 1): Unit =
    {
        println(s"${component.definitionName}")
        component.children.foreach(parseComponent(_, level = level + 1))
        val n: GNode = graph.addNode(component.definitionName)
        n.addAttribute("label", component.definitionName)
        n.addAttribute("name", component.definitionName)
        n.addAttribute("level", new Integer(level))
    }

    /** Writes one dot node statement; the synthetic root IO nodes get a bare
     *  (shapeless) rendering, components a labelled box. */
    private def addDotNode(n: GNode): Unit =
    {
        val label = n.getId match
        {
            case `rI` =>
                s"""label="Top Level Input" """
            case `rO` =>
                s"""label="Top Level Output" """
            case _ =>
                s"""label="${n.getAttribute("label")}| level = ${n.getAttribute("level")}" """
        }
        val shape = n.getId match
        {
            case `rI` =>
                s"""shape=none"""
            case `rO` =>
                s"""shape=none"""
            case _ =>
                s"""shape=box"""
        }
        dotFileManager.write(s"""${n.getAttribute("name")} [$label , $shape]""")
    }

    /** Writes a dot edge and records it in the graph; `start` is bumped until
     *  an unused edge id `from+to+start` is found so parallel edges survive. */
    private def addEdge(from: String, to: String, start: Int = 0): Unit =
    {
        if (graph.getEdge(from + to + start) == null)
        {
            graph.addEdge(from + to + start.toString, from, to, true)
            val edgeLabel = """""""" // label = ""
            dotFileManager.write(s"""$from -> $to[label = $edgeLabel];""")
            //for java/scala interaction
            //noinspection RemoveRedundantReturn
            return
        }
        else
            addEdge(from, to, start = start + 1)
    }
}
| SnipyJulmy/MSE_1617_PA | AstGenerator/src/main/scala/dotGenerator/SpinalDotGenerator.scala | Scala | gpl-2.0 | 4,762 |
package scala.slick.compiler
import scala.collection.mutable.HashMap
import scala.slick.SlickException
import scala.slick.ast._
import Util._
/** Replace references to FieldSymbols in TableExpansions by the
  * appropriate ElementSymbol. Fields that are referenced but not yet part
  * of a table's expansion are appended to it (tracked in `updatedTables`)
  * and the whole tree is patched afterwards. */
class ReplaceFieldSymbols extends Phase with ColumnizerUtils {
  val name = "replaceFieldSymbols"

  def apply(n: Node, state: CompilationState): Node = {
    // Expansions that grew extra columns during rewriting; patched in at the end.
    val updatedTables = new HashMap[Symbol, ProductNode]
    // Definitions encountered so far, for resolving path references.
    val seenDefs = new HashMap[Symbol, Node]
    // Tries to turn a FieldSymbol selection (`p`.`field`, reached through the
    // symbol path `syms`) into a positional ElementSymbol selection.
    def rewrite(target: Node, p: Node, field: FieldSymbol, syms: List[Symbol]): Option[Select] = {
      val ntarget = narrowStructure(target)
      logger.debug("Narrowed to structure "+ntarget+" with tail "+Path.toString(syms.tail.reverse))
      select(syms.tail, ntarget, seenDefs.get _).map {
        case t: TableExpansion =>
          logger.debug("Narrowed to element "+t)
          val columns: ProductNode = updatedTables.get(t.generator).getOrElse(t.columns.asInstanceOf[ProductNode])
          val needed = Select(Ref(t.generator), field)
          // Reuse the column's existing position, or append it to the expansion.
          Some(columns.nodeChildren.zipWithIndex.find(needed == _._1) match {
            case Some((_, idx)) => Select(p, ElementSymbol(idx+1))
            case None =>
              updatedTables += t.generator -> ProductNode((columns.nodeChildren :+ needed))
              Select(p, ElementSymbol(columns.nodeChildren.size + 1))
          })
        case t: TableRefExpansion =>
          logger.debug("Narrowed to element "+t)
          Path.unapply(t.ref).flatMap { psyms =>
            val syms = psyms.reverse
            logger.debug("Looking for seen def "+syms.head)
            seenDefs.get(syms.head).flatMap { n =>
              logger.debug("Trying to rewrite recursive match "+t.ref+" ."+field)
              // Rewrite through the referenced expansion first, then map the
              // resulting positional selection onto this expansion's columns.
              rewrite(n, t.ref, field, syms).map { recSel =>
                logger.debug("Found recursive replacement "+recSel.in+" ."+recSel.field)
                val columns: ProductNode = updatedTables.get(t.marker).getOrElse(t.columns.asInstanceOf[ProductNode])
                val needed = Select(t.ref, recSel.field)
                columns.nodeChildren.zipWithIndex.find(needed == _._1) match {
                  case Some((_, idx)) => Select(p, ElementSymbol(idx+1))
                  case None =>
                    updatedTables += t.marker -> ProductNode((columns.nodeChildren :+ needed))
                    Select(p, ElementSymbol(columns.nodeChildren.size + 1))
                }
              }
            }
          }
        case _: TableNode =>
          None // A table within a TableExpansion -> don't rewrite
        case Path(psyms) =>
          logger.debug("Narrowed to "+Path.toString(psyms))
          val syms = psyms.reverse
          seenDefs.get(syms.head).flatMap { n =>
            logger.debug("Trying to rewrite target "+p+" ."+field)
            rewrite(n, p, field, syms)
          }
        case n =>
          throw new SlickException("Unexpected target node "+n+" (from "+Path.toString(syms.reverse)+")")
          None // NOTE(review): unreachable after the throw above
      }.head // we have to assume that the structure is the same for all Union expansions
    }
    // Walks the tree, recording defs and rewriting field selections.
    def tr(n: Node, scope: Scope = Scope.empty): Node = n match {
      case d: DefNode =>
        val r = d.mapChildrenWithScope({ (symO, ch, chscope) =>
          val ch2 = tr(ch, chscope)
          symO.foreach { sym => seenDefs += sym -> ch2 }
          ch2
        }, scope)
        r
      case sel @ Select(p @ Path(psyms), field: FieldSymbol) =>
        val syms = psyms.reverse
        scope.get(syms.head).flatMap { case (n, _) =>
          logger.debug("Trying to rewrite "+p+" ."+field)
          val newSelO = rewrite(n, p, field, syms)
          newSelO.foreach(newSel => logger.debug("Replaced "+Path.toString(sel)+" by "+Path.toString(newSel)))
          newSelO
        }.getOrElse(sel)
      case n => n.mapChildrenWithScope(((_, ch, chsc) => tr(ch, chsc)), scope)
    }
    val n2 = tr(n)
    // Splice any grown expansions back into the rewritten tree.
    val n3 = if(!updatedTables.isEmpty) {
      logger.debug("Patching "+updatedTables.size+" updated Table(Ref)Expansion(s) "+updatedTables.keysIterator.mkString(", ")+" into the tree")
      for((sym, n) <- updatedTables) logger.debug("Updated expansion: "+sym, n)
      def update(n: Node): Node = n match {
        case t: TableExpansion =>
          updatedTables.get(t.generator).fold(t)(c => t.copy(columns = c)).nodeMapChildren(update)
        case t: TableRefExpansion =>
          updatedTables.get(t.marker).fold(t)(c => t.copy(columns = c)).nodeMapChildren(update)
        case n => n.nodeMapChildren(update)
      }
      update(n2)
    } else n2
    n3
  }
}
/** Replace all TableNodes with TableExpansions which contain both the
  * expansion and the original table. The expansion is obtained from the
  * table's packed shape and pre-processed with forceOuterBinds. */
class ExpandTables extends Phase {
  val name = "expandTables"

  def apply(n: Node, state: CompilationState): Node = n match {
    case t: TableExpansion => t // already expanded
    case t: TableNode =>
      val sym = new AnonSymbol
      val expanded = WithOp.encodeRef(t, sym).nodeShaped_*.packedNode
      // Recurse so tables nested inside the expansion are expanded as well.
      val processed = apply(state.compiler.runBefore(Phase.forceOuterBinds, expanded, state), state)
      TableExpansion(sym, t, ProductNode(processed.flattenProduct))
    case n => n.nodeMapChildren(ch => apply(ch, state))
  }
}
/** Expand Paths to ProductNodes and TableExpansions into ProductNodes of
  * Paths and TableRefExpansions of Paths, so that all Paths point to
  * individual columns by index */
class ExpandRefs extends Phase with ColumnizerUtils {
  val name = "expandRefs"

  def apply(n: Node, state: CompilationState) = expandRefs(n)

  /** `keepRef` suppresses expansion (set for children in 'from' positions
    * and under aggregate functions). */
  def expandRefs(n: Node, scope: Scope = Scope.empty, keepRef: Boolean = false): Node = n match {
    case p @ Path(psyms) =>
      logger.debug("Checking path "+Path.toString(psyms))
      psyms.head match {
        case f: FieldSymbol => p // already a column-level selection
        case _ if keepRef => p
        case _ =>
          val syms = psyms.reverse
          scope.get(syms.head) match {
            case Some((target, _)) =>
              val exp = select(syms.tail, narrowStructure(target), (s => scope.get(s).map(_._1))).head
              logger.debug("  narrowed "+p+" to "+exp)
              // Only structural targets get burst into per-column paths.
              exp match {
                case t: TableExpansion => burstPath(Path(syms.reverse), t)
                case t: TableRefExpansion => burstPath(Path(syms.reverse), t)
                case pr: ProductNode => burstPath(Path(syms.reverse), pr)
                case n => p
              }
            case None => p
          }
      }
    case n @ Apply(sym: Library.AggregateFunctionSymbol, _) =>
      // Don't expand children of aggregate functions
      n.mapChildrenWithScope(((_, ch, chsc) => expandRefs(ch, chsc, true)), scope)
    case n =>
      // Don't expand children in 'from' positions
      n.mapChildrenWithScope(((symO, ch, chsc) => expandRefs(ch, chsc, symO.isDefined)), scope)
  }

  /** Expand a base path into a given target, producing one Select per
    * element (recursively for nested products/expansions). */
  def burstPath(base: Node, target: Node): Node = target match {
    case ProductNode(ch) =>
      ProductNode(ch.zipWithIndex.map { case (n, idx) =>
        burstPath(Select(base, ElementSymbol(idx+1)), n)
      })
    case TableExpansion(_, t, cols) =>
      TableRefExpansion(new AnonSymbol, base, ProductNode(cols.nodeChildren.zipWithIndex.map { case (n, idx) =>
        burstPath(Select(base, ElementSymbol(idx+1)), n)
      }))
    case TableRefExpansion(_, t, cols) =>
      TableRefExpansion(new AnonSymbol, base, ProductNode(cols.nodeChildren.zipWithIndex.map { case (n, idx) =>
        burstPath(Select(base, ElementSymbol(idx+1)), n)
      }))
    case _ => base
  }
}
/** Shared navigation helpers for the columnizer phases. */
trait ColumnizerUtils { _: Phase =>
  /** Navigate into ProductNodes along a path of symbols. Returns one result
    * per Union branch encountered; throws if the path cannot be resolved. */
  def select(selects: List[Symbol], base: Node, lookup: (Symbol => Option[Node]) = (_ => None)): Vector[Node] = {
    logger.debug("  select("+selects+", "+base+")")
    (selects, base) match {
      case (s, Union(l, r, _, _, _)) => select(s, l, lookup) ++ select(s, r, lookup)
      case (Nil, n) => Vector(n)
      case ((s: ElementSymbol) :: t, ProductNode(ch)) => select(t, ch(s.idx-1), lookup)
      case (selects, Path(rpath)) =>
        // Follow the reference via `lookup`, then continue with the
        // remaining selections prepended by the reference's own path.
        val path = rpath.reverse
        logger.debug("  encountered reference "+Path.toString(rpath)+" -> resolving "+path.head)
        lookup(path.head) match {
          case Some(n) =>
            select(path.tail ::: selects, n, lookup)
          case None => throw new SlickException("Cannot resolve "+path.head+" in "+Path.toString(rpath))
        }
      case _ =>
        // Try once more on the narrowed structure; give up if narrowing
        // makes no progress.
        val narrowed = narrowStructure(base)
        if(narrowed eq base)
          throw new SlickException("Cannot select "+Path.toString(selects.reverse)+" in "+base)
        else select(selects, narrowed, lookup)
    }
  }

  /** Find the actual structure produced by a Node */
  def narrowStructure(n: Node): Node = n match {
    case Pure(n) => n
    case Join(_, _, l, r, _, _) => ProductNode(Seq(narrowStructure(l), narrowStructure(r)))
    case GroupBy(_, _, from, by) => ProductNode(Seq(narrowStructure(by), narrowStructure(from)))
    case u: Union => u.copy(left = narrowStructure(u.left), right = narrowStructure(u.right))
    case FilteredQuery(_, from) => narrowStructure(from)
    case Bind(_, _, select) => narrowStructure(select)
    case n => n
  }
}
| boldradius/slick | src/main/scala/scala/slick/compiler/Columnizer.scala | Scala | bsd-2-clause | 9,225 |
/*
* konpare
* Copyright (C) 2015 Alexander Fefelov <https://github.com/alexanderfefelov>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.github.alexanderfefelov.konpare.syntax.subject.config
import com.github.alexanderfefelov.konpare.syntax.{Syntax, Subject}
/** Parses `config lldp ...` switch-configuration commands into the flat
  * key/value `model` map. `data` holds the command tokens after
  * "config lldp"; keys are built as "lldp=&lt;port&gt;=&lt;parameter&gt;[=&lt;value&gt;]". */
object Lldp extends Subject {

  override def process2(data: List[String], model: collection.mutable.Map[String, String]) = {
    data.head match {
      case Syntax.PARAMETER_PORTS =>
        // data(1) = port range, data(2) = parameter, data(3..) = value(s)
        data(3) match {
          case Syntax.VALUE_ENABLE | Syntax.VALUE_DISABLE =>
            // config lldp ports 1-28 notification disable
            Syntax.expandRange(data(1)).foreach( i =>
              model += s"${Syntax.SUBJECT_LLDP}=$i=${data(2)}" -> data(3)
            )
          case _ if data(2) == Syntax.PARAMETER_ADMIN_STATUS =>
            // config lldp ports 1-28 admin_status tx_and_rx
            Syntax.expandRange(data(1)).foreach( i =>
              model += s"${Syntax.SUBJECT_LLDP}=$i=${data(2)}" -> data(3)
            )
          case _ if data(2) == Syntax.PARAMETER_BASIC_TLVS =>
            // One entry per listed TLV, e.g.
            // config lldp ports 25 basic_tlvs port_description system_name system_description system_capabilities enable
            Syntax.expandRange(data(1)).foreach( i =>
              data.drop(3).foreach { param =>
                model += s"${Syntax.SUBJECT_LLDP}=$i=${data(2)}=$param" -> param
              }
            )
          case _ =>
            // Unrecognized port parameter: ignored silently.
        }
      case Syntax.PARAMETER_MESSAGE_TX_INTERVAL | Syntax.PARAMETER_TX_DELAY | Syntax.PARAMETER_MESSAGE_TX_HOLD_MULTIPLIER
           | Syntax.PARAMETER_REINIT_DELAY | Syntax.PARAMETER_NOTIFICATION_INTERVAL =>
        // Global scalar parameters, e.g.:
        // config lldp message_tx_interval 30
        // config lldp tx_delay 2
        // config lldp message_tx_hold_multiplier 4
        // config lldp reinit_delay 2
        // config lldp notification_interval 5
        model += s"${Syntax.SUBJECT_LLDP}=${data.head}" -> data(1)
      case _ =>
    }
  }
}
package de.mineformers.visum.bean.binding
import de.mineformers.visum.bean.Observable
import de.mineformers.visum.bean.value.ObservableValue
/**
 * A [[Binding]] whose value is derived from two observables of the same
 * element type via the binary function `op`; the cached value is
 * invalidated whenever either operand changes.
 *
 * @author PaleoCrafter
 */
case class BinaryBinding[@specialized A, @specialized B](a: ObservableValue[A],
                                                         b: ObservableValue[A])
                                                        (op: (A, A) => B) extends Binding[B] {
  // Re-compute lazily on next access after either operand fires a change.
  Seq(a, b).foreach(_.onChange += {
    e =>
      invalidate()
  })

  override def computeValue: B = op(a.value, b.value)

  override def dependencies: Seq[Observable] = Seq(a, b).distinct
}
| MineFormers/Visum | src/main/scala/de.mineformers.visum/bean/binding/BinaryBinding.scala | Scala | mit | 643 |
import sbt._
import Keys._
/** sbt build settings for the soql-environment subproject; adds the ICU4J
  * Unicode library as its only extra dependency. */
object SoqlEnvironment{
  lazy val settings: Seq[Setting[_]] = BuildSettings.projectSettings() ++ Seq(
    name := "soql-environment",
    libraryDependencies += "com.ibm.icu" % "icu4j" % "63.1"
  )
}
| socrata-platform/soql-reference | project/SoqlEnvironment.scala | Scala | apache-2.0 | 229 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef.{Int, List, Map => SMap, Symbol}
import quasar.fp._
import quasar.qscript.{
construction,
Center,
LeftSide,
LeftSide3,
RightSide,
RightSide3
}
import quasar.qscript.RecFreeS._
import quasar.qsu.{QScriptUniform => QSU}
import scalaz.syntax.bind._
import scalaz.syntax.equal._
import matryoshka.BirecursiveT
/** Coalesces adjacent mappable regions of a single root: chains of unary
  * Map nodes are fused into one Map, and Map operands of autojoins are
  * folded into the join's combiner function. */
final class CoalesceUnaryMappable[T[_[_]]: BirecursiveT] private () extends QSUTTypes[T] {
  import QSUGraph.Extractors._
  import MappableRegion.MaximalUnary

  val mf = construction.Func[T]

  def apply(graph: QSUGraph): QSUGraph =
    graph rewrite {
      // Collapse a maximal unary mappable region into a single Map node.
      case g @ MaximalUnary(src, fm) if g.root =/= src.root =>
        g.overwriteAtRoot(QScriptUniform.Map(src.root, fm.asRec))
      // Fold Map operands into the join combiner (2-way).
      case g @ AutoJoin2(left, right, combine) =>
        val nodes = mapNodes(List(left, right))
        if (nodes.isEmpty)
          g
        else {
          // For each side: its Map's source and function, or the side
          // unchanged (identity function) when it isn't a Map.
          val (l, lf) = nodes.getOrElse(0, (left.root, mf.Hole))
          val (r, rf) = nodes.getOrElse(1, (right.root, mf.Hole))
          val cmb = combine flatMap {
            case LeftSide => lf >> mf.LeftSide
            case RightSide => rf >> mf.RightSide
          }
          g.overwriteAtRoot(QSU.AutoJoin2(l, r, cmb))
        }
      // Fold Map operands into the join combiner (3-way).
      case g @ AutoJoin3(left, center, right, combine) =>
        val nodes = mapNodes(List(left, center, right))
        if (nodes.isEmpty)
          g
        else {
          val (l, lf) = nodes.getOrElse(0, (left.root, mf.Hole))
          val (c, cf) = nodes.getOrElse(1, (center.root, mf.Hole))
          val (r, rf) = nodes.getOrElse(2, (right.root, mf.Hole))
          val cmb = combine flatMap {
            case LeftSide3 => lf >> mf.LeftSide3
            case Center => cf >> mf.Center
            case RightSide3 => rf >> mf.RightSide3
          }
          g.overwriteAtRoot(QSU.AutoJoin3(l, c, r, cmb))
        }
    }

  /** Maps input position -> (source root, linearized map function) for the
    * inputs that are Map nodes; other positions are absent. (`Map` here is
    * the QSUGraph extractor, not scala's Map.) */
  def mapNodes(gs: List[QSUGraph]): SMap[Int, (Symbol, FreeMap)] =
    gs.zipWithIndex.foldLeft(SMap[Int, (Symbol, FreeMap)]()) {
      case (acc, (Map(s, fm), i)) => acc.updated(i, (s.root, fm.linearize))
      case (acc, _) => acc
    }
}
/** Entry point; the class constructor itself is private. */
object CoalesceUnaryMappable {
  def apply[T[_[_]]: BirecursiveT](graph: QSUGraph[T]): QSUGraph[T] =
    (new CoalesceUnaryMappable[T]).apply(graph)
}
| djspiewak/quasar | qsu/src/main/scala/quasar/qsu/CoalesceUnaryMappable.scala | Scala | apache-2.0 | 2,931 |
package org.eknet.sitebag
import scala.concurrent.duration._
import akka.util.Timeout
import org.eknet.sitebag.mongo.{MongoStoreActor, SitebagMongo}
import akka.actor.{ActorSystem, ActorRef}
import scala.concurrent.Await
import java.util.concurrent.atomic.AtomicReference
import com.typesafe.config.ConfigFactory
import reactivemongo.api.MongoDriver
/** Base trait for actor tests backed by a throwaway MongoDB database: the
  * db is dropped before each test and again (plus client close) after the
  * suite. */
trait MongoTest extends ActorTestBase {
  import system.dispatcher

  val settings = SitebagSettings(system)
  implicit val timeout: Timeout = 5.seconds
  // Timestamp-suffixed db name — presumably to isolate concurrent test
  // runs against the same Mongo instance (TODO confirm).
  val dbname = testName + System.currentTimeMillis()
  val mongo: SitebagMongo = SitebagMongo(settings).withDbName(dbname)
  val storeRef: ActorRef = system.actorOf(MongoStoreActor(mongo))

  // Start every test from an empty database.
  before {
    Await.ready(mongo.db.drop(), 10.seconds)
  }

  override def afterAll() = {
    Await.ready(mongo.db.drop(), 10.seconds)
    mongo.close()
    super.afterAll()
  }
}
object MongoTest {
  private lazy val config = ConfigFactory.load("application")
  private lazy val mongoClientUrl = config.getString("sitebag.mongodb-url")

  /** Creates a standalone client for the given database. (Previously the
    * `dbname` argument was ignored and "testdb" was always used.) */
  def createMongoClient(dbname: String) = new SitebagMongo(new MongoDriver(), mongoClientUrl, dbname)
}
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine
import slamdata.Predef._
import slamdata.engine.analysis._
import slamdata.engine.fp._
import slamdata.engine.sql._
import slamdata.engine.SemanticError._
import scalaz.{Node => _, Tree => _, _}
import Scalaz._
/** A set of named variable bindings to substitute into a query. */
final case class Variables(value: Map[VarName, VarValue])
/** A variable name; rendered with the ":name" query syntax. */
final case class VarName(value: String) {
  override def toString = ":" + value
}
/** The raw (unparsed) textual value bound to a variable. */
final case class VarValue(value: String)
object Variables {
  /** Wraps a plain string map into typed variable bindings. */
  def fromMap(value: Map[String, String]): Variables = Variables(value.map(t => VarName(t._1) -> VarValue(t._2)))

  /** Replaces every `Vari` node that has a binding in `vars` with the parse
    * of its bound value, re-annotating the resulting tree. Fails with a
    * VariableParseError if a bound value does not parse. */
  def substVars[A](tree: AnnotatedTree[Node, A], vars: Variables): SemanticError \\/ AnnotatedTree[Node, A] = {
    // State threads the (node -> annotation) pairs collected during the walk;
    // EitherT over Trampoline keeps the traversal stack-safe and failable.
    type S = List[(Node, A)]
    type EitherM[A] = EitherT[Free.Trampoline, SemanticError, A]
    type M[A] = StateT[EitherM, S, A]

    // Record a node that was kept as-is (annotation carried over).
    def unchanged[A <: Node](t: (A, A)): M[A] = changed(t._1, \\/- (t._2))

    // Record a (possibly failed) replacement; the old node's annotation is
    // reused for the new node.
    def changed[A <: Node](old: A, new0: SemanticError \\/ A): M[A] = StateT[EitherM, S, A] { state =>
      EitherT(new0.map { new0 =>
        val ann = tree.attr(old)
        (((new0 -> ann) :: state, new0))
      }.point[Free.Trampoline])
    }

    tree.root.mapUpM0[M](
      unchanged _,
      unchanged _,
      {
        // The only interesting case: a variable with a binding is parsed
        // and substituted.
        case (old, v @ Vari(name)) if vars.value.contains(VarName(name)) =>
          val varValue = vars.value(VarName(name))
          val parsed = (new SQLParser()).parseExpr(varValue.value)
                        .leftMap(err => VariableParseError(VarName(name), varValue, err))
          changed(old, parsed)
        case t => unchanged(t)
      },
      unchanged _,
      unchanged _,
      unchanged _
    ).run(Nil).run.run.map {
      case (tuples, root) =>
        // Rebuild the annotation map keyed by node identity.
        val map1 = tuples.foldLeft(new java.util.IdentityHashMap[Node, A]) { // TODO: Use ordinary map when AnnotatedTree has been off'd
          case (map, (k, v)) => ignore(map.put(k, v)); map
        }
        Tree[Node](root, _.children).annotate(map1.get(_))
    }
  }
}
| wemrysi/quasar | core/src/main/scala/slamdata/engine/variables.scala | Scala | apache-2.0 | 2,535 |
package apus.util
/**
 * A mutable bidirectional map with O(1) lookup in both directions; each key
 * maps to at most one value and vice versa.
 *
 * Created by Hao Chen on 2014/11/26.
 */
class BiMap[K, V] {
  private val keyMap = scala.collection.mutable.Map.empty[K, (K, V)]
  private val valueMap = scala.collection.mutable.Map.empty[V, (K, V)]

  /** Associates `key` with `value`, first removing any existing pair that
    * uses either side. (Previously re-putting an existing key left a stale
    * entry in the value index, so `getKey` could still resolve the old
    * value — the two indices could disagree.) */
  def put(key: K, value: V): Unit = {
    removeByKey(key)
    removeByValue(value)
    val pair = (key, value)
    keyMap.put(key, pair)
    valueMap.put(value, pair)
  }

  /** The value bound to `key`, if any. */
  def getValue(key: K): Option[V] = {
    keyMap.get(key).map(_._2)
  }

  /** The key bound to `value`, if any. */
  def getKey(value: V): Option[K] = {
    valueMap.get(value).map(_._1)
  }

  /** Removes the pair with this key, if present, from both indices. */
  def removeByKey(key: K): Unit = {
    keyMap.get(key).foreach {
      remove
    }
  }

  /** Removes the pair with this value, if present, from both indices. */
  def removeByValue(value: V): Unit = {
    valueMap.get(value).foreach {
      remove
    }
  }

  // Drops a pair from both directions.
  private def remove(pair: (K, V)): Unit = {
    keyMap.remove(pair._1)
    valueMap.remove(pair._2)
  }
}
| raulchen/Apus | src/main/scala/apus/util/BiMap.scala | Scala | apache-2.0 | 816 |
package org.finra.datagenerator.scaffolding.random.randomizers
import org.finra.datagenerator.scaffolding.config._
import org.finra.datagenerator.scaffolding.utils.Logging
import org.finra.datagenerator.scaffolding.config.AnnotationField
import org.finra.datagenerator.scaffolding.random.predicate.{ClassRandomGenerator, RandomContext}
/**
* Created by dkopel on 12/6/16.
*/
/** Generates random Float values within a configurable [min, max] range,
  * exposed both through configuration keys and the "FloatRange" annotation. */
class FloatRandomizer extends ClassRandomGenerator[Float]
    with Configurable with AnnotationCapable with Logging {
    // Draws a value using the currently configured bounds.
    override def apply(rc: RandomContext): Float = {
        val min = rc.conf.conf[Float](FloatRandomizerMinName).getValue()
        val max = rc.conf.conf[Float](FloatRandomizerMaxName).getValue()
        logger.debug("Min {}, Max {}", min, max)
        rc.jpr.floats.nextFloat(min, max)
    }

    // Configuration key names for the range bounds.
    object FloatRandomizerMinName extends ConfigName("floatRandomizerMin")
    object FloatRandomizerMaxName extends ConfigName("floatRandomizerMax")

    // Defaults span the whole Float range.
    val minDef: ConfigDefinition[Float] = ConfigDefinition[Float](
        FloatRandomizerMinName,
        Some(Float.MinValue)
    )
    val maxDef: ConfigDefinition[Float] = ConfigDefinition[Float](
        FloatRandomizerMaxName,
        Some(Float.MaxValue)
    )
    private val defs = Seq(
        minDef,
        maxDef
    )

    override def configBundle: ConfigBundle = {
        ConfigBundle(
            getClass,
            defs.map(d => (d.name, d)).toMap
        )
    }

    // Annotation name and its min/max attributes.
    override def name: String = "FloatRange"

    override def values: Set[AnnotationField[_, _]] = Set(
        AnnotationField("min", minDef, classOf[Float], classOf[Float]),
        AnnotationField("max", maxDef, classOf[Float], classOf[Float])
    )

    // Handles both the boxed and primitive Float classes.
    override def classes: Array[Class[_]] = Array(classOf[java.lang.Float], classOf[Float])
}
package eventstore
import akka.actor.ExtendedActorSystem
import ecommerce.shipping.shippingOffice
import org.json4s.Formats
import pl.newicom.eventstore.Json4sEsSerializer
/** EventStore serializer that extends the default json4s formats with the
  * shipping office's serialization hints. */
class EventStoreSerializer(val sys: ExtendedActorSystem) extends Json4sEsSerializer(sys) {
  override implicit val formats: Formats = shippingOffice.serializationHints ++ defaultFormats
}
| odd/ddd-leaven-akka-v2 | shipping/write-back/src/main/scala/eventstore/EventStoreSerializer.scala | Scala | mit | 364 |
package com.typesafe.sbt.packager.validation
import sbt._
trait ValidationKeys {

  /**
   * A task that implements various validations for a format.
   * Example usage:
   *  - `sbt universal:packageBin::validatePackage`
   *  - `sbt debian:packageBin::validatePackage`
   *
   * Each format should implement its own validate.
   * Implemented in #1026
   */
  val validatePackage = taskKey[Unit]("validates the package configuration")

  /** The list of validator functions `validatePackage` runs. */
  val validatePackageValidators = taskKey[Seq[Validation.Validator]]("validator functions")
}

object ValidationKeys extends ValidationKeys
| kardapoltsev/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/validation/ValidationKeys.scala | Scala | bsd-2-clause | 587 |
import sbt._
import Keys._
/** Builds the sbt shell prompt: "<project>-<subproject>:<branch>@<commit>> ". */
object GitPrompt {

  /** ":branch" suffix; falls back to ":master" when no branch name is known. */
  def currBranch = VersionControl.branchName() match {
    case ""   => ":master"
    case name => ":" + name
  }

  /** "@commit" suffix; "@unknown" when the commit hash is unavailable. */
  def currCommit = VersionControl.shortenedCurrentCommit() match {
    case ""     => "@unknown"
    case commit => "@" + commit
  }

  /** Prompt function applied to the current sbt state. */
  val build = {
    (state: State) => {
      val projectName = Project.extract(state).currentRef.project
      s"${Settings.project}-$projectName$currBranch$currCommit> "
    }
  }
}
| davidhoyt/getter.io | project/prompt.scala | Scala | mit | 597 |
package org.mozartoz.bootcompiler
package transform
import ast._
import oz._
import symtab._
/** Base class for transformation phases */
abstract class Transformer extends (Program => Unit) {
  /** Program that is being transformed (set for the duration of apply) */
  var program: Program = _

  /** Abstraction that is being transformed (only if `!program.isRawCode`) */
  var abstraction: Abstraction = _

  /** Builtin manager of the program */
  def builtins = program.builtins

  /** Resolves `name` from the base environment: a direct symbol reference
   *  when compiling the base environment itself, the Base variable for
   *  "Base", and a synthesized `Base.name` selection otherwise. */
  protected def baseEnvironment(name: String): Expression = {
    if (program.isBaseEnvironment) {
      Variable(program.baseSymbols(name))
    } else if (name == "Base") {
      Variable(program.baseEnvSymbol)
    } else {
      CallExpression(Constant(OzBuiltin(builtins.binaryOpToBuiltin("."))),
          List(Variable(program.baseEnvSymbol), Constant(OzAtom(name))))
    }
  }

  /** Tree copier (preserves node attributes when rebuilding nodes) */
  val treeCopy = new TreeCopier
  /** Applies the transformation phase to a program. The program is held in
   *  the `program` field while the phase runs and cleared afterwards. */
  def apply(program: Program) {
    this.program = program
    try {
      apply()
    } finally {
      this.program = null
    }
  }
  /** Applies the transformation phase to the current `program`: either its
   *  raw code, or each abstraction in turn (tracked in `abstraction`). */
  protected def apply() {
    if (program.isRawCode)
      program.rawCode = transformStat(program.rawCode)
    else {
      for (abs <- program.abstractions) {
        abstraction = abs
        try {
          applyToAbstraction()
        } finally {
          abstraction = null
        }
      }
    }
  }
  /** Applies the transformation phase to the current `abstraction`'s body. */
  protected def applyToAbstraction() {
    abstraction.body = transformStat(abstraction.body)
  }
  /** Transforms a Statement by structural recursion; subclasses override to
   *  rewrite specific node kinds, falling back to this default traversal. */
  def transformStat(statement: Statement): Statement = statement match {
    case CompoundStatement(stats) =>
      treeCopy.CompoundStatement(statement, stats map transformStat)
    case RawLocalStatement(declarations, body) =>
      treeCopy.RawLocalStatement(statement, declarations map transformDecl,
          transformStat(body))
    case LocalStatement(declarations, body) =>
      treeCopy.LocalStatement(statement, declarations,
          transformStat(body))
    case CallStatement(callable, args) =>
      treeCopy.CallStatement(statement, transformExpr(callable),
          args map transformExpr)
    case IfStatement(condition, trueStatement, falseStatement) =>
      treeCopy.IfStatement(statement, transformExpr(condition),
          transformStat(trueStatement), transformStat(falseStatement))
    case MatchStatement(value, clauses, elseStatement) =>
      treeCopy.MatchStatement(statement, transformExpr(value),
          clauses map transformClauseStat, transformStat(elseStatement))
    case NoElseStatement() =>
      statement
    case ThreadStatement(body) =>
      treeCopy.ThreadStatement(statement, transformStat(body))
    case LockStatement(lock, body) =>
      treeCopy.LockStatement(statement, transformExpr(lock),
          transformStat(body))
    case LockObjectStatement(body) =>
      treeCopy.LockObjectStatement(statement, transformStat(body))
    case TryStatement(body, exceptionVar, catchBody) =>
      treeCopy.TryStatement(statement, transformStat(body),
          exceptionVar, transformStat(catchBody))
    case TryFinallyStatement(body, finallyBody) =>
      treeCopy.TryFinallyStatement(statement, transformStat(body),
          transformStat(finallyBody))
    case RaiseStatement(body) =>
      treeCopy.RaiseStatement(statement, transformExpr(body))
    case FailStatement() =>
      statement
    case BindStatement(left, right) =>
      treeCopy.BindStatement(statement, transformExpr(left),
          transformExpr(right))
    case BinaryOpStatement(left, operator, right) =>
      treeCopy.BinaryOpStatement(statement, transformExpr(left),
          operator, transformExpr(right))
    case DotAssignStatement(left, center, right) =>
      treeCopy.DotAssignStatement(statement, transformExpr(left),
          transformExpr(center), transformExpr(right))
    // NOTE(review): SkipStatement is copied while NoElseStatement and
    // FailStatement are returned as-is — verify the asymmetry is intended.
    case SkipStatement() =>
      treeCopy.SkipStatement(statement)
  }
  /** Transforms an expression by structural recursion; subclasses override
   *  to rewrite specific node kinds, falling back to this default traversal. */
  def transformExpr(expression: Expression): Expression = expression match {
    case StatAndExpression(statement, expr) =>
      treeCopy.StatAndExpression(expression, transformStat(statement),
          transformExpr(expr))
    // NOTE(review): in the next two cases the pattern binder `expression`
    // shadows the method parameter, so treeCopy receives the *inner* body
    // rather than the outer node (compare RawLocalStatement in
    // transformStat). Verify this is intentional.
    case RawLocalExpression(declarations, expression) =>
      treeCopy.RawLocalExpression(expression, declarations map transformDecl,
          transformExpr(expression))
    case LocalExpression(declarations, expression) =>
      treeCopy.LocalExpression(expression, declarations,
          transformExpr(expression))
    // Complex expressions
    case ProcExpression(name, args, body, flags) =>
      treeCopy.ProcExpression(expression, name, args,
          transformStat(body), flags)
    case FunExpression(name, args, body, flags) =>
      treeCopy.FunExpression(expression, name, args,
          transformExpr(body), flags)
    case CallExpression(callable, args) =>
      treeCopy.CallExpression(expression, transformExpr(callable),
          args map transformExpr)
    case IfExpression(condition, trueExpression, falseExpression) =>
      treeCopy.IfExpression(expression, transformExpr(condition),
          transformExpr(trueExpression), transformExpr(falseExpression))
    case MatchExpression(value, clauses, elseExpression) =>
      treeCopy.MatchExpression(expression, transformExpr(value),
          clauses map transformClauseExpr, transformExpr(elseExpression))
    case NoElseExpression() =>
      expression
    case ThreadExpression(body) =>
      treeCopy.ThreadExpression(expression, transformExpr(body))
    case LockExpression(lock, body) =>
      treeCopy.LockExpression(expression, transformExpr(lock),
          transformExpr(body))
    case LockObjectExpression(body) =>
      treeCopy.LockObjectExpression(expression, transformExpr(body))
    case TryExpression(body, exceptionVar, catchBody) =>
      treeCopy.TryExpression(expression, transformExpr(body),
          exceptionVar, transformExpr(catchBody))
    case TryFinallyExpression(body, finallyBody) =>
      treeCopy.TryFinallyExpression(expression, transformExpr(body),
          transformStat(finallyBody))
    case RaiseExpression(body) =>
      treeCopy.RaiseExpression(expression, transformExpr(body))
    case BindExpression(left, right) =>
      treeCopy.BindExpression(expression, transformExpr(left),
          transformExpr(right))
    case DotAssignExpression(left, center, right) =>
      treeCopy.DotAssignExpression(expression, transformExpr(left),
          transformExpr(center), transformExpr(right))
    case FunctorExpression(name, require, prepare, imports, define, exports) =>
      // prepare/define sections are statements; keep their LocalStatementOrRaw type.
      def transformDefine(stat: LocalStatementOrRaw) =
        transformStat(stat).asInstanceOf[LocalStatementOrRaw]
      treeCopy.FunctorExpression(expression, name,
          require, prepare map transformDefine,
          imports, define map transformDefine,
          exports)
    // Operations
    case UnaryOp(operator, operand) =>
      treeCopy.UnaryOp(expression, operator, transformExpr(operand))
    case BinaryOp(left, operator, right) =>
      treeCopy.BinaryOp(expression, transformExpr(left), operator,
          transformExpr(right))
    case ShortCircuitBinaryOp(left, operator, right) =>
      treeCopy.ShortCircuitBinaryOp(expression, transformExpr(left), operator,
          transformExpr(right))
    // Trivial expressions (leaves: returned unchanged)
    case RawVariable(name) => expression
    case Variable(symbol) => expression
    case EscapedVariable(variable) => expression
    case UnboundExpression() => expression
    case NestingMarker() => expression
    case Self() => expression
    // Constants
    case Constant(value) => expression
    // Records
    case AutoFeature() => expression
    case Record(label, fields) =>
      treeCopy.Record(expression, transformExpr(label),
          fields map transformRecordField)
    case OpenRecordPattern(label, fields) =>
      treeCopy.OpenRecordPattern(expression, transformExpr(label),
          fields map transformRecordField)
    case PatternConjunction(parts) =>
      treeCopy.PatternConjunction(expression, parts map transformExpr)
    // Classes
    case ClassExpression(name, parents, features, attributes,
        properties, methods) =>
      treeCopy.ClassExpression(expression, name, parents map transformExpr,
          features map transformFeatOrAttr, attributes map transformFeatOrAttr,
          properties map transformExpr, methods map transformMethodDef)
    // Synthetic-only
    case CreateAbstraction(body, globals) =>
      treeCopy.CreateAbstraction(expression, transformExpr(body),
          globals map transformExpr)
  }
/** Transforms a declaration */
def transformDecl(
declaration: RawDeclaration): RawDeclaration = declaration match {
case stat:Statement => transformStat(stat)
case _ => declaration
}
/** Transforms a record field */
private def transformRecordField(field: RecordField): RecordField =
treeCopy.RecordField(field,
transformExpr(field.feature), transformExpr(field.value))
/** Transforms a clause of a match statement */
def transformClauseStat(clause: MatchStatementClause) =
treeCopy.MatchStatementClause(clause, transformExpr(clause.pattern),
clause.guard map transformExpr, transformStat(clause.body))
/** Transforms a clause of a match expression */
def transformClauseExpr(clause: MatchExpressionClause) =
treeCopy.MatchExpressionClause(clause, transformExpr(clause.pattern),
clause.guard map transformExpr, transformExpr(clause.body))
/** Transforms a feature or an attribute of a class */
def transformFeatOrAttr(featOrAttr: FeatOrAttr) =
treeCopy.FeatOrAttr(featOrAttr, transformExpr(featOrAttr.name),
featOrAttr.value map transformExpr)
/** Transforms a method definition */
def transformMethodDef(method: MethodDef) = {
val body = method.body match {
case stat:Statement => transformStat(stat)
case expr:Expression => transformExpr(expr)
}
treeCopy.MethodDef(method, method.header, method.messageVar, body)
}
}
| avisparesearch/Mozart2 | bootcompiler/src/main/scala/org/mozartoz/bootcompiler/transform/Transformer.scala | Scala | bsd-2-clause | 10,192 |
package mbench.benchmarks
object ParaLoops {
import mbench.benchmark._
import mbench.gnuplot._
import java.util.concurrent.{ ExecutorService, Executors }
/* The benchmark will be configured using an executor as runtime configuration
* and a number of loop cycles as static configuration.
*/
  // Builds an mbench runtime configuration: `mkExecutor` receives the thread
  // count and the executor is shut down when the benchmark finishes.
  def runtimeConfig(executorName: String, mkExecutor: Int => ExecutorService) =
    Config.runtime[Int, ExecutorService](executorName, mkExecutor, _.shutdown())
  // Fixed-size thread pool sized by the benchmark's thread parameter.
  val threadPool = runtimeConfig("thread-pool", Executors.newFixedThreadPool)
  // Static configuration: number of loop cycles each task executes.
  val cycles = Config.static(10000000)
  import mbench.Host.Hardware.cores
  // Thread counts to benchmark: 1..3, then stepped up to the core count
  // (step 2 or 4 depending on machine size), plus cores+2 and cores+4
  // to measure oversubscription.
  val threads = ((1 to 3) ++ (4 to cores by (if (cores <= 8) 2 else 4)) ++ (Seq(2, 4) map (_ + cores))).distinct
  // X-axis label for the report.
  val ilabel = Label[Int]("threads")
  /* This column uses the cycles in the static configuration to compute
   * the throughput in cycles per second.
   */
  val throughput = Column.withConfig[Int, Int, Double]("throughput", "cycles".perSeconds)(
    (threads, cycles, time) => threads * (cycles / time)
  )
  // Derived column: speedup relative to the single-thread throughput.
  val speedup = throughput.speedupHigherIsBetter
  val benchmark = Benchmark("para-loops", threads, ilabel, warmups = 5, runs = 7)
    .add(throughput).add(speedup)
def main(args: Array[String]) = {
/* The tests and the benchmark must use the same setup, which specifies
* the executor and the number of cycles to execute.
*/
def mkTest(loop: Int => Unit)(executor: ExecutorService, cycles: Int, threads: Int) = {
(1 to threads)
.map(_ => executor.submit(new Runnable { def run() = loop(cycles) }))
.foreach(_.get())
}
val testWhile = Test("while", mkTest(Loops.testWhile))
val testWhileNew = Test("while-new", mkTest(Loops.testWhileNew))
val testFor = Test("for", mkTest(Loops.testFor))
val idealTimes = benchmark.ideal("speedup", 1 /* cycles don't matter */ , threads => if (threads <= cores) 1 else (threads.toDouble / cores))
val tests = Seq(testWhile, testWhileNew, testFor)
val dats = tests.map(benchmark(threadPool and cycles, _))
val settings = Seq(Plot.xtics(1))
Gnuplot.save(Gnuplot(dats, settings, throughput))
Gnuplot.save(Gnuplot(dats :+ idealTimes, settings, speedup.label))
}
} | sbocq/mbench | mbench-benchmarks/src/main/scala/mbench/benchmarks/ParaLoops.scala | Scala | apache-2.0 | 2,244 |
package com.wanghuanming.tfidf
import java.io._
import java.util.Date
import scala.io.Source
import org.ansj.splitWord.analysis.ToAnalysis
trait Logger {
  /** Writes `msg` to stderr, prefixed with the current timestamp. */
  def log(msg: String): Unit = {
    val timestamp = new Date()
    System.err.println(timestamp + ": " + msg)
  }
}
/**
 * Keyword extraction using the TF-IDF algorithm.
 *
 * TF is computed per document; the IDF map comes either from a serialized
 * cache (a user-built `idf.cache` on disk, falling back to the bundled
 * classpath resource `/default-idf.cache`) or is rebuilt from a corpus via
 * [[constructIDF]].
 *
 * author: HelloCode
 * email: huanmingwong@163.com
 */
object TFIDF extends Logger {
  // Classpath location of the default serialized IDF map.
  final val idfPath = "/default-idf.cache"
  // File-system location of a user-built IDF cache; takes precedence over idfPath.
  final val customIDFPath = "idf.cache"
  // IDF assigned to words never seen in the corpus.
  final val defaultIDF = Math.PI

  // English + Chinese stopwords bundled as classpath resources.
  // The sources are materialized eagerly so they can be closed deterministically.
  val stopwords = List("/engStopwords.txt", "/zhStopwords.txt").flatMap { file =>
    val source = Source.fromInputStream(getClass.getResourceAsStream(file))
    try source.getLines().map(_.trim).toList
    finally source.close()
  }

  // Lazily-initialized IDF map. @volatile is required for the double-checked
  // locking in IDF to be safe under the Java Memory Model.
  @volatile
  var idfCache_ : Map[String, Double] = null

  /**
   * @param content: the article to be extracted.
   * @param topN: how many keywords to be extracted.
   * @return : list of keywords
   */
  def getKeywords(content: String, topN: Int): List[(String, Double)] = {
    val terms = segment(content)
    val tf = TF(filterTrivialWord(terms))
    val idf = IDF
    // TF-IDF score per word; words missing from the IDF map get defaultIDF.
    val tfidf = tf.map { case (word, freq) =>
      word -> freq * idf.getOrElse(word, defaultIDF)
    }.toList
    tfidf.sortBy(_._2).reverse.take(topN)
  }

  /**
   * Construct your own readCache with a corpus.
   * It will compute the readCache of a corpus, and cache it.
   * @param corpusPath the corpusPath must be a directory containing a huge number of documents.
   */
  def constructIDF(corpusPath: String) = {
    assert(new File(corpusPath).isDirectory)
    val files = new File(corpusPath).listFiles
    val fileCount = files.size
    log(s"constructing IDF from $fileCount documents")
    // compute the idf: log(N / df + 1) where df = number of documents containing the word
    val corpus =
      files.flatMap { file =>
        // close each document's source even if segmentation fails
        val source = Source.fromFile(file)
        try segment(source.mkString).distinct
        finally source.close()
      }.groupBy { x =>
        x
      }.map { case (word, list) =>
        word -> Math.log(fileCount.toDouble / list.length + 1)
      }.withDefaultValue(defaultIDF)
    // update idf cache
    idfCache_ = corpus
    cacheIDF(idfCache_)
    log("successfully construct the IDF and cache it")
  }

  // Term frequency: word count normalized by document length.
  private def TF(article: List[String]) = {
    val sum = article.length
    // word count
    article.groupBy(x => x).map { case (word, list) =>
      word -> list.length.toDouble / sum
    }
  }

  // Returns the IDF map, loading it from cache on first use
  // (double-checked locking; idfCache_ is @volatile).
  private def IDF: Map[String, Double] = {
    if (idfCache_ == null) {
      this.synchronized {
        if (idfCache_ == null) {
          idfCache_ = readIDFFromCache
        }
      }
    }
    idfCache_
  }

  // Serializes the IDF map to customIDFPath; the stream is closed even on failure.
  private def cacheIDF(idf: Map[String, Double]): Unit = {
    val writer = new ObjectOutputStream(new FileOutputStream(new File(customIDFPath)))
    try writer.writeObject(idf)
    finally writer.close()
  }

  // Deserializes the IDF map, preferring the user-built cache on disk over
  // the bundled classpath resource.
  private def readIDFFromCache: Map[String, Double] = {
    val cacheIS = {
      if (new File(customIDFPath).exists) {
        log(s"read cache from $customIDFPath")
        new FileInputStream(new File(customIDFPath))
      } else {
        log(s"read cache from $idfPath")
        getClass.getResourceAsStream(idfPath)
      }
    }
    // deserialize; close the stream even if deserialization throws
    val reader = new ObjectInputStream(cacheIS)
    try reader.readObject.asInstanceOf[Map[String, Double]]
    finally reader.close()
  }

  /**
   * Not a short symbol which length less that 2, not a stopword, not a number.
   * @param terms list of words
   * @return
   */
  private def filterTrivialWord(terms: List[String]) = {
    terms.filter { word =>
      word.length >= 2 && !stopwords.contains(word) && !isNumber(word)
    }
  }

  // True if the term consists only of digits and dots (e.g. "3.14").
  private def isNumber(term: String): Boolean = {
    term.forall { x =>
      ('0' <= x && x <= '9') || x == '.'
    }
  }

  /**
   * article segmentation method for ansj_seg library
   * @param content the article to be segmented.
   * @return terms segmented.
   */
  private def segment(content: String): List[String] = {
    // each token prints as "word/pos"; keep only the word part of tagged tokens
    ToAnalysis.parse(content)
      .toArray
      .map(_.toString.split("/"))
      .filter(_.length >= 2)
      .map(_ (0))
      .toList
  }
}
| HelloCodeMing/scala-tfidf | src/main/scala/com/wanghuanming/tfidf/TFIDF.scala | Scala | mit | 3,973 |
package loaders
import java.io.File
import java.io.FileInputStream
import scala.util.Random
import libs._
/**
 * Loads images from the CIFAR-10 Dataset. The string path points to a directory where the files data_batch_1.bin, etc. are stored.
 *
 * Each record in a batch file is 1 label byte followed by 3*32*32 pixel bytes.
 * Training samples are stored at shuffled positions (trainPerm); test samples
 * keep file order.
 *
 * TODO: Implement loading of test images, and distinguish between training and test data
 */
class CifarLoader(path: String) {
  // We hardcode this because these are properties of the CIFAR-10 dataset.
  val height = 32
  val width = 32
  val channels = 3
  val size = channels * height * width
  val batchSize = 10000
  val nBatches = 5
  val nData = nBatches * batchSize

  val trainImages = new Array[Array[Float]](nData)
  val trainLabels = new Array[Int](nData)
  val testImages = new Array[Array[Float]](batchSize)
  val testLabels = new Array[Int](batchSize)

  val r = new Random()
  val indices = Vector() ++ (0 to nData - 1) toIterable
  val trainPerm = Vector() ++ r.shuffle(indices)
  // Identity permutation for the test set. Fixed off-by-one: the original
  // `0 to batchSize` produced batchSize + 1 entries.
  val testPerm = Vector() ++ ((0 until batchSize) toIterable)

  val d = new File(path)
  if (!d.exists) {
    throw new Exception("The path " + path + " does not exist.")
  }
  if (!d.isDirectory) {
    throw new Exception("The path " + path + " is not a directory.")
  }
  val cifar10Files = List("data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin", "test_batch.bin")
  for (filename <- cifar10Files) {
    if (!d.list.contains(filename)) {
      throw new Exception("The directory " + path + " does not contain all of the Cifar10 data. Please run `bash $SPARKNET_HOME/data/cifar10/get_cifar10.sh` to obtain the Cifar10 data.")
    }
  }

  val fullFileList = d.listFiles.filter(_.getName().split('.').last == "bin").toList
  val testFile = fullFileList.find(x => x.getName().split('/').last == "test_batch.bin").head
  val fileList = fullFileList diff List(testFile)

  for (i <- 0 to nBatches - 1) {
    readBatch(fileList(i), i, trainImages, trainLabels, trainPerm)
  }
  readBatch(testFile, 0, testImages, testLabels, testPerm)

  // Per-pixel mean over the training set, used for normalization.
  val meanImage = new Array[Float](size)
  for (i <- 0 to nData - 1) {
    for (j <- 0 to size - 1) {
      meanImage(j) += trainImages(i)(j).toFloat / nData
    }
  }

  /**
   * Reads one CIFAR-10 batch file into `images`/`labels`, placing record i of
   * the file at position perm(batch * batchSize + i).
   */
  def readBatch(file: File, batch: Int, images: Array[Array[Float]], labels: Array[Int], perm: Vector[Int]) {
    val buffer = new Array[Byte](1 + size)
    val inputStream = new FileInputStream(file)
    try {
      var i = 0
      var nRead = readRecord(inputStream, buffer)
      while (nRead != -1) {
        assert(i < batchSize)
        labels(perm(batch * batchSize + i)) = (buffer(0) & 0xFF) // convert to unsigned
        images(perm(batch * batchSize + i)) = new Array[Float](size)
        var j = 0
        while (j < size) {
          // we access buffer(j + 1) because the 0th position holds the label
          images(perm(batch * batchSize + i))(j) = buffer(j + 1) & 0xFF
          j += 1
        }
        nRead = readRecord(inputStream, buffer)
        i += 1
      }
    } finally {
      inputStream.close() // close the file even if an assertion or IO error fires
    }
  }

  // Fills `buffer` completely, looping over partial reads (InputStream.read is
  // not guaranteed to fill the buffer in one call). Returns -1 at clean EOF,
  // otherwise the number of bytes read (a truncated trailing record is
  // returned as-is, matching the original behavior).
  private def readRecord(in: FileInputStream, buffer: Array[Byte]): Int = {
    var offset = 0
    var eof = false
    while (offset < buffer.length && !eof) {
      val n = in.read(buffer, offset, buffer.length - offset)
      if (n == -1) eof = true else offset += n
    }
    if (offset == 0 && eof) -1 else offset
  }
}
| amplab/SparkNet | src/main/scala/loaders/CifarLoader.scala | Scala | mit | 3,001 |
package pdp.poatransportes
import org.apache.flink.api.scala._
import org.apache.flink.api.java.{DataSet => JavaDataSet}
import org.apache.flink.ml.common.LabeledVector
import org.apache.flink.ml.math.Vector
import org.apache.flink.ml.math.DenseVector
import org.apache.flink.ml.regression.MultipleLinearRegression
import org.apache.flink.ml.common.WeightVector
import org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression
object Trainer {
def trainMLR(javaDs: JavaDataSet[LabeledVector]): WeightVector = {
val scalaDs = new DataSet[LabeledVector](javaDs)
val labels:DataSet[Double] = scalaDs.map { x => x.label }
val Y = labels.collect().toArray[Double]
val vectors:DataSet[Array[Double]] = scalaDs.map { x => Array(x.vector(0)) }
val X = vectors.collect().toArray[Array[Double]]
val ols = new OLSMultipleLinearRegression()
ols.newSampleData(Y, X)
val b = ols.estimateRegressionParameters()
WeightVector(DenseVector(Array(b(1))), b(0))
}
def predictMLR(weights: JavaDataSet[WeightVector], javaDs: JavaDataSet[Vector]): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val mlr = MultipleLinearRegression()
mlr.weightsOption = Option(new DataSet[WeightVector](weights))
val scalaDs = new DataSet[Vector](javaDs)
val prediction = mlr.predict(scalaDs)
prediction.print()
}
def predictMLR(mlr: MultipleLinearRegression, javaDs: JavaDataSet[Vector]): Unit = {
val scalaDs = new DataSet[Vector](javaDs)
val prediction = mlr.predict(scalaDs)
prediction.print()
}
def predictMLRValidate(mlr: MultipleLinearRegression, javaDs: JavaDataSet[LabeledVector]): Unit = {
val scalaDs = new DataSet[LabeledVector](javaDs)
val prediction = mlr.predict(scalaDs)
prediction.print()
}
} | bombardellif/poa-transportes-demo | poatransportes-scala/src/main/scala/pdp/poatransportes/Trainer.scala | Scala | gpl-3.0 | 1,840 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import junit.framework.Assert._
import org.junit.Test
import org.scalatest.junit.JUnit3Suite
import java.util.Properties
import kafka.utils._
import kafka.log._
import kafka.zk.ZooKeeperTestHarness
import kafka.server.KafkaConfig
import kafka.utils.{Logging, ZkUtils, TestUtils}
import kafka.common.{TopicExistsException, ErrorMapping, TopicAndPartition}
/**
 * Integration tests for Kafka admin operations: replica assignment, topic
 * creation, partition reassignment, preferred replica election, controlled
 * shutdown and topic config changes.
 */
class AdminTest extends JUnit3Suite with ZooKeeperTestHarness with Logging {

  /** Checks round-robin replica assignment and rejection of bad replication factors. */
  @Test
  def testReplicaAssignment() {
    val brokerList = List(0, 1, 2, 3, 4)

    // test 0 replication factor
    intercept[AdminOperationException] {
      AdminUtils.assignReplicasToBrokers(brokerList, 10, 0)
    }

    // test wrong replication factor (more replicas than brokers)
    intercept[AdminOperationException] {
      AdminUtils.assignReplicasToBrokers(brokerList, 10, 6)
    }

    // correct assignment
    val expectedAssignment = Map(
      0 -> List(0, 1, 2),
      1 -> List(1, 2, 3),
      2 -> List(2, 3, 4),
      3 -> List(3, 4, 0),
      4 -> List(4, 0, 1),
      5 -> List(0, 2, 3),
      6 -> List(1, 3, 4),
      7 -> List(2, 4, 0),
      8 -> List(3, 0, 1),
      9 -> List(4, 1, 2))

    val actualAssignment = AdminUtils.assignReplicasToBrokers(brokerList, 10, 3, 0)
    assertTrue(expectedAssignment.toList == actualAssignment.toList)
  }

  /** Manual assignment must reject duplicate brokers and ragged replication factors. */
  @Test
  def testManualReplicaAssignment() {
    val brokers = List(0, 1, 2, 3, 4)
    TestUtils.createBrokersInZk(zkClient, brokers)

    // duplicate brokers
    intercept[IllegalArgumentException] {
      AdminUtils.createTopicWithAssignment(zkClient, "test", Map(0->Seq(0,0)))
    }

    // inconsistent replication factor
    intercept[IllegalArgumentException] {
      AdminUtils.createTopicWithAssignment(zkClient, "test", Map(0->Seq(0,1), 1->Seq(0)))
    }

    // good assignment
    val assignment = Map(0 -> List(0, 1, 2),
                         1 -> List(1, 2, 3))
    AdminUtils.createTopicWithAssignment(zkClient, "test", assignment)
    val found = ZkUtils.getPartitionAssignmentForTopics(zkClient, Seq("test"))
    assertEquals(assignment, found("test"))
  }

  /** Topic creation must persist the given assignment in ZK and reject re-creation. */
  @Test
  def testTopicCreationInZK() {
    val expectedReplicaAssignment = Map(
      0  -> List(0, 1, 2),
      1  -> List(1, 2, 3),
      2  -> List(2, 3, 4),
      3  -> List(3, 4, 0),
      4  -> List(4, 0, 1),
      5  -> List(0, 2, 3),
      6  -> List(1, 3, 4),
      7  -> List(2, 4, 0),
      8  -> List(3, 0, 1),
      9  -> List(4, 1, 2),
      10 -> List(1, 2, 3),
      11 -> List(1, 3, 4)
    )
    val leaderForPartitionMap = Map(
      0 -> 0,
      1 -> 1,
      2 -> 2,
      3 -> 3,
      4 -> 4,
      5 -> 0,
      6 -> 1,
      7 -> 2,
      8 -> 3,
      9 -> 4,
      10 -> 1,
      11 -> 1
    )
    val topic = "test"
    TestUtils.createBrokersInZk(zkClient, List(0, 1, 2, 3, 4))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // create leaders for all partitions
    TestUtils.makeLeaderForPartition(zkClient, topic, leaderForPartitionMap, 1)
    val actualReplicaAssignment = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient).partitionsMetadata.map(p => p.replicas)
    val actualReplicaList = actualReplicaAssignment.map(r => r.map(b => b.id).toList).toList
    assertEquals(expectedReplicaAssignment.size, actualReplicaList.size)
    for(i <- 0 until actualReplicaList.size)
      assertEquals(expectedReplicaAssignment.get(i).get, actualReplicaList(i))

    intercept[TopicExistsException] {
      // shouldn't be able to create a topic that already exists
      AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    }
  }

  /** Fetched topic metadata must reflect the assignment written at creation time. */
  @Test
  def testGetTopicMetadata() {
    val expectedReplicaAssignment = Map(
      0 -> List(0, 1, 2),
      1 -> List(1, 2, 3))
    val leaderForPartitionMap = Map(
      0 -> 0,
      1 -> 1)
    val topic = "auto-topic"
    TestUtils.createBrokersInZk(zkClient, List(0, 1, 2, 3))
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // create leaders for all partitions
    TestUtils.makeLeaderForPartition(zkClient, topic, leaderForPartitionMap, 1)

    val newTopicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
    newTopicMetadata.errorCode match {
      case ErrorMapping.UnknownTopicOrPartitionCode =>
        fail("Topic " + topic + " should've been automatically created")
      case _ =>
        assertEquals(topic, newTopicMetadata.topic)
        assertNotNull("partition metadata list cannot be null", newTopicMetadata.partitionsMetadata)
        assertEquals("partition metadata list length should be 2", 2, newTopicMetadata.partitionsMetadata.size)
        val actualReplicaAssignment = newTopicMetadata.partitionsMetadata.map(p => p.replicas)
        val actualReplicaList = actualReplicaAssignment.map(r => r.map(b => b.id).toList).toList
        assertEquals(expectedReplicaAssignment.size, actualReplicaList.size)
        for(i <- 0 until actualReplicaList.size) {
          assertEquals(expectedReplicaAssignment(i), actualReplicaList(i))
        }
    }
  }

  /** Reassignment where the current leader remains in the new replica set. */
  @Test
  def testPartitionReassignmentWithLeaderInNewReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4).map(b => TestUtils.createServer(new KafkaConfig(b)))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(0, 2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment attempt failed for [test, 0]", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
      val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient).mapValues(_.newReplicas)
      CheckReassignmentStatus.checkIfPartitionReassignmentSucceeded(zkClient, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
    }, 1000)
    val assignedReplicas = ZkUtils.getReplicasForPartition(zkClient, topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
    servers.foreach(_.shutdown())
  }

  /** Reassignment where the current leader is NOT part of the new replica set. */
  @Test
  def testPartitionReassignmentWithLeaderNotInNewReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4).map(b => TestUtils.createServer(new KafkaConfig(b)))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(1, 2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
      val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient).mapValues(_.newReplicas)
      CheckReassignmentStatus.checkIfPartitionReassignmentSucceeded(zkClient, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
    }, 1000)
    val assignedReplicas = ZkUtils.getReplicasForPartition(zkClient, topic, partitionToBeReassigned)
    // message fixed: the expected replica set here is 1, 2, 3 (was mislabeled "0, 2, 3")
    assertEquals("Partition should have been reassigned to 1, 2, 3", newReplicas, assignedReplicas)
    // leader should be 2
    servers.foreach(_.shutdown())
  }

  /** Reassignment to a replica set disjoint from the original one. */
  @Test
  def testPartitionReassignmentNonOverlappingReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4).map(b => TestUtils.createServer(new KafkaConfig(b)))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
      val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient).mapValues(_.newReplicas)
      CheckReassignmentStatus.checkIfPartitionReassignmentSucceeded(zkClient, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
    }, 1000)
    val assignedReplicas = ZkUtils.getReplicasForPartition(zkClient, topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 2, 3", newReplicas, assignedReplicas)
    // leader should be 2
    servers.foreach(_.shutdown())
  }

  /** Reassigning a partition of a non-existent topic must be a no-op. */
  @Test
  def testReassigningNonExistingPartition() {
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4).map(b => TestUtils.createServer(new KafkaConfig(b)))
    // reassign partition 0
    val newReplicas = Seq(2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    val reassignedPartitions = ZkUtils.getPartitionsBeingReassigned(zkClient)
    assertFalse("Partition should not be reassigned", reassignedPartitions.contains(topicAndPartition))
    // leader should be 2
    servers.foreach(_.shutdown())
  }

  /**
   * A reassignment written to ZK before the brokers start should be picked up
   * and completed once the controller comes up.
   */
  @Test
  def testResumePartitionReassignmentThatWasCompleted() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    // put the partition in the reassigned path as well
    // reassign partition 0
    val newReplicas = Seq(0, 1)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
    reassignPartitionsCommand.reassignPartitions
    // create brokers
    val servers = TestUtils.createBrokerConfigs(2).map(b => TestUtils.createServer(new KafkaConfig(b)))
    TestUtils.waitUntilTrue(checkIfReassignPartitionPathExists, 1000)
    val assignedReplicas = ZkUtils.getReplicasForPartition(zkClient, topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 0, 1", newReplicas, assignedReplicas)
    servers.foreach(_.shutdown())
  }

  /** Round-trips the preferred-replica-election JSON through ZK. */
  @Test
  def testPreferredReplicaJsonData() {
    // write preferred replica json data to zk path
    val partitionsForPreferredReplicaElection = Set(TopicAndPartition("test", 1), TopicAndPartition("test2", 1))
    PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkClient, partitionsForPreferredReplicaElection)
    // try to read it back and compare with what was written
    val preferredReplicaElectionZkData = ZkUtils.readData(zkClient,
      ZkUtils.PreferredReplicaLeaderElectionPath)._1
    val partitionsUndergoingPreferredReplicaElection =
      PreferredReplicaLeaderElectionCommand.parsePreferredReplicaJsonData(preferredReplicaElectionZkData)
    assertEquals("Preferred replica election ser-de failed", partitionsForPreferredReplicaElection,
      partitionsUndergoingPreferredReplicaElection)
  }

  /** Triggering a preferred replica election must move leadership to the first replica. */
  @Test
  def testBasicPreferredReplicaElection() {
    val expectedReplicaAssignment = Map(1  -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    val preferredReplica = 0
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3).map(new KafkaConfig(_))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    val servers = serverConfigs.reverse.map(s => TestUtils.createServer(s))
    // broker 2 should be the leader since it was started first
    val currentLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, partition, 1000, None).get
    // trigger preferred replica election
    val preferredReplicaElection = new PreferredReplicaLeaderElectionCommand(zkClient, Set(TopicAndPartition(topic, partition)))
    preferredReplicaElection.moveLeaderToPreferredReplica()
    val newLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, partition, 1000, Some(currentLeader)).get
    assertEquals("Preferred replica election failed", preferredReplica, newLeader)
    servers.foreach(_.shutdown())
  }

  /**
   * Controlled shutdown: each shutdown should move leadership away from the
   * broker being stopped; the last shutdown cannot move it (one replica left).
   */
  @Test
  def testShutdownBroker() {
    info("inside testShutdownBroker")
    val expectedReplicaAssignment = Map(1  -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3).map(new KafkaConfig(_))
    // create the topic
    AdminUtils.createTopicWithAssignment(zkClient, topic, expectedReplicaAssignment)
    val servers = serverConfigs.reverse.map(s => TestUtils.createServer(s))

    // broker 2 should be the leader since it was started first
    var leaderBeforeShutdown = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, partition, 1000, None).get
    var controllerId = ZkUtils.getController(zkClient)
    var controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
    var partitionsRemaining = controller.shutdownBroker(2)
    try {
      assertEquals(0, partitionsRemaining)
      var topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
      var leaderAfterShutdown = topicMetadata.partitionsMetadata.head.leader.get.id
      assertTrue(leaderAfterShutdown != leaderBeforeShutdown)

      leaderBeforeShutdown = leaderAfterShutdown
      controllerId = ZkUtils.getController(zkClient)
      controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
      partitionsRemaining = controller.shutdownBroker(1)
      assertEquals(0, partitionsRemaining)
      topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
      leaderAfterShutdown = topicMetadata.partitionsMetadata.head.leader.get.id
      assertTrue(leaderAfterShutdown != leaderBeforeShutdown)
      assertEquals(1, controller.controllerContext.allLeaders(TopicAndPartition("test", 1)).leaderAndIsr.isr.size)

      leaderBeforeShutdown = leaderAfterShutdown
      controllerId = ZkUtils.getController(zkClient)
      controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
      partitionsRemaining = controller.shutdownBroker(0)
      // the last replica cannot give up leadership
      assertEquals(1, partitionsRemaining)
      topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
      leaderAfterShutdown = topicMetadata.partitionsMetadata.head.leader.get.id
      assertTrue(leaderAfterShutdown == leaderBeforeShutdown)
      assertEquals(1, controller.controllerContext.allLeaders(TopicAndPartition("test", 1)).leaderAndIsr.isr.size)
    } finally {
      servers.foreach(_.shutdown())
    }
  }

  /**
   * This test creates a topic with a few config overrides and checks that the configs are applied to the new topic
   * then changes the config and checks that the new values take effect.
   */
  @Test
  def testTopicConfigChange() {
    val partitions = 3
    val topic = "my-topic"
    val server = TestUtils.createServer(new KafkaConfig(TestUtils.createBrokerConfig(0)))

    def makeConfig(messageSize: Int, retentionMs: Long) = {
      val props = new Properties()
      props.setProperty(LogConfig.MaxMessageBytesProp, messageSize.toString)
      // NOTE: "RententionMs" is the property's actual (misspelled) name in this Kafka version.
      props.setProperty(LogConfig.RententionMsProp, retentionMs.toString)
      props
    }

    def checkConfig(messageSize: Int, retentionMs: Long) {
      TestUtils.retry(10000) {
        for(part <- 0 until partitions) {
          val logOpt = server.logManager.getLog(TopicAndPartition(topic, part))
          assertTrue(logOpt.isDefined)
          assertEquals(retentionMs, logOpt.get.config.retentionMs)
          assertEquals(messageSize, logOpt.get.config.maxMessageSize)
        }
      }
    }

    try {
      // create a topic with a few config overrides and check that they are applied
      val maxMessageSize = 1024
      val retentionMs = 1000*1000
      AdminUtils.createTopic(server.zkClient, topic, partitions, 1, makeConfig(maxMessageSize, retentionMs))
      checkConfig(maxMessageSize, retentionMs)

      // now double the config values for the topic and check that it is applied
      AdminUtils.changeTopicConfig(server.zkClient, topic, makeConfig(2*maxMessageSize, 2 * retentionMs))
      checkConfig(2*maxMessageSize, 2 * retentionMs)
    } finally {
      server.shutdown()
      server.config.logDirs.foreach(Utils.rm(_))
    }
  }

  // True while the reassignment path still exists in ZK.
  private def checkIfReassignPartitionPathExists(): Boolean = {
    ZkUtils.pathExists(zkClient, ZkUtils.ReassignPartitionsPath)
  }
}
| akosiaris/kafka | core/src/test/scala/unit/kafka/admin/AdminTest.scala | Scala | apache-2.0 | 18,573 |
import java.io._
import scala.util.Random
object mGen {
def main(args: Array[String]) {
var file = ""
var rows = 0
var cols = 0
var bounds = 0
var nat = true
if(args.isDefinedAt(0)) file = args(0)
if(args.isDefinedAt(1)) rows = args(1).toInt
if(args.isDefinedAt(2)) cols = args(2).toInt
if(args.isDefinedAt(3)) bounds = args(3).toInt
if(args.isDefinedAt(4)) nat = args(4).toBoolean
println("Generating...")
if(args.isDefinedAt(0)) {
val writer = new PrintWriter(new File(file))
def generate(rows: Int, cols: Int) = {
def generateRec(r: Int, c: Int): Boolean =
if(r > 0 && c > 0) {
if (nat) writer.write((Random.nextInt(bounds)) + "\\t")
else writer.write((2 * Random.nextDouble - 1) * bounds + "\\t")
generateRec(r, c - 1)
} else if (r > 0 && c == 0) {
if(nat) writer.write((Random.nextInt(bounds)) + "\\n")
else writer.write((2 * Random.nextDouble - 1) * bounds + "\\n")
generateRec(r - 1, cols)
} else true
generateRec(rows, cols)
}
generate(rows, cols)
writer.close()
println("Complete!")
} else println("Failed.")
}
} | pomadchin/hadoop-dg-decomp | src/main/scala/mGen.scala | Scala | apache-2.0 | 1,236 |
package io.swagger.client.model
import io.swagger.client.core.ApiModel
import org.joda.time.DateTime
/**
 * API model for a connector credential attribute
 * (presumably Swagger-generated — package is io.swagger.client; verify generator).
 * The inline comments record the original snake_case attribute names on the wire.
 */
case class Credential (
  /* connector_id */
  connectorId: Int,
  /* attr_key */
  attrKey: Option[String],
  /* attr_value */
  attrValue: Option[String],
  /* created_at */
  createdAt: Option[DateTime],
  /* updated_at */
  updatedAt: Option[DateTime])
  extends ApiModel
| QuantiModo/QuantiModo-SDK-Akka-Scala | src/main/scala/io/swagger/client/model/Credential.scala | Scala | gpl-2.0 | 383 |
package net.gumbix.hl7dsl
import net.gumbix.hl7dsl.build.{SimpleLoadMessage, BuildMessage}
import junit.framework.TestCase
import org.hl7.util.MessageLoader
import org.hl7.rim.{RimObject, Document}
import org.hl7.util._
import org.hl7.types._
import org.hl7.types.impl._
import net.gumbix.hl7dsl.DSL.DocumentDSL
import net.gumbix.hl7dsl.helper.ImplicitDef._
import net.gumbix.hl7dsl.helper.Address
import scala.collection.JavaConversions._
/**
* @author Ahmet Gül (guel.ahmet@hotmail.de)
* Simple load CDA and read data
*/
class HL7LoadTest extends TestCase {

  // Smoke test: load and traverse the VHitG example CDA document.
  def testLoadVHitG01() {
    loadCDA("../cda-examples/vhitg-POCD_EX000001.xml")
  }

  // Smoke test: load and traverse a level-3 doctor's letter (Arztbrief).
  def testLoadMessage() {
    loadCDA("../cda-examples/Arztbrief-02-Level3.xml")
  }

  /** Reads the CDA file at `filename` into a [[DocumentDSL]] and prints
    * various parts of it: record targets, section titles, addresses, and a
    * two-level traversal of the 'component' relationships.
    * Side effects are println only; nothing is asserted.
    */
  def loadCDA(filename: String) {
    val doc = StringFromFile.readFileAsString(filename)
    val cda = new DocumentDSL(doc)

    println("\\nPatientenakten:")
    // recordTarget participations identify the patient record(s).
    val rtList = cda.participation("recordTarget")
    rtList match {
      case Nil => println(" CDA enthält keine Patientenakte(n)")
      case _ => rtList.foreach(rt => println(" Patientenakte = " + rt))
    }

    println("\\nAlle Überschriften auslesen:")
    // 'component' relationships lead to the (structured) body and its sections.
    val sections = cda.outboundRelationship("component")
    sections match {
      case Nil => println("Dokument hat keine Sections")
      case _ => {
        println("Dokument hat folgende Sections:")
        sections.foreach {
          section =>
            val body = section.target().get
            println(" (Structured) Body " + body)
            // Print each section together with its title.
            body.outboundRelationship.list.foreach {
              s =>
                println(" Section " + s)
                println(" " + s.target().get.title)
            }
        }
      }
    }

    println("\\nAlle Adressen auslesen:")
    cda.participation.list.foreach {
      p =>
        println(p.role().get.addr)
      // println(" " + DatatypeTool.AddressTool.getAll(a.role().get.getAddr))
    }

    println("\\nTraversiere 'component' bis Ebene 2:")
    // Walk two levels of outbound relationships, printing title and text.
    cda.outboundRelationship.list.foreach {
      o => // ActRelationship
        println("level 1")
        o.target().get.outboundRelationship.list.foreach {
          o => // ActRelationship
            println("level 2")
            println("Überschrift: " + o.target().get.title)
            println("Inhalt: " + o.target().get.text)
        }
    }
  }

  def testModifyMessage1() {
    modifyCDA("../cda-examples/Arztbrief-02-Level3.xml")
  }

  def testModifyMessage2() {
    modifyCDA("../cda-examples/vhitg-POCD_EX000001.xml")
  }

  /** Loads a CDA, mutates the first record target's family name and city via
    * the DSL, then serializes the document back to XML and prints it.
    * NOTE(review): only the first record target (index 0) is modified even
    * though several may exist — see the inline note below.
    */
  def modifyCDA(filename: String) {
    val doc = StringFromFile.readFileAsString(filename)
    val cda = new DocumentDSL(doc)
    cda.participation("recordTarget")(0).role().get.player() match {
      case None => println("Warning: Person not available.")
      case Some(patient) => patient.name.family match {
        case None => println("Family name not set.")
        case Some(fam) => {
          println("Replace family name = " + fam + " with Gumbel")
          patient.name.family = "Gumbel"
        }
      }
    }
    // Note: There might be more than one record target!
    val city = cda.participation("recordTarget")(0).role().get.addr.city
    println("Replace city = " + city + " with Mannheim")
    cda.participation("recordTarget")(0).role().get.addr.city = "Mannheim"
    val modified = BuildMessage.toXML(cda, "POCD_HD000040")
    println(modified)
  }
}
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.core.nodes
import java.time.Instant
import akka.http.scaladsl.model.{MediaTypes, MediaType}
import scalatags.Text.all._
import scrupal.api._
import scala.concurrent.Future
/** Message Node
*
* This is a very simple node that simply renders a standard Boostrap message. It is used for generating error messages
* during node substitution so the result is visible to the end user.
*/
/** A node that renders a single Bootstrap-styled message block.
  *
  * Used, among other things, to surface node-substitution errors to the end
  * user as visible HTML instead of failing silently.
  *
  * @param name        name of this node
  * @param description human readable description of this node
  * @param css_class   CSS class applied to the wrapping div
  * @param message     text placed inside the div
  * @param modified    last modification time
  * @param created     creation time
  */
case class MessageNode(
  name : String,
  description : String,
  css_class : String,
  message : String,
  modified : Option[Instant] = Some(Instant.now),
  created : Option[Instant] = Some(Instant.now)
) extends Node {

  // This node always produces HTML.
  final val mediaType : MediaType = MediaTypes.`text/html`

  /** Wraps [[message]] in a div carrying [[css_class]] and returns it as a
    * successful HTML response.
    */
  def apply(context: Context) : Future[Response] = {
    val wrapped = div(cls := css_class, message)
    Future.successful(HtmlResponse(Html.renderContents(Seq(wrapped)), Successful))
  }
}
| scrupal/scrupal | scrupal-core/src/main/scala/scrupal/core/nodes/MessageNode.scala | Scala | apache-2.0 | 2,557 |
package org.openmole.core.keyword
import org.openmole.core.highlight.HighLight._
import org.openmole.core.pluginregistry._
import org.osgi.framework.{ BundleActivator, BundleContext }
/** OSGi bundle activator: registers the core DSL keywords for syntax
  * highlighting when the bundle starts and removes them when it stops.
  */
class Activator extends BundleActivator {

  /** Drops everything this bundle registered. */
  override def stop(context: BundleContext): Unit =
    PluginRegistry.unregister(this)

  /** Registers the keywords that should be highlighted. */
  override def start(context: BundleContext): Unit = {
    val highlighted =
      Vector("under", "in", "aggregate", "delta", "as", ":=")
        .map(word => WordHighLight(word))

    PluginRegistry.register(
      this,
      nameSpaces = Vector(this.getClass.getPackage),
      highLight = highlighted)
  }
}
| openmole/openmole | openmole/core/org.openmole.core.keyword/src/main/scala/org/openmole/core/keyword/Activator.scala | Scala | agpl-3.0 | 729 |
package org.bitcoins.wallet.internal
import org.bitcoins.core.api.wallet.db._
import org.bitcoins.core.consensus.Consensus
import org.bitcoins.core.hd.HDAccount
import org.bitcoins.core.protocol.script.{P2WPKHWitnessSPKV0, P2WPKHWitnessV0}
import org.bitcoins.core.protocol.transaction.{
CoinbaseInput,
Transaction,
TransactionOutPoint,
TransactionOutput
}
import org.bitcoins.core.util.BlockHashWithConfs
import org.bitcoins.core.wallet.utxo.TxoState._
import org.bitcoins.core.wallet.utxo._
import org.bitcoins.crypto.DoubleSha256DigestBE
import org.bitcoins.wallet.{Wallet, WalletLogger}
import scala.concurrent.Future
/** Provides functionality related to handling UTXOs in our wallet.
* The most notable examples of functionality here are enumerating
* UTXOs in the wallet and importing a UTXO into the wallet for later
* spending.
*/
private[wallet] trait UtxoHandling extends WalletLogger {
  self: Wallet =>

  /** @inheritdoc */
  def listDefaultAccountUtxos(): Future[Vector[SpendingInfoDb]] =
    listUtxos(walletConfig.defaultAccount)

  /** @inheritdoc */
  override def listUtxos(): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findAllUnspent()
  }

  /** Lists all unspent utxos belonging to the given HD account. */
  override def listUtxos(
      hdAccount: HDAccount): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findAllUnspentForAccount(hdAccount)
  }

  /** Returns all the utxos originating from the given outpoints */
  def listUtxos(outPoints: Vector[TransactionOutPoint]): Future[
      Vector[SpendingInfoDb]] = {
    spendingInfoDAO
      .findAllSpendingInfos()
      .map(_.filter(spendingInfo => outPoints.contains(spendingInfo.outPoint)))
  }

  /** Lists all unspent utxos carrying the given address tag. */
  override def listUtxos(tag: AddressTag): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findAllUnspentForTag(tag)
  }

  /** Lists unspent, tagged utxos restricted to the given HD account. */
  override def listUtxos(
      hdAccount: HDAccount,
      tag: AddressTag): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findAllUnspentForTag(tag).map { utxos =>
      utxos.filter(utxo =>
        HDAccount.isSameAccount(bip32Path = utxo.privKeyPath,
                                account = hdAccount))
    }
  }

  /** Lists all utxos currently in the given state. */
  override def listUtxos(state: TxoState): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findByTxoState(state)
  }

  /** Lists utxos in the given state restricted to the given HD account. */
  override def listUtxos(
      hdAccount: HDAccount,
      state: TxoState): Future[Vector[SpendingInfoDb]] = {
    spendingInfoDAO.findByTxoState(state).map { utxos =>
      utxos.filter(utxo =>
        HDAccount.isSameAccount(bip32Path = utxo.privKeyPath,
                                account = hdAccount))
    }
  }

  /** Re-derives the spent-side confirmation state of a single utxo. */
  private[wallet] def updateUtxoSpentConfirmedStates(
      txo: SpendingInfoDb): Future[Option[SpendingInfoDb]] = {
    updateUtxoSpentConfirmedStates(Vector(txo)).map(_.headOption)
  }

  /** Re-derives the spent-side confirmation state of the given utxos. */
  private[wallet] def updateUtxoSpentConfirmedStates(
      txos: Vector[SpendingInfoDb]): Future[Vector[SpendingInfoDb]] = {
    updateUtxoStates(txos, UtxoHandling.updateSpentTxoWithConfs)
  }

  /** Re-derives the receive-side confirmation state of a single utxo. */
  private[wallet] def updateUtxoReceiveConfirmedStates(
      txo: SpendingInfoDb): Future[Option[SpendingInfoDb]] = {
    updateUtxoReceiveConfirmedStates(Vector(txo))
      .map(_.headOption)
  }

  /** Re-derives the receive-side confirmation state of the given utxos. */
  private[wallet] def updateUtxoReceiveConfirmedStates(
      txos: Vector[SpendingInfoDb]): Future[Vector[SpendingInfoDb]] = {
    updateUtxoStates(txos, UtxoHandling.updateReceivedTxoWithConfs)
  }

  /** Returns a map of the SpendingInfoDbs with their relevant block.
    * If the block hash is None, then it is a mempool transaction.
    * The relevant block is determined by if the utxo has been spent or not.
    * If it has been spent it uses the block that included the spending transaction,
    * otherwise it uses the block that included the receiving transaction.
    */
  private[wallet] def getDbsByRelevantBlock(
      spendingInfoDbs: Vector[SpendingInfoDb]): Future[
      Map[Option[DoubleSha256DigestBE], Vector[SpendingInfoDb]]] = {
    // For spent utxos the spending tx decides the state; otherwise the
    // funding tx does.
    val txIds =
      spendingInfoDbs.map { db =>
        db.spendingTxIdOpt match {
          case Some(spendingTxId) =>
            spendingTxId
          case None =>
            db.txid
        }
      }

    transactionDAO.findByTxIdBEs(txIds).map { txDbs =>
      val blockHashMap = txDbs.map(db => db.txIdBE -> db.blockHashOpt).toMap
      val blockHashAndDb = spendingInfoDbs.map { txo =>
        val txToUse = txo.state match {
          case _: ReceivedState | ImmatureCoinbase | Reserved |
              BroadcastReceived =>
            txo.txid
          case PendingConfirmationsSpent | ConfirmedSpent | BroadcastSpent =>
            // NOTE(review): assumes spendingTxIdOpt is always defined for
            // spent states — .get throws otherwise.
            txo.spendingTxIdOpt.get
        }
        (blockHashMap(txToUse), txo)
      }
      blockHashAndDb.groupBy(_._1).map { case (blockHashOpt, vec) =>
        blockHashOpt -> vec.map(_._2)
      }
    }
  }

  /** Updates all the given SpendingInfoDbs to the correct state
    * based on how many confirmations they have received
    * @param spendingInfoDbs the utxos we need to update
    * @param fn the function used to transition the [[TxoState]] given a utxo and number of confirmations
    */
  private def updateUtxoStates(
      spendingInfoDbs: Vector[SpendingInfoDb],
      fn: (SpendingInfoDb, Int, Int) => SpendingInfoDb): Future[
      Vector[SpendingInfoDb]] = {
    val relevantBlocksF: Future[
      Map[Option[DoubleSha256DigestBE], Vector[SpendingInfoDb]]] = {
      getDbsByRelevantBlock(spendingInfoDbs)
    }

    //fetch all confirmations for those blocks, do it in parallel
    //as an optimization, previously we would fetch sequentially
    val blocksWithConfsF: Future[
      Map[Option[BlockHashWithConfs], Vector[SpendingInfoDb]]] = {
      for {
        relevantBlocks <- relevantBlocksF
        blocksWithConfirmations <- getConfirmationsForBlocks(relevantBlocks)
      } yield blocksWithConfirmations
    }

    val toUpdateF = blocksWithConfsF.map { txsByBlock =>
      val toUpdateFs: Vector[SpendingInfoDb] = txsByBlock.flatMap {
        case (Some(blockHashWithConfs), txos) =>
          blockHashWithConfs.confirmationsOpt match {
            case None =>
              // Block unknown to our chain source (pruned or reorged):
              // leave these utxos untouched rather than guess.
              logger.warn(
                s"Given txos exist in block (${blockHashWithConfs.blockHash.hex}) that we do not have or that has been reorged! $txos")
              Vector.empty
            case Some(confs) =>
              txos.map(fn(_, confs, walletConfig.requiredConfirmations))
          }
        case (None, txos) =>
          // No block hash -> still in the mempool, nothing to transition.
          logger.debug(
            s"Currently have ${txos.size} transactions in the mempool")
          txos
      }.toVector

      toUpdateFs
    }

    for {
      toUpdate <- toUpdateF
      _ =
        if (toUpdate.nonEmpty)
          logger.info(s"${toUpdate.size} txos are now confirmed!")
        else logger.trace("No txos to be confirmed")
      updated <- spendingInfoDAO.upsertAllSpendingInfoDb(toUpdate)
    } yield updated
  }

  /** Fetches confirmations for the given blocks in parallel */
  private def getConfirmationsForBlocks(
      relevantBlocks: Map[
        Option[DoubleSha256DigestBE],
        Vector[SpendingInfoDb]]): Future[
      Map[Option[BlockHashWithConfs], Vector[SpendingInfoDb]]] = {

    // All chainQueryApi calls are created eagerly here, so the lookups run
    // concurrently before being sequenced below.
    val blockHashesWithConfsVec = relevantBlocks.map {
      case (blockHashOpt, spendingInfoDbs) =>
        blockHashOpt match {
          case Some(blockHash) =>
            chainQueryApi
              .getNumberOfConfirmations(blockHash)
              .map(confs => Some(BlockHashWithConfs(blockHash, confs)))
              .map(blockWithConfsOpt => (blockWithConfsOpt, spendingInfoDbs))
          case None =>
            Future.successful((None, spendingInfoDbs))
        }
    }

    Future
      .sequence(blockHashesWithConfsVec)
      .map(_.toMap)
  }

  /** Constructs a DB level representation of the given UTXO, and persist it to disk */
  protected def writeUtxo(
      tx: Transaction,
      blockHashOpt: Option[DoubleSha256DigestBE],
      output: TransactionOutput,
      outPoint: TransactionOutPoint,
      addressDb: AddressDb): Future[SpendingInfoDb] = {

    // Confirmation count of the funding tx; 0 when it is still unconfirmed.
    val confirmationsF: Future[Int] = blockHashOpt match {
      case Some(blockHash) =>
        chainQueryApi
          .getNumberOfConfirmations(blockHash)
          .map {
            case Some(confs) =>
              confs
            case None =>
              sys.error(
                s"Could not find block with our chain data source, hash=${blockHash}")
          }
      case None =>
        Future.successful(0) //no confirmations on the tx
    }

    // Coinbase outputs below maturity are flagged ImmatureCoinbase; everything
    // else is mapped by confirmation count.
    val stateF: Future[TxoState] = confirmationsF.map { confs =>
      if (
        tx.inputs.head
          .isInstanceOf[CoinbaseInput] && confs <= Consensus.coinbaseMaturity
      ) {
        TxoState.ImmatureCoinbase
      } else {
        UtxoHandling.getReceiveConfsState(confs,
                                          walletConfig.requiredConfirmations)
      }
    }

    // Build the script-type-specific spending info record.
    val utxoF: Future[SpendingInfoDb] = stateF.map { state =>
      addressDb match {
        case segwitAddr: SegWitAddressDb =>
          SegwitV0SpendingInfo(
            state = state,
            txid = tx.txIdBE,
            outPoint = outPoint,
            output = output,
            privKeyPath = segwitAddr.path,
            scriptWitness = segwitAddr.witnessScript,
            spendingTxIdOpt = None
          )
        case LegacyAddressDb(path, _, _, _, _) =>
          LegacySpendingInfo(state = state,
                             txid = tx.txIdBE,
                             outPoint = outPoint,
                             output = output,
                             privKeyPath = path,
                             spendingTxIdOpt = None)
        case nested: NestedSegWitAddressDb =>
          NestedSegwitV0SpendingInfo(
            outPoint = outPoint,
            output = output,
            privKeyPath = nested.path,
            redeemScript = P2WPKHWitnessSPKV0(nested.ecPublicKey),
            scriptWitness = P2WPKHWitnessV0(nested.ecPublicKey),
            txid = tx.txIdBE,
            state = state,
            spendingTxIdOpt = None,
            id = None
          )
      }
    }

    for {
      utxo <- utxoF
      written <- spendingInfoDAO.create(utxo)
    } yield {
      val writtenOut = written.outPoint
      logger.info(
        s"Successfully inserted UTXO ${writtenOut.txIdBE.hex}:${writtenOut.vout.toInt} amt=${output.value} into DB")
      logger.debug(s"UTXO details: ${written.output}")
      written
    }
  }

  /** Marks the given utxos as Reserved so coin selection won't reuse them,
    * then fires the reserved-utxos callback.
    */
  override def markUTXOsAsReserved(
      utxos: Vector[SpendingInfoDb]): Future[Vector[SpendingInfoDb]] = {
    val outPoints = utxos.map(_.outPoint)
    logger.info(s"Reserving utxos=$outPoints")
    val updated = utxos.map(_.copyWithState(TxoState.Reserved))
    for {
      utxos <- spendingInfoDAO.markAsReserved(updated)
      _ <- walletCallbacks.executeOnReservedUtxos(logger, utxos)
    } yield utxos
  }

  /** @inheritdoc */
  override def markUTXOsAsReserved(
      tx: Transaction): Future[Vector[SpendingInfoDb]] = {
    for {
      utxos <- spendingInfoDAO.findOutputsBeingSpent(tx)
      reserved <- markUTXOsAsReserved(utxos)
    } yield reserved
  }

  /** Releases previously Reserved utxos back into the received states,
    * re-deriving each one's confirmation state. Fails the returned future if
    * any input utxo is not currently Reserved.
    */
  override def unmarkUTXOsAsReserved(
      utxos: Vector[SpendingInfoDb]): Future[Vector[SpendingInfoDb]] = {
    logger.info(s"Unreserving utxos ${utxos.map(_.outPoint)}")
    val updatedUtxosF = Future {
      //make sure exception isn't thrown outside of a future to fix
      //see: https://github.com/bitcoin-s/bitcoin-s/issues/3813
      val unreserved = utxos.filterNot(_.state == TxoState.Reserved)
      require(unreserved.isEmpty,
              s"Some utxos are not reserved, got $unreserved")

      // unmark all utxos are reserved
      val updatedUtxos = utxos
        .map(_.copyWithState(TxoState.PendingConfirmationsReceived))
      updatedUtxos
    }

    for {
      updatedUtxos <- updatedUtxosF
      // update the confirmed utxos
      updatedConfirmed <- updateUtxoReceiveConfirmedStates(updatedUtxos)

      // update the utxos that are in blocks but not considered confirmed yet
      pendingConf = updatedUtxos.filterNot(utxo =>
        updatedConfirmed.exists(_.outPoint == utxo.outPoint))

      updated <- spendingInfoDAO.updateAllSpendingInfoDb(
        pendingConf ++ updatedConfirmed)

      _ <- walletCallbacks.executeOnReservedUtxos(logger, updated)
    } yield updated
  }

  /** @inheritdoc */
  override def unmarkUTXOsAsReserved(
      tx: Transaction): Future[Vector[SpendingInfoDb]] = {
    for {
      utxos <- spendingInfoDAO.findOutputsBeingSpent(tx)
      reserved = utxos.filter(_.state == TxoState.Reserved)
      updated <- unmarkUTXOsAsReserved(reserved.toVector)
    } yield updated
  }

  /** @inheritdoc */
  override def updateUtxoPendingStates(): Future[Vector[SpendingInfoDb]] = {
    for {
      infos <- spendingInfoDAO.findAllPendingConfirmation
      _ = logger.debug(s"Updating states of ${infos.size} pending utxos...")
      // Received and spent utxos transition through different state machines,
      // so they are updated separately and re-joined at the end.
      receivedUtxos = infos.filter(_.state.isInstanceOf[ReceivedState])
      spentUtxos = infos.filter(_.state.isInstanceOf[SpentState])
      updatedReceivedInfos <- updateUtxoReceiveConfirmedStates(receivedUtxos)
      updatedSpentInfos <- updateUtxoSpentConfirmedStates(spentUtxos)
    } yield (updatedReceivedInfos ++ updatedSpentInfos).toVector
  }
}
object UtxoHandling {

  /** Transitions a utxo in a received (or reserved/immature) state according
    * to the number of confirmations it has accumulated. Reserved utxos are
    * left untouched; calling this on a spent utxo is a programmer error.
    */
  def updateReceivedTxoWithConfs(
      txo: SpendingInfoDb,
      confs: Int,
      requiredConfirmations: Int): SpendingInfoDb = {
    txo.state match {
      case TxoState.ImmatureCoinbase =>
        // Coinbase outputs stay immature until past coinbase maturity.
        if (confs <= Consensus.coinbaseMaturity) txo
        else if (confs >= requiredConfirmations)
          txo.copyWithState(TxoState.ConfirmedReceived)
        else txo.copyWithState(TxoState.PendingConfirmationsReceived)
      case TxoState.PendingConfirmationsReceived | TxoState.BroadcastReceived |
          TxoState.ConfirmedReceived =>
        txo.copyWithState(getReceiveConfsState(confs, requiredConfirmations))
      case TxoState.Reserved =>
        // Reserved utxos are pinned until explicitly unreserved.
        txo
      case state: SpentState =>
        sys.error(s"Cannot update spendingInfoDb in spent state=$state")
    }
  }

  /** Given a number of confirmations and the required confirmations for the wallet
    * this method returns the appropriate [[ReceivedState]] for the number of confirmations
    */
  def getReceiveConfsState(
      confs: Int,
      requireConfirmations: Int): ReceivedState = {
    if (confs < 0) {
      sys.error(
        s"Cannot have negative confirmations, got=$confs. Did the block get reorged or exist?")
    }
    // Zero confirmations is checked first so it always maps to broadcast.
    confs match {
      case 0                                  => TxoState.BroadcastReceived
      case c if c >= requireConfirmations     => TxoState.ConfirmedReceived
      case _                                  => TxoState.PendingConfirmationsReceived
    }
  }

  /** Transitions a utxo into the spent-side state machine based on the
    * confirmation count of its spending transaction. Calling this on an
    * immature coinbase output is a programmer error.
    */
  def updateSpentTxoWithConfs(
      txo: SpendingInfoDb,
      confs: Int,
      requiredConfirmations: Int): SpendingInfoDb = {
    txo.state match {
      case TxoState.ImmatureCoinbase =>
        sys.error(
          s"Cannot update txo with received state=${TxoState.ImmatureCoinbase}")
      case TxoState.Reserved | TxoState.PendingConfirmationsSpent |
          TxoState.ConfirmedSpent | TxoState.BroadcastSpent |
          TxoState.PendingConfirmationsReceived | TxoState.BroadcastReceived |
          TxoState.ConfirmedReceived =>
        val nextState =
          if (confs >= requiredConfirmations) TxoState.ConfirmedSpent
          else if (confs == 0) TxoState.BroadcastSpent
          else TxoState.PendingConfirmationsSpent
        txo.copyWithState(nextState)
    }
  }
}
| bitcoin-s/bitcoin-s | wallet/src/main/scala/org/bitcoins/wallet/internal/UtxoHandling.scala | Scala | mit | 15,643 |
package temportalist.origin.foundation.server
import java.util.UUID
import com.mojang.authlib.GameProfile
import net.minecraft.command.{CommandBase, ICommandSender, WrongUsageException}
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.util.text.TextComponentString
import temportalist.origin.api.common.utility.Players
/**
* Created by TheTemportalist on 1/17/2016.
*/
trait ICommand extends CommandBase {

  /** Usage string shown when the command is misused. */
  def getUsage: String

  override def getCommandUsage(sender: ICommandSender): String = this.getUsage

  /** Throws a WrongUsageException carrying the usage string, optionally
    * suffixed with ".<suffix>".
    */
  final def wrongUsage(suffix: String = null): Unit =
    this.wrongUse(this.getUsage + (if (suffix == null) "" else "." + suffix))

  private def wrongUse(message: String): Unit = {
    throw new WrongUsageException(message)
  }

  /** Resolves a GameProfile from `str`, trying in order: username lookup,
    * UUID lookup, and (when `checkSender` is true) the sending player's own
    * profile. Throws WrongUsageException if none apply; the trailing `null`
    * is unreachable because wrongUse always throws.
    */
  final def getPlayerProfile(sender: ICommandSender, str: String,
      checkSender: Boolean = true): GameProfile = {
    val cache = Players.getCache
    cache.getGameProfileForUsername(if (str == null) "" else str) match {
      case profile: GameProfile => profile // 1) try to get the profile by name
      case _ =>
        try cache.getProfileByUUID(UUID.fromString(str)) // 2) try str as UUID
        catch {
          case e: Exception =>
            if (checkSender) {
              sender match {
                case player: EntityPlayer =>
                  return player.getGameProfile // 3) get sending player
                case _ =>
              }
            }
            wrongUse("commands.icommand.player")
            null
        }
    }
  }

  /** Parses `str` as an int via the vanilla helper (throws on bad input). */
  final def asInt(str: String): Int = {
    CommandBase.parseInt(str)
  }

  /** True when the sender may use this command at the given op level. */
  final def canOpLevel(sender: ICommandSender, level: Int): Boolean =
    sender.canCommandSenderUseCommand(level, this.getCommandName)

  /** Tells the sender they lack the given op level. */
  final def incorrectOp(sender: ICommandSender, level: Int): Unit = {
    sender.addChatMessage(new TextComponentString("You are not opped at level " + level))
  }

  // NOTE(review): the return value looks inverted relative to the name —
  // this returns false (and messages the sender) when the sender CANNOT op
  // at `level`, and true when they can. Callers appear to depend on this
  // behavior, so it is documented rather than changed; confirm intent.
  final def isBadOp(sender: ICommandSender, level: Int): Boolean = {
    if (!this.canOpLevel(sender, level)) {
      this.incorrectOp(sender, level)
      false
    } else true
  }
}
| TheTemportalist/Origin | src/foundation/scala/temportalist/origin/foundation/server/ICommand.scala | Scala | apache-2.0 | 1,969 |
package changestream.helpers
import com.github.mauricio.async.db.Configuration
import com.github.mauricio.async.db.mysql.MySQLConnection
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
/** Test fixture that owns a MySQL connection and (re)creates the
  * changestream_test.users table around every test. Exposes canned SQL
  * statements (INSERT/UPDATE/DELETE) for specs to replay.
  */
trait Database extends Base with Config with BeforeAndAfterAll {
  val INSERT = s"""INSERT INTO changestream_test.users VALUES (
                  | NULL, "peter", "hello", 10, "I Am Peter", 0,
                  | 0.18289882, 0.23351681013224323546495497794239781796932220458984375, NOW(), CURDATE(), CURTIME(), NOW(), null, null, null)
  """.stripMargin.trim
  val INSERT_MULTI = s"""${INSERT},
                        | (NULL, "multiguy", "stupid", 1000000, "Leeloo Dallas Multiguy", 0, 0.18289882,
                        | 0.23351681013224323546495497794239781796932220458984375, NOW(), CURDATE(), CURTIME(), NOW(), null, null, null)""".stripMargin
  val UPDATE = s"""UPDATE changestream_test.users set username = "username2", password = "password2", login_count = login_count + 1, bio = "bio2""""
  val UPDATE_ALL = "UPDATE changestream_test.users set login_count = login_count + 1"
  val DELETE = "DELETE from changestream_test.users LIMIT 1"
  val DELETE_ALL = "DELETE from changestream_test.users"

  // Connection settings come from the changestream.mysql test config block.
  protected val config = testConfig.getConfig("changestream.mysql")
  protected val mysqlConfig = new Configuration(
    config.getString("user"),
    config.getString("host"),
    config.getInt("port"),
    Some(config.getString("password"))
  )
  protected val connectionTimeout = config.getLong("timeout")
  protected val connection = new MySQLConnection(mysqlConfig)

  /** Runs `sql` and blocks until it completes (test-only convenience). */
  def queryAndWait(sql: String): Unit = Await.result(connection.sendQuery(sql), connectionTimeout milliseconds)

  // Connect once for the whole suite; failures are logged, not rethrown,
  // so the suite reports per-test errors instead of aborting setup.
  override def beforeAll(): Unit = {
    try {
      Await.result(connection.connect, connectionTimeout milliseconds)
    } catch {
      case e: Exception =>
        println(s"Could not connect to MySQL server for Metadata: ${e.getMessage}\n${e.getStackTrace.mkString("\n")}")
    }
  }

  override def afterAll(): Unit = {
    Await.result(connection.disconnect, connectionTimeout milliseconds)
  }

  // Fresh schema before each test: drop, recreate database, create table.
  before {
    try {
      val result = connection.sendQuery("drop database if exists changestream_test")
        .flatMap(_ => connection.sendQuery("create database changestream_test"))
        .flatMap(_ => connection.sendQuery(s"""
                                              | CREATE TABLE changestream_test.users (
                                              | `id` int(11) NOT NULL AUTO_INCREMENT,
                                              | `username` varchar(32) DEFAULT NULL,
                                              | `password` varchar(32) DEFAULT NULL,
                                              | `login_count` int(11) NOT NULL DEFAULT '0',
                                              | `bio` text DEFAULT NULL,
                                              | `two_bit_field` bit DEFAULT NULL,
                                              | `float_field` float DEFAULT NULL,
                                              | `big_decimal_field` decimal DEFAULT NULL,
                                              | `java_util_date` datetime DEFAULT NULL,
                                              | `java_sql_date` date DEFAULT NULL,
                                              | `java_sql_time` time DEFAULT NULL,
                                              | `java_sql_timestamp` timestamp DEFAULT NOW(),
                                              | `non_truncated_text` blob DEFAULT NULL,
                                              | `text_that_should_be_truncated` blob DEFAULT NULL,
                                              | `random_bytes_blob` blob DEFAULT NULL,
                                              | PRIMARY KEY (`id`)
                                              | ) ENGINE=InnoDB;
        """.stripMargin))
      Await.result(result, (connectionTimeout * 3) milliseconds)
    } catch {
      case e: Exception =>
        println(s"Could not initialize database for tests ${e.getMessage}\n${e.getStackTrace.mkString("\n")}")
    }
  }

  // Drop the scratch database after each test.
  after {
    try {
      val result = connection.sendQuery("drop database if exists changestream_test")
      Await.result(result, connectionTimeout milliseconds)
    } catch {
      case e: Exception =>
        println(s"Could not tear down database for tests ${e.getMessage}\n${e.getStackTrace.mkString("\n")}")
    }
  }
}
| mavenlink/changestream | src/test/scala/changestream/helpers/Database.scala | Scala | mit | 4,509 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.TestUtils.assertSpilled
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.apache.spark.sql.internal.SQLConf.{WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD, WINDOW_EXEC_BUFFER_SPILL_THRESHOLD}
import org.apache.spark.sql.test.SharedSparkSession
case class WindowData(month: Int, area: String, product: Int)
/**
* Test suite for SQL window functions.
*/
class SQLWindowFunctionSuite extends QueryTest with SharedSparkSession {
import testImplicits._
  // Window aggregates layered over GROUP BY aggregates: sum(sum(...)) OVER,
  // arithmetic on the inner aggregate, and ratios of the two.
  test("window function: udaf with aggregate expression") {
    val data = Seq(
      WindowData(1, "a", 5),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 10)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      checkAnswer(
        sql(
          """
            |select area, sum(product), sum(sum(product)) over (partition by area)
            |from windowData group by month, area
          """.stripMargin),
        Seq(
          ("a", 5, 11),
          ("a", 6, 11),
          ("b", 7, 15),
          ("b", 8, 15),
          ("c", 9, 19),
          ("c", 10, 19)
        ).map(i => Row(i._1, i._2, i._3)))

      checkAnswer(
        sql(
          """
            |select area, sum(product) - 1, sum(sum(product)) over (partition by area)
            |from windowData group by month, area
          """.stripMargin),
        Seq(
          ("a", 4, 11),
          ("a", 5, 11),
          ("b", 6, 15),
          ("b", 7, 15),
          ("c", 8, 19),
          ("c", 9, 19)
        ).map(i => Row(i._1, i._2, i._3)))

      checkAnswer(
        sql(
          """
            |select area, sum(product), sum(product) / sum(sum(product)) over (partition by area)
            |from windowData group by month, area
          """.stripMargin),
        Seq(
          ("a", 5, 5d/11),
          ("a", 6, 6d/11),
          ("b", 7, 7d/15),
          ("b", 8, 8d/15),
          ("c", 10, 10d/19),
          ("c", 9, 9d/19)
        ).map(i => Row(i._1, i._2, i._3)))

      checkAnswer(
        sql(
          """
            |select area, sum(product), sum(product) / sum(sum(product) - 1) over
            |(partition by area)
            |from windowData group by month, area
          """.stripMargin),
        Seq(
          ("a", 5, 5d/9),
          ("a", 6, 6d/9),
          ("b", 7, 7d/13),
          ("b", 8, 8d/13),
          ("c", 10, 10d/17),
          ("c", 9, 9d/17)
        ).map(i => Row(i._1, i._2, i._3)))
    }
  }
  // A window function ordering by a column of the inner (aliased) subquery.
  test("window function: refer column in inner select block") {
    val data = Seq(
      WindowData(1, "a", 5),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 10)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      checkAnswer(
        sql(
          """
            |select area, rank() over (partition by area order by tmp.month) + tmp.tmp1 as c1
            |from (select month, area, product, 1 as tmp1 from windowData) tmp
          """.stripMargin),
        Seq(
          ("a", 2),
          ("a", 3),
          ("b", 2),
          ("b", 3),
          ("c", 2),
          ("c", 3)
        ).map(i => Row(i._1, i._2)))
    }
  }
  // PARTITION BY / ORDER BY clauses using expressions (constants, arithmetic)
  // rather than plain column references.
  test("window function: partition and order expressions") {
    val data = Seq(
      WindowData(1, "a", 5),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 10)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      checkAnswer(
        sql(
          """
            |select month, area, product, sum(product + 1) over (partition by 1 order by 2)
            |from windowData
          """.stripMargin),
        Seq(
          (1, "a", 5, 51),
          (2, "a", 6, 51),
          (3, "b", 7, 51),
          (4, "b", 8, 51),
          (5, "c", 9, 51),
          (6, "c", 10, 51)
        ).map(i => Row(i._1, i._2, i._3, i._4)))

      checkAnswer(
        sql(
          """
            |select month, area, product, sum(product)
            |over (partition by month % 2 order by 10 - product)
            |from windowData
          """.stripMargin),
        Seq(
          (1, "a", 5, 21),
          (2, "a", 6, 24),
          (3, "b", 7, 16),
          (4, "b", 8, 18),
          (5, "c", 9, 9),
          (6, "c", 10, 10)
        ).map(i => Row(i._1, i._2, i._3, i._4)))
    }
  }
  // DISTINCT inside a window aggregate must raise an AnalysisException rather
  // than being silently dropped.
  test("window function: distinct should not be silently ignored") {
    val data = Seq(
      WindowData(1, "a", 5),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 10)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      val e = intercept[AnalysisException] {
        sql(
          """
            |select month, area, product, sum(distinct product + 1) over (partition by 1 order by 2)
            |from windowData
          """.stripMargin)
      }
      assert(e.getMessage.contains("Distinct window functions are not supported"))
    }
  }
  // Expressions as window-function arguments: lag() with a computed offset
  // and default, partitioned by an arithmetic expression.
  test("window function: expressions in arguments of a window functions") {
    val data = Seq(
      WindowData(1, "a", 5),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 10)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      checkAnswer(
        sql(
          """
            |select month, area, month % 2,
            |lag(product, 1 + 1, product) over (partition by month % 2 order by area)
            |from windowData
          """.stripMargin),
        Seq(
          (1, "a", 1, 5),
          (2, "a", 0, 6),
          (3, "b", 1, 7),
          (4, "b", 0, 8),
          (5, "c", 1, 5),
          (6, "c", 0, 6)
        ).map(i => Row(i._1, i._2, i._3, i._4)))
    }
  }
  // ORDER BY columns that are not part of the projected output, combined with
  // window functions over subqueries and GROUP BY results.
  test("window function: Sorting columns are not in Project") {
    val data = Seq(
      WindowData(1, "d", 10),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 11)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")

      checkAnswer(
        sql("select month, product, sum(product + 1) over() from windowData order by area"),
        Seq(
          (2, 6, 57),
          (3, 7, 57),
          (4, 8, 57),
          (5, 9, 57),
          (6, 11, 57),
          (1, 10, 57)
        ).map(i => Row(i._1, i._2, i._3)))

      checkAnswer(
        sql(
          """
            |select area, rank() over (partition by area order by tmp.month) + tmp.tmp1 as c1
            |from (select month, area, product as p, 1 as tmp1 from windowData) tmp order by p
          """.stripMargin),
        Seq(
          ("a", 2),
          ("b", 2),
          ("b", 3),
          ("c", 2),
          ("d", 2),
          ("c", 3)
        ).map(i => Row(i._1, i._2)))

      checkAnswer(
        sql(
          """
            |select area, rank() over (partition by area order by month) as c1
            |from windowData group by product, area, month order by product, area
          """.stripMargin),
        Seq(
          ("a", 1),
          ("b", 1),
          ("b", 2),
          ("c", 1),
          ("d", 1),
          ("c", 2)
        ).map(i => Row(i._1, i._2)))

      checkAnswer(
        sql(
          """
            |select area, sum(product) / sum(sum(product)) over (partition by area) as c1
            |from windowData group by area, month order by month, c1
          """.stripMargin),
        Seq(
          ("d", 1.0),
          ("a", 1.0),
          ("b", 0.4666666666666667),
          ("b", 0.5333333333333333),
          ("c", 0.45),
          ("c", 0.55)
        ).map(i => Row(i._1, i._2)))
    }
  }
  // todo: fix this test case by reimplementing the function ResolveAggregateFunctions
  // Disabled: sorting on aggregate expressions (avg(month)) that appear only in
  // the ORDER BY/HAVING clauses requires pushing them into the Aggregate first.
  ignore("window function: Pushing aggregate Expressions in Sort to Aggregate") {
    val data = Seq(
      WindowData(1, "d", 10),
      WindowData(2, "a", 6),
      WindowData(3, "b", 7),
      WindowData(4, "b", 8),
      WindowData(5, "c", 9),
      WindowData(6, "c", 11)
    )
    withTempView("windowData") {
      sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
      checkAnswer(
        sql(
          """
            |select area, sum(product) over () as c from windowData
            |where product > 3 group by area, product
            |having avg(month) > 0 order by avg(month), product
          """.stripMargin),
        Seq(
          ("a", 51),
          ("b", 51),
          ("b", 51),
          ("c", 51),
          ("c", 51),
          ("d", 51)
        ).map(i => Row(i._1, i._2)))
    }
  }
  // Several WINDOW clauses (w1..w3) can be referenced from one projection,
  // including an expression (`combined2`) that combines all three. Keyword
  // casing in the frame specification is deliberately mixed.
  test("window function: multiple window expressions in a single expression") {
    val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y")
    nums.createOrReplaceTempView("nums")

    val expected =
      Row(1, 1, 1, 55, 1, 57) ::
      Row(0, 2, 3, 55, 2, 60) ::
      Row(1, 3, 6, 55, 4, 65) ::
      Row(0, 4, 10, 55, 6, 71) ::
      Row(1, 5, 15, 55, 9, 79) ::
      Row(0, 6, 21, 55, 12, 88) ::
      Row(1, 7, 28, 55, 16, 99) ::
      Row(0, 8, 36, 55, 20, 111) ::
      Row(1, 9, 45, 55, 25, 125) ::
      Row(0, 10, 55, 55, 30, 140) :: Nil

    val actual = sql(
      """
        |SELECT
        |  y,
        |  x,
        |  sum(x) OVER w1 AS running_sum,
        |  sum(x) OVER w2 AS total_sum,
        |  sum(x) OVER w3 AS running_sum_per_y,
        |  ((sum(x) OVER w1) + (sum(x) OVER w2) + (sum(x) OVER w3)) as combined2
        |FROM nums
        |WINDOW w1 AS (ORDER BY x ROWS BETWEEN UnBOUNDED PRECEDiNG AND CuRRENT RoW),
        |       w2 AS (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOuNDED FoLLOWING),
        |       w3 AS (PARTITION BY y ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
      """.stripMargin)

    checkAnswer(actual, expected)

    spark.catalog.dropTempView("nums")
  }
  // RANGE-framed windows (history, symmetric, preceding-only, following-only
  // and unbounded-future sums) evaluated together in one projection.
  // NOTE(review): the view is registered at line 2 and again inside
  // withTempView's scope for the same name — confirm the duplicate
  // registration is intentional.
  test("window function: multiple window expressions specified by range in a single expression") {
    val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y")
    nums.createOrReplaceTempView("nums")
    withTempView("nums") {
      val expected =
        Row(1, 1, 1, 4, null, 8, 25) ::
          Row(1, 3, 4, 9, 1, 12, 24) ::
          Row(1, 5, 9, 15, 4, 16, 21) ::
          Row(1, 7, 16, 21, 8, 9, 16) ::
          Row(1, 9, 25, 16, 12, null, 9) ::
          Row(0, 2, 2, 6, null, 10, 30) ::
          Row(0, 4, 6, 12, 2, 14, 28) ::
          Row(0, 6, 12, 18, 6, 18, 24) ::
          Row(0, 8, 20, 24, 10, 10, 18) ::
          Row(0, 10, 30, 18, 14, null, 10) ::
          Nil

      val actual = sql(
        """
          |SELECT
          |  y,
          |  x,
          |  sum(x) over w1 as history_sum,
          |  sum(x) over w2 as period_sum1,
          |  sum(x) over w3 as period_sum2,
          |  sum(x) over w4 as period_sum3,
          |  sum(x) over w5 as future_sum
          |FROM nums
          |WINDOW
          |  w1 AS (PARTITION BY y ORDER BY x RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
          |  w2 AS (PARTITION BY y ORDER BY x RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING),
          |  w3 AS (PARTITION BY y ORDER BY x RANGE BETWEEN 4 PRECEDING AND 2 PRECEDING ),
          |  w4 AS (PARTITION BY y ORDER BY x RANGE BETWEEN 2 FOLLOWING AND 4 FOLLOWING),
          |  w5 AS (PARTITION BY y ORDER BY x RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
        """.stripMargin
      )

      checkAnswer(actual, expected)
    }
  }
  // Regression test for SPARK-7595: a window expression inside a CTE used in
  // a self-join must still resolve.
  test("SPARK-7595: Window will cause resolve failed with self join") {
    checkAnswer(sql(
      """
        |with
        | v0 as (select 0 as key, 1 as value),
        | v1 as (select key, count(value) over (partition by key) cnt_val from v0),
        | v2 as (select v1.key, v1_lag.cnt_val from v1 cross join v1 v1_lag
        |        where v1.key = v1_lag.key)
        | select key, cnt_val from v2 order by key limit 1
      """.stripMargin), Row(0, 1))
  }
  // Regression test for SPARK-16633: when the offset row does not exist,
  // lead/lag must return the default value — whether it is a literal (321)
  // or a column reference (`a`).
  test("SPARK-16633: lead/lag should return the default value if the offset row does not exist") {
    checkAnswer(sql(
      """
        |SELECT
        |  lag(123, 100, 321) OVER (ORDER BY id) as lag,
        |  lead(123, 100, 321) OVER (ORDER BY id) as lead
        |FROM (SELECT 1 as id) tmp
      """.stripMargin),
      Row(321, 321))

    checkAnswer(sql(
      """
        |SELECT
        |  lag(123, 100, a) OVER (ORDER BY id) as lag,
        |  lead(123, 100, a) OVER (ORDER BY id) as lead
        |FROM (SELECT 1 as id, 2 as a) tmp
      """.stripMargin),
      Row(2, 2))
  }
  // A null in the lead/lag source column must be returned as null, not
  // replaced by the default; the default applies only to missing rows.
  test("lead/lag should respect null values") {
    checkAnswer(sql(
      """
        |SELECT
        |  b,
        |  lag(a, 1, 321) OVER (ORDER BY b) as lag,
        |  lead(a, 1, 321) OVER (ORDER BY b) as lead
        |FROM (SELECT cast(null as int) as a, 1 as b
        |      UNION ALL
        |      select cast(null as int) as id, 2 as b) tmp
      """.stripMargin),
      Row(1, 321, null) :: Row(2, null, 321) :: Nil)

    checkAnswer(sql(
      """
        |SELECT
        |  b,
        |  lag(a, 1, c) OVER (ORDER BY b) as lag,
        |  lead(a, 1, c) OVER (ORDER BY b) as lead
        |FROM (SELECT cast(null as int) as a, 1 as b, 3 as c
        |      UNION ALL
        |      select cast(null as int) as id, 2 as b, 4 as c) tmp
      """.stripMargin),
      Row(1, 3, null) :: Row(2, null, 4) :: Nil)
  }
  // Forces the window operator's buffer thresholds down to 1/2 rows so that
  // execution must spill to disk; assertSpilled verifies the spill happened
  // and the answer is still correct.
  test("test with low buffer spill threshold") {
    val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y")
    nums.createOrReplaceTempView("nums")

    val expected =
      Row(1, 1, 1) ::
      Row(0, 2, 3) ::
      Row(1, 3, 6) ::
      Row(0, 4, 10) ::
      Row(1, 5, 15) ::
      Row(0, 6, 21) ::
      Row(1, 7, 28) ::
      Row(0, 8, 36) ::
      Row(1, 9, 45) ::
      Row(0, 10, 55) :: Nil

    val actual = sql(
      """
        |SELECT y, x, sum(x) OVER w1 AS running_sum
        |FROM nums
        |WINDOW w1 AS (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDiNG AND CURRENT RoW)
      """.stripMargin)

    withSQLConf(WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "1",
      WINDOW_EXEC_BUFFER_SPILL_THRESHOLD.key -> "2") {
      assertSpilled(sparkContext, "test with low buffer spill threshold") {
        checkAnswer(actual, expected)
      }
    }

    spark.catalog.dropTempView("nums")
  }
}
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala | Scala | apache-2.0 | 15,695 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.abstractnn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect._
/**
* [[Activity]] is a trait which represents
* the concept of neural input within neural
* networks. For now, two type of input are
* supported and extending this trait, which
* are [[Tensor]] and [[Table]].
*/
trait Activity {
  /** Views this activity as a [[Tensor]] of numeric type `D`. */
  def toTensor[D](implicit ev: TensorNumeric[D]): Tensor[D]

  /** Views this activity as a [[Table]]. */
  def toTable: Table

  /** True when the underlying representation is a [[Tensor]]. */
  def isTensor: Boolean

  /** True when the underlying representation is a [[Table]]. */
  def isTable: Boolean
}
object Activity {
  /**
   * Creates an empty activity of the requested type `A`: an empty
   * [[Tensor]] when `A` is `Tensor[T]`, an empty [[Table]] (via `T()`)
   * when `A` is `Table`.
   *
   * NOTE(review): for any other `A` the result is `null` cast to `A`,
   * so callers receive null rather than an error — confirm this fallback
   * is intentional before relying on it.
   */
  def apply[A <: Activity: ClassTag, T : ClassTag]()(
    implicit ev: TensorNumeric[T]): A = {
    val result = if (classTag[A] == classTag[Tensor[T]]) {
      Tensor[T]()
    } else if (classTag[A] == classTag[Table]) {
      T()
    } else {
      null
    }
    result.asInstanceOf[A]
  }
}
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/abstractnn/Activity.scala | Scala | apache-2.0 | 1,530 |
package net.fehmicansaglam.bson
/**
 * A value identified by its `identifier`: two instances are equal iff their
 * identifiers are equal.
 */
trait Identifiable[A] {

  /** The identity of this value; equality and hashing delegate to it. */
  def identifier: A

  override def equals(other: Any): Boolean = other match {
    // Type erasure prevents checking the element type `A` at runtime; like
    // the original isInstanceOf-based check, equality is delegated to the
    // identifiers themselves.
    case that: Identifiable[_] => that.identifier == this.identifier
    case _ => false
  }

  // equals is overridden, so hashCode must be overridden consistently —
  // otherwise equal instances land in different hash buckets.
  override def hashCode(): Int =
    if (identifier == null) 0 else identifier.hashCode()
}
| fehmicansaglam/tepkin | bson/src/main/scala/net/fehmicansaglam/bson/Identifiable.scala | Scala | apache-2.0 | 241 |
package co.ledger.wallet.core.net
import org.json.{JSONArray, JSONObject}
/**
*
* WebSocket
* ledger-wallet-ripple-chrome
*
* Created by Pierre Pollastri on 23/06/2016.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
/**
 * Minimal WebSocket abstraction: concrete transports implement the raw
 * send/close primitives, while callers register event handlers through the
 * `onXxx` methods.
 */
trait WebSocket {

  /** Sends a text frame. */
  def send(data: String): Unit

  /** Sends a binary frame. */
  def send(data: Array[Byte]): Unit

  /** Sends a JSON object serialized to its string form. */
  def send(json: JSONObject): Unit = send(json.toString)

  /** Sends a JSON array serialized to its string form. */
  def send(json: JSONArray): Unit = send(json.toString)

  /** Closes the connection. */
  def close(): Unit

  /** True while the connection is open. */
  def isOpen: Boolean

  // Explicit result type added: public members should not rely on inference.
  def isClosed: Boolean = !isOpen

  // Handler registration. Each call REPLACES the previously registered
  // handler — handlers are stored in single Options, not accumulated.
  def onOpen(handler: () => Unit): Unit = openHandler = Option(handler)
  def onJsonMessage(handler: JSONObject => Unit): Unit = jsonHandler = Option(handler)
  def onStringMessage(handler: String => Unit): Unit = stringHandler = Option(handler)
  def onClose(handler: (Throwable) => Unit): Unit = closeHandler = Option(handler)
  def onError(handler: Throwable => Unit): Unit = errorHandler = Option(handler)

  protected var openHandler: Option[() => Unit] = None
  protected var jsonHandler: Option[(JSONObject) => Unit] = None
  protected var stringHandler: Option[(String) => Unit] = None
  protected var closeHandler: Option[(Throwable) => Unit] = None
  protected var errorHandler: Option[(Throwable) => Unit] = None
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/co/ledger/wallet/core/net/WebSocket.scala | Scala | mit | 2,340 |
package controllers.save_for_later
import java.util.concurrent.TimeUnit
import play.Logger
import gov.dwp.carers.play2.resilientmemcached.MemcachedCacheApi
import models.domain._
import models.view.CachedClaim
import models.view.cache.EncryptedCacheHandling
import org.specs2.mutable._
import play.api.i18n.Lang
import play.api.test.FakeRequest
import play.api.test.Helpers._
import utils.{LightFakeApplicationWithMemcache, WithMemcacheApplication, LightFakeApplication, WithApplication}
import models.{MultiLineAddress, DayMonthYear, NationalInsuranceNumber}
import scala.util.control.Breaks
/**
 * Unit tests for the GSaveForLater controller: feature switches for
 * save/present, the save flow, resume-link visibility, and memcache expiry
 * of a saved claim.
 */
class GSaveForLaterSpec extends Specification {
  // Output from C3EncryptionSpec.scala ..... to create a set of xor pairs and decrypt key
  // With key of:88a976e1-e926-4bb4-9322-15aabc6d0516 created xor pair of:0bcd1234-0000-0000-0000-abcd1234cdef and:174650142322392746796619227917559908601
  val encryptkey = "88a976e1-e926-4bb4-9322-15aabc6d0516"
  val uuid = "0bcd1234-0000-0000-0000-abcd1234cdef"
  val decodeint = "174650142322392746796619227917559908601"

  section("unit", "SaveForLater")
  "Save for later controller" should {
    // Submitting while the save feature is switched off must be rejected.
    "block submit when switched off" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterSaveEnabled" -> "false"))) with Claiming {
      val request = FakeRequest()
      val result = GSaveForLater.submit(request)
      val bodyText: String = contentAsString(result)
      bodyText must contain("This service is currently switched off")
      status(result) mustEqual BAD_REQUEST
    }

    // Presenting the save screen while switched off must also be rejected.
    "block present when switched off" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterSaveEnabled" -> "false"))) with Claiming {
      val request = FakeRequest()
      val result = GSaveForLater.present("resumeurl")(request)
      val bodyText: String = contentAsString(result)
      bodyText must contain("This service is currently switched off")
      status(result) mustEqual BAD_REQUEST
    }

    // Happy path: save screen renders when the feature is on.
    "present save screen" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterSaveEnabled" -> "true"))) with Claiming {
      val request = FakeRequest()
      val result = GSaveForLater.present("resumeurl")(request)
      status(result) mustEqual OK
    }

    // Happy path: a cached claim can be submitted and redirects to /save/.
    "allow submit and return save for later success screen" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterSaveEnabled" -> "true"))) with Claiming {
      var claim = new Claim(CachedClaim.key, List(), System.currentTimeMillis(), Some(Lang("en")), "gacid", uuid)
      val details = new YourDetails("Mr", "", None, "green", NationalInsuranceNumber(Some("AB123456D")), DayMonthYear(None, None, None))
      val contactDetails = new ContactDetails(new MultiLineAddress(), None, None, None, "yes", Some("bt@bt.com"), Some("bt@bt.com"))
      claim = claim + details + contactDetails
      cache.set("default" + uuid, claim)
      val request = FakeRequest().withFormUrlEncodedBody().withSession(CachedClaim.key -> claim.uuid)
      val result = GSaveForLater.submit(request)
      status(result) mustEqual SEE_OTHER
      redirectLocation(result) must beSome("/save/")
    }

    // Resume link visibility is driven by saveForLaterShowResumeLink.
    "not contain resume link when switched OFF" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterShowResumeLink" -> "false", "saveForLaterSaveEnabled" -> "true"))) with Claiming {
      val request = FakeRequest()
      val result = GSaveForLater.present("resumeurl")(request)
      status(result) mustEqual OK
      val bodyText: String = contentAsString(result)
      bodyText must not contain("/resume")
    }

    "contain resume link when switched ON" in new WithApplication(app = LightFakeApplication(additionalConfiguration = Map("saveForLaterShowResumeLink" -> "true", "saveForLaterSaveEnabled" -> "true"))) with Claiming {
      val request = FakeRequest()
      val result = GSaveForLater.present("resumeurl")(request)
      status(result) mustEqual OK
      val bodyText: String = contentAsString(result)
      bodyText must contain("/resume")
    }

    // Warning this test sets the memcache expiry to 1+1 sec which will affect remaining tests unless overridden
    "ensure that memcache item expires in correct seconds" in new WithApplication(app = LightFakeApplicationWithMemcache(additionalConfiguration = Map("cache.saveForLaterCacheExpirySecs" -> "0", "cache.saveForLaterGracePeriodSecs" -> "5"))) with Claiming {
      cache.isInstanceOf[MemcachedCacheApi] mustEqual true
      val cacheHandling = new EncryptedCacheHandling() {
        val cacheKey = "12345678"
      }
      cacheHandling.sflClaimExpirySecs() mustEqual(0)
      cacheHandling.memcacheExpirySecs() mustEqual(5)
      var claim = new Claim(CachedClaim.key, List(), System.currentTimeMillis(), Some(Lang("en")), "gacid", uuid = uuid)
      val details = new YourDetails("Mr", "", None, "green", NationalInsuranceNumber(Some("AB123456D")), DayMonthYear(None, None, None))
      val contactDetails = new ContactDetails(new MultiLineAddress(), None, None, None, "yes", Some("bt@bt.com"), Some("bt@bt.com"))
      claim = claim + details + contactDetails
      cacheHandling.saveForLaterInCache(claim, "/savedpath")
      // Since we set the SFL expiry to 0sec+1sec the item should not exist in cache after 1 second
      val status1 = cacheHandling.checkSaveForLaterInCache(uuid)
      status1 mustEqual("OK")
      // After 1 second the claim should have expired in memcache and have an SFL status of NO-CLAIM
      // But on Jenkins this sometimes takes a little longer to drop off, so lets wait 10 secs max.
      // Poll once per second (up to 10s) until the claim drops out of memcache.
      // `exists` short-circuits on the first successful check, replacing the
      // previous mutable flag + scala.util.control.Breaks construct.
      val status = (1 to 10).exists { n =>
        TimeUnit.MILLISECONDS.sleep(1000)
        val status2 = cacheHandling.checkSaveForLaterInCache(uuid)
        Logger.info("SFL DEBUG after " + n + " seconds the status is:" + status2)
        status2 == "NO-CLAIM"
      }
      status must beTrue
    }
  }
  section("unit", "SaveForLater")
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/save_for_later/GSaveForLaterSpec.scala | Scala | mit | 6,131 |
package com.twitter.finagle.mux.exp.pushsession
import com.twitter.conversions.time._
import com.twitter.finagle.exp.pushsession.{MockChannelHandle}
import com.twitter.finagle.mux.transport.Message
import com.twitter.finagle.mux.transport.Message._
import com.twitter.finagle._
import com.twitter.finagle.mux.{Request, Response}
import com.twitter.io.{Buf, ByteReader}
import com.twitter.util.{Future, MockTimer, Promise, Time}
import org.scalatest.FunSuite
// Unit tests for MuxServerSession: request dispatch, ping handling and the
// drain/close handshake, driven synchronously through mock collaborators.
class MuxServerSessionTest extends FunSuite {
  // 1 KiB deterministic payload shared by the dispatch tests.
  private val data = Buf.ByteArray.Owned((0 until 1024).map(_.toByte).toArray)

  // Fixture wiring a MuxServerSession to a mock handle/writer/timer. The
  // default service echoes the request body twice; `serviceClosed` records
  // that the wrapped service was released.
  private abstract class Ctx {
    lazy val mockTimer: MockTimer = new MockTimer
    lazy val params: Stack.Params = MuxPush.server.params + (param.Timer(mockTimer))
    lazy val decoder: MuxMessageDecoder = new FragmentDecoder(params[param.Stats].statsReceiver)
    lazy val messageWriter: MockMessageWriter = new MockMessageWriter
    lazy val handle: MockChannelHandle[ByteReader, Buf] = new MockChannelHandle[ByteReader, Buf]()
    lazy val service: Service[Request, Response] = Service.mk { req: Request =>
      Future.value(Response(req.body.concat(req.body)))
    }

    var serviceClosed = false

    // Wraps `service` so tests can observe its close().
    private def proxyService = new ServiceProxy(service) {
      override def close(deadline: Time): Future[Unit] = {
        serviceClosed = true
        super.close(deadline)
      }
    }

    lazy val session: MuxServerSession = new MuxServerSession(
      params = params,
      decoder = decoder,
      messageWriter = messageWriter,
      handle = handle,
      service = proxyService
    )

    // Encodes `msg` and feeds it to the session as if it was read off the wire.
    def sessionReceive(msg: Message): Unit = {
      val br = ByteReader(Message.encode(msg))
      session.receive(br)
    }
  }

  // Tdispatch and Treq both reach the service and get matching R-replies.
  test("Dispatch requests") {
    new Ctx {
      sessionReceive(Tdispatch(2, Nil, Path.empty, Dtab.empty, data))
      // Service should have made a trip through the executor
      assert(1 == handle.serialExecutor.pendingTasks)
      handle.serialExecutor.executeAll()

      messageWriter.messages.dequeue() match {
        case RdispatchOk(2, Nil, body) => assert(data.concat(data) == body)
        case other => fail(s"Unexpected message: $other")
      }

      sessionReceive(Treq(2, None, data))
      // Service should have made a trip through the executor
      assert(1 == handle.serialExecutor.pendingTasks)
      handle.serialExecutor.executeAll()

      messageWriter.messages.dequeue() match {
        case RreqOk(2, body) => assert(data.concat(data) == body)
        case other => fail(s"Unexpected message: $other")
      }
    }
  }

  // Every Tping is answered by an Rping with the same tag.
  test("responds to pings") {
    new Ctx {
      sessionReceive(Tping(1))
      handle.serialExecutor.executeAll()
      assert(messageWriter.messages.dequeue() == Message.PreEncoded.Rping)

      sessionReceive(Tping(2))
      handle.serialExecutor.executeAll()
      val Rping(2) = messageWriter.messages.dequeue()
    }
  }

  // A pending dispatch is allowed to finish before the drain deadline; once
  // it completes and the Rdrain arrives, the session closes cleanly.
  test("Drains allow dispatches to finish") {
    new Ctx {
      val rep = Promise[Response]
      override lazy val service = Service.constant(rep)

      Time.withCurrentTimeFrozen { control =>
        session.close(5.seconds)
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Busy)
        val Tdrain(1) = messageWriter.messages.dequeue()

        // Send one message (we haven't sent the Rdrain)
        sessionReceive(Tdispatch(2, Nil, Path.empty, Dtab.empty, data))
        handle.serialExecutor.executeAll()

        // Don't quite hit the deadline
        control.advance(5.seconds - 1.millisecond)
        mockTimer.tick()
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Busy)

        rep.setValue(Response(Nil, data))
        handle.serialExecutor.executeAll()
        val RdispatchOk(2, Seq(), `data`) = messageWriter.messages.dequeue()
        assert(session.status == Status.Busy)

        sessionReceive(Rdrain(1))
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Closed)
        assert(handle.closedCalled)
        assert(serviceClosed)
      }
    }
  }

  // If the client never answers with Rdrain, the deadline interrupts the
  // outstanding dispatch and force-closes the session.
  test("Drains will cancel dispatches after timeout without receiving Rdrain") {
    new Ctx {
      val rep = Promise[Response]
      override lazy val service = Service.constant(rep)

      Time.withCurrentTimeFrozen { control =>
        session.close(5.seconds)
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Busy)
        val Tdrain(1) = messageWriter.messages.dequeue()

        // Send one message (we haven't sent the Rdrain)
        sessionReceive(Tdispatch(2, Nil, Path.empty, Dtab.empty, data))
        handle.serialExecutor.executeAll()

        // Hit the deadline
        control.advance(5.seconds)
        mockTimer.tick()
        handle.serialExecutor.executeAll()

        assert(session.status == Status.Closed)
        assert(rep.isInterrupted.isDefined)
        assert(handle.closedCalled)
        assert(serviceClosed)
      }
    }
  }

  // Even after Rdrain, a dispatch that outlives the deadline is interrupted.
  test("Drains will cancel dispatches after timeout when receiving Rdrain") {
    new Ctx {
      val rep = Promise[Response]
      override lazy val service = Service.constant(rep)

      Time.withCurrentTimeFrozen { control =>
        session.close(5.seconds)
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Busy)
        val Tdrain(1) = messageWriter.messages.dequeue()

        // Send one message (we haven't sent the Rdrain)
        sessionReceive(Tdispatch(2, Nil, Path.empty, Dtab.empty, data))
        // And drain
        sessionReceive(Rdrain(1))
        handle.serialExecutor.executeAll()

        // Hit the deadline
        control.advance(5.seconds)
        mockTimer.tick()
        handle.serialExecutor.executeAll()

        assert(session.status == Status.Closed)
        assert(rep.isInterrupted.isDefined)
        assert(handle.closedCalled)
        assert(serviceClosed)
      }
    }
  }

  // Once Rdrain has been received, any new dispatch is nacked.
  test("Nacks new requests after receiving Rdrain") {
    new Ctx {
      val rep = Promise[Response]
      override lazy val service = Service.constant(rep)

      Time.withCurrentTimeFrozen { control =>
        // need to put a dispatch in the queue
        sessionReceive(Tdispatch(2, Nil, Path.empty, Dtab.empty, data))
        session.close(5.seconds)
        handle.serialExecutor.executeAll()
        assert(session.status == Status.Busy)
        val Tdrain(1) = messageWriter.messages.dequeue()

        // Send Rdrain
        sessionReceive(Rdrain(1))

        // Try a dispatch that should be nacked
        sessionReceive(Tdispatch(3, Nil, Path.empty, Dtab.empty, data))
        handle.serialExecutor.executeAll()
        val RdispatchNack(3, Nil) = messageWriter.messages.dequeue()

        // Hit the deadline
        control.advance(5.seconds)
        mockTimer.tick()
        handle.serialExecutor.executeAll()

        assert(session.status == Status.Closed)
        assert(rep.isInterrupted.isDefined)
      }
    }
  }

  // Closing the underlying handle releases the service as well.
  test("Frees the service on handle close") {
    new Ctx {
      assert(session.status == Status.Open) // touch the lazy session to initiate it
      handle.onClosePromise.setDone()
      handle.serialExecutor.executeAll()

      // Service and handle should have been closed
      assert(handle.closedCalled)
      assert(serviceClosed)
      assert(session.status == Status.Closed)
    }
  }
}
| mkhq/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/exp/pushsession/MuxServerSessionTest.scala | Scala | apache-2.0 | 7,369 |
package org.juanitodread.broker.client
import com.newmotion.akka.rabbitmq.ChannelActor
import com.newmotion.akka.rabbitmq.ChannelMessage
import com.newmotion.akka.rabbitmq.ConnectionActor
import com.newmotion.akka.rabbitmq.CreateChannel
import com.rabbitmq.client.Channel
import com.rabbitmq.client.MessageProperties
import akka.actor.ActorRef
import akka.actor.ActorSystem
/**
 * Producer to RabbitMQ using Akka Actors. On first access this object
 * creates an ActorSystem, opens a RabbitMQ connection and registers a
 * "publisher" channel actor bound to the input queue.
 *
 * @author juan.sandoval
 */
object Producer {
  implicit val system = ActorSystem("rabbit-actor")

  val factory = RabbitConnection.getConnectionFactory
  val connectionActor = system.actorOf(ConnectionActor.props(factory), "rabbitmq")
  val queue = RabbitConnection.inputQueue
  val exchange = "amq.fanout"

  // this function will be called each time new channel received
  def setupChannel(channel: Channel, self: ActorRef) {
    // Durable, non-exclusive, non-auto-delete queue bound to the fanout exchange.
    channel.queueDeclare(queue, true, false, false, null)
    channel.queueBind(queue, exchange, "")
  }

  connectionActor ! CreateChannel(ChannelActor.props(setupChannel), Some("publisher"))

  /**
   * Publishes `message` as a persistent UTF-8 text message through the
   * "publisher" channel actor. `dropIfNoChannel = false` queues the publish
   * until a channel is available instead of dropping it.
   */
  def produce(message: String): Unit = {
    val publisher = system.actorSelection("/user/rabbitmq/publisher")

    def publish(channel: Channel) {
      channel.basicPublish(exchange, queue, MessageProperties.PERSISTENT_TEXT_PLAIN, toBytes(message))
    }
    publisher ! ChannelMessage(publish, dropIfNoChannel = false)
  }

  /** Decodes UTF-8 bytes into a String. */
  def fromBytes(x: Array[Byte]): String = new String(x, "UTF-8")

  /** Encodes a String as UTF-8 bytes. (The previous redundant `x.toString`
    * call on an already-String argument has been removed.) */
  def toBytes(x: String): Array[Byte] = x.getBytes("UTF-8")
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.clustering.{KMeans => MLlibKMeans}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}
private[clustering] case class TestRow(features: Vector)
/**
 * Helpers for the k-means clustering test suite.
 * (Translated from the original Chinese comment "聚类测试套件".)
 */
object KMeansSuite {
  /** Generates `rows` points of dimension `dim`: point i lies at
   *  (i % k, ..., i % k), yielding `k` distinct locations. */
  def generateKMeansData(sql: SQLContext, rows: Int, dim: Int, k: Int): DataFrame = {
    val sc = sql.sparkContext
    val rdd = sc.parallelize(1 to rows).map(i => Vectors.dense(Array.fill(dim)((i % k).toDouble)))
      .map(v => new TestRow(v))
    sql.createDataFrame(rdd)
  }
}
// All inline comments below are English translations of the original
// Chinese annotations; code is unchanged.
class KMeansSuite extends SparkFunSuite with MLlibTestSparkContext {

  final val k = 5
  @transient var dataset: DataFrame = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    dataset = KMeansSuite.generateKMeansData(sqlContext, 50, 3, k)
  }

  test("default parameters") {
    // default KMeans parameters
    val kmeans = new KMeans()
    // number of clusters
    assert(kmeans.getK === 2)
    // features column name
    assert(kmeans.getFeaturesCol === "features")
    // prediction column name
    assert(kmeans.getPredictionCol === "prediction")
    // maximum number of iterations
    assert(kmeans.getMaxIter === 20)
    // initialization mode: k-means||
    assert(kmeans.getInitMode === MLlibKMeans.K_MEANS_PARALLEL)
    // number of initialization steps
    assert(kmeans.getInitSteps === 5)
    // convergence tolerance of the iterative algorithm
    assert(kmeans.getTol === 1e-4)
  }

  test("set parameters") {// set parameters
    val kmeans = new KMeans()
      .setK(9)// number of clusters
      // set the features column
      .setFeaturesCol("test_feature")
      // set the prediction column
      .setPredictionCol("test_prediction")
      // maximum number of iterations
      .setMaxIter(33)
      // set the initialization mode
      .setInitMode(MLlibKMeans.RANDOM)
      // set the number of initialization steps
      .setInitSteps(3)
      // set the random seed
      .setSeed(123)
      // convergence tolerance of the iterative algorithm
      .setTol(1e-3)
    // number of clusters
    assert(kmeans.getK === 9)
    // features column
    assert(kmeans.getFeaturesCol === "test_feature")
    // prediction column
    assert(kmeans.getPredictionCol === "test_prediction")
    // maximum number of iterations
    assert(kmeans.getMaxIter === 33)
    // initialization mode
    assert(kmeans.getInitMode === MLlibKMeans.RANDOM)
    // initialization steps
    assert(kmeans.getInitSteps === 3)
    // seed
    assert(kmeans.getSeed === 123)
    // convergence tolerance
    assert(kmeans.getTol === 1e-3)
  }

  test("parameters validation") {// parameter validation
    intercept[IllegalArgumentException] {
      new KMeans().setK(1)// number of clusters
    }
    intercept[IllegalArgumentException] {
      new KMeans().setInitMode("no_such_a_mode")
    }
    intercept[IllegalArgumentException] {
      new KMeans().setInitSteps(0)
    }
  }

  test("fit & transform") {// fit & transform
    val predictionColName = "kmeans_prediction"
    // PredictionCol is the name of the prediction output column
    val kmeans = new KMeans().setK(k).setPredictionCol(predictionColName).setSeed(1)
    /** +-------------+
      | features|
      +-------------+
      |[1.0,1.0,1.0]|
      |[2.0,2.0,2.0]|
      |[3.0,3.0,3.0]|
      |[4.0,4.0,4.0]|
      |[0.0,0.0,0.0]|
      +-------------+*/
    dataset.show(5)
    // fit() turns the DataFrame into a Transformer (the trained model)
    val model = kmeans.fit(dataset)// returns a trained model
    //clusterCenters = Array([1.0,1.0,1.0], [4.0,4.0,4.0], [0.0,0.0,0.0], [3.0,3.0,3.0], [2.0,2.0,2.0])
    // cluster centers
    assert(model.clusterCenters.length === k)
    //println("dataset:"+dataset.collect().toSeq)
    // transform() maps the input DataFrame to another DataFrame with predictions
    val transformed = model.transform(dataset)// result as a DataFrame
    /**
      +-------------+-----------------+
      | features|kmeans_prediction|
      +-------------+-----------------+
      |[1.0,1.0,1.0]| 3|
      |[2.0,2.0,2.0]| 2|
      |[3.0,3.0,3.0]| 4|
      |[4.0,4.0,4.0]| 0|
      |[0.0,0.0,0.0]| 1|
      +-------------+-----------------+*/
    transformed.show(5)
    // expected columns
    val expectedColumns = Array("features", predictionColName)
    expectedColumns.foreach { column =>
      //println("column>>>>"+column)
      // check the result contains the features and kmeans_prediction columns
      assert(transformed.columns.contains(column))
    }
    // NOTE(review): `coll` is never used below — kept for parity; consider removing.
    val coll=transformed.select("features","kmeans_prediction").collect()
    val clusters = transformed.select(predictionColName).map(_.getInt(0)).distinct().collect().toSet
    assert(clusters.size === k)
    assert(clusters === Set(0, 1, 2, 3, 4))
  }
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/ml/clustering/KMeansSuite.scala | Scala | apache-2.0 | 5,561 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.shellbase.slack
import com.flyberrycapital.slack.Responses.PostMessageResponse
/**
* This provides utility to ShellCommands to post anything to Slack as part of the command
*/
/**
 * Mixed into ShellCommands to post messages to Slack as part of a command.
 */
trait PostToSlackHelper {
  protected val slackState: SlackState
  protected val username: String = System.getProperty("user.name", "unknown")
  protected val blacklistedUsernames: Set[String] = Set.empty

  /** True when a Slack client and at least one target channel are configured. */
  def slackMessagingConfigured: Boolean =
    slackState.slackClient.isDefined && slackState.slackChannels.nonEmpty

  /** Channel predicate; override to restrict which channels receive posts. */
  def slackChannelFilter(channelName: String): Boolean = true

  /**
   * Posts `msg` to every configured channel passing `slackChannelFilter`,
   * unless the current user is blacklisted. Returns the response of the last
   * post performed, if any.
   */
  def sendSlackMessageIfConfigured(msg: String, additionalOptions: Map[String, String] = Map.empty):
  Option[PostMessageResponse] = {
    if (blacklistedUsernames.contains(username)) {
      None
    } else {
      slackState.slackClient.flatMap { client =>
        val targetChannels = slackState.slackChannels.filter(slackChannelFilter)
        // Post to each channel in order, keeping only the final response.
        targetChannels.foldLeft(Option.empty[PostMessageResponse]) { (_, channel) =>
          Option(client.chat.postMessage(channel, msg, slackState.slackOptions ++ additionalOptions))
        }
      }
    }
  }
}
| SumoLogic/shellbase | shellbase-slack/src/main/scala/com/sumologic/shellbase/slack/PostToSlackHelper.scala | Scala | apache-2.0 | 1,898 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.CreateTopicsRequest
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConverters._
class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest {
  @Test
  def testValidCreateTopicsRequests() {
    val timeout = 10000
    // Generated assignments: the broker chooses replica placement from
    // partition count + replication factor (optionally with topic configs).
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("topic1" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout).build())
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("topic2" -> new CreateTopicsRequest.TopicDetails(1, 3.toShort)).asJava, timeout).build())
    val config3 = Map("min.insync.replicas" -> "2").asJava
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("topic3" -> new CreateTopicsRequest.TopicDetails(5, 2.toShort, config3)).asJava, timeout).build())
    // Manual assignments: the client supplies the partition -> replicas map.
    val assignments4 = replicaAssignmentToJava(Map(0 -> List(0)))
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("topic4" -> new CreateTopicsRequest.TopicDetails(assignments4)).asJava, timeout).build())
    val assignments5 = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(1, 0), 2 -> List(1, 2)))
    val config5 = Map("min.insync.replicas" -> "2").asJava
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("topic5" -> new CreateTopicsRequest.TopicDetails(assignments5, config5)).asJava, timeout).build())
    // Mixed: generated and manual assignments in a single request; the last
    // variant also sets validateOnly = true.
    val assignments8 = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(1, 0), 2 -> List(1, 2)))
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map(
      "topic6" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort),
      "topic7" -> new CreateTopicsRequest.TopicDetails(5, 2.toShort),
      "topic8" -> new CreateTopicsRequest.TopicDetails(assignments8)).asJava, timeout).build()
    )
    validateValidCreateTopicsRequests(new CreateTopicsRequest.Builder(Map(
      "topic9" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort),
      "topic10" -> new CreateTopicsRequest.TopicDetails(5, 2.toShort),
      "topic11" -> new CreateTopicsRequest.TopicDetails(assignments8)).asJava, timeout, true).build()
    )
  }
@Test
def testErrorCreateTopicsRequests() {
val timeout = 10000
val existingTopic = "existing-topic"
createTopic(existingTopic, 1, 1)
// Basic
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map(existingTopic -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout).build(),
Map(existingTopic -> error(Errors.TOPIC_ALREADY_EXISTS, Some("Topic 'existing-topic' already exists."))))
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-partitions" -> new CreateTopicsRequest.TopicDetails(-1, 1.toShort)).asJava, timeout).build(),
Map("error-partitions" -> error(Errors.INVALID_PARTITIONS)), checkErrorMessage = false)
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-replication" -> new CreateTopicsRequest.TopicDetails(1, (numBrokers + 1).toShort)).asJava, timeout).build(),
Map("error-replication" -> error(Errors.INVALID_REPLICATION_FACTOR)), checkErrorMessage = false)
val invalidConfig = Map("not.a.property" -> "error").asJava
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-config" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort, invalidConfig)).asJava, timeout).build(),
Map("error-config" -> error(Errors.INVALID_CONFIG)), checkErrorMessage = false)
val invalidAssignments = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(0)))
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-assignment" -> new CreateTopicsRequest.TopicDetails(invalidAssignments)).asJava, timeout).build(),
Map("error-assignment" -> error(Errors.INVALID_REPLICA_ASSIGNMENT)), checkErrorMessage = false)
// Partial
validateErrorCreateTopicsRequests(
new CreateTopicsRequest.Builder(Map(
existingTopic -> new CreateTopicsRequest.TopicDetails(1, 1.toShort),
"partial-partitions" -> new CreateTopicsRequest.TopicDetails(-1, 1.toShort),
"partial-replication" -> new CreateTopicsRequest.TopicDetails(1, (numBrokers + 1).toShort),
"partial-assignment" -> new CreateTopicsRequest.TopicDetails(invalidAssignments),
"partial-none" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout).build(),
Map(
existingTopic -> error(Errors.TOPIC_ALREADY_EXISTS),
"partial-partitions" -> error(Errors.INVALID_PARTITIONS),
"partial-replication" -> error(Errors.INVALID_REPLICATION_FACTOR),
"partial-assignment" -> error(Errors.INVALID_REPLICA_ASSIGNMENT),
"partial-none" -> error(Errors.NONE)
), checkErrorMessage = false
)
validateTopicExists("partial-none")
// Timeout
// We don't expect a request to ever complete within 1ms. A timeout of 1 ms allows us to test the purgatory timeout logic.
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-timeout" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, 1).build(),
Map("error-timeout" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-timeout-zero" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, 0).build(),
Map("error-timeout-zero" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
// Negative timeouts are treated the same as 0
validateErrorCreateTopicsRequests(new CreateTopicsRequest.Builder(Map("error-timeout-negative" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, -1).build(),
Map("error-timeout-negative" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
// The topics should still get created eventually
TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout", 0)
TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout-zero", 0)
TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout-negative", 0)
validateTopicExists("error-timeout")
validateTopicExists("error-timeout-zero")
validateTopicExists("error-timeout-negative")
}
@Test
def testInvalidCreateTopicsRequests() {
// Duplicate
val singleRequest = new CreateTopicsRequest.Builder(Map("duplicate-topic" ->
new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000).build()
validateErrorCreateTopicsRequests(singleRequest, Map("duplicate-topic" -> error(Errors.INVALID_REQUEST,
Some("""Create topics request from client `client-id` contains multiple entries for the following topics: duplicate-topic"""))),
requestStruct = Some(toStructWithDuplicateFirstTopic(singleRequest)))
// Duplicate Partial with validateOnly
val doubleRequestValidateOnly = new CreateTopicsRequest.Builder(Map(
"duplicate-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort),
"other-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000, true).build()
validateErrorCreateTopicsRequests(doubleRequestValidateOnly, Map(
"duplicate-topic" -> error(Errors.INVALID_REQUEST),
"other-topic" -> error(Errors.NONE)), checkErrorMessage = false,
requestStruct = Some(toStructWithDuplicateFirstTopic(doubleRequestValidateOnly)))
// Duplicate Partial
val doubleRequest = new CreateTopicsRequest.Builder(Map(
"duplicate-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort),
"other-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000).build()
validateErrorCreateTopicsRequests(doubleRequest, Map(
"duplicate-topic" -> error(Errors.INVALID_REQUEST),
"other-topic" -> error(Errors.NONE)), checkErrorMessage = false,
requestStruct = Some(toStructWithDuplicateFirstTopic(doubleRequest)))
// Partitions/ReplicationFactor and ReplicaAssignment
val assignments = replicaAssignmentToJava(Map(0 -> List(0)))
val assignmentRequest = new CreateTopicsRequest.Builder(Map("bad-args-topic" ->
new CreateTopicsRequest.TopicDetails(assignments)).asJava, 1000).build()
val badArgumentsRequest = addPartitionsAndReplicationFactorToFirstTopic(assignmentRequest)
validateErrorCreateTopicsRequests(badArgumentsRequest, Map("bad-args-topic" -> error(Errors.INVALID_REQUEST)),
checkErrorMessage = false)
// Partitions/ReplicationFactor and ReplicaAssignment with validateOnly
val assignmentRequestValidateOnly = new CreateTopicsRequest.Builder(Map("bad-args-topic" ->
new CreateTopicsRequest.TopicDetails(assignments)).asJava, 1000, true).build()
val badArgumentsRequestValidateOnly = addPartitionsAndReplicationFactorToFirstTopic(assignmentRequestValidateOnly)
validateErrorCreateTopicsRequests(badArgumentsRequestValidateOnly, Map("bad-args-topic" -> error(Errors.INVALID_REQUEST)),
checkErrorMessage = false)
}
@Test
def testNotController() {
val request = new CreateTopicsRequest.Builder(Map("topic1" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000).build()
val response = sendCreateTopicRequest(request, notControllerSocketServer)
val error = response.errors.asScala.head._2.error
assertEquals("Expected controller error when routed incorrectly", Errors.NOT_CONTROLLER, error)
}
}
| ollie314/kafka | core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala | Scala | apache-2.0 | 10,396 |
/*
* Copyright (C) 2017 HAT Data Exchange Ltd
* SPDX-License-Identifier: AGPL-3.0
*
* This file is part of the Hub of All Things project (HAT).
*
* HAT is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation, version 3 of
* the License.
*
* HAT is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
* Written by Andrius Aucinas <andrius.aucinas@hatdex.org>
* 2 / 2017
*/
package org.hatdex.hat.modules
import akka.actor.ActorSystem
import com.google.inject.{ AbstractModule, Provides }
import net.codingwell.scalaguice.ScalaModule
import org.hatdex.hat.api.service.RemoteExecutionContext
import org.hatdex.hat.api.service.applications.{
TrustedApplicationProvider,
TrustedApplicationProviderDex
}
import org.hatdex.hat.resourceManagement._
import org.hatdex.hat.resourceManagement.actors.{
HatServerActor,
HatServerProviderActor
}
import play.api.cache.AsyncCacheApi
import play.api.cache.ehcache._
import play.api.inject.ApplicationLifecycle
import play.api.libs.concurrent.AkkaGuiceSupport
import play.api.{ Configuration, Environment }
/**
 * Guice module wiring up the actors, provider implementations and the cache
 * used to resolve individual HAT server instances.
 */
class HatServerProviderModule
    extends AbstractModule
    with ScalaModule
    with AkkaGuiceSupport {

  override def configure(): Unit = {
    // Actor resolving HAT servers, plus a factory for the per-server actors.
    bindActor[HatServerProviderActor]("hatServerProviderActor")
    bindActorFactory[HatServerActor, HatServerActor.Factory]
    // Milliner-backed implementations for database and key resolution.
    bind[HatDatabaseProvider].to[HatDatabaseProviderMilliner]
    bind[HatKeyProvider].to[HatKeyProviderMilliner]
    bind[HatServerProvider].to[HatServerProviderImpl]
    bind[TrustedApplicationProvider].to[TrustedApplicationProviderDex]
    ()
  }

  /**
   * Provides the named "hatserver-cache" as an EhCache-backed [[AsyncCacheApi]],
   * configured from the "hat.serverProvider" configuration subtree.
   */
  @Provides @play.cache.NamedCache("hatserver-cache")
  def provideHatServerCache(
      env: Environment,
      config: Configuration,
      lifecycle: ApplicationLifecycle,
      system: ActorSystem
  )(implicit ec: RemoteExecutionContext
  ): AsyncCacheApi = {
    // Anonymous components instance adapts the injected pieces to what EhCache needs.
    val cacheComponents = new EhCacheComponents {
      def environment: Environment = env
      def configuration: Configuration =
        config.get[Configuration]("hat.serverProvider")
      def applicationLifecycle: ApplicationLifecycle = lifecycle
      def actorSystem: ActorSystem = system
      implicit def executionContext = ec
    }
    cacheComponents.cacheApi("hatserver-cache", create = true)
  }
}
| Hub-of-all-Things/HAT2.0 | hat/app/org/hatdex/hat/modules/HatServerProviderModule.scala | Scala | agpl-3.0 | 2,716 |
package woshilaiceshide.sserver.benchmark.rapidoid
import org.rapidoid.net.ServerBuilder
/**
 * Benchmark entry point: starts a rapidoid HTTP server on the interface and port
 * supplied by the shared ServerProperty mixin, with two worker threads.
 */
object RapidoidHttpServer extends App with woshilaiceshide.sserver.benchmark.ServerProperty {

  val builder = new ServerBuilder()

  builder
    .protocol(new SimpleHttpProtocol())
    .address(interface)
    .port(port)
    .workers(2)
    .build()
    .start()
}
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.models.gp
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.algebra.PartitionedVector
import io.github.mandar2812.dynaml.evaluation.RegressionMetrics
import io.github.mandar2812.dynaml.kernels.{DiracKernel, LocalScalarKernel, CovarianceFunction => CovFunc}
import io.github.mandar2812.dynaml.pipes.{DataPipe, StreamDataPipe}
/**
*
* @author mandar2812
* date: 17/11/15.
*
* Class representing Gaussian Process regression models
*
* y = f(x) + e
* f(x) ~ GP(0, cov(X,X))
* e|f(x) ~ N(f(x), noise(X,X))
*
* Constructor Parameters:
*
* @param cov The covariance/kernel function
* as an appropriate subtype of [[CovFunc]]
*
* @param noise The kernel function describing the
* noise model, defaults to [[DiracKernel]].
*
* @param trainingdata The data structure containing the
* training data i.e. [[Seq]] of [[Tuple2]]
* of the form (features, target)
*
*/
class GPRegression(
  cov: LocalScalarKernel[DenseVector[Double]],
  noise: LocalScalarKernel[DenseVector[Double]] = new DiracKernel(1.0),
  trainingdata: Seq[(DenseVector[Double], Double)],
  meanFunc: DataPipe[DenseVector[Double], Double] = DataPipe(_ => 0.0)) extends
  AbstractGPRegressionModel[Seq[(DenseVector[Double], Double)],
    DenseVector[Double]](cov, noise, trainingdata,
    trainingdata.length, meanFunc) {

  /**
   * Setting a validation set is optional in case
   * one wants to calculate joint marginal likelihood of the
   * training and validation data as the objective function for
   * hyper-parameter optimization. While retaining just the
   * training data set for finally calculating [[predictiveDistribution]]
   * during deployment.
   * */
  protected var validationSet: Seq[(DenseVector[Double], Double)] = Seq()

  /**
   * Accessor method for [[validationSet]]
   * */
  def _validationSet = validationSet

  /**
   * Set the validation data, optionally append it to the existing validation data
   *
   * @param v data
   * @param append Defaults to false
   * */
  def validationSet_(v: Seq[(DenseVector[Double], Double)], append: Boolean = false) =
    if(append) validationSet ++= v else validationSet = v

  protected lazy val validationDataFeatures = validationSet.map(_._1)

  // Fix: the partitioned label vector holds the validation targets, so it must be
  // sized by the validation set (the previous code passed trainingData.length).
  protected lazy val validationDataLabels = PartitionedVector(
    validationSet.toStream.map(_._2),
    validationSet.length.toLong, _blockSize
  )

  /**
   * Assigning a value to the [[processTargets]] data pipe
   * can be useful in cases where we need to
   * perform operations such as de-normalizing
   * the predicted and actual targets to their original
   * scales.
   *
   * */
  @deprecated("scheduled to be removed by DynaML 2.x")
  var processTargets: DataPipe[
    Stream[(Double, Double)],
    Stream[(Double, Double)]] =
    StreamDataPipe((predictionCouple: (Double, Double)) =>
      identity(predictionCouple))

  /**
   * If one uses a non empty validation set, then
   * the user can set a custom function of
   * the validation predictions and targets as
   * the objective function for the hyper-parameter
   * optimization routine.
   *
   * Currently this defaults to RMSE calculated
   * on the validation data.
   * */
  @deprecated("scheduled to be removed by DynaML 2.x")
  var scoresToEnergy: DataPipe[Stream[(Double, Double)], Double] =
    DataPipe((scoresAndLabels) => {
      val metrics = new RegressionMetrics(
        scoresAndLabels.toList,
        scoresAndLabels.length
      )
      metrics.rmse
    })

  /**
   * Convert from the underlying data structure to
   * Seq[(I, Y)] where I is the index set of the GP
   * and Y is the value/label type.
   **/
  override def dataAsSeq(data: Seq[(DenseVector[Double], Double)]) = data

  /**
   * Calculates the energy of the configuration, required
   * for global optimization routines.
   *
   * Defaults to the base implementation in
   * [[io.github.mandar2812.dynaml.optimization.GloballyOptimizable]]
   * in case a validation set is not specified
   * through the [[validationSet]] variable.
   *
   * @param h The value of the hyper-parameters in the configuration space
   * @param options Optional parameters about configuration
   * @return Configuration Energy E(h)
   * */
  override def energy(h: Map[String, Double],
    options: Map[String, String]): Double = validationSet.length match {
    case 0 => super.energy(h, options)
    // With a validation set: evaluate over training and validation data jointly.
    case _ => super.calculateEnergyPipe(h, options)(
      trainingData ++ validationDataFeatures,
      PartitionedVector.vertcat(trainingDataLabels, validationDataLabels)
    )
  }

  /**
   * Calculates the gradient energy of the configuration and
   * subtracts this from the current value of h to yield a new
   * hyper-parameter configuration.
   *
   * Over ride this function if you aim to implement a gradient based
   * hyper-parameter optimization routine like ML-II
   *
   * @param h The value of the hyper-parameters in the configuration space
   * @return Gradient of the objective function (marginal likelihood) as a Map
   **/
  override def gradEnergy(h: Map[String, Double]) = validationSet.length match {
    case 0 => super.gradEnergy(h)
    // With a validation set: gradient over training and validation data jointly.
    case _ => super.calculateGradEnergyPipe(h)(
      trainingData ++ validationDataFeatures,
      PartitionedVector.vertcat(trainingDataLabels, validationDataLabels)
    )
  }
}
| transcendent-ai-labs/DynaML | dynaml-core/src/main/scala/io/github/mandar2812/dynaml/models/gp/GPRegression.scala | Scala | apache-2.0 | 6,242 |
package io.ssc.angles.pipeline
import com.twitter.util.Config.intoList
import io.ssc.angles.pipeline.data.Storage
import io.ssc.angles.pipeline.filters.{ArticleFilter, GermanFilter}
import io.ssc.data.CrawledWebsite
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import scalikejdbc.DB

import scala.concurrent.Future
import scala.util.control.NonFatal
/**
 * Pipeline step that flags tweets pointing at German-language news articles.
 *
 * Created by niklas on 08.02.15.
 */
class MarkGermanTweets extends Step {

  val log = LoggerFactory.getLogger(classOf[MarkGermanTweets])

  /**
   * Looks at all websites crawled since the given instant, keeps those passing both
   * the German-language and the article filter, and marks the corresponding tweets
   * as "to follow" inside a single transaction (rolled back on failure).
   *
   * @param since only websites crawled after this point in time are considered
   */
  override def execute(since: DateTime): Unit = {
    val websites = Storage.unmarkedWebsites(since)
    val websitesWithMetadata = websites map { website =>
      website -> Storage.metadataFor(website.id)
    }

    // Tweet ids for websites that pass both filters. statusOfWebsite may be absent
    // for a website; such entries are skipped instead of crashing on Option.get.
    val germanTweets: Set[Long] = websitesWithMetadata
      .filter { case (website, metadata) => GermanFilter.passes(website, metadata) }
      .filter { case (website, metadata) => ArticleFilter.passes(website, metadata) }
      .flatMap { case (website: CrawledWebsite, _) =>
        Storage.statusOfWebsite(website.id).map(_.getId)
      }
      .toSet

    log.info("Marking tweets with German news articles.")
    implicit val db = DB(Storage.getConn())
    try {
      db.begin()
      germanTweets.foreach(id => Storage.markTweetToFollow(id))
      db.commit()
    } catch {
      // Recover only from non-fatal errors, and log instead of swallowing silently.
      case NonFatal(e) =>
        log.error("Marking tweets failed, rolling back transaction", e)
        db.rollbackIfActive()
    }
    log.info("Marked {} tweets", germanTweets.size)

    Storage.markUninterestingTweets
  }
}
| jhendess/angles | src/main/scala/io/ssc/angles/pipeline/MarkGermanTweets.scala | Scala | gpl-3.0 | 1,477 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator}
import org.apache.spark.sql.types._
/**
* This is a helper class to generate an append-only row-based hash map that can act as a 'cache'
* for extremely fast key-value lookups while evaluating aggregates (and fall back to the
* `BytesToBytesMap` if a given key isn't found). This is 'codegened' in HashAggregate to speed
* up aggregates w/ key.
*
* We also have VectorizedHashMapGenerator, which generates a append-only vectorized hash map.
* We choose one of the two as the 1st level, fast hash map during aggregation.
*
* NOTE: This row-based hash map currently doesn't support nullable keys and falls back to the
* `BytesToBytesMap` to store them.
*/
class RowBasedHashMapGenerator(
    ctx: CodegenContext,
    aggregateExpressions: Seq[AggregateExpression],
    generatedClassName: String,
    groupingKeySchema: StructType,
    bufferSchema: StructType)
  extends HashMapGenerator (ctx, aggregateExpressions, generatedClassName,
    groupingKeySchema, bufferSchema) {

  // Emits the fields and constructor of the generated map class: an open-addressed
  // int[] bucket array over a RowBasedKeyValueBatch that stores the actual rows.
  override protected def initializeAggregateHashMap(): String = {
    // Java expression building a StructType equivalent to the grouping key schema.
    val generatedKeySchema: String =
      s"new org.apache.spark.sql.types.StructType()" +
        groupingKeySchema.map { key =>
          val keyName = ctx.addReferenceObj("keyName", key.name)
          key.dataType match {
            case d: DecimalType =>
              s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
                 |${d.precision}, ${d.scale}))""".stripMargin
            case _ =>
              s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
          }
        }.mkString("\\n").concat(";")

    // Java expression building a StructType equivalent to the aggregation buffer schema.
    val generatedValueSchema: String =
      s"new org.apache.spark.sql.types.StructType()" +
        bufferSchema.map { key =>
          val keyName = ctx.addReferenceObj("keyName", key.name)
          key.dataType match {
            case d: DecimalType =>
              s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
                 |${d.precision}, ${d.scale}))""".stripMargin
            case _ =>
              s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
          }
        }.mkString("\\n").concat(";")

    s"""
       | private org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch batch;
       | private int[] buckets;
       | private int capacity = 1 << 16;
       | private double loadFactor = 0.5;
       | private int numBuckets = (int) (capacity / loadFactor);
       | private int maxSteps = 2;
       | private int numRows = 0;
       | private org.apache.spark.sql.types.StructType keySchema = $generatedKeySchema
       | private org.apache.spark.sql.types.StructType valueSchema = $generatedValueSchema
       | private Object emptyVBase;
       | private long emptyVOff;
       | private int emptyVLen;
       | private boolean isBatchFull = false;
       |
       |
       | public $generatedClassName(
       | org.apache.spark.memory.TaskMemoryManager taskMemoryManager,
       | InternalRow emptyAggregationBuffer) {
       | batch = org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch
       | .allocate(keySchema, valueSchema, taskMemoryManager, capacity);
       |
       | final UnsafeProjection valueProjection = UnsafeProjection.create(valueSchema);
       | final byte[] emptyBuffer = valueProjection.apply(emptyAggregationBuffer).getBytes();
       |
       | emptyVBase = emptyBuffer;
       | emptyVOff = Platform.BYTE_ARRAY_OFFSET;
       | emptyVLen = emptyBuffer.length;
       |
       | buckets = new int[numBuckets];
       | java.util.Arrays.fill(buckets, -1);
       | }
     """.stripMargin
  }

  /**
   * Generates a method that returns true if the group-by keys exist at a given index in the
   * associated [[org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch]].
   *
   */
  protected def generateEquals(): String = {

    // One genEqual comparison per grouping key column, joined with "&&".
    def genEqualsForKeys(groupingKeys: Seq[Buffer]): String = {
      groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
        s"""(${ctx.genEqual(key.dataType, CodeGenerator.getValue("row",
          key.dataType, ordinal.toString()), key.name)})"""
      }.mkString(" && ")
    }

    s"""
       |private boolean equals(int idx, $groupingKeySignature) {
       | UnsafeRow row = batch.getKeyRow(buckets[idx]);
       | return ${genEqualsForKeys(groupingKeys)};
       |}
     """.stripMargin
  }

  /**
   * Generates a method that returns a
   * [[org.apache.spark.sql.catalyst.expressions.UnsafeRow]] which keeps track of the
   * aggregate value(s) for a given set of keys. If the corresponding row doesn't exist, the
   * generated method adds the corresponding row in the associated
   * [[org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch]].
   *
   */
  protected def generateFindOrInsert(): String = {

    // Count of variable-length key fields; used below to size the initial
    // BufferHolder (32 bytes reserved per variable-length field).
    val numVarLenFields = groupingKeys.map(_.dataType).count {
      case dt if UnsafeRow.isFixedLength(dt) => false
      // TODO: consider large decimal and interval type
      case _ => true
    }

    // Java statements writing each grouping key into the freshly created UnsafeRow;
    // only decimal, string and primitive key types are supported here.
    val createUnsafeRowForKey = groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
      key.dataType match {
        case t: DecimalType =>
          s"agg_rowWriter.write(${ordinal}, ${key.name}, ${t.precision}, ${t.scale})"
        case t: DataType =>
          if (!t.isInstanceOf[StringType] && !CodeGenerator.isPrimitiveType(t)) {
            throw new IllegalArgumentException(s"cannot generate code for unsupported type: $t")
          }
          s"agg_rowWriter.write(${ordinal}, ${key.name})"
      }
    }.mkString(";\\n")

    s"""
       |public org.apache.spark.sql.catalyst.expressions.UnsafeRow findOrInsert(${
      groupingKeySignature}) {
       | long h = hash(${groupingKeys.map(_.name).mkString(", ")});
       | int step = 0;
       | int idx = (int) h & (numBuckets - 1);
       | while (step < maxSteps) {
       | // Return bucket index if it's either an empty slot or already contains the key
       | if (buckets[idx] == -1) {
       | if (numRows < capacity && !isBatchFull) {
       | // creating the unsafe for new entry
       | UnsafeRow agg_result = new UnsafeRow(${groupingKeySchema.length});
       | org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder
       | = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result,
       | ${numVarLenFields * 32});
       | org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter
       | = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(
       | agg_holder,
       | ${groupingKeySchema.length});
       | agg_holder.reset(); //TODO: investigate if reset or zeroout are actually needed
       | agg_rowWriter.zeroOutNullBytes();
       | ${createUnsafeRowForKey};
       | agg_result.setTotalSize(agg_holder.totalSize());
       | Object kbase = agg_result.getBaseObject();
       | long koff = agg_result.getBaseOffset();
       | int klen = agg_result.getSizeInBytes();
       |
       | UnsafeRow vRow
       | = batch.appendRow(kbase, koff, klen, emptyVBase, emptyVOff, emptyVLen);
       | if (vRow == null) {
       | isBatchFull = true;
       | } else {
       | buckets[idx] = numRows++;
       | }
       | return vRow;
       | } else {
       | // No more space
       | return null;
       | }
       | } else if (equals(idx, ${groupingKeys.map(_.name).mkString(", ")})) {
       | return batch.getValueRow(buckets[idx]);
       | }
       | idx = (idx + 1) & (numBuckets - 1);
       | step++;
       | }
       | // Didn't find it
       | return null;
       |}
     """.stripMargin
  }

  // Exposes the batch's iterator over all inserted key/value row pairs.
  protected def generateRowIterator(): String = {
    s"""
       |public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
       | return batch.rowIterator();
       |}
     """.stripMargin
  }
}
| ioana-delaney/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala | Scala | apache-2.0 | 9,249 |
package org.atnos.eff
import Eff._
import cats.syntax.all._
import Interpret._
import cats.Traverse
/**
 * Effect for computations possibly returning several values
 */
trait ListEffect extends
  ListCreation with
  ListInterpretation

// Module instance exposing both the creation and interpretation operations.
object ListEffect extends ListEffect
/** Constructors for the list effect. */
trait ListCreation {

  type _List[R] = List <= R
  type _list[R] = List |= R

  /** create a list effect producing no values at all */
  def empty[R :_list, A]: Eff[R, A] =
    fromList(Nil)

  /** create a list effect producing exactly one value */
  def singleton[R :_list, A](a: A): Eff[R, A] =
    fromList(a :: Nil)

  /** create a list effect producing the given values */
  def values[R :_list, A](as: A*): Eff[R, A] =
    fromList(as.toList)

  /** lift a plain list of values into the list effect */
  def fromList[R :_list, A](as: List[A]): Eff[R, A] =
    send[List, R, A](as)
}

object ListCreation extends ListCreation
trait ListInterpretation {

  /** run an effect stack starting with a list effect */
  def runList[R, U, A](effect: Eff[R, A])(implicit m: Member.Aux[List, R, U]): Eff[U, List[A]] =
    runInterpreter(effect)(new Interpreter[List, U, A, List[A]] {

      // A pure value yields a single-element result list.
      def onPure(a: A): Eff[U, List[A]] =
        Eff.pure(List(a))

      // Run the continuation for every value in the list and concatenate the results.
      def onEffect[X](xs: List[X], continuation: Continuation[U, X, List[A]]): Eff[U, List[A]] =
        xs.traverse(continuation).map(_.flatten)

      // A trailing (last) list effect produces no further result.
      def onLastEffect[X](x: List[X], continuation: Continuation[U, X, Unit]): Eff[U, Unit] =
        Eff.pure(())

      // Applicative case: sequence the container of lists into a list of containers,
      // then feed each container through the continuation, concatenating all results.
      def onApplicativeEffect[X, T[_] : Traverse](xs: T[List[X]], continuation: Continuation[U, T[X], List[A]]): Eff[U, List[A]] = {
        val sequenced: List[T[X]] = xs.sequence
        sequenced match {
          // Empty: notify the continuation that no value is available, return no results.
          case Nil => continuation.runOnNone >> Eff.pure(Nil)
          case tx :: rest => Eff.impure[U, T[X], List[A]](tx, Continuation.lift((tx1: T[X]) =>
            continuation(tx1).flatMap(la => rest.map(continuation).sequence.map(ls => la ++ ls.flatten))))
        }
      }
    })
}

object ListInterpretation extends ListInterpretation
| atnos-org/eff | shared/src/main/scala/org/atnos/eff/ListEffect.scala | Scala | mit | 2,016 |
/*
* Copyright ASCII Soup (Nils Luxton) (c) 2016.
*
* GNU GPL v3 - See LICENSE.txt for details
*/
package com.asciisoup.advent.DaySix
import scala.io.Source
/**
 * Day 6, part 2: for every character position (column) across all received lines,
 * the corrected message uses the LEAST frequent character in that column.
 */
object Puzzle2 extends App {

  val input = Source.fromInputStream(getClass.getResourceAsStream("/day_six.txt"))

  // One column per character position across all message lines.
  val columns = input.getLines().map(_.toList).toSeq.transpose

  // Pick the least frequent character of each column and join them into the message.
  val message = columns.map { column =>
    column.groupBy(identity).mapValues(_.size).minBy(_._2)._1
  }.mkString

  println(message)

  input.close()
}
| ascii-soup/AdventOfCode2016 | src/main/scala/com/asciisoup/advent/DaySix/Puzzle2.scala | Scala | gpl-3.0 | 903 |
package mesosphere.marathon
package api.v2
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.container.{AsyncResponse, Suspended}
import javax.ws.rs.core.{Context, MediaType}
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.AuthResource
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.plugin.auth.{Authenticator, Authorizer, ViewRunSpec}
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state.Timestamp
import scala.async.Async.{await, async}
import scala.concurrent.ExecutionContext
/**
 * JSON REST resource exposing the stored versions of an app: the list of known
 * version timestamps and the full app definition at a specific version.
 * All endpoints authenticate the request and require ViewRunSpec authorization.
 */
@Produces(Array(MediaType.APPLICATION_JSON))
@Consumes(Array(MediaType.APPLICATION_JSON))
class AppVersionsResource(
    service: MarathonSchedulerService,
    groupManager: GroupManager,
    val authenticator: Authenticator,
    val authorizer: Authorizer,
    val config: MarathonConf)(implicit val executionContext: ExecutionContext) extends AuthResource {

  // GET /: lists all known version timestamps of the app as {"versions": [...]}.
  @GET
  def index(
      @PathParam("appId") appId: String,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
    async {
      implicit val identity = await(authenticatedAsync(req))
      val id = appId.toRootPath
      // Responds with "unknown app" when the app does not exist or access is denied.
      withAuthorization(ViewRunSpec, groupManager.app(id), unknownApp(id)) { _ =>
        ok(jsonObjString("versions" -> service.listAppVersions(id)))
      }
    }
  }

  // GET /{version}: returns the app definition as it was at the given version timestamp.
  @GET
  @Path("{version}")
  def show(
      @PathParam("appId") appId: String,
      @PathParam("version") version: String,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
    async {
      implicit val identity = await(authenticatedAsync(req))
      val id = appId.toRootPath
      val timestamp = Timestamp(version)
      withAuthorization(ViewRunSpec, service.getApp(id, timestamp), unknownApp(id, Some(timestamp))) { app =>
        ok(jsonString(app))
      }
    }
  }
}
| gsantovena/marathon | src/main/scala/mesosphere/marathon/api/v2/AppVersionsResource.scala | Scala | apache-2.0 | 1,966 |
package io.youi.path
import io.youi.drawable.Context
/**
 * Path action that opens a new path on the drawing context. The position and
 * scale arguments are required by the `draw` signature but are not used here.
 */
object BeginPath extends PathAction {

  override def draw(context: Context, x: Double, y: Double, scaleX: Double, scaleY: Double): Unit = {
    context.begin()
  }

  override def toString: String = "BeginPath"
}
| outr/youi | gui/src/main/scala/io/youi/path/BeginPath.scala | Scala | mit | 258 |
package org.jetbrains.plugins.scala.lang.psi.types
import com.intellij.openapi.util.Ref
import org.jetbrains.plugins.scala.lang.psi.types.ScUndefinedSubstitutor._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.project.ProjectContext
/**
 * Accumulates lower and upper bound constraints for undefined (not yet inferred)
 * type parameters, identified by (name, id) pairs, and can attempt to solve the
 * collected constraints into a concrete [[ScSubstitutor]].
 */
sealed trait ScUndefinedSubstitutor {

  /** Record `_lower` as a lower bound for the type parameter identified by `name`. */
  def addLower(name: Name, _lower: ScType, additional: Boolean = false, variance: Variance = Contravariant): ScUndefinedSubstitutor

  /** Record `_upper` as an upper bound for the type parameter identified by `name`. */
  def addUpper(name: Name, _upper: ScType, additional: Boolean = false, variance: Variance = Covariant): ScUndefinedSubstitutor

  /** Solve the constraints, discarding the computed bound maps. */
  def getSubstitutor: Option[ScSubstitutor] = getSubstitutorWithBounds(nonable = true).map(_._1)

  /** Keep only the constraint entries accepted by `fun`. */
  def filter(fun: (((String, Long), Set[ScType])) => Boolean): ScUndefinedSubstitutor

  /** Merge the constraints collected in `added` into this substitutor. */
  def addSubst(added: ScUndefinedSubstitutor): ScUndefinedSubstitutor

  /** Alias for [[addSubst]]. */
  def +(subst: ScUndefinedSubstitutor): ScUndefinedSubstitutor = addSubst(subst)

  def isEmpty: Boolean

  /** All type parameter identifiers with at least one recorded constraint. */
  def names: Set[Name]

  //subst, lowers, uppers
  def getSubstitutorWithBounds(nonable: Boolean): Option[(ScSubstitutor, Map[Name, ScType], Map[Name, ScType])]
}
object ScUndefinedSubstitutor {

  // A type parameter is identified by its name together with the id of its owner.
  type Name = (String, Long)
  type SubstitutorWithBounds = (ScSubstitutor, Map[Name, ScType], Map[Name, ScType])

  /** An empty substitutor with no constraints. */
  def apply()(implicit project: ProjectContext): ScUndefinedSubstitutor = {
    ScUndefinedSubstitutorImpl(Map.empty, Map.empty, Set.empty)
  }

  /**
   * Combines several alternative substitutors into one, dropping empty ones and
   * flattening nested multi-substitutors so the result is at most one level deep.
   */
  def multi(subs: Set[ScUndefinedSubstitutor])(implicit project: ProjectContext): ScUndefinedSubstitutor = {
    val flatten = subs.filterNot(_.isEmpty).flatMap {
      case m: ScMultiUndefinedSubstitutor => m.subs
      case s: ScUndefinedSubstitutorImpl => Set(s)
    }
    flatten.size match {
      case 0 => ScUndefinedSubstitutor()
      case 1 => flatten.head
      case _ => ScMultiUndefinedSubstitutor(flatten)
    }
  }

  // Normalizes a raw lower bound before it is recorded: abstract types and
  // existential arguments are replaced by one of their own bounds depending on
  // the variance of the position in which they occur.
  private[types] def computeLower(rawLower: ScType, v: Variance): ScType = {
    var index = 0
    val updated = rawLower match {
      case ScAbstractType(_, absLower, _) =>
        absLower //upper will be added separately
      case _ =>
        // The Set[String] payload tracks names bound by enclosing existentials,
        // so bound existential arguments are left untouched.
        rawLower.recursiveVarianceUpdateModifiable[Set[String]](Set.empty, {
          case (ScAbstractType(_, absLower, upper), variance, data) =>
            variance match {
              case Contravariant => (true, absLower, data)
              case Covariant => (true, upper, data)
              case Invariant => (true, absLower /*ScExistentialArgument(s"_$$${index += 1; index}", Nil, absLower, upper)*/ , data) //todo: why this is right?
            }
          case (ScExistentialArgument(nm, _, skoLower, upper), variance, data) if !data.contains(nm) =>
            variance match {
              case Contravariant => (true, skoLower, data)
              case Covariant => (true, upper, data)
              case Invariant => (true, ScExistentialArgument(s"_$$${index += 1; index}", Nil, skoLower, upper), data)
            }
          case (ex: ScExistentialType, _, data) => (false, ex, data ++ ex.boundNames)
          case (tp, _, data) => (false, tp, data)
        }, v, revertVariances = true)
    }
    updated.unpackedType
  }

  // Mirror image of computeLower for upper bounds.
  private[types] def computeUpper(rawUpper: ScType, v: Variance): ScType = {
    import rawUpper.projectContext

    var index = 0
    val updated = rawUpper match {
      case ScAbstractType(_, _, absUpper) if v == Invariant =>
        absUpper // lower will be added separately
      case ScAbstractType(_, _, absUpper) if v == Covariant && absUpper.equiv(Any) => Any
      case _ =>
        rawUpper.recursiveVarianceUpdateModifiable[Set[String]](Set.empty, {
          case (ScAbstractType(_, lower, absUpper), variance, data) =>
            variance match {
              case Contravariant => (true, lower, data)
              case Covariant => (true, absUpper, data)
              case Invariant => (true, ScExistentialArgument(s"_$$${index += 1; index}", Nil, lower, absUpper), data) //todo: why this is right?
            }
          case (ScExistentialArgument(nm, _, lower, skoUpper), variance, data) if !data.contains(nm) =>
            variance match {
              case Contravariant => (true, lower, data)
              case Covariant => (true, skoUpper, data)
              case Invariant => (true, ScExistentialArgument(s"_$$${index += 1; index}", Nil, lower, skoUpper), data)
            }
          case (ex: ScExistentialType, _, data) => (false, ex, data ++ ex.boundNames)
          case (tp, _, data) => (false, tp, data)
        }, v)
    }
    updated.unpackedType
  }
}
/**
 * Single-alternative implementation: maps every constrained type parameter to
 * the sets of its upper and lower bounds. `additionalNames` are type parameters
 * that may appear inside other bounds and therefore must be solved recursively.
 */
private case class ScUndefinedSubstitutorImpl(upperMap: Map[Name, Set[ScType]] = Map.empty,
                                              lowerMap: Map[Name, Set[ScType]] = Map.empty,
                                              additionalNames: Set[Name] = Set.empty)
                                             (implicit project: ProjectContext)
  extends ScUndefinedSubstitutor {

  def isEmpty: Boolean = upperMap.isEmpty && lowerMap.isEmpty

  // Trivial bounds (lower = Nothing, upper = Any) carry no information and are dropped.
  private def equivNothing(tp: ScType) = tp.equiv(Nothing(tp.projectContext))

  private def equivAny(tp: ScType) = tp.equiv(Any(tp.projectContext))

  // Unions two bound maps, filtering out trivial bounds per name.
  private def merge(map1: Map[Name, Set[ScType]], map2: Map[Name, Set[ScType]], forUpper: Boolean): Map[Name, Set[ScType]] = {
    var result = Map[Name, Set[ScType]]()
    val iterator = map1.iterator ++ map2.iterator
    // Iterator.nonEmpty delegates to hasNext, so it does not consume elements.
    while (iterator.nonEmpty) {
      val (name, set) = iterator.next()
      val newSet = result.getOrElse(name, Set.empty) ++ set
      val filtered = if (forUpper) newSet.filterNot(equivAny) else newSet.filterNot(equivNothing)
      result =
        if (filtered.isEmpty) result - name
        else result.updated(name, filtered)
    }
    result
  }

  def addSubst(added: ScUndefinedSubstitutor): ScUndefinedSubstitutor = {
    added match {
      case subst: ScUndefinedSubstitutorImpl =>
        val newUpper = merge(this.upperMap, subst.upperMap, forUpper = true)
        val newLower = merge(this.lowerMap, subst.lowerMap, forUpper = false)
        val newAddNames = this.additionalNames ++ subst.additionalNames
        ScUndefinedSubstitutorImpl(newUpper, newLower, newAddNames)
      case subst: ScMultiUndefinedSubstitutor =>
        // Merging into a multi-substitutor distributes over its alternatives.
        subst.addSubst(this)
    }
  }

  def addLower(name: Name, _lower: ScType, additional: Boolean = false, variance: Variance = Contravariant): ScUndefinedSubstitutor = {
    val lower = computeLower(_lower, variance)
    // A Nothing lower bound is vacuous: every type already conforms to it.
    if (equivNothing(lower)) this
    else addToMap(name, lower, toUpper = false, additional)
  }

  def addUpper(name: Name, _upper: ScType, additional: Boolean = false, variance: Variance = Covariant): ScUndefinedSubstitutor = {
    val upper = computeUpper(_upper, variance)
    // An Any upper bound is vacuous: every type already conforms to it.
    if (equivAny(upper)) this
    else addToMap(name, upper, toUpper = true, additional)
  }

  lazy val names: Set[Name] = upperMap.keySet ++ lowerMap.keySet

  // Solving is expensive, so both variants are cached per instance.
  private lazy val substWithBounds: Option[SubstitutorWithBounds] = getSubstitutorWithBoundsImpl(nonable = true)

  private lazy val substWithBoundsNotNonable: Option[SubstitutorWithBounds] = getSubstitutorWithBoundsImpl(nonable = false)

  override def getSubstitutorWithBounds(nonable: Boolean): Option[SubstitutorWithBounds] = {
    if (nonable) substWithBounds
    else substWithBoundsNotNonable
  }

  private def addToMap(name: Name, scType: ScType, toUpper: Boolean, toAdditional: Boolean): ScUndefinedSubstitutor = {
    val map = if (toUpper) upperMap else lowerMap
    val forName = map.getOrElse(name, Set.empty)
    val updated = map.updated(name, forName + scType)
    val additional = if (toAdditional) additionalNames + name else additionalNames
    if (toUpper) copy(upperMap = updated, additionalNames = additional)
    else copy(lowerMap = updated, additionalNames = additional)
  }

  /**
   * Solves each constrained name to a single type: the lub of its lower bounds
   * if any, otherwise the glb of its upper bounds, otherwise Nothing. When
   * `nonable` is set, a lower bound that does not conform to the upper bound
   * fails the whole solution (returns None).
   */
  private def getSubstitutorWithBoundsImpl(nonable: Boolean): Option[SubstitutorWithBounds] = {
    var tvMap = Map.empty[Name, ScType]
    var lMap = Map.empty[Name, ScType]
    var uMap = Map.empty[Name, ScType]

    def solve(name: Name, visited: Set[Name]): Option[ScType] = {

      // Solves any other constrained parameters referenced inside `tp` first;
      // returns false (aborting via non-local return) on an unsolvable reference.
      def checkRecursive(tp: ScType, needTvMap: Ref[Boolean]): Boolean = {
        tp.visitRecursively {
          case tpt: TypeParameterType =>
            val otherName = tpt.nameAndId
            if (additionalNames.contains(otherName)) {
              needTvMap.set(true)
              solve(otherName, visited + name) match {
                case None if nonable => return false
                case _ =>
              }
            }
          case UndefinedType(tpt, _) =>
            val otherName = tpt.nameAndId
            if (names.contains(otherName)) {
              needTvMap.set(true)
              solve(otherName, visited + name) match {
                case None if nonable => return false
                case _ =>
              }
            }
          case _: ScType =>
        }
        true
      }

      def hasRecursion(set: Set[ScType], needTvMapRef: Ref[Boolean]): Boolean = {
        val iterator = set.iterator
        while (iterator.hasNext) {
          val p = iterator.next()
          if (!checkRecursive(p, needTvMapRef)) {
            tvMap += ((name, Nothing))
            return true
          }
        }
        false
      }

      // A cycle among constraints degrades this parameter to Nothing.
      if (visited.contains(name)) {
        tvMap += ((name, Nothing))
        return None
      }

      tvMap.get(name) match {
        case Some(tp) => Some(tp)
        case _ =>
          val lowerSet = lowerMap.getOrElse(name, Set.empty)
          if (lowerSet.nonEmpty) {
            val needTvMap = Ref.create(false)
            if (hasRecursion(lowerSet, needTvMap)) return None
            // Substitute already-solved parameters into the bounds only when
            // checkRecursive actually saw any (needTvMap flag).
            val subst = if (needTvMap.get()) ScSubstitutor(tvMap) else ScSubstitutor.empty
            val substed = lowerSet.map(subst.subst)
            val lower = substed.reduce(_ lub _)
            lMap += ((name, lower))
            tvMap += ((name, lower))
          }
          val upperSet = upperMap.getOrElse(name, Set.empty)
          if (upperSet.nonEmpty) {
            val needTvMap = Ref.create(false)
            if (hasRecursion(upperSet, needTvMap)) return None
            val subst = if (needTvMap.get()) ScSubstitutor(tvMap) else ScSubstitutor.empty
            val substed = upperSet.map(subst.subst)
            val upper = substed.reduce(_ glb _)
            uMap += ((name, upper))
            tvMap.get(name) match {
              case Some(lower) =>
                // The lub of the lower bounds must conform to the glb of the uppers.
                if (nonable && !lower.conforms(upper)) {
                  return None
                }
              case None => tvMap += ((name, upper))
            }
          }

          if (tvMap.get(name).isEmpty) {
            tvMap += ((name, Nothing))
          }
          tvMap.get(name)
      }
    }

    val namesIterator = names.iterator
    while (namesIterator.hasNext) {
      val name = namesIterator.next()
      solve(name, Set.empty) match {
        case Some(_) => // do nothing
        case None if nonable => return None
        case _ =>
      }
    }
    val subst = ScSubstitutor(tvMap)
    Some((subst, lMap, uMap))
  }

  def filter(fun: (((String, Long), Set[ScType])) => Boolean): ScUndefinedSubstitutor = {
    copy(upperMap = upperMap.filter(fun), lowerMap = lowerMap.filter(fun))
  }
}
/**
 * A disjunction of alternative substitutors: every operation is applied to each
 * alternative, and solving succeeds if any alternative can be solved.
 */
private case class ScMultiUndefinedSubstitutor(subs: Set[ScUndefinedSubstitutorImpl])(implicit project: ProjectContext)
  extends ScUndefinedSubstitutor {

  override def addLower(name: (String, Long), _lower: ScType, additional: Boolean, variance: Variance): ScUndefinedSubstitutor =
    multi(subs.map(_.addLower(name, _lower, additional, variance)))

  override def addUpper(name: (String, Long), _upper: ScType, additional: Boolean, variance: Variance): ScUndefinedSubstitutor =
    multi(subs.map(_.addUpper(name, _upper, additional, variance)))

  // First alternative that solves wins.
  override def getSubstitutorWithBounds(nonable: Boolean): Option[SubstitutorWithBounds] =
    subs.iterator.map(_.getSubstitutorWithBounds(nonable)).find(_.isDefined).flatten

  override def filter(fun: (((String, Long), Set[ScType])) => Boolean): ScUndefinedSubstitutor =
    multi(subs.map(_.filter(fun)))

  override def addSubst(added: ScUndefinedSubstitutor): ScUndefinedSubstitutor = added match {
    case impl: ScUndefinedSubstitutorImpl =>
      multi(subs.map(_.addSubst(impl)))
    case mult: ScMultiUndefinedSubstitutor =>
      // Cartesian product: each of our alternatives merged with each of theirs.
      val flatten = for (s1 <- subs; s2 <- mult.subs) yield s1.addSubst(s2)
      multi(flatten)
  }

  override def isEmpty: Boolean = names.isEmpty

  // Only names constrained in every alternative are considered constrained.
  override val names: Set[Name] = subs.map(_.names).reduce(_ intersect _)
} | loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/ScUndefinedSubstitutor.scala | Scala | apache-2.0 | 12,530 |
// Compiler test: default getters are exempt from variance checking when the
// getter's result type is exactly the parameter type (rationale below).
class Foo[+A] {
  def count(f: A => Boolean = _ => true): Unit = {}
  // The preceding line is valid, even though the generated default getter
  // has type `A => Boolean` which wouldn't normally pass variance checks
  // because it's equivalent to the following overloads which are valid:
  def count2(f: A => Boolean): Unit = {}
  def count2(): Unit = count(_ => true)
}
// Overriding the defaulted method without restating the default must also compile.
class Bar1[+A] extends Foo[A] {
  override def count(f: A => Boolean): Unit = {}
  // This reasoning extends to overrides:
  override def count2(f: A => Boolean): Unit = {}
}
// Overriding while restating the default (which overrides the default getter too).
class Bar2[+A] extends Foo[A] {
  override def count(f: A => Boolean = _ => true): Unit = {}
  // ... including overrides which also override the default getter:
  override def count2(f: A => Boolean): Unit = {}
  override def count2(): Unit = count(_ => true)
}
// This can be contrasted with the need for variance checks in
// `protected[this] methods (cf tests/neg/t7093.scala),
// default getters do not have the same problem since they cannot
// appear in arbitrary contexts.
// Crucially, this argument does not apply to situations in which the default
// getter result type is not a subtype of the parameter type, for example (from
// tests/neg/variance.scala):
//
// class Foo[+A: ClassTag](x: A) {
// private[this] val elems: Array[A] = Array(x)
// def f[B](x: Array[B] = elems): Array[B] = x
// }
//
// If we tried to rewrite this with an overload, it would fail
// compilation:
//
// def f[B](): Array[B] = f(elems) // error: Found: Array[A], Expected: Array[B]
//
// So we only disable variance checking for default getters whose
// result type is the method parameter type, this is checked by
// `tests/neg/variance.scala`
| lampepfl/dotty | tests/pos/default-getter-variance.scala | Scala | apache-2.0 | 1,702 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat._
/**
* @author
* Josh Rosen
*/
/**
 * Warns when a `Seq` that is not an `IndexedSeq` is accessed by index, since
 * positional access on e.g. `List` is O(n).
 */
class CollectionIndexOnNonIndexedSeq
  extends Inspection(
    text = "Use of apply method on a non-indexed Seq",
    defaultLevel = Levels.Warning,
    description = "Checks for indexing on a Seq which is not an IndexedSeq.",
    // Fixed: the explanation previously said "elements of an IndexedSeq", which is
    // backwards — indexed access is only a problem on non-indexed Seqs.
    explanation = "Using an index to access elements of a Seq which is not an IndexedSeq may cause performance problems."
  ) {

  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser: context.Traverser =
        new context.Traverser {

          import context.global._

          // Literal indices are tolerated: `xs(0)`-style access is usually deliberate.
          private def isLiteral(t: Tree) =
            t match {
              case Literal(_) => true
              case _          => false
            }

          override def inspect(tree: Tree): Unit = {
            tree match {
              // Matches `lhs.apply(idx)` where lhs is a Seq but not an IndexedSeq
              // and the index is not a literal.
              case Apply(Select(lhs, TermName("apply")), List(idx))
                  if isSeq(lhs) && !isIndexedSeq(lhs) && !isLiteral(idx) =>
                context.warn(tree.pos, self, tree.toString.take(100))
              case _ => continue(tree)
            }
          }
        }
    }
}
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/inspections/collections/CollectionIndexOnNonIndexedSeq.scala | Scala | apache-2.0 | 1,218 |
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.datasource.avro
import java.io.IOException
import java.io.OutputStream
import java.math.BigDecimal
import java.nio.ByteBuffer
import java.sql.Timestamp
import java.util.Date
import java.util.HashMap
import scala.collection.immutable.Map
import org.apache.avro.Schema
import org.apache.avro.SchemaBuilder
import org.apache.avro.generic.GenericData.Record
import org.apache.avro.generic.GenericRecord
import org.apache.avro.mapred.AvroKey
import org.apache.avro.mapreduce.AvroKeyOutputFormat
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.RecordWriter
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.TaskAttemptID
import org.apache.spark.sql.Row
import org.apache.spark.sql.execution.datasources.OutputWriter
import org.apache.spark.sql.types._
/**
* The Class AvroOutputGenerator.
*
* @author Bitwise
*
*/
/**
 * Spark SQL [[OutputWriter]] that serializes [[Row]]s as Avro generic records.
 *
 * Bug fix: in `decimalToBinary` the negative-number padding loop previously read
 * `tgt(i) != 0xFF` — a no-op comparison (flagged "needs to be changed") — so
 * negative decimals that needed widening were not sign-extended and decoded as
 * positive values. It now assigns `0xFF.toByte`, producing a correct
 * two's-complement, big-endian byte buffer as the Avro decimal logical type requires.
 */
class AvroOutputGenerator(
    path: String,
    context: TaskAttemptContext,
    schema: StructType,
    recordName: String,
    recordNamespace: String) extends OutputWriter {

  // Converts a Row into an Avro value; built once, lazily, from the output schema.
  private lazy val converter = createConverterToAvro(schema, recordName, recordNamespace)

  // Underlying Hadoop record writer. Work files embed the Spark write-job UUID and
  // the task split id so concurrent task attempts do not collide.
  private val recordWriter: RecordWriter[AvroKey[GenericRecord], NullWritable] =
    new AvroKeyOutputFormat[GenericRecord]() {
      override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = {
        val uniqueWriteJobId = context.getConfiguration.get("spark.sql.sources.writeJobUUID")
        val taskAttemptId: TaskAttemptID = context.getTaskAttemptID
        val split = taskAttemptId.getTaskID.getId
        new Path(path, f"part-r-$split%05d-$uniqueWriteJobId$extension")
      }

      @throws(classOf[IOException])
      override def getAvroFileOutputStream(c: TaskAttemptContext): OutputStream = {
        val path = getDefaultWorkFile(context, ".avro")
        path.getFileSystem(context.getConfiguration).create(path)
      }
    }.getRecordWriter(context)

  /** Converts `row` to an Avro record and appends it to the output file. */
  override def write(row: Row): Unit = {
    val key = new AvroKey(converter(row).asInstanceOf[GenericRecord])
    recordWriter.write(key, NullWritable.get())
  }

  override def close(): Unit = recordWriter.close(context)

  private def toAvroDecimal(item: Any): AnyRef = {
    decimalToBinary(item.asInstanceOf[BigDecimal])
  }

  /**
   * Encodes a decimal as a fixed-width, big-endian two's-complement byte buffer
   * of `PRECISION_TO_BYTE_COUNT(precision - 1)` bytes.
   *
   * @throws IncompatibleSchemaException if the value is null
   */
  private def decimalToBinary(bigDecimal: BigDecimal): AnyRef = {
    if(bigDecimal==null){
      throw new IncompatibleSchemaException(
        s"\\nBigdecimal field can't be null & must have precision & scale"+
        "\\nmake sure the field given in input schema matches with the expected schema field")
    }
    val prec = bigDecimal.precision
    val scale = bigDecimal.scale()
    val decimalBytes = bigDecimal.setScale(scale).unscaledValue().toByteArray()
    val precToBytes = PRECISION_TO_BYTE_COUNT(prec - 1)
    if (precToBytes == decimalBytes.length) {
      // Already at the target width; no padding needed.
      return ByteBuffer.wrap(decimalBytes)
    }
    val tgt = Array.ofDim[Byte](precToBytes)
    if (bigDecimal.signum() == -1) {
      // Sign-extend: pre-fill with 0xFF so the widened two's-complement value
      // stays negative; arraycopy below overwrites the significant tail bytes.
      // (Was `tgt(i) != 0xFF`, a no-op comparison instead of an assignment.)
      for (i <- 0 until precToBytes) {
        tgt(i) = 0xFF.toByte
      }
    }
    System.arraycopy(decimalBytes, 0, tgt, precToBytes - decimalBytes.length, decimalBytes.length)
    ByteBuffer.wrap(tgt)
  }

  // PRECISION_TO_BYTE_COUNT(p - 1) = minimum bytes needed to hold any decimal of
  // precision p in two's-complement form.
  val PRECISION_TO_BYTE_COUNT: Array[Int] = new Array[Int](38)

  var prec = 1
  while (prec <= 38) {
    PRECISION_TO_BYTE_COUNT(prec - 1) = Math.ceil((Math.log(Math.pow(10, prec) - 1) / Math.log(2) + 1) /
      8).toInt
    prec += 1
  }

  /**
   * Builds a converter from a Catalyst value of `dataType` to the corresponding
   * Avro representation. All converters map null to null except decimals, which
   * must be non-null (see decimalToBinary).
   */
  private def createConverterToAvro(
      dataType: DataType,
      structName: String,
      recordNamespace: String): (Any) => Any = {
    dataType match {
      case BinaryType => (item: Any) => item match {
        case null => null
        case bytes: Array[Byte] => ByteBuffer.wrap(bytes)
      }
      case ByteType | IntegerType | LongType |
           FloatType | DoubleType | StringType | BooleanType => identity
      case _: DecimalType => (item: Any) =>
        toAvroDecimal(item.asInstanceOf[BigDecimal])
      // Timestamps and dates are written as epoch milliseconds.
      case TimestampType => (item: Any) =>
        if (item == null) null else item.asInstanceOf[Timestamp].getTime
      case DateType => (item: Any) => if (item == null) null else
        item.asInstanceOf[Date].getTime()
      case ShortType => (item: Any) => if (item == null) null else
        item.asInstanceOf[Short]
      case ArrayType(elementType, _) =>
        val elementConverter = createConverterToAvro(elementType, structName, recordNamespace)
        (item: Any) => {
          if (item == null) {
            null
          } else {
            val sourceArray = item.asInstanceOf[Seq[Any]]
            val sourceArraySize = sourceArray.size
            val targetArray = new Array[Any](sourceArraySize)
            var idx = 0
            while (idx < sourceArraySize) {
              targetArray(idx) = elementConverter(sourceArray(idx))
              idx += 1
            }
            targetArray
          }
        }
      case MapType(StringType, valueType, _) =>
        val valueConverter = createConverterToAvro(valueType, structName, recordNamespace)
        (item: Any) => {
          if (item == null) {
            null
          } else {
            // Avro expects a java.util.Map for the map type.
            val javaMap = new HashMap[String, Any]()
            item.asInstanceOf[Map[String, Any]].foreach { case (key, value) =>
              javaMap.put(key, valueConverter(value))
            }
            javaMap
          }
        }
      case structType: StructType =>
        val builder = SchemaBuilder.record(structName).namespace(recordNamespace)
        val schema: Schema = CustomSparkToAvro.convertStructToAvro(
          structType, builder, recordNamespace)
        // One converter per field, applied positionally to the Row's values.
        val fieldConverters = structType.fields.map(field =>
          createConverterToAvro(field.dataType, field.name, recordNamespace))
        (item: Any) => {
          if (item == null) {
            null
          } else {
            val record = new Record(schema)
            val convertersIterator = fieldConverters.iterator
            val fieldNamesIterator = dataType.asInstanceOf[StructType].fieldNames.iterator
            val rowIterator = item.asInstanceOf[Row].toSeq.iterator
            while (convertersIterator.hasNext) {
              val converter = convertersIterator.next()
              record.put(fieldNamesIterator.next(), converter(rowIterator.next()))
            }
            record
          }
        }
    }
  }
}
class IncompatibleSchemaException(msg: String, ex: Throwable = null) extends Exception(msg, ex)
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/datasource/avro/AvroOutputGenerator.scala | Scala | apache-2.0 | 7,310 |
package io.shaka.http
import javax.xml.bind.DatatypeConverter.printBase64Binary
import io.shaka.http.ContentType.APPLICATION_FORM_URLENCODED
import io.shaka.http.FormParameters.{fromEntity, toEntity}
import io.shaka.http.Http._
import io.shaka.http.HttpHeader.{COOKIE, ACCEPT, AUTHORIZATION, CONTENT_TYPE}
/**
 * Factory and extractor objects for each HTTP method, allowing both
 * `GET(url)` construction and `case GET(url) =>` matching on requests.
 */
object Request {

  object GET {
    def apply(url: Url): Request = Request(Method.GET, url)
    def unapply(req: Request): Option[String] =
      Some(req).collect { case r if r.method == Method.GET => r.url }
  }

  object POST {
    def apply(url: Url): Request = Request(Method.POST, url)
    def unapply(req: Request): Option[String] =
      Some(req).collect { case r if r.method == Method.POST => r.url }
  }

  object PUT {
    def apply(url: Url): Request = Request(Method.PUT, url)
    def unapply(req: Request): Option[String] =
      Some(req).collect { case r if r.method == Method.PUT => r.url }
  }

  object HEAD {
    def apply(url: Url): Request = Request(Method.HEAD, url)
    def unapply(req: Request): Option[String] =
      Some(req).collect { case r if r.method == Method.HEAD => r.url }
  }

  object DELETE {
    def apply(url: Url): Request = Request(Method.DELETE, url)
    def unapply(req: Request): Option[String] =
      Some(req).collect { case r if r.method == Method.DELETE => r.url }
  }
}
/**
 * Immutable HTTP request. All builder-style methods return a copy; new headers
 * are prepended to the existing header list.
 *
 * Bug fix: `cookies` previously split "name=value" on every '=', so a cookie
 * whose value itself contained '=' (e.g. base64 padding, which RFC 6265 permits
 * in cookie values) lost everything before the last '='. Splitting is now
 * limited to the first '='.
 */
case class Request(method: Method, url: Url, headers: Headers = Headers.Empty, entity: Option[Entity] = None) {

  /** Appends form parameters to any already present in the entity and marks the content type accordingly. */
  def formParameters(parameters: FormParameter*): Request = {
    val existingFormParameters = entity.fold(List[FormParameter]())(fromEntity)
    copy(
      entity = Some(toEntity(existingFormParameters ++ parameters)),
      headers = (CONTENT_TYPE -> APPLICATION_FORM_URLENCODED.value) :: headers
    )
  }

  def header(header: HttpHeader, value: String): Request = copy(headers = (header, value) :: headers)

  def contentType(value: String) = header(CONTENT_TYPE, value)

  def contentType(value: ContentType) = header(CONTENT_TYPE, value.value)

  def accept(value: ContentType) = header(ACCEPT, value.value)

  def entity(content: String) = copy(entity = Some(Entity(content)))

  def entity(content: Array[Byte]) = copy(entity = Some(Entity(content)))

  /** Returns the entity as a string, failing loudly when the request has none. */
  def entityAsString: String = entity match {
    case Some(value) => value.toString
    case _ => throw new RuntimeException("There is no entity in this request! Consider using request.entity:Option[Entity] instead.")
  }

  /** Adds an RFC 7617 Basic authorization header for the given credentials. */
  def basicAuth(user: String, password: String): Request = {
    // NOTE(review): getBytes uses the platform default charset — presumably fine
    // for ASCII credentials; confirm UTF-8 is intended for non-ASCII ones.
    header(AUTHORIZATION, "Basic " + printBase64Binary(s"$user:$password".getBytes))
  }

  /**
   * Parses all Cookie headers into name/value pairs. A cookie sent without '='
   * yields name == value, matching the previous behaviour.
   */
  def cookies: Set[Cookie] = headers.filter(_._1 == COOKIE).flatMap(_._2.split(";"))
    .map(_.trim.split("=", 2)) // limit 2: cookie values may themselves contain '='
    .map(cookie => Cookie(cookie.head, cookie.last))
    .toSet
}
| jcaraballo/naive-http | src/main/scala/io/shaka/http/Request.scala | Scala | apache-2.0 | 2,637 |
package io.eels.component.parquet
import com.sksamuel.exts.Logging
import io.eels.{Predicate, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.hadoop.{ParquetInputFormat, ParquetReader}
import org.apache.parquet.hadoop.api.ReadSupport
import org.apache.parquet.schema.Type
/**
* Helper function to create a parquet reader, using the apache parquet library.
* The reader supports optional predicate (for row filtering) and a
* projection schema (for column filtering).
*/
object ParquetReaderFn extends Logging {

  // Reader parallelism and other tunables come from the shared config.
  private val config = ParquetReaderConfig()

  /**
   * Creates a new reader for the given path.
   *
   * @param predicate if set then a parquet predicate is applied to the rows
   * @param projectionSchema if set then the schema is used to narrow the fields returned
   */
  def apply(path: Path,
            predicate: Option[Predicate],
            projectionSchema: Option[Type]): ParquetReader[Row] = {
    logger.debug(s"Opening parquet reader for $path")

    // The parquet reader can use a projection by setting a projected schema onto a conf object
    def configuration(): Configuration = {
      val conf = new Configuration()
      projectionSchema.foreach { it =>
        conf.set(ReadSupport.PARQUET_READ_SCHEMA, it.toString)
      }
      conf.set(ParquetInputFormat.DICTIONARY_FILTERING_ENABLED, "true")
      conf.set(org.apache.parquet.hadoop.ParquetFileReader.PARQUET_READ_PARALLELISM, config.parallelism.toString)
      conf
    }

    // a filter is set when we have a predicate for the read; otherwise a no-op filter
    def filter(): FilterCompat.Filter = predicate.map(ParquetPredicateBuilder.build)
      .map(FilterCompat.get)
      .getOrElse(FilterCompat.NOOP)

    ParquetReader.builder(new RowReadSupport, path)
      .withConf(configuration())
      .withFilter(filter())
      .build()
  }
} | stheppi/eel | eel-components/src/main/scala/io/eels/component/parquet/ParquetReaderFn.scala | Scala | apache-2.0 | 1,948 |
package mimir.util
import play.api.libs.json._
object HTTPUtils {
/**
 * Performs a blocking HTTP request and returns the response body as a string.
 *
 * Fix: the input stream is now closed in a `finally` block, so it is no longer
 * leaked when reading the body throws. The old `if (inputStream != null)` guard
 * was dead code — `getInputStream` either returns a stream or throws.
 *
 * @param url            absolute URL to fetch
 * @param connectTimeout connect timeout in milliseconds
 * @param readTimeout    read timeout in milliseconds
 * @param requestMethod  HTTP verb, e.g. "GET"
 */
@throws(classOf[java.io.IOException])
@throws(classOf[java.net.SocketTimeoutException])
def get(url: String,
        connectTimeout: Int = 8000,
        readTimeout: Int = 8000,
        requestMethod: String = "GET") =
{
  import java.net.{URL, HttpURLConnection}
  val connection = (new URL(url)).openConnection.asInstanceOf[HttpURLConnection]
  connection.setConnectTimeout(connectTimeout)
  connection.setReadTimeout(readTimeout)
  connection.setRequestMethod(requestMethod)
  val inputStream = connection.getInputStream
  try scala.io.Source.fromInputStream(inputStream).mkString
  finally inputStream.close()
}
/**
 * Fetches a URL, parses the body as JSON, and optionally drills down to the
 * JSON value at `path` via JsonUtils.seekPath.
 */
def getJson(url: String, path: Option[String] = None,
            connectTimeout: Int = 8000,
            readTimeout: Int = 8000,
            requestMethod: String = "GET"): JsValue = {
  val parsed = play.api.libs.json.Json.parse(get(url, connectTimeout, readTimeout, requestMethod))
  path.fold(parsed)(p => JsonUtils.seekPath(parsed, p))
}
} | UBOdin/mimir | src/main/scala/mimir/util/HTTPUtils.scala | Scala | apache-2.0 | 1,222 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.