code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.rasterfoundry.datamodel
import io.circe.generic.JsonCodec
import io.circe.generic.semiauto._
import io.circe.{Decoder, Encoder}
import scala.util.Try
import java.util.UUID
/** A single spectral band belonging to an image.
  *
  * @param id         unique identifier of this band
  * @param image      identifier of the image this band belongs to
  * @param name       human-readable band name
  * @param number     band number within the image
  * @param wavelength wavelength values for the band -- presumably [min, max] bounds; TODO confirm units
  */
@JsonCodec
final case class Band(id: UUID,
                      image: UUID,
                      name: String,
                      number: Int,
                      wavelength: List[Int])
object Band {

  /** Shortcut for building a [[Band.Create]]. */
  def create = Create.apply _

  /** Tupled constructor, convenient for row mappers. */
  def tupled = (Band.apply _).tupled

  /** Creation payload for a [[Band]]: everything except the ids.
    *
    * @param name       human-readable band name
    * @param number     band number within the image
    * @param wavelength wavelength values for the band
    */
  final case class Create(name: String, number: Int, wavelength: List[Int]) {

    /** Materialize a [[Band]] under `imageId`, minting a fresh band id. */
    def toBand(imageId: UUID): Band = {
      Band(UUID.randomUUID, imageId, name, number, wavelength)
    }
  }

  object Create {
    implicit val encodeCreate: Encoder[Create] = deriveEncoder[Create]

    // The second decoder exists because we've done some truly bizarre json things
    // in the db, at least from the perspective of whether the things in the db should
    // ever be translatable back into bands.
    // Note that because of Sentinel-2's 8A band, band numbers for all bands above that number
    // will be wrong for Sentinel-2 scenes.
    implicit val decodeCreate
      : Decoder[Create] = deriveDecoder[Create] or Decoder.forProduct3(
      "name",
      "number",
      "wavelength")((name: String, number: String, wavelength: Float) => {
      // Legacy rows store `number` as a string and `wavelength` as a single float;
      // fall back to band number 0 when the stored string is unparseable.
      Create(name,
             Try(number.toInt).toOption.getOrElse(0),
             List(wavelength.toInt))
    })
  }

  /** A band's fields together with its (optional) id and the owning image's id. */
  @JsonCodec
  final case class Identified(id: Option[UUID],
                              imageId: UUID,
                              name: String,
                              number: Int,
                              wavelength: List[Int]) {

    /** Materialize a [[Band]], minting a fresh id when none was supplied. */
    def toBand: Band = {
      Band(id.getOrElse(UUID.randomUUID), imageId, name, number, wavelength)
    }
  }

  object Identified
}
| raster-foundry/raster-foundry | app-backend/datamodel/src/main/scala/Band.scala | Scala | apache-2.0 | 1,822 |
package uk.co.grahamcox.dozy.servlet
import javax.servlet.http.HttpServletRequest
import scala.collection.JavaConverters._
/**
* Implementation of the Request that works in terms of an HttpServletRequest
*/
class HttpRequest(req: HttpServletRequest) extends Request {
  /** Request parameters, reduced to the first value of every parameter that has at least one value */
  private val params: Map[String, String] =
    req.getParameterMap.asScala.toMap.asInstanceOf[Map[String, Array[String]]].collect {
      case (name, values) if values.length > 0 => name -> values(0)
    }

  /** All request headers, keyed by header name */
  private val headers: Map[String, String] =
    req.getHeaderNames.asScala.toList.asInstanceOf[List[String]].map { name =>
      name -> req.getHeader(name)
    }.toMap

  /**
   * Get the URL that was requested. This is the full URL, including all components
   * @return the URL that was requested
   */
  def getFullURL: String = req.getRequestURL.toString

  /**
   * Get the URL that was requested. This is the URL only as far as the inside of the servlet
   * @return the URL that was requested
   */
  def getURL: String = req.getRequestURI.stripPrefix(req.getServletPath)

  /**
   * Get the HTTP Method that was requested
   * @return the HTTP Method
   */
  def getMethod: String = req.getMethod

  /**
   * Get the value of a single header
   * @param name The name of the header to get
   * @return the header value
   */
  def getHeader(name: String): Option[String] = headers.get(name)

  /**
   * Get all of the headers
   * @return the headers
   */
  def getHeaders: Map[String, String] = headers

  /**
   * Get a single parameter
   * @param name The name of the parameter to get
   * @return the parameter value
   */
  def getParam(name: String): Option[String] = params.get(name)

  /**
   * Get all of the parameters
   * @return the parameters
   */
  def getParams: Map[String, String] = params
}
| sazzer/dozy | core/src/main/scala/uk/co/grahamcox/dozy/servlet/HttpRequest.scala | Scala | gpl-3.0 | 1,903 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.events
import java.util.UUID
/**
* @author rxue
* @since 1.0.5
*/
trait EventSourceGroupManager {

  /** Stable unique identifier for this manager instance, minted at construction time. */
  val uuid: UUID = UUID.randomUUID()

  /**
   * @return The display name of this manager
   */
  def name: String

  /**
   * @return if this manager can be closed by user
   */
  def closeable: Boolean

  /** All event sources currently managed by this group. */
  def eventSources: Seq[EventSource]

  /** Look up a single event source by its uuid string.
    * NOTE(review): name is plural but returns a single source -- confirm intent. */
  def getEventSources(uuid: String): EventSource

  /** Look up, by uuid string, an event source that supports free scrolling. */
  def getFreeScrollEventSource(uuid: String): FreeScrollEventSource

  /** @return whether an event source with the given uuid string is managed by this group */
  def containsEventSources(uuid: String): Boolean
}
| groupon/sparklint | src/main/scala/com/groupon/sparklint/events/EventSourceGroupManager.scala | Scala | apache-2.0 | 1,153 |
package provingground
/**
* Much of the richness of HoTT is in the definitions of ``Inductive types`` (and their indexed counterparts)
* and of (dependent) functions on these by recursion and induction
* These are implemented using several layers of recursive definitions and diagonals (i.e., fixed points). In HoTT,
* recursion and induction are applications of (dependent) functions `rec_W,X` and `ind_W, Xs` to the ``definition data``.
*
* It is useful to capture information regarding inductive types and the recursion and induction functions in scala types. Our implementation
* is designed to do this.
*
* ==Inductive Type Definitions==
*
* Inductive types are specified by introduction rules. Each introduction rule is specified in [[ConstructorShape]] (without specifying the type)
* and [[ConstructorTL]] including the specific type. The full definition is in [[ConstructorSeqTL]].
*
* ==Recursion and Induction functions==
*
* These are defined recursively, first for each introduction rule and then for the inductive type as a whole. A subtlety is that the
* scala type of the `rec_W,X` and `induc_W, Xs` functions depends on the scala type of the codomain `X` (or family `Xs`).
* To make these types visible, some type level calculations using implicits are done, giving traits [[ConstructorPatternMap]]
* and [[ConstructorSeqMap]] that have recursive definition of the recursion and induction functions, the former for the case of a
* single introduction rule. Traits [[ConstructorSeqMapper]] and [[ConstructorPatternMapper]] provide the lifts.
*
* ==Indexed Versions==
*
* There are indexed versions of all these definitions, to work with indexed inductive type families.
*/
package object induction {
  import provingground._

  /** Aggregates the implicit machinery for working with inductive types:
    * induction/recursion lifts plus term-list and substitution instances.
    */
  object implicits
      extends InductionImplicits
      with TermListImplicits
      with SubstImplicits
}
| siddhartha-gadgil/ProvingGround | core/src/main/scala/provingground/induction/package.scala | Scala | mit | 1,905 |
package com.karasiq.bootstrap.panel
import com.karasiq.bootstrap.components.BootstrapComponents
import com.karasiq.bootstrap.context.RenderingContext
import com.karasiq.bootstrap.icons.Icons
import com.karasiq.bootstrap.utils.Utils
trait UniversalPanels { self: RenderingContext with BootstrapComponents with Utils with Icons with Panels with PanelStyles ⇒

  import scalaTags.all._
  import BootstrapAttrs._

  type Panel = PanelBuilder

  /** Factory helpers for Bootstrap panels: collapse toggles, titles and header buttons. */
  object Panel extends PanelFactory {

    /** Clickable element that collapses/expands the body of the panel with id `panelId`. */
    def collapse(panelId: String, modifiers: Modifier*): Tag = {
      span(cursor.pointer, `data-toggle` := "collapse", `data-target` := s"#$panelId-panel-body", modifiers)
    }

    /** Panel title: an icon, a non-breaking space, then the title content. */
    def title(icon: IconModifier, title: Modifier, modifiers: Modifier*): Tag = {
      h3(`class` := "panel-title")(
        icon,
        Bootstrap.nbsp,
        title,
        modifiers
      )
    }

    /** Icon-only anchor button; the href is a no-op, behaviour comes from `modifiers`. */
    def button(icon: IconModifier, modifiers: Modifier*): Tag = {
      a(href := "javascript:void(0);", icon, modifiers)
    }

    /** Right-floated container holding panel header buttons. */
    def buttons(buttons: Modifier*): Tag = {
      div(`class` := "pull-right panel-head-buttons", buttons)
    }

    /**
      * Shortcut to PanelBuilder()
      */
    def apply(panelId: String = Bootstrap.newId, style: PanelStyle = PanelStyle.default,
              header: Option[Modifier] = None, footer: Option[Modifier] = None): PanelBuilder = {
      PanelBuilder(panelId, style, header, footer)
    }
  }

  /** Immutable panel description; `withXxx` methods return updated copies.
    *
    * @param panelId DOM id of the panel; derived ids `-panel-header`/`-panel-body`/`-panel-footer`
    *                are used for the panel's sections
    * @param style   Bootstrap contextual style of the panel
    * @param header  optional header content
    * @param footer  optional footer content
    */
  case class PanelBuilder(panelId: String, style: PanelStyle = PanelStyle.default,
                          header: Option[Modifier] = None, footer: Option[Modifier] = None)
    extends AbstractPanel with BootstrapHtmlComponent {

    def withId(newId: String): PanelBuilder = {
      copy(panelId = newId)
    }

    def withStyle(style: PanelStyle): PanelBuilder = {
      copy(style = style)
    }

    def withHeader(modifiers: Modifier*): PanelBuilder = {
      copy(header = Some(modifiers))
    }

    def withFooter(modifiers: Modifier*): PanelBuilder = {
      copy(footer = Some(modifiers))
    }

    /** Renders the panel: optional heading, collapsible body (open by default), optional footer. */
    def renderTag(content: ModifierT*): TagT = {
      div("panel".addClass, style, id := panelId)(
        for (h ← header) yield div(`class` := "panel-heading", id := s"$panelId-panel-header", h),
        div(`class` := "panel-body collapse in", id := s"$panelId-panel-body", content),
        for (f ← footer) yield div(`class` := "panel-footer", id := s"$panelId-panel-footer", f)
      )
    }
  }
}
| Karasiq/scalajs-bootstrap | library/shared/src/main/scala/com/karasiq/bootstrap/panel/UniversalPanels.scala | Scala | mit | 2,428 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import Keys._
/** Central declarations of third-party dependency coordinates and versions,
  * shared by the sbt build definition. Each nested object pairs a version
  * string with the module it pins. */
object Dependencies {
  object PlayJson {
    val version = "2.6.7"
    val playJson = "com.typesafe.play" %% "play-json" % version
  }

  object PlayJsonJoda {
    val version = "2.6.7"
    val playJsonJoda = "com.typesafe.play" %% "play-json-joda" % version
  }

  object JodaTime {
    val version = "2.9.9"
    val jodaTime = "joda-time" % "joda-time" % version
  }

  object JodaConvert {
    val version = "1.9.2"
    val jodaConvert = "org.joda" % "joda-convert" % version
  }

  // Test-scoped dependencies below.
  object Scalatest {
    val version = "3.0.4"
    val scalatest = "org.scalatest" %% "scalatest" % version % "test"
  }

  object JUnitInterface {
    val version = "0.11"
    val junitInterface = "com.novocode" % "junit-interface" % version % "test"
  }
}
| coursera/courscala | project/Dependencies.scala | Scala | apache-2.0 | 1,369 |
package com.qunar.spark.tungsten.api
import com.qunar.spark.tungsten.base.CommonEncoders._
import com.qunar.spark.tungsten.base.CoreJoinOperators
import org.apache.spark.sql.Dataset
import scala.reflect.runtime.universe.TypeTag
/**
  * An extended api built on top of [[org.apache.spark.sql.Dataset]]
  * <p/>
  * NOTICE: to protect the core functionality, this class can only be constructed
  * by classes inside the [[com.qunar.spark.tungsten]] package
  */
class DataSet[T: TypeTag] private[tungsten](private val innerDataset: Dataset[T]) extends Serializable {

  /**
    * Exposed only to classes inside [[com.qunar.spark.tungsten]], protecting the core functionality
    */
  private[tungsten] def getInnerDataset: Dataset[T] = innerDataset

  /* functional operators, for the scala api */

  def filter(func: T => Boolean): DataSet[T] = {
    val newDataset = innerDataset.filter(func)
    new DataSet[T](newDataset)
  }

  def map[U: TypeTag](func: T => U): DataSet[U] = {
    val newDataset = innerDataset.map(func)
    new DataSet[U](newDataset)
  }

  def mapPartitions[U: TypeTag](func: Iterator[T] => Iterator[U]): DataSet[U] = {
    val newDataset = innerDataset.mapPartitions(func)
    new DataSet[U](newDataset)
  }

  def flatMap[U: TypeTag](func: T => TraversableOnce[U]): DataSet[U] = {
    val newDataset = innerDataset.flatMap(func)
    new DataSet[U](newDataset)
  }

  /* join-related operators */

  /**
    * ''Left outer join operator''
    * </p>
    * Built with the [[CoreJoinOperators.strongTypeJoin]] method.
    * No right outer join is provided here, since it can essentially be
    * expressed as a left outer join with the operands swapped.
    *
    * @param genJoinKey function deriving the join key from a record of the data set
    */
  def leftOuterJoin[K: TypeTag](anotherDataSet: DataSet[T], genJoinKey: T => K): DataSet[(T, T)] = {
    val dataset = CoreJoinOperators.strongTypeJoin(innerDataset, anotherDataSet.innerDataset, genJoinKey, "left_outer")
    new DataSet[(T, T)](dataset)
  }

  /**
    * As above, the ''inner join operator''
    */
  def innerJoin[K: TypeTag](anotherDataSet: DataSet[T], genJoinKey: T => K): DataSet[(T, T)] = {
    val dataset = CoreJoinOperators.strongTypeJoin(innerDataset, anotherDataSet.innerDataset, genJoinKey, "inner")
    new DataSet[(T, T)](dataset)
  }

  /**
    * As above, the ''full outer join operator''
    */
  def fullOuterJoin[K: TypeTag](anotherDataSet: DataSet[T], genJoinKey: T => K): DataSet[(T, T)] = {
    val dataset = CoreJoinOperators.strongTypeJoin(innerDataset, anotherDataSet.innerDataset, genJoinKey, "outer")
    new DataSet[(T, T)](dataset)
  }

  /**
    * ''cogroup operator''
    * </p>
    * A thin wrapper over the [[CoreJoinOperators.cogroup]] method
    *
    * @param genJoinKey function deriving the join key from a record of the data set
    */
  def cogroup[K: TypeTag](anotherDataSet: DataSet[T], genJoinKey: T => K): DataSet[(Seq[T], Seq[T])] = {
    val dataset = CoreJoinOperators.cogroup(innerDataset, anotherDataSet.innerDataset, genJoinKey)
    new DataSet[(Seq[T], Seq[T])](dataset)
  }
}
| spark-bypass-common/common-tungsten | src/main/scala/com/qunar/spark/tungsten/api/DataSet.scala | Scala | apache-2.0 | 2,906 |
/* * Copyright 2011 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package code.snippet
import net.liftweb.common.{Box, Empty, Full}
import net.liftweb.http.rest.RestHelper
import net.liftweb.json.JsonAST.{JString, JObject, JField}
import net.liftweb.mapper.By
import code.model.LogFileInfo
/** REST endpoints exposing log file metadata as XML or JSON under /logfile/&lt;name&gt;. */
object LogFileRest extends RestHelper {
  serve {
    case XmlGet("logfile" :: name :: _, _) => renderXML(name)
    // JSON variant is a placeholder: it echoes a greeting rather than real log file data.
    case JsonGet("logfile" :: name :: _, _) =>
      JObject(List(JField("a", JString("hello, restful world, " + name))))
  }

  /** Render the named log file's metadata as an XHTML fragment, or an apology when not found. */
  private def renderXML(name: String) = {
    lookupLogFile(name) match {
      case Full(logFileInfo) => <div>
        Name: { logFileInfo.name } <br/>
        Filename: { logFileInfo.filename } <br/>
        </div>
      case _ => <div>
        Sorry, couldn't find a log file named { name }.
        </div>
    }
  }

  /** Look up a log file record by its `name` field. */
  private def lookupLogFile(name: String): Box[LogFileInfo] = {
    LogFileInfo.find(By(LogFileInfo.name, name))
  }
}
| haroldl/kumiho | src/main/scala/code/snippet/LogFileRest.scala | Scala | apache-2.0 | 1,508 |
package namedparams
// NOTE(review): this compiler test exercises Dotty's experimental *named type
// parameter* syntax (`class C[type Elem]`, `new C[Elem = String]`); it is not
// standard Scala and compiles only with a compiler build supporting the feature.
class C[type Elem, type Value](val elem: Elem) {
  def toVal: Elem = ???
}

class D[type Elem, V](elem: Elem) extends C[Elem, V](elem)

object Test {
  val c = new C[String, String]("A") {
    override def toVal = elem
  }
  val x: c.Elem = c.elem
  // Named type parameters are also accessible as type members via refinement.
  val c2: C { type Elem = String } = c
  val c3 = new C[Elem = String, Value = Int]("B")
  val c4 = new C[Elem = String]("C")
  val x2: c2.Elem = c2.elem

  def d1[E, V](x: E) = new D[E, V](x)
  def d2[E, V](x: E) = new C[Elem = E, Value = V](x)

  // Partial/named type application: unnamed parameters are inferred.
  val y1 = d1[Int, String](1)
  val y2 = d1[E = Int](2)
  val y3 = d1[V = String](3)

  val z1 = d2[E = Int, V = String](1)
  val z2 = d2[V = String, E = Int](1)
  val z3 = d2[E = Int](1)
  val z4 = d2[V = Int]("AAA")
  val z5 = d2[E = Int][V = String](1)
}

// Adapted from i94-nada
trait Test1 {
  trait Monad[type Elem] {
    def unit: Elem
  }
  sealed abstract class Either[A,B]
  case class Left[A,B](unit: A) extends Either[A,B] with Monad[A]
  case class Right[A,B](unit: B) extends Either[A,B] with Monad[B]
  def flatMap[X,Y,M <: Monad](m: M[Elem = X], f: X => M[Elem = Y]): M[Elem = Y] = f(m.unit)
  println(flatMap(Left(1), {x: Int => Left(x)}))
}
| densh/dotty | tests/pos/named-params.scala | Scala | bsd-3-clause | 1,180 |
package org.knora.webapi.responders
import java.util.UUID
import org.knora.webapi.{ApplicationLockException, IRI}
import org.scalatest.{Matchers, WordSpec}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
/**
* Tests [[IriLocker]].
*/
class IriLockerSpec extends WordSpec with Matchers {

  import scala.concurrent.ExecutionContext.Implicits.global

  val SUCCESS = "success"
  val FAILURE = "failure"

  "IriLocker" should {
    "not allow a request to acquire a lock when another request already has it" in {
      // Holds the lock long enough for the second request's lock attempt to time out.
      def runLongTask(): Future[String] = Future {
        Thread.sleep(4500)
        SUCCESS
      }

      def runShortTask(): Future[String] = Future(SUCCESS)

      val testIri: IRI = "http://example.org/test1"

      val firstApiRequestID = UUID.randomUUID

      // Fire and forget: the first request grabs the lock and keeps it while sleeping.
      IriLocker.runWithIriLock(
        apiRequestID = firstApiRequestID,
        iri = testIri,
        task = () => runLongTask()
      )

      // Wait a bit to allow the first request to get the lock.
      Thread.sleep(200)

      val secondApiRequestID = UUID.randomUUID

      val secondTaskResultFuture = IriLocker.runWithIriLock(
        apiRequestID = secondApiRequestID,
        iri = testIri,
        task = () => runShortTask()
      )

      // The second request must fail with ApplicationLockException instead of running its task.
      val secondTaskFailedWithLockTimeout = try {
        Await.result(secondTaskResultFuture, 5.seconds)
        false
      } catch {
        case ale: ApplicationLockException => true
      }

      assert(secondTaskFailedWithLockTimeout, "Second task did not get a lock timeout")
    }

    "provide reentrant locks" in {
      // Re-acquires the same IRI lock recursively under a single API request ID;
      // this only terminates if the lock is reentrant per request ID.
      def runRecursiveTask(iri: IRI, apiRequestID: UUID, count: Int): Future[String] = {
        if (count > 0) {
          IriLocker.runWithIriLock(
            apiRequestID = apiRequestID,
            iri = iri,
            task = () => runRecursiveTask(iri, apiRequestID, count - 1)
          )
        } else {
          Future(SUCCESS)
        }
      }

      val testIri: IRI = "http://example.org/test2"

      val firstApiRequestID = UUID.randomUUID

      val firstTestResult = Await.result(runRecursiveTask(testIri, firstApiRequestID, 3), 1.second)
      assert(firstTestResult == SUCCESS)

      // A different request ID must be able to acquire the lock afterwards.
      val secondApiRequestID = UUID.randomUUID

      val secondTestResult = Await.result(runRecursiveTask(testIri, secondApiRequestID, 3), 1.second)
      assert(secondTestResult == SUCCESS)
    }

    "release a lock when a task returns a failed future" in {
      // If succeed is true, returns a successful future, otherwise returns a failed future.
      def runTask(succeed: Boolean): Future[String] = Future {
        if (succeed) {
          SUCCESS
        } else {
          throw new Exception(FAILURE)
        }
      }

      val testIri: IRI = "http://example.org/test3"

      val firstApiRequestID = UUID.randomUUID

      val firstTaskResultFuture = IriLocker.runWithIriLock(
        apiRequestID = firstApiRequestID,
        iri = testIri,
        task = () => runTask(false)
      )

      val firstTaskFailed = try {
        Await.result(firstTaskResultFuture, 1.second)
        false
      } catch {
        case e: Exception => true
      }

      assert(firstTaskFailed, "First task did not fail")

      // The lock must have been released despite the failed future,
      // so a second request can acquire it and succeed.
      val secondApiRequestID = UUID.randomUUID

      val secondTaskResultFuture = IriLocker.runWithIriLock(
        apiRequestID = secondApiRequestID,
        iri = testIri,
        task = () => runTask(true)
      )

      val secondTaskResult = Await.result(secondTaskResultFuture, 1.second)
      assert(secondTaskResult == SUCCESS, "Second task did not succeed")
    }

    "release a lock when a task throws an exception instead of returning a future" in {
      // If succeed is true, returns a successful future, otherwise throws an exception.
      def runTask(succeed: Boolean): Future[String] = {
        if (succeed) {
          Future(SUCCESS)
        } else {
          throw new Exception(FAILURE)
        }
      }

      val testIri: IRI = "http://example.org/test4"

      val firstApiRequestID = UUID.randomUUID

      val firstTaskResultFuture = IriLocker.runWithIriLock(
        apiRequestID = firstApiRequestID,
        iri = testIri,
        task = () => runTask(false)
      )

      val firstTaskFailed = try {
        Await.result(firstTaskResultFuture, 1.second)
        false
      } catch {
        case e: Exception => true
      }

      assert(firstTaskFailed, "First task did not fail")

      // The lock must have been released despite the thrown exception.
      val secondApiRequestID = UUID.randomUUID

      val secondTaskResultFuture = IriLocker.runWithIriLock(
        apiRequestID = secondApiRequestID,
        iri = testIri,
        task = () => runTask(true)
      )

      val secondTaskResult = Await.result(secondTaskResultFuture, 1.second)
      assert(secondTaskResult == SUCCESS, "Second task did not succeed")
    }
  }
}
| musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/responders/IriLockerSpec.scala | Scala | agpl-3.0 | 5,602 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes
import java.lang.String
import org.neo4j.cypher.internal.symbols.SymbolTable
/** Pipe that emits a single row containing only the query parameters from the current state. */
class ParameterPipe() extends Pipe {
  def createResults(state: QueryState) = Seq(ExecutionContext(params = state.params))

  // This pipe introduces no identifiers into the symbol table.
  val symbols = new SymbolTable()

  override def executionPlan(): String = "Parameters()"
} | dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/ParameterPipe.scala | Scala | gpl-3.0 | 1,132 |
package com.twitter.finagle.ssl.server
import com.twitter.finagle.ssl.{CipherSuites, ClientAuth, Engine, Protocols}
import javax.net.ssl.{SSLContext, SSLEngine}
import org.scalatest.FunSuite
class SslServerEngineFactoryTest extends FunSuite {

  /** Builds an initialized TLSv1.2 SSLContext with default key/trust managers. */
  private[this] def createTestSslContext(): SSLContext = {
    val sslContext = SSLContext.getInstance("TLSv1.2")
    sslContext.init(null, null, null)
    sslContext
  }

  /** Creates a raw SSLEngine from a fresh test context. */
  private[this] def createTestSslEngine(): SSLEngine = {
    val sslContext = createTestSslContext()
    sslContext.createSSLEngine()
  }

  /** Wraps a test SSLEngine in Finagle's Engine. */
  private[this] def createTestEngine(): Engine =
    new Engine(createTestSslEngine())

  test("configureClientAuth Unspecified doesn't change anything") {
    val sslEngine = createTestSslEngine()
    // Pre-set wantClientAuth to verify Unspecified leaves it untouched.
    sslEngine.setWantClientAuth(true)
    SslServerEngineFactory.configureClientAuth(sslEngine, ClientAuth.Unspecified)
    assert(sslEngine.getWantClientAuth())
    assert(!sslEngine.getNeedClientAuth())
  }

  test("configureClientAuth Off turns off client authentication") {
    val sslEngine = createTestSslEngine()
    sslEngine.setWantClientAuth(true)
    SslServerEngineFactory.configureClientAuth(sslEngine, ClientAuth.Off)
    assert(!sslEngine.getWantClientAuth())
    assert(!sslEngine.getNeedClientAuth())
  }

  test("configureClientAuth Wanted turns on desired client authentication") {
    val sslEngine = createTestSslEngine()
    SslServerEngineFactory.configureClientAuth(sslEngine, ClientAuth.Wanted)
    assert(sslEngine.getWantClientAuth())
    assert(!sslEngine.getNeedClientAuth())
  }

  test("configureClientAuth Needed turns on required client authentication") {
    val sslEngine = createTestSslEngine()
    SslServerEngineFactory.configureClientAuth(sslEngine, ClientAuth.Needed)
    // Needed implies wantClientAuth is superseded by needClientAuth.
    assert(!sslEngine.getWantClientAuth())
    assert(sslEngine.getNeedClientAuth())
  }

  test("configureEngine sets server mode, protocols, and cipher suites") {
    val engine = createTestEngine()
    val protocols = Protocols.Enabled(Seq("TLSv1.2"))
    val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"))
    val config = SslServerConfiguration(protocols = protocols, cipherSuites = cipherSuites)

    SslServerEngineFactory.configureEngine(engine, config)
    val sslEngine = engine.self

    // is a server engine
    assert(!sslEngine.getUseClientMode())

    // has the right protocols
    val enabledProtocols = sslEngine.getEnabledProtocols()
    assert(enabledProtocols.length == 1)
    assert(enabledProtocols(0) == "TLSv1.2")

    // has the right cipher suites
    val enabledCipherSuites = sslEngine.getEnabledCipherSuites()
    assert(enabledCipherSuites.length == 1)
    assert(enabledCipherSuites(0) == "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384")
  }

  test("createEngine can create an engine from an SSLContext") {
    val sslContext = createTestSslContext()
    val engine = SslServerEngineFactory.createEngine(sslContext)
    assert(engine != null)
    assert(engine.self != null)
  }
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/ssl/server/SslServerEngineFactoryTest.scala | Scala | apache-2.0 | 3,014 |
/*
* Copyright 2016 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.bruneli.scalaopt.core.linalg
import com.github.bruneli.scalaopt.core.linalg.FromToDoubleConversions.ToDouble
import org.apache.commons.math3.linear.RealMatrix
/**
* Define operations on real-valued vector
*
* @author bruneli
*/
trait DenseVectorLike[+A <: ToDouble] {

  /** Vector type */
  type V <: DenseVectorLike[A]

  /** Number of coordinates in this vector. */
  def length: Int

  /** Element at index i as its typed value. */
  def apply(i: Int): A

  /** Element at index i as a raw double. */
  def coordinate(i: Int): Double

  /** Force evaluation into a concrete [[DenseVector]]. */
  def force: DenseVector[A]

  /** Element wise addition */
  def +[B >: A <: ToDouble](that: DenseVectorLike[B]): DenseVectorLike[B]

  /** Element wise subtraction */
  def -[B >: A <: ToDouble](that: DenseVectorLike[B]): DenseVectorLike[B]

  /** Add a constant value */
  def + (offset: Double): V

  /** Subtract a constant value */
  def - (offset: Double): V

  /** Negative of a vector */
  def unary_- : V

  /** Multiplication by scalar */
  def * (scalar: Double): V

  /** Division by scalar */
  def / (scalar: Double): V

  /** Multiplication by scalar */
  def *[B >: A <: ToDouble] (scalar: B): DenseVectorLike[B]

  /** Division by scalar */
  def /[B >: A <: ToDouble] (scalar: B): DenseVectorLike[B]

  /** Inner product of two vectors.
    *
    * NOTE(review): iterates over this vector's length only and does not check
    * that `that` is at least as long -- confirm callers guarantee equal lengths.
    */
  def inner[B <: ToDouble](that: DenseVectorLike[B]): Double = {
    var acc = 0.0
    var idx = 0
    while (idx < this.length) {
      acc += this.coordinate(idx) * that.coordinate(idx)
      idx += 1
    }
    acc
  }

  /** Dot product of two vectors (alias of [[inner]]) */
  def dot[B <: ToDouble](that: DenseVectorLike[B]): Double = {
    this.inner(that)
  }

  /** L2 norm */
  def norm: Double = {
    math.sqrt(norm2)
  }

  /** L2 norm squared */
  def norm2: Double = {
    var acc = 0.0
    var idx = 0
    while (idx < this.length) {
      acc += this.coordinate(idx) * this.coordinate(idx)
      idx += 1
    }
    acc
  }

  /** Conversion to a column matrix */
  def toMatrix: RealMatrix

  /** Transpose, conversion to a row matrix */
  def t: RealMatrix = {
    this.toMatrix.transpose()
  }

  /** Outer product of two vectors */
  def outer[B <: ToDouble](that: DenseVectorLike[B]): RealMatrix = {
    this.toMatrix.multiply(that.t)
  }

  /** Directly update the underlying raw value of a vector */
  def updated(index: Int, elem: Double): V

  /** Map a function directly on raw values of the vector */
  def mapValues(f: Double => Double): V

  /** Map a function acting on tuple (value, index) of vector elements */
  def mapWithIndex(f: (Double, Int) => Double): V

  /**
   * Zip two vectors and map their pair of values into a new vector
   *
   * The size of the vector returned is the minimum size between this and that vectors
   *
   * @param that a vector of variable identical to the source vector
   * @param f a real-valued function acting on pairs of (this, that) vector elements
   * @return a vector of variable
   */
  def zipAndMap[B >: A <: ToDouble](that: DenseVectorLike[B],
                                    f: (Double, Double) => Double): DenseVectorLike[B]

  /** Build a new vector with values corresponding to indices i and j swapped */
  def swap(i: Int, j: Int): V

  /** Build a vector of the concrete type V from raw coordinates. */
  def withValues(coordinates: Array[Double]): V = {
    newDenseVectorBuilder.withValues(coordinates)
  }

  /** Builder used to construct vectors of the concrete type V. */
  def newDenseVectorBuilder: DenseVectorBuilder[V]
}
| bruneli/scalaopt | core/src/main/scala/com/github/bruneli/scalaopt/core/linalg/DenseVectorLike.scala | Scala | apache-2.0 | 3,851 |
package org.scalatra
package fileupload
import org.apache.commons.io.IOUtils
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import test.scalatest.ScalatraFunSuite
import org.apache.commons.fileupload.FileUploadBase
/** Servlet that echoes multipart parameters and uploaded file contents back through response headers. */
class FileUploadSupportTestServlet extends ScalatraServlet with FileUploadSupport {
  post("""/multipart.*""".r) {
    // Echo each parameter/file into a response header so the test suite can assert on them.
    multiParams.get("string") foreach { ps: Seq[String] => response.setHeader("string", ps.mkString(";")) }
    fileParams.get("file") foreach { fi => response.setHeader("file", new String(fi.get).trim) }
    fileParams.get("file-none") foreach { fi => response.setHeader("file-none", new String(fi.get).trim) }
    fileParams.get("file-two[]") foreach { fi => response.setHeader("file-two", new String(fi.get).trim) }
    fileMultiParams.get("file-two[]") foreach { fis =>
      response.setHeader("file-two-with-brackets", fis.foldLeft(""){ (acc, fi) => acc + new String(fi.get).trim })
    }
    // Lookup without the [] suffix should find the same multi-file parameter.
    fileMultiParams.get("file-two") foreach { fis =>
      response.setHeader("file-two-without-brackets", fis.foldLeft(""){ (acc, fi) => acc + new String(fi.get).trim })
    }
    params.get("file") foreach { response.setHeader("file-as-param", _) }
    // Returned as the response body; exercises request-encoding-aware parameter decoding.
    params("utf8-string")
  }

  post("/multipart-pass") {
    pass()
  }

  post("/multipart-param") {
    params.get("queryParam") foreach { p =>
      response.addHeader("Query-Param", p)
    }
    pass()
  }

  post("/echo") {
    params.getOrElse("echo", "")
  }
}
/** Servlet with a one-byte upload limit, used to test size-limit error handling. */
class MaxSizeTestServlet extends ScalatraServlet with FileUploadSupport {
  post() {
  }

  error {
    // Size-limit violations are turned into a 413 response with body "boom".
    case e: FileUploadBase.SizeLimitExceededException => halt(413, "boom")
  }

  // Caps the maximum accepted upload size to a single byte.
  override def newServletFileUpload = {
    val upload = super.newServletFileUpload
    upload.setSizeMax(1)
    upload
  }
}
@RunWith(classOf[JUnitRunner])
class FileUploadSupportTest extends ScalatraFunSuite {
  addServlet(classOf[FileUploadSupportTestServlet], "/*")
  addServlet(classOf[MaxSizeTestServlet], "/max-size/*")

  /** Posts the canned multipart fixture (multipart_request.txt) to `path` and returns the response. */
  def multipartResponse(path: String = "/multipart") = {
    // Round-trip through iso-8859-1 to preserve the fixture's raw bytes.
    val reqBody = new String(
      IOUtils.toString(getClass.getResourceAsStream("multipart_request.txt")).getBytes, "iso-8859-1").getBytes("iso-8859-1")
    val boundary = "---------------------------3924013385056820061124200860"

    post(path, headers = Map("Content-Type" -> "multipart/form-data; boundary=%s".format(boundary)), body = reqBody) {
      response
    }
  }

//  test("keeps input parameters on multipart request") {
//    multipartResponse().getHeader("string") should equal ("foo")
//  }
//
//  test("decodes input parameters according to request encoding") {
//    multipartResponse().getContent() should equal ("föo")
//  }

  test("sets file params") {
    multipartResponse().getHeader("file") should equal ("one")
  }

  test("sets file param with no bytes when no file is uploaded") {
    multipartResponse().getHeader("file-none") should equal ("")
  }

  test("sets multiple file params") {
    multipartResponse().getHeader("file-two-with-brackets") should equal ("twothree")
  }

  test("looks for params with [] suffix, Ruby style") {
    multipartResponse().getHeader("file-two-without-brackets") should equal ("twothree")
  }

  test("fileParams returns first input for multiple file params") {
    multipartResponse().getHeader("file-two") should equal ("two")
  }

  test("file params are not params") {
    multipartResponse().getHeader("file-as-param") should equal (null)
  }

  test("keeps input params on pass") {
    multipartResponse("/multipart-pass").getHeader("string") should equal ("foo")
  }

  test("keeps file params on pass") {
    multipartResponse("/multipart-pass").getHeader("file") should equal ("one")
  }

  test("reads form params on non-multipart request") {
    post("/echo", "echo" -> "foo") {
      body should equal("foo")
    }
  }

  test("keeps query parameters") {
    multipartResponse("/multipart-param?queryParam=foo").getHeader("Query-Param") should equal ("foo")
  }

  test("query parameters don't shadow post parameters") {
    multipartResponse("/multipart-param?string=bar").getHeader("string") should equal ("bar;foo")
  }

  test("max size is respected") {
    multipartResponse("/max-size/").status should equal (413)
  }

  test("file upload exceptions are handled by standard error handler") {
    multipartResponse("/max-size/").body should equal ("boom")
  }
}
| etorreborre/scalatra | fileupload/src/test/scala/org/scalatra/fileupload/FileUploadSupportTest.scala | Scala | bsd-2-clause | 4,520 |
package text.kanji
/**
* @author K.Sakamoto
* Created on 2016/07/26
*/
object PrimarySchoolKanjiCharacter extends KanjiCharacter {
  /** All kanji taught in primary school, grades 1 through 6, in grade order. */
  override val kanji: Seq[String] =
    Seq(
      PrimarySchool1stGradeKanjiCharacter,
      PrimarySchool2ndGradeKanjiCharacter,
      PrimarySchool3rdGradeKanjiCharacter,
      PrimarySchool4thGradeKanjiCharacter,
      PrimarySchool5thGradeKanjiCharacter,
      PrimarySchool6thGradeKanjiCharacter
    ).flatMap(_.kanji)
}
| ktr-skmt/FelisCatusZero | src/main/scala/text/kanji/PrimarySchoolKanjiCharacter.scala | Scala | apache-2.0 | 481 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.ksmpartners.ernie.engine
import com.ksmpartners.ernie.model._
import scala.collection.immutable
/** Request that the report defId be generated resulting in an output of type rptType */
case class ReportRequest(defId: String, rptType: ReportType, retentionPeriod: Option[Int], reportParameters: immutable.Map[String, String], userName: String)
/** The response to the given ReportRequest or ReportAndNotifyRequest */
case class ReportResponse(jobId: Long, jobStatus: JobStatus, req: ReportRequest)
/** Request the resulting file for the given jobId */
case class ResultRequest(jobId: Long)
/** The response to the given ResultRequest */
case class ResultResponse(rptId: Option[String], req: ResultRequest)
/** Request the status for the given jobId */
case class StatusRequest(jobId: Long)
/** The response to the given StatusRequest */
case class StatusResponse(jobStatus: JobStatus, req: StatusRequest)
/** Request job output deletion for the given jobId */
case class DeleteRequest(jobId: Long)
/** The response to the given DeleteRequest */
case class DeleteResponse(deleteStatus: DeleteStatus, req: DeleteRequest)
/** Request report definition deletion for the given jobId */
case class DeleteDefinitionRequest(defId: String)
/** The response to the given DeleteDefinitionRequest */
case class DeleteDefinitionResponse(deleteStatus: DeleteStatus, req: DeleteDefinitionRequest)
/** Request purge of all expired reports */
case class PurgeRequest()
/** The response to the given PurgeRequest */
case class PurgeResponse(deleteStatus: DeleteStatus, purgedRptIds: List[String], req: PurgeRequest)
/** Request a list of the currently known jobIds */
case class JobsListRequest()
/** The response to the given JobsListRequest */
case class JobsListResponse(jobsList: Array[String], req: JobsListRequest)
/** Request that the definition defId be generated into a rptType document */
case class JobRequest(defId: String, rptType: ReportType, jobId: Long, retentionPeriod: Option[Int], reportParameters: immutable.Map[String, String], userName: String)
/** The response(s) associated with the given JobRequest */
case class JobResponse(jobStatus: JobStatus, rptId: Option[String], req: JobRequest)
/** Request that the Actor be shut down */
case class ShutDownRequest()
/** The response that indicates that the Actor's facilities are shut down */
case class ShutDownResponse()
/**Request details for a given report */
case class ReportDetailRequest(jobId: Long)
/**The response associated with the given ReportDetailRequest*/
case class ReportDetailResponse(rptEntity: Option[ReportEntity], req: ReportDetailRequest)
/**Request details for a given job */
case class JobDetailRequest(jobId: Long)
/**The response associated with the given ReportDetailRequest*/
case class JobDetailResponse(jobEntity: Option[JobEntity], req: JobDetailRequest)
/**Request a catalog for some subset of jobs */
case class JobsCatalogRequest(jobCatalog: Option[JobCatalog])
/**The response associated with the given ReportDetailRequest*/
case class JobsCatalogResponse(catalog: List[JobEntity], req: JobsCatalogRequest)
/**Request a worker added to the Coordinator pool */
case class NewWorkerRequest()
/**Request notification on job status change or on specified job status **/
case class JobNotificationRequest(jobId: Long, status: Option[JobStatus])
/**The response associated with the given JobNotificationRequest **/
case class JobNotificationResponse(status: JobStatus, req: JobNotificationRequest) | ksmpartners/ernie | ernie-engine/src/main/scala/com/ksmpartners/ernie/engine/Messages.scala | Scala | apache-2.0 | 4,057 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic.builtin
import java.io.{File, OutputStream}
import java.net.{URI, URL}
import java.nio.file.{FileSystems, Files}
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.magic.dependencies.{IncludeConfig, IncludeInterpreter, IncludeKernel, IncludeOutputStream}
import com.typesafe.config.ConfigFactory
import org.apache.spark.SparkContext
import org.apache.toree.kernel.api.KernelLike
import org.apache.toree.plugins.PluginManager
import org.scalatest.{FunSpec, Matchers}
import org.scalatestplus.mockito.MockitoSugar
import org.mockito.Mockito._
import org.mockito.Matchers._
/**
 * Unit tests for the AddJar magic.
 *
 * NOTE(review): several of these tests download a real jar from Maven Central
 * (repo1.maven.org), so they require network access and are not hermetic.
 */
class AddJarSpec extends FunSpec with Matchers with MockitoSugar {
  describe("AddJar"){
    describe("#execute") {
      it("should call addJar on the provided kernel") {
        val mockKernel = mock[KernelLike]
        val mockOutputStream = mock[OutputStream]
        val mockPluginManager = mock[PluginManager]
        val testConfig = ConfigFactory.load()

        // Anonymous subclass wires the mocks into the magic's mixin dependencies.
        val addJarMagic = new AddJar
          with IncludeOutputStream
          with IncludeConfig
          with IncludeKernel
        {
          override val kernel: KernelLike = mockKernel
          override val outputStream: OutputStream = mockOutputStream
          override lazy val pluginManager: PluginManager = mockPluginManager
          override val config = testConfig
        }

        addJarMagic.execute("""https://repo1.maven.org/maven2/org/scala-rules/rule-engine-core_2.11/0.5.1/rule-engine-core_2.11-0.5.1.jar""")

        // A plain jar goes to the kernel, not to the plugin (magic) loader.
        verify(mockKernel).addJars(any[URI])
        verify(mockPluginManager, times(0)).loadPlugins(any())
      }

      it("should raise exception if jar file does not end in .jar or .zip") {
        val mockOutputStream = mock[OutputStream]

        val addJarMagic = new AddJar
          with IncludeOutputStream
        {
          override val outputStream: OutputStream = mockOutputStream
        }

        intercept[IllegalArgumentException] {
          addJarMagic.execute("""http://www.example.com/""")
        }

        intercept[IllegalArgumentException] {
          addJarMagic.execute("""http://www.example.com/not_a_jar""")
        }
      }

      it("should raise exception if jar file does not exist") {
        val mockOutputStream = mock[OutputStream]

        val addJarMagic = new AddJar
          with IncludeOutputStream
        {
          override val outputStream: OutputStream = mockOutputStream
        }

        intercept[IllegalArgumentException] {
          addJarMagic.execute("""http://ibm.com/this.jar.does.not.exist.jar""")
        }
      }

      it("should extract jar file name from jar URL") {
        val mockOutputStream = mock[OutputStream]

        val addJarMagic = new AddJar
          with IncludeOutputStream
        {
          override val outputStream: OutputStream = mockOutputStream
        }

        var url = """http://www.example.com/someJar.jar"""
        var jarName = addJarMagic.getFileFromLocation(url)
        assert(jarName == "someJar.jar")

        url = """http://www.example.com/remotecontent?filepath=/path/to/someJar.jar"""
        jarName = addJarMagic.getFileFromLocation(url)
        // File names come from the path, not from the query fragment
        assert(jarName == "remotecontent")

        url = """http://www.example.com/"""
        jarName = addJarMagic.getFileFromLocation(url)
        assert(jarName == "")
      }

      it("should use a cached jar if the force option is not provided") {
        val mockKernel = mock[KernelLike]
        val mockOutputStream = mock[OutputStream]
        var downloadFileCalled = false  // Used to verify that downloadFile
                                        // was or was not called in this test
        val testConfig = ConfigFactory.load()

        val addJarMagic = new AddJar
          with IncludeOutputStream
          with IncludeConfig
          with IncludeKernel
        {
          override val kernel: KernelLike = mockKernel
          override val outputStream: OutputStream = mockOutputStream
          override val config = testConfig
          // Spy on downloadFile so we can observe cache hits/misses.
          override def downloadFile(fileUrl: URL, destinationUrl: URL): URL = {
            downloadFileCalled = true
            super.downloadFile(fileUrl, destinationUrl)
          }
        }

        // NOTE(review): relies on the previous test having populated the cache.
        addJarMagic.execute("""https://repo1.maven.org/maven2/org/scala-rules/rule-engine-core_2.11/0.5.1/rule-engine-core_2.11-0.5.1.jar""")

        downloadFileCalled should be (false)
        verify(mockKernel).addJars(any[URI])
      }

      it("should not use a cached jar if the force option is provided") {
        val mockKernel = mock[KernelLike]
        val mockOutputStream = mock[OutputStream]
        var downloadFileCalled = false  // Used to verify that downloadFile
                                        // was or was not called in this test
        val testConfig = ConfigFactory.load()

        val addJarMagic = new AddJar
          with IncludeOutputStream
          with IncludeConfig
          with IncludeKernel
        {
          override val kernel: KernelLike = mockKernel
          override val outputStream: OutputStream = mockOutputStream
          override val config = testConfig
          override def downloadFile(fileUrl: URL, destinationUrl: URL): URL = {
            downloadFileCalled = true
            super.downloadFile(fileUrl, destinationUrl)
          }
        }

        // -f forces a re-download even when the jar is cached.
        addJarMagic.execute("""-f https://repo1.maven.org/maven2/org/scala-rules/rule-engine-core_2.11/0.5.1/rule-engine-core_2.11-0.5.1.jar""")

        downloadFileCalled should be (true)
        verify(mockKernel).addJars(any[URI])
      }

      it("should add magic jar to magicloader and not to interpreter and spark context") {
        val mockSparkContext = mock[SparkContext]
        val mockInterpreter = mock[Interpreter]
        val mockOutputStream = mock[OutputStream]
        val mockPluginManager = mock[PluginManager]
        val testConfig = ConfigFactory.load()

        val addJarMagic = new AddJar
          with IncludeInterpreter
          with IncludeOutputStream
          with IncludeConfig
        {
          override val interpreter: Interpreter = mockInterpreter
          override val outputStream: OutputStream = mockOutputStream
          override lazy val pluginManager: PluginManager = mockPluginManager
          override val config = testConfig
        }

        addJarMagic.execute(
          """--magic https://repo1.maven.org/maven2/org/scala-rules/rule-engine-core_2.11/0.5.1/rule-engine-core_2.11-0.5.1.jar""")

        // --magic routes the jar to the plugin manager only.
        verify(mockPluginManager).loadPlugins(any())
        verify(mockSparkContext, times(0)).addJar(anyString())
        verify(mockInterpreter, times(0)).addJars(any[URL])
      }
    }
  }
}
| lresende/incubator-toree | kernel/src/test/scala/org/apache/toree/magic/builtin/AddJarSpec.scala | Scala | apache-2.0 | 7,581 |
package net.lshift.diffa.agent.rest.exceptions
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import javax.ws.rs.ext.{Provider, ExceptionMapper}
import javax.ws.rs.core.Response
import net.lshift.diffa.kernel.frontend.InvalidInventoryException
/**
* This transform all InvalidInventoryExceptions that occur in the application and returns an HTTP 400 to the requester.
*/
@Provider
class InvalidInventoryExceptionMapper extends ExceptionMapper[InvalidInventoryException] {
def toResponse(x: InvalidInventoryException) = {
Response.status(Response.Status.BAD_REQUEST).entity("Inventory was invalid: " + x.getMessage).`type`("text/plain").build()
}
} | aprescott/diffa | agent/src/main/scala/net/lshift/diffa/agent/rest/exceptions/InvalidInventoryExceptionMapper.scala | Scala | apache-2.0 | 1,223 |
package com.avast.metrics.scalaapi.impl
import java.time.{Duration => JDuration}
import com.avast.metrics.api.Timer.TimeContext
import com.avast.metrics.api.{Timer => JTimer}
import com.avast.metrics.scalaapi.Timer
import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
/** Scala-facing [[Timer]] that delegates every operation to a Java `Timer`. */
private class TimerImpl(inner: JTimer) extends Timer {
  override def name: String = inner.getName

  override def start(): TimeContext = inner.start()

  override def update(duration: JDuration): Unit = inner.update(duration)

  override def update(duration: Duration): Unit =
    inner.update(JDuration.ofNanos(duration.toNanos))

  /** Times a synchronous block; the timer is stopped even if the block throws. */
  override def time[A](block: => A): A = {
    val ctx = inner.start()
    try block
    finally ctx.stop()
  }

  /**
   * Times an asynchronous computation: the clock stops when the future
   * completes (successfully or not). If evaluating the by-name argument
   * itself throws, the context is stopped and the exception rethrown.
   */
  override def time[A](future: => Future[A])(implicit ec: ExecutionContext): Future[A] = {
    val ctx = inner.start()
    try {
      future andThen { case _ =>
        ctx.stop()
      }
    } catch {
      case NonFatal(ex) =>
        ctx.stop()
        throw ex
    }
  }
}
| avast/metrics | scala-api/src/main/scala/com/avast/metrics/scalaapi/impl/TimerImpl.scala | Scala | mit | 1,106 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.compiler
import org.scalajs.jasminetest.JasmineTest
object DoubleTest extends JasmineTest {

  describe("Double") {

    it("toInt") {
      // Asserts that converting x with Double.toInt yields `expected`.
      @inline
      def test(x: Double, expected: Int): Unit =
        expect(x.toInt).toEqual(expected)

      // Specials: signed zeros map to 0, infinities saturate to the Int range
      // bounds, and NaN converts to 0.
      test(+0.0, 0)
      test(-0.0, 0)
      test(Double.PositiveInfinity, Int.MaxValue)
      test(Double.NegativeInfinity, Int.MinValue)
      test(Double.NaN, 0)

      // Positive numbers: fractions truncate toward zero; values beyond the
      // Int range saturate at Int.MaxValue.
      test(0.3, 0)
      test(0.7, 0)
      test(1.2, 1)
      test(5e12, Int.MaxValue)
      test(2147483646, 2147483646)
      test(2147483646.999, 2147483646)
      test(2147483512.546, 2147483512)
      test(65.67, 65)

      // Negative numbers: truncation toward zero (not floor), saturation at
      // Int.MinValue.
      test(-0.3, 0)
      test(-0.7, 0)
      test(-1.2, -1)
      test(-5e12, Int.MinValue)
      test(-2147483647.9999, -2147483647)
      test(-2147483565.123, -2147483565)
      test(-65.67, -65)
    }
  }
}
| renyaoxiang/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/compiler/DoubleTest.scala | Scala | bsd-3-clause | 1,473 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.admin.CreateTopicCommand
import kafka.api._
import kafka.message._
import kafka.network._
import scala.collection._
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic._
import kafka.metrics.KafkaMetricsGroup
import org.I0Itec.zkclient.ZkClient
import kafka.common._
import kafka.utils.{ZkUtils, Pool, SystemTime, Logging}
import kafka.network.RequestChannel.Response
import kafka.cluster.Broker
import kafka.controller.KafkaController
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val replicaManager: ReplicaManager,
val zkClient: ZkClient,
brokerId: Int,
val controller: KafkaController) extends Logging {
  // Holds produce requests that must wait for follower acknowledgement
  // (request.required.acks beyond the leader-only cases).
  private val producerRequestPurgatory =
    new ProducerRequestPurgatory(replicaManager.config.producerPurgatoryPurgeIntervalRequests)
  // Holds fetch requests waiting for enough data (minBytes) or their maxWait timeout.
  private val fetchRequestPurgatory =
    new FetchRequestPurgatory(requestChannel, replicaManager.config.fetchPurgatoryPurgeIntervalRequests)
  private val delayedRequestMetrics = new DelayedRequestMetrics

  /* following 3 data structures are updated by the update metadata request
   * and is queried by the topic metadata request. */
  var leaderCache: mutable.Map[TopicAndPartition, PartitionStateInfo] =
    new mutable.HashMap[TopicAndPartition, PartitionStateInfo]()
  // private var allBrokers: mutable.Map[Int, Broker] = new mutable.HashMap[Int, Broker]()
  private var aliveBrokers: mutable.Map[Int, Broker] = new mutable.HashMap[Int, Broker]()
  // Guards leaderCache and aliveBrokers (see handleUpdateMetadataRequest).
  private val partitionMetadataLock = new Object

  this.logIdent = "[KafkaApi-%d] ".format(brokerId)
  /**
   * Top-level method that handles all requests and multiplexes to the right api
   */
  def handle(request: RequestChannel.Request) {
    try{
      trace("Handling request: " + request.requestObj + " from client: " + request.remoteAddress)
      request.requestId match {
        case RequestKeys.ProduceKey => handleProducerRequest(request)
        case RequestKeys.FetchKey => handleFetchRequest(request)
        case RequestKeys.OffsetsKey => handleOffsetRequest(request)
        case RequestKeys.MetadataKey => handleTopicMetadataRequest(request)
        case RequestKeys.LeaderAndIsrKey => handleLeaderAndIsrRequest(request)
        case RequestKeys.StopReplicaKey => handleStopReplicaRequest(request)
        case RequestKeys.UpdateMetadataKey => handleUpdateMetadataRequest(request)
        case RequestKeys.ControlledShutdownKey => handleControlledShutdownRequest(request)
        // Unknown request id: protocol mismatch with the client — fail loudly.
        case requestId => throw new KafkaException("No mapping found for handler id " + requestId)
      }
    } catch {
      case e: Throwable =>
        // Catch everything so one bad request cannot kill the handler thread;
        // the error is reported back to the client via handleError.
        request.requestObj.handleError(e, requestChannel, request)
        error("error when handling request %s".format(request.requestObj), e)
    } finally
      // Timestamp when local processing finished, used for request-latency metrics.
      request.apiLocalCompleteTimeMs = SystemTime.milliseconds
  }
def handleLeaderAndIsrRequest(request: RequestChannel.Request) {
val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest]
try {
val (response, error) = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest)
val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, response, error)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(leaderAndIsrResponse)))
} catch {
case e: KafkaStorageException =>
fatal("Disk error during leadership change.", e)
Runtime.getRuntime.halt(1)
}
}
def handleStopReplicaRequest(request: RequestChannel.Request) {
val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest]
val (response, error) = replicaManager.stopReplicas(stopReplicaRequest)
val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, response.toMap, error)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(stopReplicaResponse)))
replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads()
}
  def handleUpdateMetadataRequest(request: RequestChannel.Request) {
    val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest]
    val stateChangeLogger = replicaManager.stateChangeLogger
    // Reject metadata from a stale controller: only the controller with the
    // newest known epoch may update this broker's caches.
    if(updateMetadataRequest.controllerEpoch < replicaManager.controllerEpoch) {
      val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
        "old controller %d with epoch %d. Latest known controller epoch is %d").format(brokerId,
        updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
        replicaManager.controllerEpoch)
      stateChangeLogger.warn(stateControllerEpochErrorMessage)
      throw new ControllerMovedException(stateControllerEpochErrorMessage)
    }
    // Update the alive-broker map and per-partition leader cache under the lock
    // so readers see a consistent snapshot.
    partitionMetadataLock synchronized {
      replicaManager.controllerEpoch = updateMetadataRequest.controllerEpoch
      // cache the list of alive brokers in the cluster
      updateMetadataRequest.aliveBrokers.foreach(b => aliveBrokers.put(b.id, b))
      updateMetadataRequest.partitionStateInfos.foreach { partitionState =>
        leaderCache.put(partitionState._1, partitionState._2)
        if(stateChangeLogger.isTraceEnabled)
          stateChangeLogger.trace(("Broker %d cached leader info %s for partition %s in response to UpdateMetadata request " +
            "sent by controller %d epoch %d with correlation id %d").format(brokerId, partitionState._2, partitionState._1,
            updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch, updateMetadataRequest.correlationId))
      }
    }
    val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId)
    requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(updateMetadataResponse)))
  }
def handleControlledShutdownRequest(request: RequestChannel.Request) {
val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest]
val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId)
val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId,
ErrorMapping.NoError, partitionsRemaining)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(controlledShutdownResponse)))
}
/**
* Check if a partitionData from a produce request can unblock any
* DelayedFetch requests.
*/
def maybeUnblockDelayedFetchRequests(topic: String, partition: Int, messageSizeInBytes: Int) {
val satisfied = fetchRequestPurgatory.update(RequestKey(topic, partition), messageSizeInBytes)
trace("Producer request to (%s-%d) unblocked %d fetch requests.".format(topic, partition, satisfied.size))
// send any newly unblocked responses
for(fetchReq <- satisfied) {
val topicData = readMessageSets(fetchReq.fetch)
val response = FetchResponse(fetchReq.fetch.correlationId, topicData)
requestChannel.sendResponse(new RequestChannel.Response(fetchReq.request, new FetchResponseSend(response)))
}
}
  /**
   * Handle a produce request: append to the local (leader) logs, then either
   * respond immediately (acks 0/1 or all-failed cases) or park the request in
   * the producer purgatory until enough replicas have acknowledged.
   */
  def handleProducerRequest(request: RequestChannel.Request) {
    val produceRequest = request.requestObj.asInstanceOf[ProducerRequest]
    val sTime = SystemTime.milliseconds
    val localProduceResults = appendToLocalLog(produceRequest)
    debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
    val numPartitionsInError = localProduceResults.count(_.error.isDefined)
    // Newly appended data may satisfy fetch requests parked in the fetch purgatory.
    produceRequest.data.foreach(partitionAndData =>
      maybeUnblockDelayedFetchRequests(partitionAndData._1.topic, partitionAndData._1.partition, partitionAndData._2.sizeInBytes))
    // With replication factor 1 there are no followers to wait for, so a
    // delayed response would never be satisfied by replica fetches.
    val allPartitionHaveReplicationFactorOne =
      !produceRequest.data.keySet.exists(
        m => replicaManager.getReplicationFactorForPartition(m.topic, m.partition) != 1)
    if(produceRequest.requiredAcks == 0) {
      // send a fake producer response if producer request.required.acks = 0. This mimics the behavior of a 0.7 producer
      // and is tuned for very high throughput
      requestChannel.sendResponse(new RequestChannel.Response(request.processor, request, null))
    } else if (produceRequest.requiredAcks == 1 ||
        produceRequest.numPartitions <= 0 ||
        allPartitionHaveReplicationFactorOne ||
        numPartitionsInError == produceRequest.numPartitions) {
      // Leader-only ack (or nothing to wait for): respond immediately.
      // NOTE(review): this path reports r.start as the offset while the delayed
      // path below reports r.end + 1 — confirm this asymmetry is intended.
      val statuses = localProduceResults.map(r => r.key -> ProducerResponseStatus(r.errorCode, r.start)).toMap
      val response = ProducerResponse(produceRequest.correlationId, statuses)
      requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
    } else {
      // create a list of (topic, partition) pairs to use as keys for this delayed request
      val producerRequestKeys = produceRequest.data.keys.map(
        topicAndPartition => new RequestKey(topicAndPartition)).toSeq
      val statuses = localProduceResults.map(r => r.key -> ProducerResponseStatus(r.errorCode, r.end + 1)).toMap
      val delayedProduce = new DelayedProduce(producerRequestKeys,
        request,
        statuses,
        produceRequest,
        produceRequest.ackTimeoutMs.toLong)
      producerRequestPurgatory.watch(delayedProduce)

      /*
       * Replica fetch requests may have arrived (and potentially satisfied)
       * delayedProduce requests while they were being added to the purgatory.
       * Here, we explicitly check if any of them can be satisfied.
       */
      var satisfiedProduceRequests = new mutable.ArrayBuffer[DelayedProduce]
      producerRequestKeys.foreach(key =>
        satisfiedProduceRequests ++=
          producerRequestPurgatory.update(key, key))
      debug(satisfiedProduceRequests.size +
        " producer requests unblocked during produce to local log.")
      satisfiedProduceRequests.foreach(_.respond())

      // we do not need the data anymore
      produceRequest.emptyData()
    }
  }
case class ProduceResult(key: TopicAndPartition, start: Long, end: Long, error: Option[Throwable] = None) {
def this(key: TopicAndPartition, throwable: Throwable) =
this(key, -1L, -1L, Some(throwable))
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(error) => ErrorMapping.codeFor(error.getClass.asInstanceOf[Class[Throwable]])
}
}
  /**
   * Helper method for handling a parsed producer request: append each
   * partition's message set to the local leader log, mapping per-partition
   * failures to ProduceResults carrying the error instead of throwing.
   */
  private def appendToLocalLog(producerRequest: ProducerRequest): Iterable[ProduceResult] = {
    val partitionAndData: Map[TopicAndPartition, MessageSet] = producerRequest.data
    trace("Append [%s] to local log ".format(partitionAndData.toString))
    partitionAndData.map {case (topicAndPartition, messages) =>
      // BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
      // BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
      try {
        val partitionOpt = replicaManager.getPartition(topicAndPartition.topic, topicAndPartition.partition)
        val (start, end) =
          partitionOpt match {
            case Some(partition) => partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet])
            case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
              .format(topicAndPartition, brokerId))
          }
        trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
          .format(messages.size, topicAndPartition.topic, topicAndPartition.partition, start, end))
        ProduceResult(topicAndPartition, start, end)
      } catch {
        // NOTE: Failed produce requests is not incremented for UnknownTopicOrPartitionException and NotLeaderForPartitionException
        // since failed produce requests metric is supposed to indicate failure of a broker in handling a produce request
        // for a partition it is the leader for
        case e: KafkaStorageException =>
          // Unrecoverable disk error: halt the JVM. The `null` below is never
          // reached; it only satisfies the type checker.
          fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
          Runtime.getRuntime.halt(1)
          null
        case utpe: UnknownTopicOrPartitionException =>
          warn("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
            producerRequest.correlationId, producerRequest.clientId, topicAndPartition, utpe.getMessage))
          new ProduceResult(topicAndPartition, utpe)
        case nle: NotLeaderForPartitionException =>
          warn("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
            producerRequest.correlationId, producerRequest.clientId, topicAndPartition, nle.getMessage))
          new ProduceResult(topicAndPartition, nle)
        case e =>
          // NOTE(review): this bare `case e =>` matches any Throwable,
          // including fatal errors — consider scala.util.control.NonFatal.
          // BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
          // BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
          error("Error processing ProducerRequest with correlation id %d from client %s on partition %s"
            .format(producerRequest.correlationId, producerRequest.clientId, topicAndPartition), e)
          new ProduceResult(topicAndPartition, e)
      }
    }
  }
  /**
   * Handle a fetch request: follower fetches additionally advance follower
   * positions (possibly releasing delayed produce requests). The response is
   * immediate when enough data is available or no wait was requested;
   * otherwise the request is parked in the fetch purgatory.
   */
  def handleFetchRequest(request: RequestChannel.Request) {
    val fetchRequest = request.requestObj.asInstanceOf[FetchRequest]
    if(fetchRequest.isFromFollower) {
      maybeUpdatePartitionHw(fetchRequest)
      // after updating HW, some delayed produce requests may be unblocked
      var satisfiedProduceRequests = new mutable.ArrayBuffer[DelayedProduce]
      fetchRequest.requestInfo.foreach {
        case (topicAndPartition, _) =>
          val key = new RequestKey(topicAndPartition)
          satisfiedProduceRequests ++= producerRequestPurgatory.update(key, key)
      }
      debug("Replica %d fetch unblocked %d producer requests."
        .format(fetchRequest.replicaId, satisfiedProduceRequests.size))
      satisfiedProduceRequests.foreach(_.respond())
    }

    val dataRead = readMessageSets(fetchRequest)
    val bytesReadable = dataRead.values.map(_.messages.sizeInBytes).sum
    // Respond now if the client asked for no wait, enough bytes are available,
    // or the request covers no partitions; otherwise delay until satisfied.
    if(fetchRequest.maxWait <= 0 ||
      bytesReadable >= fetchRequest.minBytes ||
      fetchRequest.numPartitions <= 0) {
      debug("Returning fetch response %s for fetch request with correlation id %d to client %s"
        .format(dataRead.values.map(_.error).mkString(","), fetchRequest.correlationId, fetchRequest.clientId))
      val response = new FetchResponse(fetchRequest.correlationId, dataRead)
      requestChannel.sendResponse(new RequestChannel.Response(request, new FetchResponseSend(response)))
    } else {
      debug("Putting fetch request with correlation id %d from client %s into purgatory".format(fetchRequest.correlationId,
        fetchRequest.clientId))
      // create a list of (topic, partition) pairs to use as keys for this delayed request
      val delayedFetchKeys = fetchRequest.requestInfo.keys.toSeq.map(new RequestKey(_))
      val delayedFetch = new DelayedFetch(delayedFetchKeys, request, fetchRequest, fetchRequest.maxWait, bytesReadable)
      fetchRequestPurgatory.watch(delayedFetch)
    }
  }
private def maybeUpdatePartitionHw(fetchRequest: FetchRequest) {
debug("Maybe update partition HW due to fetch request: %s ".format(fetchRequest))
fetchRequest.requestInfo.foreach(info => {
val (topic, partition, offset) = (info._1.topic, info._1.partition, info._2.offset)
replicaManager.recordFollowerPosition(topic, partition, fetchRequest.replicaId, offset)
})
}
  /**
   * Read from all the offset details given and return a map of
   * (topic, partition) -> PartitionData. Per-partition read failures are
   * mapped to error codes in the partition data rather than failing the
   * whole request.
   */
  private def readMessageSets(fetchRequest: FetchRequest) = {
    val isFetchFromFollower = fetchRequest.isFromFollower
    fetchRequest.requestInfo.map
    {
      case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
        val partitionData =
          try {
            val (messages, highWatermark) = readMessageSet(topic, partition, offset, fetchSize, fetchRequest.replicaId)
            // BrokerTopicStats.getBrokerTopicStats(topic).bytesOutRate.mark(messages.sizeInBytes)
            // BrokerTopicStats.getBrokerAllTopicsStats.bytesOutRate.mark(messages.sizeInBytes)
            if (!isFetchFromFollower) {
              new FetchResponsePartitionData(ErrorMapping.NoError, highWatermark, messages)
            } else {
              debug("Leader %d for partition [%s,%d] received fetch request from follower %d"
                .format(brokerId, topic, partition, fetchRequest.replicaId))
              new FetchResponsePartitionData(ErrorMapping.NoError, highWatermark, messages)
            }
          } catch {
            // NOTE: Failed fetch requests is not incremented for UnknownTopicOrPartitionException and NotLeaderForPartitionException
            // since failed fetch requests metric is supposed to indicate failure of a broker in handling a fetch request
            // for a partition it is the leader for
            case utpe: UnknownTopicOrPartitionException =>
              warn("Fetch request with correlation id %d from client %s on partition [%s,%d] failed due to %s".format(
                fetchRequest.correlationId, fetchRequest.clientId, topic, partition, utpe.getMessage))
              new FetchResponsePartitionData(ErrorMapping.codeFor(utpe.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
            case nle: NotLeaderForPartitionException =>
              warn("Fetch request with correlation id %d from client %s on partition [%s,%d] failed due to %s".format(
                fetchRequest.correlationId, fetchRequest.clientId, topic, partition, nle.getMessage))
              new FetchResponsePartitionData(ErrorMapping.codeFor(nle.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
            case t =>
              // NOTE(review): bare `case t =>` matches any Throwable — consider NonFatal.
              // BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
              // BrokerTopicStats.getBrokerAllTopicsStats.failedFetchRequestRate.mark()
              error("Error when processing fetch request for partition [%s,%d] offset %d from %s with correlation id %d"
                .format(topic, partition, offset, if (isFetchFromFollower) "follower" else "consumer", fetchRequest.correlationId),
                t)
              new FetchResponsePartitionData(ErrorMapping.codeFor(t.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
          }
        (TopicAndPartition(topic, partition), partitionData)
    }
  }
/**
* Read from a single topic/partition at the given offset upto maxSize bytes
*/
private def readMessageSet(topic: String,
partition: Int,
offset: Long,
maxSize: Int,
fromReplicaId: Int): (MessageSet, Long) = {
// check if the current broker is the leader for the partitions
val localReplica = if(fromReplicaId == Request.DebuggingConsumerId)
replicaManager.getReplicaOrException(topic, partition)
else
replicaManager.getLeaderReplicaIfLocal(topic, partition)
trace("Fetching log segment for topic, partition, offset, size = " + (topic, partition, offset, maxSize))
val maxOffsetOpt = if (fromReplicaId == Request.OrdinaryConsumerId) {
Some(localReplica.highWatermark)
} else {
None
}
val messages = localReplica.log match {
case Some(log) =>
log.read(offset, maxSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] on broker %d does not have a local log".format(topic, partition, brokerId))
MessageSet.Empty
}
(messages, localReplica.highWatermark)
}
/**
* Service the offset request API
*/
  /**
   * Service the offset request API: for each requested partition return the list
   * of candidate offsets, truncated at the high watermark for ordinary clients.
   */
  def handleOffsetRequest(request: RequestChannel.Request) {
    val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest]
    val responseMap = offsetRequest.requestInfo.map(elem => {
      val (topicAndPartition, partitionOffsetRequestInfo) = elem
      try {
        // ensure leader exists
        val localReplica = if(!offsetRequest.isFromDebuggingClient)
          replicaManager.getLeaderReplicaIfLocal(topicAndPartition.topic, topicAndPartition.partition)
        else
          replicaManager.getReplicaOrException(topicAndPartition.topic, topicAndPartition.partition)
        val offsets = {
          val allOffsets = replicaManager.logManager.getOffsets(topicAndPartition,
                                                                partitionOffsetRequestInfo.time,
                                                                partitionOffsetRequestInfo.maxNumOffsets)
          if (!offsetRequest.isFromOrdinaryClient) allOffsets
          else {
            // Ordinary clients must not see offsets beyond the high watermark:
            // replace any such offsets with the high watermark itself.
            val hw = localReplica.highWatermark
            if (allOffsets.exists(_ > hw))
              hw +: allOffsets.dropWhile(_ > hw)
            else allOffsets
          }
        }
        (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.NoError, offsets))
      } catch {
        // NOTE: UnknownTopicOrPartitionException and NotLeaderForPartitionException are special cased since these error messages
        // are typically transient and there is no value in logging the entire stack trace for the same
        case utpe: UnknownTopicOrPartitionException =>
          warn("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
            offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition, utpe.getMessage))
          (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(utpe.getClass.asInstanceOf[Class[Throwable]]), Nil) )
        case nle: NotLeaderForPartitionException =>
          warn("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
            offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition,nle.getMessage))
          (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(nle.getClass.asInstanceOf[Class[Throwable]]), Nil) )
        // Untyped catch-all: maps any other failure to a per-partition error code.
        case e =>
          warn("Error while responding to offset request", e)
          (topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), Nil) )
      }
    })
    val response = OffsetResponse(offsetRequest.correlationId, responseMap)
    requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
  }
/**
* Service the topic metadata request API
*/
def handleTopicMetadataRequest(request: RequestChannel.Request) {
val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest]
val topicsMetadata = new mutable.ArrayBuffer[TopicMetadata]()
val config = replicaManager.config
var uniqueTopics = Set.empty[String]
uniqueTopics = {
if(metadataRequest.topics.size > 0)
metadataRequest.topics.toSet
else
ZkUtils.getAllTopics(zkClient).toSet
}
val topicMetadataList =
partitionMetadataLock synchronized {
uniqueTopics.map { topic =>
if(leaderCache.keySet.map(_.topic).contains(topic)) {
val partitionStateInfo = leaderCache.filter(p => p._1.topic.equals(topic))
val sortedPartitions = partitionStateInfo.toList.sortWith((m1,m2) => m1._1.partition < m2._1.partition)
val partitionMetadata = sortedPartitions.map { case(topicAndPartition, partitionState) =>
val replicas = leaderCache(topicAndPartition).allReplicas
var replicaInfo: Seq[Broker] = replicas.map(aliveBrokers.getOrElse(_, null)).filter(_ != null).toSeq
var leaderInfo: Option[Broker] = None
var isrInfo: Seq[Broker] = Nil
val leaderIsrAndEpoch = partitionState.leaderIsrAndControllerEpoch
val leader = leaderIsrAndEpoch.leaderAndIsr.leader
val isr = leaderIsrAndEpoch.leaderAndIsr.isr
debug("%s".format(topicAndPartition) + ";replicas = " + replicas + ", in sync replicas = " + isr + ", leader = " + leader)
try {
if(aliveBrokers.keySet.contains(leader))
leaderInfo = Some(aliveBrokers(leader))
else throw new LeaderNotAvailableException("Leader not available for partition %s".format(topicAndPartition))
isrInfo = isr.map(aliveBrokers.getOrElse(_, null)).filter(_ != null)
if(replicaInfo.size < replicas.size)
throw new ReplicaNotAvailableException("Replica information not available for following brokers: " +
replicas.filterNot(replicaInfo.map(_.id).contains(_)).mkString(","))
if(isrInfo.size < isr.size)
throw new ReplicaNotAvailableException("In Sync Replica information not available for following brokers: " +
isr.filterNot(isrInfo.map(_.id).contains(_)).mkString(","))
new PartitionMetadata(topicAndPartition.partition, leaderInfo, replicaInfo, isrInfo, ErrorMapping.NoError)
} catch {
case e =>
error("Error while fetching metadata for partition %s".format(topicAndPartition), e)
new PartitionMetadata(topicAndPartition.partition, leaderInfo, replicaInfo, isrInfo,
ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
}
}
new TopicMetadata(topic, partitionMetadata)
} else {
// topic doesn't exist, send appropriate error code
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.UnknownTopicOrPartitionCode)
}
}
}
// handle auto create topics
topicMetadataList.foreach { topicMetadata =>
topicMetadata.errorCode match {
case ErrorMapping.NoError => topicsMetadata += topicMetadata
case ErrorMapping.UnknownTopicOrPartitionCode =>
if (config.autoCreateTopicsEnable) {
try {
CreateTopicCommand.createTopic(zkClient, topicMetadata.topic, config.numPartitions, config.defaultReplicationFactor)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topicMetadata.topic, config.numPartitions, config.defaultReplicationFactor))
} catch {
case e: TopicExistsException => // let it go, possibly another broker created this topic
}
topicsMetadata += new TopicMetadata(topicMetadata.topic, topicMetadata.partitionsMetadata, ErrorMapping.LeaderNotAvailableCode)
} else {
topicsMetadata += topicMetadata
}
case _ =>
debug("Error while fetching topic metadata for topic %s due to %s ".format(topicMetadata.topic,
ErrorMapping.exceptionFor(topicMetadata.errorCode).getClass.getName))
topicsMetadata += topicMetadata
}
}
trace("Sending topic metadata %s for correlation id %d to client %s".format(topicsMetadata.mkString(","), metadataRequest.correlationId, metadataRequest.clientId))
val response = new TopicMetadataResponse(topicsMetadata.toSeq, metadataRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
}
  /** Shut down both request purgatories; called when this API handler is torn down. */
  def close() {
    debug("Shutting down.")
    fetchRequestPurgatory.shutdown()
    producerRequestPurgatory.shutdown()
    debug("Shut down complete.")
  }
  /** A key that can label a per-entity metric (e.g. a topic-partition). */
  private [kafka] trait MetricKey {
    // Human-readable label used when naming the metric.
    def keyLabel: String
  }
  private [kafka] object MetricKey {
    // Label for the aggregate (all topics/partitions) metric.
    val globalLabel = "All"
  }
  /** Metric/purgatory key identifying a single topic-partition. */
  private [kafka] case class RequestKey(topic: String, partition: Int)
          extends MetricKey {
    def this(topicAndPartition: TopicAndPartition) = this(topicAndPartition.topic, topicAndPartition.partition)
    def topicAndPartition = TopicAndPartition(topic, partition)
    // Label formatted as "<topic>-<partition>".
    override def keyLabel = "%s-%d".format(topic, partition)
  }
/**
* A delayed fetch request
*/
  class DelayedFetch(keys: Seq[RequestKey], request: RequestChannel.Request, val fetch: FetchRequest, delayMs: Long, initialSize: Long)
    extends DelayedRequest(keys, request, delayMs) {
    // Bytes available for this fetch so far; updated as new data arrives and
    // compared against the request's minBytes to decide satisfaction.
    val bytesAccumulated = new AtomicLong(initialSize)
  }
/**
* A holding pen for fetch requests waiting to be satisfied
*/
  class FetchRequestPurgatory(requestChannel: RequestChannel, purgeInterval: Int)
          extends RequestPurgatory[DelayedFetch, Int](brokerId, purgeInterval) {
    this.logIdent = "[FetchRequestPurgatory-%d] ".format(brokerId)
    /**
     * A fetch request is satisfied when it has accumulated enough data to meet the min_bytes field
     * @param messageSizeInBytes  bytes newly produced to a watched partition
     * @param delayedFetch        the pending fetch request being checked
     */
    def checkSatisfied(messageSizeInBytes: Int, delayedFetch: DelayedFetch): Boolean = {
      val accumulatedSize = delayedFetch.bytesAccumulated.addAndGet(messageSizeInBytes)
      accumulatedSize >= delayedFetch.fetch.minBytes
    }
    /**
     * When a request expires just answer it with whatever data is present
     */
    def expire(delayed: DelayedFetch) {
      debug("Expiring fetch request %s.".format(delayed.fetch))
      try {
        val topicData = readMessageSets(delayed.fetch)
        val response = FetchResponse(delayed.fetch.correlationId, topicData)
        val fromFollower = delayed.fetch.isFromFollower
        delayedRequestMetrics.recordDelayedFetchExpired(fromFollower)
        requestChannel.sendResponse(new RequestChannel.Response(delayed.request, new FetchResponseSend(response)))
      }
      catch {
        // Transient conditions during leadership change: drop the expired request silently.
        case e1: LeaderNotAvailableException =>
          debug("Leader changed before fetch request %s expired.".format(delayed.fetch))
        case e2: UnknownTopicOrPartitionException =>
          debug("Replica went offline before fetch request %s expired.".format(delayed.fetch))
      }
    }
  }
  /**
   * A produce request parked until the required number of replicas have
   * acknowledged each partition's data, or the request times out.
   */
  class DelayedProduce(keys: Seq[RequestKey],
                       request: RequestChannel.Request,
                       initialErrorsAndOffsets: Map[TopicAndPartition, ProducerResponseStatus],
                       val produce: ProducerRequest,
                       delayMs: Long)
          extends DelayedRequest(keys, request, delayMs) with Logging {
    /**
     * Map of (topic, partition) -> partition status
     * The values in this map don't need to be synchronized since updates to the
     * values are effectively synchronized by the ProducerRequestPurgatory's
     * update method
     */
    private [kafka] val partitionStatus = keys.map(requestKey => {
      val producerResponseStatus = initialErrorsAndOffsets(TopicAndPartition(requestKey.topic, requestKey.partition))
      // if there was an error in writing to the local replica's log, then don't
      // wait for acks on this partition
      val (acksPending, error, nextOffset) =
        if (producerResponseStatus.error == ErrorMapping.NoError) {
          // Timeout error state will be cleared when requiredAcks are received
          (true, ErrorMapping.RequestTimedOutCode, producerResponseStatus.offset)
        }
        else (false, producerResponseStatus.error, producerResponseStatus.offset)
      val initialStatus = PartitionStatus(acksPending, error, nextOffset)
      trace("Initial partition status for %s = %s".format(requestKey.keyLabel, initialStatus))
      (requestKey, initialStatus)
    }).toMap
    // Build the final producer response from the current per-partition status and send it.
    def respond() {
      val finalErrorsAndOffsets = initialErrorsAndOffsets.map(
        status => {
          val pstat = partitionStatus(new RequestKey(status._1))
          (status._1, ProducerResponseStatus(pstat.error, pstat.requiredOffset))
        })
      val response = ProducerResponse(produce.correlationId, finalErrorsAndOffsets)
      requestChannel.sendResponse(new RequestChannel.Response(
        request, new BoundedByteBufferSend(response)))
    }
    /**
     * Returns true if this delayed produce request is satisfied (or more
     * accurately, unblocked) -- this is the case if for every partition:
     * Case A: This broker is not the leader: unblock - should return error.
     * Case B: This broker is the leader:
     *   B.1 - If there was a localError (when writing to the local log): unblock - should return error
     *   B.2 - else, at least requiredAcks replicas should be caught up to this request.
     *
     * As partitions become acknowledged, we may be able to unblock
     * DelayedFetchRequests that are pending on those partitions.
     */
    def isSatisfied(followerFetchRequestKey: RequestKey) = {
      val topic = followerFetchRequestKey.topic
      val partitionId = followerFetchRequestKey.partition
      val key = RequestKey(topic, partitionId)
      val fetchPartitionStatus = partitionStatus(key)
      trace("Checking producer request satisfaction for %s-%d, acksPending = %b"
        .format(topic, partitionId, fetchPartitionStatus.acksPending))
      if (fetchPartitionStatus.acksPending) {
        val partitionOpt = replicaManager.getPartition(topic, partitionId)
        val (hasEnough, errorCode) = partitionOpt match {
          case Some(partition) =>
            partition.checkEnoughReplicasReachOffset(fetchPartitionStatus.requiredOffset, produce.requiredAcks)
          case None =>
            (false, ErrorMapping.UnknownTopicOrPartitionCode)
        }
        // Order matters: an explicit error clears the pending flag before a success check.
        if (errorCode != ErrorMapping.NoError) {
          fetchPartitionStatus.acksPending = false
          fetchPartitionStatus.error = errorCode
        } else if (hasEnough) {
          fetchPartitionStatus.acksPending = false
          fetchPartitionStatus.error = ErrorMapping.NoError
        }
        if (!fetchPartitionStatus.acksPending) {
          // Newly acknowledged data may satisfy fetch requests waiting on this partition.
          val messageSizeInBytes = produce.topicPartitionMessageSizeMap(followerFetchRequestKey.topicAndPartition)
          maybeUnblockDelayedFetchRequests(topic, partitionId, messageSizeInBytes)
        }
      }
      // unblocked if there are no partitions with pending acks
      val satisfied = ! partitionStatus.exists(p => p._2.acksPending)
      trace("Producer request satisfaction for %s-%d = %b".format(topic, partitionId, satisfied))
      satisfied
    }
    /**
     * Mutable per-partition ack-tracking state.
     * @param acksPending     true while replica acknowledgements are still awaited
     * @param error           error code to report for this partition so far
     * @param requiredOffset  offset the replicas must reach for the request to succeed
     */
    case class PartitionStatus(var acksPending: Boolean,
                               var error: Short,
                               requiredOffset: Long) {
      def setThisBrokerNotLeader() {
        error = ErrorMapping.NotLeaderForPartitionCode
        acksPending = false
      }
      override def toString =
        "acksPending:%b, error: %d, requiredOffset: %d".format(
          acksPending, error, requiredOffset
        )
    }
  }
/**
* A holding pen for produce requests waiting to be satisfied.
*/
  /**
   * A holding pen for produce requests waiting to be satisfied.
   */
  private [kafka] class ProducerRequestPurgatory(purgeInterval: Int)
          extends RequestPurgatory[DelayedProduce, RequestKey](brokerId, purgeInterval) {
    this.logIdent = "[ProducerRequestPurgatory-%d] ".format(brokerId)
    // A follower fetch on a partition may satisfy a pending produce on that partition.
    protected def checkSatisfied(followerFetchRequestKey: RequestKey,
                                 delayedProduce: DelayedProduce) =
      delayedProduce.isSatisfied(followerFetchRequestKey)
    /**
     * Handle an expired delayed request: record metrics for partitions still
     * awaiting acks, then respond with whatever status has been accumulated.
     */
    protected def expire(delayedProduce: DelayedProduce) {
      for (partitionStatus <- delayedProduce.partitionStatus if partitionStatus._2.acksPending)
        delayedRequestMetrics.recordDelayedProducerKeyExpired(partitionStatus._1)
      delayedProduce.respond()
    }
  }
  /**
   * Metrics for expired delayed produce/fetch requests. The actual meters are
   * currently commented out, so the record methods are effectively no-ops that
   * only maintain the per-key metric pool.
   */
  private class DelayedRequestMetrics {
    private class DelayedProducerRequestMetrics(keyLabel: String = MetricKey.globalLabel) extends KafkaMetricsGroup {
      // val expiredRequestMeter = newMeter(keyLabel + "ExpiresPerSecond", "requests", TimeUnit.SECONDS)
    }
    private class DelayedFetchRequestMetrics(forFollower: Boolean) extends KafkaMetricsGroup {
      private val metricPrefix = if (forFollower) "Follower" else "Consumer"
      // val expiredRequestMeter = newMeter(metricPrefix + "ExpiresPerSecond", "requests", TimeUnit.SECONDS)
    }
    // Lazily creates one metrics object per (topic, partition) key.
    private val producerRequestMetricsForKey = {
      val valueFactory = (k: MetricKey) => new DelayedProducerRequestMetrics(k.keyLabel + "-")
      new Pool[MetricKey, DelayedProducerRequestMetrics](Some(valueFactory))
    }
    private val aggregateProduceRequestMetrics = new DelayedProducerRequestMetrics
    private val aggregateFollowerFetchRequestMetrics = new DelayedFetchRequestMetrics(forFollower = true)
    private val aggregateNonFollowerFetchRequestMetrics = new DelayedFetchRequestMetrics(forFollower = false)
    def recordDelayedProducerKeyExpired(key: MetricKey) {
      val keyMetrics = producerRequestMetricsForKey.getAndMaybePut(key)
      // List(keyMetrics, aggregateProduceRequestMetrics).foreach(_.expiredRequestMeter.mark())
    }
    def recordDelayedFetchExpired(forFollower: Boolean) {
      val metrics = if (forFollower) aggregateFollowerFetchRequestMetrics
        else aggregateNonFollowerFetchRequestMetrics
      // metrics.expiredRequestMeter.mark()
    }
  }
}
| kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/server/KafkaApis.scala | Scala | apache-2.0 | 38,267 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js IR **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2014, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.core.ir
/** Serialization and hashing tags for trees and types */
private[ir] object Tags {
  // Each tag is defined as the previous tag + 1, so the numbering is a dense
  // sequence; inserting a tag in the middle would renumber everything after it
  // and break binary compatibility of serialized IR.

  // Tags for Trees
  /** Use to denote optional trees. */
  final val TagEmptyTree = 1
  final val TagVarDef = TagEmptyTree + 1
  final val TagParamDef = TagVarDef + 1
  final val TagSkip = TagParamDef + 1
  final val TagBlock = TagSkip + 1
  final val TagLabeled = TagBlock + 1
  final val TagAssign = TagLabeled + 1
  final val TagReturn = TagAssign + 1
  final val TagIf = TagReturn + 1
  final val TagWhile = TagIf + 1
  final val TagDoWhile = TagWhile + 1
  // TODO remove when we can break binary compat.
  final val TagTry = TagDoWhile + 1
  final val TagThrow = TagTry + 1
  final val TagContinue = TagThrow + 1
  final val TagMatch = TagContinue + 1
  final val TagDebugger = TagMatch + 1
  final val TagNew = TagDebugger + 1
  final val TagLoadModule = TagNew + 1
  final val TagStoreModule = TagLoadModule + 1
  final val TagSelect = TagStoreModule + 1
  final val TagApply = TagSelect + 1
  final val TagApplyStatically = TagApply + 1
  final val TagApplyStatic = TagApplyStatically + 1
  final val TagUnaryOp = TagApplyStatic + 1
  final val TagBinaryOp = TagUnaryOp + 1
  final val TagNewArray = TagBinaryOp + 1
  final val TagArrayValue = TagNewArray + 1
  final val TagArrayLength = TagArrayValue + 1
  final val TagArraySelect = TagArrayLength + 1
  final val TagRecordValue = TagArraySelect + 1
  final val TagIsInstanceOf = TagRecordValue + 1
  final val TagAsInstanceOf = TagIsInstanceOf + 1
  final val TagUnbox = TagAsInstanceOf + 1
  final val TagGetClass = TagUnbox + 1
  final val TagCallHelper = TagGetClass + 1
  final val TagJSNew = TagCallHelper + 1
  final val TagJSDotSelect = TagJSNew + 1
  final val TagJSBracketSelect = TagJSDotSelect + 1
  final val TagJSFunctionApply = TagJSBracketSelect + 1
  final val TagJSDotMethodApply = TagJSFunctionApply + 1
  final val TagJSBracketMethodApply = TagJSDotMethodApply + 1
  final val TagJSDelete = TagJSBracketMethodApply + 1
  final val TagJSUnaryOp = TagJSDelete + 1
  final val TagJSBinaryOp = TagJSUnaryOp + 1
  final val TagJSArrayConstr = TagJSBinaryOp + 1
  final val TagJSObjectConstr = TagJSArrayConstr + 1
  final val TagJSEnvInfo = TagJSObjectConstr + 1
  final val TagUndefined = TagJSEnvInfo + 1
  final val TagUndefinedParam = TagUndefined + 1 // TODO Move this
  final val TagNull = TagUndefinedParam + 1
  final val TagBooleanLiteral = TagNull + 1
  final val TagIntLiteral = TagBooleanLiteral + 1
  final val TagLongLiteral = TagIntLiteral + 1
  final val TagFloatLiteral = TagLongLiteral + 1
  final val TagDoubleLiteral = TagFloatLiteral + 1
  final val TagStringLiteral = TagDoubleLiteral + 1
  final val TagClassOf = TagStringLiteral + 1
  final val TagVarRef = TagClassOf + 1
  final val TagThis = TagVarRef + 1
  final val TagClosure = TagThis + 1
  final val TagClassDef = TagClosure + 1
  final val TagFieldDef = TagClassDef + 1
  final val TagMethodDef = TagFieldDef + 1
  final val TagPropertyDef = TagMethodDef + 1
  final val TagConstructorExportDef = TagPropertyDef + 1
  final val TagModuleExportDef = TagConstructorExportDef + 1
  // TODO Reorganize these when we can break binary compatibility
  // Tags below were appended over time (rather than inserted in a logical
  // place) to keep the numbering of existing tags stable.
  final val TagJSSpread = TagModuleExportDef + 1
  final val TagJSLinkingInfo = TagJSSpread + 1
  final val TagStringLitFieldDef = TagJSLinkingInfo + 1
  final val TagJSSuperBracketSelect = TagStringLitFieldDef + 1
  final val TagJSSuperBracketCall = TagJSSuperBracketSelect + 1
  final val TagJSSuperConstructorCall = TagJSSuperBracketCall + 1
  final val TagLoadJSConstructor = TagJSSuperConstructorCall + 1
  final val TagLoadJSModule = TagLoadJSConstructor + 1
  final val TagJSClassExportDef = TagLoadJSModule + 1
  final val TagTryCatch = TagJSClassExportDef + 1
  final val TagTryFinally = TagTryCatch + 1
  final val TagTopLevelMethodExportDef = TagTryFinally + 1
  final val TagSelectStatic = TagTopLevelMethodExportDef + 1
  final val TagTopLevelFieldExportDef = TagSelectStatic + 1
  final val TagTopLevelModuleExportDef = TagTopLevelFieldExportDef + 1

  // Tags for Types (independent numbering space from tree tags)
  final val TagAnyType = 1
  final val TagNothingType = TagAnyType + 1
  final val TagUndefType = TagNothingType + 1
  final val TagBooleanType = TagUndefType + 1
  final val TagIntType = TagBooleanType + 1
  final val TagLongType = TagIntType + 1
  final val TagFloatType = TagLongType + 1
  final val TagDoubleType = TagFloatType + 1
  final val TagStringType = TagDoubleType + 1
  final val TagNullType = TagStringType + 1
  final val TagClassType = TagNullType + 1
  final val TagArrayType = TagClassType + 1
  final val TagRecordType = TagArrayType + 1
  final val TagNoType = TagRecordType + 1

  // Tags for PropertyNames (independent numbering space)
  final val TagPropertyNameIdent = 1
  final val TagPropertyNameStringLiteral = TagPropertyNameIdent + 1
  final val TagPropertyNameComputedName = TagPropertyNameStringLiteral + 1

  // Tags for JS native loading specs (independent numbering space, 0-based)
  final val TagJSNativeLoadSpecNone = 0
  final val TagJSNativeLoadSpecGlobal = TagJSNativeLoadSpecNone + 1
  final val TagJSNativeLoadSpecImport = TagJSNativeLoadSpecGlobal + 1
}
| xuwei-k/scala-js | ir/src/main/scala/org/scalajs/core/ir/Tags.scala | Scala | bsd-3-clause | 5,673 |
import comm.models.{ErrorInfo, IotaAcqResult, IotaData}
import comm.utils.JsonHelper
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.eclipse.paho.client.mqttv3.{MqttClient, MqttConnectOptions, MqttMessage}
import org.joda.time.DateTime
import scala.math.BigDecimal.RoundingMode
import scala.util.Random
/**
* Created by yww08 on 2019/1/16.
*/
/**
 * Created by yww08 on 2019/1/16.
 *
 * Ad-hoc tool: publishes a single hand-crafted IotaData JSON message to the
 * "anxinyun_data" MQTT topic for threshold testing.
 */
object MqttYCThresholds {
  def main(args: Array[String]): Unit = {
    /**
     * -----+--------------------------------------+--------------------+--------+--------+-----+-----------+------+--------+----------+--------+-------------+---------------------------------------------------------------------------------------------
     * 898 | b2158a50-4152-4504-9088-35c7b9f3daec | 0 | 693 | {} | 693 | 202 | 扬尘 | 250 | | {} | f | {"latitude": 28.88402, "longitude": 115.38859, "stationTypeId": "1", "divisionTypeId": "4"}
     *
     */
    // Non-clean session so the broker retains subscription state between runs.
    val connOpt = new MqttConnectOptions()
    connOpt.setCleanSession(false)
    // NOTE(review): broker address and client id are hard-coded; consider making
    // them configurable rather than pointing at a fixed public IP.
    val client = new MqttClient("tcp://221.230.55.28:1883", "test-send-mqtt-client", new MemoryPersistence())
    client.connect(connOpt)
    val thingId = "940c71fb-b561-4ae9-994a-e1417d3af004"
    val deviceId = "b2158a50-4152-4504-9088-35c7b9f3daec"
    val time = new DateTime(2019, 1, 10, 0, 0)
    // Serialize one dust-monitoring (pm/noise/weather) sample to JSON; JsonHelper
    // returns the payload in the first slot of a tuple.
    var line = JsonHelper.Object2Json(IotaData("1b2d8739-627e-4b7d-9480-3eee6e9396fe",
      thingId,
      deviceId,
      "715cc7f8-873a-48f5-a0c2-af6e8c9e89ab",
      "dd1202cd-3e51-49d5-a326-e617f9d1008c",
      new DateTime(2019, 1, 14, 18, 30), time,
      IotaAcqResult(ErrorInfo(0, None, None), Some(Map(
        "pm25" -> 32.6,
        "pm10" -> 23.2,
        "noise" -> 45.1,
        "temperature" -> 3.5,
        "humidity" -> 96.1,
        "speed" -> 0,
        "direction" -> 233
      )))
    ))._1.get
    println(line)
    client.publish("anxinyun_data", new MqttMessage(line.getBytes("UTF-8")))
    // Alternative bulk-replay path from a resource file, kept for reference:
    // val source = Source.fromInputStream(getClass.getResourceAsStream("/mqtts.txt"))
    // val lines = source.mkString.split("\\r\\n")
    // lines.sliding(20).foreach(dts => {
    // dts.foreach(d => {
    // if (d != null && d.length > 0)
    // client.publish("anxinyun_data", new MqttMessage(d.getBytes("UTF-8")))
    // })
    // Thread.sleep(5 * 1000)
    // })
    client.disconnect()
    client.close()
  }
}
| yinweiwen/study | demo/datamover/src/main/scala/MqttYCThresholds.scala | Scala | mit | 2,308 |
package controllers
import java.util.concurrent.TimeUnit
import org.specs2.mutable._
import play.api.libs.json._
import play.api.test.Helpers._
import play.api.test._
import scala.concurrent._
import scala.concurrent.duration._
/**
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
class DishesIT extends Specification {
val timeout: FiniteDuration = FiniteDuration(5, TimeUnit.SECONDS)
"Dishes" should {
"insert a valid json" in {
running(FakeApplication()) {
val request = FakeRequest.apply(POST, "/dish").withJsonBody(Json.obj(
"firstName" -> "Jack",
"lastName" -> "London",
"age" -> 27,
"active" -> true))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(CREATED)
}
}
"fail inserting a non valid json" in {
running(FakeApplication()) {
val request = FakeRequest.apply(POST, "/dish").withJsonBody(Json.obj(
"firstName" -> 98,
"lastName" -> "London",
"age" -> 27))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
contentAsString(response.get) mustEqual "invalid json"
result.header.status mustEqual BAD_REQUEST
}
}
}
} | joludigo/home-made | test/controllers/DishesIT.scala | Scala | apache-2.0 | 1,468 |
// NOTE(review): this is IDE extract-method test fixture data — the /*start*/ and /*end*/
// markers delimit the extraction range and the trailing block comment below holds the
// expected refactoring output; presumably any edit here must keep both in sync — verify
// against the test harness before changing.
object ParameterAsQualifier {
  def foo(first: String, second: String) {
    val i = 0
    /*start*/first.charAt(i).isUpper/*end*/ || second.charAt(1).isUpper
  }
}
/*
object ParameterAsQualifier {
def foo(first: String, second: String) {
val i = 0
testMethodName(first, i) || testMethodName(second, 1)
}
def testMethodName(first: String, i: Int): Boolean = {
first.charAt(i).isUpper
}
}
*/ | double-y/translation-idea-plugin | testdata/extractMethod/duplicates/ParameterAsQualifier.scala | Scala | apache-2.0 | 411 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Michael Cotterell
* @version 1.3
* @date Mon Mar 2 16:18:29 EST 2015
* @see LICENSE (MIT style license file).
*
* @see Matrix Computation, 4th ed.
* @see www2.cs.cas.cz/mweb/download/publi/Ples2006.pdf
* @see www.math.iit.edu/~fass/477577_Chapter_12.pdf
* @see Handbook of Linear Algrbra, Chapter 45
* @see cs.fit.edu/~dmitra/SciComp/11Spr/SVD-Presentation-Updated2.ppt
* @see www.cs.utexas.edu/users/inderjit/public_papers/HLA_SVD.pdf
* @see people.duke.edu/~hpgavin/SystemID/References/Golub+Reinsch-NM-1970.pdf
*/
// U N D E R D E V E L O P M E N T
package scalation.linalgebra
import scala.math.abs
import scalation.linalgebra.Givens.{givens, givensRo, givensRoT, givensColUpdate, givensRowUpdate}
import scalation.linalgebra.MatrixD.eye
import scalation.math.double_exp
import scalation.math.ExtremeD.EPSILON
import scalation.util.{banner, Error, sline}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4` class is used to compute the Singular Value Decomposition 'SVD' of
* matrix 'aa' using the Golub-Kahan-Reinsch Algorithm.
* Factor/decompose matrix 'aa' into the product of three matrices:
* <p>
* aa = uu * a * vv.t
* <p>
* where 'uu' is a matrix of orthogonal eigenvectors of 'aa * aa.t'
* (LEFT SINGULAR VECTORS)
* 'vv' is a matrix of orthogonal eigenvectors of 'aa.t * aa'
* (RIGHT SINGULAR VECTORS) and
* 'a' is a diagonal matrix of square roots of eigenvalues of 'aa.t * aa' & 'aa * aa.t'
* (SINGULAR VALUES).
* FIX: need to reorder so singular values are in decreasing order.
* FIX: make the singular values positive
*------------------------------------------------------------------------------
* @param aa the m-by-n matrix to deflate/decompose (algorithm requires m >= n)
*/
class SVD4 (aa: MatrixD)
extends SVDecomp with Error
{
private val DEBUG = false // debug flag
private val m = aa.dim1 // number of rows
private val n = aa.dim2 // number of columns
if (n > m) flaw ("constructor", "SVD4 implementation requires m >= n")
private var a = aa.copy // work on modifiable copy of aa (will hold singular values)
private var uu: MatrixD = null // left orthogonal matrix uu = u_1 * ... u_k
private var s: VectorD = null // vector of singular values (main diagonal of a after deflation)
private var vv: MatrixD = null // right orthogonal matrix vv = v_1 * ... v_k
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor matrix 'a' into the product of a matrix of left singular vectors 'uu',
* a vector of singular values 's' and a matrix of right singular vectors 'vv'
* such that 'a = uu ** s * vv.t'.
*/
override def factor (): Tuple3 [MatrixD, VectorD, MatrixD] =
{
if (! a.isBidiagonal) {
val bid = new Bidiagonal2 (a)
val (u, b, v) = bid.bidiagonalize () // turn a into a bidiagonal matrix
uu = u; a = b; vv = v
} else {
// uu = eye (m); vv = eye (n)
uu = eye (m, n); vv = eye (n)
} // if
if (DEBUG) println ("factor: bidiagonal a = " + a)
deflate () // deflate the superdiagonal
s = a.getDiag () // get the singular values from matrix a
reorder () // reorder so largest singular values come first
(uu, s, vv)
} // factor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Deflate matrix 'a' forming a diagonal matrix consisting of singular
* values and return the singular values in vector 's'. Also return the
* singular vector matrices 'uu' and 'vv'.
* @see Matrix Computation: Algorithm 8.6.2 SVD Algorithm.
*/
private def deflate ()
{
var p = 0 // # zero elements in left end of superdiagonal
var q = 0 // # zero elements in right end of superdiagonal
while (true) {
for (i <- 0 until n-1) {
if (abs (a(i, i+1)) < EPSILON * (abs (a(i, i)) + abs (a(i+1, i+1)))) a(i, i+1) = 0.0
} // for
val (p, q) = findMiddle ()
if (q >= n-1) return // return since no non-zero elements remain in superdiagonal
val k = findZero (p, n-q)
if (k >= 0) {
if (DEBUG) println ("deflate: found zero on diagonal at " + k)
// use Givens rotation to make superdiagonal element a(k, k+1) = 0.0
val cs = givens (a(k-1, k+1), a(k, k+1))
val u = givensRoT (k-1, k, n, cs) // left orthogonal matrix u_k^t
a = u * a // zero element with Givens rotations
} else {
diagonStep (p, q)
} // if
if (DEBUG) println ("deflate: a = " + a)
} // while
} // deflate
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take one step in converting the bidiagonal matrix 'a' to a diagonal matrix.
* That is, reduce the middle run of nonzero super-diagonal elements by one.
* @see Matrix Computation: Algorithm 8.6.1 Golub-Kahan Step.
* @param p the size of the head of the super-diagonal
* @param q the size of the tail of the super-diagonal
*/
private def diagonStep (p: Int, q: Int)
{
import SVD4.trailing
import Eigen_2by2.eigenvalues
val tt = trailing (a(p until n-q, p until n-q)) // trailing 2-by-2 submatrix of a.t * a
val l = eigenvalues (tt) // the eigenvalues of the submatrix
if (DEBUG) println ("diagonStep: tt = " + tt + "\\ndiagonStep: l = " + l)
val td = tt(1, 1) // last diagonal element in a.t * a
val mu = if (abs (td - l(0)) <= abs (td - l(1))) l(0) else l(1) // pick closest eigenvalue
var y = a(p, p) * a(p, p) - mu
var z = a(p, p) * a(p, p+1)
if (DEBUG) println ("diagonStep: (mu, y, z) = " + (mu, y, z))
for (k <- p until n-1-q) {
// Givens rotation 1: k, k+1, theta1 (c1, s1); zero right
val cs1 = givens (y, z) // compute rotation cosine and sine
givensColUpdate (a, k, k+1, cs1) // rotate to clear an element in a
val v = givensRo (k, k+1, n, cs1) // right orthogonal matrix v_k
vv = vv * v // update vv
if (DEBUG) {
println ("diagonStep (" + k + "): rotation 1: (c1, s1) = " + cs1)
println ("diagonStep (" + k + "): rotation 1: v = " + v)
println ("diagonStep (" + k + "): rotation 1: a = " + a)
} // if
y = a(k, k); z = a(k+1, k)
if (DEBUG) println ("diagonStep: (y, z) = " + (y, z))
// Givens rotation 2: k, k+1, theta2 (c2, s2); zero down
val cs2 = givens (y, z) // compute rotation cosine and sine
givensRowUpdate (a, k, k+1, cs2) // rotate to clear an element in a
val u = givensRo (k, k+1, m, cs2) // left orthogonal matrix u_k^t
uu = uu * u // update uu
if (DEBUG) {
println ("diagonStep (" + k + "): rotation 2: (c2, s2) = " + cs2)
println ("diagonStep (" + k + "): rotation 2: u = " + u)
println ("diagonStep (" + k + "): rotation 2: a = " + a)
} // if
if (k < n-q-2) {
y = a(k, k+1); z = a(k, k+2)
if (DEBUG) println ("diagonStep: (y, z) = " + (y, z))
} // if
} // for
} // diagonStep
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find/return the index of the first diagonal entry in 'a' from 'j' until 'k'
* that is zero; otherwise -1 (not found).
* @param j start the search here
* @param k end the search here
*/
private def findZero (j: Int, k: Int): Int =
{
for (i <- j until k if a(i, i) =~ 0.0) return i
-1
} // findZero
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the run of nonzero elements in the middle of the super-diagonal
     *  of matrix 'a' such that the tail super-diagonal contains only zeros.
     *  Return p the size of the head and q the size of the tail.
     */
    private def findMiddle (): Tuple2 [Int, Int] =
    {
        var i = n - 1
        while (i >= 1 && a(i-1, i) =~ 0.0) i -= 1            // walk left over the zero tail
        val q = n - 1 - i                                    // q = length of the zero tail
        while (i >= 1 && ! (a(i-1, i) =~ 0.0)) i -= 1        // walk left over the nonzero middle run
        val p = i                                            // p = length of the head
        if (DEBUG) println ("findMiddle: (p, q) = " + (p, q))
        (p, q)
    } // findMiddle
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Reorder the singular values to be in non-increasing order.  Must swap
     *  singular vectors in lock step with singular values.  To minimize the
     *  number of swaps, selection sort is used.
     */
    private def reorder ()
    {
        for (i <- 0 until n) {
            // index of the largest remaining singular value
            // NOTE(review): if VectorD.argmax is relative to the slice 's(i until n)',
            // this needs an 'i +' offset before swapping — confirm argmax's convention.
            val j = s(i until n).argmax ()
            if (i != j) {
                s.swap (i, j)                                // swap singular values ...
                uu.swapCol (i, j)                            // ... and both singular vector
                vv.swapCol (i, j)                            //     matrices, in lock step
            } // if
        } // for
    } // reorder
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Solve for `x` in `a^t*a*x = b` using `SVD`.
     *  NOTE(review): the body computes the pseudo-inverse solution
     *  x = v * d^-1 * (u.t * b), i.e. the least-squares solution of a*x = b;
     *  confirm the `a^t*a*x = b` claim above.
     *  @param b  the constant vector
     */
    def solve (b: VectorD): VectorD =
    {
        val (u, d, vt) = factor ()               // factor using SVD4
        val alpha = u.t * b                      // principal component regression
        vt ** d.recip * alpha                    // estimate coefficients
    } // solve
} // SVD4 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4` companion object: helper for trailing 2-by-2 submatrices and
 *  reusable self-test drivers.
 */
object SVD4
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the trailing 2-by-2 submatrix of 'b.t * b' without multiplying
     *  the full matrices; only the last three entries of the bidiagonal 'b'
     *  contribute.
     *  @param b  the given bidiagonal matrix
     */
    def trailing (b: MatrixD): MatrixD =
    {
//      println ("trailing: b = " + b)
        val n3 = b.dim2 - 1                       // index of the last column
        val n2 = n3 - 1
        val n1 = n2 - 1                           // becomes -1 when 'b' has only two columns
        val b12 = if (n1 < 0) 0.0 else b(n1, n2)  // guard the 2-column case
        val b22 = b(n2, n2)
        val b23 = b(n2, n3)
        val b33 = b(n3, n3)
        new MatrixD ((2, 2), b12*b12 + b22*b22, b22*b23,
                             b22*b23,           b23*b23 + b33*b33)
    } // trailing
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Test the SVD4 Factorization algorithm on matrix 'a' by factoring the matrix
     *  into a left matrix u, a vector s, and a right matrix v.  Then multiply back
     *  to recover the original matrix.
     *  @param a     the given matrix to factor
     *  @param name  the name of the test case
     */
    def test (a: MatrixD, name: String)
    {
        banner (name)
        println ("original matrix a = " + a)
        val svd = new SVD4 (a)                               // Singular Value Decomposition object
        val (u, s, v) = svd.factor ()                        // factor matrix a
        println (sline () + "svd.factor: (u, s, v) = " + (u, s, v))
        val prod = u ** s * v.t                              // compute the product
        println (sline () + "check: u ** s * v.t = " + prod)   // should equal the original a matrix
        println ("a - prod = " + (a - prod))
        // NOTE(review): exact '==' between floating-point matrices — presumably
        // MatrixD equality is tolerance-based; otherwise this assert is fragile.
        assert (prod == a)
    } // test
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Test the SVD4 Factorization algorithm on a bidiagonalization of matrix 'aa',
     *  factoring it into a left matrix 'uu', bidiagonal matrix 'bb', and right
     *  matrix 'vv'.  Then multiply back to recover the original matrix.
     *  @param aa    the given matrix to bidiagonalize and then factor
     *  @param name  the name of the test case
     */
    def testBid (aa: MatrixD, name: String)
    {
        val a = aa.copy                                      // make a copy of aa (presumably
                                                             // Bidiagonal2 mutates it — confirm)
        val bid = new Bidiagonal2 (a)                        // Householder Bidiagonalization
        val (uu, bb, vv) = bid.bidiagonalize ()              // bidiagonalize a
        println (sline () + "bid.bidiagonalize: (uu, bb, vv) = " + (uu, bb, vv))
        test (bb, name)
    } // testBid
} // SVD4 object
import SVD4.{test, testBid}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test` object exercises the `SVD4` class on a 2-by-2 matrix that is
 *  already in bidiagonal form; the first step sees eigenvalues 28 and 18.
 *  The reference factors 'u_', 'b_' and 'v_' come from the MIT demo page.
 *  @see ocw.mit.edu/ans7870/18/18.06/javademo/SVD/
 *  > run-main scalation.linalgebra.SVD4Test
 */
object SVD4Test extends App
{
    // matrix under test: 2-by-2, already bidiagonal
    val aMat = new MatrixD ((2, 2), 1.00, 2.00,
                                    0.00, 2.00)

    // reference factorization reported by the demo web page
    val u_ = new MatrixD ((2, 2), 0.75, -0.66,
                                  0.66,  0.75)
    val b_ = new MatrixD ((2, 2), 2.92,  0.00,
                                  0.00,  0.68)
    val v_ = new MatrixD ((2, 2), 0.26, -0.97,
                                  0.97,  0.26)

    println ("svd: (u_, b_, v_) = " + (u_, b_, v_))   // answer from Web page
    println ("u_b_v_.t = " + u_ * b_ * v_.t)          // should recompose the original matrix

    test (aMat, "SVD4Test")
} // SVD4Test object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test2` object exercises the `SVD4` class on a 3-by-3 bidiagonal
 *  matrix.  Expected singular values: (3.82983, 1.91368, 0.81866).
 *  > run-main scalation.linalgebra.SVD4Test2
 */
object SVD4Test2 extends App
{
    // 3-by-3 upper-bidiagonal matrix under test
    val b = new MatrixD ((3, 3), 1.0, 1.0, 0.0,
                                 0.0, 2.0, 2.0,
                                 0.0, 0.0, 3.0)
    test (b, "SVD4Test2")
} // SVD4Test2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test3` object exercises the `SVD4` class starting from general
 *  (i.e., not yet bidiagonalized) matrices.  All problems with m >= n from
 *  the web pages below are tried.
 *  @see www.mathstat.uottawa.ca/~phofstra/MAT2342/SVDproblems.pdf
 *  @see mysite.science.uottawa.ca/phofstra/MAT2342/SVDproblems.pdf
 *  > run-main scalation.linalgebra.SVD4Test3
 */
object SVD4Test3 extends App
{
    import scala.math.sqrt

    // problem 2: symmetric 2-by-2 matrix
    val m2 = new MatrixD ((2, 2), 1.0, 2.0,
                                  2.0, 1.0)
    testBid (m2, "SVD4Test3_2b")                  // factor after bidiagonalization
    test (m2, "SVD4Test3_2")                      // factor the original matrix

    // problem 3: 3-by-3 matrix
    val m3 = new MatrixD ((3, 3), 0.0,     1.0, 1.0,
                                  sqrt(2), 2.0, 0.0,
                                  0.0,     1.0, 1.0)
//  testBid (m3, "SVD4Test3_3b")                  // FIX - fails on the bidiagonalized matrix
    test (m3, "SVD4Test3_3")                      // factor the original matrix
} // SVD4Test3 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test4` object exercises the `SVD4` class on a general 4-by-4
 *  matrix (not bidiagonal).
 *  > run-main scalation.linalgebra.SVD4Test4
 */
object SVD4Test4 extends App
{
    // general (dense) 4-by-4 matrix under test
    val m = new MatrixD ((4, 4), 0.9501, 0.8913, 0.8214, 0.9218,
                                 0.2311, 0.7621, 0.4447, 0.7382,
                                 0.6068, 0.4565, 0.6154, 0.1763,
                                 0.4860, 0.0185, 0.7919, 0.4057)
//  testBid (m, "SVD4Test34")                     // test the bidiagonalized matrix
    test (m, "SVD4Test4")                         // test the original matrix
} // SVD4Test4 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test5` object exercises the `SVD4` class on a non-square
 *  (3-by-2) matrix.
 *  > run-main scalation.linalgebra.SVD4Test5
 */
object SVD4Test5 extends App
{
    // rectangular 3-by-2 matrix under test
    val m = new MatrixD ((3, 2), 4, 5,
                                 6, 7,
                                 9, 8)
//  testBid (m, "SVD4Test5b")                     // test the bidiagonalized matrix
    test (m, "SVD4Test5")                         // test the original matrix
} // SVD4Test5 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test6` object exercises the `SVD4` class on a larger (5-by-3)
 *  problem.  (The original header misnamed this object `SVD4Test5`.)
 *  @see www.maths.manchester.ac.uk/~peterf/MATH48062/math48062%20Calculating%20and%20using%20the%20svd%20of%20a%20matrix.pdf
 *  FIX: this example does not work, in the sense that it does not converge to 'TOL'.
 *  > run-main scalation.linalgebra.SVD4Test6
 */
object SVD4Test6 extends App
{
    // rectangular 5-by-3 matrix under test
    val m = new MatrixD ((5, 3),  0.44444444,  0.3333333, -1.3333333,
                                  0.41111111, -0.3166667, -0.3333333,
                                 -0.18888889,  0.4833333, -0.3333333,
                                 -0.03333333, -0.6500000,  1.0000000,
                                 -0.63333333,  0.1500000,  1.0000000)
//  testBid (m, "SVD4Test6b")                     // test the bidiagonalized matrix
    test (m, "SVD4Test6")                         // test the original matrix
} // SVD4Test6 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD4Test7` object tests the `SVD4` companion object's computation of
 *  trailing 2-by-2 submatrices against the full product b.t * b.
 *  (The original header misnamed this object `SVD4Test6`.)
 *  > run-main scalation.linalgebra.SVD4Test7
 */
object SVD4Test7 extends App
{
    import SVD4.trailing

    // 4-by-4 upper-bidiagonal test matrix
    val b = new MatrixD ((4, 4), 1.0, 5.0, 0.0, 0.0,
                                 0.0, 2.0, 6.0, 0.0,
                                 0.0, 0.0, 3.0, 7.0,
                                 0.0, 0.0, 0.0, 4.0)
    val n = b.dim2

    println ("b = " + b)
    println ("trailing b.t * b = " + trailing (b))
    val btb = b.t * b                             // full product, for cross-checking
    println ("check: " + btb(n-2 to n, n-2 to n))
} // SVD4Test7 object
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/SVD4.scala | Scala | mit | 19,397 |
import scala.reflect.runtime.universe._
object Test extends dotty.runtime.LegacyApp {
  // Reify the definition and print its tree; the expected output is pinned by
  // the test's check file, so the reified expression must stay unchanged.
  val tree = reify{def foo(@annotation.elidable(0) x: Int) = ""}.tree
  println(tree.toString)
}
| yusuke2255/dotty | tests/disabled/macro/run/t5225_2.scala | Scala | bsd-3-clause | 184 |
/*
* Copyright 2013 Stephan Rehfeld
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scaladelray.texture
import javax.imageio.ImageIO
import java.io.File
import scaladelray.Color
case class InterpolatedImageTexture( file : String ) extends Texture with Serializable {
private val image = ImageIO.read( new File( file ) )
override def apply(texCoord: TexCoord2D) = {
var u = if( texCoord.u == 1.0 ) texCoord.u else texCoord.u % 1.0
var v = if( texCoord.v == 1.0 ) texCoord.v else texCoord.v % 1.0
if( u < 0.0 ) u = u + 1.0
if( v < 0.0 ) v = v + 1.0
val x = (image.getWidth-1) * u
val y = (image.getHeight-1) - ((image.getHeight-1) * v)
val xa = x - math.floor( x )
val ya = y - math.floor( y )
val a = image.getRGB(math.floor( x ).asInstanceOf[Int], math.floor( y ).asInstanceOf[Int])
val b = image.getRGB(math.ceil( x ).asInstanceOf[Int], math.floor( y ).asInstanceOf[Int])
val c = image.getRGB(math.floor( x ).asInstanceOf[Int], math.ceil( y ).asInstanceOf[Int])
val d = image.getRGB(math.ceil( x ).asInstanceOf[Int], math.ceil( y ).asInstanceOf[Int])
val (redA,greenA,blueA) = extract( a )
val (redB,greenB,blueB) = extract( b )
val (redC,greenC,blueC) = extract( c )
val (redD,greenD,blueD) = extract( d )
val (redE,greenE,blueE) = (redA*(1.0-xa) + (redB * xa), greenA*(1.0-xa) + (greenB * xa), blueA*(1.0-xa) + (blueB * xa) )
val (redF,greenF,blueF) = (redC*(1.0-xa) + (redD * xa), greenC*(1.0-xa) + (greenD * xa), blueC*(1.0-xa) + (blueD * xa) )
val (red,green,blue) = (redE*(1.0-ya) + redF *ya , greenE* (1.0-ya) + greenF * ya, blueE*(1.0-ya) + blueF * ya )
Color( red/255.0, green/255.0, blue/255.0 )
}
  // Unpack an (A)RGB pixel into its (red, green, blue) channels, each 0-255.
  private def extract( argb : Int ) = ((argb & 0xff0000) >> 16,(argb & 0xff00) >> 8, argb & 0xff)
} | stephan-rehfeld/scaladelray | src/main/scala/scaladelray/texture/InterpolatedImageTexture.scala | Scala | apache-2.0 | 2,334 |
package test.auctionsniper
import org.junit.Test
import org.junit.Assert._
import auctionsniper.{SniperSnapshot => Snapshot, SniperState => State}
class SniperSnapshotTest {

  /** Walk a snapshot through its state machine and check each transition. */
  @Test
  def transitionsBetweenStates() {
    val itemId = "item id"

    // JOINING is the initial state: no bids seen yet
    val initial = Snapshot.joining(itemId)
    assertEquals(new Snapshot(itemId, 0, 0, State.JOINING), initial)

    val afterBid = initial.bidding(123, 234)
    assertEquals(new Snapshot(itemId, 123, 234, State.BIDDING), afterBid)

    // from BIDDING the sniper can fall behind, take the lead, or see the auction close
    assertEquals(new Snapshot(itemId, 456, 234, State.LOSING), afterBid.losing(456))
    assertEquals(new Snapshot(itemId, 456, 234, State.WINNING), afterBid.winning(456))
    assertEquals(new Snapshot(itemId, 123, 234, State.LOST), afterBid.closed())
    assertEquals(new Snapshot(itemId, 678, 234, State.WON), afterBid.winning(678).closed())
  }

  /** Snapshots compare "same item" by their item identifier only. */
  @Test
  def comparesItemIdentities() {
    assertTrue(Snapshot.joining("item 1").isForSameItemAs(Snapshot.joining("item 1")))
    assertFalse(Snapshot.joining("item 1").isForSameItemAs (Snapshot.joining("item 2")))
  }
}
| sptz45/goos-scala | test/unit/test/auctionsniper/SniperSnapshotTest.scala | Scala | apache-2.0 | 1,072 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.quinine.metrics
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.adam.projections.FieldValue
import org.bdgenomics.adam.projections.AlignmentRecordField._
import org.bdgenomics.adam.rich.RichAlignmentRecord._
import org.bdgenomics.adam.util.Util._
import scala.collection.Map
import org.bdgenomics.adam.models.ReadBucket
/** Registry of the comparison generators that ship by default, addressable
 *  by their registered names.
 */
object DefaultComparisons {

  /** Every comparison available by default. */
  val comparisons: Seq[BucketComparisons[Any]] = Seq[BucketComparisons[Any]](
    OverMatched,
    DupeMismatch,
    MappedPosition,
    MapQualityScores,
    BaseQualityScores)

  // index the comparisons by name for constant-time lookup
  private val byName: Map[String, BucketComparisons[Any]] =
    comparisons.map(c => (c.name, c)).toMap

  /** Look up a comparison by name; throws when the name is unknown. */
  def findComparison(k: String): BucketComparisons[Any] =
    byName.getOrElse(k, throw new ArrayIndexOutOfBoundsException(
      String.format("Could not find comparison %s", k)))
}
/** Sanity comparison: each read group must have the same size on both sides,
 *  and that size must be zero or one.
 */
object OverMatched extends BooleanComparisons with Serializable {
  val name = "overmatched"
  val description = "Checks that all buckets have exactly 0 or 1 records"

  /** Both groups hold the same number of records, and at most one each. */
  def matches(records1: Iterable[AlignmentRecord], records2: Iterable[AlignmentRecord]): Boolean = {
    val count = records1.size
    count == records2.size && count <= 1
  }

  def matchedByName(bucket1: ReadBucket, bucket2: ReadBucket): Seq[Boolean] = {
    val groups = Seq(
      (bucket1.unpairedPrimaryMappedReads, bucket2.unpairedPrimaryMappedReads),
      (bucket1.pairedFirstPrimaryMappedReads, bucket2.pairedFirstPrimaryMappedReads),
      (bucket1.pairedSecondPrimaryMappedReads, bucket2.pairedSecondPrimaryMappedReads),
      (bucket1.pairedFirstSecondaryMappedReads, bucket2.pairedFirstSecondaryMappedReads),
      (bucket1.pairedSecondSecondaryMappedReads, bucket2.pairedSecondSecondaryMappedReads))
    // a single boolean: true only when every group passes
    Seq(groups.forall { case (r1, r2) => matches(r1, r2) })
  }

  def schemas: Seq[FieldValue] = Seq()
}
/** Compares the duplicate-read flag across matched reads. */
object DupeMismatch extends PointComparisons with Serializable {
  val name = "dupemismatch"
  val description = "Counts the number of common reads marked as duplicates"

  /** For matched singleton groups, emit a (0/1, 0/1) pair recording each
   *  read's duplicate flag; any other group shape yields no point.
   */
  def points(records1: Iterable[AlignmentRecord], records2: Iterable[AlignmentRecord]): Option[(Int, Int)] = {
    def dupFlag(read: AlignmentRecord): Int = if (read.getDuplicateRead) 1 else 0
    if (records1.size == 1 && records2.size == 1)
      Some((dupFlag(records1.head), dupFlag(records2.head)))
    else
      None
  }

  def matchedByName(bucket1: ReadBucket, bucket2: ReadBucket): Seq[(Int, Int)] =
    Seq(
      (bucket1.unpairedPrimaryMappedReads, bucket2.unpairedPrimaryMappedReads),
      (bucket1.pairedFirstPrimaryMappedReads, bucket2.pairedFirstPrimaryMappedReads),
      (bucket1.pairedSecondPrimaryMappedReads, bucket2.pairedSecondPrimaryMappedReads),
      (bucket1.pairedFirstSecondaryMappedReads, bucket2.pairedFirstSecondaryMappedReads),
      (bucket1.pairedSecondSecondaryMappedReads, bucket2.pairedSecondSecondaryMappedReads)
    ).flatMap { case (r1, r2) => points(r1, r2) }

  def schemas: Seq[FieldValue] = Seq(duplicateRead)
}
object MappedPosition extends LongComparisons with Serializable {
  val name = "positions"
  val description = "Counts how many reads align to the same genomic location"

  /** Distance between the mapped start positions of two matched singleton reads.
   *  Returns 0 for two empty groups; -1 is a sentinel for group sizes that
   *  differ, groups holding more than one read, or reads on different contigs.
   */
  def distance(records1: Iterable[AlignmentRecord], records2: Iterable[AlignmentRecord]): Long = {
    if (records1.size == records2.size) records1.size match {
      case 0 => 0
      case 1 =>
        val r1 = records1.head
        val r2 = records2.head
        if (isSameContig(r1.getContig, r2.getContig)) {
          val start1 = r1.getStart
          val start2 = r2.getStart
          if (start1 > start2) start1 - start2 else start2 - start1
        } else -1
      case _ => -1
    }
    else -1
  }

  /**
   * The records have been matched by their names, but the rest may be mismatched.
   */
  // NOTE(review): the five per-group distances are summed, so a -1 sentinel can
  // partially cancel a real positive distance — confirm this aggregation is intended.
  def matchedByName(bucket1: ReadBucket, bucket2: ReadBucket): Seq[Long] =
    Seq(distance(bucket1.unpairedPrimaryMappedReads, bucket2.unpairedPrimaryMappedReads) +
      distance(bucket1.pairedFirstPrimaryMappedReads, bucket2.pairedFirstPrimaryMappedReads) +
      distance(bucket1.pairedSecondPrimaryMappedReads, bucket2.pairedSecondPrimaryMappedReads) +
      distance(bucket1.pairedFirstSecondaryMappedReads, bucket2.pairedFirstSecondaryMappedReads) +
      distance(bucket1.pairedSecondSecondaryMappedReads, bucket2.pairedSecondSecondaryMappedReads))

  def schemas: Seq[FieldValue] = Seq(
    start,
    readInFragment)
}
/** Collects (mapq, mapq) scatter points across matched reads. */
object MapQualityScores extends PointComparisons with Serializable {
  val name = "mapqs"
  val description = "Creates scatter plot of mapping quality scores across identical reads"

  /** For matched singleton groups, pair up the two reads' mapping qualities. */
  def points(records1: Iterable[AlignmentRecord], records2: Iterable[AlignmentRecord]): Option[(Int, Int)] =
    if (records1.size == 1 && records2.size == 1)
      Some((records1.head.getMapq.toInt, records2.head.getMapq.toInt))
    else
      None

  def matchedByName(bucket1: ReadBucket, bucket2: ReadBucket): Seq[(Int, Int)] =
    Seq(
      (bucket1.unpairedPrimaryMappedReads, bucket2.unpairedPrimaryMappedReads),
      (bucket1.pairedFirstPrimaryMappedReads, bucket2.pairedFirstPrimaryMappedReads),
      (bucket1.pairedSecondPrimaryMappedReads, bucket2.pairedSecondPrimaryMappedReads),
      (bucket1.pairedFirstSecondaryMappedReads, bucket2.pairedFirstSecondaryMappedReads),
      (bucket1.pairedSecondSecondaryMappedReads, bucket2.pairedSecondSecondaryMappedReads)
    ).flatMap { case (r1, r2) => points(r1, r2) }

  def schemas: Seq[FieldValue] = Seq(mapq)
}
/** Collects per-base (quality, quality) scatter points across matched reads. */
object BaseQualityScores extends PointComparisons with Serializable {
  val name = "baseqs"
  val description = "Creates scatter plots of base quality scores across identical positions in the same reads"

  /** For matched singleton groups, pair up base qualities position by position;
   *  any other group shape yields no points.
   */
  def points(records1: Iterable[AlignmentRecord], records2: Iterable[AlignmentRecord]): Seq[(Int, Int)] =
    if (records1.size == 1 && records2.size == 1) {
      val quals1 = records1.head.qualityScores
      val quals2 = records2.head.qualityScores
      quals1.zip(quals2).map { case (q1, q2) => (q1.toInt, q2.toInt) }
    } else {
      Seq()
    }

  def matchedByName(bucket1: ReadBucket, bucket2: ReadBucket): Seq[(Int, Int)] =
    Seq(
      (bucket1.unpairedPrimaryMappedReads, bucket2.unpairedPrimaryMappedReads),
      (bucket1.pairedFirstPrimaryMappedReads, bucket2.pairedFirstPrimaryMappedReads),
      (bucket1.pairedSecondPrimaryMappedReads, bucket2.pairedSecondPrimaryMappedReads),
      (bucket1.pairedFirstSecondaryMappedReads, bucket2.pairedFirstSecondaryMappedReads),
      (bucket1.pairedSecondSecondaryMappedReads, bucket2.pairedSecondSecondaryMappedReads)
    ).flatMap { case (r1, r2) => points(r1, r2) }

  def schemas: Seq[FieldValue] = Seq(qual)
}
| bigdatagenomics/qc-metrics | quinine-core/src/main/scala/org/bdgenomics/quinine/metrics/AvailableComparisons.scala | Scala | apache-2.0 | 7,672 |
package com.lamaVersion.core
import scala.sys.process._
import java.io.File
import scala.io.Source
import scala.language.postfixOps
import com.lamaVersion.impl.EasyIO
object Command{
  // argv-style "mkdir <dir>" command, usable wherever a ProcessBuilder is expected
  def mkdir(dir: String) = Seq("mkdir", dir)
}
/** A named, runnable experiment: a shell pipeline plus the commits it applies
 *  to and the result files it produces.
 *
 *  @param name     human-readable experiment name
 *  @param command  the shell pipeline to run (an empty pipeline renders as "[]")
 *  @param accept   predicate selecting the commits this experiment runs on
 *  @param outputs  result files (relative to the working dir) to collect
 */
class Experiment(val name: String,
                 val command: ProcessBuilder,
                 val accept: Commit => Boolean = _ => false,
                 val outputs: Seq[String] = Nil){

  /** Run the pipeline, appending its stdout to `output`; no-op for an empty pipeline. */
  def execute(output: File) = {
    if(command.toString != "[]")
      command #>> output !
    else
      println("Empty experiment can't be executed")
  }

  /** True when the pipeline exits with status 0 (note: this runs the pipeline). */
  def success() = command.! == 0

  /** Copy each declared output file into `outputDir`, inserting "_<ext>" before
   *  the file extension (e.g. out.txt -> out_abc123.txt for ext = "abc123").
   */
  def extractResultsTo(workingDir: File, outputDir: File, ext: String){
    for(file <- outputs){
      println(file)
      // BUG FIX: this used file.split(".") — but String.split takes a regex and
      // "." matches every character, yielding an empty array and an
      // ArrayIndexOutOfBoundsException on element 0. Split on the LAST dot
      // instead, and tolerate names without any extension.
      val dot = file.lastIndexOf('.')
      val (base, extension) = if (dot >= 0) (file.substring(0, dot), file.substring(dot + 1))
                              else (file, "")
      new File(workingDir, file) #>
        new File(outputDir, base + '_' + ext + '.' + extension) !
    }
  }
}
object Experiment{
def fromStream(lines: Stream[String], name: String, cwd: String = ".") = {
val workingDir = new File(cwd)
val range = lines contains(_.startsWith("# RANGE"))
val beginDate = lines.find(_.startsWith("# BEGIN: ")).map( (line: String) =>
Commit.gitDateFormat.parseDateTime(line.substring(9)))
val endDate = lines.find(_.startsWith("# END: ")).map( (line: String) =>
Commit.gitDateFormat.parseDateTime(line.substring(7)))
val excluded = lines filter(_.startsWith("# EXCLUDE: ")) flatMap(_.substring(11).split(" "))
val included = lines filter(_.startsWith("# INCLUDE: ")) flatMap(_.substring(10).split(" "))
val commands = lines filterNot((l: String) => l.startsWith("#") || l == "")
val outputs = lines filter(_.startsWith("# GET: ")) flatMap(_.substring(7).split(" "))
def accept(c: Commit) = {
val isIncluded = included.contains(c.hash) || included.contains(c.shortHash)
if(range){
val afterBegin = beginDate.map(begin => c.date.isAfter(begin) || c.date.isEqual(begin))
val beforeEnd = endDate.map(end => c.date.isBefore(end) || c.date.isEqual(end))
val isExcluded = excluded.contains(c.hash) || excluded.contains(c.shortHash)
isIncluded || afterBegin.getOrElse(true) && beforeEnd.getOrElse(true) && !isExcluded
} else isIncluded
}
def toProcessBuilder(commands: Stream[String]): ProcessBuilder = commands match {
case a #:: b #:: l => Process(a, workingDir) ### toProcessBuilder(b #:: l)
case a #:: Stream.Empty => Process(a, workingDir)
case Stream.Empty => ""
}
new Experiment(name, toProcessBuilder(commands), accept, outputs)
}
  /** Read an experiment description from `file`, naming the experiment after
   *  the file (via EasyIO.getShortFileName).
   */
  def fromFile(file: File, cwd: String = ".") = {
    val lines = Source.fromFile(file).getLines.toStream
    fromStream(lines, EasyIO.getShortFileName(file), cwd)
  }
} | gwenzek/lamaVersion | src/main/scala/core/Experiment.scala | Scala | mit | 3,000 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.kudu.spark.tools
import java.io.File
import org.apache.kudu.ColumnSchema.ColumnSchemaBuilder
import org.apache.kudu.{Schema, Type}
import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.spark.kudu._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FunSuite, Matchers}
import org.spark_project.guava.collect.ImmutableList
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class TestImportExportFiles extends FunSuite with TestContext with Matchers {

  private val TABLE_NAME: String = "TestImportExportFiles"
  private val TABLE_DATA_PATH: String = "src/test/resources/TestImportExportFiles.csv"

  /** End-to-end check: import a CSV into a fresh Kudu table via the
   *  command-line entry point, then read the keys back through a Kudu RDD
   *  and verify all four expected rows arrived.
   */
  test("Spark Import Export") {
    // five string columns; "key" is the range-partitioned primary key
    val schema: Schema = {
      val columns = ImmutableList.of(
        new ColumnSchemaBuilder("key", Type.STRING).key(true).build(),
        new ColumnSchemaBuilder("column1_i", Type.STRING).build(),
        new ColumnSchemaBuilder("column2_d", Type.STRING).nullable(true).build(),
        new ColumnSchemaBuilder("column3_s", Type.STRING).build(),
        new ColumnSchemaBuilder("column4_b", Type.STRING).build())
      new Schema(columns)
    }
    val tableOptions = new CreateTableOptions().setRangePartitionColumns(List("key").asJava)
      .setNumReplicas(1)
    kuduClient.createTable(TABLE_NAME, schema, tableOptions)

    // (removed an unused `dataPath` local that computed an absolute path but was
    // never used; the import reads the relative TABLE_DATA_PATH directly)
    ImportExportFiles.testMain(Array("--operation=import",
      "--format=csv",
      s"--master-addrs=${miniCluster.getMasterAddresses}",
      s"--path=$TABLE_DATA_PATH",
      s"--table-name=$TABLE_NAME",
      "--delimiter=,",
      "--header=true",
      "--inferschema=true"), ss)

    // read back just the key column and confirm all four rows made it
    val rdd = kuduContext.kuduRDD(ss.sparkContext, TABLE_NAME, List("key"))
    assert(rdd.collect.length == 4)
    assertEquals(rdd.collect().mkString(","),"[1],[2],[3],[4]")
  }
}
| andrwng/kudu | java/kudu-spark-tools/src/test/scala/org/apache/kudu/spark/tools/TestImportExportFiles.scala | Scala | apache-2.0 | 2,728 |
//Copyright 2014, Alex Khilko.
//This file is part of MoonGene which is released under MIT.
//See file LICENSE.TXT or go to www.alexkhilko.com for full license details.
package controllers
import models.{AccountAccessLevel, DataAccess}
import play.api._
import play.api.mvc._
import concurrent.{Future, Promise}
import org.joda.time.DateTime
/*
Admin controller:
- health metrics' graphs
- users management
- apps management
*/
object Admin extends Controller with DataAccess with Secured {
  /** GET: render the health-metrics dashboard; requires an Admin-level session. */
  def health = IsAccountAccessLevel(AccountAccessLevel.Admin) { email => request =>
    Async {
      accountByEmail(email).map( acc => {
        // NOTE(review): acc.get throws if no account matches the session email —
        // presumably IsAccountAccessLevel guarantees one exists; confirm.
        Ok(views.html.admin.health(acc.get, getSessionData(request)))
      })
    }
  }
  /** GET: render the user-management page; requires an Admin-level session. */
  def users = IsAccountAccessLevel(AccountAccessLevel.Admin) { email => request =>
    Async {
      accountByEmail(email).map( acc => {
        // NOTE(review): acc.get throws if no account matches the session email —
        // presumably IsAccountAccessLevel guarantees one exists; confirm.
        Ok(views.html.admin.users(acc.get, getSessionData(request)))
      })
    }
  }
  /** GET: render the app-management page; requires an Admin-level session. */
  def apps = IsAccountAccessLevel(AccountAccessLevel.Admin) { email => request =>
    Async {
      accountByEmail(email).map( acc => {
        // NOTE(review): acc.get throws if no account matches the session email —
        // presumably IsAccountAccessLevel guarantees one exists; confirm.
        Ok(views.html.admin.apps(acc.get, getSessionData(request)))
      })
    }
  }
} | InfiniteCode/MoonGene | src/moon/app/controllers/Admin.scala | Scala | mit | 1,189 |
/* Copyright 2014, 2015 Richard Wiedenhöft <richard@wiedenhoeft.xyz>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xyz.wiedenhoeft.scalacrypt.iteratees
import scala.util.{ Try, Success, Failure }
import xyz.wiedenhoeft.scalacrypt._
/** Adapts a stream of `From` elements into input for an iteratee that
 *  consumes `To` elements.
 */
trait Enumeratee[From, To] {
  /** Wrap `inner` so it can be driven by `From` input; the resulting iteratee's
   *  value is the (possibly still unfinished) inner iteratee.
   */
  def apply[A](inner: Iteratee[To, A]): Iteratee[From, Iteratee[To, A]]
  /** Run the enumeratee to completion: once the outer iteratee is done, feed
   *  the inner one EOF and extract its result — an error if it is still Cont.
   */
  def transform[A](inner: Iteratee[To, A]): Iteratee[From, A] = apply(inner) flatMap {
    _.fold(EOF).state match {
      case Cont(_) ⇒ Iteratee.error(new IterateeException("Iteratee must be done after EOF"))
      case Error(error) ⇒ Iteratee.error(error)
      case Done(result) ⇒ Iteratee.done(result)
    }
  }
}
object Enumeratee {
  /** Build an enumeratee that maps each input element with `f` before handing
   *  it to the inner iteratee.  Empty chunks pass through untouched; on EOF
   *  (or once the inner iteratee stops being Cont) the inner iteratee is
   *  returned as the result WITHOUT being fed EOF itself.
   */
  def map[From, To](f: (From) ⇒ To) = new Enumeratee[From, To] {
    def apply[A](inner: Iteratee[To, A]): Iteratee[From, Iteratee[To, A]] = inner.state match {
      case Cont(folder) ⇒ Iteratee.cont {
        case Element(element) ⇒ apply(inner.fold(Element(f(element))))
        case Empty ⇒ apply(inner)
        case EOF ⇒ Iteratee.done(inner)
      }
      case _ ⇒ Iteratee.done(inner)
    }
  }
}
| Richard-W/scalacrypt | src/main/scala/iteratees/Enumeratee.scala | Scala | apache-2.0 | 1,623 |
package beam.sim
import java.io.File
import java.nio.file.{Files, Path}
import beam.utils.BeamConfigUtils
import scala.collection.JavaConverters._
/** Entry point that runs a batch of BEAM simulations described by a single
 *  batch config file, passed as `--batch <file>`.
 */
object RunBatch extends App with BeamHelper {
  val BATCH_OPT = "batch"

  val argsMap = parseArgs(args)

  if (argsMap.get(BATCH_OPT).isEmpty) {
    throw new IllegalArgumentException(s"$BATCH_OPT param is missing")
  }

  val batchPath: Path = new File(argsMap(BATCH_OPT)).toPath.toAbsolutePath
  if (!Files.exists(batchPath)) {
    throw new IllegalArgumentException(s"$BATCH_OPT file is missing: $batchPath")
  }

  // Run batch
  runBatch(batchPath)
  System.exit(0)

  /** Parse the command line into an option map.
   *  Note: every sliding 2-window must match `--batch <non-empty path>`;
   *  any other window (including a lone trailing argument) throws, so this
   *  effectively only supports exactly `--batch <file>`.
   */
  def parseArgs(args: Array[String]) = {
    args
      .sliding(2, 1)
      .toList
      .collect {
        case Array("--batch", filePath: String) if filePath.trim.nonEmpty =>
          (BATCH_OPT, filePath)
        case arg @ _ =>
          throw new IllegalArgumentException(arg.mkString(" "))
      }
      .toMap
  }

  /** Load the batch config, then run BEAM once per plan in `batch.plans`,
   *  each plan layered over the shared `batch.baseConfig`.
   */
  def runBatch(batchPath: Path): Unit = {
    val batchConfig = BeamConfigUtils.parseFileSubstitutingInputDirectory(batchPath.toFile).resolve()
    val baseConfPath = batchConfig.getString("batch.baseConfig")
    val baseConf = BeamConfigUtils.parseFileSubstitutingInputDirectory(baseConfPath)
    val plans = batchConfig.getConfigList("batch.plans")
    for (plan <- plans.asScala) {
      val conf = plan.withFallback(baseConf).resolve()
      runBeamWithConfig(conf)
    }
  }
}
| colinsheppard/beam | src/main/scala/beam/sim/RunBatch.scala | Scala | gpl-3.0 | 1,437 |
class TC                 // marker type carried as a context parameter
given tc: TC()           // the canonical given instance of TC
// Requires a TC from the context; prints on construction.
class Foo(using TC) {
  println("hi")
}
object Test extends App {
  // exercise every constructor-call form: implicit given resolution,
  // explicit `using` argument, and the Scala 3 constructor-proxy syntax
  new Foo
  new Foo(using tc)
  new Foo()
  new Foo()(using tc)
  Foo()
  Foo()(using tc)
} | dotty-staging/dotty | tests/run/i2567.scala | Scala | apache-2.0 | 184 |
package com.houseofmoran.selfies.faces
object HorizontalFacePresence {

  /** Bucket the detected faces by the horizontal band they fall into
   *  (top / middle / bottom); a band with no faces maps to None.
   */
  def fromFaces(faces: Seq[DetectedFaceInContext]) : HorizontalFacePresence = {
    val byBand = faces.groupBy(_.toHorizontalSegment)

    HorizontalFacePresence(
      byBand.get(TopHorizontal),
      byBand.get(MiddleHorizontal),
      byBand.get(BottomHorizontal))
  }
}
case class HorizontalFacePresence(top: Option[Seq[DetectedFaceInContext]],
middle: Option[Seq[DetectedFaceInContext]],
bottom: Option[Seq[DetectedFaceInContext]]) | mikemoraned/selfies | src/main/scala/com/houseofmoran/selfies/faces/HorizontalFacePresence.scala | Scala | mit | 599 |
package opennlp.scalabha.classify
package object perceptron {
  // A perceptron weight vector: one Double weight per feature dimension.
  type WeightVector = Array[Double]
}
| eponvert/Scalabha | src/main/scala/opennlp/scalabha/classify/perceptron/package.scala | Scala | apache-2.0 | 102 |
/*
* Operations that require integral operands.
*/
package see.operations
import see.BinIntOp
import see.Scope
import see.Unary
import see.values.Bool
import see.values.IntLike
import see.values.Val
/** Unary '~': logical negation for Bool values, bitwise complement for
 *  integral values; all other operand types defer to Unary's handling.
 */
private[see] object UnaryInv extends Unary("~") {
	override def apply(s: Scope, v: Val): Val = v match {
		case Bool(x) => Bool(!x)
		case x: IntLike => x.~
		case _ => super.apply(s, v)
	}
}
// Bitwise and shift operators, defined for integral operands only.
private[see] object BitAnd extends BinIntOp("&") (_ & _)
private[see] object BitOr extends BinIntOp("|") (_ | _)
private[see] object BitXor extends BinIntOp("^") (_ ^ _)
private[see] object BitRshift extends BinIntOp(">>") (_ >> _)
private[see] object BitLshift extends BinIntOp("<<") (_ << _)
| RayRacine/scee | src/main/scala/see/operations/IntOps.scala | Scala | bsd-3-clause | 702 |
package com.gigaspaces.sbp
import Global.Implicits._
import scala.language.postfixOps
// this class is excessive, can be substituted by main method in Rebalance
@deprecated("code was copy-pasted in from https://github.com/jasonnerothin/gs-rebalance and is hanging around only for reference purposes.")
object RebalanceRunner {
  /** Repeatedly check grid balance and apply rebalance / PU-move operations
   *  (only when they can be applied safely) until the grid reports balanced.
   */
  def main(args: Array[String]) {
    println("REBALANCE STARTED")
    val main = new Rebalance(new CheckGridStatus)
    main.check()
    while (main.unbalanced()) {
      val rebalanceOps = main.rebalanceOperations()
      val puMoveOps = main.puMoveOperations()
      // only act when both operation sets can be applied safely
      main.canSafelyRebalance(rebalanceOps, puMoveOps) match {
        case true => main.rebalance(rebalanceOps, puMoveOps)
        case false => main.cannotRebalance()
      }
      main.timeout()            // back off before re-checking the grid
      main.check()
    }
    println(main.successfulRebalanceMessage)
  }
}
| jasonnerothin/gs-cheater | src/main/scala/com/gigaspaces/sbp/RebalanceRunner.scala | Scala | apache-2.0 | 872 |
package org.backuity.clist.util
/** Type class: parse a value of type A from the full list of raw string
 *  tokens supplied for a multi-valued option.
 */
trait ReadMultiple[A] { self =>
  // Parse all raw tokens into a single A.
  def reads(many: Seq[String]): A
  // Derive a reader for B by post-processing this reader's result with f.
  def map[B](f: A => B): ReadMultiple[B] = new ReadMultiple[B] {
    override def reads(many: Seq[String]): B = {
      f(self.reads(many))
    }
  }
}
/** Default [[ReadMultiple]] instances for common collection types. */
object ReadMultiple {
  /** Reads each raw argument with the element's `Read` instance, preserving order. */
  implicit def seqReadMultiple[T: Read]: ReadMultiple[Seq[T]] = new ReadMultiple[Seq[T]] {
    override def reads(many: Seq[String]): Seq[T] = {
      many.map(implicitly[Read[T]].reads)
    }
  }
  // The remaining instances are derived from the Seq reader via `map`.
  // Note that Set and Map conversions may drop duplicate elements/keys.
  implicit def listReadMultiple[T: Read]: ReadMultiple[List[T]] = seqReadMultiple[T].map(_.toList)
  implicit def setReadMultiple[T: Read]: ReadMultiple[Set[T]] = seqReadMultiple[T].map(_.toSet)
  implicit def mapReadMultiple[K: Read, V: Read]: ReadMultiple[Map[K,V]] = seqReadMultiple[(K,V)].map(_.toMap)
}
| backuity/clist | core/src/main/scala/org/backuity/clist/util/ReadMultiple.scala | Scala | apache-2.0 | 783 |
/*
* Copyright (c) 2012 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
import shapeless._
/*
* Examples of Scrap Your Boilerplate in action
*
* @author Miles Sabin
*/
object SybClassExamples {
  // Example taken from the original SYB paper:
  // "Scrap your boilerplate: a practical approach to generic programming", Ralf Laemmel, Simon Peyton Jones
  // http://research.microsoft.com/en-us/um/people/simonpj/papers/hmap/

  // Company data model used by the `paradise` example.
  case class Company(depts : List[Dept])
  sealed trait Subunit
  case class Dept(name : Name, manager : Manager, subunits : List[Subunit]) extends Subunit
  case class Employee(person : Person, salary : Salary) extends Subunit
  case class Person(name : Name, address : Address)
  case class Salary(salary : Int)
  type Manager = Employee
  type Name = String
  type Address = String

  // Polymorphic function that raises an Int by 10% and leaves all other
  // types unchanged when applied with `everywhere`.
  object raise extends Poly1 {
    implicit def caseInt = at[Int](_*110/100)
  }

  /** Applies a 10% raise to every salary nested anywhere inside a Company. */
  def paradise : Unit = {
    val beforeRaise =
      Company(
        List(
          Dept("Research",
            Employee(Person("Ralf", "Amsterdam"), Salary(8000)),
            List(
              Employee(Person("Joost", "Amsterdam"), Salary(1000)),
              Employee(Person("Marlow", "Cambridge"), Salary(2000))
            )
          ),
          Dept("Strategy",
            Employee(Person("Blair", "London"), Salary(100000)),
            List()
          )
        )
      )
    // Compute a new company structure with all salaries increased by 10%
    val afterRaise = everywhere(raise)(beforeRaise)
    println(afterRaise)
    val expected =
      Company(
        List(
          Dept("Research",
            Employee(Person("Ralf", "Amsterdam"), Salary(8800)),
            List(
              Employee(Person("Joost", "Amsterdam"), Salary(1100)),
              Employee(Person("Marlow", "Cambridge"),Salary(2200))
            )
          ),
          Dept("Strategy",
            Employee(Person("Blair", "London"),Salary(110000)),
            List()
          )
        )
      )
    assert(afterRaise == expected)
  }

  // Recursive binary tree used by the `recursion` example.
  sealed trait Tree[T]
  case class Leaf[T](t: T) extends Tree[T]
  case class Node[T](left: Tree[T], right: Tree[T]) extends Tree[T]

  // Polymorphic function that increments every Int it encounters.
  object inc extends Poly1 {
    implicit def caseInt = at[Int](_+1)
  }

  /** Demonstrates `everywhere` traversing a recursive data type. */
  def recursion : Unit = {
    val tree: Tree[Int] =
      Node(
        Node(
          Node(
            Leaf(1),
            Node(
              Leaf(2),
              Leaf(3)
            )
          ),
          Leaf(4)
        ),
        Node(
          Leaf(5),
          Leaf(6)
        )
      )
    val result = everywhere(inc)(tree)
    println(result)
    val expected: Tree[Int] =
      Node(
        Node(
          Node(
            Leaf(2),
            Node(
              Leaf(3),
              Leaf(4)
            )
          ),
          Leaf(5)
        ),
        Node(
          Leaf(6),
          Leaf(7)
        )
      )
    assert(expected == result)
  }

  // Explicit result type replaces the deprecated procedure syntax.
  def main(args: Array[String]): Unit = {
    paradise
    recursion
  }
}
| non/shapeless | examples/src/main/scala/shapeless/examples/sybclass.scala | Scala | apache-2.0 | 3,585 |
package org.dele.text.maen.matchers
import org.dele.text.maen.TestHelper._
import SubMatchCheckerLib._
import org.scalatest.ShouldMatchers
import org.scalatest.testng.TestNGSuite
import org.testng.annotations.Test
/**
* Created by jiaji on 2016-02-10.
*/
/** Tests for matcher dependency tracking and dependency-circle detection in DepMap. */
class DepMapTest extends TestNGSuite with ShouldMatchers {
  import org.dele.text.maen.AtomPropMatcherLib._
  import TMatcher._
  // Matcher ids used throughout the tests: three leaf matchers and two groups.
  val idWord = "atom-matcher-f-word"
  val idPhrase = "atom-matcher-f-phrase"
  val idSentence = "atom-matcher-f-sentence"
  val idGroup = "atom-matchers"
  val idGroup2 = "atom-matchers2"
  implicit val smlib = EmptySubMatchCheckerLib
  // Registers two OR-groups sharing the "word" matcher and verifies reverse
  // dependency lookup: each leaf id maps to the group(s) that contain it.
  @Test
  def t1 = {
    val matchers = Array(
      fromAtomMatcher(FExact("word"), EmptyCheckerIds, Option(idWord)),
      fromAtomMatcher(FExact("phrase"), EmptyCheckerIds, Option(idPhrase))
    )
    val compMatcher = matchersOR(idGroup, matchers)
    val matchers2 = Array(
      fromAtomMatcher(FExact("word"), EmptyCheckerIds, Option(idWord)),
      fromAtomMatcher(FExact("sentence"), EmptyCheckerIds, Option(idSentence))
    )
    val compMatcher2 = matchersOR(idGroup2, matchers2)
    val depMap = DepMap.create
    depMap += compMatcher
    depMap += compMatcher2
    depMap.getMatcherIdsDepOn(idWord) shouldBe(Set(idGroup, idGroup2))
    depMap.getMatcherIdsDepOn(idPhrase) shouldBe(Set(idGroup))
    depMap.getMatcherIdsDepOn(idSentence) shouldBe(Set(idGroup2))
  }
  import DepMap._
  // Verifies cycle detection in dependency graphs: single cycle, two
  // independent cycles reachable from one root, and merging all cycles
  // found across the whole graph.
  @Test
  def testFindCircles = {
    val deps = Map(
      "list1" -> Set("list11", "list12"),
      "list11" -> Set("list111", "list112"),
      "list112" -> Set("list1", "list1121")
    )
    // Cycle: list1 -> list11 -> list112 -> list1
    val circle1 = IndexedSeq("list1", "list11", "list112", "list1")
    val c = findDepCircles("list1", deps)
    c shouldBe Set(circle1)
    val deps1 = Map(
      "list1" -> Set("list11", "list12"),
      "list2" -> Set("list21", "list112"),
      "list11" -> Set("list111", "list112"),
      "list112" -> Set("list1", "list1121", "list1122"),
      "list1122" -> Set("list11221", "list11222"),
      "list11221" -> Set("list112", "list112211")
    )
    // Second cycle nested below the first: list112 -> list1122 -> list11221 -> list112
    val circle2 = IndexedSeq("list112", "list1122", "list11221", "list112")
    val c1 = findDepCircles("list1", deps1)
    c1 shouldBe Set(circle1, circle2)
    val all = computeAllCircles(deps1)
    //val merged = mergeCircles(all)
    // Overlapping cycles are merged into a single node set.
    all shouldBe Set(circle1.toSet ++ circle2)
  }
}
| new2scala/text-util | maen/src/test/scala/org/dele/text/maen/matchers/DepMapTest.scala | Scala | apache-2.0 | 2,364 |
package master
import Import.FileImport.FileImporter.{BrokenLines, ResponseResult}
import Import.Types._
import Import.{FinalPlayer, ImportManager}
import Import.ImportOptimizer.ImportOptimizer.FinalPlayers
import RestConnection.TeamRequest
import TeamBuilding.Evaluation.TeamEvaluator.FinalTeamsLinesWithValue
import TeamBuilding.TeamBuildingMaster
import akka.actor.{Actor, ActorRef, PoisonPill, Props}
import master.UeberActor._
import scala.collection.immutable.TreeMap
/**
* Created by yannick on 16.02.16.
*/
/** Companion: actor registration name, Props factory, and message protocol. */
object UeberActor {
  // Name used when registering this actor in the actor system.
  val name = "ueber-actor"
  def props = Props(new UeberActor)
  // Paths of the produced result artifacts; doubles as a FileImporter response.
  case class ResultPaths(teamsPath: String = "", plotPath: String = "") extends ResponseResult
  // Final answer sent back to the original requester: rendered team lines plus evaluation pairs.
  case class ResultTeamsAndEval(teams: List[String], eval: Seq[(Int,Int)])
  // Best team combination with its evaluation, received from team building.
  case class BestTeamComboAndEval(finalTeamsWithValue: FinalTeamsLinesWithValue, eval: TreeMap[Int,Int])
  // Imported players forwarded to team building together with the requested team count.
  case class FinalPlayersWithTeamNumber(players: List[FinalPlayer], meanVals: ValueVector, teamCount: Int)
  // Weighting to apply during team evaluation.
  case class WeightVector(weightVector: ValueVector)
}
/**
 * Top-level coordinator: forwards a TeamRequest to the import pipeline,
 * hands the imported players to team building, and replies to the original
 * requester with either the built teams or the broken input lines.
 * Stops itself (PoisonPill) after replying.
 */
class UeberActor extends Actor {
  val importManager = context.actorOf(ImportManager.props(self), ImportManager.name)
  val teamBuldingMaster = context.actorOf(TeamBuildingMaster.props(self), TeamBuildingMaster.name)
  // Original requester; only valid after a TeamRequest has arrived.
  // NOTE(review): initialised to null — consider Option[ActorRef] instead.
  var realSender: ActorRef = null
  val startTime = System.currentTimeMillis()
  var resultPaths = ResultPaths()
  var numberOfTeams = 0
  def receive: Receive = {
    case t @ TeamRequest(_, count, weightVector) =>
      importManager ! t
      numberOfTeams = count
      realSender = sender
      teamBuldingMaster ! WeightVector(weightVector)
      println("Sent to Importmanager")
    case FinalPlayers(players, meanVals) =>
      teamBuldingMaster ! FinalPlayersWithTeamNumber(players, meanVals, numberOfTeams)
      println("Sent to TeamBuildingMaster")
    // Unused binder `ft @` removed.
    case BestTeamComboAndEval(finalTeams, eval) =>
      realSender ! ResultTeamsAndEval(finalTeams.teams.toLines, eval.toSeq)
      self ! PoisonPill
      println("Time after calculations: " + (System.currentTimeMillis() - startTime) / 1000 + " sec.")
    // Payload was extracted but never used; a type pattern is sufficient.
    case br: BrokenLines =>
      realSender ! br
      self ! PoisonPill
  }
  // Replies with the collected result paths once both are known.
  // NOTE(review): not invoked from receive in this file — possibly dead code.
  def checkIfResult = {
    if (resultPaths.plotPath != "" && resultPaths.teamsPath != "") {
      realSender ! resultPaths
      println("Finished!")
      self ! PoisonPill
    }
  }
}
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.common
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
/** Tests for the Octal string parser. */
class OctalTest extends FlatSpec with MustMatchers {

  // The original bodies were wrapped in `in new { ... }`, instantiating a
  // pointless anonymous class on each run; a plain block is equivalent.
  "Octal parser" must "parse valid strings" in {
    Octal("777") must be (511)
  }

  it must "support leading zeroes" in {
    Octal("022") must be (18)
  }

  it must "throw on invalid numbers" in {
    evaluating(Octal("19")) must produce [NumberFormatException]
  }
}
| telefonicaid/fiware-cosmos-platform | common/src/test/scala/es/tid/cosmos/common/OctalTest.scala | Scala | apache-2.0 | 1,083 |
package io.circe
import cats.data.{
Chain,
NonEmptyChain,
NonEmptyList,
NonEmptyMap,
NonEmptySet,
NonEmptyStream,
NonEmptyVector,
Validated
}
import cats.instances.all._
import cats.kernel.Eq
import cats.laws.discipline.arbitrary._
import cats.syntax.contravariant._
import cats.syntax.invariant._
import cats.syntax.eq._
import io.circe.testing.CodecTests
import io.circe.tests.CirceSuite
import io.circe.tests.examples.{ Foo, Wub }
import java.util.UUID
import org.scalacheck.{ Arbitrary, Gen }
import scala.collection.immutable.SortedMap
import scala.collection.mutable.HashMap
/**
 * Eq instances for Float and Double that treat NaN as equal to itself, so
 * codec round-trip laws can compare generated values containing NaN
 * (under the standard orders, NaN != NaN would fail every such check).
 */
trait SpecialEqForFloatAndDouble {
  /**
   * We provide a special [[cats.kernel.Eq]] instance for [[scala.Float]] that does not distinguish
   * `NaN` from itself.
   */
  val eqFloat: Eq[Float] = Eq.instance[Float] { (a, b) =>
    (a.isNaN && b.isNaN) || cats.instances.float.catsKernelStdOrderForFloat.eqv(a, b)
  }

  /**
   * We provide a special [[cats.kernel.Eq]] instance for [[scala.Double]] that does not distinguish
   * `NaN` from itself.
   */
  val eqDouble: Eq[Double] = Eq.instance[Double] { (a, b) =>
    (a.isNaN && b.isNaN) || cats.instances.double.catsKernelStdOrderForDouble.eqv(a, b)
  }
}
/**
 * Codec law checks for Scala AnyVal types. Float and Double use the
 * NaN-tolerant Eq instances from SpecialEqForFloatAndDouble.
 */
class AnyValCodecSuite extends CirceSuite with SpecialEqForFloatAndDouble {
  checkAll("Codec[Unit]", CodecTests[Unit].codec)
  checkAll("Codec[Boolean]", CodecTests[Boolean].codec)
  checkAll("Codec[Char]", CodecTests[Char].codec)
  checkAll("Codec[Float]", CodecTests[Float].codec(implicitly, implicitly, eqFloat, implicitly, implicitly))
  checkAll("Codec[Double]", CodecTests[Double].codec(implicitly, implicitly, eqDouble, implicitly, implicitly))
  checkAll("Codec[Byte]", CodecTests[Byte].codec)
  checkAll("Codec[Short]", CodecTests[Short].codec)
  checkAll("Codec[Int]", CodecTests[Int].codec)
  checkAll("Codec[Long]", CodecTests[Long].codec)
}
/** Codec law checks for Java boxed primitive types and java.math big numbers. */
class JavaBoxedCodecSuite extends CirceSuite with SpecialEqForFloatAndDouble {
  import java.{ lang => jl }
  import java.{ math => jm }

  // Builds codec law tests for a boxed type by generating Scala primitives
  // and boxing them with `wrap`.
  // NOTE(review): the `unwrap` parameter is accepted but never used in the
  // body — candidate for removal.
  private def JavaCodecTests[ScalaPrimitive, JavaBoxed](
    wrap: ScalaPrimitive => JavaBoxed,
    unwrap: JavaBoxed => ScalaPrimitive,
    eq: Eq[JavaBoxed] = Eq.fromUniversalEquals[JavaBoxed]
  )(implicit scalaArb: Arbitrary[ScalaPrimitive], decoder: Decoder[JavaBoxed], encoder: Encoder[JavaBoxed]) =
    CodecTests[JavaBoxed].codec(Arbitrary(scalaArb.arbitrary.map(wrap)), implicitly, eq, implicitly, implicitly)

  checkAll("Codec[java.lang.Boolean]", JavaCodecTests[Boolean, jl.Boolean](jl.Boolean.valueOf, _.booleanValue()))
  checkAll("Codec[java.lang.Character]", JavaCodecTests[Char, jl.Character](jl.Character.valueOf, _.charValue()))
  // Float/Double use the NaN-tolerant Eq contramapped onto the boxed type.
  checkAll(
    "Codec[java.lang.Float]",
    JavaCodecTests[Float, jl.Float](jl.Float.valueOf, _.floatValue(), eqFloat.contramap(_.floatValue()))
  )
  checkAll(
    "Codec[java.lang.Double]",
    JavaCodecTests[Double, jl.Double](jl.Double.valueOf, _.doubleValue(), eqDouble.contramap(_.doubleValue()))
  )
  checkAll("Codec[java.lang.Byte]", JavaCodecTests[Byte, jl.Byte](jl.Byte.valueOf, _.byteValue()))
  checkAll("Codec[java.lang.Short]", JavaCodecTests[Short, jl.Short](jl.Short.valueOf, _.shortValue()))
  checkAll("Codec[java.lang.Long]", JavaCodecTests[Long, jl.Long](jl.Long.valueOf, _.longValue()))
  checkAll("Codec[java.lang.Integer]", JavaCodecTests[Int, jl.Integer](jl.Integer.valueOf, _.intValue()))
  checkAll("Codec[java.math.BigDecimal]", JavaCodecTests[BigDecimal, jm.BigDecimal](_.bigDecimal, BigDecimal.apply))
  checkAll("Codec[java.math.BigInteger]", JavaCodecTests[BigInt, jm.BigInteger](_.bigInteger, BigInt.apply))
}
/** Codec law checks for standard-library types, plus tuple/list decoder behaviour tests. */
class StdLibCodecSuite extends CirceSuite with ArrayFactoryInstance {
  // NOTE(review): `[Long, Int]` here declares *type parameters* named Long and
  // Int that shadow the real types, making this an Eq for HashMap of any two
  // types. It works because fromUniversalEquals ignores the types, but the
  // signature is misleading.
  implicit def eqHashMap[Long, Int]: Eq[HashMap[Long, Int]] = Eq.fromUniversalEquals
  implicit val arbitraryUUID: Arbitrary[UUID] = Arbitrary(Gen.uuid)

  checkAll("Codec[String]", CodecTests[String].codec)
  checkAll("Codec[BigInt]", CodecTests[BigInt].codec)
  checkAll("Codec[BigDecimal]", CodecTests[BigDecimal].codec)
  checkAll("Codec[UUID]", CodecTests[UUID].codec)
  checkAll("Codec[Option[Int]]", CodecTests[Option[Int]].codec)
  checkAll("Codec[Some[Int]]", CodecTests[Some[Int]].codec)
  checkAll("Codec[None.type]", CodecTests[None.type].codec)
  checkAll("Codec[List[Int]]", CodecTests[List[Int]].codec)
  checkAll("Codec[Seq[Int]]", CodecTests[Seq[Int]].codec)
  checkAll("Codec[Map[String, Int]]", CodecTests[Map[String, Int]].codec)
  checkAll("Codec[Map[Symbol, Int]]", CodecTests[Map[Symbol, Int]].codec)
  checkAll("Codec[Map[UUID, Int]]", CodecTests[Map[UUID, Int]].codec)
  checkAll("Codec[Map[Byte, Int]]", CodecTests[Map[Byte, Int]].codec)
  checkAll("Codec[Map[Short, Int]]", CodecTests[Map[Short, Int]].codec)
  checkAll("Codec[Map[Int, Int]]", CodecTests[Map[Int, Int]].codec)
  checkAll("Codec[Map[Long, Int]]", CodecTests[Map[Long, Int]].codec)
  checkAll("Codec[HashMap[Long, Int]]", CodecTests[HashMap[Long, Int]].unserializableCodec)
  checkAll("Codec[SortedMap[Long, Int]]", CodecTests[SortedMap[Long, Int]].unserializableCodec)
  checkAll("Codec[Set[Int]]", CodecTests[Set[Int]].codec)
  checkAll("Codec[Array[String]]", CodecTests[Array[String]].codec)

  // Tuples must encode as heterogeneous JSON arrays and round-trip.
  "A tuple encoder" should "return a JSON array" in forAll { (t: (Int, String, Char)) =>
    val json = Encoder[(Int, String, Char)].apply(t)
    val target = Json.arr(Json.fromInt(t._1), Json.fromString(t._2), Encoder[Char].apply(t._3))
    assert(json === target && json.as[(Int, String, Char)] === Right(t))
  }

  "A tuple decoder" should "fail if not given enough elements" in forAll { (i: Int, s: String) =>
    assert(Json.arr(Json.fromInt(i), Json.fromString(s)).as[(Int, String, Double)].isLeft)
  }

  it should "fail if given too many elements" in forAll { (i: Int, s: String, d: Double) =>
    assert(Json.arr(Json.fromInt(i), Json.fromString(s), Json.fromDoubleOrNull(d)).as[(Int, String)].isLeft)
  }

  // Guards against a stack-unsafe (deeply recursive) list decoder.
  "A list decoder" should "not stack overflow with a large number of elements" in {
    val size = 10000
    val jsonArr = Json.arr(Seq.fill(size)(Json.fromInt(1)): _*)

    val maybeList = jsonArr.as[List[Int]]
    assert(maybeList.isRight)

    val Right(list) = maybeList
    assert(list.length == size)
    assert(list.forall(_ == 1))
  }

  // The decoder must short-circuit on the first bad element ("foo") and never
  // reach the third element, whose decoder would throw.
  it should "stop after first failure" in {
    object Bomb {
      implicit val decodeBomb: Decoder[Bomb] = Decoder[Int].map {
        case 0 => throw new Exception("You shouldn't have tried to decode this")
        case i => Bomb(i)
      }
    }

    case class Bomb(i: Int)

    val jsonArr = Json.arr(Json.fromInt(1), Json.fromString("foo"), Json.fromInt(0))
    val result = jsonArr.as[List[Bomb]]
    assert(result.isLeft)
  }
}
/** Codec law checks for cats data types. */
class CatsCodecSuite extends CirceSuite with StreamFactoryInstance {
  checkAll("Codec[Chain[Int]]", CodecTests[Chain[Int]].codec)
  checkAll("Codec[NonEmptyList[Int]]", CodecTests[NonEmptyList[Int]].codec)
  checkAll("Codec[NonEmptyVector[Int]]", CodecTests[NonEmptyVector[Int]].codec)
  checkAll("Codec[NonEmptyStream[Int]]", CodecTests[NonEmptyStream[Int]].codec)
  checkAll("Codec[NonEmptySet[Int]]", CodecTests[NonEmptySet[Int]].codec)
  checkAll("Codec[NonEmptyMap[Int, String]]", CodecTests[NonEmptyMap[Int, String]].unserializableCodec)
  checkAll("Codec[NonEmptyChain[Int]]", CodecTests[NonEmptyChain[Int]].codec)
}

/** Codec law checks for circe's own types. */
class CirceCodecSuite extends CirceSuite {
  checkAll("Codec[Json]", CodecTests[Json].codec)
  checkAll("Codec[JsonObject]", CodecTests[JsonObject].codec)
  checkAll("Codec[JsonNumber]", CodecTests[JsonNumber].codec)
  checkAll("Codec[Foo]", CodecTests[Foo](Foo.decodeFoo, Foo.encodeFoo).codec)
}

/** Codec law checks for codecs derived via imap/iemap. */
class InvariantCodecSuite extends CirceSuite {
  val wubCodec = Codec.from(Decoder[Long], Encoder[Long]).imap(Wub(_))(_.x)
  val wubCodecE = Codec.from(Decoder[Long], Encoder[Long]).iemap(l => Right(Wub(l)))(_.x)
  checkAll("Codec[Wub] via imap", CodecTests[Wub](wubCodec, wubCodec).codec)
  checkAll("Codec[Wub] via iemap", CodecTests[Wub](wubCodecE, wubCodecE).codec)
}

/** Codec law checks for Either codecs keyed by "L"/"R", in every decoder/encoder pairing. */
class EitherCodecSuite extends CirceSuite {
  val decoder = Decoder.decodeEither[Int, String]("L", "R")
  val encoder = Encoder.encodeEither[Int, String]("L", "R")
  val codec = Codec.codecForEither[Int, String]("L", "R")

  checkAll("Codec[Either[Int, String]]", CodecTests[Either[Int, String]](decoder, encoder).codec)
  checkAll("Codec[Either[Int, String]] via Codec", CodecTests[Either[Int, String]](codec, codec).codec)
  checkAll("Codec[Either[Int, String]] via Decoder and Codec", CodecTests[Either[Int, String]](decoder, codec).codec)
  checkAll("Codec[Either[Int, String]] via Encoder and Codec", CodecTests[Either[Int, String]](codec, encoder).codec)
}

/** Codec law checks for Validated codecs keyed by "E"/"A", in every decoder/encoder pairing. */
class ValidatedCodecSuite extends CirceSuite {
  val decoder = Decoder.decodeValidated[Int, String]("E", "A")
  val encoder = Encoder.encodeValidated[Int, String]("E", "A")
  val codec = Codec.codecForValidated[Int, String]("E", "A")

  checkAll("Codec[Validated[Int, String]]", CodecTests[Validated[Int, String]](decoder, encoder).codec)
  checkAll("Codec[Validated[Int, String]] via Codec", CodecTests[Validated[Int, String]](codec, codec).codec)
  checkAll(
    "Codec[Validated[Int, String]] via Decoder and Codec",
    CodecTests[Validated[Int, String]](decoder, codec).codec
  )
  checkAll(
    "Codec[Validated[Int, String]] via Encoder and Codec",
    CodecTests[Validated[Int, String]](codec, encoder).codec
  )
}

/** Codec law checks using the implicit codecs from disjunctionCodecs. */
class DisjunctionCodecSuite extends CirceSuite {
  import disjunctionCodecs._

  checkAll("Codec[Either[Int, String]]", CodecTests[Either[Int, String]].codec)
  checkAll("Codec[Validated[String, Int]]", CodecTests[Validated[String, Int]].codec)
}

/** Checks that each primitive decoder rejects JSON of every other shape. */
class DecodingFailureSuite extends CirceSuite {
  // One sample JSON value per shape: number, boolean, string, array, object.
  val n = Json.fromInt(10)
  val b = Json.True
  val s = Json.fromString("foo")
  val l = Json.arr(s)
  val o = Json.obj("foo" -> n)

  val nd = Decoder[Int]
  val bd = Decoder[Boolean]
  val sd = Decoder[String]
  val ld = Decoder[List[String]]
  val od = Decoder[Map[String, Int]]

  "A JSON number" should "not be decoded as a non-numeric type" in {
    assert(List(bd, sd, ld, od).forall(d => d.decodeJson(n).isLeft))
  }

  "A JSON boolean" should "not be decoded as a non-boolean type" in {
    assert(List(nd, sd, ld, od).forall(d => d.decodeJson(b).isLeft))
  }

  "A JSON string" should "not be decoded as a non-string type" in {
    assert(List(nd, bd, ld, od).forall(d => d.decodeJson(s).isLeft))
  }

  "A JSON array" should "not be decoded as an inappropriate type" in {
    assert(List(nd, bd, sd, od).forall(d => d.decodeJson(l).isLeft))
  }

  "A JSON object" should "not be decoded as an inappropriate type" in {
    assert(List(nd, bd, sd, ld).forall(d => d.decodeJson(o).isLeft))
  }
}
| travisbrown/circe | modules/tests/shared/src/test/scala/io/circe/CodecSuites.scala | Scala | apache-2.0 | 10,645 |
package com.thetestpeople.trt.json
import com.thetestpeople.trt.model.Id
import com.thetestpeople.trt.model.Test
import com.thetestpeople.trt.model.TestStatus
/**
 * View of a test which we expose via the JSON API.
 *
 * @param id unique identifier of the test
 * @param name name of the test
 * @param groupOpt optional group the test belongs to
 * @param statusOpt optional current status of the test
 * @param ignored whether the test is currently ignored
 */
case class TestApiView(
  id: Id[Test],
  name: String,
  groupOpt: Option[String],
  statusOpt: Option[TestStatus],
  ignored: Boolean)
package idv.brianhsu.maidroid.plurk.view
import idv.brianhsu.maidroid.plurk.adapter._
import org.bone.soplurk.constant.Qualifier
import android.widget.Spinner
import android.content.Context
import android.util.AttributeSet
/**
 * Spinner populated with plurk qualifiers via QualifierSpinnerAdapter.
 */
class QualifierSpinner(context: Context, attrs: AttributeSet) extends Spinner(context, attrs) {
  // Adapter position selected by default on creation.
  // NOTE(review): magic index 17 — presumably the neutral/default qualifier in
  // QualifierSpinnerAdapter's ordering; confirm against the adapter's item list.
  private val DefaultQualifierPosition = 17

  this.setAdapter(new QualifierSpinnerAdapter(context))
  this.setSelection(DefaultQualifierPosition)

  /** The currently selected spinner item, typed as a Qualifier. */
  def getSelectedQualifier = this.getSelectedItem.asInstanceOf[Qualifier]
}
| brianhsu/MaidroidPlurk | src/main/scala/view/QualifierSpinner.scala | Scala | gpl-3.0 | 480 |
package com.avsystem.commons
package redis.util
import com.avsystem.commons.collection.CrossBuilder
/**
 * A builder that never materializes the added elements: each element is
 * immediately folded into a running accumulator with `fun`, starting
 * from `zero`. `result()` returns the current fold value.
 */
class FoldingBuilder[A, B](zero: B, fun: (B, A) => B) extends CrossBuilder[A, B] {
  // Running fold state; reset back to `zero` by clear().
  private[this] var acc: B = zero

  def addOne(elem: A): this.type = {
    acc = fun(acc, elem)
    this
  }

  def clear(): Unit = {
    acc = zero
  }

  def result(): B = acc
}
/** A builder that discards all elements and always produces Unit. */
object UnitBuilder extends CrossBuilder[Any, Unit] {
  def addOne(elem: Any): this.type = this
  def clear(): Unit = ()
  def result(): Unit = ()
}
| AVSystem/scala-commons | commons-redis/src/main/scala/com/avsystem/commons/redis/util/FoldingBuilder.scala | Scala | mit | 499 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.lang.reflect.Modifier
import scala.reflect.{classTag, ClassTag}
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions.{BoundReference, Cast}
import org.apache.spark.sql.catalyst.expressions.objects.{DecodeUsingSerializer, EncodeUsingSerializer}
import org.apache.spark.sql.types._
/**
* Methods for creating an [[Encoder]].
*
* @since 1.6.0
*/
object Encoders {

  /**
   * An encoder for nullable boolean type.
   * The Scala primitive encoder is available as [[scalaBoolean]].
   * @since 1.6.0
   */
  def BOOLEAN: Encoder[java.lang.Boolean] = ExpressionEncoder()

  /**
   * An encoder for nullable byte type.
   * The Scala primitive encoder is available as [[scalaByte]].
   * @since 1.6.0
   */
  def BYTE: Encoder[java.lang.Byte] = ExpressionEncoder()

  /**
   * An encoder for nullable short type.
   * The Scala primitive encoder is available as [[scalaShort]].
   * @since 1.6.0
   */
  def SHORT: Encoder[java.lang.Short] = ExpressionEncoder()

  /**
   * An encoder for nullable int type.
   * The Scala primitive encoder is available as [[scalaInt]].
   * @since 1.6.0
   */
  def INT: Encoder[java.lang.Integer] = ExpressionEncoder()

  /**
   * An encoder for nullable long type.
   * The Scala primitive encoder is available as [[scalaLong]].
   * @since 1.6.0
   */
  def LONG: Encoder[java.lang.Long] = ExpressionEncoder()

  /**
   * An encoder for nullable float type.
   * The Scala primitive encoder is available as [[scalaFloat]].
   * @since 1.6.0
   */
  def FLOAT: Encoder[java.lang.Float] = ExpressionEncoder()

  /**
   * An encoder for nullable double type.
   * The Scala primitive encoder is available as [[scalaDouble]].
   * @since 1.6.0
   */
  def DOUBLE: Encoder[java.lang.Double] = ExpressionEncoder()

  /**
   * An encoder for nullable string type.
   *
   * @since 1.6.0
   */
  def STRING: Encoder[java.lang.String] = ExpressionEncoder()

  /**
   * An encoder for nullable decimal type.
   *
   * @since 1.6.0
   */
  def DECIMAL: Encoder[java.math.BigDecimal] = ExpressionEncoder()

  /**
   * An encoder for nullable date type.
   *
   * @since 1.6.0
   */
  def DATE: Encoder[java.sql.Date] = ExpressionEncoder()

  /**
   * Creates an encoder that serializes instances of the `java.time.LocalDate` class
   * to the internal representation of nullable Catalyst's DateType.
   *
   * @since 3.0.0
   */
  def LOCALDATE: Encoder[java.time.LocalDate] = ExpressionEncoder()

  /**
   * An encoder for nullable timestamp type.
   *
   * @since 1.6.0
   */
  def TIMESTAMP: Encoder[java.sql.Timestamp] = ExpressionEncoder()

  /**
   * Creates an encoder that serializes instances of the `java.time.Instant` class
   * to the internal representation of nullable Catalyst's TimestampType.
   *
   * @since 3.0.0
   */
  def INSTANT: Encoder[java.time.Instant] = ExpressionEncoder()

  /**
   * An encoder for arrays of bytes.
   *
   * @since 1.6.1
   */
  def BINARY: Encoder[Array[Byte]] = ExpressionEncoder()

  /**
   * Creates an encoder for Java Bean of type T.
   *
   * T must be publicly accessible.
   *
   * supported types for java bean field:
   *  - primitive types: boolean, int, double, etc.
   *  - boxed types: Boolean, Integer, Double, etc.
   *  - String
   *  - java.math.BigDecimal, java.math.BigInteger
   *  - time related: java.sql.Date, java.sql.Timestamp, java.time.LocalDate, java.time.Instant
   *  - collection types: only array and java.util.List currently, map support is in progress
   *  - nested java bean.
   *
   * @since 1.6.0
   */
  def bean[T](beanClass: Class[T]): Encoder[T] = ExpressionEncoder.javaBean(beanClass)

  /**
   * (Scala-specific) Creates an encoder that serializes objects of type T using Kryo.
   * This encoder maps T into a single byte array (binary) field.
   *
   * T must be publicly accessible.
   *
   * @since 1.6.0
   */
  def kryo[T: ClassTag]: Encoder[T] = genericSerializer(useKryo = true)

  /**
   * Creates an encoder that serializes objects of type T using Kryo.
   * This encoder maps T into a single byte array (binary) field.
   *
   * T must be publicly accessible.
   *
   * @since 1.6.0
   */
  def kryo[T](clazz: Class[T]): Encoder[T] = kryo(ClassTag[T](clazz))

  /**
   * (Scala-specific) Creates an encoder that serializes objects of type T using generic Java
   * serialization. This encoder maps T into a single byte array (binary) field.
   *
   * T must be publicly accessible.
   *
   * @note This is extremely inefficient and should only be used as the last resort.
   *
   * @since 1.6.0
   */
  def javaSerialization[T: ClassTag]: Encoder[T] = genericSerializer(useKryo = false)

  /**
   * Creates an encoder that serializes objects of type T using generic Java serialization.
   * This encoder maps T into a single byte array (binary) field.
   *
   * T must be publicly accessible.
   *
   * @note This is extremely inefficient and should only be used as the last resort.
   *
   * @since 1.6.0
   */
  def javaSerialization[T](clazz: Class[T]): Encoder[T] = javaSerialization(ClassTag[T](clazz))

  /** Throws an exception if T is not a public class. */
  private def validatePublicClass[T: ClassTag](): Unit = {
    if (!Modifier.isPublic(classTag[T].runtimeClass.getModifiers)) {
      throw new UnsupportedOperationException(
        s"${classTag[T].runtimeClass.getName} is not a public class. " +
          "Only public classes are supported.")
    }
  }

  /** A way to construct encoders using generic serializers. */
  private def genericSerializer[T: ClassTag](useKryo: Boolean): Encoder[T] = {
    if (classTag[T].runtimeClass.isPrimitive) {
      throw new UnsupportedOperationException("Primitive types are not supported.")
    }

    validatePublicClass[T]()

    ExpressionEncoder[T](
      // Serialize the object (bound at ordinal 0) into a single binary field;
      // `useKryo` selects Kryo vs generic Java serialization.
      objSerializer =
        EncodeUsingSerializer(
          BoundReference(0, ObjectType(classOf[AnyRef]), nullable = true), kryo = useKryo),
      // Deserialize the binary column at ordinal 0 back into a T.
      objDeserializer =
        DecodeUsingSerializer[T](
          Cast(GetColumnByOrdinal(0, BinaryType), BinaryType),
          classTag[T],
          kryo = useKryo),
      clsTag = classTag[T]
    )
  }

  /**
   * An encoder for 2-ary tuples.
   *
   * @since 1.6.0
   */
  def tuple[T1, T2](
      e1: Encoder[T1],
      e2: Encoder[T2]): Encoder[(T1, T2)] = {
    ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2))
  }

  /**
   * An encoder for 3-ary tuples.
   *
   * @since 1.6.0
   */
  def tuple[T1, T2, T3](
      e1: Encoder[T1],
      e2: Encoder[T2],
      e3: Encoder[T3]): Encoder[(T1, T2, T3)] = {
    ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2), encoderFor(e3))
  }

  /**
   * An encoder for 4-ary tuples.
   *
   * @since 1.6.0
   */
  def tuple[T1, T2, T3, T4](
      e1: Encoder[T1],
      e2: Encoder[T2],
      e3: Encoder[T3],
      e4: Encoder[T4]): Encoder[(T1, T2, T3, T4)] = {
    ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2), encoderFor(e3), encoderFor(e4))
  }

  /**
   * An encoder for 5-ary tuples.
   *
   * @since 1.6.0
   */
  def tuple[T1, T2, T3, T4, T5](
      e1: Encoder[T1],
      e2: Encoder[T2],
      e3: Encoder[T3],
      e4: Encoder[T4],
      e5: Encoder[T5]): Encoder[(T1, T2, T3, T4, T5)] = {
    ExpressionEncoder.tuple(
      encoderFor(e1), encoderFor(e2), encoderFor(e3), encoderFor(e4), encoderFor(e5))
  }

  /**
   * An encoder for Scala's product type (tuples, case classes, etc).
   * @since 2.0.0
   */
  def product[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive int type.
   * @since 2.0.0
   */
  def scalaInt: Encoder[Int] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive long type.
   * @since 2.0.0
   */
  def scalaLong: Encoder[Long] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive double type.
   * @since 2.0.0
   */
  def scalaDouble: Encoder[Double] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive float type.
   * @since 2.0.0
   */
  def scalaFloat: Encoder[Float] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive byte type.
   * @since 2.0.0
   */
  def scalaByte: Encoder[Byte] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive short type.
   * @since 2.0.0
   */
  def scalaShort: Encoder[Short] = ExpressionEncoder()

  /**
   * An encoder for Scala's primitive boolean type.
   * @since 2.0.0
   */
  def scalaBoolean: Encoder[Boolean] = ExpressionEncoder()
}
| pgandhi999/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/Encoders.scala | Scala | apache-2.0 | 9,406 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import kafka.utils.Json
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.SecurityUtils
/** Constants, JSON (de)serialization helpers, and wildcard values for ACLs. */
object Acl {
  // Principal matching any user.
  val WildCardPrincipal: KafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "*")
  // Host value matching any host.
  val WildCardHost: String = "*"
  val AllowAllAcl = new Acl(WildCardPrincipal, Allow, WildCardHost, All)

  // JSON field names used by toJsonCompatibleMap / fromBytes.
  val PrincipalKey = "principal"
  val PermissionTypeKey = "permissionType"
  val OperationKey = "operation"
  val HostsKey = "host"
  val VersionKey = "version"
  val CurrentVersion = 1
  val AclsKey = "acls"

  /**
   *
   * @param bytes of acls json string
   *
   * <p>
      {
        "version": 1,
        "acls": [
          {
            "host":"host1",
            "permissionType": "Deny",
            "operation": "Read",
            "principal": "User:alice"
          }
        ]
      }
   * </p>
   *
   * @return the parsed set of ACLs, or the empty set for null/empty/unparseable input
   */
  def fromBytes(bytes: Array[Byte]): Set[Acl] = {
    // Expression-style if/else instead of an early `return`.
    if (bytes == null || bytes.isEmpty)
      collection.immutable.Set.empty[Acl]
    else
      Json.parseBytes(bytes).map(_.asJsonObject).map { js =>
        //the acl json version.
        require(js(VersionKey).to[Int] == CurrentVersion)
        js(AclsKey).asJsonArray.iterator.map(_.asJsonObject).map { itemJs =>
          val principal = SecurityUtils.parseKafkaPrincipal(itemJs(PrincipalKey).to[String])
          val permissionType = PermissionType.fromString(itemJs(PermissionTypeKey).to[String])
          val host = itemJs(HostsKey).to[String]
          val operation = Operation.fromString(itemJs(OperationKey).to[String])
          new Acl(principal, permissionType, host, operation)
        }.toSet
      }.getOrElse(Set.empty)
  }

  /** Builds the versioned JSON-compatible map wrapping the given ACLs. */
  def toJsonCompatibleMap(acls: Set[Acl]): Map[String, Any] = {
    Map(Acl.VersionKey -> Acl.CurrentVersion, Acl.AclsKey -> acls.map(acl => acl.toMap).toList)
  }
}
/**
* An instance of this class will represent an acl that can express following statement.
* <pre>
* Principal P has permissionType PT on Operation O1 from hosts H1.
* </pre>
* @param principal A value of *:* indicates all users.
* @param permissionType
* @param host A value of * indicates all hosts.
* @param operation A value of ALL indicates all operations.
*/
/**
 * An access-control entry expressing the statement:
 * <pre>
 * Principal P has permissionType PT on Operation O1 from hosts H1.
 * </pre>
 * @param principal A value of *:* indicates all users.
 * @param permissionType
 * @param host A value of * indicates all hosts.
 * @param operation A value of ALL indicates all operations.
 */
case class Acl(principal: KafkaPrincipal, permissionType: PermissionType, host: String, operation: Operation) {

  /**
   * TODO: Ideally we would have a symmetric toJson method but our current json library can not jsonify/dejsonify complex objects.
   * @return Map representation of the Acl.
   */
  def toMap(): Map[String, Any] =
    Map(
      Acl.PrincipalKey -> principal.toString,
      Acl.PermissionTypeKey -> permissionType.name,
      Acl.OperationKey -> operation.name,
      Acl.HostsKey -> host)

  override def toString: String =
    s"$principal has ${permissionType.name} permission for operations: $operation from hosts: $host"
}
| themarkypantz/kafka | core/src/main/scala/kafka/security/auth/Acl.scala | Scala | apache-2.0 | 3,703 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.parrot.config
import com.twitter.conversions.time._
import com.twitter.logging.{Logger, LoggerFactory}
import com.twitter.ostrich.admin._
import com.twitter.parrot.feeder.{LogSource, LogSourceImpl, ParrotFeeder}
import com.twitter.util.{Duration, Config}
import java.util.concurrent.TimeUnit
/**
 * Configuration for the Parrot feeder: reads a request log and feeds batches
 * of requests to parrot server instances.
 *
 * Applying the config yields a function that, given the [[RuntimeEnvironment]],
 * boots the admin/stats HTTP service and returns the [[ParrotFeeder]] service.
 */
trait ParrotFeederConfig extends Config[RuntimeEnvironment => Service]
  with ParrotCommonConfig
{
  var batchSize = 1000
  var duration = Duration(0, TimeUnit.MILLISECONDS)
  var httpPort = 9993
  var inputLog: String = ""
  var jobName = "parrot"
  var linesToSkip = 0
  var maxRequests = Integer.MAX_VALUE
  var numInstances = 1
  var numThreads = 0
  var pollInterval = Duration(1, TimeUnit.SECONDS)
  var requestRate = 1
  var reuseFile = false
  var statsName = "parrot-feeder"

  // runtime configured
  var logSource: Option[LogSource] = None

  def apply() = { (runtime: RuntimeEnvironment) =>
    // Command-line arguments override the statically configured values.
    val adminPort = runtime.arguments.get("httpPort").map(_.toInt).getOrElse(httpPort)
    Logger.configure(loggers)

    inputLog = runtime.arguments.getOrElse("log", inputLog)

    // Evaluated for its side effect: starts the admin HTTP service with
    // JSON stats logging and time-series collection.
    AdminServiceFactory(
      adminPort,
      statsNodes = StatsFactory(
        reporters = JsonStatsLoggerFactory(
          period = 1.minute,
          serviceName = statsName
        ) :: TimeSeriesCollectorFactory()
      )
    )(runtime)

    val feeder = new ParrotFeeder(this)
    ServiceTracker.register(feeder)
    // Fall back to reading the configured input log unless a source was injected.
    if (this.logSource.isEmpty) {
      this.logSource = Some(new LogSourceImpl(this.inputLog))
    }
    feeder
  }
}
| twitter/iago | src/main/scala/com/twitter/parrot/config/ParrotFeederConfig.scala | Scala | apache-2.0 | 2,109 |
package com.twitter.inject.thrift.integration.inheritance
import com.google.inject.Module
import com.twitter.inject.exceptions.PossiblyRetryable
import com.twitter.inject.thrift.ThriftMethodBuilderFactory
import com.twitter.inject.thrift.integration.filters.MethodLoggingTypeAgnosticFilter
import com.twitter.inject.thrift.modules.{ThriftClientIdModule, ThriftMethodBuilderClientModule}
import com.twitter.inject.Injector
import com.twitter.serviceA.thriftscala.ServiceA
import com.twitter.serviceB.thriftscala.ServiceB
/**
 * Thrift client module for ServiceB, configuring per-method retry policies
 * and filters on the service-per-endpoint interface.
 */
object ServiceBThriftMethodBuilderClientModule
    extends ThriftMethodBuilderClientModule[
      ServiceB.ServicePerEndpoint,
      ServiceB.MethodPerEndpoint
    ] {

  override val modules: Seq[Module] = Seq(ThriftClientIdModule)

  override val dest = "flag!serviceB-thrift-service"
  override val label = "serviceB-thrift-client"

  override protected def configureServicePerEndpoint(
    injector: Injector,
    builder: ThriftMethodBuilderFactory[ServiceB.ServicePerEndpoint],
    servicePerEndpoint: ServiceB.ServicePerEndpoint
  ): ServiceB.ServicePerEndpoint = {
    // "echo" is inherited from ServiceA, hence the ServiceA method reference.
    val echoService = builder
      .method[ServiceA.Echo.Args, ServiceA.Echo.SuccessType](ServiceA.Echo)
      .withRetryForClassifier(PossiblyRetryable.ResponseClassifier)
      .withAgnosticFilter(new MethodLoggingTypeAgnosticFilter())
      .filtered[EchoFilter]
      .service

    // "ping" is ServiceB's own method: filtered, non-idempotent, retries off.
    val pingService = builder
      .method(ServiceB.Ping)
      .filtered(new PingFilter)
      .nonIdempotent
      .withRetryDisabled
      .service

    servicePerEndpoint
      .withEcho(echoService)
      .withPing(pingService)
  }
}
| twitter/finatra | inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/inheritance/ServiceBThriftMethodBuilderClientModule.scala | Scala | apache-2.0 | 1,606 |
package se.uu.farmbio.vs
import scala.io.Source
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import openeye.oedocking.OEDockMethod
import org.apache.spark.storage.StorageLevel
/** Transformations available on a pipeline of docked poses (SDF records as strings). */
trait PoseTransforms {
  /** Keeps only the best-scoring `bestN` poses per molecule id. */
  def collapse(bestN: Int): SBVSPipeline with PoseTransforms
  /** Sorts all poses by docking score, descending. */
  def sortByScore: SBVSPipeline with PoseTransforms
  /** Repartitions the underlying RDD to the default parallelism. */
  def repartition: SBVSPipeline with PoseTransforms
  /** Returns the `topN` highest-scoring poses (one per molecule id), best first. */
  def getTopPoses(topN: Int): Array[String]
}
/** Helpers for parsing molecule ids and docking scores out of SDF pose records. */
private[vs] object PosePipeline extends Logging {

  /**
   * Maps an OEDockMethod constant to the SDF property name under which the
   * docking score is stored. Throws MatchError for unsupported methods; the
   * caller below evaluates it inside a try and falls back to Double.MinValue.
   */
  private def scorePropertyName(method: Int): String = method match {
    case OEDockMethod.Chemgauss4 => "Chemgauss4"
    case OEDockMethod.Chemgauss3 => "Chemgauss3"
    case OEDockMethod.Shapegauss => "Shapegauss"
    case OEDockMethod.Chemscore => "Chemscore"
    case OEDockMethod.Hybrid => "Hybrid"
    case OEDockMethod.Hybrid1 => "Hybrid1"
    case OEDockMethod.Hybrid2 => "Hybrid2"
    case OEDockMethod.PLP => "PLP"
  }

  /** The molecule id is the first line of an SDF record. */
  private[vs] def parseId(pose: String) = {
    Source.fromString(pose).getLines.next
  }

  /** Parses both the id and the score of a pose; see [[parseScore]] for score semantics. */
  private[vs] def parseIdAndScore(method: Int)(pose: String): (String, Double) = {
    (parseId(pose), parseScore(method)(pose))
  }

  /**
   * Extracts the docking score of the given method from a pose's SDF properties.
   * Returns Double.MinValue when the score is absent, marked invalid, or the
   * molecule is malformed.
   */
  private[vs] def parseScore(method: Int)(pose: String): Double = {
    var score: Double = Double.MinValue
    // Sometimes OEChem produces molecules with an empty score or malformed
    // molecules; any failure here (including an unknown method constant)
    // is logged and the score stays at Double.MinValue.
    try {
      val propertyName = scorePropertyName(method)
      var res: String = null
      val it = SBVSPipeline.CDKInit(pose)
      if (it.hasNext()) {
        val mol = it.next
        res = mol.getProperty(propertyName)
      }
      // This magic string is Float.MaxValue printed in decimal; the docking
      // stage uses it as an "invalid score" marker.
      if (res != "340282346638528859811704183484516925440.000000") {
        score = res.toDouble
      }
    } catch {
      case exec: Exception => logWarning("JOB_INFO: Setting the score to Double.MinValue." +
        " It was not possible to parse the score of the following molecule due to \n" + exec +
        "\n" + exec.getStackTraceString + "\nPose:\n" + pose)
    }
    score
  }

  @deprecated("parent Method collapse deprecated", "Sep 29, 2016")
  private def collapsePoses(bestN: Int, parseScore: String => Double) = (record: (String, Iterable[String])) => {
    // Best-scoring poses first, then keep at most bestN of them.
    record._2.toList.sortBy(parseScore).reverse.take(bestN)
  }
}
/**
 * Pipeline stage over an RDD of docked poses (SDF records), providing
 * score-based sorting, per-molecule collapsing, and top-N selection.
 *
 * @param rdd the poses, one SDF record per element
 * @param scoreMethod an OEDockMethod constant selecting which score property to read
 */
private[vs] class PosePipeline(override val rdd: RDD[String], val scoreMethod: Int) extends SBVSPipeline(rdd)
  with PoseTransforms {

  // Need a local copy due to serialization error
  // http://spark-summit.org/wp-content/uploads/2013/10/McDonough-spark-tutorial_spark-summit-2013.pptx
  val methodBroadcast = rdd.sparkContext.broadcast(scoreMethod)

  override def getTopPoses(topN: Int) = {
    // Cached because the RDD is traversed twice: once to collect (id, score)
    // pairs, once to filter out the winning poses.
    val cachedRDD = rdd.cache()
    val methodBroadcastLocal = methodBroadcast
    val method = methodBroadcastLocal.value
    //Parsing id and Score in parallel and collecting data to driver
    val idAndScore = cachedRDD.map {
      case (mol) => PosePipeline.parseIdAndScore(method)(mol)
    }.collect()
    //Finding Distinct top id and score in serial at driver
    // (fold keeps the max score seen per id, then sorts descending by score)
    val topMols =
      idAndScore.foldLeft(Map[String, Double]() withDefaultValue Double.MinValue) {
        case (m, (id, score)) => m updated (id, score max m(id))
      }
        .toSeq
        .sortBy { case (id, score) => -score }
        .take(topN).toArray
    //Broadcasting the top id and score and search main rdd
    //for top molecules in parallel
    val topMolsBroadcast = cachedRDD.sparkContext.broadcast(topMols)
    val topPoses = cachedRDD.filter { mol =>
      val idAndScore = PosePipeline.parseIdAndScore(method)(mol)
      topMolsBroadcast.value
        .map(topHit => topHit == idAndScore)
        .reduce(_ || _)
    }
    // Keep exactly one pose per id: reduceByKey retains the first value seen.
    // NOTE(review): both reduce parameters are poses despite the name "id".
    val duplicateRemovedTopPoses = topPoses.map { mol =>
      (PosePipeline.parseId(mol),mol)
    }.reduceByKey((id, mol) => id).map{ case (id, mol) => mol }
    //return statement
    duplicateRemovedTopPoses.collect
      .sortBy {
        mol => -PosePipeline.parseScore(scoreMethod)(mol)
      }
  }

  @deprecated("Spark sortBy is slow, use getTopPoses instead", "Sep 29, 2016")
  override def sortByScore = {
    // Descending sort (ascending = false).
    val res = rdd.sortBy(PosePipeline
      .parseScore(methodBroadcast.value), false)
    new PosePipeline(res, scoreMethod)
  }

  @deprecated("getTopPoses includes collapsing", "Sep 29, 2016")
  override def collapse(bestN: Int) = {
    // Group all poses of the same molecule, then keep the bestN per group.
    val res = rdd.groupBy(PosePipeline.parseId)
      .flatMap(PosePipeline.collapsePoses(bestN, PosePipeline.parseScore(methodBroadcast.value)))
    new PosePipeline(res, scoreMethod)
  }

  override def repartition() = {
    val res = rdd.repartition(defaultParallelism)
    new PosePipeline(res, scoreMethod)
  }
}
package models
/** Credentials and profile data for an application user. */
case class User(username: String, password: String, email: String, profile: UserProfile)

/** Optional demographic details attached to a [[User]]. */
case class UserProfile(country: String, address: Option[String], age: Option[Int])
package org.jetbrains.sbt
package project.data.service
import java.io.File
import java.util
import com.intellij.openapi.externalSystem.model.DataNode
import com.intellij.openapi.externalSystem.model.project.ProjectData
import com.intellij.openapi.externalSystem.service.project.IdeModifiableModelsProvider
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.util.SystemProperties
import org.jetbrains.android.facet.{AndroidFacet, AndroidFacetType, AndroidRootUtil}
import org.jetbrains.sbt.project.data.AndroidFacetData
/**
* @author Nikolay Obedin
* @since 8/12/14.
*/
/**
 * External-system data service that imports [[AndroidFacetData]] nodes from an
 * SBT project model into IntelliJ Android facets.
 */
class AndroidFacetDataService extends AbstractDataService[AndroidFacetData, AndroidFacet](AndroidFacetData.Key) {
  // Delegates the actual work to the companion's Importer.
  override def createImporter(toImport: Seq[DataNode[AndroidFacetData]],
                              projectData: ProjectData,
                              project: Project,
                              modelsProvider: IdeModifiableModelsProvider): Importer[AndroidFacetData] =
    new AndroidFacetDataService.Importer(toImport, projectData, project, modelsProvider)
}
object AndroidFacetDataService {
  /** Applies each imported [[AndroidFacetData]] node to the corresponding module's Android facet. */
  private class Importer(dataToImport: Seq[DataNode[AndroidFacetData]],
                         projectData: ProjectData,
                         project: Project,
                         modelsProvider: IdeModifiableModelsProvider)
    extends AbstractImporter[AndroidFacetData](dataToImport, projectData, project, modelsProvider) {

    // For every facet node whose module can be resolved, ensure the facet
    // exists and push the node's settings into it.
    override def importData(): Unit =
      dataToImport.foreach { facetNode =>
        for {
          module <- getIdeModuleByNode(facetNode)
          facet = getOrCreateFacet(module)
        } {
          configureFacet(module, facet, facetNode.getData)
        }
      }

    /** Returns the module's existing Android facet, creating one if absent. */
    private def getOrCreateFacet(module: Module): AndroidFacet =
      Option(getModifiableFacetModel(module).getFacetByType(AndroidFacet.ID)).getOrElse(createFacet(module))

    /** Registers a new, default-configured Android facet on the module. */
    private def createFacet(module: Module): AndroidFacet = {
      val model = getModifiableFacetModel(module)
      val facetType = new AndroidFacetType
      val facet = facetType.createFacet(module, "Android", facetType.createDefaultConfiguration(), null)
      model.addFacet(facet)
      facet
    }

    /** Copies paths and ProGuard settings from the imported data into the facet configuration. */
    private def configureFacet(module: Module, facet: AndroidFacet, data: AndroidFacetData): Unit = {
      val configuration = facet.getConfiguration.getState

      // All facet paths are stored relative to the module directory.
      // NOTE(review): FileUtil.getRelativePath can return null for unrelated
      // roots, which would yield a "/null" path here — confirm inputs are
      // always under the module dir.
      val base = AndroidRootUtil.getModuleDirPath(module)
      def getRelativePath(f: File) = "/" + FileUtil.getRelativePath(base, FileUtil.toSystemIndependentName(f.getAbsolutePath), '/')

      configuration.GEN_FOLDER_RELATIVE_PATH_APT = getRelativePath(data.gen)
      configuration.GEN_FOLDER_RELATIVE_PATH_AIDL = getRelativePath(data.gen)
      configuration.MANIFEST_FILE_RELATIVE_PATH = getRelativePath(data.manifest)
      configuration.RES_FOLDER_RELATIVE_PATH = getRelativePath(data.res)
      configuration.ASSETS_FOLDER_RELATIVE_PATH = getRelativePath(data.assets)
      configuration.LIBS_FOLDER_RELATIVE_PATH = getRelativePath(data.libs)
      configuration.APK_PATH = getRelativePath(data.apk)

      // When the build defines ProGuard options, materialize them into a file
      // next to the project and enable ProGuard for this facet.
      configuration.myProGuardCfgFiles = new util.ArrayList[String]()
      if (data.proguardConfig.nonEmpty) {
        val proguardFile = new File(module.getProject.getBasePath) / "proguard-sbt.txt"
        FileUtil.writeToFile(proguardFile, data.proguardConfig.mkString(SystemProperties.getLineSeparator))
        configuration.myProGuardCfgFiles.add(proguardFile.getCanonicalPath)
        configuration.RUN_PROGUARD = true
      }
    }
  }
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/project/data/service/AndroidFacetDataService.scala | Scala | apache-2.0 | 3,616 |
/* Title: Pure/General/file.scala
Author: Makarius
File system operations.
*/
package isabelle
import java.io.{BufferedWriter, OutputStreamWriter, FileOutputStream, BufferedOutputStream,
OutputStream, InputStream, FileInputStream, BufferedInputStream, BufferedReader,
InputStreamReader, File => JFile, IOException}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import scala.collection.mutable
/** File system operations: directory listing, reading/writing (optionally gzipped), and copying. */
object File
{
  /* directory content */

  // Lists the entry names of a directory; empty on I/O failure (listFiles == null).
  def read_dir(dir: Path): List[String] =
  {
    if (!dir.is_dir) error("Bad directory: " + dir.toString)
    val files = dir.file.listFiles
    if (files == null) Nil
    else files.toList.map(_.getName)
  }

  // Lazily traverses the directory tree, yielding each well-formed path and
  // recursing into subdirectories; entries with malformed names are skipped.
  def find_files(dir: Path): Stream[Path] =
    read_dir(dir).toStream.map(name =>
      if (Path.is_wellformed(name)) {
        val path = dir + Path.basic(name)
        path #:: (if (path.is_dir) find_files(path) else Stream.empty)
      }
      else Stream.empty).flatten


  /* read */

  def read(file: JFile): String = Bytes.read(file).toString
  def read(path: Path): String = read(path.file)

  // Reads the whole reader character by character; closes the reader when done.
  def read_stream(reader: BufferedReader): String =
  {
    val output = new StringBuilder(100)
    var c = -1
    while ({ c = reader.read; c != -1 }) output += c.toChar
    reader.close
    output.toString
  }

  def read_stream(stream: InputStream): String =
    read_stream(new BufferedReader(new InputStreamReader(stream, UTF8.charset)))

  def read_gzip(file: JFile): String =
    read_stream(new GZIPInputStream(new BufferedInputStream(new FileInputStream(file))))

  def read_gzip(path: Path): String = read_gzip(path.file)


  /* read lines */

  // Reads lines until EOF or an IOException (treated as EOF), invoking
  // `progress` on each line; closes the reader when done.
  def read_lines(reader: BufferedReader, progress: String => Unit): List[String] =
  {
    val result = new mutable.ListBuffer[String]
    var line: String = null
    while ({ line = try { reader.readLine} catch { case _: IOException => null }; line != null }) {
      progress(line)
      result += line
    }
    reader.close
    result.toList
  }


  /* try_read */

  // Concatenates the contents of all existing files, each followed by a newline.
  def try_read(paths: Seq[Path]): String =
  {
    val buf = new StringBuilder
    for (path <- paths if path.is_file) {
      buf.append(read(path))
      buf.append('\n')
    }
    buf.toString
  }


  /* write */

  // Writes the text chunks through a stream produced by `make_stream`
  // (identity for plain writes, gzip wrapper for compressed writes).
  def write_file(file: JFile, text: Iterable[CharSequence],
    make_stream: OutputStream => OutputStream)
  {
    val stream = make_stream(new FileOutputStream(file))
    val writer = new BufferedWriter(new OutputStreamWriter(stream, UTF8.charset))
    try { text.iterator.foreach(writer.append(_)) }
    finally { writer.close }
  }

  def write(file: JFile, text: Iterable[CharSequence]): Unit = write_file(file, text, (s) => s)
  def write(file: JFile, text: CharSequence): Unit = write(file, List(text))
  def write(path: Path, text: Iterable[CharSequence]): Unit = write(path.file, text)
  def write(path: Path, text: CharSequence): Unit = write(path.file, text)

  def write_gzip(file: JFile, text: Iterable[CharSequence]): Unit =
    write_file(file, text, (s: OutputStream) => new GZIPOutputStream(new BufferedOutputStream(s)))
  def write_gzip(file: JFile, text: CharSequence): Unit = write_gzip(file, List(text))
  def write_gzip(path: Path, text: Iterable[CharSequence]): Unit = write_gzip(path.file, text)
  def write_gzip(path: Path, text: CharSequence): Unit = write_gzip(path.file, text)

  // Moves the current file aside to its backup name, then writes the new text.
  // NOTE(review): the result of renameTo is ignored, so a failed rename is silent.
  def write_backup(path: Path, text: CharSequence)
  {
    path.file renameTo path.backup.file
    File.write(path, text)
  }


  /* copy */

  // True when both paths refer to the same underlying file; false if the check fails.
  def eq(file1: JFile, file2: JFile): Boolean =
    try { java.nio.file.Files.isSameFile(file1.toPath, file2.toPath) }
    catch { case ERROR(_) => false }

  // Copies src to dst in 64 KiB chunks (overwriting dst), unless both refer
  // to the same file; streams are closed via nested finally blocks.
  def copy(src: JFile, dst: JFile)
  {
    if (!eq(src, dst)) {
      val in = new FileInputStream(src)
      try {
        val out = new FileOutputStream(dst)
        try {
          val buf = new Array[Byte](65536)
          var m = 0
          do {
            m = in.read(buf, 0, buf.length)
            if (m != -1) out.write(buf, 0, m)
          } while (m != -1)
        }
        finally { out.close }
      }
      finally { in.close }
    }
  }

  def copy(path1: Path, path2: Path): Unit = copy(path1.file, path2.file)
}
| wneuper/libisabelle | pide/2014/src/main/scala/General/file.scala | Scala | mit | 4,160 |
package rescala.core
import rescala.operator.RExceptions
import scala.annotation.implicitNotFound
import scala.util.DynamicVariable
trait Core {
/** In case you wondered why everything in REScala is in these weird bundle traits, this is why.
* The ReSource below depends on some abstract state, which is defined by the concrete scheduler implementations.
* As basically everything else references ReSources, everything must be bundled together.
* This is good for users, because they get strong guarantees about the used correctness, and the API is still OK.
* Its terrible for us, because the Scala Incremental compiler does not really work anymore.
*/
type State[_]
/** Source of (reactive) values. */
trait ReSource {
type Value
protected[rescala] def state: State[Value]
protected[rescala] def name: ReName
protected[rescala] def commit(base: Value): Value
}
/** A reactive value is something that can be reevaluated */
trait Derived extends ReSource {
final type ReIn = ReevTicket[Value]
final type Rout = Result[Value]
/** called if any of the dependencies ([[rescala.core.Core.ReSource]]s) changed in the current update turn,
* after all (known) dependencies are updated
*/
protected[rescala] def reevaluate(input: ReIn): Rout
}
/** Base implementation for reactives, with [[Derived]] for scheduling,
* together with a [[ReName]] and containing a [[State]]
*
* @param state the initial state passed by the scheduler
* @param name the name of the reactive, useful for debugging as it often contains positional information
*/
abstract class Base[V](override protected[rescala] val state: State[V], override val name: ReName)
extends ReSource {
override type Value = V
override def toString: String = s"${name.str}($state)"
}
/** Common macro accessors for [[rescala.operator.SignalBundle.Signal]] and [[rescala.operator.EventBundle.Event]]
*
* @tparam A return type of the accessor
* @groupname accessor Accessor and observers
*/
trait Readable[+A] extends ReSource {
/** Interprets the internal type to the external type
*
* @group internal
*/
def read(v: Value): A
}
/** Encapsulates an action changing a single source. */
trait InitialChange {
/** The source to be changed. */
val source: ReSource
/** @param base the current (old) value of the source.
* @param writeCallback callback to apply the new value, executed only if the action is approved by the source.
* @return the propagation status of the source (whether or not to reevaluate output reactives).
*/
def writeValue(base: source.Value, writeCallback: source.Value => Unit): Boolean
}
/** An initializer is the glue between that binds the creation of the reactive from the operator and scheduler side together.
* The operator provides the logic to wrap a state and the scheduler provides the implementation of that state.
* This is where the two are joined. After that, the new reactive may have to be initialized.
*/
trait Initializer {
/** Creates and correctly initializes new [[Derived]]s */
final private[rescala] def create[V, T <: Derived](
incoming: Set[ReSource],
initValue: V,
needsReevaluation: Boolean,
creationTicket: CreationTicket
)(instantiateReactive: State[V] => T): T = {
val state = makeDerivedStructState[V](initValue)
val reactive = instantiateReactive(state)
register(reactive)
initialize(reactive, incoming, needsReevaluation)
reactive
}
/** hook for schedulers to globally collect all created resources, usually does nothing */
protected[this] def register(reactive: ReSource): Unit = ()
/** Correctly initializes [[ReSource]]s */
final private[rescala] def createSource[V, T <: ReSource](
intv: V,
creationTicket: CreationTicket
)(instantiateReactive: State[V] => T): T = {
val state = makeSourceStructState[V](intv)
val reactive = instantiateReactive(state)
register(reactive)
reactive
}
/** Creates the internal state of [[Derived]]s */
protected[this] def makeDerivedStructState[V](initialValue: V): State[V]
/** Creates the internal state of [[ReSource]]s */
protected[this] def makeSourceStructState[V](initialValue: V): State[V] =
makeDerivedStructState[V](initialValue)
/** to be implemented by the propagation algorithm, called when a new reactive has been instantiated and needs to be connected to the graph and potentially reevaluated.
*
* @param reactive the newly instantiated reactive
* @param incoming a set of incoming dependencies
* @param needsReevaluation true if the reactive must be reevaluated at creation even if none of its dependencies change in the creating turn.
*/
protected[this] def initialize(
reactive: Derived,
incoming: Set[ReSource],
needsReevaluation: Boolean
): Unit
}
/** User facing low level API to access values in a static context. */
sealed abstract class StaticTicket(val tx: Transaction) {
private[rescala] def collectStatic(reactive: ReSource): reactive.Value
final def dependStatic[A](reactive: Readable[A]): A = reactive.read(collectStatic(reactive))
}
/** User facing low level API to access values in a dynamic context. */
abstract class DynamicTicket(tx: Transaction) extends StaticTicket(tx) {
private[rescala] def collectDynamic(reactive: ReSource): reactive.Value
final def depend[A](reactive: Readable[A]): A = reactive.read(collectDynamic(reactive))
}
/** [[ReevTicket]] is given to the [[Derived]] reevaluate method and allows to access other reactives.
* The ticket tracks return values, such as dependencies, the value, and if the value should be propagated.
* Such usages make it unsuitable as an API for the user, where [[StaticTicket]] or [[DynamicTicket]] should be used instead.
*/
abstract class ReevTicket[V](tx: Transaction, private var _before: V)
extends DynamicTicket(tx)
with Result[V] {
// schedulers implement these to allow access
protected def staticAccess(reactive: ReSource): reactive.Value
protected def dynamicAccess(reactive: ReSource): reactive.Value
// dependency tracking accesses
private[rescala] final override def collectStatic(reactive: ReSource): reactive.Value = {
assert(collectedDependencies == null || collectedDependencies.contains(reactive))
staticAccess(reactive)
}
private[rescala] final override def collectDynamic(reactive: ReSource): reactive.Value = {
assert(collectedDependencies != null, "may not access dynamic dependencies without tracking dependencies")
val updatedDeps = collectedDependencies + reactive
if (updatedDeps eq collectedDependencies) {
staticAccess(reactive)
} else {
collectedDependencies = updatedDeps
dynamicAccess(reactive)
}
}
// inline result into ticket, to reduce the amount of garbage during reevaluation
private var collectedDependencies: Set[ReSource] = null
private var _propagate = false
private var value: V = _
private var effect: Observation = null
override final def toString: String =
s"Result(value = $value, propagate = $activate, deps = $collectedDependencies)"
final def before: V = _before
/** Advises the ticket to track dynamic dependencies.
* The passed initial set of dependencies may be processed as if they were static,
* and are also returned in the resulting dependencies.
*/
final def trackDependencies(initial: Set[ReSource]): ReevTicket[V] = { collectedDependencies = initial; this }
final def trackStatic(): ReevTicket[V] = { collectedDependencies = null; this }
final def withPropagate(p: Boolean): ReevTicket[V] = { _propagate = p; this }
final def withValue(v: V): ReevTicket[V] = {
require(v != null, "value must not be null");
value = v;
_propagate = true;
this
}
final def withEffect(v: Observation): ReevTicket[V] = { effect = v; this }
final override def activate: Boolean = _propagate
final override def forValue(f: V => Unit): Unit = if (value != null) f(value)
final override def forEffect(f: Observation => Unit): Unit = if (effect != null) f(effect)
final override def inputs(): Option[Set[ReSource]] = Option(collectedDependencies)
final def reset[NT](nb: NT): ReevTicket[NT] = {
_propagate = false
value = null.asInstanceOf[V]
effect = null
collectedDependencies = null
val res = this.asInstanceOf[ReevTicket[NT]]
res._before = nb
res
}
}
/** Result of a reevaluation */
trait Result[T] {
/** True iff outputs must also be reevaluated, false iff the propagation ends here. */
def activate: Boolean
/** No-allocation accessor for the optional new value. */
def forValue(f: T => Unit): Unit
/** No-allocation accessor for the effect caused by the reevaluation. */
def forEffect(f: Observation => Unit): Unit
/** New input resources.
* None for static reactives.
* Otherwise a list of all static reactives, and accessed dynamic reactives.
*/
def inputs(): Option[Set[ReSource]]
}
/** Records side effects for latex execution. */
trait Observation { def execute(): Unit }
/** Enables reading of the current value during admission.
* Keeps track of written sources internally.
*/
final class AdmissionTicket(val tx: Transaction, declaredWrites: Set[ReSource]) {
private var _initialChanges: Map[ReSource, InitialChange] = Map[ReSource, InitialChange]()
private[rescala] def initialChanges: Map[ReSource, InitialChange] = _initialChanges
private[rescala] def recordChange[T](ic: InitialChange): Unit = {
assert(
declaredWrites.contains(ic.source),
"must not set a source that has not been pre-declared for the transaction"
)
assert(!_initialChanges.contains(ic.source), "must not admit same source twice in one turn")
_initialChanges += ic.source -> ic
}
/** convenience method as many case studies depend on this being available directly on the AT */
def now[A](reactive: Readable[A]): A = tx.now(reactive)
private[rescala] var wrapUp: Transaction => Unit = null
}
/** Enables the creation of other reactives */
@implicitNotFound(msg = "Could not find capability to create reactives. Maybe a missing import?")
final class CreationTicket(val scope: ScopeSearch, val rename: ReName) {
private[rescala] def create[V, T <: Derived](
incoming: Set[ReSource],
initValue: V,
needsReevaluation: Boolean
)(instantiateReactive: State[V] => T): T = {
scope.embedTransaction(_.initializer.create(incoming, initValue, needsReevaluation, this)(instantiateReactive))
}
private[rescala] def createSource[V, T <: ReSource](intv: V)(instantiateReactive: State[V] => T): T = {
scope.embedTransaction(_.initializer.createSource(intv, this)(instantiateReactive))
}
}
object CreationTicket {
implicit def fromScope(implicit scope: ScopeSearch, line: ReName): CreationTicket = new CreationTicket(scope, line)
// cases below are when one explicitly passes one of the parameters
implicit def fromExplicitDynamicScope(factory: DynamicScope)(implicit line: ReName): CreationTicket =
new CreationTicket(new ScopeSearch(Right(factory)), line)
implicit def fromTransaction(tx: Transaction)(implicit line: ReName): CreationTicket =
new CreationTicket(new ScopeSearch(Left(tx)), line)
implicit def fromName(str: String)(implicit scopeSearch: ScopeSearch): CreationTicket =
new CreationTicket(scopeSearch, ReName(str))
}
/** Essentially a kill switch, that will remove the reactive at some point. */
trait Disconnectable {
def disconnect(): Unit
}
/** Removes the reactive instead of its next normal reevaluation.
* This makes use of the fact, that all reactives are technically dynamic reactives,
* and removing incoming dependencies is always kinda safe, as long as we are sure we no longer care!
*/
trait DisconnectableImpl extends Derived with Disconnectable {
@volatile private var disconnected = false
final def disconnect(): Unit = {
disconnected = true
}
final override protected[rescala] def reevaluate(rein: ReIn): Rout = {
if (disconnected) {
rein.trackDependencies(Set.empty)
rein
} else {
guardedReevaluate(rein)
}
}
protected[rescala] def guardedReevaluate(rein: ReIn): Rout
}
/** A transaction (or maybe transaction handle would be the better term) is available from reevaluation and admission tickets.
* That is, everywhere during a transaction, you can read reactives, but also create them.
* The reading values is core to any reactive propagation.
* But creating reactives using the [[Initializer]] is a liability to the scheduler, but a superpower to the operators.
* Its a classical tradeoff, but it would be better to not make this choice by default,
* that is, reactive creation should be limited such that we can experiment with schedulers that do not have this liability.
*/
trait Transaction {
final def now[A](reactive: Readable[A]): A = {
RExceptions.toExternalReadException(reactive, reactive.read(access(reactive)))
}
private[rescala] def access(reactive: ReSource): reactive.Value
def initializer: Initializer
}
/** Scheduler that defines the basic data-types available to the user and creates turns for propagation handling.
* Note: This should NOT extend [[DynamicScope]], but did so in the past and there are too many tests that assume so ...
*/
@implicitNotFound(msg = "Could not find an implicit scheduler. Did you forget an import?")
trait Scheduler extends DynamicScope {
final def forceNewTransaction[R](initialWrites: ReSource*)(admissionPhase: AdmissionTicket => R): R = {
forceNewTransaction(initialWrites.toSet, admissionPhase)
}
def forceNewTransaction[R](initialWrites: Set[ReSource], admissionPhase: AdmissionTicket => R): R
private[rescala] def singleReadValueOnce[A](reactive: Readable[A]): A
/** Name of the scheduler, used for helpful error messages. */
def schedulerName: String
override def toString: String = s"Scheduler($schedulerName)"
}
/** Provides the capability to look up transactions in the dynamic scope. */
trait DynamicScope {
private[rescala] def dynamicTransaction[T](f: Transaction => T): T
}
trait SchedulerImpl[Tx <: Transaction] extends DynamicScope with Scheduler {
final private[rescala] def dynamicTransaction[T](f: Transaction => T): T = {
_currentInitializer.value match {
case Some(transaction) => f(transaction)
case None => forceNewTransaction(Set.empty, ticket => f(ticket.tx))
}
}
final protected val _currentInitializer: DynamicVariable[Option[Tx]] =
new DynamicVariable[Option[Tx]](None)
final private[rescala] def withDynamicInitializer[R](init: Tx)(thunk: => R): R =
_currentInitializer.withValue(Some(init))(thunk)
}
case class ScopeSearch(val self: Either[Transaction, DynamicScope]) {
  /** Runs `f` in a transaction: uses the statically known transaction when present,
    * otherwise looks one up in the dynamic scope (which starts a new
    * transaction if the lookup comes up empty).
    */
  def embedTransaction[T](f: Transaction => T): T =
    self.fold(f, _.dynamicTransaction(f))
}
/** As reactives can be created during propagation, any Ticket can be converted to a creation ticket. */
object ScopeSearch extends LowPriorityScopeImplicits {
  /** A static ticket carries its transaction directly. */
  implicit def fromTicketImplicit(implicit ticket: StaticTicket): ScopeSearch =
    ScopeSearch(Left(ticket.tx))
  /** An admission ticket carries its transaction directly. */
  implicit def fromAdmissionImplicit(implicit ticket: AdmissionTicket): ScopeSearch =
    ScopeSearch(Left(ticket.tx))
  /** An in-scope transaction is embedded as-is. */
  implicit def fromTransactionImplicit(implicit tx: Transaction): ScopeSearch =
    ScopeSearch(Left(tx))
}
/** Fallback when no fitting ticket is found: these implicits search for a
  * [[DynamicScope]], creating the reactives outside of any turn.
  */
sealed trait LowPriorityScopeImplicits {
  implicit def fromSchedulerImplicit(implicit factory: DynamicScope): ScopeSearch =
    ScopeSearch(Right(factory))
}
}
| guidosalva/REScala | Code/Main/shared/src/main/scala/rescala/core/Core.scala | Scala | apache-2.0 | 16,854 |
package com.wincom.dcim.rest
import akka.actor.{ActorRef, ActorSystem}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.{ContentTypes, HttpEntity}
import akka.http.scaladsl.server.Directives.{complete, get, path}
import akka.http.scaladsl.server.RouteConcatenation._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.Config
import com.wincom.dcim.domain.Settings
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
* Created by wangxy on 17-8-15.
*/
/** Wires the REST services for all domain actors into one route and binds it to HTTP. */
trait ServiceSupport extends RequestTimeout {
  /** Builds every REST service around its backing actor, concatenates the routes,
    * binds them on the configured host/port and logs (or terminates on) the outcome.
    */
  def startService(fsus: ActorRef,
                   devices: ActorRef,
                   drivers: ActorRef,
                   signals: ActorRef,
                   alarms: ActorRef,
                   alarmRecords: ActorRef
                  )(implicit system: ActorSystem): Unit = {
    val config = system.settings.config
    val settings = Settings(system)
    val host = settings.http.host
    val port = settings.http.port
    implicit val ec = system.dispatcher //bindAndHandle requires an implicit ExecutionContext
    // requestTimeout is a pure config read, so compute it once for all services.
    val timeout = requestTimeout(config)
    // Route matching tries the services in this order.
    val serviceRoutes = Seq(
      new FsuService(fsus, system, timeout).routes,
      new DeviceService(devices, system, timeout).routes,
      new DriverService(drivers, system, timeout).routes,
      new SignalService(signals, system, timeout).routes,
      new AlarmService(alarms, system, timeout).routes,
      new AlarmRecordService(alarmRecords, system, timeout).routes
    )
    val api = serviceRoutes.reduce(_ ~ _)
    implicit val materializer = ActorMaterializer()
    val log = Logging(system.eventStream, "dcim-cluster")
    val bindingFuture: Future[ServerBinding] =
      Http().bindAndHandle(api, host, port)
    bindingFuture.onComplete {
      case s: Success[ServerBinding] =>
        log.info(s"dcim cluster API bound to ${s.value.localAddress} ")
      case f: Failure[ServerBinding] =>
        // The service is useless without its HTTP endpoint: shut the system down.
        log.error(f.exception, "Failed to bind to {}:{}!", host, port)
        system.terminate()
    }
  }
}
trait RequestTimeout {
  import scala.concurrent.duration._
  /** Reads `akka.http.server.request-timeout` from `config` and converts it to an akka Timeout.
    *
    * Fix: a non-finite configured value (e.g. "infinite") previously failed deep inside
    * `Duration.length` with an unhelpful error; it now raises a descriptive
    * IllegalArgumentException naming the setting and the offending value.
    */
  def requestTimeout(config: Config): Timeout = {
    val configured = config.getString("akka.http.server.request-timeout")
    Duration(configured) match {
      case finite: FiniteDuration => Timeout(finite)
      case _ =>
        throw new IllegalArgumentException(
          s"akka.http.server.request-timeout must be a finite duration, but was: $configured")
    }
  }
}
| xtwxy/mysc | dcim-cluster/rest/src/main/scala/com/wincom/dcim/rest/ServiceSupport.scala | Scala | apache-2.0 | 2,745 |
package com.weibo.datasys.rest.data
/**
* Created by tuoyu on 25/01/2017.
*/
/** Minimal user abstraction shared by the REST layer. */
trait User {
  /** Stable identifier of the user. */
  def userId: String
  /** Display name. */
  def name: String
  /** Identifier of the group the user belongs to. */
  def userGroupId: String
  /** Whether the user is considered valid by the auth flag. */
  def isValid: Boolean
}

/** User record as delivered by the web service.
  *
  * @param user_id  user identifier
  * @param name     display name
  * @param auth     authorization flag, expected to be an integer encoded as a string
  * @param group_id identifier of the user's group
  */
case class WebUser(
  user_id: String,
  name: String,
  auth: String,
  group_id: String
)
  extends User {
  /** Smallest `auth` value that still counts as valid. */
  val VALID_CODE = 0
  /** A user is valid when `auth` parses to an integer >= VALID_CODE.
    * Fix: a malformed `auth` value previously threw NumberFormatException here;
    * it now simply yields `false`.
    */
  override def isValid: Boolean =
    scala.util.Try(auth.toInt).toOption.exists(_ >= VALID_CODE)
  /** Parses `auth`; still throws NumberFormatException for non-integer input (unchanged contract). */
  def authFlag: Int = auth.toInt
  def userId: String = user_id
  def userGroupId: String = group_id
}
} | batizty/wolong | src/main/scala/com/weibo/datasys/rest/data/User.scala | Scala | apache-2.0 | 493 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.Reference
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/** Quill-backed repository for [[Reference]] dictionary entities.
  *
  * @param session          JDBC session provided by the Lagom persistence layer
  * @param executionContext context the blocking JDBC calls are scheduled on
  */
class ReferenceRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.ReferenceRepository[Reference , Int]
    with ReferenceMapping {
  /** Looks up a reference by integer id; the future fails with NoSuchElementException when absent. */
  def getById(id: Int): Future[Reference] = {
    Future(run(queryReference.filter(_.referenceId == lift(id))).headOption.get)
  }
  /** Looks up a reference by UUID; the future fails with NoSuchElementException when absent. */
  def getByUUID(uuid: UUID): Future[Reference] = {
    Future(run(queryReference.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }
  // NOTE(review): `id` is currently ignored and every reference is returned, which
  // makes this identical to getAll — looks like an unimplemented filter; confirm intent.
  def getByReferenceId(id : Int) : Future[List[Reference]] = {
    Future(run(queryReference))
  }
  /** Returns all references. */
  def getAll() : Future[List[Reference]] = {
    Future(run(queryReference))
  }
  /** Returns one page of references together with paging metadata.
    *
    * @param page     zero-based page index
    * @param pageSize number of elements per page
    */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[Reference]] = {
    val offset = page * pageSize
    val limit = (page + 1) * pageSize
    for {
      count <- countReference()
      elements <- if (offset > count) Future.successful(Nil)
      else selectReference(offset, limit)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }
  /** Counts all references. */
  private def countReference() = {
    Future(run(queryReference.size).toInt)
  }
  /** Returns the rows in [offset, limit).
    *
    * Fix: the previous drop(offset).take(limit) returned `limit` elements after the
    * offset (e.g. 20 rows for page 1 with pageSize 10) instead of the page slice;
    * slice(offset, limit) keeps exactly the requested window.
    */
  private def selectReference(offset: Int, limit: Int): Future[Seq[Reference]] = {
    Future(run(queryReference).slice(offset, limit).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/ReferenceRepository.scala | Scala | gpl-3.0 | 2,701 |
package com.github.gigurra.franklin.mongoimpl
import java.util.concurrent.TimeUnit
import java.util.logging.Logger
import reactivemongo.api.{MongoConnection, DB}
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.bson.BSONDocument
import com.github.gigurra.franklin.{Collection, Store}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}
/**
* Created by johan on 2015-12-24.
*/
/** MongoDB-backed [[Store]].
  *
  * Construction eagerly verifies connectivity by inserting a document into a
  * `connection_test` collection; if that fails, the mongo driver is closed and
  * the constructor rethrows the failure.
  *
  * @param dbName name of the database to open
  * @param mongo  wrapper holding the driver connection
  * @param codec  codec handed to every collection created by this store
  */
case class MongoStore(dbName: String, mongo: MongoDb, codec: BsonCodec) extends Store {
  // Timeout used for the blocking connection check below.
  implicit val timeout = Duration.apply(4, TimeUnit.SECONDS)
  private val logger = Logger.getLogger(this.getClass.getName)
  private val db = mongo.connection.apply(dbName)
  Try {
    // Check the connection
    val testCollection = db.apply[BSONCollection]("connection_test")
    val someOp = testCollection.insert(BSONDocument("abc" -> 123))
    Await.result(someOp, timeout)
  } match {
    case Success(_) =>
    case Failure(e) =>
      // Fail fast: release driver resources before propagating the connection error.
      logger.severe("Failed to connect to mongodb - closing mongodb actorsystem")
      mongo.close()
      throw e
  }
  /** Wraps the named BSON collection of this database as a [[Collection]]. */
  override def getOrCreate(name: String): Collection = {
    MongoCollection(db[BSONCollection](name), codec)
  }
  /** Shuts down the underlying driver actor system; the store is unusable afterwards. */
  override def close(): Unit = {
    db.connection.actorSystem.shutdown()
  }
}
| GiGurra/franklin | src/main/scala/com/github/gigurra/franklin/mongoimpl/MongoStore.scala | Scala | mit | 1,378 |
/* ---------------------------------------------------------------------
%%
%% Copyright (c) 2007-2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% ---------------------------------------------------------------------*/
package acceptance
import org.scalatest.BeforeAndAfterEach
import org.scalatest.FunSpec
import org.scalatest.Matchers
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.mockito.Matchers._
import java.util.concurrent.TimeUnit._
import com.amazonaws._
import com.amazonaws.services.dynamodbv2.model._
import com.jayway.awaitility.scala._
import com.jayway.awaitility.Awaitility._
/** Acceptance tests for DynamoDB-style range queries (hash + range key tables).
  *
  * NOTE(review): the suite is order-dependent — the first `it` creates and
  * populates the table that the nested query tests read, and the final `it`
  * deletes it.
  */
class QueryMappingTest extends FunSpec
  with AWSHelper with MockitoSugar with Matchers
  with BeforeAndAfterEach with AwaitilitySupport {
  // Fixture items: item_1 and item_3 share hash key 101 with different range keys (Title).
  val item_1 = new Item(
    ("Id", "N", "101"),
    ("Title", "S", "Some Title"),
    ("ISBN", "S", "ABC"))
  val item_2 = new Item(
    ("Id", "N", "102"),
    ("Title", "S", "Another Title"),
    ("ISBN", "S", "DEF"))
  val item_3 = new Item(
    ("Id", "N", "101"),
    ("Title", "S", "Tale of Two Databases"),
    ("ISBN", "S", "XYZ"))
  describe ("[US193709, US193711]: table type, hash + range, LWW behavior") {
    val range_table_name = "test_table_range"
    it ("should create range table") {
      val table_range:CreateTableResult = Table.create(range_table_name, "Id", "N", Some("Title"), Some("S"))
      Table.put(range_table_name)(item_1, item_2, item_3)
      // Table creation is asynchronous: poll until it reports ACTIVE.
      await atMost(2, MINUTES) until {
        val result = Table.describe(range_table_name)
        // TODO: and these together, once rinamo goes async
        "ACTIVE".equals(result.getTable().getTableStatus())
        range_table_name.equals(result.getTable().getTableName())
      }
    }
    describe ("read data, query item") {
      it ("from table using EQ") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("EQ"), Some("Some Title"))
        assert(query_result.getCount() == 1)
        assert("Some Title".equals(query_result.getItems().get(0).get("Title").getS()))
      }
      it ("from table using LE") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("LE"), Some("Tale of Two Databases"))
        assert(query_result.getCount() == 2)
      }
      it ("from table using LT") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("LT"), Some("Tale of Two Databases"))
        assert(query_result.getCount() == 1)
      }
      it ("from table using GE") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("GE"), Some("Tale of Two Databases"))
        assert(query_result.getCount() == 1)
      }
      it ("from table using GT") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("GT"), Some("Tale of Two Databases"))
        assert(query_result.getCount() == 0)
      }
      it ("from table using BEGINS_WITH") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("BEGINS_WITH"), Some("Some"))
        assert(query_result.getCount() == 1)
        assert("Some Title".equals(query_result.getItems().get(0).get("Title").getS()))
      }
      it ("from table using BETWEEN") {
        val query_result = Table.range_query(
          range_table_name, "Id", "101",
          Some("Title"), Some("BETWEEN"), Some("A"), Some("Z"))
        assert(query_result.getCount() == 2)
      }
      it ("finds all range values when using hash key only") (pending)
      /*
      {
        val query_result = Table.range_query(range_table_name, "Id", "101")
        assert(query_result.getCount() == 2)
      }
      */
    }
    it ("should delete range table") {
      // Deletion is idempotent here: a missing table is not a failure.
      try {
        Table.delete(range_table_name)
      }
      catch {
        case e: ResourceNotFoundException => {}
      }
      // Poll until describe fails, which signals the table is gone.
      await atMost(2, MINUTES) until {
        var exception:Throwable = null
        try {
          Table.describe(range_table_name)
        }
        catch {
          case e: Throwable => {
            exception = e
          }
        }
        exception != null
      }
    }
  }
}
| basho-labs/rinamo | tests/com.basho.dynamodb.integ/src/test/scala/acceptance/QueryMappingTest.scala | Scala | apache-2.0 | 5,128 |
/*
* This file is part of the \\BlueLaTeX project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bluelatex
package synchro
import akka.actor.{
Actor,
ActorRef,
Props,
Terminated
}
import scala.collection.mutable.Map
import name.fraser.neil.plaintext.DiffMatchPatch
/** Routes synchronization messages to one child actor per paper, creating paper
  * actors lazily and forgetting them once they terminate.
  */
class Synchronizer(store: ActorRef) extends Actor {
  /** Shared diff/patch engine handed to every paper actor. */
  val dmp = new DiffMatchPatch
  /** Live paper actors, keyed by paper id (which doubles as the child actor name). */
  val papers = Map.empty[String, ActorRef]
  /** Returns the actor for `paperId`, spawning and death-watching a new one on first use. */
  def getPaper(paperId: String) =
    papers.getOrElseUpdate(paperId, {
      val created = context.actorOf(Props(classOf[Paper], paperId, store, dmp), paperId)
      context.watch(created)
      created
    })
  def receive = {
    case (paperId: String, m) =>
      getPaper(paperId).forward(m)
    case Terminated(ref) =>
      // The child's actor name is the paper id, so drop it from the registry.
      papers -= ref.path.name
  }
}
| bluelatex/bluelatex-server | core/src/main/scala/bluelatex/synchro/Synchronizer.scala | Scala | apache-2.0 | 1,391 |
/*
* Copyright 2017 helloscala.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helloscala.jdbc
/**
 * Created by yangbajing(yangbajing@gmail.com) on 2017-03-02.
 */
/** Metadata describing one SQL result column.
  *
  * @param label    column label
  * @param name     column name
  * @param jdbcType JDBC type code (presumably a java.sql.Types constant — confirm at call sites)
  */
case class HSSqlMetaData(label: String, name: String, jdbcType: Int)
| helloscala/helloscala | hs-jdbc/src/main/scala/helloscala/jdbc/HSSqlMetaData.scala | Scala | apache-2.0 | 763 |
package com.roundeights.vfunk
import scala.collection.immutable.ListMap
import scala.concurrent.{Future, ExecutionContext}
import scala.util.Try
/**
 * Companion for the Form class: construction helpers shared with AsyncForm.
 */
object Form {
    /** Builds an insertion-ordered name -> field map from a field sequence */
    private[vfunk] def toMap[F <: CommonField[_]] (
        fields: Traversable[F]
    ): ListMap[String, F] = {
        ListMap.empty[String, F] ++ fields.map( field => field.name -> field )
    }
    /** Creates a form from a list of fields */
    def apply ( fields: Traversable[Field] ): Form = new Form( fields )
    /** Creates a form from a list of fields */
    def apply ( fields: Field* ): Form = new Form( fields )
}
/**
 * Behavior shared by synchronous and asynchronous forms.
 *
 * @tparam S concrete self type returned when adding a synchronous field
 * @tparam F type of the fields this form holds
 */
abstract class CommonForm[S <: CommonForm[_, F], F <: CommonField[_]] (
    val fields: ListMap[String, F]
) extends Traversable[F] {
    /** Creates a new form including a new field */
    def add( field: Field ): S
    /** Alias of `add` for synchronous fields */
    def + ( field: Field ): S = add(field)
    /** Creates a new async form including a new async field */
    def add( field: AsyncField ): AsyncForm
    /** Alias of `add` for async fields */
    def + ( field: AsyncField ): AsyncForm = add(field)
    /** Iterates the fields in insertion order */
    override def foreach[U] ( callback: F => U ): Unit
        = fields.foreach( pair => callback( pair._2 ) )
    /** Returns this form as an async form */
    def async: AsyncForm
}
/**
 * A synchronous form: an ordered set of named fields that validates string
 * values immediately.
 */
class Form (
    fields: ListMap[String, Field]
) extends CommonForm[Form, Field](fields) {
    /** Creates a form from a list of fields */
    def this ( fields: Traversable[Field] ) = this( Form.toMap(fields) )
    /** Creates a form from a list of fields */
    def this ( fields: Field* ) = this( fields )
    /** {@inheritDoc} */
    override def add ( field: Field ): Form = {
        new Form( fields + (field.name -> field) )
    }
    /** {@inheritDoc} */
    override def add ( field: AsyncField ): AsyncForm = async.add( field )
    /** Validates a map against this form; missing keys validate as "" */
    def process ( values: Map[String,String] ): FormResults = {
        fields.foldLeft( FormResults() ) { (results, entry) =>
            val (name, field) = entry
            results + (name -> field.process( values.getOrElse(name, "") ))
        }
    }
    /** Validates a list of tuples */
    def process ( values: (String, String)* ): FormResults = {
        process( Map(values: _*) )
    }
    /** Validates a map against this form and fails if it doesn't validate */
    def require ( values: Map[String, String] ): ValidFormResults = {
        process( values ).require
    }
    /** Validates a map against this form and fails if it doesn't validate */
    def require ( values: (String, String)* ): ValidFormResults = {
        require( Map(values: _*) )
    }
    /** {@inheritDoc} */
    override def async: AsyncForm = {
        new AsyncForm( fields.map { case (name, field) => name -> field.async } )
    }
}
/** Companion for [[AsyncForm]]: construction helpers (fields are lifted via `.async`). */
object AsyncForm {
    /** Creates a form from a list of fields */
    def apply ( fields: Traversable[CommonField[_]] ): AsyncForm
        = new AsyncForm(fields)
    /** Creates a form from a list of fields */
    def apply ( fields: CommonField[_]* ): AsyncForm
        = new AsyncForm( fields )
}
/**
 * An asynchronous form: field validation yields Futures that run concurrently.
 */
class AsyncForm (
    fields: ListMap[String, AsyncField]
) extends CommonForm[AsyncForm, AsyncField](fields) {
    /** Creates a form from a list of fields */
    def this ( fields: Traversable[CommonField[_]] )
        = this( Form.toMap( fields.map(_.async) ) )
    /** Creates a form from a list of fields */
    def this ( fields: CommonField[_]* ) = this( fields )
    /** {@inheritDoc} */
    override def add( field: AsyncField ): AsyncForm
        = new AsyncForm( fields + ((field.name, field)) )
    /** {@inheritDoc} */
    override def add( field: Field ): AsyncForm = add( field.async )
    /** Validates a map against this form */
    def process
        ( values: Map[String, String] )
        ( implicit ctx: ExecutionContext )
    : Future[FormResults] = {
        // Kick off requests to validate all the fields. The strict `map` creates
        // every Future up front, so the fields validate concurrently.
        val futures: Iterable[Future[(String, FieldResult)]] =
            fields.map(pair => {
                pair._2.process( values.getOrElse(pair._1, "") )
                    .map( pair._1 -> _ )
            })
        // NOTE(review): Future.fold is deprecated in newer Scala versions in
        // favor of Future.foldLeft — worth migrating when the baseline allows.
        Future.fold(futures)(FormResults())(_ + _)
    }
    /** Validates a list of tuples */
    def process
        ( values: (String, String)* )
        ( implicit ctx: ExecutionContext )
    : Future[FormResults]
        = process( Map( values:_* ) )
    /** Validates a map against this form and fails if it doesn't validate */
    def require
        ( values: Map[String, String] )
        ( implicit ctx: ExecutionContext )
    : Future[ValidFormResults]
        = process( values ).map( _.require )
    /** Validates a map against this form and fails if it doesn't validate */
    def require
        ( values: (String, String)* )
        ( implicit ctx: ExecutionContext )
    : Future[ValidFormResults]
        = require( Map(values:_*) )
    /** {@inheritDoc} */
    override def async: AsyncForm = this
}
/**
 * Signals that validation produced at least one error when a fully valid
 * result was required.
 */
case class InvalidFormException (
    val formResults: FormResults
) extends ValidationException {
    /** Alternate constructor from a list of results */
    def this ( fields: (String, FieldResult)* )
        = this( new FormResults( fields: _* ) )
    /** {@inheritDoc} */
    override def errors: Seq[Err] = formResults.errors
    /** {@inheritDoc} */
    override def toString = s"InvalidFormException($formResults)"
}
/**
 * Behavior shared by validated and unvalidated form result collections.
 *
 * @tparam F type of the per-field results held
 * @tparam T concrete self type returned when adding results
 */
abstract class CommonFormResults[
    F <: CommonFieldResult,
    T <: CommonFormResults[_, _]
] (
    val results: ListMap[String, F]
) extends Traversable[F] {
    /** Iterates the field results in insertion order */
    override def foreach[U] ( callback: F => U ): Unit
        = results.values.foreach( callback )
    /** Adds a new result to the map */
    def + ( elem: (String, CommonFieldResult) ): T
    /** Returns the original value from a form */
    def original ( field: String ): Option[String]
        = results.get( field ).map( _.original )
    /** Returns the value of a field */
    def apply ( field: String ): String = results( field ).value
    /** Returns the value of a field as an option */
    def get ( field: String ): Option[String]
        = results.get( field ).map( _.value )
    /** Requires that this Form is valid */
    def require: ValidFormResults
    /** Returns whether this form is valid */
    def isValid: Boolean
    /** Produces this form result as a future */
    def future: Future[ValidFormResults]
    /** Attaches an error to the named field, downgrading every result to a plain FieldResult */
    def addError ( field: String, err: Err ): FormResults = {
        val updated = err +: results( field ).asFieldResult
        FormResults( results.map { case (name, result) =>
            name -> ( if ( name == field ) updated else result.asFieldResult )
        } )
    }
    /** Adds an error to this result set */
    def addError ( field: String, code: String, message: String ): FormResults
        = addError( field, Err(code, message) )
}
/**
 * The results of a validation run; may contain both valid and invalid fields.
 */
case class FormResults (
    resultMap: ListMap[String, FieldResult] = ListMap()
) extends CommonFormResults[FieldResult, FormResults](resultMap) with Errable {
    /** Constructs from a list of field tuples */
    def this ( fields: (String, FieldResult)* ) = this( ListMap(fields:_*) )
    /** {@inheritDoc} */
    override def + ( elem: (String, CommonFieldResult) ): FormResults
        = FormResults( results + (elem._1 -> elem._2.asFieldResult) )
    /** The form is valid only if every field validated successfully */
    override def isValid: Boolean
        = results.forall( result => result._2.isValid )
    /** Returns the results of the first invalid field */
    def firstInvalid: Option[FieldResult]
        = results.find( ! _._2.isValid ).map( _._2 )
    /** All errors across fields.
      * NOTE(review): the left fold prepends each field's error list, so fields
      * come out in reverse form order — confirm whether callers rely on this.
      */
    override def errors: Seq[Err] = results.foldLeft( List[Err]() ) {
        (accum, pair) => pair._2.errors.toList ::: accum
    }
    /** Returns a map of invalid field names to their errors */
    def fieldErrors: Map[String,Seq[Err]] = {
        results.foldLeft( Map[String,Seq[Err]]() ) { (accum, pair) =>
            if ( pair._2.isValid )
                accum
            else
                accum + ( pair._1 -> pair._2.errors )
        }
    }
    /** Returns a map of field names to error messages.
      * NOTE(review): `mapValues` produces a lazy view in Scala 2.12 — the
      * mapping is re-evaluated on each access.
      */
    def fieldMessages: Map[String,Seq[String]]
        = fieldErrors.mapValues( _.view.map(_.message) )
    /** Converts to [[ValidFormResults]], throwing InvalidFormException when any field failed */
    override def require: ValidFormResults = {
        if ( !isValid )
            throw InvalidFormException( this )
        new ValidFormResults( results.map(pair => pair._1 -> pair._2.require) )
    }
    /** Wraps `require` in a Future: failed future when invalid */
    override def future: Future[ValidFormResults]
        = Future.fromTry(Try(require))
}
/**
 * Form results that are known to contain no validation errors.
 */
case class ValidFormResults(
    resultMap: ListMap[String, ValidFieldResult] = ListMap()
) extends CommonFormResults[ValidFieldResult, ValidFormResults](resultMap) {
    /** {@inheritDoc} */
    override def + ( elem: (String, CommonFieldResult) ): ValidFormResults = {
        val (name, result) = elem
        ValidFormResults( results + (name -> result.require) )
    }
    /** {@inheritDoc} */
    override def require: ValidFormResults = this
    /** {@inheritDoc} */
    override def isValid: Boolean = true
    /** {@inheritDoc} */
    override def future: Future[ValidFormResults] = Future.successful(this)
}
| Nycto/vFunk | src/main/scala/vfunk/form/Form.scala | Scala | mit | 9,795 |
/**
* Copyright (C) 2012 Typesafe, Inc. <http://www.typesafe.com>
*/
package org.pantsbuild.zinc.compiler
import java.io.{File, IOException}
import java.lang.{ Boolean => JBoolean }
import java.util.function.{ Function => JFunction }
import java.util.{ List => JList, Map => JMap }
import scala.collection.JavaConverters._
import scala.compat.java8.OptionConverters._
import scala.util.matching.Regex
import sbt.io.IO
import sbt.util.Logger
import xsbti.{Position, Problem, Severity, ReporterConfig, ReporterUtil}
import xsbti.compile.{
AnalysisStore,
CompileOptions,
CompileOrder,
Compilers,
Inputs,
PreviousResult,
Setup
}
import org.pantsbuild.zinc.analysis.AnalysisMap
object InputUtils {
  /**
   * Create Inputs based on command-line settings.
   */
  def create(
    settings: Settings,
    analysisMap: AnalysisMap,
    previousResult: PreviousResult,
    log: Logger
  ): Inputs = {
    import settings._
    val compilers = CompilerUtils.getOrCreate(settings, log)
    // TODO: Remove duplication once on Scala 2.12.x.
    // Identity mapper: compiler positions are reported unchanged.
    val positionMapper =
      new JFunction[Position, Position] {
        override def apply(p: Position): Position = p
      }
    val compileOptions =
      CompileOptions
        .create()
        .withClasspath(
          autoClasspath(
            classesDirectory,
            compilers.scalac().scalaInstance().allJars,
            javaOnly,
            classpath
          ).toArray
        )
        .withSources(sources.toArray)
        .withClassesDirectory(classesDirectory)
        .withScalacOptions(scalacOptions.toArray)
        .withJavacOptions(javacOptions.toArray)
        .withOrder(compileOrder)
    // Reporter configured from the console-log settings; errors are never truncated.
    val reporter =
      ReporterUtil.getDefault(
        ReporterUtil.getDefaultReporterConfig()
          .withMaximumErrors(Int.MaxValue)
          .withUseColor(settings.consoleLog.color)
          .withMsgFilters(settings.consoleLog.msgPredicates.toArray)
          .withFileFilters(settings.consoleLog.filePredicates.toArray)
          .withLogLevel(settings.consoleLog.javaLogLevel)
          .withPositionMapper(positionMapper)
      )
    val setup =
      Setup.create(
        analysisMap.getPCELookup,
        false,
        settings.analysis.cache,
        CompilerUtils.getGlobalsCache,
        incOptions.options(log),
        reporter,
        None.asJava,
        Array()
      )
    Inputs.create(
      compilers,
      compileOptions,
      setup,
      previousResult
    )
  }
  /**
   * Load the analysis for the destination, creating it if necessary.
   */
  def loadDestinationAnalysis(
    settings: Settings,
    analysisMap: AnalysisMap,
    log: Logger
  ): (AnalysisStore, PreviousResult) = {
    // Reads the cached analysis store; yields (store, None, None) when empty.
    def load() = {
      val analysisStore = analysisMap.cachedStore(settings.analysis.cache)
      analysisStore.get().asScala match {
        case Some(a) => (analysisStore, Some(a.getAnalysis), Some(a.getMiniSetup))
        case _ => (analysisStore, None, None)
      }
    }
    // Try loading, and optionally remove/retry on failure.
    val (analysisStore, previousAnalysis, previousSetup) =
      try {
        load()
      } catch {
        case e: Throwable if settings.analysis.clearInvalid =>
          // Remove the corrupted analysis and output directory.
          log.warn(s"Failed to load analysis from ${settings.analysis.cache} ($e): will execute a clean compile.")
          IO.delete(settings.analysis.cache)
          IO.delete(settings.classesDirectory)
          load()
      }
    (analysisStore, PreviousResult.create(previousAnalysis.asJava, previousSetup.asJava))
  }
  /**
   * Automatically add the output directory and scala library to the classpath.
   */
  def autoClasspath(classesDirectory: File, allScalaJars: Seq[File], javaOnly: Boolean, classpath: Seq[File]): Seq[File] = {
    if (javaOnly) classesDirectory +: classpath
    else splitScala(allScalaJars) match {
      case Some(scalaJars) => classesDirectory +: scalaJars.library +: classpath
      case None => classesDirectory +: classpath
    }
  }
  /**
   * Select the scala jars.
   *
   * Prefer the explicit scala-compiler, scala-library, and scala-extra settings,
   * then the scala-path setting, then the scala-home setting. Default to bundled scala.
   */
  def selectScalaJars(scala: ScalaLocation): ScalaJars = {
    val jars = splitScala(scala.path) getOrElse Defaults.scalaJars
    ScalaJars(
      scala.compiler getOrElse jars.compiler,
      scala.library getOrElse jars.library,
      scala.extra ++ jars.extra
    )
  }
  /**
   * Distinguish the compiler and library jars.
   *
   * Returns None unless both a compiler jar and a library jar are found
   * (by filename pattern) among the non-excluded jars.
   */
  def splitScala(jars: Seq[File], excluded: Set[String] = Set.empty): Option[ScalaJars] = {
    val filtered = jars filterNot (excluded contains _.getName)
    val (compiler, other) = filtered partition (_.getName matches ScalaCompiler.pattern)
    val (library, extra) = other partition (_.getName matches ScalaLibrary.pattern)
    if (compiler.nonEmpty && library.nonEmpty) Some(ScalaJars(compiler(0), library(0), extra)) else None
  }
  //
  // Default setup
  //
  val ScalaCompiler = JarFile("scala-compiler")
  val ScalaLibrary = JarFile("scala-library")
  val ScalaReflect = JarFile("scala-reflect")
  val CompilerBridgeSources = JarFile("compiler-bridge", "sources")
  val CompilerInterface = JarFile("compiler-interface")
  // TODO: The default jar locations here are definitely not helpful, but the existence
  // of "some" value for each of these is assumed in a few places. Should remove and make
  // them optional to more cleanly support Java-only compiles.
  object Defaults {
    val scalaCompiler = ScalaCompiler.default
    val scalaLibrary = ScalaLibrary.default
    val scalaExtra = Seq(ScalaReflect.default)
    val scalaJars = ScalaJars(scalaCompiler, scalaLibrary, scalaExtra)
    val scalaExcluded = Set("jansi.jar", "jline.jar", "scala-partest.jar", "scala-swing.jar", "scalacheck.jar", "scalap.jar")
  }
  /**
   * Jar file description for locating jars.
   *
   * `pattern` matches versioned jar names (e.g. scala-library-2.12.4.jar);
   * `default` is the bare, unversioned filename.
   */
  case class JarFile(name: String, classifier: Option[String] = None) {
    val versionPattern = "(-.*)?"
    val classifierString = classifier map ("-" + _) getOrElse ""
    val extension = "jar"
    val pattern = name + versionPattern + classifierString + "." + extension
    val default = new File(name + classifierString + "." + extension)
  }
  object JarFile {
    def apply(name: String, classifier: String): JarFile = JarFile(name, Some(classifier))
  }
  /**
   * The scala jars split into compiler, library, and extra.
   */
  case class ScalaJars(compiler: File, library: File, extra: Seq[File])
}
| UnrememberMe/pants | src/scala/org/pantsbuild/zinc/compiler/InputUtils.scala | Scala | apache-2.0 | 6,687 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.ping
import org.apache.ignite.cluster.ClusterNode
import org.apache.ignite.internal.util.scala.impl
import org.apache.ignite.visor.VisorTag
import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable}
import org.apache.ignite.visor.visor._
import java.util.concurrent._
import scala.collection.JavaConversions._
import scala.language.{implicitConversions, reflectiveCalls}
import scala.util.control.Breaks._
/**
 * Ping result container.
 *
 * NOTE: instances are mutated concurrently by Pinger tasks, which guard every
 * update with `res.synchronized`; readers must wait for all tasks to finish.
 */
private class Result {
    /** Total pings count. */
    var total = 0
    /** Successful pings count. */
    var oks = 0
    /** Failed pings count */
    var fails = 0
    /** Failed nodes. */
    val failedNodes = collection.mutable.Set.empty[ClusterNode]
}
/**
* Thread that pings one node.
*/
private case class Pinger(n: ClusterNode, res: Result) extends Runnable {
    assert(n != null)
    assert(res != null)
    override def run() {
        val ok = ignite.cluster.pingNode(n.id())
        // `res` is shared by many Pinger tasks running on the pool, so every
        // update is synchronized on it.
        res.synchronized {
            res.total += 1
            if (ok)
                res.oks += 1
            else {
                res.fails += 1
                res.failedNodes += n
            }
        }
    }
}
/**
* ==Command==
* Visor 'ping' command implementation.
*
* ==Help==
* {{{
* +--------------------+
* | ping | Pings node. |
* +--------------------+
* }}}
*
* ====Specification====
* {{{
* ping {"id81 id82 ... id8k"}
* }}}
*
* ====Arguments====
* {{{
* id8k
* ID8 of the node to ping.
* }}}
*
* ====Examples====
* {{{
* ping "12345678"
* Pings node with '12345678' ID8.
* ping
* Pings all nodes in the topology.
* }}}
*/
class VisorPingCommand extends VisorConsoleCommand {
    @impl protected val name = "ping"
    /**
     * ===Command===
     * Pings node(s) by its ID8.
     *
     * ===Examples===
     * <ex>ping "12345678 56781234"</ex>
     * Pings nodes with '12345678' and '56781234' ID8s.
     *
     * @param args List of node ID8s. If empty or null - pings all nodes in the topology.
     */
    def ping(args: String) = breakable {
        if (checkConnected()) {
            val argLst = parseArgs(args)
            val res = new Result()
            var pings = List.empty[Pinger]
            // No arguments: ping the whole topology; otherwise resolve each ID8.
            if (argLst.isEmpty)
                pings ++= ignite.cluster.nodes().map(Pinger(_, res))
            else {
                for (id8 <- argLst) {
                    if (id8._1 != null || id8._2 == null)
                        scold("Invalid ID8: " + argName(id8))
                    else {
                        val ns = nodeById8(id8._2)
                        // An ID8 must resolve to exactly one node to be pingable.
                        if (ns.size != 1)
                            scold("Unknown ID8: " + argName(id8))
                        else
                            pings +:= Pinger(ns.head, res)
                    }
                }
            }
            if (pings.isEmpty)
                scold("Topology is empty.")
            else {
                // Submit all pingers to the pool and block until each completes.
                try
                    pings.map(pool.submit(_)).foreach(_.get)
                catch {
                    case _: RejectedExecutionException => scold("Ping failed due to system error.").^^
                }
                val t = VisorTextTable()
                // No synchronization on 'res' is needed since all threads
                // are finished and joined.
                t += ("Total pings", res.total)
                t += ("Successful pings", res.oks + " (" + formatInt(100 * res.oks / res.total) + "%)")
                t += ("Failed pings", res.fails + " (" + formatInt(100 * res.fails / res.total) + "%)")
                if (res.failedNodes.nonEmpty)
                    t += ("Failed nodes", res.failedNodes.map(n => nodeId8Addr(n.id)))
                t.render()
            }
        }
    }
    /**
     * ===Command===
     * Pings all nodes in the topology.
     *
     * ===Examples===
     * <ex>ping</ex>
     * Pings all nodes in the topology.
     */
    def ping() {
        ping("")
    }
}
/**
* Companion object that does initialization of the command.
*/
object VisorPingCommand {
    /** Singleton command. */
    private val cmd = new VisorPingCommand

    // Adds command's help to visor.
    addHelp(
        name = cmd.name,
        shortInfo = "Pings node.",
        spec = List(s"${cmd.name} <id81> <id82> ... <id8k>"),
        args = List(
            ("<id8k>",
                "ID8 of the node to ping. Note you can also use '@n0' ... '@nn' variables as shortcut to <id8k>.")
        ),
        examples = List(
            s"${cmd.name} 12345678" ->
                "Pings node with '12345678' ID8.",
            s"${cmd.name} @n0" ->
                "Pings node with 'specified node with ID8 taken from 'n0' memory variable.",
            cmd.name ->
                "Pings all nodes in the topology."
        ),
        // Dispatch: no-arg overload when the user typed just "ping", string overload otherwise.
        emptyArgs = cmd.ping,
        withArgs = cmd.ping
    )

    /**
     * Singleton.
     */
    def apply() = cmd

    /**
     * Implicit converter from visor to commands "pimp".
     *
     * @param vs Visor tagging trait.
     */
    implicit def fromPing2Visor(vs: VisorTag): VisorPingCommand = cmd
}
| wmz7year/ignite | modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala | Scala | apache-2.0 | 6,011 |
package org.ensime.util
/**
 * A message that can serialize itself into the textual form sent over the wire
 * to clients. Implementations supply the concrete encoding.
 */
trait WireFormat{
  /** The serialized representation of this message as written on the wire. */
  def toWireString:String
}
| bbatsov/ensime | src/main/scala/org/ensime/util/WireFormat.scala | Scala | gpl-3.0 | 72 |
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java
import ws.epigraph.lang.Qn
import scala.annotation.tailrec
/**
 * A piece of generated source text that may contain interpolation directives:
 * `§ref[fqn]` markers for type references (later resolved to short names plus
 * collected imports) and `§el` markers meaning "exactly one blank line here"
 * (see the companion's `emptyLine`).
 *
 * todo this won't handle cases like "Map.@Nullalbe Entry"
 *
 * @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
 */
case class Fragment(text: String) extends AnyVal {
  def length: Int = text.length

  /** Concatenates two fragments, reusing either side when the other is empty. */
  def +(other: Fragment): Fragment = Fragment(
    if (text.isEmpty) other.text
    else if (other.text.isEmpty) text
    else text + other.text
  )

//  def +(other: Fragment): Fragment = Fragment(
//    if (text.isEmpty) other.text
//    else if (other.text.isEmpty) text
//    else {
//      if (text.endsWith("\n\n")) text + other.text
//      else if (text.endsWith("\n")) text + "\n" + other.text
//      else text + "\n\n" + other.text
//    }
//  )

  override def toString: String = text

  /**
   * Resolves all directives in this fragment's text, producing final source code.
   *
   * @param namespacesInScope namespaces whose members may be referenced by short
   *                          name without an explicit import (default: `java.lang`)
   * @param importsGenerator  renders the collected fully-qualified names into
   *                          import statements that replace the imports placeholder
   */
  def interpolate(
    namespacesInScope: Set[Qn] = Fragment.javaNamespacesInScope,
    importsGenerator: List[String] => String = Fragment.javaImportsGenerator
  ): String = {
    var t = text
    t = interpolateImports(t, namespacesInScope, importsGenerator)
    t = Fragment.interpolateEmptyLines(t) // should be the last one
    t
  }

  /**
   * Repeatedly resolves `§ref[...]` markers until a fixed point is reached
   * (replacement text may itself contain markers), then substitutes the
   * generated import statements for the `Fragment.imports` placeholder.
   */
  private def interpolateImports(
    text: String,
    namespacesInScope: Set[Qn],
    importsGenerator: List[String] => String = Fragment.javaImportsGenerator
  ): String = {
    import scala.util.control.Breaks._

    var curText = text
    var curImportedShortNames: Set[String] = Fragment.getImplicitlyUsedImports(text, namespacesInScope)
    var curImportedFqns: Set[Qn] = Set()

    breakable {
      while (true) {
        val (newText, newImportedShortNames, newImportedFqns) =
          Fragment.resolveImportsOnce(curText, curImportedShortNames, namespacesInScope)
        // Fixed point: the pass changed nothing, so its (empty) results are discarded.
        if (newImportedShortNames.size == curImportedShortNames.size && newText == curText) {
          break
        }
        // NOTE(review): curImportedFqns is overwritten, not unioned, each pass —
        // if resolution ever takes more than one productive pass, imports found in
        // earlier passes would be lost. Confirm multi-pass resolution never adds fqns.
        curText = newText
        curImportedShortNames = newImportedShortNames
        curImportedFqns = newImportedFqns
      }
    }

    val newImportsStmt = importsGenerator.apply(curImportedFqns.map(_.toString).toList.sorted)
    curText.replace(Fragment.imports.text, newImportsStmt)
  }
}
/**
 * Companion: fragment constructors and the interpolation machinery that turns
 * `§ref[...]` / `§el` directives embedded in generated text into short type
 * names, collected imports, and normalized blank lines.
 */
object Fragment {
  // Directive marker (section sign, U+00A7); assumed never to occur in ordinary generated code.
  protected val sign = '\u00a7'

  /** Placeholder replaced by the generated import statements during interpolation. */
  val imports = Fragment(sign + "imports")

  /** Creates a reference marker for `fqn`; interpolation shortens it and records an import. */
  def imp(fqn: Qn): Fragment = Fragment(s"${ sign }ref[$fqn]")

  def apply(fqn: Qn): Fragment = imp(fqn)

  def imp(fqn: String): Fragment = imp(Qn.fromDotSeparated(fqn))

  val empty: Fragment = Fragment("")

  /** Placeholder meaning "exactly one blank line here"; see `interpolateEmptyLines`. */
  val emptyLine: Fragment = Fragment(sign + "el")

  /** Default Java import renderer: one `import x.y.Z;` per line. */
  val javaImportsGenerator: List[String] => String = _.map(i => s"import $i;").mkString("\n")

  /** Namespaces visible in Java sources without explicit imports. */
  val javaNamespacesInScope: Set[Qn] = Set(Qn.fromDotSeparated("java.lang"))

  def join(fs: Iterable[Fragment], sep: Fragment): Fragment = Fragment(fs.mkString(sep.toString))

  /////

  /**
   * Short (class-local) name of `fqn`: everything from the first capitalized
   * segment on, optionally with a trailing `[...]`/`<...>` type-parameter list
   * stripped.
   *
   * @throws IllegalArgumentException if `fqn` contains no capitalized (class) segment
   */
  private def shortName(fqn: Qn, removeTypeParams: Boolean): String = {
    val csi = fqn.segments.indexWhere(_.charAt(0).isUpper)
    // (The unused namespace-prefix computation was removed; `namespace` covers that need.)
    val shortClassName = if (csi < 0) null else if (csi == 0) fqn else fqn.removeHeadSegments(csi)

    if (shortClassName == null || shortClassName.isEmpty)
      throw new IllegalArgumentException(s"Can't determine short name for '$fqn'")

    val res = shortClassName.toString
    // Type parameters, if any, start at the first '[' or '<' of the rendered name.
    lazy val paramsStartAt = Set('[', '<').map(res.indexOf(_)).filter(_ >= 0)
    if (!removeTypeParams || paramsStartAt.isEmpty)
      res
    else
      res.substring(0, paramsStartAt.min)
  }

  /** Namespace prefix of `fqn`: all segments before the first capitalized one. */
  private def namespace(fqn: Qn): Qn = {
    val csi = fqn.segments.indexWhere(_.charAt(0).isUpper)
    if (csi < 0 || csi == fqn.size()) Qn.EMPTY else fqn.takeHeadSegments(csi)
  }

  /**
   * Replaces every `§el` marker such that exactly one blank line (two newlines)
   * separates the surrounding text, never inserting more newlines than needed
   * to reach that count.
   */
  private def interpolateEmptyLines(s: String): String = {
    val ml = emptyLine.text.length

    // Counts newlines (capped at `max`) immediately following `idx`;
    // an adjacent `§el` marker counts as a full blank line (2).
    @tailrec
    def countNewLinesPrefix(k: String, idx: Int, cur: Int, max: Int): Int =
      if (idx >= k.length) cur
      else if (cur >= max) cur
      else if (k.charAt(idx) == '\n')
        countNewLinesPrefix(k, idx + 1, cur + 1, max)
      else if (k.startsWith(emptyLine.text, idx))
        countNewLinesPrefix(k, idx + ml, cur + 2, max)
      else cur

    // Mirror image: counts newlines (capped at `max`) immediately preceding `idx`.
    @tailrec
    def countNewLinesSuffix(k: String, idx: Int, cur: Int, max: Int): Int =
      if (idx <= 0) cur
      else if (cur >= max) cur
      else if (idx - 1 >= 0 && k.charAt(idx - 1) == '\n')
        countNewLinesSuffix(k, idx - 1, cur + 1, max)
      else if (idx - ml >= 0 && k.startsWith(emptyLine.text, idx - ml))
        countNewLinesSuffix(k, idx - ml, cur + 2, max)
      else cur

    // Pad both ends so markers at the edges behave like interior ones.
    var t = "\n\n" + s + "\n\n"
    var idx = t.indexOf(emptyLine.toString)
    while (idx >= 0) {
      val newLinesBefore = countNewLinesSuffix(t, idx, 0, 2)
      val newLinesAfter = countNewLinesPrefix(t, idx + emptyLine.length, 0, 2)
      // Insert only as many newlines as needed to reach a total of two around the marker.
      val numNewLines = Math.max(0, 2 - (newLinesBefore + newLinesAfter))
      val newLines = "\n" * numNewLines
      t = t.substring(0, idx) + newLines + t.substring(idx + ml)
      idx = t.indexOf(emptyLine.toString, idx)
    }
    // Strip the temporary padding.
    t.substring(2, t.length - 2)
  }

  /** All complete `§ref[...]` markers currently present in `text`. */
  private def collectRefs(text: String): Set[Qn] = {
    val refPattern = s"${ sign }ref\\[([^]$sign]+)\\]".r
    (for (m <- refPattern.findAllMatchIn(text)) yield m.group(1)).toSet.map(Qn.fromDotSeparated)
  }

  /**
   * Like `collectRefs` but also matches markers whose payload is cut short by
   * '[' or '<' (references carrying type parameters); used to detect names
   * already visible via the in-scope namespaces.
   */
  private def collectPotentialRefs(text: String): Set[Qn] = {
    val refPattern = s"${ sign }ref\\[([^]$sign\\[<]+)".r
    (for (m <- refPattern.findAllMatchIn(text)) yield m.group(1)).toSet.map(Qn.fromDotSeparated)
  }

  /** Short names referenced in `text` that are already visible via `namespacesInScope`. */
  private def getImplicitlyUsedImports(text: String, namespacesInScope: Set[Qn]): Set[String] =
    collectPotentialRefs(text)
      .filter(qn => namespacesInScope.contains(namespace(qn)))
      .map(qn => shortName(qn, removeTypeParams = true))

  /**
   * Performs one resolution pass over all `§ref[...]` markers in `text`.
   *
   * @return (rewritten text, short names now taken, fully-qualified names that
   *         need importing as discovered during this pass only)
   */
  private def resolveImportsOnce(
    text: String,
    importedShortNames: Set[String],
    namespacesInScope: Set[Qn]
  ): (String, Set[String], Set[Qn]) = {
    def replaceRef(fqn: Qn, to: String, text: String): String = text.replace(imp(fqn).text, to)

    val refs: Set[Qn] = collectRefs(text)
    var processedRefs: Set[Qn] = Set()
    var newImportedShortNames: Set[String] = importedShortNames
    var importedFqns: Set[Qn] = Set()

    var t = text

    // first resolve all implicitly visible names
    for (ref <- refs) {
      val ns = namespace(ref)
      if (namespacesInScope.contains(ns)) {
        val shortWithParams = shortName(ref, removeTypeParams = false)
        val shortWithoutParams = shortName(ref, removeTypeParams = true)
        t = replaceRef(ref, shortWithParams, t)
        newImportedShortNames += shortWithoutParams
        processedRefs += ref
      }
    }

    // Remaining refs: shorten when the short name is still free (recording an import),
    // otherwise fall back to the fully qualified name with no import added.
    for (ref <- refs; if !processedRefs.contains(ref)) {
      lazy val shortWithParams = shortName(ref, removeTypeParams = false)
      val shortWithoutParams = shortName(ref, removeTypeParams = true)
      if (!newImportedShortNames.contains(shortWithoutParams)) {
        t = replaceRef(ref, shortWithParams, t)
        importedFqns += ref
        newImportedShortNames += shortWithoutParams
      } else {
        t = replaceRef(ref, ref.toString, t)
      }
    }

    (t, newImportedShortNames, importedFqns)
  }
}
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/Fragment.scala | Scala | apache-2.0 | 7,783 |
package poly.collection
import cats.implicits._
/**
* Trait for an indexed sorted sequence.
* @author Tongfei Chen
* @since 0.1.0
*/
trait SortedIndexedSeq[T] extends SortedSeq[T] with IndexedSeq[T] { self =>

  /**
   * $Ologn Checks if this sorted sequence contains the specific element.
   * The equivalence relation used for checking is the order of this sequence.
   */
  def contains(x: T) = tryBinarySearch(x) >= 0

  /**
   * $Ologn Finds the key in a sorted sequence using binary search.
   * @param x The key to be found
   * @return An object of type `BinarySearchResult`. Can be either:
   *  - Right(i): the given key is found at ''i''
   *  - Left(i): the given key is not found. If it should be inserted to the sequence, it should be located at ''i''.
   */
  def binarySearch(x: T): Either[Int, Int] = {
    val i = tryBinarySearch(x)
    // A negative result encodes the insertion point as its bitwise complement.
    if (i >= 0) Right(i)
    else Left(~i)
  }

  /**
   * Finds the key in a sorted sequence using binary search.
   * If not found, returns the complement (~x) of its lower bound. $Ologn
   * @param x The key to be found
   * @return Index of key. If not found, complement of the index at which it should be inserted
   */
  def tryBinarySearch(x: T): Int = {
    // Classic iterative binary search using the cats Order on elements
    // (=== for equivalence, < for strict comparison).
    var l = 0
    var r = length - 1
    while (l <= r) {
      val m = l + (r - l) / 2 // overflow-safe midpoint
      val value = this(m)
      if (x === value) return m
      else {
        if (value < x)
          l = m + 1
        else
          r = m - 1
      }
    }
    // Not found: ~l is always negative and recovers the insertion point via ~.
    ~l
  }

  /**
   * Finds the first element that is greater than the key and returns its index. $Ologn
   * @param key The key to be found
   * @return The index of the first element that is greater than the key.
   */
  def indexOfUpperBound(key: T): Int = {
    // Invariant: all elements before `first` are <= key. Returns `length`
    // when every element is <= key.
    var len = length
    var first = 0
    while (len > 0) {
      val mid = first + (len / 2)
      if (key < this(mid))
        len /= 2
      else {
        first = mid + 1
        len = len - (len / 2) - 1
      }
    }
    first
  }

  /**
   * Finds the first element that is not less than the key and returns its index. $Ologn
   * @param key The key to be found
   * @return The index of the first element that is not less than the key.
   */
  def indexOfLowerBound(key: T): Int = {
    // Invariant: all elements before `first` are < key. Returns `length`
    // when every element is < key — callers must bounds-check before indexing.
    var len = length
    var first = 0
    while (len > 0) {
      val mid = first + (len / 2)
      if (key > this(mid)) {
        first = mid + 1
        len = len - (len / 2) - 1
      }
      else
        len /= 2
    }
    first
  }

  /** Returns the ''q''-quantile of this sequence under the current sorted order. $O1 */
  def quantile(q: Double) = {
    // Index is clamped into [0, length-1]; q outside [0, 1) maps to head/last.
    // NOTE(review): assumes a non-empty sequence — head/last fail on empty. Confirm callers guarantee this.
    val i = math.floor(self.length * q).toInt
    if (i < self.length)
      if (i < 0) self.head
      else self(i)
    else self.last
  }

  /** Views this sequence as a multiset whose weights are element multiplicities. */
  def asWeightedSet: WeightedSet[T, Int] = new SortedIndexedSeqT.AsWeightedSet(self)

  /** Views this sequence as a sorted set (duplicates collapsed). */
  def asSet: SortedSet[T] = new SortedIndexedSeqT.AsSet(self)

}
/** Convenience base class combining [[AbstractIndexedSeq]] with the sorted-sequence trait. */
abstract class AbstractSortedIndexedSeq[T] extends AbstractIndexedSeq[T] with SortedIndexedSeq[T]
/** Adapter views used by [[SortedIndexedSeq.asSet]] and [[SortedIndexedSeq.asWeightedSet]]. */
private[poly] object SortedIndexedSeqT {

  /** Sorted-set view of a sorted sequence: duplicates collapsed, membership by binary search. */
  class AsSet[T](self: SortedIndexedSeq[T]) extends AbstractSortedSet[T] {
    def keyOrder = self.elementOrder
    def keys = self.distinct()
    def contains(x: T) = self.contains(x)
  }

  /** Multiset view of a sorted sequence: the weight of a key is its number of occurrences. */
  class AsWeightedSet[T](self: SortedIndexedSeq[T]) extends AbstractWeightedSet[T, Int] {
    def keySet: SortedSet[T] = new AsSet(self)
    def weightRing = algebra.instances.int.intAlgebra
    def weightOrder = cats.Order[Int]
    def weight(k: T) = {
      val l = self.indexOfLowerBound(k)
      // indexOfLowerBound returns `length` when k is greater than every element;
      // guard before indexing to avoid an out-of-bounds access.
      if (l >= self.length || self(l) =!= k) 0
      else {
        // Scan the run of elements equal to k, stopping at the end of the sequence
        // (the run may extend to the last element).
        var r = l
        while (r < self.length && self(r) === k) r += 1
        r - l
      }
    }
  }
}
| ctongfei/poly-collection | core/src/main/scala/poly/collection/SortedIndexedSeq.scala | Scala | mit | 3,631 |
package controllers.auth
import javax.inject.Inject
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.exceptions.ProviderException
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.impl.providers._
import models.services.UserService
import play.api.i18n.{ I18nSupport, Messages, MessagesApi }
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json.Json
import play.api.mvc.{ Action, AnyContent, Controller }
import utils.auth.DefaultEnv
import scala.concurrent.Future
/**
* The social auth controller.
*
* @param messagesApi The Play messages API.
* @param silhouette The Silhouette stack.
* @param userService The user service implementation.
* @param authInfoRepository The auth info service implementation.
* @param socialProviderRegistry The social provider registry.
*/
class SocialAuthController @Inject() (
  val messagesApi: MessagesApi,
  silhouette: Silhouette[DefaultEnv],
  userService: UserService,
  authInfoRepository: AuthInfoRepository,
  socialProviderRegistry: SocialProviderRegistry)
  extends Controller with I18nSupport with Logger {

  /**
   * Authenticates a user against a social provider.
   *
   * On success: saves/updates the user profile and auth info, creates a session
   * authenticator, embeds it into a redirect to "/", and publishes a [[LoginEvent]].
   * On any provider failure: responds 401 with a localized error message.
   *
   * @param provider The ID of the provider to authenticate against.
   * @return The result to display.
   */
  def authenticate(provider: String): Action[AnyContent] = Action.async { implicit request =>
    (socialProviderRegistry.get[SocialProvider](provider) match {
      case Some(p: SocialProvider with CommonSocialProfileBuilder) =>
        p.authenticate().flatMap {
          // Left: the provider requires another round trip (e.g. redirect to its consent page).
          case Left(result) => Future.successful(result)
          // Right: credentials obtained; each step below runs sequentially.
          case Right(authInfo) => for {
            profile <- p.retrieveProfile(authInfo)
            user <- userService.save(profile)
            // NOTE: this binding deliberately shadows the outer `authInfo` with the saved instance.
            authInfo <- authInfoRepository.save(profile.loginInfo, authInfo)
            authenticator <- silhouette.env.authenticatorService.create(profile.loginInfo)
            value <- silhouette.env.authenticatorService.init(authenticator)
            result <- silhouette.env.authenticatorService.embed(value, Redirect("/"))
          } yield {
            silhouette.env.eventBus.publish(LoginEvent(user, request))
            result
          }
        }
      case _ => Future.failed(new ProviderException(s"Cannot authenticate with unexpected social provider $provider"))
    }).recover {
      // Provider failures are logged server-side but reported generically to the client.
      case e: ProviderException =>
        logger.error("Unexpected provider error", e)
        Unauthorized(Json.obj("error" -> Messages("could.not.authenticate")))
    }
  }
}
| SwaggerTagger/octo-tagger-backend | app/controllers/auth/SocialAuthController.scala | Scala | mit | 2,587 |
package org.scalaide.core.interpreter
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.tools.nsc.interpreter.Results.Result
import scala.tools.nsc.interpreter.Results.Success
import org.apache.log4j.BasicConfigurator
import org.junit.Assert.assertEquals
import org.junit.Ignore
import org.junit.Test
import org.scalaide.core.internal.repl.EclipseRepl
import org.scalaide.core.internal.repl.EclipseRepl._
import EclipseReplTest._
// This test is not a regular part of org.scalaide.core.TestsSuite because
// when running under Maven the environment isn't set up quite right for the
// Scala Interpreter's special ClassLoader. At least that's my best guess as
// to the problem so far. Under Maven {new IMain} throws with the message:
// Failed to initialize compiler: object scala not found.
// ** Note that as of 2.8 scala does not assume use of the java classpath.
// ** For the old behavior pass -usejavacp to scala, or if using a Settings
// ** object programatically, settings.usejavacp.value = true.
// Note that the suggested setting does not solve the problem. All the other
// tests pass under Maven. That finicky test passes when you set up Eclipse and
// run it from there. There is another benefit to this approach: Maven isn't
// currently configured to do code coverage analysis of the unit tests. Eclemma
// was quite helpful in writing these tests (warning: Eclemma can't deal with a
// def inside another def).
// Here's an easy way to set up Eclipse: Create a new Scala project. Unzip the
// Scala compiler plugin into a folder there and add the scala-compiler.jar to
// the build path (and scala-compiler-src.jar as source attachment). Create a
// scala.tools.eclipse.interpreter package and copy this file and the
// EclipseRepl source code file to there.
// ----------
// capitalization is important...
// in the EclipseRepl public API:
// - capitalized {Init,Stop,Exec,Drop,Quit} are message/types sent to the Future
// - lowercase {starting,...,unknown} are listener methods on the Client
// in this file we need to write test input and expected output values:
// - lowercase {init,stop,exec,drop,quit} are inputs for the Future functions
// - capitalized {Starting,...,Unknown} are expected for the Client methods
class EclipseReplTest {
  BasicConfigurator.configure()

  // Two scripted REPL interactions: the code sent, and the output expected as a
  // function of the result-variable counter.
  val helloWorld = InOut("println(\"hello world\")", _ => "hello world\n")
  val onePlusOne = InOut("1+1", "res" + _ + ": Int = 2\n")

  // The /*Z*/ /*H*/ /*R*/ /*B*/ markers name the REPL's states between steps.
  def allTransitions: Seq[Expect] = // start in Z, go thru every possible path
    Expect(drop, stop) /*Z*/ ++ execAdded("1") /*H*/ ++ Expect(stop) ++
      execAdded("2") ++ Expect(drop, Dropped) /*Z*/ ++ initStarted /*R*/ ++
      Expect(drop, stop, Stopped) /*Z*/ ++ initStarted /*R*/ ++ Expect(Stopped) ++
      initStarted ++ helloWorld.execDone(0) /*B*/ ++ Expect(stop, Stopped) /*H*/ ++
      onePlusOne.execAdded ++ initReplay(helloWorld, onePlusOne) /*B*/ ++
      onePlusOne.execDone(2) ++ Expect(Stopped) ++
      initReplay(helloWorld, onePlusOne, onePlusOne) ++ Expect(drop, Dropped) /*R*/ ++
      onePlusOne.execDone(3) /*B*/ ++ onePlusOne.execDone(4) ++ Expect(Stopped) ++
      initReplay(onePlusOne, onePlusOne) ++ quitFromB

  // Scenarios exercising the three failure paths: interpret failure, replay failure,
  // and interpreter-construction failure.
  def failures1: Seq[Expect] =
    Echopreter.steal(initStarted /*R*/ ++ onePlusOne.execDone(0) /*B*/ ) ++
      Expect(Stopped) ++ Failder.initFailed /*H*/ ++ quitFromH
  def failures2: Seq[Expect] =
    onePlusOne.execAdded ++ Failpreter.initReplayingFailed(onePlusOne) /*B*/ ++
      quitFromB
  def failures3: Seq[Expect] =
    Failpreter.initStarted /*R*/ ++ helloWorld.execFailed ++ quitFromB
  def failures: Seq[Seq[Expect]] =
    Seq(failures1, failures2, failures3) map Echopreter.editOutput

  def unknowns: Seq[Expect] = { // some request messages that aren't recognized
    val anys = Seq[Any](1, 0.0, true, 'J', null, new Object, new Throwable)
    val unkns = (anys map { a => Expect(bad(a), Unknown(a)) }).flatten ++ quitFromZ
    unkns
  }

  // Builds n copies of every scenario for concurrent execution.
  def multiple(n: Int): Seq[Seq[Expect]] = { // all the above at once
    val tests = failures ++ failures ++ failures ++ // cheap "load balancing"
      (Seq(allTransitions, unknowns, unknowns, unknowns) map Echopreter.steal)
    val stream = Stream.continually(tests.toStream).flatten
    stream.take(n * tests.size).toSeq
  }

  // start with the Echopreter testing the state machine ...
  @Test def allTransitions_Echopreter(): Unit = {
    test(Echopreter.steal(allTransitions))
  }
  @Test def failures_Failpreter(): Unit = {
    test(failures: _*)
  }
  @Test def unknowns_Echopreter(): Unit = {
    test(Echopreter.steal(unknowns))
  }

  // next use multiple EclipseRepls in parallel to test the Future stuff ...
  @Test def multiple_RTPScheduler(): Unit = {
    test(multiple(6): _*)
  }
  @Test def multiple_FJScheduler(): Unit = {
    test(multiple(8): _*)
  }

  // last rerun allTransitions with the real NSC Interpreter ...
  @Ignore("See description of failure in class head.")
  @Test def allTransitions_RealNSC(): Unit = {
    test(allTransitions)
  }
}
object EclipseReplTest {
  import org.scalaide.util.TestFutureUtil._

  /** Runs each expectation sequence concurrently, then verifies every recording. */
  def test(ses: Seq[Expect]*): Unit = {
    val rs = ses map { new Recorder(_) }
    whenReady(tryAsResult(Future.sequence {
      val reqs = rs map { _.sendRequests() }
      reqs
    })) { ignore =>
      rs foreach { _.assertMatches() }
    }
  }

  /** Shared exception used by all simulated failures. */
  val TheOneException = new RuntimeException("TheOne") {
    // suppress the useless stack traces...
    setStackTrace(Array.empty[StackTraceElement])
  }

  /** One step of a scripted interaction: either a request sent or a reply expected. */
  trait Expect
  def Expect(es: Expect*): Seq[Expect] = es
  /** Maps matching expectations through `f`, leaving the rest unchanged. */
  def replace(es: Seq[Expect], f: PartialFunction[Expect, Expect]): Seq[Expect] =
    for (e <- es) yield if (f.isDefinedAt(e)) f(e) else e

  // Lowercase types model requests sent to the REPL (see note at top of file).
  trait Request extends Expect { def msg: Any }
  case class init(msg: Init) extends Request
  case object stop extends Request { val msg = Stop }
  case class exec(msg: Exec) extends Request
  case object drop extends Request { val msg = Drop }
  case object quit extends Request { val msg = Quit }
  case class bad(msg: Any) extends Request

  // Capitalized types model Client callbacks expected back from the REPL.
  trait Reply extends Expect
  case class Starting(init: Init) extends Reply
  case class Started(init: Init) extends Reply
  case object Stopped extends Reply
  case object Replaying extends Reply
  case object Replayed extends Reply
  case object Dropped extends Reply
  case class Added(exec: Exec) extends Reply
  case class Doing(exec: Exec) extends Reply
  case class Done(exec: Exec, result: Result, output: String) extends Reply
  case object Terminating extends Reply
  case class Failed(request: Any, thrown: Throwable, output: String) extends Reply
  case class Unknown(request: Any) extends Reply

  // Expected shutdown sequences from states Z (empty), H (history only), B (both).
  def quitFromZ = Expect(quit, Terminating)
  def quitFromH = Expect(quit, Dropped, Terminating)
  def quitFromB = Expect(quit, Stopped, Dropped, Terminating)

  def execAdded(line: Exec) =
    Expect(exec(line), Added(line))
  def DoingFailed(line: Exec) =
    Expect(Doing(line), Failed(line, TheOneException, ""))

  /** A code line plus its expected output as a function of the result counter. */
  case class InOut(in: String, out: (Int => String)) {
    def execAdded = EclipseReplTest.execAdded(in)
    def DoingFailed = EclipseReplTest.DoingFailed(in)
    def execFailed = execAdded ++ DoingFailed
    def DoingDone(n: Int) =
      Expect(Doing(in), Done(in, Success, out(n)))
    def execDone(n: Int) =
      execAdded ++ DoingDone(n)
  }

  /** Expectation builders parameterized by the Init settings used to start the REPL. */
  trait Initialization {
    def settings: Init
    def initStarting =
      Expect(init(settings), Starting(settings))
    def initStarted =
      initStarting ++ Expect(Started(settings))
    def initReplaying =
      initStarting ++ Expect(Replaying)
    def ReplayedStarted =
      Expect(Replayed) ++ Expect(Started(settings))
    def initFailed =
      initStarting ++ Expect(Failed(settings, TheOneException, ""))
    def initReplay(ios: InOut*): Seq[Expect] =
      initReplaying ++
        ios.zipWithIndex.map { t => t._1.DoingDone(t._2) }.flatten ++
        ReplayedStarted
    def initReplayingFailed(io: InOut) =
      initReplaying ++ blame(io.DoingFailed)
    /** Re-attributes failures to this initialization's settings. */
    def blame(es: Seq[Expect]) =
      replace(es, { case Failed(_, t, o) => Failed(settings, t, o) })
    /** Rewrites init-related expectations to use this initialization's settings. */
    def steal(es: Seq[Expect]) =
      replace(es, {
        case init(_) => init(settings)
        case Starting(_) => Starting(settings)
        case Started(_) => Started(settings)
        case Failed(_: Init, t, o) => Failed(settings, t, o)
      })
  }

  object Unspecified extends Initialization {
    val settings = new Init { override def toString = "Unspecified" }
  }
  // Default builders delegate to the Unspecified settings.
  def initStarting = Unspecified.initStarting
  def initStarted = Unspecified.initStarted
  def initReplaying = Unspecified.initReplaying
  def ReplayedStarted = Unspecified.ReplayedStarted
  def initFailed = Unspecified.initFailed
  def initReplay(ios: InOut*): Seq[Expect] = Unspecified.initReplay(ios: _*)
  def initReplayingFailed(io: InOut) = Unspecified.initReplayingFailed(io)

  /** Fake interpreter that echoes each input line as its own output. */
  object Echopreter extends Initialization with Interpreter {
    val settings = new Init { override def toString = "Echopreter" }
    // just echo back each line of code as if that were the result
    def interpret(e: String) = { print(e); Success }
    /** Rewrites Done expectations so the expected output equals the input line. */
    def editOutput(es: Seq[Expect]) =
      replace(es, {
        case Done(e, r, _) => Done(e, r, e)
      })
    override def steal(es: Seq[Expect]) =
      super.steal(editOutput(es))
  }

  /** Fake interpreter whose every interpret call throws. */
  object Failpreter extends Initialization with Interpreter {
    def interpret(e: String) = throw TheOneException
    val settings = new Init { override def toString = "Failpreter" }
    override def steal(es: Seq[Expect]) =
      throw new UnsupportedOperationException()
  }

  /** Settings whose interpreter construction itself fails (see TestBuilder). */
  object Failder extends Initialization {
    val settings = new Init { override def toString = "Failder" }
    override def steal(es: Seq[Expect]) =
      throw new UnsupportedOperationException()
  }

  /** Builder that routes the special test settings to the fake interpreters. */
  object TestBuilder extends Builder {
    def interpreter(i: Init) =
      if (i eq Echopreter.settings) Echopreter
      else if (i eq Failpreter.settings) Failpreter
      else if (i eq Failder.settings) throw TheOneException
      else DefaultBuilder.interpreter(i)
  }

  /** Wraps a raw message back into its Request expectation for recording. */
  def messageToRequest(msg: Any): Request =
    msg match {
      case i: Init => init(i)
      case Stop => stop
      case e: Exec => exec(e)
      case Drop => drop
      case Quit => quit
      case x => bad(x)
    }

  def filterRequests(es: Seq[Expect]): Seq[Request] =
    es filter { _.isInstanceOf[Request] } map { _.asInstanceOf[Request] }
  def filterReplies(es: Seq[Expect]): Seq[Reply] =
    es filter { _.isInstanceOf[Reply] } map { _.asInstanceOf[Reply] }

  /**
   * Drives one EclipseRepl through an expectation sequence, recording every
   * request sent and every Client callback received, then compares both
   * against the expected sequence.
   */
  class Recorder(val expected: Seq[Expect]) {
    private val buffer = new collection.mutable.ListBuffer[Expect]
    def add(e: Expect): Unit = { synchronized { buffer += e } }
    def record = { synchronized { buffer.toList } }

    // Client that simply records each callback as the corresponding Reply.
    val client = new Client {
      override def starting(i: Init): Unit = { add(Starting(i)) }
      override def started(i: Init): Unit = { add(Started(i)) }
      override def stopped(): Unit = { add(Stopped) }
      override def replaying(): Unit = { add(Replaying) }
      override def replayed(): Unit = { add(Replayed) }
      override def dropped(): Unit = { add(Dropped) }
      override def added(e: Exec): Unit = { add(Added(e)) }
      override def doing(e: Exec): Unit = { add(Doing(e)) }
      override def done(e: Exec, r: Result, o: String): Unit = { add(Done(e, r, o)) }
      override def terminating(): Unit = { add(Terminating) }
      override def failed(r: Any, t: Throwable, o: String): Unit = { add(Failed(r, t, o)) }
      override def unknown(request: Any): Unit = { add(Unknown(request)) }
    }

    val tested = new EclipseRepl(client, TestBuilder)

    /** Sends the expected requests one after another, each after the previous completes. */
    def sendRequests(): Future[Unit] = {
      def chainFutures(seq: Seq[Request]): Future[Unit] =
        if (seq.isEmpty)
          Future.successful {}
        else
          dispatch(seq.head.msg).flatMap { unit =>
            chainFutures(seq.tail)
          }.recoverWith {
            case _ =>
              // NOTE(review): on failure this continues with seq.drop(1).tail, i.e. it
              // skips the request AFTER the failed one as well — confirm this is intended.
              if (seq.drop(1).nonEmpty)
                chainFutures(seq.drop(1).tail)
              else chainFutures(Nil)
          }
      chainFutures(filterRequests(expected))
    }

    /** Records the outgoing request, then forwards it to the REPL under test. */
    private def dispatch(msg: Any): Future[Unit] = {
      add(messageToRequest(msg))
      msg match {
        case e: Exec => tested.exec(e)
        case i: Init => tested.init(i)
        case Stop => tested.stop()
        case Drop => tested.drop()
        case Quit => tested.quit()
        case un => tested.unknown(un)
      }
    }

    /** Compares recorded requests and replies (separately) against the expectations. */
    def assertMatches(): Unit = {
      val actual = record
      val expectedRequests = filterRequests(expected).toList
      val actualRequests = filterRequests(actual).toList
      val expectedReplies = filterReplies(expected).toList
      val actualReplies = filterReplies(actual).toList
      assertEquals("requests", expectedRequests, actualRequests)
      assertEquals("replies", expectedReplies, actualReplies)
    }
  }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/interpreter/EclipseReplTest.scala | Scala | bsd-3-clause | 12,965 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.jobs.mapreduce
import java.io.{Closeable, InputStream}
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, Seekable}
import org.apache.hadoop.mapreduce._
import org.geotools.data.simple.DelegateSimpleFeatureReader
import org.geotools.data.ReTypeFeatureReader
import org.geotools.feature.collection.DelegateSimpleFeatureIterator
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.convert.SimpleFeatureConverters
import org.locationtech.geomesa.jobs.GeoMesaConfigurator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
* Input format for Converters gives us access to the entire file as a byte stream
* via the record reader.
*/
class ConverterInputFormat extends FileStreamInputFormat {
  // Each split's stream is handed to a ConverterRecordReader, which runs the
  // configured SimpleFeature converter over it.
  override def createRecordReader(): FileStreamRecordReader = new ConverterRecordReader
}
object ConverterInputFormat {

  /** Hadoop counter group/names used to report conversion outcomes. */
  object Counters {
    val Group     = "org.locationtech.geomesa.jobs.convert"
    val Converted = "converted"
    val Failed    = "failed"
  }

  // Configuration key holding the converter definition (typesafe-config string).
  val ConverterKey  = "org.locationtech.geomesa.jobs.ingest.converter"

  // Configuration key holding an optional target schema to re-type features into.
  val RetypeKey     = "org.locationtech.geomesa.jobs.ingest.retype"

  /** Stores the converter definition in the job configuration. */
  def setConverterConfig(job: Job, config: String): Unit = setConverterConfig(job.getConfiguration, config)
  def setConverterConfig(conf: Configuration, config: String): Unit = conf.set(ConverterKey, config)

  /** Stores the feature type produced by the converter. */
  def setSft(job: Job, sft: SimpleFeatureType): Unit = FileStreamInputFormat.setSft(job, sft)
  def setSft(conf: Configuration, sft: SimpleFeatureType): Unit = FileStreamInputFormat.setSft(conf, sft)

  /** Stores an optional schema to re-type converted features into. */
  def setRetypeSft(job: Job, sft: SimpleFeatureType): Unit = setRetypeSft(job.getConfiguration, sft)
  def setRetypeSft(conf: Configuration, sft: SimpleFeatureType): Unit = conf.set(RetypeKey, SimpleFeatureTypes.encodeType(sft))

  /** Stores an optional ECQL filter applied to converted features. */
  def setFilter(job: Job, ecql: String): Unit = setFilter(job.getConfiguration, ecql)
  def setFilter(conf: Configuration, ecql: String): Unit = GeoMesaConfigurator.setFilter(conf, ecql)
}
class ConverterRecordReader extends FileStreamRecordReader with LazyLogging {

  import ConverterInputFormat._

  /**
   * Builds the per-split feature iterator: runs the configured converter over the
   * raw input stream, applies the optional ECQL filter, and optionally re-types
   * features into a target schema. Closing the iterator closes both the reader
   * and the converter.
   */
  override def createIterator(stream: InputStream with Seekable,
                              filePath: Path,
                              context: TaskAttemptContext): Iterator[SimpleFeature] with Closeable = {
    val confStr     = context.getConfiguration.get(ConverterKey)
    val conf        = ConfigFactory.parseString(confStr)
    val sft         = FileStreamInputFormat.getSft(context.getConfiguration)
    val converter   = SimpleFeatureConverters.build(sft, conf)
    val filter      = GeoMesaConfigurator.getFilter(context.getConfiguration).map(ECQL.toFilter)
    val retypedSpec = context.getConfiguration.get(RetypeKey)

    // Counter wired into the converter's evaluation context: success/failure totals
    // go to Hadoop job counters, while line counts stay local to this split's file.
    class MapReduceCounter extends org.locationtech.geomesa.convert.Counter {

      import ConverterInputFormat.{ Counters => C }

      // Global counters for the entire job
      override def incSuccess(i: Long): Unit   = context.getCounter(C.Group, C.Converted).increment(i)
      override def getSuccess: Long            = context.getCounter(C.Group, C.Converted).getValue

      override def incFailure(i: Long): Unit   = context.getCounter(C.Group, C.Failed).increment(i)
      override def getFailure: Long            = context.getCounter(C.Group, C.Failed).getValue

      // Line counts are local to file not global
      private var c: Long = 0
      override def incLineCount(i: Long = 1): Unit = c += i
      override def getLineCount: Long              = c
      override def setLineCount(i: Long): Unit     = c = i
    }

    val ec = converter.createEvaluationContext(Map("inputFilePath" -> filePath.toString), new MapReduceCounter)
    val raw = converter.process(stream, ec)
    // Apply the optional CQL filter before features reach the mapper.
    val iter = filter match {
      case Some(f) => raw.filter(f.evaluate)
      case None    => raw
    }

    import scala.collection.JavaConversions._

    // Re-type converted features into the target schema when one is configured.
    val featureReader = if (retypedSpec != null) {
      val retypedSft = SimpleFeatureTypes.createType(sft.getTypeName, retypedSpec)
      val reader = new DelegateSimpleFeatureReader(sft, new DelegateSimpleFeatureIterator(iter))
      new ReTypeFeatureReader(reader, retypedSft)
    } else {
      new DelegateSimpleFeatureReader(sft, new DelegateSimpleFeatureIterator(iter))
    }

    logger.info(s"Initialized record reader on split ${filePath.toString} with " +
        s"type name ${sft.getTypeName} and convert conf $confStr")

    new Iterator[SimpleFeature] with Closeable {
      override def hasNext: Boolean = featureReader.hasNext
      override def next(): SimpleFeature = featureReader.next
      override def close(): Unit = {
        // Release both the feature reader and the converter's resources.
        CloseWithLogging(featureReader)
        CloseWithLogging(converter)
      }
    }
  }
}
| ronq/geomesa | geomesa-jobs/src/main/scala/org/locationtech/geomesa/jobs/mapreduce/ConverterInputFormat.scala | Scala | apache-2.0 | 5,465 |
/*
* Copyright (c) 2011-2015 EPFL DATA Laboratory
* Copyright (c) 2014-2015 The Squall Collaboration (see NOTICE)
*
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.epfl.data.squall.api.scala.operators
import ch.epfl.data.squall.operators.Operator
import ch.epfl.data.squall.visitors.OperatorVisitor
import ch.epfl.data.squall.api.scala.SquallType._
import scala.collection.JavaConverters._
/**
* @author mohamed
*/
/**
 * Squall [[Operator]] that applies a user-supplied function `fn: T => U` to every
 * tuple flowing through it (a streaming `map`).
 *
 * Tuples arrive as `java.util.List[String]`; the `SquallType` evidence for `T`
 * decodes the list into a `T`, `fn` is applied, and the evidence for `U` encodes
 * the result back into a `java.util.List[String]`.
 *
 * @author mohamed
 */
class ScalaMapOperator[T: SquallType, U: SquallType](fn: T => U) extends Operator {
  // Number of tuples this operator instance has processed so far.
  private var _numTuplesProcessed: Int = 0
  def accept(ov: OperatorVisitor): Unit = {
    // No visitor hook for this operator (intentionally left blank).
    //ov.visit(this);
  }
  def getContent(): java.util.List[String] = {
    // A map is a streaming operator and never accumulates content.
    // (Error message fixed: previously referred to "SelectionOperator".)
    throw new RuntimeException("getContent for ScalaMapOperator should never be invoked!")
  }
  def getNumTuplesProcessed(): Int = {
    _numTuplesProcessed
  }
  /** A map is a streaming (non-blocking) operator. */
  def isBlocking(): Boolean = {
    false
  }
  def printContent(): String = {
    throw new RuntimeException("printContent for ScalaMapOperator should never be invoked!")
  }
  /**
   * Decodes the raw string tuple into a `T`, applies `fn`, and re-encodes the
   * result. `lineageTimestamp` is accepted for interface compatibility but unused.
   */
  def process(tuple: java.util.List[String], lineageTimestamp: Long): java.util.List[String] = {
    _numTuplesProcessed += 1
    val inputType: SquallType[T] = implicitly[SquallType[T]]
    val outputType: SquallType[U] = implicitly[SquallType[U]]
    val squallTuple = inputType.convertBack(tuple.asScala.toList)
    val result = outputType.convert(fn(squallTuple))
    result.asJava
  }
}
| khuevu/squall | squall-functional/src/main/scala/ch/epfl/data/squall/api/scala/operators/ScalaMapOperator.scala | Scala | apache-2.0 | 2,103 |
package io.tmos.arm
/**
* For encapsulating the management logic of a resource.
*
* Default logic for any `java.lang.AutoClosable` is provided by the companion object,
* which may be imported into current scope as implicits.
*
* Other types may be provided in scope by the user. For example
* {{{
* import java.util.concurrent._
* import io.tmos.arm.Implicits._
*
* implicit val manager: CanManage[ExecutorService] = new CanManage[ExecutorService] {
* override def onFinally(pool: ExecutorService): Unit = {
* pool.shutdown() // Disable new tasks from being submitted
* try {
* if (!pool.awaitTermination(10, TimeUnit.SECONDS)) { // wait for normal termination
* pool.shutdownNow() // force terminate
* if (!pool.awaitTermination(10, TimeUnit.SECONDS)) // wait for forced termination
* throw new RuntimeException("ExecutorService did not terminate")
* }
* } catch {
* case _: InterruptedException =>
* pool.shutdownNow() // (Re-)Cancel if current thread also interrupted
* Thread.currentThread().interrupt() // Preserve interrupt status
* }
* }
* override def onException(r: ExecutorService): Unit = {}
* }
*
* for (manage(executorService) <- Executors.newSingleThreadExecutor.manage) { ... }
* }}}
*
* @tparam R the type of the resource to manage
*/
trait CanManage[-R] {

  /**
   * Always invoked once the managed block has run, whether it completed
   * normally or threw; this is where resources are normally released or
   * closed. The default implementation does nothing.
   *
   * Implementations may throw, but should strongly avoid throwing
   * `java.lang.InterruptedException`: it interacts with the thread's interrupt
   * status and runtime misbehavior is likely if it is suppressed. More
   * generally, do not throw here if suppression of the exception would cause
   * problems.
   *
   * @param r the resource being managed
   */
  def onFinally(r: R): Unit = ()

  /**
   * Invoked when the managed block throws, before [[onFinally]]. The default
   * implementation does nothing.
   *
   * Any exception raised here is attached as a suppressed exception of the one
   * already propagating, so implementations should only throw when that is
   * acceptable.
   *
   * @param r the resource being managed
   */
  def onException(r: R): Unit = ()
}
/**
* Companion object to the CanManage type trait.
*
* Contains common implementations of CanManage for AutoClosable Resources
*/
/**
 * Companion holding stock [[CanManage]] strategies for `java.lang.AutoCloseable`
 * resources.
 */
object CanManage
{
  /**
   * Unconditionally closes the resource once the managed block has finished,
   * whether it completed normally or threw — the same guarantee as Java's
   * try-with-resources. Null resources are ignored.
   */
  implicit object CloseOnFinally extends CanManage[AutoCloseable] {
    override def onFinally(r: AutoCloseable): Unit = Option(r).foreach(_.close())
  }

  /**
   * Closes the resource only when the managed block throws. Useful when normal
   * shutdown is delegated elsewhere (for example the resource is handed to
   * another thread) but abnormal termination must still be cleaned up in the
   * current scope. Null resources are ignored.
   */
  object CloseOnException extends CanManage[AutoCloseable] {
    override def onException(r: AutoCloseable): Unit = Option(r).foreach(_.close())
  }
}
| tmoschou/arm4s | src/main/scala/io/tmos/arm/CanManage.scala | Scala | bsd-3-clause | 3,839 |
package org.jetbrains.plugins.scala.lang.optimize
package generated
import org.jetbrains.plugins.scala.ScalaVersion
/** Optimize-imports tests over the `simple/` test-data folder. Most tests run
  * `doTest()` against fixture files named after the test method; the last two
  * supply before/after code inline. */
class OptimizeImportsSimpleTestBase extends OptimizeImportsTestBase {
  //This class was generated by build script, please don't change this
  override def folderPath: String = super.folderPath + "simple/"
  protected override def sourceRootPath: String = folderPath
  def testFromRoot(): Unit = doTest()
  def testHasOverloads(): Unit = doTest()
  def testSorted(): Unit = doTest()
  def testSortedInPackage(): Unit = doTest()
  def testTwoExpressions(): Unit = doTest()
  def testDeleteBraces(): Unit = doTest()
  def testDontSaveNotResolved(): Unit = doTest()
  def testImportChainUsed(): Unit = doTest()
  def testLanguageFeatures(): Unit = doTest()
  def testNewLines(): Unit = doTest()
  def testOneImport(): Unit = doTest()
  def testScalaDoc(): Unit = doTest()
  def testSCL7275(): Unit = doTest()
  def testSomeTrait(): Unit = doTest()
  def testUnusedImportChain(): Unit = doTest()
  def testUnusedSelector(): Unit = doTest()
  def testUsedImport(): Unit = doTest()
  def testRelativeNameConflict(): Unit = doTest()
  def testNoReformattingComments(): Unit = doTest()
  // Imports of java.lang / scala / scala.Predef members should be removed when
  // nothing in scope clashes with them.
  def testRemoveImportsFromSamePackageAndDefaultPackages_NoNameClashes(): Unit = {
    // Adds unrelated classes in a sibling package to make sure they do not
    // create name clashes with the default-package imports below.
    getFixture.addFileToProject("org/example/declaration/all.scala",
      """package org.example.declaration.data
        |
        |class Random
        |class Qwe
        |""".stripMargin
    )
    doTest(
      """import java.lang.AbstractMethodError
        |import java.util.Properties
        |import scala.Predef.Manifest
        |import scala.Tuple1
        |import scala.util.Try
        |
        |object Usage {
        |  val a1: Manifest[_] = ???
        |  val a2: Tuple1[_] = ???
        |  val a3: AbstractMethodError = ???
        |  val a4: Properties = ???
        |  val a5: Try[_] = ???
        |}""".stripMargin,
      """import java.util.Properties
        |import scala.util.Try
        |
        |object Usage {
        |  val a1: Manifest[_] = ???
        |  val a2: Tuple1[_] = ???
        |  val a3: AbstractMethodError = ???
        |  val a4: Properties = ???
        |  val a5: Try[_] = ???
        |}""".stripMargin
    )
  }
  // Same as above, but with the imports declared locally inside the object.
  def testRemoveImportsFromSamePackageAndDefaultPackages_NoNameClashes_LocalImports(): Unit = {
    getFixture.addFileToProject("org/example/declaration/all.scala",
      """package org.example.declaration.data
        |
        |class Random
        |class Qwe
        |""".stripMargin
    )
    doTest(
      """object Usage {
        |  import java.lang.AbstractMethodError
        |  import java.util.Properties
        |  import scala.Predef.Manifest
        |  import scala.Tuple1
        |  import scala.util.Try
        |
        |  val a1: Manifest[_] = ???
        |  val a2: Tuple1[_] = ???
        |  val a3: AbstractMethodError = ???
        |  val a4: Properties = ???
        |  val a5: Try[_] = ???
        |}""".stripMargin,
      """object Usage {
        |  import java.util.Properties
        |  import scala.util.Try
        |
        |  val a1: Manifest[_] = ???
        |  val a2: Tuple1[_] = ???
        |  val a3: AbstractMethodError = ???
        |  val a4: Properties = ???
        |  val a5: Try[_] = ???
        |}""".stripMargin
    )
  }
}
/** Runs the simple optimize-imports suite against Scala 2.12 only. */
class OptimizeImportsSimpleTest_2_12 extends OptimizeImportsSimpleTestBase {
  override protected def supportedIn(version: ScalaVersion): Boolean =
    ScalaVersion.Latest.Scala_2_12 == version
}
/** Runs the simple optimize-imports suite against Scala 2.13 only. */
class OptimizeImportsSimpleTest_2_13 extends OptimizeImportsSimpleTestBase {
  override protected def supportedIn(version: ScalaVersion): Boolean =
    ScalaVersion.Latest.Scala_2_13 == version
}
package mysql2hbase
import java.util
import java.util.Arrays
import com.github.shyiko.mysql.binlog.event.TableMapEventData
object TableInfo {
  /** Builds a TableInfo for a binlog TABLE_MAP event, fetching per-column
    * metadata and the primary-key bitset from the MySQL server via ColInfo. */
  def get(
    data: TableMapEventData,
    host: String, port: Int, username: String, password: String
  ): TableInfo = {
    // cols._1: per-column info, cols._2: primary-key column BitSet
    val cols = ColInfo.get(host, port, username, password, data.getDatabase, data.getTable)
    TableInfo(data, cols._1,cols._2)
  }
}
/**
 * Schema snapshot of one MySQL table, built from a binlog TABLE_MAP event plus
 * per-column metadata ([[ColInfo]]) and the primary-key column bitset.
 */
case class TableInfo(data: TableMapEventData, cols: IndexedSeq[ColInfo], primaryKey: util.BitSet) {

  /**
   * Whether `data` describes the same table schema as this instance: same
   * database and table name, identical column types/metadata, and the same
   * nullability bitset. Table ids are deliberately not compared.
   */
  def sameData(data: TableMapEventData): Boolean = {
    this.data.getDatabase == data.getDatabase &&
      this.data.getTable == data.getTable &&
      Arrays.equals(this.data.getColumnTypes, data.getColumnTypes) &&
      Arrays.equals(this.data.getColumnMetadata, data.getColumnMetadata) &&
      // BUG FIX: this clause previously used `!=`, which made "same data" require
      // the nullability bitsets to DIFFER while everything else matched.
      this.data.getColumnNullability == data.getColumnNullability
  }

  /** True when the set of (name, type) primary-key columns differs between the two infos. */
  def isKeyColumnChanged(that: TableInfo): Boolean =
    this.getKeyColumns().sortBy(_._1) != that.getKeyColumns().sortBy(_._1)

  /** Non-key (name, type) pairs present in `that` but not in this info (columns added). */
  def nonKeyColumnAdded(that: TableInfo) =
    that.getNonKeyColumns() diff this.getNonKeyColumns()

  /** Non-key (name, type) pairs present in this info but not in `that` (columns dropped). */
  def nonKeyColumnDropped(that: TableInfo) =
    this.getNonKeyColumns() diff that.getNonKeyColumns()

  /** (name, lower-cased type) pairs for the primary-key columns. */
  def getKeyColumns() =
    cols.filter(_.isPrimaryKey).map(col => (col.name, col.typeLowerCase))

  /** (name, lower-cased type) pairs for all non-key columns. */
  def getNonKeyColumns() =
    cols.filter(col => !col.isPrimaryKey).map(col => (col.name, col.typeLowerCase))

  /** Source database name. */
  def getDBName() = data.getDatabase

  /** Target HBase table name: `<database>_<table>`. */
  def getHTableName() = data.getDatabase + "_" + data.getTable

  /** Fully qualified MySQL name: `<database>.<table>`. */
  def getDBTableName() = data.getDatabase + "." + data.getTable

  /** All column infos concatenated, each prefixed with '|'. */
  override def toString = cols.map("|" + _.toString).mkString
}
| chenm11/mysql-hbase-replicator | src/main/scala/mysql2hbase/TableInfo.scala | Scala | mit | 1,987 |
/* sbt -- Simple Build Tool
* Copyright 2009 Mark Harrah, Vesa Vilhonen
*/
package sbt
import java.lang.{Process => JProcess, ProcessBuilder => JProcessBuilder}
import java.io.{BufferedReader, Closeable, InputStream, InputStreamReader, IOException, OutputStream, PrintStream}
import java.io.{FilterInputStream, FilterOutputStream, PipedInputStream, PipedOutputStream}
import java.io.{File, FileInputStream, FileOutputStream}
import java.net.URL
import scala.concurrent.SyncVar
/** Runs provided code in a new Thread and returns the Thread instance. */
/** Starts a new Thread running the given thunk and returns it to the caller. */
private object Spawn
{
	/** Runs `f` on a fresh non-daemon thread. */
	def apply(f: => Unit): Thread = apply(f, false)

	/** Runs `f` on a fresh thread; `daemon` controls Thread.setDaemon. */
	def apply(f: => Unit, daemon: Boolean): Thread =
	{
		val runnable = new Runnable { def run(): Unit = f }
		val thread = new Thread(runnable)
		thread.setDaemon(daemon)
		thread.start()
		thread
	}
}
/** Evaluates a thunk on a background thread; the returned function blocks for
 * the result and rethrows any exception the thunk raised. */
private object Future
{
	def apply[T](f: => T): () => T =
	{
		val result = new SyncVar[Either[Throwable, T]]
		Spawn {
			val outcome: Either[Throwable, T] =
				try Right(f)
				catch { case e: Exception => Left(e) }
			result.set(outcome)
		}
		() =>
			result.get match
			{
				case Right(value) => value
				case Left(exception) => throw exception
			}
	}
}
/** Helpers for wiring a spawned process' stdin/stdout/stderr to streams,
 * loggers, and string buffers. */
object BasicIO
{
	def apply(buffer: StringBuffer, log: Option[ProcessLogger], withIn: Boolean) = new ProcessIO(input(withIn), processFully(buffer), getErr(log), inheritInput(withIn))
	def apply(log: ProcessLogger, withIn: Boolean) = new ProcessIO(input(withIn), processInfoFully(log), processErrFully(log), inheritInput(withIn))
	/** stderr handler: forward lines to `log` if given, otherwise to this JVM's stderr. */
	def getErr(log: Option[ProcessLogger]) = log match { case Some(lg) => processErrFully(lg); case None => toStdErr }
	private def processErrFully(log: ProcessLogger) = processFully(s => log.error(s))
	private def processInfoFully(log: ProcessLogger) = processFully(s => log.info(s))
	def closeOut = (_: OutputStream).close()
	final val BufferSize = 8192
	final val Newline = System.getProperty("line.separator")
	/** Closes `c`, swallowing any IOException. */
	def close(c: java.io.Closeable) = try { c.close() } catch { case _: java.io.IOException => () }
	def processFully(buffer: Appendable): InputStream => Unit = processFully(appendLine(buffer))
	/** Reads the stream line by line, feeding each line to `processLine`, then closes it. */
	def processFully(processLine: String => Unit): InputStream => Unit =
		in =>
		{
			val reader = new BufferedReader(new InputStreamReader(in))
			processLinesFully(processLine)(reader.readLine)
			reader.close()
		}
	def processLinesFully(processLine: String => Unit)(readLine: () => String)
	{
		// recurses until readLine signals end-of-stream with null
		def readFully()
		{
			val line = readLine()
			if(line != null)
			{
				processLine(line)
				readFully()
			}
		}
		readFully()
	}
	// System.in is wrapped so the pump cannot close the JVM's stdin
	def connectToIn(o: OutputStream) { transferFully(Uncloseable protect System.in, o) }
	def input(connect: Boolean): OutputStream => Unit = if(connect) connectToIn else closeOut
	def standard(connectInput: Boolean): ProcessIO = standard(input(connectInput), inheritInput(connectInput))
	def standard(in: OutputStream => Unit, inheritIn: JProcessBuilder => Boolean): ProcessIO = new ProcessIO(in, toStdOut, toStdErr, inheritIn)
	def toStdErr = (in: InputStream) => transferFully(in, System.err)
	def toStdOut = (in: InputStream) => transferFully(in, System.out)
	/** Copies `in` to `out` until end of stream; interruption aborts the copy silently. */
	def transferFully(in: InputStream, out: OutputStream): Unit =
		try { transferFullyImpl(in, out) }
		catch { case _: InterruptedException => () }
	private[this] def appendLine(buffer: Appendable): String => Unit =
		line =>
		{
			buffer.append(line)
			buffer.append(Newline)
		}
	private[this] def transferFullyImpl(in: InputStream, out: OutputStream)
	{
		val continueCount = 1//if(in.isInstanceOf[PipedInputStream]) 1 else 0
		val buffer = new Array[Byte](BufferSize)
		// copies a chunk, flushes, and recurses until read returns less than continueCount
		def read
		{
			val byteCount = in.read(buffer)
			if(byteCount >= continueCount)
			{
				out.write(buffer, 0, byteCount)
				out.flush()
				read
			}
		}
		read
		in.close()
	}
	def inheritInput(connect: Boolean) = { p: JProcessBuilder => if (connect) InheritInput(p) else false }
}
/** Base implementation of ProcessBuilder: shell-like combinators (#&&, #||,
 * #|, ###), the blocking run/!/!! families, and lazy line streaming. */
private abstract class AbstractProcessBuilder extends ProcessBuilder with SinkPartialBuilder with SourcePartialBuilder
{
	/** Runs `other` only if this process exits 0 (like shell &&). */
	def #&&(other: ProcessBuilder): ProcessBuilder = new AndProcessBuilder(this, other)
	/** Runs `other` only if this process exits nonzero (like shell ||). */
	def #||(other: ProcessBuilder): ProcessBuilder = new OrProcessBuilder(this, other)
	/** Pipes this process' output into `other`'s input (like shell |). */
	def #|(other: ProcessBuilder): ProcessBuilder =
	{
		require(other.canPipeTo, "Piping to multiple processes is not supported.")
		new PipedProcessBuilder(this, other, false)
	}
	/** Runs this process then `other`, unconditionally (like shell ;). */
	def ###(other: ProcessBuilder): ProcessBuilder = new SequenceProcessBuilder(this, other)
	protected def toSource = this
	protected def toSink = this
	def run(): Process = run(false)
	def run(connectInput: Boolean): Process = run(BasicIO.standard(connectInput))
	def run(log: ProcessLogger): Process = run(log, false)
	def run(log: ProcessLogger, connectInput: Boolean): Process = run(BasicIO(log, connectInput))
	/** Runs to completion and returns captured stdout; fails on nonzero exit. */
	private[this] def getString(log: Option[ProcessLogger], withIn: Boolean): String =
	{
		val buffer = new StringBuffer
		val code = this ! BasicIO(buffer, log, withIn)
		if(code == 0) buffer.toString else error("Nonzero exit value: " + code)
	}
	def !! = getString(None, false)
	def !!(log: ProcessLogger) = getString(Some(log), false)
	def !!< = getString(None, true)
	def !!<(log: ProcessLogger) = getString(Some(log), true)
	def lines: Stream[String] = lines(false, true, None)
	def lines(log: ProcessLogger): Stream[String] = lines(false, true, Some(log))
	def lines_! : Stream[String] = lines(false, false, None)
	def lines_!(log: ProcessLogger): Stream[String] = lines(false, false, Some(log))
	/** Streams stdout lines lazily; the exit code is collected on a spawned thread. */
	private[this] def lines(withInput: Boolean, nonZeroException: Boolean, log: Option[ProcessLogger]): Stream[String] =
	{
		val streamed = Streamed[String](nonZeroException)
		val process = run(new ProcessIO(BasicIO.input(withInput), BasicIO.processFully(streamed.process), BasicIO.getErr(log), BasicIO.inheritInput(withInput)))
		Spawn { streamed.done(process.exitValue()) }
		streamed.stream()
	}
	def ! = run(false).exitValue()
	def !< = run(true).exitValue()
	def !(log: ProcessLogger) = runBuffered(log, false).exitValue()
	def !<(log: ProcessLogger) = runBuffered(log, true).exitValue()
	def runBuffered(log: ProcessLogger, connectInput: Boolean) =
		log.buffer { run(log, connectInput) }
	def !(io: ProcessIO) = run(io).exitValue()
	/** Whether this builder may appear on the right-hand side of #|. */
	def canPipeTo = false
}
/** Source-only builder backed by a URL's input stream. */
private[sbt] class URLBuilder(url: URL) extends URLPartialBuilder with SourcePartialBuilder
{
	protected def toSource = new URLInput(url)
}
/** Builder for a file usable both as a source (#<) and as a sink (#>); the
 * #<< variants append to the file instead of truncating it. */
private[sbt] class FileBuilder(base: File) extends FilePartialBuilder with SinkPartialBuilder with SourcePartialBuilder
{
	protected def toSource = new FileInput(base)
	protected def toSink = new FileOutput(base, false)
	def #<<(f: File): ProcessBuilder = #<<(new FileInput(f))
	def #<<(u: URL): ProcessBuilder = #<<(new URLInput(u))
	def #<<(s: => InputStream): ProcessBuilder = #<<(new InputStreamBuilder(s))
	def #<<(b: ProcessBuilder): ProcessBuilder = new PipedProcessBuilder(b, new FileOutput(base, true), false)
}
/** Builder for compound processes; subclasses provide createProcess and the
 * resulting BasicProcess is started before being returned. */
private abstract class BasicBuilder extends AbstractProcessBuilder
{
	// guards against self-referential compositions like `p #&& p` where p eq this
	protected[this] def checkNotThis(a: ProcessBuilder) = require(a != this, "Compound process '" + a + "' cannot contain itself.")
	final def run(io: ProcessIO): Process =
	{
		val p = createProcess(io)
		p.start()
		p
	}
	protected[this] def createProcess(io: ProcessIO): BasicProcess
}
/** A Process that must be explicitly started after construction. */
private abstract class BasicProcess extends Process
{
	def start(): Unit
}
/** Process composed of sub-processes. The exit value is computed on a spawned
 * thread, so `start` returns immediately; `destroy` interrupts that thread. */
private abstract class CompoundProcess extends BasicProcess
{
	def destroy() { destroyer() }
	def exitValue() = getExitValue().getOrElse(error("No exit code: process destroyed."))
	def start() = getExitValue
	// Spawns runAndExitValue once; `getExitValue` blocks (via Future) for the
	// thread's result and `destroyer` interrupts it. Lazy so work starts on first use.
	protected lazy val (getExitValue, destroyer) =
	{
		val code = new SyncVar[Option[Int]]()
		code.set(None)
		val thread = Spawn(code.set(runAndExitValue()))
		(
			Future { thread.join(); code.get },
			() => thread.interrupt()
		)
	}
	/** Start and block until the exit value is available and then return it in Some. Return None if destroyed (use 'run')*/
	protected[this] def runAndExitValue(): Option[Int]
	/** Runs `action`; if interrupted, runs `destroyImpl` and yields None. */
	protected[this] def runInterruptible[T](action: => T)(destroyImpl: => Unit): Option[T] =
	{
		try { Some(action) }
		catch { case _: InterruptedException => destroyImpl; None }
	}
}
/** Base for builders combining two processes with a shell-like operator;
 * validates that neither operand is this builder itself. */
private abstract class SequentialProcessBuilder(a: ProcessBuilder, b: ProcessBuilder, operatorString: String) extends BasicBuilder
{
	checkNotThis(a)
	checkNotThis(b)
	override def toString = " ( " + a + " " + operatorString + " " + b + " ) "
}
/** Builds `first #| second`; when toError is set, first's stderr (not stdout) is piped. */
private class PipedProcessBuilder(first: ProcessBuilder, second: ProcessBuilder, toError: Boolean) extends SequentialProcessBuilder(first, second, if(toError) "#|!" else "#|")
{
	override def createProcess(io: ProcessIO) = new PipedProcesses(first, second, io, toError)
}
/** Builds `first #&& second` (second runs only when first exits 0). */
private class AndProcessBuilder(first: ProcessBuilder, second: ProcessBuilder) extends SequentialProcessBuilder(first, second, "#&&")
{
	override def createProcess(io: ProcessIO) = new AndProcess(first, second, io)
}
/** Builds `first #|| second` (second runs only when first exits nonzero). */
private class OrProcessBuilder(first: ProcessBuilder, second: ProcessBuilder) extends SequentialProcessBuilder(first, second, "#||")
{
	override def createProcess(io: ProcessIO) = new OrProcess(first, second, io)
}
/** Builds `first ### second` (second always runs after first). */
private class SequenceProcessBuilder(first: ProcessBuilder, second: ProcessBuilder) extends SequentialProcessBuilder(first, second, "###")
{
	override def createProcess(io: ProcessIO) = new ProcessSequence(first, second, io)
}
/** Runs `a`; runs `b` iff evaluateSecondProcess(a's exit code) holds. The
 * overall exit value is b's when b ran, otherwise a's. */
private class SequentialProcess(a: ProcessBuilder, b: ProcessBuilder, io: ProcessIO, evaluateSecondProcess: Int => Boolean) extends CompoundProcess
{
	protected[this] override def runAndExitValue() =
	{
		val first = a.run(io)
		runInterruptible(first.exitValue)(first.destroy()) flatMap
		{ codeA =>
			if(evaluateSecondProcess(codeA))
			{
				val second = b.run(io)
				runInterruptible(second.exitValue)(second.destroy())
			}
			else
				Some(codeA)
		}
	}
}
/** Runs b only when a exits 0 (shell &&). */
private class AndProcess(a: ProcessBuilder, b: ProcessBuilder, io: ProcessIO) extends SequentialProcess(a, b, io, _ == 0)
/** Runs b only when a exits nonzero (shell ||). */
private class OrProcess(a: ProcessBuilder, b: ProcessBuilder, io: ProcessIO) extends SequentialProcess(a, b, io, _ != 0)
/** Always runs b after a, regardless of a's exit code (shell ;). */
private class ProcessSequence(a: ProcessBuilder, b: ProcessBuilder, io: ProcessIO) extends SequentialProcess(a, b, io, ignore => true)
/** Runs `a` and `b` concurrently with a's stdout (or stderr when toError)
 * pumped into b's stdin via a pair of pipe threads (PipeSource/PipeSink). */
private class PipedProcesses(a: ProcessBuilder, b: ProcessBuilder, defaultIO: ProcessIO, toError: Boolean) extends CompoundProcess
{
	protected[this] override def runAndExitValue() =
	{
		// source thread pumps a's output stream into pipeOut; sink thread pumps
		// pipeIn into b's input stream. Streams are handed over via the SyncVars.
		val currentSource = new SyncVar[Option[InputStream]]
		val pipeOut = new PipedOutputStream
		val source = new PipeSource(currentSource, pipeOut, a.toString)
		source.start()
		val pipeIn = new PipedInputStream(pipeOut)
		val currentSink = new SyncVar[Option[OutputStream]]
		val sink = new PipeSink(pipeIn, currentSink, b.toString)
		sink.start()
		def handleOutOrError(fromOutput: InputStream) = currentSource.put(Some(fromOutput))
		val firstIO =
			if(toError)
				defaultIO.withError(handleOutOrError)
			else
				defaultIO.withOutput(handleOutOrError)
		val secondIO = defaultIO.withInput(toInput => currentSink.put(Some(toInput)) )
		val second = b.run(secondIO)
		val first = a.run(firstIO)
		try
		{
			runInterruptible {
				first.exitValue
				// a has finished: tell both pump threads to shut down, then wait for b
				currentSource.put(None)
				currentSink.put(None)
				val result = second.exitValue
				result
			} {
				first.destroy()
				second.destroy()
			}
		}
		finally
		{
			BasicIO.close(pipeIn)
			BasicIO.close(pipeOut)
		}
	}
}
/** Thread that repeatedly pumps the streams handed over via `currentSource`
 * into `pipe`; a None handover closes the pipe and ends the thread. */
private class PipeSource(currentSource: SyncVar[Option[InputStream]], pipe: PipedOutputStream, label: => String) extends Thread
{
	final override def run()
	{
		currentSource.get match
		{
			case Some(source) =>
				try { BasicIO.transferFully(source, pipe) }
				catch { case e: IOException => println("I/O error " + e.getMessage + " for process: " + label); e.printStackTrace() }
				finally
				{
					BasicIO.close(source)
					currentSource.unset()
				}
				// loop: wait for the next handover (or None to terminate)
				run()
			case None =>
				currentSource.unset()
				BasicIO.close(pipe)
		}
	}
}
/** Thread that repeatedly pumps `pipe` into the output streams handed over via
 * `currentSink`; a None handover ends the thread. */
private class PipeSink(pipe: PipedInputStream, currentSink: SyncVar[Option[OutputStream]], label: => String) extends Thread
{
	final override def run()
	{
		currentSink.get match
		{
			case Some(sink) =>
				try { BasicIO.transferFully(pipe, sink) }
				catch { case e: IOException => println("I/O error " + e.getMessage + " for process: " + label); e.printStackTrace() }
				finally
				{
					BasicIO.close(sink)
					currentSink.unset()
				}
				// loop: wait for the next handover (or None to terminate)
				run()
			case None =>
				currentSink.unset()
		}
	}
}
/** Builder that performs no real I/O and reports the given (lazily evaluated) exit value. */
private[sbt] class DummyProcessBuilder(override val toString: String, exitValue : => Int) extends AbstractProcessBuilder
{
	override def run(io: ProcessIO): Process = new DummyProcess(exitValue)
	override def canPipeTo = true
}
/** A fake Process whose exit value is the result of evaluating `action` on a
 * background thread; it performs no I/O and `destroy` is a no-op.
 * (The previous comment here described SimpleProcess, not this class.) */
private class DummyProcess(action: => Int) extends Process
{
	private[this] val exitCode = Future(action)
	override def exitValue() = exitCode()
	override def destroy() {}
}
/** Represents a simple command without any redirection or combination. */
/** Represents a simple command without any redirection or combination. */
private[sbt] class SimpleProcessBuilder(p: JProcessBuilder) extends AbstractProcessBuilder
{
	override def run(io: ProcessIO): Process =
	{
		import io._
		val inherited = inheritInput(p)
		val process = p.start()
		// spawn threads that process the output and error streams, and also write input if not inherited.
		if (!inherited)
			Spawn(writeInput(process.getOutputStream))
		val outThread = Spawn(processOutput(process.getInputStream))
		// when stderr is merged into stdout there is no separate error stream to drain
		val errorThread =
			if(!p.redirectErrorStream)
				Spawn(processError(process.getErrorStream)) :: Nil
			else
				Nil
		new SimpleProcess(process, outThread :: errorThread)
	}
	override def toString = p.command.toString
	/** A simple command may appear on the right-hand side of #|. */
	override def canPipeTo = true
}
/** A thin wrapper around a java.lang.Process. `outputThreads` are the Threads created to read from the
 * output and error streams of the process.
 * The implementation of `exitValue` wait for the process to finish and then waits until the threads reading output and error streams die before
 * returning. Note that the thread that reads the input stream cannot be interrupted, see https://github.com/sbt/sbt/issues/327 and
 * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4514257 */
private class SimpleProcess(p: JProcess, outputThreads: List[Thread]) extends Process
{
	override def exitValue() =
	{
		// if we are interrupted while waiting, kill the process instead of propagating
		try {
			p.waitFor()
		} catch {
			case _: InterruptedException => p.destroy()
		}
		outputThreads.foreach(_.join()) // this ensures that all output is complete before returning (waitFor does not ensure this)
		p.exitValue()
	}
	override def destroy() = p.destroy()
}
/** Sink backed by a file; `append` selects append vs truncate mode. */
private class FileOutput(file: File, append: Boolean) extends OutputStreamBuilder(new FileOutputStream(file, append), file.getAbsolutePath)
/** Source backed by the stream opened from a URL. */
private class URLInput(url: URL) extends InputStreamBuilder(url.openStream, url.toString)
/** Source backed by a file. */
private class FileInput(file: File) extends InputStreamBuilder(new FileInputStream(file), file.getAbsolutePath)
import Uncloseable.protect
/** Builder whose process writes the piped-in data to `stream`; standard
 * streams are protected from being closed. */
private class OutputStreamBuilder(stream: => OutputStream, label: String) extends ThreadProcessBuilder(label, _.writeInput(protect(stream)))
{
	def this(stream: => OutputStream) = this(stream, "<output stream>")
}
/** Builder whose process reads its output from `stream`; standard streams are
 * protected from being closed. */
private class InputStreamBuilder(stream: => InputStream, label: String) extends ThreadProcessBuilder(label, _.processOutput(protect(stream)))
{
	def this(stream: => InputStream) = this(stream, "<input stream>")
}
/** Builder whose "process" is a thread running `runImpl` against the given I/O. */
private abstract class ThreadProcessBuilder(override val toString: String, runImpl: ProcessIO => Unit) extends AbstractProcessBuilder
{
	override def run(io: ProcessIO): Process =
	{
		// `success` is set to true only if runImpl completes without throwing
		val success = new SyncVar[Boolean]
		success.put(false)
		new ThreadProcess(Spawn {runImpl(io); success.set(true) }, success)
	}
}
/** Process backed by a thread: exit value is 0 iff the thread set `success`. */
private final class ThreadProcess(thread: Thread, success: SyncVar[Boolean]) extends Process
{
	override def exitValue() =
	{
		thread.join()
		if(success.get) 0 else 1
	}
	override def destroy() { thread.interrupt() }
}
/** Stream wrappers whose close() is a no-op; used to shield the JVM's
 * System.in/out/err from being closed by process-wiring code. */
object Uncloseable
{
	/** Wraps `in` so that close() does nothing; all reads delegate. */
	def apply(in: InputStream): InputStream =
		new FilterInputStream(in) { override def close(): Unit = () }

	/** Wraps `out` so that close() does nothing; all writes delegate. */
	def apply(out: OutputStream): OutputStream =
		new FilterOutputStream(out) { override def close(): Unit = () }

	/** Shields only the process-wide standard input; any other stream passes through unchanged. */
	def protect(in: InputStream): InputStream =
		if (in eq System.in) apply(in) else in

	/** Shields only the process-wide standard output/error; any other stream passes through unchanged. */
	def protect(out: OutputStream): OutputStream =
		if ((out eq System.out) || (out eq System.err)) apply(out) else out
}
private object Streamed
{
	/** Creates a Streamed whose `stream` lazily replays values pushed via
	 * `process` until `done` delivers an exit code: Left(0) ends the stream,
	 * a nonzero code either errors (nonzeroException) or ends it silently. */
	def apply[T](nonzeroException: Boolean): Streamed[T] =
	{
		// producer (process/done) and consumer (stream) communicate via this queue
		val q = new java.util.concurrent.LinkedBlockingQueue[Either[Int, T]]
		def next(): Stream[T] =
			q.take match
			{
				case Left(0) => Stream.empty
				case Left(code) => if(nonzeroException) error("Nonzero exit code: " + code) else Stream.empty
				case Right(s) => Stream.cons(s, next)
			}
		new Streamed((s: T) => q.put(Right(s)), code => q.put(Left(code)), () => next())
	}
}
/** Callback bundle built by Streamed.apply: `process` enqueues output values,
 * `done` records the exit code, `stream` lazily replays the output. */
private final class Streamed[T](val process: T => Unit, val done: Int => Unit, val stream: () => Stream[T]) extends NotNull
| olove/xsbt | util/process/src/main/scala/sbt/ProcessImpl.scala | Scala | bsd-3-clause | 16,951 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib.io
import scala.annotation.tailrec
import java.io._
import scala.scalajs.js
import scala.scalajs.js.JSConverters._
import org.scalajs.jasminetest.JasmineTest
/** Tests for our implementation of java.io._ reader classes */
/** Jasmine-style suite exercising StringReader, BufferedReader (with a tiny
 * internal buffer to force refills) and InputStreamReader's UTF-8 decoding. */
object ReadersTest extends JasmineTest {
  describe("java.io.StringReader") {
    val str = "asdf"
    def newReader: StringReader = new StringReader(str)
    it("should provide read()") {
      val r = newReader
      for (c <- str) {
        expect(r.read().toChar).toEqual(c)
      }
      // end of stream is signalled with -1
      expect(r.read()).toEqual(-1)
    }
    it("should provide read(buf: Array[Char], off: Int, len: Int)") {
      val r = newReader
      val buf = new Array[Char](10)
      expect(r.read(buf, 2, 8)).toBe(4)
      expect(buf.map(_.toInt).toJSArray).toEqual(
          js.Array[Int](0,0,'a','s','d','f',0,0,0,0))
      expect(r.read(buf, 2, 8)).toBe(-1) // #1560
    }
    it("should provide read(java.nio.CharBuffer)") {
      val r = newReader
      // slice a sub-buffer so absolute positions differ from logical ones
      val buf0 = java.nio.CharBuffer.allocate(25)
      buf0.position(3)
      val buf = buf0.slice()
      buf.position(4)
      buf.limit(14)
      expect(r.read(buf)).toBe(4)
      expect(buf.position()).toBe(8)
      buf.flip()
      expect(buf.toString().map(_.toInt).toJSArray).toEqual(
          js.Array[Int](0, 0, 0, 0, 'a', 's', 'd', 'f'))
    }
    it("should provide ready") {
      val r = newReader
      for (c <- str) {
        expect(r.ready()).toBeTruthy
        expect(r.read().toChar).toEqual(c)
      }
      expect(r.ready()).toBeFalsy
      expect(r.read()).toEqual(-1)
    }
    it("should provide mark/reset") {
      val r = newReader
      r.mark(str.length)
      for (c <- str) {
        expect(r.read().toChar).toEqual(c)
      }
      expect(r.read()).toEqual(-1)
      // reset rewinds to the mark; the whole string is readable again
      r.reset()
      for (c <- str) {
        expect(r.read().toChar).toEqual(c)
      }
      expect(r.read()).toEqual(-1)
    }
    it("should provide skip") {
      val r = newReader
      expect(r.read()).toEqual('a')
      expect(r.skip(2L).toInt).toBe(2)
      expect(r.read()).toEqual('f')
      expect(r.read()).toEqual(-1)
    }
    it("should provide close") {
      val r = newReader
      r.close()
      // reading a closed reader must throw
      expect(() => r.read()).toThrow
    }
    it("should support marking") {
      expect(newReader.markSupported).toBeTruthy
    }
  }
  describe("java.io.BufferedReader") {
    // mixes \\n, \\r\\n, empty lines and a lone \\r to exercise readLine
    val str = "line1\\nline2\\r\\n\\nline4\\rline5"
    // buffer size 3 forces repeated refills of the internal buffer
    def newReader: BufferedReader = new BufferedReader(new StringReader(str), 3)
    it("should provide read()") {
      val r = newReader
      for (c <- str) {
        expect(r.read().toChar).toEqual(c)
      }
      expect(r.read()).toEqual(-1)
    }
    it("should provide read(cbuf)") {
      var read = 0
      val r = newReader
      val buf = new Array[Char](15)
      // twice to force filling internal buffer
      for (_ <- 0 to 1) {
        val len = r.read(buf)
        expect(len).toBeGreaterThan(0)
        for (i <- 0 until len)
          expect(buf(i)).toEqual(str.charAt(i+read))
        read += len
      }
    }
    it("should provide read(cbuf, off, len)") {
      var read = 0
      val r = newReader
      val buf = new Array[Char](15)
      // twice to force filling internal buffer
      for (_ <- 0 to 1) {
        val len = r.read(buf, 1, 10)
        expect(len).toBeGreaterThan(0)
        expect(len).toBeLessThan(11)
        for (i <- 0 until len)
          expect(buf(i+1)).toEqual(str.charAt(i+read))
        read += len
      }
    }
    it("should provide mark/reset") {
      val r = newReader
      expect(r.read()).toEqual('l')
      // force moving and resizing buffer
      r.mark(10)
      for (i <- 0 until 10) {
        expect(r.read()).toEqual(str.charAt(i+1))
      }
      r.reset()
      for (i <- 1 until str.length) {
        expect(r.read()).toEqual(str.charAt(i))
      }
    }
    it("should provide readLine") {
      val r = newReader
      expect(r.readLine()).toEqual("line1")
      expect(r.readLine()).toEqual("line2")
      expect(r.readLine()).toEqual("")
      expect(r.readLine()).toEqual("line4")
      expect(r.readLine()).toEqual("line5")
      expect(r.readLine()).toEqual(null)
    }
    it("should readLine on an empty stream") {
      val r = new BufferedReader(new StringReader(""))
      expect(r.readLine()).toEqual(null)
    }
    it("should readline with empty lines only") {
      // \\n, \\r\\n, \\r, \\r\\n => four empty lines
      val r = new BufferedReader(new StringReader("\\n\\r\\n\\r\\r\\n"), 1)
      for (_ <- 1 to 4)
        expect(r.readLine()).toEqual("")
      expect(r.readLine()).toEqual(null)
    }
    it("should support marking") {
      expect(newReader.markSupported).toBeTruthy
    }
  }
  describe("java.io.InputStreamReader") {
    it("should read UTF8") {
      // UTF-8 bytes for "Hello World." followed by two Japanese sentences
      val buf = Array[Byte](72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100,
          46, -29, -127, -109, -29, -126, -109, -29, -127, -85, -29, -127, -95,
          -29, -127, -81, -26, -105, -91, -26, -100, -84, -24, -86, -98, -29,
          -126, -110, -24, -86, -83, -29, -126, -127, -29, -127, -66, -29, -127,
          -103, -29, -127, -117, -29, -128, -126)
      val r = new InputStreamReader(new ByteArrayInputStream(buf))
      // reads exactly str.length chars (looping, since read may return short counts)
      def expectRead(str: String): Unit = {
        val buf = new Array[Char](str.length)
        @tailrec
        def readAll(readSoFar: Int): Int = {
          if (readSoFar == buf.length) readSoFar
          else {
            val newlyRead = r.read(buf, readSoFar, buf.length - readSoFar)
            if (newlyRead == -1) readSoFar
            else readAll(readSoFar + newlyRead)
          }
        }
        expect(readAll(0)).toBe(str.length)
        expect(new String(buf)).toEqual(str)
      }
      expectRead("Hello World.")
      expectRead("こんにちは")
      expectRead("日本語を読めますか。")
      expect(r.read()).toBe(-1)
    }
  }
}
| CapeSepias/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/io/ReadersTest.scala | Scala | bsd-3-clause | 6,405 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.scalatest.Assertions._
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete, Count, Max}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.{Cross, LeftOuter, RightOuter}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, MapData}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/** Simple payload used to back [[GroupableUDT]] in grouping-type tests. */
private[sql] case class GroupableData(data: Int) {
  // Java-style accessor mirroring the `data` constructor field.
  def getData: Int = data
}
/**
 * Test-only UDT whose underlying SQL representation is a plain integer,
 * which makes [[GroupableData]] values legal in grouping expressions.
 */
private[sql] class GroupableUDT extends UserDefinedType[GroupableData] {
  override def sqlType: DataType = IntegerType
  // Store just the wrapped int.
  override def serialize(groupableData: GroupableData): Int = groupableData.data
  // Only an Int is ever produced by `serialize`, so any other input is a
  // bug and deliberately ends in a MatchError.
  override def deserialize(datum: Any): GroupableData = datum match {
    case value: Int => GroupableData(value)
  }
  override def userClass: Class[GroupableData] = classOf[GroupableData]
  private[spark] override def asNullable: GroupableUDT = this
}
/** Payload whose map field makes it illegal in grouping expressions. */
private[sql] case class UngroupableData(data: Map[Int, Int]) {
  // Java-style accessor mirroring the `data` constructor field.
  def getData: Map[Int, Int] = data
}
/**
 * Test-only UDT backed by a SQL map type; map-typed columns cannot be
 * grouped on, so this UDT must be rejected in grouping expressions.
 */
private[sql] class UngroupableUDT extends UserDefinedType[UngroupableData] {
  override def sqlType: DataType = MapType(IntegerType, IntegerType)
  override def serialize(ungroupableData: UngroupableData): MapData = {
    val keys = new GenericArrayData(ungroupableData.data.keys.toSeq)
    val values = new GenericArrayData(ungroupableData.data.values.toSeq)
    new ArrayBasedMapData(keys, values)
  }
  // Only MapData is ever produced by `serialize`; anything else fails with
  // a MatchError on purpose.
  override def deserialize(datum: Any): UngroupableData = datum match {
    case data: MapData =>
      val keys = data.keyArray().array
      val values = data.valueArray().array
      assert(keys.length == values.length)
      // Backing arrays are erased to AnyRef; the cast restores the
      // Map[Int, Int] this UDT always serializes.
      UngroupableData(keys.zip(values).toMap.asInstanceOf[Map[Int, Int]])
  }
  override def userClass: Class[UngroupableData] = classOf[UngroupableData]
  private[spark] override def asNullable: UngroupableUDT = this
}
// Minimal unevaluable expression used to exercise implicit-cast type
// checking; `inputTypes` drives the "requires ... type" error messages
// asserted by the tests below.
case class TestFunction(
    children: Seq[Expression],
    inputTypes: Seq[AbstractDataType])
  extends Expression with ImplicitCastInputTypes with Unevaluable {
  override def nullable: Boolean = true
  override def dataType: DataType = StringType
  override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression =
    copy(children = newChildren)
}
// A leaf plan that is permanently unresolved, used to check that analysis
// rejects any plan still containing unresolved nodes.
case class UnresolvedTestPlan() extends LeafNode {
  override lazy val resolved = false
  override def output: Seq[Attribute] = Nil
}
class AnalysisErrorSuite extends AnalysisTest {
import TestRelations._
  /**
   * Registers a test asserting that analyzing `plan` fails and that the
   * resulting error message contains every fragment in `errorMessages`.
   */
  def errorTest(
      name: String,
      plan: LogicalPlan,
      errorMessages: Seq[String],
      caseSensitive: Boolean = true): Unit = {
    test(name) {
      assertAnalysisError(plan, errorMessages, caseSensitive)
    }
  }
  // Shared null date literal used by the type-mismatch cases below.
  val dateLit = Literal.create(null, DateType)
errorTest(
"scalar subquery with 2 columns",
testRelation.select(
(ScalarSubquery(testRelation.select($"a", dateLit.as("b"))) + Literal(1)).as("a")),
"Scalar subquery must return only one column, but got 2" :: Nil)
errorTest(
"scalar subquery with no column",
testRelation.select(ScalarSubquery(LocalRelation()).as("a")),
"Scalar subquery must return only one column, but got 0" :: Nil)
errorTest(
"single invalid type, single arg",
testRelation.select(TestFunction(dateLit :: Nil, IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE))" :: "argument 1" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"single invalid type, second arg",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, DateType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"multiple invalid type",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, IntegerType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 1" :: "argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"invalid window function",
testRelation2.select(
WindowExpression(
Literal(0),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"not supported within a window function" :: Nil)
errorTest(
"distinct aggregate function in window",
testRelation2.select(
WindowExpression(
AggregateExpression(Count(UnresolvedAttribute("b")), Complete, isDistinct = true),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"Distinct window functions are not supported" :: Nil)
errorTest(
"window aggregate function with filter predicate",
testRelation2.select(
WindowExpression(
AggregateExpression(
Count(UnresolvedAttribute("b")),
Complete,
isDistinct = false,
filter = Some(UnresolvedAttribute("b") > 1)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"window aggregate function with filter predicate is not supported" :: Nil
)
errorTest(
"distinct function",
CatalystSqlParser.parsePlan("SELECT hex(DISTINCT a) FROM TaBlE"),
"Function hex does not support DISTINCT" :: Nil)
errorTest(
"non aggregate function with filter predicate",
CatalystSqlParser.parsePlan("SELECT hex(a) FILTER (WHERE c = 1) FROM TaBlE2"),
"Function hex does not support FILTER clause" :: Nil)
errorTest(
"distinct window function",
CatalystSqlParser.parsePlan("SELECT percent_rank(DISTINCT a) OVER () FROM TaBlE"),
"Function percent_rank does not support DISTINCT" :: Nil)
errorTest(
"window function with filter predicate",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) FILTER (WHERE c > 1) OVER () FROM TaBlE2"),
"Function percent_rank does not support FILTER clause" :: Nil)
errorTest(
"higher order function with filter predicate",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"FILTER (WHERE c > 1)"),
"FILTER predicate specified, but aggregate is not an aggregate function" :: Nil)
errorTest(
"non-deterministic filter predicate in aggregate functions",
CatalystSqlParser.parsePlan("SELECT count(a) FILTER (WHERE rand(int(c)) > 1) FROM TaBlE2"),
"FILTER expression is non-deterministic, it cannot be used in aggregate functions" :: Nil)
errorTest(
"function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT hex(a) IGNORE NULLS FROM TaBlE2"),
"Function hex does not support IGNORE NULLS" :: Nil)
errorTest(
"some window function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) IGNORE NULLS FROM TaBlE2"),
"Function percent_rank does not support IGNORE NULLS" :: Nil)
errorTest(
"aggregate function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT count(a) IGNORE NULLS FROM TaBlE2"),
"Function count does not support IGNORE NULLS" :: Nil)
errorTest(
"higher order function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"IGNORE NULLS"), "Function aggregate does not support IGNORE NULLS" :: Nil)
errorTest(
"nested aggregate functions",
testRelation.groupBy($"a")(
AggregateExpression(
Max(AggregateExpression(Count(Literal(1)), Complete, isDistinct = false)),
Complete,
isDistinct = false)),
"not allowed to use an aggregate function in the argument of another aggregate function." :: Nil
)
errorTest(
"offset window function",
testRelation2.select(
WindowExpression(
new Lead(UnresolvedAttribute("b")),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RangeFrame, Literal(1), Literal(2)))).as("window")),
"Cannot specify window frame for lead function" :: Nil)
errorTest(
"the offset of nth_value window function is negative or zero",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(0)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"The 'offset' argument of nth_value must be greater than zero but it is 0." :: Nil)
errorTest(
"the offset of nth_value window function is not int literal",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(true)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"argument 2 requires int type, however, 'true' is of boolean type." :: Nil)
errorTest(
"too many generators",
listRelation.select(Explode($"list").as("a"), Explode($"list").as("b")),
"only one generator" :: "explode" :: Nil)
errorTest(
"unresolved attributes",
testRelation.select($"abcd"),
"cannot resolve" :: "abcd" :: Nil)
errorTest(
"unresolved attributes with a generated name",
testRelation2.groupBy($"a")(max($"b"))
.where(sum($"b") > 0)
.orderBy($"havingCondition".asc),
"cannot resolve" :: "havingCondition" :: Nil)
errorTest(
"unresolved star expansion in max",
testRelation2.groupBy($"a")(sum(UnresolvedStar(None))),
"Invalid usage of '*'" :: "in expression 'sum'" :: Nil)
errorTest(
"sorting by unsupported column types",
mapRelation.orderBy($"map".asc),
"sort" :: "type" :: "map<int,int>" :: Nil)
errorTest(
"sorting by attributes are not from grouping expressions",
testRelation2.groupBy($"a", $"c")($"a", $"c", count($"a").as("a3")).orderBy($"b".asc),
"cannot resolve" :: "'b'" :: "given input columns" :: "[a, a3, c]" :: Nil)
errorTest(
"non-boolean filters",
testRelation.where(Literal(1)),
"filter" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"non-boolean join conditions",
testRelation.join(testRelation, condition = Some(Literal(1))),
"condition" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"missing group by",
testRelation2.groupBy($"a")($"b"),
"'b'" :: "group by" :: Nil
)
errorTest(
"ambiguous field",
nestedRelation.select($"top.duplicateField"),
"Ambiguous reference to fields" :: "duplicateField" :: Nil,
caseSensitive = false)
errorTest(
"ambiguous field due to case insensitivity",
nestedRelation.select($"top.differentCase"),
"Ambiguous reference to fields" :: "differentCase" :: "differentcase" :: Nil,
caseSensitive = false)
errorTest(
"missing field",
nestedRelation2.select($"top.c"),
"No such struct field" :: "aField" :: "bField" :: "cField" :: Nil,
caseSensitive = false)
errorTest(
"catch all unresolved plan",
UnresolvedTestPlan(),
"unresolved" :: Nil)
errorTest(
"union with unequal number of columns",
testRelation.union(testRelation2),
"union" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"intersect with unequal number of columns",
testRelation.intersect(testRelation2, isAll = false),
"intersect" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"except with unequal number of columns",
testRelation.except(testRelation2, isAll = false),
"except" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"union with incompatible column types",
testRelation.union(nestedRelation),
"union" :: "the compatible column types" :: Nil)
errorTest(
"union with a incompatible column type and compatible column types",
testRelation3.union(testRelation4),
"union" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"intersect with incompatible column types",
testRelation.intersect(nestedRelation, isAll = false),
"intersect" :: "the compatible column types" :: Nil)
errorTest(
"intersect with a incompatible column type and compatible column types",
testRelation3.intersect(testRelation4, isAll = false),
"intersect" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"except with incompatible column types",
testRelation.except(nestedRelation, isAll = false),
"except" :: "the compatible column types" :: Nil)
errorTest(
"except with a incompatible column type and compatible column types",
testRelation3.except(testRelation4, isAll = false),
"except" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"SPARK-9955: correct error message for aggregate",
// When parse SQL string, we will wrap aggregate expressions with UnresolvedAlias.
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"cannot resolve 'bad_column'" :: Nil)
errorTest(
"slide duration greater than window in time window",
testRelation2.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "2 second", "0 second").as("window")),
s"The slide duration " :: " must be less than or equal to the windowDuration " :: Nil
)
errorTest(
"start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"negative window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "-1 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "0 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"negative slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "-1 second", "0 second").as("window")),
"The slide duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "0 second", "0 second").as("window")),
"The slide duration" :: " must be greater than 0." :: Nil
)
errorTest(
"generator nested in expressions",
listRelation.select(Explode($"list") + 1),
"Generators are not supported when it's nested in expressions, but got: (explode(list) + 1)"
:: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators",
{
val nestedListRelation = LocalRelation(
AttributeReference("nestedList", ArrayType(ArrayType(IntegerType)))())
nestedListRelation.select(Explode(Explode($"nestedList")))
},
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(nestedList))" :: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators for aggregates",
testRelation.select(Explode(Explode(
CreateArray(CreateArray(min($"a") :: max($"a") :: Nil) :: Nil)))),
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(array(array(min(a), max(a)))))" :: Nil
)
errorTest(
"generator nested in expressions for aggregates",
testRelation.select(Explode(CreateArray(min($"a") :: max($"a") :: Nil)) + 1),
"Generators are not supported when it's nested in expressions, but got: " +
"(explode(array(min(a), max(a))) + 1)" :: Nil
)
errorTest(
"generator appears in operator which is not Project",
listRelation.sortBy(Explode($"list").asc),
"Generators are not supported outside the SELECT clause, but got: Sort" :: Nil
)
errorTest(
"an evaluated limit class must not be null",
testRelation.limit(Literal(null, IntegerType)),
"The evaluated limit expression must not be null, but got " :: Nil
)
errorTest(
"num_rows in limit clause must be equal to or greater than 0",
listRelation.limit(-1),
"The limit expression must be equal to or greater than 0, but got -1" :: Nil
)
errorTest(
"more than one generators in SELECT",
listRelation.select(Explode($"list"), Explode($"list")),
"Only one generator allowed per select clause but found 2: explode(list), explode(list)" :: Nil
)
errorTest(
"more than one generators for aggregates in SELECT",
testRelation.select(Explode(CreateArray(min($"a") :: Nil)),
Explode(CreateArray(max($"a") :: Nil))),
"Only one generator allowed per select clause but found 2: " +
"explode(array(min(a))), explode(array(max(a)))" :: Nil
)
  test("SPARK-6452 regression test") {
    // CheckAnalysis should throw AnalysisException when Aggregate contains missing attribute(s)
    // Since we manually construct the logical plan at here and Sum only accept
    // LongType, DoubleType, and DecimalType. We use LongType as the type of a.
    val attrA = AttributeReference("a", LongType)(exprId = ExprId(1))
    val otherA = AttributeReference("a", LongType)(exprId = ExprId(2))
    val attrC = AttributeReference("c", LongType)(exprId = ExprId(3))
    // Aggregates over attrA/attrC, but the child only provides otherA (same
    // name "a", different ExprId) — so both aggregated attributes are missing.
    val aliases = Alias(sum(attrA), "b")() :: Alias(sum(attrC), "d")() :: Nil
    val plan = Aggregate(
      Nil,
      aliases,
      LocalRelation(otherA))
    // The plan is structurally resolved; only CheckAnalysis catches the
    // dangling attribute references.
    assert(plan.resolved)
    val resolved = s"${attrA.toString},${attrC.toString}"
    val errorMsg = s"Resolved attribute(s) $resolved missing from ${otherA.toString} " +
      s"in operator !Aggregate [${aliases.mkString(", ")}]. " +
      s"Attribute(s) with the same name appear in the operation: a. " +
      "Please check if the right attribute(s) are used."
    assertAnalysisError(plan, errorMsg :: Nil)
  }
  // Joining a relation with itself duplicates attribute ids, which analysis
  // must flag as conflicting references rather than silently resolve.
  test("error test for self-join") {
    val join = Join(testRelation, testRelation, Cross, None, JoinHint.NONE)
    val error = intercept[AnalysisException] {
      SimpleAnalyzer.checkAnalysis(join)
    }
    assert(error.message.contains("Failure when resolving conflicting references in Join"))
    assert(error.message.contains("Conflicting attributes"))
  }
test("check grouping expression data types") {
def checkDataType(dataType: DataType, shouldSuccess: Boolean): Unit = {
val plan =
Aggregate(
AttributeReference("a", dataType)(exprId = ExprId(2)) :: Nil,
Alias(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", dataType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
if (shouldSuccess) {
assertAnalysisSuccess(plan, true)
} else {
assertAnalysisError(plan, "expression a cannot be used as a grouping expression" :: Nil)
}
}
val supportedDataTypes = Seq(
StringType, BinaryType,
NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", StringType, nullable = true),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", ArrayType(BooleanType, containsNull = true), nullable = true),
new GroupableUDT())
supportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = true)
}
val unsupportedDataTypes = Seq(
MapType(StringType, LongType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", MapType(StringType, LongType), nullable = true),
new UngroupableUDT())
unsupportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = false)
}
}
test("we should fail analysis when we find nested aggregate functions") {
val plan =
Aggregate(
AttributeReference("a", IntegerType)(exprId = ExprId(2)) :: Nil,
Alias(sum(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1)))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", IntegerType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
assertAnalysisError(
plan,
"It is not allowed to use an aggregate function in the argument of " +
"another aggregate function." :: Nil)
}
test("Join can work on binary types but can't work on map types") {
val left = LocalRelation(Symbol("a").binary, Symbol("b").map(StringType, StringType))
val right = LocalRelation(Symbol("c").binary, Symbol("d").map(StringType, StringType))
val plan1 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("a") === Symbol("c")))
assertAnalysisSuccess(plan1)
val plan2 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("b") === Symbol("d")))
assertAnalysisError(plan2, "EqualTo does not support ordering on type map" :: Nil)
}
  // IN/EXISTS predicate subqueries are only legal in Filter; using one in a
  // Project list must fail analysis.
  test("PredicateSubQuery is used outside of a filter") {
    val a = AttributeReference("a", IntegerType)()
    val b = AttributeReference("b", IntegerType)()
    val plan = Project(
      Seq(a, Alias(InSubquery(Seq(a), ListQuery(LocalRelation(b))), "c")()),
      LocalRelation(a))
    assertAnalysisError(plan, "Predicate sub-queries can only be used" +
      " in Filter" :: Nil)
  }
test("PredicateSubQuery correlated predicate is nested in an illegal plan") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val plan1 = Filter(
Exists(
Join(
LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LeftOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan1, "Accessing outer query column is not allowed in" :: Nil)
val plan2 = Filter(
Exists(
Join(
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LocalRelation(b),
RightOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan2, "Accessing outer query column is not allowed in" :: Nil)
val plan3 = Filter(
Exists(Union(LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)))),
LocalRelation(a))
assertAnalysisError(plan3, "Accessing outer query column is not allowed in" :: Nil)
val plan4 = Filter(
Exists(
Limit(1,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b)))
),
LocalRelation(a))
assertAnalysisError(plan4, "Accessing outer query column is not allowed in" :: Nil)
val plan5 = Filter(
Exists(
Sample(0.0, 0.5, false, 1L,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b))).select("b")
),
LocalRelation(a))
assertAnalysisError(plan5,
"Accessing outer query column is not allowed in" :: Nil)
}
test("Error on filter condition containing aggregate expressions") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val plan = Filter(Symbol("a") === UnresolvedFunction("max", Seq(b), true), LocalRelation(a, b))
assertAnalysisError(plan,
"Aggregate/Window/Generate expressions are not valid in where clause of the query" :: Nil)
}
test("SPARK-30811: CTE should not cause stack overflow when " +
"it refers to non-existent table with same name") {
val plan = UnresolvedWith(
UnresolvedRelation(TableIdentifier("t")),
Seq("t" -> SubqueryAlias("t",
Project(
Alias(Literal(1), "x")() :: Nil,
UnresolvedRelation(TableIdentifier("t", Option("nonexist")))))))
assertAnalysisError(plan, "Table or view not found:" :: Nil)
}
test("SPARK-33909: Check rand functions seed is legal at analyer side") {
Seq(Rand("a".attr), Randn("a".attr)).foreach { r =>
val plan = Project(Seq(r.as("r")), testRelation)
assertAnalysisError(plan,
s"Input argument to ${r.prettyName} must be a constant." :: Nil)
}
Seq(Rand(1.0), Rand("1"), Randn("a")).foreach { r =>
val plan = Project(Seq(r.as("r")), testRelation)
assertAnalysisError(plan,
s"data type mismatch: argument 1 requires (int or bigint) type" :: Nil)
}
}
test("SPARK-34946: correlated scalar subquery in grouping expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
val plan = Aggregate(
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
) :: Nil,
sum($"c2").as("sum") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subqueries in the group by clause must also be " +
"in the aggregate expressions" :: Nil)
}
test("SPARK-34946: correlated scalar subquery in aggregate expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
val plan = Aggregate(
$"c1" :: Nil,
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
).as("sub") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subquery 'scalarsubquery(t1.c1)' is " +
"neither present in the group by, nor in an aggregate function. Add it to group by " +
"using ordinal position or wrap it in first() (or first_value) if you don't care " +
"which value you get." :: Nil)
}
errorTest(
"SC-69611: error code to error message",
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"cannot resolve 'bad_column' given input columns: [a, b, c, d, e]" :: Nil)
test("SPARK-35080: Unsupported correlated equality predicates in subquery") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val t1 = LocalRelation(a, b)
val t2 = LocalRelation(c)
val conditions = Seq(
(abs($"a") === $"c", "abs(a) = outer(c)"),
(abs($"a") <=> $"c", "abs(a) <=> outer(c)"),
($"a" + 1 === $"c", "(a + 1) = outer(c)"),
($"a" + $"b" === $"c", "(a + b) = outer(c)"),
($"a" + $"c" === $"b", "(a + outer(c)) = b"),
(And($"a" === $"c", Cast($"a", IntegerType) === $"c"), "CAST(a AS INT) = outer(c)"))
conditions.foreach { case (cond, msg) =>
val plan = Project(
ScalarSubquery(
Aggregate(Nil, count(Literal(1)).as("cnt") :: Nil,
Filter(cond, t1))
).as("sub") :: Nil,
t2)
assertAnalysisError(plan, s"Correlated column is not allowed in predicate ($msg)" :: Nil)
}
}
  // Unknown hints are normally dropped during analysis; if an UnresolvedHint
  // somehow survives, CheckAnalysis must reject the plan.
  test("SPARK-35673: fail if the plan still contains UnresolvedHint after analysis") {
    val hintName = "some_random_hint_that_does_not_exist"
    val plan = UnresolvedHint(hintName, Seq.empty,
      Project(Alias(Literal(1), "x")() :: Nil, OneRowRelation())
    )
    assert(plan.resolved)
    val error = intercept[AnalysisException] {
      SimpleAnalyzer.checkAnalysis(plan)
    }
    assert(error.message.contains(s"Hint not found: ${hintName}"))
    // UnresolvedHint be removed by batch `Remove Unresolved Hints`
    assertAnalysisSuccess(plan, true)
  }
test("SPARK-35618: Resolve star expressions in subqueries") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val t0 = OneRowRelation()
val t1 = LocalRelation(a, b).as("t1")
// t1.* in the subquery should be resolved into outer(t1.a) and outer(t1.b).
assertAnalysisError(
Project(ScalarSubquery(t0.select(star("t1"))).as("sub") :: Nil, t1),
"Scalar subquery must return only one column, but got 2" :: Nil)
// t2.* cannot be resolved and the error should be the initial analysis exception.
assertAnalysisError(
Project(ScalarSubquery(t0.select(star("t2"))).as("sub") :: Nil, t1),
"cannot resolve 't2.*' given input columns ''" :: Nil
)
}
test("SPARK-35618: Invalid star usage in subqueries") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val t1 = LocalRelation(a, b).as("t1")
val t2 = LocalRelation(b, c).as("t2")
// SELECT * FROM t1 WHERE a = (SELECT sum(c) FROM t2 WHERE t1.* = t2.b)
assertAnalysisError(
Filter(EqualTo(a, ScalarSubquery(t2.select(sum(c)).where(star("t1") === b))), t1),
"Invalid usage of '*' in Filter" :: Nil
)
// SELECT * FROM t1 JOIN t2 ON (EXISTS (SELECT 1 FROM t2 WHERE t1.* = b))
assertAnalysisError(
t1.join(t2, condition = Some(Exists(t2.select(1).where(star("t1") === b)))),
"Invalid usage of '*' in Filter" :: Nil
)
}
test("SPARK-36488: Regular expression expansion should fail with a meaningful message") {
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "true") {
assertAnalysisError(testRelation.select(Divide(UnresolvedRegex(".?", None, false), "a")),
s"Invalid usage of regular expression '.?' in" :: Nil)
assertAnalysisError(testRelation.select(
Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".*", None, false))),
s"Invalid usage of regular expressions '.?', '.*' in" :: Nil)
assertAnalysisError(testRelation.select(
Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".?", None, false))),
s"Invalid usage of regular expression '.?' in" :: Nil)
assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), "a")),
"Invalid usage of '*' in" :: Nil)
assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), UnresolvedStar(None))),
"Invalid usage of '*' in" :: Nil)
assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None),
UnresolvedRegex(".?", None, false))),
"Invalid usage of '*' and regular expression '.?' in" :: Nil)
assertAnalysisError(testRelation.select(Least(Seq(UnresolvedStar(None),
UnresolvedRegex(".*", None, false), UnresolvedRegex(".?", None, false)))),
"Invalid usage of '*' and regular expressions '.*', '.?' in" :: Nil)
}
}
}
| chuckchen/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala | Scala | apache-2.0 | 34,157 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import monix.reactive.Observable.Operator
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
/** Operator that swaps the stream's normal completion for a failure:
  * `onNext` elements and upstream `onError` events are forwarded untouched,
  * but when the source completes normally the downstream subscriber receives
  * `onError(error)` instead of `onComplete`.
  */
private[reactive] final class EndWithErrorOperator[A](error: Throwable) extends Operator[A, A] {
  def apply(out: Subscriber[A]): Subscriber[A] =
    new Subscriber[A] {
      // Reuse the downstream subscriber's scheduler for this proxy.
      implicit val scheduler = out.scheduler

      // Plain pass-through for elements; back-pressure Ack is forwarded as-is.
      def onNext(a: A): Future[Ack] =
        out.onNext(a)

      // Upstream failures win over the configured error and are forwarded unchanged.
      def onError(ex: Throwable): Unit =
        out.onError(ex)

      // The single behavioral twist: normal completion becomes the configured error.
      def onComplete(): Unit =
        out.onError(error)
    }
}
| monifu/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/EndWithErrorOperator.scala | Scala | apache-2.0 | 1,251 |
package com.sksamuel.elastic4s.searches.suggestion
import com.sksamuel.elastic4s.json.XContentFactory
import com.sksamuel.elastic4s.script.Script
import com.sksamuel.exts.OptionImplicits._
/**
  * Immutable request-side model for an Elasticsearch phrase suggester.
  *
  * Only `name` and `fieldname` are required; every other field is optional and
  * is omitted from the request unless set. The fluent setters below return
  * updated copies rather than mutating this instance.
  *
  * @param name      identifier under which the suggestion appears in the response
  * @param fieldname the document field the suggester runs against
  */
case class PhraseSuggestion(name: String,
                            fieldname: String,
                            analyzer: Option[String] = None,
                            collateParams: Map[String, AnyRef] = Map.empty,
                            collatePrune: Option[Boolean] = None,
                            collateQuery: Option[Script] = None,
                            confidence: Option[Float] = None,
                            forceUnigrams: Option[Boolean] = None,
                            gramSize: Option[Int] = None,
                            preTag: Option[String] = None,
                            postTag: Option[String] = None,
                            maxErrors: Option[Float] = None,
                            realWordErrorLikelihood: Option[Float] = None,
                            separator: Option[String] = None,
                            tokenLimit: Option[Int] = None,
                            size: Option[Int] = None,
                            shardSize: Option[Int] = None,
                            text: Option[String] = None)
    extends Suggestion {

  override def analyzer(analyzer: String): PhraseSuggestion = copy(analyzer = analyzer.some)
  override def text(text: String): PhraseSuggestion = copy(text = text.some)
  override def size(size: Int): PhraseSuggestion = copy(size = size.some)
  override def shardSize(shardSize: Int): PhraseSuggestion = copy(shardSize = shardSize.some)

  //  def addCandidateGenerator(generator: CandidateGenerator): PhraseSuggestionDefinition =
  //    copy(candidateGenerator = generator.some)

  /** Parameters passed verbatim to the collate query template. */
  def collateParams(collateParams: Map[String, AnyRef]): PhraseSuggestion =
    copy(collateParams = collateParams)

  def collatePrune(collatePrune: Boolean): PhraseSuggestion = copy(collatePrune = collatePrune.some)

  /** Script used to prune suggestions that would match no documents. */
  def collateQuery(collateQuery: Script): PhraseSuggestion = copy(collateQuery = collateQuery.some)

  /**
    * Convenience overload that builds a one-clause collate query of the form
    * `{ queryType: { "{{fieldVariable}}": "{{suggestionVariable}}" } }` and wraps
    * it in a [[Script]] template.
    */
  def collateQuery(queryType: String, fieldVariable: String, suggestionVariable: String): PhraseSuggestion = {
    val collateQueryAsJson = XContentFactory
      .jsonBuilder()
      .startObject()
      .startObject(queryType)
      .field(s"{{$fieldVariable}}", s"{{$suggestionVariable}}")
      .endObject()
      .endObject()
      .string()
    val template = Script(collateQueryAsJson)
    collateQuery(template)
  }

  def confidence(c: Float): PhraseSuggestion = copy(confidence = c.some)
  def forceUnigrams(forceUnigrams: Boolean): PhraseSuggestion = copy(forceUnigrams = forceUnigrams.some)
  def gramSize(gramSize: Int): PhraseSuggestion = copy(gramSize = gramSize.some)

  /** Sets the pre/post tags used to highlight changed tokens in the suggestion. */
  def highlight(preTag: String, postTag: String): PhraseSuggestion =
    copy(preTag = preTag.some, postTag = postTag.some)

  // NOTE(review): this overload merely duplicates gramSize and never touched the
  // preTag/postTag fields (apparent copy/paste bug). It is kept for source
  // compatibility; prefer highlight(preTag, postTag) above.
  def highlight(gramSize: Int): PhraseSuggestion = copy(gramSize = gramSize.some)

  def maxErrors(f: Float): PhraseSuggestion = copy(maxErrors = f.some)
  def realWordErrorLikelihood(f: Float): PhraseSuggestion = copy(realWordErrorLikelihood = f.some)
  def separator(str: String): PhraseSuggestion = copy(separator = str.some)

  //  def smoothingModel(smoothingModel: SmoothingModel): PhraseSuggestionDefinition =
  //    copy(smoothingModel = smoothingModel.some)

  def tokenLimit(tokenLimit: Int): PhraseSuggestion = copy(tokenLimit = tokenLimit.some)
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/suggestion/PhraseSuggestion.scala | Scala | apache-2.0 | 3,381 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.DataInputStream
import java.util
import java.util.Properties
import kafka.api.KAFKA_0_11_0_IV2
import kafka.log.LogConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{MemoryRecords, Record, RecordBatch}
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConverters._
import scala.util.Random
/**
* Subclasses of `BaseConsumerTest` exercise the consumer and fetch request/response. This class
* complements those classes with tests that require lower-level access to the protocol.
*/
class FetchRequestTest extends BaseRequestTest {

  // Shared producer; created on demand by initProducer() and closed in tearDown().
  private var producer: KafkaProducer[String, String] = null

  override def tearDown() {
    if (producer != null)
      producer.close()
    super.tearDown()
  }

  // Builds a consumer fetch request with both a total-response and a per-partition byte limit.
  private def createFetchRequest(maxResponseBytes: Int, maxPartitionBytes: Int, topicPartitions: Seq[TopicPartition],
                                 offsetMap: Map[TopicPartition, Long]): FetchRequest =
    FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(maxPartitionBytes, topicPartitions, offsetMap))
      .setMaxBytes(maxResponseBytes).build()

  // Uses a LinkedHashMap so the partition order of the request is preserved exactly
  // as given (several tests below assert on response ordering).
  private def createPartitionMap(maxPartitionBytes: Int, topicPartitions: Seq[TopicPartition],
                                 offsetMap: Map[TopicPartition, Long] = Map.empty): util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] = {
    val partitionMap = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
    topicPartitions.foreach { tp =>
      partitionMap.put(tp, new FetchRequest.PartitionData(offsetMap.getOrElse(tp, 0), 0L, maxPartitionBytes))
    }
    partitionMap
  }

  // Sends the request over a raw socket to the given broker and parses the response
  // at the same protocol version the request was built with.
  private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse[MemoryRecords] = {
    val response = connectAndSend(request, ApiKeys.FETCH, destination = brokerSocketServer(leaderId))
    FetchResponse.parse(response, request.version)
  }

  private def initProducer(): Unit = {
    producer = TestUtils.createProducer(TestUtils.getBrokerListStrFromServers(servers),
      retries = 5, keySerializer = new StringSerializer, valueSerializer = new StringSerializer)
  }

  // Verifies that the broker honors both the per-partition and total-response byte
  // limits, and that partitions are returned in the order they were requested.
  @Test
  def testBrokerRespectsPartitionsOrderAndSizeLimits(): Unit = {
    initProducer()
    val messagesPerPartition = 9
    val maxResponseBytes = 800
    val maxPartitionBytes = 190
    def createFetchRequest(topicPartitions: Seq[TopicPartition], offsetMap: Map[TopicPartition, Long] = Map.empty): FetchRequest =
      this.createFetchRequest(maxResponseBytes, maxPartitionBytes, topicPartitions, offsetMap)
    val topicPartitionToLeader = createTopics(numTopics = 5, numPartitions = 6)
    // Fixed seed so the shuffles below are reproducible across runs.
    val random = new Random(0)
    val topicPartitions = topicPartitionToLeader.keySet
    produceData(topicPartitions, messagesPerPartition)
    val leaderId = servers.head.config.brokerId
    val partitionsForLeader = topicPartitionToLeader.toVector.collect {
      case (tp, partitionLeaderId) if partitionLeaderId == leaderId => tp
    }
    // Reserve two partitions for messages that exceed the partition / response limits.
    val partitionsWithLargeMessages = partitionsForLeader.takeRight(2)
    val partitionWithLargeMessage1 = partitionsWithLargeMessages.head
    val partitionWithLargeMessage2 = partitionsWithLargeMessages(1)
    producer.send(new ProducerRecord(partitionWithLargeMessage1.topic, partitionWithLargeMessage1.partition,
      "larger than partition limit", new String(new Array[Byte](maxPartitionBytes + 1)))).get
    producer.send(new ProducerRecord(partitionWithLargeMessage2.topic, partitionWithLargeMessage2.partition,
      "larger than response limit", new String(new Array[Byte](maxResponseBytes + 1)))).get
    val partitionsWithoutLargeMessages = partitionsForLeader.filterNot(partitionsWithLargeMessages.contains)
    // 1. Partitions with large messages at the end
    val shuffledTopicPartitions1 = random.shuffle(partitionsWithoutLargeMessages) ++ partitionsWithLargeMessages
    val fetchRequest1 = createFetchRequest(shuffledTopicPartitions1)
    val fetchResponse1 = sendFetchRequest(leaderId, fetchRequest1)
    checkFetchResponse(shuffledTopicPartitions1, fetchResponse1, maxPartitionBytes, maxResponseBytes, messagesPerPartition)
    // 2. Same as 1, but shuffled again
    val shuffledTopicPartitions2 = random.shuffle(partitionsWithoutLargeMessages) ++ partitionsWithLargeMessages
    val fetchRequest2 = createFetchRequest(shuffledTopicPartitions2)
    val fetchResponse2 = sendFetchRequest(leaderId, fetchRequest2)
    checkFetchResponse(shuffledTopicPartitions2, fetchResponse2, maxPartitionBytes, maxResponseBytes, messagesPerPartition)
    // 3. Partition with message larger than the partition limit at the start of the list
    val shuffledTopicPartitions3 = Seq(partitionWithLargeMessage1, partitionWithLargeMessage2) ++
      random.shuffle(partitionsWithoutLargeMessages)
    val fetchRequest3 = createFetchRequest(shuffledTopicPartitions3, Map(partitionWithLargeMessage1 -> messagesPerPartition))
    val fetchResponse3 = sendFetchRequest(leaderId, fetchRequest3)
    assertEquals(shuffledTopicPartitions3, fetchResponse3.responseData.keySet.asScala.toSeq)
    val responseSize3 = fetchResponse3.responseData.asScala.values.map { partitionData =>
      records(partitionData).map(_.sizeInBytes).sum
    }.sum
    assertTrue(responseSize3 <= maxResponseBytes)
    val partitionData3 = fetchResponse3.responseData.get(partitionWithLargeMessage1)
    assertEquals(Errors.NONE, partitionData3.error)
    assertTrue(partitionData3.highWatermark > 0)
    val size3 = records(partitionData3).map(_.sizeInBytes).sum
    assertTrue(s"Expected $size3 to be smaller than $maxResponseBytes", size3 <= maxResponseBytes)
    assertTrue(s"Expected $size3 to be larger than $maxPartitionBytes", size3 > maxPartitionBytes)
    assertTrue(maxPartitionBytes < partitionData3.records.sizeInBytes)
    // 4. Partition with message larger than the response limit at the start of the list
    val shuffledTopicPartitions4 = Seq(partitionWithLargeMessage2, partitionWithLargeMessage1) ++
      random.shuffle(partitionsWithoutLargeMessages)
    val fetchRequest4 = createFetchRequest(shuffledTopicPartitions4, Map(partitionWithLargeMessage2 -> messagesPerPartition))
    val fetchResponse4 = sendFetchRequest(leaderId, fetchRequest4)
    assertEquals(shuffledTopicPartitions4, fetchResponse4.responseData.keySet.asScala.toSeq)
    val nonEmptyPartitions4 = fetchResponse4.responseData.asScala.toSeq.collect {
      case (tp, partitionData) if records(partitionData).map(_.sizeInBytes).sum > 0 => tp
    }
    assertEquals(Seq(partitionWithLargeMessage2), nonEmptyPartitions4)
    val partitionData4 = fetchResponse4.responseData.get(partitionWithLargeMessage2)
    assertEquals(Errors.NONE, partitionData4.error)
    assertTrue(partitionData4.highWatermark > 0)
    val size4 = records(partitionData4).map(_.sizeInBytes).sum
    assertTrue(s"Expected $size4 to be larger than $maxResponseBytes", size4 > maxResponseBytes)
    assertTrue(maxResponseBytes < partitionData4.records.sizeInBytes)
  }

  // A record larger than maxPartitionBytes fetched with protocol v2: the response
  // buffer is exactly maxPartitionBytes yet contains zero complete records.
  @Test
  def testFetchRequestV2WithOversizedMessage(): Unit = {
    initProducer()
    val maxPartitionBytes = 200
    val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head
    producer.send(new ProducerRecord(topicPartition.topic, topicPartition.partition,
      "key", new String(new Array[Byte](maxPartitionBytes + 1)))).get
    val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(maxPartitionBytes,
      Seq(topicPartition))).build(2)
    val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
    val partitionData = fetchResponse.responseData.get(topicPartition)
    assertEquals(Errors.NONE, partitionData.error)
    assertTrue(partitionData.highWatermark > 0)
    assertEquals(maxPartitionBytes, partitionData.records.sizeInBytes)
    assertEquals(0, records(partitionData).map(_.sizeInBytes).sum)
  }

  @Test
  def testFetchRequestToNonReplica(): Unit = {
    val topic = "topic"
    val partition = 0
    val topicPartition = new TopicPartition(topic, partition)
    // Create a single-partition topic and find a broker which is not the leader
    val partitionToLeader = TestUtils.createTopic(zkClient, topic, numPartitions = 1, 1, servers)
    val leader = partitionToLeader(partition)
    val nonReplicaOpt = servers.find(_.config.brokerId != leader)
    assertTrue(nonReplicaOpt.isDefined)
    val nonReplicaId = nonReplicaOpt.get.config.brokerId
    // Send the fetch request to the non-replica and verify the error code
    val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(1024,
      Seq(topicPartition))).build()
    val fetchResponse = sendFetchRequest(nonReplicaId, fetchRequest)
    val partitionData = fetchResponse.responseData.get(topicPartition)
    assertEquals(Errors.NOT_LEADER_FOR_PARTITION, partitionData.error)
  }

  /**
    * Tests that down-conversions dont leak memory. Large down conversions are triggered
    * in the server. The client closes its connection after reading partial data when the
    * channel is muted in the server. If buffers are not released this will result in OOM.
    */
  @Test
  def testDownConversionWithConnectionFailure(): Unit = {
    val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head
    val msgValueLen = 100 * 1000
    val batchSize = 4 * msgValueLen
    val propsOverride = new Properties
    propsOverride.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize.toString)
    // lingerMs = Int.MaxValue so batching is controlled purely by BATCH_SIZE_CONFIG.
    val producer = TestUtils.createProducer(TestUtils.getBrokerListStrFromServers(servers),
      retries = 5, lingerMs = Int.MaxValue,
      keySerializer = new StringSerializer, valueSerializer = new ByteArraySerializer, props = Some(propsOverride))
    val bytes = new Array[Byte](msgValueLen)
    val futures = try {
      (0 to 1000).map { _ =>
        producer.send(new ProducerRecord(topicPartition.topic, topicPartition.partition, "key", bytes))
      }
    } finally {
      producer.close()
    }
    // Check futures to ensure sends succeeded, but do this after close since the last
    // batch is not complete, but sent when the producer is closed
    futures.foreach(_.get)
    // Issues a fetch at the given (old) protocol version; when closeAfterPartialResponse
    // is true the socket is closed after reading only the response size prefix,
    // simulating a client that disconnects mid-response.
    def fetch(version: Short, maxPartitionBytes: Int, closeAfterPartialResponse: Boolean): Option[FetchResponse[MemoryRecords]] = {
      val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(maxPartitionBytes,
        Seq(topicPartition))).build(version)
      val socket = connect(brokerSocketServer(leaderId))
      try {
        send(fetchRequest, ApiKeys.FETCH, socket)
        if (closeAfterPartialResponse) {
          // read some data to ensure broker has muted this channel and then close socket
          val size = new DataInputStream(socket.getInputStream).readInt()
          // Check that we have received almost `maxPartitionBytes` (minus a tolerance) since in
          // the case of OOM, the size will be significantly smaller. We can't check for exactly
          // maxPartitionBytes since we use approx message sizes that include only the message value.
          assertTrue(s"Fetch size too small $size, broker may have run out of memory",
            size > maxPartitionBytes - batchSize)
          None
        } else {
          Some(FetchResponse.parse(receive(socket), version))
        }
      } finally {
        socket.close()
      }
    }
    val version = 1.toShort
    // Repeatedly abandon large down-converted fetches; if the broker leaked buffers,
    // the final complete fetch below would fail or return a truncated response.
    (0 to 15).foreach(_ => fetch(version, maxPartitionBytes = msgValueLen * 1000, closeAfterPartialResponse = true))
    val response = fetch(version, maxPartitionBytes = batchSize, closeAfterPartialResponse = false)
    val fetchResponse = response.getOrElse(throw new IllegalStateException("No fetch response"))
    val partitionData = fetchResponse.responseData.get(topicPartition)
    assertEquals(Errors.NONE, partitionData.error)
    val batches = partitionData.records.batches.asScala.toBuffer
    assertEquals(3, batches.size) // size is 3 (not 4) since maxPartitionBytes=msgValueSize*4, excluding key and headers
  }

  /**
    * Ensure that we respect the fetch offset when returning records that were converted from an uncompressed v2
    * record batch to multiple v0/v1 record batches with size 1. If the fetch offset points to inside the record batch,
    * some records have to be dropped during the conversion.
    */
  @Test
  def testDownConversionFromBatchedToUnbatchedRespectsOffset(): Unit = {
    // Increase linger so that we have control over the batches created
    producer = TestUtils.createProducer(TestUtils.getBrokerListStrFromServers(servers),
      retries = 5, keySerializer = new StringSerializer, valueSerializer = new StringSerializer,
      lingerMs = 300 * 1000)
    val topicConfig = Map(LogConfig.MessageFormatVersionProp -> KAFKA_0_11_0_IV2.version)
    val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1, topicConfig).head
    val topic = topicPartition.topic
    // Two explicit batches: records 0-9 and records 10-24.
    val firstBatchFutures = (0 until 10).map(i => producer.send(new ProducerRecord(topic, s"key-$i", s"value-$i")))
    producer.flush()
    val secondBatchFutures = (10 until 25).map(i => producer.send(new ProducerRecord(topic, s"key-$i", s"value-$i")))
    producer.flush()
    firstBatchFutures.foreach(_.get)
    secondBatchFutures.foreach(_.get)
    // Fetches from `fetchOffset` at `requestVersion` and verifies the magic byte,
    // the base offset of the first batch, and the total number of batches consumed.
    def check(fetchOffset: Long, requestVersion: Short, expectedOffset: Long, expectedNumBatches: Int, expectedMagic: Byte): Unit = {
      var batchesReceived = 0
      var currentFetchOffset = fetchOffset
      var currentExpectedOffset = expectedOffset
      // With KIP-283, we might not receive all batches in a single fetch request so loop through till we have consumed
      // all batches we are interested in.
      while (batchesReceived < expectedNumBatches) {
        val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(Int.MaxValue,
          Seq(topicPartition), Map(topicPartition -> currentFetchOffset))).build(requestVersion)
        val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
        // validate response
        val partitionData = fetchResponse.responseData.get(topicPartition)
        assertEquals(Errors.NONE, partitionData.error)
        assertTrue(partitionData.highWatermark > 0)
        val batches = partitionData.records.batches.asScala.toBuffer
        val batch = batches.head
        assertEquals(expectedMagic, batch.magic)
        assertEquals(currentExpectedOffset, batch.baseOffset)
        currentFetchOffset = batches.last.lastOffset + 1
        currentExpectedOffset += (batches.last.lastOffset - batches.head.baseOffset + 1)
        batchesReceived += batches.size
      }
      assertEquals(expectedNumBatches, batchesReceived)
    }
    // down conversion to message format 0, batches of 1 message are returned so we receive the exact offset we requested
    check(fetchOffset = 3, expectedOffset = 3, requestVersion = 1, expectedNumBatches = 22,
      expectedMagic = RecordBatch.MAGIC_VALUE_V0)
    check(fetchOffset = 15, expectedOffset = 15, requestVersion = 1, expectedNumBatches = 10,
      expectedMagic = RecordBatch.MAGIC_VALUE_V0)
    // down conversion to message format 1, batches of 1 message are returned so we receive the exact offset we requested
    check(fetchOffset = 3, expectedOffset = 3, requestVersion = 3, expectedNumBatches = 22,
      expectedMagic = RecordBatch.MAGIC_VALUE_V1)
    check(fetchOffset = 15, expectedOffset = 15, requestVersion = 3, expectedNumBatches = 10,
      expectedMagic = RecordBatch.MAGIC_VALUE_V1)
    // no down conversion, we receive a single batch so the received offset won't necessarily be the same
    check(fetchOffset = 3, expectedOffset = 0, requestVersion = 4, expectedNumBatches = 2,
      expectedMagic = RecordBatch.MAGIC_VALUE_V2)
    check(fetchOffset = 15, expectedOffset = 10, requestVersion = 4, expectedNumBatches = 1,
      expectedMagic = RecordBatch.MAGIC_VALUE_V2)
    // no down conversion, we receive a single batch and the exact offset we requested because it happens to be the
    // offset of the first record in the batch
    check(fetchOffset = 10, expectedOffset = 10, requestVersion = 4, expectedNumBatches = 1,
      expectedMagic = RecordBatch.MAGIC_VALUE_V2)
  }

  /**
    * Test that when an incremental fetch session contains partitions with an error,
    * those partitions are returned in all incremental fetch requests.
    */
  @Test
  def testCreateIncrementalFetchWithPartitionsInError(): Unit = {
    def createFetchRequest(topicPartitions: Seq[TopicPartition],
                           metadata: JFetchMetadata,
                           toForget: Seq[TopicPartition]): FetchRequest =
      FetchRequest.Builder.forConsumer(Int.MaxValue, 0,
        createPartitionMap(Integer.MAX_VALUE, topicPartitions, Map.empty))
        .toForget(toForget.asJava)
        .metadata(metadata)
        .build()
    val foo0 = new TopicPartition("foo", 0)
    val foo1 = new TopicPartition("foo", 1)
    createTopic("foo", Map(0 -> List(0, 1), 1 -> List(0, 2)))
    // "bar" does not exist yet, so bar0 produces UNKNOWN_TOPIC_OR_PARTITION below.
    val bar0 = new TopicPartition("bar", 0)
    val req1 = createFetchRequest(List(foo0, foo1, bar0), JFetchMetadata.INITIAL, Nil)
    val resp1 = sendFetchRequest(0, req1)
    assertEquals(Errors.NONE, resp1.error())
    assertTrue("Expected the broker to create a new incremental fetch session", resp1.sessionId() > 0)
    debug(s"Test created an incremental fetch session ${resp1.sessionId}")
    assertTrue(resp1.responseData().containsKey(foo0))
    assertTrue(resp1.responseData().containsKey(foo1))
    assertTrue(resp1.responseData().containsKey(bar0))
    assertEquals(Errors.NONE, resp1.responseData().get(foo0).error)
    assertEquals(Errors.NONE, resp1.responseData().get(foo1).error)
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, resp1.responseData().get(bar0).error)
    // Incremental follow-up: only the errored partition should be re-sent.
    val req2 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 1), Nil)
    val resp2 = sendFetchRequest(0, req2)
    assertEquals(Errors.NONE, resp2.error())
    assertEquals("Expected the broker to continue the incremental fetch session",
      resp1.sessionId(), resp2.sessionId())
    assertFalse(resp2.responseData().containsKey(foo0))
    assertFalse(resp2.responseData().containsKey(foo1))
    assertTrue(resp2.responseData().containsKey(bar0))
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, resp2.responseData().get(bar0).error)
    // Once the topic exists, bar0 is reported once more (now without error)...
    createTopic("bar", Map(0 -> List(0, 1)))
    val req3 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 2), Nil)
    val resp3 = sendFetchRequest(0, req3)
    assertEquals(Errors.NONE, resp3.error())
    assertFalse(resp3.responseData().containsKey(foo0))
    assertFalse(resp3.responseData().containsKey(foo1))
    assertTrue(resp3.responseData().containsKey(bar0))
    assertEquals(Errors.NONE, resp3.responseData().get(bar0).error)
    // ...and then dropped from subsequent incremental responses like the others.
    val req4 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 3), Nil)
    val resp4 = sendFetchRequest(0, req4)
    assertEquals(Errors.NONE, resp4.error())
    assertFalse(resp4.responseData().containsKey(foo0))
    assertFalse(resp4.responseData().containsKey(foo1))
    assertFalse(resp4.responseData().containsKey(bar0))
  }

  // Flattens a partition's record batches into individual records.
  private def records(partitionData: FetchResponse.PartitionData[MemoryRecords]): Seq[Record] = {
    partitionData.records.records.asScala.toIndexedSeq
  }

  // Asserts the ordering/size invariants of a fetch response: partitions appear in
  // request order, each non-empty partition stays within maxPartitionBytes, and at
  // most a trailing run of partitions is empty once the response limit is reached.
  private def checkFetchResponse(expectedPartitions: Seq[TopicPartition], fetchResponse: FetchResponse[MemoryRecords],
                                 maxPartitionBytes: Int, maxResponseBytes: Int, numMessagesPerPartition: Int): Unit = {
    assertEquals(expectedPartitions, fetchResponse.responseData.keySet.asScala.toSeq)
    var emptyResponseSeen = false
    var responseSize = 0
    var responseBufferSize = 0
    expectedPartitions.foreach { tp =>
      val partitionData = fetchResponse.responseData.get(tp)
      assertEquals(Errors.NONE, partitionData.error)
      assertTrue(partitionData.highWatermark > 0)
      val records = partitionData.records
      responseBufferSize += records.sizeInBytes
      val batches = records.batches.asScala.toIndexedSeq
      assertTrue(batches.size < numMessagesPerPartition)
      val batchesSize = batches.map(_.sizeInBytes).sum
      responseSize += batchesSize
      if (batchesSize == 0 && !emptyResponseSeen) {
        assertEquals(0, records.sizeInBytes)
        emptyResponseSeen = true
      }
      else if (batchesSize != 0 && !emptyResponseSeen) {
        assertTrue(batchesSize <= maxPartitionBytes)
        assertEquals(maxPartitionBytes, records.sizeInBytes)
      }
      else if (batchesSize != 0 && emptyResponseSeen)
        fail(s"Expected partition with size 0, but found $tp with size $batchesSize")
      else if (records.sizeInBytes != 0 && emptyResponseSeen)
        fail(s"Expected partition buffer with size 0, but found $tp with size ${records.sizeInBytes}")
    }
    assertEquals(maxResponseBytes - maxResponseBytes % maxPartitionBytes, responseBufferSize)
    assertTrue(responseSize <= maxResponseBytes)
  }

  // Creates `numTopics` topics ("topic0".."topicN") with replication factor 2 and
  // min.insync.replicas=2, returning the partition -> leader broker-id mapping.
  private def createTopics(numTopics: Int, numPartitions: Int, configs: Map[String, String] = Map.empty): Map[TopicPartition, Int] = {
    val topics = (0 until numTopics).map(t => s"topic$t")
    val topicConfig = new Properties
    topicConfig.setProperty(LogConfig.MinInSyncReplicasProp, 2.toString)
    configs.foreach { case (k, v) => topicConfig.setProperty(k, v) }
    topics.flatMap { topic =>
      val partitionToLeader = createTopic(topic, numPartitions = numPartitions, replicationFactor = 2,
        topicConfig = topicConfig)
      partitionToLeader.map { case (partition, leader) => new TopicPartition(topic, partition) -> leader }
    }.toMap
  }

  // Synchronously produces `numMessagesPerPartition` keyed records to every given
  // partition (blocks on each send) and returns the records that were sent.
  private def produceData(topicPartitions: Iterable[TopicPartition], numMessagesPerPartition: Int): Seq[ProducerRecord[String, String]] = {
    val records = for {
      tp <- topicPartitions.toSeq
      messageIndex <- 0 until numMessagesPerPartition
    } yield {
      val suffix = s"$tp-$messageIndex"
      new ProducerRecord(tp.topic, tp.partition, s"key $suffix", s"value $suffix")
    }
    records.map(producer.send(_).get)
    records
  }
}
| richhaase/kafka | core/src/test/scala/unit/kafka/server/FetchRequestTest.scala | Scala | apache-2.0 | 23,358 |
package org.apache.mesos.chronos.scheduler.jobs
import org.apache.mesos.chronos.scheduler.api.{DependentJobResource, Iso8601JobResource}
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{DateTime, DateTimeZone, Hours, Minutes}
import org.specs2.mock.Mockito
import org.specs2.mutable._
/**
 * Integration-style specs2 tests for the Chronos JobScheduler: schedule
 * advancement, success/error accounting, disabled jobs, dependency-triggered
 * job launches (including softError semantics), and re-registration of jobs
 * through the REST resources. Collaborators (TaskManager, PersistenceStore,
 * observers) are Mockito mocks; interactions are verified with `there was`.
 */
class JobSchedulerIntegrationTest extends SpecificationWithJUnit with Mockito {

  import MockJobUtils._

  "JobScheduler" should {
    "A job creates a failed task and then a successful task from a synchronous job" in {
      val epsilon = Hours.hours(2).toPeriod
      val job1 = new ScheduleBasedJob("R5/2012-01-01T00:00:00.000Z/P1D", "job1", "CMD", epsilon)

      val jobGraph = new JobGraph
      val persistenceStore = mock[PersistenceStore]
      val mockTaskManager = mock[TaskManager]
      val scheduler = mockScheduler(epsilon, mockTaskManager, jobGraph, persistenceStore)
      val startTime = DateTime.parse("2012-01-01T01:00:00.000Z")
      scheduler.leader.set(true)
      scheduler.registerJob(job1, persist = true, startTime)

      // One iteration consumes one repetition: R5 -> R4, next day.
      val newStreams = scheduler.iteration(startTime, scheduler.streams)
      newStreams.head.schedule must_== "R4/2012-01-02T00:00:00.000Z/P1D"

      scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, startTime, 0))
      scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, startTime, 0))

      // Both the original and the advanced schedule must have been persisted exactly once.
      there was one(persistenceStore)
        .persistJob(new ScheduleBasedJob("R5/2012-01-01T00:00:00.000Z/P1D", "job1", "CMD", epsilon))
      there was one(persistenceStore)
        .persistJob(new ScheduleBasedJob("R4/2012-01-02T00:00:00.000Z/P1D", "job1", "CMD", epsilon))
    }

    "Executing a job updates the job counts and errors" in {
      val epsilon = Minutes.minutes(20).toPeriod
      val jobName = "FOO"
      // retries = 0 so a single failure is immediately counted as an error.
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = jobName, command = "fooo", epsilon = epsilon, retries = 0)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]
      val mockJobsObserver = mockFullObserver
      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore, mockJobsObserver)

      scheduler.leader.set(true)
      scheduler.registerJob(job1, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
      scheduler.run(() => {
        DateTime.parse("2012-01-01T00:05:01.000Z")
      })

      // Each completion replaces the graph vertex, so capture the updated job after each event.
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
      val job2 = graph.lookupVertex(jobName).get
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
      val job3 = graph.lookupVertex(jobName).get
      scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))

      // Two successes and one failure must be reflected in the counters and observer callbacks.
      graph.lookupVertex(jobName).get.successCount must_== 2
      graph.lookupVertex(jobName).get.errorCount must_== 1
      there was one(mockJobsObserver).apply(JobFinished(job1, TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
      there was one(mockJobsObserver).apply(JobFinished(job2, TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
      there was one(mockJobsObserver).apply(JobFailed(Right(job3), TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
    }

    "Tests that a disabled job does not run and does not execute dependant children." in {
      val epsilon = Minutes.minutes(20).toPeriod
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = true)
      val job2 = new DependencyBasedJob(Set("job1"), name = "job2", command = "CMD", disabled = true)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      scheduler.registerJob(job1, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
      scheduler.registerJob(job2, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
      scheduler.run(() => {
        DateTime.parse("2012-01-01T00:05:01.000Z")
      })
      /*
          scheduler.handleFinishedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
          scheduler.handleFinishedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
          scheduler.handleFailedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
      */
      // Neither the disabled parent nor its disabled child may accumulate any runs.
      graph.lookupVertex("job1").get.successCount must_== 0
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 0
      graph.lookupVertex("job2").get.errorCount must_== 0
    }

    "Tests that dependent jobs runs when they should" in {
      val epsilon = Minutes.minutes(20).toPeriod
      // job3..job5 form a dependency chain on the two scheduled parents.
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
      val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
      val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
      val job5 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      val date = DateTime.parse("2011-01-01T00:05:01.000Z")
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.registerJob(job4, persist = true, date)
      scheduler.registerJob(job5, persist = true, date)
      scheduler.run(() => {
        date
      })

      val finishedDate = date.plus(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 1
      graph.lookupVertex("job2").get.errorCount must_== 0

      // Each child is enqueued only once all of its parents have finished.
      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job3").get.successCount must_== 1
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 1
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job5").get.successCount must_== 1
      graph.lookupVertex("job5").get.errorCount must_== 0
    }

    "Tests that dependent jobs run even if their parents fail but have softError enabled" in {
      val epsilon = Minutes.minutes(20).toPeriod
      // job2 fails, but softError = true means the failure still satisfies job3's dependency.
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false, retries = 0, softError = true)
      val job3 = new DependencyBasedJob(Set("job1", "job2"), name = "job3", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      val date = DateTime.now(DateTimeZone.UTC)
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.run(() => {
        date
      })

      val finishedDate = date.plusMinutes(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFailedTask(TaskUtils.getTaskStatus(job2, date, 0))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      val vJob2 = graph.lookupVertex("job2").get
      vJob2.successCount must_== 0
      vJob2.errorCount must_== 1

      // Despite job2's failure, the soft error lets job3 be scheduled (at job2's lastError time).
      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, DateTime.parse(vJob2.lastError), 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, date, 0), Some(DateTime.parse(vJob2.lastError)))

      graph.lookupVertex("job3").get.successCount must_== 1
      graph.lookupVertex("job3").get.errorCount must_== 0
    }

    "Tests that dependent jobs don't run if their parents fail without softError enabled" in {
      val epsilon = Minutes.minutes(20).toPeriod
      // Same shape as the previous test, but softError = false: job3 must NOT run.
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false, retries = 0, softError = false)
      val job3 = new DependencyBasedJob(Set("job1", "job2"), name = "job3", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      val date = DateTime.now(DateTimeZone.UTC)
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.run(() => {
        date
      })

      val finishedDate = date.plusMinutes(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFailedTask(TaskUtils.getTaskStatus(job2, date, 0))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      val vJob2 = graph.lookupVertex("job2").get
      vJob2.successCount must_== 0
      vJob2.errorCount must_== 1

      // Hard failure of a parent blocks the child entirely.
      there was no(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, DateTime.parse(vJob2.lastError), 0), highPriority = false)
    }

    "Tests that dependent jobs runs when they should after changing the jobgraph" in {
      val epsilon = Minutes.minutes(20).toPeriod
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
      val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
      val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
      // job5 is first registered depending on {job1, job2}, then re-registered
      // through the REST resource with an extra dependency on job3.
      val job5_1 = new DependencyBasedJob(Set("job1", "job2"), name = "job5", command = "CMD", disabled = false)
      val job5_2 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      val date = DateTime.parse("2012-01-01T00:00:00.000Z")
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.registerJob(job4, persist = true, date)
      scheduler.registerJob(job5_1, persist = true, date)
      scheduler.run(() => {
        date
      })

      // First round: run the whole chain once with the original graph.
      val finishedDate = date.plusMinutes(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 1
      graph.lookupVertex("job2").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job3").get.successCount must_== 1
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 1
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_1, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_1, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job5").get.successCount must_== 1
      graph.lookupVertex("job5").get.errorCount must_== 0

      // Update job5's dependencies via the REST resource and run a second round.
      val jobResource = new DependentJobResource(jobScheduler = scheduler, jobGraph = graph)
      jobResource.handleRequest(job5_2)

      scheduler.run(() => {
        date
      })

      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 2
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 2
      graph.lookupVertex("job2").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job3").get.successCount must_== 2
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 2
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_2, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_2, finishedDate, 0), Some(finishedDate))

      // Re-registering job5 replaced the vertex, so its counters restarted from zero.
      graph.lookupVertex("job5").get.successCount must_== 1
      graph.lookupVertex("job5").get.errorCount must_== 0
    }

    "Tests that complex dependent jobs run when they should" in {
      val epsilon = Minutes.minutes(20).toPeriod
      // job6 depends on jobs at different depths of the graph (job1, job4, job5).
      val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
      val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
      val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
      val job5 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
      val job6 = new DependencyBasedJob(Set("job4", "job5", "job1"), name = "job6", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      val date = DateTime.parse("2011-01-01T00:05:01.000Z")
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.registerJob(job4, persist = true, date)
      scheduler.registerJob(job5, persist = true, date)
      scheduler.registerJob(job6, persist = true, date)
      scheduler.run(() => {
        date
      })

      val finishedDate = date.plus(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 1
      graph.lookupVertex("job2").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      // A second scheduler pass with another parent round lets the deeper children fire.
      scheduler.run(() => {
        date
      })

      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 2
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 2
      graph.lookupVertex("job2").get.errorCount must_== 0

      graph.lookupVertex("job3").get.successCount must_== 1
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 1
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job5").get.successCount must_== 1
      graph.lookupVertex("job5").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job6, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job6, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job6").get.successCount must_== 1
      graph.lookupVertex("job6").get.errorCount must_== 0
    }

    "Tests that dependent jobs run when parents are updated" in {
      val epsilon = Minutes.minutes(20).toPeriod
      // Schedules are anchored at "now" so the updated parents are immediately runnable.
      val date = DateTime.now(DateTimeZone.UTC)
      val fmt = ISODateTimeFormat.dateTime()
      val job1 = new ScheduleBasedJob(schedule = s"R/${fmt.print(date)}/PT1M",
        name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
      val job2 = new ScheduleBasedJob(schedule = s"R/${fmt.print(date)}/PT1M",
        name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
      val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
      val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
      val job5_1 = new DependencyBasedJob(Set("job1", "job2"), name = "job5", command = "CMD", disabled = false)
      val job5_2 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
      val horizon = Minutes.minutes(5).toPeriod
      val mockTaskManager = mock[TaskManager]
      val graph = new JobGraph()
      val mockPersistenceStore = mock[PersistenceStore]

      val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)

      scheduler.leader.set(true)
      scheduler.registerJob(job1, persist = true, date)
      scheduler.registerJob(job2, persist = true, date)
      scheduler.registerJob(job3, persist = true, date)
      scheduler.registerJob(job4, persist = true, date)
      scheduler.registerJob(job5_1, persist = true, date)
      scheduler.run(() => {
        date
      })

      // First round with the original parent definitions.
      val finishedDate = date.plus(1)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 1
      graph.lookupVertex("job2").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job3").get.successCount must_== 1
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 1
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_1, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_1, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job5").get.successCount must_== 1
      graph.lookupVertex("job5").get.errorCount must_== 0

      // Re-submit the parents through the ISO8601 resource, which resets their counters.
      val jobResource = new Iso8601JobResource(jobScheduler = scheduler, jobGraph = graph)
      jobResource.handleRequest(job1)
      jobResource.handleRequest(job2)

      scheduler.run(() => {
        date
      })

      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))

      // Parents were replaced, so they are back to a single success each...
      graph.lookupVertex("job1").get.successCount must_== 1
      graph.lookupVertex("job1").get.errorCount must_== 0
      graph.lookupVertex("job2").get.successCount must_== 1
      graph.lookupVertex("job2").get.errorCount must_== 0

      // ...while the untouched children keep accumulating runs.
      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job3").get.successCount must_== 2
      graph.lookupVertex("job3").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job4").get.successCount must_== 2
      graph.lookupVertex("job4").get.errorCount must_== 0

      there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_2, finishedDate, 0), highPriority = false)
      scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_2, finishedDate, 0), Some(finishedDate))

      graph.lookupVertex("job5").get.successCount must_== 2
      graph.lookupVertex("job5").get.errorCount must_== 0
    }
  }
}
| tony-kerz/chronos | src/test/scala/org/apache/mesos/chronos/scheduler/jobs/JobSchedulerIntegrationTest.scala | Scala | apache-2.0 | 25,343 |
package com.olivergg.starttabs.scalaservice
import com.olivergg.starttabs.dto.Chat
/** In-memory chat directory backed by a fixed set of fake records. */
object ChatsService {
  // Fake data. Never reassigned, so hold it in an immutable `val`
  // (the previous `var` suggested mutability that was never used).
  private val chats: Array[Chat] = Array(
    Chat(0, "Ben Sparrow", "You on your way?", "https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png"),
    Chat(1, "Max Lynx", "Hey, it\\'s me", "https://avatars3.githubusercontent.com/u/11214?v=3&s=460"),
    Chat(2, "Andrew Jostlin", "Did you get the ice cream?", "https://pbs.twimg.com/profile_images/609810148769427456/dhzhuaNA.jpg"),
    Chat(3, "Adam Bradleyson", "I should buy a boat", "https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg"),
    Chat(4, "Perry Governor", "Look at my mukluks!", "https://pbs.twimg.com/profile_images/467390551830970368/80rkMI5v.jpeg")
  )

  /** Returns every chat. */
  def all(): Array[Chat] = {
    println("calling all from ChatService")
    chats
  }

  /**
   * Returns the chat at index `id`.
   * Note: ids here double as array indices; an unknown id throws
   * ArrayIndexOutOfBoundsException, matching the original behavior.
   */
  def get(id: Int): Chat = {
    println(s"calling get in ChatService for id = $id")
    chats(id)
  }
}
| olivergg/scalajs-ionic-starttabs | app-js/src/main/scala/com/olivergg/starttabs/scalaservice/ChatsService.scala | Scala | gpl-2.0 | 972 |
import scala.scalajs.js
import scala.scalajs.js.annotation.*
// Non-native JS class; its companion object below declares the static exports.
class StaticContainer extends js.Object
object StaticContainer {
  // Negative compilation test: each field below is expected to produce exactly
  // one compiler error on the line carrying the `// error` marker. Do not move
  // or remove those markers.

  // Twice as static
  @JSExportStatic // error
  @JSExportStatic("a1")
  val a: Int = 1

  @JSExportStatic // error
  @JSExportStatic("b1")
  var b: Int = 1

  // Once as static and once as top-level
  @JSExportStatic
  @JSExportTopLevel("c1") // error
  val c: Int = 1

  @JSExportStatic
  @JSExportTopLevel("d1") // error
  var d: Int = 1
}
| lampepfl/dotty | tests/neg-scalajs/jsexportstatic-twice-same-field.scala | Scala | apache-2.0 | 474 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package expressions
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 06.03.2008
*/
object NameValuePair {
  /**
   * Parses the NameValuePair grammar rule: `val` identifier `=` PrefixExpr.
   *
   * On any recoverable problem (missing identifier, `=`, or expression) an
   * error marker is reported but parsing still succeeds, so the marker is
   * closed as a NAME_VALUE_PAIR element.
   *
   * @return true if a pair (possibly with errors) was consumed;
   *         false if the builder was not positioned at `val` (nothing consumed)
   */
  def parse(builder: ScalaPsiBuilder): Boolean = {
    val nameMarker = builder.mark
    builder.getTokenType match {
      case ScalaTokenTypes.kVAL =>
        builder.advanceLexer() //Ate val
      case _ =>
        // Not a name-value pair: drop the marker and leave the builder untouched.
        nameMarker.drop()
        return false
    }
    builder.getTokenType match {
      case ScalaTokenTypes.tIDENTIFIER =>
        builder.advanceLexer() //Ate id
      case _ =>
        // Missing identifier: report, close the element early, and still succeed.
        builder error ScalaBundle.message("identifier.expected")
        nameMarker.done(ScalaElementTypes.NAME_VALUE_PAIR)
        return true
    }
    builder.getTokenType match {
      case ScalaTokenTypes.tASSIGN =>
        builder.advanceLexer() //Ate =
      case _ =>
        // Missing '=' is reported but parsing continues with the value expression.
        builder error ScalaBundle.message("assign.expected")
    }
    if (!PrefixExpr.parse(builder)) {
      builder error ScalaBundle.message("wrong.expression")
    }
    nameMarker.done(ScalaElementTypes.NAME_VALUE_PAIR)
    true
  }
}
/*
* Copyright 2015-2020 Snowflake Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.snowflake.spark.snowflake
import net.snowflake.spark.snowflake.testsuite.ClusterTestSuiteBase
import org.slf4j.{Logger, LoggerFactory}
import org.apache.spark.sql.SparkSession
object ClusterTest {
  val log: Logger = LoggerFactory.getLogger(getClass)

  val RemoteMode = "remote"
  val LocalMode = "local"

  // Separator between suite class names in the second CLI argument.
  val TestSuiteSeparator = ";"

  /**
   * Driver entry point.
   *
   * args(0): run mode, "local" (embedded master, for debugging) or "remote".
   * args(1): ';'-separated, fully-qualified test-suite class names.
   *
   * Each suite is instantiated reflectively, run against a shared SparkSession,
   * and its outcome (status, timings, commit id) is written back to Snowflake.
   */
  def main(args: Array[String]): Unit = {
    log.info(s"Test Spark Connector: ${net.snowflake.spark.snowflake.Utils.VERSION}")

    val usage = s"""Two parameters are needed: [local | remote] and
                   | testClassNames (using ';' to separate multiple classes)
                   |""".stripMargin
    log.info(usage)

    if (args.length < 2) {
      throw new Exception(s"At least two parameters are needed. Usage: $usage")
    }

    // Set up the Spark session; local mode is introduced for debugging purposes.
    val runMode = args(0)
    var sparkSessionBuilder = SparkSession
      .builder()
      .appName("Spark SQL basic example")
      .config("spark.some.config.option", "some-value")
    if (runMode.equalsIgnoreCase(LocalMode)) {
      sparkSessionBuilder = sparkSessionBuilder
        .config("spark.master", "local")
    }
    val spark = sparkSessionBuilder.getOrCreate()

    // Run each requested suite, recording one result row per suite.
    val testSuiteNames = args(1).split(TestSuiteSeparator)
    for (testSuiteName <- testSuiteNames if testSuiteName.trim.nonEmpty) {
      // Retrieve commit ID from the environment (set by the CI workflow).
      val commitID = scala.util.Properties
        .envOrElse(TestUtils.GITHUB_SHA, "commit id not set")

      val resultBuilder = new ClusterTestResultBuilder()
        .withTestType("Scala")
        .withTestCaseName(testSuiteName)
        .withCommitID(commitID)
        .withTestStatus(TestUtils.TEST_RESULT_STATUS_INIT)
        .withStartTimeInMill(System.currentTimeMillis())
        .withGithubRunId(TestUtils.githubRunId)

      try {
        // Class.newInstance() is deprecated since Java 9; go through the
        // no-arg constructor explicitly instead.
        Class
          .forName(testSuiteName)
          .getDeclaredConstructor()
          .newInstance()
          .asInstanceOf[ClusterTestSuiteBase]
          .run(spark, resultBuilder)
      } catch {
        // Deliberately catch Throwable: any failure must still be recorded as
        // a result row instead of aborting the remaining suites.
        case e: Throwable =>
          log.error(e.getMessage, e)
          resultBuilder
            .withTestStatus(TestUtils.TEST_RESULT_STATUS_EXCEPTION)
            .withReason(e.getMessage)
      } finally {
        // Set the end time and persist the result regardless of outcome.
        resultBuilder
          .withEndTimeInMill(System.currentTimeMillis())
        resultBuilder.build().writeToSnowflake()
      }
    }

    spark.stop()
  }
}
| snowflakedb/spark-snowflakedb | ClusterTest/src/main/scala/net/snowflake/spark/snowflake/ClusterTest.scala | Scala | apache-2.0 | 3,328 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io._
import java.nio.ByteBuffer
import java.nio.file.{Files, Paths}
import java.util.concurrent.{Callable, Executors}
import java.util.regex.Pattern
import java.util.{Collections, Optional, Properties}
import kafka.api.{ApiVersion, KAFKA_0_11_0_IV0}
import kafka.common.{OffsetsOutOfOrderException, RecordValidationException, UnexpectedAppendOffsetException}
import kafka.log.Log.DeleteDirSuffix
import kafka.metrics.KafkaYammerMetrics
import kafka.server.checkpoints.LeaderEpochCheckpointFile
import kafka.server.epoch.{EpochEntry, LeaderEpochFileCache}
import kafka.server.{BrokerTopicStats, FetchDataInfo, FetchHighWatermark, FetchIsolation, FetchLogEnd, FetchTxnCommitted, KafkaConfig, LogDirFailureChannel, LogOffsetMetadata}
import kafka.utils._
import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition}
import org.apache.kafka.common.errors._
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record.MemoryRecords.RecordFilter
import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.requests.{ListOffsetRequest, ListOffsetResponse}
import org.apache.kafka.common.utils.{Time, Utils}
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.{After, Before, Test}
import org.scalatest.Assertions
import scala.collection.{Iterable, mutable}
import scala.jdk.CollectionConverters._
import scala.collection.mutable.ListBuffer
import org.scalatest.Assertions.{assertThrows, intercept, withClue}
class LogTest {
// Broker config built per-test in setUp (hence var; null until then).
var config: KafkaConfig = null
val brokerTopicStats = new BrokerTopicStats
// Temporary directories for log segments; deleted in tearDown.
val tmpDir = TestUtils.tempDir()
val logDir = TestUtils.randomPartitionLogDir(tmpDir)
// Controllable clock so tests can advance time deterministically.
val mockTime = new MockTime()

// Snapshot of all currently-registered Yammer metric names.
def metricsKeySet = KafkaYammerMetrics.defaultRegistry.allMetrics.keySet.asScala
@Before
def setUp(): Unit = {
  // Build a minimal single-broker config for the tests (port -1: unassigned).
  config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, "127.0.0.1:1", port = -1))
}
@After
def tearDown(): Unit = {
  // Release metrics resources first, then remove the temp log directory tree.
  brokerTopicStats.close()
  Utils.delete(tmpDir)
}
/**
 * Creates empty log and offset-index files in `dir` for each given base offset,
 * simulating pre-existing (but empty) log segments on disk.
 */
def createEmptyLogs(dir: File, offsets: Int*): Unit =
  offsets.foreach { offset =>
    Log.logFile(dir, offset).createNewFile()
    Log.offsetIndexFile(dir, offset).createNewFile()
  }
@Test
def testHighWatermarkMetadataUpdatedAfterSegmentRoll(): Unit = {
  val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024)
  val log = createLog(logDir, logConfig)

  // Reads at FetchHighWatermark isolation and asserts both the byte size and
  // the exact offsets returned for a fetch starting at `fetchOffset`.
  def assertFetchSizeAndOffsets(fetchOffset: Long,
                                expectedSize: Int,
                                expectedOffsets: Seq[Long]): Unit = {
    val readInfo = log.read(
      startOffset = fetchOffset,
      maxLength = 2048,
      isolation = FetchHighWatermark,
      minOneMessage = false)
    assertEquals(expectedSize, readInfo.records.sizeInBytes)
    assertEquals(expectedOffsets, readInfo.records.records.asScala.map(_.offset))
  }

  val records = TestUtils.records(List(
    new SimpleRecord(mockTime.milliseconds, "a".getBytes, "value".getBytes),
    new SimpleRecord(mockTime.milliseconds, "b".getBytes, "value".getBytes),
    new SimpleRecord(mockTime.milliseconds, "c".getBytes, "value".getBytes)
  ))

  log.appendAsLeader(records, leaderEpoch = 0)
  // High watermark still 0: nothing is visible to a HW-bounded fetch yet.
  assertFetchSizeAndOffsets(fetchOffset = 0L, 0, Seq())

  log.maybeIncrementHighWatermark(log.logEndOffsetMetadata)
  // After advancing the HW to the log end, all three records become visible.
  assertFetchSizeAndOffsets(fetchOffset = 0L, records.sizeInBytes, Seq(0, 1, 2))

  log.roll()
  // Rolling the segment must not change what is readable below the HW.
  assertFetchSizeAndOffsets(fetchOffset = 0L, records.sizeInBytes, Seq(0, 1, 2))

  // New records above the unchanged HW remain invisible.
  log.appendAsLeader(records, leaderEpoch = 0)
  assertFetchSizeAndOffsets(fetchOffset = 3L, 0, Seq())
}
  /**
   * Exercises high watermark maintenance across appends (leader and follower),
   * explicit updates, partial truncation, and full truncation.
   */
  @Test
  def testHighWatermarkMaintenance(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024)
    val log = createLog(logDir, logConfig)
    val leaderEpoch = 0
    // Three-record batch starting at the given base offset.
    def records(offset: Long): MemoryRecords = TestUtils.records(List(
      new SimpleRecord(mockTime.milliseconds, "a".getBytes, "value".getBytes),
      new SimpleRecord(mockTime.milliseconds, "b".getBytes, "value".getBytes),
      new SimpleRecord(mockTime.milliseconds, "c".getBytes, "value".getBytes)
    ), baseOffset = offset, partitionLeaderEpoch= leaderEpoch)
    // Checks both the HW value and the validity of its cached offset metadata.
    def assertHighWatermark(offset: Long): Unit = {
      assertEquals(offset, log.highWatermark)
      assertValidLogOffsetMetadata(log, log.fetchOffsetSnapshot.highWatermark)
    }
    // High watermark initialized to 0
    assertHighWatermark(0L)
    // High watermark not changed by append
    log.appendAsLeader(records(0), leaderEpoch)
    assertHighWatermark(0L)
    // Update high watermark as leader
    log.maybeIncrementHighWatermark(LogOffsetMetadata(1L))
    assertHighWatermark(1L)
    // Cannot update past the log end offset
    log.updateHighWatermark(5L)
    assertHighWatermark(3L)
    // Update high watermark as follower
    log.appendAsFollower(records(3L))
    log.updateHighWatermark(6L)
    assertHighWatermark(6L)
    // High watermark should be adjusted by truncation
    log.truncateTo(3L)
    assertHighWatermark(3L)
    log.appendAsLeader(records(0L), leaderEpoch = 0)
    assertHighWatermark(3L)
    assertEquals(6L, log.logEndOffset)
    assertEquals(0L, log.logStartOffset)
    // Full truncation should also reset high watermark
    log.truncateFullyAndStartAt(4L)
    assertEquals(4L, log.logEndOffset)
    assertEquals(4L, log.logStartOffset)
    assertHighWatermark(4L)
  }
  /**
   * Read from `log` at `offset` under the given isolation and verify that records are
   * returned, that all returned offsets fall below the isolation's upper bound
   * (LEO, high watermark, or last stable offset), and that the fetch metadata is valid.
   */
  private def assertNonEmptyFetch(log: Log, offset: Long, isolation: FetchIsolation): Unit = {
    val readInfo = log.read(startOffset = offset,
      maxLength = Int.MaxValue,
      isolation = isolation,
      minOneMessage = true)
    assertFalse(readInfo.firstEntryIncomplete)
    assertTrue(readInfo.records.sizeInBytes > 0)
    // Resolve the visibility bound implied by the isolation level.
    val upperBoundOffset = isolation match {
      case FetchLogEnd => log.logEndOffset
      case FetchHighWatermark => log.highWatermark
      case FetchTxnCommitted => log.lastStableOffset
    }
    for (record <- readInfo.records.records.asScala)
      assertTrue(record.offset < upperBoundOffset)
    assertEquals(offset, readInfo.fetchOffsetMetadata.messageOffset)
    assertValidLogOffsetMetadata(log, readInfo.fetchOffsetMetadata)
  }
private def assertEmptyFetch(log: Log, offset: Long, isolation: FetchIsolation): Unit = {
val readInfo = log.read(startOffset = offset,
maxLength = Int.MaxValue,
isolation = isolation,
minOneMessage = true)
assertFalse(readInfo.firstEntryIncomplete)
assertEquals(0, readInfo.records.sizeInBytes)
assertEquals(offset, readInfo.fetchOffsetMetadata.messageOffset)
assertValidLogOffsetMetadata(log, readInfo.fetchOffsetMetadata)
}
@Test
def testFetchUpToLogEndOffset(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(
new SimpleRecord("0".getBytes),
new SimpleRecord("1".getBytes),
new SimpleRecord("2".getBytes)
)), leaderEpoch = 0)
log.appendAsLeader(TestUtils.records(List(
new SimpleRecord("3".getBytes),
new SimpleRecord("4".getBytes)
)), leaderEpoch = 0)
(log.logStartOffset until log.logEndOffset).foreach { offset =>
assertNonEmptyFetch(log, offset, FetchLogEnd)
}
}
  /**
   * Reads at FetchHighWatermark isolation must return records strictly below the
   * high watermark and nothing at or above it, at every HW position.
   */
  @Test
  def testFetchUpToHighWatermark(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024)
    val log = createLog(logDir, logConfig)
    log.appendAsLeader(TestUtils.records(List(
      new SimpleRecord("0".getBytes),
      new SimpleRecord("1".getBytes),
      new SimpleRecord("2".getBytes)
    )), leaderEpoch = 0)
    log.appendAsLeader(TestUtils.records(List(
      new SimpleRecord("3".getBytes),
      new SimpleRecord("4".getBytes)
    )), leaderEpoch = 0)
    // Offsets below the HW are readable; offsets at or above it return nothing.
    def assertHighWatermarkBoundedFetches(): Unit = {
      (log.logStartOffset until log.highWatermark).foreach { offset =>
        assertNonEmptyFetch(log, offset, FetchHighWatermark)
      }
      (log.highWatermark to log.logEndOffset).foreach { offset =>
        assertEmptyFetch(log, offset, FetchHighWatermark)
      }
    }
    // Verify at HW = 0, mid-log, and log end.
    assertHighWatermarkBoundedFetches()
    log.updateHighWatermark(3L)
    assertHighWatermarkBoundedFetches()
    log.updateHighWatermark(5L)
    assertHighWatermarkBoundedFetches()
  }
  /**
   * Reads at FetchTxnCommitted isolation must be bounded by the last stable offset (LSO):
   * non-empty below it, empty at or above it, as transactions commit/abort and the
   * high watermark advances.
   */
  @Test
  def testFetchUpToLastStableOffset(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort
    val producerId1 = 1L
    val producerId2 = 2L
    // Interleave two open transactions with non-transactional data.
    val appendProducer1 = appendTransactionalAsLeader(log, producerId1, epoch)
    val appendProducer2 = appendTransactionalAsLeader(log, producerId2, epoch)
    appendProducer1(5)
    appendNonTransactionalAsLeader(log, 3)
    appendProducer2(2)
    appendProducer1(4)
    appendNonTransactionalAsLeader(log, 2)
    appendProducer1(10)
    def assertLsoBoundedFetches(): Unit = {
      (log.logStartOffset until log.lastStableOffset).foreach { offset =>
        assertNonEmptyFetch(log, offset, FetchTxnCommitted)
      }
      (log.lastStableOffset to log.logEndOffset).foreach { offset =>
        assertEmptyFetch(log, offset, FetchTxnCommitted)
      }
    }
    assertLsoBoundedFetches()
    log.updateHighWatermark(log.logEndOffset)
    assertLsoBoundedFetches()
    // Committing producer 1 does not advance the LSO until the HW covers the marker.
    appendEndTxnMarkerAsLeader(log, producerId1, epoch, ControlRecordType.COMMIT)
    assertEquals(0L, log.lastStableOffset)
    log.updateHighWatermark(log.logEndOffset)
    assertEquals(8L, log.lastStableOffset)
    assertLsoBoundedFetches()
    // Aborting producer 2 (the remaining open txn) lets the LSO reach the log end.
    appendEndTxnMarkerAsLeader(log, producerId2, epoch, ControlRecordType.ABORT)
    assertEquals(8L, log.lastStableOffset)
    log.updateHighWatermark(log.logEndOffset)
    assertEquals(log.logEndOffset, log.lastStableOffset)
    assertLsoBoundedFetches()
  }
@Test
def testLogDeleteDirName(): Unit = {
val name1 = Log.logDeleteDirName(new TopicPartition("foo", 3))
assertTrue(name1.length <= 255)
assertTrue(Pattern.compile("foo-3\\\\.[0-9a-z]{32}-delete").matcher(name1).matches())
assertTrue(Log.DeleteDirPattern.matcher(name1).matches())
assertFalse(Log.FutureDirPattern.matcher(name1).matches())
val name2 = Log.logDeleteDirName(
new TopicPartition("n" + String.join("", Collections.nCopies(248, "o")), 5))
assertEquals(255, name2.length)
assertTrue(Pattern.compile("n[o]{212}-5\\\\.[0-9a-z]{32}-delete").matcher(name2).matches())
assertTrue(Log.DeleteDirPattern.matcher(name2).matches())
assertFalse(Log.FutureDirPattern.matcher(name2).matches())
}
@Test
def testOffsetFromFile(): Unit = {
val offset = 23423423L
val logFile = Log.logFile(tmpDir, offset)
assertEquals(offset, Log.offsetFromFile(logFile))
val offsetIndexFile = Log.offsetIndexFile(tmpDir, offset)
assertEquals(offset, Log.offsetFromFile(offsetIndexFile))
val timeIndexFile = Log.timeIndexFile(tmpDir, offset)
assertEquals(offset, Log.offsetFromFile(timeIndexFile))
val snapshotFile = Log.producerSnapshotFile(tmpDir, offset)
assertEquals(offset, Log.offsetFromFile(snapshotFile))
}
  /**
   * Tests for time based log roll. This test appends messages then changes the time
   * using the mock clock to force the log to roll and checks the number of segments.
   * Rolls are driven by record timestamps when present, otherwise by the wall clock.
   */
  @Test
  def testTimeBasedLogRoll(): Unit = {
    def createRecords = TestUtils.singletonRecords("test".getBytes)
    val logConfig = LogTest.createLogConfig(segmentMs = 1 * 60 * 60L)
    // create a log
    val log = createLog(logDir, logConfig, maxProducerIdExpirationMs = 24 * 60)
    assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
    // Test the segment rolling behavior when messages do not have a timestamp.
    mockTime.sleep(log.config.segmentMs + 1)
    log.appendAsLeader(createRecords, leaderEpoch = 0)
    assertEquals("Log doesn't roll if doing so creates an empty segment.", 1, log.numberOfSegments)
    log.appendAsLeader(createRecords, leaderEpoch = 0)
    assertEquals("Log rolls on this append since time has expired.", 2, log.numberOfSegments)
    for (numSegments <- 3 until 5) {
      mockTime.sleep(log.config.segmentMs + 1)
      log.appendAsLeader(createRecords, leaderEpoch = 0)
      assertEquals("Changing time beyond rollMs and appending should create a new segment.", numSegments, log.numberOfSegments)
    }
    // Append a message with timestamp to a segment whose first message do not have a timestamp.
    val timestamp = mockTime.milliseconds + log.config.segmentMs + 1
    def createRecordsWithTimestamp = TestUtils.singletonRecords(value = "test".getBytes, timestamp = timestamp)
    log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0)
    assertEquals("Segment should not have been rolled out because the log rolling should be based on wall clock.", 4, log.numberOfSegments)
    // Test the segment rolling behavior when messages have timestamps.
    mockTime.sleep(log.config.segmentMs + 1)
    log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0)
    assertEquals("A new segment should have been rolled out", 5, log.numberOfSegments)
    // move the wall clock beyond log rolling time
    mockTime.sleep(log.config.segmentMs + 1)
    log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0)
    assertEquals("Log should not roll because the roll should depend on timestamp of the first message.", 5, log.numberOfSegments)
    val recordWithExpiredTimestamp = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
    log.appendAsLeader(recordWithExpiredTimestamp, leaderEpoch = 0)
    assertEquals("Log should roll because the timestamp in the message should make the log segment expire.", 6, log.numberOfSegments)
    // An empty record set must never trigger a roll, regardless of elapsed time.
    val numSegments = log.numberOfSegments
    mockTime.sleep(log.config.segmentMs + 1)
    log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE), leaderEpoch = 0)
    assertEquals("Appending an empty message set should not roll log even if sufficient time has passed.", numSegments, log.numberOfSegments)
  }
  /**
   * Rolling to a base offset that already exists (including a size-zero active segment)
   * must recreate the segment and leave the log fully appendable and readable.
   */
  @Test
  def testRollSegmentThatAlreadyExists(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentMs = 1 * 60 * 60L)
    // create a log
    val log = createLog(logDir, logConfig)
    assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
    // roll active segment with the same base offset of size zero should recreate the segment
    log.roll(Some(0L))
    assertEquals("Expect 1 segment after roll() empty segment with base offset.", 1, log.numberOfSegments)
    // should be able to append records to active segment
    val records = TestUtils.records(
      List(new SimpleRecord(mockTime.milliseconds, "k1".getBytes, "v1".getBytes)),
      baseOffset = 0L, partitionLeaderEpoch = 0)
    log.appendAsFollower(records)
    assertEquals("Expect one segment.", 1, log.numberOfSegments)
    assertEquals(0L, log.activeSegment.baseOffset)
    // make sure we can append more records
    val records2 = TestUtils.records(
      List(new SimpleRecord(mockTime.milliseconds + 10, "k2".getBytes, "v2".getBytes)),
      baseOffset = 1L, partitionLeaderEpoch = 0)
    log.appendAsFollower(records2)
    assertEquals("Expect two records in the log", 2, log.logEndOffset)
    assertEquals(0, readLog(log, 0, 1).records.batches.iterator.next().lastOffset)
    assertEquals(1, readLog(log, 1, 1).records.batches.iterator.next().lastOffset)
    // roll so that active segment is empty
    log.roll()
    assertEquals("Expect base offset of active segment to be LEO", 2L, log.activeSegment.baseOffset)
    assertEquals("Expect two segments.", 2, log.numberOfSegments)
    // manually resize offset index to force roll of an empty active segment on next append
    log.activeSegment.offsetIndex.resize(0)
    val records3 = TestUtils.records(
      List(new SimpleRecord(mockTime.milliseconds + 12, "k3".getBytes, "v3".getBytes)),
      baseOffset = 2L, partitionLeaderEpoch = 0)
    log.appendAsFollower(records3)
    assertTrue(log.activeSegment.offsetIndex.maxEntries > 1)
    assertEquals(2, readLog(log, 2, 1).records.batches.iterator.next().lastOffset)
    assertEquals("Expect two segments.", 2, log.numberOfSegments)
  }
@Test(expected = classOf[OutOfOrderSequenceException])
def testNonSequentialAppend(): Unit = {
// create a log
val log = createLog(logDir, LogConfig())
val pid = 1L
val epoch: Short = 0
val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 0)
log.appendAsLeader(records, leaderEpoch = 0)
val nextRecords = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 2)
log.appendAsLeader(nextRecords, leaderEpoch = 0)
}
  /**
   * Truncating to an offset below the base offset of the latest (empty-preceded) segment
   * must drop that segment and leave an appendable active segment.
   */
  @Test
  def testTruncateToEmptySegment(): Unit = {
    val log = createLog(logDir, LogConfig())
    // Force a segment roll by using a large offset. The first segment will be empty
    val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)),
      baseOffset = Int.MaxValue.toLong + 200)
    appendAsFollower(log, records)
    assertEquals(0, log.logSegments.head.size)
    assertEquals(2, log.logSegments.size)
    // Truncate to an offset before the base offset of the latest segment
    log.truncateTo(0L)
    assertEquals(1, log.logSegments.size)
    // Now verify that we can still append to the active segment
    appendAsFollower(log, TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)),
      baseOffset = 100L))
    assertEquals(1, log.logSegments.size)
    assertEquals(101L, log.logEndOffset)
  }
  /**
   * Test the values returned by the logSegments call
   */
  @Test
  def testLogSegmentsCallCorrect(): Unit = {
    // Create 3 segments and make sure we get the right values from various logSegments calls.
    def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
    def getSegmentOffsets(log :Log, from: Long, to: Long) = log.logSegments(from, to).map { _.baseOffset }
    val setSize = createRecords.sizeInBytes
    val msgPerSeg = 10
    val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
    // create a log
    val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize)
    val log = createLog(logDir, logConfig)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    // segments expire in size
    for (_ <- 1 to (2 * msgPerSeg + 2))
      log.appendAsLeader(createRecords, leaderEpoch = 0)
    assertEquals("There should be exactly 3 segments.", 3, log.numberOfSegments)
    // from == to should always give an empty result
    assertEquals(List.empty[LogSegment], getSegmentOffsets(log, 10, 10))
    assertEquals(List.empty[LogSegment], getSegmentOffsets(log, 15, 15))
    assertEquals(List[Long](0, 10, 20), getSegmentOffsets(log, 0, 21))
    assertEquals(List[Long](0), getSegmentOffsets(log, 1, 5))
    assertEquals(List[Long](10, 20), getSegmentOffsets(log, 13, 21))
    assertEquals(List[Long](10), getSegmentOffsets(log, 13, 17))
    // from > to is invalid and must throw
    assertThrows[IllegalArgumentException]({ log.logSegments(10, 0) })
  }
  /**
   * Verifies which producer snapshot files are (re)created when a log that has none
   * is reloaded, for clean shutdown and for unclean shutdown at different recovery points.
   */
  @Test
  def testInitializationOfProducerSnapshotsUpgradePath(): Unit = {
    // simulate the upgrade path by creating a new log with several segments, deleting the
    // snapshot files, and then reloading the log
    val logConfig = LogTest.createLogConfig(segmentBytes = 64 * 10)
    var log = createLog(logDir, logConfig)
    assertEquals(None, log.oldestProducerSnapshotOffset)
    for (i <- 0 to 100) {
      val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
      log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
    }
    assertTrue(log.logSegments.size >= 2)
    val logEndOffset = log.logEndOffset
    log.close()
    val cleanShutdownFile = createCleanShutdownFile()
    deleteProducerSnapshotFiles()
    // Reload after clean shutdown
    log = createLog(logDir, logConfig, recoveryPoint = logEndOffset)
    var expectedSnapshotOffsets = log.logSegments.map(_.baseOffset).takeRight(2).toVector :+ log.logEndOffset
    assertEquals(expectedSnapshotOffsets, listProducerSnapshotOffsets)
    log.close()
    Utils.delete(cleanShutdownFile)
    deleteProducerSnapshotFiles()
    // Reload after unclean shutdown with recoveryPoint set to log end offset
    log = createLog(logDir, logConfig, recoveryPoint = logEndOffset)
    assertEquals(expectedSnapshotOffsets, listProducerSnapshotOffsets)
    log.close()
    deleteProducerSnapshotFiles()
    // Reload after unclean shutdown with recoveryPoint set to 0
    log = createLog(logDir, logConfig, recoveryPoint = 0L)
    // We progressively create a snapshot for each segment after the recovery point
    expectedSnapshotOffsets = log.logSegments.map(_.baseOffset).tail.toVector :+ log.logEndOffset
    assertEquals(expectedSnapshotOffsets, listProducerSnapshotOffsets)
    log.close()
  }
  /**
   * Recovery must tolerate a coordinator epoch that goes backwards (KAFKA-9144) by
   * taking the latest write instead of rejecting it.
   */
  @Test
  def testRecoverAfterNonMonotonicCoordinatorEpochWrite(): Unit = {
    // Due to KAFKA-9144, we may encounter a coordinator epoch which goes backwards.
    // This test case verifies that recovery logic relaxes validation in this case and
    // just takes the latest write.
    val producerId = 1L
    val coordinatorEpoch = 5
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    var log = createLog(logDir, logConfig)
    val epoch = 0.toShort
    val firstAppendTimestamp = mockTime.milliseconds()
    appendEndTxnMarkerAsLeader(log, producerId, epoch, ControlRecordType.ABORT,
      timestamp = firstAppendTimestamp, coordinatorEpoch = coordinatorEpoch)
    assertEquals(firstAppendTimestamp, log.producerStateManager.lastEntry(producerId).get.lastTimestamp)
    // Let the producer state expire so the second marker is accepted with a lower epoch.
    mockTime.sleep(log.maxProducerIdExpirationMs)
    assertEquals(None, log.producerStateManager.lastEntry(producerId))
    val secondAppendTimestamp = mockTime.milliseconds()
    appendEndTxnMarkerAsLeader(log, producerId, epoch, ControlRecordType.ABORT,
      timestamp = secondAppendTimestamp, coordinatorEpoch = coordinatorEpoch - 1)
    log.close()
    // Force recovery by setting the recoveryPoint to the log start
    log = createLog(logDir, logConfig, recoveryPoint = 0L)
    assertEquals(secondAppendTimestamp, log.producerStateManager.lastEntry(producerId).get.lastTimestamp)
    log.close()
  }
  // Run the unclean-shutdown snapshot recovery scenario with the oldest version supporting V1 records.
  @Test
  def testProducerSnapshotsRecoveryAfterUncleanShutdownV1(): Unit = {
    testProducerSnapshotsRecoveryAfterUncleanShutdown(ApiVersion.minSupportedFor(RecordVersion.V1).version)
  }
  // Run the unclean-shutdown snapshot recovery scenario with the latest message format.
  @Test
  def testProducerSnapshotsRecoveryAfterUncleanShutdownCurrentMessageFormat(): Unit = {
    testProducerSnapshotsRecoveryAfterUncleanShutdown(ApiVersion.latestVersion.version)
  }
@Test
def testLogReinitializeAfterManualDelete(): Unit = {
val logConfig = LogTest.createLogConfig()
// simulate a case where log data does not exist but the start offset is non-zero
val log = createLog(logDir, logConfig, logStartOffset = 500)
assertEquals(500, log.logStartOffset)
assertEquals(500, log.logEndOffset)
}
  /**
   * If recovery truncates the log so that all remaining data lies below the log start
   * offset, reopening must rebuild a single active segment at the start offset.
   */
  @Test
  def testLogEndLessThanStartAfterReopen(): Unit = {
    val logConfig = LogTest.createLogConfig()
    var log = createLog(logDir, logConfig)
    // One record per segment, rolled after each append: 5 full segments + 1 active.
    for (i <- 0 until 5) {
      val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
      log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
      log.roll()
    }
    assertEquals(6, log.logSegments.size)
    // Increment the log start offset
    val startOffset = 4
    log.updateHighWatermark(log.logEndOffset)
    log.maybeIncrementLogStartOffset(startOffset)
    assertTrue(log.logEndOffset > log.logStartOffset)
    // Append garbage to a segment below the current log start offset
    val segmentToForceTruncation = log.logSegments.take(2).last
    val bw = new BufferedWriter(new FileWriter(segmentToForceTruncation.log.file))
    bw.write("corruptRecord")
    bw.close()
    log.close()
    // Reopen the log. This will cause truncate the segment to which we appended garbage and delete all other segments.
    // All remaining segments will be lower than the current log start offset, which will force deletion of all segments
    // and recreation of a single, active segment starting at logStartOffset.
    log = createLog(logDir, logConfig, logStartOffset = startOffset)
    assertEquals(1, log.logSegments.size)
    assertEquals(startOffset, log.logStartOffset)
    assertEquals(startOffset, log.logEndOffset)
  }
  /**
   * nonActiveLogSegmentsFrom must return base offsets of all non-active segments at or
   * above the given start offset, and an empty result at or beyond the active segment.
   */
  @Test
  def testNonActiveSegmentsFrom(): Unit = {
    val logConfig = LogTest.createLogConfig()
    val log = createLog(logDir, logConfig)
    // One record per segment: non-active segments at offsets 0..4, active at 5.
    for (i <- 0 until 5) {
      val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
      log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
      log.roll()
    }
    def nonActiveBaseOffsetsFrom(startOffset: Long): Seq[Long] = {
      log.nonActiveLogSegmentsFrom(startOffset).map(_.baseOffset).toSeq
    }
    assertEquals(5L, log.activeSegment.baseOffset)
    assertEquals(0 until 5, nonActiveBaseOffsetsFrom(0L))
    assertEquals(Seq.empty, nonActiveBaseOffsetsFrom(5L))
    assertEquals(2 until 5, nonActiveBaseOffsetsFrom(2L))
    assertEquals(Seq.empty, nonActiveBaseOffsetsFrom(6L))
  }
@Test
def testInconsistentLogSegmentRange(): Unit = {
val logConfig = LogTest.createLogConfig()
val log = createLog(logDir, logConfig)
for (i <- 0 until 5) {
val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
log.roll()
}
assertThrows[IllegalArgumentException] {
log.logSegments(5, 1)
}
}
@Test
def testLogDelete(): Unit = {
val logConfig = LogTest.createLogConfig()
val log = createLog(logDir, logConfig)
for (i <- 0 to 100) {
val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
log.roll()
}
assertTrue(log.logSegments.size > 0)
assertFalse(logDir.listFiles.isEmpty)
// delete the log
log.delete()
assertEquals(0, log.logSegments.size)
assertFalse(logDir.exists)
}
/**
* Test that "PeriodicProducerExpirationCheck" scheduled task gets canceled after log
* is deleted.
*/
@Test
def testProducerExpireCheckAfterDelete(): Unit = {
val scheduler = new KafkaScheduler(1)
try {
scheduler.startup()
val logConfig = LogTest.createLogConfig()
val log = createLog(logDir, logConfig, scheduler = scheduler)
val producerExpireCheck = log.producerExpireCheck
assertTrue("producerExpireCheck isn't as part of scheduled tasks",
scheduler.taskRunning(producerExpireCheck))
log.delete()
assertFalse("producerExpireCheck is part of scheduled tasks even after log deletion",
scheduler.taskRunning(producerExpireCheck))
} finally {
scheduler.shutdown();
}
}
  /**
   * Drives unclean-shutdown recovery for the given message format and checks which
   * segments are read/recovered and which producer snapshot files survive, covering
   * both the pre- and post-KAFKA-5829 snapshot retention behaviors.
   */
  private def testProducerSnapshotsRecoveryAfterUncleanShutdown(messageFormatVersion: String): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 64 * 10, messageFormatVersion = messageFormatVersion)
    var log = createLog(logDir, logConfig)
    assertEquals(None, log.oldestProducerSnapshotOffset)
    for (i <- 0 to 100) {
      val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes)
      log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0)
    }
    assertTrue(log.logSegments.size >= 5)
    val segmentOffsets = log.logSegments.toVector.map(_.baseOffset)
    val activeSegmentOffset = segmentOffsets.last
    // We want the recovery point to be past the segment offset and before the last 2 segments including a gap of
    // 1 segment. We collect the data before closing the log.
    val offsetForSegmentAfterRecoveryPoint = segmentOffsets(segmentOffsets.size - 3)
    val offsetForRecoveryPointSegment = segmentOffsets(segmentOffsets.size - 4)
    val (segOffsetsBeforeRecovery, segOffsetsAfterRecovery) = segmentOffsets.toSet.partition(_ < offsetForRecoveryPointSegment)
    val recoveryPoint = offsetForRecoveryPointSegment + 1
    assertTrue(recoveryPoint < offsetForSegmentAfterRecoveryPoint)
    log.close()
    val segmentsWithReads = mutable.Set[LogSegment]()
    val recoveredSegments = mutable.Set[LogSegment]()
    val expectedSegmentsWithReads = mutable.Set[Long]()
    val expectedSnapshotOffsets = mutable.Set[Long]()
    // Expectations differ by message format: pre-0.11 formats have no producer state in
    // old segments, so only the active segment is read.
    if (logConfig.messageFormatVersion < KAFKA_0_11_0_IV0) {
      expectedSegmentsWithReads += activeSegmentOffset
      expectedSnapshotOffsets ++= log.logSegments.map(_.baseOffset).toVector.takeRight(2) :+ log.logEndOffset
    } else {
      expectedSegmentsWithReads ++= segOffsetsBeforeRecovery ++ Set(activeSegmentOffset)
      expectedSnapshotOffsets ++= log.logSegments.map(_.baseOffset).toVector.takeRight(4) :+ log.logEndOffset
    }
    // Build a Log whose segments record which of them were read or recovered.
    def createLogWithInterceptedReads(recoveryPoint: Long) = {
      val maxProducerIdExpirationMs = 60 * 60 * 1000
      val topicPartition = Log.parseTopicPartitionName(logDir)
      val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxProducerIdExpirationMs)
      // Intercept all segment read calls
      new Log(logDir, logConfig, logStartOffset = 0, recoveryPoint = recoveryPoint, mockTime.scheduler,
        brokerTopicStats, mockTime, maxProducerIdExpirationMs, LogManager.ProducerIdExpirationCheckIntervalMs,
        topicPartition, producerStateManager, new LogDirFailureChannel(10)) {
        override def addSegment(segment: LogSegment): LogSegment = {
          val wrapper = new LogSegment(segment.log, segment.lazyOffsetIndex, segment.lazyTimeIndex, segment.txnIndex, segment.baseOffset,
            segment.indexIntervalBytes, segment.rollJitterMs, mockTime) {
            override def read(startOffset: Long, maxSize: Int, maxPosition: Long, minOneMessage: Boolean): FetchDataInfo = {
              segmentsWithReads += this
              super.read(startOffset, maxSize, maxPosition, minOneMessage)
            }
            override def recover(producerStateManager: ProducerStateManager,
                                 leaderEpochCache: Option[LeaderEpochFileCache]): Int = {
              recoveredSegments += this
              super.recover(producerStateManager, leaderEpochCache)
            }
          }
          super.addSegment(wrapper)
        }
      }
    }
    // Retain snapshots for the last 2 segments
    ProducerStateManager.deleteSnapshotsBefore(logDir, segmentOffsets(segmentOffsets.size - 2))
    log = createLogWithInterceptedReads(offsetForRecoveryPointSegment)
    // We will reload all segments because the recovery point is behind the producer snapshot files (pre KAFKA-5829 behaviour)
    assertEquals(expectedSegmentsWithReads, segmentsWithReads.map(_.baseOffset))
    assertEquals(segOffsetsAfterRecovery, recoveredSegments.map(_.baseOffset))
    assertEquals(expectedSnapshotOffsets, listProducerSnapshotOffsets.toSet)
    log.close()
    segmentsWithReads.clear()
    recoveredSegments.clear()
    // Only delete snapshots before the base offset of the recovery point segment (post KAFKA-5829 behaviour) to
    // avoid reading all segments
    ProducerStateManager.deleteSnapshotsBefore(logDir, offsetForRecoveryPointSegment)
    log = createLogWithInterceptedReads(recoveryPoint = recoveryPoint)
    assertEquals(Set(activeSegmentOffset), segmentsWithReads.map(_.baseOffset))
    assertEquals(segOffsetsAfterRecovery, recoveredSegments.map(_.baseOffset))
    assertEquals(expectedSnapshotOffsets, listProducerSnapshotOffsets.toSet)
    // Verify that we keep 2 snapshot files if we checkpoint the log end offset
    log.deleteSnapshotsAfterRecoveryPointCheckpoint()
    val expectedSnapshotsAfterDelete = log.logSegments.map(_.baseOffset).toVector.takeRight(2) :+ log.logEndOffset
    assertEquals(expectedSnapshotsAfterDelete, listProducerSnapshotOffsets)
    log.close()
  }
@Test
def testSizeForLargeLogs(): Unit = {
val largeSize = Int.MaxValue.toLong * 2
val logSegment: LogSegment = EasyMock.createMock(classOf[LogSegment])
EasyMock.expect(logSegment.size).andReturn(Int.MaxValue).anyTimes
EasyMock.replay(logSegment)
assertEquals(Int.MaxValue, Log.sizeInBytes(Seq(logSegment)))
assertEquals(largeSize, Log.sizeInBytes(Seq(logSegment, logSegment)))
assertTrue(Log.sizeInBytes(Seq(logSegment, logSegment)) > Int.MaxValue)
}
@Test
def testProducerIdMapOffsetUpdatedForNonIdempotentData(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)))
log.appendAsLeader(records, leaderEpoch = 0)
log.takeProducerSnapshot()
assertEquals(Some(1), log.latestProducerSnapshotOffset)
}
  /**
   * When the producer state is empty before truncation, the log must skip reloading
   * producer state and jump straight to updating the map end offset and snapshotting.
   * Verified through strict EasyMock expectations on ProducerStateManager.
   */
  @Test
  def testSkipLoadingIfEmptyProducerStateBeforeTruncation(): Unit = {
    val stateManager: ProducerStateManager = EasyMock.mock(classOf[ProducerStateManager])
    // Load the log
    EasyMock.expect(stateManager.latestSnapshotOffset).andReturn(None)
    stateManager.updateMapEndOffset(0L)
    EasyMock.expectLastCall().anyTimes()
    EasyMock.expect(stateManager.mapEndOffset).andStubReturn(0L)
    EasyMock.expect(stateManager.isEmpty).andStubReturn(true)
    stateManager.takeSnapshot()
    EasyMock.expectLastCall().anyTimes()
    stateManager.truncateAndReload(EasyMock.eq(0L), EasyMock.eq(0L), EasyMock.anyLong)
    EasyMock.expectLastCall()
    EasyMock.expect(stateManager.firstUnstableOffset).andStubReturn(None)
    EasyMock.replay(stateManager)
    val config = LogConfig(new Properties())
    val log = new Log(logDir,
      config,
      logStartOffset = 0L,
      recoveryPoint = 0L,
      scheduler = mockTime.scheduler,
      brokerTopicStats = brokerTopicStats,
      time = mockTime,
      maxProducerIdExpirationMs = 300000,
      producerIdExpirationCheckIntervalMs = 30000,
      topicPartition = Log.parseTopicPartitionName(logDir),
      producerStateManager = stateManager,
      logDirFailureChannel = null)
    EasyMock.verify(stateManager)
    // Append some messages
    EasyMock.reset(stateManager)
    EasyMock.expect(stateManager.firstUnstableOffset).andStubReturn(None)
    stateManager.updateMapEndOffset(1L)
    EasyMock.expectLastCall()
    stateManager.updateMapEndOffset(2L)
    EasyMock.expectLastCall()
    EasyMock.replay(stateManager)
    log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), leaderEpoch = 0)
    log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), leaderEpoch = 0)
    EasyMock.verify(stateManager)
    // Now truncate
    EasyMock.reset(stateManager)
    EasyMock.expect(stateManager.firstUnstableOffset).andStubReturn(None)
    EasyMock.expect(stateManager.latestSnapshotOffset).andReturn(None)
    EasyMock.expect(stateManager.isEmpty).andStubReturn(true)
    EasyMock.expect(stateManager.mapEndOffset).andReturn(2L)
    stateManager.truncateAndReload(EasyMock.eq(0L), EasyMock.eq(1L), EasyMock.anyLong)
    EasyMock.expectLastCall()
    // Truncation causes the map end offset to reset to 0
    EasyMock.expect(stateManager.mapEndOffset).andReturn(0L)
    // We skip directly to updating the map end offset
    stateManager.updateMapEndOffset(1L)
    EasyMock.expectLastCall()
    // Finally, we take a snapshot
    stateManager.takeSnapshot()
    EasyMock.expectLastCall().once()
    EasyMock.replay(stateManager)
    log.truncateTo(1L)
    EasyMock.verify(stateManager)
  }
  /**
   * With a pre-0.11 message format and no clean-shutdown marker, log loading must not
   * call truncateAndReload on the producer state manager (no such expectation is set,
   * so a call would fail verification).
   */
  @Test
  def testSkipTruncateAndReloadIfOldMessageFormatAndNoCleanShutdown(): Unit = {
    val stateManager: ProducerStateManager = EasyMock.mock(classOf[ProducerStateManager])
    stateManager.updateMapEndOffset(0L)
    EasyMock.expectLastCall().anyTimes()
    stateManager.takeSnapshot()
    EasyMock.expectLastCall().anyTimes()
    EasyMock.expect(stateManager.isEmpty).andReturn(true)
    EasyMock.expectLastCall().once()
    EasyMock.expect(stateManager.firstUnstableOffset).andReturn(None)
    EasyMock.expectLastCall().once()
    EasyMock.replay(stateManager)
    val logProps = new Properties()
    logProps.put(LogConfig.MessageFormatVersionProp, "0.10.2")
    val config = LogConfig(logProps)
    new Log(logDir,
      config,
      logStartOffset = 0L,
      recoveryPoint = 0L,
      scheduler = mockTime.scheduler,
      brokerTopicStats = brokerTopicStats,
      time = mockTime,
      maxProducerIdExpirationMs = 300000,
      producerIdExpirationCheckIntervalMs = 30000,
      topicPartition = Log.parseTopicPartitionName(logDir),
      producerStateManager = stateManager,
      logDirFailureChannel = null)
    EasyMock.verify(stateManager)
  }
@Test
  def testSkipTruncateAndReloadIfOldMessageFormatAndCleanShutdown(): Unit = {
    // Same as the no-clean-shutdown variant above, but with a clean shutdown file present:
    // with an old ("0.10.2") message format, loading must still skip truncateAndReload.
    val stateManager: ProducerStateManager = EasyMock.mock(classOf[ProducerStateManager])
    // Loading may update the map end offset and take snapshots any number of times.
    stateManager.updateMapEndOffset(0L)
    EasyMock.expectLastCall().anyTimes()
    stateManager.takeSnapshot()
    EasyMock.expectLastCall().anyTimes()
    EasyMock.expect(stateManager.isEmpty).andReturn(true)
    EasyMock.expectLastCall().once()
    EasyMock.expect(stateManager.firstUnstableOffset).andReturn(None)
    EasyMock.expectLastCall().once()
    EasyMock.replay(stateManager)
    // Presence of this file marks the previous shutdown as clean.
    val cleanShutdownFile = createCleanShutdownFile()
    val logProps = new Properties()
    logProps.put(LogConfig.MessageFormatVersionProp, "0.10.2")
    val config = LogConfig(logProps)
    // Constructing the Log runs recovery/loading; verify() below catches any unexpected
    // interaction (such as truncateAndReload) with the mocked state manager.
    new Log(logDir,
      config,
      logStartOffset = 0L,
      recoveryPoint = 0L,
      scheduler = mockTime.scheduler,
      brokerTopicStats = brokerTopicStats,
      time = mockTime,
      maxProducerIdExpirationMs = 300000,
      producerIdExpirationCheckIntervalMs = 30000,
      topicPartition = Log.parseTopicPartitionName(logDir),
      producerStateManager = stateManager,
      logDirFailureChannel = null)
    EasyMock.verify(stateManager)
    Utils.delete(cleanShutdownFile)
  }
@Test
  def testSkipTruncateAndReloadIfNewMessageFormatAndCleanShutdown(): Unit = {
    // With the "0.11.0" message format AND a clean shutdown file, loading checks the latest
    // snapshot offset but must still not call truncateAndReload on the state manager.
    val stateManager: ProducerStateManager = EasyMock.mock(classOf[ProducerStateManager])
    // Unlike the old-format cases, the snapshot offset is consulted once during loading.
    EasyMock.expect(stateManager.latestSnapshotOffset).andReturn(None)
    stateManager.updateMapEndOffset(0L)
    EasyMock.expectLastCall().anyTimes()
    stateManager.takeSnapshot()
    EasyMock.expectLastCall().anyTimes()
    EasyMock.expect(stateManager.isEmpty).andReturn(true)
    EasyMock.expectLastCall().once()
    EasyMock.expect(stateManager.firstUnstableOffset).andReturn(None)
    EasyMock.expectLastCall().once()
    EasyMock.replay(stateManager)
    // Presence of this file marks the previous shutdown as clean.
    val cleanShutdownFile = createCleanShutdownFile()
    val logProps = new Properties()
    logProps.put(LogConfig.MessageFormatVersionProp, "0.11.0")
    val config = LogConfig(logProps)
    // Constructing the Log runs recovery/loading; verify() below catches any unexpected
    // interaction with the mocked state manager.
    new Log(logDir,
      config,
      logStartOffset = 0L,
      recoveryPoint = 0L,
      scheduler = mockTime.scheduler,
      brokerTopicStats = brokerTopicStats,
      time = mockTime,
      maxProducerIdExpirationMs = 300000,
      producerIdExpirationCheckIntervalMs = 30000,
      topicPartition = Log.parseTopicPartitionName(logDir),
      producerStateManager = stateManager,
      logDirFailureChannel = null)
    EasyMock.verify(stateManager)
    Utils.delete(cleanShutdownFile)
  }
@Test
def testRebuildProducerIdMapWithCompactedData(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid = 1L
val epoch = 0.toShort
val seq = 0
val baseOffset = 23L
// create a batch with a couple gaps to simulate compaction
val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List(
new SimpleRecord(mockTime.milliseconds(), "a".getBytes),
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes),
new SimpleRecord(mockTime.milliseconds(), "c".getBytes),
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes)))
records.batches.forEach(_.setPartitionLeaderEpoch(0))
val filtered = ByteBuffer.allocate(2048)
records.filterTo(new TopicPartition("foo", 0), new RecordFilter {
override def checkBatchRetention(batch: RecordBatch): BatchRetention = RecordFilter.BatchRetention.DELETE_EMPTY
override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = !record.hasKey
}, filtered, Int.MaxValue, BufferSupplier.NO_CACHING)
filtered.flip()
val filteredRecords = MemoryRecords.readableRecords(filtered)
log.appendAsFollower(filteredRecords)
// append some more data and then truncate to force rebuilding of the PID map
val moreRecords = TestUtils.records(baseOffset = baseOffset + 4, records = List(
new SimpleRecord(mockTime.milliseconds(), "e".getBytes),
new SimpleRecord(mockTime.milliseconds(), "f".getBytes)))
moreRecords.batches.forEach(_.setPartitionLeaderEpoch(0))
log.appendAsFollower(moreRecords)
log.truncateTo(baseOffset + 4)
val activeProducers = log.activeProducersWithLastSequence
assertTrue(activeProducers.contains(pid))
val lastSeq = activeProducers(pid)
assertEquals(3, lastSeq)
}
@Test
def testRebuildProducerStateWithEmptyCompactedBatch(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid = 1L
val epoch = 0.toShort
val seq = 0
val baseOffset = 23L
// create an empty batch
val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List(
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "a".getBytes),
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes)))
records.batches.forEach(_.setPartitionLeaderEpoch(0))
val filtered = ByteBuffer.allocate(2048)
records.filterTo(new TopicPartition("foo", 0), new RecordFilter {
override def checkBatchRetention(batch: RecordBatch): BatchRetention = RecordFilter.BatchRetention.RETAIN_EMPTY
override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = false
}, filtered, Int.MaxValue, BufferSupplier.NO_CACHING)
filtered.flip()
val filteredRecords = MemoryRecords.readableRecords(filtered)
log.appendAsFollower(filteredRecords)
// append some more data and then truncate to force rebuilding of the PID map
val moreRecords = TestUtils.records(baseOffset = baseOffset + 2, records = List(
new SimpleRecord(mockTime.milliseconds(), "e".getBytes),
new SimpleRecord(mockTime.milliseconds(), "f".getBytes)))
moreRecords.batches.forEach(_.setPartitionLeaderEpoch(0))
log.appendAsFollower(moreRecords)
log.truncateTo(baseOffset + 2)
val activeProducers = log.activeProducersWithLastSequence
assertTrue(activeProducers.contains(pid))
val lastSeq = activeProducers(pid)
assertEquals(1, lastSeq)
}
@Test
def testUpdateProducerIdMapWithCompactedData(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid = 1L
val epoch = 0.toShort
val seq = 0
val baseOffset = 23L
// create a batch with a couple gaps to simulate compaction
val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List(
new SimpleRecord(mockTime.milliseconds(), "a".getBytes),
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes),
new SimpleRecord(mockTime.milliseconds(), "c".getBytes),
new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes)))
records.batches.forEach(_.setPartitionLeaderEpoch(0))
val filtered = ByteBuffer.allocate(2048)
records.filterTo(new TopicPartition("foo", 0), new RecordFilter {
override def checkBatchRetention(batch: RecordBatch): BatchRetention = RecordFilter.BatchRetention.DELETE_EMPTY
override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = !record.hasKey
}, filtered, Int.MaxValue, BufferSupplier.NO_CACHING)
filtered.flip()
val filteredRecords = MemoryRecords.readableRecords(filtered)
log.appendAsFollower(filteredRecords)
val activeProducers = log.activeProducersWithLastSequence
assertTrue(activeProducers.contains(pid))
val lastSeq = activeProducers(pid)
assertEquals(3, lastSeq)
}
@Test
def testProducerIdMapTruncateTo(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), leaderEpoch = 0)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), leaderEpoch = 0)
log.takeProducerSnapshot()
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes))), leaderEpoch = 0)
log.takeProducerSnapshot()
log.truncateTo(2)
assertEquals(Some(2), log.latestProducerSnapshotOffset)
assertEquals(2, log.latestProducerStateEndOffset)
log.truncateTo(1)
assertEquals(Some(1), log.latestProducerSnapshotOffset)
assertEquals(1, log.latestProducerStateEndOffset)
log.truncateTo(0)
assertEquals(None, log.latestProducerSnapshotOffset)
assertEquals(0, log.latestProducerStateEndOffset)
}
@Test
def testProducerIdMapTruncateToWithNoSnapshots(): Unit = {
// This ensures that the upgrade optimization path cannot be hit after initial loading
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid = 1L
val epoch = 0.toShort
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid,
producerEpoch = epoch, sequence = 1), leaderEpoch = 0)
deleteProducerSnapshotFiles()
log.truncateTo(1L)
assertEquals(1, log.activeProducersWithLastSequence.size)
val lastSeqOpt = log.activeProducersWithLastSequence.get(pid)
assertTrue(lastSeqOpt.isDefined)
val lastSeq = lastSeqOpt.get
assertEquals(0, lastSeq)
}
@Test
def testLoadProducersAfterDeleteRecordsMidSegment(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid1 = 1L
val pid2 = 2L
val epoch = 0.toShort
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
assertEquals(2, log.activeProducersWithLastSequence.size)
log.updateHighWatermark(log.logEndOffset)
log.maybeIncrementLogStartOffset(1L)
// Deleting records should not remove producer state
assertEquals(2, log.activeProducersWithLastSequence.size)
val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2)
assertTrue(retainedLastSeqOpt.isDefined)
assertEquals(0, retainedLastSeqOpt.get)
log.close()
// Because the log start offset did not advance, producer snapshots will still be present and the state will be rebuilt
val reloadedLog = createLog(logDir, logConfig, logStartOffset = 1L)
assertEquals(2, reloadedLog.activeProducersWithLastSequence.size)
val reloadedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2)
assertEquals(retainedLastSeqOpt, reloadedLastSeqOpt)
}
@Test
def testLoadProducersAfterDeleteRecordsOnSegment(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid1 = 1L
val pid2 = 2L
val epoch = 0.toShort
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
log.roll()
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
assertEquals(2, log.logSegments.size)
assertEquals(2, log.activeProducersWithLastSequence.size)
log.updateHighWatermark(log.logEndOffset)
log.maybeIncrementLogStartOffset(1L)
log.deleteOldSegments()
// Deleting records should not remove producer state
assertEquals(1, log.logSegments.size)
assertEquals(2, log.activeProducersWithLastSequence.size)
val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2)
assertTrue(retainedLastSeqOpt.isDefined)
assertEquals(0, retainedLastSeqOpt.get)
log.close()
// After reloading log, producer state should not be regenerated
val reloadedLog = createLog(logDir, logConfig, logStartOffset = 1L)
assertEquals(1, reloadedLog.activeProducersWithLastSequence.size)
val reloadedEntryOpt = log.activeProducersWithLastSequence.get(pid2)
assertEquals(retainedLastSeqOpt, reloadedEntryOpt)
}
@Test
def testProducerIdMapTruncateFullyAndStartAt(): Unit = {
val records = TestUtils.singletonRecords("foo".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = records.sizeInBytes, retentionBytes = records.sizeInBytes * 2)
val log = createLog(logDir, logConfig)
log.appendAsLeader(records, leaderEpoch = 0)
log.takeProducerSnapshot()
log.appendAsLeader(TestUtils.singletonRecords("bar".getBytes), leaderEpoch = 0)
log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), leaderEpoch = 0)
log.takeProducerSnapshot()
assertEquals(3, log.logSegments.size)
assertEquals(3, log.latestProducerStateEndOffset)
assertEquals(Some(3), log.latestProducerSnapshotOffset)
log.truncateFullyAndStartAt(29)
assertEquals(1, log.logSegments.size)
assertEquals(None, log.latestProducerSnapshotOffset)
assertEquals(29, log.latestProducerStateEndOffset)
}
@Test
def testProducerIdExpirationOnSegmentDeletion(): Unit = {
val pid1 = 1L
val records = TestUtils.records(Seq(new SimpleRecord("foo".getBytes)), producerId = pid1, producerEpoch = 0, sequence = 0)
val logConfig = LogTest.createLogConfig(segmentBytes = records.sizeInBytes, retentionBytes = records.sizeInBytes * 2)
val log = createLog(logDir, logConfig)
log.appendAsLeader(records, leaderEpoch = 0)
log.takeProducerSnapshot()
val pid2 = 2L
log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord("bar".getBytes)), producerId = pid2, producerEpoch = 0, sequence = 0),
leaderEpoch = 0)
log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord("baz".getBytes)), producerId = pid2, producerEpoch = 0, sequence = 1),
leaderEpoch = 0)
log.takeProducerSnapshot()
assertEquals(3, log.logSegments.size)
assertEquals(Set(pid1, pid2), log.activeProducersWithLastSequence.keySet)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
// Producer state should not be removed when deleting log segment
assertEquals(2, log.logSegments.size)
assertEquals(Set(pid1, pid2), log.activeProducersWithLastSequence.keySet)
}
@Test
  def testTakeSnapshotOnRollAndDeleteSnapshotOnRecoveryPointCheckpoint(): Unit = {
    // Each roll takes a producer snapshot at the new segment's base offset; the recovery-point
    // checkpoint then prunes older snapshots, keeping those of the active and previous segment.
    val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
    val log = createLog(logDir, logConfig)
    log.appendAsLeader(TestUtils.singletonRecords("a".getBytes), leaderEpoch = 0)
    log.roll(Some(1L))
    assertEquals(Some(1L), log.latestProducerSnapshotOffset)
    assertEquals(Some(1L), log.oldestProducerSnapshotOffset)
    log.appendAsLeader(TestUtils.singletonRecords("b".getBytes), leaderEpoch = 0)
    log.roll(Some(2L))
    assertEquals(Some(2L), log.latestProducerSnapshotOffset)
    assertEquals(Some(1L), log.oldestProducerSnapshotOffset)
    log.appendAsLeader(TestUtils.singletonRecords("c".getBytes), leaderEpoch = 0)
    log.roll(Some(3L))
    assertEquals(Some(3L), log.latestProducerSnapshotOffset)
    // roll triggers a flush at the starting offset of the new segment, we should retain all snapshots
    assertEquals(Some(1L), log.oldestProducerSnapshotOffset)
    // retain the snapshots from the active segment and the previous segment, delete the oldest one
    log.deleteSnapshotsAfterRecoveryPointCheckpoint()
    assertEquals(Some(2L), log.oldestProducerSnapshotOffset)
    // even if we flush within the active segment, the snapshot should remain
    log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), leaderEpoch = 0)
    log.flush(4L)
    assertEquals(Some(3L), log.latestProducerSnapshotOffset)
    assertEquals(Some(2L), log.oldestProducerSnapshotOffset)
  }
@Test
def testProducerSnapshotAfterSegmentRollOnAppend(): Unit = {
val producerId = 1L
val logConfig = LogTest.createLogConfig(segmentBytes = 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord(mockTime.milliseconds(), new Array[Byte](512))),
producerId = producerId, producerEpoch = 0, sequence = 0),
leaderEpoch = 0)
// The next append should overflow the segment and cause it to roll
log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord(mockTime.milliseconds(), new Array[Byte](512))),
producerId = producerId, producerEpoch = 0, sequence = 1),
leaderEpoch = 0)
assertEquals(2, log.logSegments.size)
assertEquals(1L, log.activeSegment.baseOffset)
assertEquals(Some(1L), log.latestProducerSnapshotOffset)
// Force a reload from the snapshot to check its consistency
log.truncateTo(1L)
assertEquals(2, log.logSegments.size)
assertEquals(1L, log.activeSegment.baseOffset)
assertTrue(log.activeSegment.log.batches.asScala.isEmpty)
assertEquals(Some(1L), log.latestProducerSnapshotOffset)
val lastEntry = log.producerStateManager.lastEntry(producerId)
assertTrue(lastEntry.isDefined)
assertEquals(0L, lastEntry.get.firstDataOffset)
assertEquals(0L, lastEntry.get.lastDataOffset)
}
@Test
def testRebuildTransactionalState(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
val pid = 137L
val epoch = 5.toShort
val seq = 0
// add some transactional records
val records = MemoryRecords.withTransactionalRecords(CompressionType.NONE, pid, epoch, seq,
new SimpleRecord("foo".getBytes),
new SimpleRecord("bar".getBytes),
new SimpleRecord("baz".getBytes))
log.appendAsLeader(records, leaderEpoch = 0)
val abortAppendInfo = appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT)
log.updateHighWatermark(abortAppendInfo.lastOffset + 1)
// now there should be no first unstable offset
assertEquals(None, log.firstUnstableOffset)
log.close()
val reopenedLog = createLog(logDir, logConfig)
reopenedLog.updateHighWatermark(abortAppendInfo.lastOffset + 1)
assertEquals(None, reopenedLog.firstUnstableOffset)
}
private def endTxnRecords(controlRecordType: ControlRecordType,
producerId: Long,
epoch: Short,
offset: Long = 0L,
coordinatorEpoch: Int,
partitionLeaderEpoch: Int = 0,
timestamp: Long): MemoryRecords = {
val marker = new EndTransactionMarker(controlRecordType, coordinatorEpoch)
MemoryRecords.withEndTransactionMarker(offset, timestamp, partitionLeaderEpoch, producerId, epoch, marker)
}
@Test
def testPeriodicProducerIdExpiration(): Unit = {
val maxProducerIdExpirationMs = 200
val producerIdExpirationCheckIntervalMs = 100
val pid = 23L
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig, maxProducerIdExpirationMs = maxProducerIdExpirationMs,
producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs)
val records = Seq(new SimpleRecord(mockTime.milliseconds(), "foo".getBytes))
log.appendAsLeader(TestUtils.records(records, producerId = pid, producerEpoch = 0, sequence = 0), leaderEpoch = 0)
assertEquals(Set(pid), log.activeProducersWithLastSequence.keySet)
mockTime.sleep(producerIdExpirationCheckIntervalMs)
assertEquals(Set(pid), log.activeProducersWithLastSequence.keySet)
mockTime.sleep(producerIdExpirationCheckIntervalMs)
assertEquals(Set(), log.activeProducersWithLastSequence.keySet)
}
@Test
  def testDuplicateAppends(): Unit = {
    // Exercises leader-side idempotent-producer deduplication: an exact duplicate of one of the
    // most recent batches returns the original append info, while partial duplicates and batches
    // older than the retained window (last 5 batches) fail with OutOfOrderSequenceException.
    // create a log
    val log = createLog(logDir, LogConfig())
    val pid = 1L
    val epoch: Short = 0
    // `seq` tracks the producer's next expected sequence number throughout the test.
    var seq = 0
    // Pad the beginning of the log.
    for (_ <- 0 to 5) {
      val record = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)),
        producerId = pid, producerEpoch = epoch, sequence = seq)
      log.appendAsLeader(record, leaderEpoch = 0)
      seq = seq + 1
    }
    // Append an entry with multiple log records.
    def createRecords = TestUtils.records(List(
      new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes),
      new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes),
      new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes)
    ), producerId = pid, producerEpoch = epoch, sequence = seq)
    val multiEntryAppendInfo = log.appendAsLeader(createRecords, leaderEpoch = 0)
    assertEquals("should have appended 3 entries", multiEntryAppendInfo.lastOffset - multiEntryAppendInfo.firstOffset.get + 1, 3)
    // Append a Duplicate of the tail, when the entry at the tail has multiple records.
    // The duplicate must be deduplicated: same first/last offsets as the original append.
    val dupMultiEntryAppendInfo = log.appendAsLeader(createRecords, leaderEpoch = 0)
    assertEquals("Somehow appended a duplicate entry with multiple log records to the tail",
      multiEntryAppendInfo.firstOffset.get, dupMultiEntryAppendInfo.firstOffset.get)
    assertEquals("Somehow appended a duplicate entry with multiple log records to the tail",
      multiEntryAppendInfo.lastOffset, dupMultiEntryAppendInfo.lastOffset)
    seq = seq + 3
    // Append a partial duplicate of the tail. This is not allowed.
    try {
      val records = TestUtils.records(
        List(
          new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes),
          new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes)),
        producerId = pid, producerEpoch = epoch, sequence = seq - 2)
      log.appendAsLeader(records, leaderEpoch = 0)
      fail("Should have received an OutOfOrderSequenceException since we attempted to append a duplicate of a records " +
        "in the middle of the log.")
    } catch {
      case _: OutOfOrderSequenceException => // Good!
    }
    // Append a duplicate of the batch which is 4th from the tail. This should succeed without error since we
    // retain the batch metadata of the last 5 batches.
    val duplicateOfFourth = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)),
      producerId = pid, producerEpoch = epoch, sequence = 2)
    log.appendAsLeader(duplicateOfFourth, leaderEpoch = 0)
    // Duplicates at older entries are reported as OutOfOrderSequence errors
    try {
      val records = TestUtils.records(
        List(new SimpleRecord(mockTime.milliseconds, s"key-1".getBytes, s"value-1".getBytes)),
        producerId = pid, producerEpoch = epoch, sequence = 1)
      log.appendAsLeader(records, leaderEpoch = 0)
      fail("Should have received an OutOfOrderSequenceException since we attempted to append a duplicate of a batch " +
        "which is older than the last 5 appended batches.")
    } catch {
      case _: OutOfOrderSequenceException => // Good!
    }
    // Append a duplicate entry with a single records at the tail of the log. This should return the appendInfo of the original entry.
    def createRecordsWithDuplicate = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)),
      producerId = pid, producerEpoch = epoch, sequence = seq)
    val origAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, leaderEpoch = 0)
    val newAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, leaderEpoch = 0)
    assertEquals("Inserted a duplicate records into the log", origAppendInfo.firstOffset.get, newAppendInfo.firstOffset.get)
    assertEquals("Inserted a duplicate records into the log", origAppendInfo.lastOffset, newAppendInfo.lastOffset)
  }
@Test
def testMultipleProducerIdsPerMemoryRecord(): Unit = {
// create a log
val log = createLog(logDir, LogConfig())
val epoch: Short = 0
val buffer = ByteBuffer.allocate(512)
var builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), 1L, epoch, 0, false, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), 2L, epoch, 0, false, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), 3L, epoch, 0, false, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), 4L, epoch, 0, false, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
buffer.flip()
val memoryRecords = MemoryRecords.readableRecords(buffer)
log.appendAsFollower(memoryRecords)
log.flush()
val fetchedData = readLog(log, 0, Int.MaxValue)
val origIterator = memoryRecords.batches.iterator()
for (batch <- fetchedData.records.batches.asScala) {
assertTrue(origIterator.hasNext)
val origEntry = origIterator.next()
assertEquals(origEntry.producerId, batch.producerId)
assertEquals(origEntry.baseOffset, batch.baseOffset)
assertEquals(origEntry.baseSequence, batch.baseSequence)
}
}
@Test
def testDuplicateAppendToFollower(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
val epoch: Short = 0
val pid = 1L
val baseSequence = 0
val partitionLeaderEpoch = 0
// The point of this test is to ensure that validation isn't performed on the follower.
// this is a bit contrived. to trigger the duplicate case for a follower append, we have to append
// a batch with matching sequence numbers, but valid increasing offsets
assertEquals(0L, log.logEndOffset)
log.appendAsFollower(MemoryRecords.withIdempotentRecords(0L, CompressionType.NONE, pid, epoch, baseSequence,
partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)))
log.appendAsFollower(MemoryRecords.withIdempotentRecords(2L, CompressionType.NONE, pid, epoch, baseSequence,
partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)))
// Ensure that even the duplicate sequences are accepted on the follower.
assertEquals(4L, log.logEndOffset)
}
@Test
def testMultipleProducersWithDuplicatesInSingleAppend(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
val pid1 = 1L
val pid2 = 2L
val epoch: Short = 0
val buffer = ByteBuffer.allocate(512)
// pid1 seq = 0
var builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), pid1, epoch, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
// pid2 seq = 0
builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), pid2, epoch, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
// pid1 seq = 1
builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), pid1, epoch, 1)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
// pid2 seq = 1
builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), pid2, epoch, 1)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
// // pid1 seq = 1 (duplicate)
builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, 4L, mockTime.milliseconds(), pid1, epoch, 1)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
records.batches.forEach(_.setPartitionLeaderEpoch(0))
// Ensure that batches with duplicates are accepted on the follower.
assertEquals(0L, log.logEndOffset)
log.appendAsFollower(records)
assertEquals(5L, log.logEndOffset)
}
@Test(expected = classOf[ProducerFencedException])
def testOldProducerEpoch(): Unit = {
// create a log
val log = createLog(logDir, LogConfig())
val pid = 1L
val newEpoch: Short = 1
val oldEpoch: Short = 0
val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = newEpoch, sequence = 0)
log.appendAsLeader(records, leaderEpoch = 0)
val nextRecords = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = oldEpoch, sequence = 0)
log.appendAsLeader(nextRecords, leaderEpoch = 0)
}
@Test
def testDeleteSnapshotsOnIncrementLogStartOffset(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 2048 * 5)
val log = createLog(logDir, logConfig)
val pid1 = 1L
val pid2 = 2L
val epoch = 0.toShort
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
log.roll()
log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2,
producerEpoch = epoch, sequence = 0), leaderEpoch = 0)
log.roll()
assertEquals(2, log.activeProducersWithLastSequence.size)
assertEquals(2, ProducerStateManager.listSnapshotFiles(log.producerStateManager.logDir).size)
log.updateHighWatermark(log.logEndOffset)
log.maybeIncrementLogStartOffset(2L)
// Deleting records should not remove producer state but should delete snapshots
assertEquals(2, log.activeProducersWithLastSequence.size)
assertEquals(1, ProducerStateManager.listSnapshotFiles(log.producerStateManager.logDir).size)
val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2)
assertTrue(retainedLastSeqOpt.isDefined)
assertEquals(0, retainedLastSeqOpt.get)
}
  /**
   * Test for jittered time-based log rolling. This test appends messages, then advances the time
   * using the mock clock to force the log to roll, and checks the number of segments.
   */
@Test
def testTimeBasedLogRollJitter(): Unit = {
var set = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
val maxJitter = 20 * 60L
// create a log
val logConfig = LogTest.createLogConfig(segmentMs = 1 * 60 * 60L, segmentJitterMs = maxJitter)
val log = createLog(logDir, logConfig)
assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
log.appendAsLeader(set, leaderEpoch = 0)
mockTime.sleep(log.config.segmentMs - maxJitter)
set = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
log.appendAsLeader(set, leaderEpoch = 0)
assertEquals("Log does not roll on this append because it occurs earlier than max jitter", 1, log.numberOfSegments)
mockTime.sleep(maxJitter - log.activeSegment.rollJitterMs + 1)
set = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
log.appendAsLeader(set, leaderEpoch = 0)
assertEquals("Log should roll after segmentMs adjusted by random jitter", 2, log.numberOfSegments)
}
/**
* Test that appending more than the maximum segment size rolls the log
*/
@Test
def testSizeBasedLogRoll(): Unit = {
def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
val setSize = createRecords.sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
// create a log
val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize)
val log = createLog(logDir, logConfig)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
// segments expire in size
for (_ <- 1 to (msgPerSeg + 1))
log.appendAsLeader(createRecords, leaderEpoch = 0)
assertEquals("There should be exactly 2 segments.", 2, log.numberOfSegments)
}
/**
* Test that we can open and append to an empty log
*/
@Test
def testLoadEmptyLog(): Unit = {
createEmptyLogs(logDir, 0)
val log = createLog(logDir, LogConfig())
log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds), leaderEpoch = 0)
}
/**
* This test case appends a bunch of messages and checks that we can read them all back using sequential offsets.
*/
@Test
def testAppendAndReadWithSequentialOffsets(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 71)
val log = createLog(logDir, logConfig)
val values = (0 until 100 by 2).map(id => id.toString.getBytes).toArray
for(value <- values)
log.appendAsLeader(TestUtils.singletonRecords(value = value), leaderEpoch = 0)
for(i <- values.indices) {
val read = readLog(log, i, 1).records.batches.iterator.next()
assertEquals("Offset read should match order appended.", i, read.lastOffset)
val actual = read.iterator.next()
assertNull("Key should be null", actual.key)
assertEquals("Values not equal", ByteBuffer.wrap(values(i)), actual.value)
}
assertEquals("Reading beyond the last message returns nothing.", 0,
readLog(log, values.length, 100).records.batches.asScala.size)
}
  /**
   * This test appends a bunch of messages with non-sequential offsets and checks that we can read the correct message
   * from any offset less than the logEndOffset, including offsets that were not appended.
   */
@Test
def testAppendAndReadWithNonSequentialOffsets(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 72)
val log = createLog(logDir, logConfig)
val messageIds = ((0 until 50) ++ (50 until 200 by 7)).toArray
val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes))
// now test the case that we give the offsets and use non-sequential offsets
for(i <- records.indices)
log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), CompressionType.NONE, 0, records(i)))
for(i <- 50 until messageIds.max) {
val idx = messageIds.indexWhere(_ >= i)
val read = readLog(log, i, 100).records.records.iterator.next()
assertEquals("Offset read should match message id.", messageIds(idx), read.offset)
assertEquals("Message should match appended.", records(idx), new SimpleRecord(read))
}
}
/**
 * Covers the odd case where an offset gap falls exactly at a segment boundary:
 * after truncating the first segment down to its single message at offset 0, a
 * read at offset 1 must be served from the second segment even though the first
 * segment has the greatest lower bound on that offset.
 */
@Test
def testReadAtLogGap(): Unit = {
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 300))

  // keep appending until the log rolls, leaving a single message in the second segment
  while (log.numberOfSegments == 1)
    log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), leaderEpoch = 0)

  // manually truncate the first segment to one message, creating an offset gap
  log.logSegments.head.truncateTo(1)

  assertEquals("A read should now return the last message in the log", log.logEndOffset - 1,
    readLog(log, 1, 200).records.batches.iterator.next().lastOffset)
}
/**
 * Rolling the log after its underlying file handlers have been closed must
 * surface a KafkaStorageException.
 */
@Test(expected = classOf[KafkaStorageException])
def testLogRollAfterLogHandlerClosed(): Unit = {
  val log = createLog(logDir, LogTest.createLogConfig())
  log.closeHandlers()
  log.roll(Some(1L))
}
/**
 * Verifies that fetches return the correct first record regardless of the
 * max-length limit used: for a spread of fetch sizes, a read at any valid
 * offset must surface the same (correct) record.
 */
@Test
def testReadWithMinMessage(): Unit = {
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 72))
  val messageIds = ((0 until 50) ++ (50 until 200 by 7)).toArray
  val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes))

  // append with explicit, non-sequential offsets via the follower path
  records.indices.foreach { i =>
    log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), CompressionType.NONE, 0, records(i)))
  }

  (50 until messageIds.max).foreach { i =>
    val idx = messageIds.indexWhere(_ >= i)
    // same fetch sizes as before: tiny, huge, and moderate
    Seq(1, 100000, 100).map(maxLength => readLog(log, i, maxLength)).foreach { fetchInfo =>
      val read = fetchInfo.records.records.iterator.next()
      assertEquals("Offset read should match message id.", messageIds(idx), read.offset)
      assertEquals("Message should match appended.", records(idx), new SimpleRecord(read))
    }
  }
}
/**
 * Verifies fetch behavior when maxLength is too small to hold a full record
 * and minOneMessage is disabled: a zero-length fetch returns the empty record
 * set, while a one-byte fetch returns a truncated (incomplete) first entry
 * rather than nothing.
 */
@Test
def testReadWithTooSmallMaxLength(): Unit = {
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 72))
  val messageIds = ((0 until 50) ++ (50 until 200 by 7)).toArray
  val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes))

  // append with explicit, non-sequential offsets via the follower path
  records.indices.foreach { i =>
    log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), CompressionType.NONE, 0, records(i)))
  }

  (50 until messageIds.max).foreach { i =>
    assertEquals(MemoryRecords.EMPTY, readLog(log, i, maxLength = 0, minOneMessage = false).records)

    // An incomplete message (not an empty set) is returned here: fetch request
    // versions 2 and below use this to tell consumers that the message is larger
    // than the fetch size. From fetch request v3 onward this is unnecessary,
    // since oversized messages are returned whole from the first non-empty
    // partition.
    val fetchInfo = readLog(log, i, maxLength = 1, minOneMessage = false)
    assertTrue(fetchInfo.firstEntryIncomplete)
    assertTrue(fetchInfo.records.isInstanceOf[FileRecords])
    assertEquals(1, fetchInfo.records.sizeInBytes)
  }
}
/**
 * Test reading at the boundary of the log, specifically
 * - reading from the logEndOffset should give an empty message set
 * - reading below the log start offset should throw an OffsetOutOfRangeException
 * - reading beyond the log end offset should throw an OffsetOutOfRangeException
 */
@Test
def testReadOutOfRange(): Unit = {
  createEmptyLogs(logDir, 1024)
  // set up replica log starting with offset 1024 and with one message (at offset 1024)
  val logConfig = LogTest.createLogConfig(segmentBytes = 1024)
  val log = createLog(logDir, logConfig)
  log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), leaderEpoch = 0)

  assertEquals("Reading at the log end offset should produce 0 byte read.", 0,
    readLog(log, 1025, 1000).records.sizeInBytes)

  // reading below the log start offset must fail
  // (assertThrows replaces the deprecated try/fail/catch pattern, matching the
  // style already used elsewhere in this suite)
  assertThrows[OffsetOutOfRangeException] {
    readLog(log, 0, 1000)
  }

  // reading beyond the log end offset must fail as well
  assertThrows[OffsetOutOfRangeException] {
    readLog(log, 1026, 1000)
  }
}
/**
 * Test that covers reads and writes on a multisegment log. This test appends a bunch of messages
 * and then reads them all back and checks that the message read and offset matches what was appended.
 * Finally it checks that rolling the log forced a flush of the previous segments.
 */
@Test
def testLogRolls(): Unit = {
  /* create a multipart log with 100 messages */
  val logConfig = LogTest.createLogConfig(segmentBytes = 100)
  val log = createLog(logDir, logConfig)
  val numMessages = 100
  val messageSets = (0 until numMessages).map(i => TestUtils.singletonRecords(value = i.toString.getBytes,
                                                                              timestamp = mockTime.milliseconds))
  messageSets.foreach(log.appendAsLeader(_, leaderEpoch = 0))
  log.flush()

  /* do successive reads to ensure all our messages are there */
  var offset = 0L
  for (i <- 0 until numMessages) {
    val messages = readLog(log, offset, 1024*1024).records.batches
    val head = messages.iterator.next()
    assertEquals("Offsets not equal", offset, head.lastOffset)

    val expected = messageSets(i).records.iterator.next()
    val actual = head.iterator.next()
    assertEquals(s"Keys not equal at offset $offset", expected.key, actual.key)
    assertEquals(s"Values not equal at offset $offset", expected.value, actual.value)
    assertEquals(s"Timestamps not equal at offset $offset", expected.timestamp, actual.timestamp)
    offset = head.lastOffset + 1
  }
  val lastRead = readLog(log, startOffset = numMessages, maxLength = 1024*1024).records
  assertEquals("Should be no more messages", 0, lastRead.records.asScala.size)

  // check that rolling the log forced a flush; the flush is async so retry in case of failure
  TestUtils.retry(1000L) {
    // fixed assertion message typo: "Log role" -> "Log roll"
    assertTrue("Log roll should have forced flush", log.recoveryPoint >= log.activeSegment.baseOffset)
  }
}
/**
 * Test reads at offsets that fall inside compressed message set boundaries: a
 * read anywhere within a compressed batch should return that batch's first
 * message.
 */
@Test
def testCompressedMessages(): Unit = {
  /* this log should roll after every messageset */
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 110))

  /* append 2 compressed message sets, each with two messages giving offsets 0, 1, 2, 3 */
  log.appendAsLeader(MemoryRecords.withRecords(CompressionType.GZIP, new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), leaderEpoch = 0)
  log.appendAsLeader(MemoryRecords.withRecords(CompressionType.GZIP, new SimpleRecord("alpha".getBytes), new SimpleRecord("beta".getBytes)), leaderEpoch = 0)

  def firstOffsetAt(offset: Int) = readLog(log, offset, 4096).records.records.iterator.next().offset

  /* we should always get the first message in the compressed set when reading any offset in the set */
  assertEquals("Read at offset 0 should produce 0", 0, firstOffsetAt(0))
  assertEquals("Read at offset 1 should produce 0", 0, firstOffsetAt(1))
  assertEquals("Read at offset 2 should produce 2", 2, firstOffsetAt(2))
  assertEquals("Read at offset 3 should produce 2", 2, firstOffsetAt(3))
}
/**
 * Test garbage collecting old segments: deleting expired segments must not
 * change the log end offset, and subsequent appends must continue from it.
 */
@Test
def testThatGarbageCollectingSegmentsDoesntChangeOffset(): Unit = {
  for (messagesToAppend <- List(0, 1, 25)) {
    logDir.mkdirs()
    // first test a log segment starting at 0
    val logConfig = LogTest.createLogConfig(segmentBytes = 100, retentionMs = 0)
    val log = createLog(logDir, logConfig)
    for (i <- 0 until messagesToAppend)
      log.appendAsLeader(TestUtils.singletonRecords(value = i.toString.getBytes, timestamp = mockTime.milliseconds - 10), leaderEpoch = 0)

    val currOffset = log.logEndOffset
    // JUnit convention: expected value first, actual second (was reversed)
    assertEquals(messagesToAppend.toLong, currOffset)

    // time goes by; the log file is deleted
    log.updateHighWatermark(currOffset)
    log.deleteOldSegments()

    assertEquals("Deleting segments shouldn't have changed the logEndOffset", currOffset, log.logEndOffset)
    assertEquals("We should still have one segment left", 1, log.numberOfSegments)
    assertEquals("Further collection shouldn't delete anything", 0, log.deleteOldSegments())
    assertEquals("Still no change in the logEndOffset", currOffset, log.logEndOffset)
    assertEquals("Should still be able to append and should get the logEndOffset assigned to the new append",
                 currOffset,
                 log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, timestamp = mockTime.milliseconds), leaderEpoch = 0).firstOffset.get)

    // cleanup the log
    log.delete()
  }
}
/**
 * MessageSet size shouldn't exceed the config.segmentSize; check that this is properly
 * enforced by appending a message set larger than the config.segmentSize setting and
 * verifying that a RecordBatchTooLargeException is thrown.
 */
@Test
def testMessageSetSizeCheck(): Unit = {
  val messageSet = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("You".getBytes), new SimpleRecord("bethe".getBytes))
  // configure the segment size to be one byte smaller than the batch so the append must fail
  val configSegmentSize = messageSet.sizeInBytes - 1
  val logConfig = LogTest.createLogConfig(segmentBytes = configSegmentSize)
  val log = createLog(logDir, logConfig)

  // assertThrows replaces the deprecated try/fail/catch pattern, matching the
  // style already used elsewhere in this suite
  assertThrows[RecordBatchTooLargeException] {
    log.appendAsLeader(messageSet, leaderEpoch = 0)
  }
}
/**
 * Verifies the key constraint on compacted topics: any append containing a record
 * without a key must be rejected with a RecordValidationException identifying the
 * offending record, the NoKeyCompactedTopicRecordsPerSec metric must be recorded,
 * and appends in which every record is keyed (compressed or not) must succeed.
 */
@Test
def testCompactedTopicConstraints(): Unit = {
// fixture records: two keyed, one unkeyed
val keyedMessage = new SimpleRecord("and here it is".getBytes, "this message has a key".getBytes)
val anotherKeyedMessage = new SimpleRecord("another key".getBytes, "this message also has a key".getBytes)
val unkeyedMessage = new SimpleRecord("this message does not have a key".getBytes)
// record sets mixing keyed/unkeyed records, with and without compression
val messageSetWithUnkeyedMessage = MemoryRecords.withRecords(CompressionType.NONE, unkeyedMessage, keyedMessage)
val messageSetWithOneUnkeyedMessage = MemoryRecords.withRecords(CompressionType.NONE, unkeyedMessage)
val messageSetWithCompressedKeyedMessage = MemoryRecords.withRecords(CompressionType.GZIP, keyedMessage)
val messageSetWithCompressedUnkeyedMessage = MemoryRecords.withRecords(CompressionType.GZIP, keyedMessage, unkeyedMessage)
val messageSetWithKeyedMessage = MemoryRecords.withRecords(CompressionType.NONE, keyedMessage)
val messageSetWithKeyedMessages = MemoryRecords.withRecords(CompressionType.NONE, keyedMessage, anotherKeyedMessage)
// key validation only applies under the compact cleanup policy
val logConfig = LogTest.createLogConfig(cleanupPolicy = LogConfig.Compact)
val log = createLog(logDir, logConfig)
val errorMsgPrefix = "Compacted topic cannot accept message without key"
// unkeyed record followed by a keyed one: rejected, error reported at index 0
var e = intercept[RecordValidationException] {
log.appendAsLeader(messageSetWithUnkeyedMessage, leaderEpoch = 0)
}
assertTrue(e.invalidException.isInstanceOf[InvalidRecordException])
assertEquals(1, e.recordErrors.size)
assertEquals(0, e.recordErrors.head.batchIndex)
assertTrue(e.recordErrors.head.message.startsWith(errorMsgPrefix))
// a single unkeyed record: rejected, error reported at index 0
e = intercept[RecordValidationException] {
log.appendAsLeader(messageSetWithOneUnkeyedMessage, leaderEpoch = 0)
}
assertTrue(e.invalidException.isInstanceOf[InvalidRecordException])
assertEquals(1, e.recordErrors.size)
assertEquals(0, e.recordErrors.head.batchIndex)
assertTrue(e.recordErrors.head.message.startsWith(errorMsgPrefix))
// compressed set whose second record is the unkeyed one: rejected at index 1
e = intercept[RecordValidationException] {
log.appendAsLeader(messageSetWithCompressedUnkeyedMessage, leaderEpoch = 0)
}
assertTrue(e.invalidException.isInstanceOf[InvalidRecordException])
assertEquals(1, e.recordErrors.size)
assertEquals(1, e.recordErrors.head.batchIndex) // batch index is 1
assertTrue(e.recordErrors.head.message.startsWith(errorMsgPrefix))
// check if metric for NoKeyCompactedTopicRecordsPerSec is logged
assertEquals(metricsKeySet.count(_.getMBeanName.endsWith(s"${BrokerTopicStats.NoKeyCompactedTopicRecordsPerSec}")), 1)
assertTrue(TestUtils.meterCount(s"${BrokerTopicStats.NoKeyCompactedTopicRecordsPerSec}") > 0)
// the following should succeed without any InvalidMessageException
log.appendAsLeader(messageSetWithKeyedMessage, leaderEpoch = 0)
log.appendAsLeader(messageSetWithKeyedMessages, leaderEpoch = 0)
log.appendAsLeader(messageSetWithCompressedKeyedMessage, leaderEpoch = 0)
}
/**
 * We have a max size limit on message appends; check that it is properly enforced by
 * appending a message larger than the setting and verifying that a
 * RecordTooLargeException is thrown.
 */
@Test
def testMessageSizeCheck(): Unit = {
  val first = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("You".getBytes), new SimpleRecord("bethe".getBytes))
  val second = MemoryRecords.withRecords(CompressionType.NONE,
    new SimpleRecord("change (I need more bytes)... blah blah blah.".getBytes),
    new SimpleRecord("More padding boo hoo".getBytes))

  // configure the max message size to be one byte smaller than the second batch
  val maxMessageSize = second.sizeInBytes - 1
  val logConfig = LogTest.createLogConfig(maxMessageBytes = maxMessageSize)
  val log = createLog(logDir, logConfig)

  // should be able to append the small message
  log.appendAsLeader(first, leaderEpoch = 0)

  // the oversized batch must be rejected (the old fail() message named a
  // non-existent "MessageSizeTooLargeException"; the thrown type is
  // RecordTooLargeException)
  assertThrows[RecordTooLargeException] {
    log.appendAsLeader(second, leaderEpoch = 0)
  }
}
/**
 * Append a bunch of messages to a log and then re-open it both with and without recovery and check that the log re-initializes correctly.
 */
@Test
def testLogRecoversToCorrectOffset(): Unit = {
val numMessages = 100
val messageSize = 100
val segmentSize = 7 * messageSize
val indexInterval = 3 * messageSize
val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize, indexIntervalBytes = indexInterval, segmentIndexBytes = 4096)
var log = createLog(logDir, logConfig)
for(i <- 0 until numMessages)
log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(messageSize),
timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0)
assertEquals("After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages), numMessages, log.logEndOffset)
// snapshot the active segment's index state so we can compare after reopening
val lastIndexOffset = log.activeSegment.offsetIndex.lastOffset
val numIndexEntries = log.activeSegment.offsetIndex.entries
val lastOffset = log.logEndOffset
// After segment is closed, the last entry in the time index should be (largest timestamp -> last offset).
val lastTimeIndexOffset = log.logEndOffset - 1
val lastTimeIndexTimestamp = log.activeSegment.largestTimestamp
// Depending on when the last time index entry is inserted, an entry may or may not be inserted into the time index.
val numTimeIndexEntries = log.activeSegment.timeIndex.entries + {
if (log.activeSegment.timeIndex.lastEntry.offset == log.logEndOffset - 1) 0 else 1
}
log.close()
// Helper: a reopened log must end at the expected recovery point with the same
// index/time-index state captured above.
def verifyRecoveredLog(log: Log, expectedRecoveryPoint: Long): Unit = {
assertEquals(s"Unexpected recovery point", expectedRecoveryPoint, log.recoveryPoint)
assertEquals(s"Should have $numMessages messages when log is reopened w/o recovery", numMessages, log.logEndOffset)
assertEquals("Should have same last index offset as before.", lastIndexOffset, log.activeSegment.offsetIndex.lastOffset)
assertEquals("Should have same number of index entries as before.", numIndexEntries, log.activeSegment.offsetIndex.entries)
assertEquals("Should have same last time index timestamp", lastTimeIndexTimestamp, log.activeSegment.timeIndex.lastEntry.timestamp)
assertEquals("Should have same last time index offset", lastTimeIndexOffset, log.activeSegment.timeIndex.lastEntry.offset)
assertEquals("Should have same number of time index entries as before.", numTimeIndexEntries, log.activeSegment.timeIndex.entries)
}
// reopen with the recovery point already at the end of the log (no recovery needed)
log = createLog(logDir, logConfig, recoveryPoint = lastOffset)
verifyRecoveredLog(log, lastOffset)
log.close()
// test recovery case
log = createLog(logDir, logConfig)
verifyRecoveredLog(log, lastOffset)
log.close()
}
/**
 * Test building the time index on the follower path, i.e. without the log
 * assigning offsets: with indexIntervalBytes = 1, each appended batch beyond
 * the first should contribute a time index entry.
 */
@Test
def testBuildTimeIndexWhenNotAssigningOffsets(): Unit = {
  val numMessages = 100
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 10000, indexIntervalBytes = 1))

  (0 until numMessages)
    .map(i => MemoryRecords.withRecords(100 + i, CompressionType.NONE, 0, new SimpleRecord(mockTime.milliseconds + i, i.toString.getBytes())))
    .foreach(log.appendAsFollower)

  val timeIndexEntries = log.logSegments.map(_.timeIndex.entries).sum
  assertEquals(s"There should be ${numMessages - 1} time index entries", numMessages - 1, timeIndexEntries)
  assertEquals(s"The last time index entry should have timestamp ${mockTime.milliseconds + numMessages - 1}",
    mockTime.milliseconds + numMessages - 1, log.activeSegment.timeIndex.lastEntry.timestamp)
}
/**
 * Test that if we manually delete an index segment it is rebuilt when the log is re-opened
 */
@Test
def testIndexRebuild(): Unit = {
// publish the messages and close the log
val numMessages = 200
val logConfig = LogTest.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1)
var log = createLog(logDir, logConfig)
for(i <- 0 until numMessages)
log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0)
// remember where the index files live before closing
val indexFiles = log.logSegments.map(_.lazyOffsetIndex.file)
val timeIndexFiles = log.logSegments.map(_.lazyTimeIndex.file)
log.close()
// delete all the index files
indexFiles.foreach(_.delete())
timeIndexFiles.foreach(_.delete())
// reopen the log; the indexes should be rebuilt from the log segments
log = createLog(logDir, logConfig)
assertEquals("Should have %d messages when log is reopened".format(numMessages), numMessages, log.logEndOffset)
assertTrue("The index should have been rebuilt", log.logSegments.head.offsetIndex.entries > 0)
assertTrue("The time index should have been rebuilt", log.logSegments.head.timeIndex.entries > 0)
// every message must be reachable both by offset and by timestamp lookup
for(i <- 0 until numMessages) {
assertEquals(i, readLog(log, i, 100).records.batches.iterator.next().lastOffset)
if (i == 0)
assertEquals(log.logSegments.head.baseOffset, log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).get.offset)
else
assertEquals(i, log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).get.offset)
}
log.close()
}
/**
 * fetchOffsetByTimestamp should return the leader epoch that was in force when the
 * matching record was appended, and the special earliest/latest timestamp lookups
 * should carry the epoch of the corresponding boundary offset.
 */
@Test
def testFetchOffsetByTimestampIncludesLeaderEpoch(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1)
val log = createLog(logDir, logConfig)
// an empty log has no offset for any timestamp
assertEquals(None, log.fetchOffsetByTimestamp(0L))
val firstTimestamp = mockTime.milliseconds
val firstLeaderEpoch = 0
log.appendAsLeader(TestUtils.singletonRecords(
value = TestUtils.randomBytes(10),
timestamp = firstTimestamp),
leaderEpoch = firstLeaderEpoch)
// second record is appended under a newer leader epoch
val secondTimestamp = firstTimestamp + 1
val secondLeaderEpoch = 1
log.appendAsLeader(TestUtils.singletonRecords(
value = TestUtils.randomBytes(10),
timestamp = secondTimestamp),
leaderEpoch = secondLeaderEpoch)
// exact-timestamp lookups return the epoch in force at append time
assertEquals(Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))),
log.fetchOffsetByTimestamp(firstTimestamp))
assertEquals(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))),
log.fetchOffsetByTimestamp(secondTimestamp))
// earliest/latest lookups report UNKNOWN_TIMESTAMP but still carry the boundary epoch
assertEquals(Some(new TimestampAndOffset(ListOffsetResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))),
log.fetchOffsetByTimestamp(ListOffsetRequest.EARLIEST_TIMESTAMP))
assertEquals(Some(new TimestampAndOffset(ListOffsetResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))),
log.fetchOffsetByTimestamp(ListOffsetRequest.LATEST_TIMESTAMP))
// The cache can be updated directly after a leader change.
// The new latest offset should reflect the updated epoch.
log.maybeAssignEpochStartOffset(2, 2L)
assertEquals(Some(new TimestampAndOffset(ListOffsetResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))),
log.fetchOffsetByTimestamp(ListOffsetRequest.LATEST_TIMESTAMP))
}
/**
 * Test that if messages format version of the messages in a segment is before 0.10.0, the time index should be empty.
 */
@Test
def testRebuildTimeIndexForOldMessages(): Unit = {
val numMessages = 200
val segmentSize = 200
// message format 0.9.0 with magic v1 records appended below (pre-0.10.0 format)
val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize, indexIntervalBytes = 1, messageFormatVersion = "0.9.0")
var log = createLog(logDir, logConfig)
for (i <- 0 until numMessages)
log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10),
timestamp = mockTime.milliseconds + i * 10, magicValue = RecordBatch.MAGIC_VALUE_V1), leaderEpoch = 0)
val timeIndexFiles = log.logSegments.map(_.lazyTimeIndex.file)
log.close()
// Delete the time index.
timeIndexFiles.foreach(file => Files.delete(file.toPath))
// The rebuilt time index should be empty
// (recoveryPoint is set past the log end — presumably to skip recovery; note
// that .init below excludes the last (active) segment from the check)
log = createLog(logDir, logConfig, recoveryPoint = numMessages + 1)
for (segment <- log.logSegments.init) {
assertEquals("The time index should be empty", 0, segment.timeIndex.entries)
assertEquals("The time index file size should be 0", 0, segment.lazyTimeIndex.file.length)
}
}
/**
 * Test that if we have corrupted an index segment it is rebuilt when the log is re-opened
 */
@Test
def testCorruptIndexRebuild(): Unit = {
// publish the messages and close the log
val numMessages = 200
val logConfig = LogTest.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1)
var log = createLog(logDir, logConfig)
for(i <- 0 until numMessages)
log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0)
val indexFiles = log.logSegments.map(_.lazyOffsetIndex.file)
val timeIndexFiles = log.logSegments.map(_.lazyTimeIndex.file)
log.close()
// corrupt all the offset index files by overwriting them with whitespace
for( file <- indexFiles) {
val bw = new BufferedWriter(new FileWriter(file))
bw.write(" ")
bw.close()
}
// corrupt all the time index files in the same way
for( file <- timeIndexFiles) {
val bw = new BufferedWriter(new FileWriter(file))
bw.write(" ")
bw.close()
}
// reopen the log with recovery point=0 so that the segment recovery can be triggered
log = createLog(logDir, logConfig)
assertEquals("Should have %d messages when log is reopened".format(numMessages), numMessages, log.logEndOffset)
// every message must be reachable both by offset and by timestamp lookup
for(i <- 0 until numMessages) {
assertEquals(i, readLog(log, i, 100).records.batches.iterator.next().lastOffset)
if (i == 0)
assertEquals(log.logSegments.head.baseOffset, log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).get.offset)
else
assertEquals(i, log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).get.offset)
}
log.close()
}
/**
 * Test the Log truncate operations: truncating at/beyond the end is a no-op,
 * truncating mid-log and to zero shrink the log, and truncateFullyAndStartAt
 * empties the log at a new start offset.
 *
 * Fixed: several assertEquals calls had the expected/actual arguments reversed
 * (JUnit convention is expected first).
 */
@Test
def testTruncateTo(): Unit = {
  def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
  val setSize = createRecords.sizeInBytes
  val msgPerSeg = 10
  val segmentSize = msgPerSeg * setSize  // each segment will be 10 messages

  // create a log
  val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize)
  val log = createLog(logDir, logConfig)
  assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)

  for (_ <- 1 to msgPerSeg)
    log.appendAsLeader(createRecords, leaderEpoch = 0)

  assertEquals("There should be exactly 1 segments.", 1, log.numberOfSegments)
  assertEquals("Log end offset should be equal to number of messages", msgPerSeg, log.logEndOffset)

  val lastOffset = log.logEndOffset
  val size = log.size
  log.truncateTo(log.logEndOffset) // keep the entire log
  assertEquals("Should not change offset", lastOffset, log.logEndOffset)
  assertEquals("Should not change log size", size, log.size)
  log.truncateTo(log.logEndOffset + 1) // try to truncate beyond lastOffset
  assertEquals("Should not change offset but should log error", lastOffset, log.logEndOffset)
  assertEquals("Should not change log size", size, log.size)
  log.truncateTo(msgPerSeg/2) // truncate somewhere in between
  assertEquals("Should change offset", msgPerSeg/2, log.logEndOffset)
  assertTrue("Should change log size", log.size < size)
  log.truncateTo(0) // truncate the entire log
  assertEquals("Should change offset", 0, log.logEndOffset)
  assertEquals("Should change log size", 0, log.size)

  for (_ <- 1 to msgPerSeg)
    log.appendAsLeader(createRecords, leaderEpoch = 0)

  assertEquals("Should be back to original offset", lastOffset, log.logEndOffset)
  assertEquals("Should be back to original size", size, log.size)
  log.truncateFullyAndStartAt(log.logEndOffset - (msgPerSeg - 1))
  assertEquals("Should change offset", lastOffset - (msgPerSeg - 1), log.logEndOffset)
  assertEquals("Should change log size", 0, log.size)

  for (_ <- 1 to msgPerSeg)
    log.appendAsLeader(createRecords, leaderEpoch = 0)

  assertTrue("Should be ahead of to original offset", log.logEndOffset > msgPerSeg)
  assertEquals("log size should be same as before", size, log.size)
  log.truncateTo(0) // truncate before first start offset in the log
  assertEquals("Should change offset", 0, log.logEndOffset)
  assertEquals("Should change log size", 0, log.size)
}
/**
 * Verify that when we truncate a log the index of the last segment is resized to the max index size to allow more appends
 */
@Test
def testIndexResizingAtTruncation(): Unit = {
val setSize = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds).sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
// indexIntervalBytes = setSize - 1 forces an index entry for (nearly) every append
val logConfig = LogTest.createLogConfig(segmentBytes = segmentSize, indexIntervalBytes = setSize - 1)
val log = createLog(logDir, logConfig)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
for (i<- 1 to msgPerSeg)
log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
// advance the mock clock before the next batch so its timestamps differ
mockTime.sleep(msgPerSeg)
for (i<- 1 to msgPerSeg)
log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0)
assertEquals("There should be exactly 2 segment.", 2, log.numberOfSegments)
// after rolling, the first segment's indexes hold only the entries actually used
val expectedEntries = msgPerSeg - 1
assertEquals(s"The index of the first segment should have $expectedEntries entries", expectedEntries, log.logSegments.toList.head.offsetIndex.maxEntries)
assertEquals(s"The time index of the first segment should have $expectedEntries entries", expectedEntries, log.logSegments.toList.head.timeIndex.maxEntries)
// truncation must re-expand the surviving segment's indexes back to maxIndexSize
// (maxEntries = maxIndexSize / entry size, per the divisors asserted below)
log.truncateTo(0)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
assertEquals("The index of segment 1 should be resized to maxIndexSize", log.config.maxIndexSize/8, log.logSegments.toList.head.offsetIndex.maxEntries)
assertEquals("The time index of segment 1 should be resized to maxIndexSize", log.config.maxIndexSize/12, log.logSegments.toList.head.timeIndex.maxEntries)
mockTime.sleep(msgPerSeg)
// the resized index must accept a full batch of appends again without rolling
for (i<- 1 to msgPerSeg)
log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
}
/**
 * When we open a log any index segments without an associated log segment should be deleted.
 */
@Test
def testBogusIndexSegmentsAreRemoved(): Unit = {
// index files at base offset 0 will belong to a real segment; those at offset 5 will not
val bogusIndex1 = Log.offsetIndexFile(logDir, 0)
val bogusTimeIndex1 = Log.timeIndexFile(logDir, 0)
val bogusIndex2 = Log.offsetIndexFile(logDir, 5)
val bogusTimeIndex2 = Log.timeIndexFile(logDir, 5)
// The files remain absent until we first access it because we are doing lazy loading for time index and offset index
// files but in this test case we need to create these files in order to test we will remove them.
bogusIndex2.createNewFile()
bogusTimeIndex2.createNewFile()
def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, indexIntervalBytes = 1)
val log = createLog(logDir, logConfig)
// Force the segment to access the index files because we are doing index lazy loading.
log.logSegments.toSeq.head.offsetIndex
log.logSegments.toSeq.head.timeIndex
// offset-0 index files are adopted by the real segment; the orphans at offset 5 are removed
assertTrue("The first index file should have been replaced with a larger file", bogusIndex1.length > 0)
assertTrue("The first time index file should have been replaced with a larger file", bogusTimeIndex1.length > 0)
assertFalse("The second index file should have been deleted.", bogusIndex2.exists)
assertFalse("The second time index file should have been deleted.", bogusTimeIndex2.exists)
// check that we can append to the log
for (_ <- 0 until 10)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.delete()
}
/**
 * Verify that truncation works correctly after re-opening the log.
 */
@Test
def testReopenThenTruncate(): Unit = {
  def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
  // create a log
  val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, indexIntervalBytes = 10000)
  var log = createLog(logDir, logConfig)

  // roll over several segments, then close, re-open, and attempt to truncate
  (0 until 100).foreach(_ => log.appendAsLeader(createRecords, leaderEpoch = 0))
  log.close()
  log = createLog(logDir, logConfig)
  log.truncateTo(3)

  assertEquals("All but one segment should be deleted.", 1, log.numberOfSegments)
  assertEquals("Log end offset should be 3.", 3, log.logEndOffset)
}
/**
 * Test that deleted files are deleted after the appropriate time.
 */
@Test
def testAsyncDelete(): Unit = {
// records are timestamped in the past so retentionMs = 999 below expires them
def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds - 1000L)
val asyncDeleteMs = 1000
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, indexIntervalBytes = 10000,
retentionMs = 999, fileDeleteDelayMs = asyncDeleteMs)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 100)
log.appendAsLeader(createRecords, leaderEpoch = 0)
// files should be renamed
val segments = log.logSegments.toArray
val oldFiles = segments.map(_.log.file) ++ segments.map(_.lazyOffsetIndex.file)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
// expired segments are renamed with the .deleted suffix but not yet removed from disk
assertEquals("Only one segment should remain.", 1, log.numberOfSegments)
assertTrue("All log and index files should end in .deleted", segments.forall(_.log.file.getName.endsWith(Log.DeletedFileSuffix)) &&
segments.forall(_.lazyOffsetIndex.file.getName.endsWith(Log.DeletedFileSuffix)))
assertTrue("The .deleted files should still be there.", segments.forall(_.log.file.exists) &&
segments.forall(_.lazyOffsetIndex.file.exists))
assertTrue("The original file should be gone.", oldFiles.forall(!_.exists))
// when enough time passes the files should be deleted
val deletedFiles = segments.map(_.log.file) ++ segments.map(_.lazyOffsetIndex.file)
mockTime.sleep(asyncDeleteMs + 1)
assertTrue("Files should all be gone.", deletedFiles.forall(!_.exists))
}
/**
 * Any files ending in .deleted should be removed when the log is re-opened.
 */
@Test
def testOpenDeletesObsoleteFiles(): Unit = {
  def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds - 1000)
  val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, retentionMs = 999)
  var log = createLog(logDir, logConfig)

  // roll several segments by appending, then expire all of them
  (0 until 100).foreach(_ => log.appendAsLeader(createRecords, leaderEpoch = 0))
  log.updateHighWatermark(log.logEndOffset)
  log.deleteOldSegments()
  log.close()

  // re-opening the log must purge the obsolete files
  log = createLog(logDir, logConfig)
  assertEquals("The deleted segments should be gone.", 1, log.numberOfSegments)
}
@Test
def testAppendMessageWithNullPayload(): Unit = {
val log = createLog(logDir, LogConfig())
log.appendAsLeader(TestUtils.singletonRecords(value = null), leaderEpoch = 0)
val head = readLog(log, 0, 4096).records.records.iterator.next()
assertEquals(0, head.offset)
assertTrue("Message payload should be null.", !head.hasValue)
}
@Test
def testAppendWithOutOfOrderOffsetsThrowsException(): Unit = {
val log = createLog(logDir, LogConfig())
val appendOffsets = Seq(0L, 1L, 3L, 2L, 4L)
val buffer = ByteBuffer.allocate(512)
for (offset <- appendOffsets) {
val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, offset, mockTime.milliseconds(),
1L, 0, 0, false, 0)
builder.append(new SimpleRecord("key".getBytes, "value".getBytes))
builder.close()
}
buffer.flip()
val memoryRecords = MemoryRecords.readableRecords(buffer)
assertThrows[OffsetsOutOfOrderException] {
log.appendAsFollower(memoryRecords)
}
}
@Test
def testAppendBelowExpectedOffsetThrowsException(): Unit = {
val log = createLog(logDir, LogConfig())
val records = (0 until 2).map(id => new SimpleRecord(id.toString.getBytes)).toArray
records.foreach(record => log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE, record), leaderEpoch = 0))
val magicVals = Seq(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1, RecordBatch.MAGIC_VALUE_V2)
val compressionTypes = Seq(CompressionType.NONE, CompressionType.LZ4)
for (magic <- magicVals; compression <- compressionTypes) {
val invalidRecord = MemoryRecords.withRecords(magic, compression, new SimpleRecord(1.toString.getBytes))
withClue(s"Magic=$magic, compressionType=$compression") {
assertThrows[UnexpectedAppendOffsetException] {
log.appendAsFollower(invalidRecord)
}
}
}
}
@Test
def testAppendEmptyLogBelowLogStartOffsetThrowsException(): Unit = {
createEmptyLogs(logDir, 7)
val log = createLog(logDir, LogConfig(), brokerTopicStats = brokerTopicStats)
assertEquals(7L, log.logStartOffset)
assertEquals(7L, log.logEndOffset)
val firstOffset = 4L
val magicVals = Seq(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1, RecordBatch.MAGIC_VALUE_V2)
val compressionTypes = Seq(CompressionType.NONE, CompressionType.LZ4)
for (magic <- magicVals; compression <- compressionTypes) {
val batch = TestUtils.records(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes),
new SimpleRecord("k3".getBytes, "v3".getBytes)),
magicValue = magic, codec = compression,
baseOffset = firstOffset)
withClue(s"Magic=$magic, compressionType=$compression") {
val exception = intercept[UnexpectedAppendOffsetException] {
log.appendAsFollower(records = batch)
}
assertEquals(s"Magic=$magic, compressionType=$compression, UnexpectedAppendOffsetException#firstOffset",
firstOffset, exception.firstOffset)
assertEquals(s"Magic=$magic, compressionType=$compression, UnexpectedAppendOffsetException#lastOffset",
firstOffset + 2, exception.lastOffset)
}
}
}
@Test
def testAppendWithNoTimestamp(): Unit = {
val log = createLog(logDir, LogConfig())
log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(RecordBatch.NO_TIMESTAMP, "key".getBytes, "value".getBytes)), leaderEpoch = 0)
}
  /**
   * Appends random bytes to the active segment's log and index files, then reloads
   * the log and verifies the corruption is removed: the end offset equals the number
   * of appended messages and every record's key, value and timestamp survive intact.
   * Repeated ten times with a random message count per iteration.
   */
  @Test
  def testCorruptLog(): Unit = {
    // append some messages to create some segments
    val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
    def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
    val recoveryPoint = 50L
    for (_ <- 0 until 10) {
      // create a log and write some messages to it
      logDir.mkdirs()
      var log = createLog(logDir, logConfig)
      val numMessages = 50 + TestUtils.random.nextInt(50)
      for (_ <- 0 until numMessages)
        log.appendAsLeader(createRecords, leaderEpoch = 0)
      // Snapshot every record on disk before corrupting anything.
      val records = log.logSegments.flatMap(_.log.records.asScala.toList).toList
      log.close()
      // corrupt index and log by appending random bytes
      TestUtils.appendNonsenseToFile(log.activeSegment.lazyOffsetIndex.file, TestUtils.random.nextInt(1024) + 1)
      TestUtils.appendNonsenseToFile(log.activeSegment.log.file, TestUtils.random.nextInt(1024) + 1)
      // attempt recovery
      log = createLog(logDir, logConfig, brokerTopicStats, 0L, recoveryPoint)
      assertEquals(numMessages, log.logEndOffset)
      // The recovered log must contain exactly the records written before corruption.
      val recovered = log.logSegments.flatMap(_.log.records.asScala.toList).toList
      assertEquals(records.size, recovered.size)
      for (i <- records.indices) {
        val expected = records(i)
        val actual = recovered(i)
        assertEquals(s"Keys not equal", expected.key, actual.key)
        assertEquals(s"Values not equal", expected.value, actual.value)
        assertEquals(s"Timestamps not equal", expected.timestamp, actual.timestamp)
      }
      // Wipe the directory so the next iteration starts clean.
      Utils.delete(logDir)
    }
  }
  /**
   * Follower appends whose offsets would overflow the active segment's Int-relative
   * offset range must roll a new segment based at the incoming batch's offset, and
   * the resulting offset index files must reopen cleanly.
   */
  @Test
  def testOverCompactedLogRecovery(): Unit = {
    // append some messages to create some segments
    val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
    val log = createLog(logDir, logConfig)
    val set1 = MemoryRecords.withRecords(0, CompressionType.NONE, 0, new SimpleRecord("v1".getBytes(), "k1".getBytes()))
    val set2 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 2, CompressionType.NONE, 0, new SimpleRecord("v3".getBytes(), "k3".getBytes()))
    val set3 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 3, CompressionType.NONE, 0, new SimpleRecord("v4".getBytes(), "k4".getBytes()))
    val set4 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 4, CompressionType.NONE, 0, new SimpleRecord("v5".getBytes(), "k5".getBytes()))
    //Writes into an empty log with baseOffset 0
    log.appendAsFollower(set1)
    assertEquals(0L, log.activeSegment.baseOffset)
    //This write will roll the segment, yielding a new segment with base offset = max(1, Integer.MAX_VALUE+2) = Integer.MAX_VALUE+2
    log.appendAsFollower(set2)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    assertTrue(Log.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 2).exists)
    //This will go into the existing log
    log.appendAsFollower(set3)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    //This will go into the existing log
    log.appendAsFollower(set4)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    log.close()
    // Two segments (base 0 and base Integer.MAX_VALUE+2) => two offset index files,
    // each of which must reopen with a non-negative last offset.
    val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index"))
    assertEquals(2, indexFiles.length)
    for (file <- indexFiles) {
      val offsetIndex = new OffsetIndex(file, file.getName.replace(".index","").toLong)
      assertTrue(offsetIndex.lastOffset >= 0)
      offsetIndex.close()
    }
    Utils.delete(logDir)
  }
@Test
def testWriteLeaderEpochCheckpointAfterDirectoryRename(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5)
assertEquals(Some(5), log.latestEpoch)
// Ensure that after a directory rename, the epoch cache is written to the right location
val tp = Log.parseTopicPartitionName(log.dir)
log.renameDir(Log.logDeleteDirName(tp))
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 10)
assertEquals(Some(10), log.latestEpoch)
assertTrue(LeaderEpochCheckpointFile.newFile(log.dir).exists())
assertFalse(LeaderEpochCheckpointFile.newFile(this.logDir).exists())
}
@Test
def testLeaderEpochCacheClearedAfterDowngradeInAppendedMessages(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5)
assertEquals(Some(5), log.leaderEpochCache.flatMap(_.latestEpoch))
log.appendAsFollower(TestUtils.records(List(new SimpleRecord("foo".getBytes())),
baseOffset = 1L,
magicValue = RecordVersion.V1.value))
assertEquals(None, log.leaderEpochCache.flatMap(_.latestEpoch))
}
@Test
def testLeaderEpochCacheClearedAfterStaticMessageFormatDowngrade(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5)
assertEquals(Some(5), log.latestEpoch)
log.close()
// reopen the log with an older message format version and check the cache
val downgradedLogConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1,
maxMessageBytes = 64 * 1024, messageFormatVersion = kafka.api.KAFKA_0_10_2_IV0.shortVersion)
val reopened = createLog(logDir, downgradedLogConfig)
assertLeaderEpochCacheEmpty(reopened)
reopened.appendAsLeader(TestUtils.records(List(new SimpleRecord("bar".getBytes())),
magicValue = RecordVersion.V1.value), leaderEpoch = 5)
assertLeaderEpochCacheEmpty(reopened)
}
@Test
def testLeaderEpochCacheClearedAfterDynamicMessageFormatDowngrade(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5)
assertEquals(Some(5), log.latestEpoch)
val downgradedLogConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1,
maxMessageBytes = 64 * 1024, messageFormatVersion = kafka.api.KAFKA_0_10_2_IV0.shortVersion)
log.updateConfig(downgradedLogConfig)
assertLeaderEpochCacheEmpty(log)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("bar".getBytes())),
magicValue = RecordVersion.V1.value), leaderEpoch = 5)
assertLeaderEpochCacheEmpty(log)
}
@Test
def testLeaderEpochCacheCreatedAfterMessageFormatUpgrade(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1,
maxMessageBytes = 64 * 1024, messageFormatVersion = kafka.api.KAFKA_0_10_2_IV0.shortVersion)
val log = createLog(logDir, logConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("bar".getBytes())),
magicValue = RecordVersion.V1.value), leaderEpoch = 5)
assertLeaderEpochCacheEmpty(log)
val upgradedLogConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1,
maxMessageBytes = 64 * 1024, messageFormatVersion = kafka.api.KAFKA_0_11_0_IV0.shortVersion)
log.updateConfig(upgradedLogConfig)
log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5)
assertEquals(Some(5), log.latestEpoch)
}
private def assertLeaderEpochCacheEmpty(log: Log): Unit = {
assertEquals(None, log.leaderEpochCache)
assertEquals(None, log.latestEpoch)
assertFalse(LeaderEpochCheckpointFile.newFile(log.dir).exists())
}
  /**
   * Multi-record V2 variant of the overflow-roll test: each compressed batch after
   * the first holds two records, so rolling and index reopening must handle batch
   * last offsets beyond Integer.MAX_VALUE.
   */
  @Test
  def testOverCompactedLogRecoveryMultiRecord(): Unit = {
    // append some messages to create some segments
    val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
    val log = createLog(logDir, logConfig)
    val set1 = MemoryRecords.withRecords(0, CompressionType.NONE, 0, new SimpleRecord("v1".getBytes(), "k1".getBytes()))
    val set2 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 2, CompressionType.GZIP, 0,
      new SimpleRecord("v3".getBytes(), "k3".getBytes()),
      new SimpleRecord("v4".getBytes(), "k4".getBytes()))
    val set3 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 4, CompressionType.GZIP, 0,
      new SimpleRecord("v5".getBytes(), "k5".getBytes()),
      new SimpleRecord("v6".getBytes(), "k6".getBytes()))
    val set4 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 6, CompressionType.GZIP, 0,
      new SimpleRecord("v7".getBytes(), "k7".getBytes()),
      new SimpleRecord("v8".getBytes(), "k8".getBytes()))
    //Writes into an empty log with baseOffset 0
    log.appendAsFollower(set1)
    assertEquals(0L, log.activeSegment.baseOffset)
    //This write will roll the segment, yielding a new segment with base offset = max(1, Integer.MAX_VALUE+2) = Integer.MAX_VALUE+2
    log.appendAsFollower(set2)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    assertTrue(Log.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 2).exists)
    //This will go into the existing log
    log.appendAsFollower(set3)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    //This will go into the existing log
    log.appendAsFollower(set4)
    assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset)
    log.close()
    // Two segments => two offset index files, each must reopen with a valid last offset.
    val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index"))
    assertEquals(2, indexFiles.length)
    for (file <- indexFiles) {
      val offsetIndex = new OffsetIndex(file, file.getName.replace(".index","").toLong)
      assertTrue(offsetIndex.lastOffset >= 0)
      offsetIndex.close()
    }
    Utils.delete(logDir)
  }
  /**
   * Message-format V1 variant: with magic V1 the roll decisions differ from V2 — the
   * first compressed batch rolls a segment based at offset 3, and the following batch
   * rolls again at Integer.MAX_VALUE + 4, leaving three segments (see inline asserts).
   */
  @Test
  def testOverCompactedLogRecoveryMultiRecordV1(): Unit = {
    // append some messages to create some segments
    val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
    val log = createLog(logDir, logConfig)
    val set1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0, CompressionType.NONE,
      new SimpleRecord("v1".getBytes(), "k1".getBytes()))
    val set2 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, Integer.MAX_VALUE.toLong + 2, CompressionType.GZIP,
      new SimpleRecord("v3".getBytes(), "k3".getBytes()),
      new SimpleRecord("v4".getBytes(), "k4".getBytes()))
    val set3 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, Integer.MAX_VALUE.toLong + 4, CompressionType.GZIP,
      new SimpleRecord("v5".getBytes(), "k5".getBytes()),
      new SimpleRecord("v6".getBytes(), "k6".getBytes()))
    val set4 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, Integer.MAX_VALUE.toLong + 6, CompressionType.GZIP,
      new SimpleRecord("v7".getBytes(), "k7".getBytes()),
      new SimpleRecord("v8".getBytes(), "k8".getBytes()))
    //Writes into an empty log with baseOffset 0
    log.appendAsFollower(set1)
    assertEquals(0L, log.activeSegment.baseOffset)
    //This write will roll the segment, yielding a new segment with base offset = max(1, 3) = 3
    log.appendAsFollower(set2)
    assertEquals(3, log.activeSegment.baseOffset)
    assertTrue(Log.producerSnapshotFile(logDir, 3).exists)
    //This will also roll the segment, yielding a new segment with base offset = max(5, Integer.MAX_VALUE+4) = Integer.MAX_VALUE+4
    log.appendAsFollower(set3)
    assertEquals(Integer.MAX_VALUE.toLong + 4, log.activeSegment.baseOffset)
    assertTrue(Log.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 4).exists)
    //This will go into the existing log
    log.appendAsFollower(set4)
    assertEquals(Integer.MAX_VALUE.toLong + 4, log.activeSegment.baseOffset)
    log.close()
    // Three segments => three offset index files, each must reopen with a valid last offset.
    val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index"))
    assertEquals(3, indexFiles.length)
    for (file <- indexFiles) {
      val offsetIndex = new OffsetIndex(file, file.getName.replace(".index","").toLong)
      assertTrue(offsetIndex.lastOffset >= 0)
      offsetIndex.close()
    }
    Utils.delete(logDir)
  }
@Test
def testSplitOnOffsetOverflow(): Unit = {
// create a log such that one log segment has offsets that overflow, and call the split API on that segment
val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
assertTrue("At least one segment must have offset overflow", LogTest.hasOffsetOverflow(log))
val allRecordsBeforeSplit = LogTest.allRecords(log)
// split the segment with overflow
log.splitOverflowedSegment(segmentWithOverflow)
// assert we were successfully able to split the segment
assertEquals(4, log.numberOfSegments)
LogTest.verifyRecordsInLog(log, allRecordsBeforeSplit)
// verify we do not have offset overflow anymore
assertFalse(LogTest.hasOffsetOverflow(log))
}
@Test
def testDegenerateSegmentSplit(): Unit = {
// This tests a scenario where all of the batches appended to a segment have overflowed.
// When we split the overflowed segment, only one new segment will be created.
val overflowOffset = Int.MaxValue + 1L
val batch1 = MemoryRecords.withRecords(overflowOffset, CompressionType.NONE, 0,
new SimpleRecord("a".getBytes))
val batch2 = MemoryRecords.withRecords(overflowOffset + 1, CompressionType.NONE, 0,
new SimpleRecord("b".getBytes))
testDegenerateSplitSegmentWithOverflow(segmentBaseOffset = 0L, List(batch1, batch2))
}
@Test
def testDegenerateSegmentSplitWithOutOfRangeBatchLastOffset(): Unit = {
// Degenerate case where the only batch in the segment overflows. In this scenario,
// the first offset of the batch is valid, but the last overflows.
val firstBatchBaseOffset = Int.MaxValue - 1
val records = MemoryRecords.withRecords(firstBatchBaseOffset, CompressionType.NONE, 0,
new SimpleRecord("a".getBytes),
new SimpleRecord("b".getBytes),
new SimpleRecord("c".getBytes))
testDegenerateSplitSegmentWithOverflow(segmentBaseOffset = 0L, List(records))
}
  /**
   * Writes the given batches (all overflowing relative to `segmentBaseOffset`) into
   * a raw segment, loads the log, splits the overflowed segment and verifies that
   * exactly one replacement segment is produced, based at the first batch's offset,
   * with no records lost and no remaining overflow.
   */
  private def testDegenerateSplitSegmentWithOverflow(segmentBaseOffset: Long, records: List[MemoryRecords]): Unit = {
    val segment = LogTest.rawSegment(logDir, segmentBaseOffset)
    // Need to create the offset files explicitly to avoid triggering segment recovery to truncate segment.
    Log.offsetIndexFile(logDir, segmentBaseOffset).createNewFile()
    Log.timeIndexFile(logDir, segmentBaseOffset).createNewFile()
    records.foreach(segment.append _)
    segment.close()
    // Create clean shutdown file so that we do not split during the load
    createCleanShutdownFile()
    val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
    // recoveryPoint = Long.MaxValue also keeps the load from re-validating the segment.
    val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue)
    val segmentWithOverflow = LogTest.firstOverflowSegment(log).getOrElse {
      Assertions.fail("Failed to create log with a segment which has overflowed offsets")
    }
    val allRecordsBeforeSplit = LogTest.allRecords(log)
    log.splitOverflowedSegment(segmentWithOverflow)
    // Every batch overflowed, so the split collapses into a single new segment.
    assertEquals(1, log.numberOfSegments)
    val firstBatchBaseOffset = records.head.batches.asScala.head.baseOffset
    assertEquals(firstBatchBaseOffset, log.activeSegment.baseOffset)
    LogTest.verifyRecordsInLog(log, allRecordsBeforeSplit)
    assertFalse(LogTest.hasOffsetOverflow(log))
  }
@Test
def testRecoveryOfSegmentWithOffsetOverflow(): Unit = {
val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
val (log, _) = createLogWithOffsetOverflow(logConfig)
val expectedKeys = LogTest.keysInLog(log)
// Run recovery on the log. This should split the segment underneath. Ignore .deleted files as we could have still
// have them lying around after the split.
val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
// Running split again would throw an error
for (segment <- recoveredLog.logSegments) {
try {
log.splitOverflowedSegment(segment)
fail()
} catch {
case _: IllegalArgumentException =>
}
}
}
  /**
   * Crash-recovery scenario: the split produced replacement segments but the broker
   * died before any were renamed from .cleaned to .swap. On reload the partial split
   * is aborted and recovery re-runs the split to completion, yielding one extra segment.
   */
  @Test
  def testRecoveryAfterCrashDuringSplitPhase1(): Unit = {
    val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
    val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
    val expectedKeys = LogTest.keysInLog(log)
    val numSegmentsInitial = log.logSegments.size
    // Split the segment
    val newSegments = log.splitOverflowedSegment(segmentWithOverflow)
    // Simulate recovery just after .cleaned file is created, before rename to .swap. On recovery, existing split
    // operation is aborted but the recovery process itself kicks off split which should complete.
    newSegments.reverse.foreach(segment => {
      segment.changeFileSuffixes("", Log.CleanedFileSuffix)
      segment.truncateTo(0)
    })
    // Restore the original (pre-split) segment files by stripping their .deleted suffix.
    for (file <- logDir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix))
      Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, "")))
    val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
    assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
    assertEquals(numSegmentsInitial + 1, recoveredLog.logSegments.size)
    recoveredLog.close()
  }
  /**
   * Crash-recovery scenario: one replacement segment had been renamed to .swap while
   * the rest were still .cleaned. On reload the partial split is aborted and recovery
   * re-runs the split to completion, yielding one extra segment.
   */
  @Test
  def testRecoveryAfterCrashDuringSplitPhase2(): Unit = {
    val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
    val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
    val expectedKeys = LogTest.keysInLog(log)
    val numSegmentsInitial = log.logSegments.size
    // Split the segment
    val newSegments = log.splitOverflowedSegment(segmentWithOverflow)
    // Simulate recovery just after one of the new segments has been renamed to .swap. On recovery, existing split
    // operation is aborted but the recovery process itself kicks off split which should complete.
    newSegments.reverse.foreach { segment =>
      if (segment != newSegments.last)
        segment.changeFileSuffixes("", Log.CleanedFileSuffix)
      else
        segment.changeFileSuffixes("", Log.SwapFileSuffix)
      segment.truncateTo(0)
    }
    // Restore the original (pre-split) segment files by stripping their .deleted suffix.
    for (file <- logDir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix))
      Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, "")))
    val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
    assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
    assertEquals(numSegmentsInitial + 1, recoveredLog.logSegments.size)
    recoveredLog.close()
  }
@Test
def testRecoveryAfterCrashDuringSplitPhase3(): Unit = {
val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
val expectedKeys = LogTest.keysInLog(log)
val numSegmentsInitial = log.logSegments.size
// Split the segment
val newSegments = log.splitOverflowedSegment(segmentWithOverflow)
// Simulate recovery right after all new segments have been renamed to .swap. On recovery, existing split operation
// is completed and the old segment must be deleted.
newSegments.reverse.foreach(segment => {
segment.changeFileSuffixes("", Log.SwapFileSuffix)
})
for (file <- logDir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix))
Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, "")))
// Truncate the old segment
segmentWithOverflow.truncateTo(0)
val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
assertEquals(numSegmentsInitial + 1, recoveredLog.logSegments.size)
log.close()
}
  /**
   * Crash-recovery scenario: all replacement segments were renamed to .swap and the
   * old segment's .deleted files had already been removed from disk. On reload the
   * pending split is completed, yielding one extra segment with all keys intact.
   */
  @Test
  def testRecoveryAfterCrashDuringSplitPhase4(): Unit = {
    val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
    val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
    val expectedKeys = LogTest.keysInLog(log)
    val numSegmentsInitial = log.logSegments.size
    // Split the segment
    val newSegments = log.splitOverflowedSegment(segmentWithOverflow)
    // Simulate recovery right after all new segments have been renamed to .swap and old segment has been deleted. On
    // recovery, existing split operation is completed.
    newSegments.reverse.foreach(_.changeFileSuffixes("", Log.SwapFileSuffix))
    for (file <- logDir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix))
      Utils.delete(file)
    // Truncate the old segment
    segmentWithOverflow.truncateTo(0)
    val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
    assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
    assertEquals(numSegmentsInitial + 1, recoveredLog.logSegments.size)
    recoveredLog.close()
  }
  /**
   * Crash-recovery scenario: one replacement segment had been renamed to .swap while
   * the other had already reached its final .log name. On reload the pending split is
   * completed, yielding one extra segment with all keys intact.
   */
  @Test
  def testRecoveryAfterCrashDuringSplitPhase5(): Unit = {
    val logConfig = LogTest.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000)
    val (log, segmentWithOverflow) = createLogWithOffsetOverflow(logConfig)
    val expectedKeys = LogTest.keysInLog(log)
    val numSegmentsInitial = log.logSegments.size
    // Split the segment
    val newSegments = log.splitOverflowedSegment(segmentWithOverflow)
    // Simulate recovery right after one of the new segment has been renamed to .swap and the other to .log. On
    // recovery, existing split operation is completed.
    newSegments.last.changeFileSuffixes("", Log.SwapFileSuffix)
    // Truncate the old segment
    segmentWithOverflow.truncateTo(0)
    val recoveredLog = recoverAndCheck(logConfig, expectedKeys)
    assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
    assertEquals(numSegmentsInitial + 1, recoveredLog.logSegments.size)
    recoveredLog.close()
  }
@Test
def testCleanShutdownFile(): Unit = {
// append some messages to create some segments
val logConfig = LogTest.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024)
def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds)
val cleanShutdownFile = createCleanShutdownFile()
assertTrue(".kafka_cleanshutdown must exist", cleanShutdownFile.exists())
var recoveryPoint = 0L
// create a log and write some messages to it
var log = createLog(logDir, logConfig)
for (_ <- 0 until 100)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.close()
// check if recovery was attempted. Even if the recovery point is 0L, recovery should not be attempted as the
// clean shutdown file exists.
recoveryPoint = log.logEndOffset
log = createLog(logDir, logConfig)
assertEquals(recoveryPoint, log.logEndOffset)
Utils.delete(cleanShutdownFile)
}
@Test
def testParseTopicPartitionName(): Unit = {
val topic = "test_topic"
val partition = "143"
val dir = new File(logDir, topicPartitionName(topic, partition))
val topicPartition = Log.parseTopicPartitionName(dir)
assertEquals(topic, topicPartition.topic)
assertEquals(partition.toInt, topicPartition.partition)
}
/**
* Tests that log directories with a period in their name that have been marked for deletion
* are parsed correctly by `Log.parseTopicPartitionName` (see KAFKA-5232 for details).
*/
@Test
def testParseTopicPartitionNameWithPeriodForDeletedTopic(): Unit = {
val topic = "foo.bar-testtopic"
val partition = "42"
val dir = new File(logDir, Log.logDeleteDirName(new TopicPartition(topic, partition.toInt)))
val topicPartition = Log.parseTopicPartitionName(dir)
assertEquals("Unexpected topic name parsed", topic, topicPartition.topic)
assertEquals("Unexpected partition number parsed", partition.toInt, topicPartition.partition)
}
@Test
def testParseTopicPartitionNameForEmptyName(): Unit = {
try {
val dir = new File("")
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
} catch {
case _: KafkaException => // its GOOD!
}
}
@Test
def testParseTopicPartitionNameForNull(): Unit = {
try {
val dir: File = null
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir)
} catch {
case _: KafkaException => // its GOOD!
}
}
@Test
def testParseTopicPartitionNameForMissingSeparator(): Unit = {
val topic = "test_topic"
val partition = "1999"
val dir = new File(logDir, topic + partition)
try {
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
// also test the "-delete" marker case
val deleteMarkerDir = new File(logDir, topic + partition + "." + DeleteDirSuffix)
try {
Log.parseTopicPartitionName(deleteMarkerDir)
fail("KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
}
@Test
def testParseTopicPartitionNameForMissingTopic(): Unit = {
val topic = ""
val partition = "1999"
val dir = new File(logDir, topicPartitionName(topic, partition))
try {
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
// also test the "-delete" marker case
val deleteMarkerDir = new File(logDir, Log.logDeleteDirName(new TopicPartition(topic, partition.toInt)))
try {
Log.parseTopicPartitionName(deleteMarkerDir)
fail("KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
}
@Test
def testParseTopicPartitionNameForMissingPartition(): Unit = {
val topic = "test_topic"
val partition = ""
val dir = new File(logDir.getPath + topicPartitionName(topic, partition))
try {
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
// also test the "-delete" marker case
val deleteMarkerDir = new File(logDir, topicPartitionName(topic, partition) + "." + DeleteDirSuffix)
try {
Log.parseTopicPartitionName(deleteMarkerDir)
fail("KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
}
@Test
def testParseTopicPartitionNameForInvalidPartition(): Unit = {
val topic = "test_topic"
val partition = "1999a"
val dir = new File(logDir, topicPartitionName(topic, partition))
try {
Log.parseTopicPartitionName(dir)
fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
// also test the "-delete" marker case
val deleteMarkerDir = new File(logDir, topic + partition + "." + DeleteDirSuffix)
try {
Log.parseTopicPartitionName(deleteMarkerDir)
fail("KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath)
} catch {
case _: KafkaException => // expected
}
}
@Test
def testParseTopicPartitionNameForExistingInvalidDir(): Unit = {
val dir1 = new File(logDir.getPath + "/non_kafka_dir")
try {
Log.parseTopicPartitionName(dir1)
fail("KafkaException should have been thrown for dir: " + dir1.getCanonicalPath)
} catch {
case _: KafkaException => // should only throw KafkaException
}
val dir2 = new File(logDir.getPath + "/non_kafka_dir-delete")
try {
Log.parseTopicPartitionName(dir2)
fail("KafkaException should have been thrown for dir: " + dir2.getCanonicalPath)
} catch {
case _: KafkaException => // should only throw KafkaException
}
}
def topicPartitionName(topic: String, partition: String): String =
topic + "-" + partition
  /**
   * Retention-based deletion: segments become eligible only after a high watermark
   * is set, deletion never advances the log start offset past the high watermark,
   * and removing segments prunes the leader epoch cache accordingly.
   */
  @Test
  def testDeleteOldSegments(): Unit = {
    def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds - 1000)
    val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, retentionMs = 999)
    val log = createLog(logDir, logConfig)
    // append some messages to create some segments
    for (_ <- 0 until 100)
      log.appendAsLeader(createRecords, leaderEpoch = 0)
    log.maybeAssignEpochStartOffset(0, 40)
    log.maybeAssignEpochStartOffset(1, 90)
    // segments are not eligible for deletion if no high watermark has been set
    val numSegments = log.numberOfSegments
    log.deleteOldSegments()
    assertEquals(numSegments, log.numberOfSegments)
    assertEquals(0L, log.logStartOffset)
    // only segments with offset before the current high watermark are eligible for deletion
    for (hw <- 25 to 30) {
      log.updateHighWatermark(hw)
      log.deleteOldSegments()
      assertTrue(log.logStartOffset <= hw)
      // Every surviving segment must still contain data at or beyond the high watermark.
      log.logSegments.foreach { segment =>
        val segmentFetchInfo = segment.read(startOffset = segment.baseOffset, maxSize = Int.MaxValue)
        val segmentLastOffsetOpt = segmentFetchInfo.records.records.asScala.lastOption.map(_.offset)
        segmentLastOffsetOpt.foreach { lastOffset =>
          assertTrue(lastOffset >= hw)
        }
      }
    }
    // expire all segments
    log.updateHighWatermark(log.logEndOffset)
    log.deleteOldSegments()
    assertEquals("The deleted segments should be gone.", 1, log.numberOfSegments)
    assertEquals("Epoch entries should have gone.", 1, epochCache(log).epochEntries.size)
    assertEquals("Epoch entry should be the latest epoch and the leo.", EpochEntry(1, 100), epochCache(log).epochEntries.head)
    // append some messages to create some segments
    for (_ <- 0 until 100)
      log.appendAsLeader(createRecords, leaderEpoch = 0)
    // Deleting the whole log must drop every segment and epoch entry.
    log.delete()
    assertEquals("The number of segments should be 0", 0, log.numberOfSegments)
    assertEquals("The number of deleted segments should be zero.", 0, log.deleteOldSegments())
    assertEquals("Epoch entries should have gone.", 0, epochCache(log).epochEntries.size)
  }
@Test
def testLogDeletionAfterClose(): Unit = {
def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds - 1000)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, retentionMs = 999)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
log.appendAsLeader(createRecords, leaderEpoch = 0)
assertEquals("The deleted segments should be gone.", 1, log.numberOfSegments)
assertEquals("Epoch entries should have gone.", 1, epochCache(log).epochEntries.size)
log.close()
log.delete()
assertEquals("The number of segments should be 0", 0, log.numberOfSegments)
assertEquals("Epoch entries should have gone.", 0, epochCache(log).epochEntries.size)
}
@Test
def testLogDeletionAfterDeleteRecords(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5)
val log = createLog(logDir, logConfig)
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
assertEquals("should have 3 segments", 3, log.numberOfSegments)
assertEquals(log.logStartOffset, 0)
log.updateHighWatermark(log.logEndOffset)
log.maybeIncrementLogStartOffset(1)
log.deleteOldSegments()
assertEquals("should have 3 segments", 3, log.numberOfSegments)
assertEquals(log.logStartOffset, 1)
log.maybeIncrementLogStartOffset(6)
log.deleteOldSegments()
assertEquals("should have 2 segments", 2, log.numberOfSegments)
assertEquals(log.logStartOffset, 6)
log.maybeIncrementLogStartOffset(15)
log.deleteOldSegments()
assertEquals("should have 1 segments", 1, log.numberOfSegments)
assertEquals(log.logStartOffset, 15)
}
  // Convenience accessor for the log's leader epoch cache; assumes the cache is present
  // (true for all logs created by these tests).
  def epochCache(log: Log): LeaderEpochFileCache = {
    log.leaderEpochCache.get
  }
@Test
def shouldDeleteSizeBasedSegments(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionBytes = createRecords.sizeInBytes * 10)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("should have 2 segments", 2,log.numberOfSegments)
}
@Test
def shouldNotDeleteSizeBasedSegmentsWhenUnderRetentionSize(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionBytes = createRecords.sizeInBytes * 15)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("should have 3 segments", 3,log.numberOfSegments)
}
@Test
def shouldDeleteTimeBasedSegmentsReadyToBeDeleted(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes, timestamp = 10)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionMs = 10000)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("There should be 1 segment remaining", 1, log.numberOfSegments)
}
@Test
def shouldNotDeleteTimeBasedSegmentsWhenNoneReadyToBeDeleted(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes, timestamp = mockTime.milliseconds)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionMs = 10000000)
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("There should be 3 segments remaining", 3, log.numberOfSegments)
}
@Test
def shouldNotDeleteSegmentsWhenPolicyDoesNotIncludeDelete(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = 10L)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionMs = 10000, cleanupPolicy = "compact")
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
// mark oldest segment as older the retention.ms
log.logSegments.head.lastModified = mockTime.milliseconds - 20000
val segments = log.numberOfSegments
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("There should be 3 segments remaining", segments, log.numberOfSegments)
}
@Test
def shouldDeleteSegmentsReadyToBeDeletedWhenCleanupPolicyIsCompactAndDelete(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes, timestamp = 10L)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionMs = 10000, cleanupPolicy = "compact,delete")
val log = createLog(logDir, logConfig)
// append some messages to create some segments
for (_ <- 0 until 15)
log.appendAsLeader(createRecords, leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
assertEquals("There should be 1 segment remaining", 1, log.numberOfSegments)
}
  /**
   * Even with cleanup.policy=compact (no "delete"), segments lying entirely below the
   * log start offset must still be removed.
   */
  @Test
  def shouldDeleteStartOffsetBreachedSegmentsWhenPolicyDoesNotIncludeDelete(): Unit = {
    def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes, timestamp = 10L)
    val recordsPerSegment = 5
    val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * recordsPerSegment, retentionMs = 10000, cleanupPolicy = "compact")
    val log = createLog(logDir, logConfig, brokerTopicStats)

    // append some messages to create some segments
    for (_ <- 0 until 15)
      log.appendAsLeader(createRecords, leaderEpoch = 0)

    // Three segments should be created
    assertEquals(3, log.logSegments.count(_ => true))
    log.updateHighWatermark(log.logEndOffset)
    // move the start offset to the first offset of the second segment
    log.maybeIncrementLogStartOffset(recordsPerSegment)

    // The first segment, which is entirely before the log start offset, should be deleted
    // Of the remaining the segments, the first can overlap the log start offset and the rest must have a base offset
    // greater than the start offset
    log.updateHighWatermark(log.logEndOffset)
    log.deleteOldSegments()
    assertEquals("There should be 2 segments remaining", 2, log.numberOfSegments)
    assertTrue(log.logSegments.head.baseOffset <= log.logStartOffset)
    assertTrue(log.logSegments.tail.forall(s => s.baseOffset > log.logStartOffset))
  }
@Test
def shouldApplyEpochToMessageOnAppendIfLeader(): Unit = {
val records = (0 until 50).toArray.map(id => new SimpleRecord(id.toString.getBytes))
//Given this partition is on leader epoch 72
val epoch = 72
val log = createLog(logDir, LogConfig())
log.maybeAssignEpochStartOffset(epoch, records.size)
//When appending messages as a leader (i.e. assignOffsets = true)
for (record <- records)
log.appendAsLeader(
MemoryRecords.withRecords(CompressionType.NONE, record),
leaderEpoch = epoch
)
//Then leader epoch should be set on messages
for (i <- records.indices) {
val read = readLog(log, i, 1).records.batches.iterator.next()
assertEquals("Should have set leader epoch", 72, read.partitionLeaderEpoch)
}
}
  /**
   * A follower appending replicated batches (which already carry offsets and a leader
   * epoch) must record the leader epoch from those batches in its epoch cache.
   */
  @Test
  def followerShouldSaveEpochInformationFromReplicatedMessagesToTheEpochCache(): Unit = {
    val messageIds = (0 until 50).toArray
    val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes))

    //Given each message has an offset & epoch, as msgs from leader would
    def recordsForEpoch(i: Int): MemoryRecords = {
      val recs = MemoryRecords.withRecords(messageIds(i), CompressionType.NONE, records(i))
      recs.batches.forEach{record =>
        // stamp the batch the way replication from a leader would
        record.setPartitionLeaderEpoch(42)
        record.setLastOffset(i)
      }
      recs
    }

    val log = createLog(logDir, LogConfig())

    //When appending as follower (assignOffsets = false)
    for (i <- records.indices)
      log.appendAsFollower(recordsForEpoch(i))

    // the epoch from the replicated batches must now be the log's latest epoch
    assertEquals(Some(42), log.latestEpoch)
  }
@Test
def shouldTruncateLeaderEpochsWhenDeletingSegments(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionBytes = createRecords.sizeInBytes * 10)
val log = createLog(logDir, logConfig)
val cache = epochCache(log)
// Given three segments of 5 messages each
for (e <- 0 until 15) {
log.appendAsLeader(createRecords, leaderEpoch = 0)
}
//Given epochs
cache.assign(0, 0)
cache.assign(1, 5)
cache.assign(2, 10)
//When first segment is removed
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
//The oldest epoch entry should have been removed
assertEquals(ListBuffer(EpochEntry(1, 5), EpochEntry(2, 10)), cache.epochEntries)
}
@Test
def shouldUpdateOffsetForLeaderEpochsWhenDeletingSegments(): Unit = {
def createRecords = TestUtils.singletonRecords("test".getBytes)
val logConfig = LogTest.createLogConfig(segmentBytes = createRecords.sizeInBytes * 5, retentionBytes = createRecords.sizeInBytes * 10)
val log = createLog(logDir, logConfig)
val cache = epochCache(log)
// Given three segments of 5 messages each
for (e <- 0 until 15) {
log.appendAsLeader(createRecords, leaderEpoch = 0)
}
//Given epochs
cache.assign(0, 0)
cache.assign(1, 7)
cache.assign(2, 10)
//When first segment removed (up to offset 5)
log.updateHighWatermark(log.logEndOffset)
log.deleteOldSegments()
//The first entry should have gone from (0,0) => (0,5)
assertEquals(ListBuffer(EpochEntry(0, 5), EpochEntry(1, 7), EpochEntry(2, 10)), cache.epochEntries)
}
  /**
   * Truncating the log should shrink the leader-epoch cache in lockstep: entries whose
   * start offset is at or beyond the truncation point are removed.
   */
  @Test
  def shouldTruncateLeaderEpochCheckpointFileWhenTruncatingLog(): Unit = {
    // builds a single-record batch at a fixed base offset with the given epoch
    def createRecords(startOffset: Long, epoch: Int): MemoryRecords = {
      TestUtils.records(Seq(new SimpleRecord("value".getBytes)),
        baseOffset = startOffset, partitionLeaderEpoch = epoch)
    }

    val logConfig = LogTest.createLogConfig(segmentBytes = 10 * createRecords(0, 0).sizeInBytes)
    val log = createLog(logDir, logConfig)
    val cache = epochCache(log)

    // appends `count` consecutive single-record batches as a follower
    def append(epoch: Int, startOffset: Long, count: Int): Unit = {
      for (i <- 0 until count)
        log.appendAsFollower(createRecords(startOffset + i, epoch))
    }

    //Given 2 segments, 10 messages per segment
    append(epoch = 0, startOffset = 0, count = 10)
    append(epoch = 1, startOffset = 10, count = 6)
    append(epoch = 2, startOffset = 16, count = 4)

    assertEquals(2, log.numberOfSegments)
    assertEquals(20, log.logEndOffset)

    //When truncate to LEO (no op)
    log.truncateTo(log.logEndOffset)
    //Then no change
    assertEquals(3, cache.epochEntries.size)

    //When truncate
    log.truncateTo(11)
    //Then no change
    assertEquals(2, cache.epochEntries.size)

    //When truncate
    log.truncateTo(10)
    //Then
    assertEquals(1, cache.epochEntries.size)

    //When truncate all
    log.truncateTo(0)
    //Then
    assertEquals(0, cache.epochEntries.size)
  }
/**
* Append a bunch of messages to a log and then re-open it with recovery and check that the leader epochs are recovered properly.
*/
@Test
def testLogRecoversForLeaderEpoch(): Unit = {
val log = createLog(logDir, LogConfig())
val leaderEpochCache = epochCache(log)
val firstBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 1, offset = 0)
log.appendAsFollower(records = firstBatch)
val secondBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 2, offset = 1)
log.appendAsFollower(records = secondBatch)
val thirdBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 2, offset = 2)
log.appendAsFollower(records = thirdBatch)
val fourthBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 3, offset = 3)
log.appendAsFollower(records = fourthBatch)
assertEquals(ListBuffer(EpochEntry(1, 0), EpochEntry(2, 1), EpochEntry(3, 3)), leaderEpochCache.epochEntries)
// deliberately remove some of the epoch entries
leaderEpochCache.truncateFromEnd(2)
assertNotEquals(ListBuffer(EpochEntry(1, 0), EpochEntry(2, 1), EpochEntry(3, 3)), leaderEpochCache.epochEntries)
log.close()
// reopen the log and recover from the beginning
val recoveredLog = createLog(logDir, LogConfig())
val recoveredLeaderEpochCache = epochCache(recoveredLog)
// epoch entries should be recovered
assertEquals(ListBuffer(EpochEntry(1, 0), EpochEntry(2, 1), EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries)
recoveredLog.close()
}
  /**
   * Wrap a single record log buffer with leader epoch.
   *
   * @param value record value bytes
   * @param key optional record key bytes (null for no key)
   * @param leaderEpoch partition leader epoch stamped on the batch
   * @param offset base offset assigned to the batch
   * @param codec compression type for the batch
   * @param timestamp record timestamp (defaults to NO_TIMESTAMP)
   * @param magicValue record batch magic byte
   * @return a MemoryRecords containing exactly one record
   */
  private def singletonRecordsWithLeaderEpoch(value: Array[Byte],
                                              key: Array[Byte] = null,
                                              leaderEpoch: Int,
                                              offset: Long,
                                              codec: CompressionType = CompressionType.NONE,
                                              timestamp: Long = RecordBatch.NO_TIMESTAMP,
                                              magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
    val records = Seq(new SimpleRecord(timestamp, key, value))

    val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset,
      mockTime.milliseconds, leaderEpoch)
    records.foreach(builder.append)
    builder.build()
  }
@Test
def testFirstUnstableOffsetNoTransactionalData(): Unit = {
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
val records = MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord("foo".getBytes),
new SimpleRecord("bar".getBytes),
new SimpleRecord("baz".getBytes))
log.appendAsLeader(records, leaderEpoch = 0)
assertEquals(None, log.firstUnstableOffset)
}
  /**
   * The first unstable offset tracks the start of the earliest open transaction and is
   * only cleared after the transaction commits AND the high watermark advances past it.
   */
  @Test
  def testFirstUnstableOffsetWithTransactionalData(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)

    val pid = 137L
    val epoch = 5.toShort
    var seq = 0

    // add some transactional records
    val records = MemoryRecords.withTransactionalRecords(CompressionType.NONE, pid, epoch, seq,
      new SimpleRecord("foo".getBytes),
      new SimpleRecord("bar".getBytes),
      new SimpleRecord("baz".getBytes))

    val firstAppendInfo = log.appendAsLeader(records, leaderEpoch = 0)
    assertEquals(firstAppendInfo.firstOffset, log.firstUnstableOffset)

    // add more transactional records
    seq += 3
    log.appendAsLeader(MemoryRecords.withTransactionalRecords(CompressionType.NONE, pid, epoch, seq,
      new SimpleRecord("blah".getBytes)), leaderEpoch = 0)

    // LSO should not have changed
    assertEquals(firstAppendInfo.firstOffset, log.firstUnstableOffset)

    // now transaction is committed
    val commitAppendInfo = appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.COMMIT)

    // first unstable offset is not updated until the high watermark is advanced
    assertEquals(firstAppendInfo.firstOffset, log.firstUnstableOffset)
    log.updateHighWatermark(commitAppendInfo.lastOffset + 1)

    // now there should be no first unstable offset
    assertEquals(None, log.firstUnstableOffset)
  }
  /**
   * Race test: a READ_COMMITTED fetch must never return records from a transaction that
   * is still open, even while the high watermark is being advanced concurrently.
   * Thread 1 writes one-record transactions, reads at the committed isolation level,
   * then aborts; thread 2 continuously pushes the high watermark to the log end.
   * If isolation is respected, every committed read comes back empty.
   */
  @Test
  def testReadCommittedWithConcurrentHighWatermarkUpdates(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)
    val lastOffset = 50L

    val producerEpoch = 0.toShort
    val producerId = 15L
    val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch)

    // Thread 1 writes single-record transactions and attempts to read them
    // before they have been aborted, and then aborts them
    val txnWriteAndReadLoop: Callable[Int] = () => {
      var nonEmptyReads = 0
      while (log.logEndOffset < lastOffset) {
        val currentLogEndOffset = log.logEndOffset

        appendProducer(1)

        val readInfo = log.read(
          startOffset = currentLogEndOffset,
          maxLength = Int.MaxValue,
          isolation = FetchTxnCommitted,
          minOneMessage = false)

        if (readInfo.records.sizeInBytes() > 0)
          nonEmptyReads += 1

        appendEndTxnMarkerAsLeader(log, producerId, producerEpoch, ControlRecordType.ABORT)
      }
      nonEmptyReads
    }

    // Thread 2 watches the log and updates the high watermark
    val hwUpdateLoop: Runnable = () => {
      while (log.logEndOffset < lastOffset) {
        log.updateHighWatermark(log.logEndOffset)
      }
    }

    val executor = Executors.newFixedThreadPool(2)
    try {
      executor.submit(hwUpdateLoop)

      val future = executor.submit(txnWriteAndReadLoop)
      val nonEmptyReads = future.get()

      // no read may have observed uncommitted data
      assertEquals(0, nonEmptyReads)
    } finally {
      executor.shutdownNow()
    }
  }
  /**
   * Interleaves transactions from four producers with non-transactional data, aborts two
   * of them, and verifies the transaction index reports exactly those aborted transactions.
   * Also checks that the cached first-unstable-offset metadata tracks the high watermark.
   * The trailing comments on each append record the running next-offset.
   */
  @Test
  def testTransactionIndexUpdated(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort

    val pid1 = 1L
    val pid2 = 2L
    val pid3 = 3L
    val pid4 = 4L

    val appendPid1 = appendTransactionalAsLeader(log, pid1, epoch)
    val appendPid2 = appendTransactionalAsLeader(log, pid2, epoch)
    val appendPid3 = appendTransactionalAsLeader(log, pid3, epoch)
    val appendPid4 = appendTransactionalAsLeader(log, pid4, epoch)

    // mix transactional and non-transactional data
    appendPid1(5) // nextOffset: 5
    appendNonTransactionalAsLeader(log, 3) // 8
    appendPid2(2) // 10
    appendPid1(4) // 14
    appendPid3(3) // 17
    appendNonTransactionalAsLeader(log, 2) // 19
    appendPid1(10) // 29
    appendEndTxnMarkerAsLeader(log, pid1, epoch, ControlRecordType.ABORT) // 30
    appendPid2(6) // 36
    appendPid4(3) // 39
    appendNonTransactionalAsLeader(log, 10) // 49
    appendPid3(9) // 58
    appendEndTxnMarkerAsLeader(log, pid3, epoch, ControlRecordType.COMMIT) // 59
    appendPid4(8) // 67
    appendPid2(7) // 74
    appendEndTxnMarkerAsLeader(log, pid2, epoch, ControlRecordType.ABORT) // 75
    appendNonTransactionalAsLeader(log, 10) // 85
    appendPid4(4) // 89
    appendEndTxnMarkerAsLeader(log, pid4, epoch, ControlRecordType.COMMIT) // 90

    // only pid1 and pid2 were aborted; pid3 and pid4 committed
    val abortedTransactions = allAbortedTransactions(log)
    val expectedTransactions = List(
      new AbortedTxn(pid1, 0L, 29L, 8L),
      new AbortedTxn(pid2, 8L, 74L, 36L)
    )
    assertEquals(expectedTransactions, abortedTransactions)

    // Verify caching of the segment position of the first unstable offset
    log.updateHighWatermark(30L)
    assertCachedFirstUnstableOffset(log, expectedOffset = 8L)

    log.updateHighWatermark(75L)
    assertCachedFirstUnstableOffset(log, expectedOffset = 36L)

    log.updateHighWatermark(log.logEndOffset)
    assertEquals(None, log.firstUnstableOffset)
  }
  /**
   * Deletes every offset and transaction index file and reopens the log, forcing a full
   * recovery; the rebuilt transaction index must report the same aborted transactions.
   * The trailing comments on each append record the running next-offset.
   */
  @Test
  def testFullTransactionIndexRecovery(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 128 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort

    val pid1 = 1L
    val pid2 = 2L
    val pid3 = 3L
    val pid4 = 4L

    val appendPid1 = appendTransactionalAsLeader(log, pid1, epoch)
    val appendPid2 = appendTransactionalAsLeader(log, pid2, epoch)
    val appendPid3 = appendTransactionalAsLeader(log, pid3, epoch)
    val appendPid4 = appendTransactionalAsLeader(log, pid4, epoch)

    // mix transactional and non-transactional data
    appendPid1(5) // nextOffset: 5
    appendNonTransactionalAsLeader(log, 3) // 8
    appendPid2(2) // 10
    appendPid1(4) // 14
    appendPid3(3) // 17
    appendNonTransactionalAsLeader(log, 2) // 19
    appendPid1(10) // 29
    appendEndTxnMarkerAsLeader(log, pid1, epoch, ControlRecordType.ABORT) // 30
    appendPid2(6) // 36
    appendPid4(3) // 39
    appendNonTransactionalAsLeader(log, 10) // 49
    appendPid3(9) // 58
    appendEndTxnMarkerAsLeader(log, pid3, epoch, ControlRecordType.COMMIT) // 59
    appendPid4(8) // 67
    appendPid2(7) // 74
    appendEndTxnMarkerAsLeader(log, pid2, epoch, ControlRecordType.ABORT) // 75
    appendNonTransactionalAsLeader(log, 10) // 85
    appendPid4(4) // 89
    appendEndTxnMarkerAsLeader(log, pid4, epoch, ControlRecordType.COMMIT) // 90

    // delete all the offset and transaction index files to force recovery
    log.logSegments.foreach { segment =>
      segment.offsetIndex.deleteIfExists()
      segment.txnIndex.deleteIfExists()
    }

    log.close()

    val reloadedLogConfig = LogTest.createLogConfig(segmentBytes = 1024 * 5)
    val reloadedLog = createLog(logDir, reloadedLogConfig)
    val abortedTransactions = allAbortedTransactions(reloadedLog)
    assertEquals(List(new AbortedTxn(pid1, 0L, 29L, 8L), new AbortedTxn(pid2, 8L, 74L, 36L)), abortedTransactions)
  }
  /**
   * Deletes only the last segment's index files and reopens the log with the recovery
   * point at that segment's base offset, so recovery rebuilds just the final segment;
   * the aborted transactions must still be reported correctly.
   * The trailing comments on each append record the running next-offset.
   */
  @Test
  def testRecoverOnlyLastSegment(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 128 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort

    val pid1 = 1L
    val pid2 = 2L
    val pid3 = 3L
    val pid4 = 4L

    val appendPid1 = appendTransactionalAsLeader(log, pid1, epoch)
    val appendPid2 = appendTransactionalAsLeader(log, pid2, epoch)
    val appendPid3 = appendTransactionalAsLeader(log, pid3, epoch)
    val appendPid4 = appendTransactionalAsLeader(log, pid4, epoch)

    // mix transactional and non-transactional data
    appendPid1(5) // nextOffset: 5
    appendNonTransactionalAsLeader(log, 3) // 8
    appendPid2(2) // 10
    appendPid1(4) // 14
    appendPid3(3) // 17
    appendNonTransactionalAsLeader(log, 2) // 19
    appendPid1(10) // 29
    appendEndTxnMarkerAsLeader(log, pid1, epoch, ControlRecordType.ABORT) // 30
    appendPid2(6) // 36
    appendPid4(3) // 39
    appendNonTransactionalAsLeader(log, 10) // 49
    appendPid3(9) // 58
    appendEndTxnMarkerAsLeader(log, pid3, epoch, ControlRecordType.COMMIT) // 59
    appendPid4(8) // 67
    appendPid2(7) // 74
    appendEndTxnMarkerAsLeader(log, pid2, epoch, ControlRecordType.ABORT) // 75
    appendNonTransactionalAsLeader(log, 10) // 85
    appendPid4(4) // 89
    appendEndTxnMarkerAsLeader(log, pid4, epoch, ControlRecordType.COMMIT) // 90

    // delete the last offset and transaction index files to force recovery
    val lastSegment = log.logSegments.last
    val recoveryPoint = lastSegment.baseOffset
    lastSegment.offsetIndex.deleteIfExists()
    lastSegment.txnIndex.deleteIfExists()

    log.close()

    val reloadedLogConfig = LogTest.createLogConfig(segmentBytes = 1024 * 5)
    val reloadedLog = createLog(logDir, reloadedLogConfig, recoveryPoint = recoveryPoint)
    val abortedTransactions = allAbortedTransactions(reloadedLog)
    assertEquals(List(new AbortedTxn(pid1, 0L, 29L, 8L), new AbortedTxn(pid2, 8L, 74L, 36L)), abortedTransactions)
  }
  /**
   * Like testRecoverOnlyLastSegment, but with all producer snapshot files removed first,
   * which forces producer state to be rebuilt from the very start of the log; the aborted
   * transactions must still be reported correctly.
   * The trailing comments on each append record the running next-offset.
   */
  @Test
  def testRecoverLastSegmentWithNoSnapshots(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 128 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort

    val pid1 = 1L
    val pid2 = 2L
    val pid3 = 3L
    val pid4 = 4L

    val appendPid1 = appendTransactionalAsLeader(log, pid1, epoch)
    val appendPid2 = appendTransactionalAsLeader(log, pid2, epoch)
    val appendPid3 = appendTransactionalAsLeader(log, pid3, epoch)
    val appendPid4 = appendTransactionalAsLeader(log, pid4, epoch)

    // mix transactional and non-transactional data
    appendPid1(5) // nextOffset: 5
    appendNonTransactionalAsLeader(log, 3) // 8
    appendPid2(2) // 10
    appendPid1(4) // 14
    appendPid3(3) // 17
    appendNonTransactionalAsLeader(log, 2) // 19
    appendPid1(10) // 29
    appendEndTxnMarkerAsLeader(log, pid1, epoch, ControlRecordType.ABORT) // 30
    appendPid2(6) // 36
    appendPid4(3) // 39
    appendNonTransactionalAsLeader(log, 10) // 49
    appendPid3(9) // 58
    appendEndTxnMarkerAsLeader(log, pid3, epoch, ControlRecordType.COMMIT) // 59
    appendPid4(8) // 67
    appendPid2(7) // 74
    appendEndTxnMarkerAsLeader(log, pid2, epoch, ControlRecordType.ABORT) // 75
    appendNonTransactionalAsLeader(log, 10) // 85
    appendPid4(4) // 89
    appendEndTxnMarkerAsLeader(log, pid4, epoch, ControlRecordType.COMMIT) // 90

    deleteProducerSnapshotFiles()

    // delete the last offset and transaction index files to force recovery. this should force us to rebuild
    // the producer state from the start of the log
    val lastSegment = log.logSegments.last
    val recoveryPoint = lastSegment.baseOffset
    lastSegment.offsetIndex.deleteIfExists()
    lastSegment.txnIndex.deleteIfExists()

    log.close()

    val reloadedLogConfig = LogTest.createLogConfig(segmentBytes = 1024 * 5)
    val reloadedLog = createLog(logDir, reloadedLogConfig, recoveryPoint = recoveryPoint)
    val abortedTransactions = allAbortedTransactions(reloadedLog)
    assertEquals(List(new AbortedTxn(pid1, 0L, 29L, 8L), new AbortedTxn(pid2, 8L, 74L, 36L)), abortedTransactions)
  }
  /**
   * Same scenario as testTransactionIndexUpdated, but the batches are pre-built into a
   * buffer with explicit offsets and appended as a follower (replication path); the
   * transaction index and first-unstable-offset caching must behave identically.
   */
  @Test
  def testTransactionIndexUpdatedThroughReplication(): Unit = {
    val epoch = 0.toShort
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)
    val buffer = ByteBuffer.allocate(2048)

    val pid1 = 1L
    val pid2 = 2L
    val pid3 = 3L
    val pid4 = 4L

    val appendPid1 = appendTransactionalToBuffer(buffer, pid1, epoch)
    val appendPid2 = appendTransactionalToBuffer(buffer, pid2, epoch)
    val appendPid3 = appendTransactionalToBuffer(buffer, pid3, epoch)
    val appendPid4 = appendTransactionalToBuffer(buffer, pid4, epoch)

    // each call takes (baseOffset, recordCount); offsets are assigned explicitly as a leader would
    appendPid1(0L, 5)
    appendNonTransactionalToBuffer(buffer, 5L, 3)
    appendPid2(8L, 2)
    appendPid1(10L, 4)
    appendPid3(14L, 3)
    appendNonTransactionalToBuffer(buffer, 17L, 2)
    appendPid1(19L, 10)
    appendEndTxnMarkerToBuffer(buffer, pid1, epoch, 29L, ControlRecordType.ABORT)
    appendPid2(30L, 6)
    appendPid4(36L, 3)
    appendNonTransactionalToBuffer(buffer, 39L, 10)
    appendPid3(49L, 9)
    appendEndTxnMarkerToBuffer(buffer, pid3, epoch, 58L, ControlRecordType.COMMIT)
    appendPid4(59L, 8)
    appendPid2(67L, 7)
    appendEndTxnMarkerToBuffer(buffer, pid2, epoch, 74L, ControlRecordType.ABORT)
    appendNonTransactionalToBuffer(buffer, 75L, 10)
    appendPid4(85L, 4)
    appendEndTxnMarkerToBuffer(buffer, pid4, epoch, 89L, ControlRecordType.COMMIT)

    buffer.flip()

    // replicate the whole buffer in one follower append
    appendAsFollower(log, MemoryRecords.readableRecords(buffer))

    val abortedTransactions = allAbortedTransactions(log)
    val expectedTransactions = List(
      new AbortedTxn(pid1, 0L, 29L, 8L),
      new AbortedTxn(pid2, 8L, 74L, 36L)
    )

    assertEquals(expectedTransactions, abortedTransactions)

    // Verify caching of the segment position of the first unstable offset
    log.updateHighWatermark(30L)
    assertCachedFirstUnstableOffset(log, expectedOffset = 8L)

    log.updateHighWatermark(75L)
    assertCachedFirstUnstableOffset(log, expectedOffset = 36L)

    log.updateHighWatermark(log.logEndOffset)
    assertEquals(None, log.firstUnstableOffset)
  }
private def assertCachedFirstUnstableOffset(log: Log, expectedOffset: Long): Unit = {
assertTrue(log.producerStateManager.firstUnstableOffset.isDefined)
val firstUnstableOffset = log.producerStateManager.firstUnstableOffset.get
assertEquals(expectedOffset, firstUnstableOffset.messageOffset)
assertFalse(firstUnstableOffset.messageOffsetOnly)
assertValidLogOffsetMetadata(log, firstUnstableOffset)
}
  // Validates that offset metadata points into a real segment: the segment base offset
  // matches an existing segment, the file position is within the segment, and reading at
  // that offset yields the same metadata (or nothing when positioned at the segment end).
  private def assertValidLogOffsetMetadata(log: Log, offsetMetadata: LogOffsetMetadata): Unit = {
    assertFalse(offsetMetadata.messageOffsetOnly)

    val segmentBaseOffset = offsetMetadata.segmentBaseOffset
    // look up the segment whose base offset matches the metadata
    val segmentOpt = log.logSegments(segmentBaseOffset, segmentBaseOffset + 1).headOption
    assertTrue(segmentOpt.isDefined)

    val segment = segmentOpt.get
    assertEquals(segmentBaseOffset, segment.baseOffset)
    assertTrue(offsetMetadata.relativePositionInSegment <= segment.size)

    val readInfo = segment.read(offsetMetadata.messageOffset,
      maxSize = 2048,
      maxPosition = segment.size,
      minOneMessage = false)

    // a read positioned before the segment end must round-trip the exact same metadata;
    // at the segment end there is nothing to read
    if (offsetMetadata.relativePositionInSegment < segment.size)
      assertEquals(offsetMetadata, readInfo.fetchOffsetMetadata)
    else
      assertNull(readInfo)
  }
@Test(expected = classOf[TransactionCoordinatorFencedException])
def testZombieCoordinatorFenced(): Unit = {
val pid = 1L
val epoch = 0.toShort
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
val append = appendTransactionalAsLeader(log, pid, epoch)
append(10)
appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1)
append(5)
appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.COMMIT, coordinatorEpoch = 2)
appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1)
}
  /**
   * Coordinator fencing also applies to empty transactions: markers for a producer with
   * no pending data still bump the tracked coordinator epoch, and a later marker with an
   * older coordinator epoch must be fenced.
   */
  @Test
  def testZombieCoordinatorFencedEmptyTransaction(): Unit = {
    val pid = 1L
    val epoch = 0.toShort
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)

    // replicate a complete transaction (data + COMMIT at coordinator epoch 0) as a follower
    val buffer = ByteBuffer.allocate(256)
    val append = appendTransactionalToBuffer(buffer, pid, epoch, leaderEpoch = 1)
    append(0, 10)
    appendEndTxnMarkerToBuffer(buffer, pid, epoch, 10L, ControlRecordType.COMMIT,
      coordinatorEpoch = 0, leaderEpoch = 1)

    buffer.flip()
    log.appendAsFollower(MemoryRecords.readableRecords(buffer))

    // empty-transaction markers at coordinator epoch 2 are accepted (including a repeat)
    appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 2, leaderEpoch = 1)
    appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 2, leaderEpoch = 1)
    // a marker from the older coordinator epoch 1 must now be fenced
    assertThrows[TransactionCoordinatorFencedException] {
      appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1, leaderEpoch = 1)
    }
  }
@Test
def testEndTxnWithFencedProducerEpoch(): Unit = {
val producerId = 1L
val epoch = 5.toShort
val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
val log = createLog(logDir, logConfig)
appendEndTxnMarkerAsLeader(log, producerId, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1)
assertThrows[ProducerFencedException] {
appendEndTxnMarkerAsLeader(log, producerId, (epoch - 1).toShort, ControlRecordType.ABORT, coordinatorEpoch = 1)
}
}
  /**
   * When the log start offset is advanced into the middle of a segment, the first
   * unstable offset must be clamped up to the new log start offset.
   */
  @Test
  def testLastStableOffsetDoesNotExceedLogStartOffsetMidSegment(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort
    val pid = 1L
    val appendPid = appendTransactionalAsLeader(log, pid, epoch)

    appendPid(5)
    appendNonTransactionalAsLeader(log, 3)
    assertEquals(8L, log.logEndOffset)

    log.roll()
    assertEquals(2, log.logSegments.size)
    appendPid(5)

    // the open transaction started at offset 0
    assertEquals(Some(0L), log.firstUnstableOffset)

    log.updateHighWatermark(log.logEndOffset)
    // advance the start offset into the middle of the first segment
    log.maybeIncrementLogStartOffset(5L)

    // the first unstable offset should be lower bounded by the log start offset
    assertEquals(Some(5L), log.firstUnstableOffset)
  }
  /**
   * Like the mid-segment case, but the start offset moves to a segment boundary and the
   * preceding segment is deleted; the first unstable offset must still be clamped up to
   * the new log start offset.
   */
  @Test
  def testLastStableOffsetDoesNotExceedLogStartOffsetAfterSegmentDeletion(): Unit = {
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)
    val epoch = 0.toShort
    val pid = 1L
    val appendPid = appendTransactionalAsLeader(log, pid, epoch)

    appendPid(5)
    appendNonTransactionalAsLeader(log, 3)
    assertEquals(8L, log.logEndOffset)

    log.roll()
    assertEquals(2, log.logSegments.size)
    appendPid(5)

    // the open transaction started at offset 0
    assertEquals(Some(0L), log.firstUnstableOffset)

    log.updateHighWatermark(log.logEndOffset)
    // advance the start offset to the second segment's base and delete the first segment
    log.maybeIncrementLogStartOffset(8L)
    log.updateHighWatermark(log.logEndOffset)
    log.deleteOldSegments()
    assertEquals(1, log.logSegments.size)

    // the first unstable offset should be lower bounded by the log start offset
    assertEquals(Some(8L), log.firstUnstableOffset)
  }
  /**
   * If writing a transaction marker to the transaction index fails, the record still lands
   * in the log but the LSO must not advance; on restart, recovery rebuilds the index and
   * the aborted transactions become visible again.
   */
  @Test
  def testAppendToTransactionIndexFailure(): Unit = {
    val pid = 1L
    val epoch = 0.toShort
    val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
    val log = createLog(logDir, logConfig)

    val append = appendTransactionalAsLeader(log, pid, epoch)
    append(10)

    // Kind of a hack, but renaming the index to a directory ensures that the append
    // to the index will fail.
    log.activeSegment.txnIndex.renameTo(log.dir)

    // The append will be written to the log successfully, but the write to the index will fail
    assertThrows[KafkaStorageException] {
      appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1)
    }
    assertEquals(11L, log.logEndOffset)
    assertEquals(0L, log.lastStableOffset)

    // Try the append a second time. The appended offset in the log should still increase.
    assertThrows[KafkaStorageException] {
      appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, coordinatorEpoch = 1)
    }
    assertEquals(12L, log.logEndOffset)
    assertEquals(0L, log.lastStableOffset)

    // Even if the high watermark is updated, the first unstable offset does not move
    log.updateHighWatermark(12L)
    assertEquals(0L, log.lastStableOffset)

    log.close()

    // reopening the log triggers recovery, which rebuilds the transaction index
    val reopenedLog = createLog(logDir, logConfig)
    assertEquals(12L, reopenedLog.logEndOffset)
    assertEquals(2, reopenedLog.activeSegment.txnIndex.allAbortedTxns.size)
    reopenedLog.updateHighWatermark(12L)
    assertEquals(None, reopenedLog.firstUnstableOffset)
  }
/**
 * Verifies that fetchOffsetSnapshot returns a high watermark that carries full
 * offset metadata (segment position), not just the raw message offset, and that
 * repeated snapshot fetches remain fully resolved.
 *
 * Fix: the original passed arguments to assertEquals as (actual, expected),
 * reversed from the JUnit convention used everywhere else in this file, which
 * yields misleading failure messages. The assertions now list the expected
 * value first; pass/fail behavior is unchanged.
 */
@Test
def testOffsetSnapshot(): Unit = {
  val logConfig = LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5)
  val log = createLog(logDir, logConfig)

  // append a few records
  appendAsFollower(log, MemoryRecords.withRecords(CompressionType.NONE,
    new SimpleRecord("a".getBytes),
    new SimpleRecord("b".getBytes),
    new SimpleRecord("c".getBytes)), 5)

  log.updateHighWatermark(2L)

  var offsets: LogOffsetSnapshot = log.fetchOffsetSnapshot
  assertEquals(2L, offsets.highWatermark.messageOffset)
  assertFalse(offsets.highWatermark.messageOffsetOnly)

  // A second snapshot must also be fully resolved.
  offsets = log.fetchOffsetSnapshot
  assertEquals(2L, offsets.highWatermark.messageOffset)
  assertFalse(offsets.highWatermark.messageOffsetOnly)
}
/**
 * Verifies last-stable-offset tracking when two transactional producers interleave
 * with non-transactional data: the LSO pins to the earliest open transaction,
 * advances past an aborted one to the next open transaction, and clears entirely
 * once every transaction has completed.
 */
@Test
def testLastStableOffsetWithMixedProducerData(): Unit = {
  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 1024 * 1024 * 5))

  // for convenience, both producers share the same epoch
  val sharedEpoch = 5.toShort
  val firstProducerId = 137L
  val firstSequence = 0
  val secondProducerId = 983L
  val secondSequence = 0

  // First producer opens a transaction: the unstable offset pins to its first offset.
  val firstTxn = log.appendAsLeader(
    MemoryRecords.withTransactionalRecords(CompressionType.NONE, firstProducerId, sharedEpoch,
      firstSequence,
      new SimpleRecord("a".getBytes),
      new SimpleRecord("b".getBytes),
      new SimpleRecord("c".getBytes)),
    leaderEpoch = 0)
  assertEquals(firstTxn.firstOffset, log.firstUnstableOffset)

  // Interleave some non-transactional records; they must not affect the LSO.
  log.appendAsLeader(
    MemoryRecords.withRecords(CompressionType.NONE,
      new SimpleRecord("g".getBytes),
      new SimpleRecord("h".getBytes),
      new SimpleRecord("i".getBytes)),
    leaderEpoch = 0)

  // Second producer opens its own transaction; the LSO is still held by the first.
  val secondTxn = log.appendAsLeader(
    MemoryRecords.withTransactionalRecords(CompressionType.NONE, secondProducerId, sharedEpoch,
      secondSequence,
      new SimpleRecord("d".getBytes),
      new SimpleRecord("e".getBytes),
      new SimpleRecord("f".getBytes)),
    leaderEpoch = 0)
  assertEquals(firstTxn.firstOffset, log.firstUnstableOffset)

  // Aborting the first transaction moves the unstable offset to the second one.
  val abortInfo = appendEndTxnMarkerAsLeader(log, firstProducerId, sharedEpoch, ControlRecordType.ABORT)
  log.updateHighWatermark(abortInfo.lastOffset + 1)
  assertEquals(secondTxn.firstOffset, log.firstUnstableOffset)

  // Committing the second transaction leaves no unstable offset at all.
  val commitInfo = appendEndTxnMarkerAsLeader(log, secondProducerId, sharedEpoch, ControlRecordType.COMMIT)
  log.updateHighWatermark(commitInfo.lastOffset + 1)
  assertEquals(None, log.firstUnstableOffset)
}
/**
 * Verifies that a single transaction whose records roll into a second segment is
 * handled correctly: the unstable offset stays at the transaction's first offset
 * across the roll, clears after the abort, and a READ_COMMITTED fetch reports the
 * aborted transaction exactly once.
 */
@Test
def testAbortedTransactionSpanningMultipleSegments(): Unit = {
  val pid = 137L
  val epoch = 5.toShort
  var seq = 0

  val records = MemoryRecords.withTransactionalRecords(CompressionType.NONE, pid, epoch, seq,
    new SimpleRecord("a".getBytes),
    new SimpleRecord("b".getBytes),
    new SimpleRecord("c".getBytes))

  // Segment size equals one batch so the next append forces a roll.
  val logConfig = LogTest.createLogConfig(segmentBytes = records.sizeInBytes)
  val log = createLog(logDir, logConfig)

  val firstAppendInfo = log.appendAsLeader(records, leaderEpoch = 0)
  assertEquals(firstAppendInfo.firstOffset, log.firstUnstableOffset)

  // this write should spill to the second segment
  seq = 3
  log.appendAsLeader(MemoryRecords.withTransactionalRecords(CompressionType.NONE, pid, epoch, seq,
    new SimpleRecord("d".getBytes),
    new SimpleRecord("e".getBytes),
    new SimpleRecord("f".getBytes)), leaderEpoch = 0)
  assertEquals(firstAppendInfo.firstOffset, log.firstUnstableOffset)
  assertEquals(3L, log.logEndOffsetMetadata.segmentBaseOffset)

  // now abort the transaction
  val abortAppendInfo = appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT)
  log.updateHighWatermark(abortAppendInfo.lastOffset + 1)
  assertEquals(None, log.firstUnstableOffset)

  // now check that a fetch includes the aborted transaction
  val fetchDataInfo = log.read(0L,
    maxLength = 2048,
    isolation = FetchTxnCommitted,
    minOneMessage = true)
  // abortedTransactions is optional here (`.get` below), so size == 1 asserts it
  // is present; the explicit isDefined check documents the same fact.
  assertEquals(1, fetchDataInfo.abortedTransactions.size)
  assertTrue(fetchDataInfo.abortedTransactions.isDefined)
  assertEquals(new AbortedTransaction(pid, 0), fetchDataInfo.abortedTransactions.get.head)
}
/**
 * A partition directory flagged for deletion that contains no segment files must
 * still load without throwing, and loading creates the single empty active segment.
 */
@Test
def testLoadPartitionDirWithNoSegmentsShouldNotThrow(): Unit = {
  val deleteDirName = Log.logDeleteDirName(new TopicPartition("foo", 3))
  val emptyLogDir = new File(tmpDir, deleteDirName)
  emptyLogDir.mkdirs()
  val log = createLog(emptyLogDir, LogTest.createLogConfig())
  assertEquals(1, log.numberOfSegments)
}
/**
 * Verifies that creating a log registers exactly one gauge per log metric name,
 * all tagged with this topic-partition, and that deleting the log unregisters
 * every one of them.
 */
@Test
def testMetricsRemovedOnLogDeletion(): Unit = {
  TestUtils.clearYammerMetrics()

  val log = createLog(logDir, LogTest.createLogConfig(segmentBytes = 1024 * 1024))
  val tp = Log.parseTopicPartitionName(logDir)
  val expectedTag = s"topic=${tp.topic},partition=${tp.partition}"

  // All "Log"-typed metrics registered so far must belong to this partition.
  val registered = metricsKeySet.filter(_.getType == "Log")
  assertEquals(LogMetricNames.allMetricNames.size, registered.size)
  registered.foreach { key =>
    assertTrue(key.getMBeanName.contains(expectedTag))
  }

  // Delete the log and validate that corresponding metrics were removed.
  log.delete()
  val remaining = metricsKeySet.filter(_.getType == "Log")
  assertTrue(remaining.isEmpty)
}
// Collects the aborted-transaction entries recorded in every segment's transaction index.
private def allAbortedTransactions(log: Log) =
  log.logSegments.flatMap { segment => segment.txnIndex.allAbortedTxns }
/**
 * Returns a closure that appends transactional batches for the given producer.
 * The closure tracks the producer's sequence number across invocations; each call
 * appends `n` records whose values are their own sequence numbers as strings.
 */
private def appendTransactionalAsLeader(log: Log, producerId: Long, producerEpoch: Short): Int => Unit = {
  // Mutable counter shared by every invocation of the returned function.
  var nextSequence = 0
  count: Int => {
    val payload = (nextSequence until nextSequence + count).map { s =>
      new SimpleRecord(mockTime.milliseconds(), s"$s".getBytes)
    }
    val batch = MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId,
      producerEpoch, nextSequence, payload: _*)
    log.appendAsLeader(batch, leaderEpoch = 0)
    nextSequence += count
  }
}
/**
 * Appends a transaction end marker (COMMIT or ABORT) for the given producer.
 * Control records are appended with the Coordinator origin, which bypasses the
 * usual client-produce validation path.
 */
private def appendEndTxnMarkerAsLeader(log: Log,
                                       producerId: Long,
                                       producerEpoch: Short,
                                       controlType: ControlRecordType,
                                       coordinatorEpoch: Int = 0,
                                       leaderEpoch: Int = 0,
                                       timestamp: Long = mockTime.milliseconds()): LogAppendInfo = {
  val marker = endTxnRecords(controlType, producerId, producerEpoch,
    coordinatorEpoch = coordinatorEpoch, timestamp = timestamp)
  log.appendAsLeader(marker, origin = AppendOrigin.Coordinator, leaderEpoch = leaderEpoch)
}
// Appends `numRecords` plain (non-transactional) records; each record's value is
// its index rendered as a string.
private def appendNonTransactionalAsLeader(log: Log, numRecords: Int): Unit = {
  val payload = (0 until numRecords).map(i => new SimpleRecord(s"$i".getBytes))
  log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE, payload: _*), leaderEpoch = 0)
}
/**
 * Returns a closure that serializes transactional batches directly into `buffer`
 * (no log involved), tracking the producer sequence number across invocations.
 * The closure takes the batch base offset and the number of records to write.
 */
private def appendTransactionalToBuffer(buffer: ByteBuffer,
                                        producerId: Long,
                                        producerEpoch: Short,
                                        leaderEpoch: Int = 0): (Long, Int) => Unit = {
  // Sequence number carried across calls to the returned closure.
  var sequence = 0
  (offset: Long, numRecords: Int) => {
    // `true` marks the batch as transactional.
    val builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME,
      offset, mockTime.milliseconds(), producerId, producerEpoch, sequence, true, leaderEpoch)

    for (seq <- sequence until sequence + numRecords) {
      val record = new SimpleRecord(s"$seq".getBytes)
      builder.append(record)
    }

    sequence += numRecords
    builder.close()
  }
}
// Serializes a transaction end marker (COMMIT/ABORT) for the given producer
// directly into `buffer` at the supplied offset.
private def appendEndTxnMarkerToBuffer(buffer: ByteBuffer,
                                       producerId: Long,
                                       producerEpoch: Short,
                                       offset: Long,
                                       controlType: ControlRecordType,
                                       coordinatorEpoch: Int = 0,
                                       leaderEpoch: Int = 0): Unit = {
  val endTxnMarker = new EndTransactionMarker(controlType, coordinatorEpoch)
  MemoryRecords.writeEndTransactionalMarker(buffer, offset, mockTime.milliseconds(), leaderEpoch,
    producerId, producerEpoch, endTxnMarker)
}
// Serializes `numRecords` plain records into `buffer` starting at `offset`;
// each record's value is its index as a string.
private def appendNonTransactionalToBuffer(buffer: ByteBuffer, offset: Long, numRecords: Int): Unit = {
  val builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, offset)
  var i = 0
  while (i < numRecords) {
    builder.append(new SimpleRecord(s"$i".getBytes))
    i += 1
  }
  builder.close()
}
// Stamps every batch with the given partition leader epoch (followers receive
// batches already stamped by the leader) and appends them as a follower would.
private def appendAsFollower(log: Log, records: MemoryRecords, leaderEpoch: Int = 0): Unit = {
  records.batches.forEach { batch => batch.setPartitionLeaderEpoch(leaderEpoch) }
  log.appendAsFollower(records)
}
/**
 * Creates the clean-shutdown marker file in the parent of the log directory and
 * returns it. Asserts both that the parent directory exists and that the marker
 * was created.
 *
 * Fix: the original passed the literal pattern "Data directory %s must exist" as
 * the assertion message without ever formatting it, so failures printed the raw
 * "%s" placeholder. The message now interpolates the actual path.
 */
private def createCleanShutdownFile(): File = {
  val parentLogDir = logDir.getParentFile
  assertTrue(s"Data directory $parentLogDir must exist", parentLogDir.isDirectory)
  val cleanShutdownFile = new File(parentLogDir, Log.CleanShutdownFile)
  cleanShutdownFile.createNewFile()
  assertTrue(".kafka_cleanshutdown must exist", cleanShutdownFile.exists())
  cleanShutdownFile
}
// Removes every producer-state snapshot file from the log directory.
private def deleteProducerSnapshotFiles(): Unit = {
  logDir.listFiles
    .filter(file => file.isFile && file.getName.endsWith(Log.ProducerSnapshotFileSuffix))
    .foreach(Utils.delete)
}
// Offsets of all on-disk producer-state snapshot files, in ascending order.
private def listProducerSnapshotOffsets: Seq[Long] = {
  val snapshotFiles = ProducerStateManager.listSnapshotFiles(logDir)
  snapshotFiles.map(Log.offsetFromFile).sorted
}
/**
 * Creates a [[Log]] for this test, delegating to the companion helper so that the
 * mock time, mock scheduler and shared broker stats are used by default. Every
 * default can be overridden via the named parameters.
 */
private def createLog(dir: File,
                      config: LogConfig,
                      brokerTopicStats: BrokerTopicStats = brokerTopicStats,
                      logStartOffset: Long = 0L,
                      recoveryPoint: Long = 0L,
                      scheduler: Scheduler = mockTime.scheduler,
                      time: Time = mockTime,
                      maxProducerIdExpirationMs: Int = 60 * 60 * 1000,
                      producerIdExpirationCheckIntervalMs: Int = LogManager.ProducerIdExpirationCheckIntervalMs): Log =
  LogTest.createLog(
    dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint,
    maxProducerIdExpirationMs, producerIdExpirationCheckIntervalMs)
/**
 * Builds a log whose directory has been pre-populated with a segment containing
 * overflowed offsets, and returns the log together with that segment. Fails the
 * test if no such segment was produced.
 */
private def createLogWithOffsetOverflow(logConfig: LogConfig): (Log, LogSegment) = {
  LogTest.initializeLogDirWithOverflowedSegment(logDir)

  val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue)
  LogTest.firstOverflowSegment(log) match {
    case Some(segment) => (log, segment)
    case None => Assertions.fail("Failed to create log with a segment which has overflowed offsets")
  }
}
// Recovers the test's log directory and validates its contents; delegates to the
// companion helper with this test's directory, broker stats and mock time.
private def recoverAndCheck(config: LogConfig,
                            expectedKeys: Iterable[Long],
                            expectDeletedFiles: Boolean = true): Log =
  LogTest.recoverAndCheck(logDir, config, expectedKeys, brokerTopicStats, mockTime,
    mockTime.scheduler, expectDeletedFiles)
// Convenience wrapper around Log.read with test-friendly defaults
// (read to the log end, return at least one message).
private def readLog(log: Log,
                    startOffset: Long,
                    maxLength: Int,
                    isolation: FetchIsolation = FetchLogEnd,
                    minOneMessage: Boolean = true): FetchDataInfo =
  log.read(startOffset, maxLength, isolation, minOneMessage)
}
/**
 * Companion helpers shared by the log tests: config/log factories and utilities
 * for constructing and inspecting logs with overflowed segment offsets.
 */
object LogTest {

  /**
   * Builds a [[LogConfig]] where every property defaults to the broker
   * [[Defaults]] and any individual property can be overridden by name.
   */
  def createLogConfig(segmentMs: Long = Defaults.SegmentMs,
                      segmentBytes: Int = Defaults.SegmentSize,
                      retentionMs: Long = Defaults.RetentionMs,
                      retentionBytes: Long = Defaults.RetentionSize,
                      segmentJitterMs: Long = Defaults.SegmentJitterMs,
                      cleanupPolicy: String = Defaults.CleanupPolicy,
                      maxMessageBytes: Int = Defaults.MaxMessageSize,
                      indexIntervalBytes: Int = Defaults.IndexInterval,
                      segmentIndexBytes: Int = Defaults.MaxIndexSize,
                      messageFormatVersion: String = Defaults.MessageFormatVersion,
                      fileDeleteDelayMs: Long = Defaults.FileDeleteDelayMs): LogConfig = {
    val logProps = new Properties()
    // Properties stores Objects, hence the explicit boxing ascriptions below.
    logProps.put(LogConfig.SegmentMsProp, segmentMs: java.lang.Long)
    logProps.put(LogConfig.SegmentBytesProp, segmentBytes: Integer)
    logProps.put(LogConfig.RetentionMsProp, retentionMs: java.lang.Long)
    logProps.put(LogConfig.RetentionBytesProp, retentionBytes: java.lang.Long)
    logProps.put(LogConfig.SegmentJitterMsProp, segmentJitterMs: java.lang.Long)
    logProps.put(LogConfig.CleanupPolicyProp, cleanupPolicy)
    logProps.put(LogConfig.MaxMessageBytesProp, maxMessageBytes: Integer)
    logProps.put(LogConfig.IndexIntervalBytesProp, indexIntervalBytes: Integer)
    logProps.put(LogConfig.SegmentIndexBytesProp, segmentIndexBytes: Integer)
    logProps.put(LogConfig.MessageFormatVersionProp, messageFormatVersion)
    logProps.put(LogConfig.FileDeleteDelayMsProp, fileDeleteDelayMs: java.lang.Long)
    LogConfig(logProps)
  }

  /** Constructs a [[Log]] over `dir` with the supplied collaborators and sane test defaults. */
  def createLog(dir: File,
                config: LogConfig,
                brokerTopicStats: BrokerTopicStats,
                scheduler: Scheduler,
                time: Time,
                logStartOffset: Long = 0L,
                recoveryPoint: Long = 0L,
                maxProducerIdExpirationMs: Int = 60 * 60 * 1000,
                producerIdExpirationCheckIntervalMs: Int = LogManager.ProducerIdExpirationCheckIntervalMs): Log = {
    Log(dir = dir,
      config = config,
      logStartOffset = logStartOffset,
      recoveryPoint = recoveryPoint,
      scheduler = scheduler,
      brokerTopicStats = brokerTopicStats,
      time = time,
      maxProducerIdExpirationMs = maxProducerIdExpirationMs,
      producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs,
      logDirFailureChannel = new LogDirFailureChannel(10))
  }

  /**
   * Check if the given log contains any segment with records that cause offset overflow.
   * @param log Log to check
   * @return true if log contains at least one segment with offset overflow; false otherwise
   */
  def hasOffsetOverflow(log: Log): Boolean = firstOverflowSegment(log).isDefined

  /**
   * Returns the first segment containing a batch whose offsets overflow the
   * segment, i.e. whose last offset exceeds baseOffset + Int.MaxValue or whose
   * base offset precedes the segment base offset.
   */
  def firstOverflowSegment(log: Log): Option[LogSegment] = {
    def hasOverflow(baseOffset: Long, batch: RecordBatch): Boolean =
      batch.lastOffset > baseOffset + Int.MaxValue || batch.baseOffset < baseOffset

    for (segment <- log.logSegments) {
      val overflowBatch = segment.log.batches.asScala.find(batch => hasOverflow(segment.baseOffset, batch))
      if (overflowBatch.isDefined)
        return Some(segment)
    }
    None
  }

  // Opens the raw FileRecords backing a segment file, bypassing LogSegment.
  private def rawSegment(logDir: File, baseOffset: Long): FileRecords =
    FileRecords.open(Log.logFile(logDir, baseOffset))

  /**
   * Initialize the given log directory with a set of segments, one of which will have an
   * offset which overflows the segment
   */
  def initializeLogDirWithOverflowedSegment(logDir: File): Unit = {
    // Writes three small batches whose offsets span baseOffset..baseOffset + Int.MaxValue - 1
    // into `segment`, creates the matching (empty) index files, and returns the next base offset.
    def writeSampleBatches(baseOffset: Long, segment: FileRecords): Long = {
      def record(offset: Long) = {
        val data = offset.toString.getBytes
        new SimpleRecord(data, data)
      }

      segment.append(MemoryRecords.withRecords(baseOffset, CompressionType.NONE, 0,
        record(baseOffset)))
      segment.append(MemoryRecords.withRecords(baseOffset + 1, CompressionType.NONE, 0,
        record(baseOffset + 1),
        record(baseOffset + 2)))
      segment.append(MemoryRecords.withRecords(baseOffset + Int.MaxValue - 1, CompressionType.NONE, 0,
        record(baseOffset + Int.MaxValue - 1)))
      // Need to create the offset files explicitly to avoid triggering segment recovery to truncate segment.
      Log.offsetIndexFile(logDir, baseOffset).createNewFile()
      Log.timeIndexFile(logDir, baseOffset).createNewFile()
      baseOffset + Int.MaxValue
    }

    // A segment whose batch offsets all fit within the Int relative-offset range.
    def writeNormalSegment(baseOffset: Long): Long = {
      val segment = rawSegment(logDir, baseOffset)
      try writeSampleBatches(baseOffset, segment)
      finally segment.close()
    }

    // Writing the sample batches twice pushes the second half past
    // baseOffset + Int.MaxValue, producing the overflowed segment.
    def writeOverflowSegment(baseOffset: Long): Long = {
      val segment = rawSegment(logDir, baseOffset)
      try {
        val nextOffset = writeSampleBatches(baseOffset, segment)
        writeSampleBatches(nextOffset, segment)
      } finally segment.close()
    }

    // We create three segments, the second of which contains offsets which overflow
    var nextOffset = 0L
    nextOffset = writeNormalSegment(nextOffset)
    nextOffset = writeOverflowSegment(nextOffset)
    writeNormalSegment(nextOffset)
  }

  /** Returns every record in the log, in segment/batch order. */
  def allRecords(log: Log): List[Record] = {
    val recordsFound = ListBuffer[Record]()
    for (logSegment <- log.logSegments) {
      for (batch <- logSegment.log.batches.asScala) {
        recordsFound ++= batch.iterator().asScala
      }
    }
    recordsFound.toList
  }

  /** Asserts that the log contains exactly `expectedRecords`, in order. */
  def verifyRecordsInLog(log: Log, expectedRecords: List[Record]): Unit = {
    assertEquals(expectedRecords, allRecords(log))
  }

  /* extract all the keys from a log */
  def keysInLog(log: Log): Iterable[Long] = {
    // Control batches carry no user keys; records missing a key or value are skipped.
    for (logSegment <- log.logSegments;
         batch <- logSegment.log.batches.asScala if !batch.isControlBatch;
         record <- batch.asScala if record.hasValue && record.hasKey)
      yield TestUtils.readString(record.key).toLong
  }

  /**
   * Recovers the log in `logDir`, then verifies that the expected keys survive and
   * that no temporary (.cleaned/.swap, and optionally .deleted) files remain.
   */
  def recoverAndCheck(logDir: File,
                      config: LogConfig,
                      expectedKeys: Iterable[Long],
                      brokerTopicStats: BrokerTopicStats,
                      time: Time,
                      scheduler: Scheduler,
                      expectDeletedFiles: Boolean = false): Log = {
    // Recover log file and check that after recovery, keys are as expected
    // and all temporary files have been deleted
    val recoveredLog = createLog(logDir, config, brokerTopicStats, scheduler, time)
    // Advance mock time past the file-delete delay so scheduled deletions run.
    time.sleep(config.fileDeleteDelayMs + 1)
    for (file <- logDir.listFiles) {
      if (!expectDeletedFiles)
        assertFalse("Unexpected .deleted file after recovery", file.getName.endsWith(Log.DeletedFileSuffix))
      assertFalse("Unexpected .cleaned file after recovery", file.getName.endsWith(Log.CleanedFileSuffix))
      assertFalse("Unexpected .swap file after recovery", file.getName.endsWith(Log.SwapFileSuffix))
    }
    assertEquals(expectedKeys, LogTest.keysInLog(recoveredLog))
    assertFalse(LogTest.hasOffsetOverflow(recoveredLog))
    recoveredLog
  }
}
| sslavic/kafka | core/src/test/scala/unit/kafka/log/LogTest.scala | Scala | apache-2.0 | 197,508 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import java.util
import com.intel.analytics.bigdl.dllib.tensor.Tensor
private[nn] object NNPrimitive {
/**
 * im2col for one double-precision image in (plane, height, width) layout:
 * unfolds input patches into the column buffer `fInput` so a convolution can be
 * computed as a single matrix multiplication.
 *
 * Row `k` of `fInput` (length outputHeight * outputWidth) corresponds to one
 * (input plane, kernel row, kernel col) triple.
 *
 * @param fInput destination column buffer
 * @param input  3-D source tensor; size(1)=planes, size(2)=height, size(3)=width
 * @param kW/kH  kernel width/height; dW/dH horizontal/vertical stride
 * @param pad*   zero padding on each side
 * @param outputWidth/outputHeight spatial size of the convolution output
 */
def im2colDouble(
  fInput: Tensor[Double], input: Tensor[Double],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  val nInputPlane = input.size(1)
  val inputHeight = input.size(2)
  val inputWidth = input.size(3)

  // Operate on the raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()

  // k enumerates every (input plane, kernel row, kernel column) combination.
  var k = 0
  while (k < nInputPlane * kH * kW) {
    val nip = k / (kH * kW)   // input plane
    val rest = k % (kH * kW)
    val kh = rest / kW        // kernel row
    val kw = rest % kW        // kernel column
    val dstOffset = k * outputHeight * outputWidth + fInput.storageOffset() - 1
    val srcOffset = nip * inputWidth * inputHeight + input.storageOffset() - 1
    if (padLeft > 0 || padRight > 0 || padTop > 0 || padBottom > 0) {
      // Padded path: output rows that map fully outside the input are zero-filled;
      // partially-padded rows are zeroed only at the edges.
      var y = 0
      while (y < outputHeight) {
        val iy = y * dH - padTop + kh
        if (iy < 0 || iy >= inputHeight) {
          util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
            dstOffset + (y + 1) * outputWidth, 0)
        } else {
          if (dW == 1) {
            // Stride-1 fast path: the source row is contiguous, so bulk-copy it and
            // zero-fill only the left/right padded portions.
            val ix = 0 - padLeft + kw
            val lpad = Math.max(0, padLeft - kw)
            val rpad = Math.max(0, padRight - (kW - kw - 1))
            if (outputWidth - rpad - lpad <= 0) {
              util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
                dstOffset + (y + 1) * outputWidth, 0)
            } else {
              if (lpad > 0) util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
                dstOffset + y * outputWidth + lpad, 0)
              System.arraycopy(inputData, srcOffset + iy * inputWidth + ix + lpad, fInputData,
                dstOffset + y * outputWidth + lpad, outputWidth - rpad - lpad)
              if (rpad > 0) util.Arrays.fill(fInputData, dstOffset + (y + 1) * outputWidth - rpad,
                dstOffset + (y + 1) * outputWidth, 0)
            }
          } else {
            // General stride: element-wise copy, zeroing out-of-bounds columns.
            var x = 0
            while (x < outputWidth) {
              val ix = x * dW - padLeft + kw
              if (ix < 0 || ix >= inputWidth) {
                fInputData(dstOffset + y * outputWidth + x) = 0
              } else {
                fInputData(dstOffset + y * outputWidth + x) =
                  inputData(srcOffset + iy * inputWidth + ix)
              }
              x += 1
            }
          }
        }
        y += 1
      }
    } else {
      // No padding: every source index is in bounds, so skip the boundary checks.
      var y = 0
      while (y < outputHeight) {
        val iy = y * dH + kh
        val ix = 0 + kw
        if (dW == 1) {
          System.arraycopy(inputData, srcOffset + iy * inputWidth + ix,
            fInputData, dstOffset + y * outputWidth, outputWidth)
        } else {
          var x = 0
          while (x < outputWidth) {
            fInputData(dstOffset + y * outputWidth + x) =
              inputData(srcOffset + iy * inputWidth + ix + x * dW)
            x += 1
          }
        }
        y += 1
      }
    }
    k += 1
  }
}
/**
 * Single-precision im2col for one image in (plane, height, width) layout: unfolds
 * input patches into the column buffer `fInput` (row `k` = one (plane, kernel row,
 * kernel col) triple of length outputHeight * outputWidth) so a convolution can be
 * computed as one matrix multiplication. Logic is identical to the Double variant.
 */
def im2colFloat(
  fInput: Tensor[Float], input: Tensor[Float],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  val nInputPlane = input.size(1)
  val inputHeight = input.size(2)
  val inputWidth = input.size(3)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()

  // k enumerates every (input plane, kernel row, kernel column) combination.
  var k = 0
  while (k < nInputPlane * kH * kW) {
    val nip = k / (kH * kW)   // input plane
    val rest = k % (kH * kW)
    val kh = rest / kW        // kernel row
    val kw = rest % kW        // kernel column
    val dstOffset = k * outputHeight * outputWidth + fInput.storageOffset() - 1
    val srcOffset = nip * inputWidth * inputHeight + input.storageOffset() - 1
    if (padLeft > 0 || padRight > 0 || padTop > 0 || padBottom > 0) {
      // Padded path: fully out-of-range rows are zeroed; edge rows are zeroed
      // only in their padded columns.
      var y = 0
      while (y < outputHeight) {
        val iy = y * dH - padTop + kh
        if (iy < 0 || iy >= inputHeight) {
          util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
            dstOffset + (y + 1) * outputWidth, 0)
        } else {
          if (dW == 1) {
            // Stride-1 fast path: contiguous source row, bulk copy + edge zero-fill.
            val ix = 0 - padLeft + kw
            val lpad = Math.max(0, padLeft - kw)
            val rpad = Math.max(0, padRight - (kW - kw - 1))
            if (outputWidth - rpad - lpad <= 0) {
              util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
                dstOffset + (y + 1) * outputWidth, 0)
            } else {
              if (lpad > 0) util.Arrays.fill(fInputData, dstOffset + y * outputWidth,
                dstOffset + y * outputWidth + lpad, 0)
              System.arraycopy(inputData, srcOffset + iy * inputWidth + ix + lpad, fInputData,
                dstOffset + y * outputWidth + lpad, outputWidth - rpad - lpad)
              if (rpad > 0) util.Arrays.fill(fInputData, dstOffset + (y + 1) * outputWidth - rpad,
                dstOffset + (y + 1) * outputWidth, 0)
            }
          } else {
            // General stride: element-wise copy, zeroing out-of-bounds columns.
            var x = 0
            while (x < outputWidth) {
              val ix = x * dW - padLeft + kw
              if (ix < 0 || ix >= inputWidth) {
                fInputData(dstOffset + y * outputWidth + x) = 0
              } else {
                fInputData(dstOffset + y * outputWidth + x) =
                  inputData(srcOffset + iy * inputWidth + ix)
              }
              x += 1
            }
          }
        }
        y += 1
      }
    } else {
      // No padding: all source indices are in bounds; skip boundary checks.
      var y = 0
      while (y < outputHeight) {
        val iy = y * dH + kh
        val ix = 0 + kw
        if (dW == 1) {
          System.arraycopy(inputData, srcOffset + iy * inputWidth + ix,
            fInputData, dstOffset + y * outputWidth, outputWidth)
        } else {
          var x = 0
          while (x < outputWidth) {
            fInputData(dstOffset + y * outputWidth + x) =
              inputData(srcOffset + iy * inputWidth + ix + x * dW)
            x += 1
          }
        }
        y += 1
      }
    }
    k += 1
  }
}
/**
 * col2im (inverse of im2col) for double precision: folds the column buffer
 * `fInput` back into `input`, ACCUMULATING (+=) overlapping contributions rather
 * than overwriting. Callers must pre-zero `input` if a plain fold is wanted.
 * (Presumably used to accumulate the gradient w.r.t. the input during backprop —
 * confirm against callers.)
 */
def col2imDouble(
  fInput: Tensor[Double], input: Tensor[Double],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int
): Unit = {
  val nInputPlane = input.size(1)
  val inputHeight = input.size(2)
  val inputWidth = input.size(3)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()

  // Iterate over every (plane, kernel row, kernel column) row of fInput and
  // scatter-add it back to the corresponding input positions.
  var nPlane = 0
  while (nPlane < nInputPlane) {
    var kh = 0
    while (kh < kH) {
      var kw = 0
      while (kw < kW) {
        val srcOffset = nPlane * (kH * kW * outputHeight * outputWidth) +
          kh * (kW * outputHeight * outputWidth) +
          kw * (outputHeight * outputWidth) + fInput.storageOffset() - 1
        val dstOffset = nPlane * (inputHeight * inputWidth) + input.storageOffset() - 1
        if (padLeft > 0 || padRight > 0 || padTop > 0 || padBottom > 0) {
          // Padded path: contributions that map into the padding are dropped.
          var y = 0
          while (y < outputHeight) {
            val iy = y * dH - padTop + kh
            if (iy >= 0 && iy < inputHeight) {
              if (dW == 1) {
                // Stride-1 fast path: accumulate a contiguous run, skipping the
                // left/right padded columns.
                val ix = 0 - padLeft + kw
                val lPad = Math.max(0, padLeft - kw)
                val rPad = Math.max(0, padRight - (kW - kw - 1))
                val inputDataOffset = dstOffset + iy * inputWidth + ix + lPad
                val fInputDataOffset = srcOffset + y * outputWidth + lPad
                val n = outputWidth - lPad - rPad
                var i = 0
                while (i < n) {
                  inputData(inputDataOffset + i) += fInputData(fInputDataOffset + i)
                  i += 1
                }
              } else {
                // General stride: accumulate element-wise, bounds-checking columns.
                var x = 0
                while (x < outputWidth) {
                  val ix = x * dW - padLeft + kw
                  if (ix >= 0 && ix < inputWidth) {
                    inputData(dstOffset + iy * inputWidth + ix) +=
                      fInputData(srcOffset + y * outputWidth + x)
                  }
                  x += 1
                }
              }
            }
            y += 1
          }
        } else {
          // No padding: every target index is in bounds.
          var y = 0
          while (y < outputHeight) {
            val iy = y * dH + kh
            val ix = 0 + kw
            if (dW == 1) {
              var i = 0
              val inputDataOffset = dstOffset + iy * inputWidth + ix
              val fInputDataOffset = srcOffset + y * outputWidth
              while (i < outputWidth) {
                inputData(inputDataOffset + i) += fInputData(fInputDataOffset + i)
                i += 1
              }
            } else {
              var x = 0
              while (x < outputWidth) {
                inputData(dstOffset + iy * inputWidth + ix + x * dW) +=
                  fInputData(srcOffset + y * outputWidth + x)
                x += 1
              }
            }
            y += 1
          }
        }
        kw += 1
      }
      kh += 1
    }
    nPlane += 1
  }
}
/**
 * Single-precision col2im: folds the column buffer `fInput` back into `input`,
 * ACCUMULATING (+=) overlapping contributions rather than overwriting; callers
 * must pre-zero `input` for a plain fold. Logic is identical to the Double variant.
 */
def col2imFloat(
  fInput: Tensor[Float], input: Tensor[Float],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int
): Unit = {
  val nInputPlane = input.size(1)
  val inputHeight = input.size(2)
  val inputWidth = input.size(3)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()

  // Scatter-add each (plane, kernel row, kernel column) row of fInput back
  // into the corresponding input positions.
  var nPlane = 0
  while (nPlane < nInputPlane) {
    var kh = 0
    while (kh < kH) {
      var kw = 0
      while (kw < kW) {
        val srcOffset = nPlane * (kH * kW * outputHeight * outputWidth) + kh *
          (kW * outputHeight * outputWidth) +
          kw * (outputHeight * outputWidth) + fInput.storageOffset() - 1
        val dstOffset = nPlane * (inputHeight * inputWidth) + input.storageOffset() - 1
        if (padLeft > 0 || padRight > 0 || padTop > 0 || padBottom > 0) {
          // Padded path: contributions that map into the padding are dropped.
          var y = 0
          while (y < outputHeight) {
            val iy = y * dH - padTop + kh
            if (iy >= 0 && iy < inputHeight) {
              if (dW == 1) {
                // Stride-1 fast path: accumulate a contiguous run, skipping the
                // left/right padded columns.
                val ix = 0 - padLeft + kw
                val lPad = Math.max(0, padLeft - kw)
                val rPad = Math.max(0, padRight - (kW - kw - 1))
                val inputDataOffset = dstOffset + iy * inputWidth + ix + lPad
                val fInputDataOffset = srcOffset + y * outputWidth + lPad
                val n = outputWidth - lPad - rPad
                var i = 0
                while (i < n) {
                  inputData(inputDataOffset + i) += fInputData(fInputDataOffset + i)
                  i += 1
                }
              } else {
                // General stride: accumulate element-wise, bounds-checking columns.
                var x = 0
                while (x < outputWidth) {
                  val ix = x * dW - padLeft + kw
                  if (ix >= 0 && ix < inputWidth) {
                    inputData(dstOffset + iy * inputWidth + ix) +=
                      fInputData(srcOffset + y * outputWidth + x)
                  }
                  x += 1
                }
              }
            }
            y += 1
          }
        } else {
          // No padding: every target index is in bounds.
          var y = 0
          while (y < outputHeight) {
            val iy = y * dH + kh
            val ix = 0 + kw
            if (dW == 1) {
              var i = 0
              val inputDataOffset = dstOffset + iy * inputWidth + ix
              val fInputDataOffset = srcOffset + y * outputWidth
              while (i < outputWidth) {
                inputData(inputDataOffset + i) += fInputData(fInputDataOffset + i)
                i += 1
              }
            } else {
              var x = 0
              while (x < outputWidth) {
                inputData(dstOffset + iy * inputWidth + ix + x * dW) +=
                  fInputData(srcOffset + y * outputWidth + x)
                x += 1
              }
            }
            y += 1
          }
        }
        kw += 1
      }
      kh += 1
    }
    nPlane += 1
  }
}
/**
 * im2col for a double-precision image in (height, width, channel) layout: walks
 * every output position's receptive field and writes the channel-contiguous rows
 * into `fInput` sequentially, zero-filling positions that fall in the padding.
 *
 * @param input 3-D tensor; size(1)=height, size(2)=width, size(3)=channels
 */
def im2colDoubleNHWC(
  fInput: Tensor[Double], input: Tensor[Double],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  // padRight and padBottom are used in the NCHW version but not here,
  // add it to keep api consistent
  val nInputPlane = input.size(3)
  val inputHeight = input.size(1)
  val inputWidth = input.size(2)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()
  val srcOffset = input.storageOffset() - 1
  val destOffset = fInput.storageOffset() - 1

  // (hPad, wPad) is the top-left input coordinate of the current receptive field;
  // fInputCount is the running write position into fInput.
  var hPad = -padTop
  var fInputCount = 0
  var h = 0
  while (h < outputHeight) {
    var wPad = -padLeft
    var w = 0
    while (w < outputWidth) {
      var ih = hPad
      while (ih < hPad + kH) {
        var iw = wPad
        while(iw < wPad + kW) {
          if (ih >= 0 && ih < inputHeight && iw >= 0 && iw < inputWidth) {
            // In-bounds: copy as many kernel columns as stay inside the input
            // in one arraycopy (channels are contiguous in NHWC).
            val src = srcOffset + (ih * inputWidth + iw) * nInputPlane
            val dest = destOffset + fInputCount
            val n = Math.min(inputWidth, wPad + kW) - iw
            System.arraycopy(inputData, src,
              fInputData, dest, nInputPlane * n)
            fInputCount = fInputCount + nInputPlane * n
            iw = iw + n
          } else {
            // Out of bounds: zero-fill either the remainder of the kernel row
            // (row fully out, or past the right edge) or up to column 0.
            val n = if (ih < 0 || ih >= inputHeight || iw >= inputWidth) {
              wPad + kW - iw
            } else {
              0 - iw
            }
            val fromIndex = destOffset + fInputCount
            val toIndex = fromIndex + nInputPlane * n
            util.Arrays.fill(fInputData, fromIndex, toIndex, 0.0)
            fInputCount = fInputCount + nInputPlane * n
            iw = iw + n
          }
        }
        ih = ih + 1
      }
      w = w + 1
      wPad = wPad + dW
    }
    h = h + 1
    hPad = hPad + dH
  }
}
/**
 * Single-precision im2col for an image in (height, width, channel) layout: walks
 * every output position's receptive field and writes channel-contiguous runs into
 * `fInput` sequentially, zero-filling padded positions. Logic is identical to the
 * Double NHWC variant.
 */
def im2colFloatNHWC(
  fInput: Tensor[Float], input: Tensor[Float],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  // padRight and padBottom are used in the NCHW version but not here,
  // add it to keep api consistent
  val nInputPlane = input.size(3)
  val inputHeight = input.size(1)
  val inputWidth = input.size(2)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val fInputData = fInput.storage().array()
  val srcOffset = input.storageOffset() - 1
  val destOffset = fInput.storageOffset() - 1

  // (hPad, wPad) = top-left input coordinate of the current receptive field;
  // fInputCount = running write position into fInput.
  var hPad = -padTop
  var fInputCount = 0
  var h = 0
  while (h < outputHeight) {
    var wPad = -padLeft
    var w = 0
    while (w < outputWidth) {
      var ih = hPad
      while (ih < hPad + kH) {
        var iw = wPad
        while(iw < wPad + kW) {
          if (ih >= 0 && ih < inputHeight && iw >= 0 && iw < inputWidth) {
            // In-bounds: bulk-copy the in-range kernel columns (channels are
            // contiguous in NHWC).
            val src = srcOffset + (ih * inputWidth + iw) * nInputPlane
            val dest = destOffset + fInputCount
            val n = Math.min(inputWidth, wPad + kW) - iw
            System.arraycopy(inputData, src,
              fInputData, dest, nInputPlane * n)
            fInputCount = fInputCount + nInputPlane * n
            iw = iw + n
          } else {
            // Out of bounds: zero-fill the remainder of the kernel row, or up
            // to column 0 when only the left edge is out of range.
            val n = if (ih < 0 || ih >= inputHeight || iw >= inputWidth) {
              wPad + kW - iw
            } else {
              0 - iw
            }
            val fromIndex = destOffset + fInputCount
            val toIndex = fromIndex + nInputPlane * n
            util.Arrays.fill(fInputData, fromIndex, toIndex, 0.0f)
            fInputCount = fInputCount + nInputPlane * n
            iw = iw + n
          }
        }
        ih = ih + 1
      }
      w = w + 1
      wPad = wPad + dW
    }
    h = h + 1
    hPad = hPad + dH
  }
}
/**
 * col2im for double precision in (height, width, channel) layout: folds the
 * column buffer `fInput` back into `input`, ACCUMULATING (+=) overlapping
 * contributions. Entries of `fInput` that correspond to padded positions are
 * skipped (the read cursor still advances past them).
 */
def col2imDoubleNHWC(
  fInput: Tensor[Double], input: Tensor[Double],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  // padRight and padBottom are used in the NCHW version but not here,
  // add it to keep api consistent
  val nInputPlane = input.size(3)
  val inputHeight = input.size(1)
  val inputWidth = input.size(2)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val inputOffset = input.storageOffset() - 1
  val fInputData = fInput.storage().array()
  val fInputOffset = fInput.storageOffset() - 1

  // (hPad, wPad) = top-left input coordinate of the current receptive field;
  // fInputCount = running read position in fInput.
  var hPad = -padTop
  var h = 0
  var fInputCount = 0
  while (h < outputHeight) {
    var wPad = -padLeft
    var w = 0
    while (w < outputWidth) {
      var ih = hPad
      while (ih < hPad + kH) {
        var iw = wPad
        while (iw < wPad + kW) {
          if (ih >= 0 && ih < inputHeight && iw >= 0 && iw < inputWidth) {
            // Accumulate one channel vector into the target pixel.
            val dataImPatch = inputOffset + (ih * inputWidth + iw) * nInputPlane
            var i = 0
            while(i < nInputPlane) {
              inputData(dataImPatch + i) += fInputData(fInputOffset + fInputCount)
              fInputCount = fInputCount + 1
              i = i + 1
            }
          } else {
            // Padded position: nothing to fold back, just skip its entries.
            fInputCount = fInputCount + nInputPlane
          }
          iw = iw + 1
        }
        ih = ih + 1
      }
      w = w + 1
      wPad = wPad + dW
    }
    h = h + 1
    hPad = hPad + dH
  }
}
/**
 * Single-precision col2im in (height, width, channel) layout: folds `fInput`
 * back into `input`, ACCUMULATING (+=) overlapping contributions and skipping
 * entries that map to padded positions. Logic is identical to the Double variant.
 */
def col2imFloatNHWC(
  fInput: Tensor[Float], input: Tensor[Float],
  kW: Int, kH: Int,
  dW: Int, dH: Int,
  padLeft: Int, padTop: Int, padRight: Int, padBottom: Int,
  outputWidth: Int, outputHeight: Int): Unit = {
  // padRight and padBottom are used in the NCHW version but not here,
  // add it to keep api consistent
  val nInputPlane = input.size(3)
  val inputHeight = input.size(1)
  val inputWidth = input.size(2)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val inputData = input.storage().array()
  val inputOffset = input.storageOffset() - 1
  val fInputData = fInput.storage().array()
  val fInputOffset = fInput.storageOffset() - 1

  // (hPad, wPad) = top-left input coordinate of the current receptive field;
  // fInputCount = running read position in fInput.
  var hPad = -padTop
  var h = 0
  var fInputCount = 0
  while (h < outputHeight) {
    var wPad = -padLeft
    var w = 0
    while (w < outputWidth) {
      var ih = hPad
      while (ih < hPad + kH) {
        var iw = wPad
        while (iw < wPad + kW) {
          if (ih >= 0 && ih < inputHeight && iw >= 0 && iw < inputWidth) {
            // Accumulate one channel vector into the target pixel.
            val dataImPatch = inputOffset + (ih * inputWidth + iw) * nInputPlane
            var i = 0
            while(i < nInputPlane) {
              inputData(dataImPatch + i) += fInputData(fInputOffset + fInputCount)
              fInputCount = fInputCount + 1
              i = i + 1
            }
          } else {
            // Padded position: nothing to fold back, just skip its entries.
            fInputCount = fInputCount + nInputPlane
          }
          iw = iw + 1
        }
        ih = ih + 1
      }
      w = w + 1
      wPad = wPad + dW
    }
    h = h + 1
    hPad = hPad + dH
  }
}
/**
 * Max-pooling forward pass for one double tensor in (slice, height, width) layout.
 * For each slice and output position, writes the window maximum to `outputTensor`
 * and the 1-based flat index (y * iWidth + x + 1) of that maximum within its slice
 * to `indicesTensor` (used later to route gradients in the backward pass —
 * confirm against the corresponding backward implementation).
 *
 * NOTE(review): declared with deprecated procedure syntax (no `: Unit =`).
 */
def maxPoolingForwardDouble(
  inputTensor: Tensor[Double],
  outputTensor: Tensor[Double],
  indicesTensor: Tensor[Double],
  oWidth: Int, oHeight: Int,
  kW: Int, kH: Int, dW: Int, dH: Int, padW: Int, padH: Int) {
  val nSlices = inputTensor.size(1)
  val iHeight = inputTensor.size(2)
  val iWidth = inputTensor.size(3)

  // Raw backing arrays; storageOffset() is 1-based, hence the -1.
  val input = inputTensor.storage().array()
  val inputOffset = inputTensor.storageOffset() - 1
  val output = outputTensor.storage().array()
  val outputOffset = outputTensor.storageOffset() - 1
  val indices = indicesTensor.storage().array()
  val indicesOffset = indicesTensor.storageOffset() - 1

  val slices = Range(0, nSlices).iterator
  while (slices.hasNext) {
    val k = slices.next()
    var i = 0
    while (i < oHeight) {
      var j = 0
      while (j < oWidth) {
        // k, i, j output indexers
        // Clamp the pooling window to the input extent (padding contributes nothing).
        var hstart = i * dH - padH
        var wstart = j * dW - padW
        val hend = math.min(hstart + kH, iHeight)
        val wend = math.min(wstart + kW, iWidth)
        hstart = math.max(hstart, 0)
        wstart = math.max(wstart, 0)

        var maxindex = 0  // default is 0
        // Scala's Double.MinValue is the most negative finite double (unlike
        // Java's Double.MIN_VALUE), so any input value beats the initial max.
        var maxval = Double.MinValue
        var tcntr = 0
        var y = hstart
        while (y < hend) {
          var x = wstart
          while (x < wend) {
            // k, y, x input indexers
            tcntr = y * iWidth + x
            val value = input(tcntr + inputOffset + k * iWidth * iHeight)
            if (value > maxval) {
              maxval = value
              maxindex = tcntr
            }
            x += 1
          }
          y += 1
        }
        output(outputOffset + k * oWidth * oHeight + i * oWidth + j) = maxval
        // Stored index is 1-based.
        indices(indicesOffset + k * oWidth * oHeight + i * oWidth + j) = maxindex + 1
        j += 1
      }
      i += 1
    }
  }
}
  /**
   * Max pooling forward pass over one channel-first (CHW) image, Float precision.
   * Float twin of maxPoolingForwardDouble: per channel and output pixel, the
   * clipped kernel window max goes to outputTensor and the 1-based flattened
   * spatial argmax (y * iWidth + x + 1) goes to indicesTensor.
   */
  def maxPoolingForwardFloat(
    inputTensor: Tensor[Float],
    outputTensor: Tensor[Float],
    indicesTensor: Tensor[Float],
    oWidth: Int, oHeight: Int,
    kW: Int, kH: Int, dW: Int, dH: Int, padW: Int, padH: Int) {
    val nSlices = inputTensor.size(1)
    val iHeight = inputTensor.size(2)
    val iWidth = inputTensor.size(3)
    val input = inputTensor.storage().array()
    val inputOffset = inputTensor.storageOffset() - 1
    val output = outputTensor.storage().array()
    val outputOffset = outputTensor.storageOffset() - 1
    val indices = indicesTensor.storage().array()
    val indicesOffset = indicesTensor.storageOffset() - 1
    val slices = Range(0, nSlices).iterator
    while (slices.hasNext) {
      val k = slices.next()
      var i = 0
      while (i < oHeight) {
        var j = 0
        while (j < oWidth) {
          // k, i, j output indexers
          var hstart = i * dH - padH
          var wstart = j * dW - padW
          val hend = math.min(hstart + kH, iHeight)
          val wend = math.min(wstart + kW, iWidth)
          hstart = math.max(hstart, 0)
          wstart = math.max(wstart, 0)
          var maxindex = 0 // default is 0
          var maxval = Float.MinValue // most negative Float (Scala semantics)
          var tcntr = 0
          var y = hstart
          while (y < hend) {
            var x = wstart
            while (x < wend) {
              // k, y, x input indexers
              tcntr = y * iWidth + x
              val value = input(tcntr + inputOffset + k * iWidth * iHeight)
              if (value > maxval) {
                maxval = value
                maxindex = tcntr
              }
              x += 1
            }
            y += 1
          }
          output(outputOffset + k * oWidth * oHeight + i * oWidth + j) = maxval
          // stored index is 1-based (Torch convention)
          indices(indicesOffset + k * oWidth * oHeight + i * oWidth + j) = maxindex + 1
          j += 1
        }
        i += 1
      }
    }
  }
  /**
   * Max pooling backward pass (CHW layout, Float): routes each output gradient
   * back to the input position that produced the forward max. The indices tensor
   * holds the 1-based flattened spatial index recorded by the forward pass,
   * hence the "- 1" when reading it. Accumulates (+=) into gradInput, so the
   * caller is expected to have zeroed it beforehand.
   */
  def maxPoolingBackwardFloat(
    gradInputTensor: Tensor[Float],
    gradOutputTensor: Tensor[Float],
    indicesTensor: Tensor[Float],
    owidth: Int, oheight: Int): Unit = {
    val nSlices = gradInputTensor.size(1)
    val iHeight = gradInputTensor.size(2)
    val iWidth = gradInputTensor.size(3)
    val gradInput = gradInputTensor.storage().array()
    val gradInputOffset = gradInputTensor.storageOffset() - 1
    val gradOutput = gradOutputTensor.storage().array()
    val gradOutputOffset = gradOutputTensor.storageOffset() - 1
    val indices = indicesTensor.storage().array()
    val indicesOffset = indicesTensor.storageOffset() - 1
    val slices = Range(0, nSlices).iterator
    while (slices.hasNext) {
      val k = slices.next()
      var i = 0
      while (i < oheight) {
        var j = 0
        while (j < owidth) {
          // back to 0-based flattened spatial position within channel k
          val maxp = indices(i * owidth + j + indicesOffset + k * owidth * oheight).toInt - 1
          gradInput(maxp + k * iWidth * iHeight + gradInputOffset) +=
            gradOutput(gradOutputOffset + k * owidth * oheight + i * owidth + j)
          j += 1
        }
        i += 1
      }
    }
  }
  /**
   * Max pooling backward pass (CHW layout, Double): Double twin of
   * maxPoolingBackwardFloat. Scatters each output gradient to the input
   * position recorded (1-based) by the forward pass, accumulating with +=
   * into gradInput (caller zeroes gradInput first).
   */
  def maxPoolingBackwardDouble(
    gradInputTensor: Tensor[Double],
    gradOutputTensor: Tensor[Double],
    indicesTensor: Tensor[Double],
    owidth: Int, oheight: Int): Unit = {
    val nSlices = gradInputTensor.size(1)
    val iHeight = gradInputTensor.size(2)
    val iWidth = gradInputTensor.size(3)
    val gradInput = gradInputTensor.storage().array()
    val gradInputOffset = gradInputTensor.storageOffset() - 1
    val gradOutput = gradOutputTensor.storage().array()
    val gradOutputOffset = gradOutputTensor.storageOffset() - 1
    val indices = indicesTensor.storage().array()
    val indicesOffset = indicesTensor.storageOffset() - 1
    val slices = Range(0, nSlices).iterator
    while (slices.hasNext) {
      val k = slices.next()
      var i = 0
      while (i < oheight) {
        var j = 0
        while (j < owidth) {
          // back to 0-based flattened spatial position within channel k
          val maxp = indices(i * owidth + j + indicesOffset + k * owidth * oheight).toInt - 1
          gradInput(maxp + k * iWidth * iHeight + gradInputOffset) += gradOutput(gradOutputOffset
            + k * owidth * oheight + i * owidth + j)
          j += 1
        }
        i += 1
      }
    }
  }
def maxPoolingForwardDoubleNHWC(
inputTensor: Tensor[Double], outputTensor: Tensor[Double], indicesTensor: Tensor[Double],
oWidth: Int, oHeight: Int,
kW: Int, kH: Int, dW: Int, dH: Int, padW: Int, padH: Int) {
val nSlices = inputTensor.size(3)
val iHeight = inputTensor.size(1)
val iWidth = inputTensor.size(2)
val input = inputTensor.storage().array()
val inputOffset = inputTensor.storageOffset() - 1
val output = outputTensor.storage().array()
val outputOffset = outputTensor.storageOffset() - 1
val indices = indicesTensor.storage().array()
val indicesOffset = indicesTensor.storageOffset() - 1
var i = 0
while (i < oHeight) {
var j = 0
var hstart = i * dH - padH
val hend = math.min(hstart + kH, iHeight)
hstart = math.max(hstart, 0)
while (j < oWidth) {
var wstart = j * dW - padW
val wend = math.min(wstart + kW, iWidth)
wstart = math.max(wstart, 0)
val currOutLocStart = outputOffset + (i * oWidth + j) * nSlices
val currOutLocEnd = currOutLocStart + nSlices
val currIndicesLocStart = indicesOffset + (i * oWidth + j) * nSlices
val currIndicesLocEnd = currIndicesLocStart + nSlices
util.Arrays.fill(output, currOutLocStart, currOutLocEnd, Double.MinValue)
util.Arrays.fill(indices, currIndicesLocStart, currIndicesLocEnd, 0)
var y = hstart
while (y < hend) {
var x = wstart
while (x < wend) {
// k, y, x input indexers
val tcntr = y *iWidth + x
val currInLocStart = inputOffset + tcntr * nSlices
var n = 0
while (n < nSlices) {
val value = input(currInLocStart + n)
if (value > output(currOutLocStart + n)) {
output(currOutLocStart + n) = value
indices(currOutLocStart + n) = tcntr + 1
}
n = n + 1
}
x += 1
}
y += 1
}
j += 1
}
i += 1
}
}
def maxPoolingForwardFloatNHWC(
inputTensor: Tensor[Float], outputTensor: Tensor[Float], indicesTensor: Tensor[Float],
oWidth: Int, oHeight: Int,
kW: Int, kH: Int, dW: Int, dH: Int, padW: Int, padH: Int) {
val nSlices = inputTensor.size(3)
val iHeight = inputTensor.size(1)
val iWidth = inputTensor.size(2)
val input = inputTensor.storage().array()
val inputOffset = inputTensor.storageOffset() - 1
val output = outputTensor.storage().array()
val outputOffset = outputTensor.storageOffset() - 1
val indices = indicesTensor.storage().array()
val indicesOffset = indicesTensor.storageOffset() - 1
var i = 0
while (i < oHeight) {
var j = 0
var hstart = i * dH - padH
val hend = math.min(hstart + kH, iHeight)
hstart = math.max(hstart, 0)
while (j < oWidth) {
var wstart = j * dW - padW
val wend = math.min(wstart + kW, iWidth)
wstart = math.max(wstart, 0)
val currOutLocStart = outputOffset + (i * oWidth + j) * nSlices
val currOutLocEnd = currOutLocStart + nSlices
val currIndicesLocStart = indicesOffset + (i * oWidth + j) * nSlices
val currIndicesLocEnd = currIndicesLocStart + nSlices
util.Arrays.fill(output, currOutLocStart, currOutLocEnd, Float.MinValue)
util.Arrays.fill(indices, currIndicesLocStart, currIndicesLocEnd, 0)
var y = hstart
while (y < hend) {
var x = wstart
while (x < wend) {
// k, y, x input indexers
val tcntr = y *iWidth + x
val currInLocStart = inputOffset + tcntr * nSlices
var n = 0
while (n < nSlices) {
val value = input(currInLocStart + n)
if (value > output(currOutLocStart + n)) {
output(currOutLocStart + n) = value
indices(currOutLocStart + n) = tcntr + 1
}
n = n + 1
}
x += 1
}
y += 1
}
j += 1
}
i += 1
}
}
  /**
   * Max pooling backward for NHWC layout, Double precision. For every output
   * pixel and channel, reads the 1-based flattened spatial argmax recorded by
   * the forward pass and accumulates the output gradient into the matching
   * input location (maxIndex * nSlices + n). Caller zeroes gradInput first.
   */
  def maxPoolingBackwardDoubleNHWC(
    gradInputTensor: Tensor[Double],
    gradOutputTensor: Tensor[Double],
    indicesTensor: Tensor[Double],
    oWidth: Int, oHeight: Int): Unit = {
    val nSlices = gradInputTensor.size(3)
    val iHeight = gradInputTensor.size(1)
    val iWidth = gradInputTensor.size(2)
    val gradInput = gradInputTensor.storage().array()
    val gradInputOffset = gradInputTensor.storageOffset() - 1
    val gradOutput = gradOutputTensor.storage().array()
    val gradOutputOffset = gradOutputTensor.storageOffset() - 1
    val indices = indicesTensor.storage().array()
    val indicesOffset = indicesTensor.storageOffset() - 1
    var i = 0
    while (i < oHeight) {
      var j = 0
      while (j < oWidth) {
        val currOutLocStart = gradOutputOffset + (i * oWidth + j) * nSlices
        val currIndicesLocStart = indicesOffset + (i * oWidth + j) * nSlices
        var n = 0
        while (n < nSlices) {
          // indices store 1-based flattened spatial positions
          val maxIndex = indices(currIndicesLocStart + n).toInt - 1
          val grad = gradOutput(currOutLocStart + n)
          gradInput(gradInputOffset + maxIndex * nSlices + n) += grad
          n = n + 1
        }
        j += 1
      }
      i += 1
    }
  }
  /**
   * Max pooling backward for NHWC layout, Float precision. Float twin of
   * maxPoolingBackwardDoubleNHWC: scatters each channel's output gradient to
   * the input position recorded (1-based) by the forward pass, accumulating
   * with += into gradInput (caller zeroes gradInput first).
   */
  def maxPoolingBackwardFloatNHWC(
    gradInputTensor: Tensor[Float],
    gradOutputTensor: Tensor[Float],
    indicesTensor: Tensor[Float],
    oWidth: Int, oHeight: Int): Unit = {
    val nSlices = gradInputTensor.size(3)
    val iHeight = gradInputTensor.size(1)
    val iWidth = gradInputTensor.size(2)
    val gradInput = gradInputTensor.storage().array()
    val gradInputOffset = gradInputTensor.storageOffset() - 1
    val gradOutput = gradOutputTensor.storage().array()
    val gradOutputOffset = gradOutputTensor.storageOffset() - 1
    val indices = indicesTensor.storage().array()
    val indicesOffset = indicesTensor.storageOffset() - 1
    var i = 0
    while (i < oHeight) {
      var j = 0
      while (j < oWidth) {
        val currOutLocStart = gradOutputOffset + (i * oWidth + j) * nSlices
        val currIndicesLocStart = indicesOffset + (i * oWidth + j) * nSlices
        var n = 0
        while (n < nSlices) {
          // indices store 1-based flattened spatial positions
          val maxIndex = indices(currIndicesLocStart + n).toInt - 1
          val grad = gradOutput(currOutLocStart + n)
          gradInput(gradInputOffset + maxIndex * nSlices + n) += grad
          n = n + 1
        }
        j += 1
      }
      i += 1
    }
  }
def temporalMaxPoolingBackwardDouble(
gradInput: Array[Double], gradInputOffset: Int,
gradOutput: Array[Double], gradOutputOffset: Int,
indices: Array[Double], indicesOffset: Int,
nSlices: Int, frameSize: Int,
kW: Int, dW: Int): Unit = {
for (t <- Range(0, nSlices)) {
val gip = gradInputOffset + t * frameSize * dW
val gop = gradOutputOffset + t * frameSize
val xp = indicesOffset + t * frameSize
var y = 0
while (y < frameSize) {
val maxIndex = indices(xp + y).toInt - 1
if (maxIndex != -1) {
gradInput(gip + maxIndex * frameSize + y) +=
gradOutput(gop + y)
}
y += 1
}
}
}
def temporalMaxPoolingBackwardFloat(
gradInput: Array[Float], gradInputOffset: Int,
gradOutput: Array[Float], gradOutputOffset: Int,
indices: Array[Float], indicesOffset: Int,
nSlices: Int, frameSize: Int,
kW: Int, dW: Int): Unit = {
for (t <- Range(0, nSlices)) {
val gip = gradInputOffset + t * frameSize * dW
val gop = gradOutputOffset + t * frameSize
val xp = indicesOffset + t * frameSize
var y = 0
while (y < frameSize) {
val maxIndex = indices(xp + y).toInt - 1
if (maxIndex != -1) {
gradInput(gip + maxIndex * frameSize + y) +=
gradOutput(gop + y)
}
y += 1
}
}
}
def temporalMaxPoolingForwardDouble(
input: Array[Double], inputOffset: Int,
output: Array[Double], outputOffset: Int,
indices: Array[Double], indicesOffset: Int,
nSlices: Int, frameSize: Int,
kW: Int, dW: Int): Unit = {
val slices = Range(0, nSlices).iterator
while (slices.hasNext) {
val t = slices.next()
val ip = inputOffset + t * frameSize * dW
val op = outputOffset + t * frameSize
val xp = indicesOffset + t * frameSize
var y = 0
while (y < frameSize) {
var maxindex = 0 // default is 0
var maxval = Double.MinValue
var x = 0
while (x < kW) {
val value = input(ip + x * frameSize + y)
if (value > maxval) {
maxval = value
maxindex = x
}
x += 1
}
output(op + y) = maxval
indices(xp + y) = maxindex + 1
y += 1
}
}
}
def temporalMaxPoolingForwardFloat(
input: Array[Float], inputOffset: Int,
output: Array[Float], outputOffset: Int,
indices: Array[Float], indicesOffset: Int,
nSlices: Int, frameSize: Int,
kW: Int, dW: Int): Unit = {
val slices = Range(0, nSlices).iterator
while (slices.hasNext) {
val t = slices.next()
val ip = inputOffset + t * frameSize * dW
val op = outputOffset + t * frameSize
val xp = indicesOffset + t * frameSize
var y = 0
while (y < frameSize) {
var maxindex = 0 // default is 0
var maxval = Float.MinValue
var x = 0
while (x < kW) {
val value = input(ip + x * frameSize + y)
if (value > maxval) {
maxval = value
maxindex = x
}
x += 1
}
output(op + y) = maxval
indices(xp + y) = maxindex + 1
y += 1
}
}
}
// For SpatialFullConvolution
  /**
   * col2im with dilation, Double precision (used by SpatialFullConvolution).
   * Accumulates (+=) each element of the unrolled `columns` buffer back into
   * its source pixel of `image`; column positions that map outside the image
   * (padding region) are dropped. Inverse scatter of im2colWithDilationDouble,
   * so `image` must be pre-zeroed by the caller for a plain reconstruction.
   */
  def col2imWithDilationDouble(columns : Tensor[Double], image : Tensor[Double],
    channels : Int, height : Int, width : Int,
    kernelH : Int, kernelW : Int,
    padH : Int, padW : Int,
    strideH : Int, strideW : Int,
    dilationH : Int, dilationW : Int) {
    val dataIm = image.storage().array()
    val dataImOffset = image.storageOffset() - 1
    val dataCol = columns.storage().array()
    val dataColOffset = columns.storageOffset() - 1
    // output spatial extent implied by kernel/stride/pad/dilation
    val heightCol = (height + 2 * padH -
      (dilationH * (kernelH - 1) + 1)) / strideH + 1
    val widthCol = (width + 2 * padW -
      (dilationW * (kernelW - 1) + 1)) / strideW + 1
    val channelsCol = channels * kernelH * kernelW
    var cCol = 0
    while (cCol < channelsCol) {
      // decompose the column channel into (image channel, kernel row, kernel col)
      val wOffset = cCol % kernelW
      val hOffset = (cCol / kernelW) % kernelH
      val cIm = cCol / kernelH / kernelW
      var hCol = 0
      while (hCol < heightCol) {
        var wCol = 0
        while (wCol < widthCol) {
          val hIm = hCol * strideH - padH + hOffset * dilationH
          val wIm = wCol * strideW - padW + wOffset * dilationW
          if (hIm >= 0 && hIm < height && wIm >= 0 && wIm < width) {
            dataIm((cIm * height + hIm) * width + wIm + dataImOffset) +=
              dataCol((cCol * heightCol + hCol) * widthCol + wCol + dataColOffset)
          }
          wCol += 1
        }
        hCol += 1
      }
      cCol += 1
    }
  }
  /**
   * col2im with dilation, Float precision. Float twin of
   * col2imWithDilationDouble: accumulates the unrolled `columns` buffer back
   * into `image`, skipping positions that fall in the padding region.
   * `image` must be pre-zeroed by the caller.
   */
  def col2imWithDilationFloat(columns : Tensor[Float], image : Tensor[Float],
    channels : Int, height : Int, width : Int,
    kernelH : Int, kernelW : Int,
    padH : Int, padW : Int,
    strideH : Int, strideW : Int,
    dilationH : Int, dilationW : Int) {
    val dataIm = image.storage().array()
    val dataImOffset = image.storageOffset() - 1
    val dataCol = columns.storage().array()
    val dataColOffset = columns.storageOffset() - 1
    // output spatial extent implied by kernel/stride/pad/dilation
    val heightCol = (height + 2 * padH -
      (dilationH * (kernelH - 1) + 1)) / strideH + 1
    val widthCol = (width + 2 * padW -
      (dilationW * (kernelW - 1) + 1)) / strideW + 1
    val channelsCol = channels * kernelH * kernelW
    var cCol = 0
    while (cCol < channelsCol) {
      // decompose the column channel into (image channel, kernel row, kernel col)
      val wOffset = cCol % kernelW
      val hOffset = (cCol / kernelW) % kernelH
      val cIm = cCol / kernelH / kernelW
      var hCol = 0
      while (hCol < heightCol) {
        var wCol = 0
        while (wCol < widthCol) {
          val hIm = hCol * strideH - padH + hOffset * dilationH
          val wIm = wCol * strideW - padW + wOffset * dilationW
          if (hIm >= 0 && hIm < height && wIm >= 0 && wIm < width) {
            dataIm((cIm * height + hIm) * width + wIm + dataImOffset) +=
              dataCol((cCol * heightCol + hCol) * widthCol + wCol + dataColOffset)
          }
          wCol += 1
        }
        hCol += 1
      }
      cCol += 1
    }
  }
  /**
   * im2col with dilation, Double precision. Unrolls convolution patches of
   * `image` into the `columns` buffer so convolution can be done as a GEMM:
   * column channel cCol encodes (image channel, kernel row, kernel col), and
   * positions that read from the padding region are written as 0.
   */
  def im2colWithDilationDouble(image: Tensor[Double], columns: Tensor[Double],
    channels : Int, height : Int, width : Int,
    kernelH : Int, kernelW : Int,
    padH : Int, padW : Int,
    strideH : Int, strideW : Int,
    dilationH : Int, dilationW : Int): Unit = {
    val dataIm = image.storage().array()
    val dataImOffset = image.storageOffset() - 1
    val dataCol = columns.storage().array()
    val dataColOffset = columns.storageOffset() - 1
    // output spatial extent implied by kernel/stride/pad/dilation
    val heightCol = (height + 2 * padH -
      (dilationH * (kernelH - 1) + 1)) / strideH + 1
    val widthCol = (width + 2 * padW -
      (dilationW * (kernelW - 1) + 1)) / strideW + 1
    val channelsCol = channels * kernelH * kernelW
    var cCol = 0
    while (cCol < channelsCol) {
      val wOffset = cCol % kernelW
      val hOffset = (cCol / kernelW) % kernelH
      val cIm = cCol / kernelH / kernelW
      var hCol = 0
      while (hCol < heightCol) {
        var wCol = 0
        while (wCol < widthCol) {
          val hIm = hCol * strideH - padH + hOffset * dilationH
          val wIm = wCol * strideW - padW + wOffset * dilationW
          // zero-fill reads that land in the padding region
          dataCol((cCol * heightCol + hCol) * widthCol + wCol + dataColOffset) =
            if (hIm >= 0 && wIm >= 0 && hIm < height && wIm < width) {
              dataIm((cIm * height + hIm) * width + wIm + dataImOffset)
            }
            else {
              0
            }
          wCol += 1
        }
        hCol += 1
      }
      cCol += 1
    }
  }
  /**
   * im2col with dilation, Float precision. Float twin of
   * im2colWithDilationDouble: unrolls convolution patches of `image` into
   * `columns` for GEMM-based convolution, zero-filling padded positions.
   */
  def im2colWithDilationFloat(image: Tensor[Float], columns: Tensor[Float],
    channels : Int, height : Int, width : Int,
    kernelH : Int, kernelW : Int,
    padH : Int, padW : Int,
    strideH : Int, strideW : Int,
    dilationH : Int, dilationW : Int): Unit = {
    val dataIm = image.storage().array()
    val dataImOffset = image.storageOffset() - 1
    val dataCol = columns.storage().array()
    val dataColOffset = columns.storageOffset() - 1
    // output spatial extent implied by kernel/stride/pad/dilation
    val heightCol = (height + 2 * padH -
      (dilationH * (kernelH - 1) + 1)) / strideH + 1
    val widthCol = (width + 2 * padW -
      (dilationW * (kernelW - 1) + 1)) / strideW + 1
    val channelsCol = channels * kernelH * kernelW
    var cCol = 0
    while (cCol < channelsCol) {
      val wOffset = cCol % kernelW
      val hOffset = (cCol / kernelW) % kernelH
      val cIm = cCol / kernelH / kernelW
      var hCol = 0
      while (hCol < heightCol) {
        var wCol = 0
        while (wCol < widthCol) {
          val hIm = hCol * strideH - padH + hOffset * dilationH
          val wIm = wCol * strideW - padW + wOffset * dilationW
          // zero-fill reads that land in the padding region
          dataCol((cCol * heightCol + hCol) * widthCol + wCol + dataColOffset) =
            if (hIm >= 0 && wIm >= 0 && hIm < height && wIm < width) {
              dataIm((cIm * height + hIm) * width + wIm + dataImOffset)
            }
            else {
              0
            }
          wCol += 1
        }
        hCol += 1
      }
      cCol += 1
    }
  }
  /**
   * Volumetric (3D) im2col, Double precision: copies input patches into the
   * unfolded buffer fInput for GEMM-based volumetric convolution. The flat
   * index k enumerates (input plane, kt, kh, kw); for each, all output voxels
   * are filled. A fast branch without bounds checks is taken when there is no
   * padding in any direction; otherwise out-of-range reads are written as 0.
   */
  def unfoldedCopyVolDouble(fInput: Tensor[Double], input: Tensor[Double],
    kT: Int, kW: Int, kH: Int,
    dT: Int, dW: Int, dH: Int,
    padFront: Int, padLeft: Int, padTop: Int,
    padBack: Int, padRight: Int, padBottom: Int,
    nInputPlane: Int,
    inputDepth: Int, inputWidth: Int, inputHeight: Int, outputDepth: Int,
    outputWidth: Int, outputHeight: Int): Unit = {
    val inputData = input.storage().array()
    val fInputData = fInput.storage().array()
    var k = 0
    while (k < nInputPlane * kT * kH * kW) {
      // decompose k into (plane nip, kernel depth kt, kernel row kh, kernel col kw)
      val nip = k / (kT * kH * kW)
      var rest = k % (kT * kH * kW)
      val kt = rest / (kH * kW)
      rest = rest % (kH * kW)
      val kh = rest / kW
      val kw = rest % kW
      var t, x, y, it, ix, iy = 0
      val dstOffset = nip * (kT * kH * kW * outputDepth * outputHeight * outputWidth) +
        kt * (kH * kW * outputDepth * outputHeight * outputWidth) +
        kh * (kW * outputDepth * outputHeight * outputWidth) +
        kw * (outputDepth * outputHeight * outputWidth) + fInput.storageOffset() - 1
      val srcOffset = nip * (inputDepth * inputHeight * inputWidth) + input.storageOffset() - 1
      if (padFront > 0 || padBack > 0 || padLeft > 0 || padRight > 0 ||
        padBottom > 0 || padTop > 0) {
        // padded path: bounds-check every read, zero-fill out-of-range positions
        t = 0
        while (t < outputDepth) {
          it = t * dT - padFront + kt
          var y = 0
          while (y < outputHeight) {
            iy = y * dH - padTop + kh
            x = 0
            while (x < outputWidth) {
              ix = x * dW - padLeft + kw
              if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight ||
                ix < 0 || ix >= inputWidth) {
                fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x) = 0
              } else {
                fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                  = inputData(srcOffset + it * inputHeight * inputWidth + iy * inputWidth + ix)
              }
              x += 1
            }
            y += 1
          }
          t += 1
        }
      } else {
        // unpadded fast path: every read is in range, no bounds checks
        t = 0
        while (t < outputDepth) {
          it = t * dT + kt
          y = 0
          while (y < outputHeight) {
            iy = y * dH + kh
            x = 0
            while (x < outputWidth) {
              ix = x * dW + kw
              fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                = inputData(srcOffset + it * inputHeight * inputWidth + iy * inputWidth + ix)
              x += 1
            }
            y += 1
          }
          t += 1
        }
      }
      k += 1
    }
  }
  /**
   * Volumetric (3D) im2col, Float precision. Float twin of
   * unfoldedCopyVolDouble: copies input patches into fInput for GEMM-based
   * volumetric convolution, with a bounds-checked path when any padding is
   * non-zero and a fast unchecked path otherwise.
   */
  def unfoldedCopyVolFloat(fInput: Tensor[Float], input: Tensor[Float],
    kT: Int, kW: Int, kH: Int,
    dT: Int, dW: Int, dH: Int,
    padFront: Int, padLeft: Int, padTop: Int,
    padBack: Int, padRight: Int, padBottom: Int,
    nInputPlane: Int,
    inputDepth: Int, inputWidth: Int, inputHeight: Int, outputDepth: Int,
    outputWidth: Int, outputHeight: Int): Unit = {
    val inputData = input.storage().array()
    val fInputData = fInput.storage().array()
    var k = 0
    while (k < nInputPlane * kT * kH * kW) {
      // decompose k into (plane nip, kernel depth kt, kernel row kh, kernel col kw)
      val nip = k / (kT * kH * kW)
      var rest = k % (kT * kH * kW)
      val kt = rest / (kH * kW)
      rest = rest % (kH * kW)
      val kh = rest / kW
      val kw = rest % kW
      var t, x, y, it, ix, iy = 0
      val dstOffset = nip * (kT * kH * kW * outputDepth * outputHeight * outputWidth) +
        kt * (kH * kW * outputDepth * outputHeight * outputWidth) +
        kh * (kW * outputDepth * outputHeight * outputWidth) +
        kw * (outputDepth * outputHeight * outputWidth) + fInput.storageOffset() - 1
      val srcOffset = nip * (inputDepth * inputHeight * inputWidth) + input.storageOffset() - 1
      if (padFront > 0 || padLeft > 0 || padTop > 0 || padBack > 0
        || padRight > 0 || padBottom > 0) {
        // padded path: bounds-check every read, zero-fill out-of-range positions
        t = 0
        while (t < outputDepth) {
          it = t * dT - padFront + kt
          var y = 0
          while (y < outputHeight) {
            iy = y * dH - padTop + kh
            x = 0
            while (x < outputWidth) {
              ix = x * dW - padLeft + kw
              if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight ||
                ix < 0 || ix >= inputWidth) {
                fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x) = 0f
              } else {
                fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                  = inputData(srcOffset + it * inputHeight * inputWidth + iy * inputWidth + ix)
              }
              x += 1
            }
            y += 1
          }
          t += 1
        }
      } else {
        // unpadded fast path: every read is in range, no bounds checks
        t = 0
        while (t < outputDepth) {
          it = t * dT + kt
          y = 0
          while (y < outputHeight) {
            iy = y * dH + kh
            x = 0
            while (x < outputWidth) {
              ix = x * dW + kw
              fInputData(dstOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                = inputData(srcOffset + it * inputHeight * inputWidth + iy * inputWidth + ix)
              x += 1
            }
            y += 1
          }
          t += 1
        }
      }
      k += 1
    }
  }
  /**
   * Volumetric (3D) col2im, Double precision: accumulates (+=) the unfolded
   * buffer fInput back into `input`, the inverse scatter of
   * unfoldedCopyVolDouble. Iterates all (plane, kt, kh, kw) kernel positions
   * with four nested loops; out-of-range positions (padding) are skipped in
   * the bounds-checked branch. `input` must be pre-zeroed by the caller.
   */
  def unfoldedAccVolDouble(fInput: Tensor[Double], input: Tensor[Double], kT: Int, kW: Int, kH: Int,
    dT: Int, dW: Int, dH: Int,
    padFront: Int, padLeft: Int, padTop: Int,
    padBack: Int, padRight: Int, padBottom: Int,
    nInputPlane: Int, inputDepth: Int,
    inputWidth: Int, inputHeight: Int,
    outputDepth: Int, outputWidth: Int, outputHeight: Int): Unit = {
    var nip, kt, kw, kh, t, y, x, it, ix, iy = 0
    val inputData = input.storage().array()
    val fInputData = fInput.storage().array()
    nip = 0
    while (nip < nInputPlane) {
      kt = 0
      while (kt < kT) {
        kh = 0
        while (kh < kH) {
          kw = 0
          while (kw < kW) {
            val srcOffset = nip * (kT * kH * kW * outputDepth * outputHeight * outputWidth) +
              kt * (kH * kW * outputDepth * outputHeight * outputWidth) +
              kh * (kW * outputDepth * outputHeight * outputWidth) +
              kw * (outputDepth * outputHeight * outputWidth) + fInput.storageOffset() - 1
            val dstOffset = nip * (inputDepth * inputHeight * inputWidth) +
              input.storageOffset() - 1
            if (padFront > 0 || padLeft > 0 || padTop > 0 || padBack > 0
              || padRight > 0 || padBottom > 0) {
              // padded path: skip positions that map outside the input volume
              t = 0
              while (t < outputDepth) {
                it = t * dT - padFront + kt
                y = 0
                while (y < outputHeight) {
                  iy = y * dH - padTop + kh
                  x = 0
                  while (x < outputWidth) {
                    ix = x * dW - padLeft + kw
                    if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight ||
                      ix < 0 || ix >= inputWidth) {
                      // out of range: nothing to accumulate
                    }
                    else {
                      inputData(dstOffset + it * inputHeight * inputWidth + iy * inputWidth + ix) +=
                        fInputData(srcOffset + t * outputHeight * outputWidth +
                          y * outputWidth + x)
                    }
                    x += 1
                  }
                  y += 1
                }
                t += 1
              }
            }
            else {
              // unpadded fast path: no bounds checks needed
              t = 0
              while (t < outputDepth) {
                it = t * dT + kt
                y = 0
                while (y < outputHeight) {
                  iy = y * dH + kh
                  x = 0
                  while (x < outputWidth) {
                    ix = x * dW + kw
                    inputData(dstOffset + it * inputHeight * inputWidth + iy * inputWidth + ix) +=
                      fInputData(srcOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                    x += 1
                  }
                  y += 1
                }
                t += 1
              }
            }
            kw += 1
          }
          kh += 1
        }
        kt += 1
      }
      nip += 1
    }
  }
  /**
   * Volumetric (3D) col2im, Float precision. Float twin of
   * unfoldedAccVolDouble: accumulates (+=) the unfolded buffer fInput back
   * into `input`, skipping padded positions in the bounds-checked branch.
   * `input` must be pre-zeroed by the caller.
   */
  def unfoldedAccVolFloat(fInput: Tensor[Float], input: Tensor[Float], kT: Int, kW: Int, kH: Int,
    dT: Int, dW: Int, dH: Int,
    padFront: Int, padLeft: Int, padTop: Int,
    padBack: Int, padRight: Int, padBottom: Int,
    nInputPlane: Int, inputDepth: Int,
    inputWidth: Int, inputHeight: Int,
    outputDepth: Int, outputWidth: Int, outputHeight: Int): Unit = {
    var nip, kt, kw, kh, t, y, x, it, ix, iy = 0
    val inputData = input.storage().array()
    val fInputData = fInput.storage().array()
    nip = 0
    while (nip < nInputPlane) {
      kt = 0
      while (kt < kT) {
        kh = 0
        while (kh < kH) {
          kw = 0
          while (kw < kW) {
            val srcOffset = nip * (kT * kH * kW * outputDepth * outputHeight * outputWidth) +
              kt * (kH * kW * outputDepth * outputHeight * outputWidth) +
              kh * (kW * outputDepth * outputHeight * outputWidth) +
              kw * (outputDepth * outputHeight * outputWidth) + fInput.storageOffset() - 1
            val dstOffset = nip * (inputDepth * inputHeight * inputWidth) +
              input.storageOffset() - 1
            if (padFront > 0 || padLeft > 0 || padTop > 0 || padBack > 0
              || padRight > 0 || padBottom > 0) {
              // padded path: skip positions that map outside the input volume
              t = 0
              while (t < outputDepth) {
                it = t * dT - padFront + kt
                y = 0
                while (y < outputHeight) {
                  iy = y * dH - padTop + kh
                  x = 0
                  while (x < outputWidth) {
                    ix = x * dW - padLeft + kw
                    if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight ||
                      ix < 0 || ix >= inputWidth) {
                      // out of range: nothing to accumulate
                    }
                    else {
                      inputData(dstOffset + it * inputHeight * inputWidth + iy * inputWidth + ix) +=
                        fInputData(srcOffset + t * outputHeight * outputWidth +
                          y * outputWidth + x)
                    }
                    x += 1
                  }
                  y += 1
                }
                t += 1
              }
            }
            else {
              // unpadded fast path: no bounds checks needed
              t = 0
              while (t < outputDepth) {
                it = t * dT + kt
                y = 0
                while (y < outputHeight) {
                  iy = y * dH + kh
                  x = 0
                  while (x < outputWidth) {
                    ix = x * dW + kw
                    inputData(dstOffset + it * inputHeight * inputWidth + iy * inputWidth + ix) +=
                      fInputData(srcOffset + t * outputHeight * outputWidth + y * outputWidth + x)
                    x += 1
                  }
                  y += 1
                }
                t += 1
              }
            }
            kw += 1
          }
          kh += 1
        }
        kt += 1
      }
      nip += 1
    }
  }
  /**
   * vol2col with dilation, Double precision: unrolls 3D convolution patches of
   * `vol` into `col` for GEMM-based volumetric convolution. Column channel c
   * encodes (volume channel, kernel depth, kernel row, kernel col); positions
   * that read from the padding region are written as 0.
   */
  def vol2colDouble(
    vol: Tensor[Double],
    channels: Int,
    depth: Int, height: Int, width: Int,
    kT: Int, kH: Int, kW: Int,
    pT: Int, pH: Int, pW: Int,
    dT: Int, dH: Int, dW: Int,
    dilationT: Int, dilationH: Int, dilationW: Int,
    col: Tensor[Double]
  ): Unit = {
    val colData = col.storage().array()
    val colDataOffset = col.storageOffset() - 1
    val volData = vol.storage().array()
    val volDataOffset = vol.storageOffset() - 1
    // output extent in each dimension implied by kernel/stride/pad/dilation
    val depthCol = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1
    val widthCol = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1
    val heightCol = (height + 2 * pH - (dilationH * (kH - 1) + 1)) / dH + 1
    val channelsCol = channels * kT * kW * kH
    var c = 0
    while (c < channelsCol) {
      // decompose column channel into (kernel col, kernel row, kernel depth, volume channel)
      val wOffset = c % kW
      val hOffset = (c / kW) % kH
      val tOffset = (c / kW / kH) % kT
      val cVol = c / kT / kH / kW
      var t = 0
      while (t < depthCol) {
        var h = 0
        while (h < heightCol) {
          var w = 0
          while (w < widthCol) {
            val tPad = t * dT - pT + tOffset * dilationT
            val hPad = h * dH - pH + hOffset * dilationH
            val wPad = w * dW - pW + wOffset * dilationW
            if (tPad >= 0 && tPad < depth &&
              hPad >= 0 && hPad < height &&
              wPad >= 0 && wPad < width) {
              colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset) =
                volData(((cVol * depth + tPad) * height + hPad) * width + wPad + volDataOffset)
            } else {
              // padding region reads as zero
              colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset) = 0.0
            }
            w += 1
          }
          h += 1
        }
        t += 1
      }
      c += 1
    }
  }
  /**
   * vol2col with dilation, Float precision. Float twin of vol2colDouble:
   * unrolls 3D convolution patches of `vol` into `col`, zero-filling padded
   * positions.
   */
  def vol2colFloat(
    vol: Tensor[Float],
    channels: Int,
    depth: Int, height: Int, width: Int,
    kT: Int, kH: Int, kW: Int,
    pT: Int, pH: Int, pW: Int,
    dT: Int, dH: Int, dW: Int,
    dilationT: Int, dilationH: Int, dilationW: Int,
    col: Tensor[Float]
  ): Unit = {
    val colData = col.storage().array()
    val colDataOffset = col.storageOffset() - 1
    val volData = vol.storage().array()
    val volDataOffset = vol.storageOffset() - 1
    // output extent in each dimension implied by kernel/stride/pad/dilation
    val depthCol = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1
    val widthCol = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1
    val heightCol = (height + 2 * pH - (dilationH * (kH - 1) + 1)) / dH + 1
    val channelsCol = channels * kT * kW * kH
    var c = 0
    while (c < channelsCol) {
      // decompose column channel into (kernel col, kernel row, kernel depth, volume channel)
      val wOffset = c % kW
      val hOffset = (c / kW) % kH
      val tOffset = (c / kW / kH) % kT
      val cVol = c / kT / kH / kW
      var t = 0
      while (t < depthCol) {
        var h = 0
        while (h < heightCol) {
          var w = 0
          while (w < widthCol) {
            val tPad = t * dT - pT + tOffset * dilationT
            val hPad = h * dH - pH + hOffset * dilationH
            val wPad = w * dW - pW + wOffset * dilationW
            if (tPad >= 0 && tPad < depth &&
              hPad >= 0 && hPad < height &&
              wPad >= 0 && wPad < width) {
              colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset) =
                volData(((cVol * depth + tPad) * height + hPad) * width + wPad + volDataOffset)
            } else {
              // padding region reads as zero
              colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset) = 0f
            }
            w += 1
          }
          h += 1
        }
        t += 1
      }
      c += 1
    }
  }
  /**
   * col2vol with dilation, Double precision: accumulates (+=) the unrolled
   * `col` buffer back into `vol`, the inverse scatter of vol2colDouble.
   * Positions mapping into the padding region are dropped; `vol` must be
   * pre-zeroed by the caller for a plain reconstruction.
   */
  def col2volDouble(
    col: Tensor[Double],
    channels: Int,
    depth: Int, height: Int, width: Int,
    kT: Int, kH: Int, kW: Int,
    pT: Int, pH: Int, pW: Int,
    dT: Int, dH: Int, dW: Int,
    dilationT: Int, dilationH: Int, dilationW: Int,
    vol: Tensor[Double]
  ): Unit = {
    val colData = col.storage().array()
    val colDataOffset = col.storageOffset() - 1
    val volData = vol.storage().array()
    val volDataOffset = vol.storageOffset() - 1
    // output extent in each dimension implied by kernel/stride/pad/dilation
    val depthCol = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1
    val heightCol = (height + 2 * pH - (dilationH * (kH - 1) + 1)) / dH + 1
    val widthCol = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1
    val channelsCol = channels * kT * kW * kH
    var c = 0
    while (c < channelsCol) {
      // decompose column channel into (kernel col, kernel row, kernel depth, volume channel)
      val wOffset = c % kW
      val hOffset = (c / kW) % kH
      val tOffset = (c / kW / kH) % kT
      val cVol = c / kT / kH / kW
      var t = 0
      while (t < depthCol) {
        var h = 0
        while (h < heightCol) {
          var w = 0
          while (w < widthCol) {
            val tPad = t * dT - pT + tOffset * dilationT
            val hPad = h * dH - pH + hOffset * dilationH
            val wPad = w * dW - pW + wOffset * dilationW
            if (tPad >= 0 && tPad < depth &&
              hPad >= 0 && hPad < height &&
              wPad >= 0 && wPad < width) {
              volData(((cVol * depth + tPad) * height + hPad) * width + wPad + volDataOffset) +=
                colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset)
            }
            w += 1
          }
          h += 1
        }
        t += 1
      }
      c += 1
    }
  }
  /**
   * col2vol with dilation, Float precision. Float twin of col2volDouble:
   * accumulates (+=) the unrolled `col` buffer back into `vol`, dropping
   * padded positions; `vol` must be pre-zeroed by the caller.
   *
   * NOTE(review): unlike col2volDouble, this signature lists width/W-params
   * before height/H-params (depth, width, height, kT, kW, kH, ...). The body
   * is consistent with its own names, but positional callers must follow THIS
   * order; changing it now would silently break existing call sites, so the
   * inconsistency is only flagged here.
   */
  def col2volFloat(
    col: Tensor[Float],
    channels: Int,
    depth: Int, width: Int, height: Int,
    kT: Int, kW: Int, kH: Int,
    pT: Int, pW: Int, pH: Int,
    dT: Int, dW: Int, dH: Int,
    dilationT: Int, dilationW: Int, dilationH: Int,
    vol: Tensor[Float]
  ): Unit = {
    val colData = col.storage().array()
    val colDataOffset = col.storageOffset() - 1
    val volData = vol.storage().array()
    val volDataOffset = vol.storageOffset() - 1
    // output extent in each dimension implied by kernel/stride/pad/dilation
    val depthCol = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1
    val heightCol = (height + 2 * pH - (dilationH * (kH - 1) + 1)) / dH + 1
    val widthCol = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1
    val channelsCol = channels * kT * kW * kH
    var c = 0
    while (c < channelsCol) {
      // decompose column channel into (kernel col, kernel row, kernel depth, volume channel)
      val wOffset = c % kW
      val hOffset = (c / kW) % kH
      val tOffset = (c / kW / kH) % kT
      val cVol = c / kT / kH / kW
      var t = 0
      while (t < depthCol) {
        var h = 0
        while (h < heightCol) {
          var w = 0
          while (w < widthCol) {
            val tPad = t * dT - pT + tOffset * dilationT
            val hPad = h * dH - pH + hOffset * dilationH
            val wPad = w * dW - pW + wOffset * dilationW
            if (tPad >= 0 && tPad < depth &&
              hPad >= 0 && hPad < height &&
              wPad >= 0 && wPad < width) {
              volData(((cVol * depth + tPad) * height + hPad) * width + wPad + volDataOffset) +=
                colData(((c * depthCol + t) * heightCol + h) * widthCol + w + colDataOffset)
            }
            w += 1
          }
          h += 1
        }
        t += 1
      }
      c += 1
    }
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/NNPrimitive.scala | Scala | apache-2.0 | 60,070 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.util
import scala.collection.mutable.ArrayBuffer
import org.apache.spark._
import org.apache.spark.sql.{functions, QueryTest}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Project}
import org.apache.spark.sql.execution.{QueryExecution, WholeStageCodegenExec}
import org.apache.spark.sql.test.SharedSQLContext
class DataFrameCallbackSuite extends QueryTest with SharedSQLContext {
import testImplicits._
import functions._
  // Verifies that a registered QueryExecutionListener's onSuccess hook fires
  // once per successful DataFrame action, receiving the action name, the
  // QueryExecution (checked via its analyzed plan type) and a positive duration.
  test("execute callback functions when a DataFrame action finished successfully") {
    val metrics = ArrayBuffer.empty[(String, QueryExecution, Long)]
    val listener = new QueryExecutionListener {
      // Only test successful case here, so no need to implement `onFailure`
      override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {}
      override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
        metrics += ((funcName, qe, duration))
      }
    }
    spark.listenerManager.register(listener)
    val df = Seq(1 -> "a").toDF("i", "j")
    df.select("i").collect()
    df.filter($"i" > 0).count()
    // one callback per action: collect -> Project plan, count -> Aggregate plan
    assert(metrics.length == 2)
    assert(metrics(0)._1 == "collect")
    assert(metrics(0)._2.analyzed.isInstanceOf[Project])
    assert(metrics(0)._3 > 0)
    assert(metrics(1)._1 == "count")
    assert(metrics(1)._2.analyzed.isInstanceOf[Aggregate])
    assert(metrics(1)._3 > 0)
    // unregister so this listener does not leak into other tests
    spark.listenerManager.unregister(listener)
  }
  // Verifies that onFailure fires when an action throws: a UDF that always
  // throws makes collect() fail, and the listener must receive the same
  // exception message the caller observes.
  testQuietly("execute callback functions when a DataFrame action failed") {
    val metrics = ArrayBuffer.empty[(String, QueryExecution, Exception)]
    val listener = new QueryExecutionListener {
      override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {
        metrics += ((funcName, qe, exception))
      }
      // Only test failed case here, so no need to implement `onSuccess`
      override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {}
    }
    spark.listenerManager.register(listener)
    val errorUdf = udf[Int, Int] { _ => throw new RuntimeException("udf error") }
    val df = sparkContext.makeRDD(Seq(1 -> "a")).toDF("i", "j")
    val e = intercept[SparkException](df.select(errorUdf($"i")).collect())
    assert(metrics.length == 1)
    assert(metrics(0)._1 == "collect")
    assert(metrics(0)._2.analyzed.isInstanceOf[Project])
    // the listener sees the same failure the caller intercepted
    assert(metrics(0)._3.getMessage == e.getMessage)
    spark.listenerManager.unregister(listener)
  }
test("get numRows metrics by callback") {
val metrics = ArrayBuffer.empty[Long]
val listener = new QueryExecutionListener {
// Only test successful case here, so no need to implement `onFailure`
override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
val metric = qe.executedPlan match {
case w: WholeStageCodegenExec => w.child.longMetric("numOutputRows")
case other => other.longMetric("numOutputRows")
}
metrics += metric.value
}
}
spark.listenerManager.register(listener)
val df = Seq(1 -> "a").toDF("i", "j").groupBy("i").count()
df.collect()
df.collect()
Seq(1 -> "a", 2 -> "a").toDF("i", "j").groupBy("i").count().collect()
assert(metrics.length == 3)
assert(metrics(0) === 1)
assert(metrics(1) === 1)
assert(metrics(2) === 2)
spark.listenerManager.unregister(listener)
}
// TODO: Currently some LongSQLMetric use -1 as initial value, so if the accumulator is never
// updated, we can filter it out later. However, when we aggregate(sum) accumulator values at
// driver side for SQL physical operators, these -1 values will make our result smaller.
// A easy fix is to create a new SQLMetric(including new MetricValue, MetricParam, etc.), but we
// can do it later because the impact is just too small (1048576 tasks for 1 MB).
ignore("get size metrics by callback") {
val metrics = ArrayBuffer.empty[Long]
val listener = new QueryExecutionListener {
// Only test successful case here, so no need to implement `onFailure`
override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
metrics += qe.executedPlan.longMetric("dataSize").value
val bottomAgg = qe.executedPlan.children(0).children(0)
metrics += bottomAgg.longMetric("dataSize").value
}
}
spark.listenerManager.register(listener)
val sparkListener = new SaveInfoListener
spark.sparkContext.addSparkListener(sparkListener)
val df = (1 to 100).map(i => i -> i.toString).toDF("i", "j")
df.groupBy("i").count().collect()
def getPeakExecutionMemory(stageId: Int): Long = {
val peakMemoryAccumulator = sparkListener.getCompletedStageInfos(stageId).accumulables
.filter(_._2.name == InternalAccumulator.PEAK_EXECUTION_MEMORY)
assert(peakMemoryAccumulator.size == 1)
peakMemoryAccumulator.head._2.value.get.asInstanceOf[Long]
}
assert(sparkListener.getCompletedStageInfos.length == 2)
val bottomAggDataSize = getPeakExecutionMemory(0)
val topAggDataSize = getPeakExecutionMemory(1)
// For this simple case, the peakExecutionMemory of a stage should be the data size of the
// aggregate operator, as we only have one memory consuming operator per stage.
assert(metrics.length == 2)
assert(metrics(0) == topAggDataSize)
assert(metrics(1) == bottomAggDataSize)
spark.listenerManager.unregister(listener)
}
}
| spark0001/spark2.1.1 | sql/core/src/test/scala/org/apache/spark/sql/util/DataFrameCallbackSuite.scala | Scala | apache-2.0 | 6,539 |
package com.jeff.chaser.models.graph
import com.badlogic.gdx.utils.{Array => LibArray}
class Path(val path: LibArray[(Float, Float)])
| jregistr/Academia | CSC455-Game-Programming/Chaser/core/src/com/jeff/chaser/models/graph/Path.scala | Scala | mit | 136 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.vgg
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.DataSet
import com.intel.analytics.bigdl.dataset.image._
import com.intel.analytics.bigdl.nn.{ClassNLLCriterion, Module}
import com.intel.analytics.bigdl.optim._
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric._
import com.intel.analytics.bigdl.utils.{Engine, LoggerFilter, T, Table}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
object Train {
  // Quiet Spark's INFO logging, but keep BigDL optimizer progress at INFO.
  LoggerFilter.redirectSparkInfoLogs()
  Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)
  import Utils._
  /**
   * Entry point: trains a VGG model on CIFAR-10.
   * Command-line arguments are parsed into a `TrainParams`; nothing runs if parsing fails.
   */
  def main(args: Array[String]): Unit = {
    trainParser.parse(args, new TrainParams()).map(param => {
      val conf = Engine.createSparkConf().setAppName("Train Vgg on Cifar10")
        // Will throw an exception without this config when there is only one executor
        .set("spark.rpc.message.maxSize", "200")
      val sc = new SparkContext(conf)
      Engine.init
      // Training pipeline: raw bytes -> BGR image -> per-channel normalization -> mini-batches.
      val trainDataSet = DataSet.array(Utils.loadTrain(param.folder), sc) ->
        BytesToBGRImg() -> BGRImgNormalizer(trainMean, trainStd) ->
        BGRImgToBatch(param.batchSize)
      // Resume from a model snapshot when one is given, otherwise build a fresh VGG.
      val model = if (param.modelSnapshot.isDefined) {
        Module.load[Float](param.modelSnapshot.get)
      } else {
        VggForCifar10(classNum = 10)
      }
      // Likewise resume optimizer state, or configure SGD with epoch-step LR decay.
      val optimMethod = if (param.stateSnapshot.isDefined) {
        OptimMethod.load[Float](param.stateSnapshot.get)
      } else {
        new SGD[Float](learningRate = 0.01, learningRateDecay = 0.0,
          weightDecay = 0.0005, momentum = 0.9, dampening = 0.0, nesterov = false,
          learningRateSchedule = SGD.EpochStep(25, 0.5))
      }
      val optimizer = Optimizer(
        model = model,
        dataset = trainDataSet,
        criterion = new ClassNLLCriterion[Float]()
      )
      // Validation pipeline mirrors training but uses the test-set statistics.
      val validateSet = DataSet.array(Utils.loadTest(param.folder), sc) ->
        BytesToBGRImg() -> BGRImgNormalizer(testMean, testStd) ->
        BGRImgToBatch(param.batchSize)
      if (param.checkpoint.isDefined) {
        optimizer.setCheckpoint(param.checkpoint.get, Trigger.everyEpoch)
      }
      if(param.overWriteCheckpoint) {
        optimizer.overWriteCheckpoint()
      }
      // Validate after every epoch; stop once the configured epoch count is reached.
      optimizer
        .setValidation(Trigger.everyEpoch, validateSet, Array(new Top1Accuracy[Float]))
        .setOptimMethod(optimMethod)
        .setEndWhen(Trigger.maxEpoch(param.maxEpoch))
        .optimize()
      sc.stop()
    })
  }
}
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/models/vgg/Train.scala | Scala | apache-2.0 | 3,112 |
package nibbler
import java.io.File
import nibbler.api.{SparkContextService, NibblerServlet}
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, FunSuite}
import org.scalatra.test.scalatest.ScalatraSuite
// Exercises NibblerServlet inside Scalatra's embedded test server. Each test
// gets a fresh local SparkContext so no state leaks between tests.
@RunWith(classOf[JUnitRunner])
class NibblerServletTest extends ScalatraSuite with FunSuite with MockitoSugar with BeforeAndAfterEach {
  private val configuration = new SparkConf().setAppName("test").setMaster("local")
  private var sparkContext: SparkContext = null
  override protected def beforeEach(): Unit = {
    sparkContext = new SparkContext(configuration)
    // Mount the servlet under test at the root of the embedded server.
    addServlet(new NibblerServlet(new SparkContextService(sparkContext)), "/*")
  }
  override protected def afterEach(): Unit = {
    sparkContext.stop()
  }
  // Registering a non-existent input file should surface as a server error (500).
  test("should fail registering not existing data set") {
    post("/register", body = "{ \\"inputFile\\": \\"iDontExist\\" }") {
      status should equal(500)
    }
  }
  // Registering an existing (empty) file succeeds and reports its dimensions.
  test("data set registration completes") {
    val dataSetFile = File.createTempFile("nibbler", "suffix")
    dataSetFile.deleteOnExit()
    val requestBody = "{ \\"inputFile\\": \\"" + dataSetFile.getAbsolutePath + "\\" }"
    post("/register", body = requestBody.getBytes) {
      status should equal(200)
      body should (include("numberOfRows") and include("numberOfColumns") and include("0"))
    }
  }
  // The status endpoint answers 200 and echoes the expected marker sequence.
  test("should return status") {
    get("/status") {
      status should equal(200)
      body should include((1 to 9).mkString(","))
    }
  }
}
| pkoperek/nibbler | src/test/scala/nibbler/NibblerServletTest.scala | Scala | gpl-3.0 | 1,613 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import org.slf4j.{ Logger => Slf4jLogger }
import org.slf4j.LoggerFactory
import org.slf4j.Marker
import scala.collection.mutable
import scala.language.implicitConversions
import scala.collection.JavaConverters._
/**
* Typical logger interface.
*/
trait LoggerLike {
  /**
   * The underlying SLF4J Logger.
   */
  def logger: Slf4jLogger
  /**
   * The underlying SLF4J Logger.
   */
  lazy val underlyingLogger = logger
  // Overridden by mode-restricted loggers (see Logger.forMode) to switch logging off.
  @inline def enabled: Boolean = true
  /**
   * `true` if the logger instance is enabled for the `TRACE` level.
   */
  def isTraceEnabled(implicit mc: MarkerContext): Boolean =
    enabled && (mc.marker match {
      case Some(marker) => logger.isTraceEnabled(marker)
      case None         => logger.isTraceEnabled
    })
  /**
   * `true` if the logger instance is enabled for the `DEBUG` level.
   */
  def isDebugEnabled(implicit mc: MarkerContext): Boolean =
    enabled && (mc.marker match {
      case Some(marker) => logger.isDebugEnabled(marker)
      case None         => logger.isDebugEnabled
    })
  /**
   * `true` if the logger instance is enabled for the `INFO` level.
   */
  def isInfoEnabled(implicit mc: MarkerContext): Boolean =
    enabled && (mc.marker match {
      case Some(marker) => logger.isInfoEnabled(marker)
      case None         => logger.isInfoEnabled
    })
  /**
   * `true` if the logger instance is enabled for the `WARN` level.
   */
  def isWarnEnabled(implicit mc: MarkerContext): Boolean =
    enabled && (mc.marker match {
      case Some(marker) => logger.isWarnEnabled(marker)
      case None         => logger.isWarnEnabled
    })
  /**
   * `true` if the logger instance is enabled for the `ERROR` level.
   */
  def isErrorEnabled(implicit mc: MarkerContext): Boolean =
    enabled && (mc.marker match {
      case Some(marker) => logger.isErrorEnabled(marker)
      case None         => logger.isErrorEnabled
    })
  /**
   * Logs a message with the `TRACE` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param mc the implicit marker context, if defined.
   */
  def trace(message: => String)(implicit mc: MarkerContext): Unit =
    if (isTraceEnabled) {
      mc.marker.fold(logger.trace(message))(m => logger.trace(m, message))
    }
  /**
   * Logs a message and an associated exception with the `TRACE` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param error the associated exception
   * @param mc the implicit marker context, if defined.
   */
  def trace(message: => String, error: => Throwable)(implicit mc: MarkerContext): Unit =
    if (isTraceEnabled) {
      mc.marker.fold(logger.trace(message, error))(m => logger.trace(m, message, error))
    }
  /**
   * Logs a message with the `DEBUG` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param mc the implicit marker context, if defined.
   */
  def debug(message: => String)(implicit mc: MarkerContext): Unit =
    if (isDebugEnabled) {
      mc.marker.fold(logger.debug(message))(m => logger.debug(m, message))
    }
  /**
   * Logs a message and an associated exception with the `DEBUG` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param error the associated exception
   * @param mc the implicit marker context, if defined.
   */
  def debug(message: => String, error: => Throwable)(implicit mc: MarkerContext): Unit =
    if (isDebugEnabled) {
      mc.marker.fold(logger.debug(message, error))(m => logger.debug(m, message, error))
    }
  /**
   * Logs a message with the `INFO` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param mc the implicit marker context, if defined.
   */
  def info(message: => String)(implicit mc: MarkerContext): Unit =
    if (isInfoEnabled) {
      mc.marker.fold(logger.info(message))(m => logger.info(m, message))
    }
  /**
   * Logs a message and an associated exception with the `INFO` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param error the associated exception
   * @param mc the implicit marker context, if defined.
   */
  def info(message: => String, error: => Throwable)(implicit mc: MarkerContext): Unit =
    if (isInfoEnabled) {
      mc.marker.fold(logger.info(message, error))(m => logger.info(m, message, error))
    }
  /**
   * Logs a message with the `WARN` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param mc the implicit marker context, if defined.
   */
  def warn(message: => String)(implicit mc: MarkerContext): Unit =
    if (isWarnEnabled) {
      mc.marker.fold(logger.warn(message))(m => logger.warn(m, message))
    }
  /**
   * Logs a message and an associated exception with the `WARN` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param error the associated exception
   * @param mc the implicit marker context, if defined.
   */
  def warn(message: => String, error: => Throwable)(implicit mc: MarkerContext): Unit =
    if (isWarnEnabled) {
      mc.marker.fold(logger.warn(message, error))(m => logger.warn(m, message, error))
    }
  /**
   * Logs a message with the `ERROR` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param mc the implicit marker context, if defined.
   */
  def error(message: => String)(implicit mc: MarkerContext): Unit =
    if (isErrorEnabled) {
      mc.marker.fold(logger.error(message))(m => logger.error(m, message))
    }
  /**
   * Logs a message and an associated exception with the `ERROR` level.
   *
   * @param message the message to log (evaluated only when the level is enabled)
   * @param error the associated exception
   * @param mc the implicit marker context, if defined.
   */
  def error(message: => String, error: => Throwable)(implicit mc: MarkerContext): Unit =
    if (isErrorEnabled) {
      mc.marker.fold(logger.error(message, error))(m => logger.error(m, message, error))
    }
}
/**
 * A trait that can be mixed into a class or trait to add a `logger` named after the
 * concrete (runtime) class.
 */
trait Logging {
  // Named via getClass, so each subclass logs under its own class name.
  protected val logger: Logger = Logger(getClass)
}
/**
 * A Play logger.
 *
 * @param logger the underlying SLF4J logger
 */
class Logger private (val logger: Slf4jLogger, isEnabled: => Boolean) extends LoggerLike {
  def this(logger: Slf4jLogger) = this(logger, true)
  @inline override def enabled = isEnabled
  /**
   * Get a logger that only works when the application is in the given mode(s).
   *
   * If the global application mode has not been set (by calling Logger.setApplicationMode), this has no effect.
   */
  def forMode(mode: Mode*): Logger = {
    // Cache one Logger per mode combination so repeated calls reuse the same instance.
    modeLoggerCache.getOrElseUpdate(mode, new Logger(logger, Logger.applicationMode.forall(mode.contains)))
  }
  // Wrapped ConcurrentHashMap. Note that getOrElseUpdate on the Scala wrapper is not
  // atomic, so two threads may briefly build duplicate Logger instances (harmless).
  private[this] val modeLoggerCache: mutable.Map[Seq[Mode], Logger] =
    new ConcurrentHashMap[Seq[Mode], Logger]().asScala
}
/**
* High-level API for logging operations.
*
* For example, logging with the default application logger:
* {{{
* Logger.info("Hello!")
* }}}
*
* Logging with a custom logger:
* {{{
* Logger("my.logger").info("Hello!")
* }}}
*/
object Logger {
  // Logger used for messages emitted by this companion object itself.
  private[this] val log: Slf4jLogger = LoggerFactory.getLogger(getClass)
  // Global application mode shared by all mode-restricted loggers.
  // NOTE(review): plain var mutated without synchronization — assumed to change only
  // during application start/stop; confirm that invariant before relying on it.
  private[this] var _mode: Option[Mode] = None
  // Number of Play applications currently running in this JVM/classloader.
  private[this] val _appsRunning: AtomicInteger = new AtomicInteger(0)
  /**
   * The global application mode currently being used by the logging API.
   */
  def applicationMode: Option[Mode] = _mode
  /**
   * Set the global application mode used for logging. Used when the Play application starts.
   */
  def setApplicationMode(mode: Mode): Unit = {
    val appsRunning = _appsRunning.incrementAndGet()
    // Warn when two concurrently running applications disagree about the mode.
    applicationMode.foreach { currentMode =>
      if (currentMode != mode) {
        log.warn(s"Setting logging mode to $mode when it was previously set to $currentMode")
        log.warn(s"There are currently $appsRunning applications running.")
      }
    }
    _mode = Some(mode)
  }
  /**
   * Unset the global application mode. Used when the application shuts down.
   *
   * If multiple applications are running, the mode is only cleared once the last
   * application has shut down.
   */
  def unsetApplicationMode(): Unit = {
    val appsRunning = _appsRunning.decrementAndGet()
    if (appsRunning == 0) {
      _mode = None
    } else if (appsRunning < 0) {
      // More unsets than sets: warn, clear the mode, and restore the counter to zero.
      log.warn("Cannot unset application mode because none was previously set")
      _mode = None
      _appsRunning.incrementAndGet()
    }
  }
  /**
   * Obtains a logger instance.
   *
   * @param name the name of the logger
   * @return a logger
   */
  def apply(name: String): Logger = new Logger(LoggerFactory.getLogger(name))
  /**
   * Obtains a logger instance.
   *
   * @param clazz a class whose name will be used as logger name (a trailing `$`
   *              from Scala object classes is stripped)
   * @return a logger
   */
  def apply(clazz: Class[_]): Logger = new Logger(LoggerFactory.getLogger(clazz.getName.stripSuffix("$")))
}
/**
* A MarkerContext trait, to provide easy access to org.slf4j.Marker in Logger API. This is usually accessed
* with a marker through an implicit conversion from a Marker.
*
* {{{
* implicit val markerContext: MarkerContext = org.slf4j.MarkerFactory.getMarker("EXAMPLEMARKER")
* log.error("This message will be logged with the EXAMPLEMARKER marker")
* }}}
*
*/
trait MarkerContext {
  /**
   * @return the SLF4J marker to attach to log statements, if one has been defined.
   */
  def marker: Option[Marker]
}
object MarkerContext extends LowPriorityMarkerContextImplicits {
  /**
   * Wraps an SLF4J marker in a [[MarkerContext]]. The explicit form is useful when a
   * particular log statement should carry a specific Marker even though another
   * MarkerContext is already in implicit scope:
   *
   * {{{
   * implicit val implicitContext: MarkerContext = ...
   * val explicitContext: MarkerContext = MarkerContext(MarkerFactory.getMarker("EXPLICITMARKER"))
   *
   * // bypass the implicit MarkerContext
   * log.error("This message is logged with EXPLICITMARKER")(explicitContext)
   * }}}
   *
   * @param marker the marker to wrap in a DefaultMarkerContext
   * @return an instance of DefaultMarkerContext.
   */
  def apply(marker: Marker): MarkerContext = new DefaultMarkerContext(marker)
}
trait LowPriorityMarkerContextImplicits {
  /**
   * A MarkerContext that returns None. This is used as the "default" marker context if
   * no implicit MarkerContext is found in local scope (meaning there is nothing defined
   * through import or "implicit val").
   */
  // Fix: implicit definitions should always carry an explicit type annotation
  // (mandatory in Scala 3, and avoids surprising inference for implicit search).
  implicit val NoMarker: MarkerContext = MarkerContext(null)
  /**
   * Enables conversion from a marker to a MarkerContext:
   *
   * {{{
   * val mc: MarkerContext = MarkerFactory.getMarker("SOMEMARKER")
   * }}}
   *
   * @param marker the SLF4J marker to convert
   * @return the result of `MarkerContext.apply(marker)`
   */
  implicit def markerToMarkerContext(marker: Marker): MarkerContext = {
    MarkerContext(marker)
  }
}
/**
* A default marker context. This is used by `MarkerContext.apply`, but can also be used to provide
* explicit typing for markers. For example, to define a SecurityContext marker, you can define a case
* object extending DefaultMarkerContext:
*
* {{{
* case object SecurityMarkerContext extends DefaultMarkerContext(MarkerFactory.getMarker("SECURITY"))
* }}}
*
* @param someMarker a marker used in the `marker` method.
*/
class DefaultMarkerContext(someMarker: Marker) extends MarkerContext {
  /** @return the wrapped marker, or `None` when the constructor was given `null`. */
  def marker: Option[Marker] = if (someMarker eq null) None else Some(someMarker)
}
object MarkerContexts {
  /** Marker context tagging security-related log statements with the "SECURITY" marker. */
  case object SecurityMarkerContext extends DefaultMarkerContext(org.slf4j.MarkerFactory.getMarker("SECURITY"))
}
| benmccann/playframework | core/play/src/main/scala/play/api/Logger.scala | Scala | apache-2.0 | 11,771 |
package org.elasticsearch.spark.sql
import scala.collection.JavaConverters.mapAsJavaMapConverter
import scala.collection.JavaConverters.propertiesAsScalaMapConverter
import scala.collection.Map
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SQLContext
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_QUERY
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_READ
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_WRITE
import org.elasticsearch.hadoop.cfg.PropertiesSettings
import org.elasticsearch.hadoop.util.ObjectUtils
import org.elasticsearch.spark.cfg.SparkSettingsManager
object EsSparkSQL {
  // Eagerly loads the Spark-version-specific compatibility layer so a missing or
  // mismatched dependency fails fast at object initialization time.
  private val init = { ObjectUtils.loadClass("org.elasticsearch.spark.rdd.CompatUtils", classOf[ObjectUtils].getClassLoader) }
  // Read an Elasticsearch resource into a DataFrame; overloads add a resource,
  // a query, and/or extra configuration on top of the SparkConf-derived settings.
  def esDF(sc: SQLContext): DataFrame = esDF(sc, Map.empty[String, String])
  def esDF(sc: SQLContext, resource: String): DataFrame = esDF(sc, Map(ES_RESOURCE_READ -> resource))
  def esDF(sc: SQLContext, resource: String, query: String): DataFrame = esDF(sc, Map(ES_RESOURCE_READ -> resource, ES_QUERY -> query))
  def esDF(sc: SQLContext, cfg: Map[String, String]): DataFrame = {
    // Merge the per-call settings on top of a copy, leaving the shared Spark config untouched.
    val esConf = new SparkSettingsManager().load(sc.sparkContext.getConf).copy();
    esConf.merge(cfg.asJava)
    // Derive the DataFrame schema from the index mapping, then build a Row RDD over it.
    val schema = SchemaUtils.discoverMapping(esConf)
    val rowRDD = new ScalaEsRowRDD(sc.sparkContext, esConf.asProperties.asScala, schema)
    sc.createDataFrame(rowRDD, schema.struct)
  }
  def esDF(sc: SQLContext, resource: String, query: String, cfg: Map[String, String]): DataFrame = {
    esDF(sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource, ES_QUERY -> query))
  }
  def esDF(sc: SQLContext, resource: String, cfg: Map[String, String]): DataFrame = {
    esDF(sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource))
  }
  // Write a DataFrame to Elasticsearch; overloads mirror the esDF variants.
  def saveToEs(srdd: DataFrame, resource: String) {
    saveToEs(srdd, Map(ES_RESOURCE_WRITE -> resource))
  }
  def saveToEs(srdd: DataFrame, resource: String, cfg: Map[String, String]) {
    saveToEs(srdd, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_WRITE -> resource))
  }
  def saveToEs(srdd: DataFrame, cfg: Map[String, String]) {
    // NOTE(review): take(1) triggers a Spark job just to detect emptiness, so an
    // empty DataFrame costs one extra (cheap) job before being skipped.
    if (srdd == null || srdd.take(1).length == 0) {
      return
    }
    val sparkCtx = srdd.sqlContext.sparkContext
    val sparkCfg = new SparkSettingsManager().load(sparkCtx.getConf)
    val esCfg = new PropertiesSettings().load(sparkCfg.save())
    esCfg.merge(cfg.asJava)
    // One writer task per partition pushes rows to Elasticsearch.
    sparkCtx.runJob(srdd.rdd, new EsDataFrameWriter(srdd.schema, esCfg.save()).write _)
  }
} | yonglehou/elasticsearch-hadoop | spark/sql-13/src/main/scala/org/elasticsearch/spark/sql/EsSparkSQL.scala | Scala | apache-2.0 | 2,594 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master.Master
import org.apache.spark.internal.Logging
import org.apache.spark.ui.{SparkUI, WebUI}
import org.apache.spark.ui.JettyUtils._
/**
* Web UI server for the standalone master.
*/
private[master]
class MasterWebUI(
    val master: Master,
    requestedPort: Int)
  extends WebUI(master.securityMgr, master.securityMgr.getSSLOptions("standalone"),
    requestedPort, master.conf, name = "MasterUI") with Logging {
  val masterEndpointRef = master.self
  // Whether the UI exposes kill actions for applications and drivers.
  val killEnabled = master.conf.getBoolean("spark.ui.killEnabled", true)
  initialize()
  /** Initialize all components of the server. */
  def initialize() {
    val masterPage = new MasterPage(this)
    attachPage(new ApplicationPage(this))
    attachPage(masterPage)
    addStaticHandler(MasterWebUI.STATIC_RESOURCE_DIR)
    // Kill requests arrive as POSTs and redirect back to the main page afterwards.
    attachHandler(createRedirectHandler(
      "/app/kill", "/", masterPage.handleAppKillRequest, httpMethods = Set("POST")))
    attachHandler(createRedirectHandler(
      "/driver/kill", "/", masterPage.handleDriverKillRequest, httpMethods = Set("POST")))
  }
  /** Attach a handler that proxies worker/application UI requests through this UI. */
  def addProxy(): Unit = {
    val handler = createProxyHandler(idToUiAddress)
    attachHandler(handler)
  }
  /**
   * Resolve a worker id or application id to the address of its web UI, if any.
   * Performs a blocking ask to the master endpoint for the current cluster state.
   */
  def idToUiAddress(id: String): Option[String] = {
    val state = masterEndpointRef.askSync[MasterStateResponse](RequestMasterState)
    val maybeWorkerUiAddress = state.workers.find(_.id == id).map(_.webUiAddress)
    val maybeAppUiAddress = state.activeApps.find(_.id == id).map(_.desc.appUiUrl)
    maybeWorkerUiAddress.orElse(maybeAppUiAddress)
  }
}
private[master] object MasterWebUI {
  // Location of the static resources (css/js/images) served by the master UI.
  private val STATIC_RESOURCE_DIR = SparkUI.STATIC_RESOURCE_DIR
}
| bravo-zhang/spark | core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala | Scala | apache-2.0 | 2,591 |
package infcalcs.actors
import akka.actor.Actor
import infcalcs._
import infcalcs.exceptions.{InappropriateInitBinsException, ExcessActorException}
/**
* Created by ryansuderman on 9/18/15.
*/
/**
* Abstract class inherited by [[FixedDistributor]] and [[AdaptiveDistributor]]
* that holds a number of methods and variables for managing a parallel
* implementation of channel capacity estimation
*
* @param p
* @param calcConfig
*/
abstract class Distributor(p: DRData)(implicit calcConfig: CalcConfig) extends Actor {
  /** Mutual information estimates collected from [[Calculator]] instances. */
  var estList: Array[EstTuple] = Array()
  /** Bootstrapped estimates collected from [[Calculator]] instances. */
  var estBSList: Array[EstTupleBS] = Array()
  /** Tracks the number of signal bins for calculations. */
  var signalBins: NTuple[Int] = calcConfig.initSignalBins
  /** List of weights to try for a particular number of signal bins. */
  var weights = EstimateCC.getWeights(calcConfig)(p, signalBins)
  /** Number of calculations expected for the current weight list. */
  var totalCalculations = weights.length
  // Dispatch/completion counters; actor-confined state, so no synchronization needed.
  var sent = 0
  var received = 0
  var sigIndex = 0
  /** Records that one calculation request has been dispatched. */
  def sentCalc() = sent = sent + 1
  /** Records that one calculation result has been received. */
  def receivedCalc() = received = received + 1
  /** True once every calculation for the current configuration has been dispatched. */
  def sentAllCalcs: Boolean = sent == totalCalculations
  /** True once every calculation for the current configuration has completed. */
  def receivedAllCalcs: Boolean = received == totalCalculations
  /** Appends a plain estimate to the accumulated results. */
  def updateEstList(r: Result) = estList = estList :+ r.res
  /** Appends a bootstrapped estimate to the accumulated results. */
  def updateEstBSList(r: ResultBS) = estBSList = estBSList :+ r.res
  /**
   * Initializes some number of [[Calculator]] instances to calculate
   * mutual information estimates
   *
   * @param init carries the number of actors to spawn
   */
  def initializeCalculators(init: Init) =
    if (!EstimateMI.binNumberIsAppropriate(calcConfig)(p, (calcConfig.initBinTuples)))
      throw new InappropriateInitBinsException("initial bin numbers are too large")
    else {
      if (init.numActors < weights.length) {
        val calcList = (0 until init.numActors).toList map (x =>
          context actorOf(Calculator props calcConfig, s"calc_${x}"))
        // Seed each actor with one calculation; remaining weights are handed out
        // as results come back.
        calcList foreach { c => {
          c ! Estimate(weights(sent), signalBins, p, sent, sigIndex)
          sentCalc()
        }
        }
      } else {
        // requires that the number of actors is less than the number of weights per signal bin number
        throw new ExcessActorException("excess actors")
      }
    }
  /**
   * Stops actor-based estimation of the channel capacity, performs the final
   * estimation at the optimal bin/weight configuration, prints the resulting
   * estimate, and shuts the actor system down.
   */
  def stopCalculation() = {
    if (EstCC.appConfig.verbose) {
      println(s"Stop criterion reached with ${signalBins.product} total bins")
    }
    if (calcConfig.numParameters("numForBootstrap") > 0) {
      val maxOpt = EstimateMI.optMIBS(calcConfig)(estBSList.toVector)
      EstimateMI.finalEstimation(
        maxOpt.pairBinTuples,
        p,
        maxOpt.weight)(calcConfig)
      println(s"${(maxOpt.estimates getOrElse EstimateBS((0.0, (0.0, 0.0)), (0.0, (0.0, 0.0)), 0.0)).dataEstimate._1}")
    } else {
      // Fix: removed a second, identical optMIMult invocation whose result was
      // discarded; it only repeated the optimum search performed on this line.
      val maxOpt = EstimateMI.optMIMult(calcConfig)(estList.toVector)
      EstimateMI.finalEstimation(
        maxOpt.pairBinTuples,
        p,
        maxOpt.weight)(calcConfig)
      println(s"${(maxOpt.estimates getOrElse Estimates((0.0, 0.0), Nil, 0.0)).dataEstimate._1}")
    }
    context.system.shutdown()
  }
}
| ryants/EstCC | src/main/scala/infcalcs/actors/Distributor.scala | Scala | mit | 3,235 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.nio.channels._
import java.nio.file.{FileAlreadyExistsException, Files}
/**
* A file lock a la flock/funlock
*
* The given path will be created and opened if it doesn't exist.
*/
class FileLock(val file: File) extends Logging {
  // Make sure the lock file exists before opening a channel on it; a concurrent
  // creator simply wins the race.
  try {
    Files.createFile(file.toPath)
  } catch {
    case _: FileAlreadyExistsException => // already present, nothing to do
  }
  private val channel = new RandomAccessFile(file, "rw").getChannel()
  private var flock: java.nio.channels.FileLock = null

  /** Acquire the lock, blocking until it is available (throws if this process already holds it). */
  def lock(): Unit = this.synchronized {
    trace(s"Acquiring lock on ${file.getAbsolutePath}")
    flock = channel.lock()
  }

  /** Attempt to acquire the lock without blocking and return true on success. */
  def tryLock(): Boolean = this.synchronized {
    trace(s"Acquiring lock on ${file.getAbsolutePath}")
    try {
      // tryLock returns null when another process holds the lock, but throws
      // when the lock is held by this process, so both cases must be handled.
      flock = channel.tryLock()
      flock ne null
    } catch {
      case _: OverlappingFileLockException => false
    }
  }

  /** Release the lock if it is currently held. */
  def unlock(): Unit = this.synchronized {
    trace(s"Releasing lock on ${file.getAbsolutePath}")
    Option(flock).foreach(_.release())
  }

  /** Release the lock (if held) and close the associated FileChannel. */
  def destroy(): Unit = this.synchronized {
    unlock()
    channel.close()
  }
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/utils/FileLock.scala | Scala | apache-2.0 | 2,477 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets that meet specific criteria, providing a quick overview of the dataset's contents rather than any deeper analysis.