code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package debop4s.data.orm.hibernate.utils
import debop4s.data.orm.AbstractJpaJUnitSuite
import debop4s.data.orm.mapping.ScalaEmployee
import org.hibernate.SessionFactory
import org.junit.Test
import org.springframework.beans.factory.annotation.Autowired
import scala.collection.JavaConverters._
/**
* StatelessUtilsFunSuite
* @author sunghyouk.bae@gmail.com 2014. 9. 13.
*/
/**
 * Integration test for `StatelessUtils`: inserts ten employees through a
 * stateless Hibernate session inside a transaction, then reads them all
 * back with a read-only stateless session and checks they are present.
 */
class StatelessUtilsFunSuite extends AbstractJpaJUnitSuite {
// Injected by Spring; remains null until autowiring has run.
@Autowired val sf: SessionFactory = null
@Test
def testWithTransaction(): Unit = {
// Uses the StatelessSessionExtensions class from debop4s.data.orm.jpa._
StatelessUtils.withTransaction(sf) { stateless =>
(0 until 10).foreach { i =>
val emp = new ScalaEmployee()
emp.empNo = s"empNo-$i"
emp.name = s"emp-name-$i"
stateless.insert(emp)
}
}
// Uses the StatelessSessionExtensions class from debop4s.data.orm.jpa._
val emps = StatelessUtils.withReadOnly(sf) { stateless =>
// Criteria query with no restrictions: fetch every ScalaEmployee row.
val crit = stateless.createCriteria(classOf[ScalaEmployee])
crit.list().asInstanceOf[java.util.List[ScalaEmployee]]
}
emps should not be null
emps.size should be > 0
emps.asScala foreach { x => debug(s"emp=$x") }
}
}
| debop/debop4s | debop4s-data-orm/src/test/scala/debop4s/data/orm/hibernate/utils/StatelessUtilsFunSuite.scala | Scala | apache-2.0 | 1,276 |
/**
* Copyright (c) 2012, www.quartzsource.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.quartzsource.meutrino.client
import java.io.BufferedReader
import java.io.File
import java.io.InputStream
import java.io.InputStreamReader
import java.nio.charset.Charset
import scala.collection.JavaConverters._
import org.quartzsource.meutrino._
/**
* environment - global environment variables
* useGlobalHgrcPath - when this is false then only the local settings in .hg/hgrc from the current repository is read.
*
* --ssh CMD specify ssh command to use
* --remotecmd CMD specify hg command to run on the remote side
* --insecure do not verify server certificate (ignoring web.cacerts config)
*/
/**
 * Immutable configuration for launching a Mercurial command server.
 *
 * @param workingDir directory the `hg` process is started in (None = JVM working dir)
 * @param config Mercurial configuration, keyed section -> (key -> value)
 * @param useGlobalHgrcPath when false, only the local .hg/hgrc of the current repository is read
 * @param environment extra environment variables for the spawned process
 * @param sync whether the command server is driven synchronously
 */
case class CommandServerConfig(
    workingDir: Option[File] = None,
    config: Map[String, Map[String, String]] = Map.empty,
    useGlobalHgrcPath: Boolean = false,
    environment: Map[String, String] = Map.empty,
    sync: Boolean = false)
case object CommandServerConfig {
  /**
   * Java-friendly factory: converts the Java collections into immutable
   * Scala maps before delegating to the primary constructor.
   */
  def apply(workingDir: File, configMap: java.util.Map[String, java.util.Map[String, String]],
    useGlobalHgrcPath: Boolean, envMap: java.util.Map[String, String], sync: Boolean): CommandServerConfig = {
    // Deep-convert section -> (key -> value) into immutable Scala maps.
    val sections: Map[String, Map[String, String]] =
      configMap.asScala.iterator.map {
        case (section, kv) => section -> kv.asScala.toMap
      }.toMap
    new CommandServerConfig(Some(workingDir), sections, useGlobalHgrcPath, envMap.asScala.toMap, sync)
  }
}
/**
 * Creates, opens and clones Mercurial repositories by driving the `hg`
 * executable and its pipe-based command-server protocol.
 *
 * hg - full path to Mercurial executable
 * config - configuration
 */
class CommandServerFactory(hg: String, config: CommandServerConfig = new CommandServerConfig()) extends QFactory {
// The command server is always driven in UTF-8 (HGENCODING is forced below).
val charSet = Charset.forName("UTF-8")
/** Runs `hg init` at the given path, then opens the freshly created repository. */
def create(path: File): QRepository = {
val args: List[String] = List("init", path.getCanonicalPath())
// NOTE(review): result is unused; executeCommand is called for its side effect
// and for its non-zero-exit-code check (it throws CommandException on failure).
val output = executeCommand(args, config.workingDir)
open(path)
}
/** Starts a command server for an existing repository and wraps it in a QRepository. */
def open(path: File): QRepository = {
if (!new File(path, ".hg").isDirectory()) {
throw new IllegalStateException("No .hg repository found in " + path.getCanonicalPath())
}
// "serve --cmdserver pipe" speaks the command-server protocol over stdin/stdout.
val process = launch(List("serve", "--cmdserver", "pipe", "--config", "ui.interactive=True") ++
configToArguments(config.config), Some(path))
// The command server first appeared in Mercurial 1.9; reject older versions.
getVersion() match {
case v @ QVersion(1, minor, _) if minor < 9 =>
throw new CommandServerException("CommandServer is not supported before 1.9: " + v)
case _ => //Ok
}
val serverProcess = new CommandServer(path, process, config.sync)
new LocalRepository(serverProcess)
}
/** Runs `hg clone --pull` from `source` into `path`, then opens the result. */
def clone(source: String, path: File, noupdate: Boolean,
uncompressed: Boolean): QRepository = {
val args: List[String] = List("clone", "--pull") ++ (if (noupdate) List("--noupdate") else Nil) ++
(if (uncompressed) List("--uncompressed") else Nil) ++ List(source, path.getCanonicalPath())
// NOTE(review): result unused here as well; kept for the side effect / failure check.
val output = executeCommand(args, config.workingDir)
open(path)
}
/** Spawns `hg <arguments>` with a sanitized environment; does not wait for it. */
private[this] def launch(arguments: List[String], path: Option[File]): Process = {
val args = new java.util.ArrayList[String](arguments.size + 1)
args.add(hg)
args.addAll(arguments.asJava)
val processBuilder = new ProcessBuilder(args)
path.map(processBuilder.directory(_))
val env: java.util.Map[String, String] = processBuilder.environment()
//see http://selenic.com/hg/file/3e13ade423f0/mercurial/help/environment.txt
//disables any configuration settings that might change Mercurial's default output.
//(the value is not important, '1' is used to stay the same with hglib version 0.3)
env.put("HGPLAIN", "1") //always set HGPLAIN=1 (issue3502)
//This overrides the default locale setting detected by Mercurial,
//UTF-8 is always used
env.put("HGENCODING", charSet.displayName)
if (!config.useGlobalHgrcPath) {
//only the .hg/hgrc from the current repository is read.
env.put("HGRCPATH", "")
}
config.environment.foreach { case (key, value) => env.put(key, value) }
processBuilder.start()
}
/** Flattens the section->key->value map into repeated "--config section.key=value" arguments. */
private[this] def configToArguments(config: Map[String, Map[String, String]]): List[String] = {
val list = config.flatMap {
case (section, keyValueMap) => keyValueMap.map {
case (key, value) => section + "." + key + "=" + value
}
}
list.flatMap("--config" :: _ :: Nil).toList
}
/**
 * Runs `hg <arguments>` to completion and returns (stdout, stderr).
 * Throws CommandException when the process exits with a non-zero code.
 */
def executeCommand(arguments: List[String], path: Option[File] = None): (String, String) = {
// Drains an entire stream into a String using the factory's charset.
def read(input: InputStream): String = {
val reader = new BufferedReader(new InputStreamReader(input, charSet))
Stream.continually(reader.read()).takeWhile(_ != -1).map(_.toChar).toList.mkString
}
val process = launch(arguments, path)
// stderr is drained before stdout; NOTE(review): fully sequential reads could
// deadlock if hg ever filled both pipe buffers -- presumably outputs are small.
val error = read(process.getErrorStream)
val input = read(process.getInputStream)
val processCode = process.waitFor()
if (processCode != 0) {
throw new CommandException(processCode, input.trim, error.trim);
}
(input, error)
}
/** Queries `hg version --quiet` and parses the reported version number. */
def getVersion(): QVersion = {
val (output, _) = executeCommand(List("version", "--quiet"), config.workingDir)
CommandServerFactory.parseVersion(output)
}
}
object CommandServerFactory {
  /**
   * Parses the first line of `hg version --quiet` output, e.g.
   * "Mercurial Distributed SCM (version 2.3.1)", into a [[QVersion]].
   * A missing micro component defaults to 0.
   */
  def parseVersion(output: String): QVersion = {
    val VersionEntry = """(.+)\(version(.+)\)\s*""".r
    output match {
      case VersionEntry(_, version) =>
        val parts = version.split("""\.""").map(_.trim)
        val micro = if (parts.length > 2) parts(2).toInt else 0
        QVersion(parts(0).toInt, parts(1).toInt, micro)
      case _ => throw new RuntimeException("Unexpected output with version: " + output)
    }
  }
}
| cyberspinach/meutrino | src/main/scala/org/quartzsource/meutrino/client/CommandServerFactory.scala | Scala | apache-2.0 | 6,168 |
package scala
package collection
import scala.collection.immutable.TreeMap
import scala.collection.mutable.Builder
import scala.language.higherKinds
import scala.annotation.unchecked.uncheckedVariance
/** Base type of sorted maps */
trait SortedMap[K, +V]
extends Map[K, V]
with SortedMapOps[K, V, SortedMap, SortedMap[K, V]] {
// View this sorted map as a plain (unsorted) Map; it is already one.
def unsorted: Map[K, V] = this
// Builds same-kind collections through the sorted-map factory so the ordering is kept.
override protected def fromSpecificIterable(coll: Iterable[(K, V)] @uncheckedVariance): SortedMapCC[K, V] @uncheckedVariance = sortedMapFactory.from(coll)
override protected def newSpecificBuilder(): mutable.Builder[(K, V), SortedMapCC[K, V]] @uncheckedVariance = sortedMapFactory.newBuilder[K, V]()
/**
 * @note This operation '''has''' to be overridden by concrete collection classes to effectively
 * return a `SortedMapFactory[SortedMapCC]`. The implementation in `SortedMap` only returns
 * a `SortedMapFactory[SortedMap]`, but the compiler will '''not''' throw an error if the
 * effective `SortedMapCC` type constructor is more specific than `SortedMap`.
 *
 * @return The factory of this collection.
 */
def sortedMapFactory: SortedMapFactory[SortedMapCC] = SortedMap
override def empty: SortedMapCC[K, V] @uncheckedVariance = sortedMapFactory.empty
}
/**
 * Operations shared by sorted map implementations.
 *
 * @tparam K key type
 * @tparam V value type
 * @tparam CC sorted-map type constructor returned by transformations that can re-sort
 * @tparam C most specific type of this collection, returned by range operations
 */
trait SortedMapOps[K, +V, +CC[X, Y] <: Map[X, Y] with SortedMapOps[X, Y, CC, _], +C <: SortedMapOps[K, V, CC, C]]
extends MapOps[K, V, Map, C]
with SortedOps[K, C] {
/**
 * Type alias to `CC`. It is used to provide a default implementation of the `fromSpecificIterable`
 * and `newSpecificBuilder` operations.
 *
 * Due to the `@uncheckedVariance` annotation, usage of this type member can be unsound and is
 * therefore not recommended.
 */
protected type SortedMapCC[K, V] = CC[K, V] @uncheckedVariance
def sortedMapFactory: SortedMapFactory[SortedMapCC]
/** Similar to `mapFromIterable`, but returns a SortedMap collection type.
 * Note that the return type is now `CC[K2, V2]` aka `SortedMapCC[K2, V2]` rather than `MapCC[(K2, V2)]`.
 */
@`inline` protected final def sortedMapFromIterable[K2, V2](it: Iterable[(K2, V2)])(implicit ordering: Ordering[K2]): CC[K2, V2] = sortedMapFactory.from(it)
def unsorted: Map[K, V]
/**
 * Creates an iterator over all the key/value pairs
 * contained in this map having a key greater than or
 * equal to `start` according to the ordering of
 * this map. x.iteratorFrom(y) is equivalent
 * to but often more efficient than x.from(y).iterator.
 *
 * @param start The lower bound (inclusive)
 * on the keys to be returned
 */
def iteratorFrom(start: K): Iterator[(K, V)]
/**
 * Creates an iterator over all the keys(or elements) contained in this
 * collection greater than or equal to `start`
 * according to the ordering of this collection. x.keysIteratorFrom(y)
 * is equivalent to but often more efficient than
 * x.from(y).keysIterator.
 *
 * @param start The lower bound (inclusive)
 * on the keys to be returned
 */
def keysIteratorFrom(start: K): Iterator[K]
/**
 * Creates an iterator over all the values contained in this
 * map that are associated with a key greater than or equal to `start`
 * according to the ordering of this map. x.valuesIteratorFrom(y) is
 * equivalent to but often more efficient than
 * x.from(y).valuesIterator.
 *
 * @param start The lower bound (inclusive)
 * on the keys to be returned
 */
def valuesIteratorFrom(start: K): Iterator[V] = iteratorFrom(start).map(_._2)
// Smallest and largest keys; partial -- they throw on an empty map (head/last).
def firstKey: K = head._1
def lastKey: K = last._1
/** Find the element with smallest key larger than or equal to a given key.
 * @param key The given key.
 * @return `None` if there is no such node.
 */
def minAfter(key: K): Option[(K, V)] = rangeFrom(key).headOption
/** Find the element with largest key less than a given key.
 * @param key The given key.
 * @return `None` if there is no such node.
 */
def maxBefore(key: K): Option[(K, V)] = rangeUntil(key).lastOption
// Inclusive upper range: built from the exclusive rangeUntil by probing the key
// set for the first key >= `to`. If `to` itself is present, the exclusive bound
// becomes the key after it; otherwise the first key past `to` already works.
def rangeTo(to: K): C = {
val i = keySet.rangeFrom(to).iterator()
if (i.isEmpty) return coll
val next = i.next()
if (ordering.compare(next, to) == 0)
if (i.isEmpty) coll
else rangeUntil(i.next())
else
rangeUntil(next)
}
override def keySet: SortedSet[K] = new KeySortedSet
/** The implementation class of the set returned by `keySet` */
@SerialVersionUID(3L)
protected class KeySortedSet extends SortedSet[K] with GenKeySet with GenKeySortedSet {
def diff(that: Set[K]): SortedSet[K] = fromSpecificIterable(view.filterNot(that))
// Range views delegate to the underlying map's rangeImpl, then re-wrap its key set.
def rangeImpl(from: Option[K], until: Option[K]): SortedSet[K] = {
val map = SortedMapOps.this.rangeImpl(from, until)
new map.KeySortedSet
}
}
/** A generic trait that is reused by sorted keyset implementations */
protected trait GenKeySortedSet extends GenKeySet { this: SortedSet[K] =>
implicit def ordering: Ordering[K] = SortedMapOps.this.ordering
def iteratorFrom(start: K): Iterator[K] = SortedMapOps.this.keysIteratorFrom(start)
}
override def withFilter(p: ((K, V)) => Boolean): SortedMapOps.WithFilter[K, V, IterableCC, MapCC, CC] = new SortedMapOps.WithFilter(this, p)
// And finally, we add new overloads taking an ordering
/** Builds a new sorted map by applying a function to all elements of this $coll.
 *
 * @param f the function to apply to each element.
 * @return a new $coll resulting from applying the given function
 * `f` to each element of this $coll and collecting the results.
 */
def map[K2, V2](f: ((K, V)) => (K2, V2))(implicit ordering: Ordering[K2]): CC[K2, V2] =
sortedMapFactory.from(new View.Map[(K, V), (K2, V2)](toIterable, f))
/** Builds a new sorted map by applying a function to all elements of this $coll
 * and using the elements of the resulting collections.
 *
 * @param f the function to apply to each element.
 * @return a new $coll resulting from applying the given collection-valued function
 * `f` to each element of this $coll and concatenating the results.
 */
def flatMap[K2, V2](f: ((K, V)) => IterableOnce[(K2, V2)])(implicit ordering: Ordering[K2]): CC[K2, V2] =
sortedMapFactory.from(new View.FlatMap(toIterable, f))
/** Builds a new sorted map by applying a partial function to all elements of this $coll
 * on which the function is defined.
 *
 * @param pf the partial function which filters and maps the $coll.
 * @return a new $coll resulting from applying the given partial function
 * `pf` to each element on which it is defined and collecting the results.
 * The order of the elements is preserved.
 */
def collect[K2, V2](pf: PartialFunction[(K, V), (K2, V2)])(implicit ordering: Ordering[K2]): CC[K2, V2] =
flatMap { kv =>
if (pf.isDefinedAt(kv)) new View.Single(pf(kv))
else View.Empty
}
/** Returns a new $coll containing the elements from the left hand operand followed by the elements from the
 * right hand operand. The element type of the $coll is the most specific superclass encompassing
 * the element types of the two operands.
 *
 * @param xs the traversable to append.
 * @tparam K2 the type of the keys of the returned $coll.
 * @tparam V2 the type of the values of the returned $coll.
 * @return a new collection of type `CC[K2, V2]` which contains all elements
 * of this $coll followed by all elements of `xs`.
 */
def concat[K2 >: K, V2 >: V](xs: Iterable[(K2, V2)])(implicit ordering: Ordering[K2]): CC[K2, V2] = sortedMapFactory.from(new View.Concat(toIterable, xs))
/** Alias for `concat` */
@`inline` final def ++ [K2 >: K, V2 >: V](xs: Iterable[(K2, V2)])(implicit ordering: Ordering[K2]): CC[K2, V2] = concat(xs)
@deprecated("Consider requiring an immutable SortedMap or fall back to SortedMap.concat ", "2.13.0")
override def + [V1 >: V](kv: (K, V1)): CC[K, V1] = sortedMapFactory.from(new View.Appended(toIterable, kv))
// We override these methods to fix their return type (which would be `Map` otherwise)
override def concat[V2 >: V](xs: collection.Iterable[(K, V2)]): CC[K, V2] = sortedMapFactory.from(new View.Concat(toIterable, xs))
override def ++ [V2 >: V](xs: collection.Iterable[(K, V2)]): CC[K, V2] = concat(xs)
// TODO Also override mapValues
}
object SortedMapOps {
/** Specializes `MapWithFilter` for sorted Map collections
 *
 * @define coll sorted map collection
 */
class WithFilter[K, +V, +IterableCC[_], +MapCC[X, Y] <: Map[X, Y], +CC[X, Y] <: Map[X, Y] with SortedMapOps[X, Y, CC, _]](
self: SortedMapOps[K, V, CC, _] with MapOps[K, V, MapCC, _] with IterableOps[(K, V), IterableCC, _],
p: ((K, V)) => Boolean
) extends MapOps.WithFilter[K, V, IterableCC, MapCC](self, p) {
// map/flatMap rebuild through the sorted factory so results keep a key ordering.
def map[K2 : Ordering, V2](f: ((K, V)) => (K2, V2)): CC[K2, V2] =
self.sortedMapFactory.from(new View.Map(filtered, f))
def flatMap[K2 : Ordering, V2](f: ((K, V)) => IterableOnce[(K2, V2)]): CC[K2, V2] =
self.sortedMapFactory.from(new View.FlatMap(filtered, f))
// Chained withFilter composes the predicates instead of stacking wrapper objects.
override def withFilter(q: ((K, V)) => Boolean): WithFilter[K, V, IterableCC, MapCC, CC] =
new WithFilter[K, V, IterableCC, MapCC, CC](self, (kv: (K, V)) => p(kv) && q(kv))
}
}
object SortedMap extends SortedMapFactory.Delegate[SortedMap](TreeMap)
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/SortedMap.scala | Scala | apache-2.0 | 9,537 |
package org.cloudio.morpheus.square
/**
* Created by zslajchrt on 12/03/16.
*/
/**
 * A [[Graphics]] implementation that draws nothing: every draw request is
 * instead reported on standard output. Useful for demos and debugging.
 */
class DummyGraphics extends Graphics {

  /** Reports a rectangle draw request as `Rectangle(x,y,w,h)`. */
  override def drawRect(x: Double, y: Double, w: Double, h: Double): Unit = {
    val line = s"Rectangle($x,$y,$w,$h)"
    println(line)
  }

  /** Reports a circle draw request as `Circle(x,y,r)`. */
  override def drawCircle(x: Double, y: Double, r: Double): Unit = {
    val line = s"Circle($x,$y,$r)"
    println(line)
  }
}
| zslajchrt/morpheus-tutor | src/main/scala/org/cloudio/morpheus/square/DummyGraphics.scala | Scala | apache-2.0 | 354 |
package slamdata.engine.analysis
import scalaz.{Tree => ZTree, Validation, Semigroup, NonEmptyList, Foldable1, Show, Cord}
import scalaz.Validation.FlatMap._
import scalaz.syntax.traverse._
import scalaz.std.vector._
import scalaz.std.list._
import scalaz.std.tuple._
import scala.collection.JavaConverters._
/**
 * A rose tree over nodes of type `N`, defined by a `root` and a `children`
 * function. Provides folds, scalaz-based validated traversals, and cached
 * parent/sibling lookups (built once, lazily, by identity).
 */
trait Tree[N] { self =>
  /** The root node of this tree. */
  def root: N
  /** The (possibly empty) list of children of `node`. */
  def children(node: N): List[N]

  /** Parent of `node`, or None for the root (identity-based lookup). */
  final def parent(node: N): Option[N] = Option(parentMap.get(node))
  /** All siblings of `node` (excluding `node` itself); Nil for the root. */
  final def siblings(node: N): List[N] = Option(siblingMap.get(node)).getOrElse(Nil)
  /** A node is a leaf iff it has no children. */
  final def isLeaf(node: N): Boolean = children(node).isEmpty

  /**
   * Threads an accumulator down every root-to-leaf path, validating at each
   * node; returns one accumulated value per leaf, or the combined errors.
   */
  final def fork[Z, E: Semigroup](initial: Z)(f: (Z, N) => Validation[E, Z]): Validation[E, Vector[Z]] = {
    def fork0(acc0: Z, node: N): Validation[E, Vector[Z]] = {
      f(acc0, node).fold(
        Validation.failure,
        acc => {
          val children = self.children(node)
          if (children.length == 0) Validation.success(Vector(acc))
          else children.toVector.map(child => fork0(acc, child)).sequence[({type V[X] = Validation[E, X]})#V, Vector[Z]].map(_.flatten)
        }
      )
    }
    fork0(initial, root)
  }

  /**
   * Bottom-up fold: combines validated results of all children (via the
   * Semigroup on Z) before applying `f` at each node, finishing at the root.
   */
  final def join[Z: Semigroup, E: Semigroup](initial: Z)(f: (Z, N) => Validation[E, Z]): Validation[E, Z] = {
    def join0: Z => N => Validation[E, Z] = (acc: Z) => (node: N) => {
      val children = self.children(node)
      (children.headOption.map { head =>
        val children2 = NonEmptyList.nel(head, children.drop(1))
        Foldable1[NonEmptyList].foldMap1(children2)(join0(acc)).flatMap(acc => f(acc, node))
      }).getOrElse(f(acc, node))
    }
    join0(initial)(root)
  }

  /** A view of this tree re-rooted at `node`; shares the children function. */
  final def subtree(node: N) = new Tree[N] {
    def root = node
    def children(node: N) = self.children(node)
  }

  /** Pre-order fold: each parent is combined into the accumulator before its children. */
  final def foldDown[Z](acc: Z)(f: (Z, N) => Z): Z = {
    def foldDown0(acc: Z, node: N): Z = children(node).foldLeft(f(acc, node))(foldDown0 _)
    foldDown0(acc, root)
  }

  /** Post-order fold: all children are combined into the accumulator before their parent. */
  final def foldUp[Z](acc: Z)(f: (Z, N) => Z): Z = {
    def foldUp0(acc: Z, node: N): Z = f(children(node).foldLeft(acc)(foldUp0 _), node)
    foldUp0(acc, root)
  }

  /** Collects the results of a partial function over every node where it is defined. */
  final def collect[Z](f: PartialFunction[N, Z]): List[Z] = {
    val lifted = f.lift
    foldUp(List.empty[Z]) {
      case (acc, n) => lifted(n).map(_ :: acc).getOrElse(acc)
    }
  }

  /** All nodes of the tree (order: reversed pre-order, cached). */
  lazy val nodes: List[N] = foldDown(List.empty[N])((acc, n) => n :: acc)

  /** Pairs every node with `f(node)` in an annotated tree. */
  final def annotate[B](f: N => B): AnnotatedTree[N, B] = AnnotatedTree[N, B](root, self.children _, f)

  // BUG FIX: previously `nodes.filter(v => !isLeaf(v))`, which collected the
  // NON-leaf (internal) nodes despite the name. Leaves are nodes with no children.
  lazy val leaves: List[N] = nodes.filter(isLeaf)

  // TODO: Change from identity map once we implement Node properly...
  // Identity-keyed child -> parent index, built in one pre-order pass.
  private lazy val parentMap: java.util.Map[N, N] = (foldDown(new java.util.IdentityHashMap[N, N]) {
    case (parentMap, parentNode) =>
      self.children(parentNode).foldLeft(parentMap) {
        case (parentMap, childNode) =>
          parentMap.put(childNode, parentNode)
          parentMap
      }
  })

  // TODO: Ditto
  // Identity-keyed node -> siblings index (each child mapped to its co-children).
  private lazy val siblingMap: java.util.Map[N, List[N]] = foldDown(new java.util.IdentityHashMap[N, List[N]]) {
    case (siblingMap, parentNode) =>
      val children = self.children(parentNode)
      children.foldLeft(siblingMap) {
        case (siblingMap, childNode) =>
          siblingMap.put(childNode, children.filter(_ != childNode))
          siblingMap
      }
  }
}
/** Typeclass instances for [[Tree]]. */
trait TreeInstances {
// Show instance: renders the tree as ASCII art via scalaz's ZTree.drawTree.
implicit def ShowTree[N: Show] = new Show[Tree[N]] {
override def show(v: Tree[N]) = {
// Convert our Tree to a scalaz Tree lazily (children as a Stream).
def toTree(node: N): ZTree[N] = ZTree.node(node, v.children(node).toStream.map(toTree _))
Cord(toTree(v.root).drawTree)
}
}
}
/** Factory for [[Tree]] values; also brings the Show instance into scope. */
object Tree extends TreeInstances {
  /** Creates a tree from a root node and a children function. */
  def apply[N](root0: N, children0: N => List[N]): Tree[N] = {
    val r = root0
    val kids = children0
    new Tree[N] {
      def root: N = r
      def children(node: N): List[N] = kids(node)
    }
  }
}
| mossprescott/quasar | src/main/scala/slamdata/engine/analysis/Tree.scala | Scala | agpl-3.0 | 3,786 |
package io.github.hamsters
import scala.annotation.implicitNotFound
import scala.concurrent.{ExecutionContext, Future}
/**
 * Minimal monad type class: lifts values with `pure` and sequences
 * computations with `flatMap`; `map` is inherited from [[Functor]].
 */
@implicitNotFound("""Cannot create monad instance for type ${Box}.
If you are combining Future with another monad you might pass
an (implicit ec: ExecutionContext) parameter to your method
or import scala.concurrent.ExecutionContext.Implicits.global""")
trait Monad[Box[_]] extends Functor[Box] {
// Lifts a plain value into the Box context.
def pure[A](a: A): Box[A]
// Runs boxA, then feeds its result into f to continue the computation.
def flatMap[A, B](boxA: Box[A])(f: A => Box[B]): Box[B]
}
/** Default [[Monad]] instances. */
object Monad {

  /** Monad for Option, delegating straight to the built-in combinators. */
  implicit val optionMonad: Monad[Option] = new Monad[Option] {
    override def pure[A](value: A): Option[A] = Option(value)
    override def flatMap[A, B](fa: Option[A])(f: A => Option[B]): Option[B] = fa.flatMap(f)
    override def map[A, B](fa: Option[A])(f: A => B): Option[B] = fa.map(f)
  }

  /** Monad for Future; an ExecutionContext is required for map/flatMap. */
  implicit def futureMonad(implicit ec: ExecutionContext): Monad[Future] = new Monad[Future] {
    // Already-computed value: no execution context needed.
    override def pure[A](value: A): Future[A] = Future.successful(value)
    override def flatMap[A, B](fa: Future[A])(f: A => Future[B]): Future[B] = fa.flatMap(f)
    override def map[A, B](fa: Future[A])(f: A => B): Future[B] = fa.map(f)
  }
}
| scala-hamsters/hamsters | shared/src/main/scala/io/github/hamsters/Monad.scala | Scala | apache-2.0 | 1,210 |
package uk.gov.gds.ier.model
/**
 * A registrant's previous address: whether they had one, and the
 * partially-entered address details when available.
 */
case class LastAddress(
hasAddress:Option[HasAddressOption],
address:Option[PartialAddress]
)
/** Play-forms mapping for [[LastAddress]]; both fields are optional in the form. */
object LastAddress extends ModelMapping {
import playMappings._
lazy val mapping = playMappings.mapping(
keys.hasAddress.key -> optional(HasAddressOption.mapping),
keys.address.key -> optional(PartialAddress.mapping)
) (
LastAddress.apply
) (
LastAddress.unapply
)
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/model/LastAddress.scala | Scala | mit | 428 |
package us.feliscat.score.update
import us.feliscat.text.similarity.{AverageSimilarityCalculator, SimilarityCalculator}
import us.feliscat.text.vector._
import us.feliscat.types.{Document, Score, Sentence}
import us.feliscat.util.uima.fsList.FSListUtils
import scala.collection.mutable.ListBuffer
/**
* <pre>
* Created on 3/11/15.
* </pre>
* @param instruction instruction
* @param scoreIndex score index
* @author K.Sakamoto
*/
class RelevanceScorer(instruction: Document, scoreIndex: Int) extends Scorer {
// Name recorded on every Score annotation written by this scorer.
private val SCORER_NAME: String = "WordBasedRelevanceScorer"
// Similarity against the whole instruction document, as one frequency vector.
private lazy val similarityCalculator: SimilarityCalculator = {
new SimilarityCalculator(FrequencyVectorGeneratorFromJCas.getVectorFromAnnotation(instruction))
}
// Average similarity against the instruction's individual sentence vectors.
private lazy val averageSimilarityCalculator: AverageSimilarityCalculator = {
new AverageSimilarityCalculator({
for (sentence <- instruction.getSentenceSet.toSeq.asInstanceOf[Seq[Sentence]]) yield {
FrequencyVectorGeneratorFromJCas.getVectorFromAnnotation(sentence)
}
})
}
/**
 * Scores one sentence by its average similarity to the instruction's
 * sentences, stores the result on the sentence at `scoreIndex`, and
 * returns the similarity.
 */
override def scoreBySentence(sentence: Sentence): Double = {
val similarity: Double = averageSimilarityCalculator.calculate(
FrequencyVectorGeneratorFromJCas.getVectorFromAnnotation(sentence)
)
val score: Score = sentence.getScoreList(scoreIndex)
score.setScorer(SCORER_NAME)
score.setScore(similarity)
sentence.setScoreList(scoreIndex, score)
similarity
}
/**
 * Scores a whole document by merging all of its sentence vectors into one
 * and comparing it to the instruction vector; stores the result on the
 * document at `scoreIndex` and returns it.
 */
override def scoreBySentences(document: Document): Double = {
val score: Double = similarityCalculator.calculate(
FrequencyVectorMerger.merge {
val buffer = ListBuffer.empty[FrequencyVector]
for (sentence <- document.getSentenceSet.toSeq.asInstanceOf[Seq[Sentence]]) {
buffer += FrequencyVectorGeneratorFromJCas.getVectorFromAnnotation(sentence)
}
buffer.result
}
)
val scoreType: Score = document.getScoreList(scoreIndex)
scoreType.setScorer(SCORER_NAME)
scoreType.setScore(score)
document.setScoreList(scoreIndex, scoreType)
score
}
}
| ktr-skmt/FelisCatusZero-multilingual | libraries4jcas/src/main/scala/us/feliscat/score/update/RelevanceScorer.scala | Scala | apache-2.0 | 2,078 |
package pl.newicom.dddd
/**
 * Mixin for business entities whose state is persisted as an event stream.
 * Can only be mixed into a [[BusinessEntity]].
 */
trait Eventsourced {
this: BusinessEntity =>

// By default the event stream is named after the entity's id.
def streamName: String = id

// NOTE(review): semantics not visible here -- presumably the logical
// grouping (office/department) this entity belongs to; confirm in akka-ddd docs.
def department: String
}
| pawelkaczor/akka-ddd | akka-ddd-protocol/src/main/scala/pl/newicom/dddd/Eventsourced.scala | Scala | mit | 131 |
package chapter20
import java.io.PrintWriter
import java.util.Date
/**
* 20.8 구조적 서브타이핑
*
* 어떤 클래스 A가 다른 클래스 B를 상속할 때, A를 B의 이름에 의한 서브타입(nominal subtype)이라고 말한다.
* 스칼라는 구조적인 서브타이핑(structual subtyping)도 지원한다. 두 타입의 멤버가 같기 때문에 생기는 관계다.
* 스칼라에서 구조적 서브타입을 사용하려면, 세분화한 타입(refinement type)을 사용하면 된다.
*
* 무언가를 새로 설계하는 경우, 이름에 의한 서브타입 관계를 먼저 사용해야 한다. 이름은 짧은 식별자이므로, 타입을 모두
* 나열하는 거보다 훨씬 간결하다.
*
* 풀을 먹는 동물을 선언할 때, 이름에 의한 서브타입 보다, 세분화한 타입이 더 좋은 경우도 있다.
*
* AnimalThatEatsGrass 라는 트레이트를 만들기 보다는,
* Animal { Type SuitableFood = Graa }로, 기반클래스에서 멤버 타입을 더 자세히 지정한다.
*
* 구조적 서브 타이핑이 좋은 또 다른 경우는, 다른 사람이 작성한 여러 클래스를 한꺼번에 그룹으로 다루고 싶을 때다.
*
* 예를 들어 9.4절의 빌려주기 패턴을 일반화하고 싶다고 하자.
*
* 원래는 PrintWriter 타입에 대해서만 작동했지만, close 메소드를 제공하는 모든 타입에 대해
* 이 예제를 사용하고 싶다고 하자. 또한 열린 소켓을 정리하기를 바랄 수 있다.
*
* 이 메소드는 연산을 수행하고, 객체를 닫기에, 연산과 대상 객체를 인자로 받아야 한다.
*/
/**
 * Demonstrates a refinement (structural subtyping) type: the list accepts
 * any Animal whose member type SuitableFood is fixed to Grass, i.e. only
 * grass-eating animals, without introducing a dedicated subclass.
 */
class Pasture {
var animals: List[Animal { type SuitableFood = Grass }] = Nil
// ...
}
/** Demo: a generalized loan pattern using a structural type bound. */
object c20_i08 extends App {
/*
 * To state that T supports a close() method, we can use <: to give T an
 * upper bound with a refinement.
 *
 * Because no base type is named, AnyRef is assumed automatically -- and AnyRef
 * itself never has a close method. Technically speaking, a type of this form
 * ({def close(): Unit}) is called a structural type.
 */
// Loan pattern: runs `operation` on `obj`, closes `obj`, returns the result.
// NOTE(review): close() is skipped if `operation` throws -- no try/finally here.
def using[T <: {def close(): Unit }, S](obj: T)(operation: T => S) = {
val result = operation(obj)
obj.close()
result
}
// Writes the current date to "date.txt" and closes the writer.
using(new PrintWriter("date.txt")) { writer => writer.println(new Date) }
//using(serverSocket.accept()) {socket => socket.getOutputStream().write("hello, world\\n".getBytes) }
}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sat Aug 8 20:26:34 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation
package util
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PackageInfo` trait provides methods to retrieve meta-data about packages.
*/
trait PackageInfo
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the current (calling context) package name.
*/
def getPackageName: String = getClass.getPackage.getName
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the default data path for the current package.
*/
def getDataPath: String =
{
getPackageName.replace ("scalation", "data").replace (".", ⁄) + ⁄
} // gatDataPath
} // PackageInfo trait
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/util/PackageInfo.scala | Scala | mit | 945 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.chill.config.{ScalaAnyRefMapConfig, ConfiguredInstantiator}
import cascading.pipe.assembly.AggregateBy
import cascading.flow.{Flow, FlowDef, FlowProps, FlowListener, FlowSkipStrategy, FlowStepStrategy}
import cascading.pipe.Pipe
import cascading.property.AppProps
import cascading.tuple.collect.SpillableProps
import cascading.stats.CascadingStats
import org.apache.hadoop.io.serializer.{Serialization => HSerialization}
//For java -> scala implicits on collections
import scala.collection.JavaConversions._
import java.io.{ BufferedWriter, File, FileOutputStream, OutputStreamWriter }
import java.util.{Calendar, UUID}
import java.util.concurrent.{Executors, TimeUnit, ThreadFactory, Callable, TimeoutException}
import java.util.concurrent.atomic.AtomicInteger
import java.security.MessageDigest
object Job {
// Config key under which a job's unique run id is stored.
val UNIQUE_JOB_ID = "scalding.job.uniqueId"
/**
 * Use reflection to create the job by name. We use the thread's
 * context classloader so that classes in the submitted jar and any
 * jars included via -libjar can be found.
 */
def apply(jobName : String, args : Args) : Job = {
Class.forName(jobName, true, Thread.currentThread().getContextClassLoader)
.getConstructor(classOf[Args])
.newInstance(args)
.asInstanceOf[Job]
}
}
/** Job is a convenience class to make using Scalding easier.
* Subclasses of Job automatically have a number of nice implicits to enable more concise
* syntax, including:
* conversion from Pipe, Source or Iterable to RichPipe
* conversion from Source or Iterable to Pipe
* conversion to collections or Tuple[1-22] to cascading.tuple.Fields
*
* Additionally, the job provides an implicit Mode and FlowDef so that functions that
* register starts or ends of a flow graph, specifically anything that reads or writes data
* on Hadoop, has the needed implicits available.
*
* If you want to write code outside of a Job, you will want to either:
*
* make all methods that may read or write data accept implicit FlowDef and Mode parameters.
*
* OR:
*
* write code that rather than returning values, it returns a (FlowDef, Mode) => T,
* these functions can be combined Monadically using algebird.monad.Reader.
*/
class Job(val args : Args) extends FieldConversions with java.io.Serializable {
// Set specific Mode
implicit def mode: Mode = Mode.getMode(args).getOrElse(sys.error("No Mode defined"))
// This allows us to register this job in a global space when processing on the cluster
// and find it again.
// E.g. stats can all locate the same job back again to find the right flowProcess
final implicit val uniqueId = UniqueID(UUID.randomUUID.toString)
// Use this if a map or reduce phase takes a while before emitting tuples.
def keepAlive {
val flowProcess = RuntimeStats.getFlowProcessForUniqueId(uniqueId.get)
flowProcess.keepAlive
}
/**
* you should never call this directly, it is here to make
* the DSL work. Just know, you can treat a Pipe as a RichPipe
* within a Job
*/
implicit def pipeToRichPipe(pipe : Pipe): RichPipe = new RichPipe(pipe)
/**
* This implicit is to enable RichPipe methods directly on Source
* objects, such as map/flatMap, etc...
*
* Note that Mappable is a subclass of Source, and Mappable already
* has mapTo and flatMapTo BUT WITHOUT incoming fields used (see
* the Mappable trait). This creates some confusion when using these methods
* (this is an unfortuate mistake in our design that was not noticed until later).
* To remove ambiguity, explicitly call .read on any Source that you begin
* operating with a mapTo/flatMapTo.
*/
implicit def sourceToRichPipe(src : Source): RichPipe = new RichPipe(src.read)
// This converts an Iterable into a Pipe or RichPipe with index (int-based) fields
implicit def toPipe[T](iter : Iterable[T])(implicit set: TupleSetter[T], conv : TupleConverter[T]): Pipe =
IterableSource[T](iter)(set, conv).read
implicit def iterableToRichPipe[T](iter : Iterable[T])
(implicit set: TupleSetter[T], conv : TupleConverter[T]): RichPipe =
RichPipe(toPipe(iter)(set, conv))
// Override this if you want change how the mapred.job.name is written in Hadoop
def name : String = getClass.getName
//This is the FlowDef used by all Sources this job creates
@transient
implicit protected val flowDef = {
  val fd = new FlowDef
  fd.setName(name)
  fd
}
/** Copy this job
* By default, this uses reflection and the single argument Args constructor
*/
def clone(nextargs: Args): Job =
  this.getClass
    .getConstructor(classOf[Args])
    // Mode.putMode embeds the current mode into the Args handed to the copy
    .newInstance(Mode.putMode(mode, nextargs))
    .asInstanceOf[Job]
/**
* Implement this method if you want some other jobs to run after the current
* job. These will not execute until the current job has run successfully.
*/
def next : Option[Job] = None
/** Keep 100k tuples in memory by default before spilling
* Turn this up as high as you can without getting OOM.
*
* This is ignored if there is a value set in the incoming mode.config
*/
def defaultSpillThreshold: Int = 100 * 1000
/** Override this to control how dates are parsed */
implicit def dateParser: DateParser = DateParser.default
/** Reads the stream to EOF and returns all bytes. Does not close the stream. */
def fromInputStream(s: java.io.InputStream): Array[Byte] = {
  val buffer = new scala.collection.mutable.ArrayBuffer[Byte]()
  var next = s.read()
  while (next != -1) {
    buffer += next.toByte
    next = s.read()
  }
  buffer.toArray
}
/** Renders bytes as upper-case hex, two characters per byte (e.g. Array(0, -1) => "00FF"). */
def toHexString(bytes: Array[Byte]): String = {
  val sb = new StringBuilder(bytes.length * 2)
  for (b <- bytes) sb.append("%02X".format(b))
  sb.toString
}
/** MD5 digest of the given bytes, rendered as upper-case hex. */
def md5Hex(bytes: Array[Byte]): String =
  toHexString(MessageDigest.getInstance("MD5").digest(bytes))
// Generates the MD5 hex of the bytes in the job classfile.
// Fix: the classfile stream is now closed in a finally block, so it is not
// leaked if reading or hashing throws.
lazy val classIdentifier : String = {
  val classAsPath = getClass.getName.replace(".", "/") + ".class"
  val is = getClass.getClassLoader.getResourceAsStream(classAsPath)
  try {
    md5Hex(fromInputStream(is))
  } finally {
    // is may be null if the classfile resource cannot be located
    if (is != null) is.close()
  }
}
/** This is the exact config that is passed to the Cascading FlowConnector.
* By default:
* if there are no spill thresholds in mode.config, we replace with defaultSpillThreshold
* we overwrite io.serializations with ioSerializations
* we overwrite cascading.tuple.element.comparator.default to defaultComparator
* we add some scalding keys for debugging/logging
*
* Tip: override this method, call super, and ++ your additional
* map to add or overwrite more options
*/
def config: Map[AnyRef,AnyRef] = {
  // These are ignored if set in mode.config
  val lowPriorityDefaults =
    Map(SpillableProps.LIST_THRESHOLD -> defaultSpillThreshold.toString,
      SpillableProps.MAP_THRESHOLD -> defaultSpillThreshold.toString,
      AggregateBy.AGGREGATE_BY_THRESHOLD -> defaultSpillThreshold.toString
    )
  // Set up the keys for chill
  val chillConf = ScalaAnyRefMapConfig(lowPriorityDefaults)
  ConfiguredInstantiator.setReflect(chillConf, classOf[serialization.KryoHadoop])
  System.setProperty(AppProps.APP_FRAMEWORKS,
    String.format("scalding:%s", scaldingVersion))
  // Later maps win on ++: mode.config overrides the defaults above, and the
  // scalding bookkeeping keys at the end override everything else.
  val m = chillConf.toMap ++
    mode.config ++
    // Optionally set a default Comparator
    (defaultComparator match {
      case Some(defcomp) => Map(FlowProps.DEFAULT_ELEMENT_COMPARATOR -> defcomp.getName)
      case None => Map.empty[AnyRef, AnyRef]
    }) ++
    Map(
      "io.serializations" -> ioSerializations.map { _.getName }.mkString(","),
      "scalding.version" -> scaldingVersion,
      "cascading.app.name" -> name,
      "cascading.app.id" -> name,
      "scalding.flow.class.name" -> getClass.getName,
      "scalding.flow.class.signature" -> classIdentifier,
      "scalding.job.args" -> args.toString,
      Job.UNIQUE_JOB_ID -> uniqueId.get
    )
  val tsKey = "scalding.flow.submitted.timestamp"
  // Keep a pre-existing submission timestamp if one was already set upstream
  m.updated(tsKey, m.getOrElse(tsKey, Calendar.getInstance().getTimeInMillis().toString))
}
// Hook to skip flow execution; None means use Cascading's default behavior.
def skipStrategy: Option[FlowSkipStrategy] = None
// Hook to customize per-step planning/submission; None means default.
def stepStrategy: Option[FlowStepStrategy[_]] = None
/**
* combine the config, flowDef and the Mode to produce a flow
*/
def buildFlow: Flow[_] = {
  val flow = mode.newFlowConnector(config).connect(flowDef)
  listeners.foreach { flow.addListener(_) }
  skipStrategy.foreach { flow.setFlowSkipStrategy(_) }
  stepStrategy.foreach { flow.setFlowStepStrategy(_) }
  flow
}
// called before run
// only override if you do not use flowDef
def validate {
  FlowStateMap.validateSources(flowDef, mode)
}
// called after successful run
// only override if you do not use flowDef
def clear {
  FlowStateMap.clear(flowDef)
}
/** Records the Cascading stats, optionally dumps them to a JSON file, and
 * prints any custom counters.
 * Fix: the stats-file writer is now closed in a finally block so a failure
 * while serializing or writing no longer leaks the file handle.
 */
protected def handleStats(statsData: CascadingStats) {
  scaldingCascadingStats = Some(statsData)
  // TODO: Why the two ways to do stats? Answer: jank-den.
  if(args.boolean("scalding.flowstats")) {
    val statsFilename = args.getOrElse("scalding.flowstats", name + "._flowstats.json")
    val br = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(statsFilename), "utf-8"))
    try {
      br.write(JobStats(statsData).toJson)
    } finally {
      br.close()
    }
  }
  // Print custom counters unless --scalding.nocounters is used or there are no custom stats
  if (!args.boolean("scalding.nocounters")) {
    implicit val statProvider = statsData
    val jobStats = Stats.getAllCustomCounters
    if (!jobStats.isEmpty) {
      println("Dumping custom counters:")
      jobStats.foreach { case (counter, value) =>
        println("%s\t%s".format(counter, value))
      }
    }
  }
}
// TODO design a better way to test stats.
// This awful name is designed to avoid collision
// with subclasses
@transient
private[scalding] var scaldingCascadingStats: Option[CascadingStats] = None
//Override this if you need to do some extra processing other than complete the flow
def run: Boolean = {
  val flow = buildFlow
  // run the flow to completion, then record/report its stats
  flow.complete
  val statsData = flow.getFlowStats
  handleStats(statsData)
  statsData.isSuccessful
}
//override this to add any listeners you need
def listeners : List[FlowListener] = Nil
/** The exact list of Hadoop serializations passed into the config
* These replace the config serializations
* Cascading tuple serialization should be in this list, and probably
* before any custom code
*/
def ioSerializations: List[Class[_ <: HSerialization[_]]] = List(
  classOf[org.apache.hadoop.io.serializer.WritableSerialization],
  classOf[cascading.tuple.hadoop.TupleSerialization],
  classOf[com.twitter.chill.hadoop.KryoSerialization]
)
/** Override this if you want to customize comparisons/hashing for your job
* the config method overwrites using this before sending to cascading
*/
def defaultComparator: Option[Class[_ <: java.util.Comparator[_]]] =
  Some(classOf[IntegralComparator])
/**
* This is implicit so that a Source can be used as the argument
* to a join or other method that accepts Pipe.
*/
implicit def read(src : Source) : Pipe = src.read
/** This is only here for Java jobs which cannot automatically
* access the implicit Pipe => RichPipe which makes: pipe.write( )
* convenient
*/
def write(pipe : Pipe, src : Source) {src.writeFrom(pipe)}
/*
* Need to be lazy to be used within pipes.
*/
private lazy val timeoutExecutor =
  Executors.newSingleThreadExecutor(new NamedPoolThreadFactory("job-timer", true))
/*
* Safely execute some operation within a deadline.
*
* TODO: once we have a mechanism to access FlowProcess from user functions, we can use this
* function to allow long running jobs by notifying Cascading of progress.
*/
def timeout[T](timeout: AbsoluteDuration)(t: =>T): Option[T] = {
  val f = timeoutExecutor.submit(new Callable[Option[T]] {
    def call(): Option[T] = Some(t)
  });
  try {
    f.get(timeout.toMillisecs, TimeUnit.MILLISECONDS)
  } catch {
    case _: TimeoutException =>
      // cancel(true) interrupts the worker thread; the caller just sees None
      f.cancel(true)
      None
  }
}
}
/*
 * NamedPoolThreadFactory is copied from util.core to avoid dependency.
 * Produces threads named "<poolName>-<n>" (n starting at 1) inside a
 * dedicated thread group, optionally daemonized, always at normal priority.
 */
class NamedPoolThreadFactory(name: String, makeDaemons: Boolean) extends ThreadFactory {
  def this(name: String) = this(name, false)
  val group = new ThreadGroup(Thread.currentThread().getThreadGroup(), name)
  val threadNumber = new AtomicInteger(1)
  def newThread(r: Runnable) = {
    val threadName = name + "-" + threadNumber.getAndIncrement()
    val created = new Thread(group, r, threadName)
    created.setDaemon(makeDaemons)
    if (created.getPriority != Thread.NORM_PRIORITY) {
      created.setPriority(Thread.NORM_PRIORITY)
    }
    created
  }
}
/**
* Sets up an implicit dateRange to use in your sources and an implicit
* timezone.
* Example args: --date 2011-10-02 2011-10-04 --tz UTC
* If no timezone is given, Pacific is assumed.
*/
trait DefaultDateRangeJob extends Job {
  //Get date implicits and PACIFIC and UTC vals.
  import DateOps._
  // Optionally take --tz argument, or use Pacific time. Derived classes may
  // override defaultTimeZone to change the default.
  def defaultTimeZone = PACIFIC
  implicit lazy val tz = args.optional("tz") match {
    case Some(tzn) => java.util.TimeZone.getTimeZone(tzn)
    case None => defaultTimeZone
  }
  // Optionally take a --period, which determines how many days each job runs over (rather
  // than over the whole date range)
  // --daily and --weekly are aliases for --period 1 and --period 7 respectively
  val period =
    if (args.boolean("daily"))
      1
    else if (args.boolean("weekly"))
      7
    else
      args.getOrElse("period", "0").toInt
  // Full requested range parsed from --date (start and end).
  lazy val (startDate, endDate) = {
    val DateRange(s, e) = DateRange.parse(args.list("date"))
    (s, e)
  }
  // With a period, this job only covers the first period-long window;
  // subtracting one millisecond makes the window end inclusive.
  implicit lazy val dateRange = DateRange(startDate, if (period > 0) startDate + Days(period) - Millisecs(1) else endDate)
  // Chains a follow-up job for the next period window until endDate is covered.
  override def next : Option[Job] =
    if (period > 0) {
      val nextStartDate = startDate + Days(period)
      if (nextStartDate + Days(period - 1) > endDate)
        None // we're done
      else // return a new job with the new startDate
        Some(clone(args + ("date" -> List(nextStartDate.toString("yyyy-MM-dd"), endDate.toString("yyyy-MM-dd")))))
    }
    else
      None
}
// DefaultDateRangeJob with default time zone as UTC instead of Pacific.
// (--tz on the command line still overrides this default.)
trait UtcDateRangeJob extends DefaultDateRangeJob {
  override def defaultTimeZone = DateOps.UTC
}
// Used to inject a typed unique identifier into the Job class
// (surfaced in config under Job.UNIQUE_JOB_ID); wrapping the String avoids
// accidentally passing an arbitrary String where the job id is expected.
case class UniqueID(get: String)
/*
 * Run a list of shell commands through bash in the given order. Return success
 * when all commands succeed. Execution stops after the first failure. The
 * failing command is printed to stdout.
 */
class ScriptJob(cmds: Iterable[String]) extends Job(Args("")) {
  override def run = {
    try {
      // forall short-circuits on the first failing command, matching the
      // original "stop after the first failure" behavior.
      cmds.forall { cmd =>
        val exitStatus = new java.lang.ProcessBuilder("bash", "-c", cmd).start().waitFor()
        if (exitStatus != 0) {
          println(cmd + " failed, exitStatus: " + exitStatus)
        }
        exitStatus == 0
      }
    } catch {
      case e : Exception =>
        e.printStackTrace
        false
    }
  }
}
| danosipov/scalding | scalding-core/src/main/scala/com/twitter/scalding/Job.scala | Scala | apache-2.0 | 15,869 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.io
import java.lang.{Character => JCharacter}
import scala.annotation.switch
import scala.language.postfixOps
/** Contains constants and classifier methods for characters */
trait Chars {
  // Control characters used by the lexer: line feed, form feed,
  // carriage return, and substitute (SU, used as an end-of-input marker).
  final val LF = '\\u000A'
  final val FF = '\\u000C'
  final val CR = '\\u000D'
  final val SU = '\\u001A'
  /** Convert a character digit to an Int according to given base,
  * -1 if no success
  */
  def digit2int(ch: Char, base: Int): Int = {
    val num =
      if (ch <= '9') ch - '0'
      else if ('a' <= ch && ch <= 'z') ch - 'a' + 10
      else if ('A' <= ch && ch <= 'Z') ch - 'A' + 10
      else -1
    // reject digits that are out of range for the requested base
    if (0 <= num && num < base) num else -1
  }
  /** Buffer for creating '\\ u XXXX' strings. */
  // NOTE(review): shared mutable buffer — char2uescape is not thread-safe;
  // confirm it is only called from a single-threaded lexer.
  private[this] val char2uescapeArray = Array[Char]('\\\\', 'u', 0, 0, 0, 0)
  /** Convert a character to a backslash-u escape */
  def char2uescape(c: Char): String = {
    @inline def hexChar(ch: Int): Char =
      ( if (ch < 10) '0' else 'A' - 10 ) + ch toChar
    // fill slots 2-5 with the four hex nibbles of the code point
    char2uescapeArray(2) = hexChar(c >> 12 )
    char2uescapeArray(3) = hexChar((c >> 8) % 16)
    char2uescapeArray(4) = hexChar((c >> 4) % 16)
    char2uescapeArray(5) = hexChar(c % 16)
    new String(char2uescapeArray)
  }
  /** Is character a line break? */
  def isLineBreakChar(c: Char): Boolean = (c: @switch) match {
    case LF|FF|CR|SU => true
    case _ => false
  }
  /** Is character a whitespace character (but not a new line)? */
  def isWhitespace(c: Char): Boolean =
    c == ' ' || c == '\\t' || c == CR
  /** Can character form part of a doc comment variable $xxx? */
  def isVarPart(c: Char): Boolean =
    '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
  // NOTE(review): delegates to isJavaIdentifierPart, not isJavaIdentifierStart,
  // so e.g. digits can start an identifier — presumably deliberate for jawa
  // tokens; confirm.
  def isIdentifierStart(c: Char): Boolean =
    (c == '`') || Character.isJavaIdentifierPart(c)
  def isIdentifierPart(c: Char, isGraveAccent: Boolean): Boolean = {
    // NOTE(review): (c != '`' || c != ' ') is a tautology (no char equals both
    // '`' and ' '), so this conjunct always passes — '&&' may have been
    // intended; confirm intended semantics before changing.
    (c != '`' || c != ' ') &&
    {if(isGraveAccent) {c == '.' || c == '/' || c == ';' || c == ':' || c == '_' || c == '(' || c == ')' || c == '<' || c == '>' || Character.isJavaIdentifierPart(c)}
    else {c != '.' && Character.isJavaIdentifierPart(c)}}
  }
  // True for Unicode math/other symbol characters.
  def isSpecial(c: Char): Boolean = {
    val chtp = Character.getType(c)
    chtp == Character.MATH_SYMBOL.toInt || chtp == Character.OTHER_SYMBOL.toInt
  }
  private final val otherLetters = Set[Char]('\\u0024', '\\u005F') // '$' and '_'
  private final val letterGroups = {
    import JCharacter._
    Set[Byte](LOWERCASE_LETTER, UPPERCASE_LETTER, OTHER_LETTER, TITLECASE_LETTER, LETTER_NUMBER)
  }
  // Letter per Unicode general category, plus '$' and '_'.
  def isJawaLetter(ch: Char): Boolean = letterGroups(JCharacter.getType(ch).toByte) || otherLetters(ch)
  def isOperatorPart(c: Char): Boolean = (c: @switch) match {
    case '+' | '-' | '/' | '\\\\' | '*' | '%' | '&' | '|' | '?' | '>' | '<' | '=' | '~' | ':' => true
    case a => isSpecial(a)
  }
}
// Singleton entry point for the classifier methods above.
object Chars extends Chars { }
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/core/io/Chars.scala | Scala | apache-2.0 | 3,224 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.http
import java.io.File
import java.lang.reflect.InvocationTargetException
import java.util
import java.util.concurrent.CompletableFuture
import java.util.concurrent.CompletionStage
import javax.inject.Provider
import play._
import play.api.mvc.RequestHeader
import play.api.test.ApplicationFactories
import play.api.test.ApplicationFactory
import play.api.test.PlaySpecification
import play.api.OptionalSourceMapper
import play.api.{ Application => ScalaApplication }
import play.core.BuildLink
import play.core.HandleWebCommandSupport
import play.core.SourceMapper
import play.http.HttpErrorHandler
import play.it.test.EndpointIntegrationSpecification
import play.it.test.OkHttpEndpointSupport
import play.mvc.EssentialAction
import play.mvc.EssentialFilter
import play.mvc.Http
import play.mvc.Result
import play.routing.RequestFunctions
import play.routing.RoutingDslComponents
/**
 * Verifies that a user-supplied Java `HttpErrorHandler` is invoked for
 * exceptions thrown from actions, from filters, and (in DEV mode) from
 * web command handlers, and that its responses reach the client.
 */
class JavaHttpErrorHandlingSpec
    extends PlaySpecification
    with EndpointIntegrationSpecification
    with ApplicationFactories
    with OkHttpEndpointSupport {
  // Builds a Java-API Play application whose router exposes "/" (succeeds)
  // and "/error" (throws), wires in the given filters, and installs a custom
  // error handler that turns client and server errors into 500 responses
  // with readable bodies.
  def createApplicationFactory(
      applicationContext: ApplicationLoader.Context,
      webCommandHandler: Option[HandleWebCommandSupport],
      filters: Seq[EssentialFilter]
  ): ApplicationFactory = new ApplicationFactory {
    override def create(): ScalaApplication = {
      val components = new BuiltInComponentsFromContext(applicationContext) with RoutingDslComponents {
        import scala.collection.JavaConverters._
        import scala.compat.java8.OptionConverters
        // Add the web command handler if it is available
        webCommandHandler.foreach(webCommands().addHandler)
        override def httpFilters(): util.List[mvc.EssentialFilter] = filters.asJava
        override def router(): routing.Router = {
          routingDsl()
            .GET("/")
            .routingTo(new RequestFunctions.Params0[play.mvc.Result] {
              override def apply(t: Http.Request): mvc.Result = play.mvc.Results.ok("Done!")
            })
            .GET("/error")
            .routingTo(new RequestFunctions.Params0[play.mvc.Result] {
              override def apply(t: Http.Request): mvc.Result = throw new RuntimeException("action exception!")
            })
            .build()
        }
        // Config config, Environment environment, OptionalSourceMapper sourceMapper, Provider<Router> routes
        override def httpErrorHandler(): HttpErrorHandler = {
          val mapper = OptionConverters.toScala(applicationContext.devContext()).map(_.sourceMapper)
          val routesProvider: Provider[play.api.routing.Router] = new Provider[play.api.routing.Router] {
            override def get(): play.api.routing.Router = router().asScala()
          }
          new play.http.DefaultHttpErrorHandler(
            this.config(),
            this.environment(),
            new OptionalSourceMapper(mapper),
            routesProvider
          ) {
            override def onClientError(
                request: Http.RequestHeader,
                statusCode: Int,
                message: String
            ): CompletionStage[Result] = {
              CompletableFuture.completedFuture(mvc.Results.internalServerError(message))
            }
            override def onServerError(request: Http.RequestHeader, exception: Throwable): CompletionStage[Result] = {
              exception match {
                // Reflection-invoked code wraps the real failure; report the cause.
                case ite: InvocationTargetException =>
                  CompletableFuture.completedFuture(
                    mvc.Results.internalServerError(s"got exception: ${exception.getCause.getMessage}")
                  )
                case rex: Throwable =>
                  CompletableFuture.completedFuture(
                    mvc.Results.internalServerError(s"got exception: ${exception.getMessage}")
                  )
              }
            }
          }
        }
      }
      components.application().asScala()
    }
  }
  "The configured HttpErrorHandler" should {
    // App whose filter chain always throws before reaching the action.
    val appFactory: ApplicationFactory = createApplicationFactory(
      applicationContext = new ApplicationLoader.Context(Environment.simple()),
      webCommandHandler = None,
      filters = Seq(
        new EssentialFilter {
          def apply(next: EssentialAction) = {
            throw new RuntimeException("filter exception!")
          }
        }
      )
    )
    // App with no filters, so only the action at "/error" can throw.
    val appFactoryWithoutFilters: ApplicationFactory = createApplicationFactory(
      applicationContext = new ApplicationLoader.Context(Environment.simple()),
      webCommandHandler = None,
      filters = Seq.empty
    )
    "handle exceptions that happen in action" in appFactoryWithoutFilters.withAllOkHttpEndpoints { endpoint =>
      val request = new okhttp3.Request.Builder()
        .url(endpoint.endpoint.pathUrl("/error"))
        .get()
        .build()
      val response = endpoint.client.newCall(request).execute()
      response.code must_== 500
      response.body.string must_== "got exception: action exception!"
    }
    "handle exceptions that happen in filters" in appFactory.withAllOkHttpEndpoints { endpoint =>
      val request = new okhttp3.Request.Builder()
        .url(endpoint.endpoint.pathUrl("/"))
        .get()
        .build()
      val response = endpoint.client.newCall(request).execute()
      response.code must_== 500
      response.body.string must_== "got exception: filter exception!"
    }
    "in DEV mode" in {
      // Minimal no-op BuildLink/SourceMapper so a DevContext can be constructed.
      val buildLink = new BuildLink {
        override def reload(): AnyRef = null
        override def findSource(className: String, line: Integer): Array[AnyRef] = null
        override def projectPath(): File = new File("").getAbsoluteFile
        override def forceReload(): Unit = { /* do nothing */ }
        override def settings(): util.Map[String, String] = util.Collections.emptyMap()
      }
      val devSourceMapper = new SourceMapper {
        override def sourceOf(className: String, line: Option[Int]): Option[(File, Option[Int])] = None
      }
      val scalaApplicationContext = play.api.ApplicationLoader.Context.create(
        environment = play.api.Environment.simple(mode = play.api.Mode.Dev),
        devContext = Some(play.api.ApplicationLoader.DevContext(devSourceMapper, buildLink))
      )
      val applicationContext = new ApplicationLoader.Context(scalaApplicationContext)
      val appWithActionException: ApplicationFactory = createApplicationFactory(
        applicationContext = applicationContext,
        webCommandHandler = None,
        filters = Seq.empty
      )
      val appWithFilterException: ApplicationFactory = createApplicationFactory(
        applicationContext = applicationContext,
        webCommandHandler = None,
        filters = Seq(
          new EssentialFilter {
            def apply(next: EssentialAction) = {
              throw new RuntimeException("filter exception!")
            }
          }
        )
      )
      // Web command handlers only run in DEV mode, so this case lives here.
      val appWithWebCommandExceptions: ApplicationFactory = createApplicationFactory(
        applicationContext = applicationContext,
        webCommandHandler = Some(
          new HandleWebCommandSupport {
            override def handleWebCommand(
                request: RequestHeader,
                buildLink: BuildLink,
                path: File
            ): Option[api.mvc.Result] = {
              throw new RuntimeException("webcommand exception!")
            }
          }
        ),
        Seq.empty
      )
      "handle exceptions that happens in action" in appWithActionException.withAllOkHttpEndpoints { endpoint =>
        val request = new okhttp3.Request.Builder()
          .url(endpoint.endpoint.pathUrl("/error"))
          .get()
          .build()
        val response = endpoint.client.newCall(request).execute()
        response.code must_== 500
        response.body.string must_== "got exception: action exception!"
      }
      "handle exceptions that happens in filters" in appWithFilterException.withAllOkHttpEndpoints { endpoint =>
        val request = new okhttp3.Request.Builder()
          .url(endpoint.endpoint.pathUrl("/"))
          .get()
          .build()
        val response = endpoint.client.newCall(request).execute()
        response.code must_== 500
        response.body.string must_== "got exception: filter exception!"
      }
      "handle exceptions that happens in web command" in appWithWebCommandExceptions.withAllOkHttpEndpoints {
        endpoint =>
          val request = new okhttp3.Request.Builder()
            .url(endpoint.endpoint.pathUrl("/"))
            .get()
            .build()
          val response = endpoint.client.newCall(request).execute()
          response.code must_== 500
          response.body.string must_== "got exception: webcommand exception!"
      }
    }
  }
}
| benmccann/playframework | core/play-integration-test/src/it/scala/play/it/http/JavaHttpErrorHandlingSpec.scala | Scala | apache-2.0 | 9,001 |
package me.tongfei.evalmetric
/**
* @author Tongfei Chen
*/
/**
 * Evaluates ranked retrieval results: for each query, `retrieved` holds the
 * ranked candidate list and `relevant` holds the relevance predicate.
 */
class RetrievalRanking[T] private(val retrieved: Seq[Seq[T]], val relevant: Seq[T => Boolean]) {
  /** Reciprocal rank: 1 / (1-based rank of the first relevant candidate), 0 if none. */
  private def rr(top: Seq[T], rel: T => Boolean): Double =
    top.iterator.zipWithIndex.collectFirst {
      case (cand, rank) if rel(cand) => 1.0 / (rank + 1)
    }.getOrElse(0.0)

  /** Average precision: mean of the precision values taken at each relevant rank. */
  private def ap(top: Seq[T], rel: T => Boolean): Double = {
    val precisionsAtHits =
      top.zipWithIndex
        .filter { case (cand, _) => rel(cand) }
        .zipWithIndex
        .map { case ((_, rank), hit) => (hit + 1).toDouble / (rank + 1) }
    if (precisionsAtHits.isEmpty) 0.0 else precisionsAtHits.sum / precisionsAtHits.size
  }

  /** Precision @ k: fraction of the first k candidates that are relevant. */
  private def pAtK(k: Int)(top: Seq[T], rel: T => Boolean): Double =
    top.take(k).count(rel).toDouble / k

  /** Mean of the metric over all query pairs; 0.0 when the total is 0. */
  private def mean(f: (Seq[T], T => Boolean) => Double): Double = {
    val scores = retrieved.zip(relevant).map { case (top, rel) => f(top, rel) }
    val total = scores.sum
    if (total == 0.0) 0.0 else total / scores.size
  }

  /** Returns the precision @ ''k''. */
  def precisionAtK(k: Int) = mean(pAtK(k))

  /** Returns the mean reciprocal rank (MRR). */
  def meanReciprocalRank = mean(rr)

  /** Returns the mean average precision (MAP). */
  def meanAveragePrecision = mean(ap)
}

object RetrievalRanking {
  /** Builds a ranking evaluation from per-query predictions and relevance judgments. */
  def apply[T](predicted: Seq[Seq[T]], gold: Seq[T => Boolean]) =
    new RetrievalRanking(predicted, gold)
}
| ctongfei/evalmetric | src/main/scala/me/tongfei/evalmetric/RetrievalRanking.scala | Scala | mit | 1,411 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
package record
package field
import scala.xml.{Node, NodeSeq, Text}
import net.liftweb.common.{Box, Empty, Failure, Full}
import net.liftweb.http.S
import net.liftweb.http.js.JE._
import net.liftweb.util.{FatLazy, FieldError, Helpers, Safe}
import Helpers._
// JSON-serializable password value: the (possibly encrypted) password text and
// the salt it was encrypted with. An empty salt marks a not-yet-encrypted value.
case class Password(pwd: String, salt: String) extends JsonObject[Password] {
  def meta = Password
}
object Password extends JsonObjectMeta[Password] {
  // Builds an unencrypted Password (empty salt); it is encrypted on field set.
  def apply(in: String): Password = Password(in, "")
}
object MongoPasswordField {
  // Placeholder value shown/accepted in forms instead of the real password.
  val blankPw = "*******"
  // Derives the stored value by hashing the password together with its salt
  // (uses Helpers.hash — presumably SHA-based; confirm against lift-util).
  def encrypt(s: String, salt: String) = hash("{"+s+"} salt={" + salt + "}")
}
/** A salted, hashed password field for Mongo-backed Lift records.
 * Stores a [[Password]] (hash + salt); validates the raw, pre-encryption value.
 */
class MongoPasswordField[OwnerType <: BsonRecord[OwnerType]](rec: OwnerType, minLen: Int) extends JsonObjectField[OwnerType, Password](rec, Password) {
  // Convenience constructor with a minimum password length of 3.
  def this(rec: OwnerType) = {
    this(rec, 3)
  }
  def setPassword(in: String) = set(Password(in))
  // Lazily generated random salt, fixed for the lifetime of this field instance.
  private val salt_i = FatLazy(Safe.randomString(16))
  // Holds the value exactly as passed to set_! (pre-encryption), so validations
  // can check the raw password rather than its hash.
  var validatorValue: Box[Password] = valueBox
  override def set_!(in: Box[Password]): Box[Password] = {
    validatorValue = in
    in.map(p =>
      if (p.salt.length == 0) // only encrypt the password if it hasn't already been encrypted
        Password(MongoPasswordField.encrypt(p.pwd, salt_i.get), salt_i.get)
      else
        p
    )
  }
  // Validate the raw (pre-encryption) value captured in set_!.
  override def validate: List[FieldError] = runValidation(validatorValue)
  // HTML password input bound (via S.fmapFunc) to setPassword on submit.
  private def elem = S.fmapFunc(S.SFuncHolder(this.setPassword(_))) {
    funcName => <input type="password"
      name={funcName}
      value=""
      tabindex={tabIndex.toString}/>}
  override def toForm: Box[NodeSeq] =
    uniqueFieldId match {
      case Full(id) => Full(elem % ("id" -> id))
      case _ => Full(elem)
    }
  // Rejects unset/blank/placeholder passwords and those shorter than minLen.
  private def validatePassword(pwd: Password): List[FieldError] = pwd match {
    case null | Password("", _) | Password("*", _) | Password(MongoPasswordField.blankPw, _) =>
      Text(S.?("password.must.be.set"))
    case Password(pwd, _) if pwd.length < minLen =>
      Text(S.?("password.too.short"))
    case _ => Nil
  }
  override def validations = validatePassword _ :: Nil
  override def defaultValue = Password("")
  override def asJs = valueBox.map(vb => Str(vb.pwd)) openOr Str(defaultValue.pwd)
  // True when toMatch, encrypted with the stored salt, equals the stored hash.
  def isMatch(toMatch: String): Boolean =
    MongoPasswordField.encrypt(toMatch, value.salt) == value.pwd
}
| lzpfmh/framework-2 | persistence/mongodb-record/src/main/scala/net/liftweb/mongodb/record/field/MongoPasswordField.scala | Scala | apache-2.0 | 2,954 |
package Objects
import Utils.{DebugInfo, Constants}
import spray.httpx.SprayJsonSupport
import spray.json._
/** spray-json formats for the API's domain objects.
 * Fix: in DebugActorJsonFormat.read, the pattern binders for
 * "post-pictureUpdates" and "post-postUpdates" were in the opposite order to
 * the getFields argument list (getFields returns values in argument order),
 * silently swapping the two counters on deserialization. The binder order now
 * matches the field-name order.
 */
object ObjectJsonSupport extends DefaultJsonProtocol with SprayJsonSupport {
  // Renders a mutable Set[Int] as a JSON array of numbers.
  def setToJsArray(setObj: scala.collection.mutable.Set[Int]) = JsArray(setObj.map(JsNumber(_)).toVector)
  implicit object BaseObjectJsonFormat extends RootJsonFormat[BaseObject] {
    def write(bs: BaseObject) = JsObject("id" -> JsNumber(bs.id),
      "likes" -> setToJsArray(bs.likes))
    def read(json: JsValue) = json.asJsObject.getFields("id", "likes") match {
      case Seq(JsNumber(id), JsArray(ids)) =>
        val bs = BaseObject(id.toInt)
        ids.foreach { likeId => bs.appendLike(likeId.convertTo[Int]) }
        bs
      case _ => throw new DeserializationException("Failed to deser BaseObject")
    }
  }
  implicit object DebugActorJsonFormat extends RootJsonFormat[DebugInfo] {
    def write(da: DebugInfo) = JsObject(
      "put-profiles" -> JsNumber(da.debugVar(Constants.putProfilesChar)),
      "put-posts" -> JsNumber(da.debugVar(Constants.putPostsChar)),
      "put-albums" -> JsNumber(da.debugVar(Constants.putAlbumsChar)),
      "put-pictures" -> JsNumber(da.debugVar(Constants.putPicturesChar)),
      "put-requestPersecond" -> JsNumber(da.putRequestPerSecond()),
      "post-friendlistUpdates" -> JsNumber(da.debugVar(Constants.postFlChar)),
      "post-userUpdates" -> JsNumber(da.debugVar(Constants.postUserChar)),
      "post-pageUpdates" -> JsNumber(da.debugVar(Constants.postPageChar)),
      "post-pictureUpdates" -> JsNumber(da.debugVar(Constants.postPictureChar)),
      "post-postUpdates" -> JsNumber(da.debugVar(Constants.postPostChar)),
      "post-albumUpdates" -> JsNumber(da.debugVar(Constants.postAlbumChar)),
      "post-requestPersecond" -> JsNumber(da.postRequestPerSecond()),
      "delete-users" -> JsNumber(da.debugVar(Constants.deleteUserChar)),
      "delete-pages" -> JsNumber(da.debugVar(Constants.deletePageChar)),
      "delete-posts" -> JsNumber(da.debugVar(Constants.deletePostChar)),
      "delete-pictures" -> JsNumber(da.debugVar(Constants.deletePictureChar)),
      "delete-albums" -> JsNumber(da.debugVar(Constants.deleteAlbumChar)),
      "delete-requestPersecond" -> JsNumber(da.deleteRequestPerSecond()),
      "get-profiles" -> JsNumber(da.debugVar(Constants.getProfilesChar)),
      "get-posts" -> JsNumber(da.debugVar(Constants.getPostsChar)),
      "get-albums" -> JsNumber(da.debugVar(Constants.getAlbumsChar)),
      "get-pictures" -> JsNumber(da.debugVar(Constants.getPicturesChar)),
      "get-friendlistUpdates" -> JsNumber(da.debugVar(Constants.getFlChar)),
      "get-requestPersecond" -> JsNumber(da.getRequestPerSecond()),
      "get-feed" -> JsNumber(da.debugVar(Constants.getFeedChar)),
      "likes" -> JsNumber(da.debugVar(Constants.postLikeChar)),
      "all-requestPersecond" -> JsNumber(da.allRequestPerSecond())
    )
    def read(value: JsValue) = {
      val da = DebugInfo()
      value.asJsObject.getFields("put-profiles", "put-posts", "put-albums", "put-pictures",
        "post-friendlistUpdates", "post-userUpdates", "post-pageUpdates", "post-pictureUpdates", "post-postUpdates", "post-albumUpdates",
        "delete-users", "delete-pages", "delete-posts", "delete-pictures", "delete-albums",
        "get-profiles", "get-posts", "get-albums", "get-pictures", "get-friendlistUpdates", "get-feed", "likes") match {
        // getFields returns values in the order the names are listed above,
        // so these binders MUST follow that exact order.
        case Seq(JsNumber(put_profiles),
          JsNumber(put_posts),
          JsNumber(put_albums),
          JsNumber(put_pictures),
          JsNumber(post_friendlistUpdates),
          JsNumber(post_userUpdates),
          JsNumber(post_pageUpdates),
          JsNumber(post_pictureUpdates),
          JsNumber(post_postUpdates),
          JsNumber(post_albumUpdates),
          JsNumber(delete_users),
          JsNumber(delete_pages),
          JsNumber(delete_posts),
          JsNumber(delete_pictures),
          JsNumber(delete_albums),
          JsNumber(get_profiles),
          JsNumber(get_posts),
          JsNumber(get_albums),
          JsNumber(get_pictures),
          JsNumber(get_friendlistUpdates),
          JsNumber(get_feed),
          JsNumber(likes)) =>
          da.debugVar(Constants.putProfilesChar) = put_profiles.toInt
          da.debugVar(Constants.putPostsChar) = put_posts.toInt
          da.debugVar(Constants.putAlbumsChar) = put_albums.toInt
          da.debugVar(Constants.putPicturesChar) = put_pictures.toInt
          da.debugVar(Constants.postFlChar) = post_friendlistUpdates.toInt
          da.debugVar(Constants.postUserChar) = post_userUpdates.toInt
          da.debugVar(Constants.postPageChar) = post_pageUpdates.toInt
          da.debugVar(Constants.postPostChar) = post_postUpdates.toInt
          da.debugVar(Constants.postPictureChar) = post_pictureUpdates.toInt
          da.debugVar(Constants.postAlbumChar) = post_albumUpdates.toInt
          da.debugVar(Constants.deleteUserChar) = delete_users.toInt
          da.debugVar(Constants.deletePageChar) = delete_pages.toInt
          da.debugVar(Constants.deletePostChar) = delete_posts.toInt
          da.debugVar(Constants.deletePictureChar) = delete_pictures.toInt
          da.debugVar(Constants.deleteAlbumChar) = delete_albums.toInt
          da.debugVar(Constants.getProfilesChar) = get_profiles.toInt
          da.debugVar(Constants.getPostsChar) = get_posts.toInt
          da.debugVar(Constants.getAlbumsChar) = get_albums.toInt
          da.debugVar(Constants.getPicturesChar) = get_pictures.toInt
          da.debugVar(Constants.getFlChar) = get_friendlistUpdates.toInt
          da.debugVar(Constants.getFeedChar) = get_feed.toInt
          da.debugVar(Constants.postLikeChar) = likes.toInt
          da
        case _ => throw new DeserializationException("Debug Actor expected")
      }
    }
  }
  implicit object AlbumJsonFormat extends RootJsonFormat[Album] {
    def write(a: Album) = JsObject(
      "createdTime" -> JsString(a.createdTime),
      "updatedTime" -> JsString(a.updatedTime),
      "coverPhoto" -> JsNumber(a.coverPhoto),
      "description" -> JsString(a.description),
      "pictures" -> setToJsArray(a.pictures)
    )
    def read(json: JsValue) = json.asJsObject.
      getFields("createdTime", "updatedTime", "coverPhoto", "description", "pictures") match {
      case Seq(JsString(cTime), JsString(uTime), JsNumber(cInt), JsString(desc), JsArray(pics)) =>
        val a = Album(cTime, uTime, cInt.toInt, desc)
        pics.foreach { pic => a.pictures.add(pic.convertTo[Int]) }
        a
      case _ => throw new DeserializationException("Failed to deser Album")
    }
  }
  // Derived formats for the simple case classes.
  implicit val PostJsonFormat = jsonFormat4(Post)
  implicit val FriendListJsonFormat = jsonFormat2(FriendList)
  implicit val PageJsonFormat = jsonFormat4(Page)
  implicit val PictureJsonFormat = jsonFormat2(Picture)
  implicit val UserJsonFormat = jsonFormat6(User)
  implicit val SecureObjectJsonFormat = jsonFormat6(SecureObject)
  implicit val SecureServerRequestJsonFormat = jsonFormat4(SecureMessage)
  implicit val SecureRequestJsonFormat = jsonFormat4(SecureRequest)
}
| Nirespire/SecureFacebookAPI | src/main/scala/Objects/ObjectJsonSupport.scala | Scala | mit | 7,061 |
package com.artclod.slick
import org.junit.runner.RunWith
import org.scalatestplus.play._
import play.api.test.Helpers._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
class PackageSpec extends PlaySpec {
"listGroupBy" should {
"return empty if input is empty" in {
val grouped = listGroupBy(List[(Int, String)]())(_._1, _._2)
grouped.length mustBe(0)
}
"return groups by key function" in {
val grouped = listGroupBy(List((1, "a"), (1, "b"), (2, "foo")))(_._1, _._2)
grouped mustBe(List( ListGroup(1, List("a", "b")), ListGroup(2, List("foo")))
)
}
}
}
| kristiankime/calc-tutor | test/com/artclod/slick/PackageSpec.scala | Scala | mit | 636 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.Random
import scala.reflect.ClassTag
import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.util.random.RandomSampler
// Partition wrapper that pairs a parent partition with the RNG seed used to
// sample it; the partition index mirrors the parent's.
private[spark]
class PartitionwiseSampledRDDPartition(val prev: Partition, val seed: Long)
  extends Partition with Serializable {
  override val index: Int = prev.index
}
/**
* A RDD sampled from its parent RDD partition-wise. For each partition of the parent RDD,
* a user-specified [[org.apache.spark.util.random.RandomSampler]] instance is used to obtain
* a random sample of the records in the partition. The random seeds assigned to the samplers
* are guaranteed to have different values.
*
* @param prev RDD to be sampled
* @param sampler a random sampler
* @param seed random seed, default to System.nanoTime
* @tparam T input RDD item type
* @tparam U sampled RDD item type
*/
class PartitionwiseSampledRDD[T: ClassTag, U: ClassTag](
    prev: RDD[T],
    sampler: RandomSampler[T, U],
    @transient seed: Long = System.nanoTime)
  extends RDD[U](prev) {

  override def getPartitions: Array[Partition] = {
    // Derive one pseudo-random seed per partition from the RDD-level seed so
    // each partition's sampler draws an independent stream.
    val random = new Random(seed)
    firstParent[T].partitions.map(x => new PartitionwiseSampledRDDPartition(x, random.nextLong()))
  }

  override def getPreferredLocations(split: Partition): Seq[String] =
    // Sampling is a narrow transformation: prefer the parent partition's hosts.
    firstParent[T].preferredLocations(split.asInstanceOf[PartitionwiseSampledRDDPartition].prev)

  override def compute(splitIn: Partition, context: TaskContext): Iterator[U] = {
    val split = splitIn.asInstanceOf[PartitionwiseSampledRDDPartition]
    // Clone the sampler so concurrent tasks never share its mutable RNG state,
    // then seed the clone with this partition's dedicated seed.
    val thisSampler = sampler.clone
    thisSampler.setSeed(split.seed)
    thisSampler.sample(firstParent[T].iterator(split.prev, context))
  }
}
| sryza/spark | core/src/main/scala/org/apache/spark/rdd/PartitionwiseSampledRDD.scala | Scala | apache-2.0 | 2,523 |
package com.gabeos.wafflemusic.camera
import com.typesafe.scalalogging.LazyLogging
import org.opencv.highgui.VideoCapture
import org.opencv.videoio.Videoio._
import org.opencv.core.CvType
/**
* Created by Gabriel Schubiner on 8/25/2014.
*/
/**
 * Metadata for a depth image stream: frame dimensions, the sentinel pixel
 * values marking unreliable measurements, and the OpenCV matrix types for the
 * depth and UV planes.
 * NOTE(review): `log()` zips 5 labels against `productIterator`, so only the
 * first five fields are printed; cvType/uvCVType are omitted from the report.
 */
case class DepthImageInfo(width: Int, height: Int, lowConfidence: Short,
                          saturation: Short, confidenceThreshold: Short,
                          cvType: Int, uvCVType: Int) extends LazyLogging {
  // A depth sample is untrusted when it equals either sentinel or is negative.
  def untrusted(value: Short) = value == saturation || value == lowConfidence || value < 0
  def trusted(value: Short) = !untrusted(value)
  // Logs the first five fields as an aligned "name: value" table.
  def log() = {
    val align = 30
    val names = Seq("Width","Height","Low Confidence","Saturation","Confidence Threshold")
    logger.info(s"\\nDepth Image Info\\n\\t${names.zip(productIterator.toIterable).
      map(sa => sa._1 + ":" + " " * (align - sa._1.length) + sa._2).mkString("\\n\\t")}")
  }
}
object DepthImageInfo {
  /**
   * Builds a DepthImageInfo by querying the Intel Perceptual Computing depth
   * generator properties of an already-open capture device. The matrix types
   * are fixed: 16-bit signed single-channel depth, 32-bit float 2-channel UV.
   */
  def apply(capture: VideoCapture): DepthImageInfo = {
    DepthImageInfo(capture.get(CAP_INTELPERC_DEPTH_GENERATOR | CAP_PROP_FRAME_WIDTH).toInt,
      capture.get(CAP_INTELPERC_DEPTH_GENERATOR | CAP_PROP_FRAME_HEIGHT).toInt,
      capture.get(CAP_INTELPERC_DEPTH_GENERATOR | CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE).toShort,
      capture.get(CAP_INTELPERC_DEPTH_GENERATOR | CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE).toShort,
      capture.get(CAP_INTELPERC_DEPTH_GENERATOR | CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD).toShort,
      CvType.CV_16SC1, CvType.CV_32FC2)
  }
} | gabeos/WaffleMusic | src/main/scala/com/gabeos/wafflemusic/camera/DepthImageInfo.scala | Scala | gpl-2.0 | 1,583 |
package com.pktippa
import scala.math.Ordering
object ComputingTheGCD {

  /**
   * Reads one line of whitespace-separated integers from stdin and prints the
   * GCD of the largest and smallest of them.
   */
  def main(args: Array[String]): Unit = {
    // scala.io.StdIn.readLine replaces the deprecated Predef.readLine
    // (removed in Scala 2.13); behavior is identical.
    val input = scala.io.StdIn.readLine().trim
    // Splitting the values by space and parsing to integers
    val values = input.split(" ").map(_.toInt).toList
    // Sort descending so head is the largest and last the smallest value
    val sortedDesc = values.sorted(Ordering[Int].reverse)
    // Euclid's algorithm works regardless of argument order; we keep the
    // original convention of passing (largest, smallest).
    val gcdVal = gcd(sortedDesc.head, sortedDesc.last)
    println(gcdVal)
  }

  /**
   * Euclidean algorithm: gcd(x, y) = y when x % y == 0, else gcd(y, x % y).
   * Requires y != 0; terminates because the remainder strictly decreases.
   */
  @scala.annotation.tailrec
  def gcd(x: Int, y: Int): Int = {
    val r = x % y
    if (r == 0) y else gcd(y, r)
  }
}
| pk-hackerrank/hr-funcprog | recursion/computing-the-GCD/scala/src/com/pktippa/ComputingTheGCD.scala | Scala | mit | 985 |
package demo
package components
package materialui
import chandu0101.macros.tojs.GhPagesMacros
import chandu0101.scalajs.react.components.materialui.MuiSlider
import japgolly.scalajs.react.ReactComponentB
import japgolly.scalajs.react.vdom.prefix_<^._
/** Demo page showing default, preset-value, and disabled MuiSlider variants. */
object MuiSliderDemo {

  // GhPagesMacros captures the literal source text between the EXAMPLE:START /
  // EXAMPLE:END markers below for display on the demo site, so the marked
  // region is kept free of extra commentary.
  val code = GhPagesMacros.exampleSource

  // EXAMPLE:START

  val component = ReactComponentB[Unit]("MuiSliderDemo")
    .render(P => {
    <.div(
      CodeExample(code, "MuiSlider")(
        MuiSlider(name = "slider1")(),
        MuiSlider(name = "slider2", defaultValue = 0.5)(),
        MuiSlider(name = "slider1", value = 0.3, disabled = true)()
      )
    )
  }).buildU

  // EXAMPLE:END

  def apply() = component()
}
| elacin/scalajs-react-components | demo/src/main/scala/demo/components/materialui/MuiSliderDemo.scala | Scala | apache-2.0 | 721 |
/*
* Copyright (c) 2017 sadikovi
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.github.sadikovi.riff
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import com.github.sadikovi.riff.tree.FilterApi._
import com.github.sadikovi.testutil.implicits._
import com.github.sadikovi.testutil.UnitTestSuite
/**
 * Unit tests for [[FileReader]]: construction and options, stripe evaluation
 * against statistics and column filters, reader-reuse protection, and
 * header/footer access.
 */
class FileReaderSuite extends UnitTestSuite {
  import com.github.sadikovi.riff.RiffTestUtils._

  // Schema shared by all tests; "col2" is the indexed field of the description.
  val schema = StructType(
    StructField("col1", IntegerType) ::
    StructField("col2", StringType) ::
    StructField("col3", LongType) :: Nil)
  val td = new TypeDescription(schema, Array("col2"))

  // Constructing a reader only resolves the path/options; no I/O validation yet.
  test("initialize file reader for non-existent path") {
    withTempDir { dir =>
      val path = dir / "file"
      touch(path)
      val reader = new FileReader(fs, new Configuration(), path)
      reader.filePath() should be (new Path(s"file:$path"))
      reader.bufferSize() should be (Riff.Options.BUFFER_SIZE_DEFAULT)
    }
  }

  // Buffer size option from the Hadoop configuration must be honored.
  test("initialize file reader with different buffer size") {
    withTempDir { dir =>
      val path = dir / "file"
      touch(path)
      val conf = new Configuration()
      conf.setInt(Riff.Options.BUFFER_SIZE, Riff.Options.BUFFER_SIZE_MAX)
      val reader = new FileReader(fs, conf, path)
      reader.filePath() should be (new Path(s"file:$path"))
      reader.bufferSize() should be (Riff.Options.BUFFER_SIZE_MAX)
    }
  }

  // A null predicate state means no filtering: stripes pass through unchanged.
  test("evaluate stripes for null predicate state") {
    val stripes = Array(
      new StripeInformation(1.toByte, 0L, 100, null),
      new StripeInformation(2.toByte, 101L, 100, null),
      new StripeInformation(3.toByte, 202L, 100, null))
    val res = FileReader.evaluateStripes(stripes, null)
    res should be (stripes)
  }

  // With no per-stripe statistics nothing is filtered, but output is sorted.
  test("evaluate stripes for predicate state and no statistics") {
    val stripes = Array(
      new StripeInformation(2.toByte, 101L, 100, null),
      new StripeInformation(1.toByte, 0L, 100, null),
      new StripeInformation(3.toByte, 202L, 100, null))
    val state = new PredicateState(nvl("col1"), td)
    val res = FileReader.evaluateStripes(stripes, state)
    // must be sorted by offset
    res should be (Array(
      new StripeInformation(1.toByte, 0L, 100, null),
      new StripeInformation(2.toByte, 101L, 100, null),
      new StripeInformation(3.toByte, 202L, 100, null)))
  }

  // Only the stripe whose col1 stats range [4, 5] covers the value 5 survives.
  test("evaluate stripes for predicate state - remove some stripes") {
    val stripes = Array(
      new StripeInformation(2.toByte, 101L, 100, Array(
        stats("a", "z", false),
        stats(1, 3, false),
        stats(1L, 3L, false)
      )),
      new StripeInformation(1.toByte, 0L, 100, Array(
        stats("a", "z", false),
        stats(4, 5, false),
        stats(1L, 3L, false)
      )),
      new StripeInformation(3.toByte, 202L, 100, Array(
        stats("a", "z", false),
        stats(1, 3, false),
        stats(1L, 3L, false)
      )))
    val state = new PredicateState(eqt("col1", 5), td)
    val res = FileReader.evaluateStripes(stripes, state)
    // must be sorted by offset
    res should be (Array(stripes(1)))
  }

  // Column filters are consulted after statistics: stripe 0's filter on col2
  // cannot contain "b", so only stripe 1 remains.
  test("evaluate stripes for predicate state with column filters") {
    val stripes = Array(
      new StripeInformation(1.toByte, 0L, 100, Array(
        stats("a", "z", false),
        stats(1, 3, false),
        stats(1L, 3L, false)
      ), Array(
        filter("z"),
        filter(1),
        filter(2L)
      )),
      new StripeInformation(2.toByte, 101L, 100, Array(
        stats("a", "z", false),
        stats(1, 3, false),
        stats(1L, 3L, false)
      ), Array(
        filter("b"),
        filter(1),
        filter(2L)
      )))
    val state = new PredicateState(eqt("col2", "b"), td)
    val res = FileReader.evaluateStripes(stripes, state)
    // stripe 0 should be discarded because of column filter
    res should be (Array(stripes(1)))
  }

  // A reader instance is single-use: any second read attempt must fail,
  // regardless of which read method was called first.
  test("file reader reuse") {
    withTempDir { dir =>
      val writer = Riff.writer(dir / "path", td)
      writer.prepareWrite()
      writer.finishWrite()
      // test prepareRead
      val reader1 = Riff.reader(dir / "path")
      reader1.prepareRead()
      var err = intercept[IOException] { reader1.prepareRead() }
      err.getMessage should be ("Reader reuse")
      err = intercept[IOException] { reader1.readFileInfo(false) }
      err.getMessage should be ("Reader reuse")
      // test readTypeDescription
      val reader2 = Riff.reader(dir / "path")
      reader2.readFileInfo(false)
      err = intercept[IOException] { reader2.readFileInfo(false) }
      err.getMessage should be ("Reader reuse")
      err = intercept[IOException] { reader2.prepareRead() }
      err.getMessage should be ("Reader reuse")
    }
  }

  // An empty file still carries a valid header (type description) and footer
  // (zero records plus per-field statistics).
  test("read file header and footer") {
    withTempDir { dir =>
      val writer = Riff.writer(dir / "path", td)
      writer.prepareWrite()
      writer.finishWrite()
      val reader = Riff.reader(dir / "path")
      reader.readFileInfo(true)
      val h1 = reader.getFileHeader()
      val f1 = reader.getFileFooter()
      h1.getTypeDescription() should be (td)
      h1.state(0) should be (0)
      f1.getNumRecords() should be (0)
      f1.getFileStatistics().length should be (td.size())
    }
  }

  // Accessors must not return stale/null data before any read was performed.
  test("fail to get file header if it is not set") {
    withTempDir { dir =>
      touch(dir / "path")
      val reader = Riff.reader(dir / "path")
      val err = intercept[IllegalStateException] {
        reader.getFileHeader()
      }
      assert(err.getMessage.contains("File header is not set"))
    }
  }

  test("fail to get file footer if it is not set") {
    withTempDir { dir =>
      touch(dir / "path")
      val reader = Riff.reader(dir / "path")
      val err = intercept[IllegalStateException] {
        reader.getFileFooter()
      }
      assert(err.getMessage.contains("File footer is not set"))
    }
  }
}
| sadikovi/riff | format/src/test/scala/com/github/sadikovi/riff/FileReaderSuite.scala | Scala | mit | 7,029 |
package spire.algebra
import scala.{ specialized => spec }
import scala.annotation.tailrec
/**
 * A vector space over the scalar field F that is additionally equipped with
 * an inner (dot) product.
 */
trait InnerProductSpace[V, @spec(Int, Long, Float, Double) F] extends Any with VectorSpace[V, F] { self =>
  /** The inner product of vectors `v` and `w`. */
  def dot(v: V, w: V): F

  /**
   * Derives a normed vector space whose norm is sqrt(v . v); requires an
   * NRoot instance to take square roots in F.
   */
  def normed(implicit ev: NRoot[F]): NormedVectorSpace[V, F] = new NormedInnerProductSpace[V, F] {
    def space = self
    def nroot: NRoot[F] = ev
  }
}
object InnerProductSpace {
  /** Summons the implicit InnerProductSpace instance for (V, R). */
  @inline final def apply[V, @spec(Int,Long,Float,Double) R](implicit V: InnerProductSpace[V, R]): InnerProductSpace[V, R] = V
}
/**
 * Normed vector space backed by an inner product space: every vector-space
 * operation is forwarded to the underlying `space`, and the norm is the
 * Euclidean norm induced by the inner product, sqrt(dot(v, v)).
 */
private[algebra] trait NormedInnerProductSpace[V, @spec(Float, Double) F] extends Any with NormedVectorSpace[V, F] {
  /** The underlying inner product space all operations delegate to. */
  def space: InnerProductSpace[V, F]
  def scalar: Field[F] = space.scalar
  /** Root-finding capability needed to compute sqrt for the norm. */
  def nroot: NRoot[F]
  def zero: V = space.zero
  def plus(v: V, w: V): V = space.plus(v, w)
  def negate(v: V): V = space.negate(v)
  override def minus(v: V, w: V): V = space.minus(v, w)
  def timesl(f: F, v: V): V = space.timesl(f, v)
  override def divr(v: V, f: F): V = space.divr(v, f)
  // Euclidean norm induced by the inner product.
  def norm(v: V): F = nroot.sqrt(space.dot(v, v))
}
| woparry/spire | core/src/main/scala/spire/algebra/InnerProductSpace.scala | Scala | mit | 1,077 |
package edu.cmu.dynet.examples
import edu.cmu.dynet._
import scala.language.implicitConversions
import java.nio.file.Paths
/**
 * DyNet example: trains an LSTM that reads a sentence and predicts its length
 * via Poisson regression, mirroring the C++ `poisson-regression` example.
 */
object PoissonRegression {

  val LAYERS = 2
  val INPUT_DIM = 16
  val HIDDEN_DIM = 32
  // Set after the training vocabulary is read and frozen; must happen before
  // any RNNLengthPredictor is constructed (its lookup table is sized by it).
  var VOCAB_SIZE = 0

  /**
   * LSTM-based length predictor: embeds each token, runs the LSTM over the
   * sentence, and maps the final hidden state to a scalar length prediction.
   */
  class RNNLengthPredictor(model: Model) {
    val builder = new LstmBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
    val p_c = model.addLookupParameters(VOCAB_SIZE, Dim(INPUT_DIM))
    val p_R = model.addParameters(Dim(1, HIDDEN_DIM))
    val p_bias = model.addParameters(Dim(1))

    /**
     * Builds the loss expression for one sentence. When `flag` is set, also
     * forwards the graph and prints predicted vs true length.
     */
    def buildLMGraph(sent: IntVector,
                     len: Int,
                     flag: Boolean=false): Expression = {
      // Feed all tokens except the trailing </s> marker.
      val slen = (sent.size - 1)
      builder.newGraph()
      builder.startNewSequence()

      val R = Expression.parameter(p_R)
      val bias = Expression.parameter(p_bias)

      for (t <- 0 until slen) {
        val i_x_t = Expression.lookup(p_c, sent(t))
        builder.addInput(i_x_t)
      }

      // pred = bias + R * h_last, then Poisson negative log-likelihood.
      val ev = new ExpressionVector(Seq(bias, R, builder.back))
      val pred = Expression.affineTransform(ev)

      if (flag) {
        val x = math.exp(ComputationGraph.incrementalForward(pred).toFloat)
        println(s"PRED = ${x} TRUE = ${len} DIFF = ${x - len}")
      }

      Expression.poissonLoss(pred, len)
    }
  }

  def main(args: Array[String]) {
    Initialize.initialize()
    // Paths are resolved relative to the repository layout of the C++ examples.
    val userDir = System.getProperty("user.dir")
    val CORPUS_FILE = Paths.get(userDir, "../examples/cpp/example-data/train-poi.txt").toString
    val DEV_FILE = Paths.get(userDir, "../examples/cpp/example-data/dev-poi.txt").toString

    val d = new WordDict()
    val kSOS = d.convert("<s>")
    val kEOS = d.convert("</s>")

    val training = new scala.collection.mutable.ArrayBuffer[(IntVector, Int)]
    val dev = new scala.collection.mutable.ArrayBuffer[(IntVector, Int)]

    // Read training corpus; each line is a (sentence, length-label) pair.
    var tlc = 0
    var ttoks = 0
    {
      val td = new WordDict()
      for (line <- scala.io.Source.fromFile(CORPUS_FILE).getLines) {
        tlc += 1
        val (x, y) = WordDict.read_sentence_pair(line, d, td)
        training.append((x, y))
        ttoks += x.size
        // NOTE(review): uses &&, so only a sentence missing BOTH markers is
        // rejected — this matches the upstream C++ example.
        if (x(0) != kSOS && x.last != kEOS) {
          throw new RuntimeException("bad sentence")
        }
      }
      println(s"${tlc} lines ${ttoks} tokens ${d.size} types")
    }
    // Freeze the vocabulary before sizing the model's lookup table.
    d.freeze()
    VOCAB_SIZE = d.size

    // Read dev corpus with the (now frozen) training vocabulary.
    var dlc = 0
    var dtoks = 0
    {
      val td = new WordDict
      for (line <- scala.io.Source.fromFile(DEV_FILE).getLines) {
        dlc += 1
        val (x, y) = WordDict.read_sentence_pair(line, d, td)
        dev.append((x, y))
        dtoks += x.size
        if (x(0) != kSOS && x.last != kEOS) {
          throw new RuntimeException("bad sentence")
        }
      }
      println(s"${dlc} lines ${dtoks} tokens")
    }

    // Best (lowest) total dev loss seen so far.
    var best = Float.MaxValue

    val model = new Model()
    val sgd = new SimpleSGDTrainer(model)
    val lm = new RNNLengthPredictor(model)

    val report_every_i = 50
    val dev_every_i_reports = 20
    // si indexes into the shuffled order; starting at training.size forces a
    // shuffle on the first pass through the loop.
    var si = training.size
    val order = new IntVector((0 until training.size))

    var first = true
    var report = 0
    var lines = 0

    ComputationGraph.renew()

    // Infinite training loop: report train loss every `report_every_i`
    // sentences, and dev loss every `dev_every_i_reports` reports.
    while (true) {
      var loss = 0.0f
      var chars = 0  // NOTE(review): counts sentences per report, not characters
      for (i <- 0 until report_every_i) {
        if (si == training.size) {
          si = 0
          if (first) {
            first = false
          } else {
            sgd.updateEpoch()
          }
          Utilities.shuffle(order)
        }

        // build graph for this instance
        // the cg.clear is IMPORTANT!
        ComputationGraph.clear()

        val (tokens, count) = training(order(si))
        si += 1
        val loss_expr = lm.buildLMGraph(tokens, count)
        loss += ComputationGraph.forward(loss_expr).toFloat
        ComputationGraph.backward(loss_expr)
        sgd.update()
        lines += 1
        chars += 1
      }

      sgd.status()
      println(s"E = ${loss / chars.toFloat} ppl = ${math.exp(loss / chars.toFloat)}")

      report += 1
      if (report % dev_every_i_reports == 0) {
        var dloss = 0.0f
        var dchars = 0
        for ((tokens, count) <- dev) {
          // flag = true prints predicted vs true length per dev sentence.
          val loss_expr = lm.buildLMGraph(tokens, count, true)
          dloss += ComputationGraph.forward(loss_expr).toFloat
          dchars += 1
        }

        if (dloss < best) {
          best = dloss
          println(s"new best: ${best}")
        }

        println(s"DEV [epoch = ${lines / training.size.toFloat}] " +
          s"E = ${dloss / dchars.toFloat} " +
          s"ppl = ${math.exp(dloss / dchars.toFloat)}")
      }
    }
  }
}
| cherryc/dynet | contrib/swig/src/main/scala/edu/cmu/dynet/examples/PoissonRegression.scala | Scala | apache-2.0 | 4,597 |
import java.net.URL
import org.apache.spark.{SparkContext, SparkConf}
import topic.SparkTopicExtractor
/**
* Created by cnavarro on 9/10/15.
*/
/**
 * Entry point that runs the Spark topic extractor over a local text file and
 * prints every extracted topic map to stdout.
 */
object ExecuteSparkTopicExtractor {

  def main(args: Array[String]): Unit = {
    // Input text and taxonomy definition (hard-coded local paths).
    val inputPath = "/home/cnavarro/spark-1.5.1-bin-hadoop2.6/README.md" // Should be some file on your system
    val taxonomyUrl = new URL("file:///home/cnavarro/workspace/mixedemotions/me_extractors/spark_test/src/resources/example_taxonomy.json")
    val extractor = new SparkTopicExtractor(taxonomyUrl)

    // Local Spark context with four worker threads.
    val sparkConf = new SparkConf().setAppName("External Classificator Application").setMaster("local[4]")
    val sparkContext = new SparkContext(sparkConf)

    // Cache the input since the extractor may traverse it more than once.
    val textLines = sparkContext.textFile(inputPath, 2).cache()
    val extractedTopics = extractor.extractTopics(textLines)
    extractedTopics.collect().foreach(println)
  }
}
| canademar/me_extractors | spark_test/src/main/scala/ExecuteSparkTopicExtractor.scala | Scala | gpl-2.0 | 855 |
/**
* Copyright 2015 Gianluca Amato <gamato@unich.it>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.fixpoint
/**
* An IterativeStrategy is a hierarchical ordering, according to Bourdoncle definition in the paper
* "Efficient chaotic iteration strategies with widenings", FMPA'93.
* @tparam U the type of the elements to order
* @param seq a sequence of elements `Left`, `Right`, and `El(u)` of type `IterativeStrategy.StrategyElement[U]`
* which represents the ordering. No check is done that the hierarchical ordering satisfy the fundamental property
* that there are no two consecutive left parentheses.
*/
class IterativeStrategy[U] private (val seq: IndexedSeq[IterativeStrategy.StrategyElement[U]]) extends AnyVal {
  /** The element of the ordering at position `i`. */
  def apply(i: Int): IterativeStrategy.StrategyElement[U] = seq(i)

  /** The total number of elements (parentheses included) in the ordering. */
  def length: Int = seq.length
}

object IterativeStrategy {
  /**
   * One element of a hierarchical ordering: an opening parenthesis (`Left`),
   * a closing parenthesis (`Right`), or a wrapped value `El(u)`.
   */
  sealed abstract class StrategyElement[+U]
  case object Left extends StrategyElement[Nothing]
  case object Right extends StrategyElement[Nothing]
  case class El[U](val u: U) extends StrategyElement[U]

  /** Builds an iterative strategy from the given strategy elements, in order. */
  def apply[U](els: StrategyElement[U]*): IterativeStrategy[U] =
    new IterativeStrategy(els.toIndexedSeq)
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/fixpoint/IterativeStrategy.scala | Scala | lgpl-3.0 | 2,160 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import kafka.cluster.{Broker, Cluster}
import kafka.consumer.{ConsumerThreadId, TopicCount}
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException,
ZkMarshallingError, ZkBadVersionException}
import org.I0Itec.zkclient.serialize.ZkSerializer
import collection._
import kafka.api.LeaderAndIsr
import org.apache.zookeeper.data.Stat
import kafka.admin._
import kafka.common.{KafkaException, NoEpochForPartitionException}
import kafka.controller.ReassignedPartitionsContext
import kafka.controller.KafkaController
import scala.Some
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.common.TopicAndPartition
import scala.collection
object ZkUtils extends Logging {
  // Root znode paths Kafka uses in ZooKeeper.
  val ConsumersPath = "/consumers"
  val BrokerIdsPath = "/brokers/ids"
  val BrokerTopicsPath = "/brokers/topics"
  val TopicConfigPath = "/config/topics"
  // Sequential znodes under this path notify brokers of per-topic config changes.
  val TopicConfigChangesPath = "/config/changes"
  val ControllerPath = "/controller"
  val ControllerEpochPath = "/controller_epoch"
  // Admin command paths: written by admin tools, consumed by the controller.
  val ReassignPartitionsPath = "/admin/reassign_partitions"
  val DeleteTopicsPath = "/admin/delete_topics"
  val PreferredReplicaLeaderElectionPath = "/admin/preferred_replica_election"
def getTopicPath(topic: String): String = {
BrokerTopicsPath + "/" + topic
}
def getTopicPartitionsPath(topic: String): String = {
getTopicPath(topic) + "/partitions"
}
def getTopicConfigPath(topic: String): String =
TopicConfigPath + "/" + topic
def getDeleteTopicPath(topic: String): String =
DeleteTopicsPath + "/" + topic
  /**
   * Reads the current controller's broker id from the controller znode.
   * Throws KafkaException when no controller is registered.
   */
  def getController(zkClient: ZkClient): Int = {
    readDataMaybeNull(zkClient, ControllerPath)._1 match {
      case Some(controller) => KafkaController.parseControllerId(controller)
      case None => throw new KafkaException("Controller doesn't exist")
    }
  }
def getTopicPartitionPath(topic: String, partitionId: Int): String =
getTopicPartitionsPath(topic) + "/" + partitionId
def getTopicPartitionLeaderAndIsrPath(topic: String, partitionId: Int): String =
getTopicPartitionPath(topic, partitionId) + "/" + "state"
  /** All registered broker ids under /brokers/ids, sorted ascending. */
  def getSortedBrokerList(zkClient: ZkClient): Seq[Int] =
    ZkUtils.getChildren(zkClient, BrokerIdsPath).map(_.toInt).sorted
def getAllBrokersInCluster(zkClient: ZkClient): Seq[Broker] = {
val brokerIds = ZkUtils.getChildrenParentMayNotExist(zkClient, ZkUtils.BrokerIdsPath).sorted
brokerIds.map(_.toInt).map(getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get)
}
  /** Leader and ISR for a partition, or None when the state znode is absent. */
  def getLeaderAndIsrForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderAndIsr] = {
    ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition).map(_.leaderAndIsr)
  }
  /** Creates (if missing) the standard persistent znodes a broker expects. */
  def setupCommonPaths(zkClient: ZkClient) {
    for(path <- Seq(ConsumersPath, BrokerIdsPath, BrokerTopicsPath, TopicConfigChangesPath, TopicConfigPath, DeleteTopicsPath))
      makeSurePersistentPathExists(zkClient, path)
  }
  /**
   * Current leader broker id of a partition, parsed from its leader-and-ISR
   * znode; None when the znode is missing or its content is not valid JSON.
   */
  def getLeaderForPartition(zkClient: ZkClient, topic: String, partition: Int): Option[Int] = {
    val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
    leaderAndIsrOpt match {
      case Some(leaderAndIsr) =>
        Json.parseFull(leaderAndIsr) match {
          case Some(m) =>
            // NOTE(review): a parsed map without a "leader" key would throw
            // NoSuchElementException here rather than return None.
            Some(m.asInstanceOf[Map[String, Any]].get("leader").get.asInstanceOf[Int])
          case None => None
        }
      case None => None
    }
  }
  /**
   * This API should read the epoch in the ISR path. It is sufficient to read the epoch in the ISR path, since if the
   * leader fails after updating epoch in the leader path and before updating epoch in the ISR path, effectively some
   * other broker will retry becoming leader with the same new epoch value.
   * Throws NoEpochForPartitionException when the znode is missing or invalid.
   */
  def getEpochForPartition(zkClient: ZkClient, topic: String, partition: Int): Int = {
    val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
    leaderAndIsrOpt match {
      case Some(leaderAndIsr) =>
        Json.parseFull(leaderAndIsr) match {
          // Unlike getLeaderForPartition, a malformed znode here is an error.
          case None => throw new NoEpochForPartitionException("No epoch, leaderAndISR data for partition [%s,%d] is invalid".format(topic, partition))
          case Some(m) => m.asInstanceOf[Map[String, Any]].get("leader_epoch").get.asInstanceOf[Int]
        }
      case None => throw new NoEpochForPartitionException("No epoch, ISR path for partition [%s,%d] is empty"
        .format(topic, partition))
    }
  }
  /**
   * Gets the in-sync replicas (ISR) for a specific topic and partition.
   * Returns an empty sequence when the state znode is missing or unparsable.
   */
  def getInSyncReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
    val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
    leaderAndIsrOpt match {
      case Some(leaderAndIsr) =>
        Json.parseFull(leaderAndIsr) match {
          case Some(m) => m.asInstanceOf[Map[String, Any]].get("isr").get.asInstanceOf[Seq[Int]]
          case None => Seq.empty[Int]
        }
      case None => Seq.empty[Int]
    }
  }
  /**
   * Gets the assigned replicas (AR) for a specific topic and partition.
   * Reads the topic znode's "partitions" map; returns an empty sequence when
   * the znode, the map, or the partition entry is missing.
   */
  def getReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
    val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
    jsonPartitionMapOpt match {
      case Some(jsonPartitionMap) =>
        Json.parseFull(jsonPartitionMap) match {
          case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match {
            case Some(replicaMap) => replicaMap.asInstanceOf[Map[String, Seq[Int]]].get(partition.toString) match {
              case Some(seq) => seq
              case None => Seq.empty[Int]
            }
            case None => Seq.empty[Int]
          }
          case None => Seq.empty[Int]
        }
      case None => Seq.empty[Int]
    }
  }
  /**
   * Registers this broker as an ephemeral znode under /brokers/ids. Uses the
   * ZK-session-bug-aware create so that a stale ephemeral node left by this
   * broker's previous session is waited out instead of treated as a conflict.
   * Throws RuntimeException when another live broker owns the same id.
   */
  def registerBrokerInZk(zkClient: ZkClient, id: Int, host: String, port: Int, timeout: Int, jmxPort: Int) {
    val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id
    val timestamp = SystemTime.milliseconds.toString
    val brokerInfo = Json.encode(Map("version" -> 1, "host" -> host, "port" -> port, "jmx_port" -> jmxPort, "timestamp" -> timestamp))
    // The checker compares the stored registration against this expected value.
    val expectedBroker = new Broker(id, host, port)

    try {
      createEphemeralPathExpectConflictHandleZKBug(zkClient, brokerIdPath, brokerInfo, expectedBroker,
        (brokerString: String, broker: Any) => Broker.createBroker(broker.asInstanceOf[Broker].id, brokerString).equals(broker.asInstanceOf[Broker]),
        timeout)

    } catch {
      case e: ZkNodeExistsException =>
        throw new RuntimeException("A broker is already registered on the path " + brokerIdPath
          + ". This probably " + "indicates that you either have configured a brokerid that is already in use, or "
          + "else you have shutdown this broker and restarted it faster than the zookeeper "
          + "timeout so it appears to be re-registering.")
    }
    info("Registered broker %d at path %s with address %s:%d.".format(id, brokerIdPath, host, port))
  }
def getConsumerPartitionOwnerPath(group: String, topic: String, partition: Int): String = {
val topicDirs = new ZKGroupTopicDirs(group, topic)
topicDirs.consumerOwnerDir + "/" + partition
}
  /** JSON payload written to a partition's leader-and-ISR ("state") znode. */
  def leaderAndIsrZkData(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int): String = {
    Json.encode(Map("version" -> 1, "leader" -> leaderAndIsr.leader, "leader_epoch" -> leaderAndIsr.leaderEpoch,
                    "controller_epoch" -> controllerEpoch, "isr" -> leaderAndIsr.isr))
  }
  /**
   * Encodes a partition -> replica-list assignment as the JSON payload stored
   * in the topic znode.
   */
  def replicaAssignmentZkData(map: Map[String, Seq[Int]]): String = {
    Json.encode(Map("version" -> 1, "partitions" -> map))
  }
  /**
   * Makes sure a persistent path exists in ZK, creating it (and any missing
   * parents) if it does not.
   */
  def makeSurePersistentPathExists(client: ZkClient, path: String) {
    if (!client.exists(path))
      client.createPersistent(path, true) // won't throw NoNodeException or NodeExistsException
  }
  /**
   * Creates the persistent parent of `path` (recursively) when it is non-empty.
   * NOTE(review): assumes `path` contains a '/'; substring would throw for a
   * path without one.
   */
  private def createParentPath(client: ZkClient, path: String): Unit = {
    val parentDir = path.substring(0, path.lastIndexOf('/'))
    if (parentDir.length != 0)
      client.createPersistent(parentDir, true)
  }
  /**
   * Creates an ephemeral node with the given path and data, creating missing
   * parents on demand (they must be persistent; only the leaf is ephemeral).
   */
  private def createEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
    try {
      client.createEphemeral(path, data)
    } catch {
      case e: ZkNoNodeException => {
        // Parent chain does not exist yet: create it, then retry the leaf.
        createParentPath(client, path)
        client.createEphemeral(path, data)
      }
    }
  }
  /**
   * Creates an ephemeral node with the given path and data.
   * Throws ZkNodeExistsException if a node with *different* data already
   * exists; finding a node with identical data (a retried create after
   * connection loss) is treated as success.
   */
  def createEphemeralPathExpectConflict(client: ZkClient, path: String, data: String): Unit = {
    try {
      createEphemeralPath(client, path, data)
    } catch {
      case e: ZkNodeExistsException => {
        // this can happen when there is connection loss; make sure the data is what we intend to write
        var storedData: String = null
        try {
          storedData = readData(client, path)._1
        } catch {
          case e1: ZkNoNodeException => // the node disappeared; treat as if node existed and let caller handles this
          case e2: Throwable => throw e2
        }
        if (storedData == null || storedData != data) {
          info("conflict in " + path + " data: " + data + " stored data: " + storedData)
          throw e
        } else {
          // otherwise, the creation succeeded, return normally
          info(path + " exists with value " + data + " during connection loss; this is ok")
        }
      }
      case e2: Throwable => throw e2
    }
  }
  /**
   * Create an ephemeral node with the given path and data.
   * Throw NodeExistsException if node already exists.
   * Handles the following ZK session timeout bug:
   *
   * https://issues.apache.org/jira/browse/ZOOKEEPER-1740
   *
   * Upon receiving a NodeExistsException, read the data from the conflicted path and
   * trigger the checker function comparing the read data and the expected data,
   * If the checker function returns true then the above bug might be encountered, back off and retry;
   * otherwise re-throw the exception
   */
  def createEphemeralPathExpectConflictHandleZKBug(zkClient: ZkClient, path: String, data: String, expectedCallerData: Any, checker: (String, Any) => Boolean, backoffTime: Int): Unit = {
    // Loop until either the create succeeds or a genuine conflict is re-thrown.
    while (true) {
      try {
        createEphemeralPathExpectConflict(zkClient, path, data)
        return
      } catch {
        case e: ZkNodeExistsException => {
          // An ephemeral node may still exist even after its corresponding session has expired
          // due to a Zookeeper bug, in this case we need to retry writing until the previous node is deleted
          // and hence the write succeeds without ZkNodeExistsException
          ZkUtils.readDataMaybeNull(zkClient, path)._1 match {
            case Some(writtenData) => {
              if (checker(writtenData, expectedCallerData)) {
                info("I wrote this conflicted ephemeral node [%s] at %s a while back in a different session, ".format(data, path)
                  + "hence I will backoff for this node to be deleted by Zookeeper and retry")

                Thread.sleep(backoffTime)
              } else {
                throw e
              }
            }
            case None => // the node disappeared; retry creating the ephemeral node immediately
          }
        }
        case e2: Throwable => throw e2
      }
    }
  }
  /**
   * Creates a persistent node with the given path and data, creating missing
   * parents on demand.
   */
  def createPersistentPath(client: ZkClient, path: String, data: String = ""): Unit = {
    try {
      client.createPersistent(path, data)
    } catch {
      case e: ZkNoNodeException => {
        // Parent chain does not exist yet: create it, then retry the leaf.
        createParentPath(client, path)
        client.createPersistent(path, data)
      }
    }
  }
  /** Creates a persistent sequential znode; returns the actual created path. */
  def createSequentialPersistentPath(client: ZkClient, path: String, data: String = ""): String = {
    client.createPersistentSequential(path, data)
  }
  /**
   * Updates the value of a persistent node with the given path and data,
   * creating the node (and its parents) if necessary. Never throws
   * ZkNodeExistsException.
   * NOTE(review): despite the historical comment claiming a zkVersion return,
   * this method returns Unit; use conditionalUpdatePersistentPath for
   * versioned updates.
   */
  def updatePersistentPath(client: ZkClient, path: String, data: String) = {
    try {
      client.writeData(path, data)
    } catch {
      case e: ZkNoNodeException => {
        createParentPath(client, path)
        try {
          client.createPersistent(path, data)
        } catch {
          // Another writer created the node between our attempts: just write.
          case e: ZkNodeExistsException =>
            client.writeData(path, data)
          case e2: Throwable => throw e2
        }
      }
      case e2: Throwable => throw e2
    }
  }
  /**
   * Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the path doesn't
   * exist, the current version is not the expected version, etc.) return (false, -1)
   *
   * When there is a ConnectionLossException during the conditional update, zkClient will retry the update and may fail
   * since the previous update may have succeeded (but the stored zkVersion no longer matches the expected one).
   * In this case, we will run the optionalChecker to further check if the previous write did indeed succeeded.
   */
  def conditionalUpdatePersistentPath(client: ZkClient, path: String, data: String, expectVersion: Int,
    optionalChecker:Option[(ZkClient, String, String) => (Boolean,Int)] = None): (Boolean, Int) = {
    try {
      val stat = client.writeDataReturnStat(path, data, expectVersion)
      debug("Conditional update of path %s with value %s and expected version %d succeeded, returning the new version: %d"
        .format(path, data, expectVersion, stat.getVersion))
      (true, stat.getVersion)
    } catch {
      case e1: ZkBadVersionException =>
        // Version mismatch may be a retried write that already succeeded; let
        // the caller-provided checker decide before reporting failure.
        optionalChecker match {
          case Some(checker) => return checker(client, path, data)
          case _ => debug("Checker method is not passed skipping zkData match")
        }
        warn("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
          expectVersion, e1.getMessage))
        (false, -1)
      case e2: Exception =>
        warn("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
          expectVersion, e2.getMessage))
        (false, -1)
    }
  }
  /**
   * Conditionally update the persistent path data. Returns (true, newVersion) if it succeeds,
   * otherwise (the current version is not the expected version, etc.) returns (false, -1).
   * If the path doesn't exist, throws ZkNoNodeException instead of returning a failure pair.
   */
  def conditionalUpdatePersistentPathIfExists(client: ZkClient, path: String, data: String, expectVersion: Int): (Boolean, Int) = {
    try {
      val stat = client.writeDataReturnStat(path, data, expectVersion)
      debug("Conditional update of path %s with value %s and expected version %d succeeded, returning the new version: %d"
        .format(path, data, expectVersion, stat.getVersion))
      (true, stat.getVersion)
    } catch {
      case nne: ZkNoNodeException => throw nne
      case e: Exception =>
        error("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
          expectVersion, e.getMessage))
        (false, -1)
    }
  }
  /**
   * Update the value of an ephemeral node with the given path and data.
   * Creates the parent directory and the ephemeral node itself if the node does not yet exist.
   */
  def updateEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
    try {
      client.writeData(path, data)
    } catch {
      case e: ZkNoNodeException => {
        createParentPath(client, path)
        client.createEphemeral(path, data)
      }
      case e2: Throwable => throw e2
    }
  }
def deletePath(client: ZkClient, path: String): Boolean = {
try {
client.delete(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
false
case e2: Throwable => throw e2
}
}
def deletePathRecursive(client: ZkClient, path: String) {
try {
client.deleteRecursive(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
case e2: Throwable => throw e2
}
}
  /**
   * Best-effort recursive delete of `dir` using a short-lived ZK connection.
   * All failures are deliberately swallowed.
   * NOTE(review): the catch of Throwable also swallows fatal errors (OOM etc.) and can
   * leak the ZkClient if deleteRecursive throws before close() — confirm this is intended.
   */
  def maybeDeletePath(zkUrl: String, dir: String) {
    try {
      val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
      zk.deleteRecursive(dir)
      zk.close()
    } catch {
      case _: Throwable => // swallow
    }
  }
def readData(client: ZkClient, path: String): (String, Stat) = {
val stat: Stat = new Stat()
val dataStr: String = client.readData(path, stat)
(dataStr, stat)
}
def readDataMaybeNull(client: ZkClient, path: String): (Option[String], Stat) = {
val stat: Stat = new Stat()
val dataAndStat = try {
(Some(client.readData(path, stat)), stat)
} catch {
case e: ZkNoNodeException =>
(None, stat)
case e2: Throwable => throw e2
}
dataAndStat
}
  /** Return the child node names of `path`; throws if the node does not exist. */
  def getChildren(client: ZkClient, path: String): Seq[String] = {
    import scala.collection.JavaConversions._
    // triggers implicit conversion from java list to scala Seq
    client.getChildren(path)
  }
def getChildrenParentMayNotExist(client: ZkClient, path: String): Seq[String] = {
import scala.collection.JavaConversions._
// triggers implicit conversion from java list to scala Seq
try {
client.getChildren(path)
} catch {
case e: ZkNoNodeException => return Nil
case e2: Throwable => throw e2
}
}
  /**
   * Check if a node exists at the given path.
   */
  def pathExists(client: ZkClient, path: String): Boolean = {
    client.exists(path)
  }
def getCluster(zkClient: ZkClient) : Cluster = {
val cluster = new Cluster
val nodes = getChildrenParentMayNotExist(zkClient, BrokerIdsPath)
for (node <- nodes) {
val brokerZKString = readData(zkClient, BrokerIdsPath + "/" + node)._1
cluster.add(Broker.createBroker(node.toInt, brokerZKString))
}
cluster
}
def getPartitionLeaderAndIsrForTopics(zkClient: ZkClient, topicAndPartitions: Set[TopicAndPartition])
: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = {
val ret = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
for(topicAndPartition <- topicAndPartitions) {
ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topicAndPartition.topic, topicAndPartition.partition) match {
case Some(leaderIsrAndControllerEpoch) => ret.put(topicAndPartition, leaderIsrAndControllerEpoch)
case None =>
}
}
ret
}
  /**
   * Read the replica assignment for each topic from its ZK node.
   * The JSON layout is {"partitions": {"<partitionId>": [replicaIds...]}}; topics whose
   * node is missing or malformed contribute nothing to the result.
   */
  def getReplicaAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] = {
    val ret = new mutable.HashMap[TopicAndPartition, Seq[Int]]
    topics.foreach { topic =>
      val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
      jsonPartitionMapOpt match {
        case Some(jsonPartitionMap) =>
          Json.parseFull(jsonPartitionMap) match {
            case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match {
              case Some(repl)  =>
                val replicaMap = repl.asInstanceOf[Map[String, Seq[Int]]]
                for((partition, replicas) <- replicaMap){
                  ret.put(TopicAndPartition(topic, partition.toInt), replicas)
                  debug("Replicas assigned to topic [%s], partition [%s] are [%s]".format(topic, partition, replicas))
                }
              case None =>
            }
            case None =>
          }
        case None =>
      }
    }
    ret
  }
  /**
   * Read the partition -> replicas assignment per topic. Topics whose ZK node is missing
   * or whose JSON is malformed map to an empty assignment (they still appear in the result).
   */
  def getPartitionAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[String, collection.Map[Int, Seq[Int]]] = {
    val ret = new mutable.HashMap[String, Map[Int, Seq[Int]]]()
    topics.foreach{ topic =>
      val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
      val partitionMap = jsonPartitionMapOpt match {
        case Some(jsonPartitionMap) =>
          Json.parseFull(jsonPartitionMap) match {
            case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match {
              case Some(replicaMap) =>
                val m1 = replicaMap.asInstanceOf[Map[String, Seq[Int]]]
                // JSON keys are strings; convert partition ids back to Int.
                m1.map(p => (p._1.toInt, p._2))
              case None => Map[Int, Seq[Int]]()
            }
            case None => Map[Int, Seq[Int]]()
          }
        case None => Map[Int, Seq[Int]]()
      }
      debug("Partition map for /brokers/topics/%s is %s".format(topic, partitionMap))
      ret += (topic -> partitionMap)
    }
    ret
  }
def getPartitionsForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[String, Seq[Int]] = {
getPartitionAssignmentForTopics(zkClient, topics).map { topicAndPartitionMap =>
val topic = topicAndPartitionMap._1
val partitionMap = topicAndPartitionMap._2
debug("partition assignment of /brokers/topics/%s is %s".format(topic, partitionMap))
(topic -> partitionMap.keys.toSeq.sortWith((s,t) => s < t))
}
}
def getPartitionsBeingReassigned(zkClient: ZkClient): Map[TopicAndPartition, ReassignedPartitionsContext] = {
// read the partitions and their new replica list
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, ReassignPartitionsPath)._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
val reassignedPartitions = parsePartitionReassignmentData(jsonPartitionMap)
reassignedPartitions.map(p => (p._1 -> new ReassignedPartitionsContext(p._2)))
case None => Map.empty[TopicAndPartition, ReassignedPartitionsContext]
}
}
  // Parses without deduplicating keys so the data can be checked before allowing reassignment to proceed.
  // Expected JSON shape: {"partitions": [{"topic": ..., "partition": ..., "replicas": [...]}, ...]};
  // anything unparsable or lacking "partitions" yields an empty Seq.
  def parsePartitionReassignmentDataWithoutDedup(jsonData: String): Seq[(TopicAndPartition, Seq[Int])] = {
    Json.parseFull(jsonData) match {
      case Some(m) =>
        m.asInstanceOf[Map[String, Any]].get("partitions") match {
          case Some(partitionsSeq) =>
            partitionsSeq.asInstanceOf[Seq[Map[String, Any]]].map(p => {
              val topic = p.get("topic").get.asInstanceOf[String]
              val partition = p.get("partition").get.asInstanceOf[Int]
              val newReplicas = p.get("replicas").get.asInstanceOf[Seq[Int]]
              TopicAndPartition(topic, partition) -> newReplicas
            })
          case None =>
            Seq.empty
        }
      case None =>
        Seq.empty
    }
  }
  /** Deduplicating variant: later entries for the same partition win (toMap keeps the last). */
  def parsePartitionReassignmentData(jsonData: String): Map[TopicAndPartition, Seq[Int]] = {
    parsePartitionReassignmentDataWithoutDedup(jsonData).toMap
  }
def parseTopicsData(jsonData: String): Seq[String] = {
var topics = List.empty[String]
Json.parseFull(jsonData) match {
case Some(m) =>
m.asInstanceOf[Map[String, Any]].get("topics") match {
case Some(partitionsSeq) =>
val mapPartitionSeq = partitionsSeq.asInstanceOf[Seq[Map[String, Any]]]
mapPartitionSeq.foreach(p => {
val topic = p.get("topic").get.asInstanceOf[String]
topics ++= List(topic)
})
case None =>
}
case None =>
}
topics
}
  /** Encode a reassignment map as the versioned JSON stored at /admin/reassign_partitions. */
  def getPartitionReassignmentZkData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): String = {
    Json.encode(Map("version" -> 1, "partitions" -> partitionsToBeReassigned.map(e => Map("topic" -> e._1.topic, "partition" -> e._1.partition,
      "replicas" -> e._2))))
  }
def updatePartitionReassignmentData(zkClient: ZkClient, partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]) {
val zkPath = ZkUtils.ReassignPartitionsPath
partitionsToBeReassigned.size match {
case 0 => // need to delete the /admin/reassign_partitions path
deletePath(zkClient, zkPath)
info("No more partitions need to be reassigned. Deleting zk path %s".format(zkPath))
case _ =>
val jsonData = getPartitionReassignmentZkData(partitionsToBeReassigned)
try {
updatePersistentPath(zkClient, zkPath, jsonData)
info("Updated partition reassignment path with %s".format(jsonData))
} catch {
case nne: ZkNoNodeException =>
ZkUtils.createPersistentPath(zkClient, zkPath, jsonData)
debug("Created path %s with %s for partition reassignment".format(zkPath, jsonData))
case e2: Throwable => throw new AdminOperationException(e2.toString)
}
}
}
def getPartitionsUndergoingPreferredReplicaElection(zkClient: ZkClient): Set[TopicAndPartition] = {
// read the partitions and their new replica list
val jsonPartitionListOpt = readDataMaybeNull(zkClient, PreferredReplicaLeaderElectionPath)._1
jsonPartitionListOpt match {
case Some(jsonPartitionList) => PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(jsonPartitionList)
case None => Set.empty[TopicAndPartition]
}
}
  /**
   * Delete a broker's id registration node and its registration node under the given topic.
   * NOTE(review): despite the name, this removes /brokers/ids/<brokerId> for the whole
   * broker, not a single partition — confirm the name matches the intended scope.
   */
  def deletePartition(zkClient : ZkClient, brokerId: Int, topic: String) {
    val brokerIdPath = BrokerIdsPath + "/" + brokerId
    zkClient.delete(brokerIdPath)
    val brokerPartTopicPath = BrokerTopicsPath + "/" + topic + "/" + brokerId
    zkClient.delete(brokerPartTopicPath)
  }
  /** The consumer ids registered under the group's /ids directory; throws if the group dir is absent. */
  def getConsumersInGroup(zkClient: ZkClient, group: String): Seq[String] = {
    val dirs = new ZKGroupDirs(group)
    getChildren(zkClient, dirs.consumerRegistryDir)
  }
  /**
   * Build topic -> sorted consumer-thread-id list for all consumers registered in `group`.
   * Thread ids are accumulated by prepending, then each list is sorted in a final pass.
   */
  def getConsumersPerTopic(zkClient: ZkClient, group: String, excludeInternalTopics: Boolean) : mutable.Map[String, List[ConsumerThreadId]] = {
    val dirs = new ZKGroupDirs(group)
    val consumers = getChildrenParentMayNotExist(zkClient, dirs.consumerRegistryDir)
    val consumersPerTopicMap = new mutable.HashMap[String, List[ConsumerThreadId]]
    for (consumer <- consumers) {
      val topicCount = TopicCount.constructTopicCount(group, consumer, zkClient, excludeInternalTopics)
      for ((topic, consumerThreadIdSet) <- topicCount.getConsumerThreadIdsPerTopic) {
        for (consumerThreadId <- consumerThreadIdSet)
          consumersPerTopicMap.get(topic) match {
            case Some(curConsumers) => consumersPerTopicMap.put(topic, consumerThreadId :: curConsumers)
            case _ => consumersPerTopicMap.put(topic, List(consumerThreadId))
          }
      }
    }
    // Normalize each topic's list to sorted order before returning.
    for ( (topic, consumerList) <- consumersPerTopicMap )
      consumersPerTopicMap.put(topic, consumerList.sortWith((s,t) => s < t))
    consumersPerTopicMap
  }
/**
* This API takes in a broker id, queries zookeeper for the broker metadata and returns the metadata for that broker
* or throws an exception if the broker dies before the query to zookeeper finishes
* @param brokerId The broker id
* @param zkClient The zookeeper client connection
* @return An optional Broker object encapsulating the broker metadata
*/
def getBrokerInfo(zkClient: ZkClient, brokerId: Int): Option[Broker] = {
ZkUtils.readDataMaybeNull(zkClient, ZkUtils.BrokerIdsPath + "/" + brokerId)._1 match {
case Some(brokerInfo) => Some(Broker.createBroker(brokerId, brokerInfo))
case None => None
}
}
def getAllTopics(zkClient: ZkClient): Seq[String] = {
val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
if(topics == null)
Seq.empty[String]
else
topics
}
def getAllPartitions(zkClient: ZkClient): Set[TopicAndPartition] = {
val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
if(topics == null) Set.empty[TopicAndPartition]
else {
topics.map { topic =>
getChildren(zkClient, getTopicPartitionsPath(topic)).map(_.toInt).map(TopicAndPartition(topic, _))
}.flatten.toSet
}
}
}
/** zkclient serializer that stores every node payload as a UTF-8 string. */
object ZKStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data : Object) : Array[Byte] =
    data.asInstanceOf[String].getBytes("UTF-8")

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes : Array[Byte]) : Object =
    if (bytes == null) null
    else new String(bytes, "UTF-8")
}
/** ZK directory layout for a consumer group: /consumers/<group> and its /ids registry. */
class ZKGroupDirs(val group: String) {
  def consumerDir = ZkUtils.ConsumersPath
  def consumerGroupDir = consumerDir + "/" + group
  def consumerRegistryDir = consumerGroupDir + "/ids"
}
/** Extends the group layout with the per-topic offsets and owners directories. */
class ZKGroupTopicDirs(group: String, topic: String) extends ZKGroupDirs(group) {
  def consumerOffsetDir = consumerGroupDir + "/offsets/" + topic
  def consumerOwnerDir = consumerGroupDir + "/owners/" + topic
}
/** ZooKeeper connection settings read from the supplied properties. */
class ZKConfig(props: VerifiableProperties) {
  /** ZK host string ("host:port[,host:port...][/chroot]") */
  val zkConnect = props.getString("zookeeper.connect")

  /** zookeeper session timeout (ms, default 6000) */
  val zkSessionTimeoutMs = props.getInt("zookeeper.session.timeout.ms", 6000)

  /** the max time that the client waits to establish a connection to zookeeper; defaults to the session timeout */
  val zkConnectionTimeoutMs = props.getInt("zookeeper.connection.timeout.ms",zkSessionTimeoutMs)

  /** how far a ZK follower can be behind a ZK leader (ms) */
  val zkSyncTimeMs = props.getInt("zookeeper.sync.time.ms", 2000)
}
| Parth-Brahmbhatt/kafka | core/src/main/scala/kafka/utils/ZkUtils.scala | Scala | apache-2.0 | 30,659 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi
import scala.concurrent.duration._
import org.bytedeco.javacpp.opencv_core._
import scala.io._
/** Shared type aliases and small I/O helpers for the project. */
package object common
{
  // Project-wide time span: alias FiniteDuration (both the type and its companion).
  type TimeSpan = FiniteDuration
  val TimeSpan = FiniteDuration

  // Colors are represented with OpenCV's CvScalar.
  type LightColor = CvScalar

  // Builds a "jar:"-prefixed path for a resource resolved via TimePoint's classloader.
  def resourcePath(resource : String) : String =
    "jar:" + classOf[TimePoint].getResource(resource).getPath

  // Heuristic: anything containing ':' is treated as a URL, otherwise a local file path.
  def sourceFromPath(filePath : String) =
  {
    if (filePath.contains(':')) {
      Source.fromURL(filePath)
    } else {
      Source.fromFile(filePath)
    }
  }
}
| lingeringsocket/goseumdochi | base/src/main/scala/org/goseumdochi/common/package.scala | Scala | apache-2.0 | 1,157 |
package com.lightbend.coursegentools
/**
* Copyright © 2016 Lightbend, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* NO COMMERCIAL SUPPORT OR ANY OTHER FORM OF SUPPORT IS OFFERED ON
* THIS SOFTWARE BY LIGHTBEND, Inc.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Entry point that converts a course "main" repository into a linearized git repository:
 * one commit per exercise, so students can step through exercises as history.
 */
object Linearize {

  def main(args: Array[String]): Unit = {

    import java.io.File
    import Helpers._
    import sbt.io.{IO => sbtio}

    implicit val eofe: ExitOnFirstError = ExitOnFirstError(true)

    // --- Parse and unpack command-line options; abort on bad input. ---
    val cmdOptions = LinearizeCmdLineOptParse.parse(args)
    if (cmdOptions.isEmpty) System.exit(-1)

    val LinearizeCmdOptions(mainRepoPath,
                            linearizedOutputFolder,
                            multiJVM,
                            forceDeleteExistingDestinationFolder,
                            configurationFile,
                            isADottyProject,
                            autoReloadOnBuildDefChange,
                            bareLinRepo) = cmdOptions.get
    val mainRepo = resolveMainRepoPath(mainRepoPath)

    implicit val config: MainSettings = new MainSettings(mainRepo, configurationFile)

    // Refuse to run over a dirty git workspace.
    exitIfGitIndexOrWorkspaceIsntClean(mainRepo)

    val projectName = mainRepo.getName

    // NOTE(review): exercises.head/tail below assume at least one exercise exists — confirm
    // getExerciseNames reports an error for an empty course.
    val exercises: Seq[String] = getExerciseNames(mainRepo)

    // --- Prepare the destination folder, honoring the -f (force delete) flag. ---
    val destinationFolder = new File(linearizedOutputFolder, projectName)
    (destinationFolder.exists(), forceDeleteExistingDestinationFolder) match {
      case (true, false) =>
        printError(s"""
          |Destination folder ${destinationFolder.getPath} exists: Either remove this folder
          |manually or use the '-f' command-line option to delete it automatically
          |""".stripMargin)
      case (true, true) =>
        sbtio.delete(destinationFolder)
      case _ =>
    }

    // --- Produce a clean checkout of main and copy it into the linearized project. ---
    val tmpDir = cleanMainViaGit(mainRepo, projectName)
    val cleanMainRepo = new File(tmpDir, projectName)
    printNotification(s"Cleaned main repo: $cleanMainRepo")
    val relativeCleanMainRepo = new File(cleanMainRepo, config.relativeSourceFolder)

    val linearizedProject = new File(linearizedOutputFolder, projectName)
    copyMain(cleanMainRepo, linearizedProject)
    createStudentifiedBuildFile(linearizedProject, multiJVM, isADottyProject, autoReloadOnBuildDefChange)
    createBookmarkFile(config.studentifyModeClassic.studentifiedBaseFolder, linearizedProject)

    // Student-mode sbt command templates; skipped for a bare linearized repo.
    val templateFileList: List[String] =
      List(
        "Man.scala",
        "Navigation.scala",
        "StudentCommandsPlugin.scala",
        "StudentKeys.scala"
      )
    if (!bareLinRepo) {
      addSbtCommands(templateFileList, linearizedProject)
      loadStudentSettings(mainRepo, linearizedProject)
    }
    cleanUp(List(".git", "navigation.sbt"), linearizedProject)
    removeExercisesFromCleanMain(linearizedProject, exercises)
    addGitignoreFromMain(mainRepo, linearizedProject)
    stageFirstExercise(exercises.head, relativeCleanMainRepo, linearizedProject)

    // CMT bookkeeping files that a bare repo should not carry.
    val cmtFileList: List[String] =
      List(
        "project/MPSelection.scala",
        "project/Man.scala",
        "project/Navigation.scala",
        "project/SSettings.scala",
        "project/StudentCommandsPlugin.scala",
        "project/StudentKeys.scala",
        ".courseName",
        ".bookmark"
      )
    if (bareLinRepo) deleteCMTConfig(cmtFileList, linearizedProject)

    // --- Turn the result into a git repo: one commit per exercise. ---
    initializeGitRepo(linearizedProject)
    commitFirstExercise(exercises.head, linearizedProject)
    commitRemainingExercises(exercises.tail, cleanMainRepo, linearizedProject)
    if( Helpers.getStudentifiedBranchName(linearizedProject) != "main")
      renameMainBranch(linearizedProject)
    sbtio.delete(tmpDir)
  }
}
| lightbend-training/course-management-tools | linearize/src/main/scala/com/lightbend/coursegentools/Linearize.scala | Scala | apache-2.0 | 4,180 |
package collins.power
/** Sealed ADT of IPMI-style power management actions; toString doubles as the wire name. */
sealed trait PowerAction

case object PowerOff extends PowerAction {
  override def toString: String = "PowerOff"
}
case object PowerOn extends PowerAction {
  override def toString: String = "PowerOn"
}
case object PowerSoft extends PowerAction {
  override def toString: String = "PowerSoft"
}
case object PowerState extends PowerAction {
  override def toString: String = "PowerState"
}

/** Chassis information actions (identify light, verification). */
sealed trait ChassisInfo extends PowerAction

case object Identify extends ChassisInfo {
  override def toString: String = "Identify"
}
case object Verify extends ChassisInfo {
  override def toString: String = "Verify"
}

/** Reboot variants: soft (graceful) vs hard (power cycle). */
sealed trait Reboot extends PowerAction

case object RebootSoft extends Reboot {
  override def toString: String = "RebootSoft"
}
case object RebootHard extends Reboot {
  override def toString: String = "RebootHard"
}
/**
 * Companion with factory accessors and a case-insensitive name parser.
 * apply throws MatchError on unknown names; unapply returns None instead.
 */
object PowerAction {
  def off() = PowerOff
  def on() = PowerOn
  def soft() = PowerSoft
  def state() = PowerState
  def rebootSoft() = RebootSoft
  def rebootHard() = RebootHard
  def verify() = Verify
  def identify() = Identify

  // Lookup table built once: lowercase rendering -> action. Avoids re-lowercasing
  // every candidate's toString on each unapply call, as the old guard chain did.
  private val actionsByName: Map[String, PowerAction] =
    Seq(off(), on(), soft(), state(), rebootSoft(), rebootHard(), verify(), identify())
      .map(a => a.toString.toLowerCase -> a).toMap

  /** Parse a power action name (case-insensitive); throws MatchError on unknown input. */
  def apply(s: String): PowerAction = unapply(s) match {
    case Some(p) => p
    case None => throw new MatchError("No such power action " + s)
  }

  /** Case-insensitive extractor: Some(action) for a known name, None otherwise. */
  def unapply(t: String): Option[PowerAction] = actionsByName.get(t.toLowerCase)
}
| byxorna/collins | app/collins/power/PowerAction.scala | Scala | apache-2.0 | 1,852 |
package com.sksamuel.avro4s
import org.scalatest.{FunSuite, Matchers}
// Minimal sealed ADT plus a schema case class used as the regression fixture below.
sealed trait Foo
case class Bar(i: Int) extends Foo
case class Baz(s: String) extends Foo

// The @AvroNamespace on the ADT-typed field is the trigger for the bug under test.
case class MySchema(@AvroNamespace("broken") foo: Foo, id: String, x: Int)
class ToRecordTest extends FunSuite with Matchers {

  // Deliberately disabled: documents a known failure — the final line throws when the
  // ADT field carries an @AvroNamespace annotation. Re-enable once the bug is fixed.
  ignore("ToRecord should work with a namespace annotation on an ADT") {
    val schema = AvroSchema[MySchema]
    val ms = MySchema(Bar(1), "", 0)
    ToRecord[MySchema](schema).to(ms) //throws
  }
}
| 51zero/avro4s | avro4s-core/src/test/scala/com/sksamuel/avro4s/ToRecordTest.scala | Scala | mit | 494 |
/***
* Copyright 2017 Andrea Lorenzani
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***/
package name.lorenzani.andrea.homeaway.services
import com.twitter.finagle.http.path._
import com.twitter.finagle.http.{Request, Response}
import com.twitter.util.Future
import name.lorenzani.andrea.homeaway.datastore.{DataStore, ListingWrapper}
import name.lorenzani.andrea.homeaway.json.JsonUtil
/**
 * Handles POST requests: "/new" stores a listing parsed from the request body and
 * responds with a JSON object containing the id of the newly created entry.
 */
class PostRequestHandler(ds: DataStore) extends RequestHandler {

  override def handle(request: Request): Future[Response] = {
    // NOTE(review): a non-matching path (and any parse/store failure in putListing)
    // throws synchronously here rather than returning a failed Future — confirm the
    // caller of handle() copes with that.
    val newid = Path(request.path) match {
      case Root / "new" => putListing(request)
      case _ => throw new IllegalArgumentException("API request failure")
    }
    Future {
      val response = Response()
      val content = JsonUtil.toJson(Map("newId" -> newid))
      response.setContentString(content)
      response
    }
  }

  /** Deserialize the request body into a ListingWrapper, store it, and return the new id. */
  def putListing(req: Request): String = {
    val content = req.contentString
    val listing = JsonUtil.fromJson[ListingWrapper](content)
    ds.add(listing)
  }
}
| andrealorenzani/HAVacationRental | src/main/scala/name/lorenzani/andrea/homeaway/services/PostRequestHandler.scala | Scala | apache-2.0 | 1,547 |
package pl.pej.trelloilaro.api.model
import play.api.libs.json.Json
/** JSON payload for a Trello list; only `id` is mandatory, all other fields are optional. */
case class ListJson(
  id: String,
  name: Option[String] = None,
  closed: Option[Boolean] = None,
  idBoard: Option[String] = None,
  pos: Option[Int] = None
  //  subscribed: Option[Option[String]]
)
object ListJson {
  // play-json Format derived from the case class (both Reads and Writes).
  implicit val listFormat = Json.format[ListJson]
}
| tomaszym/trelloilaro | src/main/scala/pl/pej/trelloilaro/api/model/ListJson.scala | Scala | mit | 467 |
package nl.gideondk.raiku
//import commands.RWObject
import org.scalatest.time.{ Millis, Seconds, Span }
import scala.concurrent._
import scala.concurrent.duration._
import akka.actor._
import akka.stream.ActorMaterializer
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ Suite, WordSpec }
import org.scalatest.matchers.ShouldMatchers
import spray.json._
/** Shared actor system, materializer and Raiku client used by the test suites below. */
object DB {
  implicit val system = ActorSystem("perf-bucket-system")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  // Single client instance pointing at a local Riak protobuf endpoint.
  val client = RaikuClient("localhost", 8087)
}
// Test fixtures: Y has secondary-index fields (groupId, age); Z is a bare key/value pair.
case class Y(id: String, name: String, age: Int, groupId: String)
case class Z(id: String, name: String)
/** Riak converters for the test models: Y round-trips via spray-json, Z via raw bytes. */
object TestModels extends DefaultJsonProtocol {
  implicit val yFormat = jsonFormat4(Y)

  // Y: JSON body plus a binary index on group_id and an integer index on age.
  implicit val yConverter = RaikuConverter.newConverter(
    reader = (v: RaikuRWValue) ⇒ yFormat.read(new String(v.data).asJson),
    writer = (o: Y) ⇒ RaikuRWValue(o.id, o.toJson.toString.getBytes, "application/json"),
    binIndexes = (o: Y) ⇒ Map("group_id" -> Set(o.groupId)),
    intIndexes = (o: Y) ⇒ Map("age" -> Set(o.age)))

  // Z: the key is the id and the value bytes are just the name (no indexes).
  implicit val zConverter = RaikuConverter.newConverter(
    reader = (v: RaikuRWValue) ⇒ Z(v.key, new String(v.data)),
    writer = (o: Z) ⇒ RaikuRWValue(o.id, o.name.getBytes(), "application/json"))
}
/** Base spec wiring timeouts, ScalaFutures patience and the shared Raiku client. */
abstract class RaikuSpec extends WordSpec with Suite with ShouldMatchers with ScalaFutures {
  implicit val timeout = 30 seconds
  implicit val defaultPatience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(200, Millis))
  val client = DB.client
}
} | gideondk/Raiku | src/test/scala/nl/gideondk/raiku/TestHelpers.scala | Scala | apache-2.0 | 1,587 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.scalding
package typed
import java.util.Properties
import java.io.{ InputStream, OutputStream, Serializable }
import cascading.scheme.Scheme
import cascading.scheme.hadoop.TextDelimited
import cascading.scheme.local.{ TextDelimited => LocalTextDelimited }
import cascading.tap.{ Tap, SinkMode }
import cascading.tap.hadoop.{ Hfs, PartitionTap }
import cascading.tap.local.{ FileTap, PartitionTap => LocalPartitionTap }
import cascading.tap.partition.Partition
import cascading.tuple.{ Fields, Tuple, TupleEntry }
/**
 * Scalding source to read or write partitioned delimited text.
 *
 * For writing it expects a pair of `(P, R)`, where `P` is the data used for partitioning and
 * `T` is the output to write out. Below is an example.
 * {{{
 * val data = List(
 *   (("a", "x"), ("i", 1)),
 *   (("a", "y"), ("j", 2)),
 *   (("b", "z"), ("k", 3))
 * )
 * IterablePipe(data, flowDef, mode)
 *   .write(PartitionedDelimited[(String, String), (String, Int)](args("out"), "col1=%s/col2=%s"))
 * }}}
 *
 * For reading it produces a pair `(P, T)` where `P` is the partition data and `T` is data in the
 * files. Below is an example.
 * {{{
 * val in: TypedPipe[((String, String), (String, Int))] = PartitionedDelimited[(String, String), (String, Int)](args("in"), "col1=%s/col2=%s")
 * }}}
 */
case class PartitionedDelimitedSource[P, T](
  path: String, template: String, separator: String, fields: Fields, skipHeader: Boolean = false,
  writeHeader: Boolean = false, quote: String = "\\"", strict: Boolean = true, safe: Boolean = true)(implicit mt: Manifest[T], val valueSetter: TupleSetter[T], val valueConverter: TupleConverter[T],
    val partitionSetter: TupleSetter[P], val partitionConverter: TupleConverter[P]) extends PartitionSchemed[P, T] with Serializable {
  assert(
    fields.size == valueSetter.arity,
    "The number of fields needs to be the same as the arity of the value setter")

  // Column classes for the delimited scheme: tuple element types for Product T,
  // otherwise a single-column array of T itself.
  val types: Array[Class[_]] = {
    if (classOf[scala.Product].isAssignableFrom(mt.runtimeClass)) {
      //Assume this is a Tuple:
      mt.typeArguments.map { _.runtimeClass }.toArray
    } else {
      //Assume there is only a single item
      Array(mt.runtimeClass)
    }
  }

  // Create the underlying scheme and explicitly set the sink fields to be only the specified fields
  // see sinkFields in PartitionSchemed for other half of this work around.
  override def hdfsScheme = {
    val scheme =
      HadoopSchemeInstance(new TextDelimited(fields, null, skipHeader, writeHeader, separator, strict, quote, types, safe)
        .asInstanceOf[Scheme[_, _, _, _, _]])
    scheme.setSinkFields(fields)
    scheme
  }

  // Create the underlying scheme and explicitly set the sink fields to be only the specified fields
  // see sinkFields in PartitionSchemed for other half of this work around.
  override def localScheme = {
    val scheme =
      new LocalTextDelimited(fields, skipHeader, writeHeader, separator, strict, quote, types, safe)
        .asInstanceOf[Scheme[Properties, InputStream, OutputStream, _, _]]
    scheme.setSinkFields(fields)
    scheme
  }
}
/**
 * Trait to assist with creating objects such as [[PartitionedTsv]] to read from separated files.
 * Override separator, skipHeader, writeHeader as needed.
 */
trait PartitionedDelimited extends Serializable {
  def separator: String

  // Fields derived from the arity of T's TupleSetter.
  def apply[P: Manifest: TupleConverter: TupleSetter, T: Manifest: TupleConverter: TupleSetter](path: String, template: String): PartitionedDelimitedSource[P, T] =
    PartitionedDelimitedSource(path, template, separator, PartitionUtil.toFields(0, implicitly[TupleSetter[T]].arity))

  // Variant taking an explicit Fields spec.
  def apply[P: Manifest: TupleConverter: TupleSetter, T: Manifest: TupleConverter: TupleSetter](path: String, template: String, fields: Fields): PartitionedDelimitedSource[P, T] =
    PartitionedDelimitedSource(path, template, separator, fields)
}
/** Partitioned typed tab separated source.*/
object PartitionedTsv extends PartitionedDelimited {
  val separator = "\\t"
}

/** Partitioned typed comma separated source.*/
object PartitionedCsv extends PartitionedDelimited {
  val separator = ","
}

/** Partitioned typed pipe separated source.*/
object PartitionedPsv extends PartitionedDelimited {
  val separator = "|"
}

/** Partitioned typed `\\1` separated source (commonly used by Pig).*/
object PartitionedOsv extends PartitionedDelimited {
  val separator = "\\u0001"
}
| chrismoulton/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/PartitionedDelimitedSource.scala | Scala | apache-2.0 | 5,021 |
package blitztags
import blitztags.AddElementCommands._
import scala.xml.Unparsed
/** Short aliases for the common node constructors, mixed in for concise templates. */
trait DefaultNodes {
  // text nodes
  val T = TextNode

  // comment nodes
  val / = CommentNode

  // raw (unescaped) XML nodes
  val Raw = Unparsed(_)
}
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.external
import slamdata.Predef._
import cats.{Eq, Show}
import cats.implicits._
// NB: this is actually more general, I'm just sticking it here for now
/** Display-name variants (lowercase, uppercase, plural) for a datasource/destination kind. */
final case class DisplayName(lowercase: String, uppercase: String, plural: String)

object DisplayName {
  // Structural equality over all three fields.
  implicit val eq: Eq[DisplayName] =
    Eq.by(dn => (dn.lowercase, dn.uppercase, dn.plural))

  implicit val show: Show[DisplayName] =
    Show.show(dn => s"DisplayName(lowercase = ${dn.lowercase}, uppercase = ${dn.uppercase}, plural = ${dn.plural})")
}
| djspiewak/quasar | impl/src/main/scala/quasar/impl/external/DisplayName.scala | Scala | apache-2.0 | 1,133 |
package 练习26
// Payload element carried by the list-like structures below.
case class Item(name: String)

// Number1 is a cons-list of Items driven by double dispatch: method1 hands the
// (tail, head) decomposition to a Number2 visitor.
trait Number1 {
  def method1(number2: Number2): Result
}

// Non-empty cell: delegates to the visitor with its tail and head.
case class Number1P(tail: Number1, head: Item) extends Number1 {
  override def method1(number2: Number2): Result = number2.method2(tail, head)
}

// Empty list: yields the empty Result regardless of the visitor.
case object Number1O extends Number1 {
  override def method1(number2: Number2): Result = ResultO
}
// Visitor side of the double dispatch: receives a Number1 tail plus the current Item.
trait Number2 {
  def method2(number1: Number1, item: Item): Result
}

// A Number2 whose continuation (tail1) is wired up mutably after construction.
trait Number3 extends Number2 {
  var tail1: Number2
}

// "Successor" visitor: recurses through its own tail, then prepends the held head.
trait Number2P extends Number2 {
  def tail: Number2
  def head: Item
  override def method2(number1: Number1, item: Item): Result = ResultP(tail.method2(number1, item), head)
}

// Restarts the dispatch on the Number1 side using this visitor's tail.
trait Number2O1 extends Number3 {
  def tail: Number2
  override var tail1: Number2
  override def method2(number1: Number1, item: Item): Result = number1.method1(tail)
}

// Pass-through visitor; its continuation must be assigned before use.
class Number2O2 extends Number3 {
  def tail: Number2 = tail1
  // NOTE(review): tail1 starts as null and must be wired externally before method2
  // is called, otherwise this throws NullPointerException — confirm intended.
  override var tail1: Number2 = null
  override def method2(number1: Number1, item: Item): Result = tail.method2(number1, item)
}
trait Result {
def length: Int
}
case class ResultP(tail: Result, item: Item) extends Result {
override def length: Int = tail.length + 1
}
case object ResultO extends Result {
override def length: Int = 0
}
| djx314/ubw | a28-练习/src/main/scala/练习26/指数.scala | Scala | bsd-3-clause | 1,308 |
package com.azavea.opentransit.service
import com.azavea.opentransit._
import akka.actor._
import spray.util.LoggingContext
import spray.routing.ExceptionHandler
import spray.http.{HttpResponse, HttpRequest, Timedout}
import spray.http.StatusCodes.InternalServerError
import spray.routing.{ExceptionHandler, HttpService}
import scala.concurrent._
// Actor that hosts the OpenTransit HTTP routes via spray-routing, backed by the
// production database and Django client.
class OpenTransitServiceActor extends Actor
  with OpenTransitService
  with ProductionDatabaseInstance
  with DjangoClientComponent
{
  val djangoClient = new ProductionDjangoClient {}
  // This is the execution context to use for this Actor
  implicit val dispatcher = context.dispatcher
  // The HttpService trait (which GeoTrellisService will extend) defines
  // only one abstract member, which connects the services environment
  // to the enclosing actor or test.
  def actorRefFactory = context
  // timeout handling, from here:
  // http://spray.io/documentation/1.1-SNAPSHOT/spray-routing/key-concepts/timeout-handling/
  // return JSON message instead of default string message:
  // The server was not able to produce a timely response to your request.
  def handleTimeouts: Receive = {
    case Timedout(x: HttpRequest) =>
      sender ! HttpResponse(InternalServerError,
        """{ "success": false, "message": "Spray timeout encountered" }""")
  }
  // Timeout messages are intercepted first; all other requests fall through to the routes.
  // NOTE(review): `runRoute` appears twice here (outer call wrapping an inner
  // `runRoute(openTransitRoute)`) — looks redundant; confirm the nesting is intentional.
  def receive = runRoute(handleTimeouts orElse runRoute(openTransitRoute))
}
// Aggregates every OpenTransit route under the "/gt" URL prefix. Requires a
// database instance and Django client to be mixed in by the hosting actor/test.
trait OpenTransitService
  extends Route
  with IngestRoute
  with IndicatorsRoute
  with ScenarioRoute
  with ScenarioGtfsRoute
  with MapInfoRoute
  with ServiceDateRangeRoute
  with TravelshedIndicatorRoute
  with TravelshedGeotiffRoute
  with TravelshedMinMaxRoute
  with StationStatsCSVRoute
{ self: DatabaseInstance with DjangoClientComponent =>
  // Route tree: /gt/{utils,indicators,scenarios,travelshed}/...
  def openTransitRoute =
    pathPrefix("gt") {
      pathPrefix("utils") {
        ingestRoute ~
        mapInfoRoute ~
        serviceDateRangeRoute
      } ~
      pathPrefix("indicators") {
        indicatorsRoute ~
        stationStatsCSVRoute
      } ~
      pathPrefix("scenarios") {
        scenariosRoute
      } ~
      pathPrefix("travelshed") {
        travelshedIndicatorRoute ~
        travelshedGeotiffRoute ~
        jobsTravelshedMinMaxRoute ~
        absoluteJobsMinMaxRoute ~
        percentageJobsMinMaxRoute
      }
    }
}
| flibbertigibbet/open-transit-indicators | scala/opentransit/src/main/scala/com/azavea/opentransit/service/OpenTransitServiceActor.scala | Scala | gpl-3.0 | 2,327 |
package io.fintrospect.testing
import com.twitter.finagle.Service
import com.twitter.finagle.http.Status.{Accepted, Conflict}
import com.twitter.finagle.http.{Request, Response, Status}
import com.twitter.util.{Await, Future}
import org.scalatest.{FunSpec, Matchers}
class OverridableHttpServiceTest extends FunSpec with Matchers {
  // The status the wrapped service returns before any override is applied.
  val originalStatus = Conflict
  // Service under test: always responds with `originalStatus` until overridden.
  val overridableHttpService = new OverridableHttpService[Request](Service.mk { r: Request => Future(Response(originalStatus)) })
  it("will serve routes that are passed to it") {
    statusShouldBe(originalStatus)
  }
  it("can override status") {
    overridableHttpService.respondWith(Accepted)
    statusShouldBe(Accepted)
  }
  // Issues an empty request and asserts on the (synchronously awaited) response status.
  private def statusShouldBe(expected: Status): Unit = {
    Await.result(overridableHttpService.service(Request())).status shouldBe expected
  }
}
| daviddenton/fintrospect | core/src/test/scala/io/fintrospect/testing/OverridableHttpServiceTest.scala | Scala | apache-2.0 | 849 |
// Positive compilation test: exercises creator application (apply-style
// construction, `sys.SystemProperties()`) on a non-case Scala 2 stdlib class.
@main def Test =
  sys.SystemProperties()
| lampepfl/dotty | tests/pos/scala2-creators.scala | Scala | apache-2.0 | 43 |
package com.github.fellowship_of_the_bus
package eshe.game
import lib.slick2d.ui.{Image, Animation, Drawable}
/**
 * Central registry of sprite/texture IDs and their image resources.
 *
 * Every drawable asset in the game is referenced through a numeric ID declared
 * here; `imageMap` binds each ID to its resource path, and `images` lazily
 * loads them (an `Array[String]` value becomes an `Animation`, a single path
 * becomes an `Image`).
 */
object IDMap{
  // Player (IV guy) sprites; the four 100-blocks are the four player palettes.
  val IVGuyW1ID = 100
  val IVGuyW2ID = 101
  val IVGuyJumpID = 102
  val IVGuyKickID = 103
  val IVGuy2W1ID = 105
  val IVGuy2W2ID = 106
  val IVGuy2JumpID = 107
  val IVGuy2KickID = 108
  val IVGuy3W1ID = 109
  val IVGuy3W2ID = 110
  val IVGuy3JumpID = 111
  val IVGuy3KickID = 112
  val IVGuy4W1ID = 113
  val IVGuy4W2ID = 114
  val IVGuy4JumpID = 115
  val IVGuy4KickID = 116
  val IVGuyArmID = 150
  val IVGuyArmPunchID = 151
  val IVGuyDodgeID = 152
  val IVGuy2DodgeID = 153
  val IVGuy3DodgeID = 154
  val IVGuy4DodgeID = 155
  // Enemy sprites.
  val GhostW1ID = 200
  val GhostW2ID = 201
  val GhostKnockbackID = 202
  val GhostWindupID = 203
  val GhostKickID = 204
  val ElsaID = 210
  val ElsaShootID = 211
  val ElsaKnockbackID = 212
  val HotdogW1ID = 220
  val HotdogW2ID = 221
  val HotdogKnockbackID = 222
  val PowerRangerW1ID = 230
  val PowerRangerW2ID = 231
  val PowerRangerKnockbackID = 232
  val PowerRangerPunchID = 233
  val PowerRangerW1BlueID = 240
  val PowerRangerW2BlueID = 241
  val PowerRangerKnockbackBlueID = 242
  val PowerRangerPunchBlueID = 243
  val PowerRangerW1GreenID = 250
  val PowerRangerW2GreenID = 251
  val PowerRangerKnockbackGreenID = 252
  val PowerRangerPunchGreenID = 253
  val PowerRangerW1YellowID = 260
  val PowerRangerW2YellowID = 261
  val PowerRangerKnockbackYellowID = 262
  val PowerRangerPunchYellowID = 263
  val PowerRangerW1PinkID = 270
  val PowerRangerW2PinkID = 271
  val PowerRangerKnockbackPinkID = 272
  val PowerRangerPunchPinkID = 273
  val PowerRangerW1BlackID = 280
  val PowerRangerW2BlackID = 281
  val PowerRangerKnockbackBlackID = 282
  val PowerRangerPunchBlackID = 283
  val HorseMaskID = 40
  // UI / menu assets.
  val FotBLogoID = 1000
  val GameOverID = 1001
  val BackgroundID = 1002
  val LogoID = 1003
  val ScrollArrowID = 1004
  val BackgroundFullID = 1005
  val SelectArrow = 1006
  // Projectiles.
  val ElsaShotID = 10000
  val KetchupID = 10001
  // Boss sprites (20000 block).
  val BossFullID = 20000
  val BossUncoatID = 20001
  val BossUncoatAttackID = 20002
  val BossUncoatAttackLegID = 20003
  val BossUncoatWalkID = 20004
  val BossFullAttackID = 20005
  val BossFullSuperSoakerID = 20006
  val BossCellphoneID = 20007
  val BossCellphoneAttackID = 20008
  // Fix: this was 20008, colliding with BossCellphoneAttackID. Since imageMap is
  // keyed by these IDs, the duplicate key made the walk texture silently overwrite
  // the attack texture in the map. 20016 is the next unused ID in this block.
  val BossCellphoneWalkID = 20016
  val BossFinalID = 20009
  val BossFinalAttackID = 20010
  val BossFinalWalkID = 20011
  val TrenchcoatID = 20012
  val BottomGuyID = 20013
  val MiddleGuyID = 20014
  val TopGuyID = 20015
  // Binds every ID to its image resource; Array values denote animation frames.
  val imageMap = Map(
    IVGuyW1ID -> "img/IVWalk1.png",
    IVGuyW2ID -> "img/IVWalk2.png",
    IVGuyJumpID -> "img/Jump.png",
    IVGuyKickID -> "img/KickFull.png",
    IVGuy2W1ID -> "img/P2IVWalk1.png",
    IVGuy2W2ID -> "img/P2IVWalk2.png",
    IVGuy2JumpID -> "img/P2Jump.png",
    IVGuy2KickID -> "img/P2KickFull.png",
    IVGuy3W1ID -> "img/P3IVWalk1.png",
    IVGuy3W2ID -> "img/P3IVWalk2.png",
    IVGuy3JumpID -> "img/P3Jump.png",
    IVGuy3KickID -> "img/P3KickFull.png",
    IVGuy4W1ID -> "img/P4IVWalk1.png",
    IVGuy4W2ID -> "img/P4IVWalk2.png",
    IVGuy4JumpID -> "img/P4Jump.png",
    IVGuy4KickID -> "img/P4KickFull.png",
    IVGuyArmID -> "img/ArmDefault.png",
    IVGuyArmPunchID -> "img/ArmPunch.png",
    IVGuyDodgeID -> "img/Dodge.png",
    IVGuy2DodgeID -> "img/P2Dodge.png",
    IVGuy3DodgeID -> "img/P3Dodge.png",
    IVGuy4DodgeID -> "img/P4Dodge.png",
    GhostW1ID -> "img/GhostRun1.png",
    GhostW2ID -> "img/GhostRun2.png",
    GhostWindupID -> "img/GhostWindup.png",
    GhostKnockbackID -> "img/GhostKnockback.png",
    GhostKickID -> "img/GhostKick.png",
    ElsaID -> "img/Elsa.png",
    ElsaShootID -> "img/ElsaShoot.png",
    ElsaKnockbackID -> "img/ElsaKnockback.png",
    PowerRangerW1ID -> "img/PowerRangerRun1.png",
    PowerRangerW2ID -> "img/PowerRangerRun2.png",
    PowerRangerKnockbackID -> "img/PowerRangerKnockback.png",
    PowerRangerPunchID -> "img/PowerRangerPunch.png",
    PowerRangerW1BlueID -> "img/PowerRangerRun1Blue.png",
    PowerRangerW2BlueID -> "img/PowerRangerRun2Blue.png",
    PowerRangerKnockbackBlueID -> "img/PowerRangerKnockbackBlue.png",
    PowerRangerPunchBlueID -> "img/PowerRangerPunchBlue.png",
    PowerRangerW1GreenID -> "img/PowerRangerRun1Green.png",
    PowerRangerW2GreenID -> "img/PowerRangerRun2Green.png",
    PowerRangerKnockbackGreenID -> "img/PowerRangerKnockbackGreen.png",
    PowerRangerPunchGreenID -> "img/PowerRangerPunchGreen.png",
    PowerRangerW1YellowID -> "img/PowerRangerRun1Yellow.png",
    PowerRangerW2YellowID -> "img/PowerRangerRun2Yellow.png",
    PowerRangerKnockbackYellowID -> "img/PowerRangerKnockbackYellow.png",
    PowerRangerPunchYellowID -> "img/PowerRangerPunchYellow.png",
    PowerRangerW1PinkID -> "img/PowerRangerRun1Pink.png",
    PowerRangerW2PinkID -> "img/PowerRangerRun2Pink.png",
    PowerRangerKnockbackPinkID -> "img/PowerRangerKnockbackPink.png",
    PowerRangerPunchPinkID -> "img/PowerRangerPunchPink.png",
    PowerRangerW1BlackID -> "img/PowerRangerRun1Black.png",
    PowerRangerW2BlackID -> "img/PowerRangerRun2Black.png",
    PowerRangerKnockbackBlackID -> "img/PowerRangerKnockbackBlack.png",
    PowerRangerPunchBlackID -> "img/PowerRangerPunchBlack.png",
    HorseMaskID -> "img/HorseMask.png",
    HotdogW1ID -> "img/HotdogWalk1.png",
    HotdogW2ID -> "img/HotdogWalk2.png",
    HotdogKnockbackID -> "img/HotdogKnockback.png",
    BossFullID -> "img/BossFull.png",
    BossUncoatID -> "img/BossUncoat.png",
    BossUncoatAttackID -> "img/BossUncoatAttack.png",
    BossUncoatAttackLegID -> "img/BossUncoatAttackLeg.png",
    BossUncoatWalkID -> "img/BossUncoatWalk.png",
    BossFullAttackID -> "img/Water.png",
    BossFullSuperSoakerID -> "img/SuperSoaker.png",
    BossCellphoneID -> "img/BossUncoat2.png",
    // NOTE(review): attack uses the same texture as the idle pose — presumably a
    // placeholder; confirm against the art assets.
    BossCellphoneAttackID -> "img/BossUncoat2.png",
    BossCellphoneWalkID -> "img/BossUncoat2Walk.png",
    BossFinalID -> "img/BossFinal.png",
    BossFinalAttackID -> "img/BossFinalAttack.png",
    BossFinalWalkID -> "img/BossFinalWalk.png",
    TrenchcoatID -> "img/Trenchcoat.png",
    BottomGuyID -> "img/BottomGuy.png",
    MiddleGuyID -> "img/MiddleGuy.png",
    TopGuyID -> "img/TopGuy.png",
    FotBLogoID -> "img/FotB-Logo.png",
    GameOverID -> "img/GameOver.png",
    BackgroundID -> "img/BackGround.png",
    BackgroundFullID -> "img/BackGroundFull.png",
    LogoID -> "img/GameLogo.png",
    ScrollArrowID -> "img/Arrow.png",
    SelectArrow -> "img/SelectionArrow.png",
    ElsaShotID -> "img/Elsa_Projectile.png",
    KetchupID -> "img/Ketchup.png",
    // NOTE(review): apparently a leftover test entry exercising the Animation
    // branch below — confirm it can be removed.
    12345 -> Array("img/GameOver.png", "img/FotB-Logo.png")
  )
  // Lazily loads every resource: Array[String] -> Animation, String -> Image,
  // both scaled by the game area's scale factor.
  lazy val images: Map[Int, Drawable] = imageMap.map { x =>
    val (id, loc) = x
    val img = loc match {
      case xs: Array[String] => Animation(xs, eshe.state.ui.GameArea.scaleFactor)
      case str: String => Image(str, eshe.state.ui.GameArea.scaleFactor)
    }
    id -> img
  }
  // The background is drawn unscaled.
  images(BackgroundID).scaleFactor = 1.0f
}
| Fellowship-of-the-Bus/Elder-Strolls-Hallows-Eve | src/main/scala/game/IDMap.scala | Scala | apache-2.0 | 6,953 |
import com.ptsoftware.phillyshowchecker.service._
import org.scalatra._
import javax.servlet.ServletContext
// Scalatra lifecycle hook: wires the application's servlet into the container.
class ScalatraBootstrap extends LifeCycle {
  // Mount the service servlet at the web-app root so it handles every request path.
  override def init(context: ServletContext) {
    context.mount(new PhillyShowCheckerServiceServlet, "/*")
  }
}
| pat-thomas/philly-show-checker | service/src/main/scala/ScalatraBootstrap.scala | Scala | epl-1.0 | 267 |
// Copyright 2015 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.spindle.common.thrift.bson
import io.fsq.spindle.common.thrift.base.TTransportInputStream
import java.io.InputStream
import java.lang.UnsupportedOperationException
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.Stack
import org.apache.thrift.{TBaseHelper, TException}
import org.apache.thrift.protocol.{TField, TList, TMap, TMessage, TProtocol, TProtocolFactory, TSet, TStruct, TType}
import org.apache.thrift.transport.TTransport
object TBSONBinaryProtocol {
  // Are only read, never written, so can be used concurrently.
  val ANONYMOUS_MESSAGE = new TMessage()
  val ANONYMOUS_STRUCT = new TStruct()
  val NO_MORE_FIELDS = new TField("", TType.STOP, 0)
  // Document keys under which Mongo reports command errors.
  val ERROR_KEY = "$err"
  val CODE_KEY = "code"
  // Builds a fresh protocol instance reading from the given Thrift transport.
  class ReaderFactory extends TProtocolFactory {
    def getProtocol(trans: TTransport): TProtocol = {
      val stream = new TTransportInputStream(trans)
      val protocol = new TBSONBinaryProtocol()
      protocol.setSource(stream)
      protocol
    }
  }
}
/**
* Thrift protocol to decode binary bson
* Not thread safe, but can be reused assuming prior full successful reads
* use setSource(InputStream) before passing into read method
*
* This only implements the read methods. Write methods will throw UnsupportedOperationException
*/
class TBSONBinaryProtocol() extends TProtocol(null) {
  // Byte source for the BSON document; must be supplied via setSource before reading.
  private var inputStream: InputStream = null
  // used as intermediate copy buffer for field names and strings
  private val buffer = new ByteStringBuilder(32)
  // Mirrors BSON nesting: one ReadState per open struct/map/list.
  private val readStack = new Stack[ReadState]
  // Mongo error info captured as a side effect of reading $err/code fields.
  private var _errorMessage: String = null
  private var _errorCode: Int = 0
  def errorMessage = _errorMessage
  def errorCode = _errorCode
  /** Resets all per-document state and points this protocol at a new input stream. */
  def setSource(is: InputStream): TBSONBinaryProtocol = {
    _errorMessage = null
    _errorCode = 0
    readStack.clear()
    inputStream = is
    this
  }
  // Validates that `readState` is non-null and of the expected concrete type.
  private def checkReadState[T <: ReadState](readState: ReadState, clazz: Class[T]): T = {
    if (readState == null) {
      throw new TException("Internal state is null. Possibly readXEnd unpaired with readXBegin.")
    }
    if (!clazz.isInstance(readState)) {
      throw new TException(s"Internal state error. Expected ${clazz} but was ${readState.getClass}")
    }
    readState.asInstanceOf[T]
  }
  private def popReadState[T <: ReadState](clazz: Class[T]): T = {
    checkReadState(readStack.pop(), clazz)
  }
  private def currentState(): ReadState = {
    val readState = readStack.peek()
    if (readState == null) {
      throw new TException("Internal state is null.")
    }
    readState
  }
  /** Maps a BSON element type to the closest Thrift wire type. */
  def getTType(bsonType: Byte): Byte = bsonType match {
    case BSON.EOO => TType.STOP
    case BSON.NUMBER => TType.DOUBLE
    case BSON.STRING => TType.STRING
    case BSON.OBJECT => TType.STRUCT
    case BSON.ARRAY => TType.LIST
    case BSON.BINARY => TType.STRING
    case BSON.UNDEFINED => TType.VOID
    case BSON.OID => TType.STRING
    case BSON.BOOLEAN => TType.BOOL
    case BSON.DATE => TType.I64
    case BSON.NULL => TType.VOID
    case BSON.REGEX => TType.STRING
    case BSON.REF => TType.STRING
    case BSON.CODE => TType.STRING
    case BSON.SYMBOL => TType.STRING
    case BSON.CODE_W_SCOPE => TType.STRING
    case BSON.NUMBER_INT => TType.I32
    case BSON.TIMESTAMP => TType.I64
    case BSON.NUMBER_LONG => TType.I64
    // Fix: the match was non-exhaustive, so an unrecognized BSON type escaped as a
    // raw scala.MatchError; report it as a protocol error like the other failures.
    case unknown => throw new TException("Unsupported BSON type: " + unknown)
  }
  /**
   * Reading methods.
   */
  def readMessageBegin(): TMessage = {
    TBSONBinaryProtocol.ANONYMOUS_MESSAGE
  }
  def readMessageEnd(): Unit = {}
  def readStructBegin(): TStruct = {
    // The outermost struct reads straight from the stream; nested structs are
    // delegated to the enclosing state.
    if (readStack.size == 0) {
      readStack.push(new StructReadState(inputStream, buffer))
    } else {
      readStack.push(currentState().readStruct())
    }
    TBSONBinaryProtocol.ANONYMOUS_STRUCT
  }
  def readStructEnd(): Unit = {
    val readState = popReadState(classOf[StructReadState])
    readState.readEnd()
  }
  def readFieldBegin(): TField = {
    val readState = checkReadState(readStack.peek(), classOf[StructReadState])
    // Skip BSON nulls entirely: Thrift has no null value, so null fields are
    // treated as absent.
    def findNonNullField: TField = {
      if (readState.hasAnotherField) {
        readState.readFieldType()
        if (readState.lastFieldType == BSON.NULL) {
          findNonNullField
        } else {
          new TField(readState.lastFieldName, getTType(readState.lastFieldType), -1)
        }
      } else {
        TBSONBinaryProtocol.NO_MORE_FIELDS
      }
    }
    findNonNullField
  }
  def readFieldEnd(): Unit = {}
  def readMapBegin(): TMap = {
    val mapReadState = currentState().readMap()
    readStack.push(mapReadState)
    new TMap(TType.STRING, getTType(mapReadState.lastFieldType), mapReadState.itemCount)
  }
  def readMapEnd(): Unit = {
    popReadState(classOf[MapReadState])
  }
  def readListBegin(): TList = {
    val listReadState = currentState().readList()
    readStack.push(listReadState)
    new TList(getTType(listReadState.lastFieldType), listReadState.itemCount)
  }
  def readListEnd(): Unit = {
    popReadState(classOf[ListReadState])
  }
  def readSetBegin(): TSet = {
    // Sets share the list representation in BSON (arrays).
    val listReadState = currentState().readList()
    readStack.push(listReadState)
    new TSet(getTType(listReadState.lastFieldType), listReadState.itemCount)
  }
  def readSetEnd(): Unit = {
    popReadState(classOf[ListReadState])
  }
  def readBool(): Boolean = {
    currentState().readBool()
  }
  def readByte(): Byte = {
    // BSON has no single-byte numeric type; bytes arrive as int32.
    currentState().readI32().toByte
  }
  def readI16(): Short = {
    currentState().readI32().toShort
  }
  def readI32(): Int = {
    val readState = currentState()
    val intValue = readState.readI32()
    // hack to keep track of mongo error
    if (_errorMessage != null && readState.lastFieldName == TBSONBinaryProtocol.CODE_KEY) {
      _errorCode = intValue
    }
    intValue
  }
  def readI64(): Long = {
    val readState = currentState()
    // be lenient here to handle legacy records written out in i32
    if (readState.lastFieldType == BSON.NUMBER_INT) {
      readState.readI32()
    } else {
      readState.readI64()
    }
  }
  def readDouble(): Double = {
    currentState().readDouble()
  }
  def readString(): String = {
    val readState = currentState()
    // A string field unknown to an older version of a struct will be serialized as binary.
    if (readState.lastFieldType == BSON.BINARY) {
      new String(TBaseHelper.byteBufferToByteArray(readState.readBinary()), StandardCharsets.UTF_8)
    } else {
      readState.readString()
    }
  }
  def readBinary(): ByteBuffer = {
    val readState = currentState()
    // Thrift will skip string fields it doesn't know about using readBinary
    if (readState.lastFieldType == BSON.STRING) {
      val strValue = readState.readString()
      // hack to keep track of mongo error
      if (readState.lastFieldName == TBSONBinaryProtocol.ERROR_KEY) {
        _errorMessage = strValue
      }
      ByteBuffer.wrap(strValue.getBytes(StandardCharsets.UTF_8))
    } else {
      readState.readBinary()
    }
  }
  /**
   * Writing methods. This protocol is read-only; all writers fail fast.
   */
  def writeMessageBegin(message: TMessage) = throw new UnsupportedOperationException()
  def writeMessageEnd(): Unit = throw new UnsupportedOperationException()
  def writeStructBegin(struct: TStruct): Unit = throw new UnsupportedOperationException()
  def writeStructEnd(): Unit = throw new UnsupportedOperationException()
  def writeFieldBegin(field: TField): Unit = throw new UnsupportedOperationException()
  def writeFieldEnd(): Unit = throw new UnsupportedOperationException()
  def writeFieldStop(): Unit = throw new UnsupportedOperationException()
  def writeMapBegin(map: TMap): Unit = throw new UnsupportedOperationException()
  def writeMapEnd(): Unit = throw new UnsupportedOperationException()
  def writeListBegin(list: TList): Unit = throw new UnsupportedOperationException()
  def writeListEnd(): Unit = throw new UnsupportedOperationException()
  def writeSetBegin(set: TSet): Unit = throw new UnsupportedOperationException()
  def writeSetEnd(): Unit = throw new UnsupportedOperationException()
  def writeBool(b: Boolean): Unit = throw new UnsupportedOperationException()
  def writeByte(b: Byte): Unit = throw new UnsupportedOperationException()
  def writeI16(i16: Short): Unit = throw new UnsupportedOperationException()
  def writeI32(i32: Int): Unit = throw new UnsupportedOperationException()
  def writeI64(i64: Long): Unit = throw new UnsupportedOperationException()
  def writeDouble(dub: Double): Unit = throw new UnsupportedOperationException()
  def writeString(str: String): Unit = throw new UnsupportedOperationException()
  def writeBinary(buf: ByteBuffer): Unit = throw new UnsupportedOperationException()
}
| foursquare/fsqio | src/jvm/io/fsq/spindle/common/thrift/bson/TBSONBinaryProtocol.scala | Scala | apache-2.0 | 8,690 |
package dotty.tools.dotc
package transform
import core._
import Flags._, Symbols._, Contexts._, Scopes._, Decorators._
import collection.mutable
import collection.immutable.BitSet
import scala.annotation.tailrec
/** A module that can produce a kind of iterator (`Cursor`),
* which yields all pairs of overriding/overridden symbols
* that are visible in some baseclass, unless there's a parent class
* that already contains the same pairs.
*
* Adapted from the 2.9 version of OverridingPairs. The 2.10 version is IMO
* way too unwieldy to be maintained.
*/
object OverridingPairs {
  /** The cursor class
   * @param base the base class that contains the overriding pairs
   */
  class Cursor(base: Symbol)(implicit ctx: Context) {
    private val self = base.thisType
    /** Symbols to exclude: Here these are constructors and private locals.
     * But it may be refined in subclasses.
     */
    protected def exclude(sym: Symbol): Boolean = !sym.memberCanMatchInheritedSymbols
    /** The parents of base that are checked when deciding whether an overriding
     * pair has already been treated in a parent class.
     * This may be refined in subclasses. @see Bridges for a use case.
     */
    protected def parents: Array[Symbol] = base.info.parents.toArray map (_.typeSymbol)
    /** Does `sym1` match `sym2` so that it qualifies as overriding.
     * Types always match. Term symbols match if their membertypes
     * relative to <base>.this do
     */
    protected def matches(sym1: Symbol, sym2: Symbol): Boolean =
      sym1.isType || self.memberInfo(sym1).matches(self.memberInfo(sym2))
    /** The symbols that can take part in an overriding pair */
    private val decls = {
      val decls = newScope
      // fill `decls` with overriding shadowing overridden */
      def fillDecls(bcs: List[Symbol], deferred: Boolean): Unit = bcs match {
        case bc :: bcs1 =>
          // recurse first so that decls from more-derived classes are entered last
          fillDecls(bcs1, deferred)
          var e = bc.info.decls.lastEntry
          while (e != null) {
            if (e.sym.is(Deferred) == deferred && !exclude(e.sym))
              decls.enter(e.sym)
            e = e.prev
          }
        case nil =>
      }
      // first, deferred (this will need to change if we change lookup rules!
      fillDecls(base.info.baseClasses, deferred = true)
      // then, concrete.
      fillDecls(base.info.baseClasses, deferred = false)
      decls
    }
    // For each base class, the set of direct-parent indices that derive from it.
    private val subParents = {
      val subParents = newMutableSymbolMap[BitSet]
      for (bc <- base.info.baseClasses)
        subParents(bc) = BitSet(parents.indices.filter(parents(_).derivesFrom(bc)): _*)
      subParents
    }
    // True if some direct parent of `base` derives from both classes, i.e. the
    // pair was already visible (and checked) in that parent.
    private def hasCommonParentAsSubclass(cls1: Symbol, cls2: Symbol): Boolean =
      (subParents(cls1) intersect subParents(cls2)).nonEmpty
    /** The scope entries that have already been visited as overridden
     * (maybe excluded because of hasCommonParentAsSubclass).
     * These will not appear as overriding
     */
    private val visited = new mutable.HashSet[Symbol]
    /** The current entry candidate for overriding
     */
    private var curEntry = decls.lastEntry
    /** The current entry candidate for overridden */
    private var nextEntry = curEntry
    /** The current candidate symbol for overriding */
    var overriding: Symbol = _
    /** If not null: The symbol overridden by overriding */
    var overridden: Symbol = _
    //@M: note that next is called once during object initialization
    final def hasNext: Boolean = nextEntry ne null
    /** @post
     * curEntry = the next candidate that may override something else
     * nextEntry = curEntry
     * overriding = curEntry.sym
     */
    private def nextOverriding(): Unit = {
      // advance past symbols already seen as overridden
      @tailrec def loop(): Unit =
        if (curEntry ne null) {
          overriding = curEntry.sym
          if (visited.contains(overriding)) {
            curEntry = curEntry.prev
            loop()
          }
        }
      loop()
      nextEntry = curEntry
    }
    /** @post
     * hasNext = there is another overriding pair
     * overriding = overriding member of the pair, provided hasNext is true
     * overridden = overridden member of the pair, provided hasNext is true
     */
    @tailrec final def next(): Unit =
      if (nextEntry ne null) {
        nextEntry = decls.lookupNextEntry(nextEntry)
        if (nextEntry ne null)
          try {
            overridden = nextEntry.sym
            if (overriding.owner != overridden.owner && matches(overriding, overridden)) {
              visited += overridden
              if (!hasCommonParentAsSubclass(overriding.owner, overridden.owner)) return
            }
          }
          catch {
            case ex: TypeError =>
              // See neg/i1750a for an example where a cyclic error can arise.
              // The root cause in this example is an illegal "override" of an inner trait
              ctx.error(ex, base.sourcePos)
          }
        else {
          // exhausted candidates for the current overriding symbol; move to the next one
          curEntry = curEntry.prev
          nextOverriding()
        }
        next()
      }
    nextOverriding()
    next()
  }
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/OverridingPairs.scala | Scala | apache-2.0 | 5,132 |
// NOTE(review): `x` is declared String but initialized with the Int literal 3.
// Given the path (sbt source-dependencies/implicit test fixture), this presumably
// compiles only via an implicit Int => String conversion provided by `A` —
// confirm against A.scala before "fixing".
object B extends A
{
  val x: String = 3
} | jamesward/xsbt | sbt/src/sbt-test/source-dependencies/implicit/B.scala | Scala | bsd-3-clause | 41 |
package org.littlewings.lucene
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute
import org.codelibs.neologd.ipadic.lucene.analysis.ja.JapaneseAnalyzer
import org.scalatest.{FunSpec, Matchers}
/** Verifies that the NEologd-backed Kuromoji analyzer segments dictionary phrases. */
class KuromojiCodelibsNeologdSpec extends FunSpec with Matchers {
  describe("Kuromoji Codelibs Neologd Spec") {
    it("simple usage.") {
      val input = "ゲスの極み乙女。もモーニング娘。も問題なく分割できます。"
      val analyzer = new JapaneseAnalyzer
      val stream = analyzer.tokenStream("", input)
      val termAttr = stream.addAttribute(classOf[CharTermAttribute])
      stream.reset()
      // Drain the token stream, collecting each surface form in order.
      val builder = Vector.newBuilder[String]
      while (stream.incrementToken()) {
        builder += termAttr.toString
      }
      val terms = builder.result()
      stream.end()
      stream.close()
      terms should contain inOrderOnly("ゲスの極み乙女。", "モーニング娘。", "問題", "分割")
    }
  }
}
| kazuhira-r/lucene-examples | lucene-kuromoji-codelibs-neologd/src/test/scala/org/littlewings/lucene/KuromojiCodelibsNeologdSpec.scala | Scala | mit | 1,017 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/**
* Singleton object providing an <code>apply</code> method for the ScalaTest shell and a
* <code>main</code> method for ScalaTest's simple runner.
*
* <p>
* The <code>apply</code> method can be used in the ScalaTest Shell (its DSL for the Scala
* interpreter) in this way:
* </p>
*
* <pre style="background-color: #2c415c; padding: 10px">
* <span style="color: white">scala> import org.scalatest._
* import org.scalatest._
*
* scala> class ArithmeticSuite extends FunSuite with ShouldMatchers {
* | test("addition works") {
* | 1 + 1 should equal (2)
* | }
* | ignore("subtraction works") {
* | 1 - 1 should equal (0)
* | }
* | test("multiplication works") {
* | 1 * 1 should equal (2)
* | }
* | test("division works") (pending)
* | }
* defined class ArithmeticSuite
*
* scala> run(new ArithmeticSuite)</span>
* <span style="color: #00cc00">ArithmeticSuite:
* - addition works</span>
* <span style="color: #cfc923">- subtraction works !!! IGNORED !!!</span>
* <span style="color: #dd2233">- multiplication works *** FAILED ***
* 1 did not equal 2 (<console>:16)</span>
* <span style="color: #cfc923">- division works (pending)</span>
* </pre>
*
* <p>
* The last command is calling the <code>apply</code> method on the <code>run</code> singleton object. In other
* words, you could alternatively call it this way:
* </p>
*
* <pre style="background-color: #2c415c; padding: 10px">
* <span style="color: white">scala> run.apply(new ArithmeticSuite)</span>
* <span style="color: #00cc00">ArithmeticSuite:
* - addition works</span>
* <span style="color: #cfc923">- subtraction works !!! IGNORED !!!</span>
* <span style="color: #dd2233">- multiplication works *** FAILED ***
* 1 did not equal 2 (<console>:16)</span>
* <span style="color: #cfc923">- division works (pending)</span>
* </pre>
*
* <p>
* The <code>run</code> singleton object also serves a different purpose. Its <code>main</code> method
* allows users to "run" <code>run</code> as a Scala application. ScalaTest's <code>Runner</code> application is very
* powerful, but doesn't provide the simplest out-of-box experience for people trying ScalaTest for the first time. For example,
* to run an <code>ExampleSpec</code> in the unnamed package from the directory where it is compiled with
* <code>Runner</code>'s standard out reporter requires this command:
* </p>
*
* <pre style="background-color: #2c415c; padding: 10px">
* <span style="color: white">$ scala -cp scalatest-RELEASE.jar org.scalatest.tools.Runner -R . -o -s ExampleSpec</span>
* </pre>
*
* <p>
* Running it with the <code>run</code> application is simpler:
* </p>
*
* <pre style="background-color: #2c415c; padding: 10px">
* <span style="color: white">$ scala -cp scalatest-RELEASE.jar org.scalatest.run ExampleSpec</span>
* </pre>
*
*
*/
object run {
  // Shared shell used by apply(); created once with the default configuration.
  private val defaultShell = ShellImpl()
  /**
   * Run the suites whose fully qualified names are passed as arguments.
   *
   * <p>
   * This method will invoke the main method of <code>org.scalatest.tools.Runner</code>, passing
   * in <code>"-R ."</code> to set the runpath to the current directory, <code>"-o"</code> to select the
   * standard out reporter, and each argument preceded by <code>-s</code>. For example, this <code>run</code>
   * command:
   * </p>
   *
   * <pre style="background-color: #2c415c; padding: 10px">
   * <span style="color: white">$ scala -cp scalatest-RELEASE.jar org.scalatest.run ExampleSpec</span>
   * </pre>
   *
   * <p>
   * Has the same effect as this <code>Runner</code> command:
   * </p>
   *
   * <pre style="background-color: #2c415c; padding: 10px">
   * <span style="color: white">$ scala -cp scalatest-RELEASE.jar org.scalatest.tools.Runner -R . -o -s ExampleSpec</span>
   * </pre>
   *
   * @param args
   */
  def main(args: Array[String]) {
    // Each suite name S expands to the pair "-s", S on the Runner command line.
    tools.Runner.main(Array("-R", ".", "-o") ++ args.flatMap(s => Array("-s", s)))
  }
  /**
   * Run the passed suite, optionally passing in a test name and config map.
   *
   * <p>
   * This method will invoke <code>execute</code> on the passed <code>suite</code>, passing in
   * the specified (or default) <code>testName</code> and <code>configMap</code> and the configuration values
   * passed to this <code>Shell</code>'s constructor (<code>colorPassed</code>, <code>durationsPassed</code>, <code>shortStacksPassed</code>,
   * <code>fullStacksPassed</code>, and <code>statsPassed</code>).
   * </p>
   */
  def apply(suite: Suite, testName: String = null, configMap: Map[String, Any] = Map()) {
    defaultShell.run(suite, testName, configMap)
  }
}
| hubertp/scalatest | src/main/scala/org/scalatest/run.scala | Scala | apache-2.0 | 5,333 |
package si.gto76.comp
import scala.io.Source
import sys.exit
import scala.io.BufferedSource
/**
 * Toy two-pass-free assembler: translates a text program (one "COMMAND operand"
 * per line) into a 15-word x 8-bit boolean memory image. Instructions fill
 * memory from address 0 upwards; variables and literal constants are allocated
 * from the top of memory downwards.
 */
object Assembler {
  val MEMORY_LENGTH = 15
  val MEMORY_WIDTH = 8
  val TEST_FILE = "/fibbAsmb"
  val TEST_URL = getClass.getResource(TEST_FILE)
  val TEST = scala.io.Source.fromURL(TEST_URL, "UTF-8")

  // NOTE(review): allocated once per JVM, so a second call to assemble() starts
  // from the output of the first — confirm single-shot usage is intended.
  var binaryCode = getEmptyMemory

  /** Assembles the program named in args(0) (or the bundled test program) and
   *  prints the resulting binary image; exits with code 5 if the variables
   *  allocated from the top collide with the instruction words. */
  def assemble(args: Array[String]) {
    val file: BufferedSource = getBufferredFile(args)
    val assemblyStatements: Array[String] = readAssemblyFromFile(file)
    var i = 0
    for (line <- assemblyStatements) {
      val tokens = line.split(' ')
      val instructionCode = Commands.get(tokens(0))
      val addressCode = Addresses.get(tokens(1))
      val sentenceCode = instructionCode ++ addressCode
      binaryCode(i) = sentenceCode
      i = i + 1
    }
    println("\nBinary code:")
    print(Util.getString(binaryCode))
    if (Addresses.getNumOfAdresses + i > MEMORY_LENGTH) {
      println("OUT OF MEMORY")
      exit(5)
    }
  }

  /** Opens the file named in args(0), or falls back to the bundled test program. */
  def getBufferredFile(args: Array[String]): BufferedSource = {
    if (args.length > 0) {
      val filename = args(0)
      val fileDoesntExist = !(new java.io.File(filename).exists())
      if (fileDoesntExist) {
        println("Input file " + filename + " doesn't exist.")
        // Fix: was a bare `exit`, which terminated with status 0 (success) on a
        // fatal error; use a nonzero code like the other error paths.
        exit(2)
      }
      scala.io.Source.fromFile(filename, "UTF-8")
    } else {
      TEST
    }
  }

  /** Builds a zeroed MEMORY_LENGTH x MEMORY_WIDTH boolean memory image. */
  def getEmptyMemory() = Array.fill(MEMORY_LENGTH)(new Array[Boolean](MEMORY_WIDTH))

  /** Stores a full memory word at the given absolute address. */
  def writeToAddress(address: Int, value: Array[Boolean]) = {
    binaryCode(address) = value
  }

  /** Reads all program lines, echoing them to stdout as they are consumed. */
  def readAssemblyFromFile(file: BufferedSource): Array[String] = {
    val data = collection.mutable.ListBuffer[String]()
    println("Assembly code:")
    for (line <- file.getLines()) {
      println(line)
      data += line
    }
    data.toArray
  }

  /** Symbol table: resolves operand tokens to memory addresses. */
  object Addresses {
    private val addresses = collection.mutable.Map[String, Int]()

    // Returns the slot bound to `name`, allocating the next free slot from the
    // top of memory downwards on first use.
    private def getAddress(name: String): Int =
      addresses.getOrElseUpdate(name, (MEMORY_LENGTH - 1) - addresses.size)

    def getNumOfAdresses = addresses.size

    // a19 -> absolute address
    // 10 -> pointer to value (not logical for WRITE, JUMP, POINT, BIGGER
    // v1 -> variable (not logical for JUMP, POINT, BIGGER
    def get(adrStr: String): Array[Boolean] = {
      // ABSOLUTE ADDRESS:
      if (adrStr.head == 'a') {
        val adr = adrStr.drop(1)
        Util.getBoolNib(adr.toInt)
      } // VARIABLE:
      else if (adrStr.head == 'v') {
        val adrInt = getAddress(adrStr)
        Util.getBoolNib(adrInt)
      } // INT VALUE: allocate a slot, store the literal there, return its address
      else if (adrStr.head.isDigit) {
        val intVal = adrStr.toInt
        if (intVal < 0 || intVal > 255) {
          println("ERROR 5: Integer value out of bounds")
          exit(5)
        }
        val adrInt = getAddress(adrStr)
        writeToAddress(adrInt, Util.getBool(intVal))
        Util.getBoolNib(adrInt)
      } else {
        println("ERROR 4: Wrong address")
        exit(4)
      }
    }
  }

  /** Instruction set: maps mnemonic names to 4-bit opcodes. */
  object Commands {
    def get(comStr: String): Array[Boolean] =
      commandList.find(_.name == comStr) match {
        case Some(com) => Util.getBoolNib(com.id)
        case None =>
          print("ERROR 01: wrong command.")
          exit(1)
      }

    val commandList = Array(
      new Command("READ", 0),
      new Command("WRITE", 1),
      new Command("ADD", 2),
      new Command("MINUS", 3),
      new Command("JUMP", 4),
      new Command("POINT", 5),
      new Command("BIGGER", 6),
      new Command("SMALLER", 7))

    class Command(val name: String, val id: Int) {}
  }
}
| gto76/comp | src/main/scala/si/gto76/comp/Assembler.scala | Scala | mit | 3,849 |
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.samples.lavasuit
import com.easyforger.base.EasyForger
import com.easyforger.util.Version
import net.minecraft.init.{Items, SoundEvents}
import net.minecraft.inventory.EntityEquipmentSlot
import net.minecraftforge.common.util.EnumHelper
import net.minecraftforge.fml.common.Mod
import net.minecraftforge.fml.common.Mod.EventHandler
import net.minecraftforge.fml.common.event.FMLInitializationEvent
@Mod(modid = LavaSuitMod.modId, name = "EasyForger Armor LavaSuit Mod", version = Version.version, modLanguage = "scala")
object LavaSuitMod extends EasyForger {
  final val modId = "easyforger_lavasuit"
  // Parameters for the custom "lavasuit" armor material registered below.
  val materialName = "lavasuit"
  val textureName = materialName
  val durability = 20
  // Damage-reduction points per armor piece; assumed order helmet/chest/legs/boots
  // per Forge's ArmorMaterial contract — TODO confirm.
  val armorReductions = Array(4, 9, 7, 4)
  val enchantability = 10
  val lavaMaterial = EnumHelper.addArmorMaterial(
    materialName, textureName, durability, armorReductions, enchantability, SoundEvents.ITEM_ARMOR_EQUIP_IRON, 0)
  // One armor item per equipment slot, all sharing the lavasuit material.
  val lavaHelmet = new LavaSuitItemArmor(lavaMaterial, EntityEquipmentSlot.HEAD)
  val lavaChestPlate = new LavaSuitItemArmor(lavaMaterial, EntityEquipmentSlot.CHEST)
  val lavaLeggings = new LavaSuitItemArmor(lavaMaterial, EntityEquipmentSlot.LEGS)
  val lavaBoots = new LavaSuitItemArmor(lavaMaterial, EntityEquipmentSlot.FEET)
  /** Registers the four armor items and their crafting recipes at mod init time. */
  @EventHandler
  def init(event: FMLInitializationEvent): Unit = {
    lavaHelmet.register()
    lavaChestPlate.register()
    lavaLeggings.register()
    lavaBoots.register()
    // Recipe grids: presumably 'i' = iron ingot, 'l' = lava bucket, '.' = empty,
    // matching the ingredient list order — confirm against the EasyForger withShape DSL.
    crafting(
      Items.IRON_INGOT + Items.LAVA_BUCKET to lavaHelmet withShape
        """
          |iii
          |ili
          |...
        """.stripMargin,
      Items.IRON_INGOT + Items.LAVA_BUCKET to lavaChestPlate withShape
        """
          |ili
          |iii
          |iii
        """.stripMargin,
      Items.IRON_INGOT + Items.LAVA_BUCKET to lavaLeggings withShape
        """
          |iii
          |ili
          |i.i
        """.stripMargin,
      Items.IRON_INGOT + Items.LAVA_BUCKET to lavaBoots withShape
        """
          |...
          |i.i
          |ili
        """.stripMargin
    )
  }
}
| easyforger/easyforger | mods/src/main/scala/com/easyforger/samples/lavasuit/LavaSuitMod.scala | Scala | gpl-3.0 | 2,267 |
package com.cloudray.scalapress.plugin.payment.worldpay.selectjunior
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.plugin.payments.worldpay.selectjunior.{WorldpaySelectJuniorPlugin, WorldpaySelectJuniorProcessor}
import com.cloudray.scalapress.payments.Purchase
/** @author Stephen Samuel */
// Unit tests for the Worldpay Select Junior payment processor: callback parsing,
// enablement rules, and the outgoing request parameters built from a Purchase.
class WorldpaySelectJuniorTest extends FunSuite with MockitoSugar with OneInstancePerTest {
  val plugin = new WorldpaySelectJuniorPlugin
  val processor = new WorldpaySelectJuniorProcessor(plugin)
  // Fixture purchase with fixed totals/URLs used by the params tests below.
  val purchase = new Purchase {
    def successUrl: String = "http://coldplay.com/successUrl.com"
    def failureUrl: String = "http://coldplay.com/failureUrl.com"
    def accountName: String = "sammy"
    def accountEmail: String = "snape@hp.com"
    def total: Int = 58233
    def uniqueIdent: String = "56789"
    def callback = "Monkey"
    def paymentDescription: String = "some payment"
  }
  test("given a parameter map with valid paypal fields then a transaction is created") {
    val params = Map("transStatus" -> "Y",
      "callbackPW" -> "letmein",
      "cardType" -> "visa",
      "transId" -> "73965qweqwe128390K",
      "authAmount" -> "20.00",
      "rawAuthCode" -> "0a1d74c2-f809-4815-a53b-60a28e8da6a0")
    plugin.callbackPassword = "letmein"
    val tx = processor.createTransaction(params).get
    assert(tx.transactionId === "73965qweqwe128390K")
    // authAmount is in pounds; the transaction stores pence.
    assert(tx.amount === 2000)
    assert(tx.authCode === "0a1d74c2-f809-4815-a53b-60a28e8da6a0")
    assert(tx.status === "Y")
  }
  test("the processor is enabled iff the plugin accountId is not null") {
    plugin.accountId = null
    assert(!plugin.enabled)
    plugin.accountId = "sammy"
    assert(plugin.enabled)
  }
  test("when set to live then the live url is returned") {
    plugin.live = true
    assert("https://select.worldpay.com/wcc/purchase" === processor.paymentUrl)
  }
  // cartId carries both the callback name and the purchase identifier, joined by '-'.
  test("processor sets callback info into the cartId field") {
    val params = processor.params("coldplay.com", purchase)
    assert(params("cartId") === "Monkey-56789")
  }
  test("processor sets description from purchase") {
    val params = processor.params("coldplay.com", purchase)
    assert("some payment" === params("desc"))
  }
  test("processor sets amount from purchase") {
    val params = processor.params("coldplay.com", purchase)
    assert("582.33" === params("amount"))
  }
  test("processor sets name from purchase") {
    val params = processor.params("coldplay.com", purchase)
    assert("sammy" === params("name"))
  }
  test("processor sets email from purchase") {
    val params = processor.params("coldplay.com", purchase)
    assert("snape@hp.com" === params("email"))
  }
  //  test("processor sets failure url from purchase") {
  //    val params = processor.params("coldplay.com", purchase)
  //    assert("http://coldplay.com/successUrl.com" === params("return"))
  //  }
  //
  //  test("processor sets success url from purchase") {
  //    val params = processor.params("coldplay.com", purchase)
  //    assert("http://coldplay.com/failureUrl.com" === params("cancel_return"))
  //  }
  //  test("callback result parses the custom value correctly") {
  //    val result = processor
  //      .callback(Map("custom" -> "Order:567",
  //      "txn_id" -> "6346aa",
  //      "payment_status" -> "Completed",
  //      "payer_status" -> "unverified",
  //      "mc_currency" -> "GBP",
  //      "payment_type" -> "instant"))
  //    assert(result.get.uniqueId === "567")
  //    assert(result.get.callback === "Order")
  //  }
}
| vidyacraghav/scalapress | src/test/scala/com/cloudray/scalapress/plugin/payment/worldpay/selectjunior/WorldpaySelectJuniorTest.scala | Scala | apache-2.0 | 3,910 |
package ml.sparkling.graph.loaders.graphml
/**
* Created by Roman Bartusiak (roman.bartusiak@pwr.edu.pl http://riomus.github.io).
*/
/**
 * Maps GraphML `attr.type` names ("string", "boolean", "int", "long", "float",
 * "double", case-insensitive) to handlers that convert the XML string value into
 * the corresponding Scala type.
 */
object GraphMLTypes {
  /** Converts a raw GraphML attribute value (always a string in XML) to a typed value. */
  trait TypeHandler extends Serializable{
    def apply(value:String):Any
  }
  // Built once instead of on every apply() call (the original rebuilt this Map per lookup).
  private val typeHandlerProvider = Map[String, TypeHandler](
    "string" -> StringHanlder,
    "boolean" -> BooleanHandler,
    "int" -> IntHandler,
    "long" -> LongHandler,
    "float" -> FloatHandler,
    "double" -> DoubleHandler)
  /**
   * Returns the handler for the given GraphML type name (case-insensitive).
   * As before, throws NoSuchElementException for unsupported type names.
   */
  def apply(typeName:String):TypeHandler=
    typeHandlerProvider(typeName.toLowerCase)
  // Identity conversion: GraphML values are already strings.
  object StringHanlder extends TypeHandler{
    override def apply(value: String): String = value
  }
  object BooleanHandler extends TypeHandler{
    override def apply(value: String): Boolean = value.toBoolean
  }
  object IntHandler extends TypeHandler{
    override def apply(value: String): Int = value.toInt
  }
  object LongHandler extends TypeHandler{
    override def apply(value: String): Long = value.toLong
  }
  object FloatHandler extends TypeHandler{
    override def apply(value: String): Float = value.toFloat
  }
  object DoubleHandler extends TypeHandler{
    override def apply(value: String): Double = value.toDouble
  }
}
| sparkling-graph/sparkling-graph | loaders/src/main/scala/ml/sparkling/graph/loaders/graphml/GraphMLTypes.scala | Scala | bsd-2-clause | 1,182 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert
import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.{Collections, ServiceLoader}
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.convert2
import org.locationtech.geomesa.convert2.{AbstractConverter, ConverterConfig}
import org.locationtech.geomesa.utils.collection.{CloseableIterator, SelfClosingIterator}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypeLoader
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* Simplified API to build SimpleFeatureType converters
*/
@deprecated("Replaced with org.locationtech.geomesa.convert2.SimpleFeatureConverter")
object SimpleFeatureConverters extends LazyLogging {
  import scala.collection.JavaConverters._
  // Converter factories discovered via ServiceLoader at class-initialization time:
  // v2 factories and the legacy v1 factories, tried in that order by build().
  private val factories = ServiceLoader.load(classOf[convert2.SimpleFeatureConverterFactory]).asScala.toList
  private val factoriesV1 = ServiceLoader.load(classOf[SimpleFeatureConverterFactory[_]]).asScala.toList
  logger.debug(s"Found ${factories.size + factoriesV1.size} factories: " +
      (factories ++ factoriesV1).map(_.getClass.getName).mkString(", "))
  // Loads the SimpleFeatureType by name, then delegates to the (sft, converterName) overload.
  @deprecated("Replaced with org.locationtech.geomesa.convert2.SimpleFeatureConverter.apply")
  def build[I](typeName: String, converterName: String): SimpleFeatureConverter[I] = {
    val sft = SimpleFeatureTypeLoader.sftForName(typeName)
        .getOrElse(throw new IllegalArgumentException(s"Unable to load SFT for typeName $typeName"))
    build[I](sft, converterName)
  }
  // Loads the converter config by name, then delegates to the (sft, config) overload.
  @deprecated("Replaced with org.locationtech.geomesa.convert2.SimpleFeatureConverter.apply")
  def build[I](sft: SimpleFeatureType, converterName: String): SimpleFeatureConverter[I] =
    ConverterConfigLoader.configForName(converterName).map(build[I](sft, _))
        .getOrElse(throw new IllegalArgumentException(s"Unable to find converter config for converterName $converterName"))
  // Prefers a v1 factory that can process the config; otherwise wraps the first
  // matching v2 AbstractConverter in the old SimpleFeatureConverter API.
  @deprecated("Replaced with org.locationtech.geomesa.convert2.SimpleFeatureConverter.apply")
  def build[I](sft: SimpleFeatureType, converterConf: Config): SimpleFeatureConverter[I] = {
    factoriesV1.find(_.canProcess(converterConf)).map(_.buildConverter(sft, converterConf)) match {
      case Some(c) => c.asInstanceOf[SimpleFeatureConverter[I]]
      case None =>
        val converters = factories.iterator.flatMap(_.apply(sft, converterConf)).collect {
          case c: AbstractConverter[_, _, _] => c.asInstanceOf[AbstractConverter[_ <: ConverterConfig, _, _]]
        }
        if (converters.hasNext) { new SimpleFeatureConverterWrapper(converters.next) } else {
          throw new IllegalArgumentException(s"Cannot find factory for ${sft.getTypeName}")
        }
    }
  }
  /**
   * Wrapper to present new converters under the old API
   *
   * @param converter new converter
   * @tparam I type bounds
   */
  class SimpleFeatureConverterWrapper[I](converter: AbstractConverter[_ <: ConverterConfig, _, _]) extends
      SimpleFeatureConverter[I] {
    // Tracks iterators handed out by this wrapper so close() can release any
    // that are still open; backed by a concurrent map for thread-safe add/remove.
    private val open =
      Collections.newSetFromMap(new ConcurrentHashMap[CloseableIterator[SimpleFeature], java.lang.Boolean])
    // Registers an iterator in `open` and returns a view that deregisters itself on close.
    private def register(iter: CloseableIterator[SimpleFeature]): Iterator[SimpleFeature] = {
      open.add(iter)
      SelfClosingIterator(iter, { iter.close(); open.remove(iter) })
    }
    override lazy val caches: Map[String, EnrichmentCache] =
      converter.config.caches.map { case (k, v) => (k, EnrichmentCache(v)) }
    override def targetSFT: SimpleFeatureType = converter.targetSft
    override def processInput(is: Iterator[I], ec: EvaluationContext): Iterator[SimpleFeature] =
      is.flatMap(processSingleInput(_, ec))
    // Only String and Array[Byte] inputs are supported; anything else throws NotImplementedError.
    override def processSingleInput(i: I, ec: EvaluationContext): Iterator[SimpleFeature] = {
      i match {
        case s: String => register(converter.process(new ByteArrayInputStream(s.getBytes(StandardCharsets.UTF_8)), ec))
        case b: Array[Byte] => register(converter.process(new ByteArrayInputStream(b), ec))
        case _ => throw new NotImplementedError()
      }
    }
    override def process(is: InputStream, ec: EvaluationContext): Iterator[SimpleFeature] =
      register(converter.process(is, ec))
    override def createEvaluationContext(globalParams: Map[String, Any], counter: Counter): EvaluationContext =
      converter.createEvaluationContext(globalParams, Map.empty, counter)
    // Closes every iterator that was registered and never closed by its consumer.
    override def close(): Unit = open.asScala.foreach(_.close())
  }
}
| ddseapy/geomesa | geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert/SimpleFeatureConverters.scala | Scala | apache-2.0 | 5,023 |
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.metamx.tranquility
import com.github.nscala_time.time.Imports._
import com.metamx.common.scala.Predef._
import com.twitter.finagle.http
import org.joda.time.Duration
import scala.language.implicitConversions
package object finagle {
  val TwitterDuration = com.twitter.util.Duration
  type TwitterDuration = com.twitter.util.Duration

  /** Bridges Joda-Time durations into Twitter-util durations (millisecond precision). */
  implicit def jodaDurationToTwitterDuration(duration: Duration): TwitterDuration =
    TwitterDuration.fromMilliseconds(duration.millis)

  /** Logger used for Finagle output; initialised lazily on first access. */
  lazy val FinagleLogger = java.util.logging.Logger.getLogger("finagle")

  /** Builds a POST request for `path` with the default headers applied. */
  def HttpPost(path: String): http.Request = {
    val request = http.Request(http.Method.Post, path)
    decorateRequest(request)
    request
  }

  /** Builds a GET request for `path` with the default headers applied. */
  def HttpGet(path: String): http.Request = {
    val request = http.Request(http.Method.Get, path)
    decorateRequest(request)
    request
  }

  private[this] def decorateRequest(request: http.Request): Unit = {
    // finagle-http doesn't set the host header, and we don't actually know what server we're hitting
    request.headerMap("Host") = "127.0.0.1"
    request.headerMap("Accept") = "*/*"
  }
}
| druid-io/tranquility | core/src/main/scala/com/metamx/tranquility/finagle/package.scala | Scala | apache-2.0 | 1,935 |
package models.mission
import java.util.UUID
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import scala.slick.lifted.ForeignKeyQuery
case class CVMissionPanoStatus(itemId: Int, linkedMissionId: Int, panoId: String, completed: Boolean, lat: Float, lng: Float)
/**
* For computer vision ground truth audit missions, this table tracks the list of panos that need to be audited.
* @param tag
*/
class MissionProgressCVGroundtruthTable(tag: Tag) extends Table[CVMissionPanoStatus](tag, Some("sidewalk"), "mission_progress_cvgroundtruth") {
  def itemId: Column[Int] = column[Int]("item_id", O.PrimaryKey, O.AutoInc)
  def linkedMissionId: Column[Int] = column[Int]("linked_mission_id", O.NotNull)
  def panoId: Column[String] = column[String]("panoid", O.NotNull)
  def completed: Column[Boolean] = column[Boolean]("completed", O.NotNull)
  def lat: Column[Float] = column[Float]("lat", O.NotNull)
  def lng: Column[Float] = column[Float]("lng", O.NotNull)
  // Default projection mapping rows to/from CVMissionPanoStatus.
  def * = (itemId, linkedMissionId, panoId, completed, lat, lng) <> ((CVMissionPanoStatus.apply _).tupled,
    CVMissionPanoStatus.unapply)
  // Each status row belongs to exactly one mission.
  def linkedMission: ForeignKeyQuery[MissionTable, Mission] =
    foreignKey("mission_progress_cvgroundtruth_linked_mission_id_fkey", linkedMissionId, TableQuery[MissionTable])(_.missionId)
}
object MissionProgressCVGroundtruthTable {
  val db: Database = play.api.db.slick.DB
  val cvMissionPanoStatuses: TableQuery[MissionProgressCVGroundtruthTable] = TableQuery[MissionProgressCVGroundtruthTable]
  /**
   * Fetches the remaining panos that still need to be audited for a particular user and ground truth audit mission.
   *
   * @param userId a user id
   * @param missionId the id of a ground truth audit mission that belongs to this user
   * @return a list of panoIds that still need to be audited in this mission
   */
  def getRemainingPanos(userId:UUID, missionId: Int): List[String] = db.withSession { implicit session =>
    // Join status rows to their mission so the user-ownership check can be applied.
    val remaining: Query[Column[String], String, Seq] = for {
      _panostatuses <- cvMissionPanoStatuses
      _missions <- MissionTable.missions if _panostatuses.linkedMissionId === _missions.missionId
      if _panostatuses.linkedMissionId === missionId && !_panostatuses.completed && _missions.userId === userId.toString
    } yield _panostatuses.panoId
    remaining.list
  }
  /**
   * Marks a pano complete and updates the database. If all panos complete, entire mission is also marked as complete.
   *
   * @param userId a user id
   * @param missionId id of a CV ground truth audit mission belonging to this user
   * @param panoId a panoID that is part of the mission, to mark as complete
   */
  def markPanoComplete(userId: UUID, missionId: Int, panoId: String) = db.withSession { implicit session =>
    cvMissionPanoStatuses.filter(panoStatus =>
      panoStatus.linkedMissionId === missionId &&
      panoStatus.panoId === panoId
    ).map(c => c.completed).update(true)
    // Get remaining incomplete panos for this mission. If none left, mark mission complete.
    val remaining: List[String] = getRemainingPanos(userId, missionId)
    if (remaining.isEmpty) {
      MissionTable.updateComplete(missionId)
    }
  }
  /**
   * Gets the lat/lng position of a panoid that is part of an active CV ground truth audit mission.
   * Note: these lat/lng positions are supplied by the client when the ground truth audit mission is created.
   * @param userId a user id
   * @param panoId a panoId that is part of an active CV ground truth audit mission for this user
   * @return a lat/lng tuple specifying the location of the pano
   */
  def getPanoLatLng(userId: UUID, panoId: String):(Option[Float],Option[Float]) = db.withSession { implicit session =>
    val activeMission: Option[Mission] = MissionTable.getIncompleteCVGroundTruthMission(userId)
    activeMission match {
      case Some(mission) =>
        // NOTE(review): .take(1).first throws if no row matches this (mission, panoId)
        // pair; firstOption would make this total — behavior left unchanged here.
        val result: CVMissionPanoStatus = cvMissionPanoStatuses.filter(statusEntry =>
          statusEntry.linkedMissionId === mission.missionId &&
          statusEntry.panoId === panoId
        ).take(1).first
        (Some(result.lat), Some(result.lng))
      case None =>
        (None, None)
    }
  }
  /**
   * Adds a new row to the table.
   */
  def save(requiredPanoStatus: CVMissionPanoStatus): Int = db.withTransaction { implicit session =>
    // Returns the auto-generated item_id of the inserted row.
    val requiredPanoStatusId: Int =
      (cvMissionPanoStatuses returning cvMissionPanoStatuses.map(_.itemId)) += requiredPanoStatus
    requiredPanoStatusId
  }
}
| ProjectSidewalk/SidewalkWebpage | app/models/mission/MissionProgressCVGroundtruthTable.scala | Scala | mit | 4,529 |
package scalaoauth2.provider
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers._
// Tests for parsing client credentials out of an AuthorizationRequest: HTTP Basic
// Authorization headers (valid, malformed, wrong scheme) and request parameters.
// Note: "Y2xpZW50X2lkX3ZhbHVlOmNsaWVudF9zZWNyZXRfdmFsdWU=" is the Base64 encoding
// of "client_id_value:client_secret_value".
class AuthorizationRequestSpec extends AnyFlatSpec {
  it should "fetch Basic64" in {
    val request = new AuthorizationRequest(
      Map(
        "Authorization" -> Seq(
          "Basic Y2xpZW50X2lkX3ZhbHVlOmNsaWVudF9zZWNyZXRfdmFsdWU="
        )
      ),
      Map()
    )
    val Some(c) = request.parseClientCredential
      .fold[Option[ClientCredential]](None)(_.fold(_ => None, c => Some(c)))
    c.clientId should be("client_id_value")
    c.clientSecret should be(Some("client_secret_value"))
  }
  it should "fetch Basic64 by case insensitive" in {
    val request = new AuthorizationRequest(
      Map(
        "authorization" -> Seq(
          "Basic Y2xpZW50X2lkX3ZhbHVlOmNsaWVudF9zZWNyZXRfdmFsdWU="
        )
      ),
      Map()
    )
    val Some(c) = request.parseClientCredential
      .fold[Option[ClientCredential]](None)(_.fold(_ => None, c => Some(c)))
    c.clientId should be("client_id_value")
    c.clientSecret should be(Some("client_secret_value"))
  }
  // A Basic payload without ':' is defined but yields a Left (invalid credential).
  it should "fetch authorization header without colon" in {
    val request = new AuthorizationRequest(
      Map("Authorization" -> Seq("Basic Y2xpZW50X2lkX3ZhbHVl")),
      Map()
    )
    val parsedCred = request.parseClientCredential
    parsedCred.isDefined shouldBe true
    parsedCred.get.isLeft shouldBe true
  }
  it should "fetch empty client_secret with colon" in {
    val request = new AuthorizationRequest(
      Map("Authorization" -> Seq("Basic Y2xpZW50X2lkX3ZhbHVlOg==")),
      Map()
    )
    val Some(c) = request.parseClientCredential
      .fold[Option[ClientCredential]](None)(_.fold(_ => None, c => Some(c)))
    c.clientId should be("client_id_value")
    c.clientSecret should be(None)
  }
  it should "not fetch not Authorization key in header" in {
    val request = new AuthorizationRequest(
      Map(
        "authorizatio" -> Seq(
          "Basic Y2xpZW50X2lkX3ZhbHVlOmNsaWVudF9zZWNyZXRfdmFsdWU="
        )
      ),
      Map()
    )
    request.parseClientCredential should be(None)
  }
  it should "not fetch invalid Base64" in {
    val request = new AuthorizationRequest(
      Map("Authorization" -> Seq("Basic basic")),
      Map()
    )
    val parsedCred = request.parseClientCredential
    parsedCred.isDefined shouldBe true
    parsedCred.get.isLeft shouldBe true
  }
  // Credentials may also arrive as plain request parameters.
  it should "fetch parameter" in {
    val request = new AuthorizationRequest(
      Map(),
      Map(
        "client_id" -> Seq("client_id_value"),
        "client_secret" -> Seq("client_secret_value")
      )
    )
    val Some(c) = request.parseClientCredential
      .fold[Option[ClientCredential]](None)(_.fold(_ => None, c => Some(c)))
    c.clientId should be("client_id_value")
    c.clientSecret should be(Some("client_secret_value"))
  }
  it should "omit client_secret" in {
    val request = new AuthorizationRequest(
      Map(),
      Map("client_id" -> Seq("client_id_value"))
    )
    val Some(c) = request.parseClientCredential
      .fold[Option[ClientCredential]](None)(_.fold(_ => None, c => Some(c)))
    c.clientId should be("client_id_value")
    c.clientSecret should be(None)
  }
  it should "not fetch missing parameter" in {
    val request = new AuthorizationRequest(
      Map(),
      Map("client_secret" -> Seq("client_secret_value"))
    )
    request.parseClientCredential should be(None)
  }
  it should "not fetch invalid parameter" in {
    val request =
      new AuthorizationRequest(Map("Authorization" -> Seq("")), Map())
    val parsedCred = request.parseClientCredential
    parsedCred.isDefined shouldBe true
    parsedCred.get.isLeft shouldBe true
  }
  it should "not fetch invalid Authorization header" in {
    val request = new AuthorizationRequest(
      Map("Authorization" -> Seq("Digest Y2xpZW50X2lkX3ZhbHVlOg==")),
      Map()
    )
    val parsedCred = request.parseClientCredential
    parsedCred.isDefined shouldBe true
    parsedCred.get.isLeft shouldBe true
  }
  // A malformed header wins over valid parameters: the header error is reported.
  it should "not fetch if Authorization header is invalid, but client_id and client_secret are valid and present in parms" in {
    val request = new AuthorizationRequest(
      Map("Authorization" -> Seq("fakeheader aaaa")),
      Map(
        "client_id" -> Seq("client_id_value"),
        "client_secret" -> Seq("client_secret_value")
      )
    )
    val parsedCred = request.parseClientCredential
    parsedCred.isDefined shouldBe true
    parsedCred.get.isLeft shouldBe true
  }
}
| nulab/scala-oauth2-provider | src/test/scala/scalaoauth2/provider/AuthorizationRequestSpec.scala | Scala | mit | 4,537 |
package redscaler
import cats.data.NonEmptyList
import cats.free.Free
import freasymonad.cats.free
@free
trait ConnectionOps {
  // Algebra of Redis connection operations; the @free macro (freasymonad)
  // derives a Free-monad DSL (smart constructors + interpreter plumbing) from
  // these abstract methods.
  sealed trait ConnectionOp[A]
  type ConnectionIO[A] = Free[ConnectionOp, A]
  // Each operation is named after the Redis command it represents; results are
  // wrapped in ErrorOr (declared elsewhere in this project) and raw values are
  // byte vectors.
  def get(key: String): ConnectionIO[ErrorOr[Option[Vector[Byte]]]]
  def set(key: String, value: Vector[Byte]): ConnectionIO[ErrorOr[Unit]]
  def getset(key: String, value: Vector[Byte]): ConnectionIO[ErrorOr[Option[Vector[Byte]]]]
  def append(key: String, value: Vector[Byte]): ConnectionIO[ErrorOr[Int]]
  def lpush(key: String, values: NonEmptyList[Vector[Byte]]): ConnectionIO[ErrorOr[Int]]
  def lpushx(key: String, values: NonEmptyList[Vector[Byte]]): ConnectionIO[ErrorOr[Int]]
  def rpush(key: String, values: NonEmptyList[Vector[Byte]]): ConnectionIO[ErrorOr[Int]]
  def rpushx(key: String, values: NonEmptyList[Vector[Byte]]): ConnectionIO[ErrorOr[Int]]
  def lrange(key: String, startIndex: Int, endIndex: Int): ConnectionIO[ErrorOr[List[Vector[Byte]]]]
  def selectDatabase(databaseIndex: Int): ConnectionIO[ErrorOr[Unit]]
  def flushdb: ConnectionIO[ErrorOr[Unit]]
}
| agustafson/redscaler | core/src/main/scala/redscaler/ConnectionOps.scala | Scala | apache-2.0 | 1,103 |
package controllers
import java.time.LocalDateTime
import java.util.UUID
import javax.inject.{Inject, Singleton}
import entity.form.{InfoForm, LoginForm}
import service.{AdminService, CategoryService}
import utils.FormConverter.infoConvert
import base.Constants._
import base.action.MustBeAdminGo
import entity.Admin
import play.api.mvc._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent.Future
import scala.language.postfixOps
import scala.util.{Failure, Success}
/**
* Samsara Aquarius Route
* Admin Controller
*
* @author sczyh30
*/
@Singleton
class AdminController @Inject() (admin: AdminService, cs: CategoryService) extends Controller {
  // (title, redirect path) pair shown on the result pages below.
  type PageMessage = (String, String)
  // NOTE(review): this match is partial — result codes other than the three
  // listed throw MatchError; processFailResult only normalizes codes *below*
  // ADMIN_PROCESS_FAIL_UNKNOWN, so unknown codes at or above it still reach
  // this match unhandled. TODO confirm the full set of result codes.
  private def convertResult(code: Int): PageMessage = code match {
    case ADMIN_ADD_ARTICLE_SUCCESS => ("操作成功", "/adm1n6q9/add-article")
    case ADMIN_PROCESS_FAIL_UNKNOWN => ("操作失败", "/adm1n6q9")
    case ADMIN_ADD_ARTICLE_FAIL => ("添加文章失败", "/adm1n6q9/add-article")
  }
  // Renders the generic success page for the given result code.
  def processOkResult(tp: Int)(implicit request: Request[AnyContent]): Result = {
    val msg = convertResult(tp)
    Ok(views.html.admin.processOK(msg._1, msg._2))
  }
  // Renders the generic failure page, clamping low codes to the generic-failure code.
  def processFailResult(tp: Int)(implicit request: Request[AnyContent]): Result = {
    val msg = convertResult(if (tp >= ADMIN_PROCESS_FAIL_UNKNOWN) tp
    else ADMIN_PROCESS_FAIL_UNKNOWN)
    Ok(views.html.admin.processFail(msg._1, msg._2))
  }
  /**
   * Admin Index Page(aka Go! Dashboard)
   */
  def dashboard() = MustBeAdminGo.async { implicit request =>
    admin.countDashboard map { counts =>
      Ok(views.html.admin.dashboard(counts))
    }
  }
  /**
   * Admin Login Page
   */
  def goIndex() = Action { implicit request =>
    // Already-authenticated admins are sent straight to the dashboard.
    request.session.get("adm1n_go_token") match {
      case Some(u) =>
        Redirect(routes.AdminController.dashboard())
      case None =>
        Ok(views.html.admin.go(LoginForm.form))
    }
  }
  /**
   * Typeclass for Admin entity to generate admin token
   * @param admin admin entity
   */
  // Note: the `admin` parameter shadows the injected AdminService field inside this class.
  implicit class Converter(admin: Admin) {
    // Builds a fresh admin session: id, name, random token, and a timestamp.
    def session: Session = {
      val session = Map("adm1n_go_aid" -> admin.id.toString, "adm1n_go_name" -> admin.name,
        "adm1n_go_token" -> UUID.randomUUID().toString,
        "aq_go_timestamp" -> LocalDateTime.now().toString)
      Session(session)
    }
  }
  /**
   * Admin Login Process(aka Go!Ahead)
   */
  def goAhead() = Action.async { implicit request =>
    LoginForm.form.bindFromRequest().fold(
      errorForm => {
        Future.successful(Redirect(routes.AdminController.goIndex()) flashing "adm1n_auth_error" -> "错误!错误!")
      }, data => {
        admin.login(data.username, data.password) map {
          case Success(a) =>
            Redirect(routes.AdminController.dashboard()) withSession a.session // will clean common user session!
          case Failure(ex) =>
            Redirect(routes.AdminController.goIndex()) flashing "adm1n_auth_error" -> "错误!错误!怎么搞的?"
        }
      })
  }
  /**
   * Logout(aka Go! away)
   * @return
   */
  def goAway() = Action { implicit request =>
    // Dropping the whole session also logs the admin out.
    Redirect(routes.Application.index()) withNewSession
  }
  /**
   * Add info page
   */
  def addInfoPage() = MustBeAdminGo.async { implicit request =>
    cs.fetchAll map { categories =>
      Ok(views.html.admin.articles.addInfo(InfoForm.form, categories))
    }
  }
  /**
   * Add info process
   */
  def addInfoProcess() = MustBeAdminGo.async { implicit request =>
    InfoForm.form.bindFromRequest.fold(
      errorForm => {
        Future.successful(Redirect(routes.AdminController.addInfoPage()) flashing "add_article__error" -> "表单格式错误,请检查表单!")
      },
      data => {
        // Negative result codes from the service indicate failure.
        admin.addArticle(data) map { res =>
          if (res >= 0)
            processOkResult(ADMIN_ADD_ARTICLE_SUCCESS)
          else
            processFailResult(ADMIN_ADD_ARTICLE_FAIL)
        }
      })
  }
}
| sczyh30/samsara-aquarius | app/controllers/AdminController.scala | Scala | mit | 3,996 |
package org.jetbrains.plugins.scala
package lang
package checkers
package checkPrivateAccess
import java.io.File
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.PsiMember
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReference
import org.jetbrains.plugins.scala.lang.resolve.ResolveUtils
import org.junit.Assert._
import scala.annotation.nowarn
/**
* User: Alexander Podkhalyuzin
* Date: 08.10.2009
*/
// Base class for private-access checker tests: each test data file contains a
// /*ref*/ marker before a reference, and its last comment holds the expected
// accessibility result ("true"/"false") as a string.
@nowarn("msg=ScalaLightPlatformCodeInsightTestCaseAdapter")
abstract class CheckPrivateAccessTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  val refMarker = "/*ref*/"
  protected def folderPath = baseRootPath + "checkers/checkPrivateAccess/"
  // Loads <TestName>.scala, resolves the reference after the marker, computes its
  // accessibility, and compares against the expected value in the trailing comment.
  protected def doTest(): Unit = {
    val filePath = folderPath + getTestName(false) + ".scala"
    val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
    assertNotNull("file " + filePath + " not found", file)
    val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
    configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
    val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
    // indexOf returns -1 when the marker is missing, making offset == refMarker.length - 1.
    val offset = fileText.indexOf(refMarker) + refMarker.length
    assertNotEquals("Not specified caret marker in test case. Use " + refMarker + " in scala file for this.", offset, refMarker.length - 1)
    val elem = scalaFile.findElementAt(offset).getParent
    if (!elem.isInstanceOf[ScReference])
      fail("Ref marker should point on reference")
    val ref = elem.asInstanceOf[ScReference]
    val resolve: PsiMember = PsiTreeUtil.getParentOfType(ref.resolve(), classOf[PsiMember], false)
    val actual = ResolveUtils.isAccessible(resolve, elem)
    // The expected result is stored in the file's final comment (line or block).
    val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
    val text = lastPsi.getText
    val expected = lastPsi.getNode.getElementType match {
      case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
      case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
        text.substring(2, text.length - 2).trim
      case _ => fail("Test result must be in last comment statement.")
    }
    if (shouldPass) {
      assertEquals("Wrong reference accessibility: ", expected, actual.toString)
    }
    else {
      // Negative tests: subclasses override shouldPass to assert the check currently fails.
      if (expected == actual.toString) {
        fail("Test has passed, but was supposed to fail")
      }
    }
  }
  protected def shouldPass: Boolean = true
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/checkers/checkPrivateAccess/CheckPrivateAccessTestBase.scala | Scala | apache-2.0 | 2,853 |
package exercises
/**
* Created by aguestuser on 1/3/15.
*/
// Placeholder for the "Scala for the Impatient" chapter 13 exercises; the class
// body intentionally contains only the exercise statements as comments.
class Ch13_Collections {
  // THIS CHAPTER WAS AMAZING! BE SURE TO COME BACK AND DO THESE!
  // 1. Write a function that, given a string, produces a map of the indexes of all characters. For example, indexes("Mississippi") should return a map associating 'M' with the set {0}, 'i' with the set {1, 4, 7, 10}, and so on. Use a mutable map of characters to mutable sets. How can you ensure that the set is sorted?
  // 2. Repeat the preceding exercise, using an immutable map of characters to lists.
  // 3. Write a function that removes all zeroes from a linked list of integers.
  // 4. Write a function that receives a collection of strings and a map from strings to integers. Return a collection of integers that are values of the map corresponding to one of the strings in the collection. For example, given Array("Tom", "Fred", "Harry") and Map("Tom" -> 3, "Dick" -> 4, "Harry" -> 5), return Array(3, 5). Hint: Use flatMap to combine the Option values returned by get.
  // 5. Implement a function that works just like mkString, using reduceLeft.
  // 6. Given a list of integers lst, what is (lst :\\ List[Int]())(_ :: _)? (List[Int]() /: lst)(_ :+ _)? How
  // can you modify one of them to reverse the list?
  // 7. In Section 13.11, "Zipping," on page 171, the expression (prices zip quantities) map { p => p._1 * p._2 } is a bit inelegant. We can't do (prices zip quantities) map { _ * _ } because _ * _ is a function with two arguments, and we need a function with one argument that is a tuple. The tupled method of the Function2 object changes a function with two arguments to one that takes a tuple. Apply tupled to the multiplication function so you can map it over the list of pairs.
  // 8. Write a function that turns an array of Double values into a two-dimensional array. Pass the number of columns as a parameter. For example, with Array(1, 2, 3, 4, 5, 6) and three columns, return Array(Array(1, 2, 3), Array(4, 5, 6)). Use the grouped method.
  // 9. Harry Hacker writes a program that accepts a sequence of file names on the command line. For each, he starts a new thread that reads the file and updates a letter frequency map, declared as
  //    val frequencies = new scala.collection.mutable.HashMap[Char, Int] with
  //      scala.collection.mutable.SynchronizedMap[Char, Int]
  //    When reading a letter c, he calls
  //
  //    frequencies(c) = frequencies.getOrElse(c, 0) + 1
  //    Why won't this work? Will it work if he used instead
  //
  //    import scala.collection.JavaConversions.asScalaConcurrentMap
  //    val frequencies: scala.collection.mutable.ConcurrentMap[Char, Int] =
  //      new java.util.concurrent.ConcurrentHashMap[Char, Int]
  // 10. Harry Hacker reads a file into a string and wants to use a parallel collection to update the letter frequencies concurrently on portions of the string. He uses the following code:
  //    val frequencies = new scala.collection.mutable.HashMap[Char, Int]
  //    for (c <- str.par) frequencies(c) = frequencies.getOrElse(c, 0) + 1
  //    Why is this a terrible idea? How can he really parallelize the computation? (Hint: Use aggregate.)
}
| aguestuser/hackerschool | scala_for_the_impatient/src/main/scala/exercises/Ch13_Collections.scala | Scala | gpl-3.0 | 3,200 |
package c2.w3
object loops {
/**
* WHILE
* A while-true loop
* while (condition) {command}
*
* @param condition the condition
* @param command a command
*/
def WHILE(condition: => Boolean)(command: => Unit): Unit = {
if (condition) {
command
WHILE(condition)(command)
}
else ()
}
/**
* REPEAT
* A repeat-while-true loop
* {command} while (condition)
*
* @param command the condition
* @param condition a command
*/
def REPEAT(command: => Unit)(condition: => Boolean): Unit = {
command
if (condition) ()
else REPEAT(command)(condition)
}
/**
* DO
* A do-while-true loop
* do
*
* @param command a command
*/
class DO(command: => Unit) {
def WHILE(condition: => Boolean): Unit = {
command
if (condition) ()
else DO {
command
} WHILE condition
}
}
  /** Companion providing the `DO { ... } WHILE (...)` entry point. */
  object DO {
    def apply(command: => Unit) = new DO(command)
  }
} | lwo/lwo.github.io | src/main/scala/c2/w3/loops.scala | Scala | gpl-3.0 | 997 |
/*
* This file is part of the ToolXiT project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolxit
package mouth
import util._
/** A bunch of parsers that transform and expand the TeX tokens. The results
* are primitive TeX commands (its mouth as explained in the TeX book).
* The parsers must perform macro expansion when needed.
*
* @author Lucas Satabin
*/
abstract class TeXMouth extends Parsers[Token]
    with NumberParsers
    with DimenParsers
    with FontParsers
    with TeXDefinitionParsers
    with IfParsers
    with TeXUtils {
  type State = TeXState
  /** Immutable parser state threaded through every parser of this mouth. */
  case class TeXState(
    // the rest of the input
    stream: Stream[Token],
    // the current position
    pos: Pos,
    // the current TeX environment with all local and global definitions
    env: TeXEnvironment,
    // when parsing macro parameter text, what is the current parameter number
    currentParam: Int = 0,
    // the current group nesting level
    currentNesting: Int = 0,
    // when parsing shall we expand the control sequences?
    expansion: Boolean = true,
    // when set to true, the next \\else part encountered will be skipped
    // until the matching \\fi
    skipElse: Boolean = false,
    // when the current input is included by another one, refer to the parent input
    including: Option[State] = None,
    // when \\endinput was seen, the next EOL or EOI restores the including input
    endinput: Boolean = false) extends Input
  /** Rebuilds a state with the given remaining stream and position (hook required by the parser framework). */
  protected def makeState(old: State, stream: Stream[Token], pos: Pos): State =
    old.copy(stream = stream, pos = pos)
  /** Resolves the given name for \\input command */
  protected def resolve(name: String): Option[Stream[Token]]
  /** Parser that parses a command, performing all needed expansions */
  lazy val command: Parser[Command] =
    // TODO implement
    fail("not implemented yet")
  /** Parser that parses and expands the next token */
  lazy val expanded: Parser[Token] =
    // rules for expansion are in the TeX book, starting at page 212
    // each alternative handles one expandable primitive; `any` is the
    // fallback that returns the token unchanged
    expandedMacro <||>
      expandedInput <||>
      expandedNumber <||>
      expandedRomanNumeral <||>
      expandedString <||>
      expandedJobname <||>
      expandedFontname <||>
      expandedMeaning <||>
      expandedCsname <||>
      expandedExpandafter <||>
      expandedNoexpand <||>
      expandEndinput <||>
      any
  /** Expands a user-defined macro: parses its (unexpanded) arguments, substitutes
   *  them into the replacement text, pushes the result back and retries. */
  lazy val expandedMacro: Parser[Token] =
    for {
      // if this is a control sequence...
      ControlSequenceToken(name, _) <- any
      // ... that is a macro, ...
      Some(TeXMacro(_, params, repl, long)) <- fromEnv(name)
      // ... parse the parameters (which are not expanded)...
      () <- updateState(st => st.copy(expansion = false))
      args <- paramParser(long, name, params)
      // restore expansion
      () <- updateState(st => st.copy(expansion = true))
      // ... and substitute them in replacement text
      replaced = substituteParameters(repl, args)
      // finally, replace by replacement text...
      () <- updateState { st =>
        val newStream = flattened(replaced).toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\number into the decimal representation of the following number. */
  lazy val expandedNumber: Parser[Token] =
    for {
      // if this is the \\number control sequence...
      ControlSequenceToken("number", false) <- any
      // ... read the following (expanded) number...
      num <- number
      // replace by the decimal representation of the number
      () <- updateState { st =>
        val decimal = toTokens(num)
        val newStream = decimal.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\romannumeral into the roman representation of the following number. */
  lazy val expandedRomanNumeral: Parser[Token] =
    for {
      // if this is the \\romannumeral control sequence...
      ControlSequenceToken("romannumeral", false) <- any
      // ... read the following (expanded) number...
      num <- number
      // replace by the roman representation of the number
      () <- updateState { st =>
        val roman = toRoman(num)
        val newStream = roman.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\string into the character tokens spelling the next (unexpanded) token. */
  lazy val expandedString: Parser[Token] =
    for {
      // if this is the \\string control sequence...
      ControlSequenceToken("string", false) <- any
      // read the following (non expanded) token
      tok <- any
      // replace by the string representation of this token
      () <- updateState { st =>
        val toks = toTokens(st.env.toString(tok))
        val newStream = toks.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\jobname into the character tokens of the environment's job name. */
  lazy val expandedJobname: Parser[Token] =
    for {
      // if this is the \\jobname control sequence...
      ControlSequenceToken("jobname", false) <- any
      // ... simply print the control sequence corresponding to environment's job name...
      () <- updateState { st =>
        val toks = toTokens(st.env.jobname)
        val newStream = toks.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\fontname into the name and at-size of the following font. */
  lazy val expandedFontname: Parser[Token] =
    for {
      // if this is the \\fontname control sequence...
      ControlSequenceToken("fontname", false) <- any
      // parse the subsequent font...
      f <- font
      // ..., expand its name to a token list...
      () <- updateState { st =>
        val toks = toTokens(f.name + " at size " + f.atSize)
        val newStream = toks.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\meaning into the textual meaning of the next (unexpanded) token. */
  lazy val expandedMeaning: Parser[Token] =
    for {
      // if this is the \\meaning control sequence...
      ControlSequenceToken("meaning", false) <- any
      // ... get the next unexpanded token...
      tok <- any
      // ... and expand to its meaning...
      () <- updateState { st =>
        val toks = toTokens(st.env.meaning(tok))
        val newStream = toks.toStream ++ st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\csname ... \\endcsname into the named control sequence
   *  (or \\relax when the name is unknown in the environment). */
  lazy val expandedCsname: Parser[Token] =
    for {
      // if this is \\csname ...
      ControlSequenceToken("csname", false) <- any
      // ... expand tokens until \\endcsname...
      tokens <- until(expanded, controlSequence("endcsname"))
      ControlSequenceToken("endcsname", false) <- any
      // ... put tokens on top of the stream...
      () <- updateState { st =>
        val name = st.env.toString(tokens)
        // the corresponding control sequence
        val cs = st.env.css(name) match {
          case Some(_) =>
            ControlSequenceToken(name)
          case None =>
            ControlSequenceToken("relax")
        }
        val newStream = cs #:: st.stream
        st.copy(stream = newStream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\expandafter: expands the second-next token before the next one. */
  lazy val expandedExpandafter: Parser[Token] =
    for {
      // if this is \\expandafter...
      ControlSequenceToken("expandafter", false) <- any
      // ... read the next unexpanded token...
      next <- any
      // ... expand the next token...
      after <- expanded
      // put the unexpanded token in front...
      () <- updateState { st =>
        st.copy(stream = next #:: after #:: st.stream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\noexpand: the following token is shielded from expansion
   *  (an expandable token is temporarily treated as \\relax). */
  lazy val expandedNoexpand: Parser[Token] =
    for {
      // if this is \\noexpand...
      ControlSequenceToken("noexpand", false) <- any
      // ... get the next unexpanded token
      next <- any
      // update it in the input stream
      () <- updateState { st =>
        // if this should be expanded, treat the token as \\relax
        val tok = if(st.env.expandable(next))
          ControlSequenceToken("relax")
        else
          next
        st.copy(stream = tok #:: st.stream)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Expands \\input: resolves the named file and splices its token stream
   *  (terminated by a synthetic \\endinput) in front of the current input. */
  lazy val expandedInput: Parser[Token] =
    for {
      // if this is \\input...
      ControlSequenceToken("input", false) <- any
      // read until next white space, this is the filename
      name <- until(expanded, whitespace)
      // replace the input by the new resolved stream if it can be resolved
      st <- getState
      resolved = resolve(st.env.toString(name))
      if resolved.isDefined
      () <- setState {
        val input = resolved.get
        st.copy(
          // append \\endinput at the end to cause the parser to properly close it
          // when reaching the end. it does not matter if there were previous occurrences
          // of \\endinput in the included stream, this one will simply be ignored in this case.
          // the implementation of `++` on stream ensures that the `input`
          // stream is not completely evaluated here
          stream = input ++ Stream(ControlSequenceToken("endinput")),
          pos = TokenPosition(None, 0, 1, 1),
          including = Some(st)
        )
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Handles \\endinput: flags that the current input ends at the next end of line. */
  lazy val expandEndinput: Parser[Token] =
    for {
      // if this is \\endinput
      ControlSequenceToken("endinput", false) <- any
      // input ends on next end of line character (or at EOI)
      () <- updateState { st =>
        st.copy(endinput = true)
      }
      // ... and retry
      tok <- expanded
    } yield tok
  /** Looks up the control sequence `name` in the current environment (always succeeds, possibly with None). */
  def fromEnv(name: String): Parser[Option[ControlSequence]] =
    for {
      st <- getState
    } yield st.env.css(name)
  /** Parser that parses the next expanded token if the expansion process is active, otherwise returns the next raw token */
  lazy val next: Parser[Token] = {
    val inner = attempt(for {
      st <- getState
      if st.expansion
      t <- expanded
    } yield t) <||>
      any
    // NOTE(review): the comprehension below pattern-matches an end-of-line
    // token before retrying; how non-EOL tokens flow through depends on the
    // pattern-match-failure semantics of this parser monad — confirm against
    // the Parsers implementation.
    for {
      // end of line read
      CharacterToken(_, Category.END_OF_LINE) <- inner
      // and endinput flag set
      st <- getState
      if st.endinput
      // restore including input if any or set empty stream
      () <- setState {
        st.including match {
          case Some(state) => state.copy(env = st.env)
          case None => makeState(st, Stream.empty, TokenPosition(None, 0, 1, 1))
        }
      }
      // retry
      tok <- inner
    } yield tok
  }
  /** Parser that parses a single token, which can be a simple next token or a group token
   * The groups must be correctly nested */
  lazy val single: Parser[Token] =
    (for {
      open <- beginningOfGroup
      // enter new group in environment
      () <- updateState(st => st.copy(env = st.env.enterGroup))
      tokens <- until(single, endOfGroup)
      close <- endOfGroup
      // leave group
      () <- updateState(st => st.copy(env = st.env.leaveGroup))
    } yield GroupToken(open, tokens, close)) <|>
      param <|>
      next
  /** Parser that accepts a raw end-of-line character token (no expansion). */
  lazy val eol: Parser[CharacterToken] =
    for {
      (ch @ CharacterToken(_, Category.END_OF_LINE)) <- any
    } yield ch
  /** Parser that accepts an (expanded) space or end-of-line character token. */
  lazy val whitespace: Parser[CharacterToken] =
    for {
      (ch @ CharacterToken(value, cat)) <- next
      if cat == Category.SPACE || cat == Category.END_OF_LINE
    } yield ch
  /** Parser that accepts the given character token, with same category code */
  def char(c: CharacterToken): Parser[CharacterToken] =
    for {
      (ch @ CharacterToken(value, cat)) <- next
      if value == c.value && cat == c.category
    } yield ch
  /** Parser that accepts the given character token sequence, with same category codes */
  def charSequence(chars: List[CharacterToken]): Parser[Unit] = chars match {
    case c :: rest => char(c) >>= (_ => charSequence(rest))
    case Nil => success(())
  }
  /** Parser that accepts a sequence of 0 or more character tokens */
  lazy val characters: Parser[List[CharacterToken]] =
    many(character)
  /** Parser that accepts any character token */
  lazy val character: Parser[CharacterToken] =
    for {
      (c @ CharacterToken(_, _)) <- next
    } yield c
  /** Parser that accepts any control sequence */
  lazy val controlSequence: Parser[ControlSequenceToken] =
    for {
      (cs @ ControlSequenceToken(_, _)) <- next
    } yield cs
  /** Parser that accepts any control sequence, without performing any expansion */
  lazy val rawControlSequence: Parser[ControlSequenceToken] =
    for {
      (cs @ ControlSequenceToken(_, _)) <- any
    } yield cs
  /** Parser that accepts the control sequence with the given name */
  def controlSequence(name: String): Parser[ControlSequenceToken] =
    for {
      cs <- controlSequence
      if cs.name == name
    } yield cs
  /** Parser that accepts a character of category 'parameter' (usually '#'). */
  lazy val parameter: Parser[CharacterToken] =
    for {
      (c @ CharacterToken(_, Category.PARAMETER)) <- next
    } yield c
  /** Parser that accepts any character of category 'beginning of group' */
  lazy val beginningOfGroup: Parser[CharacterToken] =
    for {
      (c @ CharacterToken(_, cat)) <- next
      if cat == Category.BEGINNING_OF_GROUP
    } yield c
  /** Parser that accepts any character of category 'end of group' */
  lazy val endOfGroup: Parser[CharacterToken] =
    for {
      (c @ CharacterToken(_, cat)) <- next
      if cat == Category.END_OF_GROUP
    } yield c
  /** Parser that accepts a macro parameter reference: a parameter character followed by a digit. */
  lazy val param: Parser[ParameterToken] =
    for {
      _ <- parameter
      nb <- digit
    } yield ParameterToken(nb)
  // Accepts a single decimal digit character and yields its numeric value
  // ('0' has code 48, hence the subtraction).
  private lazy val digit =
    for {
      CharacterToken(c, _) <- next
      if c.isDigit
    } yield (c - 48)
  /** Parser that parses the given parameter tokens for macro invocation */
  def paramParser(long: Boolean, name: String, params: List[Parameter]): Parser[List[Token]] = params match {
    case Left(ParameterToken(_)) :: rest if long =>
      // number does not matter here, we know that it is correct
      for {
        // parse the next (single) token
        p <- single
        // then the rest of the parameters
        rest <- paramParser(long, name, rest)
      } yield p :: rest
    case Left(ParameterToken(_)) :: rest =>
      // the long modifier was not given, do not allow \\par to occur
      (for {
        // `\\par` is not allowed
        () <- not(controlSequence("par"))
        // parse the next (single) token
        p <- single
        // then the rest of the parameters
        rest <- paramParser(long, name, rest)
      } yield p :: rest) <|>
        // NOTE(review): this string is not an s-interpolator, so "$name" is
        // emitted literally in the error message — confirm whether s"..."
        // was intended.
        fail("Paragraph ended before \\\\$name was complete")
    case Right(chars) :: rest =>
      // helper that matches (and discards) the literal delimiter tokens
      def sequence(tokens: List[Token]): Parser[Unit] = tokens match {
        case (c @ CharacterToken(_, _)) :: rest =>
          for {
            _ <- char(c)
            () <- sequence(rest)
          } yield ()
        case ControlSequenceToken(name, _) :: rest =>
          for {
            _ <- controlSequence(name)
            () <- sequence(rest)
          } yield ()
        case token :: rest =>
          fail("Unexpected token " + token)
        case Nil =>
          success(())
      }
      for {
        // parse these delimiter characters (and ignore them)
        _ <- sequence(chars)
        // then the rest of the parameters
        rest <- paramParser(long, name, rest)
      } yield rest
    case Nil =>
      success(Nil)
  }
}
| gnieh/toolxit-tex | src/main/scala/toolxit/mouth/TeXMouth.scala | Scala | apache-2.0 | 15,891 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.agents.integration
import com.bwsw.tstreams.agents.producer._
import com.bwsw.tstreams.env.ConfigurationOptions
import com.bwsw.tstreams.testutils._
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
/**
 * Integration tests for the producer transaction API: transaction creation
 * policies (ErrorIfOpened / EnqueueIfOpened / CheckpointIfOpened), checkpoint
 * semantics, and instant (fire-and-forget) transactions. Runs against a fresh
 * test storage server started lazily in beforeAll.
 */
class ProducerTest extends FlatSpec with Matchers with BeforeAndAfterAll with TestUtils {
  val PARTITIONS_COUNT = 10
  // Lazily created so beforeAll controls the actual startup order.
  lazy val srv = TestStorageServer.getNewClean()
  lazy val producer = f.getProducer(
    name = "test_producer",
    partitions = (0 until PARTITIONS_COUNT).toSet)
  override def beforeAll(): Unit = {
    f.setProperty(ConfigurationOptions.Stream.partitionsCount, PARTITIONS_COUNT)
    // Bare reference forces initialization of the lazy server before the
    // stream is created.
    srv
    createNewStream(partitions = PARTITIONS_COUNT)
  }
  "BasicProducer.newTransaction()" should "return BasicProducerTransaction instance" in {
    val transaction = producer.newTransaction(NewProducerTransactionPolicy.ErrorIfOpened)
    transaction.checkpoint()
    transaction.isInstanceOf[ProducerTransactionImpl] shouldEqual true
  }
  "BasicProducer.newTransaction(ProducerPolicies.ErrorIfOpened)" should "throw exception if previous transaction was not closed" in {
    val transaction1 = producer.newTransaction(NewProducerTransactionPolicy.CheckpointIfOpened, 2)
    intercept[IllegalStateException] {
      producer.newTransaction(NewProducerTransactionPolicy.ErrorIfOpened, 2)
    }
    transaction1.checkpoint()
  }
  "BasicProducer.newTransaction(ProducerPolicies.EnqueueIfOpened)" should "not throw exception if previous transaction was not closed" in {
    val transaction1 = producer.newTransaction(NewProducerTransactionPolicy.EnqueueIfOpened, 3)
    val transaction2 = producer.newTransaction(NewProducerTransactionPolicy.EnqueueIfOpened, 3)
    // Checkpointing the first transaction must leave only the second open.
    transaction1.checkpoint()
    producer.getOpenedTransactionsForPartition(3).get.size shouldBe 1
    transaction2.checkpoint()
    producer.getOpenedTransactionsForPartition(3).get.size shouldBe 0
  }
  "BasicProducer.newTransaction(ProducerPolicies.EnqueueIfOpened) and checkpoint" should "not throw exception if previous transaction was not closed" in {
    val transaction1 = producer.newTransaction(NewProducerTransactionPolicy.EnqueueIfOpened, 3)
    val transaction2 = producer.newTransaction(NewProducerTransactionPolicy.EnqueueIfOpened, 3)
    // A producer-wide checkpoint must close every queued transaction.
    producer.checkpoint()
    transaction1.isClosed shouldBe true
    transaction2.isClosed shouldBe true
  }
  "BasicProducer.newTransaction(CheckpointIfOpen)" should "not throw exception if previous transaction was not closed" in {
    producer.newTransaction(NewProducerTransactionPolicy.CheckpointIfOpened, 2)
    val transaction2 = producer.newTransaction(NewProducerTransactionPolicy.CheckpointIfOpened, 2)
    transaction2.checkpoint()
  }
  "BasicProducer.getTransaction()" should "return transaction reference if it was created or None" in {
    val transaction = producer.newTransaction(NewProducerTransactionPolicy.CheckpointIfOpened, 1)
    val transactionRef = producer.getOpenedTransactionsForPartition(1)
    transaction.checkpoint()
    transactionRef.get.contains(transaction) shouldBe true
  }
  "BasicProducer.instantTransaction" should "work well for reliable delivery" in {
    val data = Seq(new Array[Byte](128))
    // Reliable delivery reports a positive transaction identifier.
    producer.instantTransaction(0, data, isReliable = true) > 0 shouldBe true
  }
  "BasicProducer.instantTransaction" should "work well for unreliable delivery" in {
    val data = Seq(new Array[Byte](128))
    // Unreliable (fire-and-forget) delivery reports identifier 0.
    producer.instantTransaction(0, data, isReliable = false) == 0 shouldBe true
  }
  "BasicProducer.instantTransaction" should "work and doesn't prevent from correct functioning of regular one" in {
    val regularTransaction = producer.newTransaction(NewProducerTransactionPolicy.ErrorIfOpened, 0)
    regularTransaction.send("test".getBytes)
    val data = Seq(new Array[Byte](128))
    producer.instantTransaction(0, data, isReliable = false) == 0 shouldBe true
    regularTransaction.checkpoint()
  }
  override def afterAll(): Unit = {
    producer.stop()
    TestStorageServer.dispose(srv)
    onAfterAll()
  }
}
| bwsw/t-streams | src/test/scala/com/bwsw/tstreams/agents/integration/ProducerTest.scala | Scala | apache-2.0 | 4,847 |
package helpers
import org.specs2.mock.Mockito
import play.api.http.Status._
import play.api.http.{ ContentTypeOf, HeaderNames }
import play.api.libs.ws._
import securesocial.core.services.HttpService
import scala.concurrent.Future
object MockHttpService {
  // Form-style request parameters: name -> repeated values.
  type Params = Map[String, Seq[String]]
  // Body writer / content-type evidence aliases for Params payloads.
  type ParamsWriter = BodyWritable[Params]
  type ContentTypeOfParams = ContentTypeOf[Params]
}
/**
 * Mockito-backed stub of [[HttpService]] for tests: every url() call hands
 * back the same mock request, whose get() resolves to a canned 200 OK
 * text/html response with an empty body. Requested URLs are recorded in
 * `urls` in call order so tests can assert on them.
 */
class MockHttpService extends Mockito with HttpService {
  val request = mock[WSRequest].as(s"Request($hashCode)")
  // Fix: the mock label was missing its closing parenthesis
  // (was s"Response($hashCode"), unlike the Request label above.
  val response = mock[WSResponse].as(s"Response($hashCode)")
  // Every URL passed to url(), in the order it was requested.
  val urls: collection.mutable.Buffer[String] = new collection.mutable.ArrayBuffer[String]()
  response.status returns OK
  response.header(HeaderNames.CONTENT_TYPE) returns Some("text/html;charset=UTF-8")
  response.body returns ""
  request.get() returns Future.successful(response)
  /** Records the URL and returns the shared mock request. */
  def url(url: String): WSRequest = {
    urls += url
    request
  }
  /** Unchecked cast helper for callers needing a more specific view of this stub. */
  def underlying[T]: T = this.asInstanceOf[T]
}
| k4200/securesocial | module-code/test/helpers/MockHttpService.scala | Scala | apache-2.0 | 981 |
package org.skycastle.content.geometry
//mport com.vividsolutions.jts.geom.Geometry
import entity.Entity
/**
* An entity associated with a 2D shape.
*
* @author Hans Haggstrom
*/
// TODO: Could this be made a ModelPart instead? Or some kind of Space / Map Part instead?
class GeoEntity extends Entity {
  // Geometry support is currently disabled: the JTS-backed field below is
  // kept commented out (see the commented import at the top of the file).
  //  var geometry : Geometry = null
} | weimingtom/skycastle | src/main/scala/org/skycastle/content/geometry/GeoEntity.scala | Scala | gpl-2.0 | 350 |
package codechicken.multipart
import scala.collection.mutable.{Map => MMap}
import codechicken.lib.packet.PacketCustom
import codechicken.lib.data.MCDataOutput
import codechicken.lib.data.MCDataInput
import net.minecraft.world.World
import codechicken.lib.vec.BlockCoord
import scala.collection.mutable.ListBuffer
import cpw.mods.fml.common.ModContainer
import cpw.mods.fml.common.Loader
import net.minecraft.block.Block
import java.lang.Iterable
import com.google.common.collect.ArrayListMultimap
import scala.collection.JavaConversions._
import net.minecraft.nbt.NBTTagCompound
/**
* This class handles the registration and internal ID mapping of all multipart classes.
*/
object MultiPartRegistry
{
    /**
     * Interface to be registered for constructing parts.
     * Every instance of every multipart is constructed from an implementor of this.
     */
    @Deprecated
    trait IPartFactory
    {
        /**
         * Create a new instance of the part with the specified type name identifier
         * @param client If the part instance is for the client or the server
         */
        def createPart(name: String, client: Boolean): TMultiPart
    }
    /**
     * Will replace IPartFactory in 1.8
     */
    trait IPartFactory2
    {
        /**
         * Create a new server instance of the part with the specified type name identifier
         * @param nbt The tag compound that will be passed to part.load, can be used to change the class of part returned
         */
        def createPart(name: String, nbt: NBTTagCompound): TMultiPart
        /**
         * Create a new client instance of the part with the specified type name identifier
         * @param packet The packet that will be passed to part.readDesc, can be used to change the class of part returned
         */
        def createPart(name: String, packet: MCDataInput): TMultiPart
    }
    /**
     * An interface for converting existing blocks/tile entities to multipart versions.
     */
    trait IPartConverter
    {
        /**
         * Return true if this converter can handle the specific blockID (may or may not actually convert the block)
         */
        def blockTypes: Iterable[Block]
        /**
         * Return a multipart version of the block at pos in world. Return null if no conversion is possible.
         */
        def convert(world: World, pos: BlockCoord): TMultiPart
    }
    // Registered factories keyed by part type name.
    private val typeMap = MMap[String, IPartFactory2]()
    // Reverse lookup: part type name -> numeric network id (rebuilt per server start).
    private val nameMap = MMap[String, Int]()
    // Numeric id -> (name, factory); built by beforeServerStart/readIDMap.
    private var idMap: Array[(String, IPartFactory2)] = _
    // Encodes/decodes numeric part ids compactly on the wire.
    private val idWriter = new IDWriter
    // Block -> converters capable of turning that block into a multipart.
    private val converters = ArrayListMultimap.create[Block, IPartConverter]()
    // Part type name -> mod that registered it.
    private val containers = MMap[String, ModContainer]()
    /**
     * The state of the registry. 0 = no parts, 1 = registering, 2 = registered
     */
    private var state: Int = 0
    /**
     * Register a part factory with an array of types it is capable of instantiating. Must be called before postInit
     * @deprecated Use IPartFactory2
     */
    @Deprecated
    def registerParts(partFactory: IPartFactory, types: Array[String]) {
        registerParts(partFactory.createPart _, types: _*)
    }
    /**
     * Scala function version of registerParts
     * @deprecated Use IPartFactory2
     */
    @Deprecated
    def registerParts(partFactory: (String, Boolean) => TMultiPart, types: String*) {
        // Adapts the legacy (name, client) factory to the IPartFactory2 shape;
        // the nbt/packet arguments of the new interface are ignored here.
        registerParts(new IPartFactory2 {
            override def createPart(name: String, packet: MCDataInput) = partFactory(name, true)
            override def createPart(name: String, nbt: NBTTagCompound) = partFactory(name, false)
        }, types:_*)
    }
    /**
     * Register a part factory with an array of types it is capable of instantiating. Must be called before postInit
     */
    def registerParts(partFactory: IPartFactory2, types: Array[String]) {
        registerParts(partFactory, types:_*)
    }
    /**
     * Scala va-args version of registerParts
     */
    def registerParts(partFactory: IPartFactory2, types: String*) {
        if (loaded)
            throw new IllegalStateException("Parts must be registered in the init methods.")
        state = 1
        val container = Loader.instance.activeModContainer
        if (container == null)
            throw new IllegalStateException("Parts must be registered during the initialization phase of a mod container")
        types.foreach { s =>
            if (typeMap.contains(s))
                throw new IllegalStateException("Part with id " + s + " is already registered.")
            logger.debug("Registered multipart: "+s)
            typeMap.put(s, partFactory)
            containers.put(s, container)
        }
    }
    /**
     * Register a part converter instance
     */
    def registerConverter(c: IPartConverter) {
        c.blockTypes.foreach(converters.put(_, c))
    }
    // Builds the deterministic name <-> id mapping (sorted by name) so that
    // server and clients agree on numeric part ids for the session.
    private[multipart] def beforeServerStart() {
        idMap = typeMap.toList.sortBy(_._1).toArray
        idWriter.setMax(idMap.length)
        nameMap.clear()
        for (i <- 0 until idMap.length)
            nameMap.put(idMap(i)._1, i)
    }
    // Serializes the id -> name table so a client can mirror it.
    private[multipart] def writeIDMap(packet: PacketCustom) {
        packet.writeInt(idMap.length)
        idMap.foreach(e => packet.writeString(e._1))
    }
    // Rebuilds the id mapping from the server's packet; returns the names the
    // local side has no factory for (i.e. parts missing on this client).
    private[multipart] def readIDMap(packet: PacketCustom): Seq[String] = {
        val k = packet.readInt()
        idWriter.setMax(k)
        idMap = new Array(k)
        nameMap.clear()
        val missing = ListBuffer[String]()
        for (i <- 0 until k) {
            val s = packet.readString()
            val v = typeMap.get(s)
            if (v.isEmpty)
                missing += s
            else {
                idMap(i) = (s, v.get)
                nameMap.put(s, i)
            }
        }
        return missing
    }
    /**
     * Return true if any multiparts have been registered
     */
    private[multipart] def required = state > 0
    /**
     * Return true if no more parts can be registered
     */
    def loaded = state == 2
    private[multipart] def postInit() {
        state = 2
    }
    /**
     * Writes the id of part to data
     */
    def writePartID(data: MCDataOutput, part: TMultiPart) {
        idWriter.write(data, nameMap.get(part.getType).get)
    }
    /**
     * Uses instantiators to creat a new part from the id read from data
     */
    def readPart(data: MCDataInput) = {
        val e = idMap(idWriter.read(data))
        e._2.createPart(e._1, data)
    }
    /**
     * Uses instantiators to creat a new part from the a tag compound
     */
    def loadPart(name: String, nbt:NBTTagCompound) = typeMap.get(name) match {
        case Some(factory) => factory.createPart(name, nbt)
        case None =>
            logger.error("Missing mapping for part with ID: " + name)
            null
    }
    /**
     * Uses instantiators to create a new part with specified identifier on side
     * @deprecated currently calls the nbt/packet version with a null parameter, use readPart or loadPart instead
     */
    @Deprecated
    def createPart(name: String, client: Boolean) = typeMap.get(name) match {
        case Some(factory) =>
            if(client) factory.createPart(name, null: MCDataInput)
            else factory.createPart(name, null: NBTTagCompound)
        case None =>
            logger.error("Missing mapping for part with ID: " + name)
            null
    }
    /**
     * Calls converters to create a multipart version of the block at pos
     */
    def convertBlock(world: World, pos: BlockCoord, block: Block): TMultiPart = {
        // First converter that produces a non-null part wins.
        for (c <- converters.get(block)) {
            val ret = c.convert(world, pos)
            if (ret != null)
                return ret
        }
        return null
    }
    def getModContainer(name: String) = containers(name)
}
| kenzierocks/ForgeMultipart | src/codechicken/multipart/MultiPartRegistry.scala | Scala | lgpl-2.1 | 7,837 |
package io.taskr
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
/** Application entry point: boots the actor system and binds the HTTP service. */
object Boot extends App {
  // Actor system hosting the service; implicit so IO(Http) can pick it up.
  implicit val system = ActorSystem("taskr")
  val service = system.actorOf(Props[TaskServiceActor], "task-service")
  // Timeout for the ask (?) on the bind below.
  implicit val timeout = Timeout(5.seconds)
  // Start the HTTP server on localhost:8080, routing requests to `service`.
  IO(Http) ? Http.Bind(service, interface = "localhost", port = 8080)
}
| mcross1882/taskr | src/main/scala/io/taskr/Boot.scala | Scala | mit | 453 |
package com.twitter.finagle.util
/**
* ExitGuard prevents the process from exiting normally by use of a
* nondaemon thread whenever there is at least one guarder.
*/
object ExitGuard {
  // Outstanding guard count paired with the keeper thread, or None when
  // nothing currently prevents the JVM from exiting normally.
  private var pending: Option[(Int, Thread)] = None

  // Reflect the current guard count in the keeper thread's name so it shows
  // up in thread dumps.
  private def updateName(): Unit =
    pending foreach {
      case (count, thread) =>
        thread.setName("Finagle ExitGuard count=%d".format(count))
    }

  /** Prevent the process from exiting normally */
  def guard(): Unit = synchronized {
    pending = pending match {
      case Some((count, thread)) =>
        Some((count + 1, thread))
      case None =>
        // First guard: spawn a non-daemon thread that sleeps until it is
        // interrupted, which keeps the JVM alive while guards are held.
        val keeper = new Thread {
          setDaemon(false)
          start()
          override def run(): Unit = {
            while (true) {
              try Thread.sleep(Long.MaxValue)
              catch { case _: InterruptedException => return }
            }
          }
        }
        Some((1, keeper))
    }
    updateName()
  }

  /** Undo a call to guard */
  def unguard(): Unit = synchronized {
    pending = pending match {
      case Some((1, thread)) =>
        // Last guard released: stop the keeper so the JVM may exit.
        thread.interrupt()
        None
      case Some((count, thread)) =>
        Some((count - 1, thread))
      case None =>
        throw new IllegalStateException("unguard() called too many times")
    }
    updateName()
  }
}
| JustinTulloss/finagle | finagle-core/src/main/scala/com/twitter/finagle/util/ExitGuard.scala | Scala | apache-2.0 | 1,215 |
package br.unb.cic.poo.gol.view.commandline
import scala.io.StdIn.{readInt, readLine}
import br.unb.cic.poo.gol.controller.GameController
import br.unb.cic.poo.gol.model.GameEngine
import br.unb.cic.poo.gol.view.GameView
import br.unb.cic.poo.gol.Main
import br.unb.cic.poo.gol.model.ManufactureOfRules
import br.unb.cic.poo.gol.model.HistoryStates
object CommandLineView extends GameView {
  // Rendering primitives for one board cell.
  private val LINE = "+-----+"
  private val DEAD_CELL = "| |"
  private val ALIVE_CELL = "| o |"
  // Menu option codes returned by parseOption; 0 means "invalid, ask again".
  private val INVALID_OPTION = 0
  private val MAKE_CELL_ALIVE = 1
  private val NEXT_GENERATION = 2
  private val MAKE_RANDOM_CELLS_ALIVE = 3
  private val UNDO = 4
  private val CHANGE_RULE = 5
  private val HALT = 6
  /** Entry point of the command-line view: renders the board and shows the menu. */
  def startView {
    update
  }
  /**
   * Refreshes the view component: prints the whole board (one row per line,
   * live cells marked with "o", each row suffixed by its index) and then
   * shows the interactive menu. Typically invoked as a response to a
   * game-state update.
   */
  def update {
    printFirstRow
    printLine
    for(i <- (0 until GameEngine.height)) {
      for(j <- (0 until GameEngine.width)) {
        print(if (GameEngine.isCellAlive(i, j)) ALIVE_CELL else DEAD_CELL);
      }
      println(" " + i)
      printLine
    }
    printOptions
  }
  /**
   * Prints the menu (its layout depends on whether an undo is available),
   * reads choices until a valid one is entered, then dispatches to the
   * matching action.
   */
  private def printOptions {
    var option = 0
    println("\\n\\n")
    // Keep prompting until parseOption yields something other than
    // INVALID_OPTION (0).
    do{
      println("Select one of the options: \\n \\n")
      println("[1] Make a cell alive")
      println("[2] Next generation")
      println("[3] Make random cells alive")
      if(HistoryStates.canUndo) {
        println("[4] Undo")
        println("[5] Change rule")
        println("[6] Halt")
      } else {
        println("[4] Change rule")
        println("[5] Halt")
      }
      print("\\n \\n Option: ");
      option = parseOption(readLine)
    }while(option == 0)
    option match {
      case MAKE_CELL_ALIVE => makeCellAlive
      case NEXT_GENERATION => nextGeneration
      case MAKE_RANDOM_CELLS_ALIVE => randomCellsAlive
      case UNDO => undo
      case CHANGE_RULE => changeRule
      case HALT => halt
    }
  }
private def changeRule(){
println("Select one of the rules: \\n \\n");
println("[1] Conway");
println("[2] Maze");
println("[3] Day Night");
println("[4] Walled Cities");
println("[5] Cancel");
print("\\n \\n Option: ");
val option = readLine
Main.rule = option match {
case "1" => ManufactureOfRules.getRule(1)
case "2" => ManufactureOfRules.getRule(2)
case "3" => ManufactureOfRules.getRule(3)
case "4" => ManufactureOfRules.getRule(4)
case _ => Main.rule
}
update
printOptions
}
private def makeCellAlive {
var i = 0
var j = 0
do {
print("\\n Inform the row number (0 - " + (GameEngine.height - 1) + "): ")
i = readInt
print("\\n Inform the column number (0 - " + (GameEngine.width - 1) + "): ")
j = readInt
} while(!validPosition(i,j))
GameController.makeCellAlive(i, j)
}
private def nextGeneration = GameController.nextGeneration
private def randomCellsAlive = GameController.randomCellsAlive
private def undo = GameController.undo
private def halt = GameController.halt
private def validPosition(i: Int, j: Int): Boolean = {
println(i);
println(j);
i >= 0 && i < GameEngine.height && j >= 0 && j < GameEngine.width
}
private def parseOption(option: String): Int = {
if(HistoryStates.canUndo){
option match {
case "1" => MAKE_CELL_ALIVE
case "2" => NEXT_GENERATION
case "3" => MAKE_RANDOM_CELLS_ALIVE
case "4" => UNDO
case "5" => CHANGE_RULE
case "6" => HALT
case _ => INVALID_OPTION
}
} else {
option match {
case "1" => MAKE_CELL_ALIVE
case "2" => NEXT_GENERATION
case "3" => MAKE_RANDOM_CELLS_ALIVE
case "4" => CHANGE_RULE
case "5" => HALT
case _ => INVALID_OPTION
}
}
}
/* Imprime uma linha usada como separador das linhas do tabuleiro */
private def printLine() {
for(j <- (0 until GameEngine.width)) {
print(LINE)
}
println()
}
/*
* Imprime os identificadores das colunas na primeira linha do tabuleiro
*/
private def printFirstRow {
println("\\n \\n");
for(j <- (0 until GameEngine.width)) {
print(" " + j + " ")
}
println()
}
} | brenoxp/GameOfLife-SPartial | src/br/unb/cic/poo/gol/view/commandline/CommandLineView.scala | Scala | gpl-3.0 | 4,243 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.mutable.StringBuilder
import scala.language.implicitConversions
import scala.math.{Numeric, Ordering}
import scala.reflect.ClassTag
/**
* A template trait for collections which can be traversed either once only
* or one or more times.
*
* Note: `IterableOnce` does not extend [[IterableOnceOps]]. This is different than the general
* design of the collections library, which uses the following pattern:
* {{{
* trait Seq extends Iterable with SeqOps
* trait SeqOps extends IterableOps
*
* trait IndexedSeq extends Seq with IndexedSeqOps
* trait IndexedSeqOps extends SeqOps
* }}}
*
* The goal is to provide a minimal interface without any sequential operations. This allows
* third-party extension like Scala parallel collections to integrate at the level of IterableOnce
* without inheriting unwanted implementations.
*
* @define coll collection
*/
trait IterableOnce[+A] extends Any {
  /** Iterator can be used only once */
  def iterator: Iterator[A]
  /** Returns a [[scala.collection.Stepper]] for the elements of this collection.
   *
   * The Stepper enables creating a Java stream to operate on the collection, see
   * [[scala.jdk.StreamConverters]]. For collections holding primitive values, the Stepper can be
   * used as an iterator which doesn't box the elements.
   *
   * The implicit [[scala.collection.StepperShape]] parameter defines the resulting Stepper type according to the
   * element type of this collection.
   *
   *   - For collections of `Int`, `Short`, `Byte` or `Char`, an [[scala.collection.IntStepper]] is returned
   *   - For collections of `Double` or `Float`, a [[scala.collection.DoubleStepper]] is returned
   *   - For collections of `Long` a [[scala.collection.LongStepper]] is returned
   *   - For any other element type, an [[scala.collection.AnyStepper]] is returned
   *
   * Note that this method is overridden in subclasses and the return type is refined to
   * `S with EfficientSplit`, for example [[scala.collection.IndexedSeqOps.stepper]]. For Steppers marked with
   * [[scala.collection.Stepper.EfficientSplit]], the converters in [[scala.jdk.StreamConverters]]
   * allow creating parallel streams, whereas bare Steppers can be converted only to sequential
   * streams.
   */
  def stepper[S <: Stepper[_]](implicit shape: StepperShape[A, S]): S = {
    import convert.impl._
    // Pick a primitive-specialized stepper when the shape identifies one; the
    // StepperShape evidence ties the element type A to the shape, so the
    // element casts below are safe by that contract.
    val s = shape.shape match {
      case StepperShape.IntShape => new IntIteratorStepper (iterator.asInstanceOf[Iterator[Int]])
      case StepperShape.LongShape => new LongIteratorStepper (iterator.asInstanceOf[Iterator[Long]])
      case StepperShape.DoubleShape => new DoubleIteratorStepper(iterator.asInstanceOf[Iterator[Double]])
      case _ => shape.seqUnbox(new AnyIteratorStepper[A](iterator))
    }
    // The match above produced the stepper corresponding to `shape`, so this
    // cast to the shape's stepper type S is safe.
    s.asInstanceOf[S]
  }
  /** @return The number of elements in this $coll, if it can be cheaply computed,
   *  -1 otherwise. Cheaply usually means: Not requiring a collection traversal.
   */
  def knownSize: Int = -1
}
/** Deprecated extension methods for `IterableOnce`, retained only for source
 *  compatibility with 2.12 code. Each method forwards to the corresponding
 *  operation on `iterator` (or, where the receiver is an `Iterable`, delegates
 *  to the `Iterable` directly — see the pattern matches below).
 */
final class IterableOnceExtensionMethods[A](private val it: IterableOnce[A]) extends AnyVal {
  @deprecated("Use .iterator.withFilter(...) instead", "2.13.0")
  def withFilter(f: A => Boolean): Iterator[A] = it.iterator.withFilter(f)
  @deprecated("Use .iterator.reduceLeftOption(...) instead", "2.13.0")
  def reduceLeftOption(f: (A, A) => A): Option[A] = it.iterator.reduceLeftOption(f)
  @deprecated("Use .iterator.min instead", "2.13.0")
  def min(implicit ord: Ordering[A]): A = it.iterator.min
  @deprecated("Use .iterator.nonEmpty instead", "2.13.0")
  def nonEmpty: Boolean = it.iterator.nonEmpty
  @deprecated("Use .iterator.max instead", "2.13.0")
  def max(implicit ord: Ordering[A]): A = it.iterator.max
  @deprecated("Use .iterator.reduceRight(...) instead", "2.13.0")
  def reduceRight(f: (A, A) => A): A = it.iterator.reduceRight(f)
  @deprecated("Use .iterator.maxBy(...) instead", "2.13.0")
  def maxBy[B](f: A => B)(implicit cmp: Ordering[B]): A = it.iterator.maxBy(f)
  @deprecated("Use .iterator.reduceLeft(...) instead", "2.13.0")
  def reduceLeft(f: (A, A) => A): A = it.iterator.reduceLeft(f)
  @deprecated("Use .iterator.sum instead", "2.13.0")
  def sum(implicit num: Numeric[A]): A = it.iterator.sum
  @deprecated("Use .iterator.product instead", "2.13.0")
  def product(implicit num: Numeric[A]): A = it.iterator.product
  @deprecated("Use .iterator.count(...) instead", "2.13.0")
  def count(f: A => Boolean): Int = it.iterator.count(f)
  @deprecated("Use .iterator.reduceOption(...) instead", "2.13.0")
  def reduceOption(f: (A, A) => A): Option[A] = it.iterator.reduceOption(f)
  @deprecated("Use .iterator.minBy(...) instead", "2.13.0")
  def minBy[B](f: A => B)(implicit cmp: Ordering[B]): A = it.iterator.minBy(f)
  @deprecated("Use .iterator.size instead", "2.13.0")
  def size: Int = it.iterator.size
  @deprecated("Use .iterator.forall(...) instead", "2.13.0")
  def forall(f: A => Boolean): Boolean = it.iterator.forall(f)
  @deprecated("Use .iterator.collectFirst(...) instead", "2.13.0")
  def collectFirst[B](f: PartialFunction[A, B]): Option[B] = it.iterator.collectFirst(f)
  @deprecated("Use .iterator.filter(...) instead", "2.13.0")
  def filter(f: A => Boolean): Iterator[A] = it.iterator.filter(f)
  @deprecated("Use .iterator.exists(...) instead", "2.13.0")
  def exists(f: A => Boolean): Boolean = it.iterator.exists(f)
  @deprecated("Use .iterator.copyToBuffer(...) instead", "2.13.0")
  def copyToBuffer(dest: mutable.Buffer[A]): Unit = it.iterator.copyToBuffer(dest)
  @deprecated("Use .iterator.reduce(...) instead", "2.13.0")
  def reduce(f: (A, A) => A): A = it.iterator.reduce(f)
  @deprecated("Use .iterator.reduceRightOption(...) instead", "2.13.0")
  def reduceRightOption(f: (A, A) => A): Option[A] = it.iterator.reduceRightOption(f)
  @deprecated("Use .iterator.toIndexedSeq instead", "2.13.0")
  def toIndexedSeq: IndexedSeq[A] = it.iterator.toIndexedSeq
  @deprecated("Use .iterator.foreach(...) instead", "2.13.0")
  @`inline` def foreach[U](f: A => U): Unit = it match {
    case it: Iterable[A] => it.foreach(f)
    case _ => it.iterator.foreach(f)
  }
  @deprecated("Use .iterator.to(factory) instead", "2.13.0")
  def to[C1](factory: Factory[A, C1]): C1 = factory.fromSpecific(it)
  @deprecated("Use .iterator.to(ArrayBuffer) instead", "2.13.0")
  def toBuffer[B >: A]: mutable.Buffer[B] = mutable.ArrayBuffer.from(it)
  @deprecated("Use .iterator.toArray", "2.13.0")
  def toArray[B >: A: ClassTag]: Array[B] = it match {
    case it: Iterable[B] => it.toArray[B]
    case _ => it.iterator.toArray[B]
  }
  @deprecated("Use .iterator.to(List) instead", "2.13.0")
  def toList: immutable.List[A] = immutable.List.from(it)
  @deprecated("Use .iterator.to(Set) instead", "2.13.0")
  @`inline` def toSet[B >: A]: immutable.Set[B] = immutable.Set.from(it)
  @deprecated("Use .iterator.to(Iterable) instead", "2.13.0")
  @`inline` final def toTraversable: Traversable[A] = toIterable
  @deprecated("Use .iterator.to(Iterable) instead", "2.13.0")
  @`inline` final def toIterable: Iterable[A] = Iterable.from(it)
  @deprecated("Use .iterator.to(Seq) instead", "2.13.0")
  @`inline` def toSeq: immutable.Seq[A] = immutable.Seq.from(it)
  @deprecated("Use .iterator.to(LazyList) instead", "2.13.0")
  @`inline` def toStream: immutable.Stream[A] = immutable.Stream.from(it)
  @deprecated("Use .iterator.to(Vector) instead", "2.13.0")
  @`inline` def toVector: immutable.Vector[A] = immutable.Vector.from(it)
  @deprecated("Use .iterator.to(Map) instead", "2.13.0")
  def toMap[K, V](implicit ev: A <:< (K, V)): immutable.Map[K, V] =
    immutable.Map.from(it.asInstanceOf[IterableOnce[(K, V)]])
  @deprecated("Use .iterator instead", "2.13.0")
  @`inline` def toIterator: Iterator[A] = it.iterator
  @deprecated("Use .iterator.isEmpty instead", "2.13.0")
  def isEmpty: Boolean = it match {
    case it: Iterable[A] => it.isEmpty
    case _ => it.iterator.isEmpty
  }
  @deprecated("Use .iterator.mkString instead", "2.13.0")
  def mkString(start: String, sep: String, end: String): String = it match {
    case it: Iterable[A] => it.mkString(start, sep, end)
    case _ => it.iterator.mkString(start, sep, end)
  }
  @deprecated("Use .iterator.mkString instead", "2.13.0")
  def mkString(sep: String): String = it match {
    case it: Iterable[A] => it.mkString(sep)
    case _ => it.iterator.mkString(sep)
  }
  @deprecated("Use .iterator.mkString instead", "2.13.0")
  def mkString: String = it match {
    case it: Iterable[A] => it.mkString
    case _ => it.iterator.mkString
  }
  @deprecated("Use .iterator.find instead", "2.13.0")
  def find(p: A => Boolean): Option[A] = it.iterator.find(p)
  @deprecated("Use .iterator.foldLeft instead", "2.13.0")
  @`inline` def foldLeft[B](z: B)(op: (B, A) => B): B = it.iterator.foldLeft(z)(op)
  @deprecated("Use .iterator.foldRight instead", "2.13.0")
  @`inline` def foldRight[B](z: B)(op: (A, B) => B): B = it.iterator.foldRight(z)(op)
  @deprecated("Use .iterator.fold instead", "2.13.0")
  def fold[A1 >: A](z: A1)(op: (A1, A1) => A1): A1 = it.iterator.fold(z)(op)
  @deprecated("Use .iterator.foldLeft instead", "2.13.0")
  @`inline` def /: [B](z: B)(op: (B, A) => B): B = foldLeft[B](z)(op)
  @deprecated("Use .iterator.foldRight instead", "2.13.0")
  @`inline` def :\\ [B](z: B)(op: (A, B) => B): B = foldRight[B](z)(op)
  @deprecated("Use .iterator.map instead or consider requiring an Iterable", "2.13.0")
  def map[B](f: A => B): IterableOnce[B] = it match {
    case it: Iterable[A] => it.map(f)
    case _ => it.iterator.map(f)
  }
  @deprecated("Use .iterator.flatMap instead or consider requiring an Iterable", "2.13.0")
  def flatMap[B](f: A => IterableOnce[B]): IterableOnce[B] = it match {
    case it: Iterable[A] => it.flatMap(f)
    case _ => it.iterator.flatMap(f)
  }
  @deprecated("Use .iterator.sameElements instead", "2.13.0")
  def sameElements[B >: A](that: IterableOnce[B]): Boolean = it.iterator.sameElements(that)
}
/** Companion holding the deprecated implicit conversion and internal array-copy helpers. */
object IterableOnce {
  @`inline` implicit def iterableOnceExtensionMethods[A](it: IterableOnce[A]): IterableOnceExtensionMethods[A] =
    new IterableOnceExtensionMethods[A](it)

  /** Number of elements that can actually be copied into an array slice.
   *
   *  Clamps the requested count to what the source can provide and what the
   *  destination slice can hold, never going below zero.
   *
   *  @param srcLen  the length of the source collection
   *  @param destLen the length of the destination array
   *  @param start   the index in the destination array at which to start copying elements to
   *  @param len     the requested number of elements to copy (we may only be able to copy less than this)
   *  @return        the number of elements that will be copied to the destination array
   */
  @inline private[collection] def elemsToCopyToArray(srcLen: Int, destLen: Int, start: Int, len: Int): Int = {
    val wanted = math.min(len, srcLen)
    val available = destLen - start
    val n = math.min(wanted, available)
    if (n > 0) n else 0
  }

  /** Calls `copyToArray` on the given collection, regardless of whether or not it is an `Iterable`. */
  @inline private[collection] def copyElemsToArray[A, B >: A](elems: IterableOnce[A],
                                                              xs: Array[B],
                                                              start: Int = 0,
                                                              len: Int = Int.MaxValue): Int =
    elems match {
      case iterable: Iterable[A] => iterable.copyToArray[B](xs, start, len)
      case other => other.iterator.copyToArray[B](xs, start, len)
    }
}
/** This implementation trait can be mixed into an `IterableOnce` to get the basic methods that are shared between
* `Iterator` and `Iterable`. The `IterableOnce` must support multiple calls to `iterator` but may or may not
* return the same `Iterator` every time.
*
* @define orderDependent
*
* Note: might return different results for different runs, unless the underlying collection type is ordered.
* @define orderDependentFold
*
* Note: might return different results for different runs, unless the
* underlying collection type is ordered or the operator is associative
* and commutative.
* @define mayNotTerminateInf
*
* Note: may not terminate for infinite-sized collections.
* @define willNotTerminateInf
*
* Note: will not terminate for infinite-sized collections.
* @define willForceEvaluation
* Note: Even when applied to a view or a lazy collection it will always force the elements.
* @define consumesIterator
* After calling this method, one should discard the iterator it was called
* on. Using it is undefined and subject to change.
* @define undefinedorder
* The order in which operations are performed on elements is unspecified
* and may be nondeterministic.
* @define coll collection
*
*/
trait IterableOnceOps[+A, +CC[_], +C] extends Any { this: IterableOnce[A] =>
/////////////////////////////////////////////////////////////// Abstract methods that must be implemented
/** Produces a $coll containing cumulative results of applying the
* operator going left to right, including the initial value.
*
* $willNotTerminateInf
* $orderDependent
*
* @tparam B the type of the elements in the resulting collection
* @param z the initial value
* @param op the binary operator applied to the intermediate result and the element
* @return collection with intermediate results
*/
def scanLeft[B](z: B)(op: (B, A) => B): CC[B]
/** Selects all elements of this $coll which satisfy a predicate.
*
* @param p the predicate used to test elements.
* @return a new iterator consisting of all elements of this $coll that satisfy the given
* predicate `p`. The order of the elements is preserved.
*/
def filter(p: A => Boolean): C
/** Selects all elements of this $coll which do not satisfy a predicate.
*
* @param pred the predicate used to test elements.
* @return a new $coll consisting of all elements of this $coll that do not satisfy the given
* predicate `pred`. Their order may not be preserved.
*/
def filterNot(pred: A => Boolean): C
/** Selects the first ''n'' elements.
* $orderDependent
* @param n the number of elements to take from this $coll.
* @return a $coll consisting only of the first `n` elements of this $coll,
* or else the whole $coll, if it has less than `n` elements.
* If `n` is negative, returns an empty $coll.
*/
def take(n: Int): C
/** Takes longest prefix of elements that satisfy a predicate.
* $orderDependent
* @param p The predicate used to test elements.
* @return the longest prefix of this $coll whose elements all satisfy
* the predicate `p`.
*/
def takeWhile(p: A => Boolean): C
/** Selects all elements except first ''n'' ones.
* $orderDependent
* @param n the number of elements to drop from this $coll.
* @return a $coll consisting of all elements of this $coll except the first `n` ones, or else the
* empty $coll, if this $coll has less than `n` elements.
* If `n` is negative, don't drop any elements.
*/
def drop(n: Int): C
/** Drops longest prefix of elements that satisfy a predicate.
* $orderDependent
* @param p The predicate used to test elements.
* @return the longest suffix of this $coll whose first element
* does not satisfy the predicate `p`.
*/
def dropWhile(p: A => Boolean): C
/** Selects an interval of elements. The returned $coll is made up
* of all elements `x` which satisfy the invariant:
* {{{
* from <= indexOf(x) < until
* }}}
* $orderDependent
*
* @param from the lowest index to include from this $coll.
* @param until the lowest index to EXCLUDE from this $coll.
* @return a $coll containing the elements greater than or equal to
* index `from` extending up to (but not including) index `until`
* of this $coll.
*/
def slice(from: Int, until: Int): C
/** Builds a new $coll by applying a function to all elements of this $coll.
*
* @param f the function to apply to each element.
* @tparam B the element type of the returned $coll.
* @return a new $coll resulting from applying the given function
* `f` to each element of this $coll and collecting the results.
*/
def map[B](f: A => B): CC[B]
/** Builds a new $coll by applying a function to all elements of this $coll
* and using the elements of the resulting collections.
*
* For example:
*
* {{{
* def getWords(lines: Seq[String]): Seq[String] = lines flatMap (line => line split "\\\\W+")
* }}}
*
* The type of the resulting collection is guided by the static type of $coll. This might
* cause unexpected results sometimes. For example:
*
* {{{
* // lettersOf will return a Seq[Char] of likely repeated letters, instead of a Set
* def lettersOf(words: Seq[String]) = words flatMap (word => word.toSet)
*
* // lettersOf will return a Set[Char], not a Seq
* def lettersOf(words: Seq[String]) = words.toSet flatMap ((word: String) => word.toSeq)
*
* // xs will be an Iterable[Int]
* val xs = Map("a" -> List(11,111), "b" -> List(22,222)).flatMap(_._2)
*
* // ys will be a Map[Int, Int]
* val ys = Map("a" -> List(1 -> 11,1 -> 111), "b" -> List(2 -> 22,2 -> 222)).flatMap(_._2)
* }}}
*
* @param f the function to apply to each element.
* @tparam B the element type of the returned collection.
* @return a new $coll resulting from applying the given collection-valued function
* `f` to each element of this $coll and concatenating the results.
*/
def flatMap[B](f: A => IterableOnce[B]): CC[B]
/** Converts this $coll of traversable collections into
* a $coll formed by the elements of these traversable
* collections.
*
* The resulting collection's type will be guided by the
* type of $coll. For example:
*
* {{{
* val xs = List(
* Set(1, 2, 3),
* Set(1, 2, 3)
* ).flatten
* // xs == List(1, 2, 3, 1, 2, 3)
*
* val ys = Set(
* List(1, 2, 3),
* List(3, 2, 1)
* ).flatten
* // ys == Set(1, 2, 3)
* }}}
*
* @tparam B the type of the elements of each traversable collection.
* @param asIterable an implicit conversion which asserts that the element
* type of this $coll is a `GenTraversable`.
* @return a new $coll resulting from concatenating all element ${coll}s.
*/
def flatten[B](implicit asIterable: A => IterableOnce[B]): CC[B]
/** Builds a new $coll by applying a partial function to all elements of this $coll
* on which the function is defined.
*
* @param pf the partial function which filters and maps the $coll.
* @tparam B the element type of the returned $coll.
* @return a new $coll resulting from applying the given partial function
* `pf` to each element on which it is defined and collecting the results.
* The order of the elements is preserved.
*/
def collect[B](pf: PartialFunction[A, B]): CC[B]
/** Zips this $coll with its indices.
*
* @return A new $coll containing pairs consisting of all elements of this $coll paired with their index.
* Indices start at `0`.
* @example
* `List("a", "b", "c").zipWithIndex == List(("a", 0), ("b", 1), ("c", 2))`
*/
def zipWithIndex: CC[(A @uncheckedVariance, Int)]
/** Splits this $coll into a prefix/suffix pair according to a predicate.
*
* Note: `c span p` is equivalent to (but possibly more efficient than)
* `(c takeWhile p, c dropWhile p)`, provided the evaluation of the
* predicate `p` does not cause any side-effects.
* $orderDependent
*
* @param p the test predicate
* @return a pair consisting of the longest prefix of this $coll whose
* elements all satisfy `p`, and the rest of this $coll.
*/
def span(p: A => Boolean): (C, C)
/** Splits this $coll into a prefix/suffix pair at a given position.
*
* Note: `c splitAt n` is equivalent to (but possibly more efficient than)
* `(c take n, c drop n)`.
* $orderDependent
*
* @param n the position at which to split.
* @return a pair of ${coll}s consisting of the first `n`
* elements of this $coll, and the other elements.
*/
def splitAt(n: Int): (C, C) = {
var i = 0
span { _ => if (i < n) { i += 1; true } else false }
}
/** Applies a side-effecting function to each element in this collection.
* Strict collections will apply `f` to their elements immediately, while lazy collections
* like Views and LazyLists will only apply `f` on each element if and when that element
* is evaluated, and each time that element is evaluated.
*
* @param f a function to apply to each element in this $coll
* @tparam U the return type of f
* @return The same logical collection as this
*/
def tapEach[U](f: A => U): C
/////////////////////////////////////////////////////////////// Concrete methods based on iterator
/** Tests whether this $coll is known to have a finite size.
* All strict collections are known to have finite size. For a non-strict
* collection such as `Stream`, the predicate returns `'''true'''` if all
* elements have been computed. It returns `'''false'''` if the stream is
* not yet evaluated to the end. Non-empty Iterators usually return
* `'''false'''` even if they were created from a collection with a known
* finite size.
*
* Note: many collection methods will not work on collections of infinite sizes.
* The typical failure mode is an infinite loop. These methods always attempt a
* traversal without checking first that `hasDefiniteSize` returns `'''true'''`.
* However, checking `hasDefiniteSize` can provide an assurance that size is
* well-defined and non-termination is not a concern.
*
* @deprecated This method is deprecated in 2.13 because it does not provide any
* actionable information. As noted above, even the collection library itself
* does not use it. When there is no guarantee that a collection is finite, it
* is generally best to attempt a computation anyway and document that it will
* not terminate for infinite collections rather than backing out because this
* would prevent performing the computation on collections that are in fact
* finite even though `hasDefiniteSize` returns `false`.
*
* @see method `knownSize` for a more useful alternative
*
* @return `'''true'''` if this collection is known to have finite size,
* `'''false'''` otherwise.
*/
  @deprecated("Check .knownSize instead of .hasDefiniteSize for more actionable information (see scaladoc for details)", "2.13.0")
  def hasDefiniteSize: Boolean = true // deprecated; `knownSize` is the supported query
/** Tests whether this $coll can be repeatedly traversed. Always
* true for Iterables and false for Iterators unless overridden.
*
* @return `true` if it is repeatedly traversable, `false` otherwise.
*/
def isTraversableAgain: Boolean = false
/** Apply `f` to each element for its side effects
* Note: [U] parameter needed to help scalac's type inference.
*/
def foreach[U](f: A => U): Unit = {
val it = iterator
while(it.hasNext) f(it.next())
}
/** Tests whether a predicate holds for all elements of this $coll.
*
* $mayNotTerminateInf
*
* @param p the predicate used to test elements.
* @return `true` if this $coll is empty or the given predicate `p`
* holds for all elements of this $coll, otherwise `false`.
*/
def forall(p: A => Boolean): Boolean = {
var res = true
val it = iterator
while (res && it.hasNext) res = p(it.next())
res
}
/** Tests whether a predicate holds for at least one element of this $coll.
*
* $mayNotTerminateInf
*
* @param p the predicate used to test elements.
* @return `true` if the given predicate `p` is satisfied by at least one element of this $coll, otherwise `false`
*/
def exists(p: A => Boolean): Boolean = {
var res = false
val it = iterator
while (!res && it.hasNext) res = p(it.next())
res
}
/** Counts the number of elements in the $coll which satisfy a predicate.
*
* $willNotTerminateInf
*
* @param p the predicate used to test elements.
* @return the number of elements satisfying the predicate `p`.
*/
def count(p: A => Boolean): Int = {
var res = 0
val it = iterator
while (it.hasNext) if (p(it.next())) res += 1
res
}
/** Finds the first element of the $coll satisfying a predicate, if any.
*
* $mayNotTerminateInf
* $orderDependent
*
* @param p the predicate used to test elements.
* @return an option value containing the first element in the $coll
* that satisfies `p`, or `None` if none exists.
*/
def find(p: A => Boolean): Option[A] = {
val it = iterator
while (it.hasNext) {
val a = it.next()
if (p(a)) return Some(a)
}
None
}
/** Applies a binary operator to a start value and all elements of this $coll,
* going left to right.
*
* $willNotTerminateInf
* $orderDependentFold
*
* @param z the start value.
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return the result of inserting `op` between consecutive elements of this $coll,
* going left to right with the start value `z` on the left:
* `op(...op(z, x,,1,,), x,,2,,, ..., x,,n,,)` where `x,,1,,, ..., x,,n,,`
* are the elements of this $coll.
* Returns `z` if this $coll is empty.
*/
def foldLeft[B](z: B)(op: (B, A) => B): B = {
var result = z
val it = iterator
while (it.hasNext) {
result = op(result, it.next())
}
result
}
/** Applies a binary operator to all elements of this $coll and a start value,
* going right to left.
*
* $willNotTerminateInf
* $orderDependentFold
* @param z the start value.
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return the result of inserting `op` between consecutive elements of this $coll,
* going right to left with the start value `z` on the right:
* `op(x,,1,,, op(x,,2,,, ... op(x,,n,,, z)...))` where `x,,1,,, ..., x,,n,,`
* are the elements of this $coll.
* Returns `z` if this $coll is empty.
*/
def foldRight[B](z: B)(op: (A, B) => B): B = reversed.foldLeft(z)((b, a) => op(a, b))
  // Deprecated symbolic aliases for the named fold methods.
  @deprecated("Use foldLeft instead of /:", "2.13.0")
  @`inline` final def /: [B](z: B)(op: (B, A) => B): B = foldLeft[B](z)(op)
  @deprecated("Use foldRight instead of :\\\\", "2.13.0")
  @`inline` final def :\\ [B](z: B)(op: (A, B) => B): B = foldRight[B](z)(op)
/** Folds the elements of this $coll using the specified associative binary operator.
* The default implementation in `IterableOnce` is equivalent to `foldLeft` but may be
* overridden for more efficient traversal orders.
*
* $undefinedorder
* $willNotTerminateInf
*
* @tparam A1 a type parameter for the binary operator, a supertype of `A`.
* @param z a neutral element for the fold operation; may be added to the result
* an arbitrary number of times, and must not change the result (e.g., `Nil` for list concatenation,
* 0 for addition, or 1 for multiplication).
* @param op a binary operator that must be associative.
* @return the result of applying the fold operator `op` between all the elements and `z`, or `z` if this $coll is empty.
*/
def fold[A1 >: A](z: A1)(op: (A1, A1) => A1): A1 = foldLeft(z)(op)
/** Reduces the elements of this $coll using the specified associative binary operator.
*
* $undefinedorder
*
* @tparam B A type parameter for the binary operator, a supertype of `A`.
* @param op A binary operator that must be associative.
* @return The result of applying reduce operator `op` between all the elements if the $coll is nonempty.
* @throws UnsupportedOperationException if this $coll is empty.
*/
def reduce[B >: A](op: (B, B) => B): B = reduceLeft(op)
/** Reduces the elements of this $coll, if any, using the specified
* associative binary operator.
*
* $undefinedorder
*
* @tparam B A type parameter for the binary operator, a supertype of `A`.
* @param op A binary operator that must be associative.
* @return An option value containing result of applying reduce operator `op` between all
* the elements if the collection is nonempty, and `None` otherwise.
*/
def reduceOption[B >: A](op: (B, B) => B): Option[B] = reduceLeftOption(op)
/** Applies a binary operator to all elements of this $coll,
* going left to right.
* $willNotTerminateInf
* $orderDependentFold
*
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return the result of inserting `op` between consecutive elements of this $coll,
* going left to right:
* `op( op( ... op(x,,1,,, x,,2,,) ..., x,,n-1,,), x,,n,,)` where `x,,1,,, ..., x,,n,,`
* are the elements of this $coll.
* @throws UnsupportedOperationException if this $coll is empty. */
def reduceLeft[B >: A](op: (B, A) => B): B = {
val it = iterator
if (it.isEmpty)
throw new UnsupportedOperationException("empty.reduceLeft")
var first = true
var acc: B = null.asInstanceOf[B]
while (it.hasNext) {
val x = it.next()
if (first) {
acc = x
first = false
}
else acc = op(acc, x)
}
acc
}
/** Applies a binary operator to all elements of this $coll, going right to left.
* $willNotTerminateInf
* $orderDependentFold
*
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return the result of inserting `op` between consecutive elements of this $coll,
* going right to left:
* `op(x,,1,,, op(x,,2,,, ..., op(x,,n-1,,, x,,n,,)...))` where `x,,1,,, ..., x,,n,,`
* are the elements of this $coll.
* @throws UnsupportedOperationException if this $coll is empty.
*/
def reduceRight[B >: A](op: (A, B) => B): B = {
val it = iterator
if (it.isEmpty)
throw new UnsupportedOperationException("empty.reduceRight")
reversed.reduceLeft[B]((x, y) => op(y, x))
}
/** Optionally applies a binary operator to all elements of this $coll, going left to right.
* $willNotTerminateInf
* $orderDependentFold
*
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return an option value containing the result of `reduceLeft(op)` if this $coll is nonempty,
* `None` otherwise.
*/
def reduceLeftOption[B >: A](op: (B, A) => B): Option[B] = if (isEmpty) None else Some(reduceLeft(op))
/** Optionally applies a binary operator to all elements of this $coll, going
* right to left.
* $willNotTerminateInf
* $orderDependentFold
*
* @param op the binary operator.
* @tparam B the result type of the binary operator.
* @return an option value containing the result of `reduceRight(op)` if this $coll is nonempty,
* `None` otherwise.
*/
def reduceRightOption[B >: A](op: (A, B) => B): Option[B] = if (isEmpty) None else Some(reduceRight(op))
/** Tests whether the $coll is empty.
*
* Note: Implementations in subclasses that are not repeatedly traversable must take
* care not to consume any elements when `isEmpty` is called.
*
* @return `true` if the $coll contains no elements, `false` otherwise.
*/
  def isEmpty: Boolean = !iterator.hasNext // hasNext is a non-consuming lookahead per the Iterator contract
  /** Tests whether the $coll is not empty.
   *
   * @return `true` if the $coll contains at least one element, `false` otherwise.
   */
  @deprecatedOverriding("nonEmpty is defined as !isEmpty; override isEmpty instead", "2.13.0")
  def nonEmpty: Boolean = !isEmpty
/** The size of this $coll.
*
* $willNotTerminateInf
*
* @return the number of elements in this $coll.
*/
def size: Int = {
if (knownSize >= 0) knownSize
else {
val it = iterator
var len = 0
while (it.hasNext) { len += 1; it.next() }
len
}
}
@deprecated("Use `dest ++= coll` instead", "2.13.0")
@inline final def copyToBuffer[B >: A](dest: mutable.Buffer[B]): Unit = dest ++= this
/** Copy elements to an array, returning the number of elements written.
*
* Fills the given array `xs` starting at index `start` with values of this $coll.
*
* Copying will stop once either all the elements of this $coll have been copied,
* or the end of the array is reached.
*
* @param xs the array to fill.
* @tparam B the type of the elements of the array.
* @return the number of elements written to the array
*
* @note Reuse: $consumesIterator
*/
@deprecatedOverriding("This should always forward to the 3-arg version of this method", since = "2.13.4")
def copyToArray[B >: A](xs: Array[B]): Int = copyToArray(xs, 0, Int.MaxValue)
/** Copy elements to an array, returning the number of elements written.
*
* Fills the given array `xs` starting at index `start` with values of this $coll.
*
* Copying will stop once either all the elements of this $coll have been copied,
* or the end of the array is reached.
*
* @param xs the array to fill.
* @param start the starting index of xs.
* @tparam B the type of the elements of the array.
* @return the number of elements written to the array
*
* @note Reuse: $consumesIterator
*/
@deprecatedOverriding("This should always forward to the 3-arg version of this method", since = "2.13.4")
def copyToArray[B >: A](xs: Array[B], start: Int): Int = copyToArray(xs, start, Int.MaxValue)
/** Copy elements to an array, returning the number of elements written.
*
* Fills the given array `xs` starting at index `start` with at most `len` elements of this $coll.
*
* Copying will stop once either all the elements of this $coll have been copied,
* or the end of the array is reached, or `len` elements have been copied.
*
* @param xs the array to fill.
* @param start the starting index of xs.
* @param len the maximal number of elements to copy.
* @tparam B the type of the elements of the array.
* @return the number of elements written to the array
*
* @note Reuse: $consumesIterator
*/
def copyToArray[B >: A](xs: Array[B], start: Int, len: Int): Int = {
val it = iterator
var i = start
val end = start + math.min(len, xs.length - start)
while (i < end && it.hasNext) {
xs(i) = it.next()
i += 1
}
i - start
}
  /** Sums up the elements of this collection.
   *
   * $willNotTerminateInf
   *
   * An empty $coll yields `num.zero` rather than throwing.
   *
   * @param num an implicit parameter defining a set of numeric operations
   * which includes the `+` operator to be used in forming the sum.
   * @tparam B the result type of the `+` operator.
   * @return the sum of all elements of this $coll with respect to the `+` operator in `num`.
   */
  def sum[B >: A](implicit num: Numeric[B]): B = if (isEmpty) num.zero else reduce(num.plus)
  /** Multiplies up the elements of this collection.
   *
   * $willNotTerminateInf
   *
   * An empty $coll yields `num.one` rather than throwing.
   *
   * @param num an implicit parameter defining a set of numeric operations
   * which includes the `*` operator to be used in forming the product.
   * @tparam B the result type of the `*` operator.
   * @return the product of all elements of this $coll with respect to the `*` operator in `num`.
   */
  def product[B >: A](implicit num: Numeric[B]): B = if (isEmpty) num.one else reduce(num.times)
  /** Finds the smallest element.
   *
   * $willNotTerminateInf
   *
   * Implemented as a left reduction with `ord.min`.
   *
   * @param ord An ordering to be used for comparing elements.
   * @tparam B The type over which the ordering is defined.
   * @throws UnsupportedOperationException if this $coll is empty.
   * @return the smallest element of this $coll with respect to the ordering `ord`.
   *
   */
  def min[B >: A](implicit ord: Ordering[B]): A = {
    if (isEmpty)
      throw new UnsupportedOperationException("empty.min")
    reduceLeft(ord.min)
  }
  /** Finds the smallest element.
   *
   * $willNotTerminateInf
   *
   * Empty-safe variant of `min`.
   *
   * @param ord An ordering to be used for comparing elements.
   * @tparam B The type over which the ordering is defined.
   * @return an option value containing the smallest element of this $coll
   * with respect to the ordering `ord`.
   */
  def minOption[B >: A](implicit ord: Ordering[B]): Option[A] = {
    if (isEmpty)
      None
    else
      Some(min(ord))
  }
  /** Finds the largest element.
   *
   * $willNotTerminateInf
   *
   * Implemented as a left reduction with `ord.max`.
   *
   * @param ord An ordering to be used for comparing elements.
   * @tparam B The type over which the ordering is defined.
   * @throws UnsupportedOperationException if this $coll is empty.
   * @return the largest element of this $coll with respect to the ordering `ord`.
   */
  def max[B >: A](implicit ord: Ordering[B]): A = {
    if (isEmpty)
      throw new UnsupportedOperationException("empty.max")
    reduceLeft(ord.max)
  }
  /** Finds the largest element.
   *
   * $willNotTerminateInf
   *
   * Empty-safe variant of `max`.
   *
   * @param ord An ordering to be used for comparing elements.
   * @tparam B The type over which the ordering is defined.
   * @return an option value containing the largest element of this $coll with
   * respect to the ordering `ord`.
   */
  def maxOption[B >: A](implicit ord: Ordering[B]): Option[A] = {
    if (isEmpty)
      None
    else
      Some(max(ord))
  }
/** Finds the first element which yields the largest value measured by function f.
*
* $willNotTerminateInf
*
* @param cmp An ordering to be used for comparing elements.
* @tparam B The result type of the function f.
* @param f The measuring function.
* @throws UnsupportedOperationException if this $coll is empty.
* @return the first element of this $coll with the largest value measured by function f
* with respect to the ordering `cmp`.
*/
def maxBy[B](f: A => B)(implicit cmp: Ordering[B]): A = {
if (isEmpty)
throw new UnsupportedOperationException("empty.maxBy")
var maxF: B = null.asInstanceOf[B]
var maxElem: A = null.asInstanceOf[A]
var first = true
for (elem <- this) {
val fx = f(elem)
if (first || cmp.gt(fx, maxF)) {
maxElem = elem
maxF = fx
first = false
}
}
maxElem
}
/** Finds the first element which yields the largest value measured by function f.
*
* $willNotTerminateInf
*
* @param cmp An ordering to be used for comparing elements.
* @tparam B The result type of the function f.
* @param f The measuring function.
* @return an option value containing the first element of this $coll with the
* largest value measured by function f with respect to the ordering `cmp`.
*/
def maxByOption[B](f: A => B)(implicit cmp: Ordering[B]): Option[A] = {
if (isEmpty)
None
else
Some(maxBy(f)(cmp))
}
/** Finds the first element which yields the smallest value measured by function f.
*
* $willNotTerminateInf
*
* @param cmp An ordering to be used for comparing elements.
* @tparam B The result type of the function f.
* @param f The measuring function.
* @throws UnsupportedOperationException if this $coll is empty.
* @return the first element of this $coll with the smallest value measured by function f
* with respect to the ordering `cmp`.
*/
def minBy[B](f: A => B)(implicit cmp: Ordering[B]): A = {
if (isEmpty)
throw new UnsupportedOperationException("empty.minBy")
var minF: B = null.asInstanceOf[B]
var minElem: A = null.asInstanceOf[A]
var first = true
for (elem <- this) {
val fx = f(elem)
if (first || cmp.lt(fx, minF)) {
minElem = elem
minF = fx
first = false
}
}
minElem
}
/** Finds the first element which yields the smallest value measured by function f.
*
* $willNotTerminateInf
*
* @param cmp An ordering to be used for comparing elements.
* @tparam B The result type of the function f.
* @param f The measuring function.
* @return an option value containing the first element of this $coll
* with the smallest value measured by function f
* with respect to the ordering `cmp`.
*/
def minByOption[B](f: A => B)(implicit cmp: Ordering[B]): Option[A] = {
if (isEmpty)
None
else
Some(minBy(f)(cmp))
}
  /** Finds the first element of the $coll for which the given partial
   * function is defined, and applies the partial function to it.
   *
   * $mayNotTerminateInf
   * $orderDependent
   *
   * @param pf the partial function
   * @return an option value containing pf applied to the first
   * value for which it is defined, or `None` if none exists.
   * @example `Seq("a", 1, 5L).collectFirst({ case x: Int => x*10 }) = Some(10)`
   */
  def collectFirst[B](pf: PartialFunction[A, B]): Option[B] = {
    // Presumably the fastest way to get in and out of a partial function is for a sentinel function to return itself
    // (Tested to be lower-overhead than runWith. Would be better yet to not need to (formally) allocate it)
    val sentinel: scala.Function1[A, Any] = new scala.runtime.AbstractFunction1[A, Any] {
      def apply(a: A) = this
    }
    val it = iterator
    while (it.hasNext) {
      // applyOrElse returns the sentinel itself when pf is not defined at the element.
      val x = pf.applyOrElse(it.next(), sentinel)
      if (x.asInstanceOf[AnyRef] ne sentinel) return Some(x.asInstanceOf[B])
    }
    None
  }
  @deprecated("`aggregate` is not relevant for sequential collections. Use `foldLeft(z)(seqop)` instead.", "2.13.0")
  def aggregate[B](z: => B)(seqop: (B, A) => B, combop: (B, B) => B): B = foldLeft(z)(seqop)
  /** Tests whether every element of this collection's iterator relates to the
   * corresponding element of another collection by satisfying a test predicate.
   *
   * $willNotTerminateInf
   *
   * @param that the other collection
   * @param p the test predicate, which relates elements from both collections
   * @tparam B the type of the elements of `that`
   * @return `true` if both collections have the same length and
   * `p(x, y)` is `true` for all corresponding elements `x` of this iterator
   * and `y` of `that`, otherwise `false`
   */
  def corresponds[B](that: IterableOnce[B])(p: (A, B) => Boolean): Boolean = {
    val a = iterator
    val b = that.iterator
    while (a.hasNext && b.hasNext) {
      if (!p(a.next(), b.next())) return false
    }
    // Lengths are equal iff both iterators are exhausted at the same time.
    a.hasNext == b.hasNext
  }
  /** Displays all elements of this $coll in a string using start, end, and
   * separator strings.
   *
   * Delegates to addString, which can be overridden.
   *
   * @param start the starting string.
   * @param sep the separator string.
   * @param end the ending string.
   * @return a string representation of this $coll. The resulting string
   * begins with the string `start` and ends with the string
   * `end`. Inside, the string representations (w.r.t. the method
   * `toString`) of all elements of this $coll are separated by
   * the string `sep`.
   *
   * @example `List(1, 2, 3).mkString("(", "; ", ")") = "(1; 2; 3)"`
   */
  final def mkString(start: String, sep: String, end: String): String =
    if (isEmpty) start + end // fast path: no builder allocation for an empty $coll
    else addString(new StringBuilder(), start, sep, end).result()
  /** Displays all elements of this $coll in a string using a separator string.
   *
   * Delegates to addString, which can be overridden.
   *
   * @param sep the separator string.
   * @return a string representation of this $coll. In the resulting string
   * the string representations (w.r.t. the method `toString`)
   * of all elements of this $coll are separated by the string `sep`.
   *
   * @example `List(1, 2, 3).mkString("|") = "1|2|3"`
   */
  @inline final def mkString(sep: String): String = mkString("", sep, "")
  /** Displays all elements of this $coll in a string.
   *
   * Delegates to addString, which can be overridden.
   *
   * @return a string representation of this $coll. In the resulting string
   * the string representations (w.r.t. the method `toString`)
   * of all elements of this $coll follow each other without any
   * separator string.
   */
  @inline final def mkString: String = mkString("")
  /** Appends all elements of this $coll to a string builder using start, end, and separator strings.
   * The written text begins with the string `start` and ends with the string `end`.
   * Inside, the string representations (w.r.t. the method `toString`)
   * of all elements of this $coll are separated by the string `sep`.
   *
   * Example:
   *
   * {{{
   * scala> val a = List(1,2,3,4)
   * a: List[Int] = List(1, 2, 3, 4)
   *
   * scala> val b = new StringBuilder()
   * b: StringBuilder =
   *
   * scala> a.addString(b , "List(" , ", " , ")")
   * res5: StringBuilder = List(1, 2, 3, 4)
   * }}}
   *
   * @param b the string builder to which elements are appended.
   * @param start the starting string.
   * @param sep the separator string.
   * @param end the ending string.
   * @return the string builder `b` to which elements were appended.
   */
  def addString(b: StringBuilder, start: String, sep: String, end: String): StringBuilder = {
    // Append via the underlying java.lang.StringBuilder to avoid going through
    // the Scala wrapper on every element.
    val jsb = b.underlying
    if (start.length != 0) jsb.append(start)
    val it = iterator
    if (it.hasNext) {
      jsb.append(it.next())
      while (it.hasNext) {
        jsb.append(sep)
        jsb.append(it.next())
      }
    }
    if (end.length != 0) jsb.append(end)
    b
  }
  /** Appends all elements of this $coll to a string builder using a separator string.
   * The written text consists of the string representations (w.r.t. the method `toString`)
   * of all elements of this $coll, separated by the string `sep`.
   *
   * Example:
   *
   * {{{
   * scala> val a = List(1,2,3,4)
   * a: List[Int] = List(1, 2, 3, 4)
   *
   * scala> val b = new StringBuilder()
   * b: StringBuilder =
   *
   * scala> a.addString(b, ", ")
   * res0: StringBuilder = 1, 2, 3, 4
   * }}}
   *
   * @param b the string builder to which elements are appended.
   * @param sep the separator string.
   * @return the string builder `b` to which elements were appended.
   */
  @inline final def addString(b: StringBuilder, sep: String): StringBuilder = addString(b, "", sep, "")
  /** Appends all elements of this $coll to a string builder.
   * The written text consists of the string representations (w.r.t. the method
   * `toString`) of all elements of this $coll without any separator string.
   *
   * Example:
   *
   * {{{
   * scala> val a = List(1,2,3,4)
   * a: List[Int] = List(1, 2, 3, 4)
   *
   * scala> val b = new StringBuilder()
   * b: StringBuilder =
   *
   * scala> val h = a.addString(b)
   * h: StringBuilder = 1234
   * }}}
   * @param b the string builder to which elements are appended.
   * @return the string builder `b` to which elements were appended.
   */
  @inline final def addString(b: StringBuilder): StringBuilder = addString(b, "")
  /** Given a collection factory `factory`, convert this collection to the appropriate
   * representation for the current element type `A`. Example uses:
   *
   * xs.to(List)
   * xs.to(ArrayBuffer)
   * xs.to(BitSet) // for xs: Iterable[Int]
   */
  def to[C1](factory: Factory[A, C1]): C1 = factory.fromSpecific(this)
  @deprecated("Use .iterator instead of .toIterator", "2.13.0")
  @`inline` final def toIterator: Iterator[A] = iterator
  def toList: immutable.List[A] = immutable.List.from(this)
  def toVector: immutable.Vector[A] = immutable.Vector.from(this)
  // The `ev` evidence proves the element type is a pair, so the cast below is safe.
  def toMap[K, V](implicit ev: A <:< (K, V)): immutable.Map[K, V] =
    immutable.Map.from(this.asInstanceOf[IterableOnce[(K, V)]])
  def toSet[B >: A]: immutable.Set[B] = immutable.Set.from(this)
  /**
   * @return This collection as a `Seq[A]`. This is equivalent to `to(Seq)` but might be faster.
   */
  def toSeq: immutable.Seq[A] = immutable.Seq.from(this)
  def toIndexedSeq: immutable.IndexedSeq[A] = immutable.IndexedSeq.from(this)
  @deprecated("Use .to(LazyList) instead of .toStream", "2.13.0")
  @`inline` final def toStream: immutable.Stream[A] = to(immutable.Stream)
  // Note: returns a *mutable* buffer; each call allocates a fresh one.
  @`inline` final def toBuffer[B >: A]: mutable.Buffer[B] = mutable.Buffer.from(this)
/** Convert collection to array.
*
* Implementation note: DO NOT call [[Array.from]] from this method.
*/
def toArray[B >: A: ClassTag]: Array[B] =
if (knownSize >= 0) {
val destination = new Array[B](knownSize)
copyToArray(destination, 0)
destination
}
else mutable.ArrayBuilder.make[B].addAll(this).result()
  // For internal use
  /** Buffers this $coll's elements into a strict list in reverse order.
   * Used by the right-to-left folds, which cannot traverse a one-pass source backwards.
   */
  protected def reversed: Iterable[A] = {
    var xs: immutable.List[A] = immutable.Nil
    val it = iterator
    while (it.hasNext) xs = it.next() :: xs // prepending reverses the traversal order
    xs
  }
}
| lrytz/scala | src/library/scala/collection/IterableOnce.scala | Scala | apache-2.0 | 51,599 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.plugins.waila
import mcp.mobius.waila.api.IWailaDataProvider
import net.minecraft.item.ItemStack
import mcp.mobius.waila.api.IWailaConfigHandler
import mcp.mobius.waila.api.IWailaDataAccessor
import com.castlebravostudios.rayguns.utils.Extensions.BlockExtensions
import com.castlebravostudios.rayguns.blocks.lensgrinder.LensGrinder
import com.castlebravostudios.rayguns.items.RaygunsBlocks
import com.castlebravostudios.rayguns.blocks.lensgrinder.LensGrinderTileEntity
/**
 * Waila HUD data provider for the Lens Grinder block: shows whether the
 * grinder has power, its crafting progress, and the item being crafted.
 */
object LensGrinderDataProvider extends IWailaDataProvider {

  /** The item stack Waila displays for this block. */
  def getWailaStack(accessor: IWailaDataAccessor, config: IWailaConfigHandler): ItemStack =
    RaygunsBlocks.lensGrinder.asStack

  def getWailaHead(stack: ItemStack, currentTip: java.util.List[String],
    accessor: IWailaDataAccessor, config: IWailaConfigHandler): java.util.List[String] = currentTip

  /** Appends the power/progress/crafting lines to the tooltip body. */
  def getWailaBody(stack: ItemStack, currentTip: java.util.List[String],
    accessor: IWailaDataAccessor, config: IWailaConfigHandler): java.util.List[String] = {
    val te = accessor.getTileEntity()
    val lg = te.asInstanceOf[LensGrinderTileEntity]

    val hasPower = lg.chargeStored > 0
    val recipe = lg.recipe
    // Bug fix: the original mapped with `_ -> lg.getTimeRemainingScaled(100)`,
    // which builds a (recipe, value) Tuple2, so the HUD rendered something like
    // "Progress: (LensGrinderRecipe@1a2b,42)%". Only the scaled time is wanted.
    val progress = recipe.map(_ => lg.getTimeRemainingScaled(100)).getOrElse(0)
    val crafting = recipe.map(_.recipe.getRecipeOutput().getDisplayName()).getOrElse("None")

    currentTip.add(s"Has Power: $hasPower")
    currentTip.add(s"Progress: $progress%")
    currentTip.add(s"Crafting: $crafting")
    currentTip
  }

  def getWailaTail(stack: ItemStack, currentTip: java.util.List[String],
    accessor: IWailaDataAccessor, config: IWailaConfigHandler): java.util.List[String] = currentTip
} | Redattack34/ModularRayguns | src/main/scala/com/castlebravostudios/rayguns/plugins/waila/LensGrinderDataProvider.scala | Scala | bsd-3-clause | 3,339 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models.Task
/** Play controller for the simple task-list application. */
object Application extends Controller {

  /** Binds the "label" field of the task creation form; the label must be non-empty. */
  val taskForm = Form(
    "label" -> nonEmptyText
  )

  /** Entry point: redirect to the task list. */
  def index = Action {
    Redirect(routes.Application.tasks)
  }

  /** Renders all tasks together with the creation form. */
  def tasks = Action {
    Ok(views.html.index(Task.all(), taskForm))
  }

  /** Creates a task from the submitted form, re-rendering the list on validation errors. */
  def newTask = Action { implicit request =>
    taskForm.bindFromRequest.fold(
      errors => BadRequest(views.html.index(Task.all(), errors)),
      label => {
        Task.create(label)
        Redirect(routes.Application.tasks)
      }
    )
  }

  /** Deletes the task with the given id and returns to the list. */
  def deleteTask(id: Long) = Action {
    Task.delete(id)
    Redirect(routes.Application.tasks)
  }

  def sample1(id: Long) = TODO

  def sample2(id: Long) = Action {
    Ok(views.html.sample(id))
  }

  def sample3(id: Long) = Action {
    Ok(views.html.sample(id))
  }

  /** Renders the sample page, defaulting to 99 when no id is supplied. */
  def sample4(id: Option[Int]) = Action {
    // Idiom fix: Option#getOrElse replaces the manual Some/None pattern match.
    Ok(views.html.sample(id.getOrElse(99)))
  }
}
| khonda/playframeworkPractice | app/controllers/Application.scala | Scala | mit | 1,048 |
package com.sksamuel.elastic4s.bulk
import com.sksamuel.elastic4s.http.ElasticDsl
import com.sksamuel.elastic4s.testkit.ResponseConverterImplicits._
import com.sksamuel.elastic4s.testkit.{DualClient, DualElasticSugar}
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{FlatSpec, Matchers}
/** Exercises bulk requests (index / update / delete, including parent/child
 * documents) against both client implementations via DualClient. */
class BulkTest extends FlatSpec with Matchers with ElasticDsl with DualElasticSugar with DualClient {

  // Create the "chemistry" index once before any test: an "elements" type plus
  // a child "molecule" type whose parent is "elements".
  override protected def beforeRunTests(): Unit = {
    execute {
      createIndex("chemistry").mappings {
        mapping("elements").fields(
          intField("atomicweight").stored(true),
          textField("name").stored(true)
        )
        mapping("molecule").fields(
          textField("name").stored(true)
        ).parent("elements")
      }
    }.await
  }

  "bulk request" should "handle multiple index operations" in {
    execute {
      bulk(
        indexInto("chemistry/elements") fields("atomicweight" -> 2, "name" -> "helium") id 2,
        indexInto("chemistry/elements") fields("atomicweight" -> 4, "name" -> "lithium") id 4,
        indexInto("chemistry/molecule") fields("name" -> "LiH") id 1 parent "4"
      ).refresh(RefreshPolicy.IMMEDIATE)
    }.await.errors shouldBe false

    execute {
      get(2).from("chemistry/elements")
    }.await.found shouldBe true

    execute {
      get(4).from("chemistry/elements")
    }.await.found shouldBe true

    // Child documents must be fetched with the parent routing key.
    execute {
      get(1).from("chemistry/molecule").parent("4")
    }.await.found shouldBe true
  }

  it should "return details of which items succeeded and failed" in {
    val result = execute {
      bulk(
        update(2).in("chemistry/elements").doc("atomicweight" -> 2, "name" -> "helium"),
        indexInto("chemistry/elements").fields("atomicweight" -> 8, "name" -> "oxygen") id 8,
        update(6).in("chemistry/elements").doc("atomicweight" -> 4, "name" -> "lithium"),
        delete(10).from("chemistry/elements")
      ).refresh(RefreshPolicy.IMMEDIATE)
    }.await

    result.hasFailures shouldBe true
    result.hasSuccesses shouldBe true
    result.errors shouldBe true

    // Items 2 and 3 (update of missing id 6, delete of missing id 10) fail;
    // items 0 and 1 succeed. itemId is the position within the bulk request.
    result.failures.map(_.itemId).toSet shouldBe Set(2, 3)
    result.successes.map(_.itemId).toSet shouldBe Set(0, 1)
  }

  it should "handle multiple update operations" in {
    execute {
      bulk(
        update(2).in("chemistry/elements") doc("atomicweight" -> 6, "name" -> "carbon"),
        update(4).in("chemistry/elements") doc("atomicweight" -> 8, "name" -> "oxygen"),
        update(1).in("chemistry/molecule") parent "4" doc("name" -> "CO")
      ).refresh(RefreshPolicy.IMMEDIATE)
    }.await.errors shouldBe false

    execute {
      get(2).from("chemistry/elements").storedFields("name")
    }.await.storedField("name").value shouldBe "carbon"

    execute {
      get(4).from("chemistry/elements").storedFields("name")
    }.await.storedField("name").value shouldBe "oxygen"

    execute {
      get(1).from("chemistry/molecule").parent("4").storedFields("name")
    }.await.storedField("name").value shouldBe "CO"
  }

  it should "handle multiple delete operations" in {
    execute {
      bulk(
        delete(2).from("chemistry/elements"),
        delete(4).from("chemistry/elements"),
        delete(1).from("chemistry/molecule").parent("4")
      ).refresh(RefreshPolicy.IMMEDIATE)
    }.await.errors shouldBe false

    execute {
      get(2).from("chemistry/elements")
    }.await.found shouldBe false

    execute {
      get(4).from("chemistry/elements")
    }.await.found shouldBe false

    execute {
      get(1).from("chemistry/molecule").parent("4")
    }.await.found shouldBe false
  }
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/bulk/BulkTest.scala | Scala | apache-2.0 | 3,636 |
// scalac: -Werror
def logLevelDetail(level: Int): String =
s"""$level
// the following line is indented using [tab][tab]
Sets the global logging level to $level.
"""
/* was 2 errors with carets as shown
| ^ ^
| Incompatible combinations of tabs and spaces in indentation prefixes.
| Previous indent : 3 spaces
| Latest indent : 2 tabs
*/
def log(level: Int, msg: String): String =
s"""
$level
prefixed $level suffixed
"""
/*
^ ^
Incompatible combinations of tabs and spaces in indentation prefixes.
Previous indent : 2 tabs
Latest indent : 2 space
*/
// normal mixed tabs errors as a baseline
def g =
42
+ 17 // error
def p() =
println("hello")
println("world") // error
/*
| Incompatible combinations of tabs and spaces in indentation prefixes.
| Previous indent : 4 spaces
| Latest indent : 1 tab
*/
def braced() =
s"""begin
${
val level = 10
val msg = "hello, world" // error he lets me off with a warning
log(level, msg) // error
}
end"""
| dotty-staging/dotty | tests/neg/i14386.scala | Scala | apache-2.0 | 1,293 |
package com.github.gmspacagna.scalatable.descriptors
// NOTE(review): `abstract case class` is a deprecated combination (the
// synthesized case-class machinery assumes a concrete class) — confirm it still
// compiles cleanly on the project's Scala version; a plain abstract class with
// explicit fields may be preferable.
abstract case class HBaseTableDescriptor(path: String, family: String)
/** A column within an HBase column family, identified by its qualifier string. */
abstract class HBaseTableColumn(val qualifier: String) {
  override def toString = qualifier
}
/** Lookup helpers over a fixed mapping from qualifier string to typed column. */
abstract class HBaseTableFields[T <: HBaseTableColumn](map: Map[String, T]) {

  /** The column registered under `qualifier`, if any. */
  def getField(qualifier: String): Option[T] = map.get(qualifier)

  /** True when `qualifier` is registered and maps to exactly `field`. */
  def isColumnOf(qualifier: String, field: HBaseTableColumn) =
    map.get(qualifier).exists(_ == field)

  /** The first (qualifier, column) pair whose qualifier starts with `prefix`.
   *
   * Bug fix: the original returned either `None` or a *bare tuple* (`list(0)`
   * was not wrapped in `Some`), so the method's inferred result type was a
   * useless upper bound and callers could not pattern match on it reliably.
   * It now consistently returns an Option.
   */
  def getFirstPrefixMatchingField(prefix: String): Option[(String, T)] =
    map.find { case (qualifier, _) => qualifier.startsWith(prefix) }
}
/** Implicit widening of a column to its qualifier string.
 *
 * NOTE(review): an implicit conversion to `String` applies very broadly; prefer
 * calling `.qualifier` explicitly at use sites where readability suffers.
 */
trait TableColumnImplicits {
  implicit def qualifier2String(col: HBaseTableColumn): String = col.qualifier
}
| gm-spacagna/scala-table-clients | src/main/scala/com/github/gmspacagna/scalatable/descriptors/HBaseTableDescriptor.scala | Scala | apache-2.0 | 782 |
package org.jetbrains.sbt
package project.structure
import java.io.File
import scala.collection.JavaConverters._
import com.intellij.openapi.util.io.FileUtil
/**
 * Support for the .sbtopts file loaded by the sbt launcher script as alternative to command line options.
 */
object SbtOpts {

  /** Reads `directory/.sbtopts` and translates each option line into JVM options.
   * Returns an empty sequence when the file is absent or unreadable.
   */
  def loadFrom(directory: File): Seq[String] = {
    val sbtOptsFile = directory / ".sbtopts"
    if (sbtOptsFile.exists && sbtOptsFile.isFile && sbtOptsFile.canRead)
      process(FileUtil.loadLines(sbtOptsFile).asScala.map(_.trim))
    else
      Seq.empty
  }

  private val noShareOpts = "-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy"
  private val noGlobalOpts = "-Dsbt.global.base=project/.sbtboot"
  private val debuggerOpts = "-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address="

  // Maps sbt launcher flags (taking an argument) to the JVM option prefix they expand to.
  private val sbtToJdkOpts: Map[String, String] = Map(
    "-sbt-boot" -> "-Dsbt.boot.directory=",
    "-sbt-dir" -> "-Dsbt.global.base=",
    "-ivy" -> "-Dsbt.ivy.home=",
    "-jvm-debug" -> debuggerOpts
  )

  /** Translates one .sbtopts line at a time; unrecognized options are dropped. */
  private def process(opts: Seq[String]): Seq[String] = {
    opts.flatMap { opt =>
      if (opt.startsWith("-no-share"))
        Some(noShareOpts)
      else if (opt.startsWith("-no-global"))
        Some(noGlobalOpts)
      else if (sbtToJdkOpts.exists { case (k, _) => opt.startsWith(k) })
        processOptWithArg(opt)
      else if (opt.startsWith("-J"))
        Some(opt.substring(2))
      else if (opt.startsWith("-D"))
        Some(opt)
      else
        None
    }
  }

  /** Expands a flag-with-argument line ("-ivy /path") to its JVM option form. */
  private def processOptWithArg(opt: String): Option[String] = {
    sbtToJdkOpts.find { case (k, _) => opt.startsWith(k) }.flatMap { case (k, x) =>
      // Bug fix: use stripPrefix rather than replace(k, "") — replace removes
      // *every* occurrence of the flag text, which would corrupt an argument
      // that happens to contain it (e.g. an ivy path containing "-ivy").
      val v = opt.stripPrefix(k).trim
      if (v.isEmpty) None else Some(x + v)
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/project/structure/SbtOpts.scala | Scala | apache-2.0 | 1,783 |
/*
* Copyright 2014-2015 Brady Wood, Branko Juric
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gwen.web
/** End-to-end check that the web interpreter evaluates feature suites in parallel mode. */
class WebInterpreterParallelTest extends WebInterpreterTest {

  "Parallel mode" should "evaluate all features in parallel" in {
    // NOTE(review): the boolean flags are presumably (parallel = true, dryRun = false)
    // — confirm against WebInterpreterTest.evaluate. Reports land in target/reports/parallel.
    evaluate(List("features/floodio", "features/blogs/pageObjectsBegone", "features/blogs/automationByMeta", "features/etsy"), true, false, "target/reports/parallel", None)
  }
} | bltb/gwen-web | src/test/scala/gwen/web/WebInterpreterParallelTest.scala | Scala | apache-2.0 | 945 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
// SKIP-SCALATESTJS,NATIVE-START
import org.scalatest.tools.Runner
// SKIP-SCALATESTJS,NATIVE-END
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
/** Verifies that ScaledTimeSpans picks up the global span scale factor by default. */
class ScaledTimeSpansSpec extends AnyFunSpec with Matchers with ScaledTimeSpans {

  describe("ScaledTimeSpans") {
    it("should use Runner's spanScaleFactor by default") {
      // SKIP-SCALATESTJS,NATIVE-START
      assert(spanScaleFactor === Runner.spanScaleFactor)
      // SKIP-SCALATESTJS,NATIVE-END
      //SCALATESTJS,NATIVE-ONLY assert(spanScaleFactor === 1.0)
      // NOTE: mutating Runner.spanScaleFactor here could break concurrently
      // running tests that use `eventually`; the checks below stay disabled
      // until a better isolation mechanism is found.
      //Runner.spanScaleFactor = 2.0
      //assert(spanScaleFactor === 2.0)
      // Reset back to original, else it'll affect other tests.
      //Runner.spanScaleFactor = original
    }
  }
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/concurrent/ScaledTimeSpansSpec.scala | Scala | apache-2.0 | 1,581 |
package org.jetbrains.sbt.project.template
import org.jetbrains.plugins.scala.project.Versions
/** Mutable bag of the user's choices in the "new sbt module" wizard
 * (sbt/Scala versions, source-download toggles, package prefix). */
final case class SbtModuleBuilderSelections(
  var sbtVersion: Option[String],
  var scalaVersion: Option[String],
  var downloadScalaSdkSources: Boolean,
  var downloadSbtSources: Boolean,
  var packagePrefix: Option[String]
) {

  /**
   * For now we show latest Scala 2 version in the dropdown list.<br>
   * If the user wants to select some other version we need to show that there are Scala 3 versions above the selected version.<br>
   * By default combo box will show the selected element at the top and it's not clear that there are other versions above it.
   *
   * @see [[org.jetbrains.plugins.scala.project.Versions.Scala.initiallySelectedVersion]]
   */
  var scrollScalaVersionDropdownToTheTop = false

  /** The version currently stored for `kind`, if any. */
  def versionFromKind(kind: Versions.Kind): Option[String] = kind match {
    case Versions.Scala => scalaVersion
    case Versions.SBT => sbtVersion
  }

  /** Stores the version for `kind`, falling back to the kind's default pick
   * when no version was explicitly selected before. */
  def update(kind: Versions.Kind, versions: Versions): Unit = {
    val explicitlySelectedVersion = versionFromKind(kind)
    val version = explicitlySelectedVersion.getOrElse(kind.initiallySelectedVersion(versions.versions))
    kind match {
      case Versions.Scala =>
        scalaVersion = Some(version)
        // The scroll hint only applies when the version was auto-picked for the user.
        scrollScalaVersionDropdownToTheTop = explicitlySelectedVersion.isEmpty
      case Versions.SBT =>
        sbtVersion = Some(version)
    }
  }
}
}
object SbtModuleBuilderSelections {

  /** Fresh, empty selections: no versions chosen yet, Scala SDK sources
   * download enabled, sbt sources download disabled, no package prefix. */
  def default: SbtModuleBuilderSelections =
    SbtModuleBuilderSelections(
      sbtVersion = None,
      scalaVersion = None,
      downloadScalaSdkSources = true,
      downloadSbtSources = false,
      packagePrefix = None
    )
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/project/template/SbtModuleBuilderSelections.scala | Scala | apache-2.0 | 1,754 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.common.effect
import slamdata.Predef._
import quasar.fp.ski.κ
import simulacrum.typeclass
import scalaz._
import scalaz.syntax.functor._
/** A source of strings unique within `F[_]`, an implementation must have the
  * property that, if Applicative[F], then (freshName |@| freshName)(_ != _).
  *
  * The simulacrum `@typeclass` annotation generates the usual summoner and
  * syntax (ops) in the companion object.
  */
@typeclass trait NameGenerator[F[_]] {
  /** Returns a fresh name, guaranteed to be unique among all the other names
    * generated from `F`.
    */
  def freshName: F[String]
  /** Returns a fresh name, prefixed with the given string. */
  def prefixedName(prefix: String)(implicit F: Functor[F]): F[String] =
    freshName map (prefix + _)
}
// Companion object; extending the instance traits below places the instances in
// implicit scope with the intended priority (Instances over Instances0).
object NameGenerator extends NameGeneratorInstances
// Highest-priority instance: names come from a monotonically increasing Long
// counter threaded through MonadState (get the counter, bump it, emit it as a name).
sealed abstract class NameGeneratorInstances extends NameGeneratorInstances0 {
  implicit def sequenceNameGenerator[F[_]](implicit F: MonadState[F, Long]): NameGenerator[F] =
    new NameGenerator[F] {
      def freshName = F.bind(F.get)(n => F.put(n + 1) as n.toString)
    }
}
// Lower-priority instances: lift an existing NameGenerator[F] through the common
// scalaz monad transformers, so generators survive stacking with EitherT/ReaderT/
// StateT/WriterT without any extra plumbing at the call site.
sealed abstract class NameGeneratorInstances0 {
  implicit def eitherTNameGenerator[F[_]: NameGenerator : Functor, A]: NameGenerator[EitherT[F, A, ?]] =
    new NameGenerator[EitherT[F, A, ?]] {
      def freshName = EitherT.rightT(NameGenerator[F].freshName)
    }
  implicit def readerTNameGenerator[F[_]: NameGenerator, A]: NameGenerator[ReaderT[F, A, ?]] =
    new NameGenerator[ReaderT[F, A, ?]] {
      // κ ignores the environment: fresh names do not depend on the Reader context.
      def freshName = ReaderT(κ(NameGenerator[F].freshName))
    }
  implicit def stateTNameGenerator[F[_]: NameGenerator : Monad, S]: NameGenerator[StateT[F, S, ?]] =
    new NameGenerator[StateT[F, S, ?]] {
      // The state is passed through unchanged alongside the generated name.
      def freshName = StateT(s => NameGenerator[F].freshName strengthL s)
    }
  implicit def writerTNameGenerator[F[_]: NameGenerator : Functor, W: Monoid]: NameGenerator[WriterT[F, W, ?]] =
    new NameGenerator[WriterT[F, W, ?]] {
      // Nothing is logged when generating a name (writes the monoid's zero).
      def freshName = WriterT.put(NameGenerator[F].freshName)(Monoid[W].zero)
    }
}
| slamdata/quasar | common/src/main/scala/quasar/common/effect/NameGenerator.scala | Scala | apache-2.0 | 2,539 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.testkit
import akka.actor.ActorSystem
import akka.dispatch.Dispatchers
import akka.event.{Logging, LoggingAdapter}
import akka.testkit.TestEvent._
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.Future
import scala.language.{postfixOps, reflectiveCalls}
object AkkaSpec {
  // Baseline test configuration: route logging through the TestEventListener
  // (required for EventFilter-based assertions) and pin the default dispatcher
  // to a small, deterministic fork-join pool.
  val testConf: Config = ConfigFactory.parseString(
    """
      akka {
        loggers = ["akka.testkit.TestEventListener"]
        loglevel = "WARNING"
        stdout-loglevel = "WARNING"
        actor {
          default-dispatcher {
            executor = "fork-join-executor"
            fork-join-executor {
              parallelism-min = 8
              parallelism-factor = 2.0
              parallelism-max = 8
            }
          }
        }
      }
    """)
  /** Converts a plain Scala map of settings into a Typesafe Config. */
  def mapToConfig(map: Map[String, AnyRef]): Config = {
    import scala.collection.JavaConverters._
    ConfigFactory.parseMap(map.asJava)
  }
  /** Derives a valid ActorSystem name from the calling test class by walking the
    * current stack trace: skips Thread/AkkaSpec frames, then takes the first frame
    * after the last occurrence of `clazz` itself, strips the package prefix and
    * replaces characters that are illegal in system names with underscores. */
  def getCallerName(clazz: Class[_]): String = {
    val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1)
      .dropWhile(_ matches "(java.lang.Thread|.*AkkaSpec.?$)")
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 ⇒ s
      case z ⇒ s drop (z + 1)
    }
    reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }
}
/** Base class for actor tests: a TestKit wired to ScalaTest's WordSpec style,
  * with automatic ActorSystem shutdown after all tests and overridable
  * startup/termination hooks. */
abstract class AkkaSpec(_system: ActorSystem)
  extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll {
  // Auxiliary constructors: build the system from a Config, a config string, or a
  // settings map, always layered over AkkaSpec.testConf and named after the caller.
  def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(getClass),
    ConfigFactory.load(config.withFallback(AkkaSpec.testConf))))
  def this(s: String) = this(ConfigFactory.parseString(s))
  def this(configMap: Map[String, AnyRef]) = this(AkkaSpec.mapToConfig(configMap))
  def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf))
  val log: LoggingAdapter = Logging(system, this.getClass)
  override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true
  final override def beforeAll() {
    atStartup()
  }
  // Tear-down order: subclass hook, then system shutdown, then post-shutdown hook.
  final override def afterAll() {
    beforeTermination()
    shutdown()
    afterTermination()
  }
  // Template-method hooks for subclasses; no-ops by default.
  protected def atStartup() {}
  protected def beforeTermination() {}
  protected def afterTermination() {}
  /** Runs `body` asynchronously on the given dispatcher (result/failure is discarded). */
  def spawn(dispatcherId: String = Dispatchers.DefaultDispatcherId)(body: ⇒ Unit): Unit =
    Future(body)(system.dispatchers.lookup(dispatcherId))
  /** Suppresses dead-letter logging for the given message classes (all messages when
    * none are given); skipped when debug logging is on so nothing is hidden. */
  def muteDeadLetters(messageClasses: Class[_]*)(sys: ActorSystem = system): Unit =
    if (!sys.log.isDebugEnabled) {
      def mute(clazz: Class[_]): Unit =
        sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue)))
      if (messageClasses.isEmpty) mute(classOf[AnyRef])
      else messageClasses foreach mute
    }
}
| ilya-epifanov/akka-dns | src/test/scala/akka/testkit/AkkaSpec.scala | Scala | apache-2.0 | 2,891 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.ingest
import java.io.{File, InputStream}
import java.util.concurrent.atomic.AtomicLong
import com.typesafe.config.{Config, ConfigRenderOptions}
import org.apache.commons.pool2.BasePooledObjectFactory
import org.apache.commons.pool2.impl.{DefaultPooledObject, GenericObjectPool}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.locationtech.geomesa.convert.Transformers.DefaultCounter
import org.locationtech.geomesa.convert.{SimpleFeatureConverter, SimpleFeatureConverters}
import org.locationtech.geomesa.jobs.mapreduce.{ConverterInputFormat, GeoMesaOutputFormat}
import org.locationtech.geomesa.tools.Command
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
 * Ingestion that uses geomesa converters to process input files
 *
 * @param sft simple feature type
 * @param dsParams data store parameters
 * @param converterConfig converter definition
 * @param inputs files to ingest
 * @param libjarsFile file with list of jars needed for ingest
 * @param libjarsPaths paths to search for libjars
 * @param numLocalThreads for local ingest, how many threads to use
 */
class ConverterIngest(sft: SimpleFeatureType,
                      dsParams: Map[String, String],
                      converterConfig: Config,
                      inputs: Seq[String],
                      libjarsFile: String,
                      libjarsPaths: Iterator[() => Seq[File]],
                      numLocalThreads: Int)
    extends AbstractIngest(dsParams, sft.getTypeName, inputs, libjarsFile, libjarsPaths, numLocalThreads) {
  override def beforeRunTasks(): Unit = {
    // create schema for the feature prior to Ingest job
    Command.user.info(s"Creating schema ${sft.getTypeName}")
    ds.createSchema(sft)
  }
  // Pooled-object factory: converters are not shared across threads, so each local
  // ingest thread borrows its own instance built from the same config.
  val factory = new BasePooledObjectFactory[SimpleFeatureConverter[_]] {
    override def wrap(obj: SimpleFeatureConverter[_]) = new DefaultPooledObject[SimpleFeatureConverter[_]](obj)
    override def create(): SimpleFeatureConverter[_] = SimpleFeatureConverters.build(sft, converterConfig)
  }
  private val converterPool =
    new GenericObjectPool[SimpleFeatureConverter[_]](factory)
  // Per-file converter for local (in-process) ingest; borrows a converter from the
  // pool and returns it on close().
  override def createLocalConverter(file: File, failures: AtomicLong): LocalIngestConverter =
    new LocalIngestConverter {
      class LocalIngestCounter extends DefaultCounter {
        // keep track of failure at a global level, keep line counts and success local
        override def incFailure(i: Long): Unit = failures.getAndAdd(i)
        override def getFailure: Long = failures.get()
      }
      val converter = converterPool.borrowObject()
      // Expose the file path to the converter config via the evaluation context.
      val ec = converter.createEvaluationContext(Map("inputFilePath" -> file.getAbsolutePath), new LocalIngestCounter)
      override def convert(is: InputStream): (SimpleFeatureType, Iterator[SimpleFeature]) = (sft, converter.process(is, ec))
      override def close(): Unit = {
        converterPool.returnObject(converter)
      }
    }
  // Map/reduce path: delegates to ConverterIngestJob; returns (written, failed) counts.
  override def runDistributedJob(statusCallback: (Float, Long, Long, Boolean) => Unit = (_, _, _, _) => Unit): (Long, Long) = {
    val job = new ConverterIngestJob(sft, converterConfig)
    job.run(dsParams, sft.getTypeName, inputs, libjarsFile, libjarsPaths, statusCallback)
  }
}
/**
 * Distributed job that uses converters to process input files
 *
 * @param sft simple feature type
 * @param converterConfig converter definition
 */
class ConverterIngestJob(sft: SimpleFeatureType, converterConfig: Config) extends AbstractIngestJob {
  import ConverterInputFormat.{Counters => ConvertCounters}
  import GeoMesaOutputFormat.{Counters => OutCounters}
  // Counter coordinates (group, name) that contribute to the failure total.
  val failCounters =
    Seq((ConvertCounters.Group, ConvertCounters.Failed), (OutCounters.Group, OutCounters.Failed))
  override val inputFormatClass: Class[_ <: FileInputFormat[_, SimpleFeature]] = classOf[ConverterInputFormat]
  // Serializes the converter config (concise HOCON) and the sft into the job config
  // so mappers can rebuild the converter.
  override def configureJob(job: Job): Unit = {
    ConverterInputFormat.setConverterConfig(job, converterConfig.root().render(ConfigRenderOptions.concise()))
    ConverterInputFormat.setSft(job, sft)
  }
  override def written(job: Job): Long =
    job.getCounters.findCounter(OutCounters.Group, OutCounters.Written).getValue
  override def failed(job: Job): Long =
    failCounters.map(c => job.getCounters.findCounter(c._1, c._2).getValue).sum
}
| nagavallia/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/ingest/ConverterIngest.scala | Scala | apache-2.0 | 4,845 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.notify.service
import org.beangle.notify.Notifier
import scala.collection.mutable
trait DefaultNotifierService extends NotifierService:

  /** Registered notifiers keyed by id. Starts uninitialized (null) and is expected
    * to be assigned externally (e.g. by a DI container) before use.
    * Scala 3 dropped the `= _` wildcard initializer; `scala.compiletime.uninitialized`
    * is its direct replacement with identical semantics. */
  var notifiers: mutable.Map[String, Notifier] = scala.compiletime.uninitialized

  /** Looks up a notifier by id; returns null when absent (Java-friendly contract
    * preserved for existing callers). */
  def getNotifier(notifierId: String): Notifier = notifiers.get(notifierId).orNull
| beangle/notify | core/src/main/scala/org/beangle/notify/service/DefaultNotifierService.scala | Scala | gpl-3.0 | 1,015 |
package org.scalarules.dsl.core.types
import org.scalarules.finance.core.Quantity
import org.scalarules.finance.nl._
import scala.annotation.implicitNotFound
/**
 * This type class allows values of different types to be divided in the DSL.
 *
 * @tparam A type of the left hand side (dividend) of the division
 * @tparam B type of the right hand side (divisor) of the division
 * @tparam C type of the result of the division
 */
@implicitNotFound("No member of type class DivisibleValues available in scope for combination ${A} / ${B} = ${C}")
trait DivisibleValues[A, B, C] {
  /** Computes a / b. */
  def divide(a: A, b: B): C
  /** Zero element of the dividend type (e.g. 0, 0.euro in the instances below). */
  def leftUnit: A
  /** Neutral divisor of the divisor type (e.g. 1, 100.procent in the instances below). */
  def rightUnit: B
}
// Instances for the supported dividend/divisor combinations. Quantity-based
// instances delegate to the Quantity type class for the generic numeric type N.
object DivisibleValues {
  // BigDecimal / BigDecimal = BigDecimal
  implicit def bigDecimalDividedByBigDecimal: DivisibleValues[BigDecimal, BigDecimal, BigDecimal] = new DivisibleValues[BigDecimal, BigDecimal, BigDecimal] {
    override def divide(a: BigDecimal, b: BigDecimal): BigDecimal = a / b
    override def leftUnit: BigDecimal = 0
    override def rightUnit: BigDecimal = 1
  }
  // N / BigDecimal = N, for any N with a Quantity instance
  implicit def somethingDividedByBigDecimal[N : Quantity]: DivisibleValues[N, BigDecimal, N] = new DivisibleValues[N, BigDecimal, N] {
    private val ev = implicitly[Quantity[N]]
    override def divide(a: N, b: BigDecimal): N = ev.divide(a, b)
    override def leftUnit: N = ev.zero
    override def rightUnit: BigDecimal = 1
  }
  // N / Percentage = N; the percentage is applied as its fractional value
  implicit def somethingDividedByPercentage[N : Quantity]: DivisibleValues[N, Percentage, N] = new DivisibleValues[N, Percentage, N] {
    private val ev = implicitly[Quantity[N]]
    override def divide(a: N, b: Percentage): N = ev.divide(a, b.alsFractie)
    override def leftUnit: N = ev.zero
    override def rightUnit: Percentage = 100.procent
  }
  // N / Int = N
  implicit def somethingDividedByInt[N : Quantity]: DivisibleValues[N, Int, N] = new DivisibleValues[N, Int, N] {
    // Currently BigDecimal gets wrapped in a NumberLike, which is why this will also work for BigDecimal.
    private val ev = implicitly[Quantity[N]]
    override def divide(a: N, b: Int): N = ev.divide(a, b)
    override def leftUnit: N = ev.zero
    override def rightUnit: Int = 1
  }
  // Percentage / BigDecimal = BigDecimal
  implicit def percentageDividedByBigDecimal: DivisibleValues[Percentage, BigDecimal, BigDecimal] = new DivisibleValues[Percentage, BigDecimal, BigDecimal] {
    override def divide(a: Percentage, b: BigDecimal): BigDecimal = a / b
    override def leftUnit: Percentage = 0.procent
    override def rightUnit: BigDecimal = 1
  }
  // Bedrag / Bedrag = Percentage (ratio of two amounts expressed as a percentage)
  implicit def somethingDividedBySomethingAsPercentage: DivisibleValues[Bedrag, Bedrag, Percentage] = new DivisibleValues[Bedrag, Bedrag, Percentage] {
    // Note: this type class instance was initially as Quantity x Quantity => Percentage, but the QuantityBigDecimal throws a fit if we do that
    // and makes it ambiguous to choose when trying to divide two BigDecimals
    // private val ev = implicitly[Quantity[Bedrag]]
    override def divide(a: Bedrag, b: Bedrag): Percentage = (a.waarde / b.waarde * 100).procent
    override def leftUnit: Bedrag = 0.euro
    override def rightUnit: Bedrag = 1.euro
  }
  // Note: there is no somethingDividedByBedrag, because the division is not a commutative operation
  // and dividing a BigDecimal by something like a Bedrag would yield a BigDecimal Per Bedrag type, which
  // I do not yet foresee any use for.
}
| scala-rules/rule-engine | engine/src/main/scala/org/scalarules/dsl/core/types/DivisibleValues.scala | Scala | mit | 3,288 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, Date, List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.mesos.{Scheduler, SchedulerDriver}
import org.apache.mesos.Protos.{TaskState => MesosTaskState, _}
import org.apache.mesos.Protos.Environment.Variable
import org.apache.mesos.Protos.TaskStatus.Reason
import org.apache.spark.{SecurityManager, SparkConf, SparkException, TaskState}
import org.apache.spark.deploy.mesos.MesosDriverDescription
import org.apache.spark.deploy.mesos.config
import org.apache.spark.deploy.rest.{CreateSubmissionResponse, KillSubmissionResponse, SubmissionStatusResponse}
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.util.Utils
/**
 * Tracks the current state of a Mesos Task that runs a Spark driver.
 * @param driverDescription Submitted driver description from
 * [[org.apache.spark.deploy.rest.mesos.MesosRestServer]]
 * @param taskId Mesos TaskID generated for the task
 * @param slaveId Slave ID that the task is assigned to
 * @param mesosTaskStatus The last known task status update.
 * @param startDate The date the task was launched
 * @param finishDate The date the task finished
 * @param frameworkId Mesos framework ID the task registers with
 */
private[spark] class MesosClusterSubmissionState(
    val driverDescription: MesosDriverDescription,
    val taskId: TaskID,
    val slaveId: SlaveID,
    var mesosTaskStatus: Option[TaskStatus],
    var startDate: Date,
    var finishDate: Option[Date],
    val frameworkId: String)
  extends Serializable {
  // Shallow snapshot of the (partially mutable) state, used so readers (e.g. the UI)
  // do not observe concurrent mutation of mesosTaskStatus/startDate/finishDate.
  def copy(): MesosClusterSubmissionState = {
    new MesosClusterSubmissionState(
      driverDescription, taskId, slaveId, mesosTaskStatus, startDate, finishDate, frameworkId)
  }
}
/**
 * Tracks the retry state of a driver, which includes the next time it should be scheduled
 * and necessary information to do exponential backoff.
 * This class is not thread-safe, and we expect the caller to handle synchronizing state.
 *
 * @param lastFailureStatus Last Task status when it failed.
 * @param retries Number of times it has been retried.
 * @param nextRetry Time at which it should be retried next
 * @param waitTime The amount of time driver is scheduled to wait until next retry.
 */
private[spark] class MesosClusterRetryState(
    val lastFailureStatus: TaskStatus,
    val retries: Int,
    val nextRetry: Date,
    val waitTime: Int) extends Serializable {
  // Snapshot copy; all fields are vals so this is effectively a defensive clone.
  def copy(): MesosClusterRetryState =
    new MesosClusterRetryState(lastFailureStatus, retries, nextRetry, waitTime)
}
/**
 * The full state of the cluster scheduler, currently being used for displaying
 * information on the UI.
 *
 * @param frameworkId Mesos Framework id for the cluster scheduler.
 * @param masterUrl The Mesos master url
 * @param queuedDrivers All drivers queued to be launched
 * @param launchedDrivers All launched or running drivers
 * @param finishedDrivers All terminated drivers
 * @param pendingRetryDrivers All drivers pending to be retried
 */
private[spark] class MesosClusterSchedulerState(
    val frameworkId: String,
    val masterUrl: Option[String],
    val queuedDrivers: Iterable[MesosDriverDescription],
    val launchedDrivers: Iterable[MesosClusterSubmissionState],
    val finishedDrivers: Iterable[MesosClusterSubmissionState],
    val pendingRetryDrivers: Iterable[MesosDriverDescription])
/**
 * The full state of a Mesos driver, that is being used to display driver information on the UI.
 * `state` is one of the labels produced by MesosClusterScheduler.getDriverState
 * ("QUEUED", "RUNNING", "FINISHED", "RETRYING"); submissionState is only present
 * once the driver has actually been launched.
 */
private[spark] class MesosDriverState(
    val state: String,
    val description: MesosDriverDescription,
    val submissionState: Option[MesosClusterSubmissionState] = None)
/**
* A Mesos scheduler that is responsible for launching submitted Spark drivers in cluster mode
* as Mesos tasks in a Mesos cluster.
* All drivers are launched asynchronously by the framework, which will eventually be launched
* by one of the slaves in the cluster. The results of the driver will be stored in slave's task
* sandbox which is accessible by visiting the Mesos UI.
* This scheduler supports recovery by persisting all its state and performs task reconciliation
* on recover, which gets all the latest state for all the drivers from Mesos master.
*/
private[spark] class MesosClusterScheduler(
engineFactory: MesosClusterPersistenceEngineFactory,
conf: SparkConf)
extends Scheduler with MesosSchedulerUtils {
var frameworkUrl: String = _
private val metricsSystem =
MetricsSystem.createMetricsSystem("mesos_cluster", conf, new SecurityManager(conf))
private val master = conf.get("spark.master")
private val appName = conf.get("spark.app.name")
private val queuedCapacity = conf.getInt("spark.mesos.maxDrivers", 200)
private val retainedDrivers = conf.getInt("spark.mesos.retainedDrivers", 200)
private val maxRetryWaitTime = conf.getInt("spark.mesos.cluster.retry.wait.max", 60) // 1 minute
private val useFetchCache = conf.getBoolean("spark.mesos.fetchCache.enable", false)
private val schedulerState = engineFactory.createEngine("scheduler")
private val stateLock = new Object()
// Keyed by submission id
private val finishedDrivers =
new mutable.ArrayBuffer[MesosClusterSubmissionState](retainedDrivers)
private var frameworkId: String = null
// Holds all the launched drivers and current launch state, keyed by submission id.
private val launchedDrivers = new mutable.HashMap[String, MesosClusterSubmissionState]()
// Holds a map of driver id to expected slave id that is passed to Mesos for reconciliation.
// All drivers that are loaded after failover are added here, as we need get the latest
// state of the tasks from Mesos. Keyed by task Id.
private val pendingRecover = new mutable.HashMap[String, SlaveID]()
// Stores all the submitted drivers that hasn't been launched, keyed by submission id
private val queuedDrivers = new ArrayBuffer[MesosDriverDescription]()
// All supervised drivers that are waiting to retry after termination, keyed by submission id
private val pendingRetryDrivers = new ArrayBuffer[MesosDriverDescription]()
private val queuedDriversState = engineFactory.createEngine("driverQueue")
private val launchedDriversState = engineFactory.createEngine("launchedDrivers")
private val pendingRetryDriversState = engineFactory.createEngine("retryList")
private final val RETRY_SEP = "-retry-"
// Flag to mark if the scheduler is ready to be called, which is until the scheduler
// is registered with Mesos master.
@volatile protected var ready = false
private var masterInfo: Option[MasterInfo] = None
private var schedulerDriver: SchedulerDriver = _
  /**
   * Enqueues a driver submission for asynchronous launch.
   * Rejected when the scheduler has not yet registered with the Mesos master
   * (ready == false) or when capacity has been reached (see isQueueFull).
   */
  def submitDriver(desc: MesosDriverDescription): CreateSubmissionResponse = {
    val c = new CreateSubmissionResponse
    if (!ready) {
      c.success = false
      c.message = "Scheduler is not ready to take requests"
      return c
    }
    stateLock.synchronized {
      if (isQueueFull()) {
        c.success = false
        c.message = "Already reached maximum submission size"
        return c
      }
      c.submissionId = desc.submissionId
      c.success = true
      addDriverToQueue(desc)
    }
    c
  }
  /**
   * Kills or removes the driver with the given submission id, wherever it
   * currently lives (running, queued, retrying, or already finished).
   * A running driver is killed asynchronously via the Mesos scheduler driver.
   */
  def killDriver(submissionId: String): KillSubmissionResponse = {
    val k = new KillSubmissionResponse
    if (!ready) {
      k.success = false
      k.message = "Scheduler is not ready to take requests"
      return k
    }
    k.submissionId = submissionId
    stateLock.synchronized {
      // We look for the requested driver in the following places:
      // 1. Check if submission is running or launched.
      // 2. Check if it's still queued.
      // 3. Check if it's in the retry list.
      // 4. Check if it has already completed.
      if (launchedDrivers.contains(submissionId)) {
        val state = launchedDrivers(submissionId)
        schedulerDriver.killTask(state.taskId)
        k.success = true
        k.message = "Killing running driver"
      } else if (removeFromQueuedDrivers(submissionId)) {
        k.success = true
        k.message = "Removed driver while it's still pending"
      } else if (removeFromPendingRetryDrivers(submissionId)) {
        k.success = true
        k.message = "Removed driver while it's being retried"
      } else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
        k.success = false
        k.message = "Driver already terminated"
      } else {
        k.success = false
        k.message = "Cannot find driver"
      }
    }
    k
  }
  /**
   * Reports the lifecycle state of a submission for the REST status endpoint:
   * QUEUED, RUNNING, FINISHED, RETRYING, or NOT_FOUND. When a Mesos task status
   * is available it is rendered into the response message.
   */
  def getDriverStatus(submissionId: String): SubmissionStatusResponse = {
    val s = new SubmissionStatusResponse
    if (!ready) {
      s.success = false
      s.message = "Scheduler is not ready to take requests"
      return s
    }
    s.submissionId = submissionId
    stateLock.synchronized {
      if (queuedDrivers.exists(_.submissionId.equals(submissionId))) {
        s.success = true
        s.driverState = "QUEUED"
      } else if (launchedDrivers.contains(submissionId)) {
        s.success = true
        s.driverState = "RUNNING"
        launchedDrivers(submissionId).mesosTaskStatus.foreach(state => s.message = state.toString)
      } else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
        s.success = true
        s.driverState = "FINISHED"
        // .get is safe: guarded by the exists() check above.
        finishedDrivers
          .find(d => d.driverDescription.submissionId.equals(submissionId)).get.mesosTaskStatus
          .foreach(state => s.message = state.toString)
      } else if (pendingRetryDrivers.exists(_.submissionId.equals(submissionId))) {
        val status = pendingRetryDrivers.find(_.submissionId.equals(submissionId))
          .get.retryState.get.lastFailureStatus
        s.success = true
        s.driverState = "RETRYING"
        s.message = status.toString
      } else {
        s.success = false
        s.driverState = "NOT_FOUND"
      }
    }
    s
  }
  /**
   * Gets the driver state to be displayed on the Web UI.
   * Checks queued, launched, finished and retrying collections in that order and
   * wraps the first hit in a MesosDriverState with the matching label.
   */
  def getDriverState(submissionId: String): Option[MesosDriverState] = {
    stateLock.synchronized {
      queuedDrivers.find(_.submissionId.equals(submissionId))
        .map(d => new MesosDriverState("QUEUED", d))
        .orElse(launchedDrivers.get(submissionId)
          .map(d => new MesosDriverState("RUNNING", d.driverDescription, Some(d))))
        .orElse(finishedDrivers.find(_.driverDescription.submissionId.equals(submissionId))
          .map(d => new MesosDriverState("FINISHED", d.driverDescription, Some(d))))
        .orElse(pendingRetryDrivers.find(_.submissionId.equals(submissionId))
          .map(d => new MesosDriverState("RETRYING", d)))
    }
  }
  // Capacity gate used by submitDriver; queuedCapacity comes from spark.mesos.maxDrivers.
  // NOTE(review): this compares the number of *launched* drivers (not queuedDrivers)
  // against the capacity, while the rejection message speaks of submissions --
  // confirm whether queuedDrivers.size was the intended measure here.
  private def isQueueFull(): Boolean = launchedDrivers.size >= queuedCapacity
  /**
   * Recover scheduler state that is persisted.
   * We still need to do task reconciliation to be up to date of the latest task states
   * as it might have changed while the scheduler is failing over.
   * Launched drivers are also queued into pendingRecover so registered() can ask
   * Mesos for their true status.
   */
  private def recoverState(): Unit = {
    stateLock.synchronized {
      launchedDriversState.fetchAll[MesosClusterSubmissionState]().foreach { state =>
        launchedDrivers(state.driverDescription.submissionId) = state
        pendingRecover(state.taskId.getValue) = state.slaveId
      }
      queuedDriversState.fetchAll[MesosDriverDescription]().foreach(d => queuedDrivers += d)
      // There is potential timing issue where a queued driver might have been launched
      // but the scheduler shuts down before the queued driver was able to be removed
      // from the queue. We try to mitigate this issue by walking through all queued drivers
      // and remove if they're already launched.
      queuedDrivers
        .filter(d => launchedDrivers.contains(d.submissionId))
        .foreach(d => removeFromQueuedDrivers(d.submissionId))
      pendingRetryDriversState.fetchAll[MesosDriverDescription]()
        .foreach(s => pendingRetryDrivers += s)
      // TODO: Consider storing finished drivers so we can show them on the UI after
      // failover. For now we clear the history on each recovery.
      finishedDrivers.clear()
    }
  }
  /**
   * Starts the cluster scheduler and waits until the scheduler is registered.
   * This also marks the scheduler to be ready for requests.
   * Order matters: persisted state (framework id, drivers) is recovered before the
   * Mesos driver is created so registration can reuse the old framework id.
   */
  def start(): Unit = {
    // TODO: Implement leader election to make sure only one framework running in the cluster.
    val fwId = schedulerState.fetch[String]("frameworkId")
    fwId.foreach { id =>
      frameworkId = id
    }
    recoverState()
    metricsSystem.registerSource(new MesosClusterSchedulerSource(this))
    metricsSystem.start()
    val driver = createSchedulerDriver(
      master,
      MesosClusterScheduler.this,
      Utils.getCurrentUserName(),
      appName,
      conf,
      Some(frameworkUrl),
      Some(true),
      Some(Integer.MAX_VALUE),
      fwId)
    startScheduler(driver)
    ready = true
  }
  /** Stops accepting requests, flushes/stops metrics, and stops the Mesos driver
    * (stop(true) requests failover so the framework stays registered). */
  def stop(): Unit = {
    ready = false
    metricsSystem.report()
    metricsSystem.stop()
    // NOTE(review): schedulerDriver is only assigned in registered(); calling stop()
    // before a successful registration would NPE here -- confirm caller ordering.
    schedulerDriver.stop(true)
  }
  /**
   * Mesos callback invoked once the framework is (re)registered with a master.
   * Persists a newly assigned framework id, records the master/driver handles, and
   * kicks off task reconciliation for any drivers recovered from persisted state.
   */
  override def registered(
      driver: SchedulerDriver,
      newFrameworkId: FrameworkID,
      masterInfo: MasterInfo): Unit = {
    logInfo("Registered as framework ID " + newFrameworkId.getValue)
    if (newFrameworkId.getValue != frameworkId) {
      frameworkId = newFrameworkId.getValue
      schedulerState.persist("frameworkId", frameworkId)
    }
    markRegistered()
    stateLock.synchronized {
      this.masterInfo = Some(masterInfo)
      this.schedulerDriver = driver
      if (!pendingRecover.isEmpty) {
        // Start task reconciliation if we need to recover.
        val statuses = pendingRecover.collect {
          case (taskId, slaveId) =>
            // Use the last known status when we have one; otherwise ask about a
            // synthetic TASK_STAGING status for that task/slave pair.
            val newStatus = TaskStatus.newBuilder()
              .setTaskId(TaskID.newBuilder().setValue(taskId).build())
              .setSlaveId(slaveId)
              .setState(MesosTaskState.TASK_STAGING)
              .build()
            launchedDrivers.get(getSubmissionIdFromTaskId(taskId))
              .map(_.mesosTaskStatus.getOrElse(newStatus))
              .getOrElse(newStatus)
        }
        // TODO: Page the status updates to avoid trying to reconcile
        // a large amount of tasks at once.
        driver.reconcileTasks(statuses.toSeq.asJava)
      }
    }
  }
  // Executor distribution URI: per-submission config first, then the submitted
  // command's SPARK_EXECUTOR_URI environment variable.
  private def getDriverExecutorURI(desc: MesosDriverDescription): Option[String] = {
    desc.conf.getOption("spark.executor.uri")
      .orElse(desc.command.environment.get("SPARK_EXECUTOR_URI"))
  }
  // Framework id exposed to the launched driver: "<dispatcherFwId>-<submissionId>"
  // with an extra "-retry-<n>" suffix on retries.
  private def getDriverFrameworkID(desc: MesosDriverDescription): String = {
    val retries = desc.retryState.map { d => s"${RETRY_SEP}${d.retries.toString}" }.getOrElse("")
    s"${frameworkId}-${desc.submissionId}${retries}"
  }
  // Mesos task id: the submission id, suffixed with "-retry-<n>" on retries so each
  // attempt gets a distinct task id.
  private def getDriverTaskId(desc: MesosDriverDescription): String = {
    val sId = desc.submissionId
    desc.retryState.map(state => sId + s"${RETRY_SEP}${state.retries.toString}").getOrElse(sId)
  }
  // Inverse of getDriverTaskId: strip any retry suffix to recover the submission id.
  // (split() treats RETRY_SEP as a regex; "-retry-" contains no metacharacters.)
  private def getSubmissionIdFromTaskId(taskId: String): String = {
    taskId.split(s"${RETRY_SEP}").head
  }
  // Returns m with key k's value transformed by f, using `default` when k is absent.
  private def adjust[A, B](m: collection.Map[A, B], k: A, default: B)(f: B => B) = {
    m.updated(k, f(m.getOrElse(k, default)))
  }
  /**
   * Builds the Mesos Environment for the driver task: the submitted command's env
   * (with the framework id appended to SPARK_SUBMIT_OPTS), any
   * spark.mesos.driverEnv.* overrides, plus secret-backed variables.
   */
  private def getDriverEnvironment(desc: MesosDriverDescription): Environment = {
    // TODO(mgummelt): Don't do this here. This should be passed as a --conf
    val commandEnv = adjust(desc.command.environment, "SPARK_SUBMIT_OPTS", "")(
      v => s"$v -Dspark.mesos.driver.frameworkId=${getDriverFrameworkID(desc)}"
    )
    // driverEnv.* settings win over the command's own environment on key collisions.
    val env = desc.conf.getAllWithPrefix("spark.mesos.driverEnv.") ++ commandEnv
    val envBuilder = Environment.newBuilder()
    // add normal environment variables
    env.foreach { case (k, v) =>
      envBuilder.addVariables(Variable.newBuilder().setName(k).setValue(v))
    }
    // add secret environment variables
    MesosSchedulerBackendUtil.getSecretEnvVar(desc.conf, config.driverSecretConfig)
      .foreach { variable =>
        if (variable.getSecret.getReference.isInitialized) {
          logInfo(s"Setting reference secret ${variable.getSecret.getReference.getName} " +
            s"on file ${variable.getName}")
        } else {
          logInfo(s"Setting secret on environment variable name=${variable.getName}")
        }
        envBuilder.addVariables(variable)
      }
    envBuilder.build()
  }
  /**
   * Collects the URIs the Mesos fetcher should download into the task sandbox:
   * dispatcher- and submission-level spark.mesos.uris, submitted py-files, the
   * application jar, and the executor distribution URI (if any).
   */
  private def getDriverUris(desc: MesosDriverDescription): List[CommandInfo.URI] = {
    val confUris = List(conf.getOption("spark.mesos.uris"),
      desc.conf.getOption("spark.mesos.uris"),
      desc.conf.getOption("spark.submit.pyFiles")).flatMap(
      _.map(_.split(",").map(_.trim))
    ).flatten
    // file:/local: prefixes are stripped so the fetcher treats it as a plain path.
    val jarUrl = desc.jarUrl.stripPrefix("file:").stripPrefix("local:")
    ((jarUrl :: confUris) ++ getDriverExecutorURI(desc).toList).map(uri =>
      CommandInfo.URI.newBuilder().setValue(uri.trim()).setCache(useFetchCache).build())
  }
  /**
   * Builds the driver task's ContainerInfo from the submission config and mounts
   * any secret-backed volumes configured for the driver.
   */
  private def getContainerInfo(desc: MesosDriverDescription): ContainerInfo.Builder = {
    val containerInfo = MesosSchedulerBackendUtil.buildContainerInfo(desc.conf)
    MesosSchedulerBackendUtil.getSecretVolume(desc.conf, config.driverSecretConfig)
      .foreach { volume =>
        if (volume.getSource.getSecret.getReference.isInitialized) {
          logInfo(s"Setting reference secret ${volume.getSource.getSecret.getReference.getName} " +
            s"on file ${volume.getContainerPath}")
        } else {
          logInfo(s"Setting secret on file name=${volume.getContainerPath}")
        }
        containerInfo.addVolumes(volume)
      }
    containerInfo
  }
  /**
   * Builds the shell command line that launches the driver via spark-submit.
   * The spark-submit executable and the sandbox path are resolved differently for
   * the three launch modes: docker image, downloaded executor tarball, or a
   * pre-installed Spark home on the agent.
   */
  private def getDriverCommandValue(desc: MesosDriverDescription): String = {
    val dockerDefined = desc.conf.contains("spark.mesos.executor.docker.image")
    val executorUri = getDriverExecutorURI(desc)
    // Gets the path to run spark-submit, and the path to the Mesos sandbox.
    val (executable, sandboxPath) = if (dockerDefined) {
      // Application jar is automatically downloaded in the mounted sandbox by Mesos,
      // and the path to the mounted volume is stored in $MESOS_SANDBOX env variable.
      ("./bin/spark-submit", "$MESOS_SANDBOX")
    } else if (executorUri.isDefined) {
      // Tarball mode: cd into the extracted distribution folder (archive basename).
      val folderBasename = executorUri.get.split('/').last.split('.').head
      val entries = conf.getOption("spark.executor.extraLibraryPath")
        .map(path => Seq(path) ++ desc.command.libraryPathEntries)
        .getOrElse(desc.command.libraryPathEntries)
      val prefixEnv = if (!entries.isEmpty) Utils.libraryPathEnvPrefix(entries) else ""
      val cmdExecutable = s"cd $folderBasename*; $prefixEnv bin/spark-submit"
      // Sandbox path points to the parent folder as we chdir into the folderBasename.
      (cmdExecutable, "..")
    } else {
      // Pre-installed mode: Spark home must be configured somewhere.
      val executorSparkHome = desc.conf.getOption("spark.mesos.executor.home")
        .orElse(conf.getOption("spark.home"))
        .orElse(Option(System.getenv("SPARK_HOME")))
        .getOrElse {
          throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
        }
      val cmdExecutable = new File(executorSparkHome, "./bin/spark-submit").getPath
      // Sandbox points to the current directory by default with Mesos.
      (cmdExecutable, ".")
    }
    val cmdOptions = generateCmdOption(desc, sandboxPath).mkString(" ")
    val primaryResource = new File(sandboxPath, desc.jarUrl.split("/").last).toString()
    val appArguments = desc.command.arguments.mkString(" ")
    s"$executable $cmdOptions $primaryResource $appArguments"
  }
private def buildDriverCommand(desc: MesosDriverDescription): CommandInfo = {
val builder = CommandInfo.newBuilder()
builder.setValue(getDriverCommandValue(desc))
builder.setEnvironment(getDriverEnvironment(desc))
builder.addAllUris(getDriverUris(desc).asJava)
builder.build()
}
  /**
   * Builds the spark-submit option list for a driver submission. --py-files paths
   * are rewritten to point inside the Mesos sandbox where the files are fetched.
   * All submission conf entries plus dispatcher-side defaults
   * (spark.mesos.dispatcher.driverDefault.*) are forwarded via --conf, except for
   * the blacklisted keys below. Every element is shell-escaped because the result
   * is embedded into a shell command (see getDriverCommandValue).
   *
   * @param desc driver submission description
   * @param sandboxPath sandbox path as seen by the driver process
   * @return shell-escaped spark-submit arguments in flag/value order
   */
  private def generateCmdOption(desc: MesosDriverDescription, sandboxPath: String): Seq[String] = {
    var options = Seq(
      "--name", desc.conf.get("spark.app.name"),
      "--master", s"mesos://${conf.get("spark.master")}",
      "--driver-cores", desc.cores.toString,
      "--driver-memory", s"${desc.mem}M")
    // Assume empty main class means we're running python
    if (!desc.command.mainClass.equals("")) {
      options ++= Seq("--class", desc.command.mainClass)
    }
    desc.conf.getOption("spark.executor.memory").foreach { v =>
      options ++= Seq("--executor-memory", v)
    }
    desc.conf.getOption("spark.cores.max").foreach { v =>
      options ++= Seq("--total-executor-cores", v)
    }
    desc.conf.getOption("spark.submit.pyFiles").foreach { pyFiles =>
      // Only the basename survives: files are fetched flat into the sandbox.
      val formattedFiles = pyFiles.split(",")
        .map { path => new File(sandboxPath, path.split("/").last).toString() }
        .mkString(",")
      options ++= Seq("--py-files", formattedFiles)
    }
    // --conf
    val replicatedOptionsBlacklist = Set(
      "spark.jars", // Avoids duplicate classes in classpath
      "spark.submit.deployMode", // this would be set to `cluster`, but we need client
      "spark.master" // this contains the address of the dispatcher, not master
    )
    val defaultConf = conf.getAllWithPrefix("spark.mesos.dispatcher.driverDefault.").toMap
    val driverConf = desc.conf.getAll
      .filter { case (key, _) => !replicatedOptionsBlacklist.contains(key) }
      .toMap
    // Map concatenation order makes the submission conf win over dispatcher defaults.
    (defaultConf ++ driverConf).foreach { case (key, value) =>
      options ++= Seq("--conf", s"${key}=${value}") }
    options.map(shellEscape)
  }
  /**
   * Escape args for Unix-like shells, unless already quoted by the user.
   * Based on: http://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
   * and http://www.grymoire.com/Unix/Quote.html
   *
   * Behaviour: a value already wrapped in matching single or double quotes is
   * passed through untouched; a value containing shell metacharacters is
   * double-quoted with the characters that stay special inside double quotes
   * backslash-escaped; any other value is returned unchanged.
   *
   * @param value argument
   * @return escaped argument
   */
  private[scheduler] def shellEscape(value: String): String = {
    val WrappedInQuotes = """^(".+"|'.+')$""".r
    val ShellSpecialChars = (""".*([ '<>&|\\?\\*;!#\\\\(\\)"$`]).*""").r
    value match {
      case WrappedInQuotes(c) => value // The user quoted his args, don't touch it!
      case ShellSpecialChars(c) => "\\"" + value.replaceAll("""(["`\\$\\\\])""", """\\\\$1""") + "\\""
      case _: String => value // Don't touch harmless strings
    }
  }
  /**
   * Mutable wrapper around a Mesos offer. `remainingResources` is reassigned as
   * tasks are carved out of this offer (see createTaskInfo), so a single offer
   * can host several drivers within one resourceOffers round.
   */
  private class ResourceOffer(
      val offer: Offer,
      var remainingResources: JList[Resource],
      var attributes: JList[Attribute]) {
    override def toString(): String = {
      s"Offer id: ${offer.getId}, resources: ${remainingResources}, attributes: ${attributes}"
    }
  }
  /**
   * Creates the Mesos TaskInfo that launches a driver, carving the requested
   * cpus and mem out of the offer's remaining resources.
   * Side effect: `offer.remainingResources` is replaced by what is left after
   * this allocation.
   */
  private def createTaskInfo(desc: MesosDriverDescription, offer: ResourceOffer): TaskInfo = {
    val taskId = TaskID.newBuilder().setValue(getDriverTaskId(desc)).build()
    // Split off cpus first, then mem from what remains after the cpu split.
    val (remainingResources, cpuResourcesToUse) =
      partitionResources(offer.remainingResources, "cpus", desc.cores)
    val (finalResources, memResourcesToUse) =
      partitionResources(remainingResources.asJava, "mem", desc.mem)
    offer.remainingResources = finalResources.asJava
    val appName = desc.conf.get("spark.app.name")
    // Custom driver labels from config.DRIVER_LABELS; empty string when unset.
    val driverLabels = MesosProtoUtils.mesosLabels(desc.conf.get(config.DRIVER_LABELS)
      .getOrElse(""))
    TaskInfo.newBuilder()
      .setTaskId(taskId)
      .setName(s"Driver for ${appName}")
      .setSlaveId(offer.offer.getSlaveId)
      .setCommand(buildDriverCommand(desc))
      .setContainer(getContainerInfo(desc))
      .addAllResources(cpuResourcesToUse.asJava)
      .addAllResources(memResourcesToUse.asJava)
      .setLabels(driverLabels)
      .build
  }
  /**
   * This method takes all the possible candidates and attempt to schedule them with Mesos offers.
   * Every time a new task is scheduled, the afterLaunchCallback is called to perform post scheduled
   * logic on each task.
   *
   * @param candidates submissions to place, tried in order
   * @param afterLaunchCallback invoked with the submission id once its task has
   *                            been created and persisted (e.g. to dequeue it)
   * @param currentOffers offers whose remaining resources are consumed in place
   * @param tasks out-parameter: tasks to launch, accumulated per offer id
   */
  private def scheduleTasks(
      candidates: Seq[MesosDriverDescription],
      afterLaunchCallback: (String) => Boolean,
      currentOffers: List[ResourceOffer],
      tasks: mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]): Unit = {
    for (submission <- candidates) {
      val driverCpu = submission.cores
      val driverMem = submission.mem
      val driverConstraints =
        parseConstraintString(submission.conf.get(config.DRIVER_CONSTRAINTS))
      logTrace(s"Finding offer to launch driver with cpu: $driverCpu, mem: $driverMem, " +
        s"driverConstraints: $driverConstraints")
      // First offer that still has enough cpu and mem and satisfies the constraints.
      val offerOption = currentOffers.find { offer =>
        getResource(offer.remainingResources, "cpus") >= driverCpu &&
          getResource(offer.remainingResources, "mem") >= driverMem &&
          matchesAttributeRequirements(driverConstraints, toAttributeMap(offer.attributes))
      }
      if (offerOption.isEmpty) {
        // No match: the submission is left in its queue for a later round of offers.
        logDebug(s"Unable to find offer to launch driver id: ${submission.submissionId}, " +
          s"cpu: $driverCpu, mem: $driverMem")
      } else {
        val offer = offerOption.get
        val queuedTasks = tasks.getOrElseUpdate(offer.offer.getId, new ArrayBuffer[TaskInfo])
        try {
          // createTaskInfo also subtracts the allocation from offer.remainingResources.
          val task = createTaskInfo(submission, offer)
          queuedTasks += task
          logTrace(s"Using offer ${offer.offer.getId.getValue} to launch driver " +
            submission.submissionId + s" with taskId: ${task.getTaskId.toString}")
          val newState = new MesosClusterSubmissionState(
            submission,
            task.getTaskId,
            offer.offer.getSlaveId,
            None,
            new Date(),
            None,
            getDriverFrameworkID(submission))
          launchedDrivers(submission.submissionId) = newState
          launchedDriversState.persist(submission.submissionId, newState)
          afterLaunchCallback(submission.submissionId)
        } catch {
          case e: SparkException =>
            // Task creation failed: dequeue the submission and record it as finished.
            // Note the null start date marks a driver that never launched.
            afterLaunchCallback(submission.submissionId)
            finishedDrivers += new MesosClusterSubmissionState(
              submission,
              TaskID.newBuilder().setValue(submission.submissionId).build(),
              SlaveID.newBuilder().setValue("").build(),
              None,
              null,
              None,
              getDriverFrameworkID(submission))
            logError(s"Failed to launch the driver with id: ${submission.submissionId}, " +
              s"cpu: $driverCpu, mem: $driverMem, reason: ${e.getMessage}")
        }
      }
    }
  }
  /**
   * Mesos callback with fresh offers. Under stateLock, first tries to place
   * supervised drivers whose retry time has come, then the newly queued drivers.
   * Accumulated tasks are launched per offer; offers that received no task are
   * declined for getRejectOfferDuration.
   */
  override def resourceOffers(driver: SchedulerDriver, offers: JList[Offer]): Unit = {
    logTrace(s"Received offers from Mesos: \\n${offers.asScala.mkString("\\n")}")
    val tasks = new mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]()
    val currentTime = new Date()
    val currentOffers = offers.asScala.map {
      offer => new ResourceOffer(offer, offer.getResourcesList, offer.getAttributesList)
    }.toList
    stateLock.synchronized {
      // We first schedule all the supervised drivers that are ready to retry.
      // This list will be empty if none of the drivers are marked as supervise.
      val driversToRetry = pendingRetryDrivers.filter { d =>
        d.retryState.get.nextRetry.before(currentTime)
      }
      scheduleTasks(
        copyBuffer(driversToRetry),
        removeFromPendingRetryDrivers,
        currentOffers,
        tasks)
      // Then we walk through the queued drivers and try to schedule them.
      scheduleTasks(
        copyBuffer(queuedDrivers),
        removeFromQueuedDrivers,
        currentOffers,
        tasks)
    }
    // Launch outside the lock: one launchTasks call per offer that got tasks.
    tasks.foreach { case (offerId, taskInfos) =>
      driver.launchTasks(Collections.singleton(offerId), taskInfos.asJava)
    }
    for (offer <- currentOffers if !tasks.contains(offer.offer.getId)) {
      declineOffer(driver, offer.offer, None, Some(getRejectOfferDuration(conf)))
    }
  }
private def copyBuffer(
buffer: ArrayBuffer[MesosDriverDescription]): ArrayBuffer[MesosDriverDescription] = {
val newBuffer = new ArrayBuffer[MesosDriverDescription](buffer.size)
buffer.copyToBuffer(newBuffer)
newBuffer
}
  /**
   * Snapshot of the scheduler's state (queued, launched, finished and
   * pending-retry drivers). Buffers are defensively copied under stateLock so
   * callers can read the result without further synchronisation.
   */
  def getSchedulerState(): MesosClusterSchedulerState = {
    stateLock.synchronized {
      new MesosClusterSchedulerState(
        frameworkId,
        masterInfo.map(m => s"http://${m.getIp}:${m.getPort}"),
        copyBuffer(queuedDrivers),
        launchedDrivers.values.map(_.copy()).toList,
        finishedDrivers.map(_.copy()).toList,
        copyBuffer(pendingRetryDrivers))
    }
  }
  // Intentionally a no-op: rescinded offers require no bookkeeping here.
  override def offerRescinded(driver: SchedulerDriver, offerId: OfferID): Unit = {}
  // Intentionally a no-op.
  override def disconnected(driver: SchedulerDriver): Unit = {}
  // Logs re-registration with a (new) Mesos master.
  override def reregistered(driver: SchedulerDriver, masterInfo: MasterInfo): Unit = {
    logInfo(s"Framework re-registered with master ${masterInfo.getId}")
  }
  // Intentionally a no-op.
  override def slaveLost(driver: SchedulerDriver, slaveId: SlaveID): Unit = {}
  // Logs the framework-level error and records the error state via markErr().
  override def error(driver: SchedulerDriver, error: String): Unit = {
    logError("Error received: " + error)
    markErr()
  }
/**
* Check if the task state is a recoverable state that we can relaunch the task.
* Task state like TASK_ERROR are not relaunchable state since it wasn't able
* to be validated by Mesos.
*/
private def shouldRelaunch(state: MesosTaskState): Boolean = {
state == MesosTaskState.TASK_FAILED ||
state == MesosTaskState.TASK_LOST
}
  /**
   * Mesos callback for task status changes. Supervised drivers in a relaunchable
   * state (see shouldRelaunch) are moved to the pending-retry queue with
   * exponential backoff capped at maxRetryWaitTime; other terminal states retire
   * the driver into finishedDrivers.
   */
  override def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
    val taskId = status.getTaskId.getValue
    logInfo(s"Received status update: taskId=${taskId}" +
      s" state=${status.getState}" +
      s" message=${status.getMessage}" +
      s" reason=${status.getReason}")
    stateLock.synchronized {
      val subId = getSubmissionIdFromTaskId(taskId)
      if (launchedDrivers.contains(subId)) {
        if (status.getReason == Reason.REASON_RECONCILIATION &&
          !pendingRecover.contains(taskId)) {
          // Task has already received update and no longer requires reconciliation.
          return
        }
        val state = launchedDrivers(subId)
        // Check if the driver is supervise enabled and can be relaunched.
        if (state.driverDescription.supervise && shouldRelaunch(status.getState)) {
          removeFromLaunchedDrivers(subId)
          state.finishDate = Some(new Date())
          val retryState: Option[MesosClusterRetryState] = state.driverDescription.retryState
          // First retry waits 1s; each subsequent retry doubles the wait, capped.
          val (retries, waitTimeSec) = retryState
            .map { rs => (rs.retries + 1, Math.min(maxRetryWaitTime, rs.waitTime * 2)) }
            .getOrElse{ (1, 1) }
          val nextRetry = new Date(new Date().getTime + waitTimeSec * 1000L)
          val newDriverDescription = state.driverDescription.copy(
            retryState = Some(new MesosClusterRetryState(status, retries, nextRetry, waitTimeSec)))
          addDriverToPending(newDriverDescription, newDriverDescription.submissionId)
        } else if (TaskState.isFinished(mesosToTaskState(status.getState))) {
          retireDriver(subId, state)
        }
        // Record the latest Mesos status on the (possibly just retired) state.
        state.mesosTaskStatus = Option(status)
      } else {
        logError(s"Unable to find driver with $taskId in status update")
      }
    }
  }
  /**
   * Moves a terminated driver out of launchedDrivers into finishedDrivers,
   * trimming the oldest ~10% of finished drivers once retainedDrivers is reached.
   */
  private def retireDriver(
      submissionId: String,
      state: MesosClusterSubmissionState) = {
    removeFromLaunchedDrivers(submissionId)
    state.finishDate = Some(new Date())
    if (finishedDrivers.size >= retainedDrivers) {
      val toRemove = math.max(retainedDrivers / 10, 1)
      finishedDrivers.trimStart(toRemove)
    }
    finishedDrivers += state
  }
  // Intentionally a no-op: framework messages are not used by this scheduler.
  override def frameworkMessage(
      driver: SchedulerDriver,
      executorId: ExecutorID,
      slaveId: SlaveID,
      message: Array[Byte]): Unit = {}
  // Intentionally a no-op.
  override def executorLost(
      driver: SchedulerDriver,
      executorId: ExecutorID,
      slaveId: SlaveID,
      status: Int): Unit = {}
private def removeFromQueuedDrivers(subId: String): Boolean = {
val index = queuedDrivers.indexWhere(_.submissionId.equals(subId))
if (index != -1) {
queuedDrivers.remove(index)
queuedDriversState.expunge(subId)
true
} else {
false
}
}
private def removeFromLaunchedDrivers(subId: String): Boolean = {
if (launchedDrivers.remove(subId).isDefined) {
launchedDriversState.expunge(subId)
true
} else {
false
}
}
private def removeFromPendingRetryDrivers(subId: String): Boolean = {
val index = pendingRetryDrivers.indexWhere(_.submissionId.equals(subId))
if (index != -1) {
pendingRetryDrivers.remove(index)
pendingRetryDriversState.expunge(subId)
true
} else {
false
}
}
  /** Number of drivers waiting to be launched. */
  def getQueuedDriversSize: Int = queuedDrivers.size
  /** Number of drivers that have been launched and not yet retired. */
  def getLaunchedDriversSize: Int = launchedDrivers.size
  /** Number of supervised drivers waiting for their next retry. */
  def getPendingRetryDriversSize: Int = pendingRetryDrivers.size
  /** Persists a new submission, enqueues it, and revives offers so it can be scheduled. */
  private def addDriverToQueue(desc: MesosDriverDescription): Unit = {
    queuedDriversState.persist(desc.submissionId, desc)
    queuedDrivers += desc
    revive()
  }
  /** Persists a retry submission under `subId`, enqueues it, and revives offers. */
  private def addDriverToPending(desc: MesosDriverDescription, subId: String) = {
    pendingRetryDriversState.persist(subId, desc)
    pendingRetryDrivers += desc
    revive()
  }
  /** Asks Mesos to resume sending offers to this framework. */
  private def revive(): Unit = {
    logInfo("Reviving Offers.")
    schedulerDriver.reviveOffers()
  }
}
| szhem/spark | resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala | Scala | apache-2.0 | 33,977 |
package foo
// Scala 3 top-level constant used by the sbt doc-generation test.
final val foo="Foo"
object A:
  /**
   * Doc-generation fixture; the body is the constant 3.
   * @param i An argument
   */
  def x(i:Int)=3
| sbt/sbt | sbt-app/src/sbt-test/actions/doc-scala3/rc1/src/main/scala/foo/A.scala | Scala | apache-2.0 | 100 |
package fpinscala.state
import scala.annotation.tailrec
/**
 * A purely functional random number generator: every draw returns the value
 * together with the successor generator, so identical generators always yield
 * identical results.
 */
trait RNG {
  def nextInt: (Int, RNG) // Should generate a random `Int`. We'll later define other functions in terms of `nextInt`.
}

object RNG {
  // NB - this was called SimpleRNG in the book text

  /** Linear congruential generator using the same constants as java.util.Random. */
  case class Simple(seed: Long) extends RNG {
    def nextInt: (Int, RNG) = {
      // Advance the 48-bit masked seed and expose its top 32 bits as the value.
      val advanced = (seed * 0x5DEECE66DL + 0xBL) & 0xFFFFFFFFFFFFL
      ((advanced >>> 16).toInt, Simple(advanced))
    }
  }

  /** A state action over RNG: consumes a generator, produces a value plus the next generator. */
  type Rand[+A] = RNG => (A, RNG)

  val int: Rand[Int] = _.nextInt

  /** Constant action: yields `a` without consuming any randomness. */
  def unit[A](a: A): Rand[A] =
    r => (a, r)

  /** Post-processes the result of `s` with `f`, threading the generator through. */
  def map[A, B](s: Rand[A])(f: A => B): Rand[B] =
    r0 => {
      val (a, r1) = s(r0)
      (f(a), r1)
    }

  /** Uniform Int in [0, Int.MaxValue]: clears the sign bit of a raw draw. */
  def nonNegativeInt(rng: RNG): (Int, RNG) = {
    val (raw, next) = rng.nextInt
    (raw & Int.MaxValue, next)
  }

  /** Uniform Double in [0, 1): divides by Int.MaxValue + 1 so 1.0 is excluded. */
  def double(rng: RNG): (Double, RNG) = {
    val (raw, next) = nonNegativeInt(rng)
    (raw / (Int.MaxValue.toDouble + 1), next)
  }

  /** Coin flip based on the parity of a raw draw. */
  def boolean(rng: RNG): (Boolean, RNG) = {
    val (raw, next) = rng.nextInt
    (raw % 2 == 0, next)
  }

  /** An Int followed by a Double, threading the generator between the draws. */
  def intDouble(rng: RNG): ((Int, Double), RNG) = {
    val (i, r1) = rng.nextInt
    val (d, r2) = double(r1)
    ((i, d), r2)
  }

  /** Mirror of intDouble: a Double first, then an Int. */
  def doubleInt(rng: RNG): ((Double, Int), RNG) = {
    val (d, r1) = double(rng)
    val (i, r2) = r1.nextInt
    ((d, i), r2)
  }

  /** Three consecutive Doubles. */
  def double3(rng: RNG): ((Double, Double, Double), RNG) = {
    val (d1, r1) = double(rng)
    val (d2, r2) = double(r1)
    val (d3, r3) = double(r2)
    ((d1, d2, d3), r3)
  }

  /**
   * Draws `count` ints. The accumulator is prepended to, so the most recently
   * drawn value sits at the head of the resulting list.
   */
  def ints(count: Int)(rng: RNG): (List[Int], RNG) = {
    @tailrec
    def go(remaining: Int, acc: List[Int], r: RNG): (List[Int], RNG) =
      if (remaining == 0) (acc, r)
      else {
        val (value, r2) = r.nextInt
        go(remaining - 1, value :: acc, r2)
      }
    go(count, Nil, rng)
  }

  /** `double` re-expressed with `map` instead of explicit state threading. */
  def doubleMap: Rand[Double] =
    map(nonNegativeInt)(_ / (Int.MaxValue.toDouble + 1))

  /** Runs `ra` then `rb` and combines their results with `f`. */
  def map2[A, B, C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C] =
    r0 => {
      val (a, r1) = ra(r0)
      val (b, r2) = rb(r1)
      (f(a, b), r2)
    }

  /** Runs the actions left to right, collecting their results in order. */
  def sequence[A](fs: List[Rand[A]]): Rand[List[A]] =
    fs.foldRight(unit(List.empty[A]))((f, acc) => map2(f, acc)(_ :: _))

  /** `ints` re-expressed via `sequence`; note the `rng` parameter is unused. */
  def intsSeq(count: Int)(rng: RNG): Rand[List[Int]] =
    sequence(List.fill(count)(int))

  /** Runs `f`, then feeds its result into `g` to choose the follow-up action. */
  def flatMap[A, B](f: Rand[A])(g: A => Rand[B]): Rand[B] =
    r0 => {
      val (a, r1) = f(r0)
      g(a)(r1)
    }

  /**
   * Uniform Int in [0, n): retries whenever the draw lands in the final partial
   * block of n values, which would otherwise skew the distribution.
   */
  def nonNegativeLessThan(n: Int): Rand[Int] =
    flatMap(nonNegativeInt) { i => r =>
      val mod = i % n
      if (i + (n - 1) - mod >= 0) (mod, r)
      else nonNegativeLessThan(n)(r)
    }

  /** `map` re-expressed in terms of `flatMap` and `unit`. */
  def flatMapMap[A, B](s: Rand[A])(f: A => B): Rand[B] =
    flatMap(s)(a => unit(f(a)))

  /** `map2` re-expressed in terms of `flatMap` and `flatMapMap`. */
  def flatMapMap2[A, B, C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C] =
    flatMap(ra)(a => flatMapMap(rb)(b => f(a, b)))
}
/**
 * A stateful computation: a function from an initial state to a result plus
 * the successor state.
 */
case class State[S, +A](run: S => (A, S)) {
  /** Transforms the produced value; the state transition is unchanged. */
  def map[B](f: A => B): State[S, B] =
    State { s0 =>
      val (a, s1) = run(s0)
      (f(a), s1)
    }
  /** Runs this action, then `sb`, combining both results with `f`. */
  def map2[B, C](sb: State[S, B])(f: (A, B) => C): State[S, C] =
    flatMap(a => sb.map(b => f(a, b)))
  /** Sequences a dependent action after this one, threading the state. */
  def flatMap[B](f: A => State[S, B]): State[S, B] =
    State { s0 =>
      val (a, s1) = run(s0)
      f(a).run(s1)
    }
}
/** Input to the candy machine: insert a coin or turn the knob. */
sealed trait Input
case object Coin extends Input
case object Turn extends Input
/** Candy dispenser state: lock flag plus remaining candies and collected coins. */
case class Machine(locked: Boolean, candies: Int, coins: Int)
object State {
  type Rand[A] = State[RNG, A]
  /**
   * Simulates the candy machine (previously unimplemented: the `???` body threw
   * NotImplementedError on every call). Rules:
   *  - inserting a Coin into a locked machine with candy left unlocks it and
   *    collects the coin;
   *  - a Turn on an unlocked machine dispenses one candy and locks it;
   *  - any other input (Turn while locked, Coin while unlocked, or any input
   *    when no candy remains) leaves the machine unchanged.
   *
   * @param inputs the inputs applied in order
   * @return a State yielding (coins, candies) left in the machine at the end
   */
  def simulateMachine(inputs: List[Input]): State[Machine, (Int, Int)] =
    State { machine =>
      val end = inputs.foldLeft(machine)(applyInput)
      ((end.coins, end.candies), end)
    }
  // One transition of the candy machine; ignored inputs return the machine as-is.
  private def applyInput(m: Machine, input: Input): Machine = (input, m) match {
    case (_, Machine(_, 0, _)) => m
    case (Coin, Machine(true, candies, coins)) => Machine(locked = false, candies, coins + 1)
    case (Turn, Machine(false, candies, coins)) => Machine(locked = true, candies - 1, coins)
    case _ => m
  }
  /** Lifts a pure value into State without touching the state. */
  def unit[S, A](a: A): State[S, A] = State(s => (a, s))
  /** Runs the actions left to right, threading state and collecting results in order. */
  def sequence[S,A](sas: List[State[S, A]]): State[S, List[A]] =
    sas.foldRight(unit[S, List[A]](List()))((s, a) => s.map2(a)(_ :: _))
}
| conor10/fpinscala | exercises/src/main/scala/fpinscala/state/State.scala | Scala | mit | 4,122 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
/** Round-trip serialization check for the Ceil operation. */
class CeilSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    // Serialize and reload a Ceil op, then verify it evaluates identically on random input.
    val module = Ceil[Float, Float]().setName("ceil")
    val sample = Tensor[Float](2, 2).apply1(_ => Random.nextFloat())
    runSerializationTest(module, sample)
  }
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/CeilSpec.scala | Scala | apache-2.0 | 1,044 |
/*
* Copyright 2015 Databricks
* Copyright 2015 TouchType Ltd. (Added JDBC-based Data Source API implementation)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark
import org.apache.spark.sql.functions._
import org.apache.spark.sql.jdbc.DefaultJDBCWrapper
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
package object redshift {
  /**
   * Wrapper of SQLContext that provide `redshiftFile` method.
   */
  implicit class RedshiftContext(sqlContext: SQLContext) {
    /**
     * Read a file unloaded from Redshift into a DataFrame.
     * Records are parsed by RedshiftInputFormat into string arrays; empty
     * fields become SQL NULL. Every column in the result is a nullable string.
     * @param path input path
     * @param columns column names to assign, in field order
     * @return a DataFrame with all string columns
     */
    def redshiftFile(path: String, columns: Seq[String]): DataFrame = {
      val sc = sqlContext.sparkContext
      val rdd = sc.newAPIHadoopFile(path, classOf[RedshiftInputFormat],
        classOf[java.lang.Long], classOf[Array[String]], sc.hadoopConfiguration)
      // TODO: allow setting NULL string.
      // Empty strings map to null, then each record array becomes a Row.
      val nullable = rdd.values.map(_.map(f => if (f.isEmpty) null else f)).map(x => Row(x: _*))
      val schema = StructType(columns.map(c => StructField(c, StringType, nullable = true)))
      sqlContext.createDataFrame(nullable, schema)
    }
    /**
     * Reads a table unload from Redshift with its schema in format "name0 type0 name1 type1 ...".
     * Columns are first read as strings and then cast to the declared types.
     */
    def redshiftFile(path: String, schema: String): DataFrame = {
      val structType = SchemaParser.parseSchema(schema)
      val casts = structType.fields.map { field =>
        col(field.name).cast(field.dataType).as(field.name)
      }
      redshiftFile(path, structType.fieldNames).select(casts: _*)
    }
    /**
     * Read a Redshift table into a DataFrame, using S3 for data transfer and JDBC
     * to control Redshift and resolve the schema
     */
    def redshiftTable(parameters: Map[String, String]) = {
      val params = Parameters.mergeParameters(parameters)
      sqlContext.baseRelationToDataFrame(RedshiftRelation(DefaultJDBCWrapper, params, None)(sqlContext))
    }
  }
/**
* Add write functionality to DataFrame
*/
implicit class RedshiftDataFrame(dataFrame: DataFrame) {
/**
* Load the DataFrame into a Redshift database table
*/
def saveAsRedshiftTable(parameters: Map[String, String]): Unit = {
val params = Parameters.mergeParameters(parameters)
DefaultRedshiftWriter.saveToRedshift(dataFrame.sqlContext, dataFrame, params)
}
}
}
| methodmill/spark-redshift | src/main/scala/com/databricks/spark/redshift/package.scala | Scala | apache-2.0 | 3,026 |
package infographic
import play.api.libs.json.JsValue
import play.api.libs.json.Writes
import charts.graphics.ProgressTable
import charts.Region
import play.api.libs.json._
/**
 * Builds infographic JSON for progress-table charts. Subclasses implement
 * `hdl` to convert one chart into an optional (region, json) pair; `apply`
 * maps it over the chart list and serialises the result.
 */
trait ProgressJsonBuilder extends ChartJsonBuilder {
  // Name of a progress indicator, matched (lower-cased) against column headers.
  type IndicatorName = String
  def apply(chartList: Seq[charts.Chart]): JsValue =
    toJson(chartList.map(hdl(_)))
  // Converts a single chart; None when the chart does not apply.
  def hdl(chart: charts.Chart): Option[(Region, JsValue)]
  /**
   * A Writes serialising the FIRST row of a ProgressTable.Dataset into a JSON
   * object keyed by the requested indicator names. For each column whose header
   * matches an indicator and whose cell has a non-null condition, the value is
   * {"qualitative": condition, "quantitative": progress, "target": cleaned target}.
   * NOTE(review): only `dataset.rows` head is used - assumes at least one row;
   * confirm callers never pass an empty dataset.
   */
  def getPtdWrites(includeIndicators: IndicatorName*): Writes[ProgressTable.Dataset] = {
    import scala.collection.JavaConversions._
    new Writes[ProgressTable.Dataset] {
      override def writes(dataset: ProgressTable.Dataset) = {
        val row = dataset.rows.toSeq.head
        val indicators =
          dataset.columns.toList.zipWithIndex.flatMap { case (column, i) =>
            val indicator = column.header.toLowerCase
            val cell = row.cells.get(i)
            Option(cell) // Check cell isn't null
              // Null condition means we can skip
              .filter(_.condition != null)
              // Get matching indicator, if there is one
              .flatMap { cell =>
                includeIndicators.find(_ == indicator).map((cell, _))
              }
              // Use cell and indicator to produce data
              .map { case (cell, indicatorKey) =>
                // Normalise the target label, e.g. "Target: 50 per cent" -> "50%".
                val target = column.target
                  .replaceAll("Target: ", "")
                  .replaceAll(" per cent", "%")
                  .replaceAll("\n", " ")
                Seq(indicatorKey -> JsObject(List(
                  "qualitative" -> toJs(cell.condition),
                  "quantitative" -> JsString(cell.progress),
                  "target" -> JsString(target)
                )))
              }
              .getOrElse(Seq[(String, JsValue)]())
          }
        // Merge the per-indicator singleton pairs into one JSON object.
        indicators.foldLeft(Json.obj()) { (o, v) =>
          o ++ Json.obj( v._1 -> v._2 )
        }
      }
      // UNDECIDED (and null) conditions serialise as JSON null.
      def toJs(condition: ProgressTable.Condition) = {
        condition match {
          case null => JsNull
          case ProgressTable.Condition.UNDECIDED => JsNull
          case _ => JsString(condition.toString())
        }
      }
    }
  }
}
/*
The MIT License (MIT)
Copyright (c) 2016 Tom Needham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.thomas.needham.neurophidea.examples.scala
import java.io._
import java.util
import org.neuroph.core.NeuralNetwork
import org.neuroph.core.learning.{SupervisedTrainingElement, TrainingSet}
import org.neuroph.nnet.Kohonen
import org.neuroph.nnet.learning.BackPropagation
object TestKohonen {
var inputSize: Int = 8
var outputSize: Int = 1
var network: NeuralNetwork = _
var trainingSet: TrainingSet[SupervisedTrainingElement] = _
var testingSet: TrainingSet[SupervisedTrainingElement] = _
var layers: Array[Int] = Array(8, 8, 1)
def loadNetwork() {
network = NeuralNetwork.load("D:/GitHub/Neuroph-Intellij-Plugin/TestKohonen.nnet")
}
def trainNetwork() {
val list = new util.ArrayList[Integer]()
for (layer <- layers) {
list.add(layer)
}
network = new Kohonen(inputSize, outputSize);
trainingSet = new TrainingSet[SupervisedTrainingElement](inputSize, outputSize)
trainingSet = TrainingSet.createFromFile("D:/GitHub/NeuralNetworkTest/Classroom Occupation Data.csv", inputSize, outputSize, ",").asInstanceOf[TrainingSet[SupervisedTrainingElement]]
val learningRule = new BackPropagation();
network.setLearningRule(learningRule)
network.learn(trainingSet)
network.save("D:/GitHub/Neuroph-Intellij-Plugin/TestKohonen.nnet")
}
def testNetwork() {
var input = ""
val fromKeyboard = new BufferedReader(new InputStreamReader(System.in))
val testValues = new util.ArrayList[Double]()
var testValuesDouble: Array[Double] = null
do {
try {
println("Enter test values or \"\": ")
input = fromKeyboard.readLine()
if (input == "") {
//break
}
input = input.replace(" ", "")
val stringVals = input.split(",")
testValues.clear()
for (value <- stringVals) {
testValues.add(value.toDouble)
}
} catch {
case ioe: IOException => ioe.printStackTrace(System.err)
case nfe: NumberFormatException => nfe.printStackTrace(System.err)
}
testValuesDouble = Array.ofDim[Double](testValues.size)
for (t <- testValuesDouble.indices) {
testValuesDouble(t) = testValues.get(t).doubleValue()
}
network.setInput(testValuesDouble: _*)
network.calculate()
} while (input != "")
}
def testNetworkAuto(setPath: String) {
var total: Double = 0.0
val list = new util.ArrayList[Integer]()
val outputLine = new util.ArrayList[String]()
for (layer <- layers) {
list.add(layer)
}
testingSet = TrainingSet.createFromFile(setPath, inputSize, outputSize, ",").asInstanceOf[TrainingSet[SupervisedTrainingElement]]
val count = testingSet.elements().size
var averageDeviance = 0.0
var resultString = ""
try {
val file = new File("Results " + setPath)
val fw = new FileWriter(file)
val bw = new BufferedWriter(fw)
for (i <- 0 until testingSet.elements().size) {
var expected: Double = 0.0
var calculated: Double = 0.0
network.setInput(testingSet.elementAt(i).getInput: _*)
network.calculate()
calculated = network.getOutput()(0)
expected = testingSet.elementAt(i).getIdealArray()(0)
println("Calculated Output: " + calculated)
println("Expected Output: " + expected)
println("Deviance: " + (calculated - expected))
averageDeviance += math.abs(math.abs(calculated) - math.abs(expected))
total += network.getOutput()(0)
resultString = ""
for (cols <- testingSet.elementAt(i).getInputArray.indices) {
resultString += testingSet.elementAt(i).getInputArray()(cols) + ", "
}
for (t <- network.getOutput.indices) {
resultString += network.getOutput()(t) + ", "
}
resultString = resultString.substring(0, resultString.length - 2)
resultString += ""
bw.write(resultString)
bw.flush()
}
println()
println("Average: " + total / count)
println("Average Deviance % : " + (averageDeviance / count) * 100)
bw.flush()
bw.close()
} catch {
case ex: IOException => ex.printStackTrace()
}
}
}
| 06needhamt/Neuroph-Intellij-Plugin | neuroph-plugin/src/com/thomas/needham/neurophidea/examples/scala/TestKohonen.scala | Scala | mit | 5,294 |
package com.twitter.finagle.mysql
import com.twitter.finagle.benchmark.StdBenchAnnotations
import java.sql.Timestamp
import java.util.TimeZone
import org.openjdk.jmh.annotations.{Benchmark, Scope, State}
@State(Scope.Benchmark)
class TimestampValueBenchmark extends StdBenchAnnotations {
private[this] final val timeZone =
TimeZone.getTimeZone("UTC")
private def rawValue(dateString: String): RawValue =
RawValue(
Type.Timestamp,
MysqlCharset.Utf8_general_ci,
isBinary = false,
dateString.getBytes("UTF-8")
)
private[this] final val noNanos: RawValue =
rawValue("2018-04-18 16:30:05")
private[this] final val withNanos: RawValue =
rawValue("2018-04-18 16:30:05.123456789")
private[this] final val paddedNanos: RawValue =
rawValue("2018-04-18 16:30:05.1")
@Benchmark
def fromValueStringNoNanos(): Timestamp =
TimestampValue.fromValue(noNanos, timeZone).get
@Benchmark
def fromValueStringWithNanos(): Timestamp =
TimestampValue.fromValue(withNanos, timeZone).get
@Benchmark
def fromValueStringWithNanosPadded(): Timestamp =
TimestampValue.fromValue(paddedNanos, timeZone).get
}
| twitter/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/mysql/TimestampValueBenchmark.scala | Scala | apache-2.0 | 1,169 |
package mesosphere.marathon
package core.task.bus.impl
import mesosphere.marathon.core.instance.update.InstanceChange
import mesosphere.marathon.core.task.bus.TaskStatusEmitter
import org.slf4j.LoggerFactory
private[bus] class TaskStatusEmitterImpl(internalTaskStatusEventStream: InternalTaskChangeEventStream)
extends TaskStatusEmitter {
private[this] val log = LoggerFactory.getLogger(getClass)
override def publish(update: InstanceChange): Unit = {
log.debug("publishing update {}", update)
internalTaskStatusEventStream.publish(update)
}
}
| Caerostris/marathon | src/main/scala/mesosphere/marathon/core/task/bus/impl/TaskStatusEmitterImpl.scala | Scala | apache-2.0 | 565 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred
trait HadoopMapRedUtil {
def newJobContext(conf: JobConf, jobId: JobID): JobContext = new JobContext(conf, jobId)
def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
def newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean, taskId: Int, attemptId: Int) = new TaskAttemptID(jtIdentifier,
jobId, isMap, taskId, attemptId)
}
| vax11780/spark | core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala | Scala | apache-2.0 | 1,260 |
package slick.backend
import java.util.concurrent.atomic.{AtomicReferenceArray, AtomicBoolean, AtomicLong}
import com.typesafe.config.Config
import scala.collection.mutable.ArrayBuffer
import scala.language.existentials
import scala.concurrent.{Promise, ExecutionContext, Future}
import scala.util.{Try, Success, Failure}
import scala.util.control.NonFatal
import java.io.Closeable
import org.slf4j.LoggerFactory
import org.reactivestreams._
import slick.SlickException
import slick.dbio._
import slick.util._
/** Backend for the basic database and session handling features.
* Concrete backends like `JdbcBackend` extend this type and provide concrete
* types for `Database`, `DatabaseFactory` and `Session`. */
trait DatabaseComponent { self =>
protected lazy val actionLogger = new SlickLogger(LoggerFactory.getLogger(classOf[DatabaseComponent].getName+".action"))
protected lazy val streamLogger = new SlickLogger(LoggerFactory.getLogger(classOf[DatabaseComponent].getName+".stream"))
type This >: this.type <: DatabaseComponent
/** The type of database objects used by this backend. */
type Database <: DatabaseDef
/** The type of the database factory used by this backend. */
type DatabaseFactory
/** The type of session objects used by this backend. */
type Session >: Null <: SessionDef
/** The type of the context used for running SynchronousDatabaseActions */
type Context >: Null <: BasicActionContext
/** The type of the context used for streaming SynchronousDatabaseActions */
type StreamingContext >: Null <: Context with BasicStreamingActionContext
/** The database factory */
val Database: DatabaseFactory
/** Create a Database instance through [[https://github.com/typesafehub/config Typesafe Config]].
* The supported config keys are backend-specific. This method is used by `DatabaseConfig`.
* @param path The path in the configuration file for the database configuration, or an empty
* string for the top level of the `Config` object.
* @param config The `Config` object to read from.
*/
def createDatabase(config: Config, path: String): Database
/** A database instance to which connections can be created. */
trait DatabaseDef extends Closeable { this: Database =>
/** Create a new session. The session needs to be closed explicitly by calling its close() method. */
def createSession(): Session
/** Free all resources allocated by Slick for this Database. This is done asynchronously, so
* you need to wait for the returned `Future` to complete in order to ensure that everything
* has been shut down. */
def shutdown: Future[Unit] = Future(close)(ExecutionContext.fromExecutor(AsyncExecutor.shutdownExecutor))
/** Free all resources allocated by Slick for this Database, blocking the current thread until
* everything has been shut down.
*
* Backend implementations which are based on a naturally blocking shutdown procedure can
* simply implement this method and get `shutdown` as an asynchronous wrapper for free. If
* the underlying shutdown procedure is asynchronous, you should implement `shutdown` instead
* and wrap it with `Await.result` in this method. */
def close: Unit
/** Run an Action asynchronously and return the result as a Future. */
final def run[R](a: DBIOAction[R, NoStream, Nothing]): Future[R] = runInternal(a, false)
// Synchronous failures while setting up the run are surfaced as a failed Future,
// so callers never see an exception thrown directly from `run`.
private[slick] final def runInternal[R](a: DBIOAction[R, NoStream, Nothing], useSameThread: Boolean): Future[R] =
try runInContext(a, createDatabaseActionContext(useSameThread), false, true)
catch { case NonFatal(ex) => Future.failed(ex) }
/** Create a `Publisher` for Reactive Streams which, when subscribed to, will run the specified
* `DBIOAction` and return the result directly as a stream without buffering everything first.
* This method is only supported for streaming actions.
*
* The Publisher itself is just a stub that holds a reference to the action and this Database.
* The action does not actually start to run until the call to `onSubscribe` returns, after
* which the Subscriber is responsible for reading the full response or cancelling the
* Subscription. The created Publisher can be reused to serve a multiple Subscribers,
* each time triggering a new execution of the action.
*
* For the purpose of combinators such as `cleanup` which can run after a stream has been
* produced, cancellation of a stream by the Subscriber is not considered an error. For
* example, there is no way for the Subscriber to cause a rollback when streaming the
* results of `someQuery.result.transactionally`.
*
* When using a JDBC back-end, all `onNext` calls are done synchronously and the ResultSet row
* is not advanced before `onNext` returns. This allows the Subscriber to access LOB pointers
* from within `onNext`. If streaming is interrupted due to back-pressure signaling, the next
* row will be prefetched (in order to buffer the next result page from the server when a page
* boundary has been reached). */
final def stream[T](a: DBIOAction[_, Streaming[T], Nothing]): DatabasePublisher[T] = streamInternal(a, false)
private[slick] final def streamInternal[T](a: DBIOAction[_, Streaming[T], Nothing], useSameThread: Boolean): DatabasePublisher[T] =
createPublisher(a, s => createStreamingDatabaseActionContext(s, useSameThread))
/** Create a Reactive Streams `Publisher` using the given context factory. */
protected[this] def createPublisher[T](a: DBIOAction[_, Streaming[T], Nothing], createCtx: Subscriber[_ >: T] => StreamingContext): DatabasePublisher[T] = new DatabasePublisher[T] {
def subscribe(s: Subscriber[_ >: T]) = {
// Reactive Streams spec rule 1.9: a null Subscriber must be rejected with NPE.
if(s eq null) throw new NullPointerException("Subscriber is null")
val ctx = createCtx(s)
if(streamLogger.isDebugEnabled) streamLogger.debug(s"Signaling onSubscribe($ctx)")
// The action is only started after onSubscribe has returned successfully;
// a throwing Subscriber means we never begin execution.
val subscribed = try { s.onSubscribe(ctx.subscription); true } catch {
case NonFatal(ex) =>
streamLogger.warn("Subscriber.onSubscribe failed unexpectedly", ex)
false
}
if(subscribed) {
try {
// The streaming run completes the context's Subscriber with onComplete/onError
// once the Future finishes (successfully with null or with a failure).
runInContext(a, ctx, true, true).onComplete {
case Success(_) => ctx.tryOnComplete
case Failure(t) => ctx.tryOnError(t)
}(DBIO.sameThreadExecutionContext)
} catch { case NonFatal(ex) => ctx.tryOnError(ex) }
}
}
}
/** Create the default DatabaseActionContext for this backend. */
protected[this] def createDatabaseActionContext[T](_useSameThread: Boolean): Context
/** Create the default StreamingDatabaseActionContext for this backend. */
protected[this] def createStreamingDatabaseActionContext[T](s: Subscriber[_ >: T], useSameThread: Boolean): StreamingContext
/** Run an Action in an existing DatabaseActionContext. This method can be overridden in
* subclasses to support new DatabaseActions which cannot be expressed through
* SynchronousDatabaseAction.
*
* @param streaming Whether to return the result as a stream. In this case, the context must
* be a `StreamingDatabaseActionContext` and the Future result should be
* completed with `null` or failed after streaming has finished. This
* method should not call any `Subscriber` method other than `onNext`. */
protected[this] def runInContext[R](a: DBIOAction[R, NoStream, Nothing], ctx: Context, streaming: Boolean, topLevel: Boolean): Future[R] = {
logAction(a, ctx)
a match {
// Leaf actions: lift the precomputed value / failure / Future directly.
case SuccessAction(v) => Future.successful(v)
case FailureAction(t) => Future.failed(t)
case FutureAction(f) => f
case FlatMapAction(base, f, ec) =>
// Only the continuation may stream; the base must be fully materialized first.
runInContext(base, ctx, false, topLevel).flatMap(v => runInContext(f(v), ctx, streaming, false))(ctx.getEC(ec))
case AndThenAction(actions) =>
val last = actions.length - 1
// Run the sub-actions sequentially, discarding intermediate results; only the
// last one may stream.
def run(pos: Int, v: Any): Future[Any] = {
val f1 = runInContext(actions(pos), ctx, streaming && pos == last, pos == 0)
if(pos == last) f1
else f1.flatMap(run(pos + 1, _))(DBIO.sameThreadExecutionContext)
}
run(0, null).asInstanceOf[Future[R]]
case sa @ SequenceAction(actions) =>
val len = actions.length
// Results are stored positionally; AtomicReferenceArray gives safe publication
// across the threads that may run the individual sub-actions.
val results = new AtomicReferenceArray[Any](len)
def run(pos: Int): Future[Any] = {
if(pos == len) Future.successful {
val b = sa.cbf()
var i = 0
while(i < len) {
b += results.get(i)
i += 1
}
b.result()
}
else runInContext(actions(pos), ctx, false, pos == 0).flatMap { (v: Any) =>
results.set(pos, v)
run(pos + 1)
} (DBIO.sameThreadExecutionContext)
}
run(0).asInstanceOf[Future[R]]
case CleanUpAction(base, f, keepFailure, ec) =>
val p = Promise[R]()
runInContext(base, ctx, streaming, topLevel).onComplete { t1 =>
try {
// The clean-up action is derived from the base action's outcome (None = success).
val a2 = f(t1 match {
case Success(_) => None
case Failure(t) => Some(t)
})
runInContext(a2, ctx, false, false).onComplete { t2 =>
// A clean-up failure only wins when the base succeeded or keepFailure is off;
// otherwise the base action's original failure is preserved.
if(t2.isFailure && (t1.isSuccess || !keepFailure)) p.complete(t2.asInstanceOf[Failure[R]])
else p.complete(t1)
} (DBIO.sameThreadExecutionContext)
} catch {
case NonFatal(ex) =>
// `f` itself threw: prefer the base failure when keepFailure is set.
throw (t1 match {
case Failure(t) if keepFailure => t
case _ => ex
})
}
} (ctx.getEC(ec))
p.future
case FailedAction(a) =>
// Invert the outcome: succeed with the failure of the wrapped action.
runInContext(a, ctx, false, topLevel).failed.asInstanceOf[Future[R]]
case AsTryAction(a) =>
val p = Promise[R]()
// Materialize the outcome as a Try; the resulting Future never fails itself.
runInContext(a, ctx, false, topLevel).onComplete(v => p.success(v.asInstanceOf[R]))(DBIO.sameThreadExecutionContext)
p.future
case NamedAction(a, _) =>
runInContext(a, ctx, streaming, topLevel)
case a: SynchronousDatabaseAction[_, _, _, _] =>
if(streaming) {
if(a.supportsStreaming) streamSynchronousDatabaseAction(a.asInstanceOf[SynchronousDatabaseAction[_, _ <: NoStream, This, _ <: Effect]], ctx.asInstanceOf[StreamingContext], !topLevel).asInstanceOf[Future[R]]
// Action cannot stream: run its non-fused equivalent fully materialized, with the
// session pinned around it so everything uses a single session.
else runInContext(CleanUpAction(AndThenAction(Vector(DBIO.Pin, a.nonFusedEquivalentAction)), _ => DBIO.Unpin, true, DBIO.sameThreadExecutionContext), ctx, streaming, topLevel)
} else runSynchronousDatabaseAction(a.asInstanceOf[SynchronousDatabaseAction[R, NoStream, This, _]], ctx, !topLevel)
case a: DatabaseAction[_, _, _] =>
throw new SlickException(s"Unsupported database action $a for $this")
}
}
/** Within a synchronous execution, ensure that a Session is available. */
protected[this] final def acquireSession(ctx: Context): Unit =
// A pinned context already holds a session (e.g. inside a transaction), so a new
// one is only created when the context is unpinned.
if(!ctx.isPinned) ctx.currentSession = createSession()
/** Within a synchronous execution, close the current Session unless it is pinned.
*
* @param discardErrors If set to true, swallow all non-fatal errors that arise while
* closing the Session. */
protected[this] final def releaseSession(ctx: Context, discardErrors: Boolean): Unit =
if(!ctx.isPinned) {
// Deliberate best-effort close on failure paths: a close error must not mask the
// original exception when discardErrors is set.
try ctx.currentSession.close() catch { case NonFatal(ex) if(discardErrors) => }
ctx.currentSession = null
}
/** Run a `SynchronousDatabaseAction` on this database. */
protected[this] def runSynchronousDatabaseAction[R](a: SynchronousDatabaseAction[R, NoStream, This, _], ctx: Context, highPrio: Boolean): Future[R] = {
val promise = Promise[R]()
ctx.getEC(synchronousExecutionContext).prepare.execute(new AsyncExecutor.PrioritizedRunnable {
def highPriority = highPrio
def run: Unit =
try {
// Read the volatile `sync` to establish happens-before with the previous
// synchronous execution in this context (see BasicActionContext.sync).
ctx.sync
val res = try {
acquireSession(ctx)
val res = try a.run(ctx) catch { case NonFatal(ex) =>
// Discard close errors so that the action's own failure propagates.
releaseSession(ctx, true)
throw ex
}
releaseSession(ctx, false)
res
// Writing `sync` publishes all non-volatile writes made during the action.
} finally { ctx.sync = 0 }
promise.success(res)
} catch { case NonFatal(ex) => promise.tryFailure(ex) }
})
promise.future
}
/** Stream a `SynchronousDatabaseAction` on this database. */
protected[this] def streamSynchronousDatabaseAction(a: SynchronousDatabaseAction[_, _ <: NoStream, This, _ <: Effect], ctx: StreamingContext, highPrio: Boolean): Future[Null] = {
// Remember the action so a suspended stream can later be continued by
// `restartStreaming`, then kick off the first run with no saved state (null).
ctx.streamingAction = a
scheduleSynchronousStreaming(a, ctx, highPrio)(null)
ctx.streamingResultPromise.future
}
/** Stream a part of the results of a `SynchronousDatabaseAction` on this database.
*
* The work runs on the synchronous execution context. Each scheduled run emits elements
* until the current demand is exhausted or the stream ends, saves any continuation state
* in `ctx.streamState` for `restartStreaming`, and reports termination or failure through
* `ctx.streamingResultPromise`.
*
* @param initialState `null` to start the stream (a session is acquired first), or the
* state saved by a previously suspended run to continue from. */
protected[DatabaseComponent] def scheduleSynchronousStreaming(a: SynchronousDatabaseAction[_, _ <: NoStream, This, _ <: Effect], ctx: StreamingContext, highPrio: Boolean)(initialState: a.StreamState): Unit = try {
ctx.getEC(synchronousExecutionContext).prepare.execute(new AsyncExecutor.PrioritizedRunnable {
// Render "unbounded" demand (Long.MaxValue) in a readable way in debug output.
private[this] def str(l: Long) = if(l != Long.MaxValue) l else if(GlobalConfig.unicodeDump) "\u221E" else "oo"
def highPriority = highPrio
def run: Unit = try {
val debug = streamLogger.isDebugEnabled
var state = initialState
// Volatile read to establish happens-before with the previous run in this context.
ctx.sync
if(state eq null) acquireSession(ctx)
var demand = ctx.demandBatch
// Negative demand means the initial streaming run is still active; the real demand
// is encoded as an offset from Long.MinValue (see BasicStreamingActionContext).
var realDemand = if(demand < 0) demand - Long.MinValue else demand
do {
try {
if(debug)
// Fixed: the original used "Restarting " (trailing space), producing a double
// space in the concatenated log message.
streamLogger.debug((if(state eq null) "Starting initial" else "Restarting") + " streaming action, realDemand = " + str(realDemand))
if(ctx.cancelled) {
if(ctx.deferredError ne null) throw ctx.deferredError
if(state ne null) { // streaming cancelled before finishing
val oldState = state
state = null
a.cancelStream(ctx, oldState)
}
} else if((realDemand > 0 || (state eq null))) {
val oldState = state
state = null
state = a.emitStream(ctx, realDemand, oldState)
}
if(state eq null) { // streaming finished and cleaned up
releaseSession(ctx, true)
ctx.streamingResultPromise.trySuccess(null)
}
} catch { case NonFatal(ex) =>
if(state ne null) try a.cancelStream(ctx, state) catch ignoreFollowOnError
releaseSession(ctx, true)
throw ex
} finally {
// Publish the continuation state and the happens-before edge for the next run.
ctx.streamState = state
ctx.sync = 0
}
if(debug) {
if(state eq null) streamLogger.debug(s"Sent up to ${str(realDemand)} elements - Stream " + (if(ctx.cancelled) "cancelled" else "completely delivered"))
else streamLogger.debug(s"Sent ${str(realDemand)} elements, more available - Performing atomic state transition")
}
demand = ctx.delivered(demand)
realDemand = if(demand < 0) demand - Long.MinValue else demand
} while ((state ne null) && realDemand > 0)
if(debug) {
if(state ne null) streamLogger.debug("Suspending streaming action with continuation (more data available)")
else streamLogger.debug("Finished streaming action")
}
} catch { case NonFatal(ex) => ctx.streamingResultPromise.tryFailure(ex) }
})
} catch { case NonFatal(ex) =>
streamLogger.warn("Error scheduling synchronous streaming", ex)
throw ex
}
/** Return the default ExecutionContext for this Database which should be used for running
* SynchronousDatabaseActions for asynchronous execution. */
protected[this] def synchronousExecutionContext: ExecutionContext
/** Log a debug dump of an action about to run, tagged with a per-context sequence number. */
protected[this] def logAction(a: DBIOAction[_, NoStream, Nothing], ctx: Context): Unit = {
if(actionLogger.isDebugEnabled && a.isLogged) {
ctx.sequenceCounter += 1
// Dump the non-fused equivalent so the output reflects the user-level structure;
// a "[fused] " prefix marks actions that were actually fused for execution.
val logA = a.nonFusedEquivalentAction
val aPrefix = if(a eq logA) "" else "[fused] "
val dump = new TreePrinter(prefix = " ", firstPrefix = aPrefix, narrow = {
case a: DBIOAction[_, _, _] => a.nonFusedEquivalentAction
case o => o
}).get(logA)
// substring drops the last character of the dump — presumably a trailing newline
// from TreePrinter; NOTE(review): confirm against TreePrinter's output format.
val msg = DumpInfo.highlight("#" + ctx.sequenceCounter) + ": " + dump.substring(0, dump.length-1)
actionLogger.debug(msg)
}
}
/** A logical session of a `Database`. The underlying database connection is created lazily on demand. */
trait SessionDef extends Closeable {
/** Close this Session. */
def close(): Unit
/** Force an actual database session to be opened. Slick sessions are lazy, so you do not
* get a real database connection until you need it or you call force() on the session. */
def force(): Unit
}
/** The context object passed to database actions by the execution engine. */
trait BasicActionContext extends ActionContext {
/** Whether to run all operations on the current thread or schedule them normally on the
* appropriate ExecutionContext. This is used by the blocking API. */
protected[DatabaseComponent] val useSameThread: Boolean
/** Return the specified ExecutionContext unless running in same-thread mode, in which case
* `Action.sameThreadExecutionContext` is returned instead. */
private[DatabaseComponent] def getEC(ec: ExecutionContext): ExecutionContext =
if(useSameThread) DBIO.sameThreadExecutionContext else ec
/** A volatile variable to enforce the happens-before relationship (see
* [[https://docs.oracle.com/javase/specs/jls/se7/html/jls-17.html]] and
* [[http://gee.cs.oswego.edu/dl/jmm/cookbook.html]]) when executing something in
* a synchronous action context. It is read when entering the context and written when leaving
* so that all writes to non-volatile variables within the context are visible to the next
* synchronous execution. */
@volatile private[DatabaseComponent] var sync = 0
// The session used by synchronous executions; managed by acquireSession/releaseSession
// and visible to the next execution via the `sync` protocol above.
private[DatabaseComponent] var currentSession: Session = null
/** Used for the sequence counter in Action debug output. This variable is volatile because it
* is only updated sequentially but not protected by a synchronous action context. */
@volatile private[DatabaseComponent] var sequenceCounter = 0
def session: Session = currentSession
}
/** A special DatabaseActionContext for streaming execution. */
protected[this] class BasicStreamingActionContext(subscriber: Subscriber[_], protected[DatabaseComponent] val useSameThread: Boolean, database: Database) extends BasicActionContext with StreamingActionContext with Subscription {
/** Whether the Subscriber has been signaled with `onComplete` or `onError`. */
private[this] var finished = false
/** The total number of elements requested and not yet marked as delivered by the synchronous
* streaming action. Whenever this value drops to 0, streaming is suspended. When it is raised
* up from 0 in `request`, streaming is scheduled to be restarted. It is initially set to
* `Long.MinValue` when streaming starts. Any negative value above `Long.MinValue` indicates
* the actual demand at that point. It is reset to 0 when the initial streaming ends. */
private[this] val remaining = new AtomicLong(Long.MinValue)
/** An error that will be signaled to the Subscriber when the stream is cancelled or
* terminated. This is used for signaling demand overflow in `request()` while guaranteeing
* that the `onError` message does not overlap with an active `onNext` call. */
private[DatabaseComponent] var deferredError: Throwable = null
/** The state for a suspended streaming action. Must only be set from a synchronous action
* context. */
private[DatabaseComponent] var streamState: AnyRef = null
/** The streaming action which may need to be continued with the suspended state */
private[DatabaseComponent] var streamingAction: SynchronousDatabaseAction[_, _ <: NoStream, This, _ <: Effect] = null
// Volatile because it is set from the Subscriber's thread and read from the
// synchronous streaming execution.
@volatile private[this] var cancelRequested = false
/** The Promise to complete when streaming has finished. */
val streamingResultPromise = Promise[Null]()
/** Indicate that the specified number of elements has been delivered. Returns the remaining
* demand. This is an atomic operation. It must only be called from the synchronous action
* context which performs the streaming. */
def delivered(num: Long): Long = remaining.addAndGet(-num)
/** Get the current demand that has not yet been marked as delivered and mark it as being in
* the current batch. When this value is negative, the initial streaming action is still
* running and the real demand can be computed by subtracting `Long.MinValue` from the
* returned value. */
def demandBatch: Long = remaining.get()
/** Whether the stream has been cancelled by the Subscriber */
def cancelled: Boolean = cancelRequested
def emit(v: Any): Unit = subscriber.asInstanceOf[Subscriber[Any]].onNext(v)
/** Finish the stream with `onComplete` if it is not finished yet. May only be called from a
* synchronous action context. */
def tryOnComplete: Unit = if(!finished && !cancelRequested) {
if(streamLogger.isDebugEnabled) streamLogger.debug("Signaling onComplete()")
finished = true
// A throwing Subscriber must not break the backend; log and move on.
try subscriber.onComplete() catch {
case NonFatal(ex) => streamLogger.warn("Subscriber.onComplete failed unexpectedly", ex)
}
}
/** Finish the stream with `onError` if it is not finished yet. May only be called from a
* synchronous action context. */
def tryOnError(t: Throwable): Unit = if(!finished) {
if(streamLogger.isDebugEnabled) streamLogger.debug(s"Signaling onError($t)")
finished = true
try subscriber.onError(t) catch {
case NonFatal(ex) => streamLogger.warn("Subscriber.onError failed unexpectedly", ex)
}
}
/** Restart a suspended streaming action. Must only be called from the Subscriber context. */
def restartStreaming: Unit = {
// Volatile read: pick up the streamState published by the last synchronous run.
sync
val s = streamState
if(s ne null) {
streamState = null
if(streamLogger.isDebugEnabled) streamLogger.debug("Scheduling stream continuation after transition from demand = 0")
val a = streamingAction
database.scheduleSynchronousStreaming(a, this.asInstanceOf[StreamingContext], highPrio = true)(s.asInstanceOf[a.StreamState])
} else {
// No continuation saved yet: the running stream will see the new demand itself.
if(streamLogger.isDebugEnabled) streamLogger.debug("Saw transition from demand = 0, but no stream continuation available")
}
}
def subscription = this
////////////////////////////////////////////////////////////////////////// Subscription methods
def request(l: Long): Unit = if(!cancelRequested) {
if(l <= 0) {
// Reactive Streams rule 3.9: non-positive demand is an error; defer it so the
// onError signal cannot overlap an active onNext call.
deferredError = new IllegalArgumentException("Requested count must not be <= 0 (see Reactive Streams spec, 3.9)")
cancel
} else {
// Raising the demand from exactly 0 means streaming was suspended: restart it.
if(!cancelRequested && remaining.getAndAdd(l) == 0L) restartStreaming
}
}
def cancel: Unit = if(!cancelRequested) {
cancelRequested = true
// Restart streaming because cancelling requires closing the result set and the session from
// within a synchronous action context. This will also complete the result Promise and thus
// allow the rest of the scheduled Action to run.
if(remaining.getAndSet(Long.MaxValue) == 0L) restartStreaming
}
}
}
| seebcioo/slick | slick/src/main/scala/slick/backend/DatabaseComponent.scala | Scala | bsd-2-clause | 24,113 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.sexp
/** Behavioural tests for [[SexpPrettyPrinter]], covering atoms, nested lists,
* data (keyword/value) forms and cons cells. */
class SexpPrettyPrinterSpec extends SexpSpec {
  // Shared fixtures used by the individual assertions below.
  private val fooStr = SexpString("foo")
  private val fooSym = SexpSymbol("foo")
  private val barSym = SexpSymbol("bar")
  private val fooKey = SexpSymbol(":foo")
  private val barKey = SexpSymbol(":bar")

  /** Pretty-print `sexp` and compare against `expected` with CR sequences removed. */
  private def check(sexp: Sexp, expected: String): Unit = {
    val normalised = expected.replace("\\r", "")
    SexpPrettyPrinter(sexp) should ===(normalised)
  }

  "CompactPrinter" should "handle nil or empty lists/data" in {
    check(SexpNil, "nil")
    check(SexpList(Nil), "nil")
  }

  it should "output lists of atoms" in {
    val rendered =
      """("foo"
        | 13
        | foo)""".stripMargin
    check(SexpList(fooStr, SexpNumber(13), fooSym), rendered)
  }

  it should "output lists of lists" in {
    val rendered =
      """(("foo")
        | ("foo"))""".stripMargin
    check(SexpList(SexpList(fooStr), SexpList(fooStr)), rendered)
  }

  it should "output data" in {
    check(
      SexpData(fooKey -> fooSym, barKey -> fooSym),
      """(
:foo foo
:bar foo
)"""
    )
    val nested = SexpData(fooKey -> fooStr, barKey -> fooStr)
    check(
      SexpData(fooKey -> nested, barKey -> nested),
      """(
:foo (
:foo "foo"
:bar "foo"
)
:bar (
:foo "foo"
:bar "foo"
)
)"""
    )
  }

  it should "output cons" in {
    check(SexpCons(fooSym, barSym), "(foo .\\n bar)")
  }
}
| espinhogr/ensime-server | s-express/src/test/scala/org/ensime/sexp/SexpPrettyPrinterSpec.scala | Scala | gpl-3.0 | 1,523 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.{Collections, Locale, Properties}
import kafka.api.{ApiVersion, ApiVersionValidator, KAFKA_0_10_0_IV1, KAFKA_2_1_IV0, KAFKA_2_7_IV0, KAFKA_2_8_IV0, KAFKA_3_0_IV1}
import kafka.cluster.EndPoint
import kafka.coordinator.group.OffsetConfig
import kafka.coordinator.transaction.{TransactionLog, TransactionStateManager}
import kafka.log.LogConfig
import kafka.log.LogConfig.MessageFormatVersion
import kafka.message.{BrokerCompressionCodec, CompressionCodec, ZStdCompressionCodec}
import kafka.security.authorizer.AuthorizerUtils
import kafka.server.KafkaRaftServer.{BrokerRole, ControllerRole, ProcessRole}
import kafka.utils.{CoreUtils, Logging}
import kafka.utils.Implicits._
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.common.Reconfigurable
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, ConfigResource, SaslConfigs, SecurityConfig, SslClientAuth, SslConfigs, TopicConfig}
import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList}
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.metrics.Sensor
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.record.{LegacyRecord, Records, TimestampType}
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.raft.RaftConfig
import org.apache.kafka.server.authorizer.Authorizer
import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig
import org.apache.zookeeper.client.ZKClientConfig
import scala.annotation.nowarn
import scala.jdk.CollectionConverters._
import scala.collection.{Map, Seq}
/** Default values for Kafka broker configuration keys. Grouped by subsystem; the
* corresponding key names and documentation strings live in `object KafkaConfig`. */
object Defaults {
/** ********* Zookeeper Configuration ***********/
val ZkSessionTimeoutMs = 18000
val ZkSyncTimeMs = 2000
val ZkEnableSecureAcls = false
val ZkMaxInFlightRequests = 10
val ZkSslClientEnable = false
val ZkSslProtocol = "TLSv1.2"
val ZkSslEndpointIdentificationAlgorithm = "HTTPS"
val ZkSslCrlEnable = false
val ZkSslOcspEnable = false
/** ********* General Configuration ***********/
val BrokerIdGenerationEnable = true
val MaxReservedBrokerId = 1000
val BrokerId = -1
// 1 MiB of payload plus the per-record log overhead.
val MessageMaxBytes = 1024 * 1024 + Records.LOG_OVERHEAD
val NumNetworkThreads = 3
val NumIoThreads = 8
val BackgroundThreads = 10
val QueuedMaxRequests = 500
val QueuedMaxRequestBytes = -1
val InitialBrokerRegistrationTimeoutMs = 60000
val BrokerHeartbeatIntervalMs = 2000
val BrokerSessionTimeoutMs = 9000
val MetadataSnapshotMaxNewRecordBytes = 20 * 1024 * 1024
/** KRaft mode configs */
val EmptyNodeId: Int = -1
/************* Authorizer Configuration ***********/
val AuthorizerClassName = ""
/** ********* Socket Server Configuration ***********/
val Listeners = "PLAINTEXT://:9092"
// Renders the default listener-name -> security-protocol mapping as "NAME:PROTO,..." pairs.
val ListenerSecurityProtocolMap: String = EndPoint.DefaultSecurityProtocolMap.map { case (listenerName, securityProtocol) =>
s"${listenerName.value}:${securityProtocol.name}"
}.mkString(",")
val SocketSendBufferBytes: Int = 100 * 1024
val SocketReceiveBufferBytes: Int = 100 * 1024
val SocketRequestMaxBytes: Int = 100 * 1024 * 1024
val MaxConnectionsPerIp: Int = Int.MaxValue
val MaxConnectionsPerIpOverrides: String = ""
val MaxConnections: Int = Int.MaxValue
val MaxConnectionCreationRate: Int = Int.MaxValue
val ConnectionsMaxIdleMs = 10 * 60 * 1000L
val RequestTimeoutMs = 30000
val ConnectionSetupTimeoutMs = CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS
val ConnectionSetupTimeoutMaxMs = CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS
val FailedAuthenticationDelayMs = 100
/** ********* Log Configuration ***********/
val NumPartitions = 1
val LogDir = "/tmp/kafka-logs"
val LogSegmentBytes = 1 * 1024 * 1024 * 1024
// 7 days, expressed in hours.
val LogRollHours = 24 * 7
val LogRollJitterHours = 0
val LogRetentionHours = 24 * 7
val LogRetentionBytes = -1L
val LogCleanupIntervalMs = 5 * 60 * 1000L
val Delete = "delete"
val Compact = "compact"
val LogCleanupPolicy = Delete
val LogCleanerThreads = 1
val LogCleanerIoMaxBytesPerSecond = Double.MaxValue
val LogCleanerDedupeBufferSize = 128 * 1024 * 1024L
val LogCleanerIoBufferSize = 512 * 1024
val LogCleanerDedupeBufferLoadFactor = 0.9d
val LogCleanerBackoffMs = 15 * 1000
val LogCleanerMinCleanRatio = 0.5d
val LogCleanerEnable = true
val LogCleanerDeleteRetentionMs = 24 * 60 * 60 * 1000L
val LogCleanerMinCompactionLagMs = 0L
val LogCleanerMaxCompactionLagMs = Long.MaxValue
val LogIndexSizeMaxBytes = 10 * 1024 * 1024
val LogIndexIntervalBytes = 4096
val LogFlushIntervalMessages = Long.MaxValue
val LogDeleteDelayMs = 60000
val LogFlushSchedulerIntervalMs = Long.MaxValue
val LogFlushOffsetCheckpointIntervalMs = 60000
val LogFlushStartOffsetCheckpointIntervalMs = 60000
val LogPreAllocateEnable = false
/* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
@deprecated("3.0")
val LogMessageFormatVersion = KAFKA_3_0_IV1.version
val LogMessageTimestampType = "CreateTime"
val LogMessageTimestampDifferenceMaxMs = Long.MaxValue
val NumRecoveryThreadsPerDataDir = 1
val AutoCreateTopicsEnable = true
val MinInSyncReplicas = 1
val MessageDownConversionEnable = true
/** ********* Replication configuration ***********/
val ControllerSocketTimeoutMs = RequestTimeoutMs
val ControllerMessageQueueSize = Int.MaxValue
val DefaultReplicationFactor = 1
val ReplicaLagTimeMaxMs = 30000L
val ReplicaSocketTimeoutMs = 30 * 1000
val ReplicaSocketReceiveBufferBytes = 64 * 1024
val ReplicaFetchMaxBytes = 1024 * 1024
val ReplicaFetchWaitMaxMs = 500
val ReplicaFetchMinBytes = 1
val ReplicaFetchResponseMaxBytes = 10 * 1024 * 1024
val NumReplicaFetchers = 1
val ReplicaFetchBackoffMs = 1000
val ReplicaHighWatermarkCheckpointIntervalMs = 5000L
val FetchPurgatoryPurgeIntervalRequests = 1000
val ProducerPurgatoryPurgeIntervalRequests = 1000
val DeleteRecordsPurgatoryPurgeIntervalRequests = 1
val AutoLeaderRebalanceEnable = true
val LeaderImbalancePerBrokerPercentage = 10
val LeaderImbalanceCheckIntervalSeconds = 300
val UncleanLeaderElectionEnable = false
val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString
val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetries = 3
val ControlledShutdownRetryBackoffMs = 5000
val ControlledShutdownEnable = true
/** ********* Group coordinator configuration ***********/
val GroupMinSessionTimeoutMs = 6000
val GroupMaxSessionTimeoutMs = 1800000
val GroupInitialRebalanceDelayMs = 3000
val GroupMaxSize: Int = Int.MaxValue
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSize = OffsetConfig.DefaultMaxMetadataSize
val OffsetsLoadBufferSize = OffsetConfig.DefaultLoadBufferSize
val OffsetsTopicReplicationFactor = OffsetConfig.DefaultOffsetsTopicReplicationFactor
val OffsetsTopicPartitions: Int = OffsetConfig.DefaultOffsetsTopicNumPartitions
val OffsetsTopicSegmentBytes: Int = OffsetConfig.DefaultOffsetsTopicSegmentBytes
val OffsetsTopicCompressionCodec: Int = OffsetConfig.DefaultOffsetsTopicCompressionCodec.codec
// 7 days, expressed in minutes.
val OffsetsRetentionMinutes: Int = 7 * 24 * 60
val OffsetsRetentionCheckIntervalMs: Long = OffsetConfig.DefaultOffsetsRetentionCheckIntervalMs
val OffsetCommitTimeoutMs = OffsetConfig.DefaultOffsetCommitTimeoutMs
val OffsetCommitRequiredAcks = OffsetConfig.DefaultOffsetCommitRequiredAcks
/** ********* Transaction management configuration ***********/
val TransactionalIdExpirationMs = TransactionStateManager.DefaultTransactionalIdExpirationMs
val TransactionsMaxTimeoutMs = TransactionStateManager.DefaultTransactionsMaxTimeoutMs
val TransactionsTopicMinISR = TransactionLog.DefaultMinInSyncReplicas
val TransactionsLoadBufferSize = TransactionLog.DefaultLoadBufferSize
val TransactionsTopicReplicationFactor = TransactionLog.DefaultReplicationFactor
val TransactionsTopicPartitions = TransactionLog.DefaultNumPartitions
val TransactionsTopicSegmentBytes = TransactionLog.DefaultSegmentBytes
val TransactionsAbortTimedOutTransactionsCleanupIntervalMS = TransactionStateManager.DefaultAbortTimedOutTransactionsIntervalMs
val TransactionsRemoveExpiredTransactionsCleanupIntervalMS = TransactionStateManager.DefaultRemoveExpiredTransactionalIdsIntervalMs
/** ********* Fetch Configuration **************/
val MaxIncrementalFetchSessionCacheSlots = 1000
val FetchMaxBytes = 55 * 1024 * 1024
/** ********* Quota Configuration ***********/
val NumQuotaSamples: Int = ClientQuotaManagerConfig.DefaultNumQuotaSamples
val QuotaWindowSizeSeconds: Int = ClientQuotaManagerConfig.DefaultQuotaWindowSizeSeconds
val NumReplicationQuotaSamples: Int = ReplicationQuotaManagerConfig.DefaultNumQuotaSamples
val ReplicationQuotaWindowSizeSeconds: Int = ReplicationQuotaManagerConfig.DefaultQuotaWindowSizeSeconds
val NumAlterLogDirsReplicationQuotaSamples: Int = ReplicationQuotaManagerConfig.DefaultNumQuotaSamples
val AlterLogDirsReplicationQuotaWindowSizeSeconds: Int = ReplicationQuotaManagerConfig.DefaultQuotaWindowSizeSeconds
val NumControllerQuotaSamples: Int = ClientQuotaManagerConfig.DefaultNumQuotaSamples
val ControllerQuotaWindowSizeSeconds: Int = ClientQuotaManagerConfig.DefaultQuotaWindowSizeSeconds
/** ********* Transaction Configuration ***********/
// 7 days in milliseconds.
val TransactionalIdExpirationMsDefault = 604800000
val DeleteTopicEnable = true
val CompressionType = "producer"
val MaxIdMapSnapshots = 2
/** ********* Kafka Metrics Configuration ***********/
val MetricNumSamples = 2
val MetricSampleWindowMs = 30000
val MetricReporterClasses = ""
val MetricRecordingLevel = Sensor.RecordingLevel.INFO.toString()
/** ********* Kafka Yammer Metrics Reporter Configuration ***********/
val KafkaMetricReporterClasses = ""
val KafkaMetricsPollingIntervalSeconds = 10
/** ********* SSL configuration ***********/
val SslProtocol = SslConfigs.DEFAULT_SSL_PROTOCOL
val SslEnabledProtocols = SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS
val SslKeystoreType = SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE
val SslTruststoreType = SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE
val SslKeyManagerAlgorithm = SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM
val SslTrustManagerAlgorithm = SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM
val SslEndpointIdentificationAlgorithm = SslConfigs.DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM
val SslClientAuthentication = SslClientAuth.NONE.name().toLowerCase(Locale.ROOT)
// All SslClientAuth enum values, lower-cased, as a Java String array for ConfigDef validation.
val SslClientAuthenticationValidValues = SslClientAuth.VALUES.asScala.map(v => v.toString().toLowerCase(Locale.ROOT)).asJava.toArray(new Array[String](0))
val SslPrincipalMappingRules = BrokerSecurityConfigs.DEFAULT_SSL_PRINCIPAL_MAPPING_RULES
/** ********* General Security configuration ***********/
val ConnectionsMaxReauthMsDefault = 0L
val DefaultPrincipalSerde = classOf[DefaultKafkaPrincipalBuilder]
/** ********* Sasl configuration ***********/
val SaslMechanismInterBrokerProtocol = SaslConfigs.DEFAULT_SASL_MECHANISM
val SaslEnabledMechanisms = BrokerSecurityConfigs.DEFAULT_SASL_ENABLED_MECHANISMS
val SaslKerberosKinitCmd = SaslConfigs.DEFAULT_KERBEROS_KINIT_CMD
val SaslKerberosTicketRenewWindowFactor = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
val SaslKerberosTicketRenewJitter = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_JITTER
val SaslKerberosMinTimeBeforeRelogin = SaslConfigs.DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN
val SaslKerberosPrincipalToLocalRules = BrokerSecurityConfigs.DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES
val SaslLoginRefreshWindowFactor = SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR
val SaslLoginRefreshWindowJitter = SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_JITTER
val SaslLoginRefreshMinPeriodSeconds = SaslConfigs.DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS
val SaslLoginRefreshBufferSeconds = SaslConfigs.DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS
/** ********* Delegation Token configuration ***********/
// 7 days / 24 hours / 1 hour, in milliseconds.
val DelegationTokenMaxLifeTimeMsDefault = 7 * 24 * 60 * 60 * 1000L
val DelegationTokenExpiryTimeMsDefault = 24 * 60 * 60 * 1000L
val DelegationTokenExpiryCheckIntervalMsDefault = 1 * 60 * 60 * 1000L
/** ********* Password encryption configuration for dynamic configs *********/
val PasswordEncoderCipherAlgorithm = "AES/CBC/PKCS5Padding"
val PasswordEncoderKeyLength = 128
val PasswordEncoderIterations = 4096
/** ********* Raft Quorum Configuration *********/
val QuorumVoters = RaftConfig.DEFAULT_QUORUM_VOTERS
val QuorumElectionTimeoutMs = RaftConfig.DEFAULT_QUORUM_ELECTION_TIMEOUT_MS
val QuorumFetchTimeoutMs = RaftConfig.DEFAULT_QUORUM_FETCH_TIMEOUT_MS
val QuorumElectionBackoffMs = RaftConfig.DEFAULT_QUORUM_ELECTION_BACKOFF_MAX_MS
val QuorumLingerMs = RaftConfig.DEFAULT_QUORUM_LINGER_MS
val QuorumRequestTimeoutMs = RaftConfig.DEFAULT_QUORUM_REQUEST_TIMEOUT_MS
val QuorumRetryBackoffMs = RaftConfig.DEFAULT_QUORUM_RETRY_BACKOFF_MS
}
object KafkaConfig {
// Prefix shared by every per-broker log config key (e.g. "log.message.timestamp.type").
private val LogConfigPrefix = "log."
/**
 * Renders the broker configuration table as HTML on stdout (used to produce
 * the configuration reference in the Kafka documentation).
 */
def main(args: Array[String]): Unit = {
  // Anchor ids are prefixed with "brokerconfigs_" so links into the generated
  // page do not collide with other config tables.
  val htmlTable = configDef.toHtml(4, (config: String) => "brokerconfigs_" + config,
    DynamicBrokerConfig.dynamicConfigUpdateModes)
  System.out.println(htmlTable)
}
/** ********* Zookeeper Configuration ***********/
val ZkConnectProp = "zookeeper.connect"
val ZkSessionTimeoutMsProp = "zookeeper.session.timeout.ms"
val ZkConnectionTimeoutMsProp = "zookeeper.connection.timeout.ms"
val ZkSyncTimeMsProp = "zookeeper.sync.time.ms"
val ZkEnableSecureAclsProp = "zookeeper.set.acl"
val ZkMaxInFlightRequestsProp = "zookeeper.max.in.flight.requests"
// The TLS-related keys below are translated to ZooKeeper client system
// properties via ZkSslConfigToSystemPropertyMap.
val ZkSslClientEnableProp = "zookeeper.ssl.client.enable"
val ZkClientCnxnSocketProp = "zookeeper.clientCnxnSocket"
val ZkSslKeyStoreLocationProp = "zookeeper.ssl.keystore.location"
val ZkSslKeyStorePasswordProp = "zookeeper.ssl.keystore.password"
val ZkSslKeyStoreTypeProp = "zookeeper.ssl.keystore.type"
val ZkSslTrustStoreLocationProp = "zookeeper.ssl.truststore.location"
val ZkSslTrustStorePasswordProp = "zookeeper.ssl.truststore.password"
val ZkSslTrustStoreTypeProp = "zookeeper.ssl.truststore.type"
val ZkSslProtocolProp = "zookeeper.ssl.protocol"
val ZkSslEnabledProtocolsProp = "zookeeper.ssl.enabled.protocols"
val ZkSslCipherSuitesProp = "zookeeper.ssl.cipher.suites"
val ZkSslEndpointIdentificationAlgorithmProp = "zookeeper.ssl.endpoint.identification.algorithm"
val ZkSslCrlEnableProp = "zookeeper.ssl.crl.enable"
val ZkSslOcspEnableProp = "zookeeper.ssl.ocsp.enable"
// a map from the Kafka config to the corresponding ZooKeeper Java system property
// Note that several ZooKeeper property names deliberately differ from the Kafka
// key in spelling/casing (e.g. Kafka "keystore" vs ZooKeeper "keyStore",
// Kafka "cipher.suites" vs ZooKeeper "ciphersuites"), so this table must be
// used for the translation rather than a mechanical rename.
private[kafka] val ZkSslConfigToSystemPropertyMap: Map[String, String] = Map(
  ZkSslClientEnableProp -> ZKClientConfig.SECURE_CLIENT,
  ZkClientCnxnSocketProp -> ZKClientConfig.ZOOKEEPER_CLIENT_CNXN_SOCKET,
  ZkSslKeyStoreLocationProp -> "zookeeper.ssl.keyStore.location",
  ZkSslKeyStorePasswordProp -> "zookeeper.ssl.keyStore.password",
  ZkSslKeyStoreTypeProp -> "zookeeper.ssl.keyStore.type",
  ZkSslTrustStoreLocationProp -> "zookeeper.ssl.trustStore.location",
  ZkSslTrustStorePasswordProp -> "zookeeper.ssl.trustStore.password",
  ZkSslTrustStoreTypeProp -> "zookeeper.ssl.trustStore.type",
  ZkSslProtocolProp -> "zookeeper.ssl.protocol",
  ZkSslEnabledProtocolsProp -> "zookeeper.ssl.enabledProtocols",
  ZkSslCipherSuitesProp -> "zookeeper.ssl.ciphersuites",
  ZkSslEndpointIdentificationAlgorithmProp -> "zookeeper.ssl.hostnameVerification",
  ZkSslCrlEnableProp -> "zookeeper.ssl.crl",
  ZkSslOcspEnableProp -> "zookeeper.ssl.ocsp")
/**
 * Reads the ZooKeeper client property corresponding to the given Kafka config
 * key, returning None when it is unset. Throws NoSuchElementException if
 * `kafkaPropName` is not one of the keys in ZkSslConfigToSystemPropertyMap.
 */
private[kafka] def getZooKeeperClientProperty(clientConfig: ZKClientConfig, kafkaPropName: String): Option[String] = {
  val zkSystemProperty = ZkSslConfigToSystemPropertyMap(kafkaPropName)
  // Option(...) maps a null property value to None.
  Option(clientConfig.getProperty(zkSystemProperty))
}
/**
 * Writes the ZooKeeper client property corresponding to the given Kafka config
 * key, converting the Kafka value to the string form ZooKeeper expects:
 *  - the endpoint identification algorithm becomes "true"/"false"
 *    (ZooKeeper's hostname-verification flag) based on whether the value is HTTPS;
 *  - list-valued protocol/cipher configs become a comma-separated string;
 *  - everything else is passed through via toString.
 * Throws NoSuchElementException if `kafkaPropName` is not a key of
 * ZkSslConfigToSystemPropertyMap.
 */
private[kafka] def setZooKeeperClientProperty(clientConfig: ZKClientConfig, kafkaPropName: String, kafkaPropValue: Any): Unit = {
  clientConfig.setProperty(ZkSslConfigToSystemPropertyMap(kafkaPropName),
    kafkaPropName match {
      case ZkSslEndpointIdentificationAlgorithmProp => (kafkaPropValue.toString.toUpperCase == "HTTPS").toString
      case ZkSslEnabledProtocolsProp | ZkSslCipherSuitesProp => kafkaPropValue match {
        // The match already binds `list` as java.util.List[_]; the previous
        // asInstanceOf re-cast to the same type was redundant and is removed.
        case list: java.util.List[_] => list.asScala.mkString(",")
        case _ => kafkaPropValue.toString
      }
      case _ => kafkaPropValue.toString
    })
}
/**
 * Whether ZooKeeper TLS client authentication is enabled. At a minimum the
 * client must configure TLS explicitly: the secure-client flag set to "true",
 * a client connection socket, and a key store location.
 */
private[kafka] def zkTlsClientAuthEnabled(zkClientConfig: ZKClientConfig) = {
  def prop(kafkaPropName: String) = getZooKeeperClientProperty(zkClientConfig, kafkaPropName)
  prop(ZkSslClientEnableProp).contains("true") &&
    prop(ZkClientCnxnSocketProp).isDefined &&
    prop(ZkSslKeyStoreLocationProp).isDefined
}
/** ********* General Configuration ***********/
val BrokerIdGenerationEnableProp = "broker.id.generation.enable"
val MaxReservedBrokerIdProp = "reserved.broker.max.id"
val BrokerIdProp = "broker.id"
val MessageMaxBytesProp = "message.max.bytes"
val NumNetworkThreadsProp = "num.network.threads"
val NumIoThreadsProp = "num.io.threads"
val BackgroundThreadsProp = "background.threads"
val NumReplicaAlterLogDirsThreadsProp = "num.replica.alter.log.dirs.threads"
val QueuedMaxRequestsProp = "queued.max.requests"
val QueuedMaxBytesProp = "queued.max.request.bytes"
// These reuse the client-side constants so broker and clients share the same key strings.
val RequestTimeoutMsProp = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG
val ConnectionSetupTimeoutMsProp = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG
val ConnectionSetupTimeoutMaxMsProp = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG
/** KRaft mode configs */
val ProcessRolesProp = "process.roles"
val InitialBrokerRegistrationTimeoutMsProp = "initial.broker.registration.timeout.ms"
val BrokerHeartbeatIntervalMsProp = "broker.heartbeat.interval.ms"
val BrokerSessionTimeoutMsProp = "broker.session.timeout.ms"
val NodeIdProp = "node.id"
val MetadataLogDirProp = "metadata.log.dir"
val MetadataSnapshotMaxNewRecordBytesProp = "metadata.log.max.record.bytes.between.snapshots"
val ControllerListenerNamesProp = "controller.listener.names"
val SaslMechanismControllerProtocolProp = "sasl.mechanism.controller.protocol"
val MetadataLogSegmentMinBytesProp = "metadata.log.segment.min.bytes"
val MetadataLogSegmentBytesProp = "metadata.log.segment.bytes"
val MetadataLogSegmentMillisProp = "metadata.log.segment.ms"
val MetadataMaxRetentionBytesProp = "metadata.max.retention.bytes"
val MetadataMaxRetentionMillisProp = "metadata.max.retention.ms"
val QuorumVotersProp = RaftConfig.QUORUM_VOTERS_CONFIG
/************* Authorizer Configuration ***********/
val AuthorizerClassNameProp = "authorizer.class.name"
/** ********* Socket Server Configuration ***********/
val ListenersProp = "listeners"
val AdvertisedListenersProp = "advertised.listeners"
val ListenerSecurityProtocolMapProp = "listener.security.protocol.map"
val ControlPlaneListenerNameProp = "control.plane.listener.name"
val SocketSendBufferBytesProp = "socket.send.buffer.bytes"
val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes"
val SocketRequestMaxBytesProp = "socket.request.max.bytes"
val MaxConnectionsPerIpProp = "max.connections.per.ip"
val MaxConnectionsPerIpOverridesProp = "max.connections.per.ip.overrides"
val MaxConnectionsProp = "max.connections"
val MaxConnectionCreationRateProp = "max.connection.creation.rate"
val ConnectionsMaxIdleMsProp = "connections.max.idle.ms"
val FailedAuthenticationDelayMsProp = "connection.failed.authentication.delay.ms"
/***************** rack configuration *************/
val RackProp = "broker.rack"
/** ********* Log Configuration ***********/
val NumPartitionsProp = "num.partitions"
val LogDirsProp = "log.dirs"
val LogDirProp = "log.dir"
val LogSegmentBytesProp = "log.segment.bytes"
val LogRollTimeMillisProp = "log.roll.ms"
val LogRollTimeHoursProp = "log.roll.hours"
val LogRollTimeJitterMillisProp = "log.roll.jitter.ms"
val LogRollTimeJitterHoursProp = "log.roll.jitter.hours"
val LogRetentionTimeMillisProp = "log.retention.ms"
val LogRetentionTimeMinutesProp = "log.retention.minutes"
val LogRetentionTimeHoursProp = "log.retention.hours"
val LogRetentionBytesProp = "log.retention.bytes"
val LogCleanupIntervalMsProp = "log.retention.check.interval.ms"
val LogCleanupPolicyProp = "log.cleanup.policy"
val LogCleanerThreadsProp = "log.cleaner.threads"
val LogCleanerIoMaxBytesPerSecondProp = "log.cleaner.io.max.bytes.per.second"
val LogCleanerDedupeBufferSizeProp = "log.cleaner.dedupe.buffer.size"
val LogCleanerIoBufferSizeProp = "log.cleaner.io.buffer.size"
val LogCleanerDedupeBufferLoadFactorProp = "log.cleaner.io.buffer.load.factor"
val LogCleanerBackoffMsProp = "log.cleaner.backoff.ms"
val LogCleanerMinCleanRatioProp = "log.cleaner.min.cleanable.ratio"
val LogCleanerEnableProp = "log.cleaner.enable"
val LogCleanerDeleteRetentionMsProp = "log.cleaner.delete.retention.ms"
val LogCleanerMinCompactionLagMsProp = "log.cleaner.min.compaction.lag.ms"
val LogCleanerMaxCompactionLagMsProp = "log.cleaner.max.compaction.lag.ms"
val LogIndexSizeMaxBytesProp = "log.index.size.max.bytes"
val LogIndexIntervalBytesProp = "log.index.interval.bytes"
val LogFlushIntervalMessagesProp = "log.flush.interval.messages"
val LogDeleteDelayMsProp = "log.segment.delete.delay.ms"
val LogFlushSchedulerIntervalMsProp = "log.flush.scheduler.interval.ms"
val LogFlushIntervalMsProp = "log.flush.interval.ms"
val LogFlushOffsetCheckpointIntervalMsProp = "log.flush.offset.checkpoint.interval.ms"
val LogFlushStartOffsetCheckpointIntervalMsProp = "log.flush.start.offset.checkpoint.interval.ms"
val LogPreAllocateProp = "log.preallocate"
/* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
@deprecated("3.0")
val LogMessageFormatVersionProp = LogConfigPrefix + "message.format.version"
val LogMessageTimestampTypeProp = LogConfigPrefix + "message.timestamp.type"
val LogMessageTimestampDifferenceMaxMsProp = LogConfigPrefix + "message.timestamp.difference.max.ms"
val LogMaxIdMapSnapshotsProp = LogConfigPrefix + "max.id.map.snapshots"
val NumRecoveryThreadsPerDataDirProp = "num.recovery.threads.per.data.dir"
val AutoCreateTopicsEnableProp = "auto.create.topics.enable"
val MinInSyncReplicasProp = "min.insync.replicas"
val CreateTopicPolicyClassNameProp = "create.topic.policy.class.name"
val AlterConfigPolicyClassNameProp = "alter.config.policy.class.name"
val LogMessageDownConversionEnableProp = LogConfigPrefix + "message.downconversion.enable"
/** ********* Replication configuration ***********/
val ControllerSocketTimeoutMsProp = "controller.socket.timeout.ms"
val DefaultReplicationFactorProp = "default.replication.factor"
val ReplicaLagTimeMaxMsProp = "replica.lag.time.max.ms"
val ReplicaSocketTimeoutMsProp = "replica.socket.timeout.ms"
val ReplicaSocketReceiveBufferBytesProp = "replica.socket.receive.buffer.bytes"
val ReplicaFetchMaxBytesProp = "replica.fetch.max.bytes"
val ReplicaFetchWaitMaxMsProp = "replica.fetch.wait.max.ms"
val ReplicaFetchMinBytesProp = "replica.fetch.min.bytes"
val ReplicaFetchResponseMaxBytesProp = "replica.fetch.response.max.bytes"
val ReplicaFetchBackoffMsProp = "replica.fetch.backoff.ms"
val NumReplicaFetchersProp = "num.replica.fetchers"
val ReplicaHighWatermarkCheckpointIntervalMsProp = "replica.high.watermark.checkpoint.interval.ms"
val FetchPurgatoryPurgeIntervalRequestsProp = "fetch.purgatory.purge.interval.requests"
val ProducerPurgatoryPurgeIntervalRequestsProp = "producer.purgatory.purge.interval.requests"
val DeleteRecordsPurgatoryPurgeIntervalRequestsProp = "delete.records.purgatory.purge.interval.requests"
val AutoLeaderRebalanceEnableProp = "auto.leader.rebalance.enable"
val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage"
val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds"
val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol"
val InterBrokerProtocolVersionProp = "inter.broker.protocol.version"
val InterBrokerListenerNameProp = "inter.broker.listener.name"
val ReplicaSelectorClassProp = "replica.selector.class"
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries"
val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms"
val ControlledShutdownEnableProp = "controlled.shutdown.enable"
/** ********* Group coordinator configuration ***********/
val GroupMinSessionTimeoutMsProp = "group.min.session.timeout.ms"
val GroupMaxSessionTimeoutMsProp = "group.max.session.timeout.ms"
val GroupInitialRebalanceDelayMsProp = "group.initial.rebalance.delay.ms"
val GroupMaxSizeProp = "group.max.size"
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeProp = "offset.metadata.max.bytes"
val OffsetsLoadBufferSizeProp = "offsets.load.buffer.size"
val OffsetsTopicReplicationFactorProp = "offsets.topic.replication.factor"
val OffsetsTopicPartitionsProp = "offsets.topic.num.partitions"
val OffsetsTopicSegmentBytesProp = "offsets.topic.segment.bytes"
val OffsetsTopicCompressionCodecProp = "offsets.topic.compression.codec"
val OffsetsRetentionMinutesProp = "offsets.retention.minutes"
val OffsetsRetentionCheckIntervalMsProp = "offsets.retention.check.interval.ms"
val OffsetCommitTimeoutMsProp = "offsets.commit.timeout.ms"
val OffsetCommitRequiredAcksProp = "offsets.commit.required.acks"
/** ********* Transaction management configuration ***********/
val TransactionalIdExpirationMsProp = "transactional.id.expiration.ms"
val TransactionsMaxTimeoutMsProp = "transaction.max.timeout.ms"
val TransactionsTopicMinISRProp = "transaction.state.log.min.isr"
val TransactionsLoadBufferSizeProp = "transaction.state.log.load.buffer.size"
val TransactionsTopicPartitionsProp = "transaction.state.log.num.partitions"
val TransactionsTopicSegmentBytesProp = "transaction.state.log.segment.bytes"
val TransactionsTopicReplicationFactorProp = "transaction.state.log.replication.factor"
val TransactionsAbortTimedOutTransactionCleanupIntervalMsProp = "transaction.abort.timed.out.transaction.cleanup.interval.ms"
val TransactionsRemoveExpiredTransactionalIdCleanupIntervalMsProp = "transaction.remove.expired.transaction.cleanup.interval.ms"
/** ********* Fetch Configuration **************/
val MaxIncrementalFetchSessionCacheSlots = "max.incremental.fetch.session.cache.slots"
val FetchMaxBytes = "fetch.max.bytes"
/** ********* Quota Configuration ***********/
val NumQuotaSamplesProp = "quota.window.num"
val NumReplicationQuotaSamplesProp = "replication.quota.window.num"
val NumAlterLogDirsReplicationQuotaSamplesProp = "alter.log.dirs.replication.quota.window.num"
val NumControllerQuotaSamplesProp = "controller.quota.window.num"
val QuotaWindowSizeSecondsProp = "quota.window.size.seconds"
val ReplicationQuotaWindowSizeSecondsProp = "replication.quota.window.size.seconds"
val AlterLogDirsReplicationQuotaWindowSizeSecondsProp = "alter.log.dirs.replication.quota.window.size.seconds"
val ControllerQuotaWindowSizeSecondsProp = "controller.quota.window.size.seconds"
val ClientQuotaCallbackClassProp = "client.quota.callback.class"
val DeleteTopicEnableProp = "delete.topic.enable"
val CompressionTypeProp = "compression.type"
/** ********* Kafka Metrics Configuration ***********/
val MetricSampleWindowMsProp = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG
val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG
val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG
val MetricRecordingLevelProp: String = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG
/** ********* Kafka Yammer Metrics Reporters Configuration ***********/
val KafkaMetricsReporterClassesProp = "kafka.metrics.reporters"
val KafkaMetricsPollingIntervalSecondsProp = "kafka.metrics.polling.interval.secs"
/** ******** Common Security Configuration *************/
val PrincipalBuilderClassProp = BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG
val ConnectionsMaxReauthMsProp = BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS
// NOTE(review): lowerCamelCase name breaks the UpperCamelCase convention of the
// sibling constants; renaming it would break existing callers, so it is kept as-is.
val securityProviderClassProp = SecurityConfig.SECURITY_PROVIDERS_CONFIG
/** ********* SSL Configuration ****************/
val SslProtocolProp = SslConfigs.SSL_PROTOCOL_CONFIG
val SslProviderProp = SslConfigs.SSL_PROVIDER_CONFIG
val SslCipherSuitesProp = SslConfigs.SSL_CIPHER_SUITES_CONFIG
val SslEnabledProtocolsProp = SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG
val SslKeystoreTypeProp = SslConfigs.SSL_KEYSTORE_TYPE_CONFIG
val SslKeystoreLocationProp = SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG
val SslKeystorePasswordProp = SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG
val SslKeyPasswordProp = SslConfigs.SSL_KEY_PASSWORD_CONFIG
val SslKeystoreKeyProp = SslConfigs.SSL_KEYSTORE_KEY_CONFIG
val SslKeystoreCertificateChainProp = SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG
val SslTruststoreTypeProp = SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG
val SslTruststoreLocationProp = SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG
val SslTruststorePasswordProp = SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG
val SslTruststoreCertificatesProp = SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG
val SslKeyManagerAlgorithmProp = SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG
val SslTrustManagerAlgorithmProp = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG
val SslEndpointIdentificationAlgorithmProp = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG
val SslSecureRandomImplementationProp = SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG
val SslClientAuthProp = BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG
val SslPrincipalMappingRulesProp = BrokerSecurityConfigs.SSL_PRINCIPAL_MAPPING_RULES_CONFIG
// Property key for the pluggable SSL engine factory (delegates to the common SslConfigs constant).
// Changed `var` to `val`: this is a constant configuration key exactly like every sibling
// definition in this section, and nothing should reassign it at runtime.
val SslEngineFactoryClassProp = SslConfigs.SSL_ENGINE_FACTORY_CLASS_CONFIG
/** ********* SASL Configuration ****************/
// SASL keys: broker-local keys are literal strings, shared ones delegate to SaslConfigs /
// BrokerSecurityConfigs so the spelling matches the client-side constants.
val SaslMechanismInterBrokerProtocolProp = "sasl.mechanism.inter.broker.protocol"
val SaslJaasConfigProp = SaslConfigs.SASL_JAAS_CONFIG
val SaslEnabledMechanismsProp = BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG
val SaslServerCallbackHandlerClassProp = BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS
val SaslClientCallbackHandlerClassProp = SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS
val SaslLoginClassProp = SaslConfigs.SASL_LOGIN_CLASS
val SaslLoginCallbackHandlerClassProp = SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS
val SaslKerberosServiceNameProp = SaslConfigs.SASL_KERBEROS_SERVICE_NAME
val SaslKerberosKinitCmdProp = SaslConfigs.SASL_KERBEROS_KINIT_CMD
val SaslKerberosTicketRenewWindowFactorProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
val SaslKerberosTicketRenewJitterProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER
val SaslKerberosMinTimeBeforeReloginProp = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN
val SaslKerberosPrincipalToLocalRulesProp = BrokerSecurityConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG
val SaslLoginRefreshWindowFactorProp = SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR
val SaslLoginRefreshWindowJitterProp = SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER
val SaslLoginRefreshMinPeriodSecondsProp = SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS
val SaslLoginRefreshBufferSecondsProp = SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS
/** ********* Delegation Token Configuration ****************/
// Two keys exist for the token secret: "…master.key" is the older spelling, "…secret.key" the
// newer one — presumably kept side by side for compatibility; confirm against the config parser.
val DelegationTokenSecretKeyAliasProp = "delegation.token.master.key"
val DelegationTokenSecretKeyProp = "delegation.token.secret.key"
val DelegationTokenMaxLifeTimeProp = "delegation.token.max.lifetime.ms"
val DelegationTokenExpiryTimeMsProp = "delegation.token.expiry.time.ms"
val DelegationTokenExpiryCheckIntervalMsProp = "delegation.token.expiry.check.interval.ms"
/** ********* Password encryption configuration for dynamic configs *********/
// Keys controlling how password values stored for dynamic broker configs are encrypted.
val PasswordEncoderSecretProp = "password.encoder.secret"
val PasswordEncoderOldSecretProp = "password.encoder.old.secret"
val PasswordEncoderKeyFactoryAlgorithmProp = "password.encoder.keyfactory.algorithm"
val PasswordEncoderCipherAlgorithmProp = "password.encoder.cipher.algorithm"
val PasswordEncoderKeyLengthProp = "password.encoder.key.length"
val PasswordEncoderIterationsProp = "password.encoder.iterations"
/* Documentation */
// The *Doc vals below are the human-readable descriptions rendered into the generated broker
// configuration reference; many interpolate the matching *Prop key so the docs stay in sync
// with the actual config names.
/** ********* Zookeeper Configuration ***********/
val ZkConnectDoc = "Specifies the ZooKeeper connection string in the form <code>hostname:port</code> where host and port are the " +
"host and port of a ZooKeeper server. To allow connecting through other ZooKeeper nodes when that ZooKeeper machine is " +
"down you can also specify multiple hosts in the form <code>hostname1:port1,hostname2:port2,hostname3:port3</code>.\\n" +
"The server can also have a ZooKeeper chroot path as part of its ZooKeeper connection string which puts its data under some path in the global ZooKeeper namespace. " +
"For example to give a chroot path of <code>/chroot/path</code> you would give the connection string as <code>hostname1:port1,hostname2:port2,hostname3:port3/chroot/path</code>."
val ZkSessionTimeoutMsDoc = "Zookeeper session timeout"
val ZkConnectionTimeoutMsDoc = "The max time that the client waits to establish a connection to zookeeper. If not set, the value in " + ZkSessionTimeoutMsProp + " is used"
val ZkSyncTimeMsDoc = "How far a ZK follower can be behind a ZK leader"
val ZkEnableSecureAclsDoc = "Set client to use secure ACLs"
val ZkMaxInFlightRequestsDoc = "The maximum number of unacknowledged requests the client will send to Zookeeper before blocking."
// The ZK TLS docs below describe the interplay with ZooKeeper's own system properties;
// ZkSslConfigToSystemPropertyMap (defined elsewhere in this object) supplies each property name.
val ZkSslClientEnableDoc = "Set client to use TLS when connecting to ZooKeeper." +
" An explicit value overrides any value set via the <code>zookeeper.client.secure</code> system property (note the different name)." +
s" Defaults to false if neither is set; when true, <code>$ZkClientCnxnSocketProp</code> must be set (typically to <code>org.apache.zookeeper.ClientCnxnSocketNetty</code>); other values to set may include " +
ZkSslConfigToSystemPropertyMap.keys.toList.sorted.filter(x => x != ZkSslClientEnableProp && x != ZkClientCnxnSocketProp).mkString("<code>", "</code>, <code>", "</code>")
val ZkClientCnxnSocketDoc = "Typically set to <code>org.apache.zookeeper.ClientCnxnSocketNetty</code> when using TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the same-named <code>${ZkSslConfigToSystemPropertyMap(ZkClientCnxnSocketProp)}</code> system property."
val ZkSslKeyStoreLocationDoc = "Keystore location when using a client-side certificate with TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslKeyStoreLocationProp)}</code> system property (note the camelCase)."
val ZkSslKeyStorePasswordDoc = "Keystore password when using a client-side certificate with TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslKeyStorePasswordProp)}</code> system property (note the camelCase)." +
" Note that ZooKeeper does not support a key password different from the keystore password, so be sure to set the key password in the keystore to be identical to the keystore password; otherwise the connection attempt to Zookeeper will fail."
val ZkSslKeyStoreTypeDoc = "Keystore type when using a client-side certificate with TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslKeyStoreTypeProp)}</code> system property (note the camelCase)." +
" The default value of <code>null</code> means the type will be auto-detected based on the filename extension of the keystore."
val ZkSslTrustStoreLocationDoc = "Truststore location when using TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslTrustStoreLocationProp)}</code> system property (note the camelCase)."
val ZkSslTrustStorePasswordDoc = "Truststore password when using TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslTrustStorePasswordProp)}</code> system property (note the camelCase)."
val ZkSslTrustStoreTypeDoc = "Truststore type when using TLS connectivity to ZooKeeper." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslTrustStoreTypeProp)}</code> system property (note the camelCase)." +
" The default value of <code>null</code> means the type will be auto-detected based on the filename extension of the truststore."
val ZkSslProtocolDoc = "Specifies the protocol to be used in ZooKeeper TLS negotiation." +
s" An explicit value overrides any value set via the same-named <code>${ZkSslConfigToSystemPropertyMap(ZkSslProtocolProp)}</code> system property."
val ZkSslEnabledProtocolsDoc = "Specifies the enabled protocol(s) in ZooKeeper TLS negotiation (csv)." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslEnabledProtocolsProp)}</code> system property (note the camelCase)." +
s" The default value of <code>null</code> means the enabled protocol will be the value of the <code>${KafkaConfig.ZkSslProtocolProp}</code> configuration property."
val ZkSslCipherSuitesDoc = "Specifies the enabled cipher suites to be used in ZooKeeper TLS negotiation (csv)." +
s""" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslCipherSuitesProp)}</code> system property (note the single word \\"ciphersuites\\").""" +
" The default value of <code>null</code> means the list of enabled cipher suites is determined by the Java runtime being used."
val ZkSslEndpointIdentificationAlgorithmDoc = "Specifies whether to enable hostname verification in the ZooKeeper TLS negotiation process, with (case-insensitively) \\"https\\" meaning ZooKeeper hostname verification is enabled and an explicit blank value meaning it is disabled (disabling it is only recommended for testing purposes)." +
s""" An explicit value overrides any \\"true\\" or \\"false\\" value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslEndpointIdentificationAlgorithmProp)}</code> system property (note the different name and values; true implies https and false implies blank)."""
val ZkSslCrlEnableDoc = "Specifies whether to enable Certificate Revocation List in the ZooKeeper TLS protocols." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslCrlEnableProp)}</code> system property (note the shorter name)."
val ZkSslOcspEnableDoc = "Specifies whether to enable Online Certificate Status Protocol in the ZooKeeper TLS protocols." +
s" Overrides any explicit value set via the <code>${ZkSslConfigToSystemPropertyMap(ZkSslOcspEnableProp)}</code> system property (note the shorter name)."
/** ********* General Configuration ***********/
val BrokerIdGenerationEnableDoc = s"Enable automatic broker id generation on the server. When enabled the value configured for $MaxReservedBrokerIdProp should be reviewed."
val MaxReservedBrokerIdDoc = "Max number that can be used for a broker.id"
val BrokerIdDoc = "The broker id for this server. If unset, a unique broker id will be generated." +
"To avoid conflicts between zookeeper generated broker id's and user configured broker id's, generated broker ids " +
"start from " + MaxReservedBrokerIdProp + " + 1."
// NOTE(review): MAX_MESSAGE_BYTES_DOC is concatenated directly with "This can be set…" — if the
// upstream constant does not end with whitespace the rendered doc lacks a separating space; verify.
val MessageMaxBytesDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC +
s"This can be set per topic with the topic level <code>${TopicConfig.MAX_MESSAGE_BYTES_CONFIG}</code> config."
val NumNetworkThreadsDoc = "The number of threads that the server uses for receiving requests from the network and sending responses to the network"
val NumIoThreadsDoc = "The number of threads that the server uses for processing requests, which may include disk I/O"
val NumReplicaAlterLogDirsThreadsDoc = "The number of threads that can move replicas between log directories, which may include disk I/O"
val BackgroundThreadsDoc = "The number of threads to use for various background processing tasks"
val QueuedMaxRequestsDoc = "The number of queued requests allowed for data-plane, before blocking the network threads"
val QueuedMaxRequestBytesDoc = "The number of queued bytes allowed before no more requests are read"
val RequestTimeoutMsDoc = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC
val ConnectionSetupTimeoutMsDoc = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC
val ConnectionSetupTimeoutMaxMsDoc = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC
/** KRaft mode configs */
// Docs for configs that only apply when the cluster runs in KRaft (self-managed metadata) mode.
val ProcessRolesDoc = "The roles that this process plays: 'broker', 'controller', or 'broker,controller' if it is both. " +
"This configuration is only applicable for clusters in KRaft (Kafka Raft) mode (instead of ZooKeeper). Leave this config undefined or empty for Zookeeper clusters."
val InitialBrokerRegistrationTimeoutMsDoc = "When initially registering with the controller quorum, the number of milliseconds to wait before declaring failure and exiting the broker process."
val BrokerHeartbeatIntervalMsDoc = "The length of time in milliseconds between broker heartbeats. Used when running in KRaft mode."
val BrokerSessionTimeoutMsDoc = "The length of time in milliseconds that a broker lease lasts if no heartbeats are made. Used when running in KRaft mode."
val NodeIdDoc = "The node ID associated with the roles this process is playing when `process.roles` is non-empty. " +
"This is required configuration when running in KRaft mode."
val MetadataLogDirDoc = "This configuration determines where we put the metadata log for clusters in KRaft mode. " +
"If it is not set, the metadata log is placed in the first log directory from log.dirs."
val MetadataSnapshotMaxNewRecordBytesDoc = "This is the maximum number of bytes in the log between the latest snapshot and the high-watermark needed before generating a new snapshot."
val ControllerListenerNamesDoc = "A comma-separated list of the names of the listeners used by the controller. This is required " +
"if running in KRaft mode. The ZK-based controller will not use this configuration."
val SaslMechanismControllerProtocolDoc = "SASL mechanism used for communication with controllers. Default is GSSAPI."
val MetadataLogSegmentBytesDoc = "The maximum size of a single metadata log file."
val MetadataLogSegmentMinBytesDoc = "Override the minimum size for a single metadata log file. This should be used for testing only."
val MetadataLogSegmentMillisDoc = "The maximum time before a new metadata log file is rolled out (in milliseconds)."
val MetadataMaxRetentionBytesDoc = "The maximum combined size of the metadata log and snapshots before deleting old " +
"snapshots and log files. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit."
val MetadataMaxRetentionMillisDoc = "The number of milliseconds to keep a metadata log file or snapshot before " +
"deleting it. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit."
/************* Authorizer Configuration ***********/
// Description for the pluggable Authorizer implementation.
// Fixed: the original read `implements s${...}` — the stray `s` before the interpolation was
// rendered literally, producing "implements sorg.apache.kafka.server.authorizer.Authorizer"
// in the generated configuration docs.
val AuthorizerClassNameDoc = s"The fully qualified name of a class that implements ${classOf[Authorizer].getName}" +
" interface, which is used by the broker for authorization."
/** ********* Socket Server Configuration ***********/
// Docs for listener definitions, advertised endpoints, socket buffers and connection limits.
val ListenersDoc = "Listener List - Comma-separated list of URIs we will listen on and the listener names." +
s" If the listener name is not a security protocol, <code>$ListenerSecurityProtocolMapProp</code> must also be set.\\n" +
" Listener names and port numbers must be unique.\\n" +
" Specify hostname as 0.0.0.0 to bind to all interfaces.\\n" +
" Leave hostname empty to bind to default interface.\\n" +
" Examples of legal listener lists:\\n" +
" PLAINTEXT://myhost:9092,SSL://:9091\\n" +
" CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093\\n"
val AdvertisedListenersDoc = s"Listeners to publish to ZooKeeper for clients to use, if different than the <code>$ListenersProp</code> config property." +
" In IaaS environments, this may need to be different from the interface to which the broker binds." +
s" If this is not set, the value for <code>$ListenersProp</code> will be used." +
s" Unlike <code>$ListenersProp</code>, it is not valid to advertise the 0.0.0.0 meta-address.\\n" +
s" Also unlike <code>$ListenersProp</code>, there can be duplicated ports in this property," +
" so that one listener can be configured to advertise another listener's address." +
" This can be useful in some cases where external load balancers are used."
val ListenerSecurityProtocolMapDoc = "Map between listener names and security protocols. This must be defined for " +
"the same security protocol to be usable in more than one port or IP. For example, internal and " +
"external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners " +
"with names INTERNAL and EXTERNAL and this property as: `INTERNAL:SSL,EXTERNAL:SSL`. As shown, key and value are " +
"separated by a colon and map entries are separated by commas. Each listener name should only appear once in the map. " +
"Different security (SSL and SASL) settings can be configured for each listener by adding a normalised " +
"prefix (the listener name is lowercased) to the config name. For example, to set a different keystore for the " +
"INTERNAL listener, a config with name <code>listener.name.internal.ssl.keystore.location</code> would be set. " +
"If the config for the listener name is not set, the config will fallback to the generic config (i.e. <code>ssl.keystore.location</code>). "
// NOTE(review): lowerCamelCase name is inconsistent with the sibling *Doc vals, but renaming a
// public member would break external references — left as-is.
val controlPlaneListenerNameDoc = "Name of listener used for communication between controller and brokers. " +
s"Broker will use the $ControlPlaneListenerNameProp to locate the endpoint in $ListenersProp list, to listen for connections from the controller. " +
"For example, if a broker's config is :\\n" +
"listeners = INTERNAL://192.1.1.8:9092, EXTERNAL://10.1.1.5:9093, CONTROLLER://192.1.1.8:9094\\n" +
"listener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL\\n" +
"control.plane.listener.name = CONTROLLER\\n" +
"On startup, the broker will start listening on \\"192.1.1.8:9094\\" with security protocol \\"SSL\\".\\n" +
s"On controller side, when it discovers a broker's published endpoints through zookeeper, it will use the $ControlPlaneListenerNameProp " +
"to find the endpoint, which it will use to establish connection to the broker.\\n" +
"For example, if the broker's published endpoints on zookeeper are :\\n" +
"\\"endpoints\\" : [\\"INTERNAL://broker1.example.com:9092\\",\\"EXTERNAL://broker1.example.com:9093\\",\\"CONTROLLER://broker1.example.com:9094\\"]\\n" +
" and the controller's config is :\\n" +
"listener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL\\n" +
"control.plane.listener.name = CONTROLLER\\n" +
"then controller will use \\"broker1.example.com:9094\\" with security protocol \\"SSL\\" to connect to the broker.\\n" +
"If not explicitly configured, the default value will be null and there will be no dedicated endpoints for controller connections."
val SocketSendBufferBytesDoc = "The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used."
val SocketReceiveBufferBytesDoc = "The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used."
val SocketRequestMaxBytesDoc = "The maximum number of bytes in a socket request"
val MaxConnectionsPerIpDoc = "The maximum number of connections we allow from each ip address. This can be set to 0 if there are overrides " +
s"configured using $MaxConnectionsPerIpOverridesProp property. New connections from the ip address are dropped if the limit is reached."
val MaxConnectionsPerIpOverridesDoc = "A comma-separated list of per-ip or hostname overrides to the default maximum number of connections. " +
"An example value is \\"hostName:100,127.0.0.1:200\\""
val MaxConnectionsDoc = "The maximum number of connections we allow in the broker at any time. This limit is applied in addition " +
s"to any per-ip limits configured using $MaxConnectionsPerIpProp. Listener-level limits may also be configured by prefixing the " +
s"config name with the listener prefix, for example, <code>listener.name.internal.$MaxConnectionsProp</code>. Broker-wide limit " +
"should be configured based on broker capacity while listener limits should be configured based on application requirements. " +
"New connections are blocked if either the listener or broker limit is reached. Connections on the inter-broker listener are " +
"permitted even if broker-wide limit is reached. The least recently used connection on another listener will be closed in this case."
// Description for the broker/listener connection-creation rate limit.
// Fixed: the original concatenation produced "…</code>.Broker-wide…" with no separating space
// in the rendered docs; a leading space is added to the continuation string.
val MaxConnectionCreationRateDoc = "The maximum connection creation rate we allow in the broker at any time. Listener-level limits " +
s"may also be configured by prefixing the config name with the listener prefix, for example, <code>listener.name.internal.$MaxConnectionCreationRateProp</code>." +
" Broker-wide connection rate limit should be configured based on broker capacity while listener limits should be configured based on " +
"application requirements. New connections will be throttled if either the listener or the broker limit is reached, with the exception " +
"of inter-broker listener. Connections on the inter-broker listener will be throttled only when the listener-level rate limit is reached."
val ConnectionsMaxIdleMsDoc = "Idle connections timeout: the server socket processor threads close the connections that idle more than this"
val FailedAuthenticationDelayMsDoc = "Connection close delay on failed authentication: this is the time (in milliseconds) by which connection close will be delayed on authentication failure. " +
s"This must be configured to be less than $ConnectionsMaxIdleMsProp to prevent connection timeout."
/************* Rack Configuration **************/
val RackDoc = "Rack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: `RACK1`, `us-east-1d`"
/** ********* Log Configuration ***********/
// Docs for partition defaults, log directories, segment rolling, retention and the log cleaner.
val NumPartitionsDoc = "The default number of log partitions per topic"
val LogDirDoc = "The directory in which the log data is kept (supplemental for " + LogDirsProp + " property)"
val LogDirsDoc = "The directories in which the log data is kept. If not set, the value in " + LogDirProp + " is used"
val LogSegmentBytesDoc = "The maximum size of a single log file"
val LogRollTimeMillisDoc = "The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in " + LogRollTimeHoursProp + " is used"
val LogRollTimeHoursDoc = "The maximum time before a new log segment is rolled out (in hours), secondary to " + LogRollTimeMillisProp + " property"
val LogRollTimeJitterMillisDoc = "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in " + LogRollTimeJitterHoursProp + " is used"
val LogRollTimeJitterHoursDoc = "The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to " + LogRollTimeJitterMillisProp + " property"
// Retention precedence: ms overrides minutes, which overrides hours.
val LogRetentionTimeMillisDoc = "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in " + LogRetentionTimeMinutesProp + " is used. If set to -1, no time limit is applied."
val LogRetentionTimeMinsDoc = "The number of minutes to keep a log file before deleting it (in minutes), secondary to " + LogRetentionTimeMillisProp + " property. If not set, the value in " + LogRetentionTimeHoursProp + " is used"
val LogRetentionTimeHoursDoc = "The number of hours to keep a log file before deleting it (in hours), tertiary to " + LogRetentionTimeMillisProp + " property"
val LogRetentionBytesDoc = "The maximum size of the log before deleting it"
val LogCleanupIntervalMsDoc = "The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion"
val LogCleanupPolicyDoc = "The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. Valid policies are: \\"delete\\" and \\"compact\\""
val LogCleanerThreadsDoc = "The number of background threads to use for log cleaning"
val LogCleanerIoMaxBytesPerSecondDoc = "The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average"
val LogCleanerDedupeBufferSizeDoc = "The total memory used for log deduplication across all cleaner threads"
val LogCleanerIoBufferSizeDoc = "The total memory used for log cleaner I/O buffers across all cleaner threads"
val LogCleanerDedupeBufferLoadFactorDoc = "Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value " +
"will allow more log to be cleaned at once but will lead to more hash collisions"
val LogCleanerBackoffMsDoc = "The amount of time to sleep when there are no logs to clean"
val LogCleanerMinCleanRatioDoc = "The minimum ratio of dirty log to total log for a log to eligible for cleaning. " +
"If the " + LogCleanerMaxCompactionLagMsProp + " or the " + LogCleanerMinCompactionLagMsProp +
" configurations are also specified, then the log compactor considers the log eligible for compaction " +
"as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) " +
"records for at least the " + LogCleanerMinCompactionLagMsProp + " duration, or (ii) if the log has had " +
"dirty (uncompacted) records for at most the " + LogCleanerMaxCompactionLagMsProp + " period."
val LogCleanerEnableDoc = "Enable the log cleaner process to run on the server. Should be enabled if using any topics with a cleanup.policy=compact including the internal offsets topic. If disabled those topics will not be compacted and continually grow in size."
val LogCleanerDeleteRetentionMsDoc = "How long are delete records retained?"
val LogCleanerMinCompactionLagMsDoc = "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted."
val LogCleanerMaxCompactionLagMsDoc = "The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted."
val LogIndexSizeMaxBytesDoc = "The maximum size in bytes of the offset index"
val LogIndexIntervalBytesDoc = "The interval with which we add an entry to the offset index"
val LogFlushIntervalMessagesDoc = "The number of messages accumulated on a log partition before messages are flushed to disk "
val LogDeleteDelayMsDoc = "The amount of time to wait before deleting a file from the filesystem"
val LogFlushSchedulerIntervalMsDoc = "The frequency in ms that the log flusher checks whether any log needs to be flushed to disk"
val LogFlushIntervalMsDoc = "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in " + LogFlushSchedulerIntervalMsProp + " is used"
val LogFlushOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of the last flush which acts as the log recovery point"
val LogFlushStartOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of log start offset"
val LogPreAllocateEnableDoc = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true."
val LogMessageFormatVersionDoc = "Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. " +
"Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the " +
"user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly " +
"will cause consumers with older versions to break as they will receive messages with a format that they don't understand."
val LogMessageTimestampTypeDoc = "Define whether the timestamp in the message is message create time or log append time. The value should be either " +
"`CreateTime` or `LogAppendTime`"
// Description for the maximum allowed producer/broker timestamp skew.
// Fixed: the original concatenation produced "…LogAppendTime.The maximum…" with no separating
// space in the rendered docs; a leading space is added to the continuation string.
val LogMessageTimestampDifferenceMaxMsDoc = "The maximum difference allowed between the timestamp when a broker receives " +
"a message and the timestamp specified in the message. If log.message.timestamp.type=CreateTime, a message will be rejected " +
"if the difference in timestamp exceeds this threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime." +
" The maximum timestamp difference allowed should be no greater than log.retention.ms to avoid unnecessarily frequent log rolling."
val NumRecoveryThreadsPerDataDirDoc = "The number of threads per data directory to be used for log recovery at startup and flushing at shutdown"
val AutoCreateTopicsEnableDoc = "Enable auto creation of topic on the server"
val MinInSyncReplicasDoc = "When a producer sets acks to \\"all\\" (or \\"-1\\"), " +
"min.insync.replicas specifies the minimum number of replicas that must acknowledge " +
"a write for the write to be considered successful. If this minimum cannot be met, " +
"then the producer will raise an exception (either NotEnoughReplicas or " +
"NotEnoughReplicasAfterAppend).<br>When used together, min.insync.replicas and acks " +
"allow you to enforce greater durability guarantees. A typical scenario would be to " +
"create a topic with a replication factor of 3, set min.insync.replicas to 2, and " +
"produce with acks of \\"all\\". This will ensure that the producer raises an exception " +
"if a majority of replicas do not receive a write."
// Pluggable policy hooks applied when topics are created or configs are altered.
val CreateTopicPolicyClassNameDoc = "The create topic policy class that should be used for validation. The class should " +
"implement the <code>org.apache.kafka.server.policy.CreateTopicPolicy</code> interface."
val AlterConfigPolicyClassNameDoc = "The alter configs policy class that should be used for validation. The class should " +
"implement the <code>org.apache.kafka.server.policy.AlterConfigPolicy</code> interface."
// Reuses the topic-level down-conversion doc string. Removed the trailing semicolon — it is
// redundant in Scala and no other definition in this object uses one.
val LogMessageDownConversionEnableDoc = TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_DOC
/** ********* Replication configuration ***********/
// Docs for controller channels, replica fetchers, leader balancing and inter-broker protocol.
val ControllerSocketTimeoutMsDoc = "The socket timeout for controller-to-broker channels"
val ControllerMessageQueueSizeDoc = "The buffer size for controller-to-broker-channels"
val DefaultReplicationFactorDoc = "default replication factors for automatically created topics"
val ReplicaLagTimeMaxMsDoc = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time," +
" the leader will remove the follower from isr"
val ReplicaSocketTimeoutMsDoc = "The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms"
val ReplicaSocketReceiveBufferBytesDoc = "The socket receive buffer for network requests"
val ReplicaFetchMaxBytesDoc = "The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, " +
"if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned " +
"to ensure that progress can be made. The maximum record batch size accepted by the broker is defined via " +
"<code>message.max.bytes</code> (broker config) or <code>max.message.bytes</code> (topic config)."
val ReplicaFetchWaitMaxMsDoc = "max wait time for each fetcher request issued by follower replicas. This value should always be less than the " +
"replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics"
val ReplicaFetchMinBytesDoc = "Minimum bytes expected for each fetch response. If not enough bytes, wait up to <code>replica.fetch.wait.max.ms</code> (broker config)."
val ReplicaFetchResponseMaxBytesDoc = "Maximum bytes expected for the entire fetch response. Records are fetched in batches, " +
"and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch " +
"will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. The maximum " +
"record batch size accepted by the broker is defined via <code>message.max.bytes</code> (broker config) or " +
"<code>max.message.bytes</code> (topic config)."
val NumReplicaFetchersDoc = "Number of fetcher threads used to replicate messages from a source broker. " +
"Increasing this value can increase the degree of I/O parallelism in the follower broker."
val ReplicaFetchBackoffMsDoc = "The amount of time to sleep when fetch partition error occurs."
val ReplicaHighWatermarkCheckpointIntervalMsDoc = "The frequency with which the high watermark is saved out to disk"
val FetchPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the fetch request purgatory"
val ProducerPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the producer request purgatory"
val DeleteRecordsPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the delete records request purgatory"
val AutoLeaderRebalanceEnableDoc = "Enables auto leader balancing. A background thread checks the distribution of partition leaders at regular intervals, configurable by `leader.imbalance.check.interval.seconds`. If the leader imbalance exceeds `leader.imbalance.per.broker.percentage`, leader rebalance to the preferred leader for partitions is triggered."
val LeaderImbalancePerBrokerPercentageDoc = "The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage."
val LeaderImbalanceCheckIntervalSecondsDoc = "The frequency with which the partition rebalance check is triggered by the controller"
val UncleanLeaderElectionEnableDoc = "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss"
val InterBrokerSecurityProtocolDoc = "Security protocol used to communicate between brokers. Valid values are: " +
s"${SecurityProtocol.names.asScala.mkString(", ")}. It is an error to set this and $InterBrokerListenerNameProp " +
"properties at the same time."
val InterBrokerProtocolVersionDoc = "Specify which version of the inter-broker protocol will be used.\\n" +
" This is typically bumped after all brokers were upgraded to a new version.\\n" +
" Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check ApiVersion for the full list."
val InterBrokerListenerNameDoc = s"Name of listener used for communication between brokers. If this is unset, the listener name is defined by $InterBrokerSecurityProtocolProp. " +
s"It is an error to set this and $InterBrokerSecurityProtocolProp properties at the same time."
val ReplicaSelectorClassDoc = "The fully qualified class name that implements ReplicaSelector. This is used by the broker to find the preferred read replica. By default, we use an implementation that returns the leader."
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesDoc = "Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens"
val ControlledShutdownRetryBackoffMsDoc = "Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying."
val ControlledShutdownEnableDoc = "Enable controlled shutdown of the server"
/** ********* Group coordinator configuration ***********/
val GroupMinSessionTimeoutMsDoc = "The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources."
val GroupMaxSessionTimeoutMsDoc = "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures."
val GroupInitialRebalanceDelayMsDoc = "The amount of time the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins."
val GroupMaxSizeDoc = "The maximum number of consumers that a single consumer group can accommodate."
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeDoc = "The maximum size for a metadata entry associated with an offset commit"
val OffsetsLoadBufferSizeDoc = "Batch size for reading from the offsets segments when loading offsets into the cache (soft-limit, overridden if records are too large)."
val OffsetsTopicReplicationFactorDoc = "The replication factor for the offsets topic (set higher to ensure availability). " +
"Internal topic creation will fail until the cluster size meets this replication factor requirement."
val OffsetsTopicPartitionsDoc = "The number of partitions for the offset commit topic (should not change after deployment)"
val OffsetsTopicSegmentBytesDoc = "The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads"
val OffsetsTopicCompressionCodecDoc = "Compression codec for the offsets topic - compression may be used to achieve \\"atomic\\" commits"
val OffsetsRetentionMinutesDoc = "After a consumer group loses all its consumers (i.e. becomes empty) its offsets will be kept for this retention period before getting discarded. " +
"For standalone consumers (using manual assignment), offsets will be expired after the time of last commit plus this retention period."
val OffsetsRetentionCheckIntervalMsDoc = "Frequency at which to check for stale offsets"
val OffsetCommitTimeoutMsDoc = "Offset commit will be delayed until all replicas for the offsets topic receive the commit " +
"or this timeout is reached. This is similar to the producer request timeout."
val OffsetCommitRequiredAcksDoc = "The required acks before the commit can be accepted. In general, the default (-1) should not be overridden"
/** ********* Transaction management configuration ***********/
// Documentation strings for the transaction coordinator settings; paired with
// the Transactions*Prop keys in `configDef` below.
val TransactionalIdExpirationMsDoc = "The time in ms that the transaction coordinator will wait without receiving any transaction status updates " +
  "for the current transaction before expiring its transactional id. This setting also influences producer id expiration - producer ids are expired " +
  "once this time has elapsed after the last write with the given producer id. Note that producer ids may expire sooner if the last write from the producer id is deleted due to the topic's retention settings."
// User-facing doc for the maximum allowed transaction timeout.
// Fixed grammar in the rendered string: "transaction time exceed this" ->
// "transaction time exceeds this", and normalized the curly apostrophe in
// "client’s" to a plain ASCII apostrophe for consistency with every other
// doc string in this object (e.g. "topic's" above).
val TransactionsMaxTimeoutMsDoc = "The maximum allowed timeout for transactions. " +
  "If a client's requested transaction time exceeds this, then the broker will return an error in InitProducerIdRequest. This prevents a client from too large of a timeout, which can stall consumers reading from topics included in the transaction."
// Docs for the internal transaction-state topic. Note TransactionsTopicMinISRDoc
// embeds the min.insync.replicas property name so the text tracks renames.
val TransactionsTopicMinISRDoc = "Overridden " + MinInSyncReplicasProp + " config for the transaction topic."
val TransactionsLoadBufferSizeDoc = "Batch size for reading from the transaction log segments when loading producer ids and transactions into the cache (soft-limit, overridden if records are too large)."
val TransactionsTopicReplicationFactorDoc = "The replication factor for the transaction topic (set higher to ensure availability). " +
  "Internal topic creation will fail until the cluster size meets this replication factor requirement."
val TransactionsTopicPartitionsDoc = "The number of partitions for the transaction topic (should not change after deployment)."
val TransactionsTopicSegmentBytesDoc = "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads"
// User-facing doc for the timed-out-transaction abort interval.
// Fixed wording in the rendered string: "to rollback" (noun used as a verb)
// -> "to roll back" (correct verb form).
val TransactionsAbortTimedOutTransactionsIntervalMsDoc = "The interval at which to roll back transactions that have timed out"
val TransactionsRemoveExpiredTransactionsIntervalMsDoc = "The interval at which to remove transactions that have expired due to <code>transactional.id.expiration.ms</code> passing"
/** ********* Fetch Configuration **************/
val MaxIncrementalFetchSessionCacheSlotsDoc = "The maximum number of incremental fetch sessions that we will maintain."
val FetchMaxBytesDoc = "The maximum number of bytes we will return for a fetch request. Must be at least 1024."
/** ********* Quota Configuration ***********/
val NumQuotaSamplesDoc = "The number of samples to retain in memory for client quotas"
val NumReplicationQuotaSamplesDoc = "The number of samples to retain in memory for replication quotas"
val NumAlterLogDirsReplicationQuotaSamplesDoc = "The number of samples to retain in memory for alter log dirs replication quotas"
val NumControllerQuotaSamplesDoc = "The number of samples to retain in memory for controller mutation quotas"
val QuotaWindowSizeSecondsDoc = "The time span of each sample for client quotas"
val ReplicationQuotaWindowSizeSecondsDoc = "The time span of each sample for replication quotas"
val AlterLogDirsReplicationQuotaWindowSizeSecondsDoc = "The time span of each sample for alter log dirs replication quotas"
val ControllerQuotaWindowSizeSecondsDoc = "The time span of each sample for controller mutations quotas"
val ClientQuotaCallbackClassDoc = "The fully qualified name of a class that implements the ClientQuotaCallback interface, " +
  "which is used to determine quota limits applied to client requests. By default, <user, client-id>, <user> or <client-id> " +
  "quotas stored in ZooKeeper are applied. For any given request, the most specific quota that matches the user principal " +
  "of the session and the client-id of the request is applied."
val DeleteTopicEnableDoc = "Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off"
val CompressionTypeDoc = "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs " +
  "('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and " +
  "'producer' which means retain the original compression codec set by the producer."
/** ********* Kafka Metrics Configuration ***********/
// These vals re-export the shared doc strings from the client-side
// CommonClientConfigs so broker and client documentation stay in sync.
val MetricSampleWindowMsDoc = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC
val MetricNumSamplesDoc = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC
val MetricReporterClassesDoc = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC
val MetricRecordingLevelDoc = CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC
/** ********* Kafka Yammer Metrics Reporter Configuration ***********/
val KafkaMetricsReporterClassesDoc = "A list of classes to use as Yammer metrics custom reporters." +
  " The reporters should implement <code>kafka.metrics.KafkaMetricsReporter</code> trait. If a client wants" +
  " to expose JMX operations on a custom reporter, the custom reporter needs to additionally implement an MBean" +
  " trait that extends <code>kafka.metrics.KafkaMetricsReporterMBean</code> trait so that the registered MBean is compliant with" +
  " the standard MBean convention."
val KafkaMetricsPollingIntervalSecondsDoc = s"The metrics polling interval (in seconds) which can be used" +
  s" in $KafkaMetricsReporterClassesProp implementations."
/** ******** Common Security Configuration *************/
val PrincipalBuilderClassDoc = BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC
val ConnectionsMaxReauthMsDoc = BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_DOC
// NOTE(review): lowerCamelCase name is inconsistent with every other *Doc val
// in this object; renaming would break references elsewhere in the file, so it
// is left as-is here.
val securityProviderClassDoc = SecurityConfig.SECURITY_PROVIDERS_DOC
/** ********* SSL Configuration ****************/
// Aliases re-exporting the SSL doc strings defined in the clients module.
val SslProtocolDoc = SslConfigs.SSL_PROTOCOL_DOC
val SslProviderDoc = SslConfigs.SSL_PROVIDER_DOC
val SslCipherSuitesDoc = SslConfigs.SSL_CIPHER_SUITES_DOC
val SslEnabledProtocolsDoc = SslConfigs.SSL_ENABLED_PROTOCOLS_DOC
val SslKeystoreTypeDoc = SslConfigs.SSL_KEYSTORE_TYPE_DOC
val SslKeystoreLocationDoc = SslConfigs.SSL_KEYSTORE_LOCATION_DOC
val SslKeystorePasswordDoc = SslConfigs.SSL_KEYSTORE_PASSWORD_DOC
val SslKeyPasswordDoc = SslConfigs.SSL_KEY_PASSWORD_DOC
val SslKeystoreKeyDoc = SslConfigs.SSL_KEYSTORE_KEY_DOC
val SslKeystoreCertificateChainDoc = SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC
val SslTruststoreTypeDoc = SslConfigs.SSL_TRUSTSTORE_TYPE_DOC
val SslTruststorePasswordDoc = SslConfigs.SSL_TRUSTSTORE_PASSWORD_DOC
val SslTruststoreLocationDoc = SslConfigs.SSL_TRUSTSTORE_LOCATION_DOC
val SslTruststoreCertificatesDoc = SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_DOC
val SslKeyManagerAlgorithmDoc = SslConfigs.SSL_KEYMANAGER_ALGORITHM_DOC
val SslTrustManagerAlgorithmDoc = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC
val SslEndpointIdentificationAlgorithmDoc = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC
val SslSecureRandomImplementationDoc = SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_DOC
val SslClientAuthDoc = BrokerSecurityConfigs.SSL_CLIENT_AUTH_DOC
val SslPrincipalMappingRulesDoc = BrokerSecurityConfigs.SSL_PRINCIPAL_MAPPING_RULES_DOC
val SslEngineFactoryClassDoc = SslConfigs.SSL_ENGINE_FACTORY_CLASS_DOC
/** ********* Sasl Configuration ****************/
// SASL docs: broker-specific strings plus aliases of client-side SaslConfigs /
// BrokerSecurityConfigs doc constants.
val SaslMechanismInterBrokerProtocolDoc = "SASL mechanism used for inter-broker communication. Default is GSSAPI."
val SaslJaasConfigDoc = SaslConfigs.SASL_JAAS_CONFIG_DOC
val SaslEnabledMechanismsDoc = BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_DOC
val SaslServerCallbackHandlerClassDoc = BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS_DOC
val SaslClientCallbackHandlerClassDoc = SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC
val SaslLoginClassDoc = SaslConfigs.SASL_LOGIN_CLASS_DOC
val SaslLoginCallbackHandlerClassDoc = SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC
val SaslKerberosServiceNameDoc = SaslConfigs.SASL_KERBEROS_SERVICE_NAME_DOC
val SaslKerberosKinitCmdDoc = SaslConfigs.SASL_KERBEROS_KINIT_CMD_DOC
val SaslKerberosTicketRenewWindowFactorDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC
val SaslKerberosTicketRenewJitterDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER_DOC
val SaslKerberosMinTimeBeforeReloginDoc = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC
val SaslKerberosPrincipalToLocalRulesDoc = BrokerSecurityConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC
val SaslLoginRefreshWindowFactorDoc = SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC
val SaslLoginRefreshWindowJitterDoc = SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC
val SaslLoginRefreshMinPeriodSecondsDoc = SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC
val SaslLoginRefreshBufferSecondsDoc = SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC
/** ********* Delegation Token Configuration ****************/
// The deprecated alias doc interpolates the current property name so the
// deprecation message tracks renames.
val DelegationTokenSecretKeyAliasDoc = s"DEPRECATED: An alias for $DelegationTokenSecretKeyProp, which should be used instead of this config."
val DelegationTokenSecretKeyDoc = "Secret key to generate and verify delegation tokens. The same key must be configured across all the brokers. " +
  " If the key is not set or set to empty string, brokers will disable the delegation token support."
val DelegationTokenMaxLifeTimeDoc = "The token has a maximum lifetime beyond which it cannot be renewed anymore. Default value 7 days."
// User-facing doc for delegation.token.expiry.time.ms.
// Fixed typo in the rendered string: "miliseconds" -> "milliseconds".
val DelegationTokenExpiryTimeMsDoc = "The token validity time in milliseconds before the token needs to be renewed. Default value 1 day."
val DelegationTokenExpiryCheckIntervalDoc = "Scan interval to remove expired delegation tokens."
/** ********* Password encryption configuration for dynamic configs *********/
// Docs for the secrets/algorithms used to encode dynamically configured
// password values; the old-secret doc interpolates the current property name.
val PasswordEncoderSecretDoc = "The secret used for encoding dynamically configured passwords for this broker."
val PasswordEncoderOldSecretDoc = "The old secret that was used for encoding dynamically configured passwords. " +
  "This is required only when the secret is updated. If specified, all dynamically encoded passwords are " +
  s"decoded using this old secret and re-encoded using $PasswordEncoderSecretProp when broker starts up."
val PasswordEncoderKeyFactoryAlgorithmDoc = "The SecretKeyFactory algorithm used for encoding dynamically configured passwords. " +
  "Default is PBKDF2WithHmacSHA512 if available and PBKDF2WithHmacSHA1 otherwise."
val PasswordEncoderCipherAlgorithmDoc = "The Cipher algorithm used for encoding dynamically configured passwords."
val PasswordEncoderKeyLengthDoc = "The key length used for encoding dynamically configured passwords."
val PasswordEncoderIterationsDoc = "The iteration count used for encoding dynamically configured passwords."
@nowarn("cat=deprecation")
private[server] val configDef = {
import ConfigDef.Importance._
import ConfigDef.Range._
import ConfigDef.Type._
import ConfigDef.ValidString._
new ConfigDef()
/** ********* Zookeeper Configuration ***********/
.define(ZkConnectProp, STRING, null, HIGH, ZkConnectDoc)
.define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc)
.define(ZkConnectionTimeoutMsProp, INT, null, HIGH, ZkConnectionTimeoutMsDoc)
.define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc)
.define(ZkEnableSecureAclsProp, BOOLEAN, Defaults.ZkEnableSecureAcls, HIGH, ZkEnableSecureAclsDoc)
.define(ZkMaxInFlightRequestsProp, INT, Defaults.ZkMaxInFlightRequests, atLeast(1), HIGH, ZkMaxInFlightRequestsDoc)
.define(ZkSslClientEnableProp, BOOLEAN, Defaults.ZkSslClientEnable, MEDIUM, ZkSslClientEnableDoc)
.define(ZkClientCnxnSocketProp, STRING, null, MEDIUM, ZkClientCnxnSocketDoc)
.define(ZkSslKeyStoreLocationProp, STRING, null, MEDIUM, ZkSslKeyStoreLocationDoc)
.define(ZkSslKeyStorePasswordProp, PASSWORD, null, MEDIUM, ZkSslKeyStorePasswordDoc)
.define(ZkSslKeyStoreTypeProp, STRING, null, MEDIUM, ZkSslKeyStoreTypeDoc)
.define(ZkSslTrustStoreLocationProp, STRING, null, MEDIUM, ZkSslTrustStoreLocationDoc)
.define(ZkSslTrustStorePasswordProp, PASSWORD, null, MEDIUM, ZkSslTrustStorePasswordDoc)
.define(ZkSslTrustStoreTypeProp, STRING, null, MEDIUM, ZkSslTrustStoreTypeDoc)
.define(ZkSslProtocolProp, STRING, Defaults.ZkSslProtocol, LOW, ZkSslProtocolDoc)
.define(ZkSslEnabledProtocolsProp, LIST, null, LOW, ZkSslEnabledProtocolsDoc)
.define(ZkSslCipherSuitesProp, LIST, null, LOW, ZkSslCipherSuitesDoc)
.define(ZkSslEndpointIdentificationAlgorithmProp, STRING, Defaults.ZkSslEndpointIdentificationAlgorithm, LOW, ZkSslEndpointIdentificationAlgorithmDoc)
.define(ZkSslCrlEnableProp, BOOLEAN, Defaults.ZkSslCrlEnable, LOW, ZkSslCrlEnableDoc)
.define(ZkSslOcspEnableProp, BOOLEAN, Defaults.ZkSslOcspEnable, LOW, ZkSslOcspEnableDoc)
/** ********* General Configuration ***********/
.define(BrokerIdGenerationEnableProp, BOOLEAN, Defaults.BrokerIdGenerationEnable, MEDIUM, BrokerIdGenerationEnableDoc)
.define(MaxReservedBrokerIdProp, INT, Defaults.MaxReservedBrokerId, atLeast(0), MEDIUM, MaxReservedBrokerIdDoc)
.define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc)
.define(MessageMaxBytesProp, INT, Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc)
.define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc)
.define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc)
.define(NumReplicaAlterLogDirsThreadsProp, INT, null, HIGH, NumReplicaAlterLogDirsThreadsDoc)
.define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc)
.define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc)
.define(QueuedMaxBytesProp, LONG, Defaults.QueuedMaxRequestBytes, MEDIUM, QueuedMaxRequestBytesDoc)
.define(RequestTimeoutMsProp, INT, Defaults.RequestTimeoutMs, HIGH, RequestTimeoutMsDoc)
.define(ConnectionSetupTimeoutMsProp, LONG, Defaults.ConnectionSetupTimeoutMs, MEDIUM, ConnectionSetupTimeoutMsDoc)
.define(ConnectionSetupTimeoutMaxMsProp, LONG, Defaults.ConnectionSetupTimeoutMaxMs, MEDIUM, ConnectionSetupTimeoutMaxMsDoc)
/*
* KRaft mode configs.
*/
.define(MetadataSnapshotMaxNewRecordBytesProp, LONG, Defaults.MetadataSnapshotMaxNewRecordBytes, atLeast(1), HIGH, MetadataSnapshotMaxNewRecordBytesDoc)
/*
* KRaft mode private configs. Note that these configs are defined as internal. We will make them public in the 3.0.0 release.
*/
.define(ProcessRolesProp, LIST, Collections.emptyList(), ValidList.in("broker", "controller"), HIGH, ProcessRolesDoc)
.define(NodeIdProp, INT, Defaults.EmptyNodeId, null, HIGH, NodeIdDoc)
.define(InitialBrokerRegistrationTimeoutMsProp, INT, Defaults.InitialBrokerRegistrationTimeoutMs, null, MEDIUM, InitialBrokerRegistrationTimeoutMsDoc)
.define(BrokerHeartbeatIntervalMsProp, INT, Defaults.BrokerHeartbeatIntervalMs, null, MEDIUM, BrokerHeartbeatIntervalMsDoc)
.define(BrokerSessionTimeoutMsProp, INT, Defaults.BrokerSessionTimeoutMs, null, MEDIUM, BrokerSessionTimeoutMsDoc)
.define(ControllerListenerNamesProp, STRING, null, null, HIGH, ControllerListenerNamesDoc)
.define(SaslMechanismControllerProtocolProp, STRING, SaslConfigs.DEFAULT_SASL_MECHANISM, null, HIGH, SaslMechanismControllerProtocolDoc)
.define(MetadataLogDirProp, STRING, null, null, HIGH, MetadataLogDirDoc)
.define(MetadataLogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Records.LOG_OVERHEAD), HIGH, MetadataLogSegmentBytesDoc)
.defineInternal(MetadataLogSegmentMinBytesProp, INT, 8 * 1024 * 1024, atLeast(Records.LOG_OVERHEAD), HIGH, MetadataLogSegmentMinBytesDoc)
.define(MetadataLogSegmentMillisProp, LONG, Defaults.LogRollHours * 60 * 60 * 1000L, null, HIGH, MetadataLogSegmentMillisDoc)
.define(MetadataMaxRetentionBytesProp, LONG, Defaults.LogRetentionBytes, null, HIGH, MetadataMaxRetentionBytesDoc)
.define(MetadataMaxRetentionMillisProp, LONG, Defaults.LogRetentionHours * 60 * 60 * 1000L, null, HIGH, MetadataMaxRetentionMillisDoc)
/************* Authorizer Configuration ***********/
.define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc)
/** ********* Socket Server Configuration ***********/
.define(ListenersProp, STRING, Defaults.Listeners, HIGH, ListenersDoc)
.define(AdvertisedListenersProp, STRING, null, HIGH, AdvertisedListenersDoc)
.define(ListenerSecurityProtocolMapProp, STRING, Defaults.ListenerSecurityProtocolMap, LOW, ListenerSecurityProtocolMapDoc)
.define(ControlPlaneListenerNameProp, STRING, null, HIGH, controlPlaneListenerNameDoc)
.define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc)
.define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc)
.define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc)
.define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(0), MEDIUM, MaxConnectionsPerIpDoc)
.define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc)
.define(MaxConnectionsProp, INT, Defaults.MaxConnections, atLeast(0), MEDIUM, MaxConnectionsDoc)
.define(MaxConnectionCreationRateProp, INT, Defaults.MaxConnectionCreationRate, atLeast(0), MEDIUM, MaxConnectionCreationRateDoc)
.define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc)
.define(FailedAuthenticationDelayMsProp, INT, Defaults.FailedAuthenticationDelayMs, atLeast(0), LOW, FailedAuthenticationDelayMsDoc)
/************ Rack Configuration ******************/
.define(RackProp, STRING, null, MEDIUM, RackDoc)
/** ********* Log Configuration ***********/
.define(NumPartitionsProp, INT, Defaults.NumPartitions, atLeast(1), MEDIUM, NumPartitionsDoc)
.define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc)
.define(LogDirsProp, STRING, null, HIGH, LogDirsDoc)
.define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), HIGH, LogSegmentBytesDoc)
.define(LogRollTimeMillisProp, LONG, null, HIGH, LogRollTimeMillisDoc)
.define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, LogRollTimeHoursDoc)
.define(LogRollTimeJitterMillisProp, LONG, null, HIGH, LogRollTimeJitterMillisDoc)
.define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc)
.define(LogRetentionTimeMillisProp, LONG, null, HIGH, LogRetentionTimeMillisDoc)
.define(LogRetentionTimeMinutesProp, INT, null, HIGH, LogRetentionTimeMinsDoc)
.define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc)
.define(LogRetentionBytesProp, LONG, Defaults.LogRetentionBytes, HIGH, LogRetentionBytesDoc)
.define(LogCleanupIntervalMsProp, LONG, Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc)
.define(LogCleanupPolicyProp, LIST, Defaults.LogCleanupPolicy, ValidList.in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc)
.define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc)
.define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc)
.define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc)
.define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc)
.define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, LogCleanerDedupeBufferLoadFactorDoc)
.define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc)
.define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc)
.define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc)
.define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc)
.define(LogCleanerMinCompactionLagMsProp, LONG, Defaults.LogCleanerMinCompactionLagMs, MEDIUM, LogCleanerMinCompactionLagMsDoc)
.define(LogCleanerMaxCompactionLagMsProp, LONG, Defaults.LogCleanerMaxCompactionLagMs, MEDIUM, LogCleanerMaxCompactionLagMsDoc)
.define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc)
.define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc)
.define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc)
.define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc)
.define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc)
.define(LogFlushIntervalMsProp, LONG, null, HIGH, LogFlushIntervalMsDoc)
.define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc)
.define(LogFlushStartOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushStartOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushStartOffsetCheckpointIntervalMsDoc)
.define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc)
.define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc)
.define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc)
.define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc)
.define(LogMessageFormatVersionProp, STRING, Defaults.LogMessageFormatVersion, ApiVersionValidator, MEDIUM, LogMessageFormatVersionDoc)
.define(LogMessageTimestampTypeProp, STRING, Defaults.LogMessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, LogMessageTimestampTypeDoc)
.define(LogMessageTimestampDifferenceMaxMsProp, LONG, Defaults.LogMessageTimestampDifferenceMaxMs, MEDIUM, LogMessageTimestampDifferenceMaxMsDoc)
.define(CreateTopicPolicyClassNameProp, CLASS, null, LOW, CreateTopicPolicyClassNameDoc)
.define(AlterConfigPolicyClassNameProp, CLASS, null, LOW, AlterConfigPolicyClassNameDoc)
.define(LogMessageDownConversionEnableProp, BOOLEAN, Defaults.MessageDownConversionEnable, LOW, LogMessageDownConversionEnableDoc)
/** ********* Replication configuration ***********/
.define(ControllerSocketTimeoutMsProp, INT, Defaults.ControllerSocketTimeoutMs, MEDIUM, ControllerSocketTimeoutMsDoc)
.define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc)
.define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc)
.define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc)
.define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc)
.define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, atLeast(0), MEDIUM, ReplicaFetchMaxBytesDoc)
.define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc)
.define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc)
.define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc)
.define(ReplicaFetchResponseMaxBytesProp, INT, Defaults.ReplicaFetchResponseMaxBytes, atLeast(0), MEDIUM, ReplicaFetchResponseMaxBytesDoc)
.define(NumReplicaFetchersProp, INT, Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc)
.define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc)
.define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc)
.define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc)
.define(DeleteRecordsPurgatoryPurgeIntervalRequestsProp, INT, Defaults.DeleteRecordsPurgatoryPurgeIntervalRequests, MEDIUM, DeleteRecordsPurgatoryPurgeIntervalRequestsDoc)
.define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc)
.define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc)
.define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc)
.define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc)
.define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc)
.define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, ApiVersionValidator, MEDIUM, InterBrokerProtocolVersionDoc)
.define(InterBrokerListenerNameProp, STRING, null, MEDIUM, InterBrokerListenerNameDoc)
.define(ReplicaSelectorClassProp, STRING, null, MEDIUM, ReplicaSelectorClassDoc)
/** ********* Controlled shutdown configuration ***********/
.define(ControlledShutdownMaxRetriesProp, INT, Defaults.ControlledShutdownMaxRetries, MEDIUM, ControlledShutdownMaxRetriesDoc)
.define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc)
.define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc)
/** ********* Group coordinator configuration ***********/
.define(GroupMinSessionTimeoutMsProp, INT, Defaults.GroupMinSessionTimeoutMs, MEDIUM, GroupMinSessionTimeoutMsDoc)
.define(GroupMaxSessionTimeoutMsProp, INT, Defaults.GroupMaxSessionTimeoutMs, MEDIUM, GroupMaxSessionTimeoutMsDoc)
.define(GroupInitialRebalanceDelayMsProp, INT, Defaults.GroupInitialRebalanceDelayMs, MEDIUM, GroupInitialRebalanceDelayMsDoc)
.define(GroupMaxSizeProp, INT, Defaults.GroupMaxSize, atLeast(1), MEDIUM, GroupMaxSizeDoc)
/** ********* Offset management configuration ***********/
.define(OffsetMetadataMaxSizeProp, INT, Defaults.OffsetMetadataMaxSize, HIGH, OffsetMetadataMaxSizeDoc)
.define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc)
.define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc)
.define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, OffsetsTopicPartitionsDoc)
.define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc)
.define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc)
.define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc)
.define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc)
.define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc)
.define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc)
.define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc)
.define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc)
/** ********* Transaction management configuration ***********/
.define(TransactionalIdExpirationMsProp, INT, Defaults.TransactionalIdExpirationMs, atLeast(1), HIGH, TransactionalIdExpirationMsDoc)
.define(TransactionsMaxTimeoutMsProp, INT, Defaults.TransactionsMaxTimeoutMs, atLeast(1), HIGH, TransactionsMaxTimeoutMsDoc)
.define(TransactionsTopicMinISRProp, INT, Defaults.TransactionsTopicMinISR, atLeast(1), HIGH, TransactionsTopicMinISRDoc)
.define(TransactionsLoadBufferSizeProp, INT, Defaults.TransactionsLoadBufferSize, atLeast(1), HIGH, TransactionsLoadBufferSizeDoc)
.define(TransactionsTopicReplicationFactorProp, SHORT, Defaults.TransactionsTopicReplicationFactor, atLeast(1), HIGH, TransactionsTopicReplicationFactorDoc)
.define(TransactionsTopicPartitionsProp, INT, Defaults.TransactionsTopicPartitions, atLeast(1), HIGH, TransactionsTopicPartitionsDoc)
.define(TransactionsTopicSegmentBytesProp, INT, Defaults.TransactionsTopicSegmentBytes, atLeast(1), HIGH, TransactionsTopicSegmentBytesDoc)
.define(TransactionsAbortTimedOutTransactionCleanupIntervalMsProp, INT, Defaults.TransactionsAbortTimedOutTransactionsCleanupIntervalMS, atLeast(1), LOW, TransactionsAbortTimedOutTransactionsIntervalMsDoc)
.define(TransactionsRemoveExpiredTransactionalIdCleanupIntervalMsProp, INT, Defaults.TransactionsRemoveExpiredTransactionsCleanupIntervalMS, atLeast(1), LOW, TransactionsRemoveExpiredTransactionsIntervalMsDoc)
/** ********* Fetch Configuration **************/
.define(MaxIncrementalFetchSessionCacheSlots, INT, Defaults.MaxIncrementalFetchSessionCacheSlots, atLeast(0), MEDIUM, MaxIncrementalFetchSessionCacheSlotsDoc)
.define(FetchMaxBytes, INT, Defaults.FetchMaxBytes, atLeast(1024), MEDIUM, FetchMaxBytesDoc)
/** ********* Kafka Metrics Configuration ***********/
.define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc)
.define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc)
.define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc)
.define(MetricRecordingLevelProp, STRING, Defaults.MetricRecordingLevel, LOW, MetricRecordingLevelDoc)
/** ********* Kafka Yammer Metrics Reporter Configuration for docs ***********/
.define(KafkaMetricsReporterClassesProp, LIST, Defaults.KafkaMetricReporterClasses, LOW, KafkaMetricsReporterClassesDoc)
.define(KafkaMetricsPollingIntervalSecondsProp, INT, Defaults.KafkaMetricsPollingIntervalSeconds, atLeast(1), LOW, KafkaMetricsPollingIntervalSecondsDoc)
/** ********* Quota configuration ***********/
.define(NumQuotaSamplesProp, INT, Defaults.NumQuotaSamples, atLeast(1), LOW, NumQuotaSamplesDoc)
.define(NumReplicationQuotaSamplesProp, INT, Defaults.NumReplicationQuotaSamples, atLeast(1), LOW, NumReplicationQuotaSamplesDoc)
.define(NumAlterLogDirsReplicationQuotaSamplesProp, INT, Defaults.NumAlterLogDirsReplicationQuotaSamples, atLeast(1), LOW, NumAlterLogDirsReplicationQuotaSamplesDoc)
.define(NumControllerQuotaSamplesProp, INT, Defaults.NumControllerQuotaSamples, atLeast(1), LOW, NumControllerQuotaSamplesDoc)
.define(QuotaWindowSizeSecondsProp, INT, Defaults.QuotaWindowSizeSeconds, atLeast(1), LOW, QuotaWindowSizeSecondsDoc)
.define(ReplicationQuotaWindowSizeSecondsProp, INT, Defaults.ReplicationQuotaWindowSizeSeconds, atLeast(1), LOW, ReplicationQuotaWindowSizeSecondsDoc)
.define(AlterLogDirsReplicationQuotaWindowSizeSecondsProp, INT, Defaults.AlterLogDirsReplicationQuotaWindowSizeSeconds, atLeast(1), LOW, AlterLogDirsReplicationQuotaWindowSizeSecondsDoc)
.define(ControllerQuotaWindowSizeSecondsProp, INT, Defaults.ControllerQuotaWindowSizeSeconds, atLeast(1), LOW, ControllerQuotaWindowSizeSecondsDoc)
.define(ClientQuotaCallbackClassProp, CLASS, null, LOW, ClientQuotaCallbackClassDoc)
/** ********* General Security Configuration ****************/
.define(ConnectionsMaxReauthMsProp, LONG, Defaults.ConnectionsMaxReauthMsDefault, MEDIUM, ConnectionsMaxReauthMsDoc)
.define(securityProviderClassProp, STRING, null, LOW, securityProviderClassDoc)
/** ********* SSL Configuration ****************/
.define(PrincipalBuilderClassProp, CLASS, Defaults.DefaultPrincipalSerde, MEDIUM, PrincipalBuilderClassDoc)
.define(SslProtocolProp, STRING, Defaults.SslProtocol, MEDIUM, SslProtocolDoc)
.define(SslProviderProp, STRING, null, MEDIUM, SslProviderDoc)
.define(SslEnabledProtocolsProp, LIST, Defaults.SslEnabledProtocols, MEDIUM, SslEnabledProtocolsDoc)
.define(SslKeystoreTypeProp, STRING, Defaults.SslKeystoreType, MEDIUM, SslKeystoreTypeDoc)
.define(SslKeystoreLocationProp, STRING, null, MEDIUM, SslKeystoreLocationDoc)
.define(SslKeystorePasswordProp, PASSWORD, null, MEDIUM, SslKeystorePasswordDoc)
.define(SslKeyPasswordProp, PASSWORD, null, MEDIUM, SslKeyPasswordDoc)
.define(SslKeystoreKeyProp, PASSWORD, null, MEDIUM, SslKeystoreKeyDoc)
.define(SslKeystoreCertificateChainProp, PASSWORD, null, MEDIUM, SslKeystoreCertificateChainDoc)
.define(SslTruststoreTypeProp, STRING, Defaults.SslTruststoreType, MEDIUM, SslTruststoreTypeDoc)
.define(SslTruststoreLocationProp, STRING, null, MEDIUM, SslTruststoreLocationDoc)
.define(SslTruststorePasswordProp, PASSWORD, null, MEDIUM, SslTruststorePasswordDoc)
.define(SslTruststoreCertificatesProp, PASSWORD, null, MEDIUM, SslTruststoreCertificatesDoc)
.define(SslKeyManagerAlgorithmProp, STRING, Defaults.SslKeyManagerAlgorithm, MEDIUM, SslKeyManagerAlgorithmDoc)
.define(SslTrustManagerAlgorithmProp, STRING, Defaults.SslTrustManagerAlgorithm, MEDIUM, SslTrustManagerAlgorithmDoc)
.define(SslEndpointIdentificationAlgorithmProp, STRING, Defaults.SslEndpointIdentificationAlgorithm, LOW, SslEndpointIdentificationAlgorithmDoc)
.define(SslSecureRandomImplementationProp, STRING, null, LOW, SslSecureRandomImplementationDoc)
.define(SslClientAuthProp, STRING, Defaults.SslClientAuthentication, in(Defaults.SslClientAuthenticationValidValues:_*), MEDIUM, SslClientAuthDoc)
.define(SslCipherSuitesProp, LIST, Collections.emptyList(), MEDIUM, SslCipherSuitesDoc)
.define(SslPrincipalMappingRulesProp, STRING, Defaults.SslPrincipalMappingRules, LOW, SslPrincipalMappingRulesDoc)
.define(SslEngineFactoryClassProp, CLASS, null, LOW, SslEngineFactoryClassDoc)
/** ********* Sasl Configuration ****************/
.define(SaslMechanismInterBrokerProtocolProp, STRING, Defaults.SaslMechanismInterBrokerProtocol, MEDIUM, SaslMechanismInterBrokerProtocolDoc)
.define(SaslJaasConfigProp, PASSWORD, null, MEDIUM, SaslJaasConfigDoc)
.define(SaslEnabledMechanismsProp, LIST, Defaults.SaslEnabledMechanisms, MEDIUM, SaslEnabledMechanismsDoc)
.define(SaslServerCallbackHandlerClassProp, CLASS, null, MEDIUM, SaslServerCallbackHandlerClassDoc)
.define(SaslClientCallbackHandlerClassProp, CLASS, null, MEDIUM, SaslClientCallbackHandlerClassDoc)
.define(SaslLoginClassProp, CLASS, null, MEDIUM, SaslLoginClassDoc)
.define(SaslLoginCallbackHandlerClassProp, CLASS, null, MEDIUM, SaslLoginCallbackHandlerClassDoc)
.define(SaslKerberosServiceNameProp, STRING, null, MEDIUM, SaslKerberosServiceNameDoc)
.define(SaslKerberosKinitCmdProp, STRING, Defaults.SaslKerberosKinitCmd, MEDIUM, SaslKerberosKinitCmdDoc)
.define(SaslKerberosTicketRenewWindowFactorProp, DOUBLE, Defaults.SaslKerberosTicketRenewWindowFactor, MEDIUM, SaslKerberosTicketRenewWindowFactorDoc)
.define(SaslKerberosTicketRenewJitterProp, DOUBLE, Defaults.SaslKerberosTicketRenewJitter, MEDIUM, SaslKerberosTicketRenewJitterDoc)
.define(SaslKerberosMinTimeBeforeReloginProp, LONG, Defaults.SaslKerberosMinTimeBeforeRelogin, MEDIUM, SaslKerberosMinTimeBeforeReloginDoc)
.define(SaslKerberosPrincipalToLocalRulesProp, LIST, Defaults.SaslKerberosPrincipalToLocalRules, MEDIUM, SaslKerberosPrincipalToLocalRulesDoc)
.define(SaslLoginRefreshWindowFactorProp, DOUBLE, Defaults.SaslLoginRefreshWindowFactor, MEDIUM, SaslLoginRefreshWindowFactorDoc)
.define(SaslLoginRefreshWindowJitterProp, DOUBLE, Defaults.SaslLoginRefreshWindowJitter, MEDIUM, SaslLoginRefreshWindowJitterDoc)
.define(SaslLoginRefreshMinPeriodSecondsProp, SHORT, Defaults.SaslLoginRefreshMinPeriodSeconds, MEDIUM, SaslLoginRefreshMinPeriodSecondsDoc)
.define(SaslLoginRefreshBufferSecondsProp, SHORT, Defaults.SaslLoginRefreshBufferSeconds, MEDIUM, SaslLoginRefreshBufferSecondsDoc)
/** ********* Delegation Token Configuration ****************/
.define(DelegationTokenSecretKeyAliasProp, PASSWORD, null, MEDIUM, DelegationTokenSecretKeyAliasDoc)
.define(DelegationTokenSecretKeyProp, PASSWORD, null, MEDIUM, DelegationTokenSecretKeyDoc)
.define(DelegationTokenMaxLifeTimeProp, LONG, Defaults.DelegationTokenMaxLifeTimeMsDefault, atLeast(1), MEDIUM, DelegationTokenMaxLifeTimeDoc)
.define(DelegationTokenExpiryTimeMsProp, LONG, Defaults.DelegationTokenExpiryTimeMsDefault, atLeast(1), MEDIUM, DelegationTokenExpiryTimeMsDoc)
.define(DelegationTokenExpiryCheckIntervalMsProp, LONG, Defaults.DelegationTokenExpiryCheckIntervalMsDefault, atLeast(1), LOW, DelegationTokenExpiryCheckIntervalDoc)
/** ********* Password encryption configuration for dynamic configs *********/
.define(PasswordEncoderSecretProp, PASSWORD, null, MEDIUM, PasswordEncoderSecretDoc)
.define(PasswordEncoderOldSecretProp, PASSWORD, null, MEDIUM, PasswordEncoderOldSecretDoc)
.define(PasswordEncoderKeyFactoryAlgorithmProp, STRING, null, LOW, PasswordEncoderKeyFactoryAlgorithmDoc)
.define(PasswordEncoderCipherAlgorithmProp, STRING, Defaults.PasswordEncoderCipherAlgorithm, LOW, PasswordEncoderCipherAlgorithmDoc)
.define(PasswordEncoderKeyLengthProp, INT, Defaults.PasswordEncoderKeyLength, atLeast(8), LOW, PasswordEncoderKeyLengthDoc)
.define(PasswordEncoderIterationsProp, INT, Defaults.PasswordEncoderIterations, atLeast(1024), LOW, PasswordEncoderIterationsDoc)
/** ********* Raft Quorum Configuration *********/
.define(RaftConfig.QUORUM_VOTERS_CONFIG, LIST, Defaults.QuorumVoters, new RaftConfig.ControllerQuorumVotersValidator(), HIGH, RaftConfig.QUORUM_VOTERS_DOC)
.define(RaftConfig.QUORUM_ELECTION_TIMEOUT_MS_CONFIG, INT, Defaults.QuorumElectionTimeoutMs, null, HIGH, RaftConfig.QUORUM_ELECTION_TIMEOUT_MS_DOC)
.define(RaftConfig.QUORUM_FETCH_TIMEOUT_MS_CONFIG, INT, Defaults.QuorumFetchTimeoutMs, null, HIGH, RaftConfig.QUORUM_FETCH_TIMEOUT_MS_DOC)
.define(RaftConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_CONFIG, INT, Defaults.QuorumElectionBackoffMs, null, HIGH, RaftConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_DOC)
.define(RaftConfig.QUORUM_LINGER_MS_CONFIG, INT, Defaults.QuorumLingerMs, null, MEDIUM, RaftConfig.QUORUM_LINGER_MS_DOC)
.define(RaftConfig.QUORUM_REQUEST_TIMEOUT_MS_CONFIG, INT, Defaults.QuorumRequestTimeoutMs, null, MEDIUM, RaftConfig.QUORUM_REQUEST_TIMEOUT_MS_DOC)
.define(RaftConfig.QUORUM_RETRY_BACKOFF_MS_CONFIG, INT, Defaults.QuorumRetryBackoffMs, null, LOW, RaftConfig.QUORUM_RETRY_BACKOFF_MS_DOC)
}
/** ********* Remote Log Management Configuration *********/
// Tiered-storage configs are declared on RemoteLogManagerConfig's own ConfigDef;
// each key is copied into the broker-wide configDef so those configs are parsed
// and validated uniformly with all other broker configs.
RemoteLogManagerConfig.CONFIG_DEF.configKeys().values().forEach(key => configDef.define(key))
// All declared broker config names, sorted for a stable display/iteration order.
def configNames: Seq[String] = configDef.names.asScala.toBuffer.sorted
// Declared default value per config name (absent entries have no default).
private[server] def defaultValues: Map[String, _] = configDef.defaultValues.asScala
// Full ConfigKey metadata (type, importance, validator, doc, ...) per config name.
private[server] def configKeys: Map[String, ConfigKey] = configDef.configKeys.asScala
/** Builds a KafkaConfig from the given properties, with value logging enabled. */
def fromProps(props: Properties): KafkaConfig =
  fromProps(props, true)
/** Builds a KafkaConfig from the given properties; `doLog` controls config logging. */
def fromProps(props: Properties, doLog: Boolean): KafkaConfig =
  new KafkaConfig(props, doLog)
/** Builds a KafkaConfig from `defaults` overlaid with `overrides`, with value logging enabled. */
def fromProps(defaults: Properties, overrides: Properties): KafkaConfig =
  fromProps(defaults, overrides, true)
/**
 * Builds a KafkaConfig from `defaults` overlaid with `overrides`.
 * Keys present in both take their value from `overrides` (applied last).
 *
 * @param doLog whether the parsed config values are logged
 */
def fromProps(defaults: Properties, overrides: Properties, doLog: Boolean): KafkaConfig = {
  val combined = new Properties()
  combined ++= defaults
  combined ++= overrides
  fromProps(combined, doLog)
}
/** Builds a KafkaConfig from a raw config map, with value logging enabled. */
def apply(props: java.util.Map[_, _]): KafkaConfig = new KafkaConfig(props, true)
// Type of a statically declared broker config, or None when the name is unknown.
private def typeOf(name: String): Option[ConfigDef.Type] = Option(configDef.configKeys.get(name)).map(_.`type`)
/**
 * Resolves the `ConfigDef.Type` of a broker config name, falling back to
 * listener-prefixed dynamic synonyms when the name is not a declared key.
 *
 * Note: `configTypeExact` already consults `typeOf` first, so the original
 * re-check of `typeOf(configName)` after a None result was dead code; it is
 * removed here along with the non-local `return`.
 */
def configType(configName: String): Option[ConfigDef.Type] = {
  configTypeExact(configName).orElse(
    // e.g. "listener.name.x.ssl.keystore.location" maps back to the base config's type
    DynamicBrokerConfig.brokerConfigSynonyms(configName, matchListenerOverride = true)
      .flatMap(typeOf)
      .headOption
  )
}
/**
 * Looks up the `ConfigDef.Type` for an exact config name: first in the static
 * broker `configDef`, then among the dynamically-settable per-broker configs
 * (e.g. replication throttle rates), which live on a separate ConfigDef.
 *
 * Rewritten with Option combinators instead of the `.orNull` + null-check pattern.
 */
private def configTypeExact(exactName: String): Option[ConfigDef.Type] = {
  typeOf(exactName).orElse {
    Option(DynamicConfig.Broker.brokerConfigDef.configKeys().get(exactName)).map(_.`type`)
  }
}
/**
 * Whether a config value of the given type must be redacted before logging.
 * An unknown type (None) is treated as sensitive to be safe; for known types,
 * only PASSWORD entries are sensitive. `forall` covers both cases: it is true
 * for None and for Some(PASSWORD).
 */
def maybeSensitive(configType: Option[ConfigDef.Type]): Boolean =
  configType.forall(_ == ConfigDef.Type.PASSWORD)
/**
 * Returns a representation of `value` that is safe to log for the given
 * resource/config pair, substituting `Password.HIDDEN` for possibly
 * sensitive values.
 */
def loggableValue(resourceType: ConfigResource.Type, name: String, value: String): String = {
  // Renamed from `maybeSensitive` to avoid shadowing the object method of the same name.
  val sensitive = resourceType match {
    case ConfigResource.Type.BROKER => KafkaConfig.maybeSensitive(KafkaConfig.configType(name))
    case ConfigResource.Type.TOPIC => KafkaConfig.maybeSensitive(LogConfig.configType(name))
    case ConfigResource.Type.BROKER_LOGGER => false // logger levels are never secret
    case _ => true // unknown resource type: redact to be safe
  }
  if (sensitive) Password.HIDDEN else value
}
}
class KafkaConfig(val props: java.util.Map[_, _], doLog: Boolean, dynamicConfigOverride: Option[DynamicBrokerConfig])
extends AbstractConfig(KafkaConfig.configDef, props, doLog) with Logging {
// Convenience constructors: value logging defaults on, no dynamic-config override.
def this(props: java.util.Map[_, _]) = this(props, true, None)
def this(props: java.util.Map[_, _], doLog: Boolean) = this(props, doLog, None)
// Cache the current config to avoid acquiring read lock to access from dynamicConfig
@volatile private var currentConfig = this
// Dynamic-reconfiguration manager; tests may inject one via dynamicConfigOverride.
private[server] val dynamicConfig = dynamicConfigOverride.getOrElse(new DynamicBrokerConfig(this))
// Swaps in the latest dynamically-reconfigured snapshot; the overridden accessors
// below read through `currentConfig` so callers observe the new values.
private[server] def updateCurrentConfig(newConfig: KafkaConfig): Unit = {
  this.currentConfig = newConfig
}
// The following captures any system properties impacting ZooKeeper TLS configuration
// and defines the default values this instance will use if no explicit config is given.
// We make it part of each instance rather than the object to facilitate testing.
private val zkClientConfigViaSystemProperties = new ZKClientConfig()
// Each accessor below delegates to the latest dynamically-updated snapshot
// (`currentConfig`) unless this instance *is* the current snapshot, so callers
// always observe reconfigured values without taking the DynamicBrokerConfig lock.
override def originals: util.Map[String, AnyRef] =
  if (this eq currentConfig) super.originals else currentConfig.originals
override def values: util.Map[String, _] =
  if (this eq currentConfig) super.values else currentConfig.values
override def nonInternalValues: util.Map[String, _] =
  // Fix: previously delegated to `currentConfig.values`, which (inconsistently with
  // every sibling override) exposed internal configs after a dynamic update.
  if (this eq currentConfig) super.nonInternalValues else currentConfig.nonInternalValues
override def originalsStrings: util.Map[String, String] =
  if (this eq currentConfig) super.originalsStrings else currentConfig.originalsStrings
override def originalsWithPrefix(prefix: String): util.Map[String, AnyRef] =
  if (this eq currentConfig) super.originalsWithPrefix(prefix) else currentConfig.originalsWithPrefix(prefix)
override def valuesWithPrefixOverride(prefix: String): util.Map[String, AnyRef] =
  if (this eq currentConfig) super.valuesWithPrefixOverride(prefix) else currentConfig.valuesWithPrefixOverride(prefix)
override def get(key: String): AnyRef =
  if (this eq currentConfig) super.get(key) else currentConfig.get(key)
// During dynamic update, we use the values from this config, these are only used in DynamicBrokerConfig
private[server] def originalsFromThisConfig: util.Map[String, AnyRef] = super.originals
private[server] def valuesFromThisConfig: util.Map[String, _] = super.values
private[server] def valuesFromThisConfigWithPrefixOverride(prefix: String): util.Map[String, AnyRef] =
  super.valuesWithPrefixOverride(prefix)
/** ********* Zookeeper Configuration ***********/
val zkConnect: String = getString(KafkaConfig.ZkConnectProp)
val zkSessionTimeoutMs: Int = getInt(KafkaConfig.ZkSessionTimeoutMsProp)
// Connection timeout falls back to the session timeout when not explicitly set.
val zkConnectionTimeoutMs: Int =
  Option(getInt(KafkaConfig.ZkConnectionTimeoutMsProp)).map(_.toInt).getOrElse(getInt(KafkaConfig.ZkSessionTimeoutMsProp))
val zkSyncTimeMs: Int = getInt(KafkaConfig.ZkSyncTimeMsProp)
val zkEnableSecureAcls: Boolean = getBoolean(KafkaConfig.ZkEnableSecureAclsProp)
val zkMaxInFlightRequests: Int = getInt(KafkaConfig.ZkMaxInFlightRequestsProp)
/**
 * Boolean ZooKeeper-client setting: an explicitly supplied Kafka config always
 * wins; otherwise the equivalent ZooKeeper client system property (a String) is
 * translated to a Boolean ("true" -> true, any other value -> false). With
 * neither source present, getBoolean yields the declared Kafka default.
 */
private def zkBooleanConfigOrSystemPropertyWithDefaultValue(propKey: String): Boolean = {
  if (originals.containsKey(propKey)) getBoolean(propKey)
  else {
    KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey) match {
      case Some(sysPropValue) => sysPropValue == "true"
      case None => getBoolean(propKey) // not specified, use the Kafka default
    }
  }
}
/**
 * String ZooKeeper-client setting: an explicitly supplied Kafka config always
 * wins; otherwise the equivalent ZooKeeper client system property is used, and
 * finally the declared Kafka default.
 *
 * The `case Some(_) => sysPropValue.get` pattern is replaced with `getOrElse`,
 * removing the unsafe `.get`.
 */
private def zkStringConfigOrSystemPropertyWithDefaultValue(propKey: String): String = {
  if (originals.containsKey(propKey)) getString(propKey)
  else
    KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey)
      .getOrElse(getString(propKey))
}
/**
 * Optional String ZooKeeper-client setting: prefers the Kafka config when set
 * (getString returns null when absent), otherwise consults the corresponding
 * ZooKeeper client system property.
 *
 * The type-pattern match on `Some[String]` (unchecked due to erasure) is
 * replaced with `orElse`.
 */
private def zkOptionalStringConfigOrSystemProperty(propKey: String): Option[String] =
  Option(getString(propKey))
    .orElse(KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey))
/**
 * Optional Password ZooKeeper-client setting: prefers the Kafka config when
 * set; otherwise wraps the raw system-property String (if any) in a `Password`
 * so it is redacted when logged.
 *
 * Replaces the type-pattern match and `sysProp.get` with `orElse`/`map`.
 */
private def zkPasswordConfigOrSystemProperty(propKey: String): Option[Password] =
  Option(getPassword(propKey))
    .orElse(KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey)
      .map(new Password(_)))
/**
 * Optional list ZooKeeper-client setting: prefers the Kafka config when set;
 * otherwise splits the comma-separated system property value, trimming
 * whitespace around each comma.
 *
 * Fixes the split regex: the previous pattern `"\\\\s*,\\\\s*"` denoted the
 * regex `\\s*,\\s*`, which requires a literal backslash before the comma and
 * therefore never split an ordinary "a,b" list; `"\\s*,\\s*"` (whitespace
 * around a comma) is the intended pattern. Also removes `sysProp.get`.
 */
private def zkListConfigOrSystemProperty(propKey: String): Option[util.List[String]] =
  Option(getList(propKey)).orElse {
    KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey)
      .map(_.split("\\s*,\\s*").toList.asJava)
  }
// ZooKeeper TLS settings. Each value prefers the explicit Kafka config and falls
// back to the equivalent ZooKeeper client system property captured at construction
// in zkClientConfigViaSystemProperties.
val zkSslClientEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(KafkaConfig.ZkSslClientEnableProp)
val zkClientCnxnSocketClassName = zkOptionalStringConfigOrSystemProperty(KafkaConfig.ZkClientCnxnSocketProp)
val zkSslKeyStoreLocation = zkOptionalStringConfigOrSystemProperty(KafkaConfig.ZkSslKeyStoreLocationProp)
val zkSslKeyStorePassword = zkPasswordConfigOrSystemProperty(KafkaConfig.ZkSslKeyStorePasswordProp)
val zkSslKeyStoreType = zkOptionalStringConfigOrSystemProperty(KafkaConfig.ZkSslKeyStoreTypeProp)
val zkSslTrustStoreLocation = zkOptionalStringConfigOrSystemProperty(KafkaConfig.ZkSslTrustStoreLocationProp)
val zkSslTrustStorePassword = zkPasswordConfigOrSystemProperty(KafkaConfig.ZkSslTrustStorePasswordProp)
val zkSslTrustStoreType = zkOptionalStringConfigOrSystemProperty(KafkaConfig.ZkSslTrustStoreTypeProp)
val ZkSslProtocol = zkStringConfigOrSystemPropertyWithDefaultValue(KafkaConfig.ZkSslProtocolProp)
val ZkSslEnabledProtocols = zkListConfigOrSystemProperty(KafkaConfig.ZkSslEnabledProtocolsProp)
val ZkSslCipherSuites = zkListConfigOrSystemProperty(KafkaConfig.ZkSslCipherSuitesProp)
val ZkSslEndpointIdentificationAlgorithm = {
  // An explicitly supplied Kafka config always wins. The equivalent ZooKeeper
  // client system property is boolean-ish and is translated to the Kafka
  // representation: "true" -> "HTTPS", anything else -> disabled ("").
  val kafkaProp = KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp
  if (originals.containsKey(kafkaProp)) getString(kafkaProp)
  else {
    KafkaConfig.getZooKeeperClientProperty(zkClientConfigViaSystemProperties, kafkaProp) match {
      case Some(sysPropValue) => if (sysPropValue == "true") "HTTPS" else ""
      case None => getString(kafkaProp) // not specified, use the Kafka default
    }
  }
}
val ZkSslCrlEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(KafkaConfig.ZkSslCrlEnableProp)
val ZkSslOcspEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(KafkaConfig.ZkSslOcspEnableProp)
/** ********* General Configuration ***********/
val brokerIdGenerationEnable: Boolean = getBoolean(KafkaConfig.BrokerIdGenerationEnableProp)
val maxReservedBrokerId: Int = getInt(KafkaConfig.MaxReservedBrokerIdProp)
// node.id (KRaft) takes precedence over broker.id when non-negative.
// NOTE(review): declared `var`, presumably so a generated id can be assigned
// later when brokerIdGenerationEnable is set — confirm at the assignment sites.
var brokerId: Int = {
  val nodeId = getInt(KafkaConfig.NodeIdProp)
  if (nodeId < 0) {
    getInt(KafkaConfig.BrokerIdProp)
  } else {
    nodeId
  }
}
val nodeId: Int = brokerId
val processRoles: Set[ProcessRole] = parseProcessRoles()
val initialRegistrationTimeoutMs: Int = getInt(KafkaConfig.InitialBrokerRegistrationTimeoutMsProp)
val brokerHeartbeatIntervalMs: Int = getInt(KafkaConfig.BrokerHeartbeatIntervalMsProp)
val brokerSessionTimeoutMs: Int = getInt(KafkaConfig.BrokerSessionTimeoutMsProp)
// No process.roles configured means legacy ZooKeeper mode; any role implies KRaft.
def requiresZookeeper: Boolean = processRoles.isEmpty
def usesSelfManagedQuorum: Boolean = processRoles.nonEmpty
/**
 * Parses `process.roles` into the set of roles this node plays. Only "broker"
 * and "controller" are recognized; any other name, or a duplicate entry,
 * raises a ConfigException.
 */
private def parseProcessRoles(): Set[ProcessRole] = {
  val roles = getList(KafkaConfig.ProcessRolesProp).asScala.map { role =>
    if (role == "broker") BrokerRole
    else if (role == "controller") ControllerRole
    else throw new ConfigException(s"Unknown process role '$role'" +
      " (only 'broker' and 'controller' are allowed roles)")
  }
  val distinct = roles.toSet
  if (distinct.size != roles.size)
    throw new ConfigException(s"Duplicate role names found in `${KafkaConfig.ProcessRolesProp}`: $roles")
  distinct
}
/** Directory of the KRaft metadata log; defaults to the first data log dir when unset. */
def metadataLogDir: String =
  Option(getString(KafkaConfig.MetadataLogDirProp)).getOrElse(logDirs.head)
// KRaft metadata log sizing/retention knobs.
def metadataLogSegmentBytes = getInt(KafkaConfig.MetadataLogSegmentBytesProp)
def metadataLogSegmentMillis = getLong(KafkaConfig.MetadataLogSegmentMillisProp)
def metadataRetentionBytes = getLong(KafkaConfig.MetadataMaxRetentionBytesProp)
def metadataRetentionMillis = getLong(KafkaConfig.MetadataMaxRetentionMillisProp)
// `def` members are re-read on each access — presumably so dynamic
// reconfiguration is observed via the currentConfig delegation; `val` members
// are fixed at construction. (NOTE(review): confirm against DynamicBrokerConfig.)
def numNetworkThreads = getInt(KafkaConfig.NumNetworkThreadsProp)
def backgroundThreads = getInt(KafkaConfig.BackgroundThreadsProp)
val queuedMaxRequests = getInt(KafkaConfig.QueuedMaxRequestsProp)
val queuedMaxBytes = getLong(KafkaConfig.QueuedMaxBytesProp)
def numIoThreads = getInt(KafkaConfig.NumIoThreadsProp)
def messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp)
val requestTimeoutMs = getInt(KafkaConfig.RequestTimeoutMsProp)
val connectionSetupTimeoutMs = getLong(KafkaConfig.ConnectionSetupTimeoutMsProp)
val connectionSetupTimeoutMaxMs = getLong(KafkaConfig.ConnectionSetupTimeoutMaxMsProp)
/** Number of replica-alter-log-dirs threads; defaults to one per configured log dir. */
def getNumReplicaAlterLogDirsThreads: Int =
  Option(getInt(KafkaConfig.NumReplicaAlterLogDirsThreadsProp)).fold(logDirs.size)(_.intValue)
/************* Metadata Configuration ***********/
// NOTE(review): presumably the byte threshold of new metadata-log records that
// triggers a snapshot — confirm against the snapshot generation logic.
val metadataSnapshotMaxNewRecordBytes = getLong(KafkaConfig.MetadataSnapshotMaxNewRecordBytesProp)
/************* Authorizer Configuration ***********/
// Optional Authorizer, instantiated from `authorizer.class.name` when that
// config is present and non-empty.
val authorizer: Option[Authorizer] =
  Option(getString(KafkaConfig.AuthorizerClassNameProp))
    .filter(_.nonEmpty)
    .map(AuthorizerUtils.createAuthorizer)
/** ********* Socket Server Configuration ***********/
val socketSendBufferBytes = getInt(KafkaConfig.SocketSendBufferBytesProp)
val socketReceiveBufferBytes = getInt(KafkaConfig.SocketReceiveBufferBytesProp)
val socketRequestMaxBytes = getInt(KafkaConfig.SocketRequestMaxBytesProp)
val maxConnectionsPerIp = getInt(KafkaConfig.MaxConnectionsPerIpProp)
// Per-IP overrides parsed via getMap into host -> connection limit.
val maxConnectionsPerIpOverrides: Map[String, Int] =
  getMap(KafkaConfig.MaxConnectionsPerIpOverridesProp, getString(KafkaConfig.MaxConnectionsPerIpOverridesProp)).map { case (k, v) => (k, v.toInt)}
// `def`: re-read on every access, so dynamically updated limits take effect.
def maxConnections = getInt(KafkaConfig.MaxConnectionsProp)
def maxConnectionCreationRate = getInt(KafkaConfig.MaxConnectionCreationRateProp)
val connectionsMaxIdleMs = getLong(KafkaConfig.ConnectionsMaxIdleMsProp)
val failedAuthenticationDelayMs = getInt(KafkaConfig.FailedAuthenticationDelayMsProp)
/***************** rack configuration **************/
val rack = Option(getString(KafkaConfig.RackProp))
val replicaSelectorClassName = Option(getString(KafkaConfig.ReplicaSelectorClassProp))
/** ********* Log Configuration ***********/
val autoCreateTopicsEnable = getBoolean(KafkaConfig.AutoCreateTopicsEnableProp)
val numPartitions = getInt(KafkaConfig.NumPartitionsProp)
// log.dirs (CSV) takes precedence; falls back to the singular log.dir setting.
val logDirs = CoreUtils.parseCsvList(Option(getString(KafkaConfig.LogDirsProp)).getOrElse(getString(KafkaConfig.LogDirProp)))
def logSegmentBytes = getInt(KafkaConfig.LogSegmentBytesProp)
def logFlushIntervalMessages = getLong(KafkaConfig.LogFlushIntervalMessagesProp)
val logCleanerThreads = getInt(KafkaConfig.LogCleanerThreadsProp)
def numRecoveryThreadsPerDataDir = getInt(KafkaConfig.NumRecoveryThreadsPerDataDirProp)
val logFlushSchedulerIntervalMs = getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp)
// Checkpoint intervals are declared as INT configs but consumed as Long millis.
val logFlushOffsetCheckpointIntervalMs = getInt(KafkaConfig.LogFlushOffsetCheckpointIntervalMsProp).toLong
val logFlushStartOffsetCheckpointIntervalMs = getInt(KafkaConfig.LogFlushStartOffsetCheckpointIntervalMsProp).toLong
val logCleanupIntervalMs = getLong(KafkaConfig.LogCleanupIntervalMsProp)
def logCleanupPolicy = getList(KafkaConfig.LogCleanupPolicyProp)
val offsetsRetentionMinutes = getInt(KafkaConfig.OffsetsRetentionMinutesProp)
val offsetsRetentionCheckIntervalMs = getLong(KafkaConfig.OffsetsRetentionCheckIntervalMsProp)
def logRetentionBytes = getLong(KafkaConfig.LogRetentionBytesProp)
val logCleanerDedupeBufferSize = getLong(KafkaConfig.LogCleanerDedupeBufferSizeProp)
val logCleanerDedupeBufferLoadFactor = getDouble(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp)
val logCleanerIoBufferSize = getInt(KafkaConfig.LogCleanerIoBufferSizeProp)
val logCleanerIoMaxBytesPerSecond = getDouble(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp)
def logCleanerDeleteRetentionMs = getLong(KafkaConfig.LogCleanerDeleteRetentionMsProp)
def logCleanerMinCompactionLagMs = getLong(KafkaConfig.LogCleanerMinCompactionLagMsProp)
def logCleanerMaxCompactionLagMs = getLong(KafkaConfig.LogCleanerMaxCompactionLagMsProp)
val logCleanerBackoffMs = getLong(KafkaConfig.LogCleanerBackoffMsProp)
def logCleanerMinCleanRatio = getDouble(KafkaConfig.LogCleanerMinCleanRatioProp)
val logCleanerEnable = getBoolean(KafkaConfig.LogCleanerEnableProp)
def logIndexSizeMaxBytes = getInt(KafkaConfig.LogIndexSizeMaxBytesProp)
def logIndexIntervalBytes = getInt(KafkaConfig.LogIndexIntervalBytesProp)
def logDeleteDelayMs = getLong(KafkaConfig.LogDeleteDelayMsProp)
// Millis-based settings take precedence over their hours-based counterparts.
def logRollTimeMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeHoursProp))
def logRollTimeJitterMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeJitterMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeJitterHoursProp))
def logFlushIntervalMs: java.lang.Long = Option(getLong(KafkaConfig.LogFlushIntervalMsProp)).getOrElse(getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp))
def minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp)
def logPreAllocateEnable: java.lang.Boolean = getBoolean(KafkaConfig.LogPreAllocateProp)
// We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0`
// is passed, `0.10.0-IV0` may be picked)
@nowarn("cat=deprecation")
private val logMessageFormatVersionString = getString(KafkaConfig.LogMessageFormatVersionProp)
/* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
@deprecated("3.0")
lazy val logMessageFormatVersion =
  if (LogConfig.shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion))
    ApiVersion(Defaults.LogMessageFormatVersion)
  else ApiVersion(logMessageFormatVersionString)
def logMessageTimestampType = TimestampType.forName(getString(KafkaConfig.LogMessageTimestampTypeProp))
def logMessageTimestampDifferenceMaxMs: Long = getLong(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
def logMessageDownConversionEnable: Boolean = getBoolean(KafkaConfig.LogMessageDownConversionEnableProp)
/** ********* Replication configuration ***********/
val controllerSocketTimeoutMs: Int = getInt(KafkaConfig.ControllerSocketTimeoutMsProp)
val defaultReplicationFactor: Int = getInt(KafkaConfig.DefaultReplicationFactorProp)
val replicaLagTimeMaxMs = getLong(KafkaConfig.ReplicaLagTimeMaxMsProp)
val replicaSocketTimeoutMs = getInt(KafkaConfig.ReplicaSocketTimeoutMsProp)
val replicaSocketReceiveBufferBytes = getInt(KafkaConfig.ReplicaSocketReceiveBufferBytesProp)
val replicaFetchMaxBytes = getInt(KafkaConfig.ReplicaFetchMaxBytesProp)
val replicaFetchWaitMaxMs = getInt(KafkaConfig.ReplicaFetchWaitMaxMsProp)
val replicaFetchMinBytes = getInt(KafkaConfig.ReplicaFetchMinBytesProp)
val replicaFetchResponseMaxBytes = getInt(KafkaConfig.ReplicaFetchResponseMaxBytesProp)
val replicaFetchBackoffMs = getInt(KafkaConfig.ReplicaFetchBackoffMsProp)
def numReplicaFetchers = getInt(KafkaConfig.NumReplicaFetchersProp)
val replicaHighWatermarkCheckpointIntervalMs = getLong(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp)
val fetchPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp)
val producerPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp)
val deleteRecordsPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.DeleteRecordsPurgatoryPurgeIntervalRequestsProp)
val autoLeaderRebalanceEnable = getBoolean(KafkaConfig.AutoLeaderRebalanceEnableProp)
val leaderImbalancePerBrokerPercentage = getInt(KafkaConfig.LeaderImbalancePerBrokerPercentageProp)
val leaderImbalanceCheckIntervalSeconds = getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp)
def uncleanLeaderElectionEnable: java.lang.Boolean = getBoolean(KafkaConfig.UncleanLeaderElectionEnableProp)
// We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0`
// is passed, `0.10.0-IV0` may be picked)
val interBrokerProtocolVersionString = getString(KafkaConfig.InterBrokerProtocolVersionProp)
val interBrokerProtocolVersion = ApiVersion(interBrokerProtocolVersionString)
/** ********* Controlled shutdown configuration ***********/
val controlledShutdownMaxRetries = getInt(KafkaConfig.ControlledShutdownMaxRetriesProp)
val controlledShutdownRetryBackoffMs = getLong(KafkaConfig.ControlledShutdownRetryBackoffMsProp)
val controlledShutdownEnable = getBoolean(KafkaConfig.ControlledShutdownEnableProp)
/** ********* Feature configuration ***********/
// Feature versioning requires an inter-broker protocol of at least 2.7-IV0.
def isFeatureVersioningSupported = interBrokerProtocolVersion >= KAFKA_2_7_IV0
/** ********* Group coordinator configuration ***********/
val groupMinSessionTimeoutMs = getInt(KafkaConfig.GroupMinSessionTimeoutMsProp)
val groupMaxSessionTimeoutMs = getInt(KafkaConfig.GroupMaxSessionTimeoutMsProp)
val groupInitialRebalanceDelay = getInt(KafkaConfig.GroupInitialRebalanceDelayMsProp)
val groupMaxSize = getInt(KafkaConfig.GroupMaxSizeProp)
/** ********* Offset management configuration ***********/
val offsetMetadataMaxSize = getInt(KafkaConfig.OffsetMetadataMaxSizeProp)
val offsetsLoadBufferSize = getInt(KafkaConfig.OffsetsLoadBufferSizeProp)
val offsetsTopicReplicationFactor = getShort(KafkaConfig.OffsetsTopicReplicationFactorProp)
val offsetsTopicPartitions = getInt(KafkaConfig.OffsetsTopicPartitionsProp)
val offsetCommitTimeoutMs = getInt(KafkaConfig.OffsetCommitTimeoutMsProp)
val offsetCommitRequiredAcks = getShort(KafkaConfig.OffsetCommitRequiredAcksProp)
val offsetsTopicSegmentBytes = getInt(KafkaConfig.OffsetsTopicSegmentBytesProp)
// Codec id translated to a CompressionCodec; null when the config value is absent.
val offsetsTopicCompressionCodec = Option(getInt(KafkaConfig.OffsetsTopicCompressionCodecProp)).map(value => CompressionCodec.getCompressionCodec(value)).orNull
/** ********* Transaction management configuration ***********/
val transactionalIdExpirationMs = getInt(KafkaConfig.TransactionalIdExpirationMsProp)
val transactionMaxTimeoutMs = getInt(KafkaConfig.TransactionsMaxTimeoutMsProp)
val transactionTopicMinISR = getInt(KafkaConfig.TransactionsTopicMinISRProp)
val transactionsLoadBufferSize = getInt(KafkaConfig.TransactionsLoadBufferSizeProp)
val transactionTopicReplicationFactor = getShort(KafkaConfig.TransactionsTopicReplicationFactorProp)
val transactionTopicPartitions = getInt(KafkaConfig.TransactionsTopicPartitionsProp)
val transactionTopicSegmentBytes = getInt(KafkaConfig.TransactionsTopicSegmentBytesProp)
val transactionAbortTimedOutTransactionCleanupIntervalMs = getInt(KafkaConfig.TransactionsAbortTimedOutTransactionCleanupIntervalMsProp)
val transactionRemoveExpiredTransactionalIdCleanupIntervalMs = getInt(KafkaConfig.TransactionsRemoveExpiredTransactionalIdCleanupIntervalMsProp)
/** ********* Metric Configuration **************/
val metricNumSamples = getInt(KafkaConfig.MetricNumSamplesProp)
val metricSampleWindowMs = getLong(KafkaConfig.MetricSampleWindowMsProp)
val metricRecordingLevel = getString(KafkaConfig.MetricRecordingLevelProp)
/** ********* SSL/SASL Configuration **************/
// Security configs may be overridden for listeners, so it is not safe to use the base values
// Hence the base SSL/SASL configs are not fields of KafkaConfig, listener configs should be
// retrieved using KafkaConfig#valuesWithPrefixOverride
/**
 * SASL mechanisms enabled for the given listener, honoring any per-listener
 * prefixed override of the sasl.enabled.mechanisms property. Returns an
 * empty set when the property is absent.
 */
private def saslEnabledMechanisms(listenerName: ListenerName): Set[String] = {
  val configured = valuesWithPrefixOverride(listenerName.configPrefix).get(KafkaConfig.SaslEnabledMechanismsProp)
  Option(configured)
    .map(_.asInstanceOf[util.List[String]].asScala.toSet)
    .getOrElse(Set.empty[String])
}
// The two inter-broker accessors below project out of the same
// (listener name, security protocol) pair resolved by
// getInterBrokerListenerNameAndSecurityProtocol.
def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1
def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2
def controlPlaneListenerName = getControlPlaneListenerNameAndSecurityProtocol.map { case (listenerName, _) => listenerName }
def controlPlaneSecurityProtocol = getControlPlaneListenerNameAndSecurityProtocol.map { case (_, securityProtocol) => securityProtocol }
def saslMechanismInterBrokerProtocol = getString(KafkaConfig.SaslMechanismInterBrokerProtocolProp)
val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion >= KAFKA_0_10_0_IV1
/** ********* DelegationToken Configuration **************/
// Falls back to the alias property when the primary secret-key property is unset;
// may still be null when neither is configured.
val delegationTokenSecretKey = Option(getPassword(KafkaConfig.DelegationTokenSecretKeyProp))
.getOrElse(getPassword(KafkaConfig.DelegationTokenSecretKeyAliasProp))
// Delegation-token auth is enabled only when a non-empty secret key is configured.
val tokenAuthEnabled = (delegationTokenSecretKey != null && !delegationTokenSecretKey.value.isEmpty)
val delegationTokenMaxLifeMs = getLong(KafkaConfig.DelegationTokenMaxLifeTimeProp)
val delegationTokenExpiryTimeMs = getLong(KafkaConfig.DelegationTokenExpiryTimeMsProp)
val delegationTokenExpiryCheckIntervalMs = getLong(KafkaConfig.DelegationTokenExpiryCheckIntervalMsProp)
/** ********* Password encryption configuration for dynamic configs *********/
// Defined as defs (not vals) so dynamic reconfiguration is picked up on each access.
def passwordEncoderSecret = Option(getPassword(KafkaConfig.PasswordEncoderSecretProp))
def passwordEncoderOldSecret = Option(getPassword(KafkaConfig.PasswordEncoderOldSecretProp))
def passwordEncoderCipherAlgorithm = getString(KafkaConfig.PasswordEncoderCipherAlgorithmProp)
def passwordEncoderKeyFactoryAlgorithm = Option(getString(KafkaConfig.PasswordEncoderKeyFactoryAlgorithmProp))
def passwordEncoderKeyLength = getInt(KafkaConfig.PasswordEncoderKeyLengthProp)
def passwordEncoderIterations = getInt(KafkaConfig.PasswordEncoderIterationsProp)
/** ********* Quota Configuration **************/
val numQuotaSamples = getInt(KafkaConfig.NumQuotaSamplesProp)
val quotaWindowSizeSeconds = getInt(KafkaConfig.QuotaWindowSizeSecondsProp)
val numReplicationQuotaSamples = getInt(KafkaConfig.NumReplicationQuotaSamplesProp)
val replicationQuotaWindowSizeSeconds = getInt(KafkaConfig.ReplicationQuotaWindowSizeSecondsProp)
val numAlterLogDirsReplicationQuotaSamples = getInt(KafkaConfig.NumAlterLogDirsReplicationQuotaSamplesProp)
val alterLogDirsReplicationQuotaWindowSizeSeconds = getInt(KafkaConfig.AlterLogDirsReplicationQuotaWindowSizeSecondsProp)
val numControllerQuotaSamples = getInt(KafkaConfig.NumControllerQuotaSamplesProp)
val controllerQuotaWindowSizeSeconds = getInt(KafkaConfig.ControllerQuotaWindowSizeSecondsProp)
/** ********* Fetch Configuration **************/
val maxIncrementalFetchSessionCacheSlots = getInt(KafkaConfig.MaxIncrementalFetchSessionCacheSlots)
val fetchMaxBytes = getInt(KafkaConfig.FetchMaxBytes)
val deleteTopicEnable = getBoolean(KafkaConfig.DeleteTopicEnableProp)
def compressionType = getString(KafkaConfig.CompressionTypeProp)
/** ********* Raft Quorum Configuration *********/
val quorumVoters = getList(RaftConfig.QUORUM_VOTERS_CONFIG)
val quorumElectionTimeoutMs = getInt(RaftConfig.QUORUM_ELECTION_TIMEOUT_MS_CONFIG)
val quorumFetchTimeoutMs = getInt(RaftConfig.QUORUM_FETCH_TIMEOUT_MS_CONFIG)
val quorumElectionBackoffMs = getInt(RaftConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_CONFIG)
val quorumLingerMs = getInt(RaftConfig.QUORUM_LINGER_MS_CONFIG)
val quorumRequestTimeoutMs = getInt(RaftConfig.QUORUM_REQUEST_TIMEOUT_MS_CONFIG)
val quorumRetryBackoffMs = getInt(RaftConfig.QUORUM_RETRY_BACKOFF_MS_CONFIG)
/** Registers the given reconfigurable with this config's dynamic-config support. */
def addReconfigurable(reconfigurable: Reconfigurable): Unit =
  dynamicConfig.addReconfigurable(reconfigurable)
/** Unregisters the given reconfigurable from this config's dynamic-config support. */
def removeReconfigurable(reconfigurable: Reconfigurable): Unit =
  dynamicConfig.removeReconfigurable(reconfigurable)
/**
 * Effective log retention in milliseconds. Resolution order:
 * log.retention.ms, then log.retention.minutes, then log.retention.hours.
 * Any negative configured value is normalized to -1 (unlimited retention).
 */
def logRetentionTimeMillis: Long = {
  val minuteMs = 60L * 1000L
  val hourMs = 60L * minuteMs
  // Kept as java.lang.Long (as in the null-returning getters) before the final comparison.
  val configuredMs: java.lang.Long =
    Option(getLong(KafkaConfig.LogRetentionTimeMillisProp)).getOrElse {
      Option(getInt(KafkaConfig.LogRetentionTimeMinutesProp)) match {
        case Some(mins) => minuteMs * mins
        case None => getInt(KafkaConfig.LogRetentionTimeHoursProp) * hourMs
      }
    }
  if (configuredMs < 0) -1 else configuredMs
}
/**
 * Parses a CSV-map config value into key/value pairs, converting any parse
 * failure into an IllegalArgumentException that names the offending property.
 */
private def getMap(propName: String, propValue: String): Map[String, String] = {
  try CoreUtils.parseCsvMap(propValue)
  catch {
    case e: Exception =>
      throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage))
  }
}
// All configured endpoints, parsed from the listeners property and mapped to
// security protocols via listenerSecurityProtocolMap.
def listeners: Seq[EndPoint] =
CoreUtils.listenerListToEndPoints(getString(KafkaConfig.ListenersProp), listenerSecurityProtocolMap)
/**
 * Listener names used by the KRaft controller, parsed from
 * controller.listener.names. Returns an empty Seq when the property is unset
 * or blank. Note: "".split(",") yields Array(""), so empty entries must be
 * filtered out; otherwise an unset property would be reported as a single
 * bogus empty listener name.
 */
def controllerListenerNames: Seq[String] =
  Option(getString(KafkaConfig.ControllerListenerNamesProp)).getOrElse("").split(",").filter(_.nonEmpty)
// Subset of the configured listeners whose names appear in controller.listener.names.
def controllerListeners: Seq[EndPoint] =
listeners.filter(l => controllerListenerNames.contains(l.listenerName.value()))
// SASL mechanism used when talking to the controller.
def saslMechanismControllerProtocol = getString(KafkaConfig.SaslMechanismControllerProtocolProp)
/**
 * The endpoint matching control.plane.listener.name, when that name is
 * configured; None otherwise. Assumes validation has ensured a matching
 * listener exists (head on an empty result would throw).
 */
def controlPlaneListener: Option[EndPoint] =
  controlPlaneListenerName.map { listenerName =>
    val matching = listeners.filter(endpoint => endpoint.listenerName.value() == listenerName.value())
    matching.head
  }
/**
 * Listeners that serve regular (data-plane) traffic: everything configured
 * except the control-plane listener and any controller listeners.
 */
def dataPlaneListeners: Seq[EndPoint] = {
  val controlPlaneName = getString(KafkaConfig.ControlPlaneListenerNameProp)
  listeners.filterNot { listener =>
    val name = listener.listenerName.value()
    name.equals(controlPlaneName) || controllerListenerNames.contains(name)
  }
}
/**
 * Endpoints advertised to clients: the advertised.listeners property when
 * set (duplicate ports allowed there), otherwise the configured listeners
 * minus any controller listeners.
 */
def advertisedListeners: Seq[EndPoint] =
  Option(getString(KafkaConfig.AdvertisedListenersProp)) match {
    case Some(advertised) =>
      CoreUtils.listenerListToEndPoints(advertised, listenerSecurityProtocolMap, requireDistinctPorts = false)
    case None =>
      listeners.filterNot(l => controllerListenerNames.contains(l.listenerName.value()))
  }
// Resolves the (listener name, security protocol) pair used for inter-broker
// communication. Exactly one of inter.broker.listener.name and
// security.inter.broker.protocol may be set; the guard on the first case
// rejects configurations that set both.
private def getInterBrokerListenerNameAndSecurityProtocol: (ListenerName, SecurityProtocol) = {
Option(getString(KafkaConfig.InterBrokerListenerNameProp)) match {
case Some(_) if originals.containsKey(KafkaConfig.InterBrokerSecurityProtocolProp) =>
throw new ConfigException(s"Only one of ${KafkaConfig.InterBrokerListenerNameProp} and " +
s"${KafkaConfig.InterBrokerSecurityProtocolProp} should be set.")
// Listener name set: the protocol must be resolvable from the listener map.
case Some(name) =>
val listenerName = ListenerName.normalised(name)
val securityProtocol = listenerSecurityProtocolMap.getOrElse(listenerName,
throw new ConfigException(s"Listener with name ${listenerName.value} defined in " +
s"${KafkaConfig.InterBrokerListenerNameProp} not found in ${KafkaConfig.ListenerSecurityProtocolMapProp}."))
(listenerName, securityProtocol)
// Listener name unset: derive both values from the security protocol property.
case None =>
val securityProtocol = getSecurityProtocol(getString(KafkaConfig.InterBrokerSecurityProtocolProp),
KafkaConfig.InterBrokerSecurityProtocolProp)
(ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
}
}
/**
 * Resolves the (listener name, security protocol) pair for the control plane
 * when control.plane.listener.name is set; None when it is not. Throws when
 * the configured name has no entry in listener.security.protocol.map.
 */
private def getControlPlaneListenerNameAndSecurityProtocol: Option[(ListenerName, SecurityProtocol)] =
  Option(getString(KafkaConfig.ControlPlaneListenerNameProp)).map { name =>
    val listenerName = ListenerName.normalised(name)
    val securityProtocol = listenerSecurityProtocolMap.getOrElse(listenerName,
      throw new ConfigException(s"Listener with ${listenerName.value} defined in " +
        s"${KafkaConfig.ControlPlaneListenerNameProp} not found in ${KafkaConfig.ListenerSecurityProtocolMapProp}."))
    (listenerName, securityProtocol)
  }
/**
 * Looks up a SecurityProtocol by name, translating an unknown name into a
 * ConfigException that identifies the offending config entry.
 */
private def getSecurityProtocol(protocolName: String, configName: String): SecurityProtocol = {
  try {
    SecurityProtocol.forName(protocolName)
  } catch {
    case _: IllegalArgumentException =>
      throw new ConfigException(s"Invalid security protocol `$protocolName` defined in $configName")
  }
}
/**
 * Parses listener.security.protocol.map into a ListenerName -> SecurityProtocol
 * map, normalising listener names and validating each protocol name.
 */
def listenerSecurityProtocolMap: Map[ListenerName, SecurityProtocol] = {
  val configName = KafkaConfig.ListenerSecurityProtocolMapProp
  getMap(configName, getString(configName)).map { case (listenerName, protocolName) =>
    ListenerName.normalised(listenerName) -> getSecurityProtocol(protocolName, configName)
  }
}
// Topic IDs are used with all self-managed quorum clusters and ZK cluster with IBP greater than or equal to 2.8
def usesTopicId: Boolean =
usesSelfManagedQuorum || interBrokerProtocolVersion >= KAFKA_2_8_IV0
// Eagerly validate the assembled configuration at construction time; throws on invalid settings.
validateValues()
// Cross-field validation of the assembled configuration. Invoked once from the
// class body; any violation is reported via ConfigException or
// IllegalArgumentException (from require).
@nowarn("cat=deprecation")
private def validateValues(): Unit = {
// --- Mode-specific checks: ZooKeeper-based vs KRaft-based metadata quorum ---
if (requiresZookeeper) {
if (zkConnect == null) {
throw new ConfigException(s"Missing required configuration `${KafkaConfig.ZkConnectProp}` which has no default value.")
}
if (brokerIdGenerationEnable) {
require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be greater than or equal to -1 and not greater than reserved.broker.max.id")
} else {
require(brokerId >= 0, "broker.id must be greater than or equal to 0")
}
} else {
// KRaft-based metadata quorum
if (nodeId < 0) {
throw new ConfigException(s"Missing configuration `${KafkaConfig.NodeIdProp}` which is required " +
s"when `process.roles` is defined (i.e. when running in KRaft mode).")
}
// Validate process.roles with controller.quorum.voters
val voterIds: Set[Integer] = RaftConfig.parseVoterConnections(quorumVoters).asScala.keySet.toSet
if (voterIds.isEmpty) {
throw new ConfigException(s"If using ${KafkaConfig.ProcessRolesProp}, ${KafkaConfig.QuorumVotersProp} must contain a parseable set of voters.")
} else if (processRoles.contains(ControllerRole)) {
// Ensure that controllers use their node.id as a voter in controller.quorum.voters
require(voterIds.contains(nodeId), s"If ${KafkaConfig.ProcessRolesProp} contains the 'controller' role, the node id $nodeId must be included in the set of voters ${KafkaConfig.QuorumVotersProp}=$voterIds")
} else {
// Ensure that the broker's node.id is not an id in controller.quorum.voters
require(!voterIds.contains(nodeId), s"If ${KafkaConfig.ProcessRolesProp} does not contain the 'controller' role, the node id $nodeId must not be included in the set of voters ${KafkaConfig.QuorumVotersProp}=$voterIds")
}
require(getClass(KafkaConfig.AlterConfigPolicyClassNameProp) == null, s"${KafkaConfig.AlterConfigPolicyClassNameProp} is not supported in KRaft.")
require(getClass(KafkaConfig.CreateTopicPolicyClassNameProp) == null, s"${KafkaConfig.CreateTopicPolicyClassNameProp} is not supported in KRaft.")
}
// --- Log, replication and offsets invariants (apply to both modes) ---
require(logRollTimeMillis >= 1, "log.roll.ms must be greater than or equal to 1")
require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be greater than or equal to 0")
require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, greater than or equal to 1")
require(logDirs.nonEmpty, "At least one log directory must be defined via log.dirs or log.dir.")
require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.")
require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" +
" to prevent unnecessary socket timeouts")
require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be less than or equal to replica.lag.time.max.ms" +
" to prevent frequent changes in ISR")
require(offsetCommitRequiredAcks >= -1 && offsetCommitRequiredAcks <= offsetsTopicReplicationFactor,
"offsets.commit.required.acks must be greater or equal -1 and less or equal to offsets.topic.replication.factor")
require(BrokerCompressionCodec.isValid(compressionType), "compression.type : " + compressionType + " is not valid." +
" Valid options are " + BrokerCompressionCodec.brokerCompressionOptions.mkString(","))
require(!processRoles.contains(ControllerRole) || controllerListeners.nonEmpty,
s"${KafkaConfig.ControllerListenerNamesProp} cannot be empty if the server has the controller role")
// --- Listener / advertised-listener consistency checks ---
val advertisedListenerNames = advertisedListeners.map(_.listenerName).toSet
val listenerNames = listeners.map(_.listenerName).toSet
if (processRoles.isEmpty || processRoles.contains(BrokerRole)) {
require(advertisedListenerNames.contains(interBrokerListenerName),
s"${KafkaConfig.InterBrokerListenerNameProp} must be a listener name defined in ${KafkaConfig.AdvertisedListenersProp}. " +
s"The valid options based on currently configured listeners are ${advertisedListenerNames.map(_.value).mkString(",")}")
require(advertisedListenerNames.subsetOf(listenerNames),
s"${KafkaConfig.AdvertisedListenersProp} listener names must be equal to or a subset of the ones defined in ${KafkaConfig.ListenersProp}. " +
s"Found ${advertisedListenerNames.map(_.value).mkString(",")}. The valid options based on the current configuration " +
s"are ${listenerNames.map(_.value).mkString(",")}"
)
}
require(!advertisedListeners.exists(endpoint => endpoint.host=="0.0.0.0"),
s"${KafkaConfig.AdvertisedListenersProp} cannot use the nonroutable meta-address 0.0.0.0. "+
s"Use a routable IP address.")
// Ensure controller listeners are not in the advertised listeners list
require(!controllerListeners.exists(advertisedListeners.contains),
s"${KafkaConfig.AdvertisedListenersProp} cannot contain any of ${KafkaConfig.ControllerListenerNamesProp}")
// validate controller.listener.name config
if (controlPlaneListenerName.isDefined) {
require(advertisedListenerNames.contains(controlPlaneListenerName.get),
s"${KafkaConfig.ControlPlaneListenerNameProp} must be a listener name defined in ${KafkaConfig.AdvertisedListenersProp}. " +
s"The valid options based on currently configured listeners are ${advertisedListenerNames.map(_.value).mkString(",")}")
// controlPlaneListenerName should be different from interBrokerListenerName
require(!controlPlaneListenerName.get.value().equals(interBrokerListenerName.value()),
s"${KafkaConfig.ControlPlaneListenerNameProp}, when defined, should have a different value from the inter broker listener name. " +
s"Currently they both have the value ${controlPlaneListenerName.get}")
}
// --- Message format / protocol version compatibility ---
val messageFormatVersion = new MessageFormatVersion(logMessageFormatVersionString, interBrokerProtocolVersionString)
if (messageFormatVersion.shouldWarn)
warn(messageFormatVersion.brokerWarningMessage)
val recordVersion = logMessageFormatVersion.recordVersion
require(interBrokerProtocolVersion.recordVersion.value >= recordVersion.value,
s"log.message.format.version $logMessageFormatVersionString can only be used when inter.broker.protocol.version " +
s"is set to version ${ApiVersion.minSupportedFor(recordVersion).shortVersion} or higher")
if (offsetsTopicCompressionCodec == ZStdCompressionCodec)
require(interBrokerProtocolVersion.recordVersion.value >= KAFKA_2_1_IV0.recordVersion.value,
"offsets.topic.compression.codec zstd can only be used when inter.broker.protocol.version " +
s"is set to version ${KAFKA_2_1_IV0.shortVersion} or higher")
// --- SASL inter-broker constraints ---
val interBrokerUsesSasl = interBrokerSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT || interBrokerSecurityProtocol == SecurityProtocol.SASL_SSL
require(!interBrokerUsesSasl || saslInterBrokerHandshakeRequestEnable || saslMechanismInterBrokerProtocol == SaslConfigs.GSSAPI_MECHANISM,
s"Only GSSAPI mechanism is supported for inter-broker communication with SASL when inter.broker.protocol.version is set to $interBrokerProtocolVersionString")
require(!interBrokerUsesSasl || saslEnabledMechanisms(interBrokerListenerName).contains(saslMechanismInterBrokerProtocol),
s"${KafkaConfig.SaslMechanismInterBrokerProtocolProp} must be included in ${KafkaConfig.SaslEnabledMechanismsProp} when SASL is used for inter-broker communication")
// --- Network / connection limits ---
require(queuedMaxBytes <= 0 || queuedMaxBytes >= socketRequestMaxBytes,
s"${KafkaConfig.QueuedMaxBytesProp} must be larger or equal to ${KafkaConfig.SocketRequestMaxBytesProp}")
if (maxConnectionsPerIp == 0)
require(!maxConnectionsPerIpOverrides.isEmpty, s"${KafkaConfig.MaxConnectionsPerIpProp} can be set to zero only if" +
s" ${KafkaConfig.MaxConnectionsPerIpOverridesProp} property is set.")
val invalidAddresses = maxConnectionsPerIpOverrides.keys.filterNot(address => Utils.validHostPattern(address))
if (!invalidAddresses.isEmpty)
throw new IllegalArgumentException(s"${KafkaConfig.MaxConnectionsPerIpOverridesProp} contains invalid addresses : ${invalidAddresses.mkString(",")}")
if (connectionsMaxIdleMs >= 0)
require(failedAuthenticationDelayMs < connectionsMaxIdleMs,
s"${KafkaConfig.FailedAuthenticationDelayMsProp}=$failedAuthenticationDelayMs should always be less than" +
s" ${KafkaConfig.ConnectionsMaxIdleMsProp}=$connectionsMaxIdleMs to prevent failed" +
s" authentication responses from timing out")
// --- Principal builder requirements ---
val principalBuilderClass = getClass(KafkaConfig.PrincipalBuilderClassProp)
require(principalBuilderClass != null, s"${KafkaConfig.PrincipalBuilderClassProp} must be non-null")
require(classOf[KafkaPrincipalSerde].isAssignableFrom(principalBuilderClass),
s"${KafkaConfig.PrincipalBuilderClassProp} must implement KafkaPrincipalSerde")
}
}
| lindong28/kafka | core/src/main/scala/kafka/server/KafkaConfig.scala | Scala | apache-2.0 | 151,217 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.request.builder
import io.gatling.core.session.Expression
// Sealed ADT describing the forms in which HTTP parameters can be supplied to
// a request builder. All variants carry Expression-wrapped keys/values.
sealed trait HttpParam
// One key paired with exactly one value.
final case class SimpleParam(key: Expression[String], value: Expression[Any]) extends HttpParam
// One key paired with a sequence of values.
final case class MultivaluedParam(key: Expression[String], values: Expression[Seq[Any]]) extends HttpParam
// An ordered sequence of (key, value) pairs.
final case class ParamSeq(seq: Expression[Seq[(String, Any)]]) extends HttpParam
// A map from keys to values.
final case class ParamMap(map: Expression[Map[String, Any]]) extends HttpParam
| gatling/gatling | gatling-http/src/main/scala/io/gatling/http/request/builder/HttpParam.scala | Scala | apache-2.0 | 1,089 |
package io.flow.delta.actors
import java.util.concurrent.atomic.AtomicReference
import db.{ConfigsDao, OrganizationsDao, ProjectsDao}
import io.flow.delta.api.lib.{GithubUtil, Repo}
import io.flow.delta.config.v0.models.{ConfigError, ConfigProject, ConfigUndefinedType}
import io.flow.delta.v0.models.{Organization, Project}
import io.flow.delta.v0.models.json._
import io.flow.log.RollbarLogger
import io.flow.postgresql.Authorization
import play.api.libs.json.Json
/**
 * Mixin for actors that operate on a single delta project. Callers first
 * resolve a project via setProjectId; the with* helpers then run a function
 * against the resolved project, its organization, its configuration or its
 * parsed github repository, doing nothing (or returning None) otherwise.
 */
trait DataProject {

  val logger: RollbarLogger

  def configsDao: ConfigsDao
  def organizationsDao: OrganizationsDao
  def projectsDao: ProjectsDao

  // Most recently resolved project; None until setProjectId finds one.
  private[this] val dataProject: AtomicReference[Option[Project]] = new AtomicReference(None)

  /**
   * Looks up the project with the specified ID and stores the result (which
   * may be None) as the current project, warning when nothing was found.
   */
  def setProjectId(id: String): Unit = {
    val resolved = projectsDao.findById(Authorization.All, id)
    dataProject.set(resolved)
    if (resolved.isEmpty) {
      logger.withKeyValue("project_id", id).warn(s"Could not find project")
    }
  }

  def getProject: Option[Project] = dataProject.get

  /** Runs f against the current project, when one has been set. */
  def withProject[T](f: Project => T): Unit =
    getProject.foreach(f)

  /** Runs f against the current project's organization, when both exist. */
  def withOrganization[T](f: Organization => T): Unit =
    for {
      project <- getProject
      org <- organizationsDao.findById(Authorization.All, project.organization.id)
    } f(org)

  /**
   * Runs f against the current project's configuration when it exists and is
   * a valid ConfigProject; logs and returns None otherwise.
   */
  def withConfig[T](f: ConfigProject => T): Option[T] =
    getProject.flatMap { project =>
      configsDao.findByProjectId(Authorization.All, project.id).map(_.config) match {
        case Some(valid: ConfigProject) =>
          Some(f(valid))
        case Some(ConfigError(_)) | Some(ConfigUndefinedType(_)) =>
          logger.withKeyValue("project_id", project.id).info(s"Project has an erroneous configuration")
          None
        case None =>
          logger.withKeyValue("project_id", project.id).info(s"Project does not have a configuration")
          None
      }
    }

  /**
   * Runs f against the current project's github Repo when the project URI
   * parses; logs and returns None otherwise.
   */
  def withRepo[T](f: Repo => T): Option[T] =
    getProject.flatMap { project =>
      GithubUtil.parseUri(project.uri) match {
        case Right(repo) =>
          Some(f(repo))
        case Left(error) =>
          logger.withKeyValue("project", Json.toJson(project)).withKeyValue("error", error).warn(s"Cannot parse repo from project")
          None
      }
    }
}
| flowcommerce/delta | api/app/actors/DataProject.scala | Scala | mit | 3,035 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of Scala code snippets that meet specific criteria, giving a basic overview of the dataset's contents without deeper analysis.