code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package uk.co.pollett.flink.newsreader.nlp.classify
import opennlp.tools.doccat.{DoccatModel, DocumentCategorizerME, DocumentSampleStream}
import opennlp.tools.util.PlainTextByLineStream
class Sentiment {

  /**
    * Trains a sentiment document-categorizer model from the bundled
    * "/sentimentdatatext" classpath resource and serializes the result
    * to /tmp/output.bin.
    *
    * Fixes over the previous version: `val` instead of `def` so the model
    * is trained exactly once, the input and output streams are closed
    * (the buffered output stream was previously never flushed/closed),
    * the unused cutoff/iteration locals and the leftover 30 second debug
    * sleep are removed.
    */
  def train(): Unit = {
    val dataIn = getClass.getResourceAsStream("/sentimentdatatext")
    try {
      val lineStream = new PlainTextByLineStream(dataIn, "UTF-8")
      val sampleStream = new DocumentSampleStream(lineStream)
      val model = DocumentCategorizerME.train("en", sampleStream)

      import java.io.BufferedOutputStream
      import java.io.FileOutputStream
      val modelOut = new BufferedOutputStream(new FileOutputStream("/tmp/output.bin"))
      try {
        model.serialize(modelOut)
      } finally {
        // Closing also flushes the buffer so the model file is complete.
        modelOut.close()
      }
      println("model out")
    } finally {
      dataIn.close()
    }
  }

  /**
    * Categorizes the given tokenized text using the pre-trained
    * "/en-sentiment.bin" model and returns the best matching category.
    *
    * Previously every `def` in the chain was re-evaluated per reference,
    * re-opening the model resource several times per call and never
    * closing it; the model is now loaded once per call and the stream is
    * closed. NOTE(review): if this is on a hot path, consider caching the
    * loaded DoccatModel instead of reloading it on each invocation.
    *
    * @param text the tokenized document
    * @return the best category name for the document
    */
  def categorize(text: List[String]): String = {
    val modelIn = getClass.getResourceAsStream("/en-sentiment.bin")
    try {
      val model = new DoccatModel(modelIn)
      val categorizer = new DocumentCategorizerME(model)
      val outcomes = categorizer.categorize(text.toArray)
      categorizer.getBestCategory(outcomes)
    } finally {
      modelIn.close()
    }
  }
}
| pollett/flink-newsreader | src/main/scala/uk/co/pollett/flink/newsreader/nlp/classify/Sentiment.scala | Scala | mit | 1,200 |
package com.kaching.logos
import org.scalatest.{BeforeAndAfterEach, Matchers, WordSpec}
import uk.org.lidalia.slf4jtest.{LoggingEvent, TestLoggerFactory}
import com.kaching.logos.Ops._
/**
  * Verifies the logging operators in [[Ops]]: the prefix operators
  * (~|:, -|:, +|:, *|:) log *before* the wrapped expression executes and the
  * postfix operators (:|~, :|-, :|+, :|*) log *after* it, at the
  * info/warn/debug/error level respectively.
  *
  * The per-case descriptions previously all read "generate info log before
  * base execution" (copy-paste); they now state the actual level and order
  * each case asserts.
  */
class OpsTest extends WordSpec with Matchers with BeforeAndAfterEach {

  // Implicit logger picked up by the Ops operators; the test logger records
  // every event so ordering can be asserted.
  implicit val logger = TestLoggerFactory.getTestLogger(classOf[OpsTest])

  "Operator" when {
    "Info ~|: (before)" should {
      "generate info log before base execution" in {
        "foo" ~|: logger.error("middle")
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.info("foo"),
          LoggingEvent.error("middle")
        )
      }
    }
    "Warn -|: (before)" should {
      "generate warn log before base execution" in {
        "foo" -|: logger.error("middle")
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.warn("foo"),
          LoggingEvent.error("middle")
        )
      }
    }
    "Debug +|: (before)" should {
      "generate debug log before base execution" in {
        "foo" +|: logger.error("middle")
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.debug("foo"),
          LoggingEvent.error("middle")
        )
      }
    }
    "Error *|: (before)" should {
      "generate error log before base execution" in {
        "foo" *|: logger.error("middle")
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.error("foo"),
          LoggingEvent.error("middle")
        )
      }
    }
    "Info :|~ (after)" should {
      "generate info log after base execution" in {
        logger.error("middle") :|~ "foo"
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.error("middle"),
          LoggingEvent.info("foo")
        )
      }
    }
    "Warn :|- (after)" should {
      "generate warn log after base execution" in {
        logger.error("middle") :|- "foo"
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.error("middle"),
          LoggingEvent.warn("foo")
        )
      }
    }
    "Debug :|+ (after)" should {
      "generate debug log after base execution" in {
        logger.error("middle") :|+ "foo"
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.error("middle"),
          LoggingEvent.debug("foo")
        )
      }
    }
    "Error :|* (after)" should {
      "generate error log after base execution" in {
        logger.error("middle") :|* "foo"
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.error("middle"),
          LoggingEvent.error("foo")
        )
      }
    }
    "stacked" should {
      "apply one to another with correct execution order" in {
        "foo" +|: "foo1" ~|: logger.error("middle") :|~ "foo2" :|- "foo3" :|* "foo4"
        logger.getLoggingEvents should contain theSameElementsInOrderAs List(
          LoggingEvent.debug("foo"),
          LoggingEvent.info("foo1"),
          LoggingEvent.error("middle"),
          LoggingEvent.info("foo2"),
          LoggingEvent.warn("foo3"),
          LoggingEvent.error("foo4")
        )
      }
    }
  }

  // Reset the recorded events between tests so ordering assertions are isolated.
  override protected def afterEach(): Unit = TestLoggerFactory.clearAll()
}
| kaching88/logos | core/src/test/scala/com/kaching/logos/OpsTest.scala | Scala | mit | 3,380 |
package howitworks.wip
import rng.RNG
/**
  * Exploratory spec building random arithmetic quiz questions on top of the
  * State monad over an [[RNG]].
  *
  * Bug fix: the quiz compared the user's `String` answer against the
  * expected `Long` with universal equality (`answer == t._2`), which is
  * always false across unrelated types — the quiz could never report
  * success. The comparison is now done against the decimal rendering of
  * the expected answer.
  */
class RandomQuestions extends wp.Spec {

  "random questions " in {
    import cats.data.State

    type StateRNG[A] = State[RNG, A]

    // Basic generators threaded through the RNG state.
    val nextLong: State[RNG, Long] = State { rng => (rng.next, rng.run) }
    // NOTE(review): math.abs(Long.MinValue) is still negative — assumed
    // acceptable for this exploratory spec.
    val nextPositiveLong: State[RNG, Long] = nextLong.map(math.abs)

    // The supported arithmetic operators.
    sealed trait Op
    case object Plus extends Op
    case object Minus extends Op
    case object Multi extends Op

    val allOps = Array(Plus, Minus, Multi)
    val nextOperator: State[RNG, Op] = nextPositiveLong.map(x => allOps((x % 3).toInt))

    type Challenge = String
    type ExpectedAns = Long
    type ChallengeResult = Long

    // Render a question text together with its expected numeric answer.
    def opToQuestion(a: Long, b: Long, op: Op): (Challenge, ExpectedAns) = op match {
      case Plus  => (s"What is $a + $b?", a + b)
      case Minus => (s"What is $a - $b?", a - b)
      case Multi => (s"What is $a * $b?", a * b)
    }

    val nextQuestion: StateRNG[(Challenge, ExpectedAns)] = for {
      a <- nextLong
      b <- nextLong
      op <- nextOperator
    } yield opToQuestion(a, b, op)

    // Sketch of a quiz-session state machine (currently unused below).
    sealed trait QuizState
    case class Initial(name: String, rng: RNG) extends QuizState
    case class GotToken(token: String, name: String) extends QuizState
    case class Question(token: String, name: String, no: Int) extends QuizState
    case class Failed(token: String, name: String, no: Int)
    case class Success(token: String)

    def nextToken(name: String) = nextPositiveLong.map(n => s"name$n")

    // `answerHook` receives the challenge text and returns the user's answer.
    def quiz(answerHook: String => String): State[RNG, Challenge] = for {
      t <- nextQuestion
    } yield {
      val answer = answerHook(t._1)
      // Compare against the decimal rendering of the expected Long answer.
      if (answer == t._2.toString) "Great Success" else "Spot"
    }
  }
}
}
| jawp/wicked-playground | modules/server/src/test/scala/howitworks/wip/RandomQuestions.scala | Scala | mit | 1,691 |
package com.arcusys.valamis.web.servlet.lessonStudio
import com.arcusys.learn.liferay.util.PortletName
import com.arcusys.valamis.web.portlet.base.ViewPermission
import com.arcusys.valamis.web.servlet.base.{BaseApiController, PermissionUtil}
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClients
import scala.util.{Failure, Success, Try}
/**
* Url checks
*/
class UrlServlet extends BaseApiController {

  before(request.getMethod == "POST") {
    PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonStudio)
  }

  /**
    * Checks whether the given URL may be embedded in a frame.
    * Responds with `true` when the target sets no X-Frame-Options header,
    * `false` when the header is present, the URL is empty, or the request
    * fails.
    *
    * Fix: the HTTP client (and the response, on partial failure) was leaked
    * whenever `execute()` or header access threw — both are now closed in
    * `finally` blocks.
    */
  post("/url/check(/)") {
    val url = params("url").trim
    if (url.nonEmpty) {
      val client = HttpClients.createDefault()
      try {
        Try {
          val httpResponse = client.execute(new HttpGet(url))
          try {
            // Header names are case-insensitive in HTTP, but this lookup is
            // literal, hence the two spellings.
            httpResponse.getHeaders("X-Frame-Options") ++ httpResponse.getHeaders("x-frame-options")
          } finally {
            httpResponse.close()
          }
        } match {
          case Success(headers) => headers.length == 0
          case Failure(ex) =>
            log.warn(ex.getMessage)
            false
        }
      } finally {
        client.close()
      }
    } else false
  }
}
| arcusys/Valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/lessonStudio/UrlServlet.scala | Scala | gpl-3.0 | 1,186 |
package tupol.sparx
import java.io.PrintWriter
import org.apache.hadoop.conf.{ Configuration => HadoopConfiguration }
import org.apache.hadoop.fs.{ Path, FileSystem }
import org.apache.spark.sql.DataFrame
import scala.util.Try
/**
*
*/
/**
 * Benchmark helpers: DataFrame scaling, timing, and HDFS file utilities.
 */
package object benchmarks {

  /**
   * DataFrame decorator adding utility functions for benchmarking.
   *
   * @param data the decorated data frame
   */
  implicit class TestDataFrame(val data: DataFrame) {

    /**
     * Reduces or increases the size of a data frame with the given ratio.
     *
     * @param ratio strictly positive scaling factor; e.g. 2.5 yields two
     *              full copies of the data plus half of it
     * @return the scaled data frame
     */
    def scale(ratio: Double): DataFrame = {
      require(ratio > 0)
      require(data.count > 0)
      val i = ratio.toInt
      val r = ratio - i
      // Whole copies of the input (empty frame when ratio < 1)...
      val p1 = if (i >= 1) (1 until i).foldLeft(data)((acc, i) => acc unionAll data) else data.limit(0)
      // ...plus the fractional remainder.
      val p2 = data.limit((data.count * r).round.toInt)
      val rez = p1 unionAll p2
      rez
    }

    /**
     * Reduces or increases the size of a data frame to the given number of rows.
     *
     * @param records strictly positive target row count
     * @return the scaled data frame
     */
    def scale(records: Int): DataFrame = {
      require(records > 0)
      require(data.count > 0)
      scale(records.toDouble / data.count)
    }
  }

  /**
   * Run a block and return the block result and the runtime in millis.
   *
   * @param block the code to time (evaluated exactly once)
   * @return a pair of (result, elapsed milliseconds)
   */
  def timeCode[T](block: => T): (T, Long) = {
    val start = new java.util.Date
    val result = block
    val runtime = (new java.util.Date).toInstant.toEpochMilli - start.toInstant.toEpochMilli
    (result, runtime)
  }

  /** Recursively deletes the given HDFS path; returns the delete() result. */
  def removeHdfsFile(path: String) = {
    val hdfs = FileSystem.get(new HadoopConfiguration())
    val workingPath = new Path(path)
    hdfs.delete(workingPath, true) // delete recursively
  }

  /**
   * Save a sequence of Strings as lines in an HDFS file.
   * Individual line-write failures are deliberately swallowed (best effort).
   *
   * @param rez lines to write
   * @param path target HDFS path
   * @param extension optional extension appended via addExtension
   * @param overwrite overwrite an existing file when true
   * @return a Try that fails only if the file could not be created
   */
  def saveLinesToFile(rez: Iterable[String], path: String, extension: String = "", overwrite: Boolean = true) = {
    import java.io.BufferedOutputStream
    Try {
      // Create the HDFS file system handle
      val hdfs = FileSystem.get(new HadoopConfiguration())
      // Create a writer (auto-flush enabled)
      val writer = new PrintWriter(new BufferedOutputStream(hdfs.create(new Path(addExtension(path, extension)), overwrite)), true)
      //write each line
      rez.foreach(line => Try(writer.print(line + "\n")))
      // Close the streams
      Try(writer.close())
    }
  }

  /**
   * Set the extension to the given path preventing the double extension.
   *
   * Fix: the trimmed extension (`normExt`) is now used consistently —
   * previously the untrimmed value leaked into the result whenever the
   * extension carried surrounding whitespace.
   *
   * @param path the base path
   * @param extension the extension, with or without a leading dot
   * @return the path ending in exactly one copy of the extension
   */
  private[this] def addExtension(path: String, extension: String) = {
    val normExt = extension.trim
    val ext = if (normExt.startsWith(".") || normExt.isEmpty) normExt else "." + normExt
    if (path.endsWith(ext)) path
    else path + ext
  }
}
| tupol/serial-killer | src/main/scala/tupol/sparx/benchmarks/package.scala | Scala | apache-2.0 | 2,846 |
// Compiler regression test (pos/t0227): type inference across a varargs
// method combined with abstract type members. Do not "improve" — the exact
// shapes are what the compiler test exercises.
final class Settings {
  // Varargs helper; always returns Nil — only the signature matters here.
  def f[T](a_args: T*): List[T] = Nil
}
// Factory with an upper-bounded abstract type member; apply() is never
// meant to run (this is a compile-only test).
abstract class Factory {
  type libraryType <: Base
  final def apply(settings: Settings): libraryType = sys.error("bla")
}
// Base of the factory-produced hierarchy; `demands` is what SA overrides below.
abstract class Base {
  val settings: Settings
  protected val demands: List[Factory] = Nil
}
// The interesting case: concatenating a List literal with the varargs call
// must still infer List[Factory] for the override to type-check.
class SA(val settings: Settings) extends Base {
  override val demands = List(
    SD
  ) ::: settings.f(
    SC
  )
}
// Concrete factory fixing the abstract type member to the hierarchy root.
object SC extends Factory {
  type libraryType = Base
}
// Concrete factory fixing the abstract type member to the subclass SA.
object SD extends Factory {
  type libraryType = SA
}
| yusuke2255/dotty | tests/pos/t0227.scala | Scala | bsd-3-clause | 563 |
// Compiler test (pos/i6288): opaque alias of a literal type. Inside the
// defining object the literal 3 is known to conform to T.
object A {
  opaque type T = 3
  object T {
    val x: T = 3
  }
}
// Compiler test (pos/i6288): opaque alias of a *singleton* type; within the
// companion the alias is transparent in both directions.
object O {
  val x = 3
  opaque type T = x.type
  object T {
    def wrap(a: x.type): T = a // was an error, now OK
    def unwrap(a: T): x.type = a // OK
  }
} | som-snytt/dotty | tests/pos/i6288.scala | Scala | apache-2.0 | 230 |
package io.github.mandar2812.PlasmaML.dynamics.diffusion
import breeze.linalg.{DenseMatrix, DenseVector, diag, kron, norm}
import io.github.mandar2812.dynaml.graphics.charts.Highcharts._
import io.github.mandar2812.PlasmaML.dynamics.diffusion.SGRadialDiffusionModel.GaussianQuadrature
import io.github.mandar2812.PlasmaML.utils.DiracTuple2Kernel
import io.github.mandar2812.dynaml.DynaMLPipe._
import io.github.mandar2812.dynaml.kernels.LocalScalarKernel
import io.github.mandar2812.dynaml.models.gp.AbstractGPRegressionModel
import io.github.mandar2812.dynaml.optimization.GloballyOptimizable
import io.github.mandar2812.dynaml.pipes._
import io.github.mandar2812.dynaml.utils
import org.apache.log4j.Logger
import io.github.mandar2812.dynaml.probability.UniformRV
import io.github.mandar2812.PlasmaML.dynamics.diffusion.SGRadialDiffusionModel.QuadratureRule
/**
* Inverse inference over plasma radial diffusion parameters.
*
* @param Kp A function which returns the Kp value for a given
* time coordinate. Must be cast as a [[DataPipe]]
*
* @param init_dll_params A [[Tuple4]] containing the diffusion field
* parameters. See [[io.github.mandar2812.PlasmaML.utils.MagConfigEncoding]] and
* [[MagnetosphericProcessTrend]].
*
* @param init_lambda_params A [[Tuple4]] containing the loss process parameters.
*
* @param init_q_params A [[Tuple4]] containing the injection process parameters.
*
* @param covariance A kernel function representing the covariance of
* the Phase Space Density at a pair of space time locations.
*
* @param noise_psd A kernel function representing the measurement noise of the
* Phase Space Density at a pair of space time locations.
*
* @param psd_data A Stream of space time locations and measured PSD values.
*
* @param basis A basis function expansion for the PSD, as an instance
* of [[PSDBasis]].
*
* @param lShellDomain A tuple specifying the limits of the spatial domain (L*)
*
* @param timeDomain A tuple specifying the limits of the temporal domain (t)
*
* @param quadrature_l A quadrature method (space domain), used for computing an approximation
* to the weak form PDE.
* @param quadrature_t A quadrature method (time domain), used for computing an approximation
* to the weak form PDE.
* */
class SGRadialDiffusionModel(
  val Kp: DataPipe[Double, Double],
  init_dll_params: (Double, Double, Double, Double),
  init_lambda_params: (Double, Double, Double, Double),
  init_q_params: (Double, Double, Double, Double)
)(val covariance: LocalScalarKernel[(Double, Double)],
  val noise_psd: DiracTuple2Kernel,
  val psd_data: Stream[((Double, Double), Double)],
  val basis: PSDBasis,
  val lShellDomain: (Double, Double),
  val timeDomain: (Double, Double),
  val quadrature_l: QuadratureRule =
    SGRadialDiffusionModel.eightPointGaussLegendre,
  val quadrature_t: QuadratureRule =
    SGRadialDiffusionModel.eightPointGaussLegendre,
  val hyper_param_basis: Map[String, MagParamBasis] = Map(),
  val basisCovFlag: Boolean = true)
    extends GloballyOptimizable {

  protected val logger: Logger = Logger.getLogger(this.getClass)

  // Prefixes used to namespace the kernel / noise hyper-parameters inside
  // the flat hyper-parameter state map shared with the optimizer.
  private val baseCovID: String = "base::" + covariance.toString
    .split("\\\\.")
    .last

  private val baseNoiseID: String = "base_noise::" + noise_psd.toString
    .split("\\\\.")
    .last

  // Parametric trends for the diffusion field D_LL, the loss rate lambda and
  // the boundary injection process Q (all driven by the Kp index).
  val diffusionField: MagTrend = MagTrend(Kp, "dll")
  val lossRate: MagTrend = MagTrend(Kp, "lambda")
  val injection_process: BoundaryInjection = BoundaryInjection(Kp, lShellDomain._2, "Q")

  //Compute the integration nodes and weights for the domain.
  val (ghost_points, quadrature_weight_matrix): (
    Stream[(Double, Double)],
    DenseMatrix[Double]
  ) =
    SGRadialDiffusionModel.quadrature_primitives(quadrature_l, quadrature_t)(
      lShellDomain,
      timeDomain
    )

  val num_observations: Int = psd_data.length
  val num_colocation_points: Int = ghost_points.length

  // Sample statistics of the observed PSD, used to standardize targets.
  val psd_mean: Double = psd_data.map(_._2).sum / num_observations

  val psd_std: Double = math.sqrt(
    psd_data
      .map(p => p._2 - psd_mean)
      .map(p => math.pow(p, 2d))
      .sum / (num_observations - 1)
  )

  private lazy val targets = DenseVector(psd_data.map(_._2).toArray)
  // Targets standardized to zero mean / unit variance.
  private lazy val std_targets = targets.map(psd => (psd - psd_mean) / psd_std)

  // Encoders that add/strip the namespace prefixes when moving between the
  // kernels' own state maps and the global state map.
  private val (covStEncoder, noiseStEncoder) = (
    SGRadialDiffusionModel.stateEncoder(baseCovID),
    SGRadialDiffusionModel.stateEncoder(baseNoiseID)
  )

  private val designMatrixFlow = SGRadialDiffusionModel.metaDesignMatFlow(basis)

  // Design matrix of the PSD basis evaluated at the observation locations.
  lazy val phi = designMatrixFlow(psd_data.map(_._1))

  def _operator_hyper_parameters: List[String] = operator_hyper_parameters

  // Names of the 12 PDE-operator hyper-parameters (4 each for D_LL, lambda, Q).
  protected val operator_hyper_parameters: List[String] = {

    val dll_hyp = diffusionField.transform.keys
    val tau_hyp = lossRate.transform.keys
    val q_hyp = injection_process.transform.keys

    List(
      dll_hyp._1,
      dll_hyp._2,
      dll_hyp._3,
      dll_hyp._4,
      tau_hyp._1,
      tau_hyp._2,
      tau_hyp._3,
      tau_hyp._4,
      q_hyp._1,
      q_hyp._2,
      q_hyp._3,
      q_hyp._4
    )
  }

  /**
    * Stores the value of the PDE operator parameters
    * as a [[Map]].
    * */
  protected var operator_state: Map[String, Double] = {
    val dll_hyp = diffusionField.transform.keys
    val lambda_hyp = lossRate.transform.keys
    val q_hyp = injection_process.transform.keys

    Map(
      dll_hyp._1 -> init_dll_params._1,
      dll_hyp._2 -> init_dll_params._2,
      dll_hyp._3 -> init_dll_params._3,
      dll_hyp._4 -> init_dll_params._4,
      lambda_hyp._1 -> init_lambda_params._1,
      lambda_hyp._2 -> init_lambda_params._2,
      lambda_hyp._3 -> init_lambda_params._3,
      lambda_hyp._4 -> init_lambda_params._4,
      q_hyp._1 -> init_q_params._1,
      q_hyp._2 -> init_q_params._2,
      q_hyp._3 -> init_q_params._3,
      q_hyp._4 -> init_q_params._4
    )
  }

  // Full hyper-parameter list: namespaced kernel + noise + operator params.
  override var hyper_parameters: List[String] =
    covariance.hyper_parameters.map(h => baseCovID + "/" + h) ++
      noise_psd.hyper_parameters.map(h => baseNoiseID + "/" + h) ++
      operator_hyper_parameters

  /**
    * A Map which stores the current state of the system.
    * */
  override protected var current_state: Map[String, Double] =
    covStEncoder(covariance.state) ++
      noiseStEncoder(noise_psd.state) ++
      operator_state

  var blocked_hyper_parameters: List[String] =
    covariance.blocked_hyper_parameters.map(h => baseCovID + "/" + h) ++
      noise_psd.blocked_hyper_parameters.map(h => baseNoiseID + "/" + h)

  // Regularization weights: global, observation-block and colocation-block.
  var reg: Double = 1d

  var (regObs, regCol): (Double, Double) = (1d, 1d)

  // Freezes the given hyper-parameters (by namespaced name) so the global
  // optimizer does not vary them; delegates kernel/noise names to the
  // respective kernel objects.
  def block(hyp: String*): Unit = {

    val (blocked_cov_hyp, _) =
      hyp.partition(c => c.contains(baseCovID) || c.contains(baseNoiseID))

    val proc_cov_hyp = blocked_cov_hyp
      .filter(_.contains(baseCovID))
      .map(h => h.replace(baseCovID, "").tail)
    val proc_noise_hyp = blocked_cov_hyp
      .filter(_.contains(baseNoiseID))
      .map(h => h.replace(baseNoiseID, "").tail)

    covariance.block(proc_cov_hyp: _*)
    noise_psd.block(proc_noise_hyp: _*)
    blocked_hyper_parameters = hyp.toList
  }

  def block_++(h: String*): Unit = block(blocked_hyper_parameters.union(h): _*)

  def effective_hyper_parameters: List[String] =
    hyper_parameters.filterNot(h => blocked_hyper_parameters.contains(h))

  def effective_state: Map[String, Double] =
    _current_state.filterKeys(effective_hyper_parameters.contains)

  // Distributes a global state assignment to the covariance kernel, the
  // noise kernel and the PDE operator state, then refreshes current_state.
  def setState(h: Map[String, Double]): Unit = {

    require(
      effective_hyper_parameters.forall(h.contains),
      "All Hyper-parameters must be contained in state assignment"
    )

    val base_kernel_state = h
      .filterKeys(_.contains(baseCovID))
      .map(
        c => (c._1.replace(baseCovID, "").tail, c._2)
      )

    val base_noise_state = h
      .filterKeys(_.contains(baseNoiseID))
      .map(
        c => (c._1.replace(baseNoiseID, "").tail, c._2)
      )

    covariance.setHyperParameters(base_kernel_state)
    noise_psd.setHyperParameters(base_noise_state)

    val op_state =
      h.filterNot(c => c._1.contains(baseCovID) || c._1.contains(baseNoiseID))

    op_state.foreach(keyval => operator_state += (keyval._1 -> keyval._2))

    current_state = operator_state ++
      covStEncoder(covariance.state) ++
      noiseStEncoder(noise_psd.state)
  }

  // Solves the regularized weak-form linear system for the surrogate
  // parameters. Returns (solution vector, operator basis matrix psi
  // evaluated at the colocation points).
  def getParams(
    h: Map[String, Double]
  ): (DenseVector[Double], DenseMatrix[Double]) = {

    setState(h)

    //println("Constructing Model for PSD")

    // PDE coefficient functions for the current operator state.
    val dll = diffusionField(operator_state)
    val grad_dll = diffusionField.gradL.apply(operator_state)
    val lambda = lossRate(operator_state)
    val q = injection_process(operator_state)

    val psi_basis = basis.operator_basis(dll, grad_dll, lambda)

    // Evaluate, at every quadrature (ghost) point: the operator basis, the
    // standardized source term, and the loss rate.
    val (psi_stream, f_stream, lambda_stream) = ghost_points
      .map(
        p => (psi_basis(p), (q(p) - lambda(p) * psd_mean) / psd_std, lambda(p))
      )
      .unzip3

    // Optional basis expansion over the hyper-parameters themselves,
    // combined via Kronecker products.
    val g_basis_mat =
      if (hyper_param_basis.isEmpty) DenseMatrix(1.0)
      else
        hyper_param_basis
          .filterKeys(effective_hyper_parameters.contains(_))
          .map(kv => kv._2(_current_state(kv._1)).toDenseMatrix)
          .reduceLeft((u, v) => kron(u, v))
          .toDenseMatrix

    //print("Dimension = ")
    //pprint.pprintln(basis.dimension * g_basis_mat.cols)

    val (psi, f, lambda_vec) = (
      DenseMatrix.vertcat(psi_stream.map(_.toDenseMatrix): _*),
      DenseVector(f_stream.toArray),
      DenseVector(lambda_stream.toArray)
    )

    val phi_ext = kron(g_basis_mat, phi)
    val psi_ext = kron(g_basis_mat, psi)

    val (no, nc) = (num_observations, num_colocation_points)

    val ones_obs = DenseVector.fill[Double](no)(1d)

    // Gram matrices of the extended design matrices.
    val omega_phi = phi_ext * phi_ext.t
    val omega_cross = phi_ext * psi_ext.t
    val omega_psi = psi_ext * psi_ext.t

    // Right-hand side: [0 | standardized observations | source terms].
    val responses = DenseVector.vertcat(
      DenseVector(0d),
      targets.map(psd => (psd - psd_mean) / psd_std),
      f
    )

    def I(n: Int): DenseMatrix[Double] = DenseMatrix.eye[Double](n)

    // Block system coupling the bias row, the data-fit block and the
    // colocation (weak form PDE) block.
    val A = DenseMatrix.vertcat(
      DenseMatrix.horzcat(
        DenseMatrix(0d),
        ones_obs.toDenseMatrix,
        lambda_vec.toDenseMatrix
      ),
      DenseMatrix.horzcat(
        ones_obs.toDenseMatrix.t,
        omega_phi * reg + I(no) * regObs,
        omega_cross * reg
      ),
      DenseMatrix.horzcat(
        lambda_vec.toDenseMatrix.t,
        omega_cross.t * reg,
        (omega_psi * reg) + (quadrature_weight_matrix * regCol)
      )
    )

    (A \\ responses, psi)
  }

  // Stacks the bias row, the (scaled) observation design block and the
  // (scaled) operator design block into one prediction design matrix.
  def get_design_mat(
    g: DenseMatrix[Double],
    h: DenseMatrix[Double]
  ): DenseMatrix[Double] =
    DenseMatrix.vertcat(
      DenseVector.ones[Double](g.rows).toDenseMatrix,
      (phi * g.t) * reg,
      (h * g.t) * reg
    )

  // Builds a predictor pipe: given space-time locations, returns PSD
  // predictions in the original (un-standardized) scale.
  def get_surrogate(
    h: Map[String, Double]
  ): DataPipe[Iterable[(Double, Double)], Iterable[Double]] = {

    val (params, psi) = getParams(h)

    DataPipe((xs: Iterable[(Double, Double)]) => {
      val phi_mat: DenseMatrix[Double] = designMatrixFlow(xs.toStream)

      val dMat: DenseMatrix[Double] = get_design_mat(phi_mat, psi)

      println(s"Dimensions of design matrix ${dMat.rows} * ${dMat.cols}")
      println(s"Dimensions of parameter matrix ${params.size} * 1")

      val surrogate_predictor: DenseVector[Double] = dMat.t * params

      surrogate_predictor.toArray.map(x => (x * psd_std) + psd_mean)
    })
  }

  /**
    * Computes the log-likelihood of the observational data,
    * given the hyper-parameter configuration.
    *
    * @param h The value of the hyper-parameters in the configuration space
    * @param options Optional parameters about configuration
    * @return Configuration Energy E(h)
    * */
  def energy(
    h: Map[String, Double],
    options: Map[String, String] = Map()
  ): Double =
    try {
      val (params, psi) = getParams(h)

      val dMat = get_design_mat(phi, psi)

      val surrogate = dMat.t * params

      val modelVariance = norm(
        targets.map(psd => (psd - psd_mean) / psd_std) - surrogate
      ) / num_observations
      //print("variance = ")
      //pprint.pprintln(modelVariance)

      /*
       * Construct partitioned covariance matrix
       * */
      //println("Constructing partitions of covariance matrix")
      //println("Partition K_uu")
      val k_uu = covariance
        .buildKernelMatrix(psd_data.map(_._1), num_observations)
        .getKernelMatrix

      //println("Partition K_nn")
      val noise_mat_psd = noise_psd
        .buildKernelMatrix(psd_data.map(_._1), num_observations)
        .getKernelMatrix

      // GP marginal log-likelihood of the standardized residuals; the basis
      // Gram matrix is optionally folded into the covariance.
      AbstractGPRegressionModel.logLikelihood(
        std_targets - surrogate,
        if (basisCovFlag) k_uu + noise_mat_psd + (phi * phi.t) * reg
        else k_uu + noise_mat_psd
      )
    } catch {
      // Numerical failures map to sentinel energies so the global optimizer
      // can discard the configuration instead of crashing.
      case _: breeze.linalg.MatrixSingularException     => Double.NaN
      case _: breeze.linalg.NotConvergedException       => Double.PositiveInfinity
      case _: breeze.linalg.MatrixNotSymmetricException => Double.NaN
    }
}
// Pure-Galerkin variant of SGRadialDiffusionModel: getParams solves the
// weak-form (colocation) system only, dropping the data-fit block and the
// observation rows from the design matrix.
class GalerkinRDModel(
  override val Kp: DataPipe[Double, Double],
  init_dll_params: (Double, Double, Double, Double),
  init_lambda_params: (Double, Double, Double, Double),
  init_q_params: (Double, Double, Double, Double)
)(override val covariance: LocalScalarKernel[(Double, Double)],
  override val noise_psd: DiracTuple2Kernel,
  override val psd_data: Stream[((Double, Double), Double)],
  override val basis: PSDBasis,
  override val lShellDomain: (Double, Double),
  override val timeDomain: (Double, Double),
  override val quadrature_l: QuadratureRule =
    SGRadialDiffusionModel.eightPointGaussLegendre,
  override val quadrature_t: QuadratureRule =
    SGRadialDiffusionModel.eightPointGaussLegendre,
  override val hyper_param_basis: Map[String, MagParamBasis] = Map(),
  override val basisCovFlag: Boolean = true)
    extends SGRadialDiffusionModel(
      Kp,
      init_dll_params,
      init_lambda_params,
      init_q_params
    )(
      covariance,
      noise_psd,
      psd_data,
      basis,
      lShellDomain,
      timeDomain,
      quadrature_l,
      quadrature_t,
      hyper_param_basis,
      basisCovFlag
    ) {

  // Same structure as the parent implementation, but the linear system has
  // only the bias row and the colocation block (no observation coupling).
  override def getParams(
    h: Map[String, Double]
  ): (DenseVector[Double], DenseMatrix[Double]) = {

    setState(h)

    //println("Constructing Model for PSD")

    val dll = diffusionField(operator_state)
    val grad_dll = diffusionField.gradL.apply(operator_state)
    val lambda = lossRate(operator_state)
    val q = injection_process(operator_state)

    val psi_basis = basis.operator_basis(dll, grad_dll, lambda)

    // Operator basis, standardized source term and loss rate at each
    // quadrature (ghost) point.
    val (psi_stream, f_stream, lambda_stream) = ghost_points
      .map(
        p => (psi_basis(p), (q(p) - lambda(p) * psd_mean) / psd_std, lambda(p))
      )
      .unzip3

    val g_basis_mat =
      if (hyper_param_basis.isEmpty) DenseMatrix(1.0)
      else
        hyper_param_basis
          .filterKeys(effective_hyper_parameters.contains(_))
          .map(kv => kv._2(_current_state(kv._1)).toDenseMatrix)
          .reduceLeft((u, v) => kron(u, v))
          .toDenseMatrix

    //print("Dimension = ")
    //pprint.pprintln(basis.dimension * g_basis_mat.cols)

    val (psi, f, lambda_vec) = (
      DenseMatrix.vertcat(psi_stream.map(_.toDenseMatrix): _*),
      DenseVector(f_stream.toArray),
      DenseVector(lambda_stream.toArray)
    )

    val phi_ext = kron(g_basis_mat, phi)
    val psi_ext = kron(g_basis_mat, psi)

    val (no, nc) = (num_observations, num_colocation_points)

    val ones_obs = DenseVector.fill[Double](no)(1d)

    val omega_phi = phi_ext * phi_ext.t
    val omega_cross = phi_ext * psi_ext.t
    val omega_psi = psi_ext * psi_ext.t

    // Right-hand side: bias row plus source terms only (no observations).
    val responses = DenseVector.vertcat(
      DenseVector(0d),
      f
    )

    def I(n: Int): DenseMatrix[Double] = DenseMatrix.eye[Double](n)

    val A = DenseMatrix.vertcat(
      DenseMatrix.horzcat(
        DenseMatrix(0d),
        lambda_vec.toDenseMatrix
      ),
      DenseMatrix.horzcat(
        lambda_vec.toDenseMatrix.t,
        omega_psi * reg + (quadrature_weight_matrix * regCol)
      )
    )

    (A \\ responses, psi)
  }

  // Prediction design matrix without the observation block.
  override def get_design_mat(
    g: DenseMatrix[Double],
    h: DenseMatrix[Double]
  ): DenseMatrix[Double] =
    DenseMatrix.vertcat(
      DenseVector.ones[Double](g.rows).toDenseMatrix,
      (h * g.t) * reg
    )
}
object SGRadialDiffusionModel {

  // A 1-d quadrature rule that can be rescaled from [-1, 1] (or [0, 1] for
  // Monte Carlo) to an arbitrary interval [lower, upper].
  sealed trait QuadratureRule {
    def scale(lower: Double, upper: Double): (Seq[Double], Seq[Double])
  }

  // Monte Carlo "quadrature": uniform random nodes with equal weights.
  // Nodes are drawn once at construction, so a given instance is deterministic.
  case class MonteCarloQuadrature(num: Int) extends QuadratureRule {

    val nodes: Seq[Double] = UniformRV(0d, 1d).iid(num).draw
    val weights: Seq[Double] = Seq.fill[Double](num)(1d / num.toDouble)

    override def scale(
      lower: Double,
      upper: Double
    ): (Seq[Double], Seq[Double]) = {
      (nodes.map(x => lower + x * (upper - lower)), weights)
    }
  }

  // Gauss-Legendre style rule defined by nodes/weights on [-1, 1].
  case class GaussianQuadrature(nodes: Seq[Double], weights: Seq[Double])
      extends QuadratureRule {

    // Affine map of the reference nodes/weights from [-1, 1] to [lower, upper].
    override def scale(
      lower: Double,
      upper: Double
    ): (Seq[Double], Seq[Double]) = {
      val sc_nodes = nodes.map(n => {
        val mid_point = (lower + upper) / 2d
        val mid_diff = (upper - lower) / 2d
        mid_point + mid_diff * n
      })
      val sc_weights = weights.map(_ * (upper - lower) / 2d)
      (sc_nodes, sc_weights)
    }

    // Approximates the integral of f over [lower, upper] with this rule.
    def integrate(f: Double => Double)(lower: Double, upper: Double): Double = {
      val (sc_nodes, sc_weights) = scale(lower, upper)
      sc_weights.zip(sc_nodes.map(f)).map(c => c._2 * c._1).sum
    }
  }

  def monte_carlo_quadrature(num: Int): MonteCarloQuadrature =
    MonteCarloQuadrature(num)

  // Standard Gauss-Legendre node/weight tables for 2..8 points on [-1, 1].
  val twoPointGaussLegendre = GaussianQuadrature(
    Seq(-0.5773502692d, 0.5773502692d),
    Seq(1d, 1d)
  )

  val threePointGaussLegendre = GaussianQuadrature(
    Seq(-0.7745966692, 0d, 0.7745966692),
    Seq(0.5555555556, 0.8888888888, 0.5555555556)
  )

  val fourPointGaussLegendre = GaussianQuadrature(
    Seq(-0.8611363116, -0.3399810436, 0.3399810436, 0.8611363116),
    Seq(0.3478548451, 0.6521451549, 0.6521451549, 0.3478548451)
  )

  val fivePointGaussLegendre = GaussianQuadrature(
    Seq(-0.9061798459, -0.5384693101, 0d, 0.5384693101, 0.9061798459),
    Seq(0.2369268851, 0.4786286705, 0.5688888888, 0.4786286705, 0.2369268851)
  )

  val sixPointGaussLegendre = GaussianQuadrature(
    Seq(-0.9324695142, -0.6612093865, -0.2386191861, 0.2386191861, 0.6612093865,
      0.9324695142),
    Seq(0.1713244924, 0.3607615730, 0.4679139346, 0.4679139346, 0.3607615730,
      0.1713244924)
  )

  val sevenPointGaussLegendre = GaussianQuadrature(
    Seq(-0.9491079123, -0.7415311856, -0.4058451514, 0d, 0.4058451514,
      0.7415311856, 0.9491079123),
    Seq(0.1294849662, 0.2797053915, 0.3818300505, 0.4179591837, 0.3818300505,
      0.2797053915, 0.1294849662)
  )

  val eightPointGaussLegendre = GaussianQuadrature(
    Seq(
      -0.9602898565, -0.7966664774, -0.5255324099, -0.1834346425, 0.1834346425,
      0.5255324099, 0.7966664774, 0.9602898565
    ),
    Seq(
      0.1012285363, 0.2223810345, 0.3137066459, 0.3626837834, 0.3626837834,
      0.3137066459, 0.2223810345, 0.1012285363
    )
  )

  /**
    * Compute the colocation points (quadrature nodes) and weights.
    *
    * @param quadrature_l A [[QuadratureRule]] rule, for the space domain.
    * @param quadrature_t A [[QuadratureRule]] rule, for the time domain.
    * @param lShellDomain L-shell domain limits.
    * @param timeDomain Temporal domain limits.
    * */
  def quadrature_primitives(
    quadrature_l: QuadratureRule,
    quadrature_t: QuadratureRule
  )(lShellDomain: (Double, Double),
    timeDomain: (Double, Double)
  ): (Stream[(Double, Double)], DenseMatrix[Double]) = {

    val (l_nodes, l_weights) =
      quadrature_l.scale(lShellDomain._1, lShellDomain._2)
    val (t_nodes, t_weights) = quadrature_t.scale(timeDomain._1, timeDomain._2)

    // Cartesian product of the two 1-d rules; note the weight matrix stores
    // the *reciprocal* of each 2-d product weight (it is used as a
    // regularization block, not as an integration weight).
    val (points, weights) = utils
      .combine(Seq(l_nodes.zip(l_weights), t_nodes.zip(t_weights)))
      .map(s => {
        val point = (s.head._1, s.last._1)
        val weight = s.head._2 * s.last._2
        (point, 1d / weight)
      })
      .unzip

    (points.toStream, diag(DenseVector(weights.toArray)))
  }

  // Encoder that prefixes/strips "<prefix>/" on the keys of a state map.
  def stateEncoder(
    prefix: String
  ): Encoder[Map[String, Double], Map[String, Double]] = Encoder(
    (s: Map[String, Double]) => s.map(h => (prefix + "/" + h._1, h._2)),
    (s: Map[String, Double]) =>
      s.map(h => (h._1.replace(prefix, "").tail, h._2))
  )

  // Given a basis, builds a pipe that maps a stream of locations to the
  // stacked design matrix (one basis row per location).
  val metaDesignMatFlow = MetaPipe(
    (bf: Basis[(Double, Double)]) =>
      (s: Stream[(Double, Double)]) =>
        (StreamDataPipe(bf) >
          StreamDataPipe((v: DenseVector[Double]) => v.toDenseMatrix) >
          DataPipe(
            (s: Stream[DenseMatrix[Double]]) => DenseMatrix.vertcat(s: _*)
          ))(s)
  )

  // Loads cached posterior samples from ".cache/<file>", renders two scatter
  // plots against the supplied ground-truth loss parameters, and returns the
  // samples. NOTE: has plotting side effects (Highcharts).
  def loadCachedResults(
    lambda_alpha: Double,
    lambda_beta: Double,
    lambda_a: Double,
    lambda_b: Double
  )(file: String
  ): Stream[DenseVector[Double]] = {

    val strToVector = IterableDataPipe(
      (p: String) => DenseVector(p.split(",").map(_.toDouble))
    )

    val load_results = fileToStream > strToVector

    val post_samples = load_results(".cache/" + file)

    scatter(post_samples.map(c => (c(0), c(2))))
    hold()
    scatter(
      Seq(
        (math.log(math.exp(lambda_alpha) * math.pow(10d, lambda_a)), lambda_b)
      )
    )
    legend(Seq("Posterior Samples", "Ground Truth"))
    title("Posterior Samples:- " + 0x03B1.toChar + " vs b")
    xAxis(0x03C4.toChar + ": " + 0x03B1.toChar)
    yAxis(0x03C4.toChar + ": b")
    unhold()

    scatter(post_samples.map(c => (c(0), c(1))))
    hold()
    scatter(
      Seq(
        (
          math.log(math.exp(lambda_alpha) * math.pow(10d, lambda_a)),
          lambda_beta
        )
      )
    )
    legend(Seq("Posterior Samples", "Ground Truth"))
    title("Posterior Samples " + 0x03B1.toChar + " vs " + 0x03B2.toChar)
    xAxis(0x03C4.toChar + ": " + 0x03B1.toChar)
    yAxis(0x03C4.toChar + ": " + 0x03B2.toChar)
    unhold()

    post_samples.toStream
  }
}
| transcendent-ai-labs/PlasmaML | mag-core/src/main/scala/io/github/mandar2812/PlasmaML/dynamics/diffusion/SGRadialDiffusionModel.scala | Scala | lgpl-2.1 | 22,309 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.kafka
import org.junit.Test
import org.junit.Assert._
import net.elodina.mesos.util.{Constraint, Period, Range}
import ly.stealth.mesos.kafka.Broker.{Endpoint, ExecutionOptions, Failover, Stickiness, Task}
import scala.io.Source
/**
  * Verifies that legacy JSON payloads still deserialize into the expected
  * Broker and Topic model objects.
  */
class JsonTest {

  /**
    * Loads a classpath resource and deserializes it as JSON into `T`.
    * Fix: the `Source` is now closed after reading (previously leaked).
    */
  def getResourceJson[T](file: String)(implicit m: Manifest[T]): T = {
    val fileData = this.getClass.getResource(file)
    val txt = Source.fromFile(fileData.getFile)
    try {
      json.JsonUtil.fromJson[T](txt.mkString)
    } finally {
      txt.close()
    }
  }

  @Test
  def broker_legacy(): Unit = {
    val broker = getResourceJson[Broker]("/broker.json")

    // Build the broker we expect the legacy JSON to decode to.
    val b = new Broker(1)
    b.task = Task(
      id = "kafka-general-0-705d56e2-7d62-4d7e-b033-74ea5526ed82",
      hostname = "host1",
      executorId = "kafka-general-0-ff258207-18f3-4fd2-9028-5f4c4143f84d",
      attributes = Map(
        "ip" -> "10.253.166.214",
        "host" -> "host1",
        "ami" -> "ami-d0232eba",
        "cluster" -> "us-east-1",
        "dedicated" -> "kafka/general",
        "zone" -> "us-east-1d",
        "instance_type" -> "i2.2xlarge"
      ),
      slaveId = "1fbd3a0d-a685-47e6-8066-01be06d68fac-S821"
    )
    b.task.state = Broker.State.RUNNING
    b.task.endpoint = new Endpoint("host1:9092")
    b.syslog = false
    b.stickiness = new Stickiness(new Period("10m"))
    b.stickiness.registerStart("host1")
    b.log4jOptions = Map("k1" -> "v1", "k2" -> "v2")
    b.options = Map("a" -> "1", "b" -> "2")
    b.active = true
    b.port = new Range(9092)
    b.constraints = Map("dedicated" -> new Constraint("like:kafka/general"))
    b.mem = 56320
    b.executionOptions = ExecutionOptions(jvmOptions = "-server")
    b.cpus = 7
    b.heap = 5120
    b.failover = new Failover(new Period("1m"), new Period("10m"))

    BrokerTest.assertBrokerEquals(b, broker)
  }

  @Test
  def topic_legacy(): Unit = {
    val topic = getResourceJson[Topic]("/topic.json")

    // Expected partition assignments and per-topic config options.
    val t = Topic(
      "__consumer_offsets",
      Map(
        45 -> Seq(5, 3, 4),
        34 -> Seq(2, 7, 0)
      ),
      Map(
        "cleanup.policy" -> "compact",
        "compression.type" -> "uncompressed",
        "segment.bytes" -> "104857600"
      )
    )
    assertEquals(t, topic)
  }
}
| tc-dc/kafka-mesos | src/test/ly/stealth/mesos/kafka/JsonTest.scala | Scala | apache-2.0 | 3,015 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalatest._
import org.scalatest.funsuite._
import matchers.should.Matchers._
import org.scalatestplus.scalacheck._
import breeze.math.Complex
import breeze.numerics._
import breeze.stats.distributions.Rand
import breeze.storage.Zero
import breeze.util.DoubleImplicits
import scala.reflect.ClassTag
class DenseMatrixTest extends AnyFunSuite with Checkers with DoubleImplicits with MatrixTestUtils {
test("Slicing") {
val m = DenseMatrix((0, 1, 2), (3, 4, 5))
// slice sub-matrix
val s1 = m(0 to 1, 1 to 2)
assert(s1 === DenseMatrix((1, 2), (4, 5)))
s1 += 1
assert(m === DenseMatrix((0, 2, 3), (3, 5, 6)))
// slice row
val s2 = m(0, ::)
assert(s2 === DenseVector(0, 2, 3).t)
s2 *= 2
assert(m === DenseMatrix((0, 4, 6), (3, 5, 6)))
// slice column
val s3: DenseVector[Int] = m(::, 1)
assert(s3 === DenseVector(4, 5))
s3 -= 1
assert(m === DenseMatrix((0, 3, 6), (3, 4, 6)))
// slice rows
val s4 = m(1 to 1, ::)
assert(s4 === DenseMatrix((3, 4, 6)))
val mbig = DenseMatrix(
(0, 1, 2, 3, 4, 5),
(3, 4, 5, 6, 7, 8),
(3, 4, 5, 6, 7, 8),
(5, 4, 5, 9, 7, 8)
)
val sbig1 = mbig(::, 0 to 2 by 2)
assert(
sbig1 === DenseMatrix(
(0, 2),
(3, 5),
(3, 5),
(5, 5)
))
// slice columns
val s5 = m(::, 1 to 2)
assert(s5 === DenseMatrix((3, 6), (4, 6)))
// slice part of a row
val s6a = m(0, 1 to 2)
s6a += 1
assert(m === DenseMatrix((0, 4, 7), (3, 4, 6)))
// slice part of a column
val s7a = m(0 to 1, 0)
s7a += 2
val s7b = m(0 to 1, 0)
s7b += 1
assert(m === DenseMatrix((3, 4, 7), (6, 4, 6)))
}
test("Multiple Slicing") {
val m = new DenseMatrix[Int](6, 6, (1 to 36).toArray)
val slice1 = m(1 to 3, 1 to 3)
assert(slice1(::, 1) === DenseVector(14, 15, 16))
assert(slice1(::, 1 to 2) === DenseMatrix((14, 20), (15, 21), (16, 22)))
}
test("Transpose") {
val m = DenseMatrix((1, 2, 3), (4, 5, 6))
// check that the double transpose gives us back the original
assert(m.t.t == m)
// check static type and write-through
val t = m.t
assert(t === DenseMatrix((1, 4), (2, 5), (3, 6)))
t(0, 1) = 0
assert(m === DenseMatrix((1, 2, 3), (0, 5, 6)))
}
test("Sliced Transpose") {
val m = DenseMatrix((0, 1, 2), (3, 4, 5))
// column of original looks same as row of tranpose
val sm1 = m(::, 1)
val smt1 = m.t(1, ::)
assert(sm1.t === smt1)
val sm2 = m(::, 2)
val smt2 = m.t(2, ::)
assert(sm2.t === smt2)
val sm1c = m(1, ::)
val smt1c = m.t(::, 1)
assert(sm1c === smt1c.t)
val sm2c = m(0, ::)
val smt2c = m.t(::, 0)
assert(sm2c === smt2c.t)
// slice sub-matrix
val s1 = m(0 to 1, 1 to 2)
assert(s1 === DenseMatrix((1, 2), (4, 5)))
val t1 = s1.t
assert(t1 === DenseMatrix((1, 4), (2, 5)))
val t1b = m.t(1 to 2, 0 to 1)
assert(t1 === t1b)
val s2 = m(0 to 1, 1)
val t2 = m.t(1, 0 to 1)
assert(s2 === t2.t)
val s3 = m(0, 0 to 1)
val t3 = m.t(0 to 1, 0)
assert(s3.t === t3)
{
val s2 = m(0 to 1, ::)
val t2 = m.t(::, 0 to 1)
assert(s2.t === t2)
assert(s2 === t2.t)
val s3 = m(::, 0 to 1)
val t3 = m.t(0 to 1, ::)
assert(s3.t === t3)
assert(s3 === t3.t)
}
}
test("#759 - slice assignment broken for transpose matrices") {
def okay(x: DenseMatrix[Double]) = {
val res = DenseMatrix.zeros[Double](5, 2)
res(::, 1 until 2) := x
res
}
def problem[T: ClassTag: Zero](x: DenseMatrix[T]) ={
val res = DenseMatrix.zeros[T](5, 2)
res(::, 1 until 2) := x
res
}
val x = DenseMatrix.ones[Double](1, 5).t
assert(okay(x) == problem(x))
}
test("Min/Max") {
val m = DenseMatrix((1, 0, 0), (2, 3, -1))
assert(argmin(m) === (1, 2))
assert(argmax(m) === (1, 1))
assert(min(m) === -1)
assert(max(m) === 3)
assert(minMax(m) === (-1, 3))
assert(ptp(m) === 4)
}
test("elementwise max") {
val v = DenseVector(2, 0, 3, 2, -1).asDenseMatrix
val v2 = DenseVector(3, -1, 3, 4, -4).asDenseMatrix
assert(max(v, v2) === DenseVector(3, 0, 3, 4, -1).asDenseMatrix)
assert(max(v, 2) === DenseVector(2, 2, 3, 2, 2).asDenseMatrix)
assert(min(v, 2) === DenseVector(2, 0, 2, 2, -1).asDenseMatrix)
}
test("Min/Max[Float]") {
val m = convert(DenseMatrix((1, 0, 0), (2, 3, -1)), Float)
assert(argmin(m) === (1, 2))
assert(argmax(m) === (1, 1))
assert(min(m) === -1)
assert(max(m) === 3)
assert(minMax(m) === (-1.0f, 3.0f))
assert(ptp(m) === 4)
}
test("Min/Max[Double]") {
val m = convert(DenseMatrix((1, 0, 0), (2, 3, -1)), Double)
assert(argmin(m) === (1, 2))
assert(argmax(m) === (1, 1))
assert(min(m) === -1)
assert(max(m) === 3)
assert(minMax(m) === (-1.0, 3.0))
assert(ptp(m) === 4)
}
test("Min/Max[Long]") {
val m = convert(DenseMatrix((1, 0, 0), (2, 3, -1)), Long)
assert(argmin(m) === (1, 2))
assert(argmax(m) === (1, 1))
assert(min(m) === -1)
assert(max(m) === 3)
assert(minMax(m) === (-1L, 3L))
assert(ptp(m) === 4)
}
test("MapValues") {
val a: DenseMatrix[Int] = DenseMatrix((1, 0, 0), (2, 3, -1))
val b1: DenseMatrix[Int] = a.mapValues(_ + 1)
assert(b1 === DenseMatrix((2, 1, 1), (3, 4, 0)))
val b2: DenseMatrix[Double] = a.mapValues(_ + 1.0)
assert(b2 === DenseMatrix((2.0, 1.0, 1.0), (3.0, 4.0, 0.0)))
val b3 = a.t.mapValues(_ + 1)
assert(b3 === DenseMatrix((2, 3), (1, 4), (1, 0)))
}
/*
test("Map Triples") {
val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1))
val b1 : DenseMatrix[Int] = a.mapTriples((i,j,v) => i + v)
assert(b1 === DenseMatrix((1,0,0),(3,4,0)))
val b2 : DenseMatrix[Double] = a.mapTriples((i,j,v) => j + v.toDouble)
assert(b2 === DenseMatrix((1.0,1.0,2.0),(2.0,4.0,1.0)))
}
test("Triples") {
val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1))
var s = 0
// foreach
s = 0
for ((i,j,v) <- a.triples) s += v
assert(s === sum(a))
// filter
s = 0
for ((i,j,v) <- a.triples; if i % 2 == 0 || j % 2 == 0) s += v
assert(s === 1+2-1)
// // map
// val b1 : DenseMatrix[Double] = for ((i,j,v) <- a) yield v * 2.0
// assert(b1 === DenseMatrix((2.0,0.0,0.0),(4.0,6.0,-2.0)))
//
// // map with filter
// val b2 : DenseMatrix[Int] = for ((i,j,v) <- a; if j == 0) yield v * 2
// assert(b2 === DenseMatrix((2,0,0),(4,0,0)))
}
*/
test("set") {
{
val a = DenseMatrix.zeros[Int](2, 2)
val b = DenseMatrix((1, 0), (2, 3))
a := b
assert(a === b)
}
val a = DenseMatrix.zeros[Int](2, 3)
val b = DenseMatrix((1, 0, 5), (2, 3, -1))
a := b
assert(a === b)
}
test("horzcat") {
val a: DenseMatrix[Int] = DenseMatrix((1, 0, 5), (2, 3, -1))
val result: DenseMatrix[Int] = DenseMatrix((1, 0, 5, 1, 0, 5), (2, 3, -1, 2, 3, -1))
assert(DenseMatrix.horzcat(a, a) === result)
}
test("vertcat") {
val a: DenseMatrix[Int] = DenseMatrix((1, 0, 5), (2, 3, -1))
val result: DenseMatrix[Int] = DenseMatrix((1, 0, 5), (2, 3, -1), (1, 0, 5), (2, 3, -1))
assert(DenseMatrix.vertcat(a, a) === result)
}
test("Multiply") {
val a = DenseMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = DenseMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0), (12.0, 0.0, 5.0))
val c = DenseVector(6.0, 2.0, 3.0)
val cs = SparseVector(6.0, 2.0, 3.0)
assert(a * b === DenseMatrix((37.0, -8.0, 25.0), (85.0, -23.0, 67.0)))
assert(a * c === DenseVector(19.0, 52.0))
assert(b * c === DenseVector(62.0, -21.0, 87.0))
assert(a * cs === DenseVector(19.0, 52.0))
assert(b * cs === DenseVector(62.0, -21.0, 87.0))
assert(b.t * c === DenseVector(72.0, -18.0, 65.0))
assert(a.t * DenseVector(4.0, 3.0) === DenseVector(16.0, 23.0, 30.0))
assert(c.t * a.t === (a * c).t)
// should be dense
val x: DenseMatrix[Double] = a * a.t
assert(x === DenseMatrix((14.0, 32.0), (32.0, 77.0)))
// should be dense
val y: DenseMatrix[Double] = a.t * a
assert(y === DenseMatrix((17.0, 22.0, 27.0), (22.0, 29.0, 36.0), (27.0, 36.0, 45.0)))
val z: DenseMatrix[Double] = b * (b + 1.0)
assert(z === DenseMatrix((164.0, 5.0, 107.0), (-5.0, 10.0, -27.0), (161.0, -7.0, 138.0)))
}
test("Multiply Int") {
val a = DenseMatrix((1, 2, 3), (4, 5, 6))
val b = DenseMatrix((7, -2, 8), (-3, -3, 1), (12, 0, 5))
val c = DenseVector(6, 2, 3)
assert(a * b === DenseMatrix((37, -8, 25), (85, -23, 67)))
assert(a * c === DenseVector(19, 52))
assert(b * c === DenseVector(62, -21, 87))
assert(b.t * c === DenseVector(72, -18, 65))
assert(a.t * DenseVector(4, 3) === DenseVector(16, 23, 30))
// should be dense
val x = a * a.t
assert(x === DenseMatrix((14, 32), (32, 77)))
// should be dense
val y = a.t * a
assert(y === DenseMatrix((17, 22, 27), (22, 29, 36), (27, 36, 45)))
val z: DenseMatrix[Int] = b * ((b + 1): DenseMatrix[Int])
assert(z === DenseMatrix((164, 5, 107), (-5, 10, -27), (161, -7, 138)))
}
test("Multiply Boolean") {
val a = DenseMatrix((true, true, true), (true, true, true))
val b = DenseMatrix((true, false, true), (true, false, true), (true, false, true))
assert(a * b === DenseMatrix((true, false, true), (true, false, true)))
}
test("Multiply Float") {
val a = DenseMatrix((1.0f, 2.0f, 3.0f), (4.0f, 5.0f, 6.0f))
val b = DenseMatrix((7.0f, -2.0f, 8.0f), (-3.0f, -3.0f, 1.0f), (12.0f, 0.0f, 5.0f))
val c = DenseVector(6.0f, 2.0f, 3.0f)
val cs = SparseVector(6.0f, 2.0f, 3.0f)
assert(a * b === DenseMatrix((37.0f, -8.0f, 25.0f), (85.0f, -23.0f, 67.0f)))
assert(a * c === DenseVector(19.0f, 52.0f))
assert(b * c === DenseVector(62.0f, -21.0f, 87.0f))
assert(a * cs === DenseVector(19.0f, 52.0f))
assert(b * cs === DenseVector(62.0f, -21.0f, 87.0f))
assert(b.t * c === DenseVector(72.0f, -18.0f, 65.0f))
assert(a.t * DenseVector(4.0f, 3.0f) === DenseVector(16.0f, 23.0f, 30.0f))
// should be dense
val x = a * a.t
assert(x === DenseMatrix((14.0f, 32.0f), (32.0f, 77.0f)))
// should be dense
val y = a.t * a
assert(y === DenseMatrix((17.0f, 22.0f, 27.0f), (22.0f, 29.0f, 36.0f), (27.0f, 36.0f, 45.0f)))
val z: DenseMatrix[Float] = b * (b + 1.0f)
assert(z === DenseMatrix((164.0f, 5.0f, 107.0f), (-5.0f, 10.0f, -27.0f), (161.0f, -7.0f, 138.0f)))
}
test("Multiply Complex") {
val a = DenseMatrix((Complex(1, 1), Complex(2, 2), Complex(3, 3)), (Complex(4, 4), Complex(5, 5), Complex(6, 6)))
val b = DenseMatrix(
(Complex(7, 7), Complex(-2, -2), Complex(8, 8)),
(Complex(-3, -3), Complex(-3, -3), Complex(1, 1)),
(Complex(12, 12), Complex(0, 0), Complex(5, 5)))
val c = DenseVector(Complex(6, 0), Complex(2, 0), Complex(3, 0))
val cs = SparseVector(Complex(6, 0), Complex(2, 0), Complex(3, 0))
val value: DenseMatrix[Complex] = a * b
assert(
value === DenseMatrix(
(Complex(0, 74), Complex(0, -16), Complex(0, 50)),
(Complex(0, 170), Complex(0, -46), Complex(0, 134))))
assert(b * c === DenseVector(Complex(62, 62), Complex(-21, -21), Complex(87, 87)))
assert(b * cs === DenseVector(Complex(62, 62), Complex(-21, -21), Complex(87, 87)))
assert(b.t * c === DenseVector(Complex(72, -72), Complex(-18, 18), Complex(65, -65)))
}
test("Multiply BigDecimal") {
val a = DenseMatrix((1, 2, 3), (4, 5, 6)).mapValues(BigDecimal(_))
val b = DenseMatrix((7, -2, 8), (-3, -3, 1), (12, 0, 5)).mapValues(BigDecimal(_))
val c = DenseVector(6, 2, 3).mapValues(BigDecimal(_))
assert(
a.*(b) === DenseMatrix((37, -8, 25), (85, -23, 67))
.mapValues(BigDecimal(_)))
assert(a * c === DenseVector(19, 52).mapValues(BigDecimal(_)))
assert(b * c === DenseVector(62, -21, 87).mapValues(BigDecimal(_)))
assert(b.t * c === DenseVector(72, -18, 65).mapValues(BigDecimal(_)))
assert(a.t * DenseVector(4, 3).mapValues(BigDecimal(_)) === DenseVector(16, 23, 30).mapValues(BigDecimal(_)))
// should be dense
val x = a * a.t
assert(x === DenseMatrix((14, 32), (32, 77)).mapValues(BigDecimal(_)))
// should be dense
val y = a.t * a
assert(y === DenseMatrix((17, 22, 27), (22, 29, 36), (27, 36, 45)).mapValues(BigDecimal(_)))
val z: DenseMatrix[BigDecimal] = b * ((b + BigDecimal(1)): DenseMatrix[BigDecimal])
assert(z === DenseMatrix((164, 5, 107), (-5, 10, -27), (161, -7, 138)).mapValues(BigDecimal(_)))
}
test("toDenseVector") {
val a = DenseMatrix((1, 2, 3), (4, 5, 6))
val b = a(0 to 1, 1 to 2)
val c = b.t
assert(a.toDenseVector === DenseVector(1, 4, 2, 5, 3, 6))
assert(b.toDenseVector === DenseVector(2, 5, 3, 6))
assert(c.toDenseVector === DenseVector(2, 3, 5, 6))
}
test("flattenView") {
val a = DenseMatrix((1, 2, 3), (4, 5, 6))
a.flatten(true)(2) = 4
assert(a === DenseMatrix((1, 4, 3), (4, 5, 6)))
}
test("Trace") {
assert(trace(DenseMatrix((1, 2), (4, 5))) === 1 + 5)
assert(trace(DenseMatrix((1, 2, 3), (3, 4, 5), (5, 6, 7))) == 1 + 4 + 7)
assert(trace(DenseMatrix((1, 2, 3), (4, 5, 6), (7, 8, 9))) === 1 + 5 + 9)
}
  test("Reshape") {
    val m: DenseMatrix[Int] = DenseMatrix((1, 2, 3), (4, 5, 6))
    // view = true: the reshaped matrix must share the backing array, not copy it
    val r: DenseMatrix[Int] = m.reshape(3, 2, true)
    // `eq` (reference identity) proves the data array is shared
    assert(m.data eq r.data)
    assert(r.rows === 3)
    assert(r.cols === 2)
    // column-major storage order (1,4,2,5,3,6) re-read as 3 rows x 2 cols
    assert(r === DenseMatrix((1, 5), (4, 3), (2, 6)))
  }
test("Solve") {
// square solve
val r1: DenseMatrix[Double] = DenseMatrix((1.0, 3.0), (2.0, 0.0)) \\ DenseMatrix((1.0, 2.0), (3.0, 4.0))
assert(r1 === DenseMatrix((1.5, 2.0), (-1.0 / 6, 0.0)))
// matrix-vector solve
val r2: DenseVector[Double] = DenseMatrix((1.0, 3.0, 4.0), (2.0, 0.0, 6.0)) \\ DenseVector(1.0, 3.0)
assert(norm(r2 - DenseVector(0.1813186813186811, -0.3131868131868131, 0.43956043956043944), inf) < 1E-5)
// wide matrix solve
val r3: DenseMatrix[Double] = DenseMatrix((1.0, 3.0, 4.0), (2.0, 0.0, 6.0)) \\ DenseMatrix((1.0, 2.0), (3.0, 4.0))
matricesNearlyEqual(
r3,
DenseMatrix(
(0.1813186813186811, 0.2197802197802196),
(-0.3131868131868131, -0.1978021978021977),
(0.43956043956043944, 0.5934065934065933)))
// tall matrix solve
val r4: DenseMatrix[Double] = DenseMatrix((1.0, 3.0), (2.0, 0.0), (4.0, 6.0)) \\ DenseMatrix(
(1.0, 4.0),
(2.0, 5.0),
(3.0, 6.0))
assert(max(abs(
r4 - DenseMatrix((0.9166666666666667, 1.9166666666666672), (-0.08333333333333352, -0.08333333333333436)))) < 1E-5)
}
test("Solve Float") {
// square solve
val r1: DenseMatrix[Float] = DenseMatrix((1.0f, 3.0f), (2.0f, 0.0f)) \\ DenseMatrix((1.0f, 2.0f), (3.0f, 4.0f))
assert(r1 === DenseMatrix((1.5f, 2.0f), (-1.0f / 6, 0.0f)))
// matrix-vector solve
val r2: DenseVector[Float] = DenseMatrix((1.0f, 3.0f, 4.0f), (2.0f, 0.0f, 6.0f)) \\ DenseVector(1.0f, 3.0f)
assert(norm(r2 - DenseVector(0.1813186813186811f, -0.3131868131868131f, 0.43956043956043944f)) < 1E-5)
// wide matrix solve
val r3: DenseMatrix[Float] = DenseMatrix((1.0f, 3.0f, 4.0f), (2.0f, 0.0f, 6.0f)) \\ DenseMatrix(
(1.0f, 2.0f),
(3.0f, 4.0f))
assert(
max(
abs(
r3 - DenseMatrix(
(0.1813186813186811f, 0.2197802197802196f),
(-0.3131868131868131f, -0.1978021978021977f),
(0.43956043956043944f, 0.5934065934065933f)))) < 1E-5)
// tall matrix solve
val r4: DenseMatrix[Float] = DenseMatrix((1.0f, 3.0f), (2.0f, 0.0f), (4.0f, 6.0f)) \\ DenseMatrix(
(1.0f, 4.0f),
(2.0f, 5.0f),
(3.0f, 6.0f))
assert(
max(
abs(r4 - DenseMatrix(
(0.9166666666666667f, 1.9166666666666672f),
(-0.08333333333333352f, -0.08333333333333436f)))) < 1E-5)
}
test("GH#29 transpose solve is broken") {
val A = DenseMatrix((1.0, 0.0), (1.0, -1.0))
val t = DenseVector(1.0, 0.0)
assert(A \\ t === DenseVector(1.0, 1.0))
assert(A.t \\ t === DenseVector(1.0, 0.0))
}
test("sum") {
// Test square and rectangular matrices
val A = DenseMatrix((1.0, 3.0), (2.0, 4.0))
assert(sum(A, Axis._0) === DenseVector(3.0, 7.0).t)
assert(sum(A(::, *)) === DenseVector(3.0, 7.0).t)
assert(sum(DenseMatrix((1.0, 3.0, 5.0), (2.0, 4.0, 6.0)), Axis._0) === DenseVector(3.0, 7.0, 11.0).t)
assert(sum(DenseMatrix((1.0, 3.0), (2.0, 4.0), (5.0, 6.0)), Axis._0) === DenseVector(8.0, 13.0).t)
assert(sum(A, Axis._1) === DenseVector(4.0, 6.0))
assert(sum(DenseMatrix((1.0, 3.0, 5.0), (2.0, 4.0, 6.0)), Axis._1) === DenseVector(9.0, 12.0))
assert(sum(DenseMatrix((1.0, 3.0), (2.0, 4.0), (5.0, 6.0)), Axis._1) === DenseVector(4.0, 6.0, 11.0))
assert(sum(A) === 10.0)
}
test("normalize rows and columns") {
val A = DenseMatrix((1.0, 3.0), (2.0, 4.0))
assert(normalize(A, Axis._0, 1) === DenseMatrix((1.0 / 3.0, 3.0 / 7.0), (2.0 / 3.0, 4.0 / 7.0)))
assert(normalize(A, Axis._1, 1) === DenseMatrix((1.0 / 4.0, 3.0 / 4.0), (2.0 / 6.0, 4.0 / 6.0)))
// handle odd sized matrices (test for a bug.)
val dm = DenseMatrix.tabulate(2, 5)((i, j) => i * j * 1.0 + 1)
dm := normalize(dm, Axis._1, 2)
assert(abs(sum(dm(0, ::).t.map(x => x * x)) - 1.0) < 1E-4, dm.toString + " not normalized!")
}
test("Generic Dense ops") {
// mostly for coverage
val a = DenseMatrix.create[String](1, 1, Array("SSS"))
intercept[IndexOutOfBoundsException] {
a(3, 3) = ":("
assert(false, "Shouldn't be here!")
}
assert(a(0, 0) === "SSS")
intercept[IndexOutOfBoundsException] {
a(3, 3)
assert(false, "Shouldn't be here!")
}
a(0, 0) = ":("
assert(a(0, 0) === ":(")
a := ":)"
assert(a(0, 0) === ":)")
val b = DenseMatrix.zeros[String](1, 1)
b := a
assert(b === a)
}
test("toString with no rows doesn't throw") {
DenseMatrix.zeros[Double](0, 2).toString
}
test("GH #30: Shaped solve of transposed and slice matrix does not work") {
val A = DenseMatrix((1.0, 0.0), (1.0, -1.0))
val i = DenseMatrix.eye[Double](2)
val res = i \\ A.t(::, 1)
assert(res === DenseVector(1.0, -1.0))
val res2 = i \\ A(1, ::).t
assert(res2 === DenseVector(1.0, -1.0))
}
test("GH #148: out of bounds slice throws") {
val temp2 = DenseMatrix.tabulate(5, 5)((x: Int, y: Int) => x + y * 10)
intercept[IndexOutOfBoundsException] {
temp2(Range(4, 6), 3)
}
}
test("softmax on dm slices") {
val a = DenseMatrix((1.0, 2.0, 3.0))
assert(softmax(a(::, 1)) === 2.0)
}
test("Delete") {
val a = DenseMatrix((1, 2, 3), (4, 5, 6), (7, 8, 9))
assert(a.delete(0, Axis._0) === DenseMatrix((4, 5, 6), (7, 8, 9)))
assert(a.delete(1, Axis._0) === DenseMatrix((1, 2, 3), (7, 8, 9)))
assert(a.delete(2, Axis._0) === DenseMatrix((1, 2, 3), (4, 5, 6)))
assert(a.delete(0, Axis._1) === DenseMatrix((2, 3), (5, 6), (8, 9)))
assert(a.delete(1, Axis._1) === DenseMatrix((1, 3), (4, 6), (7, 9)))
assert(a.delete(2, Axis._1) === DenseMatrix((1, 2), (4, 5), (7, 8)))
assert(a.delete(Seq(0, 2), Axis._1) === DenseMatrix(2, 5, 8))
assert(a.delete(Seq(1, 2), Axis._1) === DenseMatrix(1, 4, 7))
assert(a.delete(Seq(0, 2), Axis._0) === DenseMatrix((4, 5, 6)))
assert(a.delete(Seq(1, 2), Axis._0) === DenseMatrix((1, 2, 3)))
}
test("Big Int zeros are the right thing") {
val dm = DenseMatrix.zeros[BigInt](1, 1)
assert(dm(0, 0) === BigInt(0))
}
test("BigInt multiply") {
val m = DenseMatrix((BigInt(1), BigInt(1)), (BigInt(1), BigInt(0)))
val m2 = DenseMatrix((1, 1), (1, 0))
assert(m * m === convert(m2 * m2, Int))
}
test("comparisons") {
val one = DenseMatrix.ones[Double](5, 6)
val zero = DenseMatrix.zeros[Double](5, 6)
assert((one >:> zero) === DenseMatrix.ones[Boolean](5, 6))
}
// test("Some ill-typedness") {
// import shapeless.test.illTyped
// illTyped {
// """
// val one = DenseMatrix.ones[Double](5, 6)
// val z = DenseVector.zeros[Double](5)
// (z + one)
// """
// }
// }
test("ensure we don't crash on weird strides") {
val dm = DenseMatrix.zeros[Double](3, 3)
assert((dm(::, 0 until 0) * dm(0 until 0, ::)) === dm)
assert((dm(0 until 0, ::) * dm(::, 0 until 0)) === DenseMatrix.zeros[Double](0, 0))
// assert( (dm(::, 2 until 0 by -1) * dm(2 until 0 by -1, ::)) === dm)
}
test("Ensure a += a.t gives the right result") {
val dm = DenseMatrix.rand[Double](3, 3)
val dmdmt = dm + dm.t
dm += dm.t
assert(dm === dmdmt)
}
test("#221") {
val data = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val mat = new DenseMatrix(rows = 10, data, offset = 0).t
val area = mat(3 until 6, 2 until 7)
assert(area === DenseMatrix((3, 4, 5, 6, 7), (3, 4, 5, 6, 7), (3, 4, 5, 6, 7)))
assert(area.t === DenseMatrix((3, 4, 5, 6, 7), (3, 4, 5, 6, 7), (3, 4, 5, 6, 7)).t)
val sl2t = area.t(0 until area.cols, 1 until area.rows)
assert(
sl2t.offset === area.offset + area.majorStride,
sl2t.data(area.offset + area.majorStride) + " " + area.offset)
assert(sl2t.t === DenseMatrix((3, 4, 5, 6, 7), (3, 4, 5, 6, 7)))
val sl2 = area(1 until area.rows, 0 until area.cols)
assert(sl2 === DenseMatrix((3, 4, 5, 6, 7), (3, 4, 5, 6, 7)))
}
test("DenseMatrix construction with list of lists") {
val dm = DenseMatrix(
List(List(1, 2, 3, 0, 0, 0, 0, 0, 0), List(0, 0, 0, 1, 2, 3, 0, 0, 0), List(0, 0, 0, 0, 0, 0, 1, 2, 3)): _*)
}
test("#265: slices of :: and IndexedSeq") {
val dm = DenseMatrix((0, 1, 2), (3, 4, 5))
assert(dm(::, IndexedSeq(2, 1, 0)).toDenseMatrix === fliplr(dm))
assert(dm(IndexedSeq(1, 0), ::).toDenseMatrix === flipud(dm))
}
test("#278: don't crash on solve when majorStride == 0") {
val d = DenseVector[Double]()
val m = DenseMatrix.tabulate(0, 0) { case x => 0.0 }
assert(m \\ d === d)
}
test("#283: slice of dm by dm boolean") {
val dm = DenseMatrix((0, 1, 2), (3, 4, 5))
dm(dm >:= 2) := 3
assert(dm === DenseMatrix((0, 1, 3), (3, 3, 3)))
}
test("#286: argsort diverging implicit") {
val dm = DenseMatrix((0.1f), (0.0f))
assert(argsort(dm) === IndexedSeq((1, 0), (0, 0)))
}
test("#289: sigmoid dm slice") {
val m = DenseMatrix.zeros[Double](10, 10)
assert(sigmoid(m(::, 0 to 5)) === DenseMatrix.fill(10, 6)(0.5))
assert(sigmoid(m(::, 3 to 5)) === DenseMatrix.fill(10, 3)(0.5))
}
test("#336 argmax for Dense Matrices") {
val m = DenseMatrix.zeros[Double](3, 3)
m(2, ::) := DenseVector(1.0, 2.0, 3.0).t
assert(argmax(m(2, ::).t) === 2)
assert(max(m(2, ::).t) === 3.0)
}
test("lhs scalars") {
assert(1.0 /:/ (DenseMatrix.fill(2, 2)(10.0)) === DenseMatrix.fill(2, 2)(1 / 10.0))
assert(1.0 -:- (DenseMatrix.fill(2, 2)(10.0)) === DenseMatrix.fill(2, 2)(-9.0))
}
test("mapping ufunc") {
val r = DenseMatrix.rand(100, 100)
val explicit = new DenseMatrix(100, 100, r.data.map(math.sin))
assert(sin(r) == explicit)
sin.inPlace(r)
assert(explicit == r)
}
test("mapping ufunc, strides") {
val r = (DenseMatrix.rand(100, 100)).apply(10 until 27, 4 until 37 by 4)
var explicit = new DenseMatrix(100, 100, r.data.map(math.sin))
explicit = explicit(10 until 27, 4 until 37 by 4)
assert(sin(r) == explicit)
sin.inPlace(r)
assert(explicit == r)
}
test("#449") {
val m = DenseMatrix.rand(10, 10)
m(List(1, 2, 3), 0 to 0) := 5d //WORKS FINE
m(List(1, 2, 3), 0) := 5d //NOT WORKING
m(1 to 3, 0) := 5d //WORKING
m(List(1, 2, 3), 0 to 0) := m(List(1, 2, 3), 0 to 0) //WORKS FINE
m(List(1, 2, 3), 0) := m(List(1, 2, 3), 0) //NOT WORKING
m(1 to 3, 0) := m(1 to 3, 0) //WORKS FINE
}
test("#476: DM * DV when rows == 0") {
val m = DenseMatrix.zeros[Double](0, 10)
val v = DenseVector.zeros[Double](10)
assert(m * v == DenseVector.zeros[Double](0))
val m2 = DenseMatrix.zeros[Double](10, 0)
val v2 = DenseVector.zeros[Double](0)
assert(m2 * v2 == DenseVector.zeros[Double](10))
}
test("#534: DenseMatrix construction from empty row sequence") {
val rows = Seq.empty[Seq[Double]]
val matrix = DenseMatrix(rows: _*)
assert(matrix.rows == 0)
assert(matrix.cols == 0)
}
test("#577: Empty DenseMatrix can be transposed") {
val m = new DenseMatrix(0, 0, Array.empty[Double])
val mt = m.t
assert(mt.rows == 0)
assert(mt.cols == 0)
assert(m === mt)
}
test("#592: can take an empty column or row slice") {
val m = DenseMatrix.fill(5, 5)(0)
m(4 until 4, 0 until 5)
m(0 until 5, 4 until 4)
}
test("#559: reshape of transpose matrix") {
val a = DenseMatrix((1, 4, 7), (2, 5, 8), (3, 6, 9)) //Matrix A
val b = DenseMatrix((1, 2, 3), (4, 5, 6), (7, 8, 9)) // Matrix B
assert(a != b)
assert(a == b.t)
assert(a.reshape(9, 1) == b.t.reshape(9, 1))
}
test("#620 solving transposed matrices, tall.t") {
val W: DenseMatrix[Double] = DenseMatrix((1.0, 3.0), (2.0, 0.0), (4.0, 6.0)).t
val y = DenseVector(1.0, 2.0)
val target = DenseVector(0.166667, -0.083333, 0.250000)
solveCompare(W, y, target)
}
test("#620 solving transposed matrices, wide.t") {
val W: DenseMatrix[Double] = DenseMatrix((1.0, 2.0, 4.0), (3.0, 0.0, 6.0)).t
val y = DenseVector(1.0, 2.0, 3.0)
val target = DenseVector(0.916667, -0.083333)
solveCompare(W, y, target)
}
private def solveCompare(W: DenseMatrix[Double], y: DenseVector[Double], target: DenseVector[Double]) = {
val WcopyY = W.copy \\ y.copy
val Wy = W \\ y.copy
assert(norm(Wy - WcopyY) < 1e-3)
assert(norm(Wy - target) < 1e-3)
}
test("#682 bug in slice outer product") {
val c = DenseVector(3, 1)
val a = DenseMatrix((3, 1), (-1, -2))
val rr = c * c.t
val yy = a(0, ::).t * a(0, ::)
assert(rr == yy)
}
// TODO: we should profile just copying to Double if we have BLAS (and even if we don't...)
// this is hilariously slow somehow.
test("large matrix multiply, int") {
val rI = DenseMatrix.rand[Int](2002, 2002, Rand.randInt(-3, 3))
val rD = convert(rI, Double)
assert((rI * rI).mapValues(_.toDouble) === (rD * rD))
}
test("#772 - weird copy bug") {
val rows = 3
val cols = 4
val data = Array[Double](1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
val dm = new DenseMatrix(rows, cols, data, 0, cols, isTranspose = true)
val sm = dm(2 until 3, 0 until 2)
assert(sm == sm.copy)
}
}
trait MatrixTestUtils {

  /**
   * Asserts element-wise that `A` and `B` agree to within `threshold`,
   * using ScalaTest's `+-` tolerance matcher for readable failure messages.
   */
  def matricesNearlyEqual(A: Matrix[Double], B: Matrix[Double], threshold: Double = 1E-6): Unit = {
    (0 until A.rows).foreach { i =>
      (0 until A.cols).foreach { j =>
        A(i, j) should be(B(i, j) +- threshold)
      }
    }
  }
}
| scalanlp/breeze | math/src/test/scala/breeze/linalg/DenseMatrixTest.scala | Scala | apache-2.0 | 27,860 |
package spark.metrics.sink
import com.codahale.metrics.{ConsoleReporter, MetricRegistry}
import java.util.Properties
import java.util.concurrent.TimeUnit
import spark.metrics.MetricsSystem
/**
 * A metrics sink that periodically dumps the given registry to the console
 * via DropWizard's ConsoleReporter. Polling period and unit come from the
 * supplied properties, falling back to 10 SECONDS.
 */
class ConsoleSink(val property: Properties, val registry: MetricRegistry) extends Sink {
  val CONSOLE_DEFAULT_PERIOD = 10
  val CONSOLE_DEFAULT_UNIT = "SECONDS"

  val CONSOLE_KEY_PERIOD = "period"
  val CONSOLE_KEY_UNIT = "unit"

  // "period" property: how often to report, in units of `pollUnit`.
  // Option(...) absorbs a missing (null) property; map/getOrElse replaces the
  // original Some/None pattern match on an Option.
  val pollPeriod = Option(property.getProperty(CONSOLE_KEY_PERIOD))
    .map(_.toInt)
    .getOrElse(CONSOLE_DEFAULT_PERIOD)

  // "unit" property: a TimeUnit constant name, case-insensitive.
  // Locale.ROOT makes the upper-casing locale-independent (e.g. under a Turkish
  // default locale, "minutes".toUpperCase() would not match "MINUTES").
  val pollUnit = Option(property.getProperty(CONSOLE_KEY_UNIT))
    .map(s => TimeUnit.valueOf(s.toUpperCase(java.util.Locale.ROOT)))
    .getOrElse(TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT))

  // Fail fast if the configured period is below the system-wide minimum.
  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: ConsoleReporter = ConsoleReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }
}
| vax11780/spark | core/src/main/scala/spark/metrics/sink/ConsoleSink.scala | Scala | apache-2.0 | 1,125 |
/*
* Copyright (C) 2014 - 2020 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.dfasdl.utils
/**
 * String constants for the element (tag) names used in DFASDL documents.
 * Centralised here so parsers and generators never hard-code tag strings.
 */
object ElementNames {
  // Binary data elements: raw, base64-encoded and hex-encoded.
  val BINARY = "bin"
  val BINARY_64 = "bin64"
  val BINARY_HEX = "binHex"
  val CHOICE = "choice"
  val CONSTANT = "const"
  val CUSTOM_ID = "cid"
  // Temporal elements.
  val DATE = "date"
  val DATETIME = "datetime"
  // Structural elements: choice branch, generic element, fixed-length sequence.
  val CHOICE_ELEMENT = "celem"
  val ELEMENT = "elem"
  val FIXED_SEQUENCE = "fixseq"
  // "Formatted" variants carry an explicit format description.
  val FORMATTED_NUMBER = "formatnum"
  val FORMATTED_STRING = "formatstr"
  val FORMATTED_TIME = "formattime"
  val NUMBER = "num"
  val REFERENCE = "ref"
  // Document root element of every DFASDL description.
  val ROOT = "dfasdl"
  val SCALA_EXPRESSION = "sxp"
  val SEQUENCE = "seq"
  val STRING = "str"
  val TIME = "time"
}
| DFASDL/dfasdl-utils | src/main/scala/org/dfasdl/utils/ElementNames.scala | Scala | agpl-3.0 | 1,377 |
package com.scalableminds.webknossos.tracingstore.tracings.skeleton.updating
import play.api.libs.json.{Json, OFormat}
// A node in a recursive tree-group hierarchy for skeleton tracing updates:
// each group has a display name, a numeric id, and nested child groups.
case class UpdateActionTreeGroup(name: String, groupId: Int, children: List[UpdateActionTreeGroup])

object UpdateActionTreeGroup {
  // Play JSON (de)serializer derived from the case class fields; handles the
  // recursive `children` field automatically.
  implicit val jsonFormat: OFormat[UpdateActionTreeGroup] = Json.format[UpdateActionTreeGroup]
}
| scalableminds/webknossos | webknossos-tracingstore/app/com/scalableminds/webknossos/tracingstore/tracings/skeleton/updating/UpdateActionTreeGroup.scala | Scala | agpl-3.0 | 350 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.receiver
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.scalatest.BeforeAndAfter
import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.SpanSugar._
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite}
import org.apache.spark.storage.StreamBlockId
import org.apache.spark.util.ManualClock
class BlockGeneratorSuite extends SparkFunSuite with BeforeAndAfter {
private val blockIntervalMs = 10
private val conf = new SparkConf().set("spark.streaming.blockInterval", s"${blockIntervalMs}ms")
@volatile private var blockGenerator: BlockGenerator = null
after {
if (blockGenerator != null) {
blockGenerator.stop()
}
}
  // End-to-end lifecycle test driven by a ManualClock: verifies that (1) construction and
  // start() fire no callbacks, (2) each of addData / addDataWithCallback /
  // addMultipleDataWithCallback triggers exactly the expected listener callbacks once the
  // clock is advanced past a block interval, and (3) stop() makes the generator unusable.
  test("block generation and data callbacks") {
    val listener = new TestBlockGeneratorListener
    val clock = new ManualClock()  // time only moves when the test calls advance()
    require(blockIntervalMs > 5)
    require(listener.onAddDataCalled === false)
    require(listener.onGenerateBlockCalled === false)
    require(listener.onPushBlockCalled === false)
    // Verify that creating the generator does not start it
    blockGenerator = new BlockGenerator(listener, 0, conf, clock)
    assert(blockGenerator.isActive() === false, "block generator active before start()")
    assert(blockGenerator.isStopped() === false, "block generator stopped before start()")
    assert(listener.onAddDataCalled === false)
    assert(listener.onGenerateBlockCalled === false)
    assert(listener.onPushBlockCalled === false)
    // Verify start marks the generator active, but does not call the callbacks
    blockGenerator.start()
    assert(blockGenerator.isActive() === true, "block generator active after start()")
    assert(blockGenerator.isStopped() === false, "block generator stopped after start()")
    withClue("callbacks called before adding data") {
      assert(listener.onAddDataCalled === false)
      assert(listener.onGenerateBlockCalled === false)
      assert(listener.onPushBlockCalled === false)
    }
    // Verify whether addData() adds data that is present in generated blocks
    val data1 = 1 to 10
    data1.foreach { blockGenerator.addData _ }
    withClue("callbacks called on adding data without metadata and without block generation") {
      assert(listener.onAddDataCalled === false) // should be called only with addDataWithCallback()
      assert(listener.onGenerateBlockCalled === false)
      assert(listener.onPushBlockCalled === false)
    }
    clock.advance(blockIntervalMs) // advance clock to generate blocks
    withClue("blocks not generated or pushed") {
      // Block generation happens on the generator's own timer thread, so poll.
      eventually(timeout(1 second)) {
        assert(listener.onGenerateBlockCalled === true)
        assert(listener.onPushBlockCalled === true)
      }
    }
    listener.pushedData.asScala.toSeq should contain theSameElementsInOrderAs (data1)
    assert(listener.onAddDataCalled === false) // should be called only with addDataWithCallback()
    // Verify addDataWithCallback() add data+metadata and callbacks are called correctly
    val data2 = 11 to 20
    val metadata2 = data2.map { _.toString }
    data2.zip(metadata2).foreach { case (d, m) => blockGenerator.addDataWithCallback(d, m) }
    assert(listener.onAddDataCalled === true)
    listener.addedData.asScala.toSeq should contain theSameElementsInOrderAs (data2)
    listener.addedMetadata.asScala.toSeq should contain theSameElementsInOrderAs (metadata2)
    clock.advance(blockIntervalMs) // advance clock to generate blocks
    eventually(timeout(1 second)) {
      val combined = data1 ++ data2
      listener.pushedData.asScala.toSeq should contain theSameElementsInOrderAs combined
    }
    // Verify addMultipleDataWithCallback() add data+metadata and callbacks are called correctly
    val data3 = 21 to 30
    val metadata3 = "metadata"  // a single metadata value covers the whole iterator
    blockGenerator.addMultipleDataWithCallback(data3.iterator, metadata3)
    val combinedMetadata = metadata2 :+ metadata3
    listener.addedMetadata.asScala.toSeq should contain theSameElementsInOrderAs (combinedMetadata)
    clock.advance(blockIntervalMs) // advance clock to generate blocks
    eventually(timeout(1 second)) {
      val combinedData = data1 ++ data2 ++ data3
      listener.pushedData.asScala.toSeq should contain theSameElementsInOrderAs (combinedData)
    }
    // Stop the block generator by starting the stop on a different thread and
    // then advancing the manual clock for the stopping to proceed.
    val thread = stopBlockGenerator(blockGenerator)
    eventually(timeout(1 second), interval(10 milliseconds)) {
      clock.advance(blockIntervalMs)
      assert(blockGenerator.isStopped() === true)
    }
    thread.join()
    // Verify that the generator cannot be used any more
    intercept[SparkException] {
      blockGenerator.addData(1)
    }
    intercept[SparkException] {
      blockGenerator.addDataWithCallback(1, 1)
    }
    intercept[SparkException] {
      blockGenerator.addMultipleDataWithCallback(Iterator(1), 1)
    }
    intercept[SparkException] {
      blockGenerator.start()
    }
    blockGenerator.stop() // Calling stop again should be fine
  }
  // Verifies the three-phase shutdown contract of BlockGenerator.stop():
  // stop receiving first, then generate one final block with the buffered data,
  // then push all remaining blocks before reporting the generator as stopped.
  test("stop ensures correct shutdown") {
    val listener = new TestBlockGeneratorListener
    val clock = new ManualClock()
    blockGenerator = new BlockGenerator(listener, 0, conf, clock)
    require(listener.onGenerateBlockCalled === false)
    blockGenerator.start()
    assert(blockGenerator.isActive() === true, "block generator")
    assert(blockGenerator.isStopped() === false)
    val data = 1 to 1000
    data.foreach { blockGenerator.addData _ }
    // Verify that stop() shutdowns everything in the right order
    // - First, stop receiving new data
    // - Second, wait for final block with all buffered data to be generated
    // - Finally, wait for all blocks to be pushed
    clock.advance(1) // make sure the timer still has another interval to complete
    val thread = stopBlockGenerator(blockGenerator)
    eventually(timeout(1 second), interval(10 milliseconds)) {
      assert(blockGenerator.isActive() === false)
    }
    assert(blockGenerator.isStopped() === false)
    // Verify that data cannot be added
    intercept[SparkException] {
      blockGenerator.addData(1)
    }
    intercept[SparkException] {
      blockGenerator.addDataWithCallback(1, null)
    }
    intercept[SparkException] {
      blockGenerator.addMultipleDataWithCallback(Iterator(1), null)
    }
    // Verify that stop() stays blocked until another block containing all the data is generated
    // This intercept always succeeds, as the body will either throw a timeout exception
    // (expected, because stop() should not yet be able to complete) or a SparkException
    // (unexpected, which would mean stop() completed and the thread terminated).
    val exception = intercept[Exception] {
      failAfter(200 milliseconds) {
        thread.join()
        throw new SparkException(
          "BlockGenerator.stop() completed before generating timer was stopped")
      }
    }
    exception should not be a [SparkException]
    // Verify that the final data is present in the final generated block and
    // pushed before complete stop
    assert(blockGenerator.isStopped() === false) // generator has not stopped yet
    eventually(timeout(10 seconds), interval(10 milliseconds)) {
      // Keep calling `advance` to avoid blocking forever in `clock.waitTillTime`
      clock.advance(blockIntervalMs)
      assert(thread.isAlive === false)
    }
    assert(blockGenerator.isStopped() === true) // generator has finally been completely stopped
    assert(listener.pushedData.asScala.toSeq === data, "All data not pushed by stop()")
  }
test("block push errors are reported") {
val listener = new TestBlockGeneratorListener {
override def onPushBlock(
blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[_]): Unit = {
throw new SparkException("test")
}
}
blockGenerator = new BlockGenerator(listener, 0, conf)
blockGenerator.start()
assert(listener.onErrorCalled === false)
blockGenerator.addData(1)
eventually(timeout(1 second), interval(10 milliseconds)) {
assert(listener.onErrorCalled === true)
}
blockGenerator.stop()
}
/**
* Helper method to stop the block generator with manual clock in a different thread,
* so that the main thread can advance the clock that allows the stopping to proceed.
*/
private def stopBlockGenerator(blockGenerator: BlockGenerator): Thread = {
val thread = new Thread() {
override def run(): Unit = {
blockGenerator.stop()
}
}
thread.start()
thread
}
  /**
   * A BlockGenerator listener that records everything it receives so tests can assert
   * on it. Queues are concurrent and flags are volatile because the callbacks fire on
   * the generator's internal threads while assertions run on the test thread.
   */
  private class TestBlockGeneratorListener extends BlockGeneratorListener {
    val pushedData = new ConcurrentLinkedQueue[Any]      // every item from every pushed block
    val addedData = new ConcurrentLinkedQueue[Any]       // data passed to onAddData
    val addedMetadata = new ConcurrentLinkedQueue[Any]   // metadata passed to onAddData
    @volatile var onGenerateBlockCalled = false
    @volatile var onAddDataCalled = false
    @volatile var onPushBlockCalled = false
    @volatile var onErrorCalled = false
    override def onPushBlock(blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[_]): Unit = {
      // Record the block's contents before setting the flag, so a test that saw the
      // flag observes the data too.
      pushedData.addAll(arrayBuffer.asJava)
      onPushBlockCalled = true
    }
    override def onError(message: String, throwable: Throwable): Unit = {
      onErrorCalled = true
    }
    override def onGenerateBlock(blockId: StreamBlockId): Unit = {
      onGenerateBlockCalled = true
    }
    override def onAddData(data: Any, metadata: Any): Unit = {
      addedData.add(data)
      addedMetadata.add(metadata)
      onAddDataCalled = true
    }
  }
}
| aokolnychyi/spark | streaming/src/test/scala/org/apache/spark/streaming/receiver/BlockGeneratorSuite.scala | Scala | apache-2.0 | 10,604 |
package org.jetbrains.plugins.scala.project.gradle
import java.util
import java.util.Collections
import com.intellij.openapi.externalSystem.model.DataNode
import com.intellij.openapi.externalSystem.model.project.ModuleData
import com.intellij.openapi.externalSystem.util.{ExternalSystemConstants, Order}
import org.gradle.tooling.model.idea.IdeaModule
import org.jetbrains.plugins.gradle.model.data.{ScalaCompileOptionsData, ScalaModelData}
import org.jetbrains.plugins.gradle.model.scala.{ScalaCompileOptions, ScalaForkOptions, ScalaModel}
import org.jetbrains.plugins.gradle.service.project.AbstractProjectResolverExtension
import org.jetbrains.plugins.gradle.util.GradleConstants
import org.jetbrains.plugins.scala.project.gradle.ScalaGradleProjectResolverExtension._
import scala.annotation.nowarn
@Order(ExternalSystemConstants.UNORDERED)
class ScalaGradleProjectResolverExtension extends AbstractProjectResolverExtension {

  /**
   * If the Gradle importer produced a `ScalaModel` for this module, converts it to the
   * external-system DTO and attaches it to the IDE module node, then delegates to the
   * next resolver in the chain.
   */
  override def populateModuleExtraModels(gradleModule: IdeaModule, ideModule: DataNode[ModuleData]): Unit = {
    val maybeScalaModel = Option(resolverCtx.getExtraProject(gradleModule, classOf[ScalaModel]))
    for (scalaModel <- maybeScalaModel) {
      ideModule.createChild(ScalaModelData.KEY, dataOf(scalaModel))
    }
    nextResolver.populateModuleExtraModels(gradleModule, ideModule)
  }

  /** The extra Gradle tooling models this extension requests from the Gradle daemon. */
  override def getExtraProjectModelClasses: util.Set[Class[_]] = Collections.singleton(classOf[ScalaModel])
}
@Order(ExternalSystemConstants.UNORDERED)
private object ScalaGradleProjectResolverExtension {

  /** Converts the Gradle tooling `ScalaModel` into the external-system data object. */
  private def dataOf(model: ScalaModel): ScalaModelData = {
    val data = new ScalaModelData(GradleConstants.SYSTEM_ID)
    data.setZincClasspath(model.getZincClasspath)
    data.setScalaClasspath(model.getScalaClasspath)
    data.setScalaCompileOptions(Option(model.getScalaCompileOptions).map(dataOf).orNull)
    data.setSourceCompatibility(model.getSourceCompatibility)
    data.setTargetCompatibility(model.getTargetCompatibility)
    data
  }

  /**
   * Copies the compiler options field-by-field. Several accessors are deprecated in
   * recent Gradle APIs but are still carried over for compatibility, hence the
   * targeted `@nowarn` annotations.
   */
  private[this] def dataOf(options: ScalaCompileOptions): ScalaCompileOptionsData = {
    val data = new ScalaCompileOptionsData
    data.setAdditionalParameters(options.getAdditionalParameters)
    data.setDaemonServer(options.getDaemonServer: @nowarn("cat=deprecation"))
    data.setDebugLevel(options.getDebugLevel)
    data.setDeprecation(options.isDeprecation)
    data.setEncoding(options.getEncoding)
    data.setFailOnError(options.isFailOnError)
    data.setForce(options.getForce)
    data.setFork(options.isFork: @nowarn("cat=deprecation"))
    data.setListFiles(options.isListFiles)
    data.setLoggingLevel(options.getLoggingLevel)
    // NOTE: the original code called setDebugLevel a second time here with the same
    // value; the redundant duplicate call has been removed.
    data.setLoggingPhases(options.getLoggingPhases)
    data.setOptimize(options.isOptimize)
    data.setUnchecked(options.isUnchecked)
    data.setUseAnt(options.isUseAnt: @nowarn("cat=deprecation"))
    data.setUseCompileDaemon(options.isUseCompileDaemon: @nowarn("cat=deprecation"))
    data.setForkOptions(Option(options.getForkOptions).map(dataOf).orNull)
    data
  }

  /** Copies the JVM fork options used when the Scala compiler runs in a forked process. */
  private[this] def dataOf(options: ScalaForkOptions): ScalaCompileOptionsData.ScalaForkOptions = {
    val data = new ScalaCompileOptionsData.ScalaForkOptions
    data.setJvmArgs(options.getJvmArgs)
    data.setMemoryInitialSize(options.getMemoryInitialSize)
    data.setMemoryMaximumSize(options.getMemoryMaximumSize)
    data
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree.impl
import java.io.IOException
import scala.collection.mutable
import scala.util.Random
import org.apache.spark.internal.Logging
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.impl.Utils
import org.apache.spark.ml.regression.DecisionTreeRegressionModel
import org.apache.spark.ml.tree._
import org.apache.spark.ml.util.Instrumentation
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy}
import org.apache.spark.mllib.tree.impurity.ImpurityCalculator
import org.apache.spark.mllib.tree.model.ImpurityStats
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom}
/**
* ALGORITHM
*
* This is a sketch of the algorithm to help new developers.
*
* The algorithm partitions data by instances (rows).
* On each iteration, the algorithm splits a set of nodes. In order to choose the best split
* for a given node, sufficient statistics are collected from the distributed data.
* For each node, the statistics are collected to some worker node, and that worker selects
* the best split.
*
* This setup requires discretization of continuous features. This binning is done in the
* findSplits() method during initialization, after which each continuous feature becomes
* an ordered discretized feature with at most maxBins possible values.
*
* The main loop in the algorithm operates on a queue of nodes (nodeStack). These nodes
* lie at the periphery of the tree being trained. If multiple trees are being trained at once,
* then this queue contains nodes from all of them. Each iteration works roughly as follows:
* On the master node:
* - Some number of nodes are pulled off of the queue (based on the amount of memory
* required for their sufficient statistics).
* - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate
* features are chosen for each node. See method selectNodesToSplit().
* On worker nodes, via method findBestSplits():
* - The worker makes one pass over its subset of instances.
* - For each (tree, node, feature, split) tuple, the worker collects statistics about
* splitting. Note that the set of (tree, node) pairs is limited to the nodes selected
* from the queue for this iteration. The set of features considered can also be limited
* based on featureSubsetStrategy.
* - For each node, the statistics for that node are aggregated to a particular worker
* via reduceByKey(). The designated worker chooses the best (feature, split) pair,
* or chooses to stop splitting if the stopping criteria are met.
* On the master node:
* - The master collects all decisions about splitting nodes and updates the model.
* - The updated model is passed to the workers on the next iteration.
* This process continues until the node queue is empty.
*
* Most of the methods in this implementation support the statistics aggregation, which is
* the heaviest part of the computation. In general, this implementation is bound by either
* the cost of statistics computation on workers or by communicating the sufficient statistics.
*/
private[spark] object RandomForest extends Logging with Serializable {
/**
* Train a random forest.
*
* @param input Training data: RDD of `LabeledPoint`
* @return an unweighted set of trees
*/
def run(
input: RDD[LabeledPoint],
strategy: OldStrategy,
numTrees: Int,
featureSubsetStrategy: String,
seed: Long): Array[DecisionTreeModel] = {
val instances = input.map { case LabeledPoint(label, features) =>
Instance(label, 1.0, features.asML)
}
run(instances, strategy, numTrees, featureSubsetStrategy, seed, None)
}
  /**
   * Train a random forest.
   *
   * @param input Training data: RDD of `Instance`
   * @param strategy Parameters for training each tree in the forest.
   * @param numTrees Number of trees to train in parallel.
   * @param featureSubsetStrategy Number of features to consider for splits at each node.
   * @param seed Random seed for bootstrapping and feature subsetting.
   * @param instr Optional instrumentation for recording training metadata.
   * @param prune Whether to prune redundant leaves; exposed for testing only,
   *              real trees are always pruned.
   * @param parentUID If defined, UID to assign to the produced tree models.
   * @return an unweighted set of trees
   */
  def run(
      input: RDD[Instance],
      strategy: OldStrategy,
      numTrees: Int,
      featureSubsetStrategy: String,
      seed: Long,
      instr: Option[Instrumentation],
      prune: Boolean = true, // exposed for testing only, real trees are always pruned
      parentUID: Option[String] = None): Array[DecisionTreeModel] = {
    val timer = new TimeTracker()
    timer.start("total")
    timer.start("init")
    val retaggedInput = input.retag(classOf[Instance])
    val metadata =
      DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy)
    instr match {
      case Some(instrumentation) =>
        instrumentation.logNumFeatures(metadata.numFeatures)
        instrumentation.logNumClasses(metadata.numClasses)
        instrumentation.logNumExamples(metadata.numExamples)
      case None =>
        logInfo("numFeatures: " + metadata.numFeatures)
        logInfo("numClasses: " + metadata.numClasses)
        logInfo("numExamples: " + metadata.numExamples)
    }
    // Find the splits and the corresponding bins (interval between the splits) using a sample
    // of the input data.
    timer.start("findSplits")
    val splits = findSplits(retaggedInput, metadata, seed)
    timer.stop("findSplits")
    logDebug("numBins: feature: number of bins")
    logDebug(Range(0, metadata.numFeatures).map { featureIndex =>
      s"\t$featureIndex\t${metadata.numBins(featureIndex)}"
    }.mkString("\n"))
    // Bin feature values (TreePoint representation).
    // Cache input RDD for speedup during multiple passes.
    val treeInput = TreePoint.convertToTreeRDD(retaggedInput, splits, metadata)
    // Sampling with replacement is only meaningful when bagging for multiple trees.
    val withReplacement = numTrees > 1
    val baggedInput = BaggedPoint
      .convertToBaggedRDD(treeInput, strategy.subsamplingRate, numTrees, withReplacement,
        (tp: TreePoint) => tp.weight, seed = seed)
      .persist(StorageLevel.MEMORY_AND_DISK)
    // depth of the decision tree
    val maxDepth = strategy.maxDepth
    require(maxDepth <= 30,
      s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.")
    // Max memory usage for aggregates
    // TODO: Calculate memory usage more precisely.
    val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L
    logDebug("max memory usage for aggregates = " + maxMemoryUsage + " bytes.")
    /*
     * The main idea here is to perform group-wise training of the decision tree nodes thus
     * reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup).
     * Each data sample is handled by a particular node (or it reaches a leaf and is not used
     * in lower levels).
     */
    // Create an RDD of node Id cache.
    // At first, all the rows belong to the root nodes (node Id == 1).
    val nodeIdCache = if (strategy.useNodeIdCache) {
      Some(NodeIdCache.init(
        data = baggedInput,
        numTrees = numTrees,
        checkpointInterval = strategy.checkpointInterval,
        initVal = 1))
    } else {
      None
    }
    /*
      Stack of nodes to train: (treeIndex, node)
      The reason this is a stack is that we train many trees at once, but we want to focus on
      completing trees, rather than training all simultaneously. If we are splitting nodes from
      1 tree, then the new nodes to split will be put at the top of this stack, so we will continue
      training the same tree in the next iteration. This focus allows us to send fewer trees to
      workers on each iteration; see topNodesForGroup below.
     */
    val nodeStack = new mutable.ArrayStack[(Int, LearningNode)]
    val rng = new Random()
    rng.setSeed(seed)
    // Allocate and queue root nodes.
    val topNodes = Array.fill[LearningNode](numTrees)(LearningNode.emptyNode(nodeIndex = 1))
    Range(0, numTrees).foreach(treeIndex => nodeStack.push((treeIndex, topNodes(treeIndex))))
    timer.stop("init")
    while (nodeStack.nonEmpty) {
      // Collect some nodes to split, and choose features for each node (if subsampling).
      // Each group of nodes may come from one or multiple trees, and at multiple levels.
      val (nodesForGroup, treeToNodeToIndexInfo) =
        RandomForest.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng)
      // Sanity check (should never occur):
      assert(nodesForGroup.nonEmpty,
        s"RandomForest selected empty nodesForGroup. Error for unknown reason.")
      // Only send trees to worker if they contain nodes being split this iteration.
      val topNodesForGroup: Map[Int, LearningNode] =
        nodesForGroup.keys.map(treeIdx => treeIdx -> topNodes(treeIdx)).toMap
      // Choose node splits, and enqueue new nodes as needed.
      timer.start("findBestSplits")
      RandomForest.findBestSplits(baggedInput, metadata, topNodesForGroup, nodesForGroup,
        treeToNodeToIndexInfo, splits, nodeStack, timer, nodeIdCache)
      timer.stop("findBestSplits")
    }
    baggedInput.unpersist()
    timer.stop("total")
    logInfo("Internal timing for DecisionTree:")
    logInfo(s"$timer")
    // Delete any remaining checkpoints used for node Id cache.
    if (nodeIdCache.nonEmpty) {
      try {
        nodeIdCache.get.deleteAllCheckpoints()
      } catch {
        case e: IOException =>
          logWarning(s"delete all checkpoints failed. Error reason: ${e.getMessage}")
      }
    }
    val numFeatures = metadata.numFeatures
    // Wrap the learned root nodes into model objects, with or without a caller-supplied UID.
    parentUID match {
      case Some(uid) =>
        if (strategy.algo == OldAlgo.Classification) {
          topNodes.map { rootNode =>
            new DecisionTreeClassificationModel(uid, rootNode.toNode(prune), numFeatures,
              strategy.getNumClasses)
          }
        } else {
          topNodes.map { rootNode =>
            new DecisionTreeRegressionModel(uid, rootNode.toNode(prune), numFeatures)
          }
        }
      case None =>
        if (strategy.algo == OldAlgo.Classification) {
          topNodes.map { rootNode =>
            new DecisionTreeClassificationModel(rootNode.toNode(prune), numFeatures,
              strategy.getNumClasses)
          }
        } else {
          topNodes.map(rootNode =>
            new DecisionTreeRegressionModel(rootNode.toNode(prune), numFeatures))
        }
    }
  }
  /**
   * Helper for binSeqOp, for data which can contain a mix of ordered and unordered features.
   *
   * For ordered features, a single bin is updated.
   * For unordered features, bins correspond to subsets of categories; either the left or right bin
   * for each subset is updated.
   *
   * @param agg Array storing aggregate calculation, with a set of sufficient statistics for
   *            each (feature, bin). Mutated in place.
   * @param treePoint Data point being aggregated.
   * @param splits Possible splits indexed (numFeatures)(numSplits)
   * @param unorderedFeatures Set of indices of unordered features.
   * @param numSamples Number of times this instance occurs in the sample.
   * @param sampleWeight Weight (importance) of instance in dataset.
   * @param featuresForNode If defined, the subsampled feature indices for this node;
   *                        otherwise all features are aggregated.
   */
  private def mixedBinSeqOp(
      agg: DTStatsAggregator,
      treePoint: TreePoint,
      splits: Array[Array[Split]],
      unorderedFeatures: Set[Int],
      numSamples: Int,
      sampleWeight: Double,
      featuresForNode: Option[Array[Int]]): Unit = {
    val numFeaturesPerNode = if (featuresForNode.nonEmpty) {
      // Use subsampled features
      featuresForNode.get.length
    } else {
      // Use all features
      agg.metadata.numFeatures
    }
    // Iterate over features.
    var featureIndexIdx = 0
    while (featureIndexIdx < numFeaturesPerNode) {
      // featureIndexIdx indexes into the aggregator; featureIndex is the dataset index.
      val featureIndex = if (featuresForNode.nonEmpty) {
        featuresForNode.get.apply(featureIndexIdx)
      } else {
        featureIndexIdx
      }
      if (unorderedFeatures.contains(featureIndex)) {
        // Unordered feature
        val featureValue = treePoint.binnedFeatures(featureIndex)
        val leftNodeFeatureOffset = agg.getFeatureOffset(featureIndexIdx)
        // Update the left or right bin for each split.
        val numSplits = agg.metadata.numSplits(featureIndex)
        val featureSplits = splits(featureIndex)
        var splitIndex = 0
        while (splitIndex < numSplits) {
          if (featureSplits(splitIndex).shouldGoLeft(featureValue, featureSplits)) {
            agg.featureUpdate(leftNodeFeatureOffset, splitIndex, treePoint.label, numSamples,
              sampleWeight)
          }
          splitIndex += 1
        }
      } else {
        // Ordered feature
        val binIndex = treePoint.binnedFeatures(featureIndex)
        agg.update(featureIndexIdx, binIndex, treePoint.label, numSamples, sampleWeight)
      }
      featureIndexIdx += 1
    }
  }
/**
* Helper for binSeqOp, for regression and for classification with only ordered features.
*
* For each feature, the sufficient statistics of one bin are updated.
*
* @param agg Array storing aggregate calculation, with a set of sufficient statistics for
* each (feature, bin).
* @param treePoint Data point being aggregated.
* @param numSamples Number of times this instance occurs in the sample.
* @param sampleWeight Weight (importance) of instance in dataset.
*/
private def orderedBinSeqOp(
agg: DTStatsAggregator,
treePoint: TreePoint,
numSamples: Int,
sampleWeight: Double,
featuresForNode: Option[Array[Int]]): Unit = {
val label = treePoint.label
// Iterate over features.
if (featuresForNode.nonEmpty) {
// Use subsampled features
var featureIndexIdx = 0
while (featureIndexIdx < featuresForNode.get.length) {
val binIndex = treePoint.binnedFeatures(featuresForNode.get.apply(featureIndexIdx))
agg.update(featureIndexIdx, binIndex, label, numSamples, sampleWeight)
featureIndexIdx += 1
}
} else {
// Use all features
val numFeatures = agg.metadata.numFeatures
var featureIndex = 0
while (featureIndex < numFeatures) {
val binIndex = treePoint.binnedFeatures(featureIndex)
agg.update(featureIndex, binIndex, label, numSamples, sampleWeight)
featureIndex += 1
}
}
}
  /**
   * Given a group of nodes, this finds the best split for each node.
   *
   * @param input Training data: RDD of [[TreePoint]]
   * @param metadata Learning and dataset metadata
   * @param topNodesForGroup For each tree in group, tree index -> root node.
   *                         Used for matching instances with nodes.
   * @param nodesForGroup Mapping: treeIndex --> nodes to be split in tree
   * @param treeToNodeToIndexInfo Mapping: treeIndex --> nodeIndex --> nodeIndexInfo,
   *                              where nodeIndexInfo stores the index in the group and the
   *                              feature subsets (if using feature subsets).
   * @param splits possible splits for all features, indexed (numFeatures)(numSplits)
   * @param nodeStack Queue of nodes to split, with values (treeIndex, node).
   *                  Updated with new non-leaf nodes which are created.
   * @param timer Tracks time spent in the split-choosing phase; defaults to a fresh tracker.
   * @param nodeIdCache Node Id cache containing an RDD of Array[Int] where
   *                    each value in the array is the data point's node Id
   *                    for a corresponding tree. This is used to prevent the need
   *                    to pass the entire tree to the executors during
   *                    the node stat aggregation phase.
   */
  private[tree] def findBestSplits(
      input: RDD[BaggedPoint[TreePoint]],
      metadata: DecisionTreeMetadata,
      topNodesForGroup: Map[Int, LearningNode],
      nodesForGroup: Map[Int, Array[LearningNode]],
      treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]],
      splits: Array[Array[Split]],
      nodeStack: mutable.ArrayStack[(Int, LearningNode)],
      timer: TimeTracker = new TimeTracker,
      nodeIdCache: Option[NodeIdCache] = None): Unit = {
    /*
     * The high-level descriptions of the best split optimizations are noted here.
     *
     * *Group-wise training*
     * We perform bin calculations for groups of nodes to reduce the number of
     * passes over the data. Each iteration requires more computation and storage,
     * but saves several iterations over the data.
     *
     * *Bin-wise computation*
     * We use a bin-wise best split computation strategy instead of a straightforward best split
     * computation strategy. Instead of analyzing each sample for contribution to the left/right
     * child node impurity of every split, we first categorize each feature of a sample into a
     * bin. We exploit this structure to calculate aggregates for bins and then use these aggregates
     * to calculate information gain for each split.
     *
     * *Aggregation over partitions*
     * Instead of performing a flatMap/reduceByKey operation, we exploit the fact that we know
     * the number of splits in advance. Thus, we store the aggregates (at the appropriate
     * indices) in a single array for all bins and rely upon the RDD aggregate method to
     * drastically reduce the communication overhead.
     */
    // numNodes:  Number of nodes in this group
    val numNodes = nodesForGroup.values.map(_.length).sum
    logDebug("numNodes = " + numNodes)
    logDebug("numFeatures = " + metadata.numFeatures)
    logDebug("numClasses = " + metadata.numClasses)
    logDebug("isMulticlass = " + metadata.isMulticlass)
    logDebug("isMulticlassWithCategoricalFeatures = " +
      metadata.isMulticlassWithCategoricalFeatures)
    logDebug("using nodeIdCache = " + nodeIdCache.nonEmpty.toString)
    /*
     * Performs a sequential aggregation over a partition for a particular tree and node.
     *
     * For each feature, the aggregate sufficient statistics are updated for the relevant
     * bins.
     *
     * @param treeIndex Index of the tree that we want to perform aggregation for.
     * @param nodeInfo The node info for the tree node.
     * @param agg Array storing aggregate calculation, with a set of sufficient statistics
     *            for each (node, feature, bin).
     * @param baggedPoint Data point being aggregated.
     */
    def nodeBinSeqOp(
        treeIndex: Int,
        nodeInfo: NodeIndexInfo,
        agg: Array[DTStatsAggregator],
        baggedPoint: BaggedPoint[TreePoint]): Unit = {
      // nodeInfo is null when this point's node is not being split in this group; skip it.
      if (nodeInfo != null) {
        val aggNodeIndex = nodeInfo.nodeIndexInGroup
        val featuresForNode = nodeInfo.featureSubset
        val numSamples = baggedPoint.subsampleCounts(treeIndex)
        val sampleWeight = baggedPoint.sampleWeight
        if (metadata.unorderedFeatures.isEmpty) {
          orderedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, numSamples, sampleWeight,
            featuresForNode)
        } else {
          mixedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, splits,
            metadata.unorderedFeatures, numSamples, sampleWeight, featuresForNode)
        }
        agg(aggNodeIndex).updateParent(baggedPoint.datum.label, numSamples, sampleWeight)
      }
    }
    /*
     * Performs a sequential aggregation over a partition.
     *
     * Each data point contributes to one node. For each feature,
     * the aggregate sufficient statistics are updated for the relevant bins.
     *
     * @param agg  Array storing aggregate calculation, with a set of sufficient statistics for
     *             each (node, feature, bin).
     * @param baggedPoint   Data point being aggregated.
     * @return  agg
     */
    def binSeqOp(
        agg: Array[DTStatsAggregator],
        baggedPoint: BaggedPoint[TreePoint]): Array[DTStatsAggregator] = {
      treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
        // Route the point down the current tree to find which node it belongs to.
        val nodeIndex =
          topNodesForGroup(treeIndex).predictImpl(baggedPoint.datum.binnedFeatures, splits)
        nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), agg, baggedPoint)
      }
      agg
    }
    /**
     * Do the same thing as binSeqOp, but with nodeIdCache: the point's node index is read
     * from the cache instead of being recomputed by traversing the tree.
     */
    def binSeqOpWithNodeIdCache(
        agg: Array[DTStatsAggregator],
        dataPoint: (BaggedPoint[TreePoint], Array[Int])): Array[DTStatsAggregator] = {
      treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
        val baggedPoint = dataPoint._1
        val nodeIdCache = dataPoint._2
        val nodeIndex = nodeIdCache(treeIndex)
        nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), agg, baggedPoint)
      }
      agg
    }
    /**
     * Get node index in group --> features indices map,
     * which is a short cut to find feature indices for a node given node index in group.
     * Returns None when feature subsampling is disabled (all features used at every node).
     */
    def getNodeToFeatures(
        treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]]): Option[Map[Int, Array[Int]]] = {
      if (!metadata.subsamplingFeatures) {
        None
      } else {
        val mutableNodeToFeatures = new mutable.HashMap[Int, Array[Int]]()
        treeToNodeToIndexInfo.values.foreach { nodeIdToNodeInfo =>
          nodeIdToNodeInfo.values.foreach { nodeIndexInfo =>
            assert(nodeIndexInfo.featureSubset.isDefined)
            mutableNodeToFeatures(nodeIndexInfo.nodeIndexInGroup) = nodeIndexInfo.featureSubset.get
          }
        }
        Some(mutableNodeToFeatures.toMap)
      }
    }
    // array of nodes to train indexed by node index in group
    val nodes = new Array[LearningNode](numNodes)
    nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
      nodesForTree.foreach { node =>
        nodes(treeToNodeToIndexInfo(treeIndex)(node.id).nodeIndexInGroup) = node
      }
    }
    // Calculate best splits for all nodes in the group
    timer.start("chooseSplits")
    // In each partition, iterate all instances and compute aggregate stats for each node,
    // yield a (nodeIndex, nodeAggregateStats) pair for each node.
    // After a `reduceByKey` operation,
    // stats of a node will be shuffled to a particular partition and be combined together,
    // then best splits for nodes are found there.
    // Finally, only best Splits for nodes are collected to driver to construct decision tree.
    val nodeToFeatures = getNodeToFeatures(treeToNodeToIndexInfo)
    val nodeToFeaturesBc = input.sparkContext.broadcast(nodeToFeatures)
    val partitionAggregates: RDD[(Int, DTStatsAggregator)] = if (nodeIdCache.nonEmpty) {
      input.zip(nodeIdCache.get.nodeIdsForInstances).mapPartitions { points =>
        // Construct a nodeStatsAggregators array to hold node aggregate stats,
        // each node will have a nodeStatsAggregator
        val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
          val featuresForNode = nodeToFeaturesBc.value.map { nodeToFeatures =>
            nodeToFeatures(nodeIndex)
          }
          new DTStatsAggregator(metadata, featuresForNode)
        }
        // iterator all instances in current partition and update aggregate stats
        points.foreach(binSeqOpWithNodeIdCache(nodeStatsAggregators, _))
        // transform nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
        // which can be combined with other partition using `reduceByKey`
        nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
      }
    } else {
      input.mapPartitions { points =>
        // Construct a nodeStatsAggregators array to hold node aggregate stats,
        // each node will have a nodeStatsAggregator
        val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
          val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
            Some(nodeToFeatures(nodeIndex))
          }
          new DTStatsAggregator(metadata, featuresForNode)
        }
        // iterator all instances in current partition and update aggregate stats
        points.foreach(binSeqOp(nodeStatsAggregators, _))
        // transform nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
        // which can be combined with other partition using `reduceByKey`
        nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
      }
    }
    val nodeToBestSplits = partitionAggregates.reduceByKey((a, b) => a.merge(b)).map {
      case (nodeIndex, aggStats) =>
        val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
          Some(nodeToFeatures(nodeIndex))
        }
        // find best split for each node
        val (split: Split, stats: ImpurityStats) =
          binsToBestSplit(aggStats, splits, featuresForNode, nodes(nodeIndex))
        (nodeIndex, (split, stats))
    }.collectAsMap()
    timer.stop("chooseSplits")
    val nodeIdUpdaters = if (nodeIdCache.nonEmpty) {
      Array.fill[mutable.Map[Int, NodeIndexUpdater]](
        metadata.numTrees)(mutable.Map[Int, NodeIndexUpdater]())
    } else {
      null
    }
    // Iterate over all nodes in this group.
    nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
      nodesForTree.foreach { node =>
        val nodeIndex = node.id
        val nodeInfo = treeToNodeToIndexInfo(treeIndex)(nodeIndex)
        val aggNodeIndex = nodeInfo.nodeIndexInGroup
        val (split: Split, stats: ImpurityStats) =
          nodeToBestSplits(aggNodeIndex)
        logDebug("best split = " + split)
        // Extract info for this node.  Create children if not leaf.
        val isLeaf =
          (stats.gain <= 0) || (LearningNode.indexToLevel(nodeIndex) == metadata.maxDepth)
        node.isLeaf = isLeaf
        node.stats = stats
        logDebug("Node = " + node)
        if (!isLeaf) {
          node.split = Some(split)
          val childIsLeaf = (LearningNode.indexToLevel(nodeIndex) + 1) == metadata.maxDepth
          val leftChildIsLeaf = childIsLeaf || (math.abs(stats.leftImpurity) < Utils.EPSILON)
          val rightChildIsLeaf = childIsLeaf || (math.abs(stats.rightImpurity) < Utils.EPSILON)
          node.leftChild = Some(LearningNode(LearningNode.leftChildIndex(nodeIndex),
            leftChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.leftImpurityCalculator)))
          node.rightChild = Some(LearningNode(LearningNode.rightChildIndex(nodeIndex),
            rightChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.rightImpurityCalculator)))
          if (nodeIdCache.nonEmpty) {
            val nodeIndexUpdater = NodeIndexUpdater(
              split = split,
              nodeIndex = nodeIndex)
            nodeIdUpdaters(treeIndex).put(nodeIndex, nodeIndexUpdater)
          }
          // enqueue left child and right child if they are not leaves
          if (!leftChildIsLeaf) {
            nodeStack.push((treeIndex, node.leftChild.get))
          }
          if (!rightChildIsLeaf) {
            nodeStack.push((treeIndex, node.rightChild.get))
          }
          logDebug("leftChildIndex = " + node.leftChild.get.id +
            ", impurity = " + stats.leftImpurity)
          logDebug("rightChildIndex = " + node.rightChild.get.id +
            ", impurity = " + stats.rightImpurity)
        }
      }
    }
    if (nodeIdCache.nonEmpty) {
      // Update the cache if needed.
      nodeIdCache.get.updateNodeIndices(input, nodeIdUpdaters, splits)
    }
  }
/**
 * Computes the impurity statistics for a single (feature, split) candidate from the
 * impurity aggregates of its prospective left and right children.
 *
 * @param stats recycled impurity statistics for this feature's splits; only `impurity`
 *              and `impurityCalculator` are valid between iterations, and it may be
 *              null for the first split evaluated at a node
 * @param leftImpurityCalculator left child aggregates for this (feature, split)
 * @param rightImpurityCalculator right child aggregates for this (feature, split)
 * @param metadata learning and dataset metadata for the decision tree
 * @return impurity statistics for this (feature, split)
 */
private def calculateImpurityStats(
    stats: ImpurityStats,
    leftImpurityCalculator: ImpurityCalculator,
    rightImpurityCalculator: ImpurityCalculator,
    metadata: DecisionTreeMetadata): ImpurityStats = {
  // Reuse the parent node's impurity info when it was already computed on a prior
  // call; otherwise derive it by merging a copy of the two child aggregates.
  val (parentImpurityCalculator, impurity) =
    if (stats == null) {
      val parent = leftImpurityCalculator.copy.add(rightImpurityCalculator)
      (parent, parent.calculate())
    } else {
      (stats.impurityCalculator, stats.impurity)
    }

  val leftCount = leftImpurityCalculator.count
  val rightCount = rightImpurityCalculator.count
  val totalCount = leftCount + rightCount

  // Reject the split if either child would fall below the minimum raw instance
  // count or the minimum total instance weight.
  val tooFewInstances =
    leftImpurityCalculator.rawCount < metadata.minInstancesPerNode ||
      rightImpurityCalculator.rawCount < metadata.minInstancesPerNode
  val tooLittleWeight =
    leftCount < metadata.minWeightPerNode || rightCount < metadata.minWeightPerNode

  if (tooFewInstances || tooLittleWeight) {
    ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)
  } else {
    val leftImpurity = leftImpurityCalculator.calculate() // Note: this equals 0 if count = 0
    val rightImpurity = rightImpurityCalculator.calculate()
    // Information gain = parent impurity minus the weight-proportional child impurities.
    val gain = impurity -
      (leftCount / totalCount.toDouble) * leftImpurity -
      (rightCount / totalCount.toDouble) * rightImpurity
    // The split must also clear the configured minimum information gain.
    if (gain < metadata.minInfoGain) {
      ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)
    } else {
      new ImpurityStats(gain, impurity, parentImpurityCalculator,
        leftImpurityCalculator, rightImpurityCalculator)
    }
  }
}
/**
 * Find the best split for a node.
 *
 * @param binAggregates Bin statistics.
 * @param splits candidate splits, indexed as splits(featureIndex)(splitIndex)
 * @param featuresForNode subsampled feature indices for this node, or None to use all features
 * @param node the node for which the best split is being computed
 * @return tuple of the best split and its impurity statistics
 */
private[tree] def binsToBestSplit(
binAggregates: DTStatsAggregator,
splits: Array[Array[Split]],
featuresForNode: Option[Array[Int]],
node: LearningNode): (Split, ImpurityStats) = {
// Calculate InformationGain and ImpurityStats if current node is top node
val level = LearningNode.indexToLevel(node.id)
// gainAndImpurityStats is recycled across candidate splits: calculateImpurityStats
// reuses its parent impurity fields so they are not recomputed for every split.
var gainAndImpurityStats: ImpurityStats = if (level == 0) {
null
} else {
node.stats
}
// Lazy view of (index-in-node, global feature index) pairs; features with zero
// splits are filtered out before any gain computation happens.
val validFeatureSplits =
Range(0, binAggregates.metadata.numFeaturesPerNode).view.map { featureIndexIdx =>
featuresForNode.map(features => (featureIndexIdx, features(featureIndexIdx)))
.getOrElse((featureIndexIdx, featureIndexIdx))
}.withFilter { case (_, featureIndex) =>
binAggregates.metadata.numSplits(featureIndex) != 0
}
// For each (feature, split), calculate the gain, and select the best (feature, split).
val splitsAndImpurityInfo =
validFeatureSplits.map { case (featureIndexIdx, featureIndex) =>
val numSplits = binAggregates.metadata.numSplits(featureIndex)
if (binAggregates.metadata.isContinuous(featureIndex)) {
// Cumulative sum (scanLeft) of bin statistics.
// Afterwards, binAggregates for a bin is the sum of aggregates for
// that bin + all preceding bins.
val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx)
var splitIndex = 0
while (splitIndex < numSplits) {
binAggregates.mergeForFeature(nodeFeatureOffset, splitIndex + 1, splitIndex)
splitIndex += 1
}
// Find best split.
val (bestFeatureSplitIndex, bestFeatureGainStats) =
Range(0, numSplits).map { case splitIdx =>
val leftChildStats =
binAggregates.getImpurityCalculator(nodeFeatureOffset, splitIdx)
// After the merge loop, the last bin holds the node total; subtracting the
// left cumulative sum (in place) yields the right child's statistics.
val rightChildStats =
binAggregates.getImpurityCalculator(nodeFeatureOffset, numSplits)
rightChildStats.subtract(leftChildStats)
gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats,
leftChildStats, rightChildStats, binAggregates.metadata)
(splitIdx, gainAndImpurityStats)
}.maxBy(_._2.gain)
(splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats)
} else if (binAggregates.metadata.isUnordered(featureIndex)) {
// Unordered categorical feature
val leftChildOffset = binAggregates.getFeatureOffset(featureIndexIdx)
val (bestFeatureSplitIndex, bestFeatureGainStats) =
Range(0, numSplits).map { splitIndex =>
val leftChildStats = binAggregates.getImpurityCalculator(leftChildOffset, splitIndex)
// Right child = parent aggregates minus left child aggregates.
val rightChildStats = binAggregates.getParentImpurityCalculator()
.subtract(leftChildStats)
gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats,
leftChildStats, rightChildStats, binAggregates.metadata)
(splitIndex, gainAndImpurityStats)
}.maxBy(_._2.gain)
(splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats)
} else {
// Ordered categorical feature
val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx)
val numCategories = binAggregates.metadata.numBins(featureIndex)
/* Each bin is one category (feature value).
* The bins are ordered based on centroidForCategories, and this ordering determines which
* splits are considered. (With K categories, we consider K - 1 possible splits.)
*
* centroidForCategories is a list: (category, centroid)
*/
val centroidForCategories = Range(0, numCategories).map { case featureValue =>
val categoryStats =
binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue)
val centroid = if (categoryStats.count != 0) {
if (binAggregates.metadata.isMulticlass) {
// multiclass classification
// For categorical variables in multiclass classification,
// the bins are ordered by the impurity of their corresponding labels.
categoryStats.calculate()
} else if (binAggregates.metadata.isClassification) {
// binary classification
// For categorical variables in binary classification,
// the bins are ordered by the count of class 1.
categoryStats.stats(1)
} else {
// regression
// For categorical variables in regression and binary classification,
// the bins are ordered by the prediction.
categoryStats.predict
}
} else {
// Empty categories sort last so they never split off on their own.
Double.MaxValue
}
(featureValue, centroid)
}
logDebug("Centroids for categorical variable: " + centroidForCategories.mkString(","))
// bins sorted by centroids
val categoriesSortedByCentroid = centroidForCategories.toList.sortBy(_._2)
logDebug("Sorted centroids for categorical variable = " +
categoriesSortedByCentroid.mkString(","))
// Cumulative sum (scanLeft) of bin statistics.
// Afterwards, binAggregates for a bin is the sum of aggregates for
// that bin + all preceding bins.
var splitIndex = 0
while (splitIndex < numSplits) {
val currentCategory = categoriesSortedByCentroid(splitIndex)._1
val nextCategory = categoriesSortedByCentroid(splitIndex + 1)._1
binAggregates.mergeForFeature(nodeFeatureOffset, nextCategory, currentCategory)
splitIndex += 1
}
// lastCategory = index of bin with total aggregates for this (node, feature)
val lastCategory = categoriesSortedByCentroid.last._1
// Find best split.
val (bestFeatureSplitIndex, bestFeatureGainStats) =
Range(0, numSplits).map { splitIndex =>
val featureValue = categoriesSortedByCentroid(splitIndex)._1
val leftChildStats =
binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue)
val rightChildStats =
binAggregates.getImpurityCalculator(nodeFeatureOffset, lastCategory)
rightChildStats.subtract(leftChildStats)
gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats,
leftChildStats, rightChildStats, binAggregates.metadata)
(splitIndex, gainAndImpurityStats)
}.maxBy(_._2.gain)
// The chosen split keeps the categories with the smallest centroids on the left.
val categoriesForSplit =
categoriesSortedByCentroid.map(_._1.toDouble).slice(0, bestFeatureSplitIndex + 1)
val bestFeatureSplit =
new CategoricalSplit(featureIndex, categoriesForSplit.toArray, numCategories)
(bestFeatureSplit, bestFeatureGainStats)
}
}
// maxBy below forces the lazy view; fall back to an explicit invalid split when
// no feature produced any candidate.
val (bestSplit, bestSplitStats) =
if (splitsAndImpurityInfo.isEmpty) {
// If no valid splits for features, then this split is invalid,
// return invalid information gain stats. Take any split and continue.
// Splits is empty, so arbitrarily choose to split on any threshold
val dummyFeatureIndex = featuresForNode.map(_.head).getOrElse(0)
val parentImpurityCalculator = binAggregates.getParentImpurityCalculator()
if (binAggregates.metadata.isContinuous(dummyFeatureIndex)) {
(new ContinuousSplit(dummyFeatureIndex, 0),
ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator))
} else {
val numCategories = binAggregates.metadata.featureArity(dummyFeatureIndex)
(new CategoricalSplit(dummyFeatureIndex, Array(), numCategories),
ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator))
}
} else {
splitsAndImpurityInfo.maxBy(_._2.gain)
}
(bestSplit, bestSplitStats)
}
/**
 * Returns the candidate splits used for decision tree training.
 * Continuous and categorical features are handled differently.
 *
 * Continuous features:
 * Each feature has numBins - 1 possible thresholds, located using a subsample
 * of the data.
 *
 * Categorical features:
 * For each feature, there is 1 bin per split, handled in 2 ways:
 * (a) "unordered features": for multiclass classification with a low-arity feature
 *     (isMulticlass && isSpaceSufficientForAllCategoricalSplits), the feature is
 *     split on subsets of categories;
 * (b) "ordered features": for regression, binary classification, and high-arity
 *     multiclass features, there is one bin per category.
 *
 * @param input training data: RDD of [[Instance]]
 * @param metadata learning and dataset metadata
 * @param seed random seed used for sampling
 * @return splits, an Array of [[Split]] of size (numFeatures, numSplits)
 */
protected[tree] def findSplits(
    input: RDD[Instance],
    metadata: DecisionTreeMetadata,
    seed: Long): Array[Array[Split]] = {
  logDebug("isMulticlass = " + metadata.isMulticlass)
  val continuousFeatures = (0 until metadata.numFeatures).filter(metadata.isContinuous)
  // Only continuous features need quantile estimation, so the input is sampled
  // exclusively when at least one such feature exists.
  val sampledInput =
    if (continuousFeatures.isEmpty) {
      input.sparkContext.emptyRDD[Instance]
    } else {
      val fraction = samplesFractionForFindSplits(metadata)
      logDebug("fraction of data used for calculating quantiles = " + fraction)
      input.sample(withReplacement = false, fraction, new XORShiftRandom(seed).nextInt())
    }
  findSplitsBySorting(sampledInput, metadata, continuousFeatures)
}
/**
 * Computes all split candidates, shuffling each continuous feature's samples to a
 * single task where its thresholds are found, and enumerating categorical splits
 * directly from the metadata.
 */
private def findSplitsBySorting(
    input: RDD[Instance],
    metadata: DecisionTreeMetadata,
    continuousFeatures: IndexedSeq[Int]): Array[Array[Split]] = {
  // Thresholds per continuous feature, keyed by feature index.
  val continuousSplits: scala.collection.Map[Int, Array[Split]] = {
    // Cap the parallelism at the number of continuous features so no task is
    // spun up that is guaranteed to have no work.
    val numPartitions = math.min(continuousFeatures.length, input.partitions.length)
    input
      .flatMap { point =>
        continuousFeatures
          .map(idx => (idx, (point.weight, point.features(idx))))
          .filter(_._2._2 != 0.0) // zero values are re-added statistically downstream
      }
      .groupByKey(numPartitions)
      .map { case (idx, samples) =>
        val thresholds = findSplitsForContinuousFeature(samples, metadata, idx)
        val featureSplits: Array[Split] =
          thresholds.map(thresh => new ContinuousSplit(idx, thresh))
        logDebug(s"featureIndex = $idx, numSplits = ${featureSplits.length}")
        (idx, featureSplits)
      }
      .collectAsMap()
  }

  val numFeatures = metadata.numFeatures
  Array.tabulate[Array[Split]](numFeatures) {
    case i if metadata.isContinuous(i) =>
      // A feature whose sampled values were all zero has no entry in continuousSplits.
      val featureSplits = continuousSplits.getOrElse(i, Array.empty[Split])
      metadata.setNumSplits(i, featureSplits.length)
      featureSplits
    case i if metadata.isCategorical(i) && metadata.isUnordered(i) =>
      // Unordered categorical: enumerate 2^(featureArity - 1) - 1 category subsets,
      // decoding each subset from the bit pattern of its split index.
      val featureArity = metadata.featureArity(i)
      Array.tabulate[Split](metadata.numSplits(i)) { splitIndex =>
        val categories = extractMultiClassCategories(splitIndex + 1, featureArity)
        new CategoricalSplit(i, categories.toArray, featureArity)
      }
    case i if metadata.isCategorical(i) =>
      // Ordered categorical: splits are constructed as needed during training.
      Array.empty[Split]
  }
}
/**
 * Decodes the set of categories encoded in the low-order bits of `input`.
 *
 * Bit j being set (for j in [0, maxFeatureValue)) means category j is included.
 * For example, input 13 (binary 01101) yields List(3.0, 2.0, 0.0).
 *
 * @param input bitmask selecting the categories
 * @param maxFeatureValue number of rightmost bits to examine
 * @return selected categories as Doubles, in descending order
 */
private[tree] def extractMultiClassCategories(
    input: Int,
    maxFeatureValue: Int): List[Double] = {
  // Prepending while scanning bits in ascending order leaves the list in
  // descending order, matching the original accumulation.
  (0 until maxFeatureValue).foldLeft(List.empty[Double]) { (categories, j) =>
    if (((input >> j) & 1) == 1) j.toDouble :: categories else categories
  }
}
/**
 * Find splits for a continuous feature
 * NOTE: Returned number of splits is set based on `featureSamples` and
 * could be different from the specified `numSplits`.
 * The `numSplits` attribute in the `DecisionTreeMetadata` class will be set accordingly.
 *
 * @param featureSamples (sampleWeight, featureValue) pairs for each sampled instance
 * @param metadata decision tree metadata
 * NOTE: `metadata.numBins` will be changed accordingly
 * if there are not enough splits to be found
 * @param featureIndex feature index to find splits
 * @return array of split thresholds
 */
private[tree] def findSplitsForContinuousFeature(
featureSamples: Iterable[(Double, Double)],
metadata: DecisionTreeMetadata,
featureIndex: Int): Array[Double] = {
require(metadata.isContinuous(featureIndex),
"findSplitsForContinuousFeature can only be used to find splits for a continuous feature.")
val splits: Array[Double] = if (featureSamples.isEmpty) {
Array.empty[Double]
} else {
val numSplits = metadata.numSplits(featureIndex)
// get count for each distinct value except zero value
// (zero values were filtered upstream and are re-added below as an expected count)
val partValueCountMap = mutable.Map[Double, Double]()
var partNumSamples = 0.0
var unweightedNumSamples = 0.0
featureSamples.foreach { case (sampleWeight, feature) =>
partValueCountMap(feature) = partValueCountMap.getOrElse(feature, 0.0) + sampleWeight;
partNumSamples += sampleWeight;
unweightedNumSamples += 1.0
}
// Calculate the expected number of samples for finding splits
val weightedNumSamples = samplesFractionForFindSplits(metadata) *
metadata.weightedNumExamples
// add expected zero value count and get complete statistics
// (the tolerance guards against floating-point noise in the weight sums)
val tolerance = Utils.EPSILON * unweightedNumSamples * unweightedNumSamples
val valueCountMap = if (weightedNumSamples - partNumSamples > tolerance) {
partValueCountMap + (0.0 -> (weightedNumSamples - partNumSamples))
} else {
partValueCountMap
}
// sort distinct values
val valueCounts = valueCountMap.toSeq.sortBy(_._1).toArray
// A threshold can only lie between two adjacent distinct values.
val possibleSplits = valueCounts.length - 1
if (possibleSplits == 0) {
// constant feature
Array.empty[Double]
} else if (possibleSplits <= numSplits) {
// if possible splits is not enough or just enough, just return all possible splits
// (each threshold is the midpoint of two adjacent distinct values)
(1 to possibleSplits)
.map(index => (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0)
.toArray
} else {
// stride between splits
val stride: Double = weightedNumSamples / (numSplits + 1)
logDebug("stride = " + stride)
// iterate `valueCount` to find splits
val splitsBuilder = mutable.ArrayBuilder.make[Double]
var index = 1
// currentCount: sum of counts of values that have been visited
var currentCount = valueCounts(0)._2
// targetCount: target value for `currentCount`.
// If `currentCount` is closest value to `targetCount`,
// then current value is a split threshold.
// After finding a split threshold, `targetCount` is added by stride.
var targetCount = stride
while (index < valueCounts.length) {
val previousCount = currentCount
currentCount += valueCounts(index)._2
val previousGap = math.abs(previousCount - targetCount)
val currentGap = math.abs(currentCount - targetCount)
// If adding count of current value to currentCount
// makes the gap between currentCount and targetCount smaller,
// previous value is a split threshold.
if (previousGap < currentGap) {
splitsBuilder += (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0
targetCount += stride
}
index += 1
}
splitsBuilder.result()
}
}
splits
}
/**
 * Per-node bookkeeping for one training group iteration.
 *
 * @param nodeIndexInGroup index of this node within [0, numNodesInGroup) for the group
 * @param featureSubset indices of the features sampled for this node, or None when
 *                      all features are candidates (no feature subsampling)
 */
private[tree] class NodeIndexInfo(
val nodeIndexInGroup: Int,
val featureSubset: Option[Array[Int]]) extends Serializable
/**
 * Pops nodes off of the queue to form the group of nodes split in this iteration.
 * Memory usage for the aggregate statistics is tracked and nodes stop being added
 * once the budget would be exceeded; the group size is therefore adaptive, since
 * different nodes may require different amounts of memory (when
 * featureSubsetStrategy is not "all").
 *
 * @param nodeStack queue of (treeIndex, node) pairs still to be split
 * @param maxMemoryUsage bound on the size of aggregate statistics, in bytes
 * @param metadata learning and dataset metadata
 * @param rng random number generator used for feature subsampling
 * @return (nodesForGroup, treeToNodeToIndexInfo).
 *         nodesForGroup holds the nodes to split: treeIndex --> nodes in tree.
 *         treeToNodeToIndexInfo holds the selected features for each node:
 *         treeIndex --> (global) node index --> (node index in group, feature indices).
 *         The (global) node index is the index in the tree; the node index in group
 *         is the index in [0, numNodesInGroup). Feature indices are None when the
 *         features are not subsampled.
 */
private[tree] def selectNodesToSplit(
    nodeStack: mutable.ArrayStack[(Int, LearningNode)],
    maxMemoryUsage: Long,
    metadata: DecisionTreeMetadata,
    rng: Random): (Map[Int, Array[LearningNode]], Map[Int, Map[Int, NodeIndexInfo]]) = {
  // Accumulators for the group: treeIndex -> nodes, and treeIndex -> node id -> info.
  val groupNodes = new mutable.HashMap[Int, mutable.ArrayBuffer[LearningNode]]()
  val groupIndexInfo = new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]()
  var usedMemory: Long = 0L
  var groupSize = 0
  var groupFull = false
  // Even with a tiny maxMemoryUsage we still admit at least one node
  // (the usedMemory == 0 clause below).
  while (!groupFull && nodeStack.nonEmpty) {
    val (treeIndex, node) = nodeStack.top
    // Choose the feature subset for this node (if subsampling). Note this draws
    // from rng before the memory check, exactly as before.
    val featureSubset: Option[Array[Int]] =
      if (metadata.subsamplingFeatures) {
        Some(SamplingUtils.reservoirSampleAndCount(Range(0,
          metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong())._1)
      } else {
        None
      }
    // Estimated bytes of aggregate statistics this node would add.
    val nodeMemUsage = RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L
    if (usedMemory == 0 || usedMemory + nodeMemUsage <= maxMemoryUsage) {
      nodeStack.pop()
      groupNodes.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[LearningNode]()) += node
      groupIndexInfo
        .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())
        .put(node.id, new NodeIndexInfo(groupSize, featureSubset))
      groupSize += 1
      usedMemory += nodeMemUsage
    } else {
      groupFull = true
    }
  }
  if (usedMemory > maxMemoryUsage) {
    // Only possible when a single node already exceeds the budget.
    logWarning(s"Tree learning is using approximately $usedMemory bytes per iteration, which" +
      s" exceeds requested limit maxMemoryUsage=$maxMemoryUsage. This allows splitting" +
      s" $groupSize nodes in this iteration.")
  }
  // Convert mutable maps to immutable ones.
  val nodesForGroup: Map[Int, Array[LearningNode]] = groupNodes.mapValues(_.toArray).toMap
  val treeToNodeToIndexInfo = groupIndexInfo.mapValues(_.toMap).toMap
  (nodesForGroup, treeToNodeToIndexInfo)
}
/**
 * Get the number of values to be stored for this node in the bin aggregates.
 *
 * @param featureSubset indices of the features which may be split at this node;
 *                      None means all features are candidates
 * @return number of aggregate values needed for this node
 */
private def aggregateSizeForNode(
    metadata: DecisionTreeMetadata,
    featureSubset: Option[Array[Int]]): Long = {
  // Total bin count over the candidate features only.
  val totalBins = featureSubset match {
    case Some(features) => features.map(i => metadata.numBins(i).toLong).sum
    case None => metadata.numBins.map(_.toLong).sum
  }
  if (metadata.isClassification) {
    // One value per class per bin.
    metadata.numClasses * totalBins
  } else {
    // Three aggregate values per bin for regression.
    3 * totalBins
  }
}
/**
 * Calculate the subsample fraction used when locating split thresholds.
 *
 * @param metadata decision tree metadata
 * @return fraction in (0, 1] of the examples to sample
 */
private def samplesFractionForFindSplits(
    metadata: DecisionTreeMetadata): Double = {
  // Target enough samples for approximate quantile calculation, never fewer than
  // 10000, but never more than the dataset actually contains.
  val requiredSamples = math.max(metadata.maxBins * metadata.maxBins, 10000)
  if (requiredSamples >= metadata.numExamples) 1.0
  else requiredSamples.toDouble / metadata.numExamples
}
}
| hhbyyh/spark | mllib/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest.scala | Scala | apache-2.0 | 52,973 |
package com.enkidu.lignum.parsers.java.v8
import com.enkidu.lignum.parsers.ast.expression.types.Type
import com.enkidu.lignum.parsers.ast.expression.{ArrayInitializer, Expression}
import com.enkidu.lignum.parsers.ast.expression.discardable.binary._
import com.enkidu.lignum.parsers.ast.expression.discardable.binary.assignment.{Binding, AugmentedBinding, Assignment}
import com.enkidu.lignum.parsers.ast.expression.discardable.dimension.{AbstractDimension, InitializedDimension, Dimension}
import com.enkidu.lignum.parsers.ast.expression.discardable.instantiation._
import com.enkidu.lignum.parsers.ast.expression.discardable._
import com.enkidu.lignum.parsers.ast.expression.discardable.literals._
import com.enkidu.lignum.parsers.ast.expression.discardable.unary._
import com.enkidu.lignum.parsers.ast.expression.operators._
import com.enkidu.lignum.parsers.ast.expression.types.annotations._
import com.enkidu.lignum.parsers.ast.expression.types.coupled.ChildOfAll
import com.enkidu.lignum.parsers.ast.expression.types.references._
import com.enkidu.lignum.parsers.ast.expression.types.templates._
import com.enkidu.lignum.parsers.ast.statement.declaration.members._
import com.enkidu.lignum.parsers.ast.statement.declaration.types.{InterfaceDeclaration, AnnotationDeclaration, TypeDeclaration}
import com.enkidu.lignum.parsers.ast.statement.flow._
import com.enkidu.lignum.parsers.ast.statement.modifers._
import com.enkidu.lignum.parsers.ast.statement.parameter._
import org.parboiled2.{ CharPredicate, ParserInput, Rule1, Rule2, RuleN }
import shapeless.HNil
/**
 * Parboiled2 rules for parsing Java 8 interface and annotation-type declarations.
 *
 * Builds on [[JavaClassParser]] for the shared rules referenced here (modifiers,
 * identifiers, types, method headers/bodies, and the backtick-named token rules).
 */
abstract class JavaInterfaceParser extends JavaClassParser{
// interface Foo<T> extends A, B { ... }
def interfaceDeclaration: Rule1[TypeDeclaration] = rule {
interfaceModifiers ~ `interface` ~ identifier ~ optionalTypeParameters ~
extendedInterfaces ~ `{` ~ interfaceBody ~ `}` ~> InterfaceDeclaration
}
// @interface Foo { ... }  (annotation type declaration)
def annotationDeclaration: Rule1[TypeDeclaration] = rule {
interfaceModifiers ~ `@` ~ `interface` ~ identifier ~ `{` ~
zeroOrMore(annotationMemberDeclaration) ~ `}` ~> AnnotationDeclaration
}
// Optional "extends A, B" clause; pushes an empty sequence when absent.
private def extendedInterfaces: Rule1[Seq[ClassType]] = rule {
`extends` ~ (oneOrMore(classType) separatedBy comma) | push(Vector())
}
private def interfaceBody: Rule1[Seq[MemberDeclaration]] = rule { zeroOrMore(interfaceMemberDeclaration) }
// A member is a constant, a method, or a nested type — tried in that order.
def interfaceMemberDeclaration: Rule1[MemberDeclaration] = rule {
constantDeclaration | interfaceMethodDeclaration | typeDeclaration
}
private def constantDeclaration: Rule1[MemberDeclaration] = rule {
constantModifiers ~ `type` ~ variableDeclarators ~ semicolon ~> ConstantDeclaration
}
private def interfaceMethodDeclaration: Rule1[MemberDeclaration] = rule {
interfaceMethodModifiers ~ methodHeader ~ methodBody ~> InterfaceMethodDeclaration
}
protected def annotations: Rule1[Seq[Annotation]] = rule { zeroOrMore(annotation) }
// @Foo(k = v, ...), @Foo(value), or bare @Foo — the most specific form is tried first.
def annotation: Rule1[Annotation] = rule {
`@` ~ qualifiedIdentifier ~ {
`(` ~ (zeroOrMore(elementValuePair) separatedBy `comma`) ~ `)` ~> NormalAnnotation |
`(` ~ elementValue ~ `)` ~> SingleElementAnnotation |
MATCH ~> MarkerAnnotation
}
}
// name = value pair inside a normal annotation.
private def elementValuePair: Rule1[(String, Expression)] = rule {
identifier ~ `=` ~ elementValue ~> ((id: String, expr: Expression) => (id, expr))
}
private def elementValue: Rule1[Expression] = rule {
elementValueArrayInitializer | annotation | conditionalExpression
}
// {} or { v1, v2[,] } — an array-valued annotation element (trailing comma allowed).
private def elementValueArrayInitializer: Rule1[Expression] = rule {
`{` ~ `}` ~ push(ArrayInitializer(Vector())) |
`{` ~ oneOrMore(elementValue).separatedBy(`comma`) ~ optional(comma) ~ `}` ~> ArrayInitializer
}
def annotationMemberDeclaration: Rule1[MemberDeclaration] = rule {
annotationElementDeclaration | constantDeclaration | typeDeclaration
}
// type name() [dims] [default value]; — the element type is wrapped in an
// ArrayType when trailing dimensions are present.
private def annotationElementDeclaration: Rule1[MemberDeclaration] = rule {
annotationElementModifiers ~ `type` ~ identifier ~ `(` ~ `)` ~ optionalDims ~ {
`default` ~ elementValue ~> { (as: Seq[Annotation], ms: Seq[Modifier], t: Type, n: String, dims: Seq[Dimension], e: Expression) =>
if (dims.size == 0) AnnotationDefaultElementDeclaration(as, ms, t, n, e)
else AnnotationDefaultElementDeclaration(as, ms, ArrayType(t, dims), n, e)
} |
MATCH ~> { (as: Seq[Annotation], ms: Seq[Modifier], t: Type, n: String, dims: Seq[Dimension]) =>
if (dims.size == 0) AnnotationElementDeclaration(as, ms, t, n)
else AnnotationElementDeclaration(as, ms, ArrayType(t, dims), n)
}
} ~ semicolon
}
}
| marek1840/java-parser | src/main/scala/com/enkidu/lignum/parsers/java/v8/JavaInterfaceParser.scala | Scala | mit | 4,569 |
/*
Copyright 2019 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.chill
import scala.collection.generic.CanBuildFrom
/**
 * Kryo serializer for Scala `Traversable` collections.
 *
 * Wire format: the element count (written with Kryo's optimized int encoding),
 * followed by each element written via `writeClassAndObject`. Deserialization
 * rebuilds the collection through the implicit `CanBuildFrom`, so the concrete
 * collection type round-trips.
 *
 * @param isImmutable whether the serialized collection type is immutable (default true)
 */
class TraversableSerializer[T, C <: Traversable[T]](override val isImmutable: Boolean = true)(implicit
    cbf: CanBuildFrom[C, T, C]
) extends KSerializer[C] {

  def write(kser: Kryo, out: Output, obj: C): Unit = {
    // Element count first, so read() knows how many objects follow.
    out.writeInt(obj.size, true)
    for (element <- obj) {
      kser.writeClassAndObject(out, element.asInstanceOf[AnyRef])
      // Flush after every intermediate element, matching the original behavior.
      out.flush()
    }
  }

  def read(kser: Kryo, in: Input, cls: Class[C]): C = {
    val size = in.readInt(true)
    // Build imperatively for speed; the mutable builder stays local.
    val builder = cbf()
    builder.sizeHint(size)
    var remaining = size
    while (remaining > 0) {
      builder += kser.readClassAndObject(in).asInstanceOf[T]
      remaining -= 1
    }
    builder.result()
  }
}
| twitter/chill | chill-scala/src/main/scala-2.12-/com/twitter/chill/Traversable.scala | Scala | apache-2.0 | 1,478 |
package domala.internal.macros.meta.generator
import domala.internal.macros.MacrosAbortException
import domala.message.Message
import org.scalatest.FunSuite
import scala.meta._
class SelectGeneratorTestSuite extends FunSuite {

  /**
   * Runs DAO code generation for `source` and checks that it aborts with the
   * expected error message.
   */
  private def expectAbort(source: Defn.Trait, expected: Message): Unit = {
    val error = intercept[MacrosAbortException] {
      DaoGenerator.generate(source, null, None)
    }
    assert(error.message == expected)
  }

  test("wildcard type return") {
    // A wildcard (existential) return type on @Select is rejected.
    expectAbort(
      q"""
      trait WildcardTypeReturnDao {
        @Select("select height from emp")
        def select: Height[_]
      }
      """,
      Message.DOMALA4207)
  }

  test("no stream function parameters") {
    // The STREAM strategy requires a stream-mapper function parameter.
    expectAbort(
      q"""
      trait StreamNoFunctionParamDao {
        @Select("select * from emp", strategy = SelectType.STREAM)
        def select(mapper: Int => Int): Int
      }
      """,
      Message.DOMALA4247)
  }

  test("multiple stream function parameters") {
    // Only a single stream-mapper parameter is allowed.
    expectAbort(
      q"""
      trait StreamNoFunctionParamDao {
        @Select("select * from emp", strategy = SelectType.STREAM)
        def select(mapper1: Stream[Emp] => Int, mapper2: Stream[Emp] => Int): Int
      }
      """,
      Message.DOMALA4249)
  }

  test("Simultaneous specification of SQL annotation and SQL file") {
    // Inline SQL and sqlFile = true cannot be combined on the same annotation.
    expectAbort(
      q"""
      trait StreamNoFunctionParamDao {
        @Delete("delete from emp", sqlFile = true)
        def delete(entity: Emp): Result[Emp]
      }
      """,
      Message.DOMALA6021)
  }
}
| bakenezumi/domala | meta/src/test/scala/domala/internal/macros/meta/generator/SelectGeneratorTestSuite.scala | Scala | apache-2.0 | 1,658 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.web
import org.fusesource.scalate.TemplateEngine
import org.apache.activemq.apollo.broker.Broker
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*
*/
class Boot(engine: TemplateEngine) {
// Put some references to the jersey classes in our code so that the osgi
// metadata creates the proper imports.
val we_are_using = Array(
classOf[com.sun.jersey.spi.container.servlet.ServletContainer]
)
// Points the Scalate template engine at the package holding the web
// console's precompiled templates. Called once at web-app boot.
def run: Unit = {
engine.packagePrefix = "org.apache.activemq.apollo.web.templates"
}
} | chirino/activemq-apollo | apollo-web/src/main/scala/org/apache/activemq/apollo/web/Boot.scala | Scala | apache-2.0 | 1,368 |
package bio4j.data.uniprot.test
import org.scalatest.FunSuite
import org.scalatest.concurrent.TimeLimitedTests
import org.scalatest.time.SpanSugar._
import bio4j.test.ReleaseOnlyTest
import bio4j.data.uniprot._
import java.time.LocalDate
/**
 * Parsing speed smoke-test: forces every UniProt/SwissProt line-type parser
 * over the full set of entries and fails any pass that exceeds the
 * 30-second budget enforced by [[TimeLimitedTests]].
 *
 * All tests are tagged ReleaseOnlyTest so they only run against a real
 * downloaded release, not in ordinary CI runs.
 */
class LinesParsingSpeed extends FunSuite with TimeLimitedTests {
// Per-test budget required by the TimeLimitedTests trait.
def timeLimit = 30 seconds
import testData.entries
// Each test body simply forces evaluation of one (presumably lazy) parsed
// section for every entry; the local val exists only to force the result
// and is intentionally unused.
test("SwissProt ID", ReleaseOnlyTest) { entries.foreach { e => val id = e.identification; } }
test("SwissProt AC", ReleaseOnlyTest) { entries.foreach { e => val ac = e.accessionNumbers; } }
test("SwissProt DT", ReleaseOnlyTest) { entries.foreach { e => val dt = e.date } }
test("SwissProt DE", ReleaseOnlyTest) { entries.foreach { e => val de = e.description } }
test("SwissProt GN", ReleaseOnlyTest) { entries.foreach { e => val gn = e.geneNames } }
test("SwissProt OG", ReleaseOnlyTest) { entries.foreach { e => val og = e.organelles } }
test("SwissProt OX", ReleaseOnlyTest) { entries.foreach { e => val ox = e.taxonomyCrossReference } }
test("SwissProt OH", ReleaseOnlyTest) { entries.foreach { e => val oh = e.organismHost } }
test("SwissProt CC", ReleaseOnlyTest) { entries.foreach { e => val cc = e.comments } }
test("SwissProt DR", ReleaseOnlyTest) { entries.foreach { e => val dr = e.databaseCrossReferences } }
test("SwissProt PE", ReleaseOnlyTest) { entries.foreach { e => val pe = e.proteinExistence } }
test("SwissProt KW", ReleaseOnlyTest) { entries.foreach { e => val kw = e.keywords } }
test("SwissProt FT", ReleaseOnlyTest) { entries.foreach { e => val ft = e.features } }
test("SwissProt SQ", ReleaseOnlyTest) { entries.foreach { e => val sq = e.sequenceHeader } }
test("SwissProt --", ReleaseOnlyTest) { entries.foreach { e => val x = e.sequence } }
}
| bio4j/data.uniprot | src/test/scala/LineParsingSpeed.scala | Scala | agpl-3.0 | 1,925 |
package io.argos.agent.workers
import java.io.IOException
import javax.management.{InstanceNotFoundException, NotificationListener, ObjectName}
import akka.actor.Actor.Receive
import akka.actor.{Actor, ActorContext, ActorLogging, ActorRef}
import io.argos.agent.{Constants, Messages}
import io.argos.agent.bean._
import io.argos.agent.util.{CommonLoggerFactory, HostnameProvider, JmxClient}
import Constants._
import Messages._
import io.argos.agent.bean._
import ActorProtocol._
import CommonLoggerFactory._
import io.argos.agent.util.JmxClient
import java.rmi.ConnectException
import com.typesafe.config.Config
import org.apache.cassandra.tools.NodeProbe
import scala.collection.JavaConverters._
/**
* Created by eric on 27/06/16.
*/
/**
 * Actor that answers metric and availability requests by querying a
 * Cassandra node over JMX, and relays JMX notifications onto the actor
 * system's event stream.
 *
 * Lifecycle: starts "online". On a JMX connection failure while serving a
 * request it publishes a DOWN Notification, publishes NodeStatus(OFFLINE),
 * and switches to the `offline` behaviour, where it only attempts to
 * reconnect on CheckNodeStatus pings until the connection succeeds again.
 */
class MetricsProvider(jmxConfig: Config) extends NotificationListener with Actor with ActorLogging {
// JMX endpoint of the monitored Cassandra node.
val hostname = jmxConfig.getString(CONF_ORCHESTRATOR_JMX_HOST)
val port = jmxConfig.getInt(CONF_ORCHESTRATOR_JMX_PORT)
// Optional JMX credentials: absent config paths mean unauthenticated access.
val user = if (jmxConfig.hasPath(CONF_ORCHESTRATOR_JMX_USER)) Some(jmxConfig.getString(CONF_ORCHESTRATOR_JMX_USER)) else None
val pwd = if (jmxConfig.hasPath(CONF_ORCHESTRATOR_JMX_PWD)) Some(jmxConfig.getString(CONF_ORCHESTRATOR_JMX_PWD)) else None
// Severity level/label pairs used for the DOWN and UP notifications below.
val downLevel = jmxConfig.getString(CONF_ORCHESTRATOR_DOWN_LEVEL)
val downLabel = jmxConfig.getString(CONF_ORCHESTRATOR_DOWN_LABEL)
val upLevel = jmxConfig.getString(CONF_ORCHESTRATOR_UP_LEVEL)
val upLabel = jmxConfig.getString(CONF_ORCHESTRATOR_UP_LABEL)
// Constructor-time side effect: initialise the process-wide JMX client
// singleton before grabbing a reference to it.
JmxClient.initInstance(hostname, port, user, pwd)(context.system)
val jmxClient = JmxClient.getInstance()
// NOTE(review): this logs the JMX password in clear text at debug level —
// consider redacting it.
log.debug("Start MetricsProvider with params : hostname=<{}>, port=<{}>, user=<{}>, password=<{}>", hostname, port, user, pwd)
// NodeProbe is Cassandra's nodetool-style JMX facade; the authenticated
// constructor is used only when both credentials are configured.
val nodeProbe = (user, pwd) match {
case (Some(u), Some(p)) => new NodeProbe(hostname, port, u, p)
case (_, _) => new NodeProbe(hostname, port)
}
// Lazy so the endpoint lookup is deferred until first use.
lazy val thisEndpoint = nodeProbe.getEndpoint
// Nominal ("online") behaviour.
override def receive: Receive = {
// Ping used by the offline recovery loop; nothing to do while online.
case CheckNodeStatus => log.debug("Node is online, ignore the ping message")
// Check whether consistency level `cl` on keyspace `ks` can be honoured.
case AvailabilityRequirements(ks, cl) => {
log.debug(s"MetricsProvider receives Availability(${ks}, ${cl})")
checkAvailability(sender(), ks, cl)
}
// Dispatch each metric request to the matching JMX query. Each call is
// wrapped by requestMetric, which handles JMX/connection failures.
case req : MetricsRequest => {
log.debug(s"MetricsProvider receives ${req}")
req match {
case MetricsRequest(ACTION_CHECK_DROPPED_MESSAGES, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_DROPPED_MESSAGES, Some(jmxClient.getDroppedMessages(msgType))))
case MetricsRequest(ACTION_CHECK_INTERNAL_STAGE, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_INTERNAL_STAGE, Some(jmxClient.getInternalStageValue(msgType))))
case MetricsRequest(ACTION_CHECK_STAGE, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_STAGE, Some(jmxClient.getStageValue(msgType))))
case MetricsRequest(ACTION_CHECK_STORAGE_SPACE, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_STORAGE_SPACE, Some(jmxClient.getStorageSpaceInformation())))
case MetricsRequest(ACTION_CHECK_STORAGE_HINTS, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_STORAGE_HINTS, Some(jmxClient.getStorageHints())))
case MetricsRequest(ACTION_CHECK_STORAGE_EXCEPTION, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_STORAGE_EXCEPTION, Some(jmxClient.getStorageMetricExceptions())))
case MetricsRequest(ACTION_CHECK_READ_REPAIR, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_READ_REPAIR, Some(jmxClient.getReadRepairs(msgType))))
case MetricsRequest(ACTION_CHECK_CNX_TIMEOUT, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_CNX_TIMEOUT, Some(jmxClient.getConnectionTimeouts())))
case MetricsRequest(ACTION_CHECK_GC, msgType) => requestMetric (MetricsResponse(ACTION_CHECK_GC, Some(jmxClient.getGCInspector())))
}
}
// Raw JMX attribute read by MBean name + attribute name.
case MetricsAttributeRequest(ACTION_CHECK_JMX_ATTR, jmxName, jmxAttr) => {
log.debug(s"MetricsAttributeRequest receives (${ACTION_CHECK_JMX_ATTR}, ${jmxName}, ${jmxAttr})")
requestMetric(MetricsResponse(ACTION_CHECK_JMX_ATTR, Some(jmxClient.getJmxAttrValue(jmxName, jmxAttr))))
}
}
// The response is passed by name so the JMX call it wraps is evaluated
// here, inside the try, and its failures are handled uniformly below.
private def requestMetric(delegateResponse : => MetricsResponse[Any]) : Unit = {
try {
sender ! delegateResponse
} catch {
// The queried MBean does not exist (yet) — not fatal, just log it.
case ex: InstanceNotFoundException =>
log.info("JMX Instance not found : {}", ex.getMessage)
// Lost the JMX connection: assume the node is down, notify listeners,
// and switch to the offline behaviour.
case ex: ConnectException =>
log.warning("Connection error : {}", ex.getMessage, ex);
context.system.eventStream.publish(
Notification(self.path.name, s"[${downLevel}] Cassandra node ${HostnameProvider.hostname} is DOWN",
s"The node ${HostnameProvider.hostname} may be down!!!",
downLevel,
downLabel,
HostnameProvider.hostname))
context.become(offline) // become offline. this mode try to check the metrics but call logger with debug level
context.system.eventStream.publish(NodeStatus(OFFLINE_NODE))
case ex: IOException =>
log.warning("Unexpected IO Exception : {}", ex.getMessage, ex) // do we have to become offline in this case??
}
}
// Degraded behaviour entered after a connection failure: ignore all
// traffic except CheckNodeStatus pings, which drive reconnection attempts.
private def offline : Receive = {
case CheckNodeStatus => if (tryToProcessControls) {
//sender() ! NodeStatus(ONLINE_NODE)
context.system.eventStream.publish(NodeStatus(ONLINE_NODE))
}
case msg => log.debug("node is offline, message <{}> will be ignored", msg)
}
// Attempts to re-establish the JMX connection. On success publishes an UP
// notification and reverts (unbecome) to the nominal behaviour; returns
// whether the reconnection succeeded.
private def tryToProcessControls: Boolean = {
try {
log.debug("{} received, try to reconnect", CHECK_METRICS);
jmxClient.reconnect
log.info("Reconnected to the cassandra node");
context.system.eventStream.publish(
Notification(self.path.name, s"[${upLevel}] Cassandra node ${HostnameProvider.hostname} is UP",
s"The node ${HostnameProvider.hostname} joins the cluster",
upLevel,
upLabel,
HostnameProvider.hostname))
context.unbecome // if checks succeeded, the connection is established with the Cassandra node, we can retrieve our nominal state
true
} catch {
case ex: ConnectException => log.debug("Connection error : {}", ex.getMessage); false;
case ex: IOException => log.debug("Unexpected IO Exception : {}", ex.getMessage); false;
}
}
// Constructor-time side effect: subscribe this actor as a JMX listener on
// the StorageService MBean; notifications arrive via handleNotification.
jmxClient.addNotificationListener(new ObjectName("org.apache.cassandra.db:type=StorageService"), this)
// JMX callback (runs on a JMX thread, not the actor's dispatcher): forward
// the notification onto the actor system's event stream.
override def handleNotification(notification: javax.management.Notification, handback: scala.Any): Unit = {
context.system.eventStream.publish(JmxNotification(notification))
}
// For every replica set (token range) that includes this node, determines
// whether the requested consistency level can still be honoured given the
// currently unreachable nodes, merges issues by unreachable-endpoint set,
// and replies to the given sender. Note: the `sender` parameter shadows
// Actor.sender on purpose — the reply target was captured by the caller.
def checkAvailability(sender: ActorRef, keyspace: String, consistencyLevel: String): Unit = {
val listOfAvailability = nodeProbe.describeRing(keyspace).asScala.map(interpretTokenRangeString(_))
.filter(_.endpoints.contains(thisEndpoint)) // compute only replicas set containing the current node.
.map(detectAvailabilityIssue(_, thisEndpoint, consistencyLevel, keyspace))
.filter(_.isDefined) // keep endpoints with CL issue
.map(_.get).toList
// Group ranges that share the same unreachable-endpoint set into a single
// Availability entry, concatenating their token ranges.
val mergedByEndpointList = listOfAvailability.groupBy(_.unreachableEndpoints).map {
case (unreachEndpoint:List[String], details: List[Availability]) => {
details.foldLeft(details(0).copy(tokenRanges = List()))((collector, entry) => collector.copy(tokenRanges = collector.tokenRanges ::: entry.tokenRanges))
}
}.toList
sender ! AvailabilityIssue(mergedByEndpointList)
}
// Parses the textual TokenRange description returned by
// NodeProbe.describeRing into an ArgosTokenRange.
// NOTE(review): the quadruple backslashes (e.g. `\\\\s`) look like a
// source-extraction escaping artifact; the intended patterns are presumably
// `\\s` (whitespace) etc. — verify against the original repository.
private def interpretTokenRangeString(tokenrange: String): ArgosTokenRange = {
val cTokenRange = ("start_token:([^,]+),\\\\send_token:([^,]+),\\\\sendpoints:\\\\[([^\\\\]]+)\\\\],\\\\srpc_endpoints:\\\\[([^\\\\]]+)").r.findAllIn(tokenrange).matchData map {
m => ArgosTokenRange(
m.group(1).toLong,
m.group(2).toLong,
m.group(3).split(",").map(_.trim).toList,
m.group(4).split(",").map(_.trim).toList,
List())
}
// Per-endpoint (host, datacenter, rack) details embedded in the same line.
val details = ("host:([^,]+),\\\\sdatacenter:([^,]+),\\\\srack:([^\\\\)]+)").r.findAllIn(tokenrange).matchData map {
m => ArgosEndpointDetails(m.group(1), m.group(2), m.group(3))
}
cTokenRange.next().copy(endpointDetails = details.toList)
}
// Returns Some(Availability) when too many replicas of this token range are
// unreachable to satisfy the requested consistency level, None otherwise.
// For LOCAL_* levels only replicas in this node's datacenter are considered.
private def detectAvailabilityIssue(range: ArgosTokenRange, connectedEndpoint: String, cl: String, ks: String) : Option[Availability] = {
val localDC = range.endpointDetails.filter(_.host == connectedEndpoint).head.dc
val consistencyLevel = cl.toLowerCase
if(log.isDebugEnabled) {
log.debug("exec detectAvailabilityIssue: token=<"+ range.start +", " + range.end +">, ks=<{}>, cl=<{}>, local-dc=<{}>, allReplicas=<{}>",
ks, cl, localDC, range.endpoints.mkString(","))
}
//
val targetEndpoints =
if (consistencyLevel.startsWith("local")) range.endpointDetails.groupBy(_.dc)(localDC).map(_.host)
else range.endpoints
val unreachNodes = (targetEndpoints intersect(nodeProbe.getUnreachableNodes.asScala)).sorted
// Maximum number of replicas we can afford to lose and still honour `cl`.
val maxUnreachNodes = consistencyLevel match {
case "one"|"local_one" => targetEndpoints.length - 1
case "two" => targetEndpoints.length - 2
case "three" => targetEndpoints.length - 3
case "all" => 0
case _ => (targetEndpoints.length - (1 + (targetEndpoints.length/2))) // QUORUM / LOCAL_QUORUM
}
if (unreachNodes.length > maxUnreachNodes) {
if(log.isDebugEnabled) {
log.debug("FOUND AvailabilityIssue: token=<["+ range.start +", " + range.end +"]>, ks=<"+ks+">, cl=<{}>, local-dc=<{}>, targetReplicas=<{}>, unreachables=<{}>",
cl, localDC,targetEndpoints.mkString(","),
unreachNodes.mkString(","))
}
Some(Availability(ks, cl, unreachNodes, List(range)))
} else
None
}
}
| leleueri/argos | argos-agent/src/main/scala/io/argos/agent/workers/MetricsProvider.scala | Scala | apache-2.0 | 9,711 |
package com.sksamuel.elastic4s
import org.elasticsearch.common.geo.{GeoDistance, GeoPoint}
import org.elasticsearch.script.Script
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode
import org.elasticsearch.search.aggregations._
import org.elasticsearch.search.aggregations.bucket.children.ChildrenBuilder
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridBuilder
import org.elasticsearch.search.aggregations.bucket.global.GlobalBuilder
import org.elasticsearch.search.aggregations.bucket.histogram.{DateHistogramBuilder, DateHistogramInterval, Histogram, HistogramBuilder}
import org.elasticsearch.search.aggregations.bucket.missing.MissingBuilder
import org.elasticsearch.search.aggregations.bucket.nested.{NestedBuilder, ReverseNestedBuilder}
import org.elasticsearch.search.aggregations.bucket.range.RangeBuilder
import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeBuilder
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceBuilder
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder
import org.elasticsearch.search.aggregations.bucket.terms.Terms.ValueType
import org.elasticsearch.search.aggregations.bucket.terms.{Terms, TermsBuilder}
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityBuilder
import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsBuilder
import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidBuilder
import org.elasticsearch.search.aggregations.metrics.{MetricsAggregationBuilder, ValuesSourceMetricsAggregationBuilder}
import org.elasticsearch.search.sort.SortBuilder
/** @author Nicolas Yzet */
/** Root of all aggregation definitions: anything able to expose the
  * underlying Elasticsearch builder to be attached to a search request. */
trait AbstractAggregationDefinition {
def builder: AbstractAggregationBuilder
}
/** Type-level evidence linking a definition type `T` to the Elasticsearch
  * result type produced when that aggregation is executed. */
abstract class AggregationResult[T <: AbstractAggregationDefinition] {
type Result <: Aggregation
}
/** Implicit evidence instances mapping well-known definition types to the
  * Elasticsearch result types of the corresponding responses. */
object AggregationResults {
// terms aggregation -> Terms bucket result
implicit object TermsAggregationResult extends AggregationResult[TermAggregationDefinition] {
override type Result = org.elasticsearch.search.aggregations.bucket.terms.Terms
}
// date_histogram aggregation -> Histogram result
implicit object DateHistogramAggregationResult extends AggregationResult[DateHistogramAggregation] {
override type Result = org.elasticsearch.search.aggregations.bucket.histogram.Histogram
}
// value_count aggregation -> ValueCount metric result
implicit object CountAggregationResult extends AggregationResult[ValueCountAggregationDefinition] {
override type Result = org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount
}
}
/** Base for bucket aggregations that may carry sub-aggregations.
  * `Self` is the concrete definition type (F-bounded) so the fluent
  * `aggregations`/`aggs` methods can return it. */
trait AggregationDefinition[+Self <: AggregationDefinition[Self, B], B <: AggregationBuilder[B]]
extends AbstractAggregationDefinition {
// The concrete Elasticsearch builder this definition mutates.
val aggregationBuilder: B
def builder = aggregationBuilder
// Registers each sub-aggregation on the underlying builder. The cast relies
// on every subclass instantiating Self with its own type — assumed, not
// enforced by the compiler.
def aggregations(it: Iterable[AbstractAggregationDefinition]): Self = {
it.foreach { aad => aggregationBuilder.subAggregation(aad.builder) }
this.asInstanceOf[Self]
}
// Varargs / alias convenience forms of the method above.
def aggregations(a: AbstractAggregationDefinition*): Self = aggregations(a.toIterable)
def aggs(a: AbstractAggregationDefinition*): Self = aggregations(a)
def aggs(a: Iterable[AbstractAggregationDefinition]): Self = aggregations(a)
}
/** Base for metric aggregations (which, unlike bucket aggregations, carry
  * no sub-aggregations). Simply exposes the underlying metrics builder. */
trait MetricsAggregationDefinition[+Self <: MetricsAggregationDefinition[Self, B], B <: MetricsAggregationBuilder[B]]
extends AbstractAggregationDefinition {
val aggregationBuilder: B
def builder = aggregationBuilder
}
/** Metric aggregations whose input is a values source: either a document
  * field or a script. The `self: Self =>` self-type lets the setters return
  * the concrete definition type for fluent chaining. */
trait ValuesSourceMetricsAggregationDefinition[+Self <: ValuesSourceMetricsAggregationDefinition[Self, B], B <: ValuesSourceMetricsAggregationBuilder[B]]
extends MetricsAggregationDefinition[Self, B] {
self: Self =>
// The document field the metric is computed over.
def field(field: String): Self = {
builder.field(field)
this
}
// Computes the metric over values produced by the given script.
def script(script: ScriptDefinition): Self = {
builder.script(script.toJavaAPI)
this
}
}
/**
 * Shared setters for the `cardinality` metric aggregation (approximate
 * distinct-value counting in Elasticsearch).
 *
 * Fix: the setters previously returned the widened
 * `CardinalityMetricsAggregationDefinition[Self]`, which lost the concrete
 * definition type in fluent chains. They now return `this.type` — a subtype
 * of the previous return type — so existing callers compile unchanged while
 * chains keep the most specific type.
 */
trait CardinalityMetricsAggregationDefinition[+Self <: CardinalityMetricsAggregationDefinition[Self]]
  extends MetricsAggregationDefinition[Self, CardinalityBuilder] {

  /** The document field whose distinct values are counted. */
  def field(field: String): this.type = {
    builder.field(field)
    this
  }

  /** Counts distinct values produced by an inline script source. */
  def script(script: String): this.type = {
    builder.script(new Script(script))
    this
  }

  /** Whether values should be hashed before counting. */
  def rehash(rehash: Boolean): this.type = {
    builder.rehash(rehash)
    this
  }

  /** Threshold below which the count is configured to be (near-)exact,
    * per the Elasticsearch cardinality aggregation contract. */
  def precisionThreshold(precisionThreshold: Long): this.type = {
    builder.precisionThreshold(precisionThreshold)
    this
  }
}
/** `missing` bucket aggregation: collects the documents that have no value
  * for the configured field. */
case class MissingAggregationDefinition(name: String)
  extends AggregationDefinition[MissingAggregationDefinition, MissingBuilder] {

  val aggregationBuilder = AggregationBuilders.missing(name)

  /** Sets the field whose absent values define the bucket. */
  def field(field: String): this.type = {
    aggregationBuilder.field(field)
    this
  }
}
/**
 * `terms` bucket aggregation: buckets documents by the distinct values of a
 * field (or of a script result) and counts occurrences per value.
 *
 * Fixes: removed the dead comment left over from refactoring and unified
 * the fluent setters' result types to `this.type` (they previously mixed
 * `TermAggregationDefinition` and `this.type`). `this.type` is a subtype of
 * the old declared types, so existing callers compile unchanged.
 */
case class TermAggregationDefinition(name: String)
  extends AggregationDefinition[TermAggregationDefinition, TermsBuilder] {

  val aggregationBuilder = AggregationBuilders.terms(name)

  /** Buckets on the value produced by the given script definition. */
  def script(script: ScriptDefinition): this.type = {
    builder.script(script.toJavaAPI)
    this
  }

  /** Maximum number of term buckets to return. */
  def size(size: Int): this.type = {
    builder.size(size)
    this
  }

  /** Minimum number of documents a term needs to form a bucket. */
  def minDocCount(minDocCount: Int): this.type = {
    builder.minDocCount(minDocCount)
    this
  }

  /** Whether to report the per-term document-count error bound. */
  def showTermDocCountError(showTermDocCountError: Boolean): this.type = {
    builder.showTermDocCountError(showTermDocCountError)
    this
  }

  /** Sub-aggregation collection mode (breadth-first vs depth-first). */
  def collectMode(mode: SubAggCollectionMode): this.type = {
    builder.collectMode(mode)
    this
  }

  /** Hints the value type when it cannot be inferred from the mapping. */
  def valueType(valueType: ValueType): this.type = {
    builder.valueType(valueType)
    this
  }

  /** Bucket ordering (e.g. by count or by term). */
  def order(order: Terms.Order): this.type = {
    builder.order(order)
    this
  }

  /** The document field to bucket on. */
  def field(field: String): this.type = {
    builder.field(field)
    this
  }

  /** Buckets on the value produced by an inline script source. */
  def script(script: String): this.type = {
    builder.script(new Script(script))
    this
  }

  /** Number of term buckets each shard considers before reducing. */
  def shardSize(shardSize: Int): this.type = {
    builder.shardSize(shardSize)
    this
  }

  /** Only terms matching this regular expression form buckets. */
  def include(regex: String): this.type = {
    builder.include(regex)
    this
  }

  /** Terms matching this regular expression are excluded. */
  def exclude(regex: String): this.type = {
    builder.exclude(regex)
    this
  }
}
/**
 * `range` bucket aggregation over numeric ranges.
 * NOTE(review): result types are inconsistent (`RangeAggregationDefinition`
 * vs `this.type`); unifying on `this.type` would be source compatible.
 */
case class RangeAggregationDefinition(name: String)
extends AggregationDefinition[RangeAggregationDefinition, RangeBuilder] {
val aggregationBuilder = AggregationBuilders.range(name)
// Adds an anonymous [from, to) bucket (bounds semantics are those of the
// Elasticsearch range aggregation).
def range(from: Double, to: Double): RangeAggregationDefinition = {
builder.addRange(from, to)
this
}
// Adds a bucket for everything below `to`.
def unboundedTo(to: Double): this.type = {
builder.addUnboundedTo(to)
this
}
// Same, with an explicit bucket key.
def unboundedTo(key: String, to: Double): this.type = {
builder.addUnboundedTo(key, to)
this
}
// Adds a bucket for everything from `from` upwards.
def unboundedFrom(from: Double): this.type = {
builder.addUnboundedFrom(from)
this
}
// Same, with an explicit bucket key.
def unboundedFrom(key: String, from: Double): this.type = {
builder.addUnboundedFrom(key, from)
this
}
// Bulk-registers several (from, to) buckets at once.
def ranges(ranges: (Double, Double)*): this.type = {
for ( range <- ranges )
builder.addRange(range._1, range._2)
this
}
// Adds a keyed range bucket.
def range(key: String, from: Double, to: Double): RangeAggregationDefinition = {
builder.addRange(key, from, to)
this
}
// The numeric field the ranges apply to.
def field(field: String): RangeAggregationDefinition = {
builder.field(field)
this
}
}
/** `date_range` bucket aggregation: like `range`, but bounds are dates
  * given either as date-expression strings or epoch values. */
case class DateRangeAggregation(name: String) extends AggregationDefinition[DateRangeAggregation, DateRangeBuilder] {
val aggregationBuilder = AggregationBuilders.dateRange(name)
// Anonymous range from string date expressions.
def range(from: String, to: String): DateRangeAggregation = {
builder.addRange(from, to)
this
}
// Keyed range from string date expressions.
def range(key: String, from: String, to: String): DateRangeAggregation = {
builder.addRange(key, from, to)
this
}
// Anonymous range from numeric (epoch) bounds.
def range(from: Long, to: Long): DateRangeAggregation = {
builder.addRange(from, to)
this
}
// Keyed range from numeric (epoch) bounds.
def range(key: String, from: Long, to: Long): DateRangeAggregation = {
builder.addRange(key, from, to)
this
}
// The date field the ranges apply to.
def field(field: String): DateRangeAggregation = {
builder.field(field)
this
}
// Bucket for everything from `from` upwards.
def unboundedFrom(from: String): DateRangeAggregation = {
builder.addUnboundedFrom(from)
this
}
// Bucket for everything below `to`.
def unboundedTo(to: String): DateRangeAggregation = {
builder.addUnboundedTo(to)
this
}
// Keyed unbounded-from bucket.
def unboundedFrom(key: String, from: String): DateRangeAggregation = {
builder.addUnboundedFrom(key, from)
this
}
// Keyed unbounded-to bucket.
def unboundedTo(key: String, to: String): DateRangeAggregation = {
builder.addUnboundedTo(key, to)
this
}
// Date format pattern used to render bucket keys.
def format(fmt: String): DateRangeAggregation = {
builder.format(fmt)
this
}
}
/** Parent/child `children` bucket aggregation: aggregates over the child
  * documents of the matched parents. */
case class ChildrenAggregationDefinition(name: String)
  extends AggregationDefinition[ChildrenAggregationDefinition, ChildrenBuilder] {

  val aggregationBuilder = AggregationBuilders.children(name)

  /** Selects the child document type to aggregate over. */
  def childType(childType: String): this.type = {
    aggregationBuilder.childType(childType)
    this
  }
}
/** `histogram` bucket aggregation over fixed-width numeric intervals. */
case class HistogramAggregation(name: String) extends AggregationDefinition[HistogramAggregation, HistogramBuilder] {

  val aggregationBuilder = AggregationBuilders.histogram(name)

  /** The numeric field to build the histogram over. */
  def field(field: String): HistogramAggregation = { aggregationBuilder.field(field); this }

  /** Width of each histogram bucket. */
  def interval(interval: Long): HistogramAggregation = { aggregationBuilder.interval(interval); this }
}
/**
 * `date_histogram` bucket aggregation: buckets documents into fixed or
 * calendar-aware time intervals over a date field.
 *
 * Fix: several public setters (`minDocCount`, `offset`, `order`, `format`)
 * had no explicit result type; all fluent setters now declare `this.type`,
 * a subtype of the previously inferred/declared `DateHistogramAggregation`,
 * so existing callers compile unchanged.
 */
case class DateHistogramAggregation(name: String)
  extends AggregationDefinition[DateHistogramAggregation, DateHistogramBuilder] {

  val aggregationBuilder = AggregationBuilders.dateHistogram(name)

  /** The date field to bucket on. */
  def field(field: String): this.type = {
    builder.field(field)
    this
  }

  /** Forces the histogram to extend over the given (min, max) bounds even
    * where there are no documents. */
  def extendedBounds(minMax: (String, String)): this.type = {
    builder.extendedBounds(minMax._1, minMax._2)
    this
  }

  /** Fixed-length bucket interval (raw value passed to the builder). */
  def interval(interval: Long): this.type = {
    builder.interval(interval)
    this
  }

  /** Calendar-aware bucket interval (day, month, ...). */
  def interval(interval: DateHistogramInterval): this.type = {
    builder.interval(interval)
    this
  }

  /** Minimum document count for a bucket to be returned. */
  def minDocCount(minDocCount: Long): this.type = {
    builder.minDocCount(minDocCount)
    this
  }

  /** Time zone used to align bucket boundaries. */
  def timeZone(timeZone: String): this.type = {
    builder.timeZone(timeZone)
    this
  }

  /** Shifts bucket boundaries by the given offset expression. */
  def offset(offset: String): this.type = {
    builder.offset(offset)
    this
  }

  /** Bucket ordering. */
  def order(order: Histogram.Order): this.type = {
    builder.order(order)
    this
  }

  /** Date format pattern used for bucket keys. */
  def format(format: String): this.type = {
    builder.format(format)
    this
  }
}
/** `geohash_grid` bucket aggregation: buckets geo-points into geohash
  * cells of a configurable precision. */
case class GeoHashGridAggregationDefinition(name: String)
  extends AggregationDefinition[GeoHashGridAggregationDefinition, GeoHashGridBuilder] {

  val aggregationBuilder = AggregationBuilders.geohashGrid(name)

  /** Geohash precision (cell size) to bucket on. */
  def precision(precision: Int): this.type = { aggregationBuilder.precision(precision); this }

  /** The geo-point field to bucket on. */
  def field(field: String): this.type = { aggregationBuilder.field(field); this }

  /** Number of buckets each shard considers before reducing. */
  def shardSize(shardSize: Int): this.type = { aggregationBuilder.shardSize(shardSize); this }

  /** Maximum number of buckets to return. */
  def size(size: Int): this.type = { aggregationBuilder.size(size); this }
}
/** `geo_bounds` metric aggregation: computes the bounding box enclosing all
  * geo-point values of a field.
  * NOTE(review): the setters mix `builder` and `aggregationBuilder`; they
  * refer to the same object, but one spelling should be chosen. */
case class GeoBoundsAggregationDefinition(name: String)
extends AggregationDefinition[GeoBoundsAggregationDefinition, GeoBoundsBuilder] {
val aggregationBuilder = AggregationBuilders.geoBounds(name)
// The geo-point field to compute the bounds over.
def field(field: String): GeoBoundsAggregationDefinition = {
builder.field(field)
this
}
// Computes the bounds over script-produced values instead of a field.
def script(script: Script): GeoBoundsAggregationDefinition = {
aggregationBuilder.script(script)
this
}
// Value substituted for documents missing the field.
def missing(missing: String): GeoBoundsAggregationDefinition = {
aggregationBuilder.missing(missing)
this
}
// Whether the bounding box may cross the international date line.
def wrapLongitude(wrapLongitude: Boolean): GeoBoundsAggregationDefinition = {
builder.wrapLongitude(wrapLongitude)
this
}
}
/** `geo_centroid` metric aggregation: computes the weighted centroid of all
  * geo-point values of a field. */
case class GeoCentroidAggregationDefinition(name: String)
  extends ValuesSourceMetricsAggregationDefinition[GeoCentroidAggregationDefinition, GeoCentroidBuilder] {

  val aggregationBuilder = AggregationBuilders.geoCentroid(name)

  /** Value substituted for documents missing the field. */
  def missing(missing: String): GeoCentroidAggregationDefinition = { builder.missing(missing); this }

  /** Output format for the centroid value. */
  def format(format: String): GeoCentroidAggregationDefinition = { builder.format(format); this }
}
/** `geo_distance` bucket aggregation: buckets documents by their distance
  * from a configured origin point. */
case class GeoDistanceAggregationDefinition(name: String)
extends AggregationDefinition[GeoDistanceAggregationDefinition, GeoDistanceBuilder] {
val aggregationBuilder = AggregationBuilders.geoDistance(name)
// Tuple convenience overload of range(from, to).
def range(tuple: (Double, Double)): GeoDistanceAggregationDefinition = range(tuple._1, tuple._2)
// Adds a distance ring bucket between `from` and `to`.
def range(from: Double, to: Double): GeoDistanceAggregationDefinition = {
builder.addRange(from, to)
this
}
// The geo-point field distances are measured on.
def field(field: String): GeoDistanceAggregationDefinition = {
builder.field(field)
this
}
// Distance calculation algorithm (note: sets the builder's distanceType,
// despite this method's name).
def geoDistance(geoDistance: GeoDistance): GeoDistanceAggregationDefinition = {
builder.distanceType(geoDistance)
this
}
// Origin given as a geohash string.
def geohash(geohash: String): GeoDistanceAggregationDefinition = {
builder.geohash(geohash)
this
}
// Origin given as a latitude/longitude pair.
def point(lat: Double, long: Double): GeoDistanceAggregationDefinition = {
builder.point(new GeoPoint(lat, long))
this
}
// Bucket for everything farther than the given distance.
def addUnboundedFrom(addUnboundedFrom: Double): GeoDistanceAggregationDefinition = {
builder.addUnboundedFrom(addUnboundedFrom)
this
}
// Bucket for everything closer than the given distance.
def addUnboundedTo(addUnboundedTo: Double): GeoDistanceAggregationDefinition = {
builder.addUnboundedTo(addUnboundedTo)
this
}
}
/** `filter` bucket aggregation: a single bucket holding the documents that
  * match the given query. */
case class FilterAggregationDefinition(name: String)
  extends AggregationDefinition[FilterAggregationDefinition, FilterAggregationBuilder] {

  val aggregationBuilder = AggregationBuilders.filter(name)

  /** Sets the query whose matches form the bucket (evaluated once). */
  def filter(block: => QueryDefinition): this.type = {
    val query = block
    aggregationBuilder.filter(query.builder)
    this
  }
}
/** `filters` bucket aggregation: one bucket per registered filter query,
  * optionally keyed. */
case class FiltersAggregationDefinition(name: String)
  extends AggregationDefinition[FiltersAggregationDefinition, FiltersAggregationBuilder] {

  val aggregationBuilder = AggregationBuilders.filters(name)

  /** Adds an anonymous filter bucket built from the given query. */
  def filter(block: => QueryDefinition): this.type = {
    val query = block
    aggregationBuilder.filter(query.builder)
    this
  }

  /** Adds a named filter bucket built from the given query. */
  def filter(key: String, block: => QueryDefinition): this.type = {
    val query = block
    aggregationBuilder.filter(key, query.builder)
    this
  }
}
/**
 * `significant_terms` bucket aggregation: surfaces terms that are unusually
 * frequent in the result set compared to the background index.
 */
case class SigTermsAggregationDefinition(name: String)
  extends AggregationDefinition[SigTermsAggregationDefinition, SignificantTermsBuilder] {

  val aggregationBuilder = AggregationBuilders.significantTerms(name)

  /** Terms matching this regular expression are excluded. */
  def exclude(regex: String): this.type = { builder.exclude(regex); this }

  /** Minimum foreground document count for a term to be considered. */
  def minDocCount(minDocCount: Int): this.type = { builder.minDocCount(minDocCount); this }

  /** Execution hint for the aggregation. (The parameter is kept named
    * `regex` for named-argument source compatibility, although it carries a
    * hint, not a regular expression.) */
  def executionHint(regex: String): this.type = { builder.executionHint(regex); this }

  /** Maximum number of significant terms to return. */
  def size(size: Int): this.type = { builder.size(size); this }

  /** Only terms matching this regular expression are considered. */
  def include(include: String): this.type = { builder.include(include); this }

  /** The document field to analyse. */
  def field(field: String): this.type = { builder.field(field); this }

  /** Per-shard minimum document count before a term is forwarded. */
  def shardMinDocCount(shardMinDocCount: Int): this.type = { builder.shardMinDocCount(shardMinDocCount); this }

  /** Restricts the background set used for significance scoring. */
  def backgroundFilter(backgroundFilter: QueryDefinition): this.type = { builder.backgroundFilter(backgroundFilter.builder); this }

  /** Number of candidate terms each shard considers before reducing. */
  def shardSize(shardSize: Int): this.type = { builder.shardSize(shardSize); this }
}
/** `ip_range` bucket aggregation over IP address ranges or CIDR masks.
  * Extends AbstractAggregationDefinition directly, so (unlike the other
  * bucket definitions here) it exposes no sub-aggregation helpers. */
case class IpRangeAggregationDefinition(name: String) extends AbstractAggregationDefinition {
val builder = AggregationBuilders.ipRange(name)
// Keyed bucket defined by a CIDR mask.
def maskRange(key: String, mask: String): this.type = {
builder.addMaskRange(key, mask)
this
}
// Anonymous bucket defined by a CIDR mask.
def maskRange(mask: String): this.type = {
builder.addMaskRange(mask)
this
}
// Anonymous bucket between two IP addresses.
def range(from: String, to: String): this.type = {
builder.addRange(from, to)
this
}
// Keyed bucket between two IP addresses.
def range(key: String, from: String, to: String): this.type = {
builder.addRange(key, from, to)
this
}
// Bucket for all addresses from `from` upwards.
def unboundedFrom(from: String): this.type = {
builder.addUnboundedFrom(from)
this
}
// Bucket for all addresses below `to`.
def unboundedTo(to: String): this.type = {
builder.addUnboundedTo(to)
this
}
}
/** `min` metric aggregation; field/script setters inherited from
  * ValuesSourceMetricsAggregationDefinition. */
case class MinAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[MinAggregationDefinition, metrics.min.MinBuilder] {
val aggregationBuilder = AggregationBuilders.min(name)
}
/** `max` metric aggregation. */
case class MaxAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[MaxAggregationDefinition, metrics.max.MaxBuilder] {
val aggregationBuilder = AggregationBuilders.max(name)
}
/** `sum` metric aggregation. */
case class SumAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[SumAggregationDefinition, metrics.sum.SumBuilder] {
val aggregationBuilder = AggregationBuilders.sum(name)
}
/** `avg` metric aggregation. */
case class AvgAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[AvgAggregationDefinition, metrics.avg.AvgBuilder] {
val aggregationBuilder = AggregationBuilders.avg(name)
}
/** `stats` metric aggregation (min/max/sum/count/avg in one pass). */
case class StatsAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[StatsAggregationDefinition, metrics.stats.StatsBuilder] {
val aggregationBuilder = AggregationBuilders.stats(name)
}
/** `percentiles` metric aggregation.
  * NOTE(review): `percents`/`compression` are duplicated verbatim in
  * PercentileRanksAggregationDefinition below; a shared trait would remove
  * the duplication. */
case class PercentilesAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[PercentilesAggregationDefinition, metrics.percentiles.PercentilesBuilder] {
val aggregationBuilder = AggregationBuilders.percentiles(name)
// The percentile cut points to compute (delegates to builder.percentiles).
def percents(percents: Double*): PercentilesAggregationDefinition = {
builder.percentiles(percents: _*)
this
}
// TDigest compression trade-off between accuracy and memory.
def compression(compression: Double): PercentilesAggregationDefinition = {
builder.compression(compression)
this
}
}
/** `percentile_ranks` metric aggregation: the inverse of `percentiles`. */
case class PercentileRanksAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[PercentileRanksAggregationDefinition, metrics.percentiles.PercentileRanksBuilder] {
val aggregationBuilder = AggregationBuilders.percentileRanks(name)
// The values whose percentile ranks are computed.
def percents(percents: Double*): PercentileRanksAggregationDefinition = {
builder.percentiles(percents: _*)
this
}
// TDigest compression trade-off between accuracy and memory.
def compression(compression: Double): PercentileRanksAggregationDefinition = {
builder.compression(compression)
this
}
}
/** `extended_stats` metric aggregation (stats plus variance, std-dev, ...). */
case class ExtendedStatsAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[ExtendedStatsAggregationDefinition, metrics.stats.extended.ExtendedStatsBuilder] {
val aggregationBuilder = AggregationBuilders.extendedStats(name)
}
/** `value_count` metric aggregation: number of extracted values. */
case class ValueCountAggregationDefinition(name: String)
extends ValuesSourceMetricsAggregationDefinition[ValueCountAggregationDefinition, metrics.valuecount.ValueCountBuilder] {
val aggregationBuilder = AggregationBuilders.count(name)
}
/** `cardinality` metric aggregation; setters inherited from
  * CardinalityMetricsAggregationDefinition. */
case class CardinalityAggregationDefinition(name: String)
extends CardinalityMetricsAggregationDefinition[CardinalityAggregationDefinition] {
val aggregationBuilder = AggregationBuilders.cardinality(name)
}
/** `global` bucket aggregation: a single bucket over all documents,
  * ignoring the query scope. */
case class GlobalAggregationDefinition(name: String)
extends AggregationDefinition[GlobalAggregationDefinition, GlobalBuilder] {
val aggregationBuilder = AggregationBuilders.global(name)
}
/** `top_hits` metric aggregation: returns the top matching documents per
  * bucket. Extends AbstractAggregationDefinition directly, so it carries no
  * sub-aggregation helpers. */
case class TopHitsAggregationDefinition(name: String) extends AbstractAggregationDefinition {
val builder = AggregationBuilders.topHits(name)
// Offset of the first hit to return within each bucket.
def from(from: Int): this.type = {
builder.setFrom(from)
this
}
// Number of hits to return per bucket.
def size(size: Int): this.type = {
builder.setSize(size)
this
}
// Sort using elastic4s definitions; delegates to the raw-builder variant.
def sort(sorts: SortDefinition*): this.type = sort2(sorts.map(_.builder): _*)
// Sort using raw Elasticsearch SortBuilders (separate name avoids
// overload ambiguity with the varargs method above).
def sort2(sorts: SortBuilder*): this.type = {
sorts.foreach(builder.addSort)
this
}
// Source filtering: which fields to include/exclude in returned hits.
def fetchSource(includes: Array[String], excludes: Array[String]): this.type = {
builder.setFetchSource(includes, excludes)
this
}
}
/** `nested` bucket aggregation: aggregates over nested documents located at
  * the given mapping path. */
case class NestedAggregationDefinition(name: String)
extends AggregationDefinition[NestedAggregationDefinition, NestedBuilder] {
val aggregationBuilder = AggregationBuilders.nested(name)
// Mapping path of the nested documents to aggregate over.
def path(path: String): NestedAggregationDefinition = {
builder.path(path)
this
}
}
/** `reverse_nested` bucket aggregation: steps back out of a nested context
  * to the parent (or an ancestor at the given path). */
case class ReverseNestedAggregationDefinition(name: String)
extends AggregationDefinition[ReverseNestedAggregationDefinition, ReverseNestedBuilder] {
val aggregationBuilder = AggregationBuilders.reverseNested(name)
// Ancestor nesting level to step back to.
def path(path: String): ReverseNestedAggregationDefinition = {
builder.path(path)
this
}
}
| k4200/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/aggregations.scala | Scala | apache-2.0 | 20,481 |
package com.github.al.roulette.game.impl
import java.time.Duration
import akka.Done
import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.github.al.roulette.test.persistence.EntitySpecSugar
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.lightbend.lagom.scaladsl.testkit.PersistentEntityTestDriver.Reply
import org.mockito.Mockito.when
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterAll, Matchers, OptionValues, WordSpec}
/**
 * Unit tests for the game persistent entity, driven through the Lagom
 * persistent-entity test driver (provided by EntitySpecSugar's withDriver):
 * creation, retrieval, starting and finishing of a game, asserting the
 * emitted events, resulting state and command replies.
 */
class GameEntitySpec extends WordSpec with Matchers with BeforeAndAfterAll with OptionValues with MockitoSugar with EntitySpecSugar {
  override type P = GameEntity

  // Fixture data shared by all tests.
  private final val GameName = "Some new game"
  private final val GameDuration = Duration.ofMinutes(30)
  private final val SampleGameState = GameState(GameName, GameDuration)

  // Actor system configured with the entity's JSON serializer registry.
  private implicit val system = ActorSystem("test", JsonSerializerRegistry.actorSystemSetupFor(GameSerializerRegistry))
  // Mocked so tests can fix the roulette outcome deterministically.
  private val mockRouletteBallLander = mock[RouletteBallLander]

  override def persistenceEntity = new GameEntity(mockRouletteBallLander)
  override val persistenceEntityId: String = "7e595fac-830e-44f1-b73e-f8fd60594ace"

  "The game entity" should {
    "allow creating a game" in withDriver { driver =>
      val outcome = driver.run(CreateGame(SampleGameState))
      outcome.events should contain only GameCreated(SampleGameState)
      outcome.state should ===(Some(SampleGameState))
      outcome.sideEffects should contain only Reply(Done)
    }
    "allow creating and retrieving a game" in withDriver { driver =>
      val outcome = driver.run(CreateGame(SampleGameState), GetGame)
      outcome.events should contain only GameCreated(SampleGameState)
      outcome.state should ===(Some(SampleGameState))
      outcome.sideEffects should contain theSameElementsInOrderAs Seq(Reply(Done), Reply(Some(SampleGameState)))
    }
    "allow creating and starting a game" in withDriver { driver =>
      val outcome = driver.run(CreateGame(SampleGameState), StartGame)
      outcome.events should contain theSameElementsInOrderAs Seq(GameCreated(SampleGameState), GameStarted)
      // Start timestamp is set (Some(_)), finish time and result still empty.
      outcome.state should matchPattern { case Some(GameState(GameName, GameDuration, Some(_), None, None)) => }
      outcome.sideEffects should contain theSameElementsInOrderAs Seq(Reply(Done), Reply(Done))
    }
    "allow creating, starting and finishing a game" in withDriver { driver =>
      // Fix the landed number so GameResulted can be asserted exactly.
      when(mockRouletteBallLander.landBall()).thenReturn(11)
      val outcome = driver.run(CreateGame(SampleGameState), StartGame, FinishGame)
      outcome.events should contain theSameElementsInOrderAs Seq(GameCreated(SampleGameState), GameStarted, GameFinished, GameResulted(11))
      outcome.state should matchPattern { case Some(GameState(GameName, GameDuration, Some(_), Some(_), Some(11))) => }
      outcome.sideEffects should contain theSameElementsInOrderAs Seq(Reply(Done), Reply(Done), Reply(Done))
    }
  }

  protected override def afterAll: Unit = TestKit.shutdownActorSystem(system)
}
| andrei-l/reactive-roulette | roulette-game-impl/src/test/scala/com/github/al/roulette/game/impl/GameEntitySpec.scala | Scala | mit | 3,049 |
import scala.annotation.tailrec
object Sublist {
  /** The possible relationships between two lists. */
  trait SublistType
  object Equal extends SublistType
  object Unequal extends SublistType
  object Sublist extends SublistType
  object Superlist extends SublistType
}

class Sublist {
  /** Classifies the relationship of `as` to `bs`: equal, sublist, superlist or unequal. */
  def sublist[A](as: List[A], bs: List[A]): Sublist.SublistType =
    if (as == bs) Sublist.Equal
    else if (isSublist(as, bs)) Sublist.Sublist
    else if (isSublist(bs, as)) Sublist.Superlist
    else Sublist.Unequal

  /**
   * True if `as` occurs as a contiguous subsequence of `bs`
   * (the empty list is a sublist of everything).
   */
  @tailrec
  final def isSublist[A](as: List[A], bs: List[A]): Boolean = (as, bs) match {
    case (Nil, _) => true
    case (_, Nil) => false
    // Try matching at the current position, otherwise slide one element right.
    // Pass `bs` directly instead of rebuilding the cons cell `bh :: bt`.
    case (_, _ :: bt) => startsWith(as, bs) || isSublist(as, bt)
  }

  /** True if `bs` begins with all elements of `as`, in order. */
  @tailrec
  private def startsWith[A](as: List[A], bs: List[A]): Boolean = (as, bs) match {
    case (Nil, _) => true
    case (_, Nil) => false
    // Merged the guarded/unguarded pair into one case; the short-circuiting
    // `&&` keeps the recursive call in tail position for @tailrec.
    case (ah :: at, bh :: bt) => ah == bh && startsWith(at, bt)
  }
}
| daewon/til | exercism/scala/sublist/src/main/scala/sublist.scala | Scala | mpl-2.0 | 935 |
package com.softwaremill
import language.experimental.macros
import reflect.macros.blackbox.Context
/**
 * Tutorial macro: `debug(expr)` expands to a `println` that shows the source
 * text of `expr` followed by its runtime value.
 */
object Step3Complete {
  def debug(param: Any): Unit = macro debug_impl

  def debug_impl(c: Context)(param: c.Expr[Any]): c.Expr[Unit] = {
    import c.universe._
    // Illustrative side effect at compile time: dump the raw AST of a println call.
    println("Example println tree:")
    println(showRaw(reify { println("Hello World!") }.tree))
    // Render the argument's source text and lift it into a string-literal expression.
    val sourceText = show(param.tree)
    val sourceTextExpr = c.Expr[String](Literal(Constant(sourceText)))
    // Expand to: println("<source text> = " + <value of param>)
    reify { println(sourceTextExpr.splice + " = " + param.splice) }
  }
}
| adamw/scala-macro-tutorial | macros/src/main/scala/com/softwaremill/Step3Complete.scala | Scala | apache-2.0 | 578 |
package mesosphere.util
import java.net.ServerSocket
/** Test helper for obtaining a free TCP port from the operating system. */
object PortAllocator {
  /**
   * Binds a server socket to port 0 so the OS picks a free ephemeral port,
   * then releases the socket and returns the chosen port number.
   *
   * Note: the port is free at the moment of return but could in principle be
   * re-taken before the caller binds it.
   */
  def ephemeralPort(): Int = {
    val socket = new ServerSocket(0)
    // try/finally guarantees the socket is released even if getLocalPort throws,
    // preventing a leaked listening socket.
    try socket.getLocalPort
    finally socket.close()
  }
}
| yp-engineering/marathon | src/test/scala/mesosphere/util/PortAllocator.scala | Scala | apache-2.0 | 215 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import scala.collection.mutable.ArrayBuffer
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.rpc.{RpcAddress, RpcCallContext, RpcEnv}
import org.apache.spark.scheduler.{CompressedMapStatus, MapStatus}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{BlockManagerId, ShuffleBlockId}
/**
 * Tests for the map-output tracker master/worker pair: shuffle registration,
 * map-output registration and removal, epoch invalidation of worker caches,
 * and the direct-RPC vs. broadcast paths used to serve map output statuses.
 */
class MapOutputTrackerSuite extends SparkFunSuite {
  private val conf = new SparkConf

  // Builds a fresh tracker master; the BroadcastManager is needed for the
  // broadcast-based serving of large status payloads.
  private def newTrackerMaster(sparkConf: SparkConf = conf) = {
    val broadcastManager = new BroadcastManager(true, sparkConf,
      new SecurityManager(sparkConf))
    new MapOutputTrackerMaster(sparkConf, broadcastManager, true)
  }

  // Creates a standalone RpcEnv to host tracker endpoints for a test.
  def createRpcEnv(name: String, host: String = "localhost", port: Int = 0,
      securityManager: SecurityManager = new SecurityManager(conf)): RpcEnv = {
    RpcEnv.create(name, host, port, conf, securityManager)
  }

  test("master start and stop") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))
    tracker.stop()
    rpcEnv.shutdown()
  }

  test("master register shuffle and fetch") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))
    tracker.registerShuffle(10, 2)
    assert(tracker.containsShuffle(10))
    // Sizes round-trip through lossy compression, so compare against the
    // decompressed values rather than the originals.
    val size1000 = MapStatus.decompressSize(MapStatus.compressSize(1000L))
    val size10000 = MapStatus.decompressSize(MapStatus.compressSize(10000L))
    tracker.registerMapOutput(10, 0, MapStatus(BlockManagerId("a", "hostA", 1000),
        Array(1000L, 10000L)))
    tracker.registerMapOutput(10, 1, MapStatus(BlockManagerId("b", "hostB", 1000),
        Array(10000L, 1000L)))
    val statuses = tracker.getMapSizesByExecutorId(10, 0)
    assert(statuses.toSet ===
      Seq((BlockManagerId("a", "hostA", 1000), ArrayBuffer((ShuffleBlockId(10, 0, 0), size1000))),
          (BlockManagerId("b", "hostB", 1000), ArrayBuffer((ShuffleBlockId(10, 1, 0), size10000))))
        .toSet)
    assert(0 == tracker.getNumCachedSerializedBroadcast)
    tracker.stop()
    rpcEnv.shutdown()
  }

  test("master register and unregister shuffle") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))
    tracker.registerShuffle(10, 2)
    val compressedSize1000 = MapStatus.compressSize(1000L)
    val compressedSize10000 = MapStatus.compressSize(10000L)
    tracker.registerMapOutput(10, 0, MapStatus(BlockManagerId("a", "hostA", 1000),
      Array(compressedSize1000, compressedSize10000)))
    tracker.registerMapOutput(10, 1, MapStatus(BlockManagerId("b", "hostB", 1000),
      Array(compressedSize10000, compressedSize1000)))
    assert(tracker.containsShuffle(10))
    assert(tracker.getMapSizesByExecutorId(10, 0).nonEmpty)
    assert(0 == tracker.getNumCachedSerializedBroadcast)
    // After unregistering, the shuffle must disappear entirely.
    tracker.unregisterShuffle(10)
    assert(!tracker.containsShuffle(10))
    assert(tracker.getMapSizesByExecutorId(10, 0).isEmpty)
    tracker.stop()
    rpcEnv.shutdown()
  }

  test("master register shuffle and unregister map output and fetch") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))
    tracker.registerShuffle(10, 2)
    val compressedSize1000 = MapStatus.compressSize(1000L)
    val compressedSize10000 = MapStatus.compressSize(10000L)
    tracker.registerMapOutput(10, 0, MapStatus(BlockManagerId("a", "hostA", 1000),
      Array(compressedSize1000, compressedSize1000, compressedSize1000)))
    tracker.registerMapOutput(10, 1, MapStatus(BlockManagerId("b", "hostB", 1000),
      Array(compressedSize10000, compressedSize1000, compressedSize1000)))
    assert(0 == tracker.getNumCachedSerializedBroadcast)
    // As if we had two simultaneous fetch failures
    tracker.unregisterMapOutput(10, 0, BlockManagerId("a", "hostA", 1000))
    tracker.unregisterMapOutput(10, 0, BlockManagerId("a", "hostA", 1000))

    // The remaining reduce task might try to grab the output despite the shuffle failure;
    // this should cause it to fail, and the scheduler will ignore the failure due to the
    // stage already being aborted.
    intercept[FetchFailedException] { tracker.getMapSizesByExecutorId(10, 1) }

    tracker.stop()
    rpcEnv.shutdown()
  }

  test("remote fetch") {
    val hostname = "localhost"
    val rpcEnv = createRpcEnv("spark", hostname, 0, new SecurityManager(conf))
    val masterTracker = newTrackerMaster()
    masterTracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, masterTracker, conf))

    // Worker-side tracker connected to the master over RPC.
    val slaveRpcEnv = createRpcEnv("spark-slave", hostname, 0, new SecurityManager(conf))
    val slaveTracker = new MapOutputTrackerWorker(conf)
    slaveTracker.trackerEndpoint =
      slaveRpcEnv.setupEndpointRef(rpcEnv.address, MapOutputTracker.ENDPOINT_NAME)

    masterTracker.registerShuffle(10, 1)
    slaveTracker.updateEpoch(masterTracker.getEpoch)
    // This is expected to fail because no outputs have been registered for the shuffle.
    intercept[FetchFailedException] { slaveTracker.getMapSizesByExecutorId(10, 0) }

    val size1000 = MapStatus.decompressSize(MapStatus.compressSize(1000L))
    masterTracker.registerMapOutput(10, 0, MapStatus(
      BlockManagerId("a", "hostA", 1000), Array(1000L)))
    slaveTracker.updateEpoch(masterTracker.getEpoch)
    assert(slaveTracker.getMapSizesByExecutorId(10, 0).toSeq ===
      Seq((BlockManagerId("a", "hostA", 1000), ArrayBuffer((ShuffleBlockId(10, 0, 0), size1000)))))
    assert(0 == masterTracker.getNumCachedSerializedBroadcast)

    // Losing an output must bump the epoch so workers invalidate their caches.
    val masterTrackerEpochBeforeLossOfMapOutput = masterTracker.getEpoch
    masterTracker.unregisterMapOutput(10, 0, BlockManagerId("a", "hostA", 1000))
    assert(masterTracker.getEpoch > masterTrackerEpochBeforeLossOfMapOutput)
    slaveTracker.updateEpoch(masterTracker.getEpoch)
    intercept[FetchFailedException] { slaveTracker.getMapSizesByExecutorId(10, 0) }

    // failure should be cached
    intercept[FetchFailedException] { slaveTracker.getMapSizesByExecutorId(10, 0) }
    assert(0 == masterTracker.getNumCachedSerializedBroadcast)

    masterTracker.stop()
    slaveTracker.stop()
    rpcEnv.shutdown()
    slaveRpcEnv.shutdown()
  }

  test("remote fetch below max RPC message size") {
    val newConf = new SparkConf
    newConf.set("spark.rpc.message.maxSize", "1")
    newConf.set("spark.rpc.askTimeout", "1") // Fail fast
    newConf.set("spark.shuffle.mapOutput.minSizeForBroadcast", "1048576")

    val masterTracker = newTrackerMaster(newConf)
    val rpcEnv = createRpcEnv("spark")
    val masterEndpoint = new MapOutputTrackerMasterEndpoint(rpcEnv, masterTracker, newConf)
    masterTracker.trackerEndpoint =
      rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME, masterEndpoint)

    // Message size should be ~123B, and no exception should be thrown
    masterTracker.registerShuffle(10, 1)
    masterTracker.registerMapOutput(10, 0, MapStatus(
      BlockManagerId("88", "mph", 1000), Array.fill[Long](10)(0)))
    val senderAddress = RpcAddress("localhost", 12345)
    val rpcCallContext = mock(classOf[RpcCallContext])
    when(rpcCallContext.senderAddress).thenReturn(senderAddress)
    masterEndpoint.receiveAndReply(rpcCallContext)(GetMapOutputStatuses(10))
    // Default size for broadcast in this testsuite is set to -1 so should not cause broadcast
    // to be used.
    verify(rpcCallContext, timeout(30000)).reply(any())
    assert(0 == masterTracker.getNumCachedSerializedBroadcast)

    masterTracker.stop()
    rpcEnv.shutdown()
  }

  test("min broadcast size exceeds max RPC message size") {
    val newConf = new SparkConf
    newConf.set("spark.rpc.message.maxSize", "1")
    newConf.set("spark.rpc.askTimeout", "1") // Fail fast
    newConf.set("spark.shuffle.mapOutput.minSizeForBroadcast", Int.MaxValue.toString)

    // A broadcast threshold larger than the RPC frame limit is a misconfiguration
    // and must be rejected at construction time.
    intercept[IllegalArgumentException] { newTrackerMaster(newConf) }
  }

  test("getLocationsWithLargestOutputs with multiple outputs in same machine") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))
    // Setup 3 map tasks
    // on hostA with output size 2
    // on hostA with output size 2
    // on hostB with output size 3
    tracker.registerShuffle(10, 3)
    tracker.registerMapOutput(10, 0, MapStatus(BlockManagerId("a", "hostA", 1000),
        Array(2L)))
    tracker.registerMapOutput(10, 1, MapStatus(BlockManagerId("a", "hostA", 1000),
        Array(2L)))
    tracker.registerMapOutput(10, 2, MapStatus(BlockManagerId("b", "hostB", 1000),
        Array(3L)))

    // When the threshold is 50%, only host A should be returned as a preferred location
    // as it has 4 out of 7 bytes of output.
    val topLocs50 = tracker.getLocationsWithLargestOutputs(10, 0, 1, 0.5)
    assert(topLocs50.nonEmpty)
    assert(topLocs50.get.size === 1)
    assert(topLocs50.get.head === BlockManagerId("a", "hostA", 1000))

    // When the threshold is 20%, both hosts should be returned as preferred locations.
    val topLocs20 = tracker.getLocationsWithLargestOutputs(10, 0, 1, 0.2)
    assert(topLocs20.nonEmpty)
    assert(topLocs20.get.size === 2)
    assert(topLocs20.get.toSet ===
           Seq(BlockManagerId("a", "hostA", 1000), BlockManagerId("b", "hostB", 1000)).toSet)

    tracker.stop()
    rpcEnv.shutdown()
  }

  test("remote fetch using broadcast") {
    val newConf = new SparkConf
    newConf.set("spark.rpc.message.maxSize", "1")
    newConf.set("spark.rpc.askTimeout", "1") // Fail fast
    newConf.set("spark.shuffle.mapOutput.minSizeForBroadcast", "10240") // 10 KiB << 1MiB framesize

    // needs TorrentBroadcast so need a SparkContext
    withSpark(new SparkContext("local", "MapOutputTrackerSuite", newConf)) { sc =>
      val masterTracker = sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]
      val rpcEnv = sc.env.rpcEnv
      val masterEndpoint = new MapOutputTrackerMasterEndpoint(rpcEnv, masterTracker, newConf)
      rpcEnv.stop(masterTracker.trackerEndpoint)
      rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME, masterEndpoint)

      // Frame size should be ~1.1MB, and MapOutputTrackerMasterEndpoint should throw exception.
      // Note that the size is hand-selected here because map output statuses are compressed before
      // being sent.
      masterTracker.registerShuffle(20, 100)
      (0 until 100).foreach { i =>
        masterTracker.registerMapOutput(20, i, new CompressedMapStatus(
          BlockManagerId("999", "mps", 1000), Array.fill[Long](4000000)(0)))
      }
      val senderAddress = RpcAddress("localhost", 12345)
      val rpcCallContext = mock(classOf[RpcCallContext])
      when(rpcCallContext.senderAddress).thenReturn(senderAddress)
      masterEndpoint.receiveAndReply(rpcCallContext)(GetMapOutputStatuses(20))
      // should succeed since majority of data is broadcast and actual serialized
      // message size is small
      verify(rpcCallContext, timeout(30000)).reply(any())
      assert(1 == masterTracker.getNumCachedSerializedBroadcast)
      masterTracker.unregisterShuffle(20)
      assert(0 == masterTracker.getNumCachedSerializedBroadcast)
    }
  }

  test("equally divide map statistics tasks") {
    val func = newTrackerMaster().equallyDivide _
    // (number of items, number of partitions) -> expected partition sizes.
    val cases = Seq((0, 5), (4, 5), (15, 5), (16, 5), (17, 5), (18, 5), (19, 5), (20, 5))
    val expects = Seq(
      Seq(0, 0, 0, 0, 0),
      Seq(1, 1, 1, 1, 0),
      Seq(3, 3, 3, 3, 3),
      Seq(4, 3, 3, 3, 3),
      Seq(4, 4, 3, 3, 3),
      Seq(4, 4, 4, 3, 3),
      Seq(4, 4, 4, 4, 3),
      Seq(4, 4, 4, 4, 4))
    cases.zip(expects).foreach { case ((num, divisor), expect) =>
      val answer = func(num, divisor).toSeq
      var wholeSplit = (0 until num)
      // Verify each split is exactly the next contiguous slice of the range.
      answer.zip(expect).foreach { case (split, expectSplitLength) =>
        val (currentSplit, rest) = wholeSplit.splitAt(expectSplitLength)
        assert(currentSplit.toSet == split.toSet)
        wholeSplit = rest
      }
    }
  }

  test("zero-sized blocks should be excluded when getMapSizesByExecutorId") {
    val rpcEnv = createRpcEnv("test")
    val tracker = newTrackerMaster()
    tracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
      new MapOutputTrackerMasterEndpoint(rpcEnv, tracker, conf))

    // Interleave zero-sized and non-zero-sized blocks; only the latter should be returned.
    tracker.registerShuffle(10, 2)
    val size0 = MapStatus.decompressSize(MapStatus.compressSize(0L))
    val size1000 = MapStatus.decompressSize(MapStatus.compressSize(1000L))
    val size10000 = MapStatus.decompressSize(MapStatus.compressSize(10000L))
    tracker.registerMapOutput(10, 0, MapStatus(BlockManagerId("a", "hostA", 1000),
      Array(size0, size1000, size0, size10000)))
    tracker.registerMapOutput(10, 1, MapStatus(BlockManagerId("b", "hostB", 1000),
      Array(size10000, size0, size1000, size0)))
    assert(tracker.containsShuffle(10))
    assert(tracker.getMapSizesByExecutorId(10, 0, 4).toSeq ===
        Seq(
          (BlockManagerId("a", "hostA", 1000),
            Seq((ShuffleBlockId(10, 0, 1), size1000), (ShuffleBlockId(10, 0, 3), size10000))),
          (BlockManagerId("b", "hostB", 1000),
            Seq((ShuffleBlockId(10, 1, 0), size10000), (ShuffleBlockId(10, 1, 2), size1000)))
        )
    )

    tracker.unregisterShuffle(10)
    tracker.stop()
    rpcEnv.shutdown()
  }
}
| guoxiaolongzte/spark | core/src/test/scala/org/apache/spark/MapOutputTrackerSuite.scala | Scala | apache-2.0 | 14,856 |
package com.softwaremill.macmemo
import scala.annotation.StaticAnnotation
import scala.concurrent.duration.FiniteDuration
import scala.language.experimental.macros
/**
 * Macro annotation that caches the annotated method's results.
 *
 * @param maxSize          maximum number of entries kept in the cache
 * @param expiresAfter     how long a cached entry stays valid after being written
 * @param concurrencyLevel optional concurrency hint for the underlying cache; None uses the default
 */
class memoize(val maxSize: Long, expiresAfter: FiniteDuration, val concurrencyLevel: Option[Int] = None)
  extends StaticAnnotation {

  // Expansion is performed at compile time by memoizeMacro.impl.
  def macroTransform(annottees: Any*): Any = macro memoizeMacro.impl
}
| kciesielski/macmemo | macros/src/main/scala/com/softwaremill/macmemo/memoize.scala | Scala | apache-2.0 | 372 |
package slick.test.codegen
import java.io.File
import java.sql.Blob
import com.typesafe.slick.testkit.util.{TestCodeGenerator, InternalJdbcTestDB, StandardTestDBs, JdbcTestDB}
import scala.concurrent.{Future, Await}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.{Codec, Source}
import slick.dbio.DBIO
import slick.codegen.{OutputHelpers, SourceCodeGenerator}
import slick.jdbc.JdbcBackend
import slick.jdbc.meta.MTable
import slick.model.Model
/** Generates files for GeneratedCodeTest */
/**
 * Drives Slick's code generator against a set of test databases and generator
 * customizations, producing the sources consumed by GeneratedCodeTest. Each
 * Config pairs a database, init scripts and (optionally) a customized
 * SourceCodeGenerator plus the test code to run against the generated model.
 */
object GenerateMainSources extends TestCodeGenerator {
  def packageName = "slick.test.codegen.generated"
  def defaultTestCode(c: Config): String = "slick.test.codegen.GeneratedCodeTest.test" + c.objectName

  // One entry per generated source file / test case.
  lazy val configurations = Seq(
    new Config("CG1", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/h2.sql")),
    new Config("CG2", StandardTestDBs.HsqldbMem, "HsqldbMem", Seq("/dbs/hsqldb.sql")),
    new Config("CG3", StandardTestDBs.SQLiteMem, "SQLiteMem", Seq("/dbs/sqlite.sql")),
    new Config("DB2", StandardTestDBs.DB2, "DB2", Seq("/dbs/db2.sql")),
    new Config("DerbyMem", StandardTestDBs.DerbyMem, "DerbyMem", Seq("/dbs/derby.sql")),
    // CG7: customized entity/table names and extra parent traits.
    new Config("CG7", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/h2.sql")) {
      override def generator = tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_) {
        override def entityName = {
          case "COFFEES" => "Coff"
          case other => super.entityName(other)
        }
        override def tableName = {
          case "COFFEES" => "Coffs"
          case "SUPPLIERS" => "Supps"
          case other => super.tableName(other)
        }
        override def code = "trait AA; trait BB\\n" + super.code
        override def Table = new Table(_){
          override def EntityType = new EntityType{
            override def parents = Seq("AA","BB")
          }
          override def TableClass = new TableClass{
            override def parents = Seq("AA","BB")
          }
        }
      })
    },
    // CG8: custom column types via CustomTyping, entity types disabled.
    new Config("CG8", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/h2-simple.sql")) {
      override def generator = tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_) {
        override def Table = new Table(_){
          override def EntityType = new EntityType{
            override def enabled = false
          }
          override def mappingEnabled = true
          override def code = {
            if(model.name.table == "SIMPLE_AS"){
              Seq("""
import slick.test.codegen.CustomTyping._
import slick.test.codegen.CustomTyping
type SimpleA = CustomTyping.SimpleA
val SimpleA = CustomTyping.SimpleA
          """.trim) ++ super.code
            } else super.code
          }
          override def Column = new Column(_){
            override def rawType = model.name match {
              case "A1" => "Bool"
              case _ => super.rawType
            }
          }
        }
      })
    },
    // CG9: auto-increment columns emitted last and as Option.
    new Config("CG9", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/h2.sql")) {
      override def generator = tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_) {
        override def Table = new Table(_){
          override def autoIncLastAsOption = true
          override def Column = new Column(_){
            override def asOption = autoInc
          }
        }
      })
    },
    new UUIDConfig("CG10", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/uuid-h2.sql")),
    // CG11: every column emitted as Option.
    new Config("CG11", StandardTestDBs.H2Mem, "H2Mem", Seq("/dbs/h2-simple.sql")) {
      override def generator = tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_) {
        override def Table = new Table(_){
          override def Column = new Column(_){
            override def asOption = true
          }
        }
      })
    },
    // Postgres1: byte array and Blob column round-trip.
    new Config("Postgres1", StandardTestDBs.Postgres, "Postgres", Nil) {
      import tdb.profile.api._
      class A(tag: Tag) extends Table[(Int, Array[Byte], Blob)](tag, "a") {
        def id = column[Int]("id")
        def ba = column[Array[Byte]]("ba")
        def blob = column[Blob]("blob")
        def * = (id, ba, blob)
      }
      override def generator =
        TableQuery[A].schema.create >>
        tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_))
      override def testCode =
        """
          |      import java.sql.Blob
          |      import javax.sql.rowset.serial.SerialBlob
          |      val a1 = ARow(1, Array[Byte](1,2,3), new SerialBlob(Array[Byte](4,5,6)))
          |      DBIO.seq(
          |        schema.create,
          |        A += a1,
          |        A.result.map { case Seq(ARow(id, ba, blob)) => assertEquals("1123", ""+id+ba.mkString) }
          |      ).transactionally
        """.stripMargin
    },
    new UUIDConfig("Postgres2", StandardTestDBs.Postgres, "Postgres", Seq("/dbs/uuid-postgres.sql")),
    // Postgres3: verifies generated column options (AutoInc, defaults).
    new Config("Postgres3", StandardTestDBs.Postgres, "Postgres", Seq("/dbs/postgres.sql")) {
      override def testCode: String =
        """import slick.ast.{FieldSymbol, Select}
          |import slick.jdbc.meta.MTable
          |import slick.relational.RelationalProfile
          |DBIO.seq(
          |  schema.create,
          |  MTable.getTables(Some(""), Some("public"), None, None).map { tables =>
          |    def optionsOfColumn(c: slick.lifted.Rep[_]) =
          |      c.toNode.asInstanceOf[Select].field.asInstanceOf[FieldSymbol].options.toList
          |    //val smallserialOptions = optionsOfColumn(TestDefault.baseTableRow.smallintAutoInc)
          |    val serialOptions = optionsOfColumn(TestDefault.baseTableRow.intAutoInc)
          |    val bigserialOptions = optionsOfColumn(TestDefault.baseTableRow.bigintAutoInc)
          |    val char1EmptyOptions = optionsOfColumn(TestDefault.baseTableRow.char1DefaultEmpty)
          |    val char1ValidOptions = optionsOfColumn(TestDefault.baseTableRow.char1DefaultValid)
          |    val char1InvalidOptions = optionsOfColumn(TestDefault.baseTableRow.char1DefaultInvalid)
          |    //assertTrue("smallint_auto_inc should be AutoInc", smallserialOptions.exists(option => (option equals TestDefault.baseTableRow.O.AutoInc)))
          |    assertTrue("int_auto_inc should be AutoInc", serialOptions.exists(option => (option equals TestDefault.baseTableRow.O.AutoInc)))
          |    assertTrue("bigint_auto_inc should be AutoInc", bigserialOptions.exists(option => (option equals TestDefault.baseTableRow.O.AutoInc)))
          |    assertTrue("default value of char1_default_empty should be ' '", char1EmptyOptions.exists(option => (option equals TestDefault.baseTableRow.O.Default(Some(' ')))))
          |    assertTrue("default value of char1_default_valid should be 'a'", char1ValidOptions.exists(option => (option equals TestDefault.baseTableRow.O.Default(Some('a')))))
          |    assertTrue("default value of char1_default_invalid should not exist", char1InvalidOptions.forall(option => (option.isInstanceOf[RelationalProfile.ColumnOption.Default[_]])))
          |  }
          |)
        """.stripMargin
    },
    // MySQL1: column named with a double quote in its default value.
    new Config("MySQL1", StandardTestDBs.MySQL, "MySQL", Nil) {
      import tdb.profile.api._
      class A(tag: Tag) extends Table[(String)](tag, "a") {
        def quote = column[String]("x", O.Default("\\"\\"")) // column name with double quote
        def * = quote
      }
      override def generator =
        TableQuery[A].schema.create >>
        tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_))
      override def testCode =
        """
          |      val a1 = ARow("e")
          |      DBIO.seq(
          |        schema.create,
          |        A += a1,
          |        A.result.map { case Seq(ARow(quote)) => assertEquals("e", quote) }
          |      ).transactionally
        """.stripMargin
    },
    // MySQL: verifies text column DDL types, schema name and BIT/numeric defaults.
    new Config("MySQL", StandardTestDBs.MySQL, "MySQL", Seq("/dbs/mysql.sql") ){
      override def generator: DBIO[SourceCodeGenerator] =
        tdb.profile.createModel(ignoreInvalidDefaults=false).map(new SourceCodeGenerator(_){
          override def parentType = Some("com.typesafe.slick.testkit.util.TestCodeRunner.TestCase")
          override def code = {
            val testcode =
              """
                |  val entry = DefaultNumericRow(d0 = scala.math.BigDecimal(123.45), d1 = scala.math.BigDecimal(90), d3 = 0)
                |  val createStmt = schema.create.statements.mkString
                |  assertTrue("Schema name should be `slick_test`" , TableName.baseTableRow.schemaName.getOrElse("") == "slick_test" )
                |  assertTrue(createStmt contains "`entry1` LONGTEXT")
                |  assertTrue(createStmt contains "`entry2` MEDIUMTEXT")
                |  assertTrue(createStmt contains "`entry3` TEXT")
                |  assertTrue(createStmt contains "`entry4` VARCHAR(255)")
                |  def assertType(r: Rep[_], t: String) = assert(r.toNode.nodeType.toString == s"$t'")
                |  assertType(TableName.baseTableRow.id, "Int")
                |  assertType(TableName.baseTableRow.si, "Int")
                |  assertType(TableName.baseTableRow.mi, "Int")
                |  assertType(TableName.baseTableRow.ui, "Long")
                |  assertType(TableName.baseTableRow.bi, "Long")
                |  //assertType(TableName.baseTableRow.ubi, "BigInt")
                |  val bitEntries = Seq(BitTestRow(true), BitTestRow(false, true, true))
                |  DBIO.seq(
                |    schema.create,
                |    TableName += TableNameRow(0, 0, 0, 0, 0/*, BigInt(0)*/),
                |    BitTest ++= bitEntries,
                |    BitTest.result.map{assertEquals(_, bitEntries)},
                |    TableName.result.map{ case rows: Seq[TableNameRow] => assert(rows.length == 1) },
                |    DefaultNumeric += entry,
                |    DefaultNumeric.result.head.map{ r => assertEquals(r , entry) }
                |  )
              """.stripMargin
            s"""
               |lazy val tdb = $fullTdbName
               |def test = {
               |  import org.junit.Assert._
               |  import scala.concurrent.ExecutionContext.Implicits.global
               |  $testcode
               |}
             """.stripMargin + super.code
          }
        })
    },
    new Config("EmptyDB", StandardTestDBs.H2Mem, "H2Mem", Nil),
    // Oracle1: numeric default materializes as BigDecimal(0).
    new Config("Oracle1", StandardTestDBs.Oracle, "Oracle", Seq("/dbs/oracle1.sql")) {
      override def useSingleLineStatements = true
      override def testCode =
        """
          |      val entry = PersonRow(1)
          |      assertEquals(scala.math.BigDecimal(0), entry.age)
          |      DBIO.seq (
          |        schema.create,
          |        Person += entry,
          |        Person.result.head.map{ r => assertEquals(r , entry) }
          |      )
        """.stripMargin
    }
  )

  // Shared config for databases with native UUID columns: adds UUID default
  // rendering, result-set converters and a round-trip test.
  class UUIDConfig(objectName: String, tdb: JdbcTestDB, tdbName: String, initScripts: Seq[String])
    extends Config(objectName, tdb, tdbName, initScripts) {
    override def generator = tdb.profile.createModel(ignoreInvalidDefaults=false).map(new MyGen(_) {
      override def Table = new Table(_) {
        override def Column = new Column(_){
          // Render java.util.UUID defaults as fromString calls in generated code.
          override def defaultCode: (Any) => String = {
            case v: java.util.UUID => s"""java.util.UUID.fromString("${v.toString}")"""
            case v => super.defaultCode(v)
          }
        }
        override def code = {
          Seq("""
            |  /* default UUID, which is the same as for 'uuid.sql' */
            |  val defaultUUID = java.util.UUID.fromString("2f3f866c-d8e6-11e2-bb56-50e549c9b654")
            |  /* convert UUID */
            |  implicit object GetUUID extends slick.jdbc.GetResult[java.util.UUID] {
            |    def apply(rs: slick.jdbc.PositionedResult) = rs.nextObject().asInstanceOf[java.util.UUID]
            |  }
            |  /* convert Option[UUID] for H2 */
            |  implicit object GetOptionUUID extends slick.jdbc.GetResult[Option[java.util.UUID]] {
            |    def apply(rs: slick.jdbc.PositionedResult) = Option(rs.nextObject().asInstanceOf[java.util.UUID])
            |  }
          """.stripMargin) ++ super.code
        }
      }
    })
    override def testCode =
      """
        |      import java.util.UUID
        |      val u1 = UUID.randomUUID()
        |      val u2 = UUID.randomUUID()
        |      val u3 = UUID.randomUUID()
        |      val u4 = UUID.randomUUID()
        |      val p1 = PersonRow(1, u1, uuidFunc = Some(u3))
        |      val p2 = PersonRow(2, u2, uuidFunc = Some(u4))
        |
        |      def assertAll(all: Seq[PersonRow]) = {
        |        assertEquals( 2, all.size )
        |        assertEquals( Set(1,2), all.map(_.id).toSet )
        |        assertEquals( Set(u1, u2), all.map(_.uuid).toSet )
        |        assertEquals( Set(Some(u3), Some(u4)), all.map(_.uuidFunc).toSet )
        |        //it should contain sample UUID
        |        assert(all.forall(_.uuidDef == Some(defaultUUID)))
        |      }
        |
        |      DBIO.seq(
        |        schema.create,
        |        Person += p1,
        |        Person += p2,
        |        Person.result.map(assertAll)
        |      ).transactionally
      """.stripMargin
  }
}
| marko-asplund/slick | slick-testkit/src/codegen/scala/slick/test/codegen/GenerateMainSources.scala | Scala | bsd-2-clause | 13,158 |
/*
* Copyright 2019 CJWW Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms.validation
import com.cjwwdev.regex.RegexPack
import play.api.data.Forms.text
import play.api.data.Mapping
import play.api.data.validation.{Constraint, Invalid, Valid, ValidationError}
object CommonValidation extends CommonValidation

/**
 * Reusable Play form field mappings. Each mapping wraps `text` with a named
 * Constraint that validates the submitted value against the corresponding
 * regex from RegexPack and reports a human-readable error on failure.
 */
trait CommonValidation extends RegexPack {

  /** Rejects empty input for the given form key. */
  def hasTextBeenEntered(key: String): Mapping[String] = {
    val enteredTextConstraint: Constraint[String] = Constraint(s"constraints.$key") { formValue =>
      if (formValue.isEmpty) {
        Invalid(Seq(ValidationError(s"You have not entered the form value for key $key")))
      } else {
        Valid
      }
    }
    text.verifying(enteredTextConstraint)
  }

  /** Accepts only values matching the user name regex. */
  def userNameValidation: Mapping[String] = {
    val userNameConstraint: Constraint[String] = Constraint("constraints.userName") {
      case userNameRegex() => Valid
      case _               => Invalid(Seq(ValidationError("You have not entered a valid user name")))
    }
    text.verifying(userNameConstraint)
  }

  /** Accepts only values matching the email regex. */
  def emailAddressValidation: Mapping[String] = {
    val emailAddressConstraint: Constraint[String] = Constraint("constraints.email") {
      case emailRegex() => Valid
      case _            => Invalid(Seq(ValidationError("You have not entered a valid email address")))
    }
    text.verifying(emailAddressConstraint)
  }

  /** Accepts only values matching the password regex, with a dedicated message for blank input. */
  def passwordValidation: Mapping[String] = {
    val passwordConstraint: Constraint[String] = Constraint("constraints.password") {
      case passwordRegex() => Valid
      case ""              => Invalid(Seq(ValidationError("You have not entered your password")))
      case _               => Invalid(Seq(ValidationError("You have not entered a valid password")))
    }
    text.verifying(passwordConstraint)
  }

  /** Same shape as [[passwordValidation]] but with a confirmation-specific blank-input message. */
  def confirmPasswordValidation: Mapping[String] = {
    val confirmPasswordConstraint: Constraint[String] = Constraint("constraints.confirmPassword") {
      case passwordRegex() => Valid
      case ""              => Invalid(Seq(ValidationError("You have not confirmed your password")))
      case _               => Invalid(Seq(ValidationError("You have not entered a valid password")))
    }
    text.verifying(confirmPasswordConstraint)
  }
}
| cjww-development/auth-service | app/forms/validation/CommonValidation.scala | Scala | apache-2.0 | 3,146 |
import sbt._
import sbt.Keys._
import com.github.retronym.SbtOneJar._
/** sbt (pre-1.x) build definition for the Migrator tool; packaged as a single jar via sbt-onejar. */
object MigratorBuild extends Build {
  lazy val loader = Project("migrator", file("."), settings = Defaults.defaultSettings ++ oneJarSettings ++ Seq(
    // basic project settings
    name := "Migrator",
    version := "0.1-SNAPSHOT",
    scalaVersion := "2.10.0",

    // Resolvers
    resolvers ++= Seq(
      "Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases/"
    ),

    // Dependencies
    libraryDependencies ++= Seq(
      // Testing Dependencies
      "org.specs2" %% "specs2" % "1.13" % "test",
      "org.mockito" % "mockito-all" % "1.9.5" % "test",

      // Application Dependencies
      "com.typesafe" % "config" % "1.0.0",
      "org.reactivemongo" %% "reactivemongo" % "0.8",
      "joda-time" % "joda-time" % "2.1",
      "org.joda" % "joda-convert" % "1.3"
    ),

    // Publish the one-jar artifact under the "dist" classifier.
    artifact in oneJar <<= moduleName(Artifact(_, "dist"))
  ))
}
| DFID/aid-platform-beta | src/migrator/project/MigratorBuild.scala | Scala | mit | 1,043 |
package com.poshwolf.ws
import com.poshwolf.core._
import java.util.HashMap
import scala.actors.Actor
// Message protocol for ControllerActor. All messages are private to this file;
// clients talk to the actor only through whatever facade wraps these sends.
private case class PostTaskRequest(task: TaskDefinition)        // register a task, replies with its new id
private case class GetTaskDefinitionRequest(id: Int)            // replies with the stored TaskDefinition
private case class SetProgressRequest(id: Int, progress: Int)   // fire-and-forget progress update
private case class GetProgressRequest(id: Int)                  // replies with current progress
private case class GetResultRequest(id: Int)                    // replies with the finished task's Result
private case class GetAllProgressesRequest(ids: Array[Int])     // replies with an Array[TaskProgressEntry]
private case class FinishTaskRequest(id: Int, result: Result)   // marks progress 100 and stores the result
/**
 * Mutable JavaBean pairing a task id with its progress percentage.
 * Kept bean-style (no-arg constructor plus getters/setters) so it can be
 * serialized by web-service frameworks.
 */
class TaskProgressEntry(_id: Int, _progress: Int) {
  private var taskId: Int = _id
  private var taskProgress: Int = _progress

  // Bean contract: a public no-arg constructor, defaulting both fields to 0.
  def this() = this(0, 0)

  def getId: Int = taskId
  def setId(_id: Int): Unit = { taskId = _id }
  def getProgress: Int = taskProgress
  def setProgress(_progress: Int): Unit = { taskProgress = _progress }
}
// Single-threaded owner of all task bookkeeping (built on the legacy
// scala.actors library). Because every request is handled inside receive,
// the plain unsynchronized HashMaps below are only touched from the actor's
// own thread.
class ControllerActor extends Actor {
  override def act() {
    val tasks = new HashMap[Int, TaskDefinition]
    val status = new HashMap[Int, Int]     // task id -> progress percent (100 == finished)
    val results = new HashMap[Int, Result] // populated only by FinishTaskRequest
    loop {
      receive {
        case PostTaskRequest(task) =>
          // Ids are dense and 1-based, derived from the current map size;
          // entries are never removed, so ids stay unique.
          val newId = status.size + 1
          tasks.put(newId, task)
          status.put(newId, 0)
          reply(newId)
        case GetTaskDefinitionRequest(id) =>
          reply(tasks.get(id))
        case SetProgressRequest(id, progress) =>
          // Fire-and-forget: no reply for progress updates.
          status.put(id, progress)
        case GetProgressRequest(id) =>
          // NOTE(review): for an unknown id, java.util.HashMap.get returns
          // null, which unboxes to 0 here — callers cannot distinguish
          // "unknown task" from "0% done". Confirm that is acceptable.
          reply(status.get(id))
        case GetResultRequest(id) =>
          reply(results.get(id))
        case GetAllProgressesRequest(ids) =>
          val res = for(id <- ids) yield new TaskProgressEntry(id, status.get(id))
          reply(res.toArray)
        case FinishTaskRequest(id, result) =>
          status.put(id, 100)
          results.put(id, result)
      }
    }
  }
}
| tilius/posh-wolf-ws | src/main/scala/com/poshwolf/ws/ControllerActor.scala | Scala | gpl-2.0 | 1,769 |
package beyond
import akka.actor.Actor
import akka.actor.ActorLogging
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.LeaderSelector
import org.apache.curator.framework.recipes.leader.LeaderSelectorListenerAdapter
import play.api.libs.concurrent.Akka
// Companion constants and the event-stream protocol for leadership changes.
object LeaderSelectorActor {
  val Name: String = "leaderSelector"
  // ZooKeeper znode used by Curator's leader election recipe.
  val LeaderPath: String = "/leader"
  // Published on the Akka event stream when this node gains/loses leadership.
  sealed trait LeadershipMessage
  case object LeadershipTaken extends LeadershipMessage
  case object LeadershipLost extends LeadershipMessage
}
// Participates in a Curator leader election and broadcasts leadership
// changes on the Akka event stream. The actor itself handles no messages;
// it exists so the selector's lifecycle is tied to the actor system's.
class LeaderSelectorActor(curatorFramework: CuratorFramework) extends LeaderSelectorListenerAdapter with Actor with ActorLogging {
  import LeaderSelectorActor._
  private val leaderSelector = new LeaderSelector(curatorFramework, LeaderPath, this)
  override def preStart() {
    // Ensure the election znode exists (async) before joining the race.
    curatorFramework.create().inBackground().forPath(LeaderPath, Array[Byte](0))
    // Re-enter the election automatically after relinquishing leadership.
    leaderSelector.autoRequeue()
    leaderSelector.start()
    log.info("LeaderSelectorActor started")
  }
  override def postStop() {
    leaderSelector.close()
    log.info("LeaderSelectorActor stopped")
  }
  // takeLeadership is called by an internal Curator thread.
  override def takeLeadership(framework: CuratorFramework) {
    import play.api.Play.current
    log.info("Leadership is taken")
    Akka.system.eventStream.publish(LeadershipTaken)
    try {
      // Curator's contract: leadership is held only while this method blocks,
      // so sleep until the thread is interrupted (leadership revoked or the
      // selector closed).
      while (!Thread.currentThread.isInterrupted) {
        Thread.sleep(Int.MaxValue)
      }
    } catch {
      // Restore the interrupt flag so callers up the stack can observe it.
      case _: InterruptedException => Thread.currentThread().interrupt()
    } finally {
      Akka.system.eventStream.publish(LeadershipLost)
      log.info("Leadership is lost")
    }
  }
  override def receive: Receive = Actor.emptyBehavior
}
| SollmoStudio/beyond | core/app/beyond/LeaderSelectorActor.scala | Scala | apache-2.0 | 1,747 |
/**
* Copyright 2012-2013 greencheek.org (www.greencheek.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.greencheek.jms.yankeedo.stats
import java.util.concurrent.TimeUnit
import java.io.{PrintWriter, StringWriter, Writer}
/**
 * Immutable configuration for reporting timing statistics: the [[TimeUnit]]
 * results are expressed in, the formatter that renders them, and the writer
 * the formatted text is emitted to.
 *
 * Created by dominictootell on 22/03/2014.
 */
case class OutputStats(val timeUnit : TimeUnit,
                       val formatter : StatsFormatter,
                       val writer : Writer)
| tootedom/yankeedo | yankeedo-core/src/main/scala/org/greencheek/jms/yankeedo/stats/OutputStats.scala | Scala | apache-2.0 | 955 |
package japgolly.scalajs.react
import japgolly.scalajs.react.extra.router.RoutingRule
import japgolly.scalajs.react.extra.router.StaticDsl.RouteCommon
import japgolly.scalajs.react.internal.monocle.{MonocleExtComponent, MonocleExtStateSnapshot}
import monocle._
// Monocle interop syntax for scalajs-react: lets optics (Iso/Prism) be used
// directly to re-map state types and router page types. Each implicit class
// is a zero-allocation (AnyVal) wrapper adding "L"-suffixed variants of the
// underlying xmap/pmap combinators.
object ReactMonocle extends MonocleExtComponent with MonocleExtStateSnapshot {
  /** Re-map a ModStateFn's state type through an Iso. */
  implicit final class MonocleReactExtModStateFn[F[_], A[_], S](private val self: ModStateFn[F, A, S]) extends AnyVal {
    def xmapStateL[T](l: Iso[S, T]): ModStateFn[F, A, T] =
      self.xmapState(l.get)(l.reverseGet)
  }
  /** Re-map a ModStateWithPropsFn's state type through an Iso. */
  implicit final class MonocleReactExtModStateWithPropsFn[F[_], A[_], P, S](private val self: ModStateWithPropsFn[F, A, P, S]) extends AnyVal {
    def xmapStateL[T](l: Iso[S, T]): ModStateWithPropsFn[F, A, P, T] =
      self.xmapState(l.get)(l.reverseGet)
  }
  /** Route combinators taking optics: pmapL narrows via a Prism, xmapL maps via an Iso. */
  implicit final class MonocleReactExtRouteCommon[R[X] <: RouteCommon[R, X], A](private val r: RouteCommon[R, A]) extends AnyVal {
    def pmapL[B](l: Prism[A, B]): R[B] =
      r.pmap(l.getOption)(l.reverseGet)
    def xmapL[B](l: Iso[A, B]): R[B] =
      r.xmap(l.get)(l.reverseGet)
  }
  /** RoutingRule combinators taking optics; note pmapL widens the page type. */
  implicit final class MonocleReactExtRouterRule[Page, Props](private val r: RoutingRule[Page, Props]) extends AnyVal {
    def xmapL[A](l: Iso[Page, A]): RoutingRule[A, Props] =
      r.xmap(l.get)(l.reverseGet)
    def pmapL[W](l: Prism[W, Page]): RoutingRule[W, Props] =
      r.pmapF(l.reverseGet)(l.getOption)
  }
}
| japgolly/scalajs-react | extraExtMonocle3/src/main/scala/japgolly/scalajs/react/ReactMonocle.scala | Scala | apache-2.0 | 1,446 |
package mesosphere.marathon.api.v2
import javax.servlet.http.HttpServletRequest
import javax.validation.ConstraintViolationException
import akka.event.EventStream
import mesosphere.marathon._
import mesosphere.marathon.api.v2.json.V2AppDefinition
import mesosphere.marathon.api.{ JsonTestHelper, TaskKiller }
import mesosphere.marathon.core.appinfo.{ EnrichedTask, TaskCounts, AppInfo, AppInfoService }
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.state._
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.upgrade.DeploymentPlan
import mesosphere.util.Mockito
import org.scalatest.{ GivenWhenThen, Matchers }
import play.api.libs.json.{ JsObject, JsValue, Json }
import collection.immutable
import scala.concurrent.Future
// Unit tests for AppsResource. All collaborators are mocked; each test stubs
// only the groupManager/service calls its code path needs.
class AppsResourceTest extends MarathonSpec with Matchers with Mockito with GivenWhenThen {
  import mesosphere.marathon.api.v2.json.Formats._
  test("Create a new app successfully") {
    Given("An app and group")
    val req = mock[HttpServletRequest]
    val app = V2AppDefinition(id = PathId("/app"), cmd = Some("cmd"))
    val group = Group(PathId("/"), Set(app.toAppDefinition))
    val plan = DeploymentPlan(group, group)
    val body = Json.stringify(Json.toJson(app)).getBytes("UTF-8")
    groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
    groupManager.rootGroup() returns Future.successful(group)
    When("The create request is made")
    val response = appsResource.create(req, body, force = false)
    Then("It is successful")
    response.getStatus should be(201)
    val expected = AppInfo(
      app.toAppDefinition,
      maybeTasks = Some(immutable.Seq.empty),
      maybeCounts = Some(TaskCounts.zero),
      maybeDeployments = Some(immutable.Seq(Identifiable(plan.id)))
    )
    JsonTestHelper.assertThatJsonString(response.getEntity.asInstanceOf[String]).correspondsToJsonOf(expected)
  }
  test("Create a new app fails with Validation errors") {
    Given("An app with validation errors")
    val req = mock[HttpServletRequest]
    val app = V2AppDefinition(id = PathId("/app"))
    val group = Group(PathId("/"), Set(app.toAppDefinition))
    val plan = DeploymentPlan(group, group)
    val body = Json.stringify(Json.toJson(app)).getBytes("UTF-8")
    groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
    Then("A constraint violation exception is thrown")
    intercept[ConstraintViolationException] { appsResource.create(req, body, false) }
  }
  test("Replace an existing application") {
    Given("An app and group")
    val req = mock[HttpServletRequest]
    val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
    val group = Group(PathId("/"), Set(app))
    val plan = DeploymentPlan(group, group)
    val body = """{ "cmd": "bla" }""".getBytes("UTF-8")
    groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
    When("The application is updates")
    val response = appsResource.replace(req, app.id.toString, false, body)
    Then("The application is updated")
    response.getStatus should be(200)
  }
  test("Restart an existing app") {
    val app = AppDefinition(id = PathId("/app"))
    val group = Group(PathId("/"), Set(app))
    val plan = DeploymentPlan(group, group)
    service.deploy(any, any) returns Future.successful(())
    groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
    val response = appsResource.restart(app.id.toString, force = true)
    response.getStatus should be(200)
  }
  test("Restart a non existing app will fail") {
    val missing = PathId("/app")
    groupManager.updateApp(any, any, any, any, any) returns Future.failed(new UnknownAppException(missing))
    intercept[UnknownAppException] { appsResource.restart(missing.toString, force = true) }
  }
  test("Search apps can be filtered") {
    val app1 = AppDefinition(id = PathId("/app/service-a"), cmd = Some("party hard"), labels = Map("a" -> "1", "b" -> "2"))
    val app2 = AppDefinition(id = PathId("/app/service-b"), cmd = Some("work hard"), labels = Map("a" -> "1", "b" -> "3"))
    val apps = Set(app1, app2)
    // Helper: apply the selector produced by AppsResource.search to our fixture set.
    def search(cmd: Option[String], id: Option[String], label: Option[String]): Set[AppDefinition] = {
      val selector = appsResource.search(cmd, id, label)
      apps.filter(selector.matches(_))
    }
    search(cmd = None, id = None, label = None) should be(Set(app1, app2))
    search(cmd = Some(""), id = None, label = None) should be(Set(app1, app2))
    search(cmd = Some("party"), id = None, label = None) should be(Set(app1))
    search(cmd = Some("work"), id = None, label = None) should be(Set(app2))
    search(cmd = Some("hard"), id = None, label = None) should be(Set(app1, app2))
    search(cmd = Some("none"), id = None, label = None) should be(Set.empty)
    search(cmd = None, id = Some("app"), label = None) should be(Set(app1, app2))
    search(cmd = None, id = Some("service-a"), label = None) should be(Set(app1))
    search(cmd = Some("party"), id = Some("app"), label = None) should be(Set(app1))
    search(cmd = Some("work"), id = Some("app"), label = None) should be(Set(app2))
    search(cmd = Some("hard"), id = Some("service-a"), label = None) should be(Set(app1))
    search(cmd = Some(""), id = Some(""), label = None) should be(Set(app1, app2))
    search(cmd = None, id = None, label = Some("b==2")) should be(Set(app1))
    search(cmd = Some("party"), id = Some("app"), label = Some("a==1")) should be(Set(app1))
    search(cmd = Some("work"), id = Some("app"), label = Some("a==1")) should be(Set(app2))
    search(cmd = Some("hard"), id = Some("service-a"), label = Some("a==1")) should be(Set(app1))
    search(cmd = Some(""), id = Some(""), label = Some("")) should be(Set(app1, app2))
  }
  var eventBus: EventStream = _
  var service: MarathonSchedulerService = _
  var taskTracker: TaskTracker = _
  var taskKiller: TaskKiller = _
  var healthCheckManager: HealthCheckManager = _
  var taskFailureRepo: TaskFailureRepository = _
  var config: MarathonConf = _
  var groupManager: GroupManager = _
  var appInfoService: AppInfoService = _
  var appsResource: AppsResource = _
  before {
    eventBus = mock[EventStream]
    service = mock[MarathonSchedulerService]
    taskTracker = mock[TaskTracker]
    taskKiller = mock[TaskKiller]
    healthCheckManager = mock[HealthCheckManager]
    taskFailureRepo = mock[TaskFailureRepository]
    config = mock[MarathonConf]
    groupManager = mock[GroupManager]
    // Fix: appInfoService was declared but never assigned, so AppsResource
    // received a null collaborator; mock it like the other dependencies.
    appInfoService = mock[AppInfoService]
    appsResource = new AppsResource(
      eventBus,
      mock[AppTasksResource],
      service,
      appInfoService,
      config,
      groupManager
    )
  }
}
| cgvarela/marathon | src/test/scala/mesosphere/marathon/api/v2/AppsResourceTest.scala | Scala | apache-2.0 | 6,691 |
/**
* Copyright (C) 2014 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package debiki.dao
import com.debiki.core._
import com.debiki.core.Prelude._
import debiki._
import debiki.DebikiHttp._
import java.{util => ju}
import requests.PageRequest
import CachingDao.CacheKey
/** Loads and saves pages and page parts (e.g. posts and patches).
*/
/**
 * Database-backed operations on pages: creation, saving actions (posts,
 * edits, votes), generating the resulting notifications, and loading page
 * parts. Mixed into SiteDao, which supplies siteDbDao.
 */
trait PageDao {
  self: SiteDao =>
  /** Allocates the next free page id from the site database. */
  def nextPageId(): PageId = siteDbDao.nextPageId()
  /** Persists a new page and returns it with ids assigned. */
  def createPage(pageNoId: Page): Page = {
    siteDbDao.createPage(pageNoId)
    // COULD generate and save notfs [notifications]
  }
  /** Single-action convenience wrapper around savePageActionsGenNotfs. */
  def savePageActionGenNotfs[A](pageReq: PageRequest[_], action: RawPostAction[A]) = {
    val (pageAfter, actionsWithId) = savePageActionsGenNotfs(pageReq, Seq(action))
    (pageAfter, actionsWithId.head.asInstanceOf[RawPostAction[A]])
  }
  /** Saves page actions and places messages in users' inboxes, as needed.
    * Returns a pair with 1) the page including new actions plus the current user,
    * and 2) the actions, but with ids assigned.
    */
  def savePageActionsGenNotfs(pageReq: PageRequest[_], actions: Seq[RawPostAction[_]])
        : (PageNoPath, Seq[RawPostAction[_]]) = {
    val pagePartsNoAuthor = pageReq.thePageNoPath.parts
    // We're probably going to render parts of the page later, and then we
    // need the user, so add it to the page — it's otherwise absent if this is
    // the user's first contribution to the page.
    val pageParts = pagePartsNoAuthor ++ pageReq.anyMeAsPeople
    val page = PageNoPath(pageParts, pageReq.ancestorIdsParentFirst_!, pageReq.thePageMeta)
    savePageActionsGenNotfsImpl(page, actions)
  }
  /** Variant taking a raw page id and the acting users instead of a request. */
  def savePageActionsGenNotfs(pageId: PageId, actions: Seq[RawPostAction[_]], authors: People)
        : (PageNoPath, Seq[RawPostAction[_]]) = {
    val pageMeta = siteDbDao.loadPageMeta(pageId) getOrElse
      throwNotFound("DwE115Xf3", s"Page `${pageId}' does not exist")
    // BUG race condition: What if page deleted, here? Then we'd falsely return an empty page.
    var pageNoAuthor = loadPageParts(pageId) getOrElse PageParts(pageId)
    val page = pageNoAuthor ++ authors
    val ancestorPageIds = loadAncestorIdsParentFirst(pageId)
    savePageActionsGenNotfsImpl(PageNoPath(page, ancestorPageIds, pageMeta), actions)
  }
  /** Core implementation: persists the actions, then generates and stores
    * any notifications they trigger. No-op for an empty action list.
    */
  def savePageActionsGenNotfsImpl(page: PageNoPath, actions: Seq[RawPostAction[_]])
        : (PageNoPath, Seq[RawPostAction[_]]) = {
    if (actions isEmpty)
      return (page, Nil)
    // COULD check that e.g. a deleted post is really a post, an applied edit is
    // really an edit, an action undone is not itself an Undo action,
    // and lots of other similar tests.
    val (pageWithNewActions, actionsWithId) =
      siteDbDao.savePageActions(page, actions.toList)
    val notfs = NotificationGenerator(page, this).generateNotifications(actionsWithId)
    siteDbDao.saveDeleteNotifications(notfs)
    (pageWithNewActions, actionsWithId)
  }
  /** Removes a user's vote on a post. */
  def deleteVoteAndNotf(userIdData: UserIdData, pageId: PageId, postId: PostId,
        voteType: PostActionPayload.Vote) {
    siteDbDao.deleteVote(userIdData, pageId, postId, voteType)
    // Delete vote notf too once they're being generated, see [953kGF21X].
  }
  /** Records which posts an action's author had read when acting. */
  def updatePostsReadStats(pageId: PageId, postIdsRead: Set[PostId],
        actionMakingThemRead: RawPostAction[_]) {
    siteDbDao.updatePostsReadStats(pageId, postIdsRead, actionMakingThemRead)
  }
  def loadPostsReadStats(pageId: PageId): PostsReadStats =
    siteDbDao.loadPostsReadStats(pageId)
  def loadPageParts(debateId: PageId): Option[PageParts] =
    siteDbDao.loadPageParts(debateId)
  /** Loads a page for any tenant (site), not just the current one. */
  def loadPageAnyTenant(sitePageId: SitePageId): Option[PageParts] =
    loadPageAnyTenant(tenantId = sitePageId.siteId, pageId = sitePageId.pageId)
  def loadPageAnyTenant(tenantId: SiteId, pageId: PageId): Option[PageParts] =
    siteDbDao.loadPageParts(pageId, tenantId = Some(tenantId))
}
/**
 * Caching decorator for [[PageDao]]: serves loadPageParts from an in-memory
 * cache and invalidates/refreshes cache entries whenever a page is saved
 * or modified. Mixed into CachingSiteDao, which supplies the cache helpers.
 */
trait CachingPageDao extends PageDao {
  self: CachingSiteDao =>
  // Evict the cached PageParts whenever any node reports the page was saved.
  onPageSaved { sitePageId =>
    uncachePageParts(sitePageId)
  }
  override def createPage(page: Page): Page = {
    val pageWithIds = super.createPage(page)
    firePageCreated(pageWithIds)
    pageWithIds
  }
  override def savePageActionsGenNotfsImpl(page: PageNoPath, actions: Seq[RawPostAction[_]])
        : (PageNoPath, Seq[RawPostAction[_]]) = {
    if (actions isEmpty)
      return (page, Nil)
    val newPageAndActionsWithId =
      super.savePageActionsGenNotfsImpl(page, actions)
    refreshPageInCache(page.id)
    newPageAndActionsWithId
  }
  // Broadcasts a page-saved event, which (via onPageSaved above) evicts the
  // cached parts on every node. See inline notes for rejected alternatives.
  private def refreshPageInCache(pageId: PageId) {
    // Possible optimization: Examine all actions, and refresh cache e.g.
    // the RenderedPageHtmlDao cache only
    // if there are e.g. EditApp:s or approved Post:s (but ignore Edit:s --
    // unless applied & approved). Include that info in the call to `firePageSaved` below.
    firePageSaved(SitePageId(siteId = siteId, pageId = pageId))
    // if (is _site.conf || is any stylesheet or script)
    // then clear all asset bundle related caches. For ... all websites, for now??
    // Would it be okay to simply overwrite the in mem cache with this
    // updated page? — Only if I make `++` avoid adding stuff that's already
    // present!
    //val pageWithNewActions =
    // page_! ++ actionsWithId ++ pageReq.login_! ++ pageReq.user_!
    // In the future, also refresh page index cache, and cached page titles?
    // (I.e. a cache for DW1_PAGE_PATHS.)
    // ------ Page action cache (I'll probably remove it)
    // COULD instead update value in cache (adding the new actions to
    // the cached page). But then `savePageActionsGenNotfs` also needs to know
    // which users created the actions, so their login/idty/user instances
    // can be cached as well (or it won't be possible to render the page,
    // later, when it's retrieved from the cache).
    // So: COULD save login, idty and user to databaze *lazily*.
    // Also, logins that doesn't actually do anything won't be saved
    // to db, which is goood since they waste space.
    // (They're useful for statistics, but that should probably be
    // completely separated from the "main" db?)
    /* Updating the cache would be something like: (with ~= Google Guava cache)
    val key = Key(tenantId, debateId)
    var replaced = false
    while (!replaced) {
      val oldPage =
        _cache.tenantDaoDynVar.withValue(this) {
          _cache.cache.get(key)
        }
      val newPage = oldPage ++ actions ++ people-who-did-the-actions
      // newPage might == oldPage, if another thread just refreshed
      // the page from the database.
      replaced = _cache.cache.replace(key, oldPage, newPage)
    */
    // ------ /Page action cache
  }
  override def deleteVoteAndNotf(userIdData: UserIdData, pageId: PageId, postId: PostId,
        voteType: PostActionPayload.Vote) {
    super.deleteVoteAndNotf(userIdData, pageId, postId, voteType)
    refreshPageInCache(pageId)
  }
  // Cache-through read: loads from DB and stores in cache on a miss.
  override def loadPageParts(pageId: PageId): Option[PageParts] =
    lookupInCache[PageParts](pagePartsKey(siteId, pageId),
      orCacheAndReturn = {
        super.loadPageParts(pageId)
      })
  private def uncachePageParts(sitePageId: SitePageId) {
    removeFromCache(pagePartsKey(sitePageId.siteId, sitePageId.pageId))
  }
  /** Cache key for a page's parts, namespaced by site. */
  def pagePartsKey(siteId: SiteId, pageId: PageId) = CacheKey(siteId, s"$pageId|PageParts")
}
| debiki/debiki-server-old | app/debiki/dao/PageDao.scala | Scala | agpl-3.0 | 8,083 |
package com.hadooparchitecturebook.spark.dedup
import java.io.{OutputStreamWriter, BufferedWriter}
import java.util.Random
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
/**
* Created by ted.malaska on 12/6/14.
*/
object GenDedupInput {
  /**
   * Writes `numberOfRecords` CSV lines of the form "{key},{timeStamp},{value}"
   * to an HDFS file. Keys are drawn uniformly from [0, numberOfUniqueRecords),
   * so every unique key receives roughly
   * numberOfRecords / numberOfUniqueRecords records.
   *
   * Usage: {outputPath} {numberOfRecords} {numberOfUniqueRecords}
   */
  def main(args:Array[String]): Unit = {
    // All three positional args are required. (The previous check of
    // `args.length == 0` crashed with ArrayIndexOutOfBoundsException when
    // only one or two args were supplied.)
    if (args.length < 3) {
      println("{outputPath} {numberOfRecords} {numberOfUniqueRecords}")
    } else {
      //The output file that will hold the data
      val outputPath = new Path(args(0))
      //Number of records to be written to the file
      val numberOfRecords = args(1).toInt
      //Number of unique primary keys
      val numberOfUniqueRecords = args(2).toInt

      //Open fileSystem to HDFS
      val fileSystem = FileSystem.get(new Configuration())

      //Create buffered writer
      val writer = new BufferedWriter(
        new OutputStreamWriter(fileSystem.create(outputPath)))
      try {
        val r = new Random()

        //This loop will write out all the records.
        // Every primary key will get about
        // numberOfRecords/numberOfUniqueRecords records
        for (i <- 0 until numberOfRecords) {
          val uniqueId = r.nextInt(numberOfUniqueRecords)
          //Format: {key}, {timeStamp}, {value}
          writer.write(uniqueId + "," + i + "," + r.nextInt(10000))
          writer.newLine()
        }
      } finally {
        // Always release the HDFS stream, even if a write fails mid-way.
        writer.close()
      }
    }
  }
}
| nvoron23/hadoop-arch-book | ch05-processing-patterns/dedup/spark/src/main/java/com/hadooparchitecturebook/spark/dedup/GenDedupInput.scala | Scala | apache-2.0 | 1,346 |
package org.jetbrains.plugins.scala.debugger.evaluateExpression
import org.jetbrains.plugins.scala.compiler.CompileServerLauncher
import org.jetbrains.plugins.scala.debugger.{ScalaDebuggerTestCase, ScalaVersion_2_11, ScalaVersion_2_12}
/**
* @author Nikolay.Tropin
*/
// Concrete suites: the shared tests run once per supported Scala version.
class CompilingEvaluatorTest extends CompilingEvaluatorTestBase with ScalaVersion_2_11
class CompilingEvaluatorTest_212 extends CompilingEvaluatorTestBase with ScalaVersion_2_12
// Exercises the debugger's "compiling evaluator": expression fragments are
// compiled on the fly and evaluated while the debuggee is stopped at $bp
// breakpoints planted in the fixture sources below.
abstract class CompilingEvaluatorTestBase extends ScalaDebuggerTestCase {
  override def setUp(): Unit = {
    super.setUp()
    // The evaluator needs the Scala compile server to compile fragments.
    CompileServerLauncher.ensureServerRunning(getProject)
  }
  // Breakpoint inside a plain method body.
  addFileWithBreakpoints("SimplePlace.scala",
    s"""
      |object SimplePlace {
      |  val f = "f"
      |
      |  def foo(i: Int): Unit = {
      |    val x = 1
      |    ""$bp
      |  }
      |
      |  def main(args: Array[String]) {
      |    foo(3)
      |  }
      |}
    """.stripMargin.trim)
  def testSimplePlace(): Unit = {
    evaluateCodeFragments(
      "Seq(i, x).map(z => z * z).mkString(\\", \\")" -> "9, 1",
      """val result = for (z <- Seq(3, 4)) yield z * z
        |result.mkString
      """ -> "916",
      """def sqr(x: Int) = x * x
        |val a = sqr(12)
        |val b = sqr(1)
        |a + b
      """ -> "145",
      """Option(Seq(x)) match {
        |  case None => 1
        |  case Some(Seq(2)) => 2
        |  case Some(Seq(_)) => 0
        |}
      """ -> "0",
      """case class AAA(s: String, i: Int)
        |AAA("a", 1).toString
      """ -> "AAA(a,1)"
    )
  }
  // Breakpoint inside a for-comprehension body (a lambda at runtime).
  addFileWithBreakpoints("InForStmt.scala",
    s"""
      |object InForStmt {
      |  def main(args: Array[String]) {
      |    for {
      |      x <- Seq(1, 2)
      |      if x == 2
      |    } {
      |      println()$bp
      |    }
      |  }
      |}
      """.stripMargin.trim
  )
  def testInForStmt(): Unit = {
    evaluateCodeFragments (
      "Seq(x, 2).map(z => z * z).mkString(\\", \\")" -> "4, 4",
      """def sqr(x: Int) = x * x
        |val a = sqr(12)
        |val b = sqr(1)
        |a + b
      """ -> "145"
    )
  }
  // Breakpoint inside a class constructor body.
  addFileWithBreakpoints("InConstructor.scala",
    s"""
      |object InConstructor {
      |  def main(args: Array[String]) {
      |    new Sample().foo()
      |  }
      |
      |  class Sample {
      |    val a = "a"
      |    val b = "b"$bp
      |
      |    def foo() = "foo"
      |  }
      |}
      """.stripMargin.trim
  )
  def testInConstructor(): Unit = {
    evaluateCodeFragments (
      "None.getOrElse(a)" -> "a",
      "foo().map(_.toUpper)" -> "FOO"
    )
  }
  // Breakpoint on a braceless (expression-bodied) method.
  addFileWithBreakpoints("AddBraces.scala",
    s"""
      |object AddBraces {
      |  def main(args: Array[String]) {
      |    foo()
      |  }
      |
      |  def foo(): String = "foo"$bp
      |}
    """.stripMargin.trim)
  def testAddBraces(): Unit = {
    evaluateCodeFragments(
      "None.getOrElse(foo())" -> "foo",
      """def bar = "bar"
        |foo() + bar
      """ -> "foobar"
    )
  }
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/debugger/evaluateExpression/CompilingEvaluatorTestBase.scala | Scala | apache-2.0 | 2,974 |
package org.decaf.distributed
import akka.actor.ActorSystem
package object website {
  // Lazily created so the ActorSystem only starts when first referenced.
  lazy val WebsiteActorSystem = ActorSystem("website")
}
| adamdecaf/distributed | website/src/main/scala/package.scala | Scala | apache-2.0 | 143 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
// Generated by sbt-contraband (see file header) — do not hand-edit logic.
// JSON codec for the ServerAuthentication enum; "Token" is its only variant,
// serialized as a bare JSON string.
trait ServerAuthenticationFormats { self: sjsonnew.BasicJsonProtocol =>
implicit lazy val ServerAuthenticationFormat: JsonFormat[sbt.ServerAuthentication] = new JsonFormat[sbt.ServerAuthentication] {
  override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.ServerAuthentication = {
    __jsOpt match {
      case Some(__js) =>
        unbuilder.readString(__js) match {
          case "Token" => sbt.ServerAuthentication.Token
        }
      case None =>
        deserializationError("Expected JsString but found None")
    }
  }
  override def write[J](obj: sbt.ServerAuthentication, builder: Builder[J]): Unit = {
    val str = obj match {
      case sbt.ServerAuthentication.Token => "Token"
    }
    builder.writeString(str)
  }
}
}
| sbt/sbt | main-command/src/main/contraband-scala/ServerAuthenticationFormats.scala | Scala | apache-2.0 | 950 |
package org.akka.essentials.unittest.example
import akka.actor.Actor
// Message protocol: Tick expects a reply, Tock only toggles internal state.
case class Tick(message: String)
case class Tock(message: String)
/**
 * Demo actor: replies to Tick messages, flips an internal flag on Tock,
 * and blows up on anything else.
 */
class TickTock extends Actor {
  // Toggled by every Tock; starts out false.
  var state = false

  def receive: Receive = {
    case t: Tick => tick(t)
    case t: Tock => tock(t)
    case _       => throw new IllegalArgumentException("boom!")
  }

  /** Flip the flag (false -> true -> false -> ...). */
  def tock(message: Tock) = {
    // do some processing here
    state = !state
  }

  /** Acknowledge the tick back to the original sender. */
  def tick(message: Tick) = {
    // do some processing here
    sender.tell("processed the tick message")
  }
}
package mesosphere.mesos.scale
import java.net.URL
import mesosphere.mesos.scale.MetricsFormat._
import scala.collection.immutable.Iterable
/**
* Compare Metrics Samples and find all metrics, that get deteriorated by given factor.
*/
/**
 * Compare Metrics Samples and find all metrics, that get deteriorated by given factor.
 */
object FindDeterioratedMetrics {
  /**
    * Compare 2 Metrics Samples and filter all metrics, that get deteriorated by given factor.
    * @param baseLine the base line to compare the values
    * @param sample the metric samples to compare
    * @param factor the deterioration factor. 1 means not worse than the base.
    * @return all deteriorated metrics (before, after)
    */
  def filterDeteriorated(baseLine: MetricsSample, sample: MetricsSample, factor: Double): Map[Metric, Metric] = {
    for {
      (name, metricsBase) <- baseLine.all
      metricsSample <- sample.all.get(name).toList
      // Metrics with a non-positive base mean are skipped: the ratio below
      // would be meaningless (or divide by zero).
      metricBase <- metricsBase if metricBase.mean > 0
      metricSample <- metricsSample.find(_.name == metricBase.name)
      if (metricSample.mean / metricBase.mean) > factor
    } yield metricBase -> metricSample
  }
  /** URL-based variant; compares only the last sample in each series. */
  def filterDeteriorated(before: URL, after: URL, deterioration: Double): Map[Metric, Metric] = {
    //only compare last
    filterDeteriorated(readMetrics(before).last, readMetrics(after).last, deterioration)
  }
  /**
    * FindDeterioratedMetrics <file_base> <file_sample> <deterioration_factor>
    * url_base: the file with the base metrics
    * url_sample: the file with the actual sampled metrics
    * deterioration_factor: number [0..] where 1 means the same as the base line
    *
    * Throws if any metric exceeds the factor (so CI fails); exits 1 on bad usage.
    * @param args three args expected: baseFile sampleFile factor
    */
  def main(args: Array[String]): Unit = {
    // Render a (base -> sample) metric map as an aligned table with % change.
    def printSlope(metrics: Map[Metric, Metric]): Unit = {
      import DisplayHelpers._
      val header = Vector("Metric", "Base", "Sample", "Increase in %")
      val rows: Iterable[Vector[String]] = metrics.map {
        case (a, b) => Vector(a.name, a.mean, b.mean, (b.mean / a.mean * 100).toInt - 100).map(_.toString)
      }
      printTable(Seq(left, right, right, right), withUnderline(header) ++ rows.toSeq)
    }
    if (args.length == 3) {
      println("\n\nMetrics that got worse (deterioration factor == 1):")
      printSlope(filterDeteriorated(new URL(args(0)), new URL(args(1)), 1))
      println(s"\n\nMetrics that got deteriorated (deterioration factor == ${args(2)}):")
      val deteriorated = filterDeteriorated(new URL(args(0)), new URL(args(1)), args(2).toDouble)
      if (deteriorated.nonEmpty) {
        printSlope(deteriorated)
        throw new IllegalStateException(s"Sample is deteriorated according to deterioration factor")
      }
    } else {
      println(
        """Usage:
          | FindDeterioratedMetrics <file_base> <file_sample> <deterioration_factor>"
          | file_base: the file with the base metrics
          | file_sample: the file with the actual sampled metrics
          | deterioration_factor: number [0..] where 1 means the same as the base line
        """.stripMargin)
      sys.exit(1)
    }
  }
}
| timcharper/marathon | mesos-simulation/src/test/scala/mesosphere/mesos/scale/FindDeterioratedMetrics.scala | Scala | apache-2.0 | 3,039 |
package spire.benchmark
import scala.reflect.ClassTag
import scala.annotation.tailrec
import scala.{specialized => spec}
import scala.util.Random
import Random._
import spire.implicits._
import com.google.caliper.Runner
import com.google.caliper.SimpleBenchmark
import com.google.caliper.Param
import java.lang.Math
import java.math.BigInteger
import java.lang.Long.numberOfTrailingZeros
// Caliper entry point: runs every timeXxx method of GcdBenchmarks below.
object GcdBenchmarks extends MyRunner(classOf[GcdBenchmarks])
// Caliper benchmark comparing Euclid's (modulo-based) and Stein's (binary,
// shift/subtract) GCD on 200k random Longs. Results are XOR-folded so the
// JIT cannot dead-code-eliminate the work.
class GcdBenchmarks extends MyBenchmark {
  var longs: Array[Long] = null
  var bigs: Array[BigInteger] = null
  override def setUp(): Unit = {
    longs = init(200000)(nextLong)
    bigs = init(200000)(new BigInteger(nextLong.toString))
  }
  def timeXorEuclidGcdLong(reps:Int) = run(reps)(xorEuclidGcdLong(longs))
  def timeXorBinaryGcdLong(reps:Int) = run(reps)(xorBinaryGcdLong(longs))
  //def timeXorBuiltinGcdBigInteger(reps:Int) = run(reps)(xorBuiltinGcdBigInteger(bigs))
  // XOR of gcd over each adjacent pair — a cheap checksum of all results.
  def xorEuclidGcdLong(data:Array[Long]):Long = {
    var t = 0L
    var i = 0
    val len = data.length - 1
    while (i < len) {
      t ^= euclidGcdLong(data(i), data(i + 1))
      i += 1
    }
    t
  }
  def xorBinaryGcdLong(data:Array[Long]):Long = {
    var t = 0L
    var i = 0
    val len = data.length - 1
    while (i < len) {
      t ^= binaryGcdLong(data(i), data(i + 1))
      i += 1
    }
    t
  }
  def xorBuiltinGcdBigInteger(data:Array[BigInteger]):BigInteger = {
    var t = BigInteger.ZERO
    var i = 0
    val len = data.length - 1
    while (i < len) {
      t = t.xor(data(i).gcd(data(i + 1)))
      i += 1
    }
    t
  }
  // Classic Euclidean algorithm via remainder; tail-recursive.
  // NOTE(review): Math.abs(Long.MinValue) is still negative, so results for
  // inputs reducing to Long.MinValue are negative — acceptable for a
  // benchmark checksum, but confirm before reusing this elsewhere.
  @tailrec
  final def euclidGcdLong(x: Long, y: Long): Long = {
    if (y == 0L) Math.abs(x) else euclidGcdLong(y, x % y)
  }
  // Stein's binary GCD: strip common factors of two, then repeatedly
  // subtract the smaller odd value from the larger and re-strip twos.
  // Same Long.MinValue caveat as euclidGcdLong above.
  def binaryGcdLong(_x: Long, _y: Long): Long = {
    if (_x == 0L) return _y
    if (_y == 0L) return _x
    var x = Math.abs(_x)
    var xz = numberOfTrailingZeros(x)
    x >>= xz
    var y = Math.abs(_y)
    var yz = numberOfTrailingZeros(y)
    y >>= yz
    // Invariant: x and y are both odd here, so their difference is even.
    while (x != y) {
      if (x > y) {
        x -= y
        x >>= numberOfTrailingZeros(x)
      } else {
        y -= x
        y >>= numberOfTrailingZeros(y)
      }
    }
    // Re-apply the common power of two: min(xz, yz).
    if (xz < yz) x << xz else x << yz
  }
}
| guersam/spire | benchmark/src/main/scala/spire/benchmark/GcdBenchmarks.scala | Scala | mit | 2,203 |
/*******************************************************************************
* Copyright (c) 2014 eBay Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package org.ebaysf.ostara.upgrade
import grizzled.slf4j.Logging
import org.apache.commons.io.FilenameUtils
import java.io.File
import org.apache.maven.model.Dependency
/**
 * Base class for per-file upgrade reports. Tracks whether the change was
 * applied automatically and accumulates changes the user must perform by hand.
 */
class BaseReport extends Logging {
  // True when the change described by this report was applied automatically.
  var automated: Boolean = _
  // description -> recommended action, for changes the tool could not apply.
  var manualChanges = Map[String, String]()
  var warnings = Map[String, String]()

  /**
   * Records a change the user must perform manually and logs it as a warning.
   * Fix: the log string previously interpolated stray literal quotes around
   * the newline (`'\n'` inside an s-string prints the quote characters).
   */
  def addManualChange(description: String, action: String) {
    warn(s"Description: $description\nAction: $action")
    manualChanges += (description -> action)
  }
}
/**
 * Classification codes used by [[PomReport.addMissingArtifact]] for the
 * `depType` field of a missing-dependency entry.
 */
object PomReport {
  // Dependency was resolved; nothing is missing.
  val NOT_MISSING = 1
  // Presumably: not found in the provider/platform repository — TODO confirm against callers.
  val MISSING_PROVIDER = 2
  // Presumably: not found in the third-party repository — TODO confirm against callers.
  val MISSING_THIRDPARTY = 3
}
/**
 * Upgrade report for a single POM file: records every artifact/plugin that was
 * merged, unmerged, mapped, added or removed, plus dependencies that could not
 * be resolved.
 */
class PomReport extends BaseReport {
  // dep -> (depType from PomReport constants, human-readable description, repos searched — may be null)
  var missingArtifacts = Map[NiceDependency, (Int, String, List[String] /*repos*/)]()
  // Groups of artifacts consolidated into a single artifact.
  var mergedArtifacts = Map[List[NiceDependency], NiceDependency]()
  // Single artifacts expanded into several individual ones.
  var unmergedArtifacts = Map[NiceDependency, List[NiceDependency]]()
  // Old artifact -> replacement artifact.
  var mappedArtifacts = Map[NiceDependency, NiceDependency]()
  var addedArtifacts = List[NiceDependency]()
  var removedArtifacts = List[NiceDependency]()
  // Old plugin -> replacement plugin.
  var mappedPlugins = Map[NiceDependency, NiceDependency]()
  var removedPlugins = List[NiceDependency]()

  // Convenience overload: wraps the raw Maven Dependency before delegating.
  def addMissingArtifact(dep:Dependency, depType:Int, description:String, repo:List[String]) {
    addMissingArtifact(new NiceDependency(dep), depType, description, repo)
  }

  /**
   * Records a dependency-analysis result and logs it.
   * NOTE(review): a defaulted parameter precedes a non-defaulted one, so
   * callers must use named arguments to take the defaults; `repo` defaults to
   * null (stored as-is in the map) — consumers must tolerate a null repo list.
   */
  def addMissingArtifact(dep:NiceDependency, depType:Int = PomReport.NOT_MISSING, description:String, repo:List[String]=null) {
    warn(s"Dependency analysis of $dep: \n$description")
    missingArtifacts += (dep -> (depType, description, repo))
  }
}
/** Report for automated changes applied to a single Java source file. */
class JavaFileReport extends BaseReport {
  // Count of automated edits made to the file.
  var changes: Int = 0
}
/** Report for automated changes applied to a single web.xml file. */
class WebXmlReport extends BaseReport {
  // Count of automated edits made to the file.
  var changes: Int = 0
}
/**
 * Free-form report for a single file: carries a Markdown message describing
 * the change and whether it was applied automatically.
 */
class FileReport extends BaseReport {
  // Markdown snippet rendered verbatim into the final report.
  var message:String = _;
  // Auxiliary constructor: sets the message and the `automated` flag in one go.
  def this(msg:String, atmtd:Boolean) {
    this()
    this.message = msg
    automated = atmtd
  }
}
/**
 * Accumulates per-file upgrade reports ([[BaseReport]] subclasses keyed by
 * relative path) and renders them all as one GitHub-flavored Markdown
 * document. Subclasses override the hook methods (platform name, disclaimer,
 * metadata file name, extra report types) for a concrete platform.
 *
 * @author renyedi
 */
class UpgradeReportBuilder extends Logging {
  // All Maven project artifacts (POM file -> artifact) processed by the upgrade.
  var projectArtifacts = Set[(File, Dependency)]()
  // Root POM; used to relativize module paths in the report.
  var pathToParentPom:File = _
  // Relative path of each touched file -> its report.
  var changes = Map[String/*relative path*/, BaseReport]()
  // Extra Markdown snippets appended to the "manual checks" section.
  var manualChecksAndChanges = List[String]()
  // Hooks intended to be overridden by platform-specific subclasses.
  def appMetadataFileName:String = ""
  def platformName:String = "Ostara dummy platform"
  def disclaimer(teamDL:String):String = "This is totally safe to use, probably"
  def additionalManualChecksAndChanges:String = "N/A"
  /**
   * Renders the full upgrade report as GitHub-flavored Markdown.
   * Syntax reference https://help.github.com/articles/github-flavored-markdown
   *
   * The raw triple-quoted blocks below are Markdown templates: their content
   * lines are intentionally flush-left (no stripMargin) because the
   * indentation would otherwise leak into the rendered document.
   *
   * @param teamDL       team distribution list, interpolated into the disclaimer
   * @param taskId       upgrade job id, used to link the log files
   * @param relativePath path of the project POM shown in the header
   * @return the complete Markdown document as a String
   */
  def buildGitHubMarkdownReport(teamDL:String, taskId:String, relativePath:String):String = {
    // Construct the summary data
    var manualChanges = 0
    var pomFileChanges = 0
    var webXmlFileChanges = 0
    var totalWebXmlChanges = 0
    var appMetadataFileChanges = 0
    var javaFileChanges = 0
    // Bucket every change by file type; anything unrecognized counts as a POM.
    for(change <- changes) {
      manualChanges += change._2.manualChanges.size
      if(change._1.endsWith("web.xml")) {webXmlFileChanges+=1; totalWebXmlChanges += change._2.asInstanceOf[WebXmlReport].changes}
      else if(change._1.endsWith(appMetadataFileName)) appMetadataFileChanges+=1
      else if(change._1.endsWith(".java")) javaFileChanges+=1
      else pomFileChanges+=1
    }
    var report = s"""
# Project upgrade report to ${platformName}
## Project details
Name | Description
---- | -----------
Path to project POM | ${relativePath}
Upgrade job ID | ${taskId}
Full upgrade log | [link](platform-upgrade-debug-${taskId}.log)
Upgrade warnings only log | [link](platform-upgrade-warn-${taskId}.log)
### Artifacts
This upgrade request processed only the ${projectArtifacts.size} Maven project artifacts that were referenced directly or indirectly by the project's parent POM:
| No. | POM file | Artifact ID |
| ---:| -------- | ----------- |
"""
    // One table row per processed module, with its path relative to the parent POM.
    var pomCount = 0
    for(crtModule <- projectArtifacts) {
      pomCount += 1
      val crtFile = pathToParentPom.toURI().relativize(crtModule._1.toURI())
      report += s"| $pomCount | [$crtFile]($crtFile) | ${crtModule._2.getArtifactId()} |\n"
    }
    report += s"""
## Disclaimer
${disclaimer(teamDL)}
## Summary
Operation | Details
---- | -----------
[Manual changes](#manualChanges) | ${manualChanges} required by user
[Additional manual checks and changes](#manualChecksAndChanges) | Potentially several, depending on the project
[Automated changes](#automatedChanges) were applied to | ${pomFileChanges} POM file(s), ${totalWebXmlChanges} changes in ${webXmlFileChanges} web.xml file(s), ${appMetadataFileChanges} ${appMetadataFileName} file(s), ${javaFileChanges} Java file(s)
<a name="manualChanges"/>
## Pending manual operations
"""
    if(hasNoManualChanges && hasNoMissingDependencies) {
      report += "No manual operations seem to be required. Please note that this is no guarantee that your project will build and run without errors.\n"
    } else {
      // Section 1: changes the tool could not apply automatically.
      if(!hasNoManualChanges) {
        report +=
      s"""
### Code changes
The following code changes could not be performed by the ${platformName} upgrade tool. You will need to do them manually by following the recommended approach.
"""
      for(change <- changes) {
        if(!change._2.manualChanges.isEmpty) {
          val crtFile = FilenameUtils.separatorsToUnix(change._1)
          report +=
s"""
### File [$crtFile]($crtFile)
Problem | Action
------- | ------
"""
          for(manualChange <- change._2.manualChanges) {
            report += manualChange._1 + " | " + manualChange._2 + "\n"
          }
        }
      }
      }
      // Section 2: dependencies that could not be resolved (experimental).
      if(!hasNoMissingDependencies) {
        report +=
s"""
### Missing dependencies
<p>NOTE: This is an experimental feature and might not be fully accurate
<p>The following dependencies could not be found in the ${platformName} Maven repository (ebaycentral) and have been adjusted or need additional changes.
"""
        for(change <- changes) {
          if(change._2.isInstanceOf[PomReport] && !change._2.asInstanceOf[PomReport].missingArtifacts.isEmpty) {
            val crtFile = FilenameUtils.separatorsToUnix(change._1)
            report +=
s"""
### File [$crtFile]($crtFile)
Artifact | Description
-------- | -----------
"""
            for(missingArtifact <- change._2.asInstanceOf[PomReport].missingArtifacts) {
              report += missingArtifact._1 + " | " + missingArtifact._2._2 + "\n"
            }
          }
        }
      }
    }
    report +=
      s"""
<a name="manualChecksAndChanges"/>
## Additional manual checks and changes
"""
    // Caller-supplied Markdown snippets, appended verbatim.
    for(manualCheckAndChange <- manualChecksAndChanges) {
      report += manualCheckAndChange
    }
    // Non-automated FileReports get their message rendered here.
    for(change <- changes;
      if !change._2.automated;
      if change._2.isInstanceOf[FileReport]) {
      val crtFile = FilenameUtils.separatorsToUnix(change._1)
      report +=
s"""
### File [$crtFile]($crtFile)
"""
      val fileReport: FileReport = change._2.asInstanceOf[FileReport]
      report += fileReport.message
    }
    report +=
      s"""
<a name="automatedChanges"/>
## Automated upgrade operations
This section lists all the changes that were automatically applied to the original project.
Please consult the changeset for the definitive list of changes made by the upgrade service as some of those might not be described below.
"""
    // One sub-section per automated change, dispatched on the report's runtime type.
    for(change <- changes;
      if change._2.automated) {
      val crtFile = FilenameUtils.separatorsToUnix(change._1)
      report +=
s"""
### File [$crtFile]($crtFile)
"""
      if(change._2.isInstanceOf[PomReport]) {
        val pomChange = change._2.asInstanceOf[PomReport]
        if(!pomChange.mergedArtifacts.isEmpty) {
          report +=
s"""
#### Merged artifacts
The following groups of artifacts were consolidated into a single one.
Artifact group | Consolidated artifact
-------------- | ---------------------
"""
          for(crtGroup <- pomChange.mergedArtifacts) {
            report += crtGroup._1.mkString("\n") + " | " + crtGroup._2.toString + "\n"
          }
        }
        if(!pomChange.unmergedArtifacts.isEmpty) {
          report +=
s"""
#### Unmerged artifacts
The following groups of artifacts were expanded into individual ones.
Artifact | Expanded artifacts
-------- | ------------------
"""
          for(crtGroup <- pomChange.unmergedArtifacts) {
            report += "| " + crtGroup._1.toString + " | " + crtGroup._2.mkString(", ") + " |\n"
          }
        }
        if(!pomChange.mappedArtifacts.isEmpty) {
          report +=
s"""
#### Mapped artifacts
The following groups of artifacts were mapped to new ones.
Original artifact | Mapped artifact
----------------- | ---------------
"""
          for(crtGroup <- pomChange.mappedArtifacts) {
            report += crtGroup._1.toString + " | " + crtGroup._2 + "\n"
          }
        }
        if(!pomChange.addedArtifacts.isEmpty) {
          report +=
s"""
#### Added artifacts
The following artifacts were added to the POM:
"""
          for(crtArtifact <- pomChange.addedArtifacts) {
            report += "1. " + crtArtifact.toString + "\n"
          }
        }
        if(!pomChange.removedArtifacts.isEmpty) {
          report +=
s"""
#### Removed artifacts
The following artifacts were removed from the POM:
"""
          for(crtArtifact <- pomChange.removedArtifacts) {
            report += "1. " + crtArtifact.toString + "\n"
          }
        }
        if(!pomChange.mappedPlugins.isEmpty) {
          report +=
s"""
#### Mapped plugins
The following plugins were mapped to new ones.
Original plugin | Mapped plugin
--------------- | -------------
"""
          for(crtGroup <- pomChange.mappedPlugins) {
            report += crtGroup._1.toString + " | " + crtGroup._2 + "\n"
          }
        }
        if(!pomChange.removedPlugins.isEmpty) {
          report +=
s"""
#### Removed plugins
The following plugins were removed from the POM:
"""
          for(crtPlugin <- pomChange.removedPlugins) {
            report += "1. " + crtPlugin.toString + "\n"
          }
        }
      } else
      if(change._2.isInstanceOf[WebXmlReport]) {
        report += "Various web.xml changes.\n"
      } else
      if(change._2.isInstanceOf[JavaFileReport]) {
        report += "Java code changes.\n"
      } else
      if(change._2.isInstanceOf[FileReport]) {
        val fileReport: FileReport = change._2.asInstanceOf[FileReport]
        report += fileReport.message
      } else {
        // Unknown subclass: give the subclass hook a chance before giving up.
        val additionalReport = handleAdditionalReportTypes(change._2)
        if(additionalReport != null) {
          report += additionalReport;
        } else {
          warn("Unknown report type. Ignoring")
          report += "N/A \n"
        }
      }
    }
    return report
  }
  // Subclass hook: render an unknown report type, or return null to skip it.
  def handleAdditionalReportTypes(report: BaseReport):String = null
  // True when no report carries any pending manual change.
  def hasNoManualChanges : Boolean = {
    for(change <- changes) {
      if(!change._2.manualChanges.isEmpty) {
        return false
      }
    }
    true
  }
  // True when no PomReport carries any unresolved dependency.
  def hasNoMissingDependencies : Boolean = {
    for(change <- changes) {
      if(change._2.isInstanceOf[PomReport] && !change._2.asInstanceOf[PomReport].missingArtifacts.isEmpty) {
        return false
      }
    }
    true
  }
}
| eBay/ostara | ostara-upgrade/src/main/scala/org/ebaysf/ostara/upgrade/UpgradeReportBuilder.scala | Scala | apache-2.0 | 11,870 |
package im.mange.shoreditch.engine
import im.mange.little.json.{LittleJodaSerialisers, LittleSerialisers}
import im.mange.shoreditch.api._
import im.mange.shoreditch._
import im.mange.shoreditch.engine.model.TestRunReport
import org.json4s.NoTypeHints
import org.json4s.native.{JsonParser, Serialization}
import org.json4s._
import org.json4s.native.Serialization._
import org.json4s.native.JsonMethods._
/**
 * JSON (de)serialisation helpers for the Shoreditch engine. All operations
 * share one json4s format configuration (no type hints, plus the Little and
 * Little-Joda serializers).
 */
object Json {
  // Single implicit format set, in scope for every member below — replaces
  // the per-method `implicit val formats = shoreditchFormats` shadows.
  private implicit val shoreditchFormats: Formats =
    Serialization.formats(NoTypeHints) ++ LittleSerialisers.all ++ LittleJodaSerialisers.all

  /** Parses an ActionResponse from its JSON text. */
  def deserialiseActionResponse(json: String) =
    parse(json).extract[ActionResponse]

  /** Parses a CheckResponse from its JSON text. */
  def deserialiseCheckResponse(json: String) =
    parse(json).extract[CheckResponse]

  /** Parses a MetaDataResponse from its JSON text. */
  def deserialiseMetaDataResponse(json: String) =
    parse(json).extract[MetaDataResponse]

  /** Renders a list of inputs as pretty-printed JSON text. */
  def serialise(r: List[In]) =
    pretty(render(JsonParser.parse(write(r))))

  /** Renders a test-run report as a json4s AST (not a String). */
  def serialise(r: TestRunReport) =
    parse(write(r))
}
| alltonp/shoreditch-engine | src/main/scala/im/mange/shoreditch/engine/Json.scala | Scala | apache-2.0 | 1,206 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
/**
 * Tests for the command-line argument parsing in [[Runner]]: partitioning raw
 * args into category lists, reporter configuration flags, suite/runpath/
 * property parsing, and overall argument validation. Private parsing methods
 * are reached through ScalaTest's PrivateMethodTester.
 */
class RunnerSuite() extends Suite with PrivateMethodTester {

  // Verifies Runner.parseArgs partitions a raw arg array into the eleven
  // category lists (runpath, reporters, suites, junits, props, includes,
  // excludes, concurrent, memberOf, beginsWith, testNG).
  def testParseArgsIntoLists() {
    // this is how i solved the problem of wanting to reuse these val names, runpathList, reportersList, etc.
    // by putting them in a little verify method, it gets reused each time i call that method
    def verify(
      args: Array[String],
      expectedRunpathList: List[String],
      expectedReporterList: List[String],
      expectedSuitesList: List[String],
      expectedJunitsList: List[String],
      expectedPropsList: List[String],
      expectedIncludesList: List[String],
      expectedExcludesList: List[String],
      expectedConcurrentList: List[String],
      expectedMemberOfList: List[String],
      expectedBeginsWithList: List[String],
      expectedTestNGList: List[String]
    ) = {
      val (
        runpathList,
        reportersList,
        suitesList,
        junitsList,
        propsList,
        includesList,
        excludesList,
        concurrentList,
        memberOfList,
        beginsWithList,
        testNGList
      ) = Runner.parseArgs(args)
      assert(runpathList === expectedRunpathList)
      assert(reportersList === expectedReporterList)
      assert(suitesList === expectedSuitesList)
      assert(junitsList === expectedJunitsList)
      assert(propsList === expectedPropsList)
      assert(includesList === expectedIncludesList)
      assert(excludesList === expectedExcludesList)
      assert(concurrentList === expectedConcurrentList)
      assert(memberOfList === expectedMemberOfList)
      assert(beginsWithList === expectedBeginsWithList)
      assert(testNGList === expectedTestNGList)
    }
    verify(
      Array("-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out", "-p"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-p"),
      List("-g", "-g", "-f", "file.out"),
      Nil,
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil
    )
    verify(
      Array("-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-s", "SuiteOne", "-s", "SuiteTwo"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil
    )
    verify(
      Array(),
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil,
      Nil
    )
    verify(
      Array("-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "JustOne", "-s", "SuiteOne", "-s", "SuiteTwo"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "JustOne"),
      Nil,
      Nil,
      Nil,
      Nil,
      Nil
    )
    verify(
      Array("-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne", "-s", "SuiteTwo"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      Nil,
      Nil,
      Nil,
      Nil
    )
    verify(
      Array("-c", "-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne", "-s", "SuiteTwo"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      List("-c"),
      Nil,
      Nil,
      Nil
    )
    verify(
      Array("-c", "-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne", "-s", "SuiteTwo", "-m", "com.example.webapp",
        "-w", "com.example.root"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      List("-c"),
      List("-m", "com.example.webapp"),
      List("-w", "com.example.root"),
      Nil
    )
    // Try a TestNGSuite
    verify(
      Array("-c", "-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne", "-s", "SuiteTwo", "-m", "com.example.webapp",
        "-w", "com.example.root", "-t", "some/path/file.xml"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne", "-s", "SuiteTwo"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      List("-c"),
      List("-m", "com.example.webapp"),
      List("-w", "com.example.root"),
      List("-t", "some/path/file.xml")
    )
    // Try a junit Suite
    verify(
      Array("-c", "-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-f", "file.out",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne", "-j", "junitTest", "-j", "junitTest2",
        "-m", "com.example.webapp", "-w", "com.example.root", "-t", "some/path/file.xml"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-f", "file.out"),
      List("-s", "SuiteOne"),
      List("-j", "junitTest", "-j", "junitTest2"),
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      List("-c"),
      List("-m", "com.example.webapp"),
      List("-w", "com.example.root"),
      List("-t", "some/path/file.xml")
    )
    // Test -u option
    verify(
      Array("-c", "-g", "-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188", "-p",
        "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\"", "-g", "-u", "directory/",
        "-n", "One Two Three", "-l", "SlowTests", "-s", "SuiteOne",
        "-m", "com.example.webapp", "-w", "com.example.root", "-t", "some/path/file.xml"),
      List("-p", "\\"serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar\\""),
      List("-g", "-g", "-u", "directory/"),
      List("-s", "SuiteOne"),
      Nil,
      List("-Dincredible=whatshername", "-Ddbname=testdb", "-Dserver=192.168.1.188"),
      List("-n", "One Two Three"),
      List("-l", "SlowTests"),
      List("-c"),
      List("-m", "com.example.webapp"),
      List("-w", "com.example.root"),
      List("-t", "some/path/file.xml")
    )
  }

  // A space-separated compound arg value becomes a Set of its tokens.
  def testParseCompoundArgIntoSet() {
    expect(Set("Cat", "Dog")) {
      Runner.parseCompoundArgIntoSet(List("-n", "Cat Dog"), "-n")
    }
  }

  // Exercises the private parseConfigSet: maps reporter flag letters
  // (e.g. -oN, -oF) to ReporterConfigParam values, rejecting bad input.
  def testParseConfigSet() {
    val parseConfigSet = PrivateMethod[Set[ReporterConfigParam]]('parseConfigSet)
    intercept[NullPointerException] {
      Runner invokePrivate parseConfigSet(null)
    }
    intercept[IllegalArgumentException] {
      Runner invokePrivate parseConfigSet("-fJ")
    }
    intercept[IllegalArgumentException] {
      Runner invokePrivate parseConfigSet("-uJ")
    }
    intercept[IllegalArgumentException] {
      Runner invokePrivate parseConfigSet("-oYZTFUPBISARG-")
    }
    intercept[IllegalArgumentException] {
      Runner invokePrivate parseConfigSet("-")
    }
    intercept[IllegalArgumentException] {
      Runner invokePrivate parseConfigSet("")
    }
    expect(Set(FilterTestStarting)) {
      Runner invokePrivate parseConfigSet("-oN")
    }
    expect(Set(FilterTestSucceeded)) {
      Runner invokePrivate parseConfigSet("-oC")
    }
    expect(Set(FilterTestIgnored)) {
      Runner invokePrivate parseConfigSet("-oX")
    }
    expect(Set(FilterTestPending)) {
      Runner invokePrivate parseConfigSet("-oE")
    }
    expect(Set(FilterSuiteStarting)) {
      Runner invokePrivate parseConfigSet("-oH")
    }
    expect(Set(FilterSuiteCompleted)) {
      Runner invokePrivate parseConfigSet("-oL")
    }
    expect(Set(FilterInfoProvided)) {
      Runner invokePrivate parseConfigSet("-oO")
    }
    expect(Set(PresentWithoutColor)) {
      Runner invokePrivate parseConfigSet("-oW")
    }
    expect(Set(PresentAllDurations)) {
      Runner invokePrivate parseConfigSet("-oD")
    }
    expect(Set(PresentFullStackTraces)) {
      Runner invokePrivate parseConfigSet("-oF")
    }
    expect(Set[ReporterConfigParam]()) {
      Runner invokePrivate parseConfigSet("-f")
    }
    expect(Set[ReporterConfigParam]()) {
      Runner invokePrivate parseConfigSet("-u")
    }
    expect(Set(FilterInfoProvided, PresentWithoutColor)) {
      Runner invokePrivate parseConfigSet("-oOW")
    }
    expect(Set(FilterInfoProvided, PresentWithoutColor)) {
      Runner invokePrivate parseConfigSet("-oWO") // Just reverse the order of the params
    }
    val allOpts = Set(
      FilterInfoProvided,
      FilterSuiteCompleted,
      FilterSuiteStarting,
      FilterTestIgnored,
      FilterTestPending,
      FilterTestStarting,
      FilterTestSucceeded,
      PresentAllDurations,
      PresentWithoutColor,
      PresentFullStackTraces
    )
    expect(allOpts) {
      Runner invokePrivate parseConfigSet("-oNCXEHLOWDF")
    }
  }

  // Reporter args (-g/-o/-e/-f/-u/-r plus config letters) must produce the
  // matching ReporterConfigurations; invalid combinations must throw.
  def testParseReporterArgsIntoSpecs() {
    intercept[NullPointerException] {
      Runner.parseReporterArgsIntoConfigurations(null)
    }
    intercept[NullPointerException] {
      Runner.parseReporterArgsIntoConfigurations(List("Hello", null, "World"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("Hello", "-", "World"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("Hello", "", "World"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-g", "-l", "-o"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("Hello", " there", " world!"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-g", "-o", "-g", "-e"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-o", "-o", "-g", "-e"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-e", "-o", "-g", "-e"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-f")) // Can't have -f last, because need a file name
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-u")) // Can't have -u last, because need a directory name
    }
    intercept[IllegalArgumentException] {
      Runner.parseReporterArgsIntoConfigurations(List("-r")) // Can't have -r last, because need a reporter class
    }
    expect(new ReporterConfigurations(None, Nil, Nil, None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(Nil)
    }
    expect(new ReporterConfigurations(Some(new GraphicReporterConfiguration(Set())), Nil, Nil, None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-g"))
    }
    expect(new ReporterConfigurations(Some(new GraphicReporterConfiguration(Set(FilterSuiteCompleted))), Nil, Nil, None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-gL"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, Some(new StandardOutReporterConfiguration(Set())), None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-o"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, Some(new StandardOutReporterConfiguration(Set(FilterTestSucceeded,FilterTestIgnored))), None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-oCX"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, None, Some(new StandardErrReporterConfiguration(Set())), Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-e"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, None, Some(new StandardErrReporterConfiguration(Set(PresentFullStackTraces))), Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-eF"))
    }
    expect(new ReporterConfigurations(None, List(new FileReporterConfiguration(Set(), "theFilename")), Nil, None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-f", "theFilename"))
    }
    expect(new ReporterConfigurations(None, Nil, List(new XmlReporterConfiguration(Set(), "target")), None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-u", "target"))
    }
    expect(new ReporterConfigurations(None, Nil, List(new XmlReporterConfiguration(Set(), "target")), None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-uN", "target"))
    }
    expect(new ReporterConfigurations(None, List(new FileReporterConfiguration(Set(FilterTestStarting), "theFilename")), Nil, None, None, Nil, Nil)) {
      Runner.parseReporterArgsIntoConfigurations(List("-fN", "theFilename"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, None, None, Nil, List(new CustomReporterConfiguration(Set(), "the.reporter.Class")))) {
      Runner.parseReporterArgsIntoConfigurations(List("-r", "the.reporter.Class"))
    }
    expect(new ReporterConfigurations(None, Nil, Nil, None, None, Nil, List(new CustomReporterConfiguration(Set(FilterTestPending), "the.reporter.Class")))) {
      Runner.parseReporterArgsIntoConfigurations(List("-rE", "the.reporter.Class"))
    }
  }

  // -s (and similar) flags must be followed by a class name; dangling flags throw.
  def testParseSuiteArgsIntoClassNameStrings() {
    intercept[NullPointerException] {
      Runner.parseSuiteArgsIntoNameStrings(null, "-s")
    }
    intercept[NullPointerException] {
      Runner.parseSuiteArgsIntoNameStrings(List("-s", null, "-s"), "-s")
    }
    intercept[IllegalArgumentException] {
      Runner.parseSuiteArgsIntoNameStrings(List("-s", "SweetSuite", "-s"), "-s")
    }
    intercept[IllegalArgumentException] {
      Runner.parseSuiteArgsIntoNameStrings(List("-s", "SweetSuite", "-s", "-s"), "-s")
    }
    expect(List("SweetSuite", "OKSuite")) {
      Runner.parseSuiteArgsIntoNameStrings(List("-s", "SweetSuite", "-s", "OKSuite"), "-s")
    }
    expect(List("SweetSuite", "OKSuite", "SomeSuite")) {
      Runner.parseSuiteArgsIntoNameStrings(List("-s", "SweetSuite", "-s", "OKSuite", "-s", "SomeSuite"), "-s")
    }
  }

  // The single -p value is a space-separated runpath; backslash escapes
  // embedded spaces in path elements.
  def testParseRunpathArgIntoList() {
    intercept[NullPointerException] {
      Runner.parseRunpathArgIntoList(null)
    }
    intercept[NullPointerException] {
      Runner.parseRunpathArgIntoList(List("-p", null))
    }
    intercept[NullPointerException] {
      Runner.parseRunpathArgIntoList(List(null, "serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseRunpathArgIntoList(List("-p"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseRunpathArgIntoList(List("-p", "bla", "bla"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseRunpathArgIntoList(List("-pX", "bla"))
    }
    intercept[IllegalArgumentException] {
      Runner.parseRunpathArgIntoList(List("-p", " "))
    }
    intercept[IllegalArgumentException] {
      Runner.parseRunpathArgIntoList(List("-p", "\\t"))
    }
    expect(List("bla")) {
      Runner.parseRunpathArgIntoList(List("-p", "bla"))
    }
    expect(List("bla", "bla", "bla")) {
      Runner.parseRunpathArgIntoList(List("-p", "bla bla bla"))
    }
    expect(List("serviceuitest-1.1beta4.jar", "myjini", "http://myhost:9998/myfile.jar")) {
      Runner.parseRunpathArgIntoList(List("-p", "serviceuitest-1.1beta4.jar myjini http://myhost:9998/myfile.jar"))
    }
    expect(List("\\\\", "c:\\\\", "c:\\\\Program Files", "c:\\\\Documents and Settings", "\\\\", "myjini")) {
      Runner.parseRunpathArgIntoList(List("-p", """\\ c:\\ c:\\Program\\ Files c:\\Documents\\ and\\ Settings \\ myjini"""))
    }
  }

  // -Dkey=value args become a Map; malformed entries (missing -D, key, '=' or value) throw.
  def testParsePropertiesArgsIntoMap() {
    intercept[NullPointerException] {
      Runner.parsePropertiesArgsIntoMap(null)
    }
    intercept[NullPointerException] {
      Runner.parsePropertiesArgsIntoMap(List("-Da=b", null))
    }
    intercept[IllegalArgumentException] {
      Runner.parsePropertiesArgsIntoMap(List("-Dab")) // = sign missing
    }
    intercept[IllegalArgumentException] {
      Runner.parsePropertiesArgsIntoMap(List("ab")) // needs to start with -D
    }
    intercept[IllegalArgumentException] {
      Runner.parsePropertiesArgsIntoMap(List("-D=ab")) // no key
    }
    intercept[IllegalArgumentException] {
      Runner.parsePropertiesArgsIntoMap(List("-Dab=")) // no value
    }
    expect(Map("a" -> "b", "cat" -> "dog", "Glorp" -> "Glib")) {
      Runner.parsePropertiesArgsIntoMap(List("-Da=b", "-Dcat=dog", "-DGlorp=Glib"))
    }
  }

  // checkArgsForValidity returns None for a valid arg array and Some(error)
  // when an unknown flag (e.g. -z) is present.
  def testCheckArgsForValidity() {
    intercept[NullPointerException] {
      Runner.checkArgsForValidity(null)
    }
    expect(None) {
      Runner.checkArgsForValidity(Array("-Ddbname=testdb", "-Dserver=192.168.1.188", "-p", "serviceuitest-1.1beta4.jar", "-g", "-eFBA", "-s", "MySuite"))
    }
    assert(Runner.checkArgsForValidity(Array("-Ddbname=testdb", "-Dserver=192.168.1.188", "-z", "serviceuitest-1.1beta4.jar", "-g", "-eFBA", "-s", "MySuite")) != None)
    expect(None) {
      Runner.checkArgsForValidity(Array("-Ddbname=testdb", "-Dserver=192.168.1.188", "-p", "serviceuitest-1.1beta4.jar", "-g", "-eFBA", "-s", "MySuite", "-c"))
    }
  }
  /*
  def testRunpathPropertyAddedToPropertiesMap() {
    val a = new Suite {
      var theProperties: Map[String, Any] = Map()
      override def execute(testName: Option[String], reporter: Reporter, stopper: Stopper, includes: Set[String], excludes: Set[String],
          properties: Map[String, Any], distributor: Option[Distributor]) {
        theProperties = properties
      }
    }

    val dispatchReporter = new DispatchReporter(Nil, System.out)
    val suitesList = List("org.scalatest.usefulstuff.RunpathPropCheckerSuite")

    // Runner.doRunRunRunADoRunRun(new DispatchReporter)
    // Runner.doRunRunRunADoRunRun(dispatchReporter, suitesList, new Stopper {}, Filter(), Map(), false,
                  List(), List(), runpath: "build_tests", loader: ClassLoader,
                  doneListener: RunDoneListener) = {
    ()
  }
}

package org.scalatest.usefulstuff {

  class RunpathPropCheckerSuite extends Suite {
    var theProperties: Map[String, Any] = Map()
    override def execute(testName: Option[String], reporter: Reporter, stopper: Stopper, includes: Set[String], excludes: Set[String],
        properties: Map[String, Any], distributor: Option[Distributor]) {
      theProperties = properties
    }
  }
  */
}
| JimCallahan/Graphics | external/scalatest/src/test/scala/org/scalatest/tools/RunnerSuite.scala | Scala | apache-2.0 | 21,392 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.protocol.types.{Struct, Schema, Field}
import org.apache.kafka.common.protocol.types.Type.STRING
import org.apache.kafka.common.protocol.types.Type.INT32
import org.apache.kafka.common.protocol.types.Type.INT64
import org.apache.kafka.common.utils.Utils
import kafka.utils._
import kafka.common._
import kafka.log.FileMessageSet
import kafka.message._
import kafka.metrics.KafkaMetricsGroup
import kafka.common.TopicAndPartition
import kafka.tools.MessageFormatter
import kafka.api.ProducerResponseStatus
import kafka.coordinator.ConsumerCoordinator
import scala.Some
import scala.collection._
import java.io.PrintStream
import java.util.concurrent.atomic.AtomicBoolean
import java.nio.ByteBuffer
import java.util.concurrent.TimeUnit
import com.yammer.metrics.core.Gauge
import org.I0Itec.zkclient.ZkClient
/**
* Configuration settings for in-built offset management
* @param maxMetadataSize The maximum allowed metadata for any offset commit.
* @param loadBufferSize Batch size for reading from the offsets segments when loading offsets into the cache.
* @param offsetsRetentionMs Offsets older than this retention period will be discarded.
* @param offsetsRetentionCheckIntervalMs Frequency at which to check for expired offsets.
* @param offsetsTopicNumPartitions The number of partitions for the offset commit topic (should not change after deployment).
* @param offsetsTopicSegmentBytes The offsets topic segment bytes should be kept relatively small to facilitate faster
* log compaction and faster offset loads
* @param offsetsTopicReplicationFactor The replication factor for the offset commit topic (set higher to ensure availability).
* @param offsetsTopicCompressionCodec Compression codec for the offsets topic - compression should be turned on in
* order to achieve "atomic" commits.
* @param offsetCommitTimeoutMs The offset commit will be delayed until all replicas for the offsets topic receive the
* commit or this timeout is reached. (Similar to the producer request timeout.)
* @param offsetCommitRequiredAcks The required acks before the commit can be accepted. In general, the default (-1)
* should not be overridden.
*/
// Immutable bundle of all offset-management tunables; every field defaults to the
// corresponding constant in the companion object below (see the scaladoc above for
// the meaning of each setting).
case class OffsetManagerConfig(maxMetadataSize: Int = OffsetManagerConfig.DefaultMaxMetadataSize,
                               loadBufferSize: Int = OffsetManagerConfig.DefaultLoadBufferSize,
                               offsetsRetentionMs: Long = OffsetManagerConfig.DefaultOffsetRetentionMs,
                               offsetsRetentionCheckIntervalMs: Long = OffsetManagerConfig.DefaultOffsetsRetentionCheckIntervalMs,
                               offsetsTopicNumPartitions: Int = OffsetManagerConfig.DefaultOffsetsTopicNumPartitions,
                               offsetsTopicSegmentBytes: Int = OffsetManagerConfig.DefaultOffsetsTopicSegmentBytes,
                               offsetsTopicReplicationFactor: Short = OffsetManagerConfig.DefaultOffsetsTopicReplicationFactor,
                               offsetsTopicCompressionCodec: CompressionCodec = OffsetManagerConfig.DefaultOffsetsTopicCompressionCodec,
                               offsetCommitTimeoutMs: Int = OffsetManagerConfig.DefaultOffsetCommitTimeoutMs,
                               offsetCommitRequiredAcks: Short = OffsetManagerConfig.DefaultOffsetCommitRequiredAcks)
// Default values for every OffsetManagerConfig field.
object OffsetManagerConfig {
  val DefaultMaxMetadataSize = 4096                          // 4 KB of metadata per commit
  val DefaultLoadBufferSize = 5*1024*1024                    // 5 MB read batches when loading the cache
  val DefaultOffsetRetentionMs = 24*60*60*1000L              // keep offsets for 24 hours
  val DefaultOffsetsRetentionCheckIntervalMs = 600000L       // expiration sweep every 10 minutes
  val DefaultOffsetsTopicNumPartitions = 50
  val DefaultOffsetsTopicSegmentBytes = 100*1024*1024        // 100 MB segments
  val DefaultOffsetsTopicReplicationFactor = 3.toShort
  val DefaultOffsetsTopicCompressionCodec = NoCompressionCodec
  val DefaultOffsetCommitTimeoutMs = 5000
  val DefaultOffsetCommitRequiredAcks = (-1).toShort         // -1 == wait for all in-sync replicas
}
/**
 * Manages consumer group offsets: keeps an in-memory cache of the latest committed
 * offset per (group, topic, partition), persists commits by appending to the internal
 * offsets topic via the replica manager, reloads the cache from the log when this
 * broker becomes leader for an offsets-topic partition, and periodically expires
 * stale offsets by writing tombstones.
 */
class OffsetManager(val config: OffsetManagerConfig,
                    replicaManager: ReplicaManager,
                    zkClient: ZkClient,
                    scheduler: Scheduler) extends Logging with KafkaMetricsGroup {

  /* offsets and metadata cache */
  private val offsetsCache = new Pool[GroupTopicPartition, OffsetAndMetadata]
  // Guards cache reads against a concurrent leader->follower transition clearing the
  // cache mid-read (see getOffsets / removeOffsetsFromCacheForPartition).
  private val followerTransitionLock = new Object
  // Offsets-topic partitions whose load into the cache is still in progress.
  private val loadingPartitions: mutable.Set[Int] = mutable.Set()
  // Serializes the expiration sweep with cache loading so they never interleave.
  private val cleanupOrLoadMutex = new Object
  private val shuttingDown = new AtomicBoolean(false)

  this.logIdent = "[Offset Manager on Broker " + replicaManager.config.brokerId + "]: "

  // Kick off the periodic expiration sweep.
  scheduler.schedule(name = "delete-expired-consumer-offsets",
                     fun = deleteExpiredOffsets,
                     period = config.offsetsRetentionCheckIntervalMs,
                     unit = TimeUnit.MILLISECONDS)

  newGauge("NumOffsets",
    new Gauge[Int] {
      def value = offsetsCache.size
    }
  )

  newGauge("NumGroups",
    new Gauge[Int] {
      def value = offsetsCache.keys.map(_.group).toSet.size
    }
  )

  // Removes offsets whose expiration timestamp has passed from the cache and appends
  // tombstone messages to the offsets topic so log compaction removes them durably.
  private def deleteExpiredOffsets() {
    debug("Collecting expired offsets.")
    val startMs = SystemTime.milliseconds

    val numExpiredOffsetsRemoved = cleanupOrLoadMutex synchronized {
      val expiredOffsets = offsetsCache.filter { case (groupTopicPartition, offsetAndMetadata) =>
        offsetAndMetadata.expireTimestamp < startMs
      }

      debug("Found %d expired offsets.".format(expiredOffsets.size))

      // delete the expired offsets from the table and generate tombstone messages to remove them from the log
      val tombstonesForPartition = expiredOffsets.map { case (groupTopicAndPartition, offsetAndMetadata) =>
        val offsetsPartition = partitionFor(groupTopicAndPartition.group)
        trace("Removing expired offset and metadata for %s: %s".format(groupTopicAndPartition, offsetAndMetadata))

        offsetsCache.remove(groupTopicAndPartition)

        // A tombstone is a message with the commit key and a null payload.
        val commitKey = OffsetManager.offsetCommitKey(groupTopicAndPartition.group,
          groupTopicAndPartition.topicPartition.topic, groupTopicAndPartition.topicPartition.partition)

        (offsetsPartition, new Message(bytes = null, key = commitKey))
      }.groupBy { case (partition, tombstone) => partition }

      // Append the tombstone messages to the offset partitions. It is okay if the replicas don't receive these (say,
      // if we crash or leaders move) since the new leaders will get rid of expired offsets during their own purge cycles.
      tombstonesForPartition.flatMap { case (offsetsPartition, tombstones) =>
        val partitionOpt = replicaManager.getPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition)
        partitionOpt.map { partition =>
          val appendPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition)
          val messages = tombstones.map(_._2).toSeq

          trace("Marked %d offsets in %s for deletion.".format(messages.size, appendPartition))

          try {
            // do not need to require acks since even if the tombstone is lost,
            // it will be appended again in the next purge cycle
            partition.appendMessagesToLeader(new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages: _*))
            tombstones.size
          }
          catch {
            case t: Throwable =>
              error("Failed to mark %d expired offsets for deletion in %s.".format(messages.size, appendPartition), t)
              // ignore and continue
              0
          }
        }
      }.sum
    }

    info("Removed %d expired offsets in %d milliseconds.".format(numExpiredOffsetsRemoved, SystemTime.milliseconds - startMs))
  }

  /** Maps a consumer group to its partition of the internal offsets topic. */
  def partitionFor(group: String): Int = Utils.abs(group.hashCode) % config.offsetsTopicNumPartitions

  /**
   * Fetch the current offset for the given group/topic/partition from the underlying offsets storage.
   *
   * @param key The requested group-topic-partition
   * @return If the key is present, return the offset and metadata; otherwise return None
   */
  private def getOffset(key: GroupTopicPartition) = {
    val offsetAndMetadata = offsetsCache.get(key)
    if (offsetAndMetadata == null)
      OffsetMetadataAndError.NoOffset
    else
      OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, ErrorMapping.NoError)
  }

  /**
   * Put the (already committed) offset for the given group/topic/partition into the cache.
   *
   * @param key The group-topic-partition
   * @param offsetAndMetadata The offset/metadata to be stored
   */
  private def putOffset(key: GroupTopicPartition, offsetAndMetadata: OffsetAndMetadata) {
    offsetsCache.put(key, offsetAndMetadata)
  }

  /*
   * Check if the offset metadata length is valid (null metadata is allowed)
   */
  def validateOffsetMetadataLength(metadata: String) : Boolean = {
    metadata == null || metadata.length() <= config.maxMetadataSize
  }

  /**
   * Store offsets by appending it to the replicated log and then inserting to cache.
   * The cache is only updated after the log append succeeds; per-partition result
   * codes are delivered asynchronously through responseCallback.
   */
  def storeOffsets(groupId: String,
                   consumerId: String,
                   generationId: Int,
                   offsetMetadata: immutable.Map[TopicAndPartition, OffsetAndMetadata],
                   responseCallback: immutable.Map[TopicAndPartition, Short] => Unit) {
    // first filter out partitions with offset metadata size exceeding limit
    val filteredOffsetMetadata = offsetMetadata.filter { case (topicAndPartition, offsetAndMetadata) =>
      validateOffsetMetadataLength(offsetAndMetadata.metadata)
    }

    // construct the message set to append
    val messages = filteredOffsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
      new Message(
        key = OffsetManager.offsetCommitKey(groupId, topicAndPartition.topic, topicAndPartition.partition),
        bytes = OffsetManager.offsetCommitValue(offsetAndMetadata)
      )
    }.toSeq

    val offsetTopicPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, partitionFor(groupId))

    val offsetsAndMetadataMessageSet = Map(offsetTopicPartition ->
      new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages:_*))

    // set the callback function to insert offsets into cache after log append completed
    def putCacheCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) {
      // the append response should only contain the offsets topic partition
      if (responseStatus.size != 1 || ! responseStatus.contains(offsetTopicPartition))
        throw new IllegalStateException("Append status %s should only have one partition %s"
          .format(responseStatus, offsetTopicPartition))

      // construct the commit response status and insert
      // the offset and metadata to cache if the append status has no error
      val status = responseStatus(offsetTopicPartition)

      val responseCode =
        if (status.error == ErrorMapping.NoError) {
          filteredOffsetMetadata.foreach { case (topicAndPartition, offsetAndMetadata) =>
            putOffset(GroupTopicPartition(groupId, topicAndPartition), offsetAndMetadata)
          }
          ErrorMapping.NoError
        } else {
          debug("Offset commit %s from group %s consumer %s with generation %d failed when appending to log due to %s"
            .format(filteredOffsetMetadata, groupId, consumerId, generationId, ErrorMapping.exceptionNameFor(status.error)))

          // transform the log append error code to the corresponding the commit status error code
          if (status.error == ErrorMapping.UnknownTopicOrPartitionCode)
            ErrorMapping.ConsumerCoordinatorNotAvailableCode
          else if (status.error == ErrorMapping.NotLeaderForPartitionCode)
            ErrorMapping.NotCoordinatorForConsumerCode
          else if (status.error == ErrorMapping.MessageSizeTooLargeCode
            || status.error == ErrorMapping.MessageSetSizeTooLargeCode
            || status.error == ErrorMapping.InvalidFetchSizeCode)
            Errors.INVALID_COMMIT_OFFSET_SIZE.code
          else
            status.error
        }

      // compute the final error codes for the commit response:
      // partitions dropped by the metadata-length filter above report OffsetMetadataTooLarge
      val commitStatus = offsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
        if (validateOffsetMetadataLength(offsetAndMetadata.metadata))
          (topicAndPartition, responseCode)
        else
          (topicAndPartition, ErrorMapping.OffsetMetadataTooLargeCode)
      }

      // finally trigger the callback logic passed from the API layer
      responseCallback(commitStatus)
    }

    // call replica manager to append the offset messages
    replicaManager.appendMessages(
      config.offsetCommitTimeoutMs.toLong,
      config.offsetCommitRequiredAcks,
      true, // allow appending to internal offset topic
      offsetsAndMetadataMessageSet,
      putCacheCallback)
  }

  /**
   * The most important guarantee that this API provides is that it should never return a stale offset. i.e., it either
   * returns the current offset or it begins to sync the cache from the log (and returns an error code).
   */
  def getOffsets(group: String, topicPartitions: Seq[TopicAndPartition]): Map[TopicAndPartition, OffsetMetadataAndError] = {
    trace("Getting offsets %s for group %s.".format(topicPartitions, group))

    val offsetsPartition = partitionFor(group)

    /**
     * followerTransitionLock protects against fetching from an empty/cleared offset cache (i.e., cleared due to a
     * leader->follower transition). i.e., even if leader-is-local is true a follower transition can occur right after
     * the check and clear the cache. i.e., we would read from the empty cache and incorrectly return NoOffset.
     */
    followerTransitionLock synchronized {
      if (leaderIsLocal(offsetsPartition)) {
        if (loadingPartitions synchronized loadingPartitions.contains(offsetsPartition)) {
          // cache load still in progress; report OffsetsLoading rather than a stale value
          debug("Cannot fetch offsets for group %s due to ongoing offset load.".format(group))
          topicPartitions.map { topicAndPartition =>
            val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
            (groupTopicPartition.topicPartition, OffsetMetadataAndError.OffsetsLoading)
          }.toMap
        } else {
          if (topicPartitions.size == 0) {
            // Return offsets for all partitions owned by this consumer group. (this only applies to consumers that commit offsets to Kafka.)
            offsetsCache.filter(_._1.group == group).map { case(groupTopicPartition, offsetAndMetadata) =>
              (groupTopicPartition.topicPartition, OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, ErrorMapping.NoError))
            }.toMap
          } else {
            topicPartitions.map { topicAndPartition =>
              val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
              (groupTopicPartition.topicPartition, getOffset(groupTopicPartition))
            }.toMap
          }
        }
      } else {
        debug("Could not fetch offsets for group %s (not offset coordinator).".format(group))
        topicPartitions.map { topicAndPartition =>
          val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
          (groupTopicPartition.topicPartition, OffsetMetadataAndError.NotCoordinatorForGroup)
        }.toMap
      }
    }
  }

  /**
   * Asynchronously read the partition from the offsets topic and populate the cache.
   * A no-op if a load for the same partition is already in flight.
   */
  def loadOffsetsFromLog(offsetsPartition: Int) {

    val topicPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition)

    loadingPartitions synchronized {
      if (loadingPartitions.contains(offsetsPartition)) {
        info("Offset load from %s already in progress.".format(topicPartition))
      } else {
        loadingPartitions.add(offsetsPartition)
        scheduler.schedule(topicPartition.toString, loadOffsets)
      }
    }

    // Runs on the scheduler thread: replays the offsets-topic partition from its first
    // segment up to the high watermark, applying commits and tombstones to the cache.
    def loadOffsets() {
      info("Loading offsets from " + topicPartition)

      val startMs = SystemTime.milliseconds
      try {
        replicaManager.logManager.getLog(topicPartition) match {
          case Some(log) =>
            var currOffset = log.logSegments.head.baseOffset
            val buffer = ByteBuffer.allocate(config.loadBufferSize)
            // loop breaks if leader changes at any time during the load, since getHighWatermark is -1
            cleanupOrLoadMutex synchronized {
              while (currOffset < getHighWatermark(offsetsPartition) && !shuttingDown.get()) {
                buffer.clear()
                val messages = log.read(currOffset, config.loadBufferSize).messageSet.asInstanceOf[FileMessageSet]
                messages.readInto(buffer, 0)
                val messageSet = new ByteBufferMessageSet(buffer)
                messageSet.foreach { msgAndOffset =>
                  require(msgAndOffset.message.key != null, "Offset entry key should not be null")
                  val key = OffsetManager.readMessageKey(msgAndOffset.message.key)
                  if (msgAndOffset.message.payload == null) {
                    // null payload == tombstone: drop any cached entry for this key
                    if (offsetsCache.remove(key) != null)
                      trace("Removed offset for %s due to tombstone entry.".format(key))
                    else
                      trace("Ignoring redundant tombstone for %s.".format(key))
                  } else {
                    // special handling for version 0:
                    // set the expiration time stamp as commit time stamp + server default retention time
                    val value = OffsetManager.readMessageValue(msgAndOffset.message.payload)
                    putOffset(key, value.copy (
                      expireTimestamp = {
                        if (value.expireTimestamp == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP)
                          value.commitTimestamp + config.offsetsRetentionMs
                        else
                          value.expireTimestamp
                      }
                    ))
                    trace("Loaded offset %s for %s.".format(value, key))
                  }
                  currOffset = msgAndOffset.nextOffset
                }
              }
            }

            if (!shuttingDown.get())
              info("Finished loading offsets from %s in %d milliseconds."
                .format(topicPartition, SystemTime.milliseconds - startMs))
          case None =>
            warn("No log found for " + topicPartition)
        }
      }
      catch {
        case t: Throwable =>
          error("Error in loading offsets from " + topicPartition, t)
      }
      finally {
        loadingPartitions synchronized loadingPartitions.remove(offsetsPartition)
      }
    }
  }

  // High watermark of the local replica of the given offsets-topic partition,
  // or -1 if this broker is not the leader for it.
  private def getHighWatermark(partitionId: Int): Long = {
    val partitionOpt = replicaManager.getPartition(ConsumerCoordinator.OffsetsTopicName, partitionId)

    val hw = partitionOpt.map { partition =>
      partition.leaderReplicaIfLocal().map(_.highWatermark.messageOffset).getOrElse(-1L)
    }.getOrElse(-1L)

    hw
  }

  // Leadership is inferred from having a valid (non-negative) high watermark locally.
  def leaderIsLocal(partition: Int) = { getHighWatermark(partition) != -1L }

  /**
   * When this broker becomes a follower for an offsets topic partition clear out the cache for groups that belong to
   * that partition.
   * @param offsetsPartition Groups belonging to this partition of the offsets topic will be deleted from the cache.
   */
  def removeOffsetsFromCacheForPartition(offsetsPartition: Int) {
    var numRemoved = 0
    followerTransitionLock synchronized {
      offsetsCache.keys.foreach { key =>
        if (partitionFor(key.group) == offsetsPartition) {
          offsetsCache.remove(key)
          numRemoved += 1
        }
      }
    }

    if (numRemoved > 0) info("Removed %d cached offsets for %s on follower transition."
      .format(numRemoved, TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition)))
  }

  // Signals the in-flight load loop (if any) to stop; does not block for it.
  def shutdown() {
    shuttingDown.set(true)
  }
}
/**
 * Wire format of the messages stored in the offsets topic. A message's key encodes
 * (group, topic, partition); its value encodes (offset, metadata, timestamps). Both
 * are prefixed with a 2-byte schema version so the format can evolve; keys are read
 * back through schemaFor so older versions remain decodable.
 */
object OffsetManager {

  // Pairs the key and value schemas that together form one on-disk format version.
  private case class KeyAndValueSchemas(keySchema: Schema, valueSchema: Schema)

  private val CURRENT_OFFSET_SCHEMA_VERSION = 1.toShort

  private val OFFSET_COMMIT_KEY_SCHEMA_V0 = new Schema(new Field("group", STRING),
    new Field("topic", STRING),
    new Field("partition", INT32))
  private val KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("group")
  private val KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("topic")
  private val KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("partition")

  // v0 values carry a single timestamp; v1 splits it into commit and expire timestamps.
  private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
    new Field("metadata", STRING, "Associated metadata.", ""),
    new Field("timestamp", INT64))
  private val OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", INT64),
    new Field("metadata", STRING, "Associated metadata.", ""),
    new Field("commit_timestamp", INT64),
    new Field("expire_timestamp", INT64))
  private val VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
  private val VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
  private val VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")

  private val VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset")
  private val VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata")
  private val VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
  private val VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")

  // map of versions to schemas (the key schema is unchanged between v0 and v1)
  private val OFFSET_SCHEMAS = Map(0 -> KeyAndValueSchemas(OFFSET_COMMIT_KEY_SCHEMA_V0, OFFSET_COMMIT_VALUE_SCHEMA_V0),
    1 -> KeyAndValueSchemas(OFFSET_COMMIT_KEY_SCHEMA_V0, OFFSET_COMMIT_VALUE_SCHEMA_V1))

  private val CURRENT_SCHEMA = schemaFor(CURRENT_OFFSET_SCHEMA_VERSION)

  private def schemaFor(version: Int) = {
    val schemaOpt = OFFSET_SCHEMAS.get(version)
    schemaOpt match {
      case Some(schema) => schema
      case _ => throw new KafkaException("Unknown offset schema version " + version)
    }
  }

  /**
   * Generates the key for offset commit message for given (group, topic, partition)
   *
   * NOTE(review): the versionId parameter is accepted but never used — the key is
   * always written with CURRENT_OFFSET_SCHEMA_VERSION.
   *
   * @return key for offset commit message
   */
  private def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
    val key = new Struct(CURRENT_SCHEMA.keySchema)
    key.set(KEY_GROUP_FIELD, group)
    key.set(KEY_TOPIC_FIELD, topic)
    key.set(KEY_PARTITION_FIELD, partition)

    val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_SCHEMA_VERSION)
    key.writeTo(byteBuffer)
    byteBuffer.array()
  }

  /**
   * Generates the payload for offset commit message from given offset and metadata
   *
   * @param offsetAndMetadata consumer's current offset and metadata
   * @return payload for offset commit message
   */
  private def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
    // generate commit value with schema version 1
    val value = new Struct(CURRENT_SCHEMA.valueSchema)
    value.set(VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
    value.set(VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
    value.set(VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
    value.set(VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)

    val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_SCHEMA_VERSION)
    value.writeTo(byteBuffer)
    byteBuffer.array()
  }

  /**
   * Decodes the offset messages' key
   *
   * @param buffer input byte-buffer
   * @return an GroupTopicPartition object
   */
  private def readMessageKey(buffer: ByteBuffer): GroupTopicPartition = {
    val version = buffer.getShort()
    val keySchema = schemaFor(version).keySchema
    val key = keySchema.read(buffer).asInstanceOf[Struct]

    val group = key.get(KEY_GROUP_FIELD).asInstanceOf[String]
    val topic = key.get(KEY_TOPIC_FIELD).asInstanceOf[String]
    val partition = key.get(KEY_PARTITION_FIELD).asInstanceOf[Int]

    GroupTopicPartition(group, TopicAndPartition(topic, partition))
  }

  /**
   * Decodes the offset messages' payload and retrieves offset and metadata from it
   *
   * @param buffer input byte-buffer
   * @return an offset-metadata object from the message, or null for a tombstone
   */
  private def readMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
    val structAndVersion = readMessageValueStruct(buffer)

    if (structAndVersion.value == null) { // tombstone
      null
    } else {
      if (structAndVersion.version == 0) {
        val offset = structAndVersion.value.get(VALUE_OFFSET_FIELD_V0).asInstanceOf[Long]
        val metadata = structAndVersion.value.get(VALUE_METADATA_FIELD_V0).asInstanceOf[String]
        val timestamp = structAndVersion.value.get(VALUE_TIMESTAMP_FIELD_V0).asInstanceOf[Long]

        OffsetAndMetadata(offset, metadata, timestamp)
      } else if (structAndVersion.version == 1) {
        val offset = structAndVersion.value.get(VALUE_OFFSET_FIELD_V1).asInstanceOf[Long]
        val metadata = structAndVersion.value.get(VALUE_METADATA_FIELD_V1).asInstanceOf[String]
        val commitTimestamp = structAndVersion.value.get(VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
        val expireTimestamp = structAndVersion.value.get(VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]

        OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
      } else {
        throw new IllegalStateException("Unknown offset message version")
      }
    }
  }

  // Reads the 2-byte version prefix and the versioned value struct; a null buffer
  // denotes a tombstone and yields (null, -1).
  private def readMessageValueStruct(buffer: ByteBuffer): MessageValueStructAndVersion = {
    if(buffer == null) { // tombstone
      MessageValueStructAndVersion(null, -1)
    } else {
      val version = buffer.getShort()
      val valueSchema = schemaFor(version).valueSchema
      val value = valueSchema.read(buffer).asInstanceOf[Struct]

      MessageValueStructAndVersion(value, version)
    }
  }

  // Formatter for use with tools such as console consumer: Consumer should also set exclude.internal.topics to false.
  // (specify --formatter "kafka.server.OffsetManager\$OffsetsMessageFormatter" when consuming __consumer_offsets)
  class OffsetsMessageFormatter extends MessageFormatter {
    def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
      val formattedKey = if (key == null) "NULL" else OffsetManager.readMessageKey(ByteBuffer.wrap(key)).toString
      val formattedValue = if (value == null) "NULL" else OffsetManager.readMessageValueStruct(ByteBuffer.wrap(value)).value.toString
      output.write(formattedKey.getBytes)
      output.write("::".getBytes)
      output.write(formattedValue.getBytes)
      output.write("\n".getBytes)
    }
  }
}
case class MessageValueStructAndVersion(value: Struct, version: Short)
/**
 * Cache key for a committed offset: the consumer group plus the topic-partition
 * that the offset applies to. Renders as "[group,topic,partition]".
 */
case class GroupTopicPartition(group: String, topicPartition: TopicAndPartition) {

  /** Convenience constructor taking topic and partition separately. */
  def this(group: String, topic: String, partition: Int) =
    this(group, new TopicAndPartition(topic, partition))

  override def toString =
    s"[$group,${topicPartition.topic},${topicPartition.partition}]"
}
| iheartradio/kafka | core/src/main/scala/kafka/server/OffsetManager.scala | Scala | apache-2.0 | 28,596 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.util
import org.junit.runner.RunWith
import java.io.File
import java.lang.String
import collection.immutable.Map
import org.scalatest._
import java.util.concurrent.TimeUnit
import FileSupport._
import scala.Some
import org.apache.activemq.apollo.util.FunSuiteSupport._
import java.util.concurrent.locks.{ReentrantReadWriteLock, Lock, ReadWriteLock}
import java.util.concurrent.atomic.AtomicLong
// Process-wide state shared by every FunSuiteSupport test class in the JVM.
object FunSuiteSupport {
  /** Thrown from a test body (via `skip`) to abort the test without failing it. */
  class SkipTestException extends RuntimeException
  // Parallel-safe test classes hold the read lock concurrently; a serial-only class
  // takes the write lock and thereby excludes all others (see beforeAll/afterAll).
  val parallel_test_class_lock = new ReentrantReadWriteLock()
  // Source of JVM-wide unique ids for `next_id`.
  val id_counter = new AtomicLong()
}
/**
* @version $Revision : 1.1 $
*/
/**
 * Base class for the project's ScalaTest suites: wires up per-test logging,
 * a `skip` facility, a basedir/test-data-dir convention, parallel/serial test-class
 * coordination via a global read-write lock, and a `within` retry helper.
 */
@RunWith(classOf[org.scalatest.junit.ParallelJUnitRunner])
abstract class FunSuiteSupport extends FunSuite with Logging with ParallelBeforeAndAfterAll {

  /** Returns prefix + a JVM-wide unique counter value + suffix. */
  def next_id(prefix:String="", suffix:String="") = prefix+id_counter.incrementAndGet()+suffix

  // Best-effort guess of the project base directory: two levels up from the
  // location of this class's code source, falling back to ".".
  protected var _basedir = try {
    var file = new File(getClass.getProtectionDomain.getCodeSource.getLocation.getFile)
    file = (file / ".." / "..").getCanonicalFile
    if( file.isDirectory ) {
      file.getPath
    } else {
      "."
    }
  } catch {
    case x:Throwable =>
      "."
  }

  /** Aborts the current test without failing it when `check` is true. */
  def skip(check:Boolean=true):Unit = if(check) throw new SkipTestException()

  // Per-test logger installed by the `test` override below; null outside a test.
  var _log:Log = null
  override protected def log: Log = {
    if( _log == null ) {
      super.log
    } else {
      _log
    }
  }

  // Wraps every registered test: installs a test-named logger, swallows
  // SkipTestException, reports other failures via onTestFailure, and always
  // clears the logger afterwards.
  override protected def test(testName: String, testTags: Tag*)(testFun: => Unit) {
    super.test(testName, testTags:_*) {
      try {
        _log = Log(getClass.getName.stripSuffix("$")+":"+testName)
        testFun
      } catch {
        case e:SkipTestException =>
        case e:Throwable =>
          onTestFailure(e)
          throw e
      } finally {
        _log = null
      }
    }
  }

  /** Hook invoked with the failure before it is rethrown; no-op by default. */
  def onTestFailure(e:Throwable) = {}

  /**
   * Returns the base directory of the current project
   */
  def basedir = new File(_basedir).getCanonicalFile

  /**
   * Returns ${basedir}/target/test-data/<test class name>
   */
  def test_data_dir = basedir / "target"/ "test-data" / (getClass.getName)

  /**
   * Can this test class run in parallel with other
   * test classes.
   * @return
   */
  def is_parallel_test_class = true

  // Acquires the shared class lock (read for parallel classes, write for serial
  // ones), resolves the basedir from the config map, and wipes the test data dir.
  // The lock is released in afterAll.
  override protected def beforeAll(map: Map[String, Any]): Unit = {
    if ( is_parallel_test_class ) {
      parallel_test_class_lock.readLock().lock()
    } else {
      parallel_test_class_lock.writeLock().lock()
    }
    _basedir = map.get("basedir") match {
      case Some(basedir) =>
        basedir.toString
      case _ =>
        System.getProperty("basedir", _basedir)
    }
    System.setProperty("basedir", _basedir)
    test_data_dir.recursive_delete
    super.beforeAll(map)
  }

  // Releases the lock taken in beforeAll even if the parent afterAll throws.
  override protected def afterAll(configMap: Map[String, Any]) {
    try {
      super.afterAll(configMap)
    } finally {
      if ( is_parallel_test_class ) {
        parallel_test_class_lock.readLock().unlock()
      } else {
        parallel_test_class_lock.writeLock().unlock()
      }
    }
  }

  //
  // Allows us to get the current test name from within a running test.
  // The name is stored in a ThreadLocal around runTest so it is visible to the
  // thread executing the test body.
  //
  val _testName = new ThreadLocal[String]();
  def testName = _testName.get

  protected override def runTest(testName: String, reporter: org.scalatest.Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) = {
    _testName.set(testName)
    try {
      super.runTest(testName, reporter, stopper, configMap, tracker)
    } finally {
      _testName.remove
    }
  }

  // Marker failure that `within` must not retry; thrown via exit_within_with_failure.
  private class ShortCircuitFailure(msg:String) extends RuntimeException(msg)

  /** Fails the enclosing `within` block immediately, bypassing further retries. */
  def exit_within_with_failure[T](msg:String):T = throw new ShortCircuitFailure(msg)

  // Repeatedly evaluates `func` (sleeping ~1% of the timeout between attempts)
  // until it stops throwing or the timeout elapses, then rethrows the last failure.
  def within[T](timeout:Long, unit:TimeUnit)(func: => Unit ):Unit = {
    val start = System.currentTimeMillis
    var amount = unit.toMillis(timeout)
    var sleep_amount = amount / 100
    var last:Throwable = null
    if( sleep_amount < 1 ) {
      sleep_amount = 1
    }
    try {
      func
      return
    } catch {
      case e:ShortCircuitFailure => throw e
      case e:Throwable => last = e
    }
    while( (System.currentTimeMillis-start) < amount ) {
      Thread.sleep(sleep_amount)
      try {
        func
        return
      } catch {
        case e:ShortCircuitFailure => throw e
        case e:Throwable => last = e
      }
    }
    throw last
  }
} | chirino/activemq-apollo | apollo-util/src/test/scala/org/apache/activemq/apollo/util/FunSuiteSupport.scala | Scala | apache-2.0 | 5,715 |
package me.danielpes.spark.datetime
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.sql._
import org.scalatest._
/**
 * Mixin for Spark test suites: lazily creates one shared local SparkSession for
 * the whole suite and provides DataFrame/Dataset set-equality assertions.
 * NOTE(review): the session is created in beforeAll but never stopped — presumably
 * it is intentionally reused across suites in the same JVM; confirm.
 */
abstract class SharedContext extends FlatSpecLike with BeforeAndAfterAll { self: Suite =>

  // Quiet down Spark/Akka logging noise during tests.
  Logger.getRootLogger.setLevel(Level.WARN)
  Logger.getLogger("org").setLevel(Level.WARN)
  Logger.getLogger("akka").setLevel(Level.WARN)
  Logger.getLogger("/executors").setLevel(Level.FATAL)

  private var _spark: SparkSession = _
  // When true, assertDFs/assertDSs print schemas and rows before comparing.
  protected val debug: Boolean = false

  protected def spark: SparkSession = _spark
  protected def sc: SparkContext = _spark.sparkContext

  // Row-based convenience wrapper around assertDSs.
  protected def assertDFs(ds1: DataFrame, ds2: DataFrame, debug: Boolean = this.debug): Unit = assertDSs[Row](ds1, ds2, debug)

  // Asserts the two datasets contain the same rows ignoring order (set equality);
  // both are persisted for the comparison and unpersisted afterwards.
  protected def assertDSs[A](ds1: Dataset[A], ds2: Dataset[A], debug: Boolean = this.debug): Unit = {
    val df1 = ds1.toDF
    val df2 = ds2.toDF
    try {
      df1.persist()
      df2.persist()
      if (debug) {
        df1.printSchema()
        df2.printSchema()
        df1.show(100, truncate=false)
        df2.show(100, truncate=false)
      }
      assert(df1.collect().toSet == df2.collect().toSet)
    } finally {
      df1.unpersist()
      df2.unpersist()
    }
  }

  // Builds the shared local[*] session once, with low parallelism for fast tests.
  protected override def beforeAll(): Unit = {
    if (_spark == null) {
      _spark = SparkSession
        .builder()
        .appName("Tests")
        .master("local[*]")
        .config("spark.default.parallelism", 2)
        .config("spark.sql.shuffle.partitions", 2)
        .config("spark.sql.testkey", "true")
        .getOrCreate()
    }
    // Ensure we have initialized the context before calling parent code
    super.beforeAll()
  }
} | danielpes/spark-datetime-lite | src/test/scala/me/danielpes/spark/datetime/SharedContext.scala | Scala | apache-2.0 | 1,691 |
package com.scalableQuality.quick.core.fileComponentDescriptions
import com.scalableQuality.quick.core.Reporting.{InvalidColumns, IrrelevantColumns, ReportingColumns, ValidColumns}
import com.scalableQuality.quick.core.checks.CheckColumnValue
import com.scalableQuality.quick.core.phases.{MatchingStage, ReportingStage, ShouldUseDuring, ValidationStage}
import com.scalableQuality.quick.core.valueMapping.ValueMapper
import com.scalableQuality.quick.mantle.parsing.{LiteralDelimiter, RawRow}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.prop.GeneratorDrivenPropertyChecks
/**
 * Unit tests for DelimitedRowDivider: matchability, column filtering per stage,
 * comparison-value extraction, row comparison, validation usability and equality.
 * Column descriptions are built from XML literals as they would appear in a
 * quick schema file.
 */
class DelimitedRowDividerTest
    extends FlatSpec
    with Matchers
    with GeneratorDrivenPropertyChecks {

  // isMatchable: property-based check over all boolean combinations.
  "DelimitedRowDivider.isMatchable" should
    "return true if at least one could should be used in matching and false otherwise" in forAll {
    (firstColumnMatching: Boolean,
     secondColumnMatching: Boolean,
     thirdColumnMatching: Boolean) =>
      val firstColumnDescriptionElem = <ColumnDescription
        label="firstColumn"
        position="1"
        useDuringMatching={firstColumnMatching.toString}/>
      val secondColumnDescriptionElem = <ColumnDescription
        label="secondColumn"
        position="2"
        useDuringMatching={secondColumnMatching.toString}/>
      val thirdColumnDescriptionElem = <ColumnDescription
        label="thirdColumn"
        position="3"
        useDuringMatching={thirdColumnMatching.toString}/>
      val firstColumnDescriptionEither =
        DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
      val secondColumnDescriptionEither =
        DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
      val thirdColumnDescriptionEither =
        DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
      val delimiterEither = LiteralDelimiter(";")
      (firstColumnDescriptionEither,
       secondColumnDescriptionEither,
       thirdColumnDescriptionEither,
       delimiterEither) match {
        case (Right(firstColumnDescription),
              Right(secondColumnDescription),
              Right(thirdColumnDescription),
              Right(delimiter)) =>
          val columnDescriptionList = List(firstColumnDescription,
                                           secondColumnDescription,
                                           thirdColumnDescription)
          val delimitedRowDivider =
            DelimitedRowDivider(columnDescriptionList, delimiter)
          delimitedRowDivider.isMatchable shouldBe (firstColumnMatching || secondColumnMatching || thirdColumnMatching)
        case _ => fail
      }
  }

  // keepOnlyColumnsDescriptionsUsedIn: filtering by a single stage, then by several.
  "DelimitedRowDivider.keepOnlyColumnsDescriptionsUsedIn" should
    "return an DelimitedRowDivider containing all columns used during validation " in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="2"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="2"
      useDuringValidation="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="2"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnDescriptionList =
          List(firstColumnDescription, secondColumnDescription)
        val expectedDelimitedRowDivider =
          DelimitedRowDivider(expectedColumnDescriptionList, delimiter)
        delimitedRowDivider.keepOnlyColumnsDescriptionsUsedIn(ValidationStage) shouldBe expectedDelimitedRowDivider
      case _ => fail
    }
  }

  it should
    "return an DelimitedRowDivider containing all columns used during Matching " in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="2"
      useDuringMatching="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="2"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="2"
      useDuringMatching="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnDescriptionList =
          List(firstColumnDescription, thirdColumnDescription)
        val expectedDelimitedRowDivider =
          DelimitedRowDivider(expectedColumnDescriptionList, delimiter)
        delimitedRowDivider.keepOnlyColumnsDescriptionsUsedIn(MatchingStage) shouldBe expectedDelimitedRowDivider
      case _ => fail
    }
  }

  it should
    "return an DelimitedRowDivider containing all columns used during Reporting " in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="2"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="2"
      useDuringReporting="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="2"
      useDuringReporting="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnDescriptionList =
          List(secondColumnDescription, thirdColumnDescription)
        val expectedDelimitedRowDivider =
          DelimitedRowDivider(expectedColumnDescriptionList, delimiter)
        delimitedRowDivider.keepOnlyColumnsDescriptionsUsedIn(ReportingStage) shouldBe expectedDelimitedRowDivider
      case _ => fail
    }
  }

  it should
    "return an DelimitedRowDivider containing all columns used during from different stages " in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="2"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="2"
      useDuringMatching="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="2"
      useDuringReporting="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnDescriptionList = List(firstColumnDescription,
                                                 secondColumnDescription,
                                                 thirdColumnDescription)
        val expectedDelimitedRowDivider =
          DelimitedRowDivider(expectedColumnDescriptionList, delimiter)
        delimitedRowDivider.keepOnlyColumnsDescriptionsUsedIn(
          ValidationStage,
          MatchingStage,
          ReportingStage) shouldBe expectedDelimitedRowDivider
      case _ => fail
    }
  }

  // columnsComparisonValuesFor: value extraction per stage, including missing columns.
  "DelimitedRowDivider.columnsComparisonValuesFor" should "extract column values of all 3 validation columns" in {
    val rawRow = RawRow("FirstColumn;SecondColumn;ThirdColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringValidation="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringValidation="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues =
          List(Some("FirstColumn"), Some("SecondColumn"), Some("ThirdColumn"))
        delimitedRowDivider.columnsComparisonValuesFor(ValidationStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  it should "extract column values of only validation columns" in {
    val rawRow = RawRow("FirstColumn|SecondColumn|ThirdColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringValidation="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringValidation="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues =
          List(Some("FirstColumn"), Some("ThirdColumn"))
        delimitedRowDivider.columnsComparisonValuesFor(ValidationStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  it should "the extracted column values should include None for every validation column that does not exist" in {
    val rawRow = RawRow("FirstColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringValidation="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringValidation="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues = List(Some("FirstColumn"), None, None)
        delimitedRowDivider.columnsComparisonValuesFor(ValidationStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  it should "extract column values of all 3 matching columns" in {
    val rawRow = RawRow("FirstColumn|SecondColumn|ThirdColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringMatching="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringMatching="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringMatching="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("|")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues =
          List(Some("FirstColumn"), Some("SecondColumn"), Some("ThirdColumn"))
        delimitedRowDivider.columnsComparisonValuesFor(MatchingStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  it should "extract column values of only matching columns" in {
    val rawRow = RawRow("FirstColumn*SecondColumn*ThirdColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringMatching="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringMatching="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringMatching="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("*")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues =
          List(Some("FirstColumn"), Some("ThirdColumn"))
        delimitedRowDivider.columnsComparisonValuesFor(MatchingStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  it should "the extracted column values should include None for every matching column that does not exist" in {
    val rawRow = RawRow("FirstColumn", 1)
    val firstColumnDescriptionElem = <ColumnDescription
      label="FirstColumn"
      position="1"
      useDuringMatching="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="SecondColumn"
      position="2"
      useDuringMatching="true"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="ThirdColumn"
      position="3"
      useDuringMatching="true"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter("*")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedColumnValues = List(Some("FirstColumn"), None, None)
        delimitedRowDivider.columnsComparisonValuesFor(MatchingStage, rawRow) shouldBe expectedColumnValues
      case _ => fail
    }
  }

  // compare: one column per possible comparison outcome.
  "DelimitedRowDivider.compare" should "return ValidColumns, IrrelevantColumns, ReportingColumns and InvalidColumns" in {
    val leftRawRow = Some(RawRow("One;Two;Three;Four", 1))
    val rightRawRow = Some(RawRow("One;Bwo;RRree;Sour", 2))
    val validColumnsDescriptionElem = <ColumnDescription
      label="valid column"
      position="1"
      useDuringValidation="true"
      useDuringReporting="false"
      />
    val irrelevantColumnsDescriptionElem = <ColumnDescription
      label="irrelevant column"
      position="2"
      useDuringValidation="false"
      useDuringReporting="false"
      />
    val reportingColumnsDescriptionElem = <ColumnDescription
      label="reporting column"
      position="3"
      useDuringValidation="false"
      useDuringReporting="true"
      />
    val invalidColumnsDescriptionElem = <ColumnDescription
      label="invalid column"
      position="4"
      useDuringValidation="true"
      useDuringReporting="false"
      />
    val validColumnsDescriptionEither =
      DelimitedColumnDescription(validColumnsDescriptionElem.attributes)
    val irrelevantColumnsDescriptionEither =
      DelimitedColumnDescription(irrelevantColumnsDescriptionElem.attributes)
    val reportingColumnsDescriptionEither =
      DelimitedColumnDescription(reportingColumnsDescriptionElem.attributes)
    val invalidColumnsDescriptionEither =
      DelimitedColumnDescription(invalidColumnsDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (
      validColumnsDescriptionEither,
      irrelevantColumnsDescriptionEither,
      reportingColumnsDescriptionEither,
      invalidColumnsDescriptionEither,
      delimiterEither
    ) match {
      case (
          Right(validColumnsDescription),
          Right(irrelevantColumnsDescription),
          Right(reportingColumnsDescription),
          Right(invalidColumnsDescription),
          Right(delimiter)
          ) =>
        val columnDescriptionList = List(validColumnsDescription,
                                         irrelevantColumnsDescription,
                                         reportingColumnsDescription,
                                         invalidColumnsDescription)
        val delimitedRowDivider =
          DelimitedRowDivider(columnDescriptionList, delimiter)
        val expectedReportingColumns = ReportingColumns(
          Some("Three"),
          Some("RRree"),
          " 3",
          "reporting column"
        )
        val expectedInvalidColumns = InvalidColumns(
          Some("Four"),
          Some("Sour"),
          " 4",
          "invalid column"
        )
        val expectedComparison = List(ValidColumns,
                                      IrrelevantColumns,
                                      expectedReportingColumns,
                                      expectedInvalidColumns)
        delimitedRowDivider.compare(leftRawRow, rightRawRow) shouldBe expectedComparison
      case _ =>
        fail
    }
  }

  // usableDuringValidation: driven by useDuringValidation flags and column-value checks.
  "DelimitedRowDivider.usableDuringValidation" should
    "return true if all column descriptions have default values" in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="1"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="4"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="8"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider = DelimitedRowDivider(columnDescriptionList, delimiter)
        delimitedRowDivider.usableDuringValidation shouldBe true
      case _ => fail
    }
  }

  it should
    "return true if at least one column descriptions should be used during validation" in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="1"
      useDuringValidation="true"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="4"
      checkColumnValueExists="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="8"
      checkColumnValueExists="false"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider = DelimitedRowDivider(columnDescriptionList, delimiter)
        delimitedRowDivider.usableDuringValidation shouldBe true
      case _ => fail
    }
  }

  it should
    "return true if at more than one column descriptions should be used during validation" in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="1"
      useDuringValidation="true"
      checkColumnValueExists="false"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="4"
      useDuringValidation="true"
      checkColumnValueExists="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="8"
      useDuringValidation="true"
      checkColumnValueExists="false"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider = DelimitedRowDivider(columnDescriptionList, delimiter)
        delimitedRowDivider.usableDuringValidation shouldBe true
      case _ => fail
    }
  }

  it should
    "return true if at least one column descriptions should be checked" in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="1"
      checkColumnValueMatches=".?"
      checkColumnValueExists="false"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="4"
      checkColumnValueExists="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="8"
      checkColumnValueExists="false"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider = DelimitedRowDivider(columnDescriptionList, delimiter)
        delimitedRowDivider.usableDuringValidation shouldBe true
      case _ => fail
    }
  }

  it should
    "return false if all columns should not be used during validation and have all checks deactivated" in {
    val firstColumnDescriptionElem = <ColumnDescription
      label="firstColumn"
      position="1"
      checkColumnValueExists="false"
      />
    val secondColumnDescriptionElem = <ColumnDescription
      label="secondColumn"
      position="2"
      checkColumnValueExists="false"
      />
    val thirdColumnDescriptionElem = <ColumnDescription
      label="thirdColumn"
      position="3"
      checkColumnValueExists="false"
      />
    val firstColumnDescriptionEither =
      DelimitedColumnDescription(firstColumnDescriptionElem.attributes)
    val secondColumnDescriptionEither =
      DelimitedColumnDescription(secondColumnDescriptionElem.attributes)
    val thirdColumnDescriptionEither =
      DelimitedColumnDescription(thirdColumnDescriptionElem.attributes)
    val delimiterEither = LiteralDelimiter(";")
    (firstColumnDescriptionEither,
     secondColumnDescriptionEither,
     thirdColumnDescriptionEither,
     delimiterEither) match {
      case (Right(firstColumnDescription),
            Right(secondColumnDescription),
            Right(thirdColumnDescription),
            Right(delimiter)) =>
        val columnDescriptionList = List(firstColumnDescription,
                                         secondColumnDescription,
                                         thirdColumnDescription)
        val delimitedRowDivider = DelimitedRowDivider(columnDescriptionList, delimiter)
        delimitedRowDivider.usableDuringValidation shouldBe false
      case _ => fail
    }
  }

  // equals: structural equality on column list and delimiter; type mismatch is unequal.
  "DelimitedRowDivider.equals" should "return true if the arguments contains the same column description" in {
    val delimitedColumnDescription = List(DelimitedColumnDescription(
      ColumnDescriptionMetaData("","",ShouldUseDuring(true, true, true)),
      DelimitedPosition(1),
      ValueMapper(Nil),
      CheckColumnValue(Nil)
    ))
    val delimiter = LiteralDelimiter(";").right.get
    val firstDelimitedRowDivider = DelimitedRowDivider(
      delimitedColumnDescription,delimiter
    )
    val secondDelimitedRowDivider = DelimitedRowDivider(
      delimitedColumnDescription,delimiter
    )
    firstDelimitedRowDivider.equals(secondDelimitedRowDivider) shouldBe true
  }

  it should "return false if argument is of other type" in {
    val delimitedColumnDescription = List(DelimitedColumnDescription(
      ColumnDescriptionMetaData("","",ShouldUseDuring(true, true, true)),
      DelimitedPosition(1),
      ValueMapper(Nil),
      CheckColumnValue(Nil)
    ))
    val delimiter = LiteralDelimiter(";").right.get
    val firstDelimitedRowDivider = DelimitedRowDivider(
      delimitedColumnDescription,delimiter
    )
    firstDelimitedRowDivider.equals("") shouldBe false
  }
}
| MouslihAbdelhakim/Quick | src/test/scala/com/scalableQuality/quick/core/fileComponentDescriptions/DelimitedRowDividerTest.scala | Scala | apache-2.0 | 32,950 |
package com.psyanite.scorm.node
import com.psyanite.scorm.exception.ParseException
import scala.xml.NodeSeq
/**
 * A single &lt;resource&gt; entry parsed from a SCORM manifest.
 *
 * @param identifier   value of the mandatory 'identifier' attribute
 * @param resourceType value of the mandatory 'type' attribute
 * @param scormType    value of the mandatory namespaced 'adlcp:scormtype' attribute
 * @param href         optional 'href' attribute, when the resource points at a launchable asset
 */
case class Resource (
    var identifier: String,
    var resourceType: String,
    var scormType: String,
    var href: Option[String]
)
object Resource extends BaseNode {

  /**
   * Builds a [[Resource]] from a manifest resource node, throwing a
   * ParseException when any mandatory attribute is absent.
   */
  def apply(resource: NodeSeq): Resource = {
    val identifier = requiredAttribute(resource, "identifier")
    val resourceType = requiredAttribute(resource, "type")
    val scormType = getScormType(resource)
    val href = getAttributeValue(resource, "href")
    new Resource(identifier, resourceType, scormType, href)
  }

  // Reads a plain attribute; absence is a parse error.
  private def requiredAttribute(resource: NodeSeq, attribute: String): String =
    getAttributeValue(resource, attribute) match {
      case Some(value) => value
      case None =>
        throw new ParseException("A resource node is missing the '%s' attribute".format(attribute))
    }

  // Reads the namespaced 'adlcp:scormtype' attribute from the first node of the sequence;
  // both a missing node and a missing attribute yield the same parse error.
  private def getScormType(nodeSeq: NodeSeq): String =
    nodeSeq.headOption
      .flatMap(node => node.attribute(node.getNamespace("adlcp"), "scormtype"))
      .map(_.text)
      .getOrElse(throw new ParseException("A resource node is missing the 'adlcp:scormtype' attribute"))
}
| psyanite/scorm-parser | src/main/scala/com/psyanite/scorm/node/Resource.scala | Scala | apache-2.0 | 1,315 |
package sample.stream.experiments
import rx.lang.scala.Observable
import scala.concurrent.duration._
object ReplaySelector {

  /** Demo entry point: replays a 1-second tick stream through a selector keeping even ticks. */
  def main(args: Array[String]) {
    val ticks = Observable.interval(1.second)
    // The selector variant of replay returns a plain Observable over the filtered replay.
    val evenTicks = ticks.replay(selector = (source: Observable[Long]) => {
      source.filter(_ % 2 == 0)
    })
    evenTicks.subscribe { tick: Long =>
      println(tick)
    }
    // Keep the JVM alive long enough to observe a few emissions.
    Thread.sleep(10000)
  }
}
| pallavig/akka-examples | src/main/scala/sample/stream/experiments/ReplaySelector.scala | Scala | cc0-1.0 | 437 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.yuiskw.spark.streaming
import com.google.cloud.sparkdemo.CloudPubsubReceiver
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
/**
* This class is a helper class to make dealing with Google Pub/Sub easy
*/
/**
 * This class is a helper class to make dealing with Google Pub/Sub easy
 */
object CloudPubSubReceiverHelper {

  /**
   * create a DStream getting data from Google Pub/Sub
   *
   * @param ssc Streaming context
   * @param projectId Google Cloud project ID
   * @param numReceivers the number of receivers
   * @param topic Google Pub/Sub topic
   * @param subscription Google Pub/Sub subscription
   * @return a single DStream that unions the output of all receivers
   */
  def createReceivers(
      ssc: StreamingContext,
      projectId: String,
      numReceivers: Int,
      topic: String,
      subscription: String): DStream[String] = {
    // Seq.fill replaces the original (1 to n).map whose loop index was unused.
    val receivers = Seq.fill(numReceivers) {
      ssc.receiverStream(new CloudPubsubReceiver(projectId, topic, subscription))
    }
    ssc.union(receivers)
  }
}
| yu-iskw/spark-streaming-with-google-cloud-example | src/main/scala/com/github/yuiskw/spark/streaming/CloudPubSubReceiverHelper.scala | Scala | apache-2.0 | 1,764 |
package opencl.generator.stencil
import ir._
import ir.ast._
import lift.arithmetic.{StartFromRange, Var}
import opencl.executor.{Execute, Utils, _}
import opencl.ir._
import opencl.ir.pattern.{MapGlb, _}
import org.junit.Assert._
import org.junit.Assume.assumeFalse
import org.junit.{Ignore, Test}
// Companion object wiring this suite into the shared OpenCL executor lifecycle.
object TestConvolutionSeparable extends TestWithExecutor
class TestConvolutionSeparable {
@Test def convolutionSimple(): Unit = {
val stencil = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
ArrayType(Float, 17 * 17),
(matrix, weights) => {
MapGlb(1)(
MapGlb(0)(fun(neighbours => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeq(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ neighbours, weights)
}))
) o Slide2D(17, 1, 17, 1) o Pad2D(8, 8, 8, 8, Pad.Boundary.Clamp) $ matrix
})
val weights = Array.fill[Float](17 * 17)(1.0f)
// testing
val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
val (output, _) = Execute(16, 16, 128, 128, (true, true))[Array[Float]](stencil, input, weights)
val gold = Utils.scalaCompute2DStencil(input, 17, 1, 17, 1, 8, 8, 8, 8, weights, Utils.scalaClamp)
assertArrayEquals(gold, output, 0.2f)
// for generating 4k kernel
//val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
//val (output: Array[Float], _) = Execute(16, 16, 4096, 4096, (true, true))(stencil, input, weights)
}
  // Tiled 17x17 convolution: 32x32 overlapping tiles (step 16) are staged into local memory
  // by the work-group before each work-item reduces its 17x17 neighbourhood.
  @Test def convolutionTiled(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17 * 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeq(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 17, 1) o
            // load to local memory
            toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
        ))) o
          // tiling
          Slide2D(32, 16, 32, 16) o
          Pad2D(8, 8, 8, 8, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17 * 17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(32, 8, 128, 128, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 17, 1, 8, 8, 8, 8, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    // idle threads
    //val (output: Array[Float], _) = Execute(32, 32, 4096, 4096, (true, true))(stencil, input, weights)
    // blocked loading to local mem
    //val (output: Array[Float], _) = Execute(16, 16, 4096, 4096, (true, true))(stencil, input, weights)
  }
  // Naive 17-tap single-axis blur (17x1 window, wrap-around padding of 8 on that axis),
  // one global work-item per output pixel; reduction is fully unrolled.
  @Ignore //todo segfaults?
  @Test def blurY(): Unit = {
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        MapGlb(1)(
          MapGlb(0)(fun(neighbours => {
            toGlobal(MapSeqUnroll(id)) o
              ReduceSeqUnroll(fun((acc, pair) => {
                val pixel = Get(pair, 0)
                val weight = Get(pair, 1)
                multAndSumUp.apply(acc, pixel, weight)
              }), 0.0f) $ Zip(Join() $ neighbours, weights)
          }))
        ) o Slide2D(17, 1, 1, 1) o Pad2D(8, 8, 0, 0, Pad.Boundary.Wrap) $ matrix
      })
    val weights = Array.fill[Float](17)(1.0f)
    // testing: note this variant uses Wrap boundary handling (scalaWrap reference)
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 16, 128, 128, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaWrap)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 16, 4096, 4096, (true, true))(stencil, input, weights)
  }
  // Tiled 17-tap blur: strips of 80 rows (step 64), full row width per tile (inner slide
  // step 1x1), staged into local memory before the unrolled per-pixel reduction.
  @Test def blurYTiled(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o
            // load to local memory
            toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
        ))) o
          // tiling
          Slide2D(80, 64, 1, 1) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(1, 4, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(1, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Same 17-tap blur as blurYTiled but with 2D tiles: 80x16 overlapping tiles
  // (steps 64x16), loaded to local memory by a 2D work-group.
  @Test def blurYTiled2D(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o
            // load to local memory
            toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
        ))) o
          // tiling
          Slide2D(80, 64, 16, 16) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 4, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Variant of blurYTiled2D where the local-memory load itself is chunked: each tile is
  // Split into rows of 8 and copied chunk-by-chunk (MapSeqUnroll) before being re-Joined.
  @Ignore //fix
  @Test def blurYTiled2DTiledLoading(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o Join() o
            // load to local memory
            toLocal(MapSeqUnroll(MapLcl(1)(MapLcl(0)(id)))) o Split(8) $ tile
          // split tiles into chunks
        ))) o
          // tiling
          Slide2D(80, 64, 16, 16) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 8, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Variant of blurYTiled2D that transposes each tile around the local-memory copy so the
  // copy is performed with the opposite thread-dimension order (MapLcl(0) outer).
  @Test def blurYTiled2DTransposed(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o
            // transposed load
            Transpose() o
            toLocal(MapLcl(0)(MapLcl(1)(id))) o
            Transpose() $ tile
        ))) o
          // tiling
          Slide2D(80, 64, 16, 16) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 4, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Combines the two previous variants: transposed tiles whose local-memory copy is
  // additionally chunked (Split(8) + MapSeqUnroll) and re-Joined afterwards.
  @Test def blurYTiled2DTiledLoadingTransposed(): Unit = {
    assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o
            // transposed load
            Transpose() o
            Map(Join()) o
            // tiled loading
            toLocal(MapLcl(0)(MapSeqUnroll(MapLcl(1)(id)))) o
            // split tile into chunks
            Map(Split(8)) o
            Transpose() $ tile
        ))) o
          // tiling
          Slide2D(80, 64, 16, 16) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 4, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Experimental variant of the transposed load that additionally Pads each tile by one
  // element (intended to offset local-memory accesses); the author notes Pad is the wrong
  // primitive for this, hence @Ignore.
  @Ignore // pad is not the right primitive here, just to try things out
  @Test def blurYTiled2DTransposedPadded(): Unit = {
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(17, 1, 1, 1) o
            // transposed load
            Transpose() o
            toLocal(MapLcl(0)(MapLcl(1)(id))) o
            Transpose() o
            // pad to avoid bank conflicts
            Pad(0, 1, Pad.Boundary.Clamp) $ tile
        ))) o
          // tiling
          Slide2D(80, 64, 16, 16) o
          Pad2D(8, 8, 0, 0, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(128, 128) { (i, j) => i * 128.0f + j }
    val (output, _) = Execute(16, 4, 128, 64, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 17, 1, 1, 1, 8, 8, 0, 0, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 8, 4096, 512, (true, true))(stencil, input, weights)
  }
  // Naive 17-tap blur along the other axis (1x17 window, clamp padding of 8 on that axis),
  // one global work-item per output pixel.
  @Test def blurX(): Unit = {
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        MapGlb(1)(
          MapGlb(0)(fun(neighbours => {
            toGlobal(MapSeqUnroll(id)) o
              ReduceSeqUnroll(fun((acc, pair) => {
                val pixel = Get(pair, 0)
                val weight = Get(pair, 1)
                multAndSumUp.apply(acc, pixel, weight)
              }), 0.0f) $ Zip(Join() $ neighbours, weights)
          }))
        ) o Slide2D(1, 1, 17, 1) o Pad2D(0, 0, 8, 8, Pad.Boundary.Clamp) $ matrix
      })
    val weights = Array.fill[Float](17)(1.0f)
    // testing: compare against the sequential reference implementation
    val input = Array.tabulate(256, 256) { (i, j) => i * 256.0f + j }
    val (output, _) = Execute(16, 16, 256, 256, (true, true))[Array[Float]](stencil, input, weights)
    val gold = Utils.scalaCompute2DStencil(input, 1, 1, 17, 1, 0, 0, 8, 8, weights, Utils.scalaClamp)
    assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 16, 4096, 4096, (true, true))(stencil, input, weights)
  }
  // Tiled variant of blurX: one-row tiles of width 144 (step 128) staged in local memory.
  // NOTE(review): the active code path only runs the kernel — the gold comparison is
  // commented out, so this test asserts nothing beyond successful execution.
  @Ignore //fix
  @Test def blurXTiled(): Unit = {
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(1, 1, 17, 1) o
            // load to local memory
            toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
        ))) o
          // tiling
          Slide2D(1, 1, 144, 128) o
          Pad2D(0, 0, 8, 8, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing
    //val input = Array.tabulate(3072, 3072) { (i, j) => i * 3072.0f + j }
    //val (output: Array[Float], _) = Execute(16, 1, 128, 3072, (true, true))(stencil, input, weights)
    //val gold = Utils.scalaCompute2DStencil(input, 1,1, 17,1, 0,0,8,8, weights, scalaClamp)
    //assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    val input = Array.tabulate(1024, 1024) { (i, j) => i * 1024.0f + j }
    val (output, _) = Execute(16, 1, 512, 1024, (true, true))[Array[Float]](stencil, input, weights)
  }
  // 2D-tiled variant of blurX: 4x144 tiles (steps 4x128) staged in local memory.
  // NOTE(review): the gold comparison is commented out, so the active path only checks
  // that the kernel executes.
  @Ignore //fix
  @Test def blurXTiled2D(): Unit = {
    val stencil = fun(
      ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
      ArrayType(Float, 17),
      (matrix, weights) => {
        Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
          MapLcl(1)(MapLcl(0)(
            // stencil computation
            fun(elem => {
              toGlobal(MapSeqUnroll(id)) o
                ReduceSeqUnroll(fun((acc, pair) => {
                  val pixel = Get(pair, 0)
                  val weight = Get(pair, 1)
                  multAndSumUp.apply(acc, pixel, weight)
                }), 0.0f) $ Zip(Join() $ elem, weights)
            })
            // create neighbourhoods in tiles
          )) o Slide2D(1, 1, 17, 1) o
            // load to local memory
            toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
        ))) o
          // tiling
          Slide2D(4, 4, 144, 128) o
          Pad2D(0, 0, 8, 8, Pad.Boundary.Clamp) $ matrix
      }
    )
    val weights = Array.fill[Float](17)(1.0f)
    // testing
    val input = Array.tabulate(256, 256) { (i, j) => i * 256.0f + j }
    val (output, _) = Execute(16, 4, 64, 256, (true, true))[Array[Float]](stencil, input, weights)
    //val gold = Utils.scalaCompute2DStencil(input, 1,1, 17,1, 0,0,8,8, weights, scalaClamp)
    //assertArrayEquals(gold, output, 0.2f)
    // for generating 4k kernel
    //val input = Array.tabulate(4096, 4096) { (i, j) => i * 4096.0f + j }
    //val (output: Array[Float], _) = Execute(16, 4, 512, 4096, (true, true))(stencil, input, weights)
  }
}
| lift-project/lift | src/test/opencl/generator/stencil/TestConvolutionSeparable.scala | Scala | mit | 19,586 |
/*
Copyright 2015 David R. Pugh, J. Doyne Farmer, and Dan F. Tang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package markets.clearing.engines
// Marker trait for matching engines that implement a continuous double auction
// (orders matched as they arrive rather than in periodic batches).
trait ContinuousDoubleAuctionLike extends MatchingEngineLike
| ScalABM/markets-sandbox | src/main/scala-2.11/markets/clearing/engines/ContinuousDoubleAuctionLike.scala | Scala | apache-2.0 | 689 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.common.util
import java.io.{FileInputStream, ObjectInputStream, ObjectOutputStream}
import java.math
import java.math.RoundingMode
import java.util.{Locale, TimeZone}
import org.apache.carbondata.common.logging.LogServiceFactory
import scala.collection.JavaConversions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.command.LoadDataCommand
import org.apache.spark.sql.test.{ResourceRegisterAndCopier, TestQueryExecutor}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.scalatest.Suite
import org.apache.carbondata.core.datastore.impl.FileFactory
/**
 * Base class for carbondata cluster query tests: provides `sql` execution through the
 * shared test session plus several `checkAnswer` helpers, including one that caches
 * hive reference results on disk keyed by an identifier.
 */
class QueryTest extends PlanTest with Suite {

  // Shared logger available to subclasses.
  val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  // Convenience constant for building query strings containing a literal '$'.
  val DOLLAR = "$"

  // Force a fixed locale so number/date formatting in results is stable across JVMs.
  Locale.setDefault(Locale.US)

  /**
   * Runs the plan and makes sure the answer contains all of the keywords, or that
   * none of the keywords are listed in the answer.
   *
   * @param df the [[DataFrame]] to be executed
   * @param exists true to require every keyword in the output, false to require that
   *               none of the keywords appear
   * @param keywords keywords to look for
   */
  def checkExistence(df: DataFrame, exists: Boolean, keywords: String*): Unit = {
    val outputs = df.collect().map(_.mkString).mkString
    for (key <- keywords) {
      if (exists) {
        assert(outputs.contains(key), s"Failed for $df ($key doesn't exist in result)")
      } else {
        assert(!outputs.contains(key), s"Failed for $df ($key existed in the result)")
      }
    }
  }

  /**
   * Runs the plan and makes sure the answer matches the expected result.
   *
   * @param df the [[DataFrame]] to be executed
   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s
   */
  protected def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row]): Unit = {
    QueryTest.checkAnswer(df, expectedAnswer) match {
      case Some(errorMessage) => fail(errorMessage)
      case None =>
    }
  }

  /** Single-row convenience overload of [[checkAnswer(DataFrame,Seq[Row])]]. */
  protected def checkAnswer(df: DataFrame, expectedAnswer: Row): Unit = {
    checkAnswer(df, Seq(expectedAnswer))
  }

  /** Compares the result of `df` against the collected rows of another [[DataFrame]]. */
  protected def checkAnswer(df: DataFrame, expectedAnswer: DataFrame): Unit = {
    checkAnswer(df, expectedAnswer.collect())
  }

  /**
   * Compares the `carbon` query against a disk-cached result of the `hive` query,
   * keyed by `uniqueIdentifier`. On a cache miss — or a stale cache that no longer
   * matches — the hive query is (re-)executed and the cache rewritten.
   */
  protected def checkAnswer(carbon: String, hive: String, uniqueIdentifier: String): Unit = {
    val path = TestQueryExecutor.hiveresultpath + "/" + uniqueIdentifier
    if (FileFactory.isFileExist(path, FileFactory.getFileType(path))) {
      val objinp = new ObjectInputStream(FileFactory
        .getDataInputStream(path, FileFactory.getFileType(path)))
      // Close the stream even when deserialization fails (the original leaked it on error).
      val rows = try {
        objinp.readObject().asInstanceOf[Array[Row]]
      } finally {
        objinp.close()
      }
      QueryTest.checkAnswer(sql(carbon), rows) match {
        case Some(_) =>
          // Cached answer is stale: drop it and regenerate from hive.
          FileFactory.deleteFile(path, FileFactory.getFileType(path))
          writeAndCheckAnswer(carbon, hive, path)
        case None =>
      }
    } else {
      writeAndCheckAnswer(carbon, hive, path)
    }
  }

  /** Runs the hive query, caches its rows at `path`, then checks the carbon query against them. */
  private def writeAndCheckAnswer(carbon: String, hive: String, path: String): Unit = {
    val rows = sql(hive).collect()
    val obj = new ObjectOutputStream(FileFactory.getDataOutputStream(path, FileFactory
      .getFileType(path)))
    try {
      obj.writeObject(rows)
    } finally {
      // Always release the underlying (possibly remote) stream.
      obj.close()
    }
    checkAnswer(sql(carbon), rows)
  }

  // NOTE(review): `uniqueIdentifier` is unused here; it is kept so existing callers
  // that pass an identifier keep compiling.
  protected def checkAnswer(carbon: String, expectedAnswer: Seq[Row], uniqueIdentifier: String): Unit = {
    checkAnswer(sql(carbon), expectedAnswer)
  }

  /**
   * Executes `sqlText` through the shared test session. When the tests target a real
   * HDFS cluster, LOAD DATA source files are first copied from local resources into
   * the warehouse so the cluster can read them.
   */
  def sql(sqlText: String): DataFrame = {
    val frame = TestQueryExecutor.INSTANCE.sql(sqlText)
    val plan = frame.queryExecution.logical
    if (TestQueryExecutor.hdfsUrl.startsWith("hdfs")) {
      plan match {
        case l: LoadDataCommand =>
          val copyPath = TestQueryExecutor.warehouse + "/" + l.table.table.toLowerCase +
            l.path.substring(l.path.lastIndexOf("/"), l.path.length)
          ResourceRegisterAndCopier.copyLocalFile(l.path, copyPath)
        case _ =>
      }
    }
    frame
  }

  /** Drops `tableName` if it exists; safe to call for absent tables. */
  protected def dropTable(tableName: String): Unit = {
    sql(s"DROP TABLE IF EXISTS $tableName")
  }

  val sqlContext: SQLContext = TestQueryExecutor.INSTANCE.sqlContext

  val resourcesPath = TestQueryExecutor.resourcesPath
}
object QueryTest {

  // Java-friendly entry point: returns the error message on mismatch, or null on success.
  def checkAnswer(df: DataFrame, expectedAnswer: java.util.List[Row]): String = {
    checkAnswer(df, expectedAnswer.toSeq) match {
      case Some(errorMessage) => errorMessage
      case None => null
    }
  }

  // NOTE(review): this import appears unused in this object — candidate for removal.
  import java.text.DecimalFormat

  /**
   * Runs the plan and makes sure the answer matches the expected result.
   * If there was exception during the execution or the contents of the DataFrame does not
   * match the expected result, an error message will be returned. Otherwise, a [[None]] will
   * be returned.
   * @param df the [[DataFrame]] to be executed
   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
   */
  def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row]): Option[String] = {
    // If the plan sorts its output, row order is significant and must not be normalized away.
    val isSorted = df.logicalPlan.collect { case s: logical.Sort => s }.nonEmpty
    def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
      // Converts data to types that we can do equality comparison using Scala collections.
      // For BigDecimal type, the Scala type has a better definition of equality test (similar to
      // Java's java.math.BigDecimal.compareTo).
      // For binary arrays, we convert it to Seq to avoid of calling java.util.Arrays.equals for
      // equality test.
      val converted: Seq[Row] = answer.map { s =>
        Row.fromSeq(s.toSeq.map {
          case d: java.math.BigDecimal => BigDecimal(d)
          case b: Array[Byte] => b.toSeq
          // Finite doubles are rounded up at the 5th decimal place so that carbon and hive
          // results only need to agree to that precision; NaN/Infinity pass through.
          case d : Double =>
            if (!d.isInfinite && !d.isNaN) {
              var bd = BigDecimal(d)
              bd = bd.setScale(5, BigDecimal.RoundingMode.UP)
              bd.doubleValue()
            }
            else {
              d
            }
          case o => o
        })
      }
      // Unsorted results are compared order-insensitively by sorting on string form.
      if (!isSorted) converted.sortBy(_.toString()) else converted
    }
    val sparkAnswer = try df.collect().toSeq catch {
      case e: Exception =>
        val errorMessage =
          s"""
             |Exception thrown while executing query:
             |== Exception ==
             |$e
             |${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
          """.stripMargin
        return Some(errorMessage)
    }
    if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
      val errorMessage =
        s"""
           |Results do not match for query:
           |== Results ==
           |${
          sideBySide(
            s"== Correct Answer - ${expectedAnswer.size} ==" +:
              prepareAnswer(expectedAnswer).map(_.toString()),
            s"== Spark Answer - ${sparkAnswer.size} ==" +:
              prepareAnswer(sparkAnswer).map(_.toString())).mkString("\n")
        }
      """.stripMargin
      return Some(errorMessage)
    }
    return None
  }
}
| HuaweiBigData/carbondata | integration/spark-common-cluster-test/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala | Scala | apache-2.0 | 7,899 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.commons.source
import com.twitter.chill.Externalizer
import com.twitter.scalding._
import com.twitter.bijection.Injection
/**
* Source used to write some type T into an LZO-compressed SequenceFile using a
* codec on T for serialization.
*/
object LzoCodecSource {
  // Builds an anonymous LzoCodec[T] over the given paths. Serialization of T is delegated
  // to the implicitly supplied Injection, wrapped in an Externalizer so the instance stays
  // serializable for job submission.
  def apply[T](paths: String*)(implicit passedInjection: Injection[T, Array[Byte]]) =
    new LzoCodec[T] {
      val hdfsPaths = paths
      // Local mode supports exactly one input file; fail fast otherwise.
      val localPath = { assert(paths.size == 1, "Cannot use multiple input files on local mode"); paths(0) }
      val boxed = Externalizer(passedInjection)
      override def injection = boxed.get
    }
}
| lucamilanesio/scalding | scalding-commons/src/main/scala/com/twitter/scalding/commons/source/LzoCodecSource.scala | Scala | apache-2.0 | 1,198 |
object Main { // neg compiler test (t3604): mismatched XML tags below must be rejected; do not "fix"
  <div>
  <abbr></div>
  { "..." }
  </div>
}
| yusuke2255/dotty | tests/untried/neg/t3604.scala | Scala | bsd-3-clause | 64 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.functions.UserDefinedFunction
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecHashAggregate
import org.apache.flink.table.planner.plan.nodes.exec.{InputProperty, ExecNode}
import org.apache.flink.table.planner.plan.utils.RelExplainUtil
import org.apache.calcite.plan.{RelOptCluster, RelOptRule, RelTraitSet}
import org.apache.calcite.rel.RelDistribution.Type
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.{RelNode, RelWriter}
import org.apache.calcite.util.ImmutableIntList
import java.util
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for local hash-based aggregate operator.
*
* @see [[BatchPhysicalGroupAggregateBase]] for more info.
*/
class BatchPhysicalLocalHashAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    outputRowType: RelDataType,
    inputRowType: RelDataType,
    grouping: Array[Int],
    auxGrouping: Array[Int],
    aggCallToAggFunction: Seq[(AggregateCall, UserDefinedFunction)])
  // Local (pre-shuffle) stage: neither merging upstream partials nor producing final results.
  extends BatchPhysicalHashAggregateBase(
    cluster,
    traitSet,
    inputRel,
    outputRowType,
    grouping,
    auxGrouping,
    aggCallToAggFunction,
    isMerge = false,
    isFinal = false) {

  // Standard Calcite copy: rebuild this node with new traits/inputs, keeping all agg metadata.
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
    new BatchPhysicalLocalHashAggregate(
      cluster,
      traitSet,
      inputs.get(0),
      outputRowType,
      inputRowType,
      grouping,
      auxGrouping,
      aggCallToAggFunction)
  }

  // Adds groupBy/auxGrouping/select items to the plan digest (fields resolved against the
  // input row type; grouping items are omitted when empty).
  override def explainTerms(pw: RelWriter): RelWriter = {
    super.explainTerms(pw)
      .itemIf("groupBy", RelExplainUtil.fieldToString(grouping, inputRowType), grouping.nonEmpty)
      .itemIf("auxGrouping", RelExplainUtil.fieldToString(auxGrouping, inputRowType),
        auxGrouping.nonEmpty)
      .item("select", RelExplainUtil.groupAggregationToString(
        inputRowType,
        outputRowType,
        grouping,
        auxGrouping,
        aggCallToAggFunction,
        isMerge = false,
        isGlobal = false))
  }

  // Tries to push a required HASH/RANGE distribution below this local aggregate.
  // Only possible when every required key is a group key (index < grouping.length);
  // keys are then remapped from output positions to input positions via `grouping`.
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    // Does not to try to satisfy requirement by localAgg's input if enforce to use two-stage agg.
    if (isEnforceTwoStageAgg) {
      return None
    }
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val canSatisfy = requiredDistribution.getType match {
      case Type.HASH_DISTRIBUTED | Type.RANGE_DISTRIBUTED =>
        val groupCount = grouping.length
        // Cannot satisfy distribution if keys are not group keys of agg
        requiredDistribution.getKeys.forall(_ < groupCount)
      case _ => false
    }
    if (!canSatisfy) {
      return None
    }
    val keys = requiredDistribution.getKeys.map(grouping(_))
    val inputRequiredDistributionKeys = ImmutableIntList.of(keys: _*)
    val inputRequiredDistribution = requiredDistribution.getType match {
      case Type.HASH_DISTRIBUTED =>
        FlinkRelDistribution.hash(inputRequiredDistributionKeys, requiredDistribution.requireStrict)
      case Type.RANGE_DISTRIBUTED => FlinkRelDistribution.range(inputRequiredDistributionKeys)
    }
    val inputRequiredTraits = input.getTraitSet.replace(inputRequiredDistribution)
    val newInput = RelOptRule.convert(getInput, inputRequiredTraits)
    val providedTraits = getTraitSet.replace(requiredDistribution)
    Some(copy(providedTraits, Seq(newInput)))
  }

  // Lowers this planner node to the exec-graph operator; isMerge/isFinal are hard-coded
  // false because this node is always the local stage.
  override def translateToExecNode(): ExecNode[_] = {
    new BatchExecHashAggregate(
      grouping,
      auxGrouping,
      getAggCallList.toArray,
      FlinkTypeFactory.toLogicalRowType(inputRowType),
      false, // isMerge is always false
      false, // isFinal is always false
      getInputProperty,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }

  // With no group keys the aggregate can only emit after consuming all input,
  // so the input edge is marked as blocking (END_INPUT dam).
  private def getInputProperty: InputProperty = {
    if (grouping.length == 0) {
      InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build()
    } else {
      InputProperty.DEFAULT
    }
  }
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalLocalHashAggregate.scala | Scala | apache-2.0 | 5,216 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.lib
import scala.reflect.ClassTag
import org.apache.spark.graphx._
/** Strongly connected components algorithm implementation. */
object StronglyConnectedComponents {
  /**
   * Compute the strongly connected component (SCC) of each vertex and return a graph with the
   * vertex value containing the lowest vertex id in the SCC containing that vertex.
   *
   * @tparam VD the vertex attribute type (discarded in the computation)
   * @tparam ED the edge attribute type (preserved in the computation)
   *
   * @param graph the graph for which to compute the SCC
   * @param numIter the maximum number of outer iterations to run
   * @return a graph with vertex attributes containing the smallest vertex id in each SCC
   */
  def run[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED], numIter: Int): Graph[VertexId, ED] = {
    // the graph we update with final SCC ids, and the graph we return at the end
    var sccGraph = graph.mapVertices { case (vid, _) => vid }
    // graph we are going to work with in our iterations;
    // vertex value is (candidate SCC id, isFinal flag)
    var sccWorkGraph = graph.mapVertices { case (vid, _) => (vid, false) }.cache()
    var numVertices = sccWorkGraph.numVertices
    var iter = 0
    while (sccWorkGraph.numVertices > 0 && iter < numIter) {
      iter += 1
      do {
        numVertices = sccWorkGraph.numVertices
        // A vertex with no out-edges or no in-edges is its own SCC: mark it final.
        sccWorkGraph = sccWorkGraph.outerJoinVertices(sccWorkGraph.outDegrees) {
          (vid, data, degreeOpt) => if (degreeOpt.isDefined) data else (vid, true)
        }.outerJoinVertices(sccWorkGraph.inDegrees) {
          (vid, data, degreeOpt) => if (degreeOpt.isDefined) data else (vid, true)
        }.cache()
        // get all vertices to be removed
        val finalVertices = sccWorkGraph.vertices
          .filter { case (vid, (scc, isFinal)) => isFinal}
          .mapValues { (vid, data) => data._1}
        // write values to sccGraph
        sccGraph = sccGraph.outerJoinVertices(finalVertices) {
          (vid, scc, opt) => opt.getOrElse(scc)
        }
        // only keep vertices that are not final
        sccWorkGraph = sccWorkGraph.subgraph(vpred = (vid, data) => !data._2).cache()
        // repeat while trimming keeps removing vertices
      } while (sccWorkGraph.numVertices < numVertices)
      // reset candidate SCC ids (colors) to own vertex id, keep the isFinal flag
      sccWorkGraph = sccWorkGraph.mapVertices{ case (vid, (color, isFinal)) => (vid, isFinal) }
      // collect min of all my neighbor's scc values, update if it's smaller than mine
      // then notify any neighbors with scc values larger than mine
      sccWorkGraph = Pregel[(VertexId, Boolean), ED, VertexId](
        sccWorkGraph, Long.MaxValue, activeDirection = EdgeDirection.Out)(
        (vid, myScc, neighborScc) => (math.min(myScc._1, neighborScc), myScc._2),
        e => {
          if (e.srcAttr._1 < e.dstAttr._1) {
            Iterator((e.dstId, e.srcAttr._1))
          } else {
            Iterator()
          }
        },
        (vid1, vid2) => math.min(vid1, vid2))
      // start at root of SCCs. Traverse values in reverse, notify all my neighbors
      // do not propagate if colors do not match!
      sccWorkGraph = Pregel[(VertexId, Boolean), ED, Boolean](
        sccWorkGraph, false, activeDirection = EdgeDirection.In)(
        // vertex is final if it is the root of a color
        // or it has the same color as a neighbor that is final
        (vid, myScc, existsSameColorFinalNeighbor) => {
          val isColorRoot = vid == myScc._1
          (myScc._1, myScc._2 || isColorRoot || existsSameColorFinalNeighbor)
        },
        // activate neighbor if they are not final, you are, and you have the same color
        e => {
          val sameColor = e.dstAttr._1 == e.srcAttr._1
          val onlyDstIsFinal = e.dstAttr._2 && !e.srcAttr._2
          if (sameColor && onlyDstIsFinal) {
            Iterator((e.srcId, e.dstAttr._2))
          } else {
            Iterator()
          }
        },
        (final1, final2) => final1 || final2)
    }
    sccGraph
  }
}
| tophua/spark1.52 | graphx/src/main/scala/org/apache/spark/graphx/lib/StronglyConnectedComponents.scala | Scala | apache-2.0 | 5,673 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.anonymization
import akka.util.ByteString
import se.nimsa.dicom.data.{VR, _}
import se.nimsa.sbx.dicom.DicomUtil.toAsciiBytes
import scala.util.Random
object AnonymizationUtil {

  /** Builds an anonymous patient display name from optional sex and age strings,
    * substituting placeholder text when a value is absent or empty. */
  def createAnonymousPatientName(sex: Option[String], age: Option[String]): String = {
    def orUnknown(value: Option[String], fallback: String): String = value match {
      case Some(v) if v.nonEmpty => v
      case _ => fallback
    }
    s"Anonymous ${orUnknown(sex, "<unknown sex>")} ${orUnknown(age, "<unknown age>")}"
  }

  /** Generates a fresh 16-digit random accession number encoded as an ASCII SH value. */
  def createAccessionNumber(): ByteString = {
    val rand = new Random()
    val newNumber = Seq.fill(16)(rand.nextInt(10)).mkString
    toAsciiBytes(newNumber, VR.SH)
  }

  /** Generates a fresh UID encoded as an ASCII UI value. */
  def createUid(): ByteString = toAsciiBytes(createUID(), VR.UI)
}
| slicebox/slicebox | src/main/scala/se/nimsa/sbx/anonymization/AnonymizationUtil.scala | Scala | apache-2.0 | 1,339 |
package blended.testsupport.camel
import akka.actor.ActorRef
import akka.camel.CamelMessage
import akka.util.Timeout
import blended.testsupport.camel.protocol.{CheckAssertions, CheckResults}
import blended.util.logging.Logger
import scala.util.Try
import scala.collection.JavaConverters._
import scala.concurrent.Await
import akka.pattern.ask
@Deprecated
object MockAssertion {

  /** Sends the given assertions to the mock actor and blocks (up to `timeout`)
    * for the results; returns only the failures' underlying exceptions, so an
    * empty list means every assertion passed. */
  def checkAssertions(mock: ActorRef, assertions: MockAssertion*)(implicit timeout: Timeout) : List[Throwable] = {
    val f = (mock ? CheckAssertions(assertions)).mapTo[CheckResults]
    Await.result(f, timeout.duration).results.filter(_.isFailure).map(_.failed.get)
  }
}
@Deprecated
trait MockAssertion {
  // Assertion function: given all messages a mock actor has received, either
  // a Success with a human-readable confirmation or a Failure describing why
  // the assertion did not hold.
  def f : List[CamelMessage] => Try[String]
}
case class ExpectedMessageCount(count : Int) extends MockAssertion {
  /** Succeeds only when exactly `count` messages have been received. */
  override def f = l => Try {
    val actual = l.size
    if (actual != count) {
      throw new Exception(s"MockActor has [$actual] messages, but expected [$count] messages")
    }
    s"MockActor has [$actual] messages."
  }
}
case class MinMessageCount(count : Int) extends MockAssertion {
  /** Succeeds when at least `count` messages have been received. */
  override def f = l => Try {
    l.size match {
      case n if n >= count => s"MockActor has [$n] messages"
      case n =>
        throw new Exception(s"MockActor has [$n] messages, but expected at least [$count] messages")
    }
  }
}
case class ExpectedBodies(bodies: Any*) extends MockAssertion {
  /** Checks received message bodies against the expected ones.
    *
    * With a single expected body, every received message must carry that body;
    * otherwise the received bodies must match the expected ones pairwise, in
    * order. Byte arrays are compared element-wise rather than by reference.
    */
  override def f = l => {

    // Element-wise comparison for byte arrays; universal equality otherwise.
    def bodiesEqual(expected: Any, actual: Any): Boolean = (expected, actual) match {
      case (e: Array[Byte], a: Array[Byte]) => e.toList.equals(a.toList)
      case _ => expected.equals(actual)
    }

    // Uses a Seq of pairs instead of a Map: the previous Map-based version was
    // keyed by the expected body, so duplicate expected bodies collapsed and
    // the single-body mode effectively checked only the last message.
    def compareBodies(matchList: Seq[(Any, Any)]) : Try[String] = Try {
      matchList.filterNot { case (expected, actual) => bodiesEqual(expected, actual) } match {
        case e if e.isEmpty => "MockActor has received the correct bodies"
        case l =>
          val msg = l.map { case (e, a) => s"[$e != $a]"}.mkString(",")
          throw new Exception(s"Unexpected Bodies: $msg")
      }
    }

    if (bodies.length == 1)
      // each received message must match the single expected body
      compareBodies(l.map(m => (bodies.head, m.body)))
    else
      l.size match {
        case n if n == bodies.length =>
          compareBodies(bodies.toList.zip(l.map { _.body }))
        case _ =>
          // Returned as a Failure rather than thrown so the count mismatch is
          // reported through the Try like every other assertion failure.
          scala.util.Failure(new Exception(s"The number of messages received [${l.size}] does not match the number of bodies [${bodies.length}]"))
      }
  }
}
case class MandatoryHeaders(header: List[String]) extends MockAssertion {
  /** Verifies that every received message carries all of the given headers;
    * fails with the list of missing header names on the first offending message. */
  override def f = l => Try {
    // foreach instead of filter: the original used filter purely for its
    // throwing side effect (always returning true); foreach states the intent.
    l.foreach { m =>
      val missing = header.filter { h => m.headers.get(h).isEmpty }
      if (missing.nonEmpty) {
        throw new Exception(s"Missing headers ${missing.mkString("[", ",", "]")}")
      }
    }
    "Mandatory header present"
  }
}
// Verifies received messages' headers: with a single header map, every message
// must contain those header/value pairs; otherwise messages are matched
// pairwise against the given maps.
case class ExpectedHeaders(headers : Map[String, Any]*) extends MockAssertion {

  private[this] val log = Logger[ExpectedHeaders]

  private[this] def extractHeader (m : CamelMessage) : Map[String, Any] = m.getHeaders.asScala.toMap

  override def f: List[CamelMessage] => Try[String] = l => Try {

    // Returns the subset of `expected` entries that are absent from or differ
    // in the message's headers. NOTE(review): logs at info on every call, and
    // is invoked twice per mismatching message (once in the filter, once when
    // building the error message) -- so mismatches are logged twice.
    def misMatchedHeaders(m : CamelMessage, expected: Map[String, Any]) : Map[String, Any] = {
      log.info(s"Checking headers ${extractHeader(m)}, expected: [$expected]")
      expected.filter { case (k, v) =>
        !m.headers.contains(k) || m.headers(k) != v
      }
    }

    def compareHeaders(matchList: Map[CamelMessage, Map[String, Any]]) : Try[String] = Try {
      matchList.filter { case (m, headers) => misMatchedHeaders(m, headers).nonEmpty } match {
        case e if e.isEmpty => s"MockActor has received the correct headers"
        case l =>
          val msg = l.map { case (m, h) =>
            val headerMsg = misMatchedHeaders(m, h).mkString(",")
            s"Message [$m] did not have headers [$headerMsg]"
          }.mkString("\\n")
          throw new Exception(msg)
      }
    }

    // single header map: check it against every message
    if (headers.length == 1)
      compareHeaders(l.map(m => (m, headers(0))).toMap).get
    else l.size match {
      case n if n == headers.length =>
        // pairwise: i-th message against i-th header map
        compareHeaders(l.zip(headers.toList).toMap).get
      case _ => throw new Exception(s"The number of messages received [${l.size}] does not match the number of header maps [${headers.length}]")
    }
  }
}
| lefou/blended | blended.testsupport/src/main/scala/blended/testsupport/camel/MockAssertion.scala | Scala | apache-2.0 | 4,252 |
package com.romankagan.languages.classroomanalysis
/**
 * Placeholder entry point for the classroom-analysis exercise; contains no
 * logic yet.
 *
 * Created by roman on 5/6/15.
 */
object CommonCourses extends App{
}
| kagan770/talentbuddy | src/com/romankagan/languages/classroomanalysis/CommonCourses.scala | Scala | apache-2.0 | 128 |
package org.http4s
import scala.util.control.{NoStackTrace, NonFatal}
import cats._
import cats.data._
import cats.implicits._
import fs2._
/** Indicates a failure to handle an HTTP [[Message]]. */
sealed abstract class MessageFailure extends RuntimeException {

  /** Provides a message appropriate for logging. */
  def message: String

  /* Overridden so logging and stack traces show [[message]] instead of null. */
  final override def getMessage: String =
    message

  /** Provides a default rendering of this failure as a [[Response]]. */
  def toHttpResponse(httpVersion: HttpVersion): Task[Response]
}
/**
 * Indicates an error parsing an HTTP [[Message]].
 */
// NoStackTrace: parse failures are routine, so stack-trace capture is skipped.
sealed abstract class ParsingFailure extends MessageFailure with NoStackTrace
/**
 * Indicates an error parsing an HTTP [[Message]].
 *
 * @param sanitized May safely be displayed to a client to describe an error
 *                  condition. Should not echo any part of a Request.
 * @param details   Contains any relevant details omitted from the sanitized
 *                  version of the error. This may freely echo a Request.
 */
final case class ParseFailure(sanitized: String, details: String) extends ParsingFailure {
  /** Combines both descriptions, dropping whichever part is empty. */
  def message: String =
    (sanitized.isEmpty, details.isEmpty) match {
      case (true, _)      => details
      case (_, true)      => sanitized
      case (false, false) => s"$sanitized: $details"
    }

  /** Renders as a 400 Bad Request carrying only the sanitized description. */
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    Response(Status.BadRequest, httpVersion).withBody(sanitized)
}
/** Generic description of a failure to parse an HTTP [[Message]], with a
  * caller-supplied response rendering. */
final case class GenericParsingFailure(sanitized: String, details: String, response: HttpVersion => Task[Response]) extends ParsingFailure {
  // reuse ParseFailure's empty-aware message formatting
  def message: String =
    ParseFailure(sanitized, details).message

  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
object ParseFailure {
  // Explicit result type: implicit definitions should always be annotated
  // (avoids inference surprises in Scala 2; mandatory in Scala 3).
  implicit val eq: Eq[ParseFailure] = Eq.fromUniversalEquals[ParseFailure]
}
object ParseResult {
  /** Builds a failed result from sanitized/detailed descriptions. */
  def fail(sanitized: String, details: String): ParseResult[Nothing] =
    Either.left(ParseFailure(sanitized, details))

  /** Builds a successful result. */
  def success[A](a: A): ParseResult[A] =
    Either.right(a)

  /** Evaluates `f`, converting any non-fatal exception into a failed result
    * whose details are the exception's message. Fatal errors still propagate. */
  def fromTryCatchNonFatal[A](sanitized: String)(f: => A): ParseResult[A] =
    try ParseResult.success(f)
    catch {
      case NonFatal(e) => Either.left(ParseFailure(sanitized, e.getMessage))
    }

  // MonadError instance delegating to cats' standard Either instances.
  implicit val parseResultMonad: MonadError[ParseResult, ParseFailure] = catsStdInstancesForEither[ParseFailure]
//  implicit class ParseResultOps[A](parseResult: ParseResult[A])
//    extends catsStdInstancesForEither[ParseFailure]
}
/** Indicates a problem decoding a [[Message]]. This may either be a problem with
  * the entity headers or with the entity itself. */
sealed abstract class DecodeFailure extends MessageFailure
/** Generic description of a failure to decode a [[Message]], with a
  * caller-supplied response rendering. */
final case class GenericDecodeFailure(message: String, response: HttpVersion => Task[Response]) extends DecodeFailure {
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
/** Indicates a problem decoding a [[Message]] body. */
sealed abstract class MessageBodyFailure extends DecodeFailure {

  /** Optional underlying exception; None by default. */
  def cause: Option[Throwable] = None

  // expose the underlying cause through the standard Throwable API
  override def getCause: Throwable =
    cause.orNull
}
/** Generic description of a failure to handle a [[Message]] body, with a
  * caller-supplied response rendering. */
final case class GenericMessageBodyFailure(message: String,
                                           override val cause: Option[Throwable],
                                           response: HttpVersion => Task[Response]) extends MessageBodyFailure {
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
/** Indicates a syntactic error decoding the body of an HTTP [[Message]].
  * Renders as a 400 Bad Request by default. */
sealed case class MalformedMessageBodyFailure(details: String, override val cause: Option[Throwable] = None) extends MessageBodyFailure {
  def message: String =
    s"Malformed message body: $details"

  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    // plain literal: the former s"..." interpolator interpolated nothing
    Response(Status.BadRequest, httpVersion).withBody("The request body was malformed.")
}
/** Indicates a semantic error decoding the body of an HTTP [[Message]].
  * Renders as a 422 Unprocessable Entity by default. */
sealed case class InvalidMessageBodyFailure(details: String, override val cause: Option[Throwable] = None) extends MessageBodyFailure {
  def message: String =
    s"Invalid message body: $details"

  override def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    // plain literal: the former s"..." interpolator interpolated nothing
    Response(Status.UnprocessableEntity, httpVersion).withBody("The request body was invalid.")
}
/** Indicates that a [[Message]] came with no supported [[MediaType]].
  * Renders as a 415 Unsupported Media Type listing the accepted ranges. */
sealed abstract class UnsupportedMediaTypeFailure(expected: Set[MediaRange]) extends DecodeFailure with NoStackTrace {
  /** Leading sentence of the response body; must not echo request content. */
  def sanitizedResponsePrefix: String

  val expectedMsg: String = s"Expected one of the following media ranges: ${expected.map(_.renderString).mkString(", ")}"
  val responseMsg: String = s"$sanitizedResponsePrefix. $expectedMsg"

  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    Response(Status.UnsupportedMediaType, httpVersion)
      .withBody(responseMsg)
}
/** Indicates that a [[Message]] attempting to be decoded has no [[MediaType]] and no
  * [[EntityDecoder]] was lenient enough to accept it. */
final case class MediaTypeMissing(expected: Set[MediaRange])
  extends UnsupportedMediaTypeFailure(expected)
{
  def sanitizedResponsePrefix: String = "No media type specified in Content-Type header"
  // safe to log: contains no request content
  val message: String = responseMsg
}
/** Indicates that no [[EntityDecoder]] matches the [[MediaType]] of the [[Message]] being decoded */
final case class MediaTypeMismatch(messageType: MediaType, expected: Set[MediaRange])
  extends UnsupportedMediaTypeFailure(expected)
{
  def sanitizedResponsePrefix: String = "Media type supplied in Content-Type header is not supported"
  // includes the offending media type, so intended for logs rather than clients
  def message: String = s"${messageType.renderString} is not a supported media type. $expectedMsg"
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/MessageFailure.scala | Scala | apache-2.0 | 5,963 |
package breeze.stats.distributions
import runtime.ScalaRunTime
import breeze.numerics.{log, round, log1p, expm1}
/**
 * The Logarithmic distribution with parameter p, supported on {1, 2, 3, ...}.
 *
 * http://en.wikipedia.org/wiki/Logarithmic_distribution
 * @author dlwh
 */
case class Logarthmic(p: Double)(implicit rand: RandBasis=Rand) extends DiscreteDistr[Int] with Moments[Double, Double] {
  require(p >= 0)
  require(p <= 1)
  // from Efficient Generation of Logarithmically Distributed Pseudo-Random Variables
  // h = ln(1 - p), precomputed for the sampler below
  private val h = log1p(-p)

  // Rejection-style sampler per the paper cited above: most draws short-circuit
  // to 1 or 2; the expensive log computation only happens when u2 < q*q.
  def draw() = {
    val u2 = rand.uniform.draw()
    if(u2 > p) {
      1
    } else {
      val u1 = rand.uniform.draw()
      val q = -expm1(u1 * h)
      if(u2 < q * q) {
        round(1.0 + log(u2)/log(q)).toInt
      } else if (u2 > q) {
        1
      } else {
        2
      }
    }
  }

  // pmf: P(X = x) = -1/ln(1-p) * p^x / x
  def probabilityOf(x: Int) = {
    -1.0/log1p(-p) * math.pow(p,x)/x
  }

  def mean = -1.0/log1p(-p) * (p/(1-p))

  // Var = -p * (p + ln(1-p)) / ((1-p)^2 * ln(1-p)^2)
  def variance = {
    val l1p = log1p(-p)
    val onemp = 1 - p
    val denompart = onemp * l1p
    -p * (p + l1p)/(denompart * denompart)
  }

  // the mode of the logarithmic distribution is always 1
  def mode = 1

  // not implemented: calling this throws scala.NotImplementedError
  def entropy = ???

  override def toString() = ScalaRunTime._toString(this)
}
| wavelets/breeze | src/main/scala/breeze/stats/distributions/Logarthmic.scala | Scala | apache-2.0 | 1,164 |
package com.socrata.soda.clients.datacoordinator
import scala.collection.JavaConverters._
import com.rojoma.json.v3.util.JsonUtil
import com.rojoma.json.v3.ast._
import com.rojoma.json.v3.codec.JsonEncode
import com.socrata.http.server.{HttpRequest, ParsedParam, UnparsableParam}
/** A single instruction in a data-coordinator row-update script; rendered as JSON. */
sealed abstract class RowUpdate extends DataCoordinatorInstruction {
  override def toString = JsonUtil.renderJson(asJson)
}
/** Inserts or updates a row; rendered as a JSON object of column -> value. */
case class UpsertRow(rowData: Map[String, JValue]) extends RowUpdate {
  def asJson = JObject(rowData)
}
/** Deletes a row by id; rendered as a single-element JSON array. */
case class DeleteRow(rowId: JValue) extends RowUpdate {
  def asJson = JArray(Seq(rowId))
}
// NOTE: If this class is changed, you may want to consider making similar changes
// to RowUpdateOption.java of the soda-java project
/** Options controlling a row-data update: truncate-first, merge vs. replace,
  * and how row-level errors are treated. */
case class RowUpdateOption(truncate: Boolean,
                           mergeInsteadOfReplace: Boolean,
                           errorPolicy: RowUpdateOption.ErrorPolicy)
  extends RowUpdate {
  def asJson = {
    val updateMode = if (mergeInsteadOfReplace) JString("merge") else JString("replace")
    // NoRowErrorsAreFatal -> fatal_row_errors:false; otherwise list the
    // non-fatal error codes under nonfatal_row_errors
    val errorField = errorPolicy.fold("fatal_row_errors" -> JsonEncode.toJValue(false)) { nonFatalRowErrors =>
      "nonfatal_row_errors" -> JsonEncode.toJValue(nonFatalRowErrors)
    }
    JObject(Map(
      "c" -> JString("row data"),
      "truncate" -> JBoolean(truncate),
      "update" -> updateMode
    ) + errorField)
  }
}
// Note: nonfatal_row_errors = [] implies fatal_row_errors = true unless otherwise set
object RowUpdateOption {
  // no truncation, merge semantics, all row errors fatal
  val default = RowUpdateOption(
    truncate = false,
    mergeInsteadOfReplace = true,
    errorPolicy = NonFatalRowErrors(Nil)
  )

  /** How row-level errors are handled; fold selects by variant. */
  sealed abstract class ErrorPolicy {
    def fold[T](noFatal: T)(nonFatals: Seq[String] => T): T
  }
  /** Every row error is tolerated. */
  case object NoRowErrorsAreFatal extends ErrorPolicy {
    def fold[T](noFatal: T)(nonFatals: Seq[String] => T) = noFatal
  }
  /** Only the listed error codes are tolerated; empty list = all errors fatal. */
  case class NonFatalRowErrors(errors: Seq[String]) extends ErrorPolicy {
    def fold[T](noFatal: T)(nonFatals: Seq[String] => T) = nonFatals(errors)
  }

  /** Parses a RowUpdateOption from request query parameters; on an unparsable
    * parameter returns Left((paramName, rawValue)). */
  def fromReq(req: HttpRequest): Either[(String, String), RowUpdateOption] = {
    // boolean query parameter with a fallback when absent
    def boolParam(name: String, default: Boolean) = {
      req.parseQueryParameterAs[Boolean](name) match {
        case ParsedParam(Some(b)) => Right(b)
        case ParsedParam(None) => Right(default)
        case UnparsableParam(_, value) => Left((name, value))
      }
    }

    // nonFatalRowErrors[] entries take precedence over the errorsAreFatal flag
    def errorPolicyParam() = {
      val interesting = req.queryParametersSeq.collect {
        case ("nonFatalRowErrors[]", Some(value)) => value
      }
      if(interesting.nonEmpty) {
        Right(NonFatalRowErrors(interesting))
      } else {
        req.parseQueryParameterAs[Boolean]("errorsAreFatal") match {
          case ParsedParam(Some(true)) => Right(NonFatalRowErrors(Nil))
          case ParsedParam(Some(false)) => Right(NoRowErrorsAreFatal)
          case ParsedParam(None) => Right(default.errorPolicy)
          case UnparsableParam(name, value) => Left((name, value))
        }
      }
    }

    // .right projections: this code predates right-biased Either (Scala 2.12)
    for {
      truncate <- boolParam("truncate", default.truncate).right
      mergeInsteadOfReplace <- boolParam("mergeInsteadOfReplace", default.mergeInsteadOfReplace).right
      errorPolicy <- errorPolicyParam().right
    } yield {
      RowUpdateOption(
        truncate = truncate,
        mergeInsteadOfReplace = mergeInsteadOfReplace,
        errorPolicy = errorPolicy
      )
    }
  }
}
| socrata-platform/soda-fountain | soda-fountain-lib/src/main/scala/com/socrata/soda/clients/datacoordinator/RowUpdate.scala | Scala | apache-2.0 | 3,343 |
package org.joda.time
import java.util.Locale
/** Abstract base for a field of a datetime (e.g. year, month-of-year,
  * hour-of-day). Declarations only; concrete behavior lives in subclasses. */
abstract class DateTimeField {

  // --- identity ---
  def getType(): DateTimeFieldType

  def getName(): String

  def isSupported(): Boolean

  def isLenient(): Boolean

  // --- value extraction and text rendering ---
  def get(instant: Long): Int

  def getAsText(instant: Long, locale: Locale): String

  def getAsText(instant: Long): String

  def getAsText(partial: ReadablePartial,
                fieldValue: Int,
                locale: Locale): String

  def getAsText(partial: ReadablePartial, locale: Locale): String

  def getAsText(fieldValue: Int, locale: Locale): String

  def getAsShortText(instant: Long, locale: Locale): String

  def getAsShortText(instant: Long): String

  def getAsShortText(partial: ReadablePartial,
                     fieldValue: Int,
                     locale: Locale): String

  def getAsShortText(partial: ReadablePartial, locale: Locale): String

  def getAsShortText(fieldValue: Int, locale: Locale): String

  // --- arithmetic ---
  def add(instant: Long, value: Int): Long

  def add(instant: Long, value: Long): Long

  def add(instant: ReadablePartial,
          fieldIndex: Int,
          values: Array[Int],
          valueToAdd: Int): Array[Int]

  def addWrapPartial(instant: ReadablePartial,
                     fieldIndex: Int,
                     values: Array[Int],
                     valueToAdd: Int): Array[Int]

  def addWrapField(instant: Long, value: Int): Long

  def addWrapField(instant: ReadablePartial,
                   fieldIndex: Int,
                   values: Array[Int],
                   valueToAdd: Int): Array[Int]

  def getDifference(minuendInstant: Long, subtrahendInstant: Long): Int

  def getDifferenceAsLong(minuendInstant: Long, subtrahendInstant: Long): Long

  // --- setting values (numeric and from text) ---
  def set(instant: Long, value: Int): Long

  def set(instant: ReadablePartial,
          fieldIndex: Int,
          values: Array[Int],
          newValue: Int): Array[Int]

  def set(instant: Long, text: String, locale: Locale): Long

  def set(instant: Long, text: String): Long

  def set(instant: ReadablePartial,
          fieldIndex: Int,
          values: Array[Int],
          text: String,
          locale: Locale): Array[Int]

  // --- associated duration fields and leap handling ---
  def getDurationField(): DurationField

  def getRangeDurationField(): DurationField

  def isLeap(instant: Long): Boolean

  def getLeapAmount(instant: Long): Int

  def getLeapDurationField(): DurationField

  // --- value bounds and text lengths ---
  def getMinimumValue(): Int

  def getMinimumValue(instant: Long): Int

  def getMinimumValue(instant: ReadablePartial): Int

  def getMinimumValue(instant: ReadablePartial, values: Array[Int]): Int

  def getMaximumValue(): Int

  def getMaximumValue(instant: Long): Int

  def getMaximumValue(instant: ReadablePartial): Int

  def getMaximumValue(instant: ReadablePartial, values: Array[Int]): Int

  def getMaximumTextLength(locale: Locale): Int

  def getMaximumShortTextLength(locale: Locale): Int

  // --- rounding ---
  def roundFloor(instant: Long): Long

  def roundCeiling(instant: Long): Long

  def roundHalfFloor(instant: Long): Long

  def roundHalfCeiling(instant: Long): Long

  def roundHalfEven(instant: Long): Long

  def remainder(instant: Long): Long

  // declared abstract to force subclasses to provide a meaningful toString
  override def toString(): String
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/DateTimeField.scala | Scala | bsd-2-clause | 3,152 |
package com.arcusys.valamis.content.service
import com.arcusys.valamis.content.exceptions.NoQuestionException
import com.arcusys.valamis.content.model._
import com.arcusys.valamis.content.storage._
import com.arcusys.valamis.persistence.common.DatabaseLayer
import slick.dbio.DBIO
import scala.concurrent.ExecutionContext.Implicits.global
/** CRUD and organization operations for questions and their answers. */
trait QuestionService {
  /** Question with its answers assembled as a tree node; throws NoQuestionException if absent. */
  def getQuestionNodeById(id: Long): QuestionNode

  /** Question by id; throws NoQuestionException if absent. */
  def getById(id: Long): Question

  /** All answers belonging to the question. */
  def getAnswers(id: Long): Seq[Answer]

  /** Question together with its answers; throws NoQuestionException if absent. */
  def getWithAnswers(id: Long): (Question, Seq[Answer])

  /** Persists a question and its answers; returns the created question. */
  def create(question: Question, answers: Seq[Answer]): Question

  /** Persists a question under the given category; returns the created question. */
  def createWithNewCategory(question: Question, answers: Seq[Answer], categoryId: Option[Long]): Question

  /** Updates a question, replacing all of its answers. */
  def update(question: Question, answers: Seq[Answer]): Unit

  // DBIO variant for composing into larger transactions
  private[content] def copyByCategoryAction(categoryId: Option[Long], newCategoryId: Option[Long], courseId: Long): DBIO[Seq[Question]]

  /** Copies every question of a category (with answers) into another category. */
  def copyByCategory(categoryId: Option[Long], newCategoryId: Option[Long], courseId: Long): Seq[Question]

  /** Questions directly under a category (None = course root). */
  def getByCategory(categoryId: Option[Long], courseId: Long): Seq[Question]

  /** Moves a question to another category (None = course root). */
  def moveToCategory(id: Long, newCategoryId: Option[Long], courseId: Long)

  /** Moves a question (and its answers) to another course. */
  def moveToCourse(id: Long, courseId: Long, moveToRoot: Boolean)

  // DBIO variant for composing into larger transactions
  private[content] def moveToCourseAction(id: Long, courseId: Long, moveToRoot: Boolean): DBIO[Int]

  /** Deletes the question. */
  def delete(id: Long): Unit
}
/** Default [[QuestionService]] implementation on top of Slick storages.
  * Concrete subclasses wire in the storage and database-layer dependencies. */
abstract class QuestionServiceImpl extends QuestionService {

  def categoryStorage: CategoryStorage
  def questionStorage: QuestionStorage
  def answerStorage: AnswerStorage
  def dbLayer: DatabaseLayer

  import DatabaseLayer._

  override def getById(id: Long): Question = {
    dbLayer.execSync(questionStorage.getById(id)).getOrElse(throw new NoQuestionException(id))
  }

  override def getWithAnswers(id: Long): (Question, Seq[Answer]) = {
    //TODO use joins in getWithAnswers
    // two separate round-trips: question first, then its answers
    val question = dbLayer.execSync(questionStorage.getById(id)).getOrElse(throw new NoQuestionException(id))
    (question, dbLayer.execSync(answerStorage.getByQuestion(id)))
  }

  override def getAnswers(id: Long): Seq[Answer] = dbLayer.execSync(answerStorage.getByQuestion(id))

  override def getQuestionNodeById(questionId: Long): QuestionNode =
    dbLayer.execSync(questionStorage.getById(questionId)).fold(throw new NoQuestionException(questionId)) { q =>
      new TreeBuilder(qId => dbLayer.execSync(answerStorage.getByQuestion(qId))).getQuestionNode(q)
    }

  override def delete(id: Long): Unit = dbLayer.execSync {
    questionStorage.delete(id)
  }

  // Builds the DBIO that (optionally) clears a question's existing answers and
  // inserts the given ones. For positioning questions, answers are numbered
  // 1..n in the order given. Assumes question.id is defined.
  private def createAnswers(question: Question, answers: Seq[Answer], withReplace: Boolean = false) = {
    val deleteAction = if (withReplace) {
      Seq(answerStorage.deleteByQuestion(question.id.get))
    } else {
      Seq()
    }

    val createActions = question.questionType match {
      case QuestionType.Positioning =>
        var pos = 0
        answers.map { answer =>
          pos += 1
          answerStorage.create(question.id.get, answer.asInstanceOf[AnswerText].copy(position = pos))
        }
      case _ =>
        answers.map { answer =>
          answerStorage.create(question.id.get, answer)
        }
    }
    sequence(deleteAction ++ createActions)
  }

  // create the question row, then its answers, as one DBIO
  private def buildCreateAction(question: Question, answers: Seq[Answer]) = {
    for {
      created <- questionStorage.create(question)
      _ <- createAnswers(created, answers)
    } yield created
  }

  override def create(question: Question, answers: Seq[Answer]): Question = {
    //TODO QuestionService.create return with answers
    dbLayer.execSyncInTransaction(buildCreateAction(question, answers))
  }

  override def createWithNewCategory(question: Question, answers: Seq[Answer], categoryId: Option[Long]): Question =
    dbLayer.execSyncInTransaction {
      for {
        created <- questionStorage.createWithCategory(question, categoryId)
        _ <- createAnswers(created, answers)
      } yield created
    }

  override def update(question: Question, answers: Seq[Answer]): Unit = dbLayer.execSyncInTransaction {
    // answers are fully replaced on every update
    questionStorage.update(question) >> createAnswers(question, answers, withReplace = true)
  }

  // copy helper: Question is a sealed family without a shared categoryId field,
  // so each concrete case must be copied explicitly
  private def changeQuestionCategoryId(question: Question, newCategoryId: Option[Long]): Question = {
    question match {
      case q: ChoiceQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: TextQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: PositioningQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: NumericQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: MatchingQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: CategorizationQuestion =>
        q.copy(categoryId = newCategoryId)
      case q: EssayQuestion =>
        q.copy(categoryId = newCategoryId)
    }
  }

  //TODO check copyByCategory in QuestionService
  // duplicates each question (and its answers) of the source category into the target
  def copyByCategoryAction(categoryId: Option[Long], newCategoryId: Option[Long], courseId: Long): DBIO[Seq[Question]] =
    for {
      questions <- questionStorage.getByCategory(categoryId, courseId)
      results <- sequence(questions.map { q =>
        for {
          answers <- answerStorage.getByQuestion(q.id.get)
          created <- buildCreateAction(changeQuestionCategoryId(q, newCategoryId), answers)
        } yield created
      })
    } yield results

  override def copyByCategory(categoryId: Option[Long], newCategoryId: Option[Long], courseId: Long): Seq[Question] =
    dbLayer.execSyncInTransaction {
      copyByCategoryAction(categoryId, newCategoryId, courseId)
    }

  override def moveToCourseAction(id: Long, courseId: Long, moveToRoot: Boolean): DBIO[Int] = {
    // move the question first, then its answers
    questionStorage.moveToCourse(id, courseId, moveToRoot) andThen
      answerStorage.moveToCourseByQuestionId(id, courseId)
  }

  override def moveToCourse(id: Long, courseId: Long, moveToRoot: Boolean): Unit =
    dbLayer.execSyncInTransaction(moveToCourseAction(id, courseId, moveToRoot))

  override def moveToCategory(id: Long, newCategoryId: Option[Long], courseId: Long): Unit = dbLayer.execSync {
    if (newCategoryId.isDefined) {
      // adopt the target category's course; fall back to the given courseId
      // if the category does not exist
      for {
        newCourseId <- categoryStorage.getById(newCategoryId.get).map(_.map(_.courseId).getOrElse(courseId))
        _ <- questionStorage.moveToCategory(id, newCategoryId, newCourseId)
      } yield ()
    } else {
      questionStorage.moveToCategory(id, newCategoryId, courseId)
    }
  }

  override def getByCategory(categoryId: Option[Long], courseId: Long): Seq[Question] = dbLayer.execSync {
    questionStorage.getByCategory(categoryId, courseId)
  }
}
| arcusys/Valamis | valamis-questionbank/src/main/scala/com/arcusys/valamis/content/service/QuestionService.scala | Scala | gpl-3.0 | 6,578 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cloudml.zen.ml.neuralNetwork
import com.github.cloudml.zen.ml.linalg.BLAS
import com.github.cloudml.zen.ml.util.SparkUtils
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.linalg.{Vector => SV, DenseVector => SDV}
import com.github.cloudml.zen.ml.optimization._
/** Gradient-descent updater with classical momentum.
  * Maintains a running momentum vector across calls to `compute`. */
@Experimental
class MomentumUpdater(val momentum: Double) extends Updater {
  assert(momentum > 0 && momentum < 1)
  // lazily initialized running momentum term; sized on first compute() call
  @transient private var momentumSum: SDV = null

  // regularization hook; this base implementation applies none (returns 0)
  protected def l2(
    weightsOld: SV,
    gradient: SV,
    stepSize: Double,
    iter: Int,
    regParam: Double): Double = {
    0D
  }

  /** Applies one momentum-SGD step in place on `weightsOld` and returns it
    * together with the regularization value from [[l2]]. */
  override def compute(
    weightsOld: SV,
    gradient: SV,
    stepSize: Double,
    iter: Int,
    regParam: Double): (SV, Double) = {
    if (momentumSum == null) {
      momentumSum = new SDV(new Array[Double](weightsOld.size))
    }
    val reg = l2(weightsOld, gradient, stepSize, iter, regParam)
    // always true given the constructor assertion; kept for safety
    if (momentum > 0) {
      // gradient += momentum * momentumSum (BLAS axpy: y += a*x)
      BLAS.axpy(momentum, momentumSum, gradient)
      // persist the accumulated gradient as the new momentum term
      this.synchronized {
        BLAS.copy(gradient, momentumSum)
      }
    }
    // weights -= stepSize * gradient (in-place update of weightsOld)
    BLAS.axpy(-stepSize, gradient, weightsOld)
    (weightsOld, reg)
  }
}
| witgo/zen | ml/src/main/scala/com/github/cloudml/zen/ml/neuralNetwork/MomentumUpdater.scala | Scala | apache-2.0 | 1,958 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*************************************************************************************
*/
package com.normation.ldap.sdk
import com.unboundid.ldap.sdk.{RDN,DN,Modification}
import com.unboundid.ldif.LDIFRecord
import com.normation.ldap.ldif.{ToLDIFString,ToLDIFRecords}
import scala.collection.mutable.{Buffer, Map => MutMap, HashMap, ObservableMap, Subscriber, Publisher}
import scala.collection.script._
import net.liftweb.common._
import com.normation.exceptions.TechnicalException
import LDAPTree._
/*
* An LDAP tree of entries.
* It's composed of a root entry and
* children.
* Each children is itself a tree.
* When a child is added to a tree, the parent DN
* of the root's child is set to the dn of the
* tree's root.
*/
/** A mutable tree of LDAP entries: a root entry plus child subtrees keyed by
  * RDN. Children are validated on insertion: a child's root must have the
  * tree's root DN as its parent DN. */
trait LDAPTree extends Tree[LDAPEntry] with ToLDIFRecords with ToLDIFString {
  selfTree =>

  lazy val parentDn = root.parentDn

  // validation on children: rejects a child whose root's parent DN does not
  // match this tree's root DN, or whose key does not equal its root RDN
  private var _childrenValidation = new Subscriber[Message[(RDN,LDAPTree)],ObservableMap[RDN,LDAPTree]]() {
    override def notify(pub: ObservableMap[RDN,LDAPTree],event: Message[(RDN, LDAPTree)]): Unit = {
      event match {
        case Update(loc,(rdn,tree)) =>
          require(root.optDn == tree.root.parentDn,
              "Bad child/parent DN : try to add children %s to entry %s".
                format(tree.root.dn,selfTree.root.dn))
          require(Some(rdn) == tree.root.rdn)
        case _ => //nothing
      }
    }
  }

  // observable so the validation subscriber above sees every mutation
  var _children = new HashMap[RDN,LDAPTree]() /*with UpdateChildTree*/ with ObservableMap[RDN,LDAPTree]
//  { override lazy val parentDn = selfTree.root.parentDn }

  _children.subscribe(_childrenValidation)

  // immutable snapshot of the current children
  override def children() = Map() ++ _children

  /** Adds a subtree; throws TechnicalException if the child's root has no RDN. */
  def addChild(child:LDAPTree) : Unit = {
    child.root.rdn match {
      case Some(r) =>
        _children += ((r,child))
        () // unit is expected
      case None => {
        throw new TechnicalException("Try to add a child Tree but the RDN of the root of this child is not defined. Parent: %s , child root: %s".
            format(root.dn, child.root))
      }
    }
  }

  /** Adds a single entry as a leaf subtree. */
  def addChild(child:LDAPEntry) : Unit = addChild(LDAPTree(child))

  def addChildren(children:Seq[LDAPTree]) : Unit = children.foreach { t => addChild(t) }

  /** Replaces all children with the given subtrees. */
  def setChildren(children:Seq[LDAPTree]) : Unit = {
    _children.clear
    children.foreach { t => addChild(t) }
  }

  def deleteChildren(rdns:Seq[RDN]) = rdns.foreach { r => _children -= r }

  override def addChild(rdn:RDN, child:Tree[LDAPEntry]) : Unit = {
    addChild(LDAPTree(child))
  }

  // root entry first, then all descendants depth-first
  override def toLDIFRecords() : Seq[LDIFRecord] = {
    Seq(root.toLDIFRecord) ++ _children.valuesIterator.toSeq.flatMap( _.toLDIFRecords)
  }

  override def toString() = "{%s, %s}".format(root.dn.toString,
      {
        if(_children.size > 0) "children:{%s}".format(_children.map{ case(k,v) => "%s -> %s".format(k.toString,v.toString) } )
        else "no child"
      })

  //not sure it's a really good idea. Hopefully, we won't mix LDAPTrees and LDAPEntries in HashSet...
  // NOTE(review): hashCode is based on the root only while equals also compares
  // children -- equal hashCodes therefore do not imply equal trees, which is
  // legal but worth knowing when using trees as hash keys.
  override def hashCode() = root.hashCode

  override def equals(that:Any) : Boolean = that match {
    case t:LDAPTree =>
      root == t.root &&
      _children.size == t._children.size &&
      _children.iterator.forall(e => e._2 == t._children(e._1))
    case _ => false
  }

  /*
   * Set opt on LDAPTree is forwarded to the tree's root entry.
   */
  def setOpt[A](a:Option[A], attributeName:String, f:A => String) : Unit =
    root.setOpt(a, attributeName, f)
}
//trait UpdateChildTree extends MutMap[RDN,LDAPTree] {
// def parentDn : Option[DN]
// abstract override def += (kv: (RDN,LDAPTree)): this.type = {
// val (key, tree) = kv
// if(tree.parentDn == this.parentDn) super.+=(kv)
// else super.+=((key, LDAPTree(tree,this.parentDn)))
// }
//}
object LDAPTree {
//loggin
val logger = org.slf4j.LoggerFactory.getLogger(classOf[LDAPTree])
  /** Builds a tree from a root entry and initial children (validated on add). */
  def apply(r:LDAPEntry,c:Traversable[(RDN,LDAPTree)]) : LDAPTree = new LDAPTree {
    require(null != r, "root of a tree can't be null")
    require(null != c, "children map of a tree can't be null")
    val root = r
    c foreach { x => _children += x }
  }

  /** Builds a leaf tree consisting of just the given entry. */
  def apply(r:LDAPEntry) : LDAPTree = apply(r, Map.empty[RDN,LDAPTree])
/**
* Copy an LDAPTree changing its parent dn
*
*/
def move(tree:LDAPTree, newRdn:Option[RDN] = None,newParentDn:Option[DN] = None) : LDAPTree = {
val rdn = newRdn.orElse(tree.root.rdn)
val parentDn = newParentDn.orElse(tree.root.parentDn)
val newRoot = LDAPEntry(rdn,parentDn,tree.root.attributes.toSeq:_*)
apply(newRoot,tree._children.map{ kv => (kv._1,LDAPTree.move(kv._2, newParentDn = newRoot.optDn))})
}
/*
* copy children reference without any verification
* from 'from' to 'to'
*/
def overrideChildren(from:LDAPTree, to:LDAPTree) : Unit = {
to._children = from._children
to._children.subscribe(to._childrenValidation)
}
//transtype Tree[LDAPEntry] => LDAPTree
def apply(tree:Tree[LDAPEntry]) : LDAPTree = apply(tree.root, tree.children.map( e => (e._1,apply(e._2))))
import scala.collection.JavaConversions._
/*
* Build an LDAP tree from a list of entries.
* If the list is empty, return None.
* All entries in the list safe one (the one that will become the root of the tree)
* must have a direct parent in other entries.
*/
def apply(entries:Iterable[LDAPEntry]) : Box[LDAPTree] = {
if(null == entries || entries.isEmpty) Empty
//verify that there is no duplicates
else if(entries.map(_.dn).toSet.size != entries.size) {
Failure("Some entries have the same dn, what is forbiden: %s".format({val s = entries.map(_.dn).toSet; entries.map(_.dn).filter(x => ! s.contains(x))}))
} else {
val used = Buffer[DN]()
/*
* the iterable must be sorted on dn and only descendants of root.root.dn
* - add the direct children of root
* - return root with its children and not used entries
*/
def recBuild(root:LDAPTree, possible:Seq[LDAPEntry]) : LDAPTree = {
val directChildren = possible.filter(e => root.root.dn == e.dn.getParent)
for(child <- directChildren) {
used += child.dn
root.addChild(recBuild(LDAPTree(child), possible.filter(e => child.dn.isAncestorOf(e.dn,false)) ))
}
root
}
val sorted = entries.toSeq.sortWith((x,y) => x.dn.compareTo( y.dn) < 0)
val rootEntry = sorted.head
val root = recBuild(LDAPTree(rootEntry), sorted.filter(e => rootEntry.dn.isAncestorOf(e.dn,false)))
if(used.size < entries.size-1) {
Failure("Some entries have no parents: %s".format(entries.map(_.dn).filter(x => !used.contains(x))))
} else Full(root)
}
}
/*
* Compare two LDAP tree and return the list of modification to
* apply to "old" to merge with "target".
* The two roots HAVE to have the same DN, or None is returned.
* The comparison is strict, so that:
* - if
*/
def diff(source:LDAPTree, target:LDAPTree, removeMissing:Boolean) : Option[Tree[TreeModification]] = {
if(source.root.dn != target.root.dn) {
logger.debug("DN of the two LDAP tree's root are different: {} <> {}",source.root.dn,target.root.dn)
None
} else {
//modification on root
val rootDiff = LDAPEntry.merge(source.root, target.root, removeMissing = removeMissing)
val mods:Tree[TreeModification] = if(rootDiff.isEmpty) Tree(NoMod) else Tree(Replace((source.root.dn,rootDiff)))
val intersection = source.children.keySet intersect(target.children.keySet)
//remove entries present in source but not in target
for(k <- source.children.keySet -- intersection) {
mods.addChild(k, Tree(Delete(source.children()(k).map(_.dn))))
}
//add entries present in target but not in source
for(k <- target.children.keySet -- intersection) {
mods.addChild(k, Tree(Add(target.children()(k))))
}
//diff all entries both in source and target
for(k <- intersection) {
diff(source.children()(k), target.children()(k), removeMissing) foreach { d =>
mods.addChild(k, d)
}
}
Some(mods)
}
}
}
| fanf/scala-ldap | src/main/scala/com/normation/ldap/sdk/LDAPTree.scala | Scala | apache-2.0 | 9,037 |
package controllers.auth
import org.pac4j.core.context.HttpConstants
import org.pac4j.play.PlayWebContext
import org.pac4j.play.http.{DefaultHttpActionAdapter}
import play.mvc.Results
import play.mvc.Result
class DemoHttpActionAdapter extends DefaultHttpActionAdapter {
  /**
   * Maps pac4j HTTP action codes to Play results: 401 and 403 get dedicated
   * HTML error pages, anything else falls back to the default adapter.
   */
  override def adapt(code: Int, context: PlayWebContext): Result = code match {
    case HttpConstants.UNAUTHORIZED =>
      Results.unauthorized(views.html.auth.error401.render().toString()).as(HttpConstants.HTML_CONTENT_TYPE)
    case HttpConstants.FORBIDDEN =>
      Results.forbidden(views.html.auth.error403.render().toString()).as(HttpConstants.HTML_CONTENT_TYPE)
    case _ =>
      super.adapt(code, context)
  }
}
package com.mogproject.mogami.core
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import com.mogproject.mogami._
import com.mogproject.mogami.core.io.RecordFormatException
//Unit tests for Ptype (shogi piece types): string round-trips (CSA / English /
//Japanese) and promotion/demotion, cross-checked against the parallel tables below.
class PtypeSpec extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyChecks {
  //the 14 piece types, in the fixed order shared by every parallel table below
  val allPtypes = Seq(
    KING, GOLD, PAWN, KNIGHT, SILVER, LANCE, BISHOP, ROOK, PPAWN, PKNIGHT, PSILVER, PLANCE, PBISHOP, PROOK)
  //CSA notation, aligned index-by-index with allPtypes
  val csaPtypes = Seq(
    "OU", "KI", "FU", "KE", "GI", "KY", "KA", "HI", "TO", "NK", "NG", "NY", "UM", "RY")
  //English one/two letter names, aligned with allPtypes
  val englishNames = Seq(
    "K", "G", "P", "N", "S", "L", "B", "R", "+P", "+N", "+S", "+L", "+B", "+R")
  //Japanese single-character names, aligned with allPtypes
  val japaneseNames = Seq(
    "玉", "金", "歩", "桂", "銀", "香", "角", "飛", "と", "圭", "全", "杏", "馬", "龍")
  //expected result of .promoted for each entry of allPtypes (KING/GOLD cannot promote)
  val promotedPtypes = Seq(
    KING, GOLD, PPAWN, PKNIGHT, PSILVER, PLANCE, PBISHOP, PROOK, PPAWN, PKNIGHT, PSILVER, PLANCE, PBISHOP, PROOK)
  //expected result of .demoted for each entry of allPtypes
  val demotedPtypes = Seq(
    KING, GOLD, PAWN, KNIGHT, SILVER, LANCE, BISHOP, ROOK, PAWN, KNIGHT, SILVER, LANCE, BISHOP, ROOK)
  //expected .canPromote / .isPromoted flags, aligned with allPtypes
  val canPromote = Seq(false, false, true, true, true, true, true, true, false, false, false, false, false, false)
  val isPromoted = Seq(false, false, false, false, false, false, false, false, true, true, true, true, true, true)
  "Ptype#equals" must "distinguish some piece types" in {
    KING.equals(KING) must be(true)
    KING.equals(GOLD) must be(false)
    GOLD.equals(KING) must be(false)
    GOLD.equals(GOLD) must be(true)
  }
  "Ptype#toString" must "describe all piece types" in {
    allPtypes.map(_.toString) must be(Seq(
      "KING", "GOLD", "PAWN", "KNIGHT", "SILVER", "LANCE", "BISHOP", "ROOK",
      "PPAWN", "PKNIGHT", "PSILVER", "PLANCE", "PBISHOP", "PROOK"))
  }
  "Ptype#toCsaString" must "describe all piece types" in {
    allPtypes.map(_.toCsaString) must be(csaPtypes)
  }
  "Ptype#parseCsaString" must "make piece type" in {
    csaPtypes.map(Ptype.parseCsaString) mustBe allPtypes
  }
  it must "throw an exception in error cases" in {
    //malformed, padded, and oversized inputs must all be rejected
    assertThrows[RecordFormatException](Ptype.parseCsaString(""))
    assertThrows[RecordFormatException](Ptype.parseCsaString("* "))
    assertThrows[RecordFormatException](Ptype.parseCsaString("OU "))
    assertThrows[RecordFormatException](Ptype.parseCsaString("x" * 100))
  }
  "Ptype#toEnglishSimpleName" must "describe all piece types" in {
    allPtypes.map(_.toEnglishSimpleName) must be(englishNames)
  }
  "Ptype#toJapaneseSimpleName" must "describe all piece types" in {
    allPtypes.map(_.toJapaneseSimpleName) must be(japaneseNames)
  }
  //property test: CSA serialization round-trips for every generated piece type
  it must "recover piece types" in forAll(PtypeGen.ptypes) { pt =>
    Ptype.parseCsaString(pt.toCsaString) mustBe pt
  }
  "Ptype#promoted" must "return promoted piece types" in {
    allPtypes.map(_.promoted) must be(promotedPtypes)
  }
  "Ptype#demoted" must "return demoted piece types" in {
    allPtypes.map(_.demoted) must be(demotedPtypes)
  }
  //promotion and demotion are idempotent after the first application
  it must "cancel promotion and demotion" in forAll(PtypeGen.ptypes) { pt =>
    pt.promoted.demoted must be(pt.demoted)
    pt.demoted.promoted must be(pt.promoted)
  }
  "Ptype#canPromote" must "return correctly" in {
    allPtypes.map(_.canPromote) must be(canPromote)
  }
  "Ptype#isBasic" must "return correctly" in {
    allPtypes.map(_.isBasic) must be(isPromoted.map(!_))
  }
  "Ptype#isPromoted" must "return correctly" in {
    allPtypes.map(_.isPromoted) must be(isPromoted)
  }
}
| mogproject/mog-core-scala | shared/src/test/scala/com/mogproject/mogami/core/PtypeSpec.scala | Scala | apache-2.0 | 3,491 |
package lila.base
import cats.data.Validated
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import org.joda.time.{ DateTime, Duration }
import ornicar.scalalib.Zero
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.Try
import LilaTypes._
final class PimpedOption[A](private val self: Option[A]) extends AnyVal {
  /** Like Option#fold but with the branches given in (some, none) order. */
  def fold[X](some: A => X, none: => X): X = self.map(some).getOrElse(none)
  /** The wrapped value, or the type's zero element when empty. */
  def orDefault(implicit z: Zero[A]): A = self.getOrElse(z.zero)
  /** Success with the value, or Failure built from the lazily-evaluated exception. */
  def toTryWith(err: => Exception): Try[A] = self match {
    case Some(a) => scala.util.Success(a)
    case None    => scala.util.Failure(err)
  }
  /** Success with the value, or Failure wrapping a LilaException with the given message. */
  def toTry(err: => String): Try[A] = toTryWith(lila.base.LilaException(err))
  /** The wrapped value, or abort via sys.error with the given message. */
  def err(message: => String): A = self.getOrElse(sys.error(message))
  /** Run the side effect only when the option is empty. */
  def ifNone(n: => Unit): Unit = if (self.isEmpty) n
  /** Alias for Option#contains. */
  def has(a: A) = self.contains(a)
}
final class PimpedString(private val s: String) extends AnyVal {
  /** Replace every occurrence of char t by char r, skipping the copy when t is absent. */
  def replaceIf(t: Char, r: Char): String =
    if (s.indexOf(t.toInt) < 0) s else s.replace(t, r)
  /** Replace every occurrence of char t by the sequence r, skipping the copy when t is absent. */
  def replaceIf(t: Char, r: CharSequence): String =
    if (s.indexOf(t.toInt) < 0) s else s.replace(String.valueOf(t), r)
  /** Replace every occurrence of t by r, skipping the copy when t is absent. */
  def replaceIf(t: CharSequence, r: CharSequence): String =
    if (!s.contains(t)) s else s.replace(t, r)
}
final class PimpedConfig(private val config: Config) extends AnyVal {
  //read a duration config value, converted to whole milliseconds
  def millis(name: String): Int = config.getDuration(name, TimeUnit.MILLISECONDS).toInt
  //read a duration config value, converted to whole seconds
  def seconds(name: String): Int = config.getDuration(name, TimeUnit.SECONDS).toInt
  //read a duration config value as a FiniteDuration (millisecond precision)
  def duration(name: String): FiniteDuration = millis(name).millis
}
final class PimpedDateTime(private val date: DateTime) extends AnyVal {
  //epoch seconds
  def getSeconds: Long = date.getMillis / 1000
  //epoch centiseconds (hundredths of a second)
  def getCentis: Long = date.getMillis / 10
  //duration elapsed from `date` until now (negative when `date` is in the future)
  def toNow = new Duration(date, DateTime.now)
  //the earlier of the two instants (this one when equal)
  def atMost(other: DateTime) = if (other isBefore date) other else date
  //the later of the two instants (this one when equal)
  def atLeast(other: DateTime) = if (other isAfter date) other else date
}
final class PimpedTry[A](private val v: Try[A]) extends AnyVal {
  /**
   * Catamorphism: apply fa on success, fe on failure carrying an Exception;
   * non-Exception Throwables (fatal errors) are rethrown.
   */
  def fold[B](fe: Exception => B, fa: A => B): B = v match {
    case scala.util.Success(a)            => fa(a)
    case scala.util.Failure(e: Exception) => fe(e)
    case scala.util.Failure(e)            => throw e
  }
  /** Lift into a Future: failed future on failure, fulfilled one on success. */
  def future: Fu[A] = fold(Future.failed, fuccess)
  /** Convert to Either with the failure on the Left (delegates to Try#toEither). */
  def toEither: Either[Throwable, A] = v.toEither
}
final class PimpedEither[A, B](private val v: Either[A, B]) extends AnyVal {
  /** Keep this Either when it is a Right, otherwise fall back to the lazily-evaluated other. */
  def orElse(other: => Either[A, B]): Either[A, B] =
    if (v.isRight) v else other
}
final class PimpedFiniteDuration(private val d: FiniteDuration) extends AnyVal {
  //convert to centiseconds (hundredths of a second), rounding to nearest
  def toCentis =
    chess.Centis {
      // divide by Double, then round, to avoid rounding issues with just `/10`!
      math.round {
        if (d.unit eq MILLISECONDS) d.length / 10d
        else d.toMillis / 10d
      }
    }
  //absolute value of the duration
  def abs = if (d.length < 0) -d else d
}
final class RichValidated[E, A](private val v: Validated[E, A]) extends AnyVal {
  //lift into a Future: Invalid becomes a failed future with the error's toString
  def toFuture: Fu[A] = v.fold(err => fufail(err.toString), fuccess)
}
| luanlv/lila | modules/common/src/main/base/PimpedUtils.scala | Scala | mit | 3,265 |
class A { }
/*
object Main { def main(args: Array[String]) { } }
| tobast/compil-petitscala | tests/syntax/bad/testfile-unclosed_comment-1.scala | Scala | gpl-3.0 | 67 |
package org.scalaide.core.internal.jdt.util
import org.eclipse.core.resources.IFile
import org.eclipse.core.resources.IFolder
import org.eclipse.core.resources.IProject
import org.eclipse.core.resources.IResource
import org.eclipse.core.resources.ResourcesPlugin
import org.eclipse.core.runtime.CoreException
import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.core.runtime.IStatus
import org.eclipse.core.runtime.Status
import org.eclipse.jdt.core.IClasspathEntry
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.IPackageFragment
import org.eclipse.jdt.core.IType
import org.eclipse.jdt.core.JavaCore
import org.eclipse.jdt.core.JavaModelException
import org.eclipse.jdt.internal.core.ImportContainerInfo
import org.eclipse.jdt.internal.core.JavaModelManager
import org.eclipse.jdt.internal.core.NameLookup
import org.eclipse.jdt.internal.core.OpenableElementInfo
import org.eclipse.jdt.internal.ui.packageview.PackageExplorerPart
import org.eclipse.ui.progress.UIJob
import org.scalaide.util.internal.ReflectionUtils
import org.scalaide.core.internal.project.ScalaProject
object JDTUtils {
  //guarded by `lock`: true while a package-explorer refresh job is scheduled but not yet run
  private var refreshPending = false
  private val lock = new Object

  /**
   * Schedule a UI job refreshing the package explorer tree viewer.
   * Coalesces bursts of calls: while a refresh is pending, further calls are no-ops.
   */
  def refreshPackageExplorer() = {
    lock.synchronized{
      if (!refreshPending) {
        refreshPending = true
        new UIJob("Refresh package explorer") {
          def runInUIThread(monitor : IProgressMonitor) : IStatus = {
            lock.synchronized {
              refreshPending = false
            }
            val pep = PackageExplorerPart.getFromActivePerspective
            if (pep != null)
              pep.getTreeViewer.refresh()
            Status.OK_STATUS
          }
        }.schedule
      }
    }
  }

  /**
   * Find `typeName` in the first package fragment matching `packageName` that contains it.
   *
   * Fixes: NameLookup.findPackageFragments returns null when no package matches, which
   * made the previous version throw a NullPointerException; the early `return`
   * statements are replaced by an iterator + collectFirst.
   */
  def resolveType(nameLookup : NameLookup, packageName : String, typeName : String, acceptFlags : Int) : Option[IType] = {
    val pkgs = nameLookup.findPackageFragments(packageName, false)
    if (pkgs == null) None
    else
      pkgs.iterator
        .map(p => nameLookup.findType(typeName, p, false, acceptFlags, true, true))
        .collectFirst { case tpe if tpe != null => tpe }
  }

  /**
   * The package fragment containing `scalaFile` according to the Java model,
   * or the default package of its parent folder when the file is not on the classpath.
   */
  def getParentPackage(scalaFile : IFile) : IPackageFragment = {
    val jp = JavaCore.create(scalaFile.getProject)
    val pkg = JavaModelManager.determineIfOnClasspath(scalaFile, jp)
    if (pkg != null && pkg.isInstanceOf[IPackageFragment])
      pkg.asInstanceOf[IPackageFragment]
    else {
      // Not on classpath so use the default package
      val root = jp.getPackageFragmentRoot(scalaFile.getParent)
      root.getPackageFragment(IPackageFragment.DEFAULT_PACKAGE_NAME)
    }
  }

  /**
   * All .scala files reachable from the project's source classpath entries.
   * Non-Scala projects and Java-model errors yield an empty iterator.
   * (Early `return` replaced by an if/else expression.)
   */
  def flattenProject(project : IProject) : Iterator[IFile] = {
    try {
      if (!ScalaProject.isScalaProject(project))
        Iterator.empty
      else {
        val jp = JavaCore.create(project)
        jp.getRawClasspath.filter(_.getEntryKind == IClasspathEntry.CPE_SOURCE).
          iterator.flatMap(entry => flatten(ResourcesPlugin.getWorkspace.getRoot.findMember(entry.getPath)))
      }
    } catch {
      case _ : JavaModelException => Iterator.empty
    }
  }

  /**
   * Recursively collect the .scala files at or below the given resource.
   * Null/non-existing resources and workspace errors yield an empty iterator.
   */
  def flatten(r : IResource) : Iterator[IFile] = {
    try {
      r match {
        case r if r == null || !r.exists => Iterator.empty
        case folder : IFolder if folder.getType == IResource.FOLDER => folder.members.iterator.flatMap{flatten _}
        case file : IFile if file.getType == IResource.FILE && file.getFileExtension == "scala" => Iterator.single(file)
        case _ => Iterator.empty
      }
    } catch {
      case _ : CoreException => Iterator.empty
    }
  }
}
/** Reflective access to JDT's package-private SourceRefElementInfo source-range setters. */
object SourceRefElementInfoUtils extends ReflectionUtils {
  private val sreiClazz = Class.forName("org.eclipse.jdt.internal.core.SourceRefElementInfo")
  private val setSourceRangeStartMethod = getDeclaredMethod(sreiClazz, "setSourceRangeStart", classOf[Int])
  private val setSourceRangeEndMethod = getDeclaredMethod(sreiClazz, "setSourceRangeEnd", classOf[Int])
  //Integer.valueOf instead of the deprecated `new Integer(...)` constructor (also benefits from the box cache)
  def setSourceRangeStart(srei : AnyRef, pos : Int) = setSourceRangeStartMethod.invoke(srei, Integer.valueOf(pos))
  def setSourceRangeEnd(srei : AnyRef, pos : Int) = setSourceRangeEndMethod.invoke(srei, Integer.valueOf(pos))
}
//reflective access to the private `children` field of JDT's ImportContainerInfo
object ImportContainerInfoUtils extends ReflectionUtils {
  private val iciClazz = classOf[ImportContainerInfo]
  private val childrenField = getDeclaredField(iciClazz, "children")
  //overwrite the import container's children array
  def setChildren(ic : ImportContainerInfo, children : Array[IJavaElement]): Unit = { childrenField.set(ic, children) }
  //read the import container's children array
  def getChildren(ic : ImportContainerInfo) = childrenField.get(ic).asInstanceOf[Array[IJavaElement]]
}
| Kwestor/scala-ide | org.scala-ide.sdt.core/src-luna/org/scalaide/core/internal/jdt/util/JDTUtils.scala | Scala | bsd-3-clause | 4,532 |
package is.hail.annotations
import is.hail.types.physical._
import is.hail.types.virtual._
import is.hail.utils._
import is.hail.variant.Locus
import org.apache.spark.sql.Row
// Incremental, stack-based builder for off-heap region values.
//
// Protocol: call start(rootType), then push values in the physical type's
// field/element order via the add* / start* / end* calls, and finally read the
// root offset with end() (or the (region, offset) pair with result()).
//
// The four stacks below track the construct currently being filled and must
// stay in lock-step: every start* pushes, the matching end* pops.
class RegionValueBuilder(var region: Region) {
  def this() = this(null)
  //offset of the root value in `region` once built
  var start: Long = _
  //fundamental PType of the value being built; null when the builder is inactive
  var root: PType = _
  val typestk = new ArrayStack[PType]()
  val indexstk = new ArrayStack[Int]()
  val offsetstk = new ArrayStack[Long]()
  val elementsOffsetstk = new ArrayStack[Long]()
  //true when no build is in progress (all stacks empty and no root type set)
  def inactive: Boolean = root == null && typestk.isEmpty && offsetstk.isEmpty && elementsOffsetstk.isEmpty && indexstk.isEmpty
  //abandon any in-progress build and reset to the inactive state
  def clear(): Unit = {
    root = null
    typestk.clear()
    offsetstk.clear()
    elementsOffsetstk.clear()
    indexstk.clear()
  }
  //switch to another region; only legal between builds
  def set(newRegion: Region) {
    assert(inactive)
    region = newRegion
  }
  //address of the slot the next add* call will write to
  def currentOffset(): Long = {
    if (typestk.isEmpty)
      start
    else {
      val i = indexstk.top
      typestk.top match {
        case t: PCanonicalBaseStruct =>
          offsetstk.top + t.byteOffsets(i)
        case t: PArray =>
          elementsOffsetstk.top + i * t.elementByteSize
      }
    }
  }
  //PType expected by the next add* call (root, current struct field, or array element)
  def currentType(): PType = {
    if (typestk.isEmpty)
      root
    else {
      typestk.top match {
        case t: PCanonicalBaseStruct =>
          val i = indexstk.top
          t.types(i)
        case t: PArray =>
          t.elementType
      }
    }
  }
  //begin building a value of the given type (stored as its fundamental type)
  def start(newRoot: PType) {
    assert(inactive)
    root = newRoot.fundamentalType
  }
  //allocate storage for the root value; arrays and binaries allocate lazily in their start*/add* calls
  def allocateRoot() {
    assert(typestk.isEmpty)
    root match {
      case t: PArray =>
      case _: PBinary =>
      case _ =>
        start = region.allocate(root.alignment, root.byteSize)
    }
  }
  //finish the build and return the offset of the completed root value
  def end(): Long = {
    assert(root != null)
    root = null
    assert(inactive)
    start
  }
  //move to the next field/element of the enclosing construct
  def advance() {
    if (indexstk.nonEmpty)
      indexstk(0) = indexstk(0) + 1
  }
  /**
    * Unsafe unless the bytesize of every type being "advanced past" is size
    * 0. The primary use-case is when adding an array of hl.PStruct()
    * (i.e. empty structs).
    *
    **/
  def unsafeAdvance(i: Int) {
    if (indexstk.nonEmpty)
      indexstk(0) = indexstk(0) + i
  }
  //open a struct/tuple; subsequent add* calls fill its fields in declaration order
  def startBaseStruct(init: Boolean = true, setMissing: Boolean = false) {
    val t = currentType().asInstanceOf[PBaseStruct]
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    typestk.push(t)
    offsetstk.push(off)
    indexstk.push(0)
    if (init)
      t.initialize(off, setMissing)
  }
  //close the current struct/tuple; asserts every field has been added
  def endBaseStruct() {
    val t = typestk.top.asInstanceOf[PBaseStruct]
    typestk.pop()
    offsetstk.pop()
    val last = indexstk.pop()
    assert(last == t.size)
    advance()
  }
  def startStruct(init: Boolean = true, setMissing: Boolean = false) {
    assert(currentType().isInstanceOf[PStruct])
    startBaseStruct(init, setMissing)
  }
  def endStruct() {
    assert(typestk.top.isInstanceOf[PStruct])
    endBaseStruct()
  }
  def startTuple(init: Boolean = true) {
    assert(currentType().isInstanceOf[PTuple])
    startBaseStruct(init)
  }
  def endTuple() {
    assert(typestk.top.isInstanceOf[PTuple])
    endBaseStruct()
  }
  //open an array of the given length; elements added in order
  def startArray(length: Int, init: Boolean = true) {
    startArrayInternal(length, init, false)
  }
  // using this function, rather than startArray will set all elements of the array to missing by
  // default, you will need to use setPresent to add a value to this array.
  def startMissingArray(length: Int, init: Boolean = true) {
    val t = currentType().asInstanceOf[PArray]
    if (t.elementType.required)
      fatal(s"cannot use random array pattern for required type ${ t.elementType }")
    startArrayInternal(length, init, true)
  }
  //allocate array storage, link it from the parent slot (or make it the root), and push it on the stacks
  private def startArrayInternal(length: Int, init: Boolean, setMissing: Boolean) {
    val t = currentType().asInstanceOf[PArray]
    val aoff = t.allocate(region, length)
    if (typestk.nonEmpty) {
      val off = currentOffset()
      Region.storeAddress(off, aoff)
    } else
      start = aoff
    typestk.push(t)
    elementsOffsetstk.push(aoff + t.elementsOffset(length))
    indexstk.push(0)
    offsetstk.push(aoff)
    if (init)
      t.initialize(aoff, length, setMissing)
  }
  //close the current array; asserts exactly `length` elements were added
  def endArray() {
    val t = typestk.top.asInstanceOf[PArray]
    val aoff = offsetstk.top
    val length = t.loadLength(aoff)
    assert(length == indexstk.top)
    endArrayUnchecked()
  }
  //close the current array without checking the element count
  def endArrayUnchecked() {
    typestk.pop()
    offsetstk.pop()
    elementsOffsetstk.pop()
    indexstk.pop()
    advance()
  }
  //jump to a specific element position within the current array
  def setArrayIndex(newI: Int) {
    assert(typestk.top.isInstanceOf[PArray])
    indexstk(0) = newI
  }
  //jump to a specific field position within the current struct
  def setFieldIndex(newI: Int) {
    assert(typestk.top.isInstanceOf[PBaseStruct])
    indexstk(0) = newI
  }
  //mark the current field/element missing and advance; fatal for required types
  def setMissing() {
    val i = indexstk.top
    typestk.top match {
      case t: PBaseStruct =>
        if (t.fieldRequired(i))
          fatal(s"cannot set missing field for required type ${ t.types(i) }")
        t.setFieldMissing(offsetstk.top, i)
      case t: PArray =>
        if (t.elementType.required)
          fatal(s"cannot set missing field for required type ${ t.elementType }")
        t.setElementMissing(offsetstk.top, i)
    }
    advance()
  }
  //mark the current field/element present (used with startMissingArray); does NOT advance
  def setPresent() {
    val i = indexstk.top
    typestk.top match {
      case t: PBaseStruct =>
        t.setFieldPresent(offsetstk.top, i)
      case t: PArray =>
        t.setElementPresent(offsetstk.top, i)
    }
  }
  //each add* writes a primitive of the matching PType into the current slot, then advances
  def addBoolean(b: Boolean) {
    assert(currentType().isInstanceOf[PBoolean])
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    Region.storeByte(off, b.toByte)
    advance()
  }
  def addInt(i: Int) {
    assert(currentType().isInstanceOf[PInt32])
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    Region.storeInt(off, i)
    advance()
  }
  def addLong(l: Long) {
    assert(currentType().isInstanceOf[PInt64])
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    Region.storeLong(off, l)
    advance()
  }
  def addFloat(f: Float) {
    assert(currentType().isInstanceOf[PFloat32])
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    Region.storeFloat(off, f)
    advance()
  }
  def addDouble(d: Double) {
    assert(currentType().isInstanceOf[PFloat64])
    if (typestk.isEmpty)
      allocateRoot()
    val off = currentOffset()
    Region.storeDouble(off, d)
    advance()
  }
  //allocate and store a binary blob, linking its address from the current slot
  def addBinary(bytes: Array[Byte]) {
    val pbt = currentType().asInstanceOf[PBinary]
    val valueAddress = pbt.allocate(region, bytes.length)
    pbt.store(valueAddress, bytes)
    if (typestk.nonEmpty)
      Region.storeAddress(currentOffset(), valueAddress)
    else
      start = valueAddress
    advance()
  }
  def addString(s: String) {
    addBinary(s.getBytes)
  }
  //add a Row as a struct of type t, converting each field from its virtual type
  def addRow(t: TBaseStruct, r: Row) {
    assert(r != null)
    startBaseStruct()
    var i = 0
    while (i < t.size) {
      addAnnotation(t.types(i), r.get(i))
      i += 1
    }
    endBaseStruct()
  }
  def addField(t: PBaseStruct, fromRegion: Region, fromOff: Long, i: Int) {
    addField(t, fromOff, i, region.ne(fromRegion))
  }
  def addField(t: PBaseStruct, rv: RegionValue, i: Int) {
    addField(t, rv.region, rv.offset, i)
  }
  //copy field i of a struct stored at fromOff (deepCopy when crossing regions)
  def addField(t: PBaseStruct, fromOff: Long, i: Int, deepCopy: Boolean) {
    if (t.isFieldDefined(fromOff, i))
      addRegionValue(t.types(i), t.loadField(fromOff, i), deepCopy)
    else
      setMissing()
  }
  //set the next n fields/elements to missing
  def skipFields(n: Int) {
    var i = 0
    while (i < n) {
      setMissing()
      i += 1
    }
  }
  def addAllFields(t: PBaseStruct, fromRegion: Region, fromOff: Long) {
    var i = 0
    while (i < t.size) {
      addField(t, fromRegion, fromOff, i)
      i += 1
    }
  }
  def addAllFields(t: PBaseStruct, fromRV: RegionValue) {
    addAllFields(t, fromRV.region, fromRV.offset)
  }
  //copy the selected fields (by index) of a source struct, in fieldIdx order
  def addFields(t: PBaseStruct, fromRegion: Region, fromOff: Long, fieldIdx: Array[Int]) {
    var i = 0
    while (i < fieldIdx.length) {
      addField(t, fromRegion, fromOff, fieldIdx(i))
      i += 1
    }
  }
  def addFields(t: PBaseStruct, fromRV: RegionValue, fieldIdx: Array[Int]) {
    addFields(t, fromRV.region, fromRV.offset, fieldIdx)
  }
  //copy element i of a source array (missing propagates)
  def addElement(t: PArray, fromRegion: Region, fromAOff: Long, i: Int) {
    if (t.isElementDefined(fromAOff, i))
      addRegionValue(t.elementType, fromRegion,
        t.elementOffset(fromAOff, i))
    else
      setMissing()
  }
  def addElement(t: PArray, rv: RegionValue, i: Int) {
    addElement(t, rv.region, rv.offset, i)
  }
  def selectRegionValue(fromT: PStruct, fromFieldIdx: Array[Int], fromRV: RegionValue) {
    selectRegionValue(fromT, fromFieldIdx, fromRV.region, fromRV.offset)
  }
  //build a struct containing only the selected fields of the source struct
  def selectRegionValue(fromT: PStruct, fromFieldIdx: Array[Int], region: Region, offset: Long) {
    val t = fromT.typeAfterSelect(fromFieldIdx).fundamentalType
    assert(currentType().setRequired(true) == t.setRequired(true))
    assert(t.size == fromFieldIdx.length)
    startStruct()
    addFields(fromT, region, offset, fromFieldIdx)
    endStruct()
  }
  def addRegionValue(t: PType, rv: RegionValue) {
    addRegionValue(t, rv.region, rv.offset)
  }
  def addRegionValue(t: PType, fromRegion: Region, fromOff: Long) {
    addRegionValue(t, fromOff, region.ne(fromRegion))
  }
  //copy a whole value of type t stored at fromOff into the current slot (or as the root)
  def addRegionValue(t: PType, fromOff: Long, deepCopy: Boolean) {
    val toT = currentType()
    if (typestk.isEmpty) {
      val r = toT.copyFromAddress(region, t.fundamentalType, fromOff, deepCopy)
      start = r
      return
    }
    val toOff = currentOffset()
    assert(typestk.nonEmpty || toOff == start)
    toT.unstagedStoreAtAddress(toOff, region, t.fundamentalType, fromOff, deepCopy)
    advance()
  }
  def addUnsafeRow(t: PBaseStruct, ur: UnsafeRow) {
    assert(t == ur.t)
    addRegionValue(t, ur.region, ur.offset)
  }
  def addUnsafeArray(t: PArray, uis: UnsafeIndexedSeq) {
    assert(t == uis.t)
    addRegionValue(t, uis.region, uis.aoff)
  }
  //add an arbitrary JVM annotation value of virtual type t, dispatching on the type;
  //Unsafe values whose physical type matches the current slot are copied directly
  def addAnnotation(t: Type, a: Annotation) {
    if (a == null)
      setMissing()
    else
      t match {
        case TBoolean => addBoolean(a.asInstanceOf[Boolean])
        case TInt32 => addInt(a.asInstanceOf[Int])
        case TInt64 => addLong(a.asInstanceOf[Long])
        case TFloat32 => addFloat(a.asInstanceOf[Float])
        case TFloat64 => addDouble(a.asInstanceOf[Double])
        case TString => addString(a.asInstanceOf[String])
        case TBinary => addBinary(a.asInstanceOf[Array[Byte]])
        case t: TArray =>
          a match {
            case uis: UnsafeIndexedSeq if currentType() == uis.t =>
              addUnsafeArray(uis.t.asInstanceOf[PArray], uis)
            case is: IndexedSeq[Annotation] =>
              startArray(is.length)
              var i = 0
              while (i < is.length) {
                addAnnotation(t.elementType, is(i))
                i += 1
              }
              endArray()
          }
        case t: TBaseStruct =>
          a match {
            case ur: UnsafeRow if currentType() == ur.t =>
              addUnsafeRow(ur.t, ur)
            case r: Row =>
              addRow(t, r)
          }
        case TSet(elementType) =>
          //sets are stored as sorted arrays
          val s = a.asInstanceOf[Set[Annotation]]
            .toArray
            .sorted(elementType.ordering.toOrdering)
          startArray(s.length)
          s.foreach { x => addAnnotation(elementType, x) }
          endArray()
        case td: TDict =>
          //dicts are stored as sorted arrays of (key, value) structs
          val m = a.asInstanceOf[Map[Annotation, Annotation]]
            .map { case (k, v) => Row(k, v) }
            .toArray
            .sorted(td.elementType.ordering.toOrdering)
          startArray(m.length)
          m.foreach { case Row(k, v) =>
            startStruct()
            addAnnotation(td.keyType, k)
            addAnnotation(td.valueType, v)
            endStruct()
          }
          endArray()
        case TCall =>
          //calls are represented as plain ints
          addInt(a.asInstanceOf[Int])
        case t: TLocus =>
          //loci are stored as (contig, position) structs
          val l = a.asInstanceOf[Locus]
          startStruct()
          addString(l.contig)
          addInt(l.position)
          endStruct()
        case t: TInterval =>
          val i = a.asInstanceOf[Interval]
          startStruct()
          addAnnotation(t.pointType, i.start)
          addAnnotation(t.pointType, i.end)
          addBoolean(i.includesStart)
          addBoolean(i.includesEnd)
          endStruct()
        case t@TNDArray(elementType, _) =>
          //ndarrays are stored as (shape, strides, data); strides are computed
          //row-major from the shape and the element byte size
          val structWithStrides = TStruct(
            ("shape", t.shapeType),
            ("strides", t.shapeType),
            ("data", TArray(elementType))
          )
          val ptype = currentType().asInstanceOf[PBaseStruct]
          val shapeRow = a.asInstanceOf[Row](0).asInstanceOf[Row]
          val shapeArray = shapeRow.toSeq.toIndexedSeq.map(x => x.asInstanceOf[Long])
          var runningProduct = ptype.fieldType("data").asInstanceOf[PArray].elementType.byteSize
          val stridesArray = new Array[Long](shapeArray.size)
          ((shapeArray.size - 1) to 0 by -1).foreach { i =>
            stridesArray(i) = runningProduct
            runningProduct = runningProduct * (if (shapeArray(i) > 0L) shapeArray(i) else 1L)
          }
          val stridesRow = Row(stridesArray:_*)
          addAnnotation(structWithStrides, Row(shapeRow, stridesRow, a.asInstanceOf[Row](1)))
      }
  }
  //add the fields of a Row one by one (a null Row yields all-missing fields);
  //assumes a struct is already open — does NOT call startStruct/endStruct
  def addInlineRow(t: PBaseStruct, a: Row) {
    var i = 0
    if (a == null) {
      while (i < t.size) {
        setMissing()
        i += 1
      }
    } else {
      while(i < t.size) {
        addAnnotation(t.types(i).virtualType, a(i))
        i += 1
      }
    }
  }
  //the completed value as a (region, offset) pair
  def result(): RegionValue = RegionValue(region, start)
}
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.io.{File, Serializable, InputStream, OutputStream}
import java.util.{Calendar, TimeZone, UUID, Map => JMap, Properties}
import cascading.flow.hadoop.HadoopFlowProcess
import cascading.flow.{FlowProcess, FlowDef}
import cascading.flow.local.LocalFlowProcess
import cascading.pipe.Pipe
import cascading.scheme.Scheme
import cascading.scheme.local.{TextLine => CLTextLine, TextDelimited => CLTextDelimited}
import cascading.scheme.hadoop.{TextLine => CHTextLine, TextDelimited => CHTextDelimited, SequenceFile => CHSequenceFile, WritableSequenceFile => CHWritableSequenceFile }
import cascading.tap.hadoop.Hfs
import cascading.tap.MultiSourceTap
import cascading.tap.SinkMode
import cascading.tap.Tap
import cascading.tap.local.FileTap
import cascading.tuple.{Tuple, TupleEntry, TupleEntryIterator, Fields}
import com.etsy.cascading.tap.local.LocalTap
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.OutputCollector
import org.apache.hadoop.mapred.RecordReader
import org.apache.hadoop.io.Writable
import org.apache.commons.lang.StringEscapeUtils
import collection.mutable.{Buffer, MutableList}
import scala.collection.JavaConverters._
import scala.util.control.Exception.allCatch
/**
 * This is a base class for File-based sources
 */
abstract class FileSource extends Source {
  //true when the (possibly glob) path matches at least one file on the filesystem
  protected def pathIsGood(p : String, conf : Configuration) = {
    val path = new Path(p)
    Option(path.getFileSystem(conf).globStatus(path)).
      map(_.length > 0).
      getOrElse(false)
  }
  //subclasses override these; the defaults fail fast when a mode is unsupported
  def localScheme: Scheme[Properties, InputStream, OutputStream, _, _] =
    sys.error("Cascading local mode not supported for: " + toString)
  def hdfsScheme: Scheme[JobConf,RecordReader[_,_],OutputCollector[_,_],_,_] =
    sys.error("Cascading Hadoop mode not supported for: " + toString)
  //all candidate HDFS paths for reading
  def hdfsPaths : Iterable[String]
  // By default, we write to the LAST path returned by hdfsPaths
  def hdfsWritePath = hdfsPaths.last
  def localPath : String
  val sinkMode: SinkMode = SinkMode.REPLACE
  //build the Cascading tap for the current execution mode; test modes fall back
  //to TestTapFactory (hdfs scheme first, then local source fields)
  override def createTap(readOrWrite : AccessMode)(implicit mode : Mode) : Tap[_,_,_] = {
    mode match {
      // TODO support strict in Local
      case Local(_) => {
        val sinkmode = readOrWrite match {
          case Read => SinkMode.KEEP
          case Write => SinkMode.REPLACE
        }
        createLocalTap(sinkmode)
      }
      case hdfsMode @ Hdfs(_, _) => readOrWrite match {
        case Read => createHdfsReadTap(hdfsMode)
        case Write => CastHfsTap(new Hfs(hdfsScheme, hdfsWritePath, sinkMode))
      }
      case _ => {
        allCatch.opt(
          TestTapFactory(this, hdfsScheme)
        ).map {
            _.createTap(readOrWrite) // these java types are invariant, so we cast here
              .asInstanceOf[Tap[Any, Any, Any]]
          }
          .orElse {
            allCatch.opt(
              TestTapFactory(this, localScheme.getSourceFields)
            ).map {
              _.createTap(readOrWrite)
                .asInstanceOf[Tap[Any, Any, Any]]
            }
          }.get
      }
    }
  }
  def createLocalTap(sinkMode : SinkMode) : Tap[_,_,_] = new FileTap(localScheme, localPath, sinkMode)
  // This is only called when Mode.sourceStrictness is true
  protected def hdfsReadPathsAreGood(conf : Configuration) = {
    hdfsPaths.forall { pathIsGood(_, conf) }
  }
  /*
   * This throws InvalidSourceException if:
   * 1) we are in sourceStrictness mode and all sources are not present.
   * 2) we are not in the above, but some source has no input whatsoever
   * TODO this only does something for HDFS now. Maybe we should do the same for LocalMode
   */
  override def validateTaps(mode : Mode) : Unit = {
    mode match {
      case Hdfs(strict, conf) => {
        if (strict && (!hdfsReadPathsAreGood(conf))) {
          throw new InvalidSourceException(
            "[" + this.toString + "] Data is missing from one or more paths in: " +
            hdfsPaths.toString)
        }
        else if (!hdfsPaths.exists { pathIsGood(_, conf) }) {
          //Check that there is at least one good path:
          throw new InvalidSourceException(
            "[" + this.toString + "] No good paths in: " + hdfsPaths.toString)
        }
      }
      case _ => ()
    }
  }
  /*
   * Get all the set of valid paths based on source strictness.
   */
  protected def goodHdfsPaths(hdfsMode : Hdfs) = {
    hdfsMode match {
      //we check later that all the paths are good
      case Hdfs(true, _) => hdfsPaths
      // If there are no matching paths, this is still an error, we need at least something:
      case Hdfs(false, conf) => hdfsPaths.filter{ pathIsGood(_, conf) }
    }
  }
  //one Hfs tap per good path; multiple paths are wrapped in a multi-source tap
  protected def createHdfsReadTap(hdfsMode : Hdfs) : Tap[JobConf, _, _] = {
    val taps : List[Tap[JobConf, RecordReader[_,_], OutputCollector[_,_]]] =
      goodHdfsPaths(hdfsMode)
        .toList.map { path => CastHfsTap(new Hfs(hdfsScheme, path, SinkMode.KEEP)) }
    taps.size match {
      case 0 => {
        // This case is going to result in an error, but we don't want to throw until
        // validateTaps, so we just put a dummy path to return something so the
        // Job constructor does not fail.
        CastHfsTap(new Hfs(hdfsScheme, hdfsPaths.head, SinkMode.KEEP))
      }
      case 1 => taps.head
      case _ => new ScaldingMultiSourceTap(taps)
    }
  }
}
/**
 * Combines several Hfs taps into a single cascading MultiSourceTap.
 * A random UUID is fixed at construction so each instance has a stable,
 * unique identifier for the lifetime of the tap.
 */
class ScaldingMultiSourceTap(taps : Seq[Tap[JobConf, RecordReader[_,_], OutputCollector[_,_]]])
extends MultiSourceTap[Tap[JobConf, RecordReader[_,_], OutputCollector[_,_]], JobConf, RecordReader[_,_]](taps : _*) {
private final val randomId = UUID.randomUUID.toString
override def getIdentifier() = randomId
}
/**
 * The fields here are ('offset, 'line).
 * Mixes Mappable[String] support into a FileSource: each record is the text
 * of one line; the byte offset (field 0) is dropped by sourceFields.
 */
trait TextLineScheme extends FileSource with Mappable[String] {
import Dsl._
override def converter[U >: String] = TupleConverter.asSuperConverter[String, U](TupleConverter.of[String])
override def localScheme = new CLTextLine(new Fields("offset","line"), Fields.ALL)
override def hdfsScheme = HadoopSchemeInstance(new CHTextLine())
//In textline, 0 is the byte position, the actual text string is in column 1
override def sourceFields = Dsl.intFields(Seq(1))
}
/**
 * Mix this in for delimited schemes such as TSV or one-separated values.
 * By default, TSV is given.
 */
trait DelimitedScheme extends FileSource {
//override these as needed:
val fields = Fields.ALL
//This is passed directly to cascading where null is interpreted as string
val types : Array[Class[_]] = null
val separator = "\\t"
val skipHeader = false
val writeHeader = false
val quote : String = null
// Whether to throw an exception or not if the number of fields does not match an expected number.
// If set to false, missing fields will be set to null.
val strict = true
// Whether to throw an exception if a field cannot be coerced to the right type.
// If set to false, then fields that cannot be coerced will be set to null.
val safe = true
//These should not be changed:
override def localScheme = new CLTextDelimited(fields, skipHeader, writeHeader, separator, strict, quote, types, safe)
override def hdfsScheme = {
HadoopSchemeInstance(new CHTextDelimited(fields, null, skipHeader, writeHeader, separator, strict, quote, types, safe))
}
}
/** Hadoop SequenceFile scheme; only the hdfs scheme is provided (see TODO). */
trait SequenceFileScheme extends FileSource {
//override these as needed:
val fields = Fields.ALL
// TODO Cascading doesn't support local mode yet
override def hdfsScheme = {
HadoopSchemeInstance(new CHSequenceFile(fields))
}
}
/**
 * SequenceFile scheme with explicit Hadoop Writable key/value classes.
 * Concrete subclasses must supply keyType and valueType.
 */
trait WritableSequenceFileScheme extends FileSource {
//override these as needed:
val fields = Fields.ALL
val keyType : Class[_ <: Writable]
val valueType : Class[_ <: Writable]
// TODO Cascading doesn't support local mode yet
override def hdfsScheme =
HadoopSchemeInstance(new CHWritableSequenceFile(fields, keyType, valueType))
}
/**
 * Ensures that a _SUCCESS file is present in the Source path.
 */
trait SuccessFileSource extends FileSource {
  // A path is only considered good when its glob resolves to at least one
  // status entry AND one of the matched entries is literally named "_SUCCESS".
  // globStatus returns null when nothing matches, hence the Option wrap.
  override protected def pathIsGood(p: String, conf: Configuration) = {
    val path = new Path(p)
    Option(path.getFileSystem(conf).globStatus(path)) match {
      case Some(statuses) =>
        statuses.exists { status: FileStatus => status.getPath.getName == "_SUCCESS" }
      case None =>
        false
    }
  }
}
/**
 * Use this class to add support for Cascading local mode via the Hadoop tap.
 * Put another way, this runs a Hadoop tap outside of Hadoop in the Cascading local mode,
 * so the hdfsScheme (not the localScheme) is used for local reads/writes.
 */
trait LocalTapSource extends FileSource {
override def createLocalTap(sinkMode : SinkMode) = new LocalTap(localPath, hdfsScheme, sinkMode).asInstanceOf[Tap[_, _, _]]
}
/**
 * A FileSource over a fixed list of paths. Identity (toString / hashCode /
 * equals) is derived from the concrete class name plus the path list.
 */
abstract class FixedPathSource(path : String*) extends FileSource {
  /** Local mode supports exactly one input path; fails fast otherwise. */
  def localPath = { assert(path.size == 1, "Cannot use multiple input files on local mode"); path(0) }
  def hdfsPaths = path.toList
  override def toString = getClass.getName + path
  override def hashCode = toString.hashCode
  /**
   * Equality remains based on the rendered toString (class name + paths), but
   * is restricted to objects of the same runtime class. Previously ANY object
   * whose toString happened to match compared equal (e.g. a plain String),
   * which broke the symmetry requirement of the equals contract.
   */
  override def equals(that: Any): Boolean =
    (that != null) && (that.getClass == this.getClass) && (that.toString == toString)
}
/**
 * Tab separated value source over a single fixed path.
 * Inherits the "\t" separator from DelimitedScheme.
 */
case class Tsv(p : String, override val fields : Fields = Fields.ALL,
override val skipHeader : Boolean = false, override val writeHeader: Boolean = false,
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
/**
 * Allows the use of multiple Tsv input paths. The Tsv files will
 * be process through your flow as if they are a single pipe. Tsv
 * files must have the same schema.
 * For more details on how multiple files are handled check the
 * cascading docs.
 */
case class MultipleTsvFiles(p : Seq[String], override val fields : Fields = Fields.ALL,
override val skipHeader : Boolean = false, override val writeHeader: Boolean = false) extends FixedPathSource(p:_*)
with DelimitedScheme
/**
 * Csv value source:
 * separated by commas, with all fields wrapped in the quote character
 * (double quote by default).
 */
case class Csv(p : String,
override val separator : String = ",",
override val fields : Fields = Fields.ALL,
override val skipHeader : Boolean = false,
override val writeHeader : Boolean = false,
override val quote : String ="\\"",
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
/**
 * One separated value (commonly used by Pig).
 * Uses the \1 (ctrl-A) byte as the field separator.
 */
case class Osv(p : String, f : Fields = Fields.ALL,
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p)
with DelimitedScheme {
override val fields = f
override val separator = "\\1"
}
object TimePathedSource {
// Common java.util.Formatter date patterns for partitioned directory layouts.
val YEAR_MONTH_DAY = "/%1$tY/%1$tm/%1$td"
val YEAR_MONTH_DAY_HOUR = "/%1$tY/%1$tm/%1$td/%1$tH"
}
/**
 * This will automatically produce a globbed version of the given path.
 * THIS MEANS YOU MUST END WITH A / followed by * to match a file
 * For writing, we write to the directory specified by the END time.
 */
abstract class TimePathedSource(val pattern : String, val dateRange : DateRange, val tz : TimeZone) extends FileSource {
// Expands the date-formatted pattern into the set of globs covering dateRange.
val glober = Globifier(pattern)(tz)
override def hdfsPaths = glober.globify(dateRange)
//Write to the path defined by the end time:
override def hdfsWritePath = {
// TODO this should be required everywhere but works on read without it
// maybe in 0.9.0 be more strict
assert(pattern.takeRight(2) == "/*", "Pattern must end with /* " + pattern)
// Strip the trailing "/*" then render the remaining date pattern with the
// END instant of the range.
val lastSlashPos = pattern.lastIndexOf('/')
val stripped = pattern.slice(0,lastSlashPos)
String.format(stripped, dateRange.end.toCalendar(tz))
}
override def localPath = pattern
/*
 * Get path statuses based on daterange.
 * Picks the finest time unit present in the pattern (hour, day, month, year),
 * steps the range by that unit, and pairs each rendered path with whether it
 * passes pathIsGood.
 */
protected def getPathStatuses(conf : Configuration) : Iterable[(String, Boolean)] = {
List("%1$tH" -> Hours(1), "%1$td" -> Days(1)(tz),
"%1$tm" -> Months(1)(tz), "%1$tY" -> Years(1)(tz))
.find { unitDur : (String,Duration) => pattern.contains(unitDur._1) }
.map { unitDur =>
// This method is exhaustive, but too expensive for Cascading's JobConf writing.
dateRange.each(unitDur._2)
.map { dr : DateRange =>
val path = String.format(pattern, dr.start.toCalendar(tz))
val good = pathIsGood(path, conf)
(path, good)
}
}
.getOrElse(Nil : Iterable[(String, Boolean)])
}
// Override because we want to check UNGLOBIFIED paths that each are present.
override def hdfsReadPathsAreGood(conf : Configuration) : Boolean = {
getPathStatuses(conf).forall{ x =>
// Log every missing path before reporting overall failure.
if (!x._2) {
System.err.println("[ERROR] Path: " + x._1 + " is missing in: " + toString)
}
x._2
}
}
override def toString =
"TimePathedSource(" + pattern + ", " + dateRange + ", " + tz + ")"
override def equals(that : Any) =
(that != null) &&
(this.getClass == that.getClass) &&
this.pattern == that.asInstanceOf[TimePathedSource].pattern &&
this.dateRange == that.asInstanceOf[TimePathedSource].dateRange &&
this.tz == that.asInstanceOf[TimePathedSource].tz
// NOTE(review): "(31 ^ 2)" is bitwise XOR (== 29), not 31 squared. Harmless
// for hashing consistency, but likely not what was intended.
override def hashCode = pattern.hashCode +
31 * dateRange.hashCode +
(31 ^ 2) * tz.hashCode
}
/*
 * A source that contains the most recent existing path in this date range.
 * goodHdfsPaths yields at most one path: the latest one whose status is good.
 */
abstract class MostRecentGoodSource(p : String, dr : DateRange, t : TimeZone)
extends TimePathedSource(p, dr, t) {
override def toString =
"MostRecentGoodSource(" + p + ", " + dr + ", " + t + ")"
// Statuses are in chronological order, so reverse + find picks the newest good path.
override protected def goodHdfsPaths(hdfsMode : Hdfs) = getPathStatuses(hdfsMode.jobConf)
.toList
.reverse
.find{ _._2 }
.map{ x => x._1 }
// Relaxed check: only one good path in the range is required.
override def hdfsReadPathsAreGood(conf : Configuration) = getPathStatuses(conf)
.exists{ _._2 }
}
// Companion factories kept for (Java) callers that cannot use default args.
object TextLine {
def apply(p: String, sm: SinkMode): TextLine = new TextLine(p, sm)
def apply(p: String): TextLine = new TextLine(p)
}
/** Single-path text-line source; one record per line. */
class TextLine(p : String, override val sinkMode: SinkMode) extends FixedPathSource(p) with TextLineScheme {
// For some Java interop
def this(p: String) = this(p, SinkMode.REPLACE)
}
/** Single-path Hadoop SequenceFile source (works in cascading local mode via LocalTapSource). */
case class SequenceFile(p : String, f : Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE)
extends FixedPathSource(p) with SequenceFileScheme with LocalTapSource {
override val fields = f
}
/** Multi-path SequenceFile source; all files are read as one pipe. */
case class MultipleSequenceFiles(p : String*) extends FixedPathSource(p:_*) with SequenceFileScheme with LocalTapSource
/** Multi-path text-line source; all files are read as one pipe. */
case class MultipleTextLineFiles(p : String*) extends FixedPathSource(p:_*) with TextLineScheme
/**
 * Delimited files source
 * allowing to override separator and quotation characters and header configuration.
 * All paths are read as a single pipe and must share the given schema.
 */
case class MultipleDelimitedFiles (f: Fields,
override val separator : String,
override val quote : String,
override val skipHeader : Boolean,
override val writeHeader : Boolean,
p : String*) extends FixedPathSource(p:_*) with DelimitedScheme {
override val fields = f
}
/**
 * Single-path SequenceFile with explicit Writable key/value types captured
 * via Manifests (pre-2.10 style reflection; erasure yields the runtime Class).
 */
case class WritableSequenceFile[K <: Writable : Manifest, V <: Writable : Manifest](p : String, f : Fields,
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with WritableSequenceFileScheme with LocalTapSource {
override val fields = f
override val keyType = manifest[K].erasure.asInstanceOf[Class[_ <: Writable]]
override val valueType = manifest[V].erasure.asInstanceOf[Class[_ <: Writable]]
}
/** Multi-path variant of [[WritableSequenceFile]]; all files are read as one pipe. */
case class MultipleWritableSequenceFiles[K <: Writable : Manifest, V <: Writable : Manifest](p : Seq[String], f : Fields) extends FixedPathSource(p:_*)
with WritableSequenceFileScheme with LocalTapSource {
override val fields = f
override val keyType = manifest[K].erasure.asInstanceOf[Class[_ <: Writable]]
override val valueType = manifest[V].erasure.asInstanceOf[Class[_ <: Writable]]
}
/**
 * This Source writes out the TupleEntry as a simple JSON object, using the field
 * names as keys and the string representation of the values.
 *
 * On read, each line is parsed as a JSON object and the requested fields are
 * extracted by name; fields missing from the JSON become null in the tuple.
 *
 * TODO: it would be nice to have a way to add read/write transformations to pipes
 * that doesn't require extending the sources and overriding methods.
 */
case class JsonLine(p: String, fields: Fields = Fields.ALL,
override val sinkMode: SinkMode = SinkMode.REPLACE)
extends FixedPathSource(p) with TextLineScheme {
import Dsl._
import JsonLine._
// Serialize the whole entry to one JSON object per line in field 'json.
override def transformForWrite(pipe : Pipe) = pipe.mapTo(fields -> 'json) {
t: TupleEntry => mapper.writeValueAsString(TupleConverter.ToMap(t))
}
// Parse each 'line and project out the configured fields, in field order.
override def transformForRead(pipe : Pipe) = pipe.mapTo('line -> fields) {
line : String =>
val fs: Map[String, AnyRef] = mapper.readValue(line, mapTypeReference)
val values = (0 until fields.size).map {
i : Int => fs.getOrElse(fields.get(i).toString, null)
}
new cascading.tuple.Tuple(values : _*)
}
override def toString = "JsonLine(" + p + ", " + fields.toString + ")"
}
/**
 * Companion holding the shared Jackson ObjectMapper and a TypeReference for
 * Map[String, AnyRef], built via Manifest reflection so generic type
 * arguments survive erasure.
 *
 * TODO: at the next binary incompatible version remove the AbstractFunction2/scala.Serializable jank which
 * was added to get mima to not report binary errors
 */
object JsonLine extends scala.runtime.AbstractFunction3[String,Fields,SinkMode,JsonLine] with Serializable with scala.Serializable {
import java.lang.reflect.{Type, ParameterizedType}
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala._
import com.fasterxml.jackson.databind.ObjectMapper
val mapTypeReference = typeReference[Map[String, AnyRef]]
// Builds a Jackson TypeReference whose getType carries the full generic type.
private [this] def typeReference[T: Manifest] = new TypeReference[T] {
override def getType = typeFromManifest(manifest[T])
}
// Recursively converts a Manifest into a java.lang.reflect.Type, preserving
// type arguments (e.g. Map[String, AnyRef]) for Jackson's databinding.
private [this] def typeFromManifest(m: Manifest[_]): Type = {
if (m.typeArguments.isEmpty) { m.erasure }
else new ParameterizedType {
def getRawType = m.erasure
def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
def getOwnerType = null
}
}
// Shared, thread-safe mapper with Scala collection/Option support.
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
}
| vidyar/twitterscalding | scalding-core/src/main/scala/com/twitter/scalding/FileSource.scala | Scala | apache-2.0 | 18,753 |
/**
* Intel Intrinsics for Lightweight Modular Staging Framework
* https://github.com/ivtoskov/lms-intrinsics
* Department of Computer Science, ETH Zurich, Switzerland
* __ _ __ _ _
* / /____ ___ _____ (_)____ / /_ _____ (_)____ _____ (_)_____ _____
* / // __ `__ \\ / ___/______ / // __ \\ / __// ___// // __ \\ / ___// // ___// ___/
* / // / / / / /(__ )/_____// // / / // /_ / / / // / / /(__ )/ // /__ (__ )
* /_//_/ /_/ /_//____/ /_//_/ /_/ \\__//_/ /_//_/ /_//____//_/ \\___//____/
*
* Copyright (C) 2017 Ivaylo Toskov (itoskov@ethz.ch)
* Alen Stojanov (astojanov@inf.ethz.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.ethz.acl.intrinsics
// Aggregates every generated AVX-512 intrinsics slice (AVX51200..AVX512020 plus the KNC subset) into one staging trait.
trait AVX512 extends IntrinsicsBase with AVX51200 with AVX51201 with AVX51202 with AVX51203 with AVX51204 with AVX51205 with AVX51206 with AVX51207 with AVX51208 with AVX51209 with AVX512010 with AVX512011 with AVX512012 with AVX512013 with AVX512014 with AVX512015 with AVX512016 with AVX512017 with AVX512018 with AVX512019 with AVX512020 with AVX512_KNC
// C code-generation counterpart of AVX512: mixes in the matching CGen slice for every intrinsics sub-trait.
trait CGenAVX512 extends CGenIntrinsics with CGenAVX51200 with CGenAVX51201 with CGenAVX51202 with CGenAVX51203 with CGenAVX51204 with CGenAVX51205 with CGenAVX51206 with CGenAVX51207 with CGenAVX51208 with CGenAVX51209 with CGenAVX512010 with CGenAVX512011 with CGenAVX512012 with CGenAVX512013 with CGenAVX512014 with CGenAVX512015 with CGenAVX512016 with CGenAVX512017 with CGenAVX512018 with CGenAVX512019 with CGenAVX512020 with CGenAVX512_KNC {
// The intermediate representation whose nodes this code generator emits C for.
val IR: AVX512
}
| ivtoskov/lms-intrinsics | src/main/scala/ch/ethz/acl/intrinsics/AVX512.scala | Scala | apache-2.0 | 2,165 |
package controllers
import play.api.libs.json.Json
import play.api.libs.json.JsSuccess
import play.api.libs.json.JsError
import play.api.libs.json.Reads
import play.api.libs.json.JsValue
import play.api.libs.json.Reads._
import play.api.mvc.{Action, Controller}
import com.mongodb.BasicDBObject
import core.db.MongoFactory
import com.mongodb.casbah.Imports.MongoDBObject
import com.mongodb.casbah.MongoCollection
import play.api.libs.functional.syntax._
/** Target of a menu entry: the url it points at and the UI component rendering it. */
case class MenuDefinition(url: String, component: String)
// NOTE(review): _id is a mutable var because addMenuElement assigns a generated
// id after validation; ids are stored as strings — presumably numeric, confirm in DB.
case class MenuElement(var _id: Option[String], name: String, title: String, order: Int, active: Boolean, enabled: Boolean, parentId: Option[Long], definition: Option[MenuDefinition])
/**
 * REST endpoints for reading and extending the menu stored in MongoDB.
 */
class MenuController extends Controller {

  implicit val menuDefinitionFormat = Json.format[MenuDefinition]
  implicit val menuFormat = Json.format[MenuElement]

  // Name of the MongoDB collection holding the menu documents.
  val menuCollection = "menu"

  /** Returns every enabled menu element of the collection as a JSON array. */
  def retrieveMenu() = Action {
    val menu = MongoFactory.getCollection(menuCollection).toList
    val enabledElements = menu
      .map(document => Json.parse(document.toString).as[MenuElement])
      .filter(_.enabled)
    Ok(Json.toJson(enabledElements))
  }

  /**
   * Adds a menu element posted as JSON. When the element carries no _id, a
   * new one is generated from the highest stored id plus one.
   * NOTE(review): the max-id lookup sorts string ids, which is lexicographic,
   * and the read-then-insert is racy under concurrent requests — confirm
   * against actual id format / traffic assumptions.
   */
  def addMenuElement() = Action { implicit request =>
    request.body.asJson match {
      case None =>
        BadRequest("nothing's working")
      case Some(menuElement) =>
        menuElement.validate[MenuElement] match {
          case s: JsSuccess[MenuElement] =>
            val menuList: MongoCollection = MongoFactory.getCollection(menuCollection)
            val newMenuEl = s.get
            if (newMenuEl._id == None) {
              val lastMenuEl = menuList
                .find()
                .sort(MongoDBObject("_id" -> -1))
                .limit(1)
                .next
                .toString
              val lastId = Json.parse(lastMenuEl).as[MenuElement]._id
              // Increment numerically: the previous `lastId.get + 1` on an
              // Option[String] performed STRING concatenation ("5" + 1 == "51"),
              // producing bogus identifiers. Also avoids .get on an absent id.
              newMenuEl._id = lastId.map(id => (id.toLong + 1).toString)
            }
            menuList.insert(MongoDBObject(Json.toJson(newMenuEl).toString))
            Ok("OK")
          case e: JsError =>
            BadRequest("the json is not valid")
        }
    }
  }
}
| michael-zucchetta/michaelzucchetta.com | backend/core-app/app/controllers/MenuController.scala | Scala | gpl-3.0 | 2,214 |
package ingraph.compiler.sql.driver
import java.sql.{Connection, DriverManager}
import java.util.concurrent.CompletionStage
import java.util.{Map => javaMap}
import org.neo4j.driver.v1._
import org.neo4j.driver.v1.exceptions.ClientException
import org.neo4j.driver.v1.types.TypeSystem
/**
 * Neo4j-driver Session implemented over a JDBC connection.
 * Auto-commit is disabled: commits/rollbacks happen only when the single
 * allowed open SqlTransaction is closed. Most of the Session API is left
 * unimplemented (???).
 */
class SqlSession(val sqlDriver: SqlDriver) extends Session {

  val sqlConnection: Connection = DriverManager.getConnection(sqlDriver.url)
  // Transaction boundaries are controlled manually via closeTransaction.
  sqlConnection.setAutoCommit(false)

  // At most one open transaction per session (mirrors the Neo4j driver contract).
  private[driver] var currentTransaction: Option[SqlTransaction] = None

  override def beginTransaction(): SqlTransaction = {
    if (currentTransaction.isDefined) {
      throw new ClientException("You cannot begin a transaction on a session with an open transaction;" + " either run from within the transaction or use a different session.")
    }
    else {
      val transaction = new SqlTransaction(this)
      currentTransaction = Some(transaction)
      transaction
    }
  }

  // Called back by SqlTransaction.close; commits or rolls back the JDBC
  // connection and clears the open-transaction slot. No-op if the given
  // transaction is not the current one or the connection is already closed.
  private[driver] def closeTransaction(sqlTransaction: SqlTransaction, toBeCommitted: Boolean): Unit = {
    if (currentTransaction.contains(sqlTransaction) && !sqlConnection.isClosed) {
      if (toBeCommitted)
        sqlConnection.commit()
      else
        sqlConnection.rollback()

      currentTransaction = None
    }
  }

  override def beginTransaction(bookmark: String): Transaction = ???

  override def readTransaction[T](work: TransactionWork[T]): T = ???

  override def writeTransaction[T](work: TransactionWork[T]): T = ???

  override def lastBookmark(): String = ???

  override def reset(): Unit = ???

  // Closes any open transaction (rolling it back via its own close) before
  // closing the underlying JDBC connection.
  override def close(): Unit = {
    currentTransaction.foreach(_.close())
    sqlConnection.close()
  }

  override def run(statementTemplate: String, parameters: Value): StatementResult = ???

  override def run(statementTemplate: String, statementParameters: javaMap[String, AnyRef]): StatementResult = ???

  override def run(statementTemplate: String, statementParameters: Record): StatementResult = ???

  override def run(statementTemplate: String): StatementResult = ???

  override def run(statement: Statement): StatementResult = ???

  override def typeSystem(): TypeSystem = ???

  override def isOpen: Boolean = !sqlConnection.isClosed

  override def beginTransactionAsync(): CompletionStage[Transaction] = ???

  override def readTransactionAsync[T](work: TransactionWork[CompletionStage[T]]): CompletionStage[T] = ???

  override def writeTransactionAsync[T](work: TransactionWork[CompletionStage[T]]): CompletionStage[T] = ???

  override def closeAsync(): CompletionStage[Void] = ???

  override def runAsync(statementTemplate: String, parameters: Value): CompletionStage[StatementResultCursor] = ???

  override def runAsync(statementTemplate: String, statementParameters: javaMap[String, AnyRef]): CompletionStage[StatementResultCursor] = ???

  override def runAsync(statementTemplate: String, statementParameters: Record): CompletionStage[StatementResultCursor] = ???

  override def runAsync(statementTemplate: String): CompletionStage[StatementResultCursor] = ???

  override def runAsync(statement: Statement): CompletionStage[StatementResultCursor] = ???
}
| FTSRG/ingraph | cypher-to-sql/src/main/scala/ingraph/compiler/sql/driver/SqlSession.scala | Scala | epl-1.0 | 3,155 |
package edu.oregonstate.mutation.statementHistory
import org.scalatest.{FlatSpec, Matchers}
/** Unit tests for CommitInfo's equals and toString implementations. */
class CommitInfoTest extends FlatSpec with Matchers {

  it should "correctly check for equality" in {
    val reference = new CommitInfo("abced", "ADD")
    reference.equals(new CommitInfo("abced", "ADD")) shouldBe true
    reference.equals(new CommitInfo("adced", "UPDATE")) shouldBe false
    // Comparison against an unrelated type must be false, not an error.
    reference.equals(Seq()) shouldBe false
  }

  it should "give the correct string" in {
    new CommitInfo("abced", "ADD").toString shouldEqual "[abced,ADD]"
  }
}
| caiusb/statement-history | src/test/scala/edu/oregonstate/mutation/statementHistory/CommitInfoTest.scala | Scala | mit | 577 |
/*
* Copyright (c) 2012 Orderly Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package co.orderly.prestasac.representations
// Java
import java.lang.{Long => JLong}
import java.lang.{Integer => JInteger}
// Scala
import scala.reflect.BeanProperty
// JAXB
import javax.xml.bind.annotation._
// MOXy
import org.eclipse.persistence.oxm.annotations.XmlNameTransformer
// Narcolepsy
import co.orderly.narcolepsy._
import marshallers.jaxb.moxy.CamelCase2Underscore
// Prestasac
import shared._
/**
* The Country representation holds the information pertaining to a
* country in PrestaShop.
*
* A typical representation looks something like this:
*
* <prestashop xmlns:xlink="http://www.w3.org/1999/xlink">
* <country>
* <id><![CDATA[30]]></id>
* <id_zone xlink:href="http://www.psychicbazaar.com/api/zones/4"><![CDATA[4]]></id_zone>
* <id_currency></id_currency>
* <iso_code><![CDATA[ZA]]></iso_code>
* <call_prefix><![CDATA[27]]></call_prefix>
* <active><![CDATA[1]]></active>
* <contains_states><![CDATA[0]]></contains_states>
* <need_identification_number><![CDATA[0]]></need_identification_number>
* <need_zip_code><![CDATA[1]]></need_zip_code>
* <zip_code_format><![CDATA[NNNN]]></zip_code_format>
* <display_tax_label><![CDATA[1]]></display_tax_label>
* <name><language id="1" xlink:href="http://www.psychicbazaar.com/api/languages/1"><![CDATA[South Africa]]></language></name>
* </country>
* </prestashop>
*/
@XmlRootElement(name = "prestashop")
@XmlAccessorType(XmlAccessType.FIELD)
@XmlNameTransformer(classOf[CamelCase2Underscore])
class Country extends Representation {

// The single <country> element inside the <prestashop> envelope.
@XmlElement(required = true)
@BeanProperty
var country: CountryElement = _
}
/**
* The StateElement holds the core fields for the state.
*/
@XmlAccessorType(XmlAccessType.FIELD)
class CountryElement extends PrestaShopIdentity {

// -------------------------------------------------------------------------------------------------------------------
// XLinks into other resources
// -------------------------------------------------------------------------------------------------------------------
// TODO: retrieve the xlink:href as well
@BeanProperty
var idZone: PrestaShopXLink = _ // JLong = _

// TODO: retrieve the xlink:href as well
@XmlElement(nillable = true)
@BeanProperty
var idCurrency: PrestaShopXLink = _ // JLong = _

// -------------------------------------------------------------------------------------------------------------------
// Resource-specific fields (boolean-ish flags are JInteger 0/1 as sent by PrestaShop)
// -------------------------------------------------------------------------------------------------------------------
@BeanProperty
var isoCode: String = _

@BeanProperty
var callPrefix: String = _

@BeanProperty
var active: JInteger = _

@BeanProperty
var containsStates: JInteger = _

@BeanProperty
var needIdentificationNumber: JInteger = _

@BeanProperty
var needZipCode: JInteger = _

@BeanProperty
var zipCodeFormat: String = _

@BeanProperty
var displayTaxLabel: JInteger = _

// TODO: add in name. It's wrapped inside a <language> element
// @BeanProperty
// var name: String = _
}
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.calculations.TotalAdditionsCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/** CT computations box CP54: the total of all additions (CP46-CP53 inputs). */
case class CP54(value: Int) extends CtBoxIdentifier(name = "Total Additions") with CtInteger

object CP54 extends Calculated[CP54, ComputationsBoxRetriever] with TotalAdditionsCalculator {

// Derives CP54 by summing the contributing boxes via TotalAdditionsCalculator.
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CP54 = {
totalAdditionsCalculation(cp46 = fieldValueRetriever.cp46(),
cp47 = fieldValueRetriever.cp47(),
cp48 = fieldValueRetriever.cp48(),
cp49 = fieldValueRetriever.cp49(),
cp51 = fieldValueRetriever.cp51(),
cp52 = fieldValueRetriever.cp52(),
cp53 = fieldValueRetriever.cp53())
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP54.scala | Scala | apache-2.0 | 1,586 |
import scala.tools.nsc._
// Compiler regression test: verifies the REPL can interpret imports and
// references of backtick-quoted identifiers (`yield`, `package`).
// The code itself is the test fixture — do not restyle it.
object Test {
  val testCode = """
    import java.lang.Thread.`yield`
    import scala.`package`.Throwable

    `yield`
  """

  def main(args: Array[String]) {
    // Run the snippet through an embedded interpreter using the test classpath.
    val settings = new Settings()
    settings.classpath.value = System.getProperty("java.class.path")
    val repl = new interpreter.IMain(settings)
    repl.interpret(testCode)
  }
}
| felixmulder/scala | test/files/run/repl-backticks.scala | Scala | bsd-3-clause | 388 |
package rere.ql.queries
import rere.ql.options.ComposableOptions
import rere.ql.options.all._
import rere.ql.ql2.Term.TermType
import rere.ql.types._
/**
 * ReQL `changes` command: turns a table, table slice or single-row selection
 * into an infinite changefeed stream. The three implicit ops below are
 * structurally identical; only the receiver type differs.
 */
trait ChangesQueries {

  // changes
  trait ChangesTableQuery[T] extends ReqlInfiniteStream[ReqlChangefeedNotification[T]]
  trait ChangesTableSliceQuery[T] extends ReqlInfiniteStream[ReqlChangefeedNotification[T]]
  trait ChangesSelectionOfObjectQuery[T] extends ReqlInfiniteStream[ReqlChangefeedNotification[T]]

  implicit class ChangesOnTableOp[T, PK <: PrimaryKey](val table: ReqlTable[T, PK]) {
    def changes(
      squash: SquashOptions = NotSquash,
      changefeedQueueSize: ChangefeedQueueSizeOptions = DefaultChangefeedQueueSize,
      includeInitial: IncludeInitialOptions = NotIncludeInitial,
      includeStates: IncludeStatesOptions = NotIncludeStates,
      //includeOffsets: IncludeOffsetsOptions = NotIncludeOffsets, //ReqlQueryLogicError: Cannot include offsets for range subs in:
      includeTypes: IncludeTypesOptions = NotIncludeTypes
    ): ChangesTableQuery[T] = new ChangesTableQuery[T] {
      val command = TermType.CHANGES
      val string = "changes"
      val arguments = table :: Nil
      val options = ComposableOptions.compose(
        squash,
        changefeedQueueSize,
        includeInitial,
        includeStates,
        //includeOffsets,
        includeTypes
      )
    }
  }

  implicit class ChangesOnTableSliceOp[T, PK <: PrimaryKey](val tableSlice: ReqlTableSlice[T, PK]) {
    def changes(
      squash: SquashOptions = NotSquash,
      changefeedQueueSize: ChangefeedQueueSizeOptions = DefaultChangefeedQueueSize,
      includeInitial: IncludeInitialOptions = NotIncludeInitial,
      includeStates: IncludeStatesOptions = NotIncludeStates,
      //includeOffsets: IncludeOffsetsOptions = NotIncludeOffsets, //ReqlQueryLogicError: Cannot include offsets for range subs in:
      includeTypes: IncludeTypesOptions = NotIncludeTypes
    ): ChangesTableSliceQuery[T] = new ChangesTableSliceQuery[T] {
      val command = TermType.CHANGES
      val string = "changes"
      val arguments = tableSlice :: Nil
      val options = ComposableOptions.compose(
        squash,
        changefeedQueueSize,
        includeInitial,
        includeStates,
        //includeOffsets,
        includeTypes
      )
    }
  }

  implicit class ChangesOnSelectionOfObjectOp[T, PK <: PrimaryKey](val sel: ReqlSelectionOfObject[T, PK]) {
    def changes(
      squash: SquashOptions = NotSquash,
      changefeedQueueSize: ChangefeedQueueSizeOptions = DefaultChangefeedQueueSize,
      includeInitial: IncludeInitialOptions = NotIncludeInitial,
      includeStates: IncludeStatesOptions = NotIncludeStates,
      //includeOffsets: IncludeOffsetsOptions = NotIncludeOffsets, //ReqlQueryLogicError: Cannot include offsets for range subs in:
      includeTypes: IncludeTypesOptions = NotIncludeTypes
    ): ChangesSelectionOfObjectQuery[T] = new ChangesSelectionOfObjectQuery[T] {
      val command = TermType.CHANGES
      val string = "changes"
      val arguments = sel :: Nil
      val options = ComposableOptions.compose(
        squash,
        changefeedQueueSize,
        includeInitial,
        includeStates,
        //includeOffsets,
        includeTypes
      )
    }
  }
}
| pbaun/rere | modules/ql/src/main/scala/rere/ql/queries/ChangesQueries.scala | Scala | apache-2.0 | 3,268 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.storage
import akka.actor.{Actor, Props}
import akka.event.{Logging, LoggingReceive}
import se.nimsa.sbx.storage.StorageProtocol._
import se.nimsa.sbx.util.ExceptionCatching
import scala.concurrent.duration.{DurationInt, FiniteDuration}
/**
 * Actor managing export sets: named collections of image ids that expire
 * after `cleanupInterval` via a scheduled RemoveExportSet message.
 */
class StorageServiceActor(storage: StorageService,
                          cleanupInterval: FiniteDuration = 6.hours,
                          cleanupMinimumFileAge: FiniteDuration = 6.hours) extends Actor with ExceptionCatching {

  import scala.collection.mutable

  val log = Logging(context.system, this)

  implicit val ec = context.dispatcher

  // Export-set id -> image ids. Only mutated from within this actor.
  val exportSets = mutable.Map.empty[Long, Seq[Long]]

  // Internal self-message that expires an export set after cleanupInterval.
  case class RemoveExportSet(id: Long)

  // ensure dcm4che uses standard ImageIO image readers for parsing compressed image data
  // (see https://github.com/dcm4che/dcm4che/blob/3.3.7/dcm4che-imageio/src/main/java/org/dcm4che3/imageio/codec/ImageReaderFactory.java#L242)
  System.setProperty("dcm4che.useImageIOServiceRegistry", "true")

  log.info("Storage service started")

  def receive = LoggingReceive {

    case msg: ImageRequest => catchAndReport {
      msg match {
        case CreateExportSet(imageIds) =>
          // NOTE(review): max + 1 can reuse an id after the current maximum
          // expires — harmless only if stale clients never hold expired ids.
          val exportSetId = if (exportSets.isEmpty) 1 else exportSets.keys.max + 1
          exportSets(exportSetId) = imageIds
          context.system.scheduler.scheduleOnce(cleanupInterval, self, RemoveExportSet(exportSetId))
          sender ! ExportSetId(exportSetId)

        case GetExportSetImageIds(exportSetId) =>
          sender ! exportSets.get(exportSetId)
      }
    }

    case RemoveExportSet(id) =>
      exportSets.remove(id)
  }

}

object StorageServiceActor {
  // Props factory so callers never construct the actor directly.
  def props(storage: StorageService): Props = Props(new StorageServiceActor(storage))
}
| slicebox/slicebox | src/main/scala/se/nimsa/sbx/storage/StorageServiceActor.scala | Scala | apache-2.0 | 2,361 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.utils.TableTestBase
import org.junit.Test
/** Validation test: registering a table with duplicate field names must fail. */
class InlineTableValidationTest extends TableTestBase {

  @Test
  def testFieldNamesDuplicate() {
    // Expect the exact duplicate-field message before triggering registration.
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Field names must be unique.\n" +
      "List of duplicate fields: [a].\n" +
      "List of all fields: [a, a, b].")

    val util = batchTestUtil()
    util.addTable[(Int, Int, String)]("MyTable", 'a, 'a, 'b)
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/validation/InlineTableValidationTest.scala | Scala | apache-2.0 | 1,447 |
package sbt
import org.apache.ivy.core.module.id.ModuleRevisionId
import org.apache.ivy.core.module.descriptor.{ DefaultArtifact, DefaultExtendsDescriptor, DefaultModuleDescriptor, ModuleDescriptor }
import org.apache.ivy.core.module.descriptor.{ DefaultDependencyDescriptor, DependencyDescriptor }
import org.apache.ivy.plugins.parser.{ ModuleDescriptorParser, ModuleDescriptorParserRegistry, ParserSettings }
import org.apache.ivy.plugins.parser.m2.{ PomModuleDescriptorBuilder, PomModuleDescriptorParser }
import org.apache.ivy.plugins.repository.Resource
import org.apache.ivy.plugins.namespace.NamespaceTransformer
import org.apache.ivy.util.extendable.ExtendableItem
import java.io.{ File, InputStream }
import java.net.URL
import java.util.regex.Pattern
/** A [[ModuleDescriptorParser]] decorator: every descriptor parsed by `delegate`
 *  is post-processed through `transform`; all other operations are forwarded
 *  to the delegate untouched.
 */
final class CustomPomParser(delegate: ModuleDescriptorParser, transform: (ModuleDescriptorParser, ModuleDescriptor) => ModuleDescriptor) extends ModuleDescriptorParser {
  // Runs the configured post-processing step on a freshly parsed descriptor.
  private def transformed(md: ModuleDescriptor): ModuleDescriptor = transform(this, md)

  override def parseDescriptor(ivySettings: ParserSettings, descriptorURL: URL, validate: Boolean) = {
    val parsed = delegate.parseDescriptor(ivySettings, descriptorURL, validate)
    transformed(parsed)
  }

  override def parseDescriptor(ivySettings: ParserSettings, descriptorURL: URL, res: Resource, validate: Boolean) = {
    val parsed = delegate.parseDescriptor(ivySettings, descriptorURL, res, validate)
    transformed(parsed)
  }

  // Pure delegation below: no transformation is involved.
  override def toIvyFile(is: InputStream, res: Resource, destFile: File, md: ModuleDescriptor) =
    delegate.toIvyFile(is, res, destFile, md)

  override def accept(res: Resource) = delegate.accept(res)

  override def getType() = delegate.getType()

  override def getMetadataArtifact(mrid: ModuleRevisionId, res: Resource) =
    delegate.getMetadataArtifact(mrid, res)
}
object CustomPomParser {
  /** The key prefix that indicates that this is used only to store extra information and is not intended for dependency resolution.*/
  val InfoKeyPrefix = "info."
  val ApiURLKey = "info.apiURL"
  val SbtVersionKey = "sbtVersion"
  val ScalaVersionKey = "scalaVersion"
  val ExtraAttributesKey = "extraDependencyAttributes"
  // Property keys that get promoted to (qualified) module extra attributes.
  private[this] val unqualifiedKeys = Set(SbtVersionKey, ScalaVersionKey, ExtraAttributesKey, ApiURLKey)

  // packagings that should be jars, but that Ivy doesn't handle as jars
  val JarPackagings = Set("eclipse-plugin", "hk2-jar", "orbit", "scala-jar")
  // Parser instance that post-processes Maven poms with defaultTransform.
  val default = new CustomPomParser(PomModuleDescriptorParser.getInstance, defaultTransform)

  private[this] val TransformedHashKey = "e:sbtTransformHash"
  // A hash of the parameters transformation is based on.
  // If a descriptor has a different hash, we need to retransform it.
  private[this] val TransformHash: String = hash((unqualifiedKeys ++ JarPackagings).toSeq.sorted)
  private[this] def hash(ss: Seq[String]): String = Hash.toHex(Hash(ss.flatMap(_ getBytes "UTF-8").toArray))

  // Unfortunately, ModuleDescriptorParserRegistry is add-only and is a singleton instance.
  lazy val registerDefault: Unit = ModuleDescriptorParserRegistry.getInstance.addParser(default)

  // Applies the sbt-specific pom rewriting, skipping descriptors already
  // transformed by this exact configuration (detected via the stored hash).
  def defaultTransform(parser: ModuleDescriptorParser, md: ModuleDescriptor): ModuleDescriptor =
    if (transformedByThisVersion(md)) md else defaultTransformImpl(parser, md)

  private[this] def transformedByThisVersion(md: ModuleDescriptor): Boolean =
    {
      val oldTransformedHashKey = "sbtTransformHash"
      val extraInfo = md.getExtraInfo
      // sbt 0.13.1 used "sbtTransformHash" instead of "e:sbtTransformHash" until #1192 so read both
      Option(extraInfo).isDefined &&
        ((Option(extraInfo get TransformedHashKey) orElse Option(extraInfo get oldTransformedHashKey)) match {
          case Some(TransformHash) => true
          case _                   => false
        })
    }

  private[this] def defaultTransformImpl(parser: ModuleDescriptorParser, md: ModuleDescriptor): ModuleDescriptor =
    {
      val properties = getPomProperties(md)

      // Extracts extra attributes (currently, sbt and Scala versions) stored in the <properties> element of the pom.
      // These are attached to the module itself.
      val filtered = shouldBeUnqualified(properties)

      // Extracts extra attributes for the dependencies.
      // Because the <dependency> tag in pom.xml cannot include additional metadata,
      // sbt includes extra attributes in a 'extraDependencyAttributes' property.
      // This is read/written from/to a pure string (no element structure) because Ivy only
      // parses the immediate text nodes of the property.
      val extraDepAttributes = getDependencyExtra(filtered)

      // Fixes up the detected extension in some cases missed by Ivy.
      val convertArtifacts = artifactExtIncorrect(md)

      // Merges artifact sections for duplicate dependency definitions
      val mergeDuplicates = IvySbt.hasDuplicateDependencies(md.getDependencies)

      val unqualify = toUnqualify(filtered)
      // Nothing to rewrite: return the descriptor unchanged.
      if (unqualify.isEmpty && extraDepAttributes.isEmpty && !convertArtifacts && !mergeDuplicates)
        md
      else
        addExtra(unqualify, extraDepAttributes, parser, md)
    }

  // The <properties> element of the pom is used to store additional metadata, such as for sbt plugins or for the base URL for API docs.
  // This is done because the pom XSD does not appear to allow extra metadata anywhere else.
  // The extra sbt plugin metadata in pom.xml does not need to be readable by maven, but the other information may be.
  // However, the pom.xml needs to be valid in all cases because other tools like repository managers may read the pom.xml.
  private[sbt] def getPomProperties(md: ModuleDescriptor): Map[String, String] =
    {
      import collection.JavaConverters._
      PomModuleDescriptorBuilder.extractPomProperties(md.getExtraInfo).asInstanceOf[java.util.Map[String, String]].asScala.toMap
    }

  // Prefixes each remaining property key with "e:" (a qualified extra attribute).
  private[sbt] def toUnqualify(propertyAttributes: Map[String, String]): Map[String, String] =
    (propertyAttributes - ExtraAttributesKey) map { case (k, v) => ("e:" + k, v) }

  private[this] def artifactExtIncorrect(md: ModuleDescriptor): Boolean =
    md.getConfigurations.exists(conf => md.getArtifacts(conf.getName).exists(art => JarPackagings(art.getExt)))
  private[this] def shouldBeUnqualified(m: Map[String, String]): Map[String, String] = m.filterKeys(unqualifiedKeys)

  // NOTE(review): no call site in this object — apparently unused; confirm before removing.
  private[this] def condAddExtra(properties: Map[String, String], id: ModuleRevisionId): ModuleRevisionId =
    if (properties.isEmpty) id else addExtra(properties, id)
  private[this] def addExtra(properties: Map[String, String], id: ModuleRevisionId): ModuleRevisionId =
    {
      import collection.JavaConverters._
      val oldExtra = qualifiedExtra(id)
      val newExtra = (oldExtra ++ properties).asJava
      ModuleRevisionId.newInstance(id.getOrganisation, id.getName, id.getBranch, id.getRevision, newExtra)
    }

  // Decodes per-dependency extra attributes from the ExtraAttributesKey property,
  // keyed by the simplified (custom-attribute-free) module id.
  private[this] def getDependencyExtra(m: Map[String, String]): Map[ModuleRevisionId, Map[String, String]] =
    (m get ExtraAttributesKey) match {
      case None => Map.empty
      case Some(str) =>
        def processDep(m: ModuleRevisionId) = (simplify(m), filterCustomExtra(m, include = true))
        readDependencyExtra(str).map(processDep).toMap
    }

  def qualifiedExtra(item: ExtendableItem): Map[String, String] =
    {
      import collection.JavaConverters._
      item.getQualifiedExtraAttributes.asInstanceOf[java.util.Map[String, String]].asScala.toMap
    }
  def filterCustomExtra(item: ExtendableItem, include: Boolean): Map[String, String] =
    (qualifiedExtra(item) filterKeys { k => qualifiedIsExtra(k) == include })

  // Encodes each dependency that carries custom extra attributes as one string.
  def writeDependencyExtra(s: Seq[DependencyDescriptor]): Seq[String] =
    s.flatMap { dd =>
      val revId = dd.getDependencyRevisionId
      if (filterCustomExtra(revId, include = true).isEmpty)
        Nil
      else
        revId.encodeToString :: Nil
    }

  // parses the sequence of dependencies with extra attribute information, with one dependency per line
  def readDependencyExtra(s: String): Seq[ModuleRevisionId] =
    LinesP.split(s).map(_.trim).filter(!_.isEmpty).map(ModuleRevisionId.decode)

  private[this] val LinesP = Pattern.compile("(?m)^")

  def qualifiedIsExtra(k: String): Boolean = k.endsWith(ScalaVersionKey) || k.endsWith(SbtVersionKey)

  // Reduces the id to exclude custom extra attributes
  // This makes the id suitable as a key to associate a dependency parsed from a <dependency> element
  // with the extra attributes from the <properties> section
  def simplify(id: ModuleRevisionId): ModuleRevisionId =
    {
      import collection.JavaConverters._
      ModuleRevisionId.newInstance(id.getOrganisation, id.getName, id.getBranch, id.getRevision, filterCustomExtra(id, include = false).asJava)
    }

  // Re-attaches the extra attributes recorded for this dependency's simplified id.
  private[this] def addExtra(dep: DependencyDescriptor, extra: Map[ModuleRevisionId, Map[String, String]]): DependencyDescriptor =
    {
      val extras = if (extra.isEmpty) None else extra get simplify(dep.getDependencyRevisionId)
      extras match {
        case None             => dep
        case Some(extraAttrs) => transform(dep, revId => addExtra(extraAttrs, revId))
      }
    }

  private[this] def transform(dep: DependencyDescriptor, f: ModuleRevisionId => ModuleRevisionId): DependencyDescriptor =
    DefaultDependencyDescriptor.transformInstance(dep, namespaceTransformer(dep.getDependencyRevisionId, f), false)

  private[this] def extraTransformer(txId: ModuleRevisionId, extra: Map[String, String]): NamespaceTransformer =
    namespaceTransformer(txId, revId => addExtra(extra, revId))

  // Transformer applying `f` to exactly one module id, leaving all others alone.
  private[this] def namespaceTransformer(txId: ModuleRevisionId, f: ModuleRevisionId => ModuleRevisionId): NamespaceTransformer =
    new NamespaceTransformer {
      def transform(revId: ModuleRevisionId): ModuleRevisionId = if (revId == txId) f(revId) else revId
      def isIdentity = false
    }

  import collection.JavaConverters._
  // Rebuilds the descriptor: extra attributes applied to the module and its
  // dependencies, duplicate dependency definitions merged, the transform hash
  // recorded, and artifact extensions fixed for the JarPackagings cases.
  def addExtra(properties: Map[String, String], dependencyExtra: Map[ModuleRevisionId, Map[String, String]], parser: ModuleDescriptorParser, md: ModuleDescriptor): ModuleDescriptor =
    {
      val dmd = new DefaultModuleDescriptor(parser, md.getResource)
      val mrid = addExtra(properties, md.getModuleRevisionId)
      val resolvedMrid = addExtra(properties, md.getResolvedModuleRevisionId)
      dmd.setModuleRevisionId(mrid)
      dmd.setResolvedModuleRevisionId(resolvedMrid)
      dmd.setDefault(md.isDefault)
      dmd.setHomePage(md.getHomePage)
      dmd.setDescription(md.getDescription)
      dmd.setLastModified(md.getLastModified)
      dmd.setStatus(md.getStatus())
      dmd.setPublicationDate(md.getPublicationDate())
      dmd.setResolvedPublicationDate(md.getResolvedPublicationDate())

      for (l <- md.getLicenses) dmd.addLicense(l)
      for ((key, value) <- md.getExtraInfo.asInstanceOf[java.util.Map[String, String]].asScala) dmd.addExtraInfo(key, value)
      dmd.addExtraInfo(TransformedHashKey, TransformHash) // mark as transformed by this version, so we don't need to do it again
      for ((key, value) <- md.getExtraAttributesNamespaces.asInstanceOf[java.util.Map[String, String]].asScala) dmd.addExtraAttributeNamespace(key, value)
      IvySbt.addExtraNamespace(dmd)

      val withExtra = md.getDependencies map { dd => addExtra(dd, dependencyExtra) }
      val unique = IvySbt.mergeDuplicateDefinitions(withExtra)
      unique foreach dmd.addDependency

      for (ed <- md.getInheritedDescriptors) dmd.addInheritedDescriptor(new DefaultExtendsDescriptor(md, ed.getLocation, ed.getExtendsTypes))

      for (conf <- md.getConfigurations) {
        dmd.addConfiguration(conf)
        for (art <- md.getArtifacts(conf.getName)) {
          val ext = art.getExt
          // Packagings Ivy mislabels are forced back to plain "jar".
          val newExt = if (JarPackagings(ext)) "jar" else ext
          val nart = new DefaultArtifact(mrid, art.getPublicationDate, art.getName, art.getType, newExt, art.getUrl, art.getQualifiedExtraAttributes)
          dmd.addArtifact(conf.getName, nart)
        }
      }
      dmd
    }
}
package x7c1.wheat.modern.sequence
import scala.language.higherKinds
/** Adds a `map` operation to any wrapper around a [[Sequence]]; the concrete
 *  mapping strategy is provided by an implicit [[CanMapFrom]] for `F`. */
trait SequenceMapping[A, F[_] <: Sequence[_]]{
  // The wrapped sequence that map operates on.
  protected def underlying: F[A]

  def map[B](f: A => B)(implicit x: CanMapFrom[F]): F[B] = {
    x.mapFrom(underlying)(f)
  }
}
/** Type class describing how to map a function over a container `F`. */
trait CanMapFrom[F[_]] {
  def mapFrom[A, B](fa: F[A])(f: A => B): F[B]
}
/** Default mapper for plain [[Sequence]]s: returns a view whose elements are
 *  produced by applying `f` on demand, with the length taken from the source. */
private[sequence] class DefaultCanMapFrom extends CanMapFrom[Sequence]{
  override def mapFrom[A, B](fa: Sequence[A])(f: A => B) = {
    val source = fa
    new Sequence[B] {
      // Look up the source element first, then transform it if present.
      override def findAt(position: Int) = source.findAt(position).map(f)
      // The mapped view has exactly as many elements as the original.
      override def length = source.length
    }
  }
}
| x7c1/Linen | wheat-modern/src/main/scala/x7c1/wheat/modern/sequence/SequenceMapping.scala | Scala | mit | 591 |
/**
* Copyright (c) 2015, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.sparkts
import breeze.linalg._
import org.apache.commons.math3.random.MersenneTwister
import org.scalatest.FunSuite
class AugmentedDickeyFullerSuite extends FunSuite {
  test("non-stationary AR model") {
    // Seeded generator keeps the sampled series reproducible across runs.
    val rng = new MersenneTwister(10L)
    val arModel = new ARModel(0.0, .95)
    val sample = arModel.sample(500, rng)
    val (adfStat, pValue) = TimeSeriesStatisticalTests.adftest(sample, 1)
    // Both the statistic and its p-value must be defined numbers.
    assert(!adfStat.isNaN)
    assert(!pValue.isNaN)
    println(s"adfStat: $adfStat")
    println(s"pValue: $pValue")
  }

  test("iid samples") {
    val rng = new MersenneTwister(11L)
    val iidSample = Array.fill(500)(rng.nextDouble())
    val (adfStat, pValue) = TimeSeriesStatisticalTests.adftest(new DenseVector(iidSample), 1)
    assert(!adfStat.isNaN)
    assert(!pValue.isNaN)
    println(s"adfStat: $adfStat")
    println(s"pValue: $pValue")
  }
}
| dougneedham/spark-timeseries | src/test/scala/com/cloudera/sparkts/AugmentedDickeyFullerSuite.scala | Scala | apache-2.0 | 1,535 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker.backend
import org.scalajs.linker.backend.closure.ClosureLinkerBackend
object LinkerBackendImplPlatformExtensions {
  import LinkerBackendImpl.Config

  /** Platform-specific view of [[LinkerBackendImpl.Config]] exposing the
   *  `closureCompilerIfAvailable` setting under the plain name `closureCompiler`. */
  final class ConfigExt private[backend] (val __private_self: Config)
      extends AnyVal {
    // A value class must expose its wrapped value as a public val; the
    // underscore-prefixed name is hidden behind this private accessor.
    @inline private def self: Config = __private_self

    /** Whether to actually use the Google Closure Compiler pass. */
    def closureCompiler: Boolean = self.closureCompilerIfAvailable

    def withClosureCompiler(closureCompiler: Boolean): Config =
      self.withClosureCompilerIfAvailable(closureCompiler)
  }
}
| SebsLittleHelpers/scala-js | linker/jvm/src/main/scala/org/scalajs/linker/backend/LinkerBackendImplPlatformExtensions.scala | Scala | apache-2.0 | 869 |
package no.hib.dpf.text.scala.ct.transformation;
import no.hib.dpf.text.scala.ct._
/**
 * A cospan graph transformation rule.
 * Attention: needs to be implemented!
 *
 * @param name      the rule's name
 * @param cospan    the cospan this rule is based on
 * @param m         morphism used by the rule (presumably the match morphism — TODO confirm)
 * @param parameter named rule parameters
 */
case class CospanRule(name: String, cospan: TCospan, m: TMorphism, parameter: Map[String, String])
// def isApplicable(): Boolean = true //TODO Check application condition
// def repair():CospanRule //TODO if Category DPF or IGraph and m not match complete,
//PUT Method maybe somewhere else
// def apply(gid: Int = GroupIdGen.gen()): CospanTransformation = {
// if (!isApplicable()) {
// return None
// }
// ....
//
//case class CospanTransformation
// methods to get all morphisms
// def resultToGraph() //Use the names either for the model or new ones from the rule
// | fmantz/DPF_Text | no.hib.dpf.text/src_scala/no/hib/dpf/text/scala/ct/transformation/GraphTransformation.scala | Scala | epl-1.0 | 809 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check
import java.util.concurrent.ThreadLocalRandom
import io.gatling.commons.util.ThreadLocalRandoms
import io.gatling.commons.validation._
import io.gatling.core.check.extractor.Extractor
import io.gatling.core.session._
/** Builder step for checks that extract a single value from the response. */
trait FindCheckBuilder[A, P, X] {
  def find: ValidatorCheckBuilder[A, P, X]
}
/** Straightforward [[FindCheckBuilder]] backed by a single extractor expression. */
class DefaultFindCheckBuilder[A, P, X](extractor: Expression[Extractor[P, X]])
  extends FindCheckBuilder[A, P, X] {

  def find: ValidatorCheckBuilder[A, P, X] = ValidatorCheckBuilder(extractor)
}
/** Builder step for checks whose extractor can match multiple occurrences. */
trait MultipleFindCheckBuilder[A, P, X] extends FindCheckBuilder[A, P, X] {

  /** Check on the match at the given 0-based occurrence. */
  def find(occurrence: Int): ValidatorCheckBuilder[A, P, X]

  /** Check on all matches at once. */
  def findAll: ValidatorCheckBuilder[A, P, Seq[X]]

  /** Check on one randomly chosen match. */
  def findRandom: ValidatorCheckBuilder[A, P, X]

  /** Check on `num` randomly chosen matches; with `failIfLess`, fewer matches fails. */
  def findRandom(num: Int, failIfLess: Boolean = false): ValidatorCheckBuilder[A, P, Seq[X]]

  /** Check on the number of matches. */
  def count: ValidatorCheckBuilder[A, P, Int]
}
abstract class DefaultMultipleFindCheckBuilder[A, P, X]
  extends MultipleFindCheckBuilder[A, P, X] {

  // Extractor for the match at a given occurrence; supplied by subclasses.
  def findExtractor(occurrence: Int): Expression[Extractor[P, X]]

  // Extractor returning every match; supplied by subclasses.
  def findAllExtractor: Expression[Extractor[P, Seq[X]]]

  // Picks one element uniformly at random from all matches (None when empty).
  def findRandomExtractor: Expression[Extractor[P, X]] = findAllExtractor.map { fae =>
    new Extractor[P, X] {
      override def name: String = fae.name
      override def arity: String = "findRandom"

      override def apply(prepared: P): Validation[Option[X]] =
        fae(prepared)
          .map(_.collect { case seq if seq.nonEmpty => seq(ThreadLocalRandom.current.nextInt(seq.size)) })
    }
  }

  // Picks `num` random matches; the selected elements keep their original
  // relative order. With failIfLess set, fewer than `num` matches is a failure.
  def findManyRandomExtractor(num: Int, failIfLess: Boolean): Expression[Extractor[P, Seq[X]]] = findAllExtractor.map { fae =>
    new Extractor[P, Seq[X]] {
      override def name: String = fae.name
      override def arity: String = s"findRandom($num, $failIfLess)"

      override def apply(prepared: P): Validation[Option[Seq[X]]] =
        fae(prepared)
          .flatMap {
            case Some(seq) =>
              if (failIfLess && seq.size < num) {
                s"Failed to collect $num matches".failure
              } else if (seq.isEmpty) {
                NoneSuccess
              } else {
                val randomSeq =
                  if (num >= seq.size) {
                    // Asking for at least as many as exist: return them all.
                    seq
                  } else {
                    // Shuffle the indices, keep `num`, then re-sort so the
                    // chosen elements preserve their relative order.
                    val sortedRandomIndexes = ThreadLocalRandoms.shuffle(seq.indices.toVector).take(num).sorted
                    sortedRandomIndexes.map(seq)
                  }
                Some(randomSeq).success
              }
            case None => NoneSuccess
          }
    }
  }

  // Extractor counting the matches; supplied by subclasses.
  def countExtractor: Expression[Extractor[P, Int]]

  // `find` without an occurrence means the first match.
  def find = find(0)

  def find(occurrence: Int): ValidatorCheckBuilder[A, P, X] = ValidatorCheckBuilder(findExtractor(occurrence))

  def findAll: ValidatorCheckBuilder[A, P, Seq[X]] = ValidatorCheckBuilder(findAllExtractor)

  def findRandom: ValidatorCheckBuilder[A, P, X] = ValidatorCheckBuilder(findRandomExtractor)

  def findRandom(num: Int, failIfLess: Boolean): ValidatorCheckBuilder[A, P, Seq[X]] = ValidatorCheckBuilder(findManyRandomExtractor(num, failIfLess))

  def count: ValidatorCheckBuilder[A, P, Int] = ValidatorCheckBuilder(countExtractor)
}
object ValidatorCheckBuilder {
  // Prefixes applied to the underlying error message when a user-supplied
  // transformation function throws while a check is being resolved.
  val TransformErrorMapper: String => String = message => s"transform crashed: $message"
  val TransformOptionErrorMapper: String => String = message => s"transformOption crashed: $message"
}
/** Fluent step between extraction and validation: `transform` variants adjust
 *  the extracted value, while `validate`/`is`/`exists`/... attach a Validator
 *  and produce the final [[CheckBuilder]]. */
case class ValidatorCheckBuilder[A, P, X](extractor: Expression[Extractor[P, X]]) {

  import ValidatorCheckBuilder._

  // Wraps an extractor so its (present) result is mapped through `transformation`;
  // exceptions thrown by the user function become failures with a clear prefix.
  private def transformExtractor[X2](transformation: X => X2)(extractor: Extractor[P, X]) =
    new Extractor[P, X2] {
      def name = extractor.name
      def arity = extractor.arity + ".transform"

      def apply(prepared: P): Validation[Option[X2]] =
        safely(TransformErrorMapper) {
          extractor(prepared).map(_.map(transformation))
        }
    }

  def transform[X2](transformation: X => X2): ValidatorCheckBuilder[A, P, X2] =
    copy(extractor = extractor.map(transformExtractor(transformation)))

  // Session-aware variant: the transformation also sees the virtual user's Session.
  def transform[X2](transformation: (X, Session) => X2): ValidatorCheckBuilder[A, P, X2] =
    copy(extractor = session => extractor(session).map(transformExtractor(transformation(_, session))))

  // Like transformExtractor, but the transformation sees the Option and may
  // itself fail or produce None.
  private def transformOptionExtractor[X2](transformation: Option[X] => Validation[Option[X2]])(extractor: Extractor[P, X]) =
    new Extractor[P, X2] {
      def name = extractor.name
      def arity = extractor.arity + ".transformOption"

      def apply(prepared: P): Validation[Option[X2]] =
        safely(TransformOptionErrorMapper) {
          extractor(prepared).flatMap(transformation)
        }
    }

  def transformOption[X2](transformation: Option[X] => Validation[Option[X2]]): ValidatorCheckBuilder[A, P, X2] =
    copy(extractor = extractor.map(transformOptionExtractor(transformation)))

  def transformOption[X2](transformation: (Option[X], Session) => Validation[Option[X2]]): ValidatorCheckBuilder[A, P, X2] =
    copy(extractor = session => extractor(session).map(transformOptionExtractor(transformation(_, session))))

  /** Attaches a validator, yielding a check builder that can also save the value. */
  def validate(validator: Expression[Validator[X]]): CheckBuilder[A, P, X] with SaveAs[A, P, X] =
    new CheckBuilder(this, validator) with SaveAs[A, P, X]

  /** Convenience overload building an ad-hoc, session-aware validator named `opName`. */
  def validate(opName: String, validator: (Option[X], Session) => Validation[Option[X]]): CheckBuilder[A, P, X] with SaveAs[A, P, X] =
    validate((session: Session) => new Validator[X] {
      def name = opName
      def apply(actual: Option[X]): Validation[Option[X]] = validator(actual, session)
    }.success)

  // Standard matchers exposed by the check DSL.
  def is(expected: Expression[X]) = validate(expected.map(new IsMatcher(_)))
  def not(expected: Expression[X]) = validate(expected.map(new NotMatcher(_)))
  def in(expected: X*) = validate(expected.toSeq.expressionSuccess.map(new InMatcher(_)))
  def in(expected: Expression[Seq[X]]) = validate(expected.map(new InMatcher(_)))
  def exists = validate(new ExistsValidator[X]().expressionSuccess)
  def notExists = validate(new NotExistsValidator[X]().expressionSuccess)
  def optional = validate(new NoopValidator[X]().expressionSuccess)
  def lessThan(expected: Expression[X])(implicit ordering: Ordering[X]) = validate(expected.map(new CompareMatcher("lessThan", "less than", ordering.lt, _)))
  def lessThanOrEqual(expected: Expression[X])(implicit ordering: Ordering[X]) = validate(expected.map(new CompareMatcher("lessThanOrEqual", "less than or equal to", ordering.lteq, _)))
  def greaterThan(expected: Expression[X])(implicit ordering: Ordering[X]) = validate(expected.map(new CompareMatcher("greaterThan", "greater than", ordering.gt, _)))
  def greaterThanOrEqual(expected: Expression[X])(implicit ordering: Ordering[X]) = validate(expected.map(new CompareMatcher("greaterThanOrEqual", "greater than or equal to", ordering.gteq, _)))
}
case class CheckBuilder[A, P, X](
  validatorCheckBuilder: ValidatorCheckBuilder[A, P, X],
  validator: Expression[Validator[X]],
  saveAs: Option[String] = None) {

  /** Assembles the final protocol-specific check from the extractor, the
   *  validator and the optional session key, via the given protocol provider. */
  def build[C <: Check[R], R](protocolProvider: CheckProtocolProvider[A, C, R, P]): C = {
    import protocolProvider._
    val base: CheckBase[R, P, X] = CheckBase(preparer, validatorCheckBuilder.extractor, validator, saveAs)
    specializer(base)
  }
}
/** Mixin letting a check store the extracted value in the Session under `key`. */
trait SaveAs[C, P, X] { this: CheckBuilder[C, P, X] =>
  def saveAs(key: String): CheckBuilder[C, P, X] = copy(saveAs = Some(key))
}
| MykolaB/gatling | gatling-core/src/main/scala/io/gatling/core/check/CheckBuilder.scala | Scala | apache-2.0 | 8,040 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package data
import Prelude._
/** An implementation of Banker's Dequeues, as described in Chris Okasaki's
* Purely Functional Data Structures.
*
* @param sizeF the front list length
* @param front the front list
* @param sizeR the rear list length
* @param rear the rear list
* @tparam A the deque type
* @author Carlo Micieli
* @since 0.0.1
*/
final case class BankersDequeue[A] private (
  private val sizeF: Int,
  private val front: List[A],
  private val sizeR: Int,
  private val rear: List[A]) extends Dequeue[A] {

  // Front elements in order, then rear elements (stored reversed).
  def foreach[U](f: A => U): Unit = {
    front.foreach(f)
    rear.reverse.foreach(f)
  }

  override def isEmpty: Boolean = {
    length == 0
  }

  override def length: Int = {
    sizeR + sizeF
  }

  // A single element may be stored on either side: handle the case where the
  // only element sits in the rear list before falling back to the front.
  override def first: Maybe[A] = {
    this match {
      case BankersDequeue(_, List(), _, List(x)) => Maybe.just(x)
      case BankersDequeue(_, fs, _, _) => fs.headMaybe
    }
  }

  // Symmetric to `first`: the only element may be stored in the front list.
  override def last: Maybe[A] = {
    this match {
      case BankersDequeue(_, List(x), _, List()) => Maybe.just(x)
      case BankersDequeue(_, _, _, rs) => rs.headMaybe
    }
  }

  // O(1) reversal: just swap the two lists and their cached sizes.
  override def reverse: Dequeue[A] = {
    BankersDequeue(sizeR, rear, sizeF, front)
  }

  override def pushFront(x: A): BankersDequeue[A] = {
    import BankersDequeue._
    check(BankersDequeue(sizeF + 1, x :: front, sizeR, rear))
  }

  override def popFront: Maybe[(A, BankersDequeue[A])] = {
    import BankersDequeue._
    this match {
      case BankersDequeue(_, List(), _, List()) => Maybe.none
      case BankersDequeue(_, List(), _, List(x)) => Maybe.just((x, empty))
      // Empty front with more than one rear element violates the balance invariant.
      case BankersDequeue(_, List(), _, _) => throw new UnbalancedDequeueException
      case BankersDequeue(sf, (f :: fs), sr, rs) =>
        val res = (f, check(BankersDequeue(sf - 1, fs, sr, rs)))
        Maybe.just(res)
    }
  }

  override def pushBack(x: A): BankersDequeue[A] = {
    import BankersDequeue._
    check(BankersDequeue(sizeF, front, sizeR + 1, x :: rear))
  }

  override def popBack: Maybe[(A, BankersDequeue[A])] = {
    import BankersDequeue._
    this match {
      case BankersDequeue(_, List(), _, List()) => Maybe.none
      case BankersDequeue(_, List(x), _, List()) => Maybe.just((x, empty))
      // Empty rear with more than one front element violates the balance invariant.
      case BankersDequeue(_, _, _, List()) => throw new UnbalancedDequeueException
      case BankersDequeue(sf, fs, sr, (r :: rs)) =>
        val res = (r, check(BankersDequeue(sf, fs, sr - 1, rs)))
        Maybe.just(res)
    }
  }

  // First n elements from the front; overflow past the front list is taken
  // from the (reversed) rear list.
  override def takeFront(n: Int): List[A] = {
    (front take n) ++ rear.reverse.take(n - sizeF)
  }

  override def takeBack(n: Int): List[A] = {
    (rear take n) ++ front.reverse.take(n - sizeR)
  }

  override def toList: List[A] = {
    front ++ rear.reverse
  }
}
/** An implementation of Banker's Dequeues, as described in Chris Okasaki's
* Purely Functional Data Structures.
*
* The functions for the Dequeue instance have the following complexities
* (where `n` is the length of the queue):
*
* - `length`: O(1)
* - `first`: O(1)
* - `last`: O(1)
* - `takeFront`: O(n)
* - `takeBack`: O(n)
* - `pushFront`: O(1) amortised
* - `popFront`: O(1) amortised
* - `pushBack`: O(1) amortised
* - `popBack`: O(1) amortised
* - `fromList`: O(n)
*
* @author Carlo Micieli
* @since 0.0.1
*/
object BankersDequeue {
  // The maximum number of times longer one half of a 'BankersDequeue' is
  // permitted to be relative to the other.
  private val bqBalance: Int = 4

  def empty[A]: BankersDequeue[A] = BankersDequeue(0, List.empty[A], 0, List.empty[A])

  // Converts a list into a dequeue.
  def fromList[A](as: List[A]): BankersDequeue[A] = {
    check(BankersDequeue(as.length, as, 0, List.empty[A]))
  }

  // Checks to see if the queue is too far out of balance. If it is, it
  // rebalances it.
  private def check[A](q: BankersDequeue[A]): BankersDequeue[A] = {
    val BankersDequeue(sizeF, front, sizeR, rear) = q
    // Target sizes after rebalancing: split the total roughly in half.
    val size1 = (sizeF + sizeR) / 2
    val size2 = (sizeF + sizeR) - size1
    if (sizeF > bqBalance * sizeR + 1) {
      // Front too long: keep the first half, move the rest (reversed) to the rear.
      val frontP = front take size1
      val rearP = rear ++ (front drop size1).reverse
      BankersDequeue(size1, frontP, size2, rearP)
    } else if (sizeR > bqBalance * sizeF + 1) {
      // Rear too long: symmetric move towards the front.
      val frontP = front ++ (rear drop size1).reverse
      val rearP = rear take size1
      BankersDequeue(size2, frontP, size1, rearP)
    } else {
      q
    }
  }
}
/** Thrown when one side of the dequeue is empty while the other still holds
 *  more than one element — a state the balancing invariant should prevent. */
final class UnbalancedDequeueException extends java.lang.Exception("Dequeue is too far unbalanced.")
| CarloMicieli/hascalator | core/src/main/scala/io/hascalator/data/BankersDequeue.scala | Scala | apache-2.0 | 5,153 |
package hrscala.validation
import Models.{Row, ScalaDeveloper}
import scala.util.Try
import scalaz.Validation.FlatMap._
import scalaz.ValidationNel
import scalaz.syntax.validation._
import scalaz.syntax.applicative._
object ScalazValidation
  extends App
  with CommonScalazValidations
  with BusinessScalazValidations {

  /** Builds a [[ScalaDeveloper]] from a raw row, accumulating failures from the
   *  three independent field validations instead of stopping at the first. */
  def constructScalaDeveloper(row: Row): ValidationNel[String, ScalaDeveloper] = {
    val name = nonEmptyString(row.cells(0))
    // flatMap chains dependent checks: the age must parse before the minor check runs.
    val age = positiveNumber(row.cells(1)) flatMap noMinor
    val languages = commaSeparatedStrings(row.cells(2)) flatMap mustHaveScala
    // |@| combines the independent validations, collecting all their failures.
    (name |@| age |@| languages) {
      ScalaDeveloper(_, _, _)
    }
  }

  // Validate every sample row and print the outcome.
  Models.people foreach { row =>
    println(constructScalaDeveloper(row))
  }
}
trait CommonScalazValidations {

  /** Succeeds with the input when it is neither null nor empty. */
  def nonEmptyString(input: String): ValidationNel[String, String] =
    if (input != null && input.nonEmpty) input.successNel else "Input string is empty".failureNel

  /** Parses the input as an Int; fails on empty input or a bad number format. */
  def number(input: String): ValidationNel[String, Int] =
    nonEmptyString(input).flatMap { nes =>
      // Try catches the NumberFormatException thrown by toInt.
      Try(input.toInt.successNel[String]).getOrElse(s"Invalid number format for input: $input".failureNel)
    }

  /** A parsed number that must also be strictly greater than zero. */
  def positiveNumber(input: String): ValidationNel[String, Int] =
    number(input).flatMap { num =>
      if(num > 0L) num.successNel else "The input value is not positive".failureNel
    }

  /** Splits on a comma followed by any number of spaces. */
  def commaSeparatedStrings(input: String): ValidationNel[String, Seq[String]] =
    nonEmptyString(input).flatMap { nes =>
      nes.split(", *").toSeq.successNel
    }
}
trait BusinessScalazValidations {

  /** Fails when the person is younger than 18, otherwise passes the age through. */
  def noMinor(age: Int): ValidationNel[String, Int] =
    if (age < 18) "Person is a minor".failureNel else age.successNel

  /** Requires "scala" to be among the languages; passes the full list through on
   *  success. Uses `contains` instead of the previous `find` + match on
   *  `Some[String]`, whose type pattern was unchecked due to erasure. */
  def mustHaveScala(languages: Seq[String]): ValidationNel[String, Seq[String]] =
    if (languages.contains("scala")) languages.successNel
    else "Languages did not contain Scala".failureNel
}
| HRScala/validations-keep-us-sane | scalaz/ScalazValidation.scala | Scala | unlicense | 1,948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.loadBalancer
import akka.actor.{ActorRef, ActorSystem, Props}
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.WhiskConfig._
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.containerpool.ContainerPoolConfig
import org.apache.openwhisk.core.entity.ControllerInstanceId
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.InvokerProvider
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.spi.SpiLoader
import org.apache.openwhisk.utils.ExecutionContextFactory
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.entity.size._
import scala.concurrent.Future
/**
* Lean loadbalancer implemetation.
*
* Communicates with Invoker directly without Kafka in the middle. Invoker does not exist as a separate entity, it is built together with Controller
* Uses LeanMessagingProvider to use in-memory queue instead of Kafka
*/
class LeanBalancer(config: WhiskConfig,
                   feedFactory: FeedFactory,
                   controllerInstance: ControllerInstanceId,
                   implicit val messagingProvider: MessagingProvider = SpiLoader.get[MessagingProvider])(
  implicit actorSystem: ActorSystem,
  logging: Logging)
    extends CommonLoadBalancer(config, feedFactory, controllerInstance) {

  /** Loadbalancer interface methods */

  /** No separate invoker processes exist in the lean model, so there is no health state to report. */
  override def invokerHealth(): Future[IndexedSeq[InvokerHealth]] = Future.successful(IndexedSeq.empty[InvokerHealth])

  /** Controller and invoker run as a single process, so the cluster size is fixed at one. */
  override def clusterSize: Int = 1

  // Container pool configuration (e.g. user memory budget) read from application config; fails fast if absent.
  val poolConfig: ContainerPoolConfig = loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool)

  // Identity of the single embedded invoker: always instance 0, carrying the pool's user memory.
  val invokerName = InvokerInstanceId(0, None, None, poolConfig.userMemory)

  /** 1. Publish a message to the loadbalancer */
  override def publish(action: ExecutableWhiskActionMetaData, msg: ActivationMessage)(
    implicit transid: TransactionId): Future[Future[Either[ActivationId, WhiskActivation]]] = {

    /** 2. Update local state with the activation to be executed scheduled. */
    val activationResult = setupActivation(msg, action, invokerName)
    // Send directly to the embedded invoker (in-memory queue, no Kafka). The outer future completes
    // when the send completes; the inner future carries the eventual activation outcome.
    sendActivationToInvoker(messageProducer, msg, invokerName).map(_ => activationResult)
  }

  /** Creates an invoker for executing user actions. There is only one invoker in the lean model. */
  private def makeALocalThreadedInvoker(): Unit = {
    // Dedicated cached thread pool for the embedded invoker's work.
    implicit val ec = ExecutionContextFactory.makeCachedThreadPoolExecutionContext()
    val limitConfig: ConcurrencyLimitConfig = loadConfigOrThrow[ConcurrencyLimitConfig](ConfigKeys.concurrencyLimit)
    SpiLoader.get[InvokerProvider].instance(config, invokerName, messageProducer, poolConfig, limitConfig)
  }

  // Eagerly start the embedded invoker as part of constructing this balancer.
  makeALocalThreadedInvoker()

  // Props.empty yields an actor with no behavior: the base class requires an invoker pool,
  // but there are no remote invokers to supervise here.
  override protected val invokerPool: ActorRef = actorSystem.actorOf(Props.empty)

  override protected def releaseInvoker(invoker: InvokerInstanceId, entry: ActivationEntry) = {
    // Currently do nothing
  }

  override protected def emitMetrics() = {
    super.emitMetrics()
  }
}
/**
 * SPI entry point that constructs a [[LeanBalancer]] for a controller instance.
 */
object LeanBalancer extends LoadBalancerProvider {

  override def instance(whiskConfig: WhiskConfig, instance: ControllerInstanceId)(implicit actorSystem: ActorSystem,
                                                                                  logging: Logging): LoadBalancer = {
    val feedFactory = createFeedFactory(whiskConfig, instance)
    new LeanBalancer(whiskConfig, feedFactory, instance)
  }

  /** Configuration properties that must be present for this balancer to start. */
  def requiredProperties =
    ExecManifest.requiredProperties ++
      wskApiHost
}
| style95/openwhisk | core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/LeanBalancer.scala | Scala | apache-2.0 | 4,302 |
package io.youi.server
import io.youi.net.URL
/**
 * Signals a server-side failure while processing a request.
 *
 * @param message human-readable description of the failure
 * @param cause   the underlying exception that triggered this one
 * @param url     the URL being handled when the failure occurred; appended to the exception message
 */
class ServerException(val message: String, cause: Throwable, val url: URL)
  extends RuntimeException(s"$message ($url)", cause)
package models.storage.event
import no.uio.musit.models.MusitEvent
/**
 * Common super-type for events occurring in a storage facility.
 * Implementations identify themselves through their [[StorageFacilityEventType]].
 */
trait StorageFacilityEvent extends MusitEvent {
  // Discriminator identifying the concrete kind of storage facility event.
  val eventType: StorageFacilityEventType
}
| MUSIT-Norway/musit | service_backend/app/models/storage/event/StorageFacilityEvent.scala | Scala | gpl-2.0 | 171 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.