code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package models.daos
import java.util.UUID
import scala.concurrent.Future
import javax.inject.Inject
import play.modules.reactivemongo.ReactiveMongoApi
import scala.concurrent.ExecutionContext.Implicits.global
import org.joda.time.DateTime
import models.AuthToken
/**
 * MongoDB-backed implementation of [[AuthTokenDAO]], storing tokens in the
 * "tokens" collection via a simple key/value wrapper keyed by the token UUID.
 */
class AuthTokenDAOMongoImpl @Inject() (reactiveMongoApi: ReactiveMongoApi) extends AuthTokenDAO {

  /**
   * Deserializes an [[AuthToken]] from its BSON representation.
   *
   * NOTE(review): a missing "id"/"uid"/"expiry" field falls back to "", which makes
   * `UUID.fromString` / `DateTime.parse` throw; documents written by this DAO always
   * contain all three fields, so that only fires on corrupt data — confirm acceptable.
   */
  implicit val reader: reactivemongo.bson.BSONDocumentReader[AuthToken] =
    new reactivemongo.bson.BSONDocumentReader[AuthToken] {
      def read(bson: reactivemongo.bson.BSONDocument): AuthToken =
        new AuthToken(
          id = java.util.UUID.fromString(bson.getAs[String]("id").getOrElse("")),
          userID = java.util.UUID.fromString(bson.getAs[String]("uid").getOrElse("")),
          expiry = DateTime.parse(bson.getAs[String]("expiry").getOrElse(""))
        )
    }

  // Key/value store over the "tokens" collection; keys are token UUIDs as strings.
  val hash = new utils.misc.MongoSimpleHashMap[java.util.UUID, AuthToken](
    "tokens",
    (uuid: java.util.UUID) => uuid.toString,
    new reactivemongo.bson.BSONDocumentWriter[AuthToken] {
      def write(token: AuthToken): reactivemongo.bson.BSONDocument =
        reactivemongo.bson.BSONDocument(
          "id" -> token.id.toString,
          "uid" -> token.userID.toString,
          "expiry" -> token.expiry.toString
        )
    },
    reader,
    reactiveMongoApi
  )

  /** Returns all stored tokens. */
  def findall() = hash.findall()

  /** Finds the token with the given ID, if present. */
  def find(id: UUID) = hash.find(id)

  /**
   * Finds tokens expired before the given instant.
   *
   * TODO(review): not implemented — always returns an empty sequence, so callers
   * relying on this for cleanup never see expired tokens.
   */
  def findExpired(dateTime: DateTime) = Future.successful(Seq.empty[models.AuthToken])

  /** Persists (inserts or updates) the token and returns it. */
  def save(token: AuthToken) = hash.update(token.id, token).map(_ => token)

  /**
   * Removes the token with the given ID.
   *
   * Fixed: the original mapped to the `Unit` companion object (`_ => Unit`), which
   * only type-checked via value discard; `()` is the actual unit value.
   */
  def remove(id: UUID) = hash.delete(id).map(_ => ())
}
| serversideapps/silhmojs | server/app/models/daos/AuthTokenDAOMongoImpl.scala | Scala | apache-2.0 | 1,610 |
/* *\\
** \\ \\ / _) \\ \\ / \\ | **
** \\ \\ / | __ \\ _ \\ __| \\ \\ / |\\/ | **
** \\ \\ / | | | __/ | \\ \\ / | | **
** \\_/ _| .__/ \\___| _| \\_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\\* */
package org.vipervm.platform
/**
* Describe a set of memory cells (not necessarily contiguous)
*
* Views can be used for data transfers and as kernel parameters
*/
// Sealed: the only concrete views are the 1D/2D/3D case classes defined below in this file.
sealed abstract class BufferView {
  /** Associated buffer */
  val buffer:Buffer
  /** Offset from the start of the buffer — presumably in memory cells; TODO confirm unit */
  val offset:Long
}
/** Contiguous (1D) view: `size` cells starting at `offset` */
case class BufferView1D(buffer:Buffer,offset:Long,size:Long) extends BufferView
/** 2D view with row padding: `height` rows of `width` cells, `padding` between consecutive rows */
case class BufferView2D(buffer:Buffer,offset:Long,width:Long,height:Long,padding:Long) extends BufferView
/** 3D view with row and plane paddings (comment corrected: this is the 3D view, not 2D) */
case class BufferView3D(buffer:Buffer,offset:Long,width:Long,height:Long,depth:Long,rowPadding:Long,planePadding:Long) extends BufferView
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/platform/BufferView.scala | Scala | gpl-3.0 | 1,388 |
package controllers
import lila.app._
import lila.oauth.OAuthScope
/** Controller for the DGT electronic board integration pages. */
final class DgtCtrl(env: Env) extends LilaController(env) {

  // Landing page (requires an authenticated user).
  def index =
    Auth { implicit ctx => _ =>
      Ok(views.html.dgt.index).fuccess
    }

  // Configuration page; shows the user's existing DGT token (if any).
  def config =
    Auth { implicit ctx => me =>
      findToken(me) map { token =>
        Ok(views.html.dgt.config(token))
      }
    }

  // Creates the personal access token with the DGT scopes — but only when the user
  // has none yet — then tags the user's preferences and returns to the config page.
  def generateToken =
    Auth { _ => me =>
      findToken(me) flatMap { t =>
        t.isEmpty.?? {
          env.oAuth.tokenApi.create(
            lila.oauth.OAuthTokenForm.Data(
              description = "DGT board automatic token",
              scopes = dgtScopes.toList.map(_.key)
            ),
            me,
            isStudent = false
          ) >>
            env.pref.api.saveTag(me, _.dgt, true)
        } inject Redirect(routes.DgtCtrl.config)
      }
    }

  // Play page: redirects to config when no token exists; otherwise renders the board UI.
  def play =
    Auth { implicit ctx => me =>
      findToken(me) map {
        case None => Redirect(routes.DgtCtrl.config)
        case Some(t) =>
          // NOTE(review): this Future is discarded (fire-and-forget pref update);
          // failures are silently dropped — confirm that is intended.
          if (!ctx.pref.hasDgt) env.pref.api.saveTag(me, _.dgt, true)
          Ok(views.html.dgt.play(t))
      }
    }

  // OAuth scopes a DGT board token must carry.
  private val dgtScopes: Set[OAuthScope] = {
    Set(
      OAuthScope.Challenge.Read,
      OAuthScope.Challenge.Write,
      OAuthScope.Preference.Read,
      OAuthScope.Msg.Write,
      OAuthScope.Board.Play
    )
  }

  // Finds an existing personal token of this user compatible with the DGT scopes.
  private def findToken(me: lila.user.User) =
    env.oAuth.tokenApi.findCompatiblePersonal(me, dgtScopes)
}
| luanlv/lila | app/controllers/DgtCtrl.scala | Scala | mit | 1,444 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, TensorModule}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.serializer._
import com.intel.analytics.bigdl.utils.serializer.converters.DataConverter
import com.intel.analytics.bigdl.utils.{T, Table}
import serialization.Bigdl.{AttrValue, BigDLModule}
import scala.reflect.ClassTag
import scala.reflect.runtime._
/**
* This layer implement a bidirectional recurrent neural network
* @param merge concat or add the output tensor of the two RNNs. Default is add
* @param ev numeric operator
* @tparam T numeric type
*/
class BiRecurrent[T : ClassTag] (
    private val merge: AbstractModule[Table, Tensor[T], T] = null,
    val batchNormParams: BatchNormParams[T] = null,
    val isSplitInput: Boolean = false)
  (implicit ev: TensorNumeric[T]) extends DynamicContainer[Tensor[T], Tensor[T], T] {
  // Index of the time axis in the expected (batch, time, feature) input layout.
  val timeDim = 2
  // Index of the feature axis.
  val featDim = 3
  // Forward-direction recurrent sub-network.
  val layer: Recurrent[T] = Recurrent[T](batchNormParams)
  // Backward-direction recurrent sub-network; it is fed the time-reversed sequence.
  val revLayer: Recurrent[T] = Recurrent[T](batchNormParams)
  // Full computation graph: split/duplicate -> (forward | reverse->rnn->reverse) -> merge.
  private var birnn = Sequential[T]()
  if (isSplitInput) {
    // Split along the feature axis: each half of the features feeds one direction.
    birnn.add(BifurcateSplitTable[T](featDim))
  } else {
    // Duplicate the whole input so both directions receive the same tensor.
    birnn.add(ConcatTable()
      .add(Identity[T]())
      .add(Identity[T]()))
  }
  birnn
    .add(ParallelTable[T]()
      .add(layer)
      .add(Sequential[T]()
        .add(Reverse[T](timeDim))
        .add(revLayer)
        .add(Reverse[T](timeDim))))
  // Merge the two directions: element-wise addition by default, or the supplied module.
  if (merge == null) birnn.add(CAddTable[T](true))
  else birnn.add(merge)

  /**
   * Sets `module` as the recurrent cell: the forward layer gets it as-is and the
   * backward layer gets an independent clone (the directions do not share weights).
   * NOTE(review): `modules.append(birnn)` runs on every call, so calling add twice
   * appends the same graph twice — presumably add is meant to be called once; confirm.
   */
  override def add(module: AbstractModule[_ <: Activity, _ <: Activity, T]): this.type = {
    layer.add(module)
    revLayer.add(module.cloneModule())
    modules.append(birnn)
    this
  }

  // Forward pass delegates entirely to the internal birnn graph.
  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    output = birnn.forward(input).toTensor[T]
    output
  }

  override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T]): Unit = {
    birnn.accGradParameters(input, gradOutput)
  }

  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    gradInput = birnn.updateGradInput(input, gradOutput).toTensor[T]
    gradInput
  }

  // Combined backward pass; also accumulates this container's backward wall-clock time.
  override def backward(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    val before = System.nanoTime()
    gradInput = birnn.backward(input, gradOutput).toTensor[T]
    backwardTime += System.nanoTime() - before
    gradInput
  }

  /**
   * This function returns two arrays. One for the weights and the other the gradients
   * Custom modules should override this function if they have parameters
   *
   * @return (Array of weights, Array of grad)
   */
  override def parameters(): (Array[Tensor[T]], Array[Tensor[T]]) = birnn.parameters()

  override def canEqual(other: Any): Boolean = other.isInstanceOf[BiRecurrent[T]]

  /**
   * Clear cached activities to save storage space or network bandwidth. Note that we use
   * Tensor.set to keep some information like tensor share
   *
   * The subclass should override this method if it allocate some extra resource, and call the
   * super.clearState in the override method
   *
   * @return this
   */
  override def clearState(): BiRecurrent.this.type = {
    birnn.clearState()
    this
  }

  override def toString(): String = s"${getPrintName}($timeDim, $birnn)"

  // Structural equality: delegates to the wrapped layers and the internal graph.
  override def equals(other: Any): Boolean = other match {
    case that: BiRecurrent[T] =>
      super.equals(that) &&
        (that canEqual this) &&
        timeDim == that.timeDim &&
        layer == that.layer &&
        revLayer == that.revLayer &&
        birnn == that.birnn
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(super.hashCode(), timeDim, layer, revLayer, birnn)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}
object BiRecurrent extends ContainerSerializable {
  /** Factory mirroring the primary constructor. */
  def apply[@specialized(Float, Double) T: ClassTag](
      merge: AbstractModule[Table, Tensor[T], T] = null,
      batchNormParams: BatchNormParams[T] = null,
      isSplitInput: Boolean = false)
    (implicit ev: TensorNumeric[T]) : BiRecurrent[T] = {
    new BiRecurrent[T](merge, batchNormParams, isSplitInput)
  }

  // Rebuilds a BiRecurrent from its serialized attributes. The "bnorm" flag records
  // whether batch-norm params were serialized and therefore need restoring.
  override def doLoadModule[T: ClassTag](context: DeserializeContext)
    (implicit ev: TensorNumeric[T]) : AbstractModule[Activity, Activity, T] = {
    val attrMap = context.bigdlModule.getAttrMap
    val merge = DataConverter.
      getAttributeValue(context, attrMap.get("merge")).
      asInstanceOf[AbstractModule[Table, Tensor[T], T]]
    val isSplitInput = DataConverter
      .getAttributeValue(context, attrMap.get("isSplitInput"))
      .asInstanceOf[Boolean]
    val flag = DataConverter
      .getAttributeValue(context, attrMap.get("bnorm"))
      .asInstanceOf[Boolean]
    val biRecurrent = if (flag) {
      BiRecurrent(merge, batchNormParams = BatchNormParams(), isSplitInput = isSplitInput)
    } else {
      BiRecurrent(merge, isSplitInput = isSplitInput)
    }
    // Replace the freshly-built graph with the serialized one so weights are restored.
    biRecurrent.birnn = DataConverter.
      getAttributeValue(context, attrMap.get("birnn")).
      asInstanceOf[Sequential[T]]
    if (flag) {
      // Restore each batch-norm hyper-parameter and initial tensor individually;
      // attribute keys must match those written in doSerializeModule below.
      val bnormEpsAttr = attrMap.get("bnormEps")
      biRecurrent.batchNormParams.eps =
        DataConverter.getAttributeValue(context, bnormEpsAttr)
          .asInstanceOf[Double]
      val bnormMomentumAttr = attrMap.get("bnormMomentum")
      biRecurrent.batchNormParams.momentum =
        DataConverter.getAttributeValue(context, bnormMomentumAttr)
          .asInstanceOf[Double]
      val bnormInitWeightAttr = attrMap.get("bnormInitWeight")
      biRecurrent.batchNormParams.initWeight =
        DataConverter.getAttributeValue(context, bnormInitWeightAttr)
          .asInstanceOf[Tensor[T]]
      val bnormInitBiasAttr = attrMap.get("bnormInitBias")
      biRecurrent.batchNormParams.initBias =
        DataConverter.getAttributeValue(context, bnormInitBiasAttr)
          .asInstanceOf[Tensor[T]]
      val bnormInitGradWeightAttr = attrMap.get("bnormInitGradWeight")
      biRecurrent.batchNormParams.initGradWeight =
        DataConverter.getAttributeValue(context, bnormInitGradWeightAttr)
          .asInstanceOf[Tensor[T]]
      val bnormInitGradBiasAttr = attrMap.get("bnormInitGradBias")
      biRecurrent.batchNormParams.initGradBias =
        DataConverter.getAttributeValue(context, bnormInitGradBiasAttr)
          .asInstanceOf[Tensor[T]]
      val bnormAffineAttr = attrMap.get("bnormAffine")
      biRecurrent.batchNormParams.affine =
        DataConverter.getAttributeValue(context, bnormAffineAttr)
          .asInstanceOf[Boolean]
    }
    loadSubModules(context, biRecurrent)
    biRecurrent
  }

  // Serializes the merge module, split flag, the whole birnn graph, and (when present)
  // every batch-norm parameter, plus a boolean "bnorm" flag marking their presence.
  override def doSerializeModule[T: ClassTag](context: SerializeContext[T],
                                              birecurrentBuilder : BigDLModule.Builder)
                                             (implicit ev: TensorNumeric[T]) : Unit = {
    val birecurrentModule = context.moduleData.module.
      asInstanceOf[BiRecurrent[T]]

    val mergeBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue(context, mergeBuilder,
      birecurrentModule.merge,
      ModuleSerializer.tensorModuleType)
    birecurrentBuilder.putAttr("merge", mergeBuilder.build)

    val isSplitInputBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue(context, isSplitInputBuilder,
      birecurrentModule.isSplitInput,
      universe.typeOf[Boolean])
    birecurrentBuilder.putAttr("isSplitInput", isSplitInputBuilder.build)

    val birnnBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue(context, birnnBuilder,
      birecurrentModule.birnn,
      ModuleSerializer.tensorModuleType)
    birecurrentBuilder.putAttr("birnn", birnnBuilder.build)

    val flag = if (birecurrentModule.batchNormParams != null) {
      val bnormEpsBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormEpsBuilder,
        birecurrentModule.batchNormParams.eps, universe.typeOf[Double])
      birecurrentBuilder.putAttr("bnormEps", bnormEpsBuilder.build)

      val bnormMomentumBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormMomentumBuilder,
        birecurrentModule.batchNormParams.momentum, universe.typeOf[Double])
      birecurrentBuilder.putAttr("bnormMomentum", bnormMomentumBuilder.build)

      val bnormInitWeightBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormInitWeightBuilder,
        birecurrentModule.batchNormParams.initWeight, ModuleSerializer.tensorType)
      birecurrentBuilder.putAttr("bnormInitWeight", bnormInitWeightBuilder.build)

      val bnormInitBiasBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormInitBiasBuilder,
        birecurrentModule.batchNormParams.initBias, ModuleSerializer.tensorType)
      birecurrentBuilder.putAttr("bnormInitBias", bnormInitBiasBuilder.build)

      val bnormInitGradWeightBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormInitGradWeightBuilder,
        birecurrentModule.batchNormParams.initGradWeight, ModuleSerializer.tensorType)
      birecurrentBuilder.putAttr("bnormInitGradWeight", bnormInitGradWeightBuilder.build)

      val bnormInitGradBiasBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormInitGradBiasBuilder,
        birecurrentModule.batchNormParams.initGradBias, ModuleSerializer.tensorType)
      birecurrentBuilder.putAttr("bnormInitGradBias", bnormInitGradBiasBuilder.build)

      val bnormAffineBuilder = AttrValue.newBuilder
      DataConverter.setAttributeValue(context, bnormAffineBuilder,
        birecurrentModule.batchNormParams.affine, universe.typeOf[Boolean])
      birecurrentBuilder.putAttr("bnormAffine", bnormAffineBuilder.build)
      true
    } else {
      false
    }

    val bNormBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue(context, bNormBuilder,
      flag, universe.typeOf[Boolean])
    birecurrentBuilder.putAttr("bnorm", bNormBuilder.build)
    serializeSubModules(context, birecurrentBuilder)
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/BiRecurrent.scala | Scala | apache-2.0 | 10,784 |
/** Exercise skeleton: the body is intentionally left empty for students to fill in. */
object input_in_range2_bad {
  // Fixed: the original used deprecated procedure syntax (`def main(args) { }`);
  // an explicit `: Unit =` is required in modern Scala.
  def main(args: Array[String]): Unit = {
    // Put code here
  }
}
| LoyolaChicagoBooks/introcs-scala-examples | input_in_range2_bad/input_in_range2_bad.scala | Scala | gpl-3.0 | 90 |
package com.ringcentral.gatling.mongo.action
import com.ringcentral.gatling.mongo.check.MongoCheck
import com.ringcentral.gatling.mongo.response.MongoResponse
import io.gatling.commons.stats.{KO, OK, Status}
import io.gatling.commons.validation
import io.gatling.commons.validation.{NoneSuccess, Validation}
import io.gatling.core.action.{Action, ExitableAction}
import io.gatling.core.check.Check
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.message.ResponseTimings
import io.gatling.core.util.NameGen
import play.api.libs.json._
import reactivemongo.api.DefaultDB
import reactivemongo.api.collections.GenericQueryBuilder
import reactivemongo.play.json.JSONSerializationPack
import scala.util.{Failure, Success, Try}
/**
 * Base class for Gatling actions executing MongoDB commands: resolves the command
 * name, delegates execution to subclasses, and reports timings/check results.
 */
abstract class MongoAction(database: DefaultDB) extends ExitableAction with NameGen {

  /** Gatling expression resolving the request name used for reporting. */
  def commandName: Expression[String]

  /** Runs the concrete Mongo command for an already-resolved command name. */
  def executeCommand(commandName: String, session: Session): Validation[Unit]

  override def execute(session: Session): Unit = recover(session) {
    commandName(session).flatMap { resolvedCommandName =>
      val outcome = executeCommand(resolvedCommandName, session)
      // Report unbuildable requests (e.g. unresolvable expressions) instead of crashing.
      outcome.onFailure(errorMessage => statsEngine.reportUnbuildableRequest(session, resolvedCommandName, errorMessage))
      outcome
    }
  }

  /** Parses a string into a JSON object, wrapped in a Gatling Validation. */
  def string2JsObject(string: String): Validation[JsObject] = {
    Try[JsObject](Json.parse(string).as[JsObject]) match {
      case Success(json) => validation.SuccessWrapper(json).success
      case Failure(err) =>
        validation.FailureWrapper(s"Error parse JSON string: $string. ${err.getMessage}").failure
    }
  }

  /** Optional variant: None passes through as a successful None. */
  def string2JsObject(optionString: Option[String]): Validation[Option[JsObject]] =
    optionString match {
      case Some(string) => string2JsObject(string).map(Some.apply)
      case None => NoneSuccess
    }

  // Logs the response timing/status to the stats engine and forwards the session.
  protected def executeNext(session: Session,
                            sent: Long,
                            received: Long,
                            status: Status,
                            next: Action,
                            requestName: String,
                            message: Option[String]): Unit = {
    val timings = ResponseTimings(sent, received)
    statsEngine.logResponse(session, requestName, timings, status, None, message)
    next ! session
  }

  // Runs all checks against the response, applies their session updates, then
  // reports OK or KO (with the first failure message) and moves to the next action.
  protected def processResult(session: Session,
                              sent: Long,
                              received: Long,
                              checks: List[MongoCheck],
                              response: MongoResponse,
                              next: Action,
                              requestName: String): Unit = {
    // run all the checks, advise the Gatling API that it is complete and move to next
    val (checkSaveUpdate, error) = Check.check(response, session, checks)
    val newSession = checkSaveUpdate(session)
    error match {
      case Some(validation.Failure(errorMessage)) => executeNext(newSession.markAsFailed, sent, received, KO, next, requestName, Some(errorMessage))
      case _ => executeNext(newSession, sent, received, OK, next, requestName, None)
    }
  }

  // Convenience combinators: apply sort/hint only when the option is defined.
  implicit class GenericQueryBuilderExt(b: GenericQueryBuilder[JSONSerializationPack.type]) {
    def sort(sort: Option[JsObject]): GenericQueryBuilder[JSONSerializationPack.type] = {
      sort.map(b.sort).getOrElse(b)
    }
    def hint(sort: Option[JsObject]): GenericQueryBuilder[JSONSerializationPack.type] = {
      sort.map(b.hint).getOrElse(b)
    }
  }
}
| RC-Platform-Disco-Team/gatling-mongodb-protocol | src/main/scala/com/ringcentral/gatling/mongo/action/MongoAction.scala | Scala | mit | 3,515 |
package io.youi.capacitor
import scala.scalajs.js
// Facade over the JS object Capacitor hands to push-notification callbacks.
trait PushNotificationData extends js.Object {
  // Identifier of the notification stream carried by the payload.
  def streamId: String
} | outr/youi | capacitor/src/main/scala/io/youi/capacitor/PushNotificationData.scala | Scala | mit | 123 |
/* *\\
** _____ __ _____ __ ____ FieldKit **
** / ___/ / / /____/ / / / \\ (c) 2009, field **
** / ___/ /_/ /____/ / /__ / / / http://www.field.io **
** /_/ /____/ /____/ /_____/ **
\\* */
/* created March 24, 2009 */
package field.kit.gl.scene.shape
import field.kit.gl.scene._
import field.kit.math._
/**
* Companion object to class <code>Quad</code>
*/
object Quad extends Enumeration {
  /** Anchor the quad at its top-left corner. */
  val TOP_LEFT = Value
  /** Anchor the quad at its centre. */
  val CENTER = Value

  /** true when new Quadliterals should be constructed from quads instead of triangles */
  val DEFAULT_USE_QUADS = true

  /** Creates a new default <code>Quad</code> */
  def apply() =
    new Quad("Quad", CENTER, 1f, 1f)

  /** Creates a centred quad of the given size. */
  def apply(width:Float, height:Float) =
    new Quad("Quad", CENTER, width, height)

  /** Creates a quad of the given size with an explicit anchoring mode. */
  def apply(mode:Quad.Value, width:Float, height:Float) =
    new Quad("Quad", mode, width, height)

  /** Creates a named, centred quad of the given size. */
  def apply(name:String, width:Float, height:Float) =
    new Quad(name, CENTER, width, height)
}
/**
* A quadliteral mesh, often used for billboards, shaders, etc
*/
/**
 * A quadliteral mesh, often used for billboards, shaders, etc
 */
class Quad(name:String, mode:Quad.Value, protected var _width:Float, protected var _height:Float)
extends Mesh(name) {
  import field.kit.util.Buffer

  // TODO could make a more convenient method for that
  var useQuads = Quad.DEFAULT_USE_QUADS

  // Build initial geometry from the constructor arguments.
  init(mode, width, height)

  def this(name:String) {
    this(name, Quad.CENTER, 1f, 1f)
  }

  /**
   * (Re)initialises the geometry data of this Quad.
   *
   * @param _mode  anchoring mode (TOP_LEFT or CENTER)
   * @param width  new quad width
   * @param height new quad height
   */
  def init(_mode:Quad.Value, width:Float, height:Float) {
    this._width = width
    this._height = height

    // -- Vertices -------------------------------------------------------------
    val vertices = data.allocVertices(4)
    vertices.clear
    // Fixed: match on the `_mode` argument instead of the constructor's `mode`;
    // previously a later explicit init call with a different mode was silently
    // ignored (the construction-time mode was always used).
    _mode match {
      case Quad.TOP_LEFT =>
        vertices put 0 put height put 0
        vertices put width put height put 0
        vertices put width put 0 put 0
        vertices put 0 put 0 put 0
      case Quad.CENTER =>
        val hw = width * 0.5f
        val hh = height * 0.5f
        vertices put -hw put hh put 0
        vertices put hw put hh put 0
        vertices put hw put -hh put 0
        vertices put -hw put -hh put 0
    }

    // -- Texture Coordinates --------------------------------------------------
    val textureCoords = data.allocTextureCoords(4)
    textureCoords.clear
    textureCoords put 0f put 0f
    textureCoords put 1f put 0f
    textureCoords put 1f put 1f
    textureCoords put 0f put 1f

    // -- Normals --------------------------------------------------------------
    // All four normals point along +Z (the quad faces the viewer).
    val normals = data.allocNormals(4)
    normals.clear
    for(i <- 0 until 4)
      normals put 0 put 0 put 1

    // -- Indices --------------------------------------------------------------
    // Either render directly as one GL quad, or as two triangles via an index buffer.
    if(useQuads) {
      data.indexModes(0) = IndexMode.QUADS
    } else {
      val indices = data.allocIndices(6)
      indices.clear
      indices put Array(0, 1, 2, 0, 2, 3)
    }
  }

  // -- Getters ----------------------------------------------------------------
  def width = _width
  def height = _height
}
| field/FieldKit.scala | src.gl/field/kit/gl/scene/shape/Quad.scala | Scala | lgpl-3.0 | 3,377 |
package com.thenetcircle.event_bus.story.tasks.http
import akka.NotUsed
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.scaladsl.Flow
import com.thenetcircle.event_bus.IntegrationTestBase
import com.thenetcircle.event_bus.event.EventStatus.{FAILED, NORMAL}
import com.thenetcircle.event_bus.event.{Event, EventStatus}
import org.scalatest.BeforeAndAfter
import scala.concurrent.Await
import scala.concurrent.duration._
/** Integration tests for HttpSource: boots a real HTTP server and posts to it. */
class HttpSourceTest extends IntegrationTestBase with BeforeAndAfter {

  behavior of "HttpSource"

  // Boots an HttpSource with the given handler, POSTs `requestData` to it, and
  // returns the response status and body. The source is shut down afterwards.
  def request(
      handler: Flow[(EventStatus, Event), (EventStatus, Event), NotUsed],
      requestData: String,
      port: Int = 55661
  ): (StatusCode, String) = {
    val settings = HttpSourceSettings(interface = "127.0.0.1", port = port, succeededResponse = "ok")
    val httpSource = new HttpSource(settings)
    httpSource.run(handler)
    val result = Await.result(
      Http()
        .singleRequest(
          HttpRequest(
            method = HttpMethods.POST,
            uri = Uri(s"http://127.0.0.1:$port"),
            entity = HttpEntity(requestData)
          )
        )
        .map(r => {
          (r.status, Await.result(r.entity.toStrict(100.millisecond), 200.millisecond).data.utf8String)
        }),
      10.seconds
    )
    httpSource.shutdown()
    result
  }

  it should "responds error when get non json request" in {
    val testHandler = Flow[(EventStatus, Event)]
    val (status, body) = request(testHandler, "abc")
    status shouldEqual StatusCodes.BadRequest
  }

  it should "gives proper response when get proper request" in {
    val testHandler = Flow[(EventStatus, Event)]
    val (status, body) = request(testHandler, "{}")
    status shouldEqual StatusCodes.OK
    body shouldEqual "ok"
  }

  it should "responds error when processing failed" in {
    // Handler that marks every event as FAILED to exercise the error response path.
    val testHandler =
      Flow[(EventStatus, Event)]
        .map {
          case (_, event) =>
            (FAILED(new RuntimeException("failed")), event)
        }
    // TODO check here
    // note that if not change the port here,
    // will get Success response which from last test, could be caused by cache under Akka http binding
    val (status, body) = request(testHandler, "{}", 55663)
    status shouldEqual StatusCodes.InternalServerError
    body shouldEqual "failed"
  }

  it should "responds error when processing failed with exception" in {
    // Handler that throws for event "a" and succeeds for everything else.
    val testHandler =
      Flow[(EventStatus, Event)]
        .map {
          case (_, event) =>
            if (event.uuid == "a")
              throw new RuntimeException("processing failed")
            else
              (NORMAL, event)
        }
    val (status, body) = request(testHandler, "{\\"id\\":\\"a\\"}", 55664)
    status shouldEqual StatusCodes.InternalServerError
    val (status2, body2) = request(testHandler, "{\\"id\\":\\"b\\"}", 55664)
    status2 shouldEqual StatusCodes.OK
    body2 shouldEqual "ok"
  }
}
| thenetcircle/event-bus | integration-test/src/test/scala/com/thenetcircle/event_bus/story/tasks/http/HttpSourceTest.scala | Scala | apache-2.0 | 2,963 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.testFramework.EditorTestUtil
/**
* @author Nikolay.Tropin
*/
/** Tests for the "if (x == null) None else Some(x) -> Option(x)" inspection/quick-fix. */
class IfElseToOptionTest extends OperationsOnCollectionInspectionTest {

  override protected val classOfInspection: Class[_ <: OperationOnCollectionInspection] =
    classOf[IfElseToOptionInspection]

  override protected val hint: String =
    "Replace with Option(x)"

  // null-check with None in the then-branch.
  def test1(): Unit = {
    doTest(
      s"val x = 0; ${START}if (x == null) None else Some(x)$END",
      "val x = 0; if (x == null) None else Some(x)",
      "val x = 0; Option(x)"
    )
  }

  // Negated null-check (Some in the then-branch).
  def test2(): Unit = {
    doTest(
      s"val x = 0; ${START}if (x != null) Some(x) else None$END",
      "val x = 0; if (x != null) Some(x) else None",
      "val x = 0; Option(x)"
    )
  }

  // Multi-line if/else with braces.
  def test3(): Unit = {
    doTest(
      s"""val x = 0
         |${START}if (x == null) {
         |  None
         |}
         |else {
         |  Some(x)
         |}$END""".stripMargin,
      """val x = 0
        |if (x == null) {
        |  None
        |}
        |else {
        |  Some(x)
        |}""".stripMargin,
      """val x = 0
        |Option(x)""".stripMargin
    )
  }

  // Yoda condition: null on the left-hand side.
  def test4(): Unit = {
    doTest(
      s"val x = 0; ${START}if (null == x) None else Some(x)$END",
      "val x = 0; if (null == x) None else Some(x)",
      "val x = 0; Option(x)"
    )
  }

  // Boxed java.lang.Integer can legitimately be null, so the fix applies.
  def testBoxedJavaType(): Unit = doTest(
    s"""
       |val x: java.lang.Integer = null
       |val y = ${START}if (null == x) None else Some(x)$END
       |""".stripMargin,
    """
      |val x: java.lang.Integer = null
      |val y = if (null == x) None else Some(x)
      |""".stripMargin,
    """
      |val x: java.lang.Integer = null
      |val y = Option(x)
      |""".stripMargin
  )

  // Negative cases: unboxing conversions change semantics, so no inspection fires.
  def testBoxedJavaTypeToInt(): Unit =
    checkTextHasNoErrors(
      """
        |def test(x: java.lang.Integer): Option[Int] =
        |  if (x == null) None else Some(x)
        |""".stripMargin
    )

  def testBoxedJavaTypeAliasToInt(): Unit =
    checkTextHasNoErrors(
      """
        |type X = java.lang.Integer
        |def test(x: X): Option[Int] =
        |  if (x == null) None else Some(x)
        |""".stripMargin
    )

  // Implicit conversion to a primitive also blocks the fix.
  def testConversionToPrimitive(): Unit =
    checkTextHasNoErrors(
      """
        |trait X
        |
        |implicit def xtoInt(x: X): Int = 42
        |
        |def test(x: X): Option[Int] =
        |  if (x == null) None else Some(x)
        |""".stripMargin
    )
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/collections/IfElseToOptionTest.scala | Scala | apache-2.0 | 2,518 |
package ee.cone.c4vdom
//import ee.cone.c4connection_api.EventKey
import java.text.DecimalFormat
import ee.cone.c4vdom.Types._
/** Something that can serialize itself into the mutable JSON builder. */
trait ToJson {
  def appendJson(builder: MutableJsonBuilder): Unit
}
/** Marker for values that can live in the virtual DOM tree. */
trait VDomValue extends ToJson
////
/** Streaming JSON writer; every method returns the builder for chaining. */
trait MutableJsonBuilder {
  def startArray(): MutableJsonBuilder
  def startObject(): MutableJsonBuilder
  /** Closes the innermost open array or object. */
  def end(): MutableJsonBuilder
  def append(value: String): MutableJsonBuilder
  /** Appends a number, formatted with the given DecimalFormat. */
  def append(value: BigDecimal, decimalFormat: DecimalFormat): MutableJsonBuilder
  def append(value: Boolean): MutableJsonBuilder
}
////
object Types {
  /** Stable key identifying a virtual-DOM child among its siblings. */
  type VDomKey = String
  /** Result of a view function: the list of children to render. */
  type ViewRes = List[ChildPair[_]]
}
/** A keyed child within a parent of (contravariant) tag category C. */
trait ChildPair[-C] {
  def key: VDomKey
}
/** Builds a keyed child node from an element value and its own children. */
trait ChildPairFactory {
  def apply[C](key: VDomKey, theElement: VDomValue, elements: ViewRes): ChildPair[C]
}
////
/** Base class for HTML/virtual-DOM tag names. */
abstract class TagName(val name: String)
/** Marker for anything usable as a tag attribute. */
trait TagAttr
/** A tag attribute that serializes itself as a style entry. */
trait TagStyle extends TagAttr {
  def appendStyle(builder: MutableJsonBuilder): Unit
}
/** A CSS-style color value, e.g. "#ffffff" — presumably; TODO confirm format. */
trait Color {
  def value: String
}
////
/** Functional lens focusing a part I inside a whole C (get / modify / set). */
trait VDomLens[C,I] {
  def of: C⇒I
  def modify: (I⇒I) ⇒ C⇒C
  def set: I⇒C⇒C
}
/** Renders the child elements for a given application state. */
trait VDomView[State] extends Product {
  def view: State ⇒ ViewRes
}
/** Side that pushes rendered output to a client branch. */
trait VDomSender[State] {
  def branchKey: String
  /** Optional sender callback taking (path, body) and updating the state. */
  type Send = Option[(String,String) ⇒ State ⇒ State]
  def sending: State ⇒ (Send,Send)
}
/** An incoming message: header lookup by name plus an opaque body. */
trait VDomMessage {
  def header: String⇒String
  def body: Object
}
/** Consumes incoming messages, producing state transitions. */
trait Receiver[State] {
  type Handler = VDomMessage ⇒ State ⇒ State
  def receive: Handler
}
/** Resolves a path inside the virtual DOM tree to a node transformation. */
trait VDomResolver {
  def resolve(pathStr: String): Option[VDomValue] ⇒ Option[VDomValue]
}
/** Receiver that also exposes the seed views to (re)build branches from. */
trait VDomHandler[State] extends Receiver[State] {
  def seeds: State ⇒ List[(String,Product)]
}
/** Wires sender, view, expiry helper and state lens into a handler. */
trait VDomHandlerFactory {
  def create[State](
    sender: VDomSender[State],
    view: VDomView[State],
    vDomUntil: VDomUntil,
    vDomStateKey: VDomLens[State,Option[VDomState]]
  ): VDomHandler[State]
}
/** Cached virtual DOM plus the timestamp until which it stays valid. */
case class VDomState(value: VDomValue, until: Long)
/** Extracts the earliest expiry from the rendered children and strips the markers. */
trait VDomUntil {
  def get(pairs: ViewRes): (Long, ViewRes)
}
////
/** Helpers for serializing common tag attribute groups. */
trait TagJsonUtils {
  def appendInputAttributes(builder: MutableJsonBuilder, value: String, deferSend: Boolean): Unit
  def appendStyles(builder: MutableJsonBuilder, styles: List[TagStyle]): Unit
}
| wregs/c4proto | c4vdom-base/src/main/scala/ee/cone/c4vdom/Api.scala | Scala | apache-2.0 | 2,208 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.hive.ql.udf.UDAFPercentile
import org.apache.hadoop.hive.ql.udf.generic.{AbstractGenericUDAFResolver, GenericUDAFEvaluator, GenericUDAFMax}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.{AggregationBuffer, Mode}
import org.apache.hadoop.hive.ql.util.JavaDataModel
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo
import test.org.apache.spark.sql.MyDoubleAvg
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.apache.spark.sql.execution.aggregate.ObjectHashAggregateExec
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.test.SQLTestUtils
/** End-to-end tests for Hive UDAF support (built-in, custom, and Java UDAFs). */
class HiveUDAFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
  import testImplicits._

  protected override def beforeAll(): Unit = {
    // Register the test UDAFs and a small table with both null and non-null values.
    sql(s"CREATE TEMPORARY FUNCTION mock AS '${classOf[MockUDAF].getName}'")
    sql(s"CREATE TEMPORARY FUNCTION hive_max AS '${classOf[GenericUDAFMax].getName}'")

    Seq(
      (0: Integer) -> "val_0",
      (1: Integer) -> "val_1",
      (2: Integer) -> null,
      (3: Integer) -> null
    ).toDF("key", "value").repartition(2).createOrReplaceTempView("t")
  }

  protected override def afterAll(): Unit = {
    try {
      sql(s"DROP TEMPORARY FUNCTION IF EXISTS mock")
      sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
    } finally {
      super.afterAll()
    }
  }

  test("built-in Hive UDAF") {
    val df = sql("SELECT key % 2, hive_max(key) FROM t GROUP BY key % 2")

    // Hive UDAFs are planned through the object hash aggregate operator.
    val aggs = df.queryExecution.executedPlan.collect {
      case agg: ObjectHashAggregateExec => agg
    }

    // There should be two aggregate operators, one for partial aggregation, and the other for
    // global aggregation.
    assert(aggs.length == 2)

    checkAnswer(df, Seq(
      Row(0, 2),
      Row(1, 3)
    ))
  }

  test("customized Hive UDAF") {
    val df = sql("SELECT key % 2, mock(value) FROM t GROUP BY key % 2")

    val aggs = df.queryExecution.executedPlan.collect {
      case agg: ObjectHashAggregateExec => agg
    }

    // There should be two aggregate operators, one for partial aggregation, and the other for
    // global aggregation.
    assert(aggs.length == 2)

    // mock(value) returns (nonNullCount, nullCount) per group.
    checkAnswer(df, Seq(
      Row(0, Row(1, 1)),
      Row(1, Row(1, 1))
    ))
  }

  test("call JAVA UDAF") {
    withTempView("temp") {
      withUserDefinedFunction("myDoubleAvg" -> false) {
        spark.range(1, 10).toDF("value").createOrReplaceTempView("temp")
        sql(s"CREATE FUNCTION myDoubleAvg AS '${classOf[MyDoubleAvg].getName}'")
        checkAnswer(
          spark.sql("SELECT default.myDoubleAvg(value) as my_avg from temp"),
          Row(105.0))
      }
    }
  }

  test("non-deterministic children expressions of UDAF") {
    withTempView("view1") {
      spark.range(1).selectExpr("id as x", "id as y").createTempView("view1")
      withUserDefinedFunction("testUDAFPercentile" -> true) {
        // non-deterministic children of Hive UDAF
        sql(s"CREATE TEMPORARY FUNCTION testUDAFPercentile AS '${classOf[UDAFPercentile].getName}'")
        val e1 = intercept[AnalysisException] {
          sql("SELECT testUDAFPercentile(x, rand()) from view1 group by y")
        }.getMessage
        assert(Seq("nondeterministic expression",
          "should not appear in the arguments of an aggregate function").forall(e1.contains))
      }
    }
  }
}
/**
* A testing Hive UDAF that computes the counts of both non-null values and nulls of a given column.
*/
// Resolver entry point: Hive instantiates the evaluator per aggregation.
class MockUDAF extends AbstractGenericUDAFResolver {
  override def getEvaluator(info: Array[TypeInfo]): GenericUDAFEvaluator = new MockUDAFEvaluator
}
// Aggregation buffer: two mutable Long counters (non-null values and nulls).
class MockUDAFBuffer(var nonNullCount: Long, var nullCount: Long)
  extends GenericUDAFEvaluator.AbstractAggregationBuffer {

  // Memory estimate for Hive's buffer accounting: two primitive longs.
  override def estimate(): Int = JavaDataModel.PRIMITIVES2 * 2
}
// Evaluator backing MockUDAF: counts non-null values and nulls separately and
// returns the pair (nonNullCount, nullCount) as a struct.
class MockUDAFEvaluator extends GenericUDAFEvaluator {
  private val nonNullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector

  private val nullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector

  // Struct object inspector describing the partial/final result: (nonNullCount, nullCount).
  private val bufferOI = {
    val fieldNames = Seq("nonNullCount", "nullCount").asJava
    val fieldOIs = Seq(nonNullCountOI: ObjectInspector, nullCountOI: ObjectInspector).asJava
    ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs)
  }

  private val nonNullCountField = bufferOI.getStructFieldRef("nonNullCount")

  private val nullCountField = bufferOI.getStructFieldRef("nullCount")

  override def getNewAggregationBuffer: AggregationBuffer = new MockUDAFBuffer(0L, 0L)

  // Zeroes both counters so the buffer can be reused for the next group.
  override def reset(agg: AggregationBuffer): Unit = {
    val buffer = agg.asInstanceOf[MockUDAFBuffer]
    buffer.nonNullCount = 0L
    buffer.nullCount = 0L
  }

  // Same struct layout is used for every mode (partial and final).
  override def init(mode: Mode, parameters: Array[ObjectInspector]): ObjectInspector = bufferOI

  // Processes one input row: bump the matching counter based on null-ness.
  override def iterate(agg: AggregationBuffer, parameters: Array[AnyRef]): Unit = {
    val buffer = agg.asInstanceOf[MockUDAFBuffer]
    if (parameters.head eq null) {
      buffer.nullCount += 1L
    } else {
      buffer.nonNullCount += 1L
    }
  }

  // Folds a serialized partial result (struct) into the running buffer.
  override def merge(agg: AggregationBuffer, partial: Object): Unit = {
    if (partial ne null) {
      val nonNullCount = nonNullCountOI.get(bufferOI.getStructFieldData(partial, nonNullCountField))
      val nullCount = nullCountOI.get(bufferOI.getStructFieldData(partial, nullCountField))
      val buffer = agg.asInstanceOf[MockUDAFBuffer]
      buffer.nonNullCount += nonNullCount
      buffer.nullCount += nullCount
    }
  }

  // Serializes the buffer as an Object array matching bufferOI's field order.
  override def terminatePartial(agg: AggregationBuffer): AnyRef = {
    val buffer = agg.asInstanceOf[MockUDAFBuffer]
    Array[Object](buffer.nonNullCount: java.lang.Long, buffer.nullCount: java.lang.Long)
  }

  override def terminate(agg: AggregationBuffer): AnyRef = terminatePartial(agg)
}
| brad-kaiser/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala | Scala | apache-2.0 | 6,882 |
import java.io._
import org.commonmark.parser.Parser
import org.commonmark.renderer.html.HtmlRenderer
import org.fusesource.scalate.TemplateEngine
import scala.io.Source
/**
 * Static-site generator: finds every `index.md` under the resources folder, renders each
 * through its Mustache template, converts the concatenated markdown to HTML, wraps it in
 * `main.mu`, and writes the result to `index.html`.
 */
object Generator extends App {

  /** Scalate engine used to compile and render Mustache templates. */
  val engine = new TemplateEngine
  /** Generated entry page. */
  val outputFile = new File("index.html")
  /** Root (relative path) that holds the markdown content and templates. */
  val resources = "src/main/resources"
  val contentFolder = new File(outputFile.getParentFile, resources)
  /** Base URI used to build absolute links inside rendered templates. */
  val webPath = contentFolder.toURI

  writeToFile(toHtml(findEntries().mkString("\n")), outputFile)

  /**
   * Finds every `index.md` below the content folder and renders it through its template.
   * Entries are sorted by relative path, newest (reverse order) first.
   */
  def findEntries(): List[String] = {
    // Depth-first collection of all files matching `filter`.
    // Option(...) guards against listFiles returning null (non-directory / IO error).
    def findRecursiv(file: File, filter: File => Boolean): Array[File] = {
      val files = Option(file.listFiles).getOrElse(Array.empty[File])
      files.filter(filter) ++ files.filter(_.isDirectory).flatMap(findRecursiv(_, filter))
    }
    findRecursiv(contentFolder, _.getName == "index.md")
      .map(index => index.getParentFile.getPath.substring(resources.length + 1))
      .sorted.reverse
      .map(folder => toTemplate(folder, "/index.md"))
      .toList
  }

  /**
   * Renders the classpath resource `pathTo + fileName` as a Mustache template.
   *
   * @param pathTo  folder of the resource, relative to the resources root
   * @param fileName resource file name (with leading slash)
   * @param map     extra template attributes merged over the defaults
   */
  def toTemplate(pathTo: String, fileName: String, map: Map[String, AnyRef] = Map.empty[String, AnyRef]): String = {
    // Read the template explicitly as UTF-8 (matches the UTF-8 PrintWriter used for output)
    // and make sure the underlying stream is closed.
    val source = Source.fromInputStream(getClass.getResourceAsStream(pathTo + fileName), "UTF-8")
    val text = try source.mkString finally source.close()
    val template = engine.compileMoustache(text)
    // Default attributes every template can use; caller-supplied `map` wins on conflicts.
    val attributes = Map(
      "path" -> (webPath + pathTo),
      "changeDate" -> Util.lastEdit(resources + "/" + pathTo + fileName),
      "gravatar" -> ((emails: String) => Util.gravatars(emails))
    ) ++ map
    engine.layout("", template, attributes)
  }

  /** Converts the markdown entries to HTML and wraps them in the `main.mu` layout. */
  def toHtml(md: String): String =
    toTemplate("", "main.mu", Map("entries" -> fromMdtoHtml(md)))

  /** Renders CommonMark markdown to an HTML fragment. */
  def fromMdtoHtml(input: String): String = {
    val parser = Parser.builder.build
    val document = parser.parse(input)
    val renderer = HtmlRenderer.builder.build
    renderer.render(document)
  }

  /** Writes `s` to `file` as UTF-8, always closing the writer. */
  def writeToFile(s: String, file: File): Unit = {
    val out = new PrintWriter(file, "UTF-8")
    try out.print(s)
    finally out.close()
  }
}
| ThStock/garbanzo | src/main/scala/Generator.scala | Scala | apache-2.0 | 1,966 |
package org.openurp.edu.eams.teach.program.common.helper
import org.openurp.edu.teach.plan.CourseGroup
import org.openurp.edu.eams.teach.program.CoursePlan
import org.openurp.edu.teach.plan.PlanCourse
import org.openurp.edu.eams.teach.program.major.MajorPlan
import org.openurp.edu.teach.plan.MajorPlanCourse
import org.openurp.edu.teach.plan.MajorCourseGroup
import org.openurp.edu.eams.teach.program.major.model.MajorPlanBean
import org.openurp.edu.eams.teach.program.major.model.MajorPlanCourseBean
import org.openurp.edu.eams.teach.program.major.model.MajorCourseGroupBean
import org.openurp.edu.eams.teach.program.original.OriginalPlan
import org.openurp.edu.eams.teach.program.original.OriginalPlanCourse
import org.openurp.edu.eams.teach.program.original.OriginalPlanCourseGroup
import org.openurp.edu.eams.teach.program.original.model.OriginalPlanBean
import org.openurp.edu.eams.teach.program.original.model.OriginalPlanCourseBean
import org.openurp.edu.eams.teach.program.original.model.OriginalPlanCourseGroupBean
import org.openurp.edu.eams.teach.program.personal.PersonalPlan
import org.openurp.edu.eams.teach.program.personal.PersonalPlanCourse
import org.openurp.edu.eams.teach.program.personal.PersonalPlanCourseGroup
import org.openurp.edu.eams.teach.program.personal.model.PersonalPlanBean
import org.openurp.edu.eams.teach.program.personal.model.PersonalPlanCourseBean
import org.openurp.edu.eams.teach.program.personal.model.PersonalPlanCourseGroupBean
//remove if not needed
/**
 * Maps runtime bean types of course-plan entities to the Hibernate-mapped interfaces
 * they are persisted as. Each resolver returns null when the runtime type matches
 * none of the known bean classes.
 */
object ProgramHibernateClassGetter {

  /** Resolves the Hibernate-mapped interface for a course-group bean. */
  def hibernateClass(planGroup: CourseGroup): Class[_ <: CourseGroup] = {
    // Single dereference of the runtime class; checked against each bean type in turn.
    val runtimeClass = planGroup.getClass
    if (classOf[OriginalPlanCourseGroupBean].isAssignableFrom(runtimeClass)) {
      classOf[OriginalPlanCourseGroup]
    } else if (classOf[PersonalPlanCourseGroupBean].isAssignableFrom(runtimeClass)) {
      classOf[PersonalPlanCourseGroup]
    } else if (classOf[MajorCourseGroupBean].isAssignableFrom(runtimeClass)) {
      classOf[MajorCourseGroup]
    } else {
      null
    }
  }

  /** Resolves the Hibernate-mapped interface for a plan-course bean. */
  def hibernateClass(planCourse: PlanCourse): Class[_ <: PlanCourse] = {
    val runtimeClass = planCourse.getClass
    if (classOf[OriginalPlanCourseBean].isAssignableFrom(runtimeClass)) {
      classOf[OriginalPlanCourse]
    } else if (classOf[PersonalPlanCourseBean].isAssignableFrom(runtimeClass)) {
      classOf[PersonalPlanCourse]
    } else if (classOf[MajorPlanCourseBean].isAssignableFrom(runtimeClass)) {
      classOf[MajorPlanCourse]
    } else {
      null
    }
  }

  /** Resolves the Hibernate-mapped interface for a course-plan bean. */
  def hibernateClass(plan: CoursePlan): Class[_ <: CoursePlan] = {
    val runtimeClass = plan.getClass
    if (classOf[OriginalPlanBean].isAssignableFrom(runtimeClass)) {
      classOf[OriginalPlan]
    } else if (classOf[PersonalPlanBean].isAssignableFrom(runtimeClass)) {
      classOf[PersonalPlan]
    } else if (classOf[MajorPlanBean].isAssignableFrom(runtimeClass)) {
      classOf[MajorPlan]
    } else {
      null
    }
  }
}
| openurp/edu-eams-webapp | plan/src/main/scala/org/openurp/edu/eams/teach/program/common/helper/ProgramHibernateClassGetter.scala | Scala | gpl-3.0 | 2,893 |
package spark.deploy.client
import spark.deploy._
import akka.actor._
import akka.pattern.ask
import akka.util.duration._
import akka.pattern.AskTimeoutException
import spark.{SparkException, Logging}
import akka.remote.RemoteClientLifeCycleEvent
import akka.remote.RemoteClientShutdown
import spark.deploy.RegisterJob
import akka.remote.RemoteClientDisconnected
import akka.actor.Terminated
import akka.dispatch.Await
/**
* The main class used to talk to a Spark deploy cluster. Takes a master URL, a job description,
* and a listener for job events, and calls back the listener when various events occur.
*/
private[spark] class Client(
actorSystem: ActorSystem,
masterUrl: String,
jobDescription: JobDescription,
listener: ClientListener)
extends Logging {
// Expected master URL shape: spark://host:port
val MASTER_REGEX = "spark://([^:]+):([0-9]+)".r
var actor: ActorRef = null
var jobId: String = null
// Fail fast on a malformed master URL before any actor is started.
if (MASTER_REGEX.unapplySeq(masterUrl) == None) {
throw new SparkException("Invalid master URL: " + masterUrl)
}
// Internal actor: registers the job with the master and forwards cluster events to `listener`.
class ClientActor extends Actor with Logging {
var master: ActorRef = null
var alreadyDisconnected = false // To avoid calling listener.disconnected() multiple times
override def preStart() {
// Safe: the regex match was validated in the enclosing constructor above.
val Seq(masterHost, masterPort) = MASTER_REGEX.unapplySeq(masterUrl).get
logInfo("Connecting to master spark://" + masterHost + ":" + masterPort)
val akkaUrl = "akka://spark@%s:%s/user/Master".format(masterHost, masterPort)
try {
master = context.actorFor(akkaUrl)
master ! RegisterJob(jobDescription)
// Subscribe to remote lifecycle events so we notice when the master connection drops.
context.system.eventStream.subscribe(self, classOf[RemoteClientLifeCycleEvent])
context.watch(master) // Doesn't work with remote actors, but useful for testing
} catch {
case e: Exception =>
logError("Failed to connect to master", e)
markDisconnected()
context.stop(self)
}
}
override def receive = {
case RegisteredJob(jobId_) =>
jobId = jobId_
listener.connected(jobId)
case ExecutorAdded(id: Int, workerId: String, host: String, cores: Int, memory: Int) =>
val fullId = jobId + "/" + id
logInfo("Executor added: %s on %s (%s) with %d cores".format(fullId, workerId, host, cores))
listener.executorAdded(fullId, workerId, host, cores, memory)
case ExecutorUpdated(id, state, message) =>
val fullId = jobId + "/" + id
val messageText = message.map(s => " (" + s + ")").getOrElse("")
logInfo("Executor updated: %s is now %s%s".format(fullId, state, messageText))
// Treat any terminal executor state as a removal from the listener's perspective.
if (ExecutorState.isFinished(state)) {
listener.executorRemoved(fullId, message.getOrElse(""))
}
case Terminated(_) | RemoteClientDisconnected(_, _) | RemoteClientShutdown(_, _) =>
logError("Connection to master failed; stopping client")
markDisconnected()
context.stop(self)
case StopClient =>
// Acknowledge the synchronous stop request (see Client.stop below) before terminating.
markDisconnected()
sender ! true
context.stop(self)
}
/**
* Notify the listener that we disconnected, if we hadn't already done so before.
*/
def markDisconnected() {
if (!alreadyDisconnected) {
listener.disconnected()
alreadyDisconnected = true
}
}
}
def start() {
// Just launch an actor; it will call back into the listener.
actor = actorSystem.actorOf(Props(new ClientActor))
}
// Synchronously asks the actor to stop, tolerating a timeout if the master already went away.
def stop() {
if (actor != null) {
try {
val timeout = 1.seconds
val future = actor.ask(StopClient)(timeout)
Await.result(future, timeout)
} catch {
case e: AskTimeoutException => // Ignore it, maybe master went away
}
actor = null
}
}
}
| joeywen/spark_cpp_api | core/src/main/scala/spark/deploy/client/Client.scala | Scala | bsd-3-clause | 3,730 |
package week4
/** Peano natural number: either Zero or the successor of another natural. */
abstract class Nat {
def isZero: Boolean
// Predecessor; throws for Zero (see the Zero object).
def predecessor: Nat
def successor = new Succ(this)
def + (that: Nat): Nat
def - (that: Nat): Nat
}
/** The natural number 0. */
object Zero extends Nat {
def isZero = true
def predecessor = throw new Error("Zero.predecessor")
// 0 + n == n
def + (that: Nat): Nat = that
// 0 - n is only defined for n == 0; naturals cannot be negative.
def - (that: Nat): Nat = if (that.isZero) this else throw new Error("negative number")
}
/** The successor of `n`, i.e. n + 1. */
class Succ(n: Nat) extends Nat {
def isZero = false
def predecessor = n
// (n + 1) + that == (n + that) + 1
def + (that: Nat): Nat = new Succ(n + that)
// (n + 1) - that == n - (that - 1) when that > 0; subtracting Zero is the identity.
def - (that: Nat): Nat = if (that.isZero) this else n - that.predecessor
} | keshavbashyal/playground-notes | functional-programming-principles-in-scala/week4/Nat.scala | Scala | mit | 592 |
package play.boilerplate.generators
import play.boilerplate.parser.model._
/**
* Generates the service trait for a parsed API schema, plus a companion object holding the
* service name, one sealed response ADT per operation, and a shared UnexpectedResult class.
* Code trees are built with the treehugger DSL and rendered to a single source file.
*/
class ServiceCodeGenerator extends CodeGenerator {
import GeneratorUtils._
import treehugger.forest._
import definitions._
import treehuggerDSL._
// Imports contributed by the security providers configured for this schema.
def securityImports(schema: Schema)(implicit ctx: GeneratorContext): Seq[Import] = {
getSecurityProviderOfSchema(schema).flatMap(_.serviceImports)
}
// All import trees for the generated service file: models, higher-kinds, traces,
// security, and user-provided packages.
def generateImports(schema: Schema)(implicit ctx: GeneratorContext): Seq[Tree] = {
Seq(
IMPORT(REF(ctx.settings.modelPackageName), "_"),
IMPORT(REF("scala.language"), "higherKinds")
) ++
tracesImports ++
securityImports(schema) ++
ctx.settings.codeProvidedPackages.filterNot(_.isEmpty).map(pkg => IMPORT(REF(pkg), "_"))
}
// Builds the whole service source file; returns Nil when the schema defines no operations.
override def generate(schema: Schema)(implicit ctx: GeneratorContext): Iterable[CodeFile] = {
val serviceImports = BLOCK {
generateImports(schema)
} inPackage ctx.settings.servicePackageName
// One abstract method per (path, operation), sorted by HTTP method for stable output.
val methods = for {
path <- schema.paths
(_, operation) <- path.operations.toSeq.sortBy(_._1)
} yield generateMethod(path, operation)(ctx.addCurrentPath(operation.operationId).setInService(true))
if (methods.nonEmpty) {
// The service trait is parameterized over an effect type F[_].
val serviceTree = TRAITDEF(ctx.settings.serviceClassName).withTypeParams(F_TYPEVAR) :=
BLOCK {
IMPORT(REF(ctx.settings.serviceClassName), "_") +: filterNonEmptyTree(
methods.map(_.tree).toIndexedSeq
)
}
// Companion: service name constant, response ADTs, and any extra definitions.
val companionTree = OBJECTDEF(ctx.settings.serviceClassName) := BLOCK {
(VAL(serviceNameValName) := LIT(ctx.settings.serviceName)).withDoc("Service name: " + ctx.settings.serviceName) +:
(generateResponseClasses(schema)(ctx.setInService(true)) ++ methods.flatMap(_.additionalDef))
}
SourceCodeFile(
packageName = ctx.settings.servicePackageName,
className = ctx.settings.serviceClassName,
header = treeToString(serviceImports),
impl = treeToString(serviceTree, EmptyTree, companionTree)
) :: Nil
} else {
Nil
}
}
// Abstract method signature for one operation (empty body).
def generateMethod(path: Path, operation: Operation)(implicit ctx: GeneratorContext): MethodDef = {
operationMethodDef(path, operation, F_OF_TYPE, canBeDeprecated = false)(_.empty)
}
// Response ADTs for every operation, plus the shared UnexpectedResult case class that
// extends each operation's response trait.
def generateResponseClasses(schema: Schema)(implicit ctx: GeneratorContext): Seq[Tree] = {
val models = schema.definitions
val operationResults = for {
path <- schema.paths
(_, operation) <- path.operations.toSeq.sortBy(_._1)
} yield generateOperationResults(operation, models)(ctx.addCurrentPath(operation.operationId))
val traits = operationResults.map(_.traitName)
val UnexpectedResultDef = {
CASECLASSDEF(UnexpectedResultClassName)
.withParams(
PARAM("body", StringClass).empty,
PARAM("code", IntClass) := LIT(500),
PARAM("contentType", StringClass) := LIT(MIME_TYPE_TEXT)
)
.withParents(traits)
.withFlags(Flags.FINAL) := BLOCK(headersMethodDef.withFlags(Flags.OVERRIDE) := NIL)
}.withDoc(
Seq("Response for unexpected result of request."),
DocTag.Param("body", "Response body."),
DocTag.Param("code", "Response code (default: 500)."),
DocTag.Param("contentType", "Response Content-Type (default: text/plain).")
)
operationResults.flatMap(_.tree) ++ Seq(UnexpectedResultDef)
}
// Result of generating one operation's response ADT: the sealed trait's name plus its trees.
case class Responses(traitName: String, tree: Seq[Tree])
// Signature shared by all response classes: def headers: Seq[(String, String)]
private def headersMethodDef: DefTreeStart = {
DEF("headers", TYPE_SEQ(TYPE_TUPLE(StringClass, StringClass)))
}
// Builds the sealed response trait for one operation plus one case class/object per
// declared response code.
def generateOperationResults(operation: Operation, models: Map[String, Model])
(implicit ctx: GeneratorContext): Responses = {
val traitName = getOperationResponseTraitName(operation.operationId)
val sealedTrait = (TRAITDEF(traitName).withFlags(Flags.SEALED) := BLOCK {
headersMethodDef.empty
}).withDoc(
Seq(s"Response to operation '${operation.operationId}'.")
)
// Whether any explicit 2xx response exists; drives the default code of the
// catch-all "default" response below.
val hasOk = operation.responses.keys.exists {
case StatusResponse(code) if HttpStatus.codeIsOk(code) => true
case _ => false
}
val responses = for ((code, response) <- operation.responses.toSeq) yield {
val className = getResponseClassName(operation.operationId, code)
val bodyType = getResponseBodyType(response)
// Parameter list: optional body, then (for "default" responses) a code parameter,
// then any declared response headers.
val fullParamsList = bodyType.map(body => ResponseParam(
headerName = None,
paramName = "body",
paramDef = PARAM("body", body.tpe).tree,
paramDoc = DocTag.Param("body", response.schema.flatMap(_.description).getOrElse("")),
isOptional = false
)).toSeq ++ {
code match {
case DefaultResponse =>
val defaultCode = if (hasOk) 500 else 200
Seq(ResponseParam(
headerName = None,
paramName = "code",
paramDef = PARAM("code", IntClass) := LIT(defaultCode),
paramDoc = DocTag.Param("code", s"Response code (default: $defaultCode)"),
isOptional = false
))
case _ =>
Nil
}
} ++ getResponseParameters(response)
// (headerName, value) pairs; optional params pass through as-is, required ones are wrapped
// in Some so the generated `collect` below keeps only defined values.
val headersList = fullParamsList.collect { case ResponseParam(Some(headerName), paramName, _, _, isOptional) =>
val paramVal = if (isTraceIdHeaderName(headerName)) traceIdValRef(REF(paramName)) else REF(paramName)
PAIR(LIT(headerName), if (isOptional) paramVal else SOME(paramVal))
}
val headerMethodImpl = if (headersList.isEmpty) {
headersMethodDef.withFlags(Flags.OVERRIDE) := NIL
} else {
headersMethodDef.withFlags(Flags.OVERRIDE) := BLOCK(
LIST(headersList) INFIX "collect" APPLY BLOCK {
CASE(PAREN(ID("key"), SOME(ID("value")))) ==>
PAIR(REF("key"), REF("value"))
}
)
}
// Parameterless responses become case objects, otherwise final case classes.
val classDef = if (fullParamsList.isEmpty) {
CASEOBJECTDEF(className).withParents(traitName)
} else {
CASECLASSDEF(className).withParams(fullParamsList.map(_.paramDef)).withParents(traitName).withFlags(Flags.FINAL)
}
val classTree = (classDef := BLOCK {
headerMethodImpl
}).withDoc(
Seq(response.description.getOrElse("")),
fullParamsList.map(_.paramDoc): _ *
)
// Any auxiliary definitions required by the body type come first.
bodyType.map(_.definitions).getOrElse(Nil) :+ classTree
}
Responses(traitName, sealedTrait +: responses.flatten.toIndexedSeq)
}
}
| Romastyi/sbt-play-boilerplate | sbt-plugin/lib/src/main/scala/play/boilerplate/generators/ServiceCodeGenerator.scala | Scala | apache-2.0 | 6,459 |
package application
import scala.swing.Frame
import java.awt.Dimension
import scala.swing.BoxPanel
import scala.swing.Orientation
import scala.swing.ListView
import scala.collection.parallel.mutable.ParArray
import scala.swing.Table
import scala.swing.Label
import scala.swing.ScrollPane
import scala.swing.TextArea
import scala.swing.GridPanel
import scala.swing.BorderPanel
/** Swing window that displays spider search results plus the captured console output. */
class ResultPanel(searcha: String, searchb: String, searchc: String, url: String) {
// Builds and opens the result frame; also prints explanatory text and a BST dump,
// which end up in the OutputCatcher text area below.
def Open(): Unit = {
val data = new NewsGenerator(url, searcha, searchb, searchc)
val listdata = data.generate
val finalr = new ListView(listdata)
val resultPanel = new ScrollPane
val resultframe = new Frame
resultframe.title = "ForexSpider Search Results"
resultframe.preferredSize = new Dimension(1000, 800)
// Forward reference to `bord` works because it is a lazy val defined later in this block.
resultframe.contents = bord
resultPanel.contents = finalr
resultframe.open
println("Hello and Welcome to the Spider Search Analytics Page!")
println("In this window you will find a representation of the number of news bytes each site contains as a [balanced] BST.")
println("Below the [balanced] BST you will find a ListView of everything the spider found.")
println()
println()
println("The [balanced] BST where each number represents the number of news-bytes found on a linked site:")
data.PrettyPrinter
// Text area that captures System.out/System.err so the println output above is visible in the UI.
lazy val top = new ScrollPane {
val area = new TextArea(20, 40) with OutputCatcher { //Streams!
catchSystemOut
catchSystemErr
}
contents = new BorderPanel {
layout(new ScrollPane(area)) = BorderPanel.Position.Center
}
}
// Frame layout: captured console on top, scrolling result list in the center.
lazy val bord = new BorderPanel() {
//layout += starts -> BorderPanel.Position.East
layout += resultPanel -> BorderPanel.Position.Center
layout += top -> BorderPanel.Position.North
//layout += url -> BorderPanel.Position.South
}
// NOTE(review): everything below fetches and parses a live rates feed over HTTP, but the
// results (`k`, `price`) are never used afterwards. Confirm whether this is dead code
// before removing it — the network fetch itself is a side effect.
val parserFactory = new org.ccil.cowan.tagsoup.jaxp.SAXFactoryImpl
val parser = parserFactory.newSAXParser()
val source = new org.xml.sax.InputSource("http://rates.fxcm.com/RatesXML")
val adapter = new scala.xml.parsing.NoBindingFactoryAdapter
val yy = adapter.loadXML(source, parser)
val k = (yy \\\\ "@symbol").map(_.text).toParArray
val price = (yy \\\\ "Bid").map(_.text).toParArray
}
}
| jacobluber/ScalaSpider | FinancialSpider/src/application/ResultWindow.scala | Scala | apache-2.0 | 2,314 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.annotation.{Evolving, Experimental}
import org.apache.spark.sql.streaming.Trigger
/**
* A [[Trigger]] that processes only one batch of data in a streaming query then terminates
* the query.
*
* NOTE(review): presumably surfaced to users through the public [[Trigger]] factory API
* rather than referenced directly — confirm against the Trigger companion.
*/
@Experimental
@Evolving
case object OneTimeTrigger extends Trigger
| Aegeaner/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/Triggers.scala | Scala | apache-2.0 | 1,140 |
package marketdata
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration.MILLISECONDS
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.actorRef2Scala
import marketdata.MarketDataPollCommandEnum.TickPollCommand
import marketdata.MarketDataPollCommandEnum.TradePollCommand
import marketdata.MarketDataTypes.MarketDataType
import marketdata.MarketDataTypes.TickDataType
import marketdata.MarketDataTypes.TradeDataType
/**
* Actor that polls a market-data feed for one instrument on a fixed schedule and publishes
* ticks/trades to subscribed listener actors. Subscription state (`subscribtions` — the
* misspelled name comes from the SubscriptionManager trait, not visible here) maps a
* MarketDataType to the set of subscriber ActorRefs.
*/
class MarketDataFeedPublisherActor(val exchange: String, val instrument: String, val topics: Iterable[MarketDataType]) extends Actor with SubscriptionManager with ActorLogging {
implicit val sinceId: Long = 0
// NOTE(review): lastTickId is never read or updated in this class — likely dead state.
var lastTickId: Long = 0
// Start one minute in the past so the first trade poll picks up recent history.
var lastTradeTimestamp = System.currentTimeMillis() - 60 * 1000
val feedService = new FeedServiceMock(exchange, instrument) //Mock source for demos
implicit val ec = ExecutionContext.Implicits.global
override def receive: Receive = {
case TickPollCommand =>
{
val data: Option[Tick] = feedService.getLastTick(instrument)
data match {
case Some(tick) =>
// NOTE(review): .get(...).get throws if the topic key is absent; preStart pre-seeds
// all topics, so this only holds as long as that invariant does.
subscribtions.get(TickDataType).get.foreach { listener => listener ! tick }; log.info("tick received " + tick)
case None => log.debug("No tick")
}
}
case TradePollCommand =>
{
val data: List[Trade] = feedService.getTrades(instrument, lastTradeTimestamp)
data match {
case Nil => log.debug("No new trades")
case trades: List[Trade] => {
log.info("trades " + trades.head.instrument + " " + trades.map(x => x.price.toString()).reduce((x, y) => x + "," + y))
log.info("subscribtions " + subscribtions)
subscribtions.get(TradeDataType).get.foreach { listener => data.foreach(trade => listener ! trade) };
}
}
}
case SubscribeListenerRequest(actor, subject, datatypes) => {
datatypes match {
// NOTE(review): the filter predicate ignores its element (`x`) — it keeps all topics
// iff subject == instrument, and the `case _` arm below is unreachable after
// Some/None. Confirm intent before changing.
case Some(tp) => { tp.filter(x => subject == instrument).foreach(topic => subscribe(actor, topic)); log.info("subscribe " + subject) }
case None => { MarketDataTypes.values.filter(x => subject == instrument).foreach(topic => subscribe(actor, topic)); log.info("subscribe all topics " + subject) }
case _ => log.info("Ignored subscribtion on instruments out of actor scope subject was " + subject)
}
}
case message: AnyRef => throw new UnsupportedOperationException("unsupported message =" + message)
}
override def preStart(): Unit = {
// Seed every topic with an empty subscriber set so the .get chains above cannot miss keys.
MarketDataTypes.values.foreach { topic =>
{
if (!subscribtions.keySet.contains(topic)) {
subscribtions = subscribtions.+((topic, Set[ActorRef]()))
}
}
}
//startup polling of all topics - schedule polling every 800 ms
val commands = topics.map { topic => MarketDataPollCommandEnum.withName(topic.toString()) }
log.info("schedule these topics " + commands)
commands.foreach(cmd => context.system.scheduler.schedule(FiniteDuration(0, MILLISECONDS), FiniteDuration(800, MILLISECONDS))(self ! cmd))
()
}
}
| mfahsi/btc-akka-demo | btc-trading-akka/src/main/scala/marketdata/MarketDataFeedPublisherActor.scala | Scala | mit | 3,191 |
package com.freshsoft.matterbridge.routing
import akka.http.scaladsl.server.Directives.{path, _}
import akka.http.scaladsl.server.Route
import com.freshsoft.matterbridge.service.database.CategoryService
import model.{BotOrCategoryUpload, DatabaseEntityJsonSupport}
import scala.concurrent.ExecutionContext
/**
* The nine gag specific static routes
*/
/**
* HTTP routes for categories under the "category" prefix:
* GET /category — all categories
* GET /category/count — number of categories (as string)
* POST /category/add — create a category from a BotOrCategoryUpload body
* GET /category/exists/<name> — whether a category exists (as string)
* GET /category/<uuid> — category by id
* Note: route order matters — /<uuid> is tried last so literal segments win.
*/
class CategoryRoute(service: CategoryService)(implicit executionContext: ExecutionContext)
extends DatabaseEntityJsonSupport {
val route: Route = pathPrefix("category") {
pathEndOrSingleSlash {
get {
complete(service.all)
}
} ~
path("count") {
get {
complete {
service.count map (_.toString)
}
}
} ~
path("add") {
post {
entity(as[BotOrCategoryUpload]) { entity =>
complete {
service.add(entity.name) map (_.toString)
}
}
}
} ~
path("exists" / Remaining) { search =>
get {
complete(service.exists(search) map (_.toString))
}
} ~
path(JavaUUID) { uuid =>
get {
complete(service.byId(uuid))
}
}
}
}
| Freshwood/matterbridge | src/main/scala/com/freshsoft/matterbridge/routing/CategoryRoute.scala | Scala | mit | 1,214 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.lang.reflect.{Method, Modifier}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
 * An expression that invokes a method on a class via reflection.
 *
 * For now, only types defined in `Reflect.typeMapping` are supported (basically primitives
 * and string) as input types, and the output is turned automatically to a string.
 *
 * Note that unlike Hive's reflect function, this expression calls only static methods
 * (i.e. does not support calling non-static methods).
 *
 * We should also look into how to consolidate this expression with
 * [[org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke]] in the future.
 *
 * @param children the first element should be a literal string for the class name,
 *                 and the second element should be a literal string for the method name,
 *                 and the remaining are input arguments to the Java method.
 */
@ExpressionDescription(
usage = "_FUNC_(class, method[, arg1[, arg2 ..]]) - Calls a method with reflection.",
examples = """
Examples:
> SELECT _FUNC_('java.util.UUID', 'randomUUID');
c33fb387-8500-4bfa-81d2-6e0e3e930df2
> SELECT _FUNC_('java.util.UUID', 'fromString', 'a5cf6c42-0c85-418f-af6c-3e4e5b1328f2');
a5cf6c42-0c85-418f-af6c-3e4e5b1328f2
""")
case class CallMethodViaReflection(children: Seq[Expression])
extends Expression with CodegenFallback {
override def prettyName: String = "reflect"
// Validates: arity >= 2, class/method names are foldable strings, the class is loadable,
// argument types are supported, and a matching static method exists.
override def checkInputDataTypes(): TypeCheckResult = {
if (children.size < 2) {
TypeCheckFailure("requires at least two arguments")
} else if (!children.take(2).forall(e => e.dataType == StringType && e.foldable)) {
// The first two arguments must be string type.
TypeCheckFailure("first two arguments should be string literals")
} else if (!classExists) {
TypeCheckFailure(s"class $className not found")
} else if (children.slice(2, children.length)
.exists(e => !CallMethodViaReflection.typeMapping.contains(e.dataType))) {
TypeCheckFailure("arguments from the third require boolean, byte, short, " +
"integer, long, float, double or string expressions")
} else if (method == null) {
TypeCheckFailure(s"cannot find a static method that matches the argument types in $className")
} else {
TypeCheckSuccess
}
}
// The invoked method may return different values on each call (e.g. randomUUID in the
// examples above), so the expression must not be treated as deterministic.
override lazy val deterministic: Boolean = false
override def nullable: Boolean = true
override val dataType: DataType = StringType
override def eval(input: InternalRow): Any = {
var i = 0
while (i < argExprs.length) {
buffer(i) = argExprs(i).eval(input).asInstanceOf[Object]
// Convert if necessary. Based on the types defined in typeMapping, string is the only
// type that needs conversion. If we support timestamps, dates, decimals, arrays, or maps
// in the future, proper conversion needs to happen here too.
if (buffer(i).isInstanceOf[UTF8String]) {
buffer(i) = buffer(i).toString
}
i += 1
}
// Static invocation: the receiver is null by design.
val ret = method.invoke(null, buffer : _*)
UTF8String.fromString(String.valueOf(ret))
}
@transient private lazy val argExprs: Array[Expression] = children.drop(2).toArray
/** Name of the class -- this has to be called after we verify children has at least two exprs. */
@transient private lazy val className = children(0).eval().asInstanceOf[UTF8String].toString
/** True if the class exists and can be loaded. */
@transient private lazy val classExists = CallMethodViaReflection.classExists(className)
/** The reflection method. */
@transient lazy val method: Method = {
// Evaluating with a null row is safe: checkInputDataTypes requires this child to be foldable.
val methodName = children(1).eval(null).asInstanceOf[UTF8String].toString
CallMethodViaReflection.findMethod(className, methodName, argExprs.map(_.dataType)).orNull
}
/** A temporary buffer used to hold intermediate results returned by children. */
@transient private lazy val buffer = new Array[Object](argExprs.length)
}
object CallMethodViaReflection {
/** Mapping from Spark's type to acceptable JVM types. */
val typeMapping = Map[DataType, Seq[Class[_]]](
BooleanType -> Seq(classOf[java.lang.Boolean], classOf[Boolean]),
ByteType -> Seq(classOf[java.lang.Byte], classOf[Byte]),
ShortType -> Seq(classOf[java.lang.Short], classOf[Short]),
IntegerType -> Seq(classOf[java.lang.Integer], classOf[Int]),
LongType -> Seq(classOf[java.lang.Long], classOf[Long]),
FloatType -> Seq(classOf[java.lang.Float], classOf[Float]),
DoubleType -> Seq(classOf[java.lang.Double], classOf[Double]),
StringType -> Seq(classOf[String])
)
/**
* Returns true if the class can be found and loaded.
*/
private def classExists(className: String): Boolean = {
try {
Utils.classForName(className)
true
} catch {
case e: ClassNotFoundException => false
}
}
/**
* Finds a Java static method using reflection that matches the given argument types,
* and whose return type is string.
*
* The types sequence must be the valid types defined in [[typeMapping]].
*
* This is made public for unit testing.
*/
def findMethod(className: String, methodName: String, argTypes: Seq[DataType]): Option[Method] = {
val clazz: Class[_] = Utils.classForName(className)
// Returns the first method satisfying all checks; getMethods order is unspecified,
// so overload selection among equally-matching candidates is arbitrary.
clazz.getMethods.find { method =>
val candidateTypes = method.getParameterTypes
if (method.getName != methodName) {
// Name must match
false
} else if (!Modifier.isStatic(method.getModifiers)) {
// Method must be static
false
} else if (candidateTypes.length != argTypes.length) {
// Argument length must match
false
} else {
// Argument type must match. That is, either the method's argument type matches one of the
// acceptable types defined in typeMapping, or it is a super type of the acceptable types.
candidateTypes.zip(argTypes).forall { case (candidateType, argType) =>
typeMapping(argType).exists(candidateType.isAssignableFrom)
}
}
}
}
}
| bravo-zhang/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/CallMethodViaReflection.scala | Scala | apache-2.0 | 7,265 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.backend.jvm
import java.io.{DataOutputStream, IOException}
import java.nio.ByteBuffer
import java.nio.channels.{ClosedByInterruptException, FileChannel}
import java.nio.charset.StandardCharsets
import java.nio.file._
import java.nio.file.attribute.FileAttribute
import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.zip.{CRC32, Deflater, ZipEntry, ZipOutputStream}
import scala.reflect.internal.util.NoPosition
import scala.reflect.io.PlainNioFile
import scala.tools.nsc.Global
import scala.tools.nsc.backend.jvm.BTypes.InternalName
import scala.tools.nsc.io.AbstractFile
import scala.tools.nsc.plugins.{OutputFileWriter, Plugin}
import scala.tools.nsc.util.JarFactory
abstract class ClassfileWriters {
val postProcessor: PostProcessor
import postProcessor.bTypes.frontendAccess
/**
* The interface to writing classfiles. GeneratedClassHandler calls these methods to generate the
* directory and files that are created, and eventually calls `close` when the writing is complete.
*
* The companion object is responsible for constructing a appropriate and optimal implementation for
* the supplied settings.
*
* Operations are threadsafe.
*/
sealed trait ClassfileWriter extends OutputFileWriter with AutoCloseable {
/**
* Write a classfile
*/
def writeClass(name: InternalName, bytes: Array[Byte], sourceFile: AbstractFile): Unit
/**
* Close the writer. Behavior is undefined after a call to `close`.
*/
def close(): Unit
// Converts an internal class name to its path relative to the output root,
// e.g. "a.b.C" -> "a/b/C.class".
protected def classRelativePath(className: InternalName, suffix: String = ".class"): String =
className.replace('.', '/') + suffix
}
  object ClassfileWriter {
    /** Resolve a directory argument from the command line (used for -Ygen-asmp / -Ydump-classes). */
    private def getDirectory(dir: String): Path = Paths.get(dir)

    /**
     * Build the ClassfileWriter appropriate for `global`'s settings: a single- or multi-output
     * writer, optionally wrapped to additionally emit asmp/dump copies and timing statistics.
     */
    def apply(global: Global): ClassfileWriter = {
      // Note: don't import global._ - it's too easy to leak non-threadsafe structures.
      import global.{ cleanup, log, settings }
      // Main-Class entry for a jar manifest: the explicit setting if given, otherwise the single
      // discovered entry point (none when there are zero or several entry points).
      def jarManifestMainClass: Option[String] = settings.mainClass.valueSetByUser.orElse {
        cleanup.getEntryPoints match {
          case List(name) => Some(name)
          case es =>
            if (es.isEmpty) log("No Main-Class designated or discovered.")
            else log(s"No Main-Class due to multiple entry points:\n ${es.mkString("\n ")}")
            None
        }
      }
      // One FileWriter per distinct output location; the common case is a single output.
      val basicClassWriter = settings.outputDirs.getSingleOutput match {
        case Some(dest) => new SingleClassWriter(FileWriter(global, dest, jarManifestMainClass))
        case None =>
          val distinctOutputs: Set[AbstractFile] = settings.outputDirs.outputs.iterator.map(_._2).toSet
          if (distinctOutputs.size == 1) new SingleClassWriter(FileWriter(global, distinctOutputs.head, jarManifestMainClass))
          else {
            // Map each source file to its configured output directory so writeClass can route.
            val sourceToOutput: Map[AbstractFile, AbstractFile] = global.currentRun.units.map(unit => (unit.source.file, frontendAccess.compilerSettings.outputDirectory(unit.source.file))).toMap
            new MultiClassWriter(sourceToOutput, distinctOutputs.iterator.map { output: AbstractFile => output -> FileWriter(global, output, jarManifestMainClass) }.toMap)
          }
      }
      // -Ygen-asmp / -Ydump-classes additionally write a textual asm dump / raw classfile copy.
      val withAdditionalFormats = if (settings.Ygenasmp.valueSetByUser.isEmpty && settings.Ydumpclasses.valueSetByUser.isEmpty) basicClassWriter else {
        val asmp = settings.Ygenasmp.valueSetByUser map { dir: String => FileWriter(global, new PlainNioFile(getDirectory(dir)), None) }
        val dump = settings.Ydumpclasses.valueSetByUser map { dir: String => FileWriter(global, new PlainNioFile(getDirectory(dir)), None) }
        new DebugClassWriter(basicClassWriter, asmp, dump)
      }
      // Timing statistics are only collected when the backend runs on a single thread.
      val enableStats = settings.areStatisticsEnabled && settings.YaddBackendThreads.value == 1
      if (enableStats) new WithStatsWriter(withAdditionalFormats) else withAdditionalFormats
    }
    /** Writes to the output directory corresponding to the source file, if multiple output directories are specified */
    private final class MultiClassWriter(sourceToOutput: Map[AbstractFile, AbstractFile], underlying: Map[AbstractFile, FileWriter]) extends ClassfileWriter {
      // Look up the FileWriter for a source file's output directory; misconfiguration is fatal.
      private def getUnderlying(sourceFile: AbstractFile, outputDir: AbstractFile) = underlying.getOrElse(outputDir, {
        throw new Exception(s"Cannot determine output directory for ${sourceFile} with output ${outputDir}. Configured outputs are ${underlying.keySet}")
      })
      // Look up the FileWriter for an explicitly given output directory.
      private def getUnderlying(outputDir: AbstractFile) = underlying.getOrElse(outputDir, {
        throw new Exception(s"Cannot determine output for ${outputDir}. Configured outputs are ${underlying.keySet}")
      })
      override def writeClass(className: InternalName, bytes: Array[Byte], sourceFile: AbstractFile): Unit = {
        getUnderlying(sourceFile, sourceToOutput(sourceFile)).writeFile(classRelativePath(className), bytes)
      }
      override def writeFile(relativePath: String, data: Array[Byte], outputDir: AbstractFile): Unit = {
        getUnderlying(outputDir).writeFile(relativePath, data)
      }
      override def close(): Unit = underlying.values.foreach(_.close())
    }
    /** Writes all classfiles to one output location, ignoring the source file. */
    private final class SingleClassWriter(underlying: FileWriter) extends ClassfileWriter {
      override def writeClass(className: InternalName, bytes: Array[Byte], sourceFile: AbstractFile): Unit = {
        underlying.writeFile(classRelativePath(className), bytes)
      }
      override def writeFile(relativePath: String, data: Array[Byte], outputDir: AbstractFile): Unit = {
        underlying.writeFile(relativePath, data)
      }
      override def close(): Unit = underlying.close()
    }
    /**
     * Delegates to `basic` and additionally writes a textual asm dump (`.asm`) and/or a raw
     * classfile copy to the directories configured via -Ygen-asmp / -Ydump-classes.
     */
    private final class DebugClassWriter(basic: ClassfileWriter, asmp: Option[FileWriter], dump: Option[FileWriter]) extends ClassfileWriter {
      override def writeClass(className: InternalName, bytes: Array[Byte], sourceFile: AbstractFile): Unit = {
        basic.writeClass(className, bytes, sourceFile)
        asmp.foreach { writer =>
          val asmBytes = AsmUtils.textify(AsmUtils.readClass(bytes)).getBytes(StandardCharsets.UTF_8)
          writer.writeFile(classRelativePath(className, ".asm"), asmBytes)
        }
        dump.foreach { writer =>
          writer.writeFile(classRelativePath(className), bytes)
        }
      }
      override def writeFile(relativePath: String, data: Array[Byte], outputDir: AbstractFile): Unit = {
        basic.writeFile(relativePath, data, outputDir)
      }
      override def close(): Unit = {
        basic.close()
        asmp.foreach(_.close())
        dump.foreach(_.close())
      }
    }
    /** Wraps a writer to record time spent writing bytecode (single-threaded backend only). */
    private final class WithStatsWriter(underlying: ClassfileWriter) extends ClassfileWriter {
      override def writeClass(className: InternalName, bytes: Array[Byte], sourceFile: AbstractFile): Unit = {
        val statistics = frontendAccess.unsafeStatistics
        val snap = statistics.startTimer(statistics.bcodeWriteTimer)
        try underlying.writeClass(className, bytes, sourceFile)
        finally statistics.stopTimer(statistics.bcodeWriteTimer, snap)
      }
      override def writeFile(relativePath: String, data: Array[Byte], outputDir: AbstractFile): Unit = {
        underlying.writeFile(relativePath, data, outputDir)
      }
      override def close(): Unit = underlying.close()
    }
  }
  /** Low-level sink for writing one file (by '/'-separated relative path) to some output location. */
  sealed trait FileWriter {
    /** Write `bytes` at `relativePath` below this writer's output location. */
    def writeFile(relativePath: String, bytes: Array[Byte]): Unit
    /** Release underlying resources (e.g. flush and close a jar stream). */
    def close(): Unit
  }
  object FileWriter {
    /**
     * Select the FileWriter implementation for an output location: a jar file (optionally with a
     * Main-Class manifest entry), a virtual (in-memory) file, or a plain directory on disk.
     */
    def apply(global: Global, file: AbstractFile, jarManifestMainClass: Option[String]): FileWriter =
      if (file.hasExtension("jar")) {
        val jarCompressionLevel = global.settings.YjarCompressionLevel.value
        // -YjarFactory substitutes the JarOutputStream implementation; loaded reflectively.
        val jarFactory =
          Class.forName(global.settings.YjarFactory.value)
            .asSubclass(classOf[JarFactory])
            .getDeclaredConstructor().newInstance()
        new JarEntryWriter(file, jarManifestMainClass, jarCompressionLevel, jarFactory, global.plugins)
      }
      else if (file.isVirtual) new VirtualFileWriter(file)
      else if (file.isDirectory) new DirEntryWriter(file.file.toPath)
      else throw new IllegalStateException(s"don't know how to handle an output of $file [${file.getClass}]")
  }
  /**
   * Writes all output files as entries of a single jar. Entry writes are serialized via
   * `this.synchronized` because JarOutputStream is not thread-safe.
   */
  private final class JarEntryWriter(file: AbstractFile, mainClass: Option[String], compressionLevel: Int, jarFactory: JarFactory, plugins: List[Plugin]) extends FileWriter {
    //keep these imports local - avoid confusion with scala naming
    import java.util.jar.Attributes.Name.{MANIFEST_VERSION, MAIN_CLASS}
    import java.util.jar.{JarOutputStream, Manifest}
    // Compression level 0 means entries are STORED uncompressed (requires pre-computed CRC/sizes).
    val storeOnly = compressionLevel == Deflater.NO_COMPRESSION
    val jarWriter: JarOutputStream = {
      import scala.util.Properties._
      val manifest = new Manifest
      val attrs = manifest.getMainAttributes
      attrs.put(MANIFEST_VERSION, "1.0")
      attrs.put(ScalaCompilerVersion, versionNumberString)
      mainClass.foreach(c => attrs.put(MAIN_CLASS, c))
      // Compiler plugins may add their own manifest attributes.
      plugins.foreach(_.augmentManifest(file, manifest))
      val jar = jarFactory.createJarOutputStream(file, manifest)
      jar.setLevel(compressionLevel)
      if (storeOnly) jar.setMethod(ZipOutputStream.STORED)
      jar
    }
    // Reused CRC accumulator; safe because writeFile is synchronized.
    lazy val crc = new CRC32
    override def writeFile(relativePath: String, bytes: Array[Byte]): Unit = this.synchronized {
      val entry = new ZipEntry(relativePath)
      if (storeOnly) {
        // When using compression method `STORED`, the ZIP spec requires the CRC and compressed/
        // uncompressed sizes to be written before the data. The JarOutputStream could compute the
        // values while writing the data, but not patch them into the stream after the fact. So we
        // need to pre-compute them here. The compressed size is taken from size.
        // https://stackoverflow.com/questions/1206970/how-to-create-uncompressed-zip-archive-in-java/5868403
        // With compression method `DEFLATED` JarOutputStream computes and sets the values.
        entry.setSize(bytes.length)
        crc.reset()
        crc.update(bytes)
        entry.setCrc(crc.getValue)
      }
      jarWriter.putNextEntry(entry)
      try jarWriter.write(bytes, 0, bytes.length)
      finally jarWriter.flush()
    }
    override def close(): Unit = this.synchronized(jarWriter.close())
  }
private final class DirEntryWriter(base: Path) extends FileWriter {
val builtPaths = new ConcurrentHashMap[Path, java.lang.Boolean]()
val noAttributes = Array.empty[FileAttribute[_]]
private val isWindows = scala.util.Properties.isWin
def ensureDirForPath(baseDir: Path, filePath: Path): Unit = {
import java.lang.Boolean.TRUE
val parent = filePath.getParent
if (!builtPaths.containsKey(parent)) {
try Files.createDirectories(parent, noAttributes: _*)
catch {
case e: FileAlreadyExistsException =>
// `createDirectories` reports this exception if `parent` is an existing symlink to a directory
// but that's fine for us (and common enough, `scalac -d /tmp` on mac targets symlink).
if (!Files.isDirectory(parent))
throw new FileConflictException(s"Can't create directory $parent; there is an existing (non-directory) file in its path", e)
}
builtPaths.put(baseDir, TRUE)
var current = parent
while ((current ne null) && (null ne builtPaths.put(current, TRUE))) {
current = current.getParent
}
}
}
// the common case is that we are are creating a new file, and on MS Windows the create and truncate is expensive
// because there is not an options in the windows API that corresponds to this so the truncate is applied as a separate call
// even if the file is new.
// as this is rare, its best to always try to create a new file, and it that fails, then open with truncate if that fails
private val fastOpenOptions = util.EnumSet.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)
private val fallbackOpenOptions = util.EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)
override def writeFile(relativePath: String, bytes: Array[Byte]): Unit = {
val path = base.resolve(relativePath)
try {
ensureDirForPath(base, path)
val os = if (isWindows) {
try FileChannel.open(path, fastOpenOptions)
catch {
case _: FileAlreadyExistsException => FileChannel.open(path, fallbackOpenOptions)
}
} else FileChannel.open(path, fallbackOpenOptions)
try {
os.write(ByteBuffer.wrap(bytes), 0L)
} catch {
case ex: ClosedByInterruptException =>
try {
Files.deleteIfExists(path) // don't leave a empty of half-written classfile around after an interrupt
} catch {
case _: Throwable =>
}
throw ex
}
os.close()
} catch {
case e: FileConflictException =>
frontendAccess.backendReporting.error(NoPosition, s"error writing $path: ${e.getMessage}")
case e: java.nio.file.FileSystemException =>
if (frontendAccess.compilerSettings.debug)
e.printStackTrace()
frontendAccess.backendReporting.error(NoPosition, s"error writing $path: ${e.getClass.getName} ${e.getMessage}")
}
}
override def close(): Unit = ()
}
private final class VirtualFileWriter(base: AbstractFile) extends FileWriter {
private def getFile(base: AbstractFile, path: String): AbstractFile = {
def ensureDirectory(dir: AbstractFile): AbstractFile =
if (dir.isDirectory) dir
else throw new FileConflictException(s"${base.path}/${path}: ${dir.path} is not a directory")
val components = path.split('/')
var dir = base
for (i <- 0 until components.length - 1) dir = ensureDirectory(dir) subdirectoryNamed components(i).toString
ensureDirectory(dir) fileNamed components.last.toString
}
private def writeBytes(outFile: AbstractFile, bytes: Array[Byte]): Unit = {
val out = new DataOutputStream(outFile.bufferedOutput)
try out.write(bytes, 0, bytes.length)
finally out.close()
}
override def writeFile(relativePath: String, bytes: Array[Byte]): Unit = {
val outFile = getFile(base, relativePath)
writeBytes(outFile, bytes)
}
override def close(): Unit = ()
}
  /**
   * Can't output a file due to the state of the file system, e.g. a non-directory file already
   * exists where a directory is needed. Caught by the writers and reported as a compiler error.
   */
  class FileConflictException(msg: String, cause: Throwable = null) extends IOException(msg, cause)
}
| lrytz/scala | src/compiler/scala/tools/nsc/backend/jvm/ClassfileWriters.scala | Scala | apache-2.0 | 14,907 |
package models
import util.Properties
import play.api.libs.json._
/**
 * Immutable snapshot of a tracked train: identity, current position/motion,
 * route endpoints and tracking-quality counters.
 */
case class Train(
  guid: String, title: String, timestamp: String,
  locLat: Double, locLon: Double,
  distance: Int, speed: Int, heading: String,
  first: String, previous: String, next: String, last: String,
  historySize: Int, historyLengthInM: Int,
  numJammed: Int, numSamples: Int
) {
  /** Multi-line, human-readable dump of all fields, each line ending with the platform separator. */
  override def toString = {
    val nl = Properties.lineSeparator
    Seq(
      "------------------------------",
      s"Train: $title ($guid)",
      s"Timestamp: $timestamp",
      s"Distance: $distance m",
      s"Location: $locLat,$locLon",
      s"Speed: $speed km/h",
      s"Heading: $heading",
      s"Route: $first -> $last",
      s"Stage: $previous -> $next",
      s"History size: $historySize ($historyLengthInM m)",
      s"Jammed ratio: $numJammed/$numSamples",
      "------------------------------"
    ).map(_ + nl).mkString
  }
}
object Train {
  // Play-JSON serializer: maps every Train field 1:1 into a JSON object.
  // NOTE(review): the name `projectWrites` looks copy-pasted from another model;
  // implicit resolution works by type, so renaming would be cosmetic but source-breaking
  // for any explicit references - left as-is.
  implicit val projectWrites = new Writes[Train] {
    def writes(train: Train): JsValue = {
      Json.obj(
        "guid" -> train.guid,
        "title" -> train.title,
        "timestamp" -> train.timestamp,
        "locLat" -> train.locLat,
        "locLon" -> train.locLon,
        "distance" -> train.distance,
        "speed" -> train.speed,
        "heading" -> train.heading,
        "first" -> train.first,
        "previous" -> train.previous,
        "next" -> train.next,
        "last" -> train.last,
        // Key is "historyLength" (not "historyLengthInM") - consumers depend on this spelling.
        "historyLength" -> train.historyLengthInM,
        "historySize" -> train.historySize,
        "numJammed" -> train.numJammed,
        "numSamples" -> train.numSamples
      )
    }
  }
}
package com.twitter.server
import com.twitter.app.App
/**
 * Create a new [[Hook]] for the given App. NewHooks are service-loaded.
 *
 * To use, extend the [[NewHook]] trait and implement an apply method which
 * returns a [[Hook]] implementation, then register the implementation as a
 * service-loaded class in
 * /META-INF/services/com.twitter.server.NewHook
 *
 * {{{
 * class MyHook extends NewHook {
 *   def apply(app: App) = new Hook {
 *
 *     override def premain(): Unit = ???
 *
 *     override def onExit(): Unit = ???
 *   }
 * }
 * }}}
 *
 * @see [[com.twitter.server.Hook]]
 * @see [[com.twitter.finagle.util.LoadService]]
 */
trait NewHook extends (App => Hook)
| twitter/twitter-server | server/src/main/scala/com/twitter/server/NewHook.scala | Scala | apache-2.0 | 679 |
package monocle.function
import monocle.std._
import monocle.syntax._
import org.specs2.scalaz.Spec
import scalaz.{==>>, IMap}
/** Example specs2 tests for Monocle's `Empty` optics: the `empty` Prism and its derived helpers. */
class EmptyExample extends Spec {

  // `empty` is a Prism[S, Unit]: getOption yields Some(()) only when S is empty.
  "empty is a Prism that is successful only when S is empty" in {
    (List(1, 2, 3) applyPrism empty getOption) ==== None
    (List.empty[Int] applyPrism empty getOption) ==== Some(())
    (Vector.empty[Int] applyPrism empty getOption) ==== Some(())
    ("" applyPrism empty getOption) ==== Some(())
  }

  // `_empty` is the reverseGet side of the Prism: the canonical empty value per type.
  "_empty return the empty value of a given type" in {
    _empty[List[Int]] ==== List.empty[Int]
    _empty[Map[Int, String]] ==== Map.empty[Int, String]
    _empty[Int ==>> String] ==== IMap.empty[Int, String]
    _empty[String] ==== ""
  }

  // `_isEmpty` tests whether the Prism matches, i.e. whether the value is empty.
  "_isEmpty is a function that takes an S and return true is S is empty, false otherwise" in {
    _isEmpty(List(1,2,3)) ==== false
    _isEmpty("hello") ==== false
    _isEmpty(Nil) ==== true
    _isEmpty(None) ==== true
    _isEmpty("") ==== true
  }
}
| CapeSepias/Monocle | example/src/test/scala/monocle/function/EmptyExample.scala | Scala | mit | 1,017 |
package org.plummtw.jinrou.enum
import org.plummtw.jinrou.data._
/**
 * Single-letter flags attached to a vote. The letter is the enum value's name,
 * so every member must use a distinct letter for `withName` round-trips to work.
 */
object VoteFlagEnum extends Enumeration {
  type VoteFlagEnum = Value

  val AUTO = Value("A")
  val BLESSED = Value("B")
  val SHOUTED = Value("S")
  val CURSED = Value("C")
  val BFEATHERED = Value("E")
  val INVALID = Value("I")
  val FAKE = Value("F")
  val POWER = Value("P")
  val VICTIM = Value("V")
  val DOMINATE = Value("D")
  val COMMAND = Value("J")
  val VORTEX = Value("R")
  val SEAR = Value("Z")
  val FALLEN = Value("X")
  // Fix: was Value("I"), colliding with INVALID - withName("I") could never resolve to ITEM.
  // "M" is the first otherwise-unused letter. NOTE(review): if "I" was ever persisted for
  // ITEM (e.g. in the database), stored data must be migrated to match.
  val ITEM = Value("M")
  val COLORSPRAY = Value("T")
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.JavaConverters._
import org.apache.spark.annotation.Stable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types._
private[sql] object Column {

  /** Creates a Column that refers to the (possibly nested, possibly star) name `colName`. */
  def apply(colName: String): Column = new Column(colName)

  /** Wraps an already-built Catalyst expression in a Column. */
  def apply(expr: Expression): Column = new Column(expr)

  /** Extractor: exposes the underlying Catalyst expression of a Column. */
  def unapply(col: Column): Option[Expression] = Some(col.expr)

  // Auto-generated display alias for an expression: typed aggregates print the aggregate
  // function itself; everything else uses the pretty-SQL rendering.
  private[sql] def generateAlias(e: Expression): String = {
    e match {
      case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
        a.aggregateFunction.toString
      case expr => toPrettySQL(expr)
    }
  }

  // Removes the dataset-id/column-position metadata Spark attaches to attribute references,
  // so that column equality/hashing ignores which Dataset the attribute came from.
  private[sql] def stripColumnReferenceMetadata(a: AttributeReference): AttributeReference = {
    val metadataWithoutId = new MetadataBuilder()
      .withMetadata(a.metadata)
      .remove(Dataset.DATASET_ID_KEY)
      .remove(Dataset.COL_POS_KEY)
      .build()
    a.withMetadata(metadataWithoutId)
  }
}
/**
* A [[Column]] where an [[Encoder]] has been given for the expected input and return type.
* To create a [[TypedColumn]], use the `as` function on a [[Column]].
*
* @tparam T The input type expected for this expression. Can be `Any` if the expression is type
* checked by the analyzer instead of the compiler (i.e. `expr("sum(...)")`).
* @tparam U The output type of this column.
*
* @since 1.6.0
*/
@Stable
class TypedColumn[-T, U](
    expr: Expression,
    private[sql] val encoder: ExpressionEncoder[U])  // encoder for the column's output type U
  extends Column(expr) {

  /**
   * Inserts the specific input type and schema into any expressions that are expected to operate
   * on a decoded object.
   *
   * Only typed aggregate expressions that do not yet have an input deserializer are rewritten;
   * untyped aggregates are resolved later by the analyzer.
   */
  private[sql] def withInputType(
      inputEncoder: ExpressionEncoder[_],
      inputAttributes: Seq[Attribute]): TypedColumn[T, U] = {
    val unresolvedDeserializer = UnresolvedDeserializer(inputEncoder.deserializer, inputAttributes)

    // This only inserts inputs into typed aggregate expressions. For untyped aggregate expressions,
    // the resolving is handled in the analyzer directly.
    val newExpr = expr transform {
      case ta: TypedAggregateExpression if ta.inputDeserializer.isEmpty =>
        ta.withInputInfo(
          deser = unresolvedDeserializer,
          cls = inputEncoder.clsTag.runtimeClass,
          schema = inputEncoder.schema)
    }
    new TypedColumn[T, U](newExpr, encoder)
  }

  /**
   * Gives the [[TypedColumn]] a name (alias).
   * If the current `TypedColumn` has metadata associated with it, this metadata will be propagated
   * to the new column.
   *
   * @group expr_ops
   * @since 2.0.0
   */
  override def name(alias: String): TypedColumn[T, U] =
    new TypedColumn[T, U](super.name(alias).expr, encoder)
}
/**
* A column that will be computed based on the data in a `DataFrame`.
*
* A new column can be constructed based on the input columns present in a DataFrame:
*
* {{{
* df("columnName") // On a specific `df` DataFrame.
* col("columnName") // A generic column not yet associated with a DataFrame.
* col("columnName.field") // Extracting a struct field
* col("`a.column.with.dots`") // Escape `.` in column names.
* $"columnName" // Scala short hand for a named column.
* }}}
*
* [[Column]] objects can be composed to form complex expressions:
*
* {{{
* $"a" + 1
* $"a" === $"b"
* }}}
*
* @note The internal Catalyst expression can be accessed via [[expr]], but this method is for
* debugging purposes only and can change in any future Spark releases.
*
* @groupname java_expr_ops Java-specific expression operators
* @groupname expr_ops Expression operators
* @groupname df_ops DataFrame functions
* @groupname Ungrouped Support functions for DataFrames
*
* @since 1.3.0
*/
@Stable
class Column(val expr: Expression) extends Logging {
def this(name: String) = this(name match {
case "*" => UnresolvedStar(None)
case _ if name.endsWith(".*") =>
val parts = UnresolvedAttribute.parseAttributeName(name.substring(0, name.length - 2))
UnresolvedStar(Some(parts))
case _ => UnresolvedAttribute.quotedString(name)
})
override def toString: String = toPrettySQL(expr)
override def equals(that: Any): Boolean = that match {
case that: Column => that.normalizedExpr() == this.normalizedExpr()
case _ => false
}
override def hashCode: Int = this.normalizedExpr().hashCode()
private def normalizedExpr(): Expression = expr transform {
case a: AttributeReference => Column.stripColumnReferenceMetadata(a)
}
/** Creates a column based on the given expression. */
private def withExpr(newExpr: Expression): Column = new Column(newExpr)
/**
* Returns the expression for this column either with an existing or auto assigned name.
*/
private[sql] def named: NamedExpression = expr match {
// Wrap UnresolvedAttribute with UnresolvedAlias, as when we resolve UnresolvedAttribute, we
// will remove intermediate Alias for ExtractValue chain, and we need to alias it again to
// make it a NamedExpression.
case u: UnresolvedAttribute => UnresolvedAlias(u)
case u: UnresolvedExtractValue => UnresolvedAlias(u)
case expr: NamedExpression => expr
// Leave an unaliased generator with an empty list of names since the analyzer will generate
// the correct defaults after the nested expression's type has been resolved.
case g: Generator => MultiAlias(g, Nil)
case func: UnresolvedFunction => UnresolvedAlias(func, Some(Column.generateAlias))
// If we have a top level Cast, there is a chance to give it a better alias, if there is a
// NamedExpression under this Cast.
case c: Cast =>
c.transformUp {
case c @ Cast(_: NamedExpression, _, _) => UnresolvedAlias(c)
} match {
case ne: NamedExpression => ne
case _ => Alias(expr, toPrettySQL(expr))()
}
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
UnresolvedAlias(a, Some(Column.generateAlias))
// Wait until the struct is resolved. This will generate a nicer looking alias.
case struct: CreateNamedStruct => UnresolvedAlias(struct)
case expr: Expression => Alias(expr, toPrettySQL(expr))()
}
/**
* Provides a type hint about the expected return value of this column. This information can
* be used by operations such as `select` on a [[Dataset]] to automatically convert the
* results into the correct JVM types.
* @since 1.6.0
*/
def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
* The following types of extraction are supported:
* <ul>
* <li>Given an Array, an integer ordinal can be used to retrieve a single value.</li>
* <li>Given a Map, a key of the correct type can be used to retrieve an individual value.</li>
* <li>Given a Struct, a string fieldName can be used to extract that field.</li>
* <li>Given an Array of Structs, a string fieldName can be used to extract filed
* of every struct in that array, and return an Array of fields.</li>
* </ul>
* @group expr_ops
* @since 1.4.0
*/
def apply(extraction: Any): Column = withExpr {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
/**
* Unary minus, i.e. negate the expression.
* {{{
* // Scala: select the amount column and negates all values.
* df.select( -df("amount") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.select( negate(col("amount") );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_- : Column = withExpr { UnaryMinus(expr) }
/**
* Inversion of boolean expression, i.e. NOT.
* {{{
* // Scala: select rows that are not active (isActive === false)
* df.filter( !df("isActive") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( not(df.col("isActive")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def === (other: Any): Column = withExpr {
val right = lit(other).expr
if (this.expr == right) {
logWarning(
s"Constructing trivially true equals predicate, '${this.expr} = $right'. " +
"Perhaps you need to use aliases.")
}
EqualTo(expr, right)
}
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def equalTo(other: Any): Column = this === other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") =!= df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def =!= (other: Any): Column = withExpr{ Not(EqualTo(expr, lit(other).expr)) }
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
@deprecated("!== does not have the same precedence as ===, use =!= instead", "2.0.0")
def !== (other: Any): Column = this =!= other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def notEqual(other: Any): Column = withExpr { Not(EqualTo(expr, lit(other).expr)) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > 21 )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people.col("age").gt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def > (other: Any): Column = withExpr { GreaterThan(expr, lit(other).expr) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > lit(21) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people.col("age").gt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def gt(other: Any): Column = this > other
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people.col("age").lt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def < (other: Any): Column = withExpr { LessThan(expr, lit(other).expr) }
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people.col("age").lt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def lt(other: Any): Column = this < other
/**
* Less than or equal to.
* {{{
* // Scala: The following selects people age 21 or younger than 21.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people.col("age").leq(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def <= (other: Any): Column = withExpr { LessThanOrEqual(expr, lit(other).expr) }
/**
* Less than or equal to.
* {{{
* // Scala: The following selects people age 21 or younger than 21.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people.col("age").leq(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def leq(other: Any): Column = this <= other
/**
* Greater than or equal to an expression.
* {{{
* // Scala: The following selects people age 21 or older than 21.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people.col("age").geq(21) )
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def >= (other: Any): Column = withExpr { GreaterThanOrEqual(expr, lit(other).expr) }
/**
* Greater than or equal to an expression.
* {{{
* // Scala: The following selects people age 21 or older than 21.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people.col("age").geq(21) )
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def geq(other: Any): Column = this >= other
/**
* Equality test that is safe for null values.
*
* @group expr_ops
* @since 1.3.0
*/
def <=> (other: Any): Column = withExpr {
val right = lit(other).expr
if (this.expr == right) {
logWarning(
s"Constructing trivially true equals predicate, '${this.expr} <=> $right'. " +
"Perhaps you need to use aliases.")
}
EqualNullSafe(expr, right)
}
/**
* Equality test that is safe for null values.
*
* @group java_expr_ops
* @since 1.3.0
*/
def eqNullSafe(other: Any): Column = this <=> other
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def when(condition: Column, value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches :+ ((condition.expr, lit(value).expr))) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"when() cannot be applied once otherwise() is applied")
case _ =>
throw new IllegalArgumentException(
"when() can only be applied on a Column previously generated by when() function")
}
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def otherwise(value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches, Option(lit(value).expr)) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"otherwise() can only be applied once on a Column previously generated by when()")
case _ =>
throw new IllegalArgumentException(
"otherwise() can only be applied on a Column previously generated by when()")
}
/**
* True if the current column is between the lower bound and upper bound, inclusive.
*
* @group java_expr_ops
* @since 1.4.0
*/
def between(lowerBound: Any, upperBound: Any): Column = {
(this >= lowerBound) && (this <= upperBound)
}
/**
* True if the current expression is NaN.
*
* @group expr_ops
* @since 1.5.0
*/
def isNaN: Column = withExpr { IsNaN(expr) }
/**
* True if the current expression is null.
*
* @group expr_ops
* @since 1.3.0
*/
  def isNull: Column = withExpr { IsNull(expr) } // Catalyst IsNull predicate over this column's expression
/**
* True if the current expression is NOT null.
*
* @group expr_ops
* @since 1.3.0
*/
  def isNotNull: Column = withExpr { IsNotNull(expr) } // Catalyst IsNotNull predicate over this column's expression
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people.col("inSchool").or(people.col("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def || (other: Any): Column = withExpr { Or(expr, lit(other).expr) } // lit() lifts a non-Column operand into a literal
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people.col("inSchool").or(people.col("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def or(other: Column): Column = this || other
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people.col("inSchool").and(people.col("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def && (other: Any): Column = withExpr { And(expr, lit(other).expr) } // lit() lifts a non-Column operand into a literal
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people.col("inSchool").and(people.col("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def and(other: Column): Column = this && other
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people.col("height").plus(people.col("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def + (other: Any): Column = withExpr { Add(expr, lit(other).expr) } // builds Catalyst Add; lit() lifts non-Column operands
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people.col("height").plus(people.col("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def plus(other: Any): Column = this + other
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people.col("height").minus(people.col("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def - (other: Any): Column = withExpr { Subtract(expr, lit(other).expr) } // builds Catalyst Subtract; lit() lifts non-Column operands
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people.col("height").minus(people.col("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def minus(other: Any): Column = this - other
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people.col("height").multiply(people.col("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def * (other: Any): Column = withExpr { Multiply(expr, lit(other).expr) } // builds Catalyst Multiply; lit() lifts non-Column operands
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people.col("height").multiply(people.col("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def multiply(other: Any): Column = this * other
/**
* Division this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people.col("height").divide(people.col("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def / (other: Any): Column = withExpr { Divide(expr, lit(other).expr) } // builds Catalyst Divide; lit() lifts non-Column operands
/**
* Division this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people.col("height").divide(people.col("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def divide(other: Any): Column = this / other
/**
* Modulo (a.k.a. remainder) expression.
*
* @group expr_ops
* @since 1.3.0
*/
  def % (other: Any): Column = withExpr { Remainder(expr, lit(other).expr) } // builds Catalyst Remainder; lit() lifts non-Column operands
/**
* Modulo (a.k.a. remainder) expression.
*
* @group java_expr_ops
* @since 1.3.0
*/
def mod(other: Any): Column = this % other
/**
* A boolean expression that is evaluated to true if the value of this expression is contained
* by the evaluated values of the arguments.
*
* Note: Since the type of the elements in the list are inferred only during the run time,
* the elements will be "up-casted" to the most common type for comparison.
* For eg:
* 1) In the case of "Int vs String", the "Int" will be up-casted to "String" and the
* comparison will look like "String vs String".
* 2) In the case of "Float vs Double", the "Float" will be up-casted to "Double" and the
* comparison will look like "Double vs Double"
*
* @group expr_ops
* @since 1.5.0
*/
  @scala.annotation.varargs
  def isin(list: Any*): Column = withExpr { In(expr, list.map(lit(_).expr)) } // each element is lifted to a literal expression
/**
* A boolean expression that is evaluated to true if the value of this expression is contained
* by the provided collection.
*
* Note: Since the type of the elements in the collection are inferred only during the run time,
* the elements will be "up-casted" to the most common type for comparison.
* For eg:
* 1) In the case of "Int vs String", the "Int" will be up-casted to "String" and the
* comparison will look like "String vs String".
* 2) In the case of "Float vs Double", the "Float" will be up-casted to "Double" and the
* comparison will look like "Double vs Double"
*
* @group expr_ops
* @since 2.4.0
*/
def isInCollection(values: scala.collection.Iterable[_]): Column = isin(values.toSeq: _*)
/**
* A boolean expression that is evaluated to true if the value of this expression is contained
* by the provided collection.
*
* Note: Since the type of the elements in the collection are inferred only during the run time,
* the elements will be "up-casted" to the most common type for comparison.
* For eg:
* 1) In the case of "Int vs String", the "Int" will be up-casted to "String" and the
* comparison will look like "String vs String".
* 2) In the case of "Float vs Double", the "Float" will be up-casted to "Double" and the
* comparison will look like "Double vs Double"
*
* @group java_expr_ops
* @since 2.4.0
*/
def isInCollection(values: java.lang.Iterable[_]): Column = isInCollection(values.asScala)
/**
* SQL like expression. Returns a boolean column based on a SQL LIKE match.
*
* @group expr_ops
* @since 1.3.0
*/
  def like(literal: String): Column = withExpr { new Like(expr, lit(literal).expr) } // Catalyst Like predicate over a literal pattern
/**
* SQL RLIKE expression (LIKE with Regex). Returns a boolean column based on a regex
* match.
*
* @group expr_ops
* @since 1.3.0
*/
  def rlike(literal: String): Column = withExpr { RLike(expr, lit(literal).expr) } // Catalyst RLike (regex) predicate over a literal pattern
/**
* An expression that gets an item at position `ordinal` out of an array,
* or gets a value by key `key` in a `MapType`.
*
* @group expr_ops
* @since 1.3.0
*/
  def getItem(key: Any): Column = withExpr { UnresolvedExtractValue(expr, Literal(key)) } // key becomes a Literal; extraction deferred to UnresolvedExtractValue
// scalastyle:off line.size.limit
/**
* An expression that adds/replaces field in `StructType` by name.
*
* {{{
* val df = sql("SELECT named_struct('a', 1, 'b', 2) struct_col")
* df.select($"struct_col".withField("c", lit(3)))
* // result: {"a":1,"b":2,"c":3}
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2) struct_col")
* df.select($"struct_col".withField("b", lit(3)))
* // result: {"a":1,"b":3}
*
* val df = sql("SELECT CAST(NULL AS struct<a:int,b:int>) struct_col")
* df.select($"struct_col".withField("c", lit(3)))
* // result: null of type struct<a:int,b:int,c:int>
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2, 'b', 3) struct_col")
* df.select($"struct_col".withField("b", lit(100)))
* // result: {"a":1,"b":100,"b":100}
*
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".withField("a.c", lit(3)))
* // result: {"a":{"a":1,"b":2,"c":3}}
*
* val df = sql("SELECT named_struct('a', named_struct('b', 1), 'a', named_struct('c', 2)) struct_col")
* df.select($"struct_col".withField("a.c", lit(3)))
* // result: org.apache.spark.sql.AnalysisException: Ambiguous reference to fields
* }}}
*
* This method supports adding/replacing nested fields directly e.g.
*
* {{{
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".withField("a.c", lit(3)).withField("a.d", lit(4)))
* // result: {"a":{"a":1,"b":2,"c":3,"d":4}}
* }}}
*
* However, if you are going to add/replace multiple nested fields, it is more optimal to extract
* out the nested struct before adding/replacing multiple fields e.g.
*
* {{{
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".withField("a", $"struct_col.a".withField("c", lit(3)).withField("d", lit(4))))
* // result: {"a":{"a":1,"b":2,"c":3,"d":4}}
* }}}
*
* @group expr_ops
* @since 3.1.0
*/
// scalastyle:on line.size.limit
  def withField(fieldName: String, col: Column): Column = withExpr {
    // Arguments are validated eagerly, before the expression tree is built.
    require(fieldName != null, "fieldName cannot be null")
    require(col != null, "col cannot be null")
    UpdateFields(expr, fieldName, col.expr)
  }
// scalastyle:off line.size.limit
/**
* An expression that drops fields in `StructType` by name.
* This is a no-op if schema doesn't contain field name(s).
*
* {{{
* val df = sql("SELECT named_struct('a', 1, 'b', 2) struct_col")
* df.select($"struct_col".dropFields("b"))
* // result: {"a":1}
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2) struct_col")
* df.select($"struct_col".dropFields("c"))
* // result: {"a":1,"b":2}
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2, 'c', 3) struct_col")
* df.select($"struct_col".dropFields("b", "c"))
* // result: {"a":1}
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2) struct_col")
* df.select($"struct_col".dropFields("a", "b"))
* // result: org.apache.spark.sql.AnalysisException: cannot resolve 'update_fields(update_fields(`struct_col`))' due to data type mismatch: cannot drop all fields in struct
*
* val df = sql("SELECT CAST(NULL AS struct<a:int,b:int>) struct_col")
* df.select($"struct_col".dropFields("b"))
* // result: null of type struct<a:int>
*
* val df = sql("SELECT named_struct('a', 1, 'b', 2, 'b', 3) struct_col")
* df.select($"struct_col".dropFields("b"))
* // result: {"a":1}
*
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".dropFields("a.b"))
* // result: {"a":{"a":1}}
*
* val df = sql("SELECT named_struct('a', named_struct('b', 1), 'a', named_struct('c', 2)) struct_col")
* df.select($"struct_col".dropFields("a.c"))
* // result: org.apache.spark.sql.AnalysisException: Ambiguous reference to fields
* }}}
*
* This method supports dropping multiple nested fields directly e.g.
*
* {{{
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".dropFields("a.b", "a.c"))
* // result: {"a":{"a":1}}
* }}}
*
* However, if you are going to drop multiple nested fields, it is more optimal to extract
* out the nested struct before dropping multiple fields from it e.g.
*
* {{{
* val df = sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col")
* df.select($"struct_col".withField("a", $"struct_col.a".dropFields("b", "c")))
* // result: {"a":{"a":1}}
* }}}
*
* @group expr_ops
* @since 3.1.0
*/
// scalastyle:on line.size.limit
def dropFields(fieldNames: String*): Column = withExpr {
fieldNames.tail.foldLeft(UpdateFields(expr, fieldNames.head)) {
(resExpr, fieldName) => UpdateFields(resExpr, fieldName)
}
}
/**
* An expression that gets a field by name in a `StructType`.
*
* @group expr_ops
* @since 1.3.0
*/
  def getField(fieldName: String): Column = withExpr {
    // Field name becomes a Literal key for UnresolvedExtractValue.
    UnresolvedExtractValue(expr, Literal(fieldName))
  }
/**
* An expression that returns a substring.
* @param startPos expression for the starting position.
* @param len expression for the length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
  def substr(startPos: Column, len: Column): Column = withExpr {
    // Builds a Catalyst Substring over the three expressions.
    Substring(expr, startPos.expr, len.expr)
  }
/**
* An expression that returns a substring.
* @param startPos starting position.
* @param len length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
def substr(startPos: Int, len: Int): Column = withExpr {
Substring(expr, lit(startPos).expr, lit(len).expr)
}
/**
* Contains the other element. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
  def contains(other: Any): Column = withExpr { Contains(expr, lit(other).expr) } // Catalyst Contains; lit() lifts non-Column operands
/**
* String starts with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
  def startsWith(other: Column): Column = withExpr { StartsWith(expr, lit(other).expr) } // operand passes through lit() before building StartsWith
/**
* String starts with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def startsWith(literal: String): Column = this.startsWith(lit(literal))
/**
* String ends with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
  def endsWith(other: Column): Column = withExpr { EndsWith(expr, lit(other).expr) } // operand passes through lit() before building EndsWith
/**
* String ends with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def endsWith(literal: String): Column = this.endsWith(lit(literal))
/**
* Gives the column an alias. Same as `as`.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".alias("colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def alias(alias: String): Column = name(alias)
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
* to the new column. If this not desired, use the API `as(alias: String, metadata: Metadata)`
* with explicit metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: String): Column = name(alias)
/**
* (Scala-specific) Assigns the given aliases to the results of a table generating function.
* {{{
* // Renames colA to colB in select output.
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def as(aliases: Seq[String]): Column = withExpr { MultiAlias(expr, aliases) } // one alias per generated column
/**
* Assigns the given aliases to the results of a table generating function.
* {{{
* // Renames colA to colB in select output.
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def as(aliases: Array[String]): Column = withExpr { MultiAlias(expr, aliases) } // array variant of the Seq overload
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as('colB))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
* to the new column. If this not desired, use the API `as(alias: String, metadata: Metadata)`
* with explicit metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: Symbol): Column = name(alias.name)
/**
* Gives the column an alias with metadata.
* {{{
* val metadata: Metadata = ...
* df.select($"colA".as("colB", metadata))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def as(alias: String, metadata: Metadata): Column = withExpr {
    // The supplied metadata is attached explicitly to the alias.
    Alias(expr, alias)(explicitMetadata = Some(metadata))
  }
/**
* Gives the column a name (alias).
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".name("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
* to the new column. If this not desired, use the API `as(alias: String, metadata: Metadata)`
* with explicit metadata.
*
* @group expr_ops
* @since 2.0.0
*/
  def name(alias: String): Column = withExpr {
    // normalizedExpr() is defined elsewhere in this class; presumably it prepares
    // the expression for aliasing — NOTE(review): confirm its exact role.
    Alias(normalizedExpr(), alias)()
  }
/**
* Casts the column to a different data type.
* {{{
* // Casts colA to IntegerType.
* import org.apache.spark.sql.types.IntegerType
* df.select(df("colA").cast(IntegerType))
*
* // equivalent to
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def cast(to: DataType): Column = withExpr { Cast(expr, to) } // builds a Catalyst Cast expression
/**
* Casts the column to a different data type, using the canonical string representation
* of the type. The supported types are: `string`, `boolean`, `byte`, `short`, `int`, `long`,
* `float`, `double`, `decimal`, `date`, `timestamp`.
* {{{
* // Casts colA to integer.
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def cast(to: String): Column = cast(CatalystSqlParser.parseDataType(to))
/**
* Returns a sort expression based on the descending order of the column.
* {{{
* // Scala
* df.sort(df("age").desc)
*
* // Java
* df.sort(df.col("age").desc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def desc: Column = withExpr { SortOrder(expr, Descending) } // null placement left to SortOrder's default
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear before non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing first.
* df.sort(df("age").desc_nulls_first)
*
* // Java
* df.sort(df.col("age").desc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
  def desc_nulls_first: Column = withExpr { SortOrder(expr, Descending, NullsFirst, Set.empty) } // nulls first
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing last.
* df.sort(df("age").desc_nulls_last)
*
* // Java
* df.sort(df.col("age").desc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
  def desc_nulls_last: Column = withExpr { SortOrder(expr, Descending, NullsLast, Set.empty) } // nulls last
/**
* Returns a sort expression based on ascending order of the column.
* {{{
* // Scala: sort a DataFrame by age column in ascending order.
* df.sort(df("age").asc)
*
* // Java
* df.sort(df.col("age").asc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
  def asc: Column = withExpr { SortOrder(expr, Ascending) } // null placement left to SortOrder's default
/**
* Returns a sort expression based on ascending order of the column,
* and null values return before non-null values.
* {{{
* // Scala: sort a DataFrame by age column in ascending order and null values appearing first.
* df.sort(df("age").asc_nulls_first)
*
* // Java
* df.sort(df.col("age").asc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
  def asc_nulls_first: Column = withExpr { SortOrder(expr, Ascending, NullsFirst, Set.empty) } // nulls first
/**
* Returns a sort expression based on ascending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in ascending order and null values appearing last.
* df.sort(df("age").asc_nulls_last)
*
* // Java
* df.sort(df.col("age").asc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
  def asc_nulls_last: Column = withExpr { SortOrder(expr, Ascending, NullsLast, Set.empty) } // nulls last
/**
* Prints the expression to the console for debugging purposes.
*
* @group df_ops
* @since 1.3.0
*/
def explain(extended: Boolean): Unit = {
// scalastyle:off println
if (extended) {
println(expr)
} else {
println(expr.sql)
}
// scalastyle:on println
}
/**
* Compute bitwise OR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def bitwiseOR(other: Any): Column = withExpr { BitwiseOr(expr, lit(other).expr) } // lit() lifts non-Column operands
/**
* Compute bitwise AND of this expression with another expression.
* {{{
* df.select($"colA".bitwiseAND($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def bitwiseAND(other: Any): Column = withExpr { BitwiseAnd(expr, lit(other).expr) } // lit() lifts non-Column operands
/**
* Compute bitwise XOR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseXOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def bitwiseXOR(other: Any): Column = withExpr { BitwiseXor(expr, lit(other).expr) } // lit() lifts non-Column operands
/**
* Defines a windowing column.
*
* {{{
* val w = Window.partitionBy("name").orderBy("id")
* df.select(
* sum("price").over(w.rangeBetween(Window.unboundedPreceding, 2)),
* avg("price").over(w.rowsBetween(Window.currentRow, 4))
* )
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
  def over(window: expressions.WindowSpec): Column = window.withAggregate(this) // delegates to WindowSpec.withAggregate
/**
* Defines an empty analytic clause. In this case the analytic function is applied
* and presented for all rows in the result set.
*
* {{{
* df.select(
* sum("price").over(),
* avg("price").over()
* )
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def over(): Column = over(Window.spec)
}
/**
* A convenient class used for constructing schema.
*
* @since 1.3.0
*/
@Stable
class ColumnName(name: String) extends Column(name) {
/**
* Creates a new `StructField` of type boolean.
* @since 1.3.0
*/
def boolean: StructField = StructField(name, BooleanType)
/**
* Creates a new `StructField` of type byte.
* @since 1.3.0
*/
def byte: StructField = StructField(name, ByteType)
/**
* Creates a new `StructField` of type short.
* @since 1.3.0
*/
def short: StructField = StructField(name, ShortType)
/**
* Creates a new `StructField` of type int.
* @since 1.3.0
*/
def int: StructField = StructField(name, IntegerType)
/**
* Creates a new `StructField` of type long.
* @since 1.3.0
*/
def long: StructField = StructField(name, LongType)
/**
* Creates a new `StructField` of type float.
* @since 1.3.0
*/
def float: StructField = StructField(name, FloatType)
/**
* Creates a new `StructField` of type double.
* @since 1.3.0
*/
def double: StructField = StructField(name, DoubleType)
/**
* Creates a new `StructField` of type string.
* @since 1.3.0
*/
def string: StructField = StructField(name, StringType)
/**
* Creates a new `StructField` of type date.
* @since 1.3.0
*/
def date: StructField = StructField(name, DateType)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal: StructField = StructField(name, DecimalType.USER_DEFAULT)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal(precision: Int, scale: Int): StructField =
StructField(name, DecimalType(precision, scale))
/**
* Creates a new `StructField` of type timestamp.
* @since 1.3.0
*/
def timestamp: StructField = StructField(name, TimestampType)
/**
* Creates a new `StructField` of type binary.
* @since 1.3.0
*/
def binary: StructField = StructField(name, BinaryType)
/**
* Creates a new `StructField` of type array.
* @since 1.3.0
*/
def array(dataType: DataType): StructField = StructField(name, ArrayType(dataType))
/**
* Creates a new `StructField` of type map.
* @since 1.3.0
*/
def map(keyType: DataType, valueType: DataType): StructField =
map(MapType(keyType, valueType))
  /**
   * Creates a new `StructField` of the given map type.
   * @since 1.3.0
   */
  def map(mapType: MapType): StructField = StructField(name, mapType)
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(fields: StructField*): StructField = struct(StructType(fields))
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(structType: StructType): StructField = StructField(name, structType)
}
| shuangshuangwang/spark | sql/core/src/main/scala/org/apache/spark/sql/Column.scala | Scala | apache-2.0 | 44,563 |
package com.lambtors.poker_api.module.poker.behaviour.player_cards.find
import java.util.UUID
import cats.implicits._
import com.lambtors.poker_api.module.poker.application.player_cards.find.{
FindPlayerCardsQueryHandler,
PlayerCardsFinder
}
import com.lambtors.poker_api.module.poker.behaviour.PokerBehaviourSpec
import com.lambtors.poker_api.module.poker.domain.error.{InvalidPlayerId, PlayerNotFound}
import com.lambtors.poker_api.module.poker.infrastructure.stub.{
FindPlayerCardsQueryStub,
FindPlayerCardsResponseStub,
PlayerIdStub,
PlayerStub
}
final class FindPlayerCardsSpec extends PokerBehaviourSpec {
  // System under test: the query handler wired to a finder backed by the
  // spec-provided (stubbed) player repository.
  val queryHandler = new FindPlayerCardsQueryHandler(new PlayerCardsFinder(playerRepository))
  "A FindPlayerCardsQueryHandler" should {
    "find player cards" in {
      // Happy path: the player referenced by the query exists in the state.
      val query = FindPlayerCardsQueryStub.random()
      val player = PlayerStub.create(playerId = PlayerIdStub.create(UUID.fromString(query.playerId)))
      val initialState = PokerState.empty.withPlayer(player)
      val expectedResponse = FindPlayerCardsResponseStub.create(player.cards)
      val validatedStateT = queryHandler.handle(query)
      // The query passes validation...
      validatedStateT should beValid
      // ...and running the state transition yields the player's cards.
      validatedStateT.map(_.runA(initialState) should beRightContaining(expectedResponse))
    }
    "fail if the player does not exist" in {
      // The query is valid, but the state contains no matching player.
      val query = FindPlayerCardsQueryStub.random()
      val playerId = PlayerIdStub.create(UUID.fromString(query.playerId))
      val initialState = PokerState.empty
      val validatedStateT = queryHandler.handle(query)
      validatedStateT should beValid
      // Execution fails with a domain error identifying the missing player.
      validatedStateT.map(_.runA(initialState) should beLeftContaining[Throwable](PlayerNotFound(playerId)))
    }
    "have validation errors on an invalid query" in {
      // A malformed player id is rejected during validation, before execution.
      val query = FindPlayerCardsQueryStub.invalid()
      queryHandler.handle(query) should haveValidationErrors(InvalidPlayerId(query.playerId))
    }
  }
}
| lambtors/poker-api | src/test/scala/com/lambtors/poker_api/module/poker/behaviour/player_cards/find/FindPlayerCardsSpec.scala | Scala | mit | 1,920 |
package sbt
import sbt.internal.util.Types.const
import java.io.File
/**
* Represents how settings from various sources are automatically merged into a Project's settings.
* This only configures per-project settings and not global or per-build settings.
*/
sealed abstract class AddSettings
object AddSettings {
  // Internal representations of the settings sources; constructed only through
  // the public vals/defs below.
  private[sbt] final class Sequence(val sequence: Seq[AddSettings]) extends AddSettings
  private[sbt] final object User extends AddSettings
  private[sbt] final class Plugins(val include: Plugin => Boolean) extends AddSettings
  private[sbt] final class AutoPlugins(val include: AutoPlugin => Boolean) extends AddSettings
  private[sbt] final class DefaultSbtFiles(val include: File => Boolean) extends AddSettings
  private[sbt] final class SbtFiles(val files: Seq[File]) extends AddSettings
  private[sbt] final object BuildScalaFiles extends AddSettings
  /** Adds all settings from autoplugins. */
  val autoPlugins: AddSettings = new AutoPlugins(const(true))
  // Note: we do not expose fine-grained autoplugin selection because it is
  // dangerous to control at that level right now. The hook (the AutoPlugins
  // include filter) stays in place in case it needs to be exposed, but it will
  // most likely remain locked out for users, with an alternative ordering
  // feature in place instead.
  /** Settings specified in Build.scala `Project` constructors. */
  val buildScalaFiles: AddSettings = BuildScalaFiles
  /** All plugins that aren't auto plugins. */
  val nonAutoPlugins: AddSettings = plugins(const(true))
  /** Adds all settings from a plugin to a project. */
  val allPlugins: AddSettings = seq(autoPlugins, nonAutoPlugins)
  /** Allows the plugins whose names match the `names` filter to automatically add settings to a project. */
  def plugins(include: Plugin => Boolean): AddSettings = new Plugins(include)
  /** Includes user settings in the project. */
  val userSettings: AddSettings = User
  /** Includes the settings from all .sbt files in the project's base directory. */
  val defaultSbtFiles: AddSettings = new DefaultSbtFiles(const(true))
  /** Includes the settings from the .sbt files given by `files`. */
  def sbtFiles(files: File*): AddSettings = new SbtFiles(files)
  /** Groups several settings sources into a single ordered sequence. */
  def seq(autos: AddSettings*): AddSettings = new Sequence(autos)
  /** The default inclusion of settings. */
  val allDefaults: AddSettings = seq(autoPlugins, buildScalaFiles, userSettings, nonAutoPlugins, defaultSbtFiles)
  /** Combines two automatic setting configurations. */
  def append(a: AddSettings, b: AddSettings): AddSettings = (a, b) match {
    // Flatten nested Sequences so the combined value stays a single level deep.
    case (sa: Sequence, sb: Sequence) => seq(sa.sequence ++ sb.sequence: _*)
    case (sa: Sequence, _) => seq(sa.sequence :+ b: _*)
    case (_, sb: Sequence) => seq(a +: sb.sequence: _*)
    case _ => seq(a, b)
  }
  /** Removes any .sbt-file sources (default or explicit) from `a`. */
  def clearSbtFiles(a: AddSettings): AddSettings = tx(a) {
    case _: DefaultSbtFiles | _: SbtFiles => None
    case x => Some(x)
  } getOrElse seq()
  // Structure-preserving transform: applies `f` to every leaf, dropping leaves
  // mapped to None and collapsing Sequences with zero or one surviving element.
  private[sbt] def tx(a: AddSettings)(f: AddSettings => Option[AddSettings]): Option[AddSettings] = a match {
    case s: Sequence =>
      s.sequence.flatMap { b => tx(b)(f) } match {
        case Seq() => None
        case Seq(x) => Some(x)
        case ss => Some(new Sequence(ss))
      }
    case x => f(x)
  }
}
| mdedetrich/sbt | main/src/main/scala/sbt/AddSettings.scala | Scala | bsd-3-clause | 3,358 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl
import scala.reflect.ClassTag
import java.nio.ByteBuffer
import java.lang.reflect.Constructor
import java.lang.reflect.Field
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.Entrypoint
import com.amd.aparapi.internal.model.ClassModel.NameMatcher
import com.amd.aparapi.internal.model.ClassModel.FieldDescriptor
import com.amd.aparapi.internal.util.UnsafeWrapper
trait OutputBufferWrapper[T] {
  // Iterator-style access to the deserialized output elements of type T.
  def next() : T
  def hasNext() : Boolean
  // Number of kernel arguments this buffer accounts for — NOTE(review): exact
  // meaning is not evident from this file; confirm against callers.
  def countArgumentsUsed() : Int
  /*
   * Called after we have finished with the output buffer for the current inputs
   * to prepare it for future buffering.
   */
  def fillFrom(kernel_ctx : Long, nativeOutputBuffers : NativeOutputBuffers[T])
  /*
   * Factory for the native-side buffer backing this wrapper, sized for N output
   * elements and bound to the given device and kernel contexts.
   */
  def generateNativeOutputBuffer(N : Int, outArgNum : Int, dev_ctx : Long,
          ctx : Long, sampleOutput : T, entryPoint : Entrypoint) :
      NativeOutputBuffers[T]
}
| agrippa/spark-swat | swat/src/main/scala/org/apache/spark/rdd/cl/OutputBufferWrapper.scala | Scala | bsd-3-clause | 2,437 |
package com.thinkbiganalytics.spark.service
import java.util.concurrent.{Executors, ScheduledExecutorService, ThreadFactory, TimeUnit}
import javax.annotation.Nonnull
import com.google.common.cache.{Cache, CacheBuilder, RemovalListener, RemovalNotification}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.thinkbiganalytics.spark.metadata.TransformJob
import com.thinkbiganalytics.spark.repl.SparkScriptEngine
import scala.collection.mutable
/** Tracks the progress of executing and recently completed jobs. */
object TransformJobTracker {
  /** Spark property key under which a running job's group id is published. */
  val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
}
/**
 * Tracks the progress of executing and recently completed transform jobs.
 *
 * Jobs are indexed three ways: by group id (authoritative, with 1-hour expiry),
 * and by Spark job id and stage id (secondary indexes kept in sync by the
 * cache's removal listener).
 *
 * @param contextClassLoader class loader installed on worker threads so job code
 *                           resolves classes the same way as the creating thread
 */
abstract class TransformJobTracker(contextClassLoader: ClassLoader) {

  /** Executes jobs in separate threads */
  private val executor: ScheduledExecutorService = Executors.newScheduledThreadPool(2, threadFactory)

  /** Map of group id to job; entries expire one hour after being written. */
  protected val groups: Cache[String, TransformJob] = CacheBuilder.newBuilder()
    .expireAfterWrite(1, TimeUnit.HOURS)
    .removalListener(new RemovalListener[String, TransformJob] {
      // Keep the job-id and stage-id indexes consistent when a group entry is evicted.
      override def onRemoval(notification: RemovalNotification[String, TransformJob]): Unit = {
        notification.getValue.jobId.foreach(jobs.remove)
        notification.getValue.stages.map(_.stageId).foreach(stages.remove)
      }
    })
    .build[String, TransformJob]()

  // Guava caches only perform maintenance lazily on access; force periodic
  // cleanUp() so expired entries actually fire the removal listener.
  executor.scheduleAtFixedRate(new Runnable {
    override def run(): Unit = groups.cleanUp()
  }, 1, 1, TimeUnit.HOURS)

  /** Map of job id to job */
  // NOTE(review): mutable.SynchronizedMap is deprecated since Scala 2.11; a
  // java.util.concurrent map would be preferable, but the field type is part of
  // the protected interface seen by subclasses, so it is kept as-is here.
  protected val jobs = new mutable.HashMap[Int, TransformJob]() with mutable.SynchronizedMap[Int, TransformJob]

  /** Map of stage id to job */
  protected val stages = new mutable.HashMap[Int, TransformJob]() with mutable.SynchronizedMap[Int, TransformJob]

  /** Adds a listener to the Spark context of the specified script engine.
    *
    * @param engine the Spark script engine
    */
  def addSparkListener(@Nonnull engine: SparkScriptEngine): Unit

  /** Gets the job with the specified group id.
    *
    * @param groupId the group id
    * @return the job, or None if it does not exist (or has expired)
    */
  @Nonnull
  def getJob(@Nonnull groupId: String): Option[TransformJob] = {
    Option(groups.getIfPresent(groupId))
  }

  /** Removes the specified job from this tracker.
    *
    * The removal listener then purges the job-id and stage-id indexes.
    *
    * @param groupId the group id
    */
  def removeJob(@Nonnull groupId: String): Unit = {
    groups.invalidate(groupId)
  }

  /** Submits a job to be executed on the internal thread pool.
    *
    * @param job the transform job
    */
  def submitJob(@Nonnull job: TransformJob): Unit = {
    groups.put(job.groupId, job)
    executor.execute(job)
  }

  /** Creates a thread factory for running transform jobs.
    *
    * Threads are daemons (they never block JVM shutdown) and carry the
    * constructor-supplied context class loader.
    *
    * @return the thread factory
    */
  private def threadFactory = {
    val parentThreadFactory = new ThreadFactory {
      override def newThread(r: Runnable): Thread = {
        val thread = Executors.defaultThreadFactory().newThread(r)
        thread.setContextClassLoader(contextClassLoader)
        thread
      }
    }
    new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat("transform-job-%d")
      .setThreadFactory(parentThreadFactory)
      .build()
  }
}
| peter-gergely-horvath/kylo | integrations/spark/spark-shell-client/spark-shell-client-app/src/main/scala/com/thinkbiganalytics/spark/service/TransformJobTracker.scala | Scala | apache-2.0 | 3,495 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.scope.PsiScopeProcessor
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.stubs.ScFunctionStub
/**
* @author ilyas
*/
/**
 * Base PSI implementation for Scala functions, shared by stub-backed and
 * AST-backed elements.
 */
abstract class ScFunctionImpl extends ScalaStubBasedElementImpl[ScFunction] with ScMember
  with ScFunction with ScTypeParametersOwner {

  override def isStable = false

  /**
   * Returns the PSI element holding the function's name: the identifier token,
   * the `this` keyword (auxiliary constructors), or — when no AST is loaded —
   * a synthetic identifier built from the stub's name.
   */
  def nameId: PsiElement = {
    val n = getNode.findChildByType(ScalaTokenTypes.tIDENTIFIER) match {
      case null => getNode.findChildByType(ScalaTokenTypes.kTHIS)
      case notNull => notNull
    }
    if (n == null) {
      // No AST node found: this element must be stub-backed, so read the name
      // from the stub and fabricate an identifier element for it.
      return ScalaPsiElementFactory.createIdentifier(getStub.asInstanceOf[ScFunctionStub].getName, getManager).getPsi
    }
    n.getPsi
  }

  /** Parameter clauses, resolved through the stub when available. */
  def paramClauses: ScParameters = {
    getStubOrPsiChild(ScalaElementTypes.PARAM_CLAUSES)
  }

  /**
   * Makes the function's type parameters and (when resolving from within the
   * return-type position) its value parameters visible to the resolver.
   */
  override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState,
                                   lastParent: PsiElement, place: PsiElement): Boolean = {
    // Process the function's type parameters first.
    if (!super[ScTypeParametersOwner].processDeclarations(processor, state, lastParent, place)) return false
    lazy val parameterIncludingSynthetic: Seq[ScParameter] = effectiveParameterClauses.flatMap(_.effectiveParameters)
    if (getStub == null) {
      // AST-backed: parameters are only in scope when resolving from the return type element.
      returnTypeElement match {
        case Some(x) if lastParent != null && x.getStartOffsetInParent == lastParent.getStartOffsetInParent =>
          for (p <- parameterIncludingSynthetic) {
            ProgressManager.checkCanceled()
            if (!processor.execute(p, state)) return false
          }
        case _ =>
      }
    } else {
      // Stub-backed: context != parent signals resolution from a detached context.
      if (lastParent != null && lastParent.getContext != lastParent.getParent) {
        for (p <- parameterIncludingSynthetic) {
          ProgressManager.checkCanceled()
          if (!processor.execute(p, state)) return false
        }
      }
    }
    true
  }
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/ScFunctionImpl.scala | Scala | apache-2.0 | 2,437 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package record {
import common._
import http.js.{JsExp, JsObj}
import http.{Req, SHtml}
import json.JsonAST._
import mapper.Safe
import util._
import field._
import scala.xml._
/**
 * Base trait for Lift Record instances. Almost all operations delegate to the
 * [[MetaRecord]] companion returned by [[meta]]; this trait mainly carries the
 * per-instance identity used for access control.
 */
trait Record[MyType <: Record[MyType]] extends FieldContainer {
  self: MyType =>

  /**
   * A unique identifier for this record... used for access control
   */
  private val secure_# = Safe.next

  /**
   * Get the fields defined on the meta object for this record instance
   */
  def fields() = meta.fields(this)

  /** FieldContainer implementation: all fields of this record. */
  def allFields = fields()

  /**
   * The meta record (the object that contains the meta result for this type)
   */
  def meta: MetaRecord[MyType]

  /**
   * Is it safe to make changes to the record (or should we check access control?)
   */
  final def safe_? : Boolean = {
    Safe.safe_?(secure_#)
  }

  /** Runs f with this record's access-control check disabled. */
  def runSafe[T](f : => T) : T = {
    Safe.runSafe(secure_#)(f)
  }

  /**
   * Returns the HTML representation of this Record
   */
  def toXHtml: NodeSeq = {
    meta.toXHtml(this)
  }

  /**
   * Validates this Record by calling validators for each field
   *
   * @return a List of FieldError. If this list is empty you can assume that record was validated successfully
   */
  def validate : List[FieldError] = {
    runSafe {
      meta.validate(this)
    }
  }

  /**
   * Returns the JSON representation of this record
   *
   * @return a JsObj
   */
  def asJSON: JsExp = meta.asJSON(this)

  /**
   * Returns the JSON representation of this record, converts asJValue to JsObj
   *
   * @return a JsObj
   */
  def asJsExp: JsExp = meta.asJsExp(this)

  /**
   * Sets the fields of this Record from the given JSON.
   */
  def setFieldsFromJSON(json: String): Box[Unit] = meta.setFieldsFromJSON(this, json)

  /** Encode this record instance as a JObject */
  def asJValue: JObject = meta.asJValue(this)

  /** Set the fields of this record from the given JValue */
  def setFieldsFromJValue(jvalue: JValue): Box[Unit] = meta.setFieldsFromJValue(this, jvalue)

  /**
   * Sets the fields of this Record from the given JSON string.
   */
  def setFieldsFromJsonString(json: String): Box[Unit] = meta.setFieldsFromJsonString(this, json)

  /**
   * Sets the fields of this Record from the given Req.
   */
  def setFieldsFromReq(req: Req){ meta.setFieldsFromReq(this, req) }

  /**
   * Present the model as a form and execute the function on submission of the form
   *
   * @param button - If it's Full, put a submit button on the form with the value of the parameter
   * @param f - the function to execute on form submission
   *
   * @return the form
   */
  def toForm(button: Box[String])(f: MyType => Unit): NodeSeq = {
    meta.toForm(this) ++
    (SHtml.hidden(() => f(this))) ++
    ((button.map(b => (<input type="submit" value={b}/>)) openOr scala.xml.Text("")))
  }

  /**
   * Present the model as a form and execute the function on submission of the form
   *
   * @param f - the function to execute on form submission
   *
   * @return the form
   */
  def toForm(f: MyType => Unit): NodeSeq = meta.toForm(this) ++ (SHtml.hidden(() => f(this)))

  /**
   * Find the field by name
   * @param fieldName -- the name of the field to find
   *
   * @return Box[MappedField]
   */
  def fieldByName(fieldName: String): Box[Field[_, MyType]] = meta.fieldByName(fieldName, this)
}
/**
 * Mix-in for records that may expose well-known localization fields; each
 * accessor defaults to Empty and is overridden by records that have the field.
 */
trait ExpandoRecord[MyType <: Record[MyType] with ExpandoRecord[MyType]] {
  self: MyType =>

  /**
   * If there's a field in this record that defines the locale, return it
   */
  def localeField: Box[LocaleField[MyType]] = Empty

  /** If there's a field in this record that defines the time zone, return it. */
  def timeZoneField: Box[TimeZoneField[MyType]] = Empty

  /** If there's a field in this record that defines the country, return it. */
  def countryField: Box[CountryField[MyType]] = Empty
}
/** A Record identified by a primary key of type KeyType. */
trait KeyedRecord[MyType <: KeyedRecord[MyType, KeyType], KeyType] extends Record[MyType] {
  self: MyType =>

  /** The field holding this record's primary key. */
  def primaryKey: KeyField[KeyType, MyType]

  /** True when both records have equal primary keys. */
  def comparePrimaryKeys(other: MyType) = primaryKey === other.primaryKey
}
}
}
| lift/lift | framework/lift-persistence/lift-record/src/main/scala/net/liftweb/record/Record.scala | Scala | apache-2.0 | 4,524 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.server
import java.net.{BindException, InetAddress}
import com.groupon.sparklint.common.Logging
import org.http4s.MediaType.{`application/json`, `text/html`}
import org.http4s.Response
import org.http4s.dsl._
import org.http4s.headers.`Content-Type`
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeBuilder
import scala.util.{Failure, Success, Try}
import scalaz.concurrent.Task
/**
* This mix-in trait will start up a server on the port specified
*
* Usage: 1. Mount endpoint to RoutingMap.routingMap
* example: https://github.com/http4s/http4s/blob/master/examples/src/main/scala/com/example/http4s/ExampleService.scala
* 2. call startServer(port)
*
* @author rxue
* @since 4/25/16.
*/
trait AdhocServer extends RoutingMap with Logging {
  /** The currently bound server, or None when not running. */
  var server: Option[Server] = None

  /** First port to try when no explicit port is given. */
  def DEFAULT_PORT: Int

  /** start the server with the routingMap and port
    *
    * Retries on successive ports while the requested one is already bound.
    * NOTE(review): if every port upward is taken this loops until the port
    * number overflows the valid range and bindHttp rejects it.
    *
    * @param port port to use
    * @throws IllegalArgumentException if the port is smaller than 0 or greater than 65535
    */
  @throws[IllegalArgumentException]
  def startServer(port: Option[Int] = None): Unit = {
    var portToAttempt = port.getOrElse(DEFAULT_PORT)
    while (server.isEmpty) {
      bindServer(portToAttempt) match {
        case Success(someServer) =>
          server = Some(someServer)
        case Failure(ex: BindException) =>
          // Port already in use: move to the next one and retry.
          val nextPort = portToAttempt + 1
          logDebug(s"Port $portToAttempt has been used, trying $nextPort")
          portToAttempt = nextPort
        case Failure(ex: Throwable) =>
          // Anything other than a bind conflict is fatal.
          throw ex
      }
    }
  }

  /** Attempts a single bind of the router on the given port on all interfaces. */
  def bindServer(port: Int): Try[Server] = Try {
    logDebug(s"Starting server on port $port")
    val mountedServer = BlazeBuilder.mountService(router).bindHttp(port, "0.0.0.0").run
    logInfo(s"Server started on $port")
    mountedServer
  }

  /** Shuts down the running server, if any; safe to call when not running. */
  def stopServer(): Unit = {
    logInfo("Shutdown request received")
    server.foreach(s => {
      s.shutdown.run
    })
    server = None
    logInfo("Shutdown complete")
  }

  /** host:port of the running server, or None when not running. */
  def getServerAddress = server.map(someServer =>
    s"${InetAddress.getLocalHost.getCanonicalHostName}:${someServer.address.getPort}"
  )

  /** Stamps a response with the application/json content type. */
  def jsonResponse(textResponse: Task[Response]): Task[Response] = {
    textResponse.withContentType(Some(`Content-Type`(`application/json`)))
  }

  /** Stamps a response with the text/html content type. */
  def htmlResponse(textResponse: Task[Response]): Task[Response] = {
    textResponse.withContentType(Some(`Content-Type`(`text/html`)))
  }
}
| groupon/sparklint | src/main/scala/com/groupon/sparklint/server/AdhocServer.scala | Scala | apache-2.0 | 3,117 |
package uk.zebington.junkcraft.common.containers
import net.minecraft.entity.player.{EntityPlayer, InventoryPlayer}
import net.minecraft.inventory._
import net.minecraft.item.ItemStack
import uk.zebington.junkcraft.common.containers.slots.{SlotSpikeStationContextual, SlotSpikeStationOutput}
import uk.zebington.junkcraft.common.tileentities.TileEntitySpikeStation
/**
* Created by Charlotte on 22/02/2015.
*/
/**
 * Container for the spike-station tile entity: three machine slots
 * (0 = input, 1 = contextual, 2 = output) followed by the standard
 * 27-slot player inventory and 9-slot hotbar.
 */
class ContainerSpikeStation(inv: TileEntitySpikeStation, invPlayer: InventoryPlayer) extends Container {
  addSlotToContainer(new Slot(inv, 0, 30, 35))
  addSlotToContainer(new SlotSpikeStationContextual(inv, 1, 66, 35))
  addSlotToContainer(new SlotSpikeStationOutput(inv, 2, 126, 35))
  // Player main inventory (3 rows x 9 columns), then the hotbar.
  for (i <- 0 until 3) for (j <- 0 until 9) addSlotToContainer(new Slot(invPlayer, j + i * 9 + 9, 8 + j * 18, 84 + i * 18))
  for (i <- 0 until 9) addSlotToContainer(new Slot(invPlayer, i, 8 + i * 18, 142))

  override def canInteractWith(playerIn: EntityPlayer): Boolean = true

  /**
   * Handles shift-click moves between machine slots and player inventory.
   *
   * Behavior depends on the station's mode: in mode 0 shift-clicked player
   * items may land in machine slots 0-1, in mode 1 only in slot 0.
   * NOTE(review): only modes 0 and 1 are matched — any other mode value would
   * throw a MatchError. Also note the failure paths call putStack(null),
   * which appears to discard the stack; confirm this is intended.
   */
  override def transferStackInSlot(player: EntityPlayer, i: Int): ItemStack = {
    val slot = inventorySlots.get(i).asInstanceOf[Slot]
    var itemStack: ItemStack = null
    inv.mode match {
      case 0 =>
        if (slot != null && slot.getHasStack) {
          val itemStack1: ItemStack = slot.getStack
          itemStack = itemStack1.copy()
          if (i == 2) {
            // Output slot -> player inventory (reverse fill order).
            if (!this.mergeItemStack(itemStack1, 3, 39, true)) {
              slot.onPickupFromSlot(player, itemStack1)
              slot.putStack(null)
              return null
            }
            slot.onSlotChange(itemStack1, itemStack)
          } else if (i > 2) {
            // Player inventory -> machine slots 0-1.
            if (!this.mergeItemStack(itemStack1, 0, 2, false)) {
              slot.putStack(null)
              return null
            }
            slot.onSlotChange(itemStack1, itemStack)
          } else {
            // Machine input slots -> player inventory.
            if (!this.mergeItemStack(itemStack1, 3, 39, true)) {
              slot.putStack(null)
              return null
            }
          }
        }
      case 1 =>
        if (slot != null && slot.getHasStack) {
          val itemStack1: ItemStack = slot.getStack
          itemStack = itemStack1.copy()
          if (i == 2) {
            if (!this.mergeItemStack(itemStack1, 3, 39, true)) {
              slot.onPickupFromSlot(player, itemStack1)
              slot.putStack(null)
              return null
            }
            slot.onSlotChange(itemStack1, itemStack)
          } else if (i > 2) {
            // Mode 1 only accepts shift-clicks into slot 0.
            if (!this.mergeItemStack(itemStack1, 0, 1, false)) {
              slot.putStack(null)
              return null
            }
            slot.onSlotChange(itemStack1, itemStack)
          } else {
            if (!this.mergeItemStack(itemStack1, 3, 39, true)) {
              slot.onPickupFromSlot(player, itemStack1)
              slot.putStack(null)
              return null
            }
          }
        }
    }
    itemStack
  }

  // Recompute the output slot whenever slot contents may have changed.
  override def detectAndSendChanges(): Unit = {
    super.detectAndSendChanges()
    inv.updateOutput()
  }

  override def slotClick(slotId: Int, clickedButton: Int, mode: Int, playerIn: EntityPlayer): ItemStack = {
    val itemStack = super.slotClick(slotId, clickedButton, mode, playerIn)
    inv.updateOutput()
    itemStack
  }

  /** Toggles the station's mode on the tile entity. */
  def switchMode(): Unit = {
    inv.switchMode()
  }
}
| zebington/JunkCraft | src/main/scala/uk/zebington/junkcraft/common/containers/ContainerSpikeStation.scala | Scala | gpl-3.0 | 3,307 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import kafka.api.LeaderAndIsr
import kafka.common.StateChangeFailedException
import kafka.server.KafkaConfig
import kafka.utils.Implicits._
import kafka.utils.Logging
import kafka.zk.KafkaZkClient
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import kafka.zk.TopicPartitionStateZNode
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.ControllerMovedException
import org.apache.zookeeper.KeeperException.Code
import scala.collection.{Seq, mutable}
/**
 * Base class for the controller's replica state machine: seeds per-replica
 * state on controller election and exposes the state-transition entry point.
 */
abstract class ReplicaStateMachine(controllerContext: ControllerContext) extends Logging {
  /**
   * Invoked on successful controller election.
   *
   * Seeds the replica state map from the current cluster view, then drives
   * every replica to OnlineReplica or OfflineReplica accordingly.
   */
  def startup(): Unit = {
    info("Initializing replica state")
    initializeReplicaState()
    info("Triggering online replica state changes")
    val (onlineReplicas, offlineReplicas) = controllerContext.onlineAndOfflineReplicas
    handleStateChanges(onlineReplicas.toSeq, OnlineReplica)
    info("Triggering offline replica state changes")
    handleStateChanges(offlineReplicas.toSeq, OfflineReplica)
    debug(s"Started replica state machine with initial state -> ${controllerContext.replicaStates}")
  }

  /**
   * Invoked on controller shutdown.
   */
  def shutdown(): Unit = {
    info("Stopped replica state machine")
  }

  /**
   * Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions
   * in zookeeper
   */
  private def initializeReplicaState(): Unit = {
    controllerContext.allPartitions.foreach { partition =>
      val replicas = controllerContext.partitionReplicaAssignment(partition)
      replicas.foreach { replicaId =>
        val partitionAndReplica = PartitionAndReplica(partition, replicaId)
        if (controllerContext.isReplicaOnline(replicaId, partition)) {
          controllerContext.putReplicaState(partitionAndReplica, OnlineReplica)
        } else {
          // mark replicas on dead brokers as failed for topic deletion, if they belong to a topic to be deleted.
          // This is required during controller failover since during controller failover a broker can go down,
          // so the replicas on that broker should be moved to ReplicaDeletionIneligible to be on the safer side.
          controllerContext.putReplicaState(partitionAndReplica, ReplicaDeletionIneligible)
        }
      }
    }
  }

  /** Transition the given replicas to targetState; implemented by subclasses. */
  def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit
}
/**
* This class represents the state machine for replicas. It defines the states that a replica can be in, and
* transitions to move the replica to another legal state. The different states that a replica can be in are -
* 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a
* replica can only get become follower state change request. Valid previous
* state is NonExistentReplica
* 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this
* state. In this state, it can get either become leader or become follower state change requests.
* Valid previous state are NewReplica, OnlineReplica, OfflineReplica and ReplicaDeletionIneligible
* 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica
* is down. Valid previous state are NewReplica, OnlineReplica, OfflineReplica and ReplicaDeletionIneligible
* 4. ReplicaDeletionStarted: If replica deletion starts, it is moved to this state. Valid previous state is OfflineReplica
* 5. ReplicaDeletionSuccessful: If replica responds with no error code in response to a delete replica request, it is
* moved to this state. Valid previous state is ReplicaDeletionStarted
* 6. ReplicaDeletionIneligible: If replica deletion fails, it is moved to this state. Valid previous states are
* ReplicaDeletionStarted and OfflineReplica
* 7. NonExistentReplica: If a replica is deleted successfully, it is moved to this state. Valid previous state is
* ReplicaDeletionSuccessful
*/
class ZkReplicaStateMachine(config: KafkaConfig,
stateChangeLogger: StateChangeLogger,
controllerContext: ControllerContext,
zkClient: KafkaZkClient,
controllerBrokerRequestBatch: ControllerBrokerRequestBatch)
extends ReplicaStateMachine(controllerContext) with Logging {
  // Broker id of the controller this state machine runs on; used for log identification.
  private val controllerId = config.brokerId
  this.logIdent = s"[ReplicaStateMachine controllerId=$controllerId] "
override def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit = {
if (replicas.nonEmpty) {
try {
controllerBrokerRequestBatch.newBatch()
replicas.groupBy(_.replica).forKeyValue { (replicaId, replicas) =>
doHandleStateChanges(replicaId, replicas, targetState)
}
controllerBrokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch)
} catch {
case e: ControllerMovedException =>
error(s"Controller moved to another broker when moving some replicas to $targetState state", e)
throw e
case e: Throwable => error(s"Error while moving some replicas to $targetState state", e)
}
}
}
/**
* This API exercises the replica's state machine. It ensures that every state transition happens from a legal
* previous state to the target state. Valid state transitions are:
* NonExistentReplica --> NewReplica
* --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
* partition to every live broker
*
* NewReplica -> OnlineReplica
* --add the new replica to the assigned replica list if needed
*
* OnlineReplica,OfflineReplica -> OnlineReplica
* --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
* partition to every live broker
*
* NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica
* --send StopReplicaRequest to the replica (w/o deletion)
* --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and
* UpdateMetadata request for the partition to every live broker.
*
* OfflineReplica -> ReplicaDeletionStarted
* --send StopReplicaRequest to the replica (with deletion)
*
* ReplicaDeletionStarted -> ReplicaDeletionSuccessful
* -- mark the state of the replica in the state machine
*
* ReplicaDeletionStarted -> ReplicaDeletionIneligible
* -- mark the state of the replica in the state machine
*
* ReplicaDeletionSuccessful -> NonExistentReplica
* -- remove the replica from the in memory partition replica assignment cache
*
* @param replicaId The replica for which the state transition is invoked
* @param replicas The partitions on this replica for which the state transition is invoked
* @param targetState The end state that the replica should be moved to
*/
  private def doHandleStateChanges(replicaId: Int, replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit = {
    val stateLogger = stateChangeLogger.withControllerEpoch(controllerContext.epoch)
    val traceEnabled = stateLogger.isTraceEnabled
    // First-seen replicas start in NonExistentReplica; then split the batch into
    // replicas whose current state may legally transition to targetState and those that may not.
    replicas.foreach(replica => controllerContext.putReplicaStateIfNotExists(replica, NonExistentReplica))
    val (validReplicas, invalidReplicas) = controllerContext.checkValidReplicaStateChange(replicas, targetState)
    invalidReplicas.foreach(replica => logInvalidTransition(replica, targetState))
    targetState match {
      case NewReplica =>
        // Send the new replica a LeaderAndIsr request if leadership info exists; a replica
        // being asked to become leader cannot legally be in NewReplica state.
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          val currentState = controllerContext.replicaState(replica)
          controllerContext.partitionLeadershipInfo(partition) match {
            case Some(leaderIsrAndControllerEpoch) =>
              if (leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId) {
                val exception = new StateChangeFailedException(s"Replica $replicaId for partition $partition cannot be moved to NewReplica state as it is being requested to become leader")
                logFailedStateChange(replica, currentState, OfflineReplica, exception)
              } else {
                controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                  replica.topicPartition,
                  leaderIsrAndControllerEpoch,
                  controllerContext.partitionFullReplicaAssignment(replica.topicPartition),
                  isNew = true)
                if (traceEnabled)
                  logSuccessfulTransition(stateLogger, replicaId, partition, currentState, NewReplica)
                controllerContext.putReplicaState(replica, NewReplica)
              }
            case None =>
              // No leadership info yet (e.g. brand-new partition): just record the state.
              if (traceEnabled)
                logSuccessfulTransition(stateLogger, replicaId, partition, currentState, NewReplica)
              controllerContext.putReplicaState(replica, NewReplica)
          }
        }
      case OnlineReplica =>
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          val currentState = controllerContext.replicaState(replica)
          currentState match {
            case NewReplica =>
              // Coming from NewReplica: make sure the replica is part of the assignment.
              val assignment = controllerContext.partitionFullReplicaAssignment(partition)
              if (!assignment.replicas.contains(replicaId)) {
                error(s"Adding replica ($replicaId) that is not part of the assignment $assignment")
                val newAssignment = assignment.copy(replicas = assignment.replicas :+ replicaId)
                controllerContext.updatePartitionFullReplicaAssignment(partition, newAssignment)
              }
            case _ =>
              // Re-online after being offline (or refresh): resend current leadership info, if any.
              controllerContext.partitionLeadershipInfo(partition) match {
                case Some(leaderIsrAndControllerEpoch) =>
                  controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                    replica.topicPartition,
                    leaderIsrAndControllerEpoch,
                    controllerContext.partitionFullReplicaAssignment(partition), isNew = false)
                case None =>
              }
          }
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, partition, currentState, OnlineReplica)
          controllerContext.putReplicaState(replica, OnlineReplica)
        }
      case OfflineReplica =>
        // Stop the replica (without deletion), shrink the ISR in ZooKeeper, and
        // propagate the new leadership info / metadata to the remaining brokers.
        validReplicas.foreach { replica =>
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition, deletePartition = false)
        }
        val (replicasWithLeadershipInfo, replicasWithoutLeadershipInfo) = validReplicas.partition { replica =>
          controllerContext.partitionLeadershipInfo(replica.topicPartition).isDefined
        }
        val updatedLeaderIsrAndControllerEpochs = removeReplicasFromIsr(replicaId, replicasWithLeadershipInfo.map(_.topicPartition))
        updatedLeaderIsrAndControllerEpochs.forKeyValue { (partition, leaderIsrAndControllerEpoch) =>
          stateLogger.info(s"Partition $partition state changed to $leaderIsrAndControllerEpoch after removing replica $replicaId from the ISR as part of transition to $OfflineReplica")
          if (!controllerContext.isTopicQueuedUpForDeletion(partition.topic)) {
            val recipients = controllerContext.partitionReplicaAssignment(partition).filterNot(_ == replicaId)
            controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(recipients,
              partition,
              leaderIsrAndControllerEpoch,
              controllerContext.partitionFullReplicaAssignment(partition), isNew = false)
          }
          val replica = PartitionAndReplica(partition, replicaId)
          val currentState = controllerContext.replicaState(replica)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, partition, currentState, OfflineReplica)
          controllerContext.putReplicaState(replica, OfflineReplica)
        }
        // Replicas with no leadership info: just broadcast updated metadata.
        replicasWithoutLeadershipInfo.foreach { replica =>
          val currentState = controllerContext.replicaState(replica)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, OfflineReplica)
          controllerBrokerRequestBatch.addUpdateMetadataRequestForBrokers(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(replica.topicPartition))
          controllerContext.putReplicaState(replica, OfflineReplica)
        }
      case ReplicaDeletionStarted =>
        // Begin deletion: stop the replica with deletePartition = true.
        validReplicas.foreach { replica =>
          val currentState = controllerContext.replicaState(replica)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionStarted)
          controllerContext.putReplicaState(replica, ReplicaDeletionStarted)
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition, deletePartition = true)
        }
      case ReplicaDeletionIneligible =>
        // Bookkeeping-only transition: deletion failed for this replica.
        validReplicas.foreach { replica =>
          val currentState = controllerContext.replicaState(replica)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionIneligible)
          controllerContext.putReplicaState(replica, ReplicaDeletionIneligible)
        }
      case ReplicaDeletionSuccessful =>
        // Bookkeeping-only transition: the broker acknowledged the delete.
        validReplicas.foreach { replica =>
          val currentState = controllerContext.replicaState(replica)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionSuccessful)
          controllerContext.putReplicaState(replica, ReplicaDeletionSuccessful)
        }
      case NonExistentReplica =>
        // Final transition: drop the replica from the cached assignment and forget its state.
        validReplicas.foreach { replica =>
          val currentState = controllerContext.replicaState(replica)
          val newAssignedReplicas = controllerContext
            .partitionFullReplicaAssignment(replica.topicPartition)
            .removeReplica(replica.replica)
          controllerContext.updatePartitionFullReplicaAssignment(replica.topicPartition, newAssignedReplicas)
          if (traceEnabled)
            logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, NonExistentReplica)
          controllerContext.removeReplicaState(replica)
        }
    }
  }
/**
* Repeatedly attempt to remove a replica from the isr of multiple partitions until there are no more remaining partitions
* to retry.
* @param replicaId The replica being removed from isr of multiple partitions
* @param partitions The partitions from which we're trying to remove the replica from isr
* @return The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr.
*/
private def removeReplicasFromIsr(
replicaId: Int,
partitions: Seq[TopicPartition]
): Map[TopicPartition, LeaderIsrAndControllerEpoch] = {
var results = Map.empty[TopicPartition, LeaderIsrAndControllerEpoch]
var remaining = partitions
while (remaining.nonEmpty) {
val (finishedRemoval, removalsToRetry) = doRemoveReplicasFromIsr(replicaId, remaining)
remaining = removalsToRetry
finishedRemoval.foreach {
case (partition, Left(e)) =>
val replica = PartitionAndReplica(partition, replicaId)
val currentState = controllerContext.replicaState(replica)
logFailedStateChange(replica, currentState, OfflineReplica, e)
case (partition, Right(leaderIsrAndEpoch)) =>
results += partition -> leaderIsrAndEpoch
}
}
results
}
/**
* Try to remove a replica from the isr of multiple partitions.
* Removing a replica from isr updates partition state in zookeeper.
*
* @param replicaId The replica being removed from isr of multiple partitions
* @param partitions The partitions from which we're trying to remove the replica from isr
* @return A tuple of two elements:
* 1. The updated Right[LeaderIsrAndControllerEpochs] of all partitions for which we successfully
* removed the replica from isr. Or Left[Exception] corresponding to failed removals that should
* not be retried
* 2. The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts can occur if
* the partition leader updated partition state while the controller attempted to update partition state.
*/
  private def doRemoveReplicasFromIsr(
    replicaId: Int,
    partitions: Seq[TopicPartition]
  ): (Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]], Seq[TopicPartition]) = {
    // Read the current leader/isr state of every partition from zookeeper.
    val (leaderAndIsrs, partitionsWithNoLeaderAndIsrInZk) = getTopicPartitionStatesFromZk(partitions)
    // Split into partitions whose ISR actually contains the replica (need a write)
    // and those that do not (nothing to remove; passed through unchanged below).
    val (leaderAndIsrsWithReplica, leaderAndIsrsWithoutReplica) = leaderAndIsrs.partition { case (_, result) =>
      result.map { leaderAndIsr =>
        leaderAndIsr.isr.contains(replicaId)
      }.getOrElse(false)
    }
    // Compute the new leader/isr per partition: demote the leader to NoLeader when it
    // is the replica being removed, and drop the replica from the ISR unless it is the
    // only member (the ISR is never shrunk to empty here).
    val adjustedLeaderAndIsrs: Map[TopicPartition, LeaderAndIsr] = leaderAndIsrsWithReplica.flatMap {
      case (partition, result) =>
        result.toOption.map { leaderAndIsr =>
          val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NoLeader else leaderAndIsr.leader
          val adjustedIsr = if (leaderAndIsr.isr.size == 1) leaderAndIsr.isr else leaderAndIsr.isr.filter(_ != replicaId)
          partition -> leaderAndIsr.newLeaderAndIsr(newLeader, adjustedIsr)
        }
    }
    // Conditional zookeeper writes; version conflicts come back in updatesToRetry.
    val UpdateLeaderAndIsrResult(finishedPartitions, updatesToRetry) = zkClient.updateLeaderAndIsr(
      adjustedLeaderAndIsrs, controllerContext.epoch, controllerContext.epochZkVersion)
    // Partitions with no state in zookeeper are errors, unless their topic is queued
    // for deletion, in which case the missing state is expected and ignored.
    val exceptionsForPartitionsWithNoLeaderAndIsrInZk: Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]] =
      partitionsWithNoLeaderAndIsrInZk.iterator.flatMap { partition =>
        if (!controllerContext.isTopicQueuedUpForDeletion(partition.topic)) {
          val exception = new StateChangeFailedException(
            s"Failed to change state of replica $replicaId for partition $partition since the leader and isr " +
            "path in zookeeper is empty"
          )
          Option(partition -> Left(exception))
        } else None
      }.toMap
    // Merge untouched and successfully written partitions; the Right branch also caches
    // the fresh leadership info into the controller context as a side effect.
    val leaderIsrAndControllerEpochs: Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]] =
      (leaderAndIsrsWithoutReplica ++ finishedPartitions).map { case (partition, result) =>
        (partition, result.map { leaderAndIsr =>
          val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch)
          controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch)
          leaderIsrAndControllerEpoch
        })
      }
    if (isDebugEnabled) {
      updatesToRetry.foreach { partition =>
        debug(s"Controller failed to remove replica $replicaId from ISR of partition $partition. " +
          s"Attempted to write state ${adjustedLeaderAndIsrs(partition)}, but failed with bad ZK version. This will be retried.")
      }
    }
    (leaderIsrAndControllerEpochs ++ exceptionsForPartitionsWithNoLeaderAndIsrInZk, updatesToRetry)
  }
/**
* Gets the partition state from zookeeper
* @param partitions the partitions whose state we want from zookeeper
* @return A tuple of two values:
* 1. The Right(LeaderAndIsrs) of partitions whose state we successfully read from zookeeper.
* The Left(Exception) to failed zookeeper lookups or states whose controller epoch exceeds our current epoch
* 2. The partitions that had no leader and isr state in zookeeper. This happens if the controller
* didn't finish partition initialization.
*/
  private def getTopicPartitionStatesFromZk(
    partitions: Seq[TopicPartition]
  ): (Map[TopicPartition, Either[Exception, LeaderAndIsr]], Seq[TopicPartition]) = {
    // If the bulk read itself fails, every requested partition maps to that exception.
    val getDataResponses = try {
      zkClient.getTopicPartitionStatesRaw(partitions)
    } catch {
      case e: Exception =>
        return (partitions.iterator.map(_ -> Left(e)).toMap, Seq.empty)
    }
    val partitionsWithNoLeaderAndIsrInZk = mutable.Buffer.empty[TopicPartition]
    val result = mutable.Map.empty[TopicPartition, Either[Exception, LeaderAndIsr]]
    getDataResponses.foreach[Unit] { getDataResponse =>
      // The request context carries the partition the response belongs to.
      val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition]
      if (getDataResponse.resultCode == Code.OK) {
        TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat) match {
          case None =>
            // Read succeeded but the znode held no decodable leader/isr state.
            partitionsWithNoLeaderAndIsrInZk += partition
          case Some(leaderIsrAndControllerEpoch) =>
            if (leaderIsrAndControllerEpoch.controllerEpoch > controllerContext.epoch) {
              // A controller with a newer epoch wrote this path; refuse to act on stale
              // authority and surface an error for this partition instead.
              val exception = new StateChangeFailedException(
                "Leader and isr path written by another controller. This probably " +
                s"means the current controller with epoch ${controllerContext.epoch} went through a soft failure and " +
                s"another controller was elected with epoch ${leaderIsrAndControllerEpoch.controllerEpoch}. Aborting " +
                "state change by this controller"
              )
              result += (partition -> Left(exception))
            } else {
              result += (partition -> Right(leaderIsrAndControllerEpoch.leaderAndIsr))
            }
        }
      } else if (getDataResponse.resultCode == Code.NONODE) {
        // No znode at all: the controller never finished initializing this partition.
        partitionsWithNoLeaderAndIsrInZk += partition
      } else {
        // Any other zookeeper result code surfaces as the response's exception.
        result += (partition -> Left(getDataResponse.resultException.get))
      }
    }
    (result.toMap, partitionsWithNoLeaderAndIsrInZk)
  }
private def logSuccessfulTransition(logger: StateChangeLogger, replicaId: Int, partition: TopicPartition,
currState: ReplicaState, targetState: ReplicaState): Unit = {
logger.trace(s"Changed state of replica $replicaId for partition $partition from $currState to $targetState")
}
private def logInvalidTransition(replica: PartitionAndReplica, targetState: ReplicaState): Unit = {
val currState = controllerContext.replicaState(replica)
val e = new IllegalStateException(s"Replica $replica should be in the ${targetState.validPreviousStates.mkString(",")} " +
s"states before moving to $targetState state. Instead it is in $currState state")
logFailedStateChange(replica, currState, targetState, e)
}
private def logFailedStateChange(replica: PartitionAndReplica, currState: ReplicaState, targetState: ReplicaState, t: Throwable): Unit = {
stateChangeLogger.withControllerEpoch(controllerContext.epoch)
.error(s"Controller $controllerId epoch ${controllerContext.epoch} initiated state change of replica ${replica.replica} " +
s"for partition ${replica.topicPartition} from $currState to $targetState failed", t)
}
}
/**
 * Vocabulary of the replica state machine. Each state carries a wire byte tag and the
 * set of states from which a transition into it is considered valid (checked elsewhere
 * against validPreviousStates).
 */
sealed trait ReplicaState {
  // Byte tag identifying this state.
  def state: Byte
  // States a replica must currently be in for a transition to this state to be legal.
  def validPreviousStates: Set[ReplicaState]
}
// Reachable only from NonExistentReplica.
case object NewReplica extends ReplicaState {
  val state: Byte = 1
  val validPreviousStates: Set[ReplicaState] = Set(NonExistentReplica)
}
// Reachable from NewReplica, itself, OfflineReplica and ReplicaDeletionIneligible.
case object OnlineReplica extends ReplicaState {
  val state: Byte = 2
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}
// Same predecessors as OnlineReplica; a replica may bounce between online and offline.
case object OfflineReplica extends ReplicaState {
  val state: Byte = 3
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}
// Deletion can only start once the replica is offline.
case object ReplicaDeletionStarted extends ReplicaState {
  val state: Byte = 4
  val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica)
}
// Terminal success of a deletion that was previously started.
case object ReplicaDeletionSuccessful extends ReplicaState {
  val state: Byte = 5
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted)
}
// Deletion aborted/ineligible; entered from OfflineReplica or a started deletion.
case object ReplicaDeletionIneligible extends ReplicaState {
  val state: Byte = 6
  val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica, ReplicaDeletionStarted)
}
// Fully removed; only a successfully deleted replica can transition here.
case object NonExistentReplica extends ReplicaState {
  val state: Byte = 7
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionSuccessful)
}
| guozhangwang/kafka | core/src/main/scala/kafka/controller/ReplicaStateMachine.scala | Scala | apache-2.0 | 25,522 |
package sp.service
import akka.actor._
import sp.domain._
import sp.domain.Logic._
/**
* Created by kristofer on 2017-02-27.
*
* Monitors services and keeps track if they are removed or not behaving.
*
*/
/**
 * Actor that tracks services on the message bus: registers them from status responses,
 * answers GetServices/RemoveService requests, and periodically polls all known services,
 * evicting those that stop answering or whose actors terminate.
 */
class ServiceHandler extends Actor with ServiceHandlerLogic with MessageBussSupport {
  // Internal self-message driving the periodic status-poll cycle.
  case object Tick
  subscribe(APIServiceHandler.topicRequest)
  subscribe(APISP.serviceStatusResponse)
  override def receive = {
    // Raw JSON from the bus; skip anything this actor published itself.
    case x: String if sender() != self =>
      val mess = SPMessage.fromJson(x)
      // Requests explicitly addressed to the service handler.
      for {
        m <- mess
        h <- m.getHeaderAs[SPHeader] if h.to == APIServiceHandler.service
        b <- m.getBodyAs[APIServiceHandler.Request]
      } yield { b match {
        case APIServiceHandler.GetServices =>
          // Reply with the last known status of every registered service.
          val res = services.map(_._2._1).toList
          val updH = h.copy(from = APIServiceHandler.service, to = h.from)
          sendAnswer(updH, APIServiceHandler.Services(res))
        case APIServiceHandler.RemoveService(sR) =>
          removeService(sR)
          sendAnswer(SPHeader(from = APIServiceHandler.service), APIServiceHandler.ServiceRemoved(sR))
      }
      }
      // Status responses broadcast by services: register/refresh and death-watch them.
      for {
        m <- mess
        h <- m.getHeaderAs[SPHeader]
        b <- m.getBodyAs[APISP] if b.isInstanceOf[APISP.StatusResponse]
      } yield {
        val sR = b.asInstanceOf[APISP.StatusResponse]
        // addResponse returns true only when the service was not previously known.
        val res = addResponse(sR, sender())
        context.watch(sender())
        if (res) {
          val h = SPHeader(from = APIServiceHandler.service)
          sendAnswer(h, APIServiceHandler.ServiceAdded(sR))
        }
      }
    // Watching all services that are actors. Other services should send a
    // APIServiceHandler.RemoveService
    case Terminated(ref) =>
      println("Removing service")
      // Drop all bookkeeping for the dead actor and announce each removal.
      val res = deathWatch(ref)
      res.foreach{kv =>
        sendAnswer(SPHeader(from = APIServiceHandler.service), APIServiceHandler.ServiceRemoved(kv._2))
      }
    case Tick =>
      // Evict services that never answered the previous poll, then poll again.
      aTick()
      val h = SPHeader("ServiceHandler")
      val b = APISP.StatusRequest
      sendReq(h, b)
  }
  // Broadcast a status request to all services on the bus.
  def sendReq(h: SPHeader, b: APISP) = publish(APISP.serviceStatusRequest, SPMessage.makeJson(h, b))
  // Publish a response on the service-handler response topic.
  def sendAnswer(h: SPHeader, b: APIServiceHandler.Response) = publish(APIServiceHandler.topicResponse, SPMessage.makeJson(h, b))
  import scala.concurrent.duration._
  import context.dispatcher
  // Poll registered services every five seconds.
  val ticker = context.system.scheduler.schedule(5 seconds, 5 seconds, self, Tick)
}
object ServiceHandler {
  // Props factory so callers never construct the actor class directly.
  def props = Props(classOf[ServiceHandler])
}
/**
 * Pure-ish bookkeeping behind ServiceHandler: two mutable maps track registered
 * services and which of them still owe a reply to the current status poll.
 */
trait ServiceHandlerLogic {
  // Registered services: generated name -> (last status response, the service's actor).
  var services: Map[String, (APISP.StatusResponse, ActorRef)] = Map()
  // Services polled in the current cycle that have not yet answered.
  var waitingResponse: Map[String, APISP.StatusResponse] = Map()
  /**
   * One poll cycle: evicts every service that did not respond since the previous tick,
   * re-arms waitingResponse with all surviving services, and returns the evicted entries.
   */
  def aTick() = {
    // NOTE: this map both builds the result and mutates `services` as a side effect.
    val noAnswer = waitingResponse.map{kv =>
      services -= kv._1
      kv
    }
    waitingResponse = services.map(kv => kv._1 -> kv._2._1)
    noAnswer
  }
  /**
   * Registers or refreshes a service from a status response.
   * Reuses the existing name when the sending actor is already known, so a changed
   * status payload does not create a duplicate entry.
   * @return true when the service was not previously registered (newly added).
   */
  def addResponse(resp: APISP.StatusResponse, sender: ActorRef) = {
    val re = services.filter(kv => kv._2._2 == sender).map(kv => kv._1 -> kv._2._1)
    val n = if (re.isEmpty) createName(resp) else re.head._1
    val res = !services.contains(n)
    waitingResponse -= n
    services += n -> (resp, sender)
    res
  }
  /**
   * Removes all bookkeeping for a terminated actor and returns the removed
   * (name -> status) entries so callers can announce them.
   */
  def deathWatch(actor: ActorRef) = {
    val re = services.filter(kv => kv._2._2 == actor).map(kv => kv._1 -> kv._2._1)
    services = services.filterNot(kv => re.contains(kv._1))
    waitingResponse = waitingResponse.filterNot(kv => re.contains(kv._1))
    re
  }
  // Forgets the service whose generated name matches this status response.
  def removeService(sR: APISP.StatusResponse): Unit = {
    val n = createName(sR)
    services = services - n
    waitingResponse = waitingResponse - n
  }
  /**
   * Builds the registry key: "service-<instanceID>" when an instance id is present,
   * otherwise "service-<instanceName>", otherwise just the service name.
   * NOTE(review): when BOTH instanceName and instanceID are set, the name part (`n`)
   * is discarded rather than combined — possibly unintended; confirm before changing.
   */
  def createName(x: APISP.StatusResponse ) = {
    val n = if (x.instanceName.isEmpty) "" else "-" +x.instanceName
    val id = if (x.instanceID.isEmpty) n else "-" +x.instanceID.get.toString
    x.service + id
  }
}
| kristoferB/SP | spcore/src/main/scala/sp/service/ServiceHandler.scala | Scala | mit | 3,839 |
package controllers
import javax.inject._
import akka.actor.ActorSystem
import com.gilesc.scalacasts.bootstrap.{AkkaTimeoutSettings, ScalacastActors}
import com.gilesc.scalacasts.service.Receptionist
import com.gilesc.scalacasts.service.ScreencastContext
import com.typesafe.config.ConfigFactory
import models.ScreencastResource
import play.api.data._
import play.api.data.Forms._
import play.api.i18n.Messages.Implicits._
import play.api.Logger
import play.api.mvc._
import play.api.Play.current
/**
 * Play controller for the public pages (index/about/contact) and the screencast
 * upload endpoint, which stages the uploaded file on disk and hands it to the
 * receptionist actor for processing.
 */
@Singleton
class Home @Inject() (val system: ActorSystem) extends Controller
  with AkkaTimeoutSettings with ScalacastActors {
  import akka.pattern.ask
  import system.dispatcher
  // Binds the upload form's metadata fields (title/description/tags) to a ScreencastResource.
  val screencastResource = Form(
    mapping(
      "title" -> text,
      "description" -> text,
      "tags" -> text)(ScreencastResource.apply)(ScreencastResource.unapply))
  def index = Action {
    Ok(views.html.home.index())
  }
  def about = Action {
    Ok(views.html.home.about())
  }
  def contact = Action {
    Ok(views.html.home.contact(screencastResource))
  }
  /**
   * Handles the multipart screencast upload: validates the form, moves the file
   * into the configured videos folder, and notifies the receptionist actor.
   */
  def upload = Action(parse.multipartFormData) { implicit request =>
    import java.io.File
    val config = ConfigFactory.load()
    screencastResource.bindFromRequest.fold(
      formWithErrors => {
        // Form validation failed: re-render the contact page with the errors.
        BadRequest(views.html.home.contact(formWithErrors))
      },
      screencast => request.body.file("screencast").map { video =>
        val filename = video.filename
        val contentType = video.contentType.getOrElse("N/A")
        // Staging path for the uploaded file before the receptionist picks it up.
        val path = s"${config.getString("scalacasts.videos.folder")}/tmp-$filename"
        val cxt = ScreencastContext(
          path,
          contentType,
          screencast.title,
          screencast.description,
          screencast.tags)
        video.ref.moveTo(new File(path), replace = true)
        // Fire-and-forget: the ask's future is only used for success logging and its
        // failure is silently dropped. NOTE(review): consider recovering/logging errors.
        val result = scalacastReceptionist ? Receptionist.AddNewScreencast(cxt)
        result.mapTo[Receptionist.Successful].map { message =>
          Logger.info("LOGGER: SUCCESSFUL " + message)
        }
        Redirect(routes.Home.index()).flashing("success" -> "Screencast Added")
      }.getOrElse {
        // No "screencast" file part was present in the request.
        Redirect(routes.Home.index()).flashing("error" -> "Missing file")
      })
  }
}
| CraigGiles/scalacasts | presentation/app/controllers/Home.scala | Scala | mit | 2,211 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing.{Trace => FTrace}
import com.twitter.logging.Logger
import com.twitter.ostrich.admin.Service
import com.twitter.util.{Time, Future}
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.thriftscala
import com.twitter.zipkin.query.adjusters.Adjuster
import com.twitter.zipkin.storage._
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicBoolean
import org.apache.thrift.TException
import scala.collection.Set
/**
* Able to respond to users queries regarding the traces. Usually does so
* by lookup the information in the index and then fetch the required trace data
* from the storage.
*/
class QueryService(
  storage: Storage,
  index: Index,
  aggregates: Aggregates,
  adjusterMap: Map[thriftscala.Adjust, Adjuster],
  statsReceiver: StatsReceiver = NullStatsReceiver
) extends thriftscala.ZipkinQuery.FutureIface with Service {
  private val log = Logger.get
  // Guards every endpoint: requests before start() / after shutdown() are rejected.
  private val running = new AtomicBoolean(false)
  private val stats = statsReceiver.scope("QueryService")
  private val methodStats = stats.scope("methods")
  private val errorStats = stats.scope("errors")
  private val timingStats = stats.scope("timing")
  // how to sort the trace summaries
  private val OrderByDurationDesc = {
    (a: TraceIdDuration, b: TraceIdDuration) => a.duration > b.duration
  }
  private val OrderByDurationAsc = {
    (a: TraceIdDuration, b: TraceIdDuration) => a.duration < b.duration
  }
  private val OrderByTimestampDesc = {
    (a: TraceIdDuration, b: TraceIdDuration) => a.startTimestamp > b.startTimestamp
  }
  private val OrderByTimestampAsc = {
    (a: TraceIdDuration, b: TraceIdDuration) => a.startTimestamp < b.startTimestamp
  }
  // this is how many trace durations we fetch in one request
  // TODO config
  var traceDurationFetchBatchSize = 500
  def start() {
    running.set(true)
  }
  def shutdown() {
    running.set(false)
    storage.close
    index.close
    aggregates.close
  }
  /**
   * Sorts/limits the indexed ids per `order` and wraps them in a QueryResponse.
   * NOTE: the response's min/max timestamps are computed over ALL input ids'
   * timestamps, not just the ones surviving the limit. When the sorted result is
   * empty, (min, max) = (-1, defaultEndTs).
   */
  private def constructQueryResponse(indexedIds: Seq[IndexedTraceId], limit: Int, order: thriftscala.Order, defaultEndTs: Long = -1): Future[thriftscala.QueryResponse] = {
    val ids = indexedIds.map { _.traceId }
    val ts = indexedIds.map { _.timestamp }
    sortTraceIds(Future(ids), limit, order).map { sortedIds =>
      val (min, max) = sortedIds match {
        case Nil => (-1L, defaultEndTs)
        case _ => (ts.min, ts.max)
      }
      thriftscala.QueryResponse(sortedIds, min, max)
    }
  }
  /**
   * Main query entry point: builds zero or more index slice queries from the
   * request (span name, annotations, binary annotations) and combines them.
   * With no sub-queries, falls back to a service-level lookup; with one, runs it
   * directly; with several, aligns their time windows and intersects the results.
   */
  def getTraceIds(queryRequest: thriftscala.QueryRequest): Future[thriftscala.QueryResponse] = {
    val method = "getTraceIds"
    log.debug("%s: %s".format(method, queryRequest.toString))
    call(method) {
      val serviceName = queryRequest.`serviceName`
      val spanName = queryRequest.`spanName`
      val endTs = queryRequest.`endTs`
      val limit = queryRequest.`limit`
      val order = queryRequest.`order`
      val sliceQueries = Seq(
        spanName.map { name =>
          Seq(SpanSliceQuery(serviceName, name, endTs, 1))
        },
        queryRequest.`annotations`.map {
          _.map { a =>
            AnnotationSliceQuery(serviceName, a, None, endTs, 1)
          }
        },
        queryRequest.`binaryAnnotations`.map {
          _.map { b =>
            AnnotationSliceQuery(serviceName, b.`key`, Some(b.`value`), endTs, 1)
          }
        }
      ).collect {
        // NOTE(review): the Seq[SliceQuery] element type is erased at runtime, so this
        // match is unchecked; it is safe only because every branch above builds slice queries.
        case Some(q: Seq[SliceQuery]) => q
      }.flatten
      log.debug(sliceQueries.toString())
      sliceQueries match {
        case Nil => {
          /* No queries: get service level traces */
          index.getTraceIdsByName(serviceName, None, endTs, limit).map {
            constructQueryResponse(_, limit, order)
          }.flatten
        }
        case head :: Nil => {
          /* One query: just run it */
          (head match {
            case s: SpanSliceQuery => s.copy(limit = limit)
            case a: AnnotationSliceQuery => a.copy(limit = limit)
          }).execute(index).map {
            constructQueryResponse(_, limit, order)
          }.flatten
        }
        case queries => {
          /* Multiple: Fetch a single column from each to reconcile non-overlapping portions
             then fetch the entire slice */
          Future.collect {
            queries.map {
              _.execute(index)
            }
          }.map {
            _.flatten.map {
              _.timestamp
            }.min
          }.map { alignedTimestamp =>
            /* Pad the aligned timestamp by a minute */
            val ts = padTimestamp(alignedTimestamp)
            Future.collect {
              queries.map {
                case s: SpanSliceQuery => s.copy(endTs = ts, limit = limit).execute(index)
                case a: AnnotationSliceQuery => a.copy(endTs = ts, limit = limit).execute(index)
              }
            }.map { ids =>
              traceIdsIntersect(ids) match {
                case Nil => {
                  // Empty intersection: report an end timestamp so the caller can page back.
                  val endTimestamp = ids.map {
                    _.map { _.timestamp }.min
                  }.max
                  constructQueryResponse(Nil, limit, order, endTimestamp)
                }
                case seq => {
                  constructQueryResponse(seq, limit, order)
                }
              }
            }
          }.flatten.flatten
        }
      }
    }
  }
  // Widens a window-alignment timestamp by the configured padding constant.
  private[query] def padTimestamp(timestamp: Long): Long = timestamp + Constants.TraceTimestampPadding.inMicroseconds
  private[query] def traceIdsIntersect(idSeqs: Seq[Seq[IndexedTraceId]]): Seq[IndexedTraceId] = {
    /* Find the trace IDs present in all the Seqs */
    val idMaps = idSeqs.map {
      _.groupBy {
        _.traceId
      }
    }
    val traceIds = idMaps.map {
      _.keys.toSeq
    }
    val commonTraceIds = traceIds.tail.fold(traceIds(0)) { _.intersect(_) }
    /*
     * Find the timestamps associated with each trace ID and construct a new IndexedTraceId
     * that has the trace ID's maximum timestamp (ending) as the timestamp
     */
    commonTraceIds.map { id =>
      val maxTime = idMaps.map { m =>
        m(id).map { _.timestamp }
      }.flatten.max
      IndexedTraceId(id, maxTime)
    }
  }
  def getTraceIdsBySpanName(serviceName: String, spanName: String, endTs: Long,
                            limit: Int, order: thriftscala.Order): Future[Seq[String]] = {
    val method = "getTraceIdsBySpanName"
    log.debug("%s. serviceName: %s spanName: %s endTs: %s limit: %s order: %s".format(method, serviceName, spanName,
      endTs, limit, order))
    call(method) {
      if (serviceName == null || "".equals(serviceName)) {
        errorStats.counter("%s_no_service".format(method)).incr()
        // NOTE(review): nonlocal `return` from inside the by-name block passed to call();
        // works here but is fragile Scala — the exception-based return bypasses call()'s rescue.
        return Future.exception(thriftscala.QueryException("No service name provided"))
      }
      // do we have a valid span name to query indexes by?
      val span = convertToOption(spanName)
      FTrace.recordBinary("serviceName", serviceName)
      FTrace.recordBinary("spanName", spanName)
      FTrace.recordBinary("endTs", endTs)
      FTrace.recordBinary("limit", limit)
      FTrace.recordBinary("order", order)
      val traceIds = index.getTraceIdsByName(serviceName, span, endTs, limit).map {
        _.map { _.traceId }
      }
      sortTraceIds(traceIds, limit, order)
    }
  }
  def getTraceIdsByServiceName(serviceName: String, endTs: Long,
                               limit: Int, order: thriftscala.Order): Future[Seq[String]] = {
    val method = "getTraceIdsByServiceName"
    log.debug("%s. serviceName: %s endTs: %s limit: %s order: %s".format(method, serviceName, endTs, limit, order))
    call(method) {
      if (serviceName == null || "".equals(serviceName)) {
        errorStats.counter("%s_no_service".format(method)).incr()
        return Future.exception(thriftscala.QueryException("No service name provided"))
      }
      FTrace.recordBinary("serviceName", serviceName)
      FTrace.recordBinary("endTs", endTs)
      FTrace.recordBinary("limit", limit)
      FTrace.recordBinary("order", order)
      val traceIds = index.getTraceIdsByName(serviceName, None, endTs, limit).map {
        _.map { _.traceId }
      }
      sortTraceIds(traceIds, limit, order)
    }
  }
  def getTraceIdsByAnnotation(serviceName: String, annotation: String, value: ByteBuffer, endTs: Long,
                              limit: Int, order: thriftscala.Order): Future[Seq[String]] = {
    val method = "getTraceIdsByAnnotation"
    log.debug("%s. serviceName: %s annotation: %s value: %s endTs: %s limit: %s order: %s".format(method, serviceName,
      annotation, value, endTs, limit, order))
    call(method) {
      if (annotation == null || "".equals(annotation)) {
        errorStats.counter("%s_no_annotation".format(method)).incr()
        return Future.exception(thriftscala.QueryException("No annotation provided"))
      }
      // do we have a valid annotation value to query indexes by?
      val valueOption = convertToOption(value)
      FTrace.recordBinary("serviceName", serviceName)
      FTrace.recordBinary("annotation", annotation)
      FTrace.recordBinary("endTs", endTs)
      FTrace.recordBinary("limit", limit)
      FTrace.recordBinary("order", order)
      val traceIds = index.getTraceIdsByAnnotation(serviceName, annotation, valueOption, endTs, limit).map {
        _.map { _.traceId }
      }
      sortTraceIds(traceIds, limit, order)
    }
  }
  // Returns the subset of the given trace ids that exist in storage.
  def tracesExist(traceIds: Seq[String]): Future[Set[String]] = {
    log.debug("tracesExist. " + traceIds)
    call("tracesExist") {
      FTrace.recordBinary("numIds", traceIds.length)
      storage.tracesExist(traceIds)
    }
  }
  // Fetches full traces by id, applying the requested adjusters in order.
  def getTracesByIds(traceIds: Seq[String], adjust: Seq[thriftscala.Adjust]): Future[Seq[thriftscala.Trace]] = {
    log.debug("getTracesByIds. " + traceIds + " adjust " + adjust)
    call("getTracesByIds") {
      val adjusters = getAdjusters(adjust)
      FTrace.recordBinary("numIds", traceIds.length)
      storage.getSpansByTraceIds(traceIds).map { traces =>
        traces.map { spans =>
          val trace = Trace(spans)
          adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t)).toThrift
        }
      }
    }
  }
  def getTraceTimelinesByIds(traceIds: Seq[String],
                             adjust: Seq[thriftscala.Adjust]): Future[Seq[thriftscala.TraceTimeline]] = {
    log.debug("getTraceTimelinesByIds. " + traceIds + " adjust " + adjust)
    call("getTraceTimelinesByIds") {
      val adjusters = getAdjusters(adjust)
      FTrace.recordBinary("numIds", traceIds.length)
      storage.getSpansByTraceIds(traceIds).map { traces =>
        traces.flatMap { spans =>
          val trace = Trace(spans)
          // flatMap drops traces for which no timeline could be built.
          TraceTimeline(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).map(_.toThrift)
        }
      }
    }
  }
  def getTraceSummariesByIds(traceIds: Seq[String],
                             adjust: Seq[thriftscala.Adjust]): Future[Seq[thriftscala.TraceSummary]] = {
    log.debug("getTraceSummariesByIds. traceIds: " + traceIds + " adjust " + adjust)
    call("getTraceSummariesByIds") {
      val adjusters = getAdjusters(adjust)
      FTrace.recordBinary("numIds", traceIds.length)
      storage.getSpansByTraceIds(traceIds.toList).map { traces =>
        traces.flatMap { spans =>
          val trace = Trace(spans)
          TraceSummary(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).map(_.toThrift)
        }
      }
    }
  }
  def getTraceCombosByIds(traceIds: Seq[String], adjust: Seq[thriftscala.Adjust]): Future[Seq[thriftscala.TraceCombo]] = {
    log.debug("getTraceComboByIds. traceIds: " + traceIds + " adjust " + adjust)
    call("getTraceComboByIds") {
      val adjusters = getAdjusters(adjust)
      FTrace.recordBinary("numIds", traceIds.length)
      storage.getSpansByTraceIds(traceIds).map { traces =>
        traces.map { spans =>
          val trace = Trace(spans)
          TraceCombo(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).toThrift
        }
      }
    }
  }
  def getDataTimeToLive: Future[Int] = {
    log.debug("getDataTimeToLive")
    call("getDataTimeToLive") {
      Future(storage.getDataTimeToLive)
    }
  }
  def getServiceNames: Future[Set[String]] = {
    log.debug("getServiceNames")
    call("getServiceNames") {
      index.getServiceNames
    }
  }
  def getSpanNames(service: String): Future[Set[String]] = {
    log.debug("getSpanNames")
    call("getSpanNames") {
      index.getSpanNames(service)
    }
  }
  def setTraceTimeToLive(traceId: String, ttlSeconds: Int): Future[Unit] = {
    log.debug("setTimeToLive: " + traceId + " " + ttlSeconds)
    call("setTraceTimeToLive") {
      storage.setTimeToLive(traceId, ttlSeconds.seconds)
    }
  }
  def getTraceTimeToLive(traceId: String): Future[Int] = {
    log.debug("getTimeToLive: " + traceId)
    call("getTraceTimeToLive") {
      storage.getTimeToLive(traceId).map(_.inSeconds)
    }
  }
  /** Aggregates related */
  def getDependencies(startTime: Option[Long], endTime: Option[Long]) : Future[thriftscala.Dependencies] = {
    log.debug("getDependencies: " + startTime + " - " + endTime)
    call("getDependencies") {
      // Incoming bounds are epoch microseconds.
      val start = startTime.map { t => Time.fromMicroseconds(t) }
      val end = endTime.map { t => Time.fromMicroseconds(t) }
      aggregates.getDependencies(start, end) map {_.toThrift}
    }
  }
  def getTopAnnotations(serviceName: String): Future[Seq[String]] = {
    log.debug("getTopAnnotations: " + serviceName)
    call("getTopAnnotations") {
      aggregates.getTopAnnotations(serviceName)
    }
  }
  def getTopKeyValueAnnotations(serviceName: String): Future[Seq[String]] = {
    log.debug("getTopKeyValueAnnotations: " + serviceName)
    call("getTopKeyValueAnnotations") {
      aggregates.getTopKeyValueAnnotations(serviceName)
    }
  }
  // Unimplemented thrift endpoints; always fail.
  def getSpanDurations(
    timeStamp: Long,
    serverServiceName: String,
    rcpName: String
  ): Future[Map[String, Seq[String]]] =
    Future.exception(new Exception("Not Implemented"))
  def getServiceNamesToTraceIds(
    timeStamp: Long,
    serviceName: String,
    rcpName: String
  ): Future[Map[String, Seq[String]]] =
    Future.exception(new Exception("Not Implemented"))
  // Throws (synchronously) when the service has not been started or was shut down.
  private def checkIfRunning() = {
    if (!running.get) {
      log.warning("Server not running, throwing exception")
      throw new TException("Server not running")
    }
  }
  /**
   * Wraps every endpoint body: enforces the running flag, bumps the per-method
   * counter, times the future, and converts any failure into a QueryException
   * while counting it under errors.
   */
  private[this] def call[T](name: String)(f: => Future[T]): Future[T] = {
    checkIfRunning()
    methodStats.counter(name).incr()
    timingStats.timeFuture(name) {
      f rescue {
        case e: Exception => {
          log.error(e, "%s failed".format(name))
          errorStats.counter(name).incr()
          Future.exception(thriftscala.QueryException(e.toString))
        }
      }
    }
  }
  /**
   * Convert incoming Thrift order by enum into sort function.
   */
  private def getOrderBy(order: thriftscala.Order) = {
    order match {
      case thriftscala.Order.None => OrderByDurationDesc
      case thriftscala.Order.DurationDesc => OrderByDurationDesc
      case thriftscala.Order.DurationAsc => OrderByDurationAsc
      case thriftscala.Order.TimestampDesc => OrderByTimestampDesc
      case thriftscala.Order.TimestampAsc => OrderByTimestampAsc
    }
  }
  // Unknown Adjust values are silently dropped by the flatMap over the map lookup.
  private def getAdjusters(adjusters: Seq[thriftscala.Adjust]): Seq[Adjuster] = {
    adjusters.flatMap { adjusterMap.get(_) }
  }
  /**
   * Do we have a valid object to query indexes by?
   */
  private def convertToOption[O](param: O): Option[O] = {
    param match {
      case null => None
      case "" => None
      case s => Some(s)
    }
  }
  /**
   * Given a sequence of traceIds get their durations
   */
  private def getTraceIdDurations(
    traceIds: Future[Seq[String]]
  ): Future[Seq[TraceIdDuration]] = {
    traceIds.map { t =>
      // Fetch durations in batches to bound the size of each index request.
      Future.collect {
        t.grouped(traceDurationFetchBatchSize)
          .toSeq
          .map {index.getTracesDuration(_)}
      }
    }.flatten.map {_.flatten}
  }
  // Sorts by the requested order using looked-up durations, then applies the limit.
  // Order.None skips the duration lookup entirely and returns ids unchanged.
  private def sortTraceIds(
    traceIds: Future[Seq[String]],
    limit: Int,
    order: thriftscala.Order
  ): Future[Seq[String]] = {
    // No sorting wanted
    if (order == thriftscala.Order.None) {
      traceIds
    } else {
      val durations = getTraceIdDurations(traceIds)
      durations map { d =>
        d.sortWith(getOrderBy(order)).slice(0, limit).map(_.traceId)
      }
    }
  }
}
| cogitate/twitter-zipkin-uuid | zipkin-query-core/src/main/scala/com/twitter/zipkin/query/QueryService.scala | Scala | apache-2.0 | 17,134 |
package com.ubirch.auth.server.route
import com.typesafe.scalalogging.slf4j.StrictLogging
import com.ubirch.auth.config.Config
import com.ubirch.auth.core.actor.DeepCheckActor
import com.ubirch.auth.core.actor.util.ActorNames
import com.ubirch.auth.util.server.RouteConstants
import com.ubirch.util.deepCheck.model.{DeepCheckRequest, DeepCheckResponse}
import com.ubirch.util.http.response.ResponseUtil
import com.ubirch.util.rest.akka.directives.CORSDirective
import akka.actor.ActorSystem
import akka.http.scaladsl.HttpExt
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.Materializer
import akka.util.Timeout
import de.heikoseeberger.akkahttpjson4s.Json4sSupport._
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}
/**
* author: cvandrei
* since: 2017-06-08
*/
class DeepCheckRoute(implicit system: ActorSystem, httpClient: HttpExt, materializer: Materializer) extends CORSDirective
with ResponseUtil
with StrictLogging {
implicit val executionContext: ExecutionContextExecutor = system.dispatcher
implicit val timeout: Timeout = Timeout(Config.actorTimeout seconds)
private val deepCheckActor = system.actorOf(DeepCheckActor.props(), ActorNames.DEEP_CHECK)
val route: Route = {
path(RouteConstants.deepCheck) {
respondWithCORS {
get {
onComplete(deepCheckActor ? DeepCheckRequest()) {
case Failure(t) =>
logger.error("failed to run deepCheck (check DeepCheckRoute for bugs!!!)", t)
complete(serverErrorResponse(errorType = "ServerError", errorMessage = "sorry, something went wrong on our end"))
case Success(resp) =>
resp match {
case res: DeepCheckResponse if res.status => complete(res)
case res: DeepCheckResponse if !res.status => complete(response(responseObject = res, status = StatusCodes.ServiceUnavailable))
case _ => complete(serverErrorResponse(errorType = "ServerError", errorMessage = "failed to run deep check"))
}
}
}
}
}
}
}
| ubirch/ubirch-auth-service | server/src/main/scala/com/ubirch/auth/server/route/DeepCheckRoute.scala | Scala | apache-2.0 | 2,249 |
package org.eobjects.analyzer.beans
import org.eobjects.analyzer.beans.api.Provided
import org.eobjects.analyzer.beans.api.RendererBean
import org.eobjects.analyzer.beans.api.Renderer
import org.eobjects.analyzer.beans.api.RendererPrecedence
import org.eobjects.analyzer.result.html.HtmlFragment
import org.eobjects.analyzer.result.renderer.RendererFactory
import javax.inject.Inject
import org.eobjects.analyzer.result.renderer.HtmlRenderingFormat
@RendererBean(classOf[HtmlRenderingFormat])
class BooleanAnalyzerResultHtmlRenderer(rf: RendererFactory) extends Renderer[BooleanAnalyzerResult, HtmlFragment] {
@Inject
@Provided
var rendererFactory: RendererFactory = rf;
def this() = this(null)
override def getPrecedence(result: BooleanAnalyzerResult) = RendererPrecedence.HIGH
override def render(result: BooleanAnalyzerResult) = new BooleanAnalyzerHtmlFragment(rendererFactory, result);
} | datacleaner/AnalyzerBeans | components/basic-analyzers/src/main/scala/org/eobjects/analyzer/beans/BooleanAnalyzerResultHtmlRenderer.scala | Scala | lgpl-3.0 | 908 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.dynalloc
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{doAnswer, mock, when}
import org.apache.spark._
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config._
import org.apache.spark.scheduler._
import org.apache.spark.storage._
import org.apache.spark.util.ManualClock
class ExecutorMonitorSuite extends SparkFunSuite {
  // Timeout constants mirroring the conf entries set below, in milliseconds.
  private val idleTimeoutMs = TimeUnit.SECONDS.toMillis(60L)
  private val storageTimeoutMs = TimeUnit.SECONDS.toMillis(120L)
  private val shuffleTimeoutMs = TimeUnit.SECONDS.toMillis(240L)
  private val conf = new SparkConf()
    .set(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, "60s")
    .set(DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT.key, "120s")
    .set(DYN_ALLOCATION_SHUFFLE_TIMEOUT.key, "240s")
    .set(SHUFFLE_SERVICE_ENABLED, true)
  // Re-created per test in beforeEach().
  private var monitor: ExecutorMonitor = _
  private var client: ExecutorAllocationClient = _
  private var clock: ManualClock = _
  // List of known executors. Allows easily mocking which executors are alive without
  // having to use mockito APIs directly in each test.
  private val knownExecs = mutable.HashSet[String]()
override def beforeEach(): Unit = {
super.beforeEach()
knownExecs.clear()
clock = new ManualClock()
client = mock(classOf[ExecutorAllocationClient])
when(client.isExecutorActive(any())).thenAnswer { invocation =>
knownExecs.contains(invocation.getArguments()(0).asInstanceOf[String])
}
monitor = new ExecutorMonitor(conf, client, null, clock)
}
test("basic executor timeout") {
knownExecs += "1"
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
assert(monitor.executorCount === 1)
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
}
test("SPARK-4951, SPARK-26927: handle out of order task start events") {
knownExecs ++= Set("1", "2")
monitor.onTaskStart(SparkListenerTaskStart(1, 1, taskInfo("1", 1)))
assert(monitor.executorCount === 1)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
assert(monitor.executorCount === 1)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "2", null))
assert(monitor.executorCount === 2)
monitor.onExecutorRemoved(SparkListenerExecutorRemoved(clock.getTimeMillis(), "2", null))
assert(monitor.executorCount === 1)
knownExecs -= "2"
monitor.onTaskStart(SparkListenerTaskStart(1, 1, taskInfo("2", 2)))
assert(monitor.executorCount === 1)
}
test("track tasks running on executor") {
knownExecs += "1"
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onTaskStart(SparkListenerTaskStart(1, 1, taskInfo("1", 1)))
assert(!monitor.isExecutorIdle("1"))
// Start/end a few tasks and make sure the executor does not go idle.
(2 to 10).foreach { i =>
monitor.onTaskStart(SparkListenerTaskStart(i, 1, taskInfo("1", 1)))
assert(!monitor.isExecutorIdle("1"))
monitor.onTaskEnd(SparkListenerTaskEnd(i, 1, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(!monitor.isExecutorIdle("1"))
}
monitor.onTaskEnd(SparkListenerTaskEnd(1, 1, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(clock.getTimeMillis()).isEmpty)
assert(monitor.timedOutExecutors(clock.getTimeMillis() + idleTimeoutMs + 1) === Seq("1"))
}
test("use appropriate time out depending on whether blocks are stored") {
knownExecs += "1"
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 0, "1"))
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 0, "1", level = StorageLevel.NONE))
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
monitor.onTaskStart(SparkListenerTaskStart(1, 1, taskInfo("1", 1)))
assert(!monitor.isExecutorIdle("1"))
monitor.onBlockUpdated(rddUpdate(1, 0, "1"))
assert(!monitor.isExecutorIdle("1"))
monitor.onBlockUpdated(rddUpdate(1, 0, "1", level = StorageLevel.NONE))
assert(!monitor.isExecutorIdle("1"))
}
test("keeps track of stored blocks for each rdd and split") {
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onBlockUpdated(rddUpdate(1, 0, "1"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 1, "1"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(2, 0, "1"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 1, "1", level = StorageLevel.NONE))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onUnpersistRDD(SparkListenerUnpersistRDD(1))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
// Make sure that if we get an unpersist event much later, which moves an executor from having
// cached blocks to no longer having cached blocks, it will time out based on the time it
// originally went idle.
clock.setTime(idleDeadline)
monitor.onUnpersistRDD(SparkListenerUnpersistRDD(2))
assert(monitor.timedOutExecutors(clock.getTimeMillis()) === Seq("1"))
}
test("handle timeouts correctly with multiple executors") {
knownExecs ++= Set("1", "2", "3")
// start exec 1 at 0s (should idle time out at 60s)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
assert(monitor.isExecutorIdle("1"))
// start exec 2 at 30s, store a block (should idle time out at 150s)
clock.setTime(TimeUnit.SECONDS.toMillis(30))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "2", null))
monitor.onBlockUpdated(rddUpdate(1, 0, "2"))
assert(monitor.isExecutorIdle("2"))
assert(!monitor.timedOutExecutors(idleDeadline).contains("2"))
// start exec 3 at 60s (should idle timeout at 120s, exec 1 should time out)
clock.setTime(TimeUnit.SECONDS.toMillis(60))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "3", null))
assert(monitor.timedOutExecutors(clock.getTimeMillis()) === Seq("1"))
// store block on exec 3 (should now idle time out at 180s)
monitor.onBlockUpdated(rddUpdate(1, 0, "3"))
assert(monitor.isExecutorIdle("3"))
assert(!monitor.timedOutExecutors(idleDeadline).contains("3"))
// advance to 140s, remove block from exec 3 (time out immediately)
clock.setTime(TimeUnit.SECONDS.toMillis(140))
monitor.onBlockUpdated(rddUpdate(1, 0, "3", level = StorageLevel.NONE))
assert(monitor.timedOutExecutors(clock.getTimeMillis()).toSet === Set("1", "3"))
// advance to 150s, now exec 2 should time out
clock.setTime(TimeUnit.SECONDS.toMillis(150))
assert(monitor.timedOutExecutors(clock.getTimeMillis()).toSet === Set("1", "2", "3"))
}
test("SPARK-27677: don't track blocks stored on disk when using shuffle service") {
// First make sure that blocks on disk are counted when no shuffle service is available.
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onBlockUpdated(rddUpdate(1, 0, "1", level = StorageLevel.DISK_ONLY))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
conf.set(SHUFFLE_SERVICE_ENABLED, true).set(SHUFFLE_SERVICE_FETCH_RDD_ENABLED, true)
monitor = new ExecutorMonitor(conf, client, null, clock)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onBlockUpdated(rddUpdate(1, 0, "1", level = StorageLevel.MEMORY_ONLY))
monitor.onBlockUpdated(rddUpdate(1, 1, "1", level = StorageLevel.MEMORY_ONLY))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 0, "1", level = StorageLevel.DISK_ONLY))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline) === Seq("1"))
monitor.onBlockUpdated(rddUpdate(1, 1, "1", level = StorageLevel.DISK_ONLY))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
// Tag the block as being both in memory and on disk, which may happen after it was
// evicted and then restored into memory. Since it's still on disk the executor should
// still be eligible for removal.
monitor.onBlockUpdated(rddUpdate(1, 1, "1", level = StorageLevel.MEMORY_AND_DISK))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
}
test("track executors pending for removal") {
knownExecs ++= Set("1", "2", "3")
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "2", null))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "3", null))
clock.setTime(idleDeadline)
assert(monitor.timedOutExecutors().toSet === Set("1", "2", "3"))
assert(monitor.pendingRemovalCount === 0)
// Notify that only a subset of executors was killed, to mimic the case where the scheduler
// refuses to kill an executor that is busy for whatever reason the monitor hasn't detected yet.
monitor.executorsKilled(Seq("1"))
assert(monitor.timedOutExecutors().toSet === Set("2", "3"))
assert(monitor.pendingRemovalCount === 1)
// Check the timed out executors again so that we're sure they're still timed out when no
// events happen. This ensures that the monitor doesn't lose track of them.
assert(monitor.timedOutExecutors().toSet === Set("2", "3"))
monitor.onTaskStart(SparkListenerTaskStart(1, 1, taskInfo("2", 1)))
assert(monitor.timedOutExecutors().toSet === Set("3"))
monitor.executorsKilled(Seq("3"))
assert(monitor.pendingRemovalCount === 2)
monitor.onTaskEnd(SparkListenerTaskEnd(1, 1, "foo", Success, taskInfo("2", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors().isEmpty)
clock.advance(idleDeadline)
assert(monitor.timedOutExecutors().toSet === Set("2"))
}
test("shuffle block tracking") {
val bus = mockListenerBus()
conf.set(DYN_ALLOCATION_SHUFFLE_TRACKING, true).set(SHUFFLE_SERVICE_ENABLED, false)
monitor = new ExecutorMonitor(conf, client, bus, clock)
// 3 jobs: 2 and 3 share a shuffle, 1 has a separate shuffle.
val stage1 = stageInfo(1, shuffleId = 0)
val stage2 = stageInfo(2)
val stage3 = stageInfo(3, shuffleId = 1)
val stage4 = stageInfo(4)
val stage5 = stageInfo(5, shuffleId = 1)
val stage6 = stageInfo(6)
// Start jobs 1 and 2. Finish a task on each, but don't finish the jobs. This should prevent the
// executor from going idle since there are active shuffles.
monitor.onJobStart(SparkListenerJobStart(1, clock.getTimeMillis(), Seq(stage1, stage2)))
monitor.onJobStart(SparkListenerJobStart(2, clock.getTimeMillis(), Seq(stage3, stage4)))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
// First a failed task, to make sure it does not count.
monitor.onTaskStart(SparkListenerTaskStart(1, 0, taskInfo("1", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(1, 0, "foo", TaskResultLost, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
monitor.onTaskStart(SparkListenerTaskStart(1, 0, taskInfo("1", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(1, 0, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
monitor.onTaskStart(SparkListenerTaskStart(3, 0, taskInfo("1", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(3, 0, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
// Finish the jobs, now the executor should be idle, but with the shuffle timeout, since the
// shuffles are not active.
monitor.onJobEnd(SparkListenerJobEnd(1, clock.getTimeMillis(), JobSucceeded))
assert(!monitor.isExecutorIdle("1"))
monitor.onJobEnd(SparkListenerJobEnd(2, clock.getTimeMillis(), JobSucceeded))
assert(monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(storageDeadline).isEmpty)
assert(monitor.timedOutExecutors(shuffleDeadline) === Seq("1"))
// Start job 3. Since it shares a shuffle with job 2, the executor should not be considered
// idle anymore, even if no tasks are run.
monitor.onJobStart(SparkListenerJobStart(3, clock.getTimeMillis(), Seq(stage5, stage6)))
assert(!monitor.isExecutorIdle("1"))
assert(monitor.timedOutExecutors(shuffleDeadline).isEmpty)
monitor.onJobEnd(SparkListenerJobEnd(3, clock.getTimeMillis(), JobSucceeded))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
assert(monitor.timedOutExecutors(shuffleDeadline) === Seq("1"))
// Clean up the shuffles, executor now should now time out at the idle deadline.
monitor.shuffleCleaned(0)
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
monitor.shuffleCleaned(1)
assert(monitor.timedOutExecutors(idleDeadline) === Seq("1"))
}
test("SPARK-28839: Avoids NPE in context cleaner when shuffle service is on") {
val bus = mockListenerBus()
conf.set(DYN_ALLOCATION_SHUFFLE_TRACKING, true).set(SHUFFLE_SERVICE_ENABLED, true)
monitor = new ExecutorMonitor(conf, client, bus, clock) {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
throw new IllegalStateException("No event should be sent.")
}
}
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.shuffleCleaned(0)
}
test("shuffle tracking with multiple executors and concurrent jobs") {
val bus = mockListenerBus()
conf.set(DYN_ALLOCATION_SHUFFLE_TRACKING, true).set(SHUFFLE_SERVICE_ENABLED, false)
monitor = new ExecutorMonitor(conf, client, bus, clock)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "2", null))
// Two separate jobs with separate shuffles. The first job will only run tasks on
// executor 1, the second on executor 2. Ensures that jobs finishing don't affect
// executors that are active in other jobs.
val stage1 = stageInfo(1, shuffleId = 0)
val stage2 = stageInfo(2)
monitor.onJobStart(SparkListenerJobStart(1, clock.getTimeMillis(), Seq(stage1, stage2)))
val stage3 = stageInfo(3, shuffleId = 1)
val stage4 = stageInfo(4)
monitor.onJobStart(SparkListenerJobStart(2, clock.getTimeMillis(), Seq(stage3, stage4)))
monitor.onTaskStart(SparkListenerTaskStart(1, 0, taskInfo("1", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(1, 0, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors(idleDeadline) === Seq("2"))
monitor.onTaskStart(SparkListenerTaskStart(3, 0, taskInfo("2", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(3, 0, "foo", Success, taskInfo("2", 1),
new ExecutorMetrics, null))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
monitor.onJobEnd(SparkListenerJobEnd(1, clock.getTimeMillis(), JobSucceeded))
assert(monitor.isExecutorIdle("1"))
assert(!monitor.isExecutorIdle("2"))
monitor.onJobEnd(SparkListenerJobEnd(2, clock.getTimeMillis(), JobSucceeded))
assert(monitor.isExecutorIdle("2"))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
monitor.shuffleCleaned(0)
monitor.shuffleCleaned(1)
assert(monitor.timedOutExecutors(idleDeadline).toSet === Set("1", "2"))
}
test("SPARK-28455: avoid overflow in timeout calculation") {
conf
.set(DYN_ALLOCATION_SHUFFLE_TIMEOUT, Long.MaxValue)
.set(DYN_ALLOCATION_SHUFFLE_TRACKING, true)
.set(SHUFFLE_SERVICE_ENABLED, false)
monitor = new ExecutorMonitor(conf, client, null, clock)
// Generate events that will make executor 1 be idle, while still holding shuffle data.
// The executor should not be eligible for removal since the timeout is basically "infinite".
val stage = stageInfo(1, shuffleId = 0)
monitor.onJobStart(SparkListenerJobStart(1, clock.getTimeMillis(), Seq(stage)))
clock.advance(1000L)
monitor.onExecutorAdded(SparkListenerExecutorAdded(clock.getTimeMillis(), "1", null))
monitor.onTaskStart(SparkListenerTaskStart(1, 0, taskInfo("1", 1)))
monitor.onTaskEnd(SparkListenerTaskEnd(1, 0, "foo", Success, taskInfo("1", 1),
new ExecutorMetrics, null))
monitor.onJobEnd(SparkListenerJobEnd(1, clock.getTimeMillis(), JobSucceeded))
assert(monitor.timedOutExecutors(idleDeadline).isEmpty)
}
private def idleDeadline: Long = clock.getTimeMillis() + idleTimeoutMs + 1
private def storageDeadline: Long = clock.getTimeMillis() + storageTimeoutMs + 1
private def shuffleDeadline: Long = clock.getTimeMillis() + shuffleTimeoutMs + 1
private def stageInfo(id: Int, shuffleId: Int = -1): StageInfo = {
new StageInfo(id, 0, s"stage$id", 1, Nil, Nil, "",
shuffleDepId = if (shuffleId >= 0) Some(shuffleId) else None)
}
private def taskInfo(
execId: String,
id: Int,
speculative: Boolean = false,
duration: Long = -1L): TaskInfo = {
val start = if (duration > 0) clock.getTimeMillis() - duration else clock.getTimeMillis()
val task = new TaskInfo(id, id, 1, start, execId, "foo.example.com",
TaskLocality.PROCESS_LOCAL, speculative)
if (duration > 0) {
task.markFinished(TaskState.FINISHED, math.max(1, clock.getTimeMillis()))
}
task
}
private def rddUpdate(
rddId: Int,
splitIndex: Int,
execId: String,
level: StorageLevel = StorageLevel.MEMORY_ONLY): SparkListenerBlockUpdated = {
SparkListenerBlockUpdated(
BlockUpdatedInfo(BlockManagerId(execId, "1.example.com", 42),
RDDBlockId(rddId, splitIndex), level, 1L, 0L))
}
/**
* Mock the listener bus *only* for the functionality needed by the shuffle tracking code.
* Any other event sent through the mock bus will fail.
*/
private def mockListenerBus(): LiveListenerBus = {
val bus = mock(classOf[LiveListenerBus])
doAnswer { invocation =>
monitor.onOtherEvent(invocation.getArguments()(0).asInstanceOf[SparkListenerEvent])
}.when(bus).post(any())
bus
}
}
| pgandhi999/spark | core/src/test/scala/org/apache/spark/scheduler/dynalloc/ExecutorMonitorSuite.scala | Scala | apache-2.0 | 20,440 |
package cpup.mc.oldenMagic.content
import cpup.mc.lib.content.CPupItem
import cpup.mc.oldenMagic.{OldenMagicMod, TOldenMagicMod}
/** Mixin giving Olden Magic items their owning-mod reference. */
trait TItemBase extends CPupItem[TOldenMagicMod] {
  /** The mod this item belongs to; always the [[OldenMagicMod]] singleton. */
  def mod: OldenMagicMod.type = OldenMagicMod
}
/** Default concrete item type with no behavior beyond [[TItemBase]]. */
class ItemBase extends TItemBase
| CoderPuppy/oldenmagic-mc | src/main/scala/cpup/mc/oldenMagic/content/ItemBase.scala | Scala | mit | 242 |
package utils.pageobjects.s_about_you
import utils.WithBrowser
import utils.pageobjects._
/**
 * Page object for the "nationality and residency" claim screen.
 *
 * Each declare* call below registers a mapping between an HTML element id on the
 * page and the named claim attribute used by the test framework to fill/read it.
 */
final class GNationalityAndResidencyPage(ctx: PageObjectsContext) extends ClaimPage(ctx, GNationalityAndResidencyPage.url) {
  declareRadioList("#nationality", "AboutYouNationalityAndResidencyNationality")
  declareInput("#actualnationality", "AboutYouNationalityAndResidencyActualNationality")
  declareYesNo("#alwaysLivedInUK", "AboutYouNationalityAndResidencyAlwaysLivedInUK")
  declareYesNo("#liveInUKNow", "AboutYouNationalityAndResidencyLiveInUKNow")
  declareRadioList("#arrivedInUK", "AboutYouNationalityAndResidencyArrivedInUK")
  declareDate("#arrivedInUKDate", "AboutYouNationalityAndResidencyArrivedInUKDate")
  declareInput("#arrivedInUKFrom", "AboutYouNationalityAndResidencyArrivedInUKFrom")
  declareYesNo("#trip52weeks", "AboutYouNationalityAndResidencyTrip52Weeks")
  declareInput("#tripDetails", "AboutYouNationalityAndResidencyTripDetails")
}
/** Companion holding the page URL and a convenience factory. */
object GNationalityAndResidencyPage {
  val url = "/nationality/where-you-live"
  /** Builds the page object for the given browser context. */
  def apply(ctx: PageObjectsContext): GNationalityAndResidencyPage =
    new GNationalityAndResidencyPage(ctx)
}
/** The context for Specs tests */
// Mixes a ready-made page object into any spec that provides a WithBrowser.
trait GNationalityAndResidencyPageContext extends PageContext {
  this: WithBrowser[_] =>
  // Page under test, bound to the spec's browser instance.
  val page = GNationalityAndResidencyPage(PageObjectsContext(browser))
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/utils/pageobjects/s_about_you/GNationalityAndResidencyPage.scala | Scala | mit | 1,316 |
package rugloom.web.socket
import play.api.libs.json.{JsObject, JsString, JsValue, Json, Writes}
import rugloom.shell.ShellOutput
import rugloom.web.socket.Message.Kind
import scala.tools.nsc.interpreter.Results.Result
/**
* RugLoom - Explorative analysis pipeline prototype
* Created by oliverr on 7/30/2015.
*/
/**
 * Play-JSON `Writes` instances for serializing the web-socket [[Message]]
 * hierarchy and its supporting types.
 */
object MessageJsonWriting {
  /** Serializes a [[TimedId]] as its two components. */
  implicit val timedIdWrites = new Writes[TimedId] {
    override def writes(id: TimedId): JsObject = Json.obj(
      "time" -> id.time,
      "rand" -> id.rand
    )
  }
  /** A message kind is rendered as its plain string name. */
  implicit val messageKindWrites = new Writes[Message.Kind] {
    override def writes(kind: Kind): JsString = JsString(kind.toString)
  }
  implicit val pingMessageWrites = new Writes[PingMessage] {
    override def writes(message: PingMessage): JsObject = Json.obj(
      "id" -> message.id,
      "kind" -> message.kind
    )
  }
  implicit val echoMessageWrites = new Writes[EchoMessage] {
    override def writes(message: EchoMessage): JsObject = Json.obj(
      "id" -> message.id,
      "kind" -> message.kind,
      "inResponseToId" -> message.inResponseToId
    )
  }
  implicit val inputMessageWrites = new Writes[InputMessage] {
    override def writes(message: InputMessage): JsObject = Json.obj(
      "id" -> message.id,
      "kind" -> message.kind,
      "input" -> message.input,
      "num" -> message.num
    )
  }
  /** A REPL interpreter result is rendered as its string form. */
  implicit val resultReturnedWrites = new Writes[Result] {
    override def writes(result: Result): JsString = JsString(result.toString)
  }
  implicit val shellOutputWrites = new Writes[ShellOutput] {
    override def writes(response: ShellOutput): JsObject = Json.obj(
      "kind" -> response.kindAsString,
      "text" -> response.text
    )
  }
  implicit val shellOutputMessageWrites = new Writes[ShellOutputMessage] {
    override def writes(message: ShellOutputMessage): JsObject = Json.obj(
      "id" -> message.id,
      "kind" -> message.kind,
      "output" -> message.output
    )
  }
  /** Dispatches serialization of a generic [[Message]] to the concrete writer. */
  implicit val messageWrites = new Writes[Message] {
    override def writes(message: Message): JsValue = message match {
      case pingMessage: PingMessage => Json.toJson(pingMessage)
      case echoMessage: EchoMessage => Json.toJson(echoMessage)
      case inputMessage: InputMessage => Json.toJson(inputMessage)
      // Fix: this case was missing even though a Writes[ShellOutputMessage] is
      // defined above, so serializing a shell-output message via Writes[Message]
      // threw a MatchError. (Assumes ShellOutputMessage extends Message, as its
      // id/kind fields suggest — confirm against the Message hierarchy.)
      case shellOutputMessage: ShellOutputMessage => Json.toJson(shellOutputMessage)
    }
  }
}
| curoli/rugloom-client | app/rugloom/web/socket/MessageJsonWriting.scala | Scala | mit | 2,305 |
package com.oracle.infy.qa
import com.webtrends.harness.service.Service
// Minimal example service for the QA/basic-service module; all behavior is
// inherited unchanged from the harness Service base class.
class BasicService extends Service
| Webtrends/wookiee | examples/basic-service/src/main/scala/com/oracle/infy/qa/BasicService.scala | Scala | apache-2.0 | 109 |
package org.killingbilling.junction
// Placeholder for a Node.js-style Buffer API within the junction runtime.
object Buffer {
  // TODO impl
  // Stub of Node's `Buffer.isBuffer`: always reports false until a real Buffer
  // implementation exists, so the bundled assert.js never treats a value as a Buffer.
  def isBuffer(obj: AnyRef): Boolean = false //stub (used in assert.js)
}
| KillingBilling/junction | src/main/scala/org/killingbilling/junction/Buffer.scala | Scala | mit | 145 |
package mesosphere.marathon
package core.deployment.impl
import java.util.concurrent.LinkedBlockingDeque
import akka.Done
import akka.actor.{ActorRef, Props}
import akka.event.EventStream
import akka.stream.scaladsl.Source
import akka.testkit.TestActor.{AutoPilot, NoAutoPilot}
import akka.testkit.{ImplicitSender, TestActor, TestActorRef, TestProbe}
import mesosphere.AkkaUnitTest
import mesosphere.marathon.MarathonSchedulerActor.{DeploymentFailed, DeploymentStarted}
import mesosphere.marathon.core.deployment.{DeploymentPlan, DeploymentStepInfo}
import mesosphere.marathon.core.deployment.impl.DeploymentActor.Cancel
import mesosphere.marathon.core.deployment.impl.DeploymentManagerActor._
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.leadership.AlwaysElectedLeadershipModule
import mesosphere.marathon.core.readiness.ReadinessCheckExecutor
import mesosphere.marathon.core.storage.store.impl.memory.InMemoryPersistenceStore
import mesosphere.marathon.core.task.termination.KillService
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.metrics.dummy.DummyMetrics
import mesosphere.marathon.state.{AppDefinition, ResourceRole}
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.storage.repository.{AppRepository, DeploymentRepository}
import mesosphere.marathon.test.{GroupCreation, MarathonTestHelper}
import org.apache.mesos.SchedulerDriver
import org.rogach.scallop.ScallopConf
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Success
/**
 * Tests for `DeploymentManagerActor`: starting, listing, conflicting (forced and
 * non-forced) and canceling deployments. Deployment actors are replaced with inert
 * TestActor props (see Fixture.deploymentActorProps) so the tests fully control
 * deployment progress and status transitions.
 */
class DeploymentManagerActorTest extends AkkaUnitTest with ImplicitSender with GroupCreation with Eventually {
  "DeploymentManager" should {
    "Deployment" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = ResourceRole.Unreserved)
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup)
      manager ! StartDeployment(plan, ActorRef.noSender)
      awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
    }
    "Finished deployment" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = ResourceRole.Unreserved)
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup)
      manager ! StartDeployment(plan, ActorRef.noSender)
      awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
      // A DeploymentFinished message must remove the plan from the running set.
      manager ! DeploymentFinished(plan, Success(Done))
      awaitCond(manager.underlyingActor.runningDeployments.isEmpty, 5.seconds)
    }
    "Able to see deployment when listing deployments after it was started" in {
      import akka.pattern.ask
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = "*")
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup)
      manager ! StartDeployment(plan, ActorRef.noSender)
      eventually {
        val runningDeployments = (manager.actorRef ? ListRunningDeployments).mapTo[Future[Seq[DeploymentStepInfo]]].futureValue.futureValue
        runningDeployments.size should be(1)
        runningDeployments.head.plan should be(plan)
      }
    }
    "Conflicting not forced deployment" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = "*")
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup, id = Some("d1"))
      val probe = TestProbe()
      manager ! StartDeployment(plan, ActorRef.noSender)
      awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
      // Without force, a conflicting plan is rejected and the original keeps running.
      manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = false)
      probe.expectMsgType[DeploymentFailed]
      manager.underlyingActor.runningDeployments.size should be(1)
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
    }
    "Conflicting forced deployment" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = "*")
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup, id = Some("b1"))
      val probe = TestProbe()
      manager ! StartDeployment(plan, probe.ref)
      probe.expectMsgType[DeploymentStarted]
      awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
      // With force, the first deployment moves to Canceling and the new one deploys.
      manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = true)
      probe.expectMsgType[DeploymentStarted]
      manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Canceling)
      eventually(manager.underlyingActor.runningDeployments("d2").status should be(DeploymentStatus.Deploying))
    }
    "Multiple conflicting forced deployments" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = "*")
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup, id = Some("d1"))
      val probe = TestProbe()
      manager ! StartDeployment(plan, probe.ref)
      probe.expectMsgType[DeploymentStarted]
      manager.underlyingActor.runningDeployments("d1").status should be(DeploymentStatus.Deploying)
      manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = true)
      probe.expectMsgType[DeploymentStarted]
      manager.underlyingActor.runningDeployments("d1").status should be(DeploymentStatus.Canceling)
      manager.underlyingActor.runningDeployments("d2").status should be(DeploymentStatus.Deploying)
      manager ! StartDeployment(plan.copy(id = "d3"), probe.ref, force = true)
      probe.expectMsgType[DeploymentStarted]
      // Since deployments are not really started (DeploymentActor is not spawned), DeploymentFinished event is not
      // sent and the deployments are staying in the list of runningDeployments
      manager.underlyingActor.runningDeployments("d1").status should be(DeploymentStatus.Canceling)
      manager.underlyingActor.runningDeployments("d2").status should be(DeploymentStatus.Canceling)
      manager.underlyingActor.runningDeployments("d3").status should be(DeploymentStatus.Scheduled)
    }
    "StopActor" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val probe = TestProbe()
      // The probe stops itself upon receiving Cancel, simulating a deployment
      // actor that shuts down when canceled; stopActor should then complete.
      probe.setAutoPilot(new AutoPilot {
        override def run(sender: ActorRef, msg: Any): AutoPilot =
          msg match {
            case Cancel(_) =>
              system.stop(probe.ref)
              NoAutoPilot
          }
      })
      val ex = new Exception("")
      val res = manager.underlyingActor.stopActor(probe.ref, ex)
      res.futureValue should be(Done)
    }
    "Cancel deployment" in {
      val f = new Fixture
      val manager = f.deploymentManager()
      val app = AppDefinition("app".toAbsolutePath, cmd = Some("sleep"), role = "*")
      val oldGroup = createRootGroup()
      val newGroup = createRootGroup(Map(app.id -> app))
      val plan = DeploymentPlan(oldGroup, newGroup)
      val probe = TestProbe()
      manager ! StartDeployment(plan, probe.ref)
      probe.expectMsgType[DeploymentStarted]
      manager ! CancelDeployment(plan)
      eventually(manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Canceling))
    }
  }
  // Shared mocks and helpers; a fresh Fixture is created per test case.
  class Fixture {
    val driver: SchedulerDriver = mock[SchedulerDriver]
    val deploymentRepo = mock[DeploymentRepository]
    val eventBus: EventStream = mock[EventStream]
    val launchQueue: LaunchQueue = mock[LaunchQueue]
    val config: MarathonConf = new ScallopConf(Seq("--master", "foo")) with MarathonConf {
      verify()
    }
    implicit val ctx: ExecutionContext = ExecutionContext.Implicits.global
    val taskTracker: InstanceTracker = MarathonTestHelper.createTaskTracker(
      AlwaysElectedLeadershipModule.forRefFactory(system)
    )
    val taskKillService: KillService = mock[KillService]
    val metrics: Metrics = DummyMetrics
    val appRepo: AppRepository = AppRepository.inMemRepository(new InMemoryPersistenceStore(metrics))
    val hcManager: HealthCheckManager = mock[HealthCheckManager]
    val readinessCheckExecutor: ReadinessCheckExecutor = mock[ReadinessCheckExecutor]
    // A method that returns dummy props. Used to control the deployments progress. Otherwise the tests become racy
    // and depending on when DeploymentActor sends DeploymentFinished message.
    val deploymentActorProps: (Any, Any, Any, Any, Any, Any, Any, Any) => Props = (_, _, _, _, _, _, _, _) =>
      TestActor.props(new LinkedBlockingDeque())
    def deploymentManager(): TestActorRef[DeploymentManagerActor] =
      TestActorRef(
        DeploymentManagerActor.props(
          metrics,
          taskTracker,
          taskKillService,
          launchQueue,
          hcManager,
          eventBus,
          readinessCheckExecutor,
          deploymentRepo,
          deploymentActorProps
        )
      )
    // Default mock behavior: repository operations succeed, nothing is persisted.
    deploymentRepo.store(any[DeploymentPlan]) returns Future.successful(Done)
    deploymentRepo.delete(any[String]) returns Future.successful(Done)
    deploymentRepo.all() returns Source.empty
    launchQueue.add(any, any) returns Future.successful(Done)
  }
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/core/deployment/impl/DeploymentManagerActorTest.scala | Scala | apache-2.0 | 10,525 |
package bhoot
import scala.util.parsing.combinator._
/**
 * A small JSON grammar built on Scala parser combinators.
 * Objects parse to Map[String, Any], arrays to List[Any]; numbers are kept as
 * their source strings, and "null"/"true"/"false" become null/true/false.
 */
object JSON extends JavaTokenParsers {
  // JSON object: comma-separated members between braces.
  def obj: Parser[Map[String, Any]] =
    "{"~> repsep(member, ",") <~"}" ^^ (Map() ++ _)
  // JSON array: comma-separated values between brackets.
  def arr: Parser[List[Any]] =
    "["~> repsep(value, ",") <~"]"
  def member: Parser[(String, Any)] =
    stringLiteral~":"~value ^^
    { case name~":"~value => (name, value) }
  // Alternative order matters: compound forms are tried before literals.
  // NOTE(review): JavaTokenParsers.floatingPointNumber already matches plain
  // integers, so the wholeNumber alternative is likely unreachable — confirm.
  def value: Parser[Any] = (
    obj
    | arr
    | stringLiteral
    | floatingPointNumber
    | wholeNumber
    | "null" ^^ (x => null)
    | "true" ^^ (x => true)
    | "false" ^^ (x => false)
  )
  // Overrides the inherited stringLiteral to strip the surrounding quotes.
  // NOTE(review): the regex only matches quoted strings, so the else-branch
  // appears to be dead code.
  override def stringLiteral =
    ("\\"" + """[^"\\\\]*(\\\\.[^"\\\\]*)*""" + "\\"").r ^^ {case s => if(s.startsWith("\\"")) s.slice(1,s.length-1) else s}
  // Entry point: parses a complete JSON value from the reader.
  def parse(reader:java.io.Reader) = {
    parseAll(value, reader)
  }
}
| rocketeerbkw/tdash | framework/src/main/scala/jsonParsers.scala | Scala | gpl-3.0 | 810 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression
import org.scalatest.FunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.{LinearDataGenerator, LocalSparkContext}
class LassoSuite extends FunSuite with LocalSparkContext {

  /** Fails unless at least 80% of predictions fall within 0.5 of the true label. */
  def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]): Unit = {
    val numOffPredictions = predictions.zip(input).count { case (prediction, expected) =>
      // A prediction is off if the prediction is more than 0.5 away from expected value.
      math.abs(prediction - expected.label) > 0.5
    }
    // At least 80% of the predictions should be on.
    assert(numOffPredictions < input.length / 5)
  }

  /**
   * Shared body for both tests: trains Lasso via SGD on synthetic data drawn
   * from the linear model y = A + B*x1 + C*x2 (+ noise), asserts the learned
   * weights approximately recover the generating coefficients, and validates
   * predictions on held-out data.
   *
   * @param initialWeights optional starting weights for SGD; when absent the
   *                       default initialization of `run` is used
   */
  private def runLassoTest(initialWeights: Option[org.apache.spark.mllib.linalg.Vector]): Unit = {
    val nPoints = 1000
    val A = 2.0
    val B = -1.5
    val C = 1.0e-2

    // Prepend a constant 1.0 feature so the intercept A is learned as weight 0.
    val testData = LinearDataGenerator.generateLinearInput(A, Array[Double](B, C), nPoints, 42)
      .map { case LabeledPoint(label, features) =>
        LabeledPoint(label, Vectors.dense(1.0 +: features.toArray))
      }
    val testRDD = sc.parallelize(testData, 2).cache()

    val ls = new LassoWithSGD()
    ls.optimizer.setStepSize(1.0).setRegParam(0.01).setNumIterations(40)

    val model = initialWeights match {
      case Some(weights) => ls.run(testRDD, weights)
      case None          => ls.run(testRDD)
    }

    // The weights should approximately recover (A, B, C); the tiny C should
    // be driven toward 0 by the L1 penalty.
    val weight0 = model.weights(0)
    val weight1 = model.weights(1)
    val weight2 = model.weights(2)
    assert(weight0 >= 1.9 && weight0 <= 2.1, weight0 + " not in [1.9, 2.1]")
    assert(weight1 >= -1.60 && weight1 <= -1.40, weight1 + " not in [-1.6, -1.4]")
    assert(weight2 >= -1.0e-3 && weight2 <= 1.0e-3, weight2 + " not in [-0.001, 0.001]")

    // Held-out data from the same generating model but a different seed.
    val validationData = LinearDataGenerator.generateLinearInput(A, Array[Double](B, C), nPoints, 17)
      .map { case LabeledPoint(label, features) =>
        LabeledPoint(label, Vectors.dense(1.0 +: features.toArray))
      }
    val validationRDD = sc.parallelize(validationData, 2)

    // Test prediction on RDD.
    validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
    // Test prediction on Array.
    validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
  }

  test("Lasso local random SGD") {
    runLassoTest(None)
  }

  test("Lasso local random SGD with initial weights") {
    runLassoTest(Some(Vectors.dense(-1.0, -1.0, -1.0)))
  }
}
| echalkpad/t4f-data | spark/mllib/src/test/scala/io/aos/spark/mllib/lasso/LassoSuite.scala | Scala | apache-2.0 | 4,506 |
package scoobie
import org.specs2._
/**
* Created by jacob.barber on 5/23/16.
*/
/**
 * Shared assertions over a plain (un-aliased) projection.
 *
 * Created by jacob.barber on 5/23/16.
 */
trait ProjectionTests extends SpecificationLike with ParamTests {

  /** The projection under test carries no alias and selects exactly fooParam. */
  lazy val queryProjectOneTest = {
    projection.alias must_== None
    projection.selection must_== fooParam
  }
}
| Jacoby6000/Scala-SQL-AST | ast/ansi/src/test/scala/scoobie/ProjectionTests.scala | Scala | mit | 274 |
package net.kogics.jiva.evolution
import scala.util.Random
import net.kogics.jiva.Predef._
import net.kogics.jiva.population._
import junit.framework._
import junit.framework.Assert._
import org.jmock.Mockery
import org.jmock.lib.legacy.ClassImposteriser
import org.jmock.Expectations
import org.jmock.Expectations._
/**
 * JUnit tests for roulette-wheel (fitness-proportionate) selection.
 *
 * Each test fixes the chromosome fitnesses of a population and scripts a
 * sequence of "random" draws by mocking [[Random]] with jMock, then checks
 * that the selector returns exactly the expected chromosomes.
 *
 * NOTE(review): `initialPop` is not defined in this class — presumably it
 * comes from `net.kogics.jiva.TestUtils`; verify there.
 */
class TestRouletteWheelSelector extends TestCase with net.kogics.jiva.TestUtils {
  // jMock context; ClassImposteriser allows mocking the concrete Random class
  // (not just interfaces).
  val context = new Mockery() {
    {
      setImposteriser(ClassImposteriser.INSTANCE)
    }
  }

  // Draws landing strictly inside the cumulative-fitness wheel segments.
  def testSelectorMidRange = {
    val fitnesses = List(0.1, 0.2, 0.3, 0.4)
    val probs = List(0.01, 0.4, 0.7, 0.9)
    val expectedPop = Population(initialPop(0), initialPop(2), initialPop(3), initialPop(3))
    testHelper(fitnesses, probs, initialPop, expectedPop)
  }

  // All draws near 0.0: every selection should yield the first chromosome.
  def testSelectorLeftEdge = {
    val fitnesses = List(0.05, 0.25, 0.4, 0.3)
    val probs = List(0.01, 0.03, 0.02, 0.04)
    val expectedPop = Population(initialPop(0), initialPop(0), initialPop(0), initialPop(0))
    testHelper(fitnesses, probs, initialPop, expectedPop)
  }

  // All draws near 1.0: every selection should yield the last chromosome.
  def testSelectorRightEdge = {
    val fitnesses = List(0.05, 0.25, 0.4, 0.3)
    val probs = List(0.8, 0.9, 0.85, 0.95)
    val expectedPop = Population(initialPop(3), initialPop(3), initialPop(3), initialPop(3))
    testHelper(fitnesses, probs, initialPop, expectedPop)
  }

  /**
   * Assigns `fitnesses` to the chromosomes of `pop` (in iteration order),
   * runs a RouletteWheelSelector whose Random is scripted to return `probs`
   * consecutively, and asserts the selected population equals `expectedPop`.
   */
  private def testHelper(fitnesses: List[Double], probs: List[Double],
    pop: Population[jbool], expectedPop: Population[jbool]) = {
    var idx = 0
    pop.foreach {chr => chr.fitness = Some(fitnesses(idx)); idx += 1}
    // Mocked Random whose nextDouble returns the scripted probabilities.
    val rg = (context.mock(classOf[Random])).asInstanceOf[Random]
    context.checking(
      new Expectations() {{
        atLeast(1).of(rg).nextDouble
        will(onConsecutiveCalls(returnConsecutiveValues(probs): _*))
      }
      })
    val selector = new RouletteWheelSelector[jbool](pop.size, rg)
    val pop2 = selector.select(pop)
    assert(pop2 == expectedPop)
    // Verify every scripted expectation was actually consumed.
    context.assertIsSatisfied
  }

  // A selection size of 0 must be rejected at construction time.
  def testInvalidSelsize = {
    try {
      new RouletteWheelSelector[jbool](0, new Random)
      fail("Invalid Selection Size should not be allowed")
    }
    catch {
      case e: IllegalArgumentException => assertTrue(true)
    }
  }
}
| milliondreams/jiva-ng | src/test/scala/net/kogics/jiva/evolution/TestRouletteWheelSelector.scala | Scala | gpl-3.0 | 2,265 |
/*
* Copyright 2014 – 2018 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalax.transducers
/**
 * The minimal contract of a transducer: a transformation of reducers.
 *
 * Applying it to a reducer that consumes `B`s yields a reducer that consumes
 * `A`s. Both element types are `@specialized` on the common primitives to
 * avoid boxing in hot reduction loops.
 */
trait TransducerCore[@specialized(Int, Long, Double, Char, Boolean) A, @specialized(Int, Long, Double, Char, Boolean) B] {

  /** Transforms a reducer over `B` into a reducer over `A`, for any result type `R`. */
  def apply[R](rf: Reducer[B, R]): Reducer[A, R]
}
| knutwalker/transducers-scala | api/src/main/scala/scalax/transducers/TransducerCore.scala | Scala | apache-2.0 | 808 |
package edu.berkeley.nlp.summ
import java.io.File
import edu.berkeley.nlp.entity.ConllDocReader
import edu.berkeley.nlp.entity.coref.CorefDocAssembler
import edu.berkeley.nlp.entity.coref.MentionPropertyComputer
import edu.berkeley.nlp.entity.coref.NumberGenderComputer
import edu.berkeley.nlp.entity.lang.EnglishCorefLanguagePack
import edu.berkeley.nlp.entity.lang.Language
import edu.berkeley.nlp.futile.LightRunner
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.summ.data.SummDoc
import edu.berkeley.nlp.summ.preprocess.DiscourseDependencyParser
import edu.berkeley.nlp.summ.preprocess.EDUSegmenter
import edu.berkeley.nlp.summ.data.DiscourseDepExProcessed
/**
* Main class for running the summarizer on unlabeled data. See run-summarizer.sh for
* example usage. The most useful arguments are:
* -inputDir: directory of files (in CoNLL format, with parses/coref/NER) to summarize
* -outputDir: directory to write summaries
* -modelPath if you want to use a different version of the summarizer.
*
* Any member of this class can be passed as a command-line argument to the
* system if it is preceded with a dash, e.g.
* -budget 100
*/
object Summarizer {

  // Paths to pretrained models/resources. Any of these vals can be overridden
  // from the command line (e.g. -modelPath ...) via LightRunner.populateScala.
  val numberGenderPath = "data/gender.data"
  val segmenterPath = "models/edusegmenter.ser.gz"
  val discourseParserPath = "models/discoursedep.ser.gz"
  val modelPath = "models/summarizer-full.ser.gz"
  val inputDir = ""
  val outputDir = ""
  // Indicates that we shouldn't do any discourse preprocessing; this is only appropriate
  // for the sentence-extractive version of the system
  val noRst = false
  // Summary budget, in words. Set this to whatever you want it to.
  val budget = 50

  /**
   * Summarizes every CoNLL-format file (with parses/coref/NER) in inputDir
   * and writes one summary file per input into outputDir. See the class
   * comment above for the supported command-line flags.
   */
  def main(args: Array[String]): Unit = {
    LightRunner.initializeOutput(Summarizer.getClass())
    // Overwrites this object's vals from "-name value" command-line pairs.
    LightRunner.populateScala(Summarizer.getClass(), args)
    Logger.logss("Loading model...")
    val model = IOUtils.readObjFile(modelPath).asInstanceOf[CompressiveAnaphoraSummarizer]
    Logger.logss("Model loaded!")
    // The EDU segmenter and discourse parser are only needed when discourse
    // preprocessing is enabled (i.e. noRst is false).
    val (segmenter, discourseParser) = if (noRst) {
      (None, None)
    } else {
      Logger.logss("Loading segmenter...")
      val tmpSegmenter = IOUtils.readObjFile(segmenterPath).asInstanceOf[EDUSegmenter]
      Logger.logss("Segmenter loaded!")
      Logger.logss("Loading discourse parser...")
      val tmpDiscourseParser = IOUtils.readObjFile(discourseParserPath).asInstanceOf[DiscourseDependencyParser]
      Logger.logss("Discourse parser loaded!")
      (Some(tmpSegmenter), Some(tmpDiscourseParser))
    }
    val numberGenderComputer = NumberGenderComputer.readBergsmaLinData(numberGenderPath)
    val mpc = new MentionPropertyComputer(Some(numberGenderComputer))
    val reader = new ConllDocReader(Language.ENGLISH)
    val assembler = new CorefDocAssembler(new EnglishCorefLanguagePack, true)
    val filesToSummarize = new File(inputDir).listFiles()
    for (file <- filesToSummarize) {
      val conllDoc = reader.readConllDocs(file.getAbsolutePath).head
      val corefDoc = assembler.createCorefDoc(conllDoc, mpc)
      val summDoc = SummDoc.makeSummDoc(conllDoc.docID, corefDoc, Seq())
      val ex = if (noRst) {
        DiscourseDepExProcessed.makeTrivial(summDoc)
      } else {
        DiscourseDepExProcessed.makeWithEduAndSyntactic(summDoc, segmenter.get, discourseParser.get)
      }
      val summaryLines = model.summarize(ex, budget, true)
      val outWriter = IOUtils.openOutHard(outputDir + "/" + file.getName)
      // Ensure the output file is closed even if writing fails partway,
      // so earlier documents' summaries are not lost to a leaked handle.
      try {
        for (summLine <- summaryLines) {
          outWriter.println(summLine)
        }
      } finally {
        outWriter.close()
      }
    }
    LightRunner.finalizeOutput()
  }
}
package com.asto.dop.streamprocessor.process
import com.asto.dop.core.CoreModel
import com.asto.dop.streamprocessor.DOPContext
import org.apache.spark.rdd.RDD
import org.elasticsearch.spark._
/**
 * Persists batches of [[CoreModel]] records into Elasticsearch.
 * Serializable because it is used from Spark closures.
 */
object ESProcessor extends Serializable {

  /**
   * Writes every record of the RDD to the Elasticsearch resource
   * "<es_index>/core", where the index name comes from the supplied
   * [[DOPContext]].
   */
  def save(messages: RDD[CoreModel], context: DOPContext): Unit = {
    messages.saveToEs(s"${context.es_index}/core")
  }
}
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.annotators.sda.vivekn
import com.johnsnowlabs.nlp.annotators.common.{TokenizedSentence, TokenizedWithSentence}
import com.johnsnowlabs.nlp.serialization.{MapFeature, SetFeature}
import com.johnsnowlabs.nlp.{Annotation, AnnotatorModel, HasPretrained, ParamsAndFeaturesReadable, HasSimpleAnnotate}
import org.apache.spark.ml.param.{DoubleParam, IntParam, LongParam}
import org.apache.spark.ml.util.Identifiable
/** Sentiment analyser inspired by the algorithm by Vivek Narayanan [[https://github.com/vivekn/sentiment/]].
*
* The algorithm is based on the paper
* [[https://arxiv.org/abs/1305.6143 "Fast and accurate sentiment classification using an enhanced Naive Bayes model"]].
*
* This is the instantiated model of the [[com.johnsnowlabs.nlp.annotators.sda.vivekn.ViveknSentimentApproach ViveknSentimentApproach]].
* For training your own model, please see the documentation of that class.
*
* The analyzer requires sentence boundaries to give a score in context.
* Tokenization is needed to make sure tokens are within bounds. Transitivity requirements are also required.
*
* For extended examples of usage, see the [[https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/training/english/vivekn-sentiment/VivekNarayanSentimentApproach.ipynb Spark NLP Workshop]]
* and the [[https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/sda/vivekn ViveknSentimentTestSpec]].
*
* @see [[com.johnsnowlabs.nlp.annotators.sda.pragmatic.SentimentDetector SentimentDetector]] for an alternative approach to sentiment detection
* @groupname anno Annotator types
* @groupdesc anno Required input and expected output annotator types
* @groupname Ungrouped Members
* @groupname param Parameters
* @groupname setParam Parameter setters
* @groupname getParam Parameter getters
* @groupname Ungrouped Members
* @groupprio param 1
* @groupprio anno 2
* @groupprio Ungrouped 3
* @groupprio setParam 4
* @groupprio getParam 5
* @groupdesc param A list of (hyper-)parameter keys this annotator can take. Users can set and get the parameter values through setters and getters, respectively.
*/
class ViveknSentimentModel(override val uid: String) extends AnnotatorModel[ViveknSentimentModel] with HasSimpleAnnotate[ViveknSentimentModel] with ViveknSentimentUtils {

  import com.johnsnowlabs.nlp.AnnotatorType._

  /** Output annotator type : SENTIMENT
    *
    * @group anno
    **/
  override val outputAnnotatorType: AnnotatorType = SENTIMENT

  /** Input annotator type : SENTIMENT
    *
    * @group anno
    **/
  override val inputAnnotatorTypes: Array[AnnotatorType] = Array(TOKEN, DOCUMENT)

  /** positive_sentences
    *
    * Per-word occurrence counts observed in positive training sentences.
    *
    * @group param
    **/
  protected val positive: MapFeature[String, Long] = new MapFeature(this, "positive_sentences")

  /** negative_sentences
    *
    * Per-word occurrence counts observed in negative training sentences.
    *
    * @group param
    **/
  protected val negative: MapFeature[String, Long] = new MapFeature(this, "negative_sentences")

  /** words
    *
    * The selected feature vocabulary; only these words participate in classification.
    *
    * @group param
    **/
  protected val words: SetFeature[String] = new SetFeature[String](this, "words")

  /** Count of positive words
    *
    * @group param
    **/
  val positiveTotals: LongParam = new LongParam(this, "positive_totals", "Count of positive words")

  /** Count of negative words
    *
    * @group param
    **/
  val negativeTotals: LongParam = new LongParam(this, "negative_totals", "Count of negative words")

  /** Proportion of feature content to be considered relevant (Default: `0.5`)
    *
    * @group param
    **/
  val importantFeatureRatio = new DoubleParam(this, "importantFeatureRatio", "Proportion of feature content to be considered relevant (Default: `0.5`)")

  /** Proportion to lookahead in unimportant features (Default: `0.025`)
    *
    * @group param
    **/
  val unimportantFeatureStep = new DoubleParam(this, "unimportantFeatureStep", "Proportion to lookahead in unimportant features (Default: `0.025`)")

  /** Content feature limit, to boost performance in very dirt text (Default: disabled with `-1`)
    *
    * @group param
    **/
  val featureLimit = new IntParam(this, "featureLimit", "Content feature limit, to boost performance in very dirt text (Default disabled with: -1")

  def this() = this(Identifiable.randomUID("VIVEKN"))

  /** Set Proportion of feature content to be considered relevant (Default: `0.5`)
    *
    * @group setParam
    **/
  def setImportantFeatureRatio(v: Double): this.type = set(importantFeatureRatio, v)

  /** Set Proportion to lookahead in unimportant features (Default: `0.025`)
    *
    * @group setParam
    **/
  def setUnimportantFeatureStep(v: Double): this.type = set(unimportantFeatureStep, v)

  /** Set Content feature limit, to boost performance in very dirt text (Default: disabled with `-1`)
    *
    * @group setParam
    **/
  def setFeatureLimit(v: Int): this.type = set(featureLimit, v)

  /** Get Proportion of feature content to be considered relevant (Default: `0.5`)
    *
    * NOTE(review): the `v` argument is unused — the getter returns the current
    * param value. Kept as-is for binary/API compatibility.
    */
  def getImportantFeatureRatio(v: Double): Double = $(importantFeatureRatio)

  /** Get Proportion to lookahead in unimportant features (Default: `0.025`)
    *
    * NOTE(review): the `v` argument is unused; see getImportantFeatureRatio.
    */
  def getUnimportantFeatureStep(v: Double): Double = $(unimportantFeatureStep)

  /** Get Content feature limit, to boost performance in very dirt text (Default: disabled with `-1`)
    *
    * NOTE(review): the `v` argument is unused; see getImportantFeatureRatio.
    *
    * @group getParam
    **/
  def getFeatureLimit(v: Int): Int = $(featureLimit)

  /** Count of positive words
    *
    * @group getParam
    **/
  def getPositive: Map[String, Long] = $$(positive)

  /** Count of negative words
    *
    * @group getParam
    **/
  def getNegative: Map[String, Long] = $$(negative)

  /** Set of unique words
    *
    * @group getParam
    **/
  def getFeatures: Set[String] = $$(words)

  private[vivekn] def setPositive(value: Map[String, Long]): this.type = set(positive, value)

  private[vivekn] def setNegative(value: Map[String, Long]): this.type = set(negative, value)

  private[vivekn] def setPositiveTotals(value: Long): this.type = set(positiveTotals, value)

  private[vivekn] def setNegativeTotals(value: Long): this.type = set(negativeTotals, value)

  /** Selects the feature vocabulary from `value` (assumed pre-ranked by the
    * trainer — TODO confirm ordering contract with ViveknSentimentApproach):
    * the first `importantFeatureRatio` fraction is kept entirely; the rest is
    * subsampled by taking a slice of `unimportantFeatureStep`-sized steps,
    * optionally capped at `featureLimit` total candidates.
    */
  private[vivekn] def setWords(value: Array[String]): this.type = {
    require(value.nonEmpty, "Word analysis for features cannot be empty. Set prune to false if training is small")
    val currentFeatures = scala.collection.mutable.Set.empty[String]
    // Keep the top `importantFeatureRatio` fraction unconditionally.
    val start = (value.length * $(importantFeatureRatio)).ceil.toInt
    // Cap the candidate range at featureLimit unless disabled (-1).
    val afterStart = {
      if ($(featureLimit) == -1) value.length
      else $(featureLimit)
    }
    val step = (afterStart * $(unimportantFeatureStep)).ceil.toInt
    value.take(start).foreach(currentFeatures.add)
    // Sample the remaining range: one `step`-sized slice every `step` positions.
    Range(start, afterStart, step).foreach(k => {
      value.slice(k, k+step).foreach(currentFeatures.add)
    })
    set(words, currentFeatures.toSet)
  }

  /** Positive: 0, Negative: 1, NA: 2
    *
    * Naive-Bayes classification of one tokenized sentence, returning the
    * predicted class and a confidence in [0, 1]. Returns (2, 0.0) when none
    * of the sentence's (negation-processed) tokens are known features.
    */
  def classify(sentence: TokenizedSentence): (Short, Double) = {
    // negateSequence comes from ViveknSentimentUtils; only tokens in the
    // selected feature vocabulary are used.
    val wordFeatures = negateSequence(sentence.tokens).intersect($$(words)).toList
    if (wordFeatures.isEmpty) return (2, 0.0)
    // Add-one (Laplace) smoothed log-likelihoods under each class.
    val positiveScore = wordFeatures.map(word => scala.math.log(($$(positive).getOrElse(word, 0L) + 1.0) / (2.0 * $(positiveTotals)))).sum
    val negativeScore = wordFeatures.map(word => scala.math.log(($$(negative).getOrElse(word, 0L) + 1.0) / (2.0 * $(negativeTotals)))).sum
    // Confidence is the share of raw class counts, computed lazily since only
    // the winning side's confidence is needed.
    val positiveSum = wordFeatures.map(word => $$(positive).getOrElse(word, 0L)).sum.toDouble
    val negativeSum = wordFeatures.map(word => $$(negative).getOrElse(word, 0L)).sum.toDouble
    lazy val positiveConfidence = positiveSum / (positiveSum + negativeSum)
    lazy val negativeConfidence = negativeSum / (positiveSum + negativeSum)
    if (positiveScore > negativeScore) (0, positiveConfidence) else (1, negativeConfidence)
  }

  /**
    * Tokens are needed to identify each word in a sentence boundary
    * POS tags are optionally submitted to the model in case they are needed
    * Lemmas are another optional annotator for some models
    * Bounds of sentiment are hardcoded to 0 as they render useless
    * @param annotations Annotations that correspond to inputAnnotationCols generated by previous annotators if any
    * @return any number of annotations processed for every input annotation. Not necessary one to one relationship
    */
  override def annotate(annotations: Seq[Annotation]): Seq[Annotation] = {
    val sentences = TokenizedWithSentence.unpack(annotations)
    // One SENTIMENT annotation per non-empty sentence, spanning the sentence's
    // token range; the result label is "positive"/"negative"/"na".
    sentences.filter(s => s.indexedTokens.nonEmpty).map(sentence => {
      val (result, confidence) = classify(sentence)
      Annotation(
        outputAnnotatorType,
        sentence.indexedTokens.map(t => t.begin).min,
        sentence.indexedTokens.map(t => t.end).max,
        if (result == 0) "positive" else if (result == 1) "negative" else "na",
        Map("confidence" -> confidence.toString.take(6))
      )
    })
  }
}
/** Adds pretrained-model loading for [[ViveknSentimentModel]], with
  * "sentiment_vivekn" as the default model name, plus concrete overloads of
  * `pretrained` so the API is callable from Java. */
trait ReadablePretrainedVivekn extends ParamsAndFeaturesReadable[ViveknSentimentModel] with HasPretrained[ViveknSentimentModel] {
  override val defaultModelName = Some("sentiment_vivekn")

  /** Java compliant-overrides */
  override def pretrained(): ViveknSentimentModel = super.pretrained()

  override def pretrained(name: String): ViveknSentimentModel = super.pretrained(name)

  override def pretrained(name: String, lang: String): ViveknSentimentModel = super.pretrained(name, lang)

  override def pretrained(name: String, lang: String, remoteLoc: String): ViveknSentimentModel = super.pretrained(name, lang, remoteLoc)
}

/**
 * This is the companion object of [[ViveknSentimentModel]]. Please refer to that class for the documentation.
 */
object ViveknSentimentModel extends ReadablePretrainedVivekn
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CarbonException
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.metadata.datatype.{DataTypes => CarbonType}
import org.apache.carbondata.spark.CarbonOption
/**
 * Writes a DataFrame into a CarbonData table, optionally creating the table
 * first from the DataFrame's schema.
 */
class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {

  // NOTE(review): LOGGER appears unused in this class — confirm before removing.
  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  /** Creates the target table from the DataFrame's schema (if absent), then
    * loads the DataFrame's content into it. */
  def saveAsCarbonFile(parameters: Map[String, String] = Map()): Unit = {
    // create a new table using dataframe's schema and write its content into the table
    sqlContext.sparkSession.sql(
      makeCreateTableString(dataFrame.schema, new CarbonOption(parameters)))
    writeToCarbonFile(parameters)
  }

  /** Loads the DataFrame into an already-existing table. */
  def appendToCarbonFile(parameters: Map[String, String] = Map()): Unit = {
    writeToCarbonFile(parameters)
  }

  // Common write path for both save and append.
  private def writeToCarbonFile(parameters: Map[String, String] = Map()): Unit = {
    val options = new CarbonOption(parameters)
    loadDataFrame(options)
  }

  /**
   * Loading DataFrame directly without saving DataFrame to CSV files.
   * @param options carbon options (db/table name, overwrite flag, etc.)
   */
  private def loadDataFrame(options: CarbonOption): Unit = {
    // The column list is passed as the "fileheader" load option.
    val header = dataFrame.columns.mkString(",")
    CarbonLoadDataCommand(
      Some(CarbonEnv.getDatabaseName(options.dbName)(sqlContext.sparkSession)),
      options.tableName,
      null,
      Seq(),
      Map("fileheader" -> header) ++ options.toMap,
      isOverwriteTable = options.overwriteEnabled,
      null,
      Some(dataFrame)).run(sqlContext.sparkSession)
  }

  /** Maps a Spark SQL type name to the corresponding Carbon type name;
    * throws an AnalysisException for unsupported types. */
  private def convertToCarbonType(sparkType: DataType): String = {
    sparkType match {
      case StringType => CarbonType.STRING.getName
      case IntegerType => CarbonType.INT.getName
      case ShortType => CarbonType.SHORT.getName
      case LongType => CarbonType.LONG.getName
      case FloatType => CarbonType.FLOAT.getName
      case DoubleType => CarbonType.DOUBLE.getName
      case TimestampType => CarbonType.TIMESTAMP.getName
      case DateType => CarbonType.DATE.getName
      case decimal: DecimalType => s"decimal(${decimal.precision}, ${decimal.scale})"
      case BooleanType => CarbonType.BOOLEAN.getName
      case other => CarbonException.analysisException(s"unsupported type: $other")
    }
  }

  /** Builds the CREATE TABLE IF NOT EXISTS statement for `schema`, applying
    * the table properties, partition columns, and location from `options`.
    * Partition columns are validated (no duplicates, must exist in the schema)
    * and excluded from the main column list. */
  private def makeCreateTableString(schema: StructType, options: CarbonOption): String = {
    // Only options that were actually set become TBLPROPERTIES entries.
    val property = Map(
      "SORT_COLUMNS" -> options.sortColumns,
      "SORT_SCOPE" -> options.sortScope,
      "DICTIONARY_INCLUDE" -> options.dictionaryInclude,
      "DICTIONARY_EXCLUDE" -> options.dictionaryExclude,
      "LONG_STRING_COLUMNS" -> options.longStringColumns,
      "TABLE_BLOCKSIZE" -> options.tableBlockSize,
      "TABLE_BLOCKLET_SIZE" -> options.tableBlockletSize,
      "STREAMING" -> Option(options.isStreaming.toString)
    ).filter(_._2.isDefined)
      .map(property => s"'${property._1}' = '${property._2.get}'").mkString(",")
    // Validate and render "col type" declarations for the PARTITIONED BY clause.
    val partition: Seq[String] = if (options.partitionColumns.isDefined) {
      if (options.partitionColumns.get.toSet.size != options.partitionColumns.get.length) {
        throw new MalformedCarbonCommandException(s"repeated partition column")
      }
      options.partitionColumns.get.map { column =>
        val field = schema.fields.find(_.name.equalsIgnoreCase(column))
        if (field.isEmpty) {
          throw new MalformedCarbonCommandException(s"invalid partition column: $column")
        }
        s"$column ${field.get.dataType.typeName}"
      }
    } else {
      Seq()
    }
    // Partition columns must not also appear in the main column list.
    val schemaWithoutPartition = if (options.partitionColumns.isDefined) {
      val partitionCols = options.partitionColumns.get
      val fields = schema.filterNot {
        field => partitionCols.exists(_.equalsIgnoreCase(field.name))
      }
      StructType(fields)
    } else {
      schema
    }
    val carbonSchema = schemaWithoutPartition.map { field =>
      s"${ field.name } ${ convertToCarbonType(field.dataType) }"
    }
    val dbName = CarbonEnv.getDatabaseName(options.dbName)(sqlContext.sparkSession)
    s"""
       | CREATE TABLE IF NOT EXISTS $dbName.${options.tableName}
       | (${ carbonSchema.mkString(", ") })
       | ${ if (partition.nonEmpty) s"PARTITIONED BY (${partition.mkString(", ")})" else ""}
       | STORED BY 'carbondata'
       | ${ if (options.tablePath.nonEmpty) s"LOCATION '${options.tablePath.get}'" else ""}
       | ${ if (property.nonEmpty) "TBLPROPERTIES (" + property + ")" else "" }
       |
     """.stripMargin
  }
}
| sgururajshetty/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala | Scala | apache-2.0 | 5,513 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.collection.mutable.ArrayBuilder
import org.apache.spark.annotation.Since
import org.apache.spark.ml._
import org.apache.spark.ml.attribute.{AttributeGroup, _}
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{StructField, StructType}
/**
* Params for [[Selector]] and [[SelectorModel]].
*/
private[feature] trait SelectorParams extends Params
  with HasFeaturesCol with HasLabelCol with HasOutputCol {

  /**
   * Number of features that selector will select, ordered by ascending p-value. If the
   * number of features is less than numTopFeatures, then this will select all features.
   * Only applicable when selectorType = "numTopFeatures".
   * The default value of numTopFeatures is 50.
   *
   * @group param
   */
  @Since("3.1.0")
  final val numTopFeatures = new IntParam(this, "numTopFeatures",
    "Number of features that selector will select, ordered by ascending p-value. If the" +
      " number of features is < numTopFeatures, then this will select all features.",
    ParamValidators.gtEq(1))
  setDefault(numTopFeatures -> 50)

  /** @group getParam */
  @Since("3.1.0")
  def getNumTopFeatures: Int = $(numTopFeatures)

  /**
   * Percentile of features that selector will select, ordered by ascending p-value.
   * Only applicable when selectorType = "percentile".
   * Default value is 0.1.
   * @group param
   */
  @Since("3.1.0")
  final val percentile = new DoubleParam(this, "percentile",
    "Percentile of features that selector will select, ordered by ascending p-value.",
    ParamValidators.inRange(0, 1))
  setDefault(percentile -> 0.1)

  /** @group getParam */
  @Since("3.1.0")
  def getPercentile: Double = $(percentile)

  /**
   * The highest p-value for features to be kept.
   * Only applicable when selectorType = "fpr".
   * Default value is 0.05.
   * @group param
   */
  @Since("3.1.0")
  final val fpr = new DoubleParam(this, "fpr", "The higest p-value for features to be kept.",
    ParamValidators.inRange(0, 1))
  setDefault(fpr -> 0.05)

  /** @group getParam */
  @Since("3.1.0")
  def getFpr: Double = $(fpr)

  /**
   * The upper bound of the expected false discovery rate.
   * Only applicable when selectorType = "fdr".
   * Default value is 0.05.
   * @group param
   */
  @Since("3.1.0")
  final val fdr = new DoubleParam(this, "fdr",
    "The upper bound of the expected false discovery rate.", ParamValidators.inRange(0, 1))
  setDefault(fdr -> 0.05)

  /** @group getParam */
  def getFdr: Double = $(fdr)

  /**
   * The upper bound of the expected family-wise error rate.
   * Only applicable when selectorType = "fwe".
   * Default value is 0.05.
   * @group param
   */
  @Since("3.1.0")
  final val fwe = new DoubleParam(this, "fwe",
    "The upper bound of the expected family-wise error rate.", ParamValidators.inRange(0, 1))
  setDefault(fwe -> 0.05)

  /** @group getParam */
  def getFwe: Double = $(fwe)

  /**
   * The selector type.
   * Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe"
   *
   * Each strategy consults exactly one of the threshold params above
   * (numTopFeatures, percentile, fpr, fdr, or fwe respectively).
   *
   * @group param
   */
  @Since("3.1.0")
  final val selectorType = new Param[String](this, "selectorType",
    "The selector type. Supported options: numTopFeatures, percentile, fpr, fdr, fwe",
    ParamValidators.inArray(Array("numTopFeatures", "percentile", "fpr", "fdr",
      "fwe")))
  setDefault(selectorType -> "numTopFeatures")

  /** @group getParam */
  @Since("3.1.0")
  def getSelectorType: String = $(selectorType)
}
/**
* Super class for feature selectors.
* 1. Chi-Square Selector
* This feature selector is for categorical features and categorical labels.
* 2. ANOVA F-value Classification Selector
* This feature selector is for continuous features and categorical labels.
* 3. Regression F-value Selector
* This feature selector is for continuous features and continuous labels.
* The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
* `fdr`, `fwe`.
* - `numTopFeatures` chooses a fixed number of top features according to a hypothesis.
* - `percentile` is similar but chooses a fraction of all features instead of a fixed number.
* - `fpr` chooses all features whose p-value are below a threshold, thus controlling the false
* positive rate of selection.
* - `fdr` uses the [Benjamini-Hochberg procedure]
* (https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure)
* to choose all features whose false discovery rate is below a threshold.
* - `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
* 1/numFeatures, thus controlling the family-wise error rate of selection.
* By default, the selection method is `numTopFeatures`, with the default number of top features
* set to 50.
*/
private[ml] abstract class Selector[T <: SelectorModel[T]]
  extends Estimator[T] with SelectorParams with DefaultParamsWritable {

  /** @group setParam */
  @Since("3.1.0")
  def setFeaturesCol(value: String): this.type = set(featuresCol, value)

  /** @group setParam */
  @Since("3.1.0")
  def setOutputCol(value: String): this.type = set(outputCol, value)

  /** @group setParam */
  @Since("3.1.0")
  def setNumTopFeatures(value: Int): this.type = set(numTopFeatures, value)

  /** @group setParam */
  @Since("3.1.0")
  def setPercentile(value: Double): this.type = set(percentile, value)

  /** @group setParam */
  @Since("3.1.0")
  def setFpr(value: Double): this.type = set(fpr, value)

  /** @group setParam */
  @Since("3.1.0")
  def setFdr(value: Double): this.type = set(fdr, value)

  /** @group setParam */
  @Since("3.1.0")
  def setFwe(value: Double): this.type = set(fwe, value)

  /** @group setParam */
  @Since("3.1.0")
  def setSelectorType(value: String): this.type = set(selectorType, value)

  /** @group setParam */
  @Since("3.1.0")
  def setLabelCol(value: String): this.type = set(labelCol, value)

  /**
   * get the SelectionTestResult for every feature against the label
   */
  protected[this] def getSelectionTestResult(df: DataFrame): DataFrame

  /**
   * Create a new instance of concrete SelectorModel.
   * @param indices The indices of the selected features
   * @return A new SelectorModel instance
   */
  protected[this] def createSelectorModel(
    uid: String,
    indices: Array[Int]): T

  @Since("3.1.0")
  override def fit(dataset: Dataset[_]): T = {
    transformSchema(dataset.schema, logging = true)
    val spark = dataset.sparkSession
    import spark.implicits._

    val numFeatures = MetadataUtils.getNumFeatures(dataset, $(featuresCol))
    // One row per feature, with "featureIndex" and "pValue" columns.
    val resultDF = getSelectionTestResult(dataset.toDF)

    // Indices of the k smallest p-values; ties broken by feature index
    // so the result is deterministic.
    def getTopIndices(k: Int): Array[Int] = {
      resultDF.sort("pValue", "featureIndex")
        .select("featureIndex")
        .limit(k)
        .as[Int]
        .collect()
    }

    val indices = $(selectorType) match {
      case "numTopFeatures" =>
        getTopIndices($(numTopFeatures))
      case "percentile" =>
        // Keep a fixed fraction of all features (truncated to an int count).
        getTopIndices((numFeatures * getPercentile).toInt)
      case "fpr" =>
        // All features whose p-value is below the raw threshold.
        resultDF.select("featureIndex")
          .where(col("pValue") < $(fpr))
          .as[Int].collect()
      case "fdr" =>
        // This uses the Benjamini-Hochberg procedure.
        // https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure
        val f = $(fdr) / numFeatures
        // Largest rank (0-based) whose p-value is under the scaled threshold; -1 if none.
        val maxIndex = resultDF.sort("pValue", "featureIndex")
          .select("pValue")
          .as[Double].rdd
          .zipWithIndex
          .flatMap { case (pValue, index) =>
            if (pValue <= f * (index + 1)) {
              Iterator.single(index.toInt)
            } else Iterator.empty
          }.fold(-1)(math.max)
        if (maxIndex >= 0) {
          getTopIndices(maxIndex + 1)
        } else Array.emptyIntArray
      case "fwe" =>
        // Threshold scaled by 1/numFeatures to control the family-wise error rate.
        resultDF.select("featureIndex")
          .where(col("pValue") < $(fwe) / numFeatures)
          .as[Int].collect()
      case errorType =>
        throw new IllegalStateException(s"Unknown Selector Type: $errorType")
    }

    // SelectorModel requires strictly increasing indices, hence the sort.
    copyValues(createSelectorModel(uid, indices.sorted)
      .setParent(this))
  }

  @Since("3.1.0")
  override def transformSchema(schema: StructType): StructType = {
    SchemaUtils.checkColumnType(schema, $(featuresCol), new VectorUDT)
    SchemaUtils.checkNumericType(schema, $(labelCol))
    SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT)
  }

  @Since("3.1.0")
  override def copy(extra: ParamMap): Selector[T] = defaultCopy(extra)
}
/**
* Model fitted by [[Selector]].
*/
@Since("3.1.0")
private[ml] abstract class SelectorModel[T <: SelectorModel[T]] (
    @Since("3.1.0") val uid: String,
    @Since("3.1.0") val selectedFeatures: Array[Int])
  extends Model[T] with SelectorParams with MLWritable {
  self: T =>

  // selectedFeatures must be strictly increasing (sorted, duplicate-free);
  // the two-pointer merge in SelectorModel.compressSparse relies on this.
  if (selectedFeatures.length >= 2) {
    require(selectedFeatures.sliding(2).forall(l => l(0) < l(1)),
      "Index should be strictly increasing.")
  }

  /** @group setParam */
  @Since("3.1.0")
  def setFeaturesCol(value: String): this.type = set(featuresCol, value)

  /** @group setParam */
  @Since("3.1.0")
  def setOutputCol(value: String): this.type = set(outputCol, value)

  // Controls whether default output attributes are numeric or nominal
  // when the input column carries no per-feature metadata.
  protected def isNumericAttribute = true

  /** Appends the output column containing only the selected features. */
  @Since("3.1.0")
  override def transform(dataset: Dataset[_]): DataFrame = {
    val outputSchema = transformSchema(dataset.schema, logging = true)
    SelectorModel.transform(dataset, selectedFeatures, outputSchema, $(outputCol), $(featuresCol))
  }

  @Since("3.1.0")
  override def transformSchema(schema: StructType): StructType = {
    SchemaUtils.checkColumnType(schema, $(featuresCol), new VectorUDT)
    val newField =
      SelectorModel.prepOutputField(schema, selectedFeatures, $(outputCol), $(featuresCol),
        isNumericAttribute)
    SchemaUtils.appendColumn(schema, newField)
  }
}
private[feature] object SelectorModel {

  /**
   * Returns `dataset` with an extra `outputCol` vector column containing only
   * the `selectedFeatures` entries of `featuresCol`, preserving sparsity.
   */
  def transform(
      dataset: Dataset[_],
      selectedFeatures: Array[Int],
      outputSchema: StructType,
      outputCol: String,
      featuresCol: String): DataFrame = {
    val newSize = selectedFeatures.length
    val func = { vector: Vector =>
      vector match {
        case SparseVector(_, indices, values) =>
          val (newIndices, newValues) =
            compressSparse(indices, values, selectedFeatures)
          Vectors.sparse(newSize, newIndices, newValues)
        case DenseVector(values) =>
          // Re-index: slot i of the output holds feature selectedFeatures(i).
          Vectors.dense(selectedFeatures.map(values))
        case other =>
          throw new UnsupportedOperationException(
            s"Only sparse and dense vectors are supported but got ${other.getClass}.")
      }
    }
    val transformer = udf(func)
    dataset.withColumn(outputCol, transformer(col(featuresCol)),
      outputSchema(outputCol).metadata)
  }

  /**
   * Prepare the output column field, including per-feature metadata.
   */
  def prepOutputField(
      schema: StructType,
      selectedFeatures: Array[Int],
      outputCol: String,
      featuresCol: String,
      isNumericAttribute: Boolean): StructField = {
    val selector = selectedFeatures.toSet
    val origAttrGroup = AttributeGroup.fromStructField(schema(featuresCol))
    // Keep the original attributes of the selected features when the input
    // column has per-feature metadata; otherwise fill with default attributes.
    val featureAttributes: Array[Attribute] = if (origAttrGroup.attributes.nonEmpty) {
      origAttrGroup.attributes.get.zipWithIndex.filter(x => selector.contains(x._2)).map(_._1)
    } else {
      if (isNumericAttribute) {
        Array.fill[Attribute](selector.size)(NumericAttribute.defaultAttr)
      } else {
        Array.fill[Attribute](selector.size)(NominalAttribute.defaultAttr)
      }
    }
    val newAttributeGroup = new AttributeGroup(outputCol, featureAttributes)
    newAttributeGroup.toStructField()
  }

  /**
   * Intersects a sparse vector's (indices, values) with `selectedFeatures`
   * using a two-pointer merge; both arrays must be sorted ascending.
   * The returned indices are positions within `selectedFeatures` (the
   * compressed index space), not the original feature indices.
   */
  def compressSparse(
      indices: Array[Int],
      values: Array[Double],
      selectedFeatures: Array[Int]): (Array[Int], Array[Double]) = {
    val newValues = new ArrayBuilder.ofDouble
    val newIndices = new ArrayBuilder.ofInt
    var i = 0
    var j = 0
    while (i < indices.length && j < selectedFeatures.length) {
      val indicesIdx = indices(i)
      val filterIndicesIdx = selectedFeatures(j)
      if (indicesIdx == filterIndicesIdx) {
        newIndices += j
        newValues += values(i)
        j += 1
        i += 1
      } else {
        if (indicesIdx > filterIndicesIdx) {
          // Selected feature not present in the sparse vector (implicitly zero).
          j += 1
        } else {
          // Feature present but not selected: drop it.
          i += 1
        }
      }
    }
    // TODO: Sparse representation might be ineffective if (newSize ~= newValues.size)
    (newIndices.result(), newValues.result())
  }
}
| dbtsai/spark | mllib/src/main/scala/org/apache/spark/ml/feature/Selector.scala | Scala | apache-2.0 | 13,541 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.test
import java.sql.{Connection, ResultSet, Statement}
import org.schedoscope.dsl.{FieldLike, View}
import org.schedoscope.schema.ddl.HiveQl
import scala.collection.mutable.{HashMap, ListBuffer}
/**
 * Thin JDBC wrapper used by tests to read back the rows of a Hive view.
 *
 * @param conn open JDBC connection the queries run on (not closed here)
 * @param url  JDBC URL the connection was created from
 */
class Database(conn: Connection, url: String) {

  /**
   * Executes `query` and deserializes each result row into a map keyed by the
   * field names of view `v` (values deserialized per field type via ViewSerDe).
   *
   * @param v            view whose fields define the expected column order/types
   * @param query        SQL to execute
   * @param orderByField if set, rows are sorted by that field's string value
   *                     (nulls sort first, as empty string)
   * @return one map per result row
   */
  def selectForViewByQuery(v: View, query: String, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] = {
    val rows = ListBuffer[Map[String, Any]]()
    var statement: Statement = null
    var rs: ResultSet = null

    try {
      statement = conn.createStatement()
      rs = statement.executeQuery(query)
      while (rs.next()) {
        val row = HashMap[String, Any]()
        // JDBC columns are 1-based, hence the index shift.
        v.fields.view.zipWithIndex.foreach { case (field, i) =>
          row.put(field.n, ViewSerDe.deserializeField(field.t, rs.getString(i + 1)))
        }
        rows.append(row.toMap)
      }
    } finally {
      // Best-effort cleanup: close failures are deliberately swallowed so they
      // never mask an exception thrown while reading the result set.
      if (rs != null) try {
        rs.close()
      } catch {
        case _: Throwable =>
      }
      if (statement != null) try {
        statement.close()
      } catch {
        case _: Throwable =>
      }
    }

    orderByField match {
      case Some(f) =>
        rows.sortBy { row =>
          row(f.n) match {
            case null => ""
            case other => other.toString
          }
        }.toList
      case None =>
        rows.toList
    }
  }

  /** Selects all rows of view `v` (via HiveQl.selectAll), optionally sorted. */
  def selectView(v: View, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] =
    selectForViewByQuery(v, HiveQl.selectAll(v), orderByField)
} | christianrichter/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/test/Database.scala | Scala | apache-2.0 | 2,068 |
package frameless
import org.apache.spark.sql.Encoder
import org.scalacheck.{Arbitrary, Gen, Prop}
import org.scalacheck.Prop._
import org.scalatest.matchers.should.Matchers
import scala.reflect.ClassTag
/**
 * Property-based tests checking that the typed column arithmetic operators
 * (+, -, *, /, %) and NaN handling agree with plain Scala arithmetic.
 */
class NumericTests extends TypedDatasetSuite with Matchers {

  test("plus") {
    def prop[A: TypedEncoder: CatalystNumeric: Numeric](a: A, b: A): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      val result = implicitly[Numeric[A]].plus(a, b)
      val got = df.select(df.col('a) + df.col('b)).collect().run()

      got ?= (result :: Nil)
    }

    check(prop[BigDecimal] _)
    check(prop[Byte] _)
    check(prop[Double] _)
    check(prop[Int] _)
    check(prop[Long] _)
    check(prop[Short] _)
  }

  test("minus") {
    def prop[A: TypedEncoder: CatalystNumeric: Numeric](a: A, b: A): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      val result = implicitly[Numeric[A]].minus(a, b)
      val got = df.select(df.col('a) - df.col('b)).collect().run()

      got ?= (result :: Nil)
    }

    check(prop[BigDecimal] _)
    check(prop[Byte] _)
    check(prop[Double] _)
    check(prop[Int] _)
    check(prop[Long] _)
    check(prop[Short] _)
  }

  test("multiply") {
    def prop[A: TypedEncoder : CatalystNumeric : Numeric : ClassTag](a: A, b: A): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      val result = implicitly[Numeric[A]].times(a, b)
      val got = df.select(df.col('a) * df.col('b)).collect().run()

      got ?= (result :: Nil)
    }

    // Note: BigDecimal multiplication is covered separately below because it
    // needs an approximate comparison.
    check(prop[Byte] _)
    check(prop[Double] _)
    check(prop[Int] _)
    check(prop[Long] _)
    check(prop[Short] _)
  }

  test("divide") {
    def prop[A: TypedEncoder: CatalystNumeric: Numeric](a: A, b: A)(implicit cd: CatalystDivisible[A, Double]): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      // Division by zero is skipped rather than asserted on.
      if (b == 0) proved else {
        val div: Double = implicitly[Numeric[A]].toDouble(a) / implicitly[Numeric[A]].toDouble(b)
        val got: Seq[Double] = df.select(df.col('a) / df.col('b)).collect().run()

        got ?= (div :: Nil)
      }
    }

    check(prop[Byte  ] _)
    check(prop[Double] _)
    check(prop[Int   ] _)
    check(prop[Long  ] _)
    check(prop[Short ] _)
  }

  test("divide BigDecimals") {
    def prop(a: BigDecimal, b: BigDecimal): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      if (b.doubleValue == 0) proved else {
        // Spark performs something in between Double division and BigDecimal division,
        // we approximate it using double vision and `approximatelyEqual`:
        val div = BigDecimal(a.doubleValue / b.doubleValue)
        val got = df.select(df.col('a) / df.col('b)).collect().run()

        approximatelyEqual(got.head, div)
      }
    }

    check(prop _)
  }

  test("multiply BigDecimal") {
    def prop(a: BigDecimal, b: BigDecimal): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      val result = BigDecimal(a.doubleValue * b.doubleValue)
      val got = df.select(df.col('a) * df.col('b)).collect().run()

      approximatelyEqual(got.head, result)
    }

    check(prop _)
  }

  // Local type class providing a modulo operation for every type tested below
  // (scala.Numeric has no %).
  trait NumericMod[T] {
    def mod(a: T, b: T): T
  }

  object NumericMod {
    implicit val byteInstance = new NumericMod[Byte] {
      def mod(a: Byte, b: Byte) = (a % b).toByte
    }
    implicit val doubleInstance = new NumericMod[Double] {
      def mod(a: Double, b: Double) = a % b
    }
    implicit val floatInstance = new NumericMod[Float] {
      def mod(a: Float, b: Float) = a % b
    }
    implicit val intInstance = new NumericMod[Int] {
      def mod(a: Int, b: Int) = a % b
    }
    implicit val longInstance = new NumericMod[Long] {
      def mod(a: Long, b: Long) = a % b
    }
    implicit val shortInstance = new NumericMod[Short] {
      def mod(a: Short, b: Short) = (a % b).toShort
    }
    implicit val bigDecimalInstance = new NumericMod[BigDecimal] {
      def mod(a: BigDecimal, b: BigDecimal) = a % b
    }
  }

  test("mod") {
    import NumericMod._

    def prop[A: TypedEncoder : CatalystNumeric : NumericMod](a: A, b: A): Prop = {
      val df = TypedDataset.create(X2(a, b) :: Nil)
      if (b == 0) proved else {
        val mod: A = implicitly[NumericMod[A]].mod(a, b)
        val got: Seq[A] = df.select(df.col('a) % df.col('b)).collect().run()

        got ?= (mod :: Nil)
      }
    }

    check(prop[Byte] _)
    check(prop[Double] _)
    check(prop[Int   ] _)
    check(prop[Long  ] _)
    check(prop[Short ] _)
    check(prop[BigDecimal] _)
  }

  // Same as "mod" but with a literal right-hand side instead of a column.
  test("a mod lit(b)"){
    import NumericMod._

    def prop[A: TypedEncoder : CatalystNumeric : NumericMod](elem: A, data: X1[A]): Prop = {
      val dataset = TypedDataset.create(Seq(data))
      val a = dataset.col('a)
      if (elem == 0) proved else {
        val mod: A = implicitly[NumericMod[A]].mod(data.a, elem)
        val got: Seq[A] = dataset.select(a % elem).collect().run()

        got ?= (mod :: Nil)
      }
    }

    check(prop[Byte] _)
    check(prop[Double] _)
    check(prop[Int   ] _)
    check(prop[Long  ] _)
    check(prop[Short ] _)
    check(prop[BigDecimal] _)
  }

  test("isNaN") {
    val spark = session
    import spark.implicits._

    // Bias the generator so NaN actually occurs in the sampled data.
    implicit val doubleWithNaN = Arbitrary {
      implicitly[Arbitrary[Double]].arbitrary.flatMap(Gen.oneOf(_, Double.NaN))
    }
    implicit val x1 = Arbitrary{ doubleWithNaN.arbitrary.map(X1(_)) }

    def prop[A : TypedEncoder : Encoder : CatalystNaN](data: List[X1[A]]): Prop = {
      val ds = TypedDataset.create(data)

      // Compare the typed filter against the untyped DataFrame equivalent.
      val expected = ds.toDF().filter(!$"a".isNaN).map(_.getAs[A](0)).collect().toSeq
      val rs = ds.filter(!ds('a).isNaN).collect().run().map(_.a)

      rs ?= expected
    }

    check(forAll(prop[Float] _))
    check(forAll(prop[Double] _))
  }

  test("isNaN with non-nan types should not compile") {
    val ds = TypedDataset.create((1, false, 'a, "b") :: Nil)

    "ds.filter(ds('_1).isNaN)" shouldNot typeCheck
    "ds.filter(ds('_2).isNaN)" shouldNot typeCheck
    "ds.filter(ds('_3).isNaN)" shouldNot typeCheck
    "ds.filter(ds('_4).isNaN)" shouldNot typeCheck
  }
}
| adelbertc/frameless | dataset/src/test/scala/frameless/NumericTests.scala | Scala | apache-2.0 | 6,085 |
package com.tardis.common.init
import com.tardis.common.block.{BlockConsole, BlockTardisDoor}
import com.tardis.common.tile.{TEConsole, TEDoor}
import com.temportalist.origin.foundation.common.register.BlockRegister
import com.temportalist.origin.internal.common.Origin
/**
*
*
* @author TheTemportalist 4/14/15
*/
object TardisBlocks extends BlockRegister {

  // Block singletons; null until register() runs during mod initialization.
  var console: BlockConsole = null
  var tDoor: BlockTardisDoor = null

  /**
   * Registers the mod's tile entities and creates/registers its blocks.
   * Each tile-entity class is registered before the matching block is
   * constructed and added to the creative tab.
   */
  override def register(): Unit = {
    this.register("console", classOf[TEConsole])
    this.console = new BlockConsole("console")
    Origin.addBlockToTab(this.console)

    this.register("door", classOf[TEDoor])
    // NOTE(review): the tile entity is registered as "door" but the block's
    // internal name is "tardis_door" — confirm the mismatch is intentional.
    this.tDoor = new BlockTardisDoor("tardis_door")
    Origin.addBlockToTab(this.tDoor)
  }
}
| TheTemportalist/Tardis | src/main/scala/com/tardis/common/init/TardisBlocks.scala | Scala | apache-2.0 | 735 |
package com.github.ldaniels528.trifecta.sjs.controllers
import com.github.ldaniels528.trifecta.sjs.services.ConfigService
import io.scalajs.npm.angularjs._
import io.scalajs.npm.angularjs.toaster.Toaster
import scala.concurrent.duration._
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.scalajs.js
import scala.util.{Failure, Success}
/**
* Configuration Controller
* @author lawrence.daniels@gmail.com
*/
case class ConfigController($scope: ConfigScope, $log: Log, $timeout: Timeout, toaster: Toaster,
                            @injected("ConfigService") configService: ConfigService)
  extends Controller with PopupMessages {

  // Property keys the view treats as well-known Trifecta settings;
  // isConfigKey matches against these case-insensitively.
  private val knownConfigs = js.Array(
    "trifecta.common.encoding",
    "trifecta.kafka.consumers.native",
    "trifecta.kafka.consumers.zookeeper",
    "trifecta.web.push.interval.consumer",
    "trifecta.web.push.interval.topic",
    "trifecta.zookeeper.host",
    "trifecta.zookeeper.kafka.root.path"
  )

  // Invoked by the view once the controller is attached; kicks off the
  // initial configuration load.
  $scope.init = () => {
    $log.info(s"Initializing ${getClass.getName}...")
    loadConfigurationProperties()
  }

  // True when the (trimmed, lower-cased) label is a known configuration key.
  $scope.isConfigKey = (aLabel: js.UndefOr[String]) => {
    aLabel.exists(s => knownConfigs.contains(s.trim.toLowerCase))
  }

  /**
   * Fetches the server-side configuration properties and publishes them on
   * the scope. On success the loading flag is cleared after a 500ms delay;
   * on failure a popup error is shown instead.
   */
  private def loadConfigurationProperties() = {
    $scope.loadingConfig = true
    configService.getConfig onComplete {
      case Success(props) =>
        $timeout(() => $scope.loadingConfig = false, 500.millis)
        $log.info(s"Loading ${props.size} configuration properties")
        $scope.$apply(() => $scope.configProps = props)
      case Failure(e) =>
        $scope.$apply(() => $scope.loadingConfig = false)
        errorPopup("Error loading configuration details", e)
    }
  }
}
/** Angular scope facade for the configuration view (scala.js native binding). */
@js.native
trait ConfigScope extends Scope {
  // variables
  var configProps: js.UndefOr[js.Dictionary[js.Any]] = js.native
  var loadingConfig: js.UndefOr[Boolean] = js.native

  // functions
  var init: js.Function0[Unit] = js.native
  var isConfigKey: js.Function1[js.UndefOr[String], Boolean] = js.native
} | ldaniels528/trifecta | app-js/src/main/scala/com/github/ldaniels528/trifecta/sjs/controllers/ConfigController.scala | Scala | apache-2.0 | 2,038 |
package verystickypistonsmod
import cpw.mods.fml.common.Mod
import cpw.mods.fml.common.Mod.Init
import cpw.mods.fml.common.Mod.PostInit
import cpw.mods.fml.common.Mod.PreInit
import cpw.mods.fml.common.event.FMLInitializationEvent
import cpw.mods.fml.common.event.FMLPostInitializationEvent
import cpw.mods.fml.common.event.FMLPreInitializationEvent
import cpw.mods.fml.common.network.NetworkMod
import net.minecraftforge.common.Configuration
import cpw.mods.fml.common.registry.LanguageRegistry
@Mod(modid="verystickypistonsmod", name="VeryStickyPistonsMod", version="0.0.1", modLanguage = "scala")
@NetworkMod(clientSideRequired=true, serverSideRequired=false)
object VeryStickyPiston {

  // The mod's single block, created with block ID 5100.
  // NOTE(review): the unlocalized name "specificItem" looks like a copy/paste
  // leftover — confirm whether it should match the block (e.g. "veryStickyPiston").
  val veryStickyPiston = new BlockVeryStickyPiston(5100).setUnlocalizedName("specificItem")

  /** Pre-initialization hook (currently empty). */
  @PreInit
  def preInit(event: FMLPreInitializationEvent) {
  }

  /** Initialization: registers the block's display name. */
  @Init
  def init(event: FMLInitializationEvent) {
    LanguageRegistry.addName(veryStickyPiston, "Very Sticky Piston");
  }

  /** Post-initialization hook (currently empty). */
  @PostInit
  def postInit(event: FMLPostInitializationEvent) {
  }
}
| nmarshall23/Very-Sticky-Pistons-Mod | src/main/verystickypistonsmod/Verystickypiston.scala | Scala | mit | 1,103 |
import Deck.Color._
import Deck._
/**
*/
// NOTE(review): despite the "Spec" name this is a plain class mixing in App,
// not a test-framework spec — its body only runs if instantiated via main().
// Confirm whether a real spec base class was intended.
class EventSourcingSpec extends App {

  // Canned event-stream fixture: one started game followed by one played card.
  object Given {
    val simpleDeck = GameStarted(1, 4, CardDigit(3, Red)) :: CardPlayed(1, 0, CardDigit(9, Red)) :: Nil
  }

  println("coucou") // NOTE(review): leftover debug output — consider removing.
}
| twillouer/handson-eventsourcing | src/test/scala/EventSourcingSpec.scala | Scala | apache-2.0 | 235 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.asterix.connector.result
/**
 * AsterixDB result iterator for AsterixRDD, yielding one JSON-formatted
 * result tuple per call to [[next]].
 *
 * Note: the type parameter was previously named `String`, which shadowed
 * `scala.Predef.String` and made `next(): String` refer to the type
 * parameter rather than the string type. It is renamed to `T` (a pure
 * rename — source-compatible for all callers, identical behavior).
 *
 * @param resultReader AsterixResultReader supplying the raw JSON result.
 * @tparam T element type each tuple is cast to (a JSON string in practice).
 */
class AsterixResultIterator[T] (resultReader: AsterixResultReader) extends Iterator[T] {

  private[this] val resultClient = new AsterixClient(resultReader)

  /** Returns the next result tuple, cast to the element type `T`. */
  override def next(): T = resultClient.getResultTuple.asInstanceOf[T]

  override def hasNext: Boolean = resultClient.hasNext
}
| Nullification/asterixdb-spark-connector | src/main/scala/org/apache/asterix/connector/result/AsterixResultIterator.scala | Scala | apache-2.0 | 1,395 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.TotalCreditorsWithinOneYearCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.accounts.validation.AssetsEqualToSharesValidator
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.BoxRetriever._
case class AC154(value: Option[Int]) extends CtBoxIdentifier(name = "Total creditors within one year (current PoA)")
  with CtOptionalInteger with AssetsEqualToSharesValidator with ValidatableBox[Frs102AccountsBoxRetriever] {

  /**
   * Validation only fires when either balance-sheet box AC58 or AC59 has a
   * value; in that case this note total must equal AC58.
   */
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
    import boxRetriever._

    failIf (anyHaveValue(ac58(), ac59()))(
      validateMatchesBalanceSheetValue(boxRetriever)
    )
  }

  // Emits a global (box-less) validation error when this total differs from
  // the balance-sheet value in AC58.
  private def validateMatchesBalanceSheetValue(boxRetriever: Frs102AccountsBoxRetriever)() = {
    failIf(value != boxRetriever.ac58().value) {
      Set(CtValidation(None, "error.creditors.within.one.year.note.current.total.not.equal.balance.sheet"))
    }
  }
}
object AC154 extends Calculated[AC154, FullAccountsBoxRetriever] with TotalCreditorsWithinOneYearCalculator {

  /**
   * Derives the current-period total from boxes AC142–AC152 via
   * TotalCreditorsWithinOneYearCalculator.
   */
  override def calculate(boxRetriever: FullAccountsBoxRetriever): AC154 = {
    import boxRetriever._

    calculateCurrentTotalCreditorsWithinOneYear(ac142(), ac144(), ac146(), ac148(), ac150(), ac152())
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC154.scala | Scala | apache-2.0 | 2,030 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kudu.backup
import java.math.BigDecimal
import java.util.Base64
import com.google.protobuf.StringValue
import org.apache.kudu.backup.Backup._
import org.apache.kudu.ColumnSchema.{ColumnSchemaBuilder, CompressionAlgorithm, Encoding}
import org.apache.kudu.ColumnTypeAttributes.ColumnTypeAttributesBuilder
import org.apache.kudu.client.{CreateTableOptions, KuduTable, PartialRow}
import org.apache.kudu.{ColumnSchema, Schema, Type}
import org.apache.yetus.audience.{InterfaceAudience, InterfaceStability}
import scala.collection.JavaConverters._
@InterfaceAudience.Private
@InterfaceStability.Unstable
object TableMetadata {

  // File name under which a table's backup metadata document is stored.
  val MetadataFileName = ".kudu-metadata.json"

  /**
   * Builds the backup metadata document for `table`: column schema (types,
   * attributes, defaults), partitioning, replication factor, and the backup
   * time window/format taken from `options`.
   */
  def getTableMetadata(table: KuduTable, options: KuduBackupOptions): TableMetadataPB = {
    val columns = table.getSchema.getColumns.asScala.map { col =>
      val builder = ColumnMetadataPB.newBuilder()
        .setName(col.getName)
        .setType(col.getType.name())
        .setIsKey(col.isKey)
        .setIsNullable(col.isNullable)
        .setEncoding(col.getEncoding.toString)
        .setCompression(col.getCompressionAlgorithm.toString)
        .setBlockSize(col.getDesiredBlockSize)
      // Type attributes and defaults are optional; only serialize them when set.
      if (col.getTypeAttributes != null) {
        builder.setTypeAttributes(getTypeAttributesMetadata(col))
      }
      if (col.getDefaultValue != null) {
        builder.setDefaultValue(StringValue.of(valueToString(col.getDefaultValue, col.getType)))
      }
      builder.build()
    }
    TableMetadataPB.newBuilder()
      .setFromMs(0) // TODO: fromMs is always zero until we support incremental backups
      .setToMs(options.timestampMs)
      .setDataFormat(options.format)
      .setTableName(table.getName)
      .addAllColumns(columns.asJava)
      .setNumReplicas(table.getNumReplicas)
      .setPartitions(getPartitionMetadata(table))
      .build()
  }

  /** Serializes a column's type attributes (precision and scale). */
  private def getTypeAttributesMetadata(col: ColumnSchema): ColumnTypeAttributesMetadataPB = {
    val attributes = col.getTypeAttributes
    ColumnTypeAttributesMetadataPB.newBuilder()
      .setPrecision(attributes.getPrecision)
      .setScale(attributes.getScale)
      .build()
  }

  /** Serializes the table's hash and range partitioning. */
  private def getPartitionMetadata(table: KuduTable): PartitionMetadataPB = {
    val hashPartitions = getHashPartitionsMetadata(table)
    val rangePartitions = getRangePartitionMetadata(table)
    PartitionMetadataPB.newBuilder()
      .addAllHashPartitions(hashPartitions.asJava)
      .setRangePartitions(rangePartitions)
      .build()
  }

  /** One entry per hash-bucket schema: column names, bucket count and seed. */
  private def getHashPartitionsMetadata(table: KuduTable): Seq[HashPartitionMetadataPB] = {
    val tableSchema = table.getSchema
    val partitionSchema = table.getPartitionSchema
    partitionSchema.getHashBucketSchemas.asScala.map { hs =>
      // The partition schema references columns by id; translate back to names.
      val columnNames = hs.getColumnIds.asScala.map { id =>
        getColumnById(tableSchema, id).getName
      }
      HashPartitionMetadataPB.newBuilder()
        .addAllColumnNames(columnNames.asJava)
        .setNumBuckets(hs.getNumBuckets)
        .setSeed(hs.getSeed)
        .build()
    }
  }

  /** Serializes the range partition columns and each partition's bounds. */
  private def getRangePartitionMetadata(table: KuduTable): RangePartitionMetadataPB = {
    val tableSchema = table.getSchema
    val partitionSchema = table.getPartitionSchema
    val columnNames = partitionSchema.getRangeSchema.getColumnIds.asScala.map { id =>
      getColumnById(tableSchema, id).getName
    }
    val bounds = table.getRangePartitions(table.getAsyncClient.getDefaultOperationTimeoutMs)
      .asScala.map { p =>
      val lowerValues = getBoundValues(p.getDecodedRangeKeyStart(table), columnNames, tableSchema)
      val upperValues = getBoundValues(p.getDecodedRangeKeyEnd(table), columnNames, tableSchema)
      RangeBoundsMetadataPB.newBuilder()
        .addAllUpperBounds(upperValues.asJava)
        .addAllLowerBounds(lowerValues.asJava)
        .build()
    }
    RangePartitionMetadataPB.newBuilder()
      .addAllColumnNames(columnNames.asJava)
      .addAllBounds(bounds.asJava)
      .build()
  }

  /** Looks up a column schema by its column id. */
  private def getColumnById(schema: Schema, colId: Int): ColumnSchema = {
    schema.getColumnByIndex(schema.getColumnIndex(colId))
  }

  /**
   * Serializes the values of a partition bound row as (column, string value)
   * pairs. Columns not set in the bound row (unbounded) are skipped.
   */
  private def getBoundValues(bound: PartialRow, columnNames: Seq[String], schema: Schema): Seq[ColumnValueMetadataPB] = {
    columnNames.filter(bound.isSet).map { col =>
      val colType = schema.getColumn(col).getType
      val value = getValue(bound, col, colType)
      ColumnValueMetadataPB.newBuilder()
        .setColumnName(col)
        .setValue(valueToString(value, colType))
        .build()
    }
  }

  /** Inverse of getBoundValues: rebuilds a PartialRow from serialized values. */
  private def getPartialRow(values: Seq[ColumnValueMetadataPB], schema: Schema): PartialRow = {
    val row = schema.newPartialRow()
    values.foreach { v =>
      val colType = schema.getColumn(v.getColumnName).getType
      addValue(valueFromString(v.getValue, colType), row, v.getColumnName, colType)
    }
    row
  }

  /** Reconstructs a Kudu Schema from the serialized column metadata. */
  def getKuduSchema(metadata: TableMetadataPB): Schema = {
    val columns = metadata.getColumnsList.asScala.map { col =>
      val colType = Type.getTypeForName(col.getType)
      val builder = new ColumnSchemaBuilder(col.getName, colType)
        .key(col.getIsKey)
        .nullable(col.getIsNullable)
        .encoding(Encoding.valueOf(col.getEncoding))
        .compressionAlgorithm(CompressionAlgorithm.valueOf(col.getCompression))
        .desiredBlockSize(col.getBlockSize)
      if (col.hasDefaultValue) {
        val value = valueFromString(col.getDefaultValue.getValue, colType)
        builder.defaultValue(value)
      }
      if (col.hasTypeAttributes) {
        val attributes = col.getTypeAttributes
        builder.typeAttributes(
          new ColumnTypeAttributesBuilder()
            .precision(attributes.getPrecision)
            .scale(attributes.getScale)
            .build()
        )
      }
      builder.build()
    }
    new Schema(columns.asJava)
  }

  /** Reads a typed value out of a PartialRow (type dispatch by Kudu Type). */
  private def getValue(row: PartialRow, columnName: String, colType: Type): Any = {
    colType match {
      case Type.BOOL => row.getBoolean(columnName)
      case Type.INT8 => row.getByte(columnName)
      case Type.INT16 => row.getShort(columnName)
      case Type.INT32 => row.getInt(columnName)
      case Type.INT64 | Type.UNIXTIME_MICROS => row.getLong(columnName)
      case Type.FLOAT => row.getFloat(columnName)
      case Type.DOUBLE => row.getDouble(columnName)
      case Type.STRING => row.getString(columnName)
      case Type.BINARY => row.getBinary(columnName)
      case Type.DECIMAL => row.getDecimal(columnName)
      case _ => throw new IllegalArgumentException(s"Unsupported column type: $colType")
    }
  }

  /** Writes a typed value into a PartialRow (inverse of getValue). */
  private def addValue(value: Any, row: PartialRow, columnName: String, colType: Type): Any = {
    colType match {
      case Type.BOOL => row.addBoolean(columnName, value.asInstanceOf[Boolean])
      case Type.INT8 => row.addByte(columnName, value.asInstanceOf[Byte])
      case Type.INT16 => row.addShort(columnName, value.asInstanceOf[Short])
      case Type.INT32 => row.addInt(columnName, value.asInstanceOf[Int])
      case Type.INT64 | Type.UNIXTIME_MICROS => row.addLong(columnName, value.asInstanceOf[Long])
      case Type.FLOAT => row.addFloat(columnName, value.asInstanceOf[Float])
      case Type.DOUBLE => row.addDouble(columnName, value.asInstanceOf[Double])
      case Type.STRING => row.addString(columnName, value.asInstanceOf[String])
      case Type.BINARY => row.addBinary(columnName, value.asInstanceOf[Array[Byte]])
      case Type.DECIMAL => row.addDecimal(columnName, value.asInstanceOf[BigDecimal])
      case _ => throw new IllegalArgumentException(s"Unsupported column type: $colType")
    }
  }

  /**
   * Encodes a column value as a string for the metadata document.
   * BINARY is base64-encoded; everything else uses its decimal/plain form.
   * Must stay the exact inverse of valueFromString.
   */
  private def valueToString(value: Any, colType: Type): String = {
    colType match {
      case Type.BOOL =>
        String.valueOf(value.asInstanceOf[Boolean])
      case Type.INT8 =>
        String.valueOf(value.asInstanceOf[Byte])
      case Type.INT16 =>
        String.valueOf(value.asInstanceOf[Short])
      case Type.INT32 =>
        String.valueOf(value.asInstanceOf[Int])
      case Type.INT64 | Type.UNIXTIME_MICROS =>
        String.valueOf(value.asInstanceOf[Long])
      case Type.FLOAT =>
        String.valueOf(value.asInstanceOf[Float])
      case Type.DOUBLE =>
        String.valueOf(value.asInstanceOf[Double])
      case Type.STRING =>
        value.asInstanceOf[String]
      case Type.BINARY =>
        Base64.getEncoder.encodeToString(value.asInstanceOf[Array[Byte]])
      case Type.DECIMAL =>
        value.asInstanceOf[BigDecimal].toString // TODO: Explicitly control print format
      case _ => throw new IllegalArgumentException(s"Unsupported column type: $colType")
    }
  }

  /** Decodes a string produced by valueToString back into a typed value. */
  private def valueFromString(value: String, colType: Type): Any = {
    colType match {
      case Type.BOOL => value.toBoolean
      case Type.INT8 => value.toByte
      case Type.INT16 => value.toShort
      case Type.INT32 => value.toInt
      case Type.INT64 | Type.UNIXTIME_MICROS => value.toLong
      case Type.FLOAT => value.toFloat
      case Type.DOUBLE => value.toDouble
      case Type.STRING => value
      case Type.BINARY => Base64.getDecoder.decode(value)
      case Type.DECIMAL => new BigDecimal(value) // TODO: Explicitly pass scale
      case _ => throw new IllegalArgumentException(s"Unsupported column type: $colType")
    }
  }

  /**
   * Rebuilds the CreateTableOptions (replication, hash partitions, range
   * partitions with their bounds) needed to recreate the backed-up table.
   */
  def getCreateTableOptions(metadata: TableMetadataPB): CreateTableOptions = {
    val schema = getKuduSchema(metadata)
    val options = new CreateTableOptions()
    options.setNumReplicas(metadata.getNumReplicas)
    metadata.getPartitions.getHashPartitionsList.asScala.foreach { hp =>
      options.addHashPartitions(hp.getColumnNamesList, hp.getNumBuckets, hp.getSeed)
    }
    val rangePartitionColumns = metadata.getPartitions.getRangePartitions.getColumnNamesList
    options.setRangePartitionColumns(rangePartitionColumns)
    metadata.getPartitions.getRangePartitions.getBoundsList.asScala.foreach { b =>
      val lower = getPartialRow(b.getLowerBoundsList.asScala, schema)
      val upper = getPartialRow(b.getUpperBoundsList.asScala, schema)
      options.addRangePartition(lower, upper)
    }
    options
  }
} | EvilMcJerkface/kudu | java/kudu-backup/src/main/scala/org/apache/kudu/backup/TableMetadata.scala | Scala | apache-2.0 | 10,898 |
package com.chatwork.sbt.aws.s3.resolver
import com.amazonaws.ClientConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client, AmazonS3ClientBuilder}
import com.chatwork.sbt.aws.core.SbtAwsCoreKeys
import com.chatwork.sbt.aws.s3.{SbtAwsS3Keys, SbtAwsS3Plugin}
import sbt.Keys._
import sbt.{AutoPlugin, Logger, Plugins}
/** sbt auto-plugin that contributes an S3-backed artifact resolver to the
  * `aws` settings namespace. Triggered automatically whenever its required
  * plugin [[SbtAwsS3Plugin]] is enabled.
  */
object SbtAwsS3ResolverPlugin extends AutoPlugin with SbtAwsS3Resolver {

  override def trigger = allRequirements

  override def requires: Plugins = SbtAwsS3Plugin

  object autoImport extends SbtAwsS3ResolverKeys {

    /** Repository layout used when deploying to the bucket. */
    object DeployStyle extends Enumeration {
      val Maven, Ivy2 = Value
    }

  }

  import SbtAwsCoreKeys._
  import SbtAwsS3Keys._
  import autoImport._

  /** Default settings: Tokyo region, Maven layout, no SSE, public-read ACL,
    * and overwriting allowed only for snapshot builds.
    */
  override def projectSettings: Seq[_root_.sbt.Def.Setting[_]] = Seq(
    s3Region in aws := com.amazonaws.services.s3.model.Region.AP_Tokyo,
    s3DeployStyle in aws := DeployStyle.Maven,
    s3ServerSideEncryption in aws := false,
    s3Acl in aws := com.amazonaws.services.s3.model.CannedAccessControlList.PublicRead,
    // Only snapshot builds may overwrite previously published objects.
    s3OverwriteObject in aws := isSnapshot.value,
    s3Resolver in aws := { (name: String, location: String) =>
      val cpc = (credentialsProviderChain in aws).value
      val cc = (clientConfiguration in aws).value
      val regions = (region in aws).value
      val _s3Region = (s3Region in aws).value
      val sse = (s3ServerSideEncryption in aws).value
      val acl = (s3Acl in aws).value
      val overwrite = (s3OverwriteObject in aws).value
      val deployStyle = (s3DeployStyle in aws).value
      val builder0 = AmazonS3ClientBuilder.standard().withRegion(regions).withCredentials(cpc)
      // Apply an explicit ClientConfiguration only when one was supplied.
      val builder = cc.fold(builder0)(builder0.withClientConfiguration)
      val s3Client: AmazonS3 = builder.build()
      ResolverCreator.create(s3Client,
                             _s3Region,
                             name,
                             location,
                             acl,
                             sse,
                             overwrite,
                             // was: if (deployStyle == DeployStyle.Maven) true else false
                             isMavenStyle = deployStyle == DeployStyle.Maven)
    }
  )
}
| chatwork/sbt-aws | sbt-aws-s3-resolver/src/main/scala/com/chatwork/sbt/aws/s3/resolver/SbtAwsS3ResolverPlugin.scala | Scala | mit | 2,191 |
package controllers
import play.api.libs.json.Json
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc.{Action, Controller}
import models.Event
/**
* Created by Norman on 02.04.14.
*/
/** HTTP endpoints for querying stored events. */
object EventsController extends Controller {

  /** Asynchronously fetches the last `n` events and renders them as a
    * pretty-printed JSON document with a 200 (OK) response.
    */
  def getLastNEvents(n: Int) = Action.async {
    Event.listAsJson(n).map { events =>
      Ok(Json.prettyPrint(Json.toJson(events)))
    }
  }
}
| normalerweise/mte | app/controllers/EventsController.scala | Scala | gpl-2.0 | 391 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs.manager
import swing.Frame
/** Context supplied to UI actions: the application frame plus the two
  * [[FolderPanel]]s (named `left` and `right`, suggesting a two-panel
  * layout) that actions operate on.
  */
trait ActionContext {
  /** Top-level application window. */
  def frame: Frame
  /** The left-hand folder panel. */
  def left: FolderPanel
  /** The right-hand folder panel. */
  def right: FolderPanel
}
| pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/manager/ActionContext.scala | Scala | gpl-3.0 | 864 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO2
package com.google.protobuf.descriptor
import _root_.scalapb.internal.compat.JavaConverters._
/** Describes the relationship between generated code and its original source
* file. A GeneratedCodeInfo message is associated with only one generated
* source file, but may contain references to different source .proto files.
*
* @param annotation
* An Annotation connects some span of text in generated code to an element
* of its generating .proto file.
*/
// NOTE(review): this message class is in a ScalaPB-generated file
// ("Do not edit!"); the comments below are review notes only and will be
// lost on regeneration.
@SerialVersionUID(0L)
final case class GeneratedCodeInfo(
    annotation: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] = _root_.scala.Seq.empty
    ) extends scalapb.GeneratedMessage with scalapb.Message[GeneratedCodeInfo] with scalapb.lenses.Updatable[GeneratedCodeInfo] {
    // Cached protobuf wire size; 0 doubles as the "not yet computed" marker.
    @transient
    private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
    private[this] def __computeSerializedValue(): _root_.scala.Int = {
      var __size = 0
      annotation.foreach { __item =>
        val __value = __item
        __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
      }
      __size
    }
    final override def serializedSize: _root_.scala.Int = {
      var read = __serializedSizeCachedValue
      if (read == 0) {
        read = __computeSerializedValue()
        __serializedSizeCachedValue = read
      }
      read
    }
    // Writes each annotation as a length-delimited field number 1.
    def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
      annotation.foreach { __v =>
        val __m = __v
        _output__.writeTag(1, 2)
        _output__.writeUInt32NoTag(__m.serializedSize)
        __m.writeTo(_output__)
      };
    }
    // Merges fields from the wire into a copy of this message; unknown
    // fields are skipped.
    def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.GeneratedCodeInfo = {
      val __annotation = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] ++= this.annotation)
      var _done__ = false
      while (!_done__) {
        val _tag__ = _input__.readTag()
        _tag__ match {
          case 0 => _done__ = true
          case 10 =>
            __annotation += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation.defaultInstance)
          case tag => _input__.skipField(tag)
        }
      }
      com.google.protobuf.descriptor.GeneratedCodeInfo(
          annotation = __annotation.result()
      )
    }
    def clearAnnotation = copy(annotation = _root_.scala.Seq.empty)
    def addAnnotation(__vs: com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation*): GeneratedCodeInfo = addAllAnnotation(__vs)
    def addAllAnnotation(__vs: Iterable[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]): GeneratedCodeInfo = copy(annotation = annotation ++ __vs)
    def withAnnotation(__v: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]): GeneratedCodeInfo = copy(annotation = __v)
    def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
      (__fieldNumber: @_root_.scala.unchecked) match {
        case 1 => annotation
      }
    }
    def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
      _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
      (__field.number: @_root_.scala.unchecked) match {
        case 1 => _root_.scalapb.descriptors.PRepeated(annotation.iterator.map(_.toPMessage).toVector)
      }
    }
    def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
    def companion = com.google.protobuf.descriptor.GeneratedCodeInfo
}
// Companion for [[GeneratedCodeInfo]]: Java protobuf interop, descriptor
// lookup, field-map construction, and lenses. ScalaPB-generated code
// ("Do not edit!") — comments here are review notes and will not survive
// regeneration.
object GeneratedCodeInfo extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.GeneratedCodeInfo, com.google.protobuf.DescriptorProtos.GeneratedCodeInfo] {
  implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.GeneratedCodeInfo, com.google.protobuf.DescriptorProtos.GeneratedCodeInfo] = this
  def toJavaProto(scalaPbSource: com.google.protobuf.descriptor.GeneratedCodeInfo): com.google.protobuf.DescriptorProtos.GeneratedCodeInfo = {
    val javaPbOut = com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.newBuilder
    javaPbOut.addAllAnnotation(scalaPbSource.annotation.iterator.map(com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation.toJavaProto).toIterable.asJava)
    javaPbOut.build
  }
  def fromJavaProto(javaPbSource: com.google.protobuf.DescriptorProtos.GeneratedCodeInfo): com.google.protobuf.descriptor.GeneratedCodeInfo = com.google.protobuf.descriptor.GeneratedCodeInfo(
    annotation = javaPbSource.getAnnotationList.asScala.iterator.map(com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation.fromJavaProto).toSeq
  )
  def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.descriptor.GeneratedCodeInfo = {
    _root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
    val __fields = javaDescriptor.getFields
    com.google.protobuf.descriptor.GeneratedCodeInfo(
      __fieldsMap.getOrElse(__fields.get(0), Nil).asInstanceOf[_root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]]
    )
  }
  implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.GeneratedCodeInfo] = _root_.scalapb.descriptors.Reads{
    case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
      _root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
      com.google.protobuf.descriptor.GeneratedCodeInfo(
        __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]]).getOrElse(_root_.scala.Seq.empty)
      )
    case _ => throw new RuntimeException("Expected PMessage")
  }
  def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DescriptorProtoCompanion.javaDescriptor.getMessageTypes.get(20)
  def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DescriptorProtoCompanion.scalaDescriptor.messages(20)
  def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
    var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
    (__number: @_root_.scala.unchecked) match {
      case 1 => __out = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
    }
    __out
  }
  lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
    Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]](
      _root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
    )
  def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
  lazy val defaultInstance = com.google.protobuf.descriptor.GeneratedCodeInfo(
  )
  /** @param path
    *   Identifies the element in the original source .proto file. This field
    *   is formatted the same as SourceCodeInfo.Location.path.
    * @param sourceFile
    *   Identifies the filesystem path to the original source .proto.
    * @param begin
    *   Identifies the starting offset in bytes in the generated code
    *   that relates to the identified object.
    * @param end
    *   Identifies the ending offset in bytes in the generated code that
    *   relates to the identified offset. The end offset should be one past
    *   the last relevant byte (so the length of the text = end - begin).
    */
  @SerialVersionUID(0L)
  final case class Annotation(
      path: _root_.scala.Seq[_root_.scala.Int] = _root_.scala.Seq.empty,
      sourceFile: _root_.scala.Option[_root_.scala.Predef.String] = _root_.scala.None,
      begin: _root_.scala.Option[_root_.scala.Int] = _root_.scala.None,
      end: _root_.scala.Option[_root_.scala.Int] = _root_.scala.None
      ) extends scalapb.GeneratedMessage with scalapb.Message[Annotation] with scalapb.lenses.Updatable[Annotation] {
      // Cached size of the packed `path` field payload (0 = not computed).
      private[this] def pathSerializedSize = {
        if (__pathSerializedSizeField == 0) __pathSerializedSizeField = {
          var __s: _root_.scala.Int = 0
          path.foreach(__i => __s += _root_.com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(__i))
          __s
        }
        __pathSerializedSizeField
      }
      @transient private[this] var __pathSerializedSizeField: _root_.scala.Int = 0
      @transient
      private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
      private[this] def __computeSerializedValue(): _root_.scala.Int = {
        var __size = 0
        if(path.nonEmpty) {
          val __localsize = pathSerializedSize
          __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__localsize) + __localsize
        }
        if (sourceFile.isDefined) {
          val __value = sourceFile.get
          __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(2, __value)
        };
        if (begin.isDefined) {
          val __value = begin.get
          __size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(3, __value)
        };
        if (end.isDefined) {
          val __value = end.get
          __size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(4, __value)
        };
        __size
      }
      final override def serializedSize: _root_.scala.Int = {
        var read = __serializedSizeCachedValue
        if (read == 0) {
          read = __computeSerializedValue()
          __serializedSizeCachedValue = read
        }
        read
      }
      // `path` is written packed (length-delimited, field 1).
      def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
        if (path.nonEmpty) {
          _output__.writeTag(1, 2)
          _output__.writeUInt32NoTag(pathSerializedSize)
          path.foreach(_output__.writeInt32NoTag)
        };
        sourceFile.foreach { __v =>
          val __m = __v
          _output__.writeString(2, __m)
        };
        begin.foreach { __v =>
          val __m = __v
          _output__.writeInt32(3, __m)
        };
        end.foreach { __v =>
          val __m = __v
          _output__.writeInt32(4, __m)
        };
      }
      // Accepts both unpacked (tag 8, one varint per entry) and packed
      // (tag 10, length-delimited) encodings of `path`.
      def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = {
        val __path = (_root_.scala.collection.immutable.Vector.newBuilder[_root_.scala.Int] ++= this.path)
        var __sourceFile = this.sourceFile
        var __begin = this.begin
        var __end = this.end
        var _done__ = false
        while (!_done__) {
          val _tag__ = _input__.readTag()
          _tag__ match {
            case 0 => _done__ = true
            case 8 =>
              __path += _input__.readInt32()
            case 10 => {
              val length = _input__.readRawVarint32()
              val oldLimit = _input__.pushLimit(length)
              while (_input__.getBytesUntilLimit > 0) {
                __path += _input__.readInt32
              }
              _input__.popLimit(oldLimit)
            }
            case 18 =>
              __sourceFile = Option(_input__.readString())
            case 24 =>
              __begin = Option(_input__.readInt32())
            case 32 =>
              __end = Option(_input__.readInt32())
            case tag => _input__.skipField(tag)
          }
        }
        com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
            path = __path.result(),
            sourceFile = __sourceFile,
            begin = __begin,
            end = __end
        )
      }
      def clearPath = copy(path = _root_.scala.Seq.empty)
      def addPath(__vs: _root_.scala.Int*): Annotation = addAllPath(__vs)
      def addAllPath(__vs: Iterable[_root_.scala.Int]): Annotation = copy(path = path ++ __vs)
      def withPath(__v: _root_.scala.Seq[_root_.scala.Int]): Annotation = copy(path = __v)
      def getSourceFile: _root_.scala.Predef.String = sourceFile.getOrElse("")
      def clearSourceFile: Annotation = copy(sourceFile = _root_.scala.None)
      def withSourceFile(__v: _root_.scala.Predef.String): Annotation = copy(sourceFile = Option(__v))
      def getBegin: _root_.scala.Int = begin.getOrElse(0)
      def clearBegin: Annotation = copy(begin = _root_.scala.None)
      def withBegin(__v: _root_.scala.Int): Annotation = copy(begin = Option(__v))
      def getEnd: _root_.scala.Int = end.getOrElse(0)
      def clearEnd: Annotation = copy(end = _root_.scala.None)
      def withEnd(__v: _root_.scala.Int): Annotation = copy(end = Option(__v))
      def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
        (__fieldNumber: @_root_.scala.unchecked) match {
          case 1 => path
          case 2 => sourceFile.orNull
          case 3 => begin.orNull
          case 4 => end.orNull
        }
      }
      def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
        _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
        (__field.number: @_root_.scala.unchecked) match {
          case 1 => _root_.scalapb.descriptors.PRepeated(path.iterator.map(_root_.scalapb.descriptors.PInt).toVector)
          case 2 => sourceFile.map(_root_.scalapb.descriptors.PString).getOrElse(_root_.scalapb.descriptors.PEmpty)
          case 3 => begin.map(_root_.scalapb.descriptors.PInt).getOrElse(_root_.scalapb.descriptors.PEmpty)
          case 4 => end.map(_root_.scalapb.descriptors.PInt).getOrElse(_root_.scalapb.descriptors.PEmpty)
        }
      }
      def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
      def companion = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
  }
  object Annotation extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation, com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.Annotation] {
    implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation, com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.Annotation] = this
    def toJavaProto(scalaPbSource: com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation): com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.Annotation = {
      val javaPbOut = com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.Annotation.newBuilder
      javaPbOut.addAllPath(scalaPbSource.path.iterator.map(_root_.scala.Int.box).toIterable.asJava)
      scalaPbSource.sourceFile.foreach(javaPbOut.setSourceFile)
      scalaPbSource.begin.foreach(javaPbOut.setBegin)
      scalaPbSource.end.foreach(javaPbOut.setEnd)
      javaPbOut.build
    }
    def fromJavaProto(javaPbSource: com.google.protobuf.DescriptorProtos.GeneratedCodeInfo.Annotation): com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
      path = javaPbSource.getPathList.asScala.iterator.map(_.intValue).toSeq,
      sourceFile = if (javaPbSource.hasSourceFile) Some(javaPbSource.getSourceFile) else _root_.scala.None,
      begin = if (javaPbSource.hasBegin) Some(javaPbSource.getBegin.intValue) else _root_.scala.None,
      end = if (javaPbSource.hasEnd) Some(javaPbSource.getEnd.intValue) else _root_.scala.None
    )
    def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = {
      _root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
      val __fields = javaDescriptor.getFields
      com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
        __fieldsMap.getOrElse(__fields.get(0), Nil).asInstanceOf[_root_.scala.Seq[_root_.scala.Int]],
        __fieldsMap.get(__fields.get(1)).asInstanceOf[_root_.scala.Option[_root_.scala.Predef.String]],
        __fieldsMap.get(__fields.get(2)).asInstanceOf[_root_.scala.Option[_root_.scala.Int]],
        __fieldsMap.get(__fields.get(3)).asInstanceOf[_root_.scala.Option[_root_.scala.Int]]
      )
    }
    implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] = _root_.scalapb.descriptors.Reads{
      case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
        _root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
        com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
          __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Seq[_root_.scala.Int]]).getOrElse(_root_.scala.Seq.empty),
          __fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Predef.String]]),
          __fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Int]]),
          __fieldsMap.get(scalaDescriptor.findFieldByNumber(4).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Int]])
        )
      case _ => throw new RuntimeException("Expected PMessage")
    }
    def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = com.google.protobuf.descriptor.GeneratedCodeInfo.javaDescriptor.getNestedTypes.get(0)
    def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = com.google.protobuf.descriptor.GeneratedCodeInfo.scalaDescriptor.nestedMessages(0)
    def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
    lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
    def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
    lazy val defaultInstance = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
    )
    implicit class AnnotationLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation](_l) {
      def path: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[_root_.scala.Int]] = field(_.path)((c_, f_) => c_.copy(path = f_))
      def sourceFile: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.getSourceFile)((c_, f_) => c_.copy(sourceFile = Option(f_)))
      def optionalSourceFile: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Predef.String]] = field(_.sourceFile)((c_, f_) => c_.copy(sourceFile = f_))
      def begin: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.getBegin)((c_, f_) => c_.copy(begin = Option(f_)))
      def optionalBegin: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Int]] = field(_.begin)((c_, f_) => c_.copy(begin = f_))
      def end: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.getEnd)((c_, f_) => c_.copy(end = Option(f_)))
      def optionalEnd: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Int]] = field(_.end)((c_, f_) => c_.copy(end = f_))
    }
    final val PATH_FIELD_NUMBER = 1
    final val SOURCE_FILE_FIELD_NUMBER = 2
    final val BEGIN_FIELD_NUMBER = 3
    final val END_FIELD_NUMBER = 4
    def of(
      path: _root_.scala.Seq[_root_.scala.Int],
      sourceFile: _root_.scala.Option[_root_.scala.Predef.String],
      begin: _root_.scala.Option[_root_.scala.Int],
      end: _root_.scala.Option[_root_.scala.Int]
    ): _root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = _root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
      path,
      sourceFile,
      begin,
      end
    )
  }
  implicit class GeneratedCodeInfoLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo](_l) {
    def annotation: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]] = field(_.annotation)((c_, f_) => c_.copy(annotation = f_))
  }
  final val ANNOTATION_FIELD_NUMBER = 1
  def of(
    annotation: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]
  ): _root_.com.google.protobuf.descriptor.GeneratedCodeInfo = _root_.com.google.protobuf.descriptor.GeneratedCodeInfo(
    annotation
  )
}
| dotty-staging/ScalaPB | scalapb-runtime/jvm/src/main/scala/com/google/protobuf/descriptor/GeneratedCodeInfo.scala | Scala | apache-2.0 | 21,548 |
// Negative compilation test (lives under tests/untried/neg): every assignment
// in main is intentionally ill-typed and is expected to be REJECTED by the
// compiler — presumably to exercise how nested function types are printed in
// error messages (filename: nested-fn-print.scala). Do not "fix" these lines.
object Test {
  var x1: Int => Float => Double = _
  var x2: (Int => Float) => Double = _
  var x3: Int => Double   // NOTE(review): no initializer — itself an error for a concrete var
  def main(args: Array[String]): Unit = {
    x1 = "a"   // error: String is not Int => Float => Double
    x2 = "b"   // error: String is not (Int => Float) => Double
    x3 = "c"   // error: String is not Int => Double
  }
}
| yusuke2255/dotty | tests/untried/neg/nested-fn-print.scala | Scala | bsd-3-clause | 202 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import scala.collection.mutable.IndexedSeq
import breeze.linalg.{diag, DenseMatrix => BreezeMatrix, DenseVector => BDV, Vector => BV}
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg.{BLAS, DenseMatrix, Matrices, Vector, Vectors}
import org.apache.spark.mllib.stat.distribution.MultivariateGaussian
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
/**
* This class performs expectation maximization for multivariate Gaussian
* Mixture Models (GMMs). A GMM represents a composite distribution of
* independent Gaussian distributions with associated "mixing" weights
* specifying each's contribution to the composite.
*
* Given a set of sample points, this class will maximize the log-likelihood
* for a mixture of k Gaussians, iterating until the log-likelihood changes by
* less than convergenceTol, or until it has reached the max number of iterations.
* While this process is generally guaranteed to converge, it is not guaranteed
* to find a global optimum.
*
* @param k Number of independent Gaussians in the mixture model.
* @param convergenceTol Maximum change in log-likelihood at which convergence
* is considered to have occurred.
* @param maxIterations Maximum number of iterations allowed.
*
* @note This algorithm is limited in its number of features since it requires storing a covariance
* matrix which has size quadratic in the number of features. Even when the number of features does
* not exceed this limit, this algorithm may perform poorly on high-dimensional data.
* This is due to high-dimensional data (a) making it difficult to cluster at all (based
* on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions.
*/
@Since("1.3.0")
class GaussianMixture private (
    private var k: Int,
    private var convergenceTol: Double,
    private var maxIterations: Int,
    private var seed: Long) extends Serializable {

  /**
   * Constructs a default instance. The default parameters are {k: 2, convergenceTol: 0.01,
   * maxIterations: 100, seed: random}.
   */
  @Since("1.3.0")
  def this() = this(2, 0.01, 100, Utils.random.nextLong())

  // number of samples per cluster to use when initializing Gaussians
  private val nSamples = 5

  // an initializing GMM can be provided rather than using the
  // default random starting point
  private var initialModel: Option[GaussianMixtureModel] = None

  /**
   * Set the initial GMM starting point, bypassing the random initialization.
   * You must call setK() prior to calling this method, and the condition
   * (model.k == this.k) must be met; failure will result in an IllegalArgumentException
   */
  @Since("1.3.0")
  def setInitialModel(model: GaussianMixtureModel): this.type = {
    require(model.k == k,
      s"Mismatched cluster count (model.k ${model.k} != k ${k})")
    initialModel = Some(model)
    this
  }

  /**
   * Return the user supplied initial GMM, if supplied
   */
  @Since("1.3.0")
  def getInitialModel: Option[GaussianMixtureModel] = initialModel

  /**
   * Set the number of Gaussians in the mixture model. Default: 2
   */
  @Since("1.3.0")
  def setK(k: Int): this.type = {
    require(k > 0,
      s"Number of Gaussians must be positive but got ${k}")
    this.k = k
    this
  }

  /**
   * Return the number of Gaussians in the mixture model
   */
  @Since("1.3.0")
  def getK: Int = k

  /**
   * Set the maximum number of iterations allowed. Default: 100
   */
  @Since("1.3.0")
  def setMaxIterations(maxIterations: Int): this.type = {
    require(maxIterations >= 0,
      s"Maximum of iterations must be nonnegative but got ${maxIterations}")
    this.maxIterations = maxIterations
    this
  }

  /**
   * Return the maximum number of iterations allowed
   */
  @Since("1.3.0")
  def getMaxIterations: Int = maxIterations

  /**
   * Set the largest change in log-likelihood at which convergence is
   * considered to have occurred.
   */
  @Since("1.3.0")
  def setConvergenceTol(convergenceTol: Double): this.type = {
    require(convergenceTol >= 0.0,
      s"Convergence tolerance must be nonnegative but got ${convergenceTol}")
    this.convergenceTol = convergenceTol
    this
  }

  /**
   * Return the largest change in log-likelihood at which convergence is
   * considered to have occurred.
   */
  @Since("1.3.0")
  def getConvergenceTol: Double = convergenceTol

  /**
   * Set the random seed
   */
  @Since("1.3.0")
  def setSeed(seed: Long): this.type = {
    this.seed = seed
    this
  }

  /**
   * Return the random seed
   */
  @Since("1.3.0")
  def getSeed: Long = seed

  /**
   * Perform expectation maximization
   */
  @Since("1.3.0")
  def run(data: RDD[Vector]): GaussianMixtureModel = {
    val sc = data.sparkContext

    // we will operate on the data as breeze data
    val breezeData = data.map(_.asBreeze).cache()

    // Get length of the input vectors
    val d = breezeData.first().length
    require(d < GaussianMixture.MAX_NUM_FEATURES, s"GaussianMixture cannot handle more " +
      s"than ${GaussianMixture.MAX_NUM_FEATURES} features because the size of the covariance" +
      s" matrix is quadratic in the number of features.")

    val shouldDistributeGaussians = GaussianMixture.shouldDistributeGaussians(k, d)

    // Determine initial weights and corresponding Gaussians.
    // If the user supplied an initial GMM, we use those values, otherwise
    // we start with uniform weights, a random mean from the data, and
    // diagonal covariance matrices using component variances
    // derived from the samples
    val (weights, gaussians) = initialModel match {
      case Some(gmm) => (gmm.weights, gmm.gaussians)

      case None =>
        val samples = breezeData.takeSample(withReplacement = true, k * nSamples, seed)
        (Array.fill(k)(1.0 / k), Array.tabulate(k) { i =>
          val slice = samples.view(i * nSamples, (i + 1) * nSamples)
          new MultivariateGaussian(vectorMean(slice), initCovariance(slice))
        })
    }

    var llh = Double.MinValue // current log-likelihood
    var llhp = 0.0            // previous log-likelihood
    var iter = 0
    // EM loop: alternate the distributed "E" step (treeAggregate of partial
    // sums) with the "M" step until the log-likelihood stops improving by
    // more than convergenceTol or we hit maxIterations.
    while (iter < maxIterations && math.abs(llh-llhp) > convergenceTol) {
      // create and broadcast curried cluster contribution function
      val compute = sc.broadcast(ExpectationSum.add(weights, gaussians)_)

      // aggregate the cluster contribution for all sample points
      val sums = breezeData.treeAggregate(ExpectationSum.zero(k, d))(compute.value, _ += _)

      // Create new distributions based on the partial assignments
      // (often referred to as the "M" step in literature)
      val sumWeights = sums.weights.sum

      if (shouldDistributeGaussians) {
        val numPartitions = math.min(k, 1024)
        val tuples =
          Seq.tabulate(k)(i => (sums.means(i), sums.sigmas(i), sums.weights(i)))
        val (ws, gs) = sc.parallelize(tuples, numPartitions).map { case (mean, sigma, weight) =>
          updateWeightsAndGaussians(mean, sigma, weight, sumWeights)
        }.collect().unzip
        Array.copy(ws, 0, weights, 0, ws.length)
        Array.copy(gs, 0, gaussians, 0, gs.length)
      } else {
        var i = 0
        while (i < k) {
          val (weight, gaussian) =
            updateWeightsAndGaussians(sums.means(i), sums.sigmas(i), sums.weights(i), sumWeights)
          weights(i) = weight
          gaussians(i) = gaussian
          i = i + 1
        }
      }

      llhp = llh // current becomes previous
      llh = sums.logLikelihood // this is the freshly computed log-likelihood
      iter += 1
      // release the per-iteration broadcast of the contribution function
      compute.destroy(blocking = false)
    }

    new GaussianMixtureModel(weights, gaussians)
  }

  /**
   * Java-friendly version of `run()`
   */
  @Since("1.3.0")
  def run(data: JavaRDD[Vector]): GaussianMixtureModel = run(data.rdd)

  /**
   * M-step update for a single component: converts the responsibility-weighted
   * sums accumulated in the E step into the component's new (weight, Gaussian).
   * Note that `mean` is normalized in place and `sigma` is adjusted in place
   * (rank-one downdate via BLAS.syr, which mutates sigma's backing array)
   * before being scaled into the covariance.
   */
  private def updateWeightsAndGaussians(
      mean: BDV[Double],
      sigma: BreezeMatrix[Double],
      weight: Double,
      sumWeights: Double): (Double, MultivariateGaussian) = {
    val mu = (mean /= weight)
    BLAS.syr(-weight, Vectors.fromBreeze(mu),
      Matrices.fromBreeze(sigma).asInstanceOf[DenseMatrix])
    val newWeight = weight / sumWeights
    val newGaussian = new MultivariateGaussian(mu, sigma / weight)
    (newWeight, newGaussian)
  }

  /** Average of dense breeze vectors */
  private def vectorMean(x: IndexedSeq[BV[Double]]): BDV[Double] = {
    val v = BDV.zeros[Double](x(0).length)
    x.foreach(xi => v += xi)
    v / x.length.toDouble
  }

  /**
   * Construct matrix where diagonal entries are element-wise
   * variance of input vectors (computes biased variance)
   */
  private def initCovariance(x: IndexedSeq[BV[Double]]): BreezeMatrix[Double] = {
    val mu = vectorMean(x)
    val ss = BDV.zeros[Double](x(0).length)
    x.foreach(xi => ss += (xi - mu) :^ 2.0)
    diag(ss / x.length.toDouble)
  }
}
private[clustering] object GaussianMixture {

  /** Limit number of features such that numFeatures^2^ < Int.MaxValue */
  private[clustering] val MAX_NUM_FEATURES = math.sqrt(Int.MaxValue).toInt

  /**
   * Heuristic to distribute the computation of the `MultivariateGaussian`s, approximately when
   * d is greater than 25 except for when k is very small.
   * @param k Number of mixture components (Gaussians), not "topics" —
   *          the previous doc comment was copied from LDA.
   * @param d Number of features
   */
  def shouldDistributeGaussians(k: Int, d: Int): Boolean = ((k - 1.0) / k) * d > 25
}
// Companion object for ExpectationSum: provides the zero element and the
// per-point accumulation function used during aggregation.
private object ExpectationSum {

  /** Zero-valued aggregator for k components over d-dimensional data. */
  def zero(k: Int, d: Int): ExpectationSum = {
    val weights = Array.fill(k)(0.0)
    val means = Array.fill(k)(BDV.zeros[Double](d))
    val sigmas = Array.fill(k)(BreezeMatrix.zeros[Double](d, d))
    new ExpectationSum(0.0, weights, means, sigmas)
  }

  // Folds one input point into the running sums: computes each component's
  // normalized responsibility for x, then accumulates weight, mean and
  // outer-product contributions per component.
  // (U, T) => U shape for use in aggregation.
  def add(
      weights: Array[Double],
      dists: Array[MultivariateGaussian])
      (sums: ExpectationSum, x: BV[Double]): ExpectationSum = {
    // Unnormalized responsibilities; EPSILON keeps them strictly positive.
    val responsibilities = weights.zip(dists).map {
      case (weight, dist) => MLUtils.EPSILON + weight * dist.pdf(x)
    }
    val total = responsibilities.sum
    sums.logLikelihood += math.log(total)
    var i = 0
    while (i < sums.k) {
      // Normalize in place, then accumulate this component's statistics.
      responsibilities(i) /= total
      sums.weights(i) += responsibilities(i)
      sums.means(i) += x * responsibilities(i)
      BLAS.syr(responsibilities(i), Vectors.fromBreeze(x),
        Matrices.fromBreeze(sums.sigmas(i)).asInstanceOf[DenseMatrix])
      i += 1
    }
    sums
  }
}
// Mutable aggregator for the partial expectation-step results of one
// partition: log-likelihood plus per-component weights, means and second
// moments.
private class ExpectationSum(
    var logLikelihood: Double,
    val weights: Array[Double],
    val means: Array[BDV[Double]],
    val sigmas: Array[BreezeMatrix[Double]]) extends Serializable {

  // Number of mixture components tracked by this aggregator.
  val k = weights.length

  /** Merges another partial aggregation into this one, component by component. */
  def +=(x: ExpectationSum): ExpectationSum = {
    for (i <- 0 until k) {
      weights(i) += x.weights(i)
      means(i) += x.means(i)
      sigmas(i) += x.sigmas(i)
    }
    logLikelihood += x.logLikelihood
    this
  }
}
| jianran/spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala | Scala | apache-2.0 | 11,831 |
package org.bitcoins.core.wallet.utxo
import org.bitcoins.crypto.StringFactory
/** A type of address tag. AddressTags of the same kind should all share
  * (inherit) a single AddressTagType.
  */
trait AddressTagType {
  /** Identifier string for this tag type. */
  def typeName: String

  /** Two tag types are equal exactly when their type names match. */
  def ==(at: AddressTagType): Boolean = this.typeName == at.typeName

  def !=(at: AddressTagType): Boolean = this.typeName != at.typeName
}
trait AddressTagName {
  /** The tag's display name. */
  def name: String

  /** A tag name matches a tag type when their underlying strings are equal. */
  def ==(at: AddressTagType): Boolean = this.name == at.typeName

  def !=(at: AddressTagType): Boolean = this.name != at.typeName
}
/** A tag for an address. Its `tagName` is what the tag is referred to as,
  * and its `tagType` is the parent [[AddressTagType]] it belongs to.
  */
trait AddressTag {
  def tagName: AddressTagName

  def tagType: AddressTagType

  /** Two tags are equal when both the name and the type agree. */
  def ==(at: AddressTag): Boolean =
    this.tagName == at.tagName && this.tagType == at.tagType

  def !=(at: AddressTag): Boolean = !(this == at)
}
trait AddressTagFactory[Tag <: AddressTag] extends StringFactory[Tag] {

  def tagType: AddressTagType

  def tagNames: Vector[AddressTagName]

  /** Every tag instance this factory knows about. */
  def all: Vector[Tag]

  /** Case-insensitive lookup of a tag by its string representation. */
  override def fromStringOpt(str: String): Option[Tag] = {
    val lowered = str.toLowerCase()
    all.find(tag => lowered == tag.toString.toLowerCase)
  }

  override def fromString(string: String): Tag =
    fromStringOpt(string).getOrElse(
      sys.error(s"Could not find tag for string=${string}"))
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/wallet/utxo/AddressTag.scala | Scala | mit | 1,364 |
package mesosphere.marathon
package core.health
import mesosphere.UnitTest
import mesosphere.marathon.api.v2.json.Formats
import mesosphere.marathon.core.event.HealthStatusChanged
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state.{PathId, Timestamp}
import play.api.libs.json._
// Serialization tests for Health and HealthStatusChanged JSON formats.
// FIX: removed a leftover debug `println` of the serialized event JSON.
class HealthTest extends UnitTest with Formats {

  "Health" should {
    val f = new Fixture
    import f._

    "serialize correctly" in {
      // h1: freshly created health object — no checks recorded yet.
      val j1 = Json.toJson(h1)
      (j1 \ "instanceId").as[String] should equal (instanceId.idString)
      (j1 \ "alive").as[Boolean] should equal (false)
      (j1 \ "consecutiveFailures").as[Int] should equal (0)
      (j1 \ "firstSuccess").asOpt[String] should equal (None)
      (j1 \ "lastFailure").asOpt[String] should equal (None)
      (j1 \ "lastSuccess").asOpt[String] should equal (None)

      // h2: last success (t=3) is after last failure (t=2) => alive.
      val j2 = Json.toJson(h2)
      (j2 \ "instanceId").as[String] should equal (instanceId.idString)
      (j2 \ "alive").as[Boolean] should equal (true)
      (j2 \ "consecutiveFailures").as[Int] should equal (0)
      (j2 \ "firstSuccess").as[String] should equal ("1970-01-01T00:00:00.001Z")
      (j2 \ "lastFailure").as[String] should equal ("1970-01-01T00:00:00.002Z")
      (j2 \ "lastSuccess").as[String] should equal ("1970-01-01T00:00:00.003Z")

      // h3: last failure (t=3) is after last success (t=2) => not alive.
      val j3 = Json.toJson(h3)
      (j3 \ "instanceId").as[String] should equal (instanceId.idString)
      (j3 \ "alive").as[Boolean] should equal (false)
      (j3 \ "consecutiveFailures").as[Int] should equal (1)
      (j3 \ "firstSuccess").as[String] should equal ("1970-01-01T00:00:00.001Z")
      (j3 \ "lastFailure").as[String] should equal ("1970-01-01T00:00:00.003Z")
      (j3 \ "lastSuccess").as[String] should equal ("1970-01-01T00:00:00.002Z")
    }
  }

  "HealthStatusChangedEvent" should {
    "serialize correctly" in {
      val f = new Fixture
      val event = HealthStatusChanged(
        appId = f.appId,
        instanceId = f.instanceId,
        version = f.version,
        alive = true,
        timestamp = f.now.toString
      )

      val json = Json.toJson(event)
      (json \ "appId").as[String] should equal(f.appId.toString)
      (json \ "instanceId").as[String] should equal(f.instanceId.idString)
      (json \ "version").as[String] should equal(f.version.toString)
      (json \ "alive").as[Boolean] should equal(true)
      (json \ "eventType").as[String] should equal("health_status_changed_event")
      (json \ "timestamp").as[String] should equal(f.now.toString)
    }
  }

  // Shared fixtures: one run spec with three health states.
  class Fixture {
    val appId = PathId("/test")
    val version = Timestamp(1)
    val now = Timestamp(2)
    val instanceId = Instance.Id.forRunSpec(appId)
    val taskId = Task.Id(instanceId)

    val h1 = Health(instanceId)
    val h2 = Health(
      instanceId = instanceId,
      consecutiveFailures = 0,
      firstSuccess = Some(Timestamp(1)),
      lastSuccess = Some(Timestamp(3)),
      lastFailure = Some(Timestamp(2))
    )
    val h3 = Health(
      instanceId,
      consecutiveFailures = 1,
      firstSuccess = Some(Timestamp(1)),
      lastSuccess = Some(Timestamp(2)),
      lastFailure = Some(Timestamp(3))
    )
  }
}
| gsantovena/marathon | src/test/scala/mesosphere/marathon/core/health/HealthTest.scala | Scala | apache-2.0 | 3,244 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.application.assets.insurancePolicy
import iht.config.AppConfig
import iht.controllers.application.ApplicationControllerTest
import iht.forms.ApplicationForms._
import iht.models.application.ApplicationDetails
import iht.models.application.assets.InsurancePolicy
import iht.testhelpers.CommonBuilder
import iht.views.html.application.asset.insurancePolicy.insurance_policy_details_deceased_own
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import play.api.mvc.MessagesControllerComponents
import play.api.test.Helpers._
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
import scala.concurrent.Future
// Tests for InsurancePolicyDetailsDeceasedOwnController: page load,
// form submission (valid and invalid) and authorisation redirects.
class InsurancePolicyDetailsDeceasedOwnControllerTest extends ApplicationControllerTest {

  // Concrete controller wiring the mocked Play components and the view
  // template into the trait under test.
  protected abstract class TestController extends FrontendController(mockControllerComponents) with InsurancePolicyDetailsDeceasedOwnController {
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
    override val insurancePolicyDetailsDeceasedOwnView: insurance_policy_details_deceased_own = app.injector.instanceOf[insurance_policy_details_deceased_own]
  }

  def insurancePolicyDetailsDeceasedOwnController = new TestController {
    override val authConnector = mockAuthConnector
    override val cachingConnector = mockCachingConnector
    override val ihtConnector = mockIhtConnector
  }

  // NOTE(review): wired identically to the controller above; the unauthorised
  // behaviour comes from createFakeRequest(isAuthorised = false) in the test,
  // not from this instance — confirm whether a distinct instance is needed.
  def insurancePolicyDetailsDeceasedOwnControllerNotAuthorised = new TestController {
    override val authConnector = mockAuthConnector
    override val cachingConnector = mockCachingConnector
    override val ihtConnector = mockIhtConnector
  }

  // Registration fixture with a known deceased and IHT reference.
  val registrationDetails = CommonBuilder.buildRegistrationDetails copy(
    deceasedDetails = Some(CommonBuilder.buildDeceasedDetails),
    ihtReference = Some("ABC123"))

  // Fully-populated insurance policy answers shared across the tests below.
  val insurancePolicyDetails = InsurancePolicy(
    isAnnuitiesBought = Some(false),
    isInsurancePremiumsPayedForSomeoneElse = Some(true),
    value = Some(BigDecimal(7)),
    shareValue = Some(BigDecimal(8)),
    policyInDeceasedName = Some(true),
    isJointlyOwned = Some(true),
    isInTrust = Some(false),
    coveredByExemption = Some(true),
    sevenYearsBefore = Some(true),
    moreThanMaxValue = Some(false)
  )

  val allAssets = CommonBuilder.buildAllAssets copy (insurancePolicy = Some(insurancePolicyDetails))

  val applicationDetails = CommonBuilder.buildApplicationDetails copy (allAssets = Some(allAssets))

  // Stubs the caching and IHT connectors to return the supplied application
  // details for both reads and writes.
  private def createMocks(applicationDetails: ApplicationDetails) = {
    when(mockCachingConnector.getRegistrationDetails(any(), any()))
      .thenReturn(Future.successful(Some(registrationDetails)))
    when(mockIhtConnector.getApplication(any(), any(), any())(any()))
      .thenReturn(Future.successful(Some(applicationDetails)))
    when(mockCachingConnector.storeApplicationDetails(any())(any(), any()))
      .thenReturn(Future.successful(Some(applicationDetails)))
    when(mockIhtConnector.saveApplication(any(), any(), any())(any(), any()))
      .thenReturn(Future.successful(Some(applicationDetails)))
  }

  "InsurancePolicyDetailsDeceasedOwnController" must {

    // Valid "yes" answer with a value: expect a redirect after save.
    "save application and go to Insurance Overview page on submit" in {

      val applicationDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = Some(CommonBuilder
        .buildAllAssets.copy(insurancePolicy = Some(insurancePolicyDetails))))

      createMocksForApplication(mockCachingConnector,
        mockIhtConnector,
        appDetails = Some(applicationDetails),
        getAppDetails = true,
        saveAppDetails= true,
        storeAppDetailsInCache = true)

      val insuranceDeceasedOwnValue = CommonBuilder.buildInsurancePolicy.copy(policyInDeceasedName=Some(true),value=Some(20))

      val filledInsuranceForm = insurancePolicyDeceasedOwnQuestionForm.fill(insuranceDeceasedOwnValue)
      implicit val request = createFakeRequest().withFormUrlEncodedBody(filledInsuranceForm.data.toSeq: _*)

      val result = insurancePolicyDetailsDeceasedOwnController.onSubmit (request)
      status(result) mustBe (SEE_OTHER)
    }

    // Same flow but starting from an application with no previously saved
    // assets.
    "save application and go to Insurance Overview page on submit where no assets previously saved" in {

      val applicationDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = None)

      createMocksForApplication(mockCachingConnector,
        mockIhtConnector,
        appDetails = Some(applicationDetails),
        getAppDetails = true,
        saveAppDetails= true,
        storeAppDetailsInCache = true)

      val insuranceDeceasedOwnValue = CommonBuilder.buildInsurancePolicy.copy(policyInDeceasedName=Some(true),value=Some(20))

      val filledInsuranceForm = insurancePolicyDeceasedOwnQuestionForm.fill(insuranceDeceasedOwnValue)
      implicit val request = createFakeRequest().withFormUrlEncodedBody(filledInsuranceForm.data.toSeq: _*)

      val result = insurancePolicyDetailsDeceasedOwnController.onSubmit (request)
      status(result) mustBe (SEE_OTHER)
    }

    // A "no" answer (no value) must also save and redirect.
    "save application and go to Insurance Overview page on submit where answer as no" in {

      val applicationDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = None)

      createMocksForApplication(mockCachingConnector,
        mockIhtConnector,
        appDetails = Some(applicationDetails),
        getAppDetails = true,
        saveAppDetails= true,
        storeAppDetailsInCache = true)

      val insuranceDeceasedOwnValue = CommonBuilder.buildInsurancePolicy.copy(policyInDeceasedName=Some(false))

      val filledInsuranceForm = insurancePolicyDeceasedOwnQuestionForm.fill(insuranceDeceasedOwnValue)
      implicit val request = createFakeRequest().withFormUrlEncodedBody(filledInsuranceForm.data.toSeq: _*)

      val result = insurancePolicyDetailsDeceasedOwnController.onSubmit (request)
      status(result) mustBe (SEE_OTHER)
    }

    // Non-numeric value must be rejected by form validation.
    "respond with bad request when incorrect value are entered on the page" in {

     implicit val fakePostRequest = createFakeRequest().withFormUrlEncodedBody(("value", "utytyyterrrrrrrrrrrrrr"))

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector)

      val result = insurancePolicyDetailsDeceasedOwnController.onSubmit (fakePostRequest)
      status(result) mustBe (BAD_REQUEST)
    }

    // Unauthenticated users are redirected to the login page.
    "redirect to login page onPageLoad if the user is not logged in" in {
      val result = insurancePolicyDetailsDeceasedOwnControllerNotAuthorised.onPageLoad(createFakeRequest(isAuthorised = false))
      status(result) must be(SEE_OTHER)
      redirectLocation(result).get must be(loginUrl)
    }

    "respond with OK on page load" in {
      createMocks(applicationDetails)

      val result = insurancePolicyDetailsDeceasedOwnController.onPageLoad(createFakeRequest())
      status(result) must be (OK)
    }

    // Successful submit redirects to the overview page anchored at the
    // "paying to deceased" question.
    "redirect to correct page on submit" in {
      createMocks(applicationDetails)

      val filledForm = insurancePolicyDeceasedOwnQuestionForm.fill(insurancePolicyDetails)
      implicit val request = createFakeRequest().withFormUrlEncodedBody(filledForm.data.toSeq: _*)

      val result = insurancePolicyDetailsDeceasedOwnController.onSubmit (request)
      redirectLocation(result) must be (Some(iht.controllers.application.assets.insurancePolicy.routes.InsurancePolicyOverviewController.onPageLoad().url + "#" + mockAppConfig.InsurancePayingToDeceasedYesNoID))
    }

    // Shared behaviour: page load with no registration details in cache.
    behave like controllerOnPageLoadWithNoExistingRegistrationDetails(mockCachingConnector,
      insurancePolicyDetailsDeceasedOwnController.onPageLoad(createFakeRequest()))
  }
}
| hmrc/iht-frontend | test/iht/controllers/application/assets/insurancePolicy/InsurancePolicyDetailsDeceasedOwnControllerTest.scala | Scala | apache-2.0 | 8,171 |
package integration
import akka.actor.ActorSystem
import com.xebialabs.restito.builder.stub.StubHttp._
import com.xebialabs.restito.builder.verify.VerifyHttp._
import com.xebialabs.restito.semantics.Action._
import com.xebialabs.restito.semantics.Condition._
import com.xebialabs.restito.server.StubServer
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import org.glassfish.grizzly.http.util.HttpStatus
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
@RunWith(classOf[JUnitRunner])
// FIX: the StubServer was never stopped (leaking the listening port) and
// BeforeAndAfterAll was mixed in but unused, so the ActorSystem was never
// terminated after the suite.
class QuickRequestsTest extends FunSpec with BeforeAndAfterAll with Matchers {

  implicit val system: ActorSystem = ActorSystem()

  // Shut down the actor system (and its dispatcher threads) once the suite
  // finishes.
  override def afterAll(): Unit = {
    Await.result(system.terminate(), 10 seconds)
    super.afterAll()
  }

  describe("restito server") {
    it("should register quick requests happening in parallel") {
      val server = new StubServer().run
      try {
        whenHttp(server)
          .`match`(startsWithUri("/test"))
          .`then`(status(HttpStatus.OK_200))

        def makeRequest =
          Http().singleRequest(HttpRequest(uri = s"http://localhost:${server.getPort}/test"))

        // Fire five requests concurrently and wait for all of them.
        Await.result(Future.sequence(Seq(
          makeRequest,
          makeRequest,
          makeRequest,
          makeRequest,
          makeRequest
        )), 10 second)

        verifyHttp(server)
          .times(5, get("/test"))
      } finally {
        // Always release the stub server's port, even if verification fails.
        server.stop()
      }
    }
  }
}
| mkotsur/restito | src/test/scala/integration/QuickRequestsTest.scala | Scala | mit | 1,480 |
package org.scaladebugger.api.dsl.info
import org.scaladebugger.api.profiles.traits.info.{ObjectInfo, VariableInfo}
import org.scaladebugger.api.virtualmachines.ObjectCache
/**
 * Wraps a variable profile, exposing DSL-like cache operations on its value.
 *
 * @param variableInfo The profile to wrap
 */
class VariableInfoDSLWrapper private[dsl] (
  private val variableInfo: VariableInfo
) {
  /**
   * Caches the value of this variable in its associated JVM cache.
   *
   * @param objectCache The JVM cache to store this variable's value
   * @return The variable whose value was stored
   */
  def cache()(
    implicit objectCache: ObjectCache = variableInfo.scalaVirtualMachine.cache
  ): VariableInfo = {
    import org.scaladebugger.api.dsl.Implicits.ValueInfoDSL
    // Cache the underlying value, then hand the wrapped variable back for
    // further chaining.
    val currentValue = variableInfo.toValueInfo
    currentValue.cache()
    variableInfo
  }

  /**
   * Removes this variable's value from its associated JVM cache.
   *
   * @param objectCache The JVM cache to remove this variable's value
   * @return The variable whose value was removed
   */
  def uncache()(
    implicit objectCache: ObjectCache = variableInfo.scalaVirtualMachine.cache
  ): VariableInfo = {
    import org.scaladebugger.api.dsl.Implicits.ValueInfoDSL
    // Evict the underlying value, then hand the wrapped variable back.
    val currentValue = variableInfo.toValueInfo
    currentValue.uncache()
    variableInfo
  }
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/dsl/info/VariableInfoDSLWrapper.scala | Scala | apache-2.0 | 1,248 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph
import scala.collection.mutable
import scala.concurrent.{ ExecutionContext, Future }
import org.objectweb.asm.Opcodes
import org.objectweb.asm.signature.SignatureVisitor
import org.apache.spark.rdd.RDD
import com.asakusafw.spark.compiler.graph.CacheStrategy
import com.asakusafw.spark.runtime.RoundContext
import com.asakusafw.spark.runtime.rdd.BranchKey
import com.asakusafw.spark.tools.asm._
import com.asakusafw.spark.tools.asm.MethodBuilder._
import com.asakusafw.spark.tools.asm4s._
import com.asakusafw.spark.tools.asm4s.MixIn._
import com.asakusafw.spark.extensions.iterativebatch.runtime.graph.{
CacheAlways => CacheAlwaysTrait,
CacheByParameter => CacheByParameterTrait
}
// Compiler-side cache strategy that weaves the runtime CacheAlwaysTrait into
// the generated class: a transient `values` map field plus a
// `getOrCache(key, compute)` method keyed by an arbitrary object.
trait CacheAlways extends CacheStrategy {

  override val mixins = Seq(
    MixIn(classOf[CacheAlwaysTrait[_, _]].asType,
      Seq(
        // Transient field `values: mutable.Map[AnyRef, AnyRef]`; the visitor
        // lambda builds its generic signature.
        FieldDef(
          Opcodes.ACC_FINAL | Opcodes.ACC_TRANSIENT,
          "values",
          classOf[mutable.Map[_, _]].asType,
          _.newClassType(classOf[mutable.Map[_, _]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[AnyRef].asType)
              .newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[AnyRef].asType)
          })),
      Seq(
        // Method `getOrCache(key: AnyRef, compute: () => _): AnyRef`.
        MethodDef("getOrCache",
          classOf[AnyRef].asType,
          Seq(
            classOf[AnyRef].asType,
            classOf[Function0[_]].asType),
          new MethodSignatureBuilder()
            .newParameterType(classOf[AnyRef].asType)
            .newParameterType {
              _.newClassType(classOf[Function0[_]].asType) {
                _.newTypeArgument()
              }
            }
            .newReturnType(classOf[AnyRef].asType)))))
}
// Compiler-side cache strategy that weaves the runtime CacheByParameterTrait
// into the generated class: values are cached per Seq[String] of batch
// parameter values extracted from the RoundContext.
trait CacheByParameter extends CacheStrategy {

  override val mixins = Seq(
    MixIn(classOf[CacheByParameterTrait[_]].asType,
      Seq(
        // Transient field `values: mutable.Map[Seq[String], AnyRef]`.
        FieldDef(
          Opcodes.ACC_FINAL | Opcodes.ACC_TRANSIENT,
          "values",
          classOf[mutable.Map[_, _]].asType,
          _.newClassType(classOf[mutable.Map[_, _]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF) {
              _.newClassType(classOf[Seq[_]].asType) {
                _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[String].asType)
              }
            }
              .newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[AnyRef].asType)
          })),
      Seq(
        // Method `getOrCache(rc: RoundContext, compute: () => _): AnyRef`.
        MethodDef("getOrCache",
          classOf[AnyRef].asType,
          Seq(
            classOf[RoundContext].asType,
            classOf[Function0[_]].asType),
          new MethodSignatureBuilder()
            .newParameterType(classOf[RoundContext].asType)
            .newParameterType {
              _.newClassType(classOf[Function0[_]].asType) {
                _.newTypeArgument()
              }
            }
            .newReturnType(classOf[AnyRef].asType)))))

  // Names of the batch parameters this cache is keyed by; embedded as a
  // Set[String] literal in the generated `parameters` method below.
  def parameters: Set[String]

  override def defFields(fieldDef: FieldDef): Unit = {
    super.defFields(fieldDef)

    // Backing field for the lazily-materialized parameter set (see
    // defMethods).
    fieldDef.newField(
      Opcodes.ACC_PRIVATE | Opcodes.ACC_TRANSIENT,
      "parameters",
      classOf[Set[String]].asType,
      new TypeSignatureBuilder()
        .newClassType(classOf[Set[_]].asType) {
          _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[String].asType)
        })
  }

  override def defMethods(methodDef: MethodDef): Unit = {
    super.defMethods(methodDef)

    // Bridge method: getOrCache(AnyRef, () => _) casts the key to
    // RoundContext and delegates to the mixed-in overload.
    methodDef.newMethod(
      "getOrCache",
      classOf[AnyRef].asType,
      Seq(classOf[AnyRef].asType, classOf[Function0[_]].asType)) { implicit mb =>
      val thisVar :: keyVar :: valueVar :: _ = mb.argVars
      `return`(
        thisVar.push().invokeV(
          "getOrCache",
          classOf[AnyRef].asType,
          keyVar.push().cast(classOf[RoundContext].asType),
          valueVar.push()))
    }

    // `parameters` accessor: builds the Set[String] literal on first call and
    // memoizes it in the field.
    // NOTE(review): the generated null-check is not synchronized; harmless if
    // recomputation is idempotent — confirm single-threaded initialization.
    methodDef.newMethod("parameters", classOf[Set[String]].asType, Seq.empty) { implicit mb =>
      val thisVar :: _ = mb.argVars
      thisVar.push().getField("parameters", classOf[Set[String]].asType).unlessNotNull {
        thisVar.push().putField(
          "parameters",
          buildSet { builder =>
            parameters.foreach { parameter =>
              builder += ldc(parameter)
            }
          })
      }
      `return`(thisVar.push().getField("parameters", classOf[Set[String]].asType))
    }
  }
}
| akirakw/asakusafw-spark | extensions/iterativebatch/compiler/core/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/CacheStrategy.scala | Scala | apache-2.0 | 5,011 |
package scalydomain
import java.io.File
import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue}
import scala.concurrent._
import scala.concurrent.duration._
import scala.collection.JavaConversions._
import ExecutionContext.Implicits.global
import scalydomain.core.{DomainDb, ZoneFile}
// A domain name paired with its precomputed hash.
// NOTE(review): `hash` is an Array[Byte], so case-class equals/hashCode
// compare it by reference, not content — confirm instances are never used as
// map keys or compared for equality.
case class DomainName(val name: String, val hash: Array[Byte])
// Parsed command-line options; defaults are placeholders overwritten by scopt.
case class CliOptions(domainDbFile: File = new File("."), zoneFiles: Seq[File] = Seq())
object ZoneImport {
  /**
   * Entry point: parses the command line and, when the options are valid,
   * runs the import.
   *
   * FIX: previously this called `.get` on scopt's parse result, crashing with
   * a NoSuchElementException on invalid arguments; scopt already prints usage
   * on failure, so now we simply do nothing in that case.
   */
  def main(args: Array[String]): Unit = {
    val optParser = new scopt.OptionParser[CliOptions]("zoneimport") {
      head("zoneimport", "SNAPSHOT")

      arg[File]("<domain db file>") required() action { (x, c) =>
        c.copy(domainDbFile = x) } text("Path to domain database file which will be populated by this command")

      arg[File]("<zonefilefile>...") unbounded() required() action { (x, c) =>
        c.copy(zoneFiles = c.zoneFiles :+ x) } text("DNS zone file(s) to import")
    }

    optParser.parse(args, CliOptions()).foreach(importZones)
  }

  /** Reads every zone file concurrently and writes the domains to the DB. */
  private def importZones(config: CliOptions): Unit = {
    // Open each zone file up front; skip (with a message) any that fail.
    val zonefiles: Seq[ZoneFile] = config.zoneFiles.flatMap { a =>
      try {
        Some(new ZoneFile(a.getPath))
      } catch {
        case e: Exception => {
          println(s"Error reading zonefile $a: $e")
          None
        }
      }
    }

    if (zonefiles.isEmpty) {
      println("No zonefiles to read; nothing to do")
      return
    }

    // Bounded hand-off queue between the reader futures (producers) and the
    // single writer future (consumer). `None` is the end-of-stream marker.
    val queue = new LinkedBlockingQueue[Option[DomainName]](DomainDb.WriteBatchSize)

    println("Starting writer")
    val writer = Future {
      println(s"Writing domains to ${config.domainDbFile.getPath}")
      var count: Long = 0
      val domainDb = new DomainDb(config.domainDbFile.getPath)

      try {
        var eof = false
        while(!eof) {
          queue.take match {
            case Some(domain) => {
              domainDb.write(domain.name, domain.hash)
              count = count + 1
            }

            case None => {
              println("Writer shutting down")
              eof = true
            }
          }
        }

        println("Compacting domain database")
        domainDb.compact()
        println(domainDb.stats)
      } finally {
        domainDb.close()
      }

      count
    }

    println("Starting zone file readers")
    val readers = Future.sequence(zonefiles.map { zonefile =>
      Future {
        println(s"Starting to read ${zonefile.path}")
        var count: Long = 0

        for (domain <- zonefile) {
          val hash = DomainDb.computeDomainHash(domain)
          queue.put(Some(DomainName(domain, hash)))
          count += 1

          if (count % 1000000 == 0) {
            println(s"Processed ${zonefile.path}:$count")
          }
        }

        (zonefile, count)
      }
    })

    println("Waiting for readers to complete")
    for (result <- Await.result(readers, Duration.Inf)) {
      val (zonefile, count) = result
      println(s"Zone file ${zonefile.path} contained $count domains")
    }

    println("Waiting for writer to complete")
    queue.put(None)
    val writeCount = Await.result(writer, Duration.Inf)
    println(s"Wrote $writeCount")
  }
}
| anelson/scalydomain | zoneimport/main.scala | Scala | apache-2.0 | 2,923 |
package gatlin
import io.gatling.core.session.Session
import io.gatling.commons.validation.Validation
/**
 * Created by peter on 01/10/15.
 *
 * A message factory resolved against a Gatling [[Session]]: given the current
 * session it returns a Validation wrapping either the message to use or a
 * failure describing why it could not be produced.
 */
trait AskMessage {
  // NOTE(review): implementations presumably build actor "ask" payloads from
  // session attributes — confirm against callers.
  def apply(session:Session): Validation[Any]
}
| kwark/akka-persistence-postgresql | modules/benchmark/src/it/scala/gatlin/AskMessage.scala | Scala | mit | 215 |
package spark
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
 * Utility trait for classes that want to log data. Creates a SLF4J logger
 * for the class and allows logging messages at different levels using
 * methods that only evaluate parameters lazily if the log level is enabled.
 */
trait Logging {
  // Make the log field transient so that objects with Logging can
  // be serialized and used on another machine
  @transient private var log_ : Logger = null

  /** Gets or lazily creates the SLF4J logger named after the concrete class. */
  def log: Logger = {
    if (log_ == null) {
      // Ignore trailing $'s in the class names for Scala objects
      val className = this.getClass.getName.stripSuffix("$")
      log_ = LoggerFactory.getLogger(className)
    }
    log_
  }

  // Log methods that take only a String. Each call is guarded by the level
  // check so the by-name `msg` thunk is only evaluated when it will actually
  // be logged. (FIX: logInfo was previously unguarded, always evaluating msg.)
  def logInfo(msg: => String) = if (log.isInfoEnabled) log.info(msg)
  def logDebug(msg: => String) = if (log.isDebugEnabled) log.debug(msg)
  def logWarning(msg: => String) = if (log.isWarnEnabled) log.warn(msg)
  def logError(msg: => String) = if (log.isErrorEnabled) log.error(msg)

  // Log methods that take Throwables (Exceptions/Errors) too.
  // FIX: the info and debug variants previously dropped the throwable,
  // silently losing the stack trace.
  def logInfo(msg: => String, throwable: Throwable) =
    if (log.isInfoEnabled) log.info(msg, throwable)
  def logDebug(msg: => String, throwable: Throwable) =
    if (log.isDebugEnabled) log.debug(msg, throwable)
  def logWarning(msg: => String, throwable: Throwable) =
    if (log.isWarnEnabled) log.warn(msg, throwable)
  def logError(msg: => String, throwable: Throwable) =
    if (log.isErrorEnabled) log.error(msg, throwable)

  // Method for ensuring that logging is initialized, to avoid having multiple
  // threads do it concurrently (as SLF4J initialization is not thread safe).
  def initLogging() { log }
}
| jperla/spark-advancers | core/src/main/scala/spark/Logging.scala | Scala | bsd-3-clause | 1,912 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.api.java.function.FilterFunction
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructType, UserDefinedType}
/*
* This file defines optimization rules related to object manipulation (for the Dataset API).
*/
/**
 * Removes cases where we are unnecessarily going between the object and serialized (InternalRow)
 * representation of data item. For example back to back map operations.
 */
object EliminateSerialization extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // Back-to-back serialize/deserialize of the same object type: both nodes
    // can be dropped, keeping only an alias that preserves the output id.
    case d @ DeserializeToObject(_, _, s: SerializeFromObject)
        if d.outputObjAttr.dataType == s.inputObjAttr.dataType =>
      // Adds an extra Project here, to preserve the output expr id of `DeserializeToObject`.
      // We will remove it later in RemoveAliasOnlyProject rule.
      val objAttr = Alias(s.inputObjAttr, s.inputObjAttr.name)(exprId = d.outputObjAttr.exprId)
      Project(objAttr :: Nil, s.child)

    // AppendColumns can consume the child's object output directly, avoiding
    // one serialize/deserialize round trip.
    case a @ AppendColumns(_, _, _, _, _, s: SerializeFromObject)
        if a.deserializer.dataType == s.inputObjAttr.dataType =>
      AppendColumnsWithObject(a.func, s.serializer, a.serializer, s.child)

    // If there is a `SerializeFromObject` under typed filter and its input object type is same with
    // the typed filter's deserializer, we can convert typed filter to normal filter without
    // deserialization in condition, and push it down through `SerializeFromObject`.
    // e.g. `ds.map(...).filter(...)` can be optimized by this rule to save extra deserialization,
    // but `ds.map(...).as[AnotherType].filter(...)` can not be optimized.
    case f @ TypedFilter(_, _, _, _, s: SerializeFromObject)
        if f.deserializer.dataType == s.inputObjAttr.dataType =>
      s.copy(child = f.withObjectProducerChild(s.child))

    // If there is a `DeserializeToObject` upon typed filter and its output object type is same with
    // the typed filter's deserializer, we can convert typed filter to normal filter without
    // deserialization in condition, and pull it up through `DeserializeToObject`.
    // e.g. `ds.filter(...).map(...)` can be optimized by this rule to save extra deserialization,
    // but `ds.filter(...).as[AnotherType].map(...)` can not be optimized.
    case d @ DeserializeToObject(_, _, f: TypedFilter)
        if d.outputObjAttr.dataType == f.deserializer.dataType =>
      f.withObjectProducerChild(d.copy(child = f.child))
  }
}
/**
 * Combines two adjacent [[TypedFilter]]s, which operate on same type object in condition, into one,
 * merging the filter functions into one conjunctive function.
 */
object CombineTypedFilters extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case t1 @ TypedFilter(_, _, _, _, t2 @ TypedFilter(_, _, _, _, child))
        if t1.deserializer.dataType == t2.deserializer.dataType =>
      // t2 is the child (evaluated first at runtime), so its predicate goes
      // first in the conjunction, preserving short-circuit order.
      TypedFilter(
        combineFilterFunction(t2.func, t1.func),
        t1.argumentClass,
        t1.argumentSchema,
        t1.deserializer,
        child)
  }

  // Builds a single conjunctive predicate from two user functions, each of
  // which may be either a Java FilterFunction or a Scala Any => Boolean.
  private def combineFilterFunction(func1: AnyRef, func2: AnyRef): Any => Boolean = {
    (func1, func2) match {
      case (f1: FilterFunction[_], f2: FilterFunction[_]) =>
        input => f1.asInstanceOf[FilterFunction[Any]].call(input) &&
          f2.asInstanceOf[FilterFunction[Any]].call(input)
      case (f1: FilterFunction[_], f2) =>
        input => f1.asInstanceOf[FilterFunction[Any]].call(input) &&
          f2.asInstanceOf[Any => Boolean](input)
      case (f1, f2: FilterFunction[_]) =>
        input => f1.asInstanceOf[Any => Boolean].apply(input) &&
          f2.asInstanceOf[FilterFunction[Any]].call(input)
      case (f1, f2) =>
        input => f1.asInstanceOf[Any => Boolean].apply(input) &&
          f2.asInstanceOf[Any => Boolean].apply(input)
    }
  }
}
/**
 * Removes MapObjects when the following conditions are satisfied
 *   1. Mapobject(... lambdavariable(..., false) ...), which means types for input and output
 *      are primitive types with non-nullable
 *   2. no custom collection class specified representation of data item.
 */
object EliminateMapObjects extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
    // Identity mapping over a non-nullable lambda variable with no custom
    // collection class: forwarding the input data unchanged is equivalent.
    case MapObjects(_, LambdaVariable(_, _, false, _), inputData, None) => inputData
  }
}
/**
* Prunes unnecessary object serializers from query plan. This rule prunes both individual
* serializer and nested fields in serializers.
*/
object ObjectSerializerPruning extends Rule[LogicalPlan] {
/**
 * Visible for testing.
 * Collects all struct types from given data type object, recursively.
 *
 * Appends every StructType reachable from `dt` (depth-first, parent before
 * children) to the mutable `structs` buffer and returns that same buffer.
 */
def collectStructType(dt: DataType, structs: ArrayBuffer[StructType]): ArrayBuffer[StructType] = {
  dt match {
    case s @ StructType(fields) =>
      structs += s
      fields.map(f => collectStructType(f.dataType, structs))
    case ArrayType(elementType, _) =>
      collectStructType(elementType, structs)
    case MapType(keyType, valueType, _) =>
      // Both the key and the value side may contain nested structs.
      collectStructType(keyType, structs)
      collectStructType(valueType, structs)
    // We don't use UserDefinedType in those serializers.
    case _: UserDefinedType[_] =>
    case _ =>
  }
  structs
}
/**
 * This method returns pruned `CreateNamedStruct` expression given an original `CreateNamedStruct`
 * and a pruned `StructType`.
 */
private def pruneNamedStruct(struct: CreateNamedStruct, prunedType: StructType) = {
  // Filters out the pruned fields.
  val resolver = SQLConf.get.resolver
  // Keep only the (name, value) pairs whose evaluated field name still exists
  // in the pruned type, then re-flatten into the alternating name/value
  // sequence CreateNamedStruct expects.
  val prunedFields = struct.nameExprs.zip(struct.valExprs).filter { case (nameExpr, _) =>
    val name = nameExpr.eval(EmptyRow).toString
    prunedType.fieldNames.exists(resolver(_, name))
  }.flatMap(pair => Seq(pair._1, pair._2))

  CreateNamedStruct(prunedFields)
}
/**
 * When we change nested serializer data type, `If` expression will be unresolved because
 * literal null's data type doesn't match now. We need to align it with new data type.
 * Note: we should do `transformUp` explicitly to change data types.
 */
private def alignNullTypeInIf(expr: Expression) = expr.transformUp {
  // Rewrite `if (isnull(...)) null else ser` so the null literal carries the
  // serializer's (possibly pruned) data type.
  case i @ If(_: IsNull, Literal(null, dt), ser) if !dt.sameType(ser.dataType) =>
    i.copy(trueValue = Literal(null, ser.dataType))
}
/**
* This method prunes given serializer expression by given pruned data type. For example,
* given a serializer creating struct(a int, b int) and pruned data type struct(a int),
* this method returns pruned serializer creating struct(a int).
*/
def pruneSerializer(
serializer: NamedExpression,
prunedDataType: DataType): NamedExpression = {
val prunedStructTypes = collectStructType(prunedDataType, ArrayBuffer.empty[StructType])
.toIterator
def transformer: PartialFunction[Expression, Expression] = {
case m: ExternalMapToCatalyst =>
val prunedKeyConverter = m.keyConverter.transformDown(transformer)
val prunedValueConverter = m.valueConverter.transformDown(transformer)
m.copy(keyConverter = alignNullTypeInIf(prunedKeyConverter),
valueConverter = alignNullTypeInIf(prunedValueConverter))
case s: CreateNamedStruct if prunedStructTypes.hasNext =>
val prunedType = prunedStructTypes.next()
pruneNamedStruct(s, prunedType)
}
val transformedSerializer = serializer.transformDown(transformer)
val prunedSerializer = alignNullTypeInIf(transformedSerializer).asInstanceOf[NamedExpression]
if (prunedSerializer.dataType.sameType(prunedDataType)) {
prunedSerializer
} else {
serializer
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case p @ Project(_, s: SerializeFromObject) =>
// Prunes individual serializer if it is not used at all by above projection.
val usedRefs = p.references
val prunedSerializer = s.serializer.filter(usedRefs.contains)
val rootFields = SchemaPruning.identifyRootFields(p.projectList, Seq.empty)
if (SQLConf.get.serializerNestedSchemaPruningEnabled && rootFields.nonEmpty) {
// Prunes nested fields in serializers.
val prunedSchema = SchemaPruning.pruneDataSchema(
StructType.fromAttributes(prunedSerializer.map(_.toAttribute)), rootFields)
val nestedPrunedSerializer = prunedSerializer.zipWithIndex.map { case (serializer, idx) =>
pruneSerializer(serializer, prunedSchema(idx).dataType)
}
// Builds new projection.
val projectionOverSchema = ProjectionOverSchema(prunedSchema)
val newProjects = p.projectList.map(_.transformDown {
case projectionOverSchema(expr) => expr
}).map { case expr: NamedExpression => expr }
p.copy(projectList = newProjects,
child = SerializeFromObject(nestedPrunedSerializer, s.child))
} else {
p.copy(child = SerializeFromObject(prunedSerializer, s.child))
}
}
}
/**
 * Reassigns per-query unique IDs to `LambdaVariable`s, whose original IDs are globally unique. This
 * can help Spark to hit codegen cache more often and improve performance.
 */
object ReassignLambdaVariableID extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = {
    // The original LambdaVariable IDs are all positive. To avoid conflicts, the new IDs are all
    // negative and starts from -1.
    var newId = 0L
    // Maps each original (positive) ID to its newly assigned negative ID so that
    // repeated occurrences of the same variable keep the same new ID.
    val oldIdToNewId = scala.collection.mutable.Map.empty[Long, Long]

    // The `LambdaVariable` IDs in a query should be all positive or negative. Otherwise it's a bug
    // and we should fail earlier.
    var hasNegativeIds = false
    var hasPositiveIds = false

    // The transformed plan below is the last expression of apply() and thus its result.
    plan.transformAllExpressions {
      case lr: LambdaVariable if lr.id == 0 =>
        throw new IllegalStateException("LambdaVariable should never has 0 as its ID.")

      case lr: LambdaVariable if lr.id < 0 =>
        // Negative IDs were already assigned by this rule; leave them untouched.
        hasNegativeIds = true
        if (hasPositiveIds) {
          throw new IllegalStateException(
            "LambdaVariable IDs in a query should be all positive or negative.")
        }
        lr

      case lr: LambdaVariable if lr.id > 0 =>
        hasPositiveIds = true
        if (hasNegativeIds) {
          throw new IllegalStateException(
            "LambdaVariable IDs in a query should be all positive or negative.")
        }

        if (oldIdToNewId.contains(lr.id)) {
          // This `LambdaVariable` has appeared before, reuse the newly generated ID.
          lr.copy(id = oldIdToNewId(lr.id))
        } else {
          // This is the first appearance of this `LambdaVariable`, generate a new ID.
          newId -= 1
          oldIdToNewId(lr.id) = newId
          lr.copy(id = newId)
        }
    }
  }
}
| goldmedal/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala | Scala | apache-2.0 | 11,914 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.jni
/**
  * JNI bindings for TensorFlow's native checkpoint reader. The `Long` values
  * are opaque native handles (presumably native pointers — managed entirely on
  * the C++ side; release readers via [[delete]]).
  *
  * @author Emmanouil Antonios Platanios
  */
object CheckpointReader {
  // Ensure the native TensorFlow library is loaded before any @native call below.
  TensorFlow.load()

  /** Creates a native checkpoint reader for `filePattern` and returns its handle. */
  @native def newCheckpointReader(filePattern: String): Long
  /** Returns a human-readable description of the checkpoint behind `handle`. */
  @native def debugString(handle: Long): String
  /** Returns whether the checkpoint contains a tensor with the given name. */
  @native def hasTensor(handle: Long, name: String): Boolean
  /** Returns a native handle for the named tensor.
    * NOTE(review): ownership/release of the returned tensor handle is not visible here — confirm. */
  @native def getTensor(handle: Long, name: String): Long
  /** Releases the native reader behind `handle`. */
  @native def delete(handle: Long): Unit
}
| eaplatanios/tensorflow | tensorflow/scala/jni/src/main/scala/org/platanios/tensorflow/jni/CheckpointReader.scala | Scala | apache-2.0 | 1,042 |
package org.eclairjs.tools.generate
import java.io.{File => JFile, FileInputStream}
import java.util.Properties
import org.eclairjs.tools.generate.model._
import scala.collection.mutable
/**
 * Generates Java "wrap" classes that expose Spark model classes to the Nashorn
 * JavaScript engine. For each model [[Clazz]] it emits a Java source file whose
 * methods are surfaced as `WrappedFunction` members.
 */
class GenerateJavaWrapper {

  // Code templates loaded once from the resource properties file; the tool
  // aborts immediately if the file cannot be read.
  lazy val templates = {
    try {
      val prop = new Properties()
      prop.load(new FileInputStream("./src/main/resources/template.properties"))
      prop
    } catch { case e: Exception =>
      e.printStackTrace()
      sys.exit(1)
    }
  }

  // Maps Spark Java-API function interface names to their Nashorn JS wrapper types.
  val funcMac = Map("JFunction"->"JSFunction","VoidFunction"->"JSVoidFunction","JFunction2"->"JSFunction2",
    "JFunction3"->"JSFunction3","PairFunction"->"JSPairFunction","PairFlatMapFunction"->"JSPairFlatMapFunction",
    "Function"->"JSFunction","Function2"->"JSFunction2","Function3"->"JSFunction3",
    "Function0"->"JSFunction"
  )

  val org_apache_spark ="org.apache.spark"
  val org_apache_spark_ ="org.apache.spark."

  /**
   * Generates one wrapper file per non-static class in `file`, then one per
   * static-only class that was not already covered and has non-constructor members.
   */
  def generate(file:File, destDir:JFile) : Unit = {
    var generatedClasses= Set("")
    file.classes.filter(!_.isStatic) foreach( cls => {
      generateClassFile(file,cls,destDir)
      generatedClasses += cls.name
    } )
    val statics=file.classes.filter(cls=>cls.isStatic && !generatedClasses.contains(cls.name))
    val non_emptyStatics=statics.filter(cls=> cls.members.filter(!_.isConstructor()).length>0)
    if (!non_emptyStatics.isEmpty)
    {
      non_emptyStatics foreach( generateClassFile(file,_,destDir))
    }
  }

  /**
   * Emits the Java wrapper class for `cls`: one `WrappedFunction` member per
   * method plus boilerplate (constructor, getJavaObject, getMember/hasMember).
   * Output is written to `destDir`, or printed to stdout in console mode.
   */
  def generateClassFile(file:File, cls:Clazz, destDir:JFile) : Unit = {
    val sbFile=new StringBuilder
    sbFile ++= s"package ${wrapperPackageName(cls)};\\n"
    sbFile ++= getTemplate("copyright")
    val imports= mutable.ListBuffer("org.eclairjs.nashorn.Utils","org.eclairjs.nashorn.wrap.WrappedFunction","org.apache.log4j.Logger")
    imports += parentFullName(cls);
    imports.map(pkg=> sbFile ++= s"import $pkg;\\n")
    addNewlines(2,sbFile)
    sbFile ++= s"public class ${cls.name} extends ${parentClass(cls)} {\\n"
    addNewlines(1,sbFile)
    sbFile ++= s" static Logger logger = Logger.getLogger(${cls.name}.class);\\n"
    addNewlines(1,sbFile)
    // Constructors are skipped: only real methods get WrappedFunction members.
    val methods=cls.methods.filter(member=> !member.isConstructor() && member.isInstanceOf[Method])
    // Backing field name: '_' + lower-cased first letter, e.g. DataFrame -> _dataFrame.
    val objName="_"+cls.name(0).toLower + cls.name.substring(1)
    val fullJavaName= if (cls.parent.packageName!="<empty>")
      s"${cls.parent.packageName}.${cls.name}"
    else
      cls.name
    methods.foreach(method=>{
      sbFile ++= s""" static WrappedFunction F_${method.name} = new WrappedFunction() {
 @Override
 public Object call(Object thiz, Object... args) {
 """
      val methodBody=generateMethodBody(method,false,objName,fullJavaName)
      sbFile ++= indentString(" ",methodBody)
      sbFile++="""
                | }
                | };
                |
                |""".stripMargin
    })
    val staticCls=file.classes.find( clazz=> clazz.isStatic && clazz.name==cls.name)
    if (staticCls.isDefined && staticCls.get.members.filter(!_.isConstructor()).length>0)
    {
      sbFile.append("\\n//\\n// static methods\\n//\\n")
      // NOTE(review): generateMethodBody's return value is discarded here, so
      // static method bodies are never actually written to the output file —
      // only the banner comment above is. Confirm whether static support is
      // intentionally unfinished before relying on it.
      staticCls.get.members.filter(!_.isConstructor()) foreach( member =>
        member match {
          case method:Method => generateMethodBody(method,true,objName,fullJavaName)
        }
      )
    }
    val moduleName= if (cls.parent.packageName.startsWith(org_apache_spark_))
      cls.parent.packageName.substring(org_apache_spark_.length) + "."+cls.name
    else
      cls.name
    val getMemberList = methods.map(method=> s"""case "${method.name}\\":
         | return F_${method.name};""".stripMargin).mkString("\\n")
    val swGetMember=indentString(" ",getMemberList)
    val swHasMember = indentStringList(" ",methods.map(method=> s"""case "${method.name}\\":""".stripMargin))
    sbFile ++= s"""
         | private $fullJavaName $objName;
         |
         | public ${cls.name}($fullJavaName $objName)
         | {
         | logger.debug("constructor");
         | this.$objName = $objName;
         | }
         |
         | static public String getModuleName() {
         | return "$moduleName";
         | }
         |
         | public boolean checkInstance(Object other) {
         | return other instanceof ${cls.name};
         | }
         |
         | public Object getJavaObject() {
         | return $objName;
         | }
         |
         | @Override
         | public String toString() {
         |
         | return $objName.toString();
         | }
         |
         | public String getClassName() {
         | return "${cls.name}";
         | }
         |
         | // get the value of that named property
         | @Override
         | public Object getMember(String name) {
         | switch (name) {
         |$swGetMember
         | }
         | return super.getMember(name);
         | }
         |
         | @Override
         | public boolean hasMember(String name) {
         | switch (name) {
         |$swHasMember
         | return true;
         | }
         | return super.hasMember(name);
         | }
         |""".stripMargin
    sbFile ++="\\n}\\n"
    val src :String = sbFile.toString()
    if (Main.isConsole) {
      System.out.println("SOURCE: "+cls.fullName())
      System.out.println("")
      System.out.println(src)
    }
    else
    {
      if (!destDir.exists())
        destDir.mkdirs();
      val toFile=destDir.getAbsolutePath+"/"+cls.name+".java"
      // System.out.println("WRITING: "+toFile)
      scala.tools.nsc.io.File(toFile).writeAll(src)
    }
  }

  /**
   * Builds the Java body of one wrapped call: unmarshals `args` into typed
   * locals, invokes the underlying object (or class, when static) and converts
   * the result back for JS. Returns the body as Java source text.
   */
  def generateMethodBody(method: Method,isStatic:Boolean,objName : String,fullJavaName:String ): String =
  {
    val cls=method.parent
    val sb=new StringBuilder
    // System.out.println("return="+method.getReturnJSType())
    val hasReturn= method.returnType.isVoid()!=true
    val callTarget =
      if (isStatic)
        fullJavaName
      else
        objName
    val returnValue= if (hasReturn) "returnValue = " else ""
    sb ++=s"""logger.debug(\\"${method.name}\\");\\n"""
    sb ++=s"Object returnValue = null;\\n"
    sb ++=s"$fullJavaName $objName = ($fullJavaName) ((${cls.name}) thiz).getJavaObject();\\n"
    var inx=0;
    // Emits the Java that converts args[inx] into a local named after the parameter.
    def addParm(parm:Parm): Unit = {
      if (parm.isRepeated)
        sb++=s" %%% deliberate syntax error +++ // TODO: handle repeated parm '${parm.name}'\\n"
      if (parm.isOptional)
      {
        System.out.println(s"Optional parm, method - ${method.name}, parm - ${parm.name}")
      }
      parm.typ match {
        case FunctionDataType(name,parms,returnType) => {
          val argsLength=method.parms.length
          val functionType=funcMac.getOrElse(name,"JSFunction")
          // FIX: the assignment inside the if-block previously targeted plain
          // `bindArgs` (missing the $inx suffix) while only `bindArgs$inx` is
          // declared, which produced uncompilable generated Java.
          sb ++= s"""Object bindArgs$inx = null;
                    |if (args.length > $argsLength) {
                    | bindArgs$inx = args[$argsLength];
                    |}
                    |$functionType ${parm.name} = ($functionType)Utils.createLambdaFunction(args[$inx], "org.eclairjs.nashorn.$functionType", $objName.context(), bindArgs$inx);
                    |""".stripMargin
        }
        case _ => {
          val parmType= javaType(parm.typ)
          parmType match {
            case "int" =>
              sb ++= s"int ${parm.name} = Utils.toInt(args[$inx]);\\n"
            case "double" =>
              sb ++= s"double ${parm.name} = Utils.toDouble(args[$inx]);\\n"
            case "long" =>
              sb ++= s"long ${parm.name} = Utils.toLong(args[$inx]);\\n"
            case "int[]" =>
              sb ++= s"int[] ${parm.name} = Utils.toIntArray(args[$inx]);\\n"
            case "double[]" =>
              sb ++= s"double[] ${parm.name} = Utils.toDoubleArray(args[$inx]);\\n"
            case _ => {
              if (isSparkClass(parm.typ))
                sb ++= s"$parmType ${parm.name} = ($parmType) Utils.toObject(args[$inx]);\\n"
              else
                sb ++= s"$parmType ${parm.name} = ($parmType) args[$inx];\\n"
            }
          }
        }
      }
    }
    // Emits the actual invocation, assigning to returnValue when non-void.
    def addCall(parmList:String,indent:String) = {
      sb ++= s"$indent$returnValue$callTarget.${method.name}($parmList);\\n"
    }
    val nonOptionalParms=method.requiredParms()
    nonOptionalParms foreach(parm =>{
      addParm(parm)
      inx+=1
    } )
    val numRequredParms=nonOptionalParms.length
    if (numRequredParms==method.parms.length)
      addCall(method.parmList(),"")
    else { // has optional parms
      // Two call sites are generated: one for the required-args-only overload,
      // one with every optional argument unmarshalled as well.
      val optionalParms=method.optionalParms()
      sb ++= s"\\nif (args.length==$numRequredParms) {\\n"
      addCall(method.parmList(numRequredParms)," ")
      sb ++= s"\\n} else {\\n"
      optionalParms foreach(parm =>{
        addParm(parm)
        inx+=1
      } )
      addCall(method.parmList()," ")
      sb ++= s"\\n}\\n"
    }
    if (hasReturn) {
      if (method.returnType.isSparkClass()) {
        if (!isSparkClass(method.returnType))
          sb ++="return Utils.javaToJs(returnValue);\\n"
        else {
          // "this.type" means the method returns its own class.
          val typeName =
            if (method.returnType.name=="this.type")
              method.parent.name
            else
              method.returnType.name
          sb ++="// return Utils.javaToJs(returnValue);\\n"
          sb ++=s"return new ${wrapperFullName(typeName)}((${javaType(typeName)})returnValue);\\n"
        }
      }
      else
        sb ++="return returnValue;\\n"
    } else
      sb ++="return null;\\n"
    sb.toString()
  }

  /** Java superclass of the wrapper: the wrapped class's parent wrapper, or WrappedClass. */
  def parentClass(cls:Clazz):String =
  {
    cls.parentClass() match {
      case Some(cls) => cls.name
      case None => "WrappedClass"
    }
  }

  /** Module name for `jsType`, falling back to the raw name when unknown. */
  def getModule(jsType:String):String =
  {
    val clsOpt=Main.allClasses.get(jsType)
    clsOpt match {
      case Some(cls) => cls.module()
      case _ => jsType
    }
  }

  /** Fully qualified name of the wrapper's Java superclass. */
  def parentFullName(cls:Clazz) : String = {
    cls.parentClass() match {
      case Some(parent) => wrapperPackageName(parent)+"."+cls.parentClass().get.name
      case _ => "org.eclairjs.nashorn.wrap.WrappedClass"
    }
  }

  /** Wrapper package: org.eclairjs.nashorn.wrap + the suffix after org.apache.spark. */
  def wrapperPackageName(cls:Clazz):String = {
    var pkgName="org.eclairjs.nashorn.wrap";
    val prefix="org.apache.spark"
    if (cls.parent.packageName.startsWith(prefix))
      pkgName+=cls.parent.packageName.substring(prefix.length)
    pkgName
  }

  /** Fully qualified wrapper class name for the given wrapped type name. */
  def wrapperFullName(typ:String):String = {
    Main.allClasses.get(typ) match {
      case Some(cls) => wrapperPackageName(cls) +"."+cls.name
      case _ =>typ
    }
  }

  /** Java type name for a Scala type name (primitives mapped, known classes qualified). */
  def javaType(typ:String):String =
  {
    typ match {
      case "Int" => "int"
      case "Long" => "long"
      case "Boolean" => "boolean"
      case "String" => "String"
      case "Float" => "float"
      case "Double" => "double"
      case _ => {
        Main.allClasses.get(typ) match {
          case Some(typeClass) => typeClass.fullName();
          case _ => typ
        }
      }
    }
  }

  /** Java type for a model DataType; Array[X] becomes X[]. */
  def javaType(dataType: DataType):String =
  {
    dataType match {
      case SimpleType(name) => javaType(name)
      case ExtendedDataType(name,referenceType) => {
        name match {
          case "Array" => javaType(referenceType)+"[]"
          case _ => javaType(name)
        }
      }
      case _ => "??"
    }
  }

  /** Like DataType.isSparkClass but container types (Option/List/Array/Seq) never count. */
  def isSparkClass(typ:DataType): Boolean =
  {
    typ match {
      case ExtendedDataType(name,referenceType) => {
        name match {
          case "Option" | "List" | "Array" | "Seq" => false
          case _ => typ.isSparkClass()
        }
      }
      case _ =>typ.isSparkClass()
    }
  }

  /** Appends `count` newline characters to `sb`. */
  def addNewlines(count:Integer,sb:StringBuilder) : Unit = {
    // NOTE(review): the 18th entry of this source string is a literal backslash
    // followed by 'n' (not a newline); harmless because callers only ever
    // request 1 or 2 newlines.
    val newLines="\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\\\n\\n\\n\\n".toCharArray
    sb.appendAll(newLines,0,count)
  }

  /** Looks up the named template and formats it with the given arguments. */
  def getTemplate(name:String,args: AnyRef*):String =
  {
    val prop=templates.get(name).asInstanceOf[String]
    val template=prop.format(args.map(_.asInstanceOf[AnyRef]) : _*)
    template
  }

  /** Prefixes every line of `str` with `indent`. */
  def indentString(indent:String, str:String):String = str.split("\\\\n").map(indent+_).mkString("\\n")
  /** Prefixes every line in the list with `indent` and joins them with newlines. */
  def indentStringList(indent:String, str:List[String]):String = str.map(indent+_).mkString("\\n")
}
| EclairJS/eclairjs-nashorn | tools/generateJS/src/main/scala/org/eclairjs/tools/generate/GenerateJavaWrapper.scala | Scala | apache-2.0 | 12,685 |
import scala.io.Source
object Solution extends App {
  // All of stdin, read eagerly: first line is the number of test cases; each
  // case is a count line followed by that many "x y" pair lines.
  val lines = Source.stdin.getLines().toList
  val tests = lines.head.toInt
  // Lines without a space are the per-case pair counts; the rest are the pairs.
  val parts = lines.tail.partition(!_.contains(" "))
  val counts = parts._1.map(_.toInt)
  val pairs = parts._2.map(_.split(" ")).map(arr => Tuple2(arr(0).toInt, arr(1).toInt))
  // Walk the flat pair list, slicing out each test case's pairs in turn.
  var start = 0
  counts.foreach {
    i => {
      val p = pairs.slice(start, start + i)
      start = start + i
      println(if(isFunction(p)) "YES" else "NO")
    }
  }
def isFunction(rel: List[(Int, Int)]): Boolean = {
val groupByX = rel.groupBy(_._1)
val filteredMoreThanOne =
groupByX.filter(p => p._2.size > 1)
filteredMoreThanOne.forall(_._2.map(_._2).distinct == 1)
}
} | lcacciagioni/hackerrank | functional/introduction/functions-or-not.scala | Scala | gpl-3.0 | 710 |
package com.awesomesauce.minecraft.forge.openautomation.common
trait OAModule {
  // Shortcut to the mod's central singleton.
  val oa = OpenAutomation
  /** Unique module name, supplied by each concrete module. */
  val name: String

  // Lifecycle hooks, invoked in order: preInit -> init -> postInit
  // (presumably mirroring Forge's mod-loading phases — confirm with callers).
  def preInit()

  def init()

  def postInit()
}
| AwesomeSauceMods/OpenAutomation | main/scala/com/awesomesauce/minecraft/forge/openautomation/common/OAModule.scala | Scala | mit | 177 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.remote
import java.io.{ByteArrayInputStream, InputStream, IOException}
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentHashMap
import scala.collection.mutable
import io.netty.buffer.{ByteBuf, Unpooled}
import org.apache.hadoop.fs.{FSDataInputStream, Path}
import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.protocol.{Encodable, Encoders}
import org.apache.spark.network.util.{JavaUtils, LimitedInputStream}
/**
 * Something like [[org.apache.spark.network.buffer.FileSegmentManagedBuffer]], instead we only
 * need createInputStream function, so we don't need a TransportConf field, which is intended to
 * be used in other functions
 *
 * A managed buffer backed by the segment [offset, offset + length) of a file on
 * a Hadoop-compatible file system. Data is served either as a lazily opened
 * stream (eager requirement) or fully materialized into memory.
 */
private[spark] class HadoopFileSegmentManagedBuffer(
    val file: Path, val offset: Long, val length: Long, var eagerRequirement: Boolean = false)
  extends ManagedBuffer with Logging {

  import HadoopFileSegmentManagedBuffer._

  // Stream over just this segment; opened on first use, handle never cached.
  private lazy val dataStream: InputStream = {
    if (length == 0) {
      new ByteArrayInputStream(new Array[Byte](0))
    } else {
      var is: FSDataInputStream = null
      is = fs.open(file)
      is.seek(offset)
      new LimitedInputStream(is, length)
    }
  }

  // The whole segment read eagerly into memory. When enabled by configuration,
  // file handles are reused via a per-thread cache (see companion object).
  private lazy val dataInByteArray: Array[Byte] = {
    if (length == 0) {
      Array.empty[Byte]
    } else {
      var is: FSDataInputStream = null
      try {
        is = {
          if (reuseFileHandle) {
            val pathToHandleMap = handleCache.get(Thread.currentThread().getId)
            if (pathToHandleMap == null) {
              val res = fs.open(file)
              handleCache.put(Thread.currentThread().getId,
                new mutable.HashMap[Path, FSDataInputStream]() += (file -> res))
              res
            } else {
              pathToHandleMap.getOrElseUpdate(file, fs.open(file))
            }
          } else {
            fs.open(file)
          }
        }
        is.seek(offset)
        val array = new Array[Byte](length.toInt)
        is.readFully(array)
        array
      } catch {
        case e: IOException =>
          var errorMessage = "Error in reading " + this
          if (is != null) {
            val size = fs.getFileStatus(file).getLen
            errorMessage += " (actual file length " + size + ")"
          }
          throw new IOException(errorMessage, e)
      } finally {
        if (!reuseFileHandle) {
          // Immediately close it if disabled file handle reuse
          JavaUtils.closeQuietly(is)
        }
      }
    }
  }

  // When the requirement is NOT eager, materialize the bytes right away so
  // createInputStream() later serves from memory.
  private[spark] def prepareData(eagerRequirement: Boolean): Unit = {
    this.eagerRequirement = eagerRequirement
    if (! eagerRequirement) {
      dataInByteArray
    }
  }

  override def size(): Long = length

  override def createInputStream(): InputStream = if (eagerRequirement) {
    logInfo("Eagerly requiring this data input stream")
    dataStream
  } else {
    new ByteArrayInputStream(dataInByteArray)
  }

  /** Two buffers are equal iff they reference the same file segment. */
  override def equals(obj: Any): Boolean = {
    if (! obj.isInstanceOf[HadoopFileSegmentManagedBuffer]) {
      false
    } else {
      val buffer = obj.asInstanceOf[HadoopFileSegmentManagedBuffer]
      this.file == buffer.file && this.offset == buffer.offset && this.length == buffer.length
    }
  }

  // Fix: hashCode must be consistent with equals. The previous identity-based
  // super.hashCode() let equal segments hash differently, breaking use in
  // hash-based collections.
  override def hashCode(): Int =
    java.util.Objects.hash(file, java.lang.Long.valueOf(offset), java.lang.Long.valueOf(length))

  // Reference counting is a no-op: the data lives in remote storage, not in a
  // pooled local buffer.
  override def retain(): ManagedBuffer = this

  override def release(): ManagedBuffer = this

  override def nioByteBuffer(): ByteBuffer = throw new UnsupportedOperationException

  override def convertToNetty(): AnyRef = throw new UnsupportedOperationException
}
private[remote] object HadoopFileSegmentManagedBuffer {
  // Shared Hadoop FileSystem handle used by every buffer instance.
  private val fs = RemoteShuffleManager.getFileSystem

  // Per-thread cache of open input streams, keyed by thread id and then path,
  // populated only when file-handle reuse is enabled.
  // NOTE(review): cached streams are never closed here — confirm they are
  // released elsewhere or at executor shutdown.
  private[remote] lazy val handleCache =
    new ConcurrentHashMap[Long, mutable.HashMap[Path, FSDataInputStream]]()

  // Whether to reuse file handles across reads (from remote shuffle configuration).
  private val reuseFileHandle =
    RemoteShuffleManager.getConf.get(RemoteShuffleConf.REUSE_FILE_HANDLE)
}
/**
 * This is an RPC message encapsulating HadoopFileSegmentManagedBuffers. Slightly different with
 * the OpenBlocks message, this doesn't transfer block stream between executors through netty, but
 * only returns file segment ranges(offsets and lengths). Due to in remote shuffle, there is a
 * globally-accessible remote storage, like HDFS or DAOS.
 */
class MessageForHadoopManagedBuffers(
    val buffers: Array[(String, HadoopFileSegmentManagedBuffer)]) extends Encodable {

  /** Exact number of bytes [[encode]] writes, including the leading magic byte. */
  override def encodedLength(): Int = {
    // Fix: account for the MAGIC_CODE byte written first by encode(); it was
    // previously omitted, so toByteBuffer under-sized its initial capacity by 1
    // and encodedLength disagreed with the actual encoded form.
    var sum = 1
    // the length of count: Int
    sum += 4
    for ((blockId, hadoopFileSegment) <- buffers) {
      sum += Encoders.Strings.encodedLength(blockId)
      sum += Encoders.Strings.encodedLength(hadoopFileSegment.file.toUri.toString)
      sum += 8 // offset: Long
      sum += 8 // length: Long
    }
    sum
  }

  /** Writes magic byte, buffer count, then (blockId, path, offset, length) per buffer. */
  override def encode(buf: ByteBuf): Unit = {
    val count = buffers.length
    // To differentiate from other BlockTransferMessage
    buf.writeByte(MessageForHadoopManagedBuffers.MAGIC_CODE)
    buf.writeInt(count)
    for ((blockId, hadoopFileSegment) <- buffers) {
      Encoders.Strings.encode(buf, blockId)
      Encoders.Strings.encode(buf, hadoopFileSegment.file.toUri.toString)
      buf.writeLong(hadoopFileSegment.offset)
      buf.writeLong(hadoopFileSegment.length)
    }
  }

  // As opposed to fromByteBuffer
  def toByteBuffer: ByteBuf = {
    val buf = Unpooled.buffer(encodedLength)
    encode(buf)
    buf
  }
}
object MessageForHadoopManagedBuffers {
  // To differentiate from other BlockTransferMessage
  val MAGIC_CODE = -99

  // Decode: the inverse of MessageForHadoopManagedBuffers.encode — reads the
  // magic byte, the buffer count, then one (blockId, path, offset, length)
  // record per buffer.
  def fromByteBuffer(buf: ByteBuf): MessageForHadoopManagedBuffers = {
    val magic = buf.readByte()
    assert(magic == MAGIC_CODE, "This is not a MessageForHadoopManagedBuffers! : (")
    val count = buf.readInt()
    val buffers = for (i <- 0 until count) yield {
      val blockId = Encoders.Strings.decode(buf)
      val path = new Path(Encoders.Strings.decode(buf))
      val offset = buf.readLong()
      val length = buf.readLong()
      (blockId, new HadoopFileSegmentManagedBuffer(path, offset, length))
    }
    new MessageForHadoopManagedBuffers(buffers.toArray)
  }
}
| Intel-bigdata/OAP | oap-shuffle/remote-shuffle/src/main/scala/org/apache/spark/shuffle/remote/HadoopFileSegmentManagedBuffer.scala | Scala | apache-2.0 | 6,931 |
/*
* Copyright (c) 2014 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.tiling.analytics
import com.oculusinfo.binning.TileData
import com.oculusinfo.binning.TilePyramid
import org.json.JSONObject
/**
 * This analytic stores the CIDR block represented by a given tile.
 */
object IPv4Analytics extends Serializable {
  import com.tribbloids.spookystuff.{}._
  import com.oculusinfo.tilegen.tiling.IPv4ZCurveIndexScheme._

  // Nudges the tile's max corner strictly inside the tile, so it maps to the
  // tile's own maximum address instead of the neighbouring tile's minimum.
  private val EPSILON = 1E-10

  def getCIDRBlock (pyramid: TilePyramid)(tile: TileData[_]): String = {
    // Figure out the IP address of our corners
    val index = tile.getDefinition()
    val bounds = pyramid.getTileBounds(index)
    val llAddr = ipArrayToLong(reverse(bounds.getMinX(), bounds.getMinY()))
    val urAddr = ipArrayToLong(reverse(bounds.getMaxX()-EPSILON, bounds.getMaxY()-EPSILON))

    // Figure out how many significant bits they have in common
    // (mask with a bit set wherever the two corner addresses agree)
    val significant = 0xffffffffL & ~(llAddr ^ urAddr)

    // That is the number of blocks
    // NOTE(review): this assumes the corners differ only in a contiguous run of
    // low-order bits (i.e. tile bounds share a binary prefix) — confirm this
    // holds for the pyramid in use.
    val block = 32 -
      (for (i <- 0 to 32) yield (i, ((1L << i) & significant) != 0))
        .find(_._2)
        .getOrElse((32, false))._1

    // And apply that to either to get the common address
    val addr = longToIPArray(llAddr & significant)

    ipArrayToString(addr)+"/"+block
  }

  // Address of the tile's minimum corner (max = false) or maximum corner
  // (max = true, nudged inside the tile by EPSILON), as an unsigned 32-bit long.
  def getIPAddress (pyramid: TilePyramid, max: Boolean)(tile: TileData[_]): Long = {
    val index = tile.getDefinition()
    val bounds = pyramid.getTileBounds(index)
    if (max) {
      ipArrayToLong(reverse(bounds.getMaxX()-EPSILON, bounds.getMaxY()-EPSILON))
    } else {
      ipArrayToLong(reverse(bounds.getMinX(), bounds.getMinY()))
    }
  }

  /**
   * Get an analysis description for an analysis that stores the CIDR block
   * of an IPv4-indexed tile, with an arbitrary tile pyramid.
   */
  def getCIDRBlockAnalysis[BT] (pyramid: TilePyramid = getDefaultIPPyramid):
      AnalysisDescription[TileData[BT], String] =
    new TileOnlyMonolithicAnalysisDescription[TileData[BT], String](
      getCIDRBlock(pyramid),
      new StringAnalytic("CIDR Block"))

  // Tracks the smallest corner address across tiles; results are stored as
  // dotted-quad strings.
  // NOTE(review): defaultProcessedValue/defaultUnprocessedValue look mutually
  // swapped between the min and max analytics — confirm against the
  // AnalysisDescription semantics before relying on empty-tile defaults.
  def getMinIPAddressAnalysis[BT] (pyramid: TilePyramid = getDefaultIPPyramid):
      AnalysisDescription[TileData[BT], Long] =
    new MonolithicAnalysisDescription[TileData[BT], Long](
      getIPAddress(pyramid, false),
      new TileAnalytic[Long] {
        def name = "Minimum IP Address"
        def aggregate (a: Long, b: Long): Long = a min b
        def defaultProcessedValue: Long = 0L
        def defaultUnprocessedValue: Long = 0xffffffffL
        override def storableValue (value: Long, location: TileAnalytic.Locations.Value): Option[JSONObject] = {
          val result = new JSONObject()
          result.put(name, ipArrayToString(longToIPArray(value)))
          Some(result)
        }
      })

  // Tracks the largest corner address across tiles (see NOTE above on defaults).
  def getMaxIPAddressAnalysis[BT] (pyramid: TilePyramid = getDefaultIPPyramid):
      AnalysisDescription[TileData[BT], Long] =
    new MonolithicAnalysisDescription[TileData[BT], Long](
      getIPAddress(pyramid, true),
      new TileAnalytic[Long] {
        def name = "Maximum IP Address"
        def aggregate (a: Long, b: Long): Long = a max b
        def defaultProcessedValue: Long = 0xffffffffL
        def defaultUnprocessedValue: Long = 0L
        override def storableValue (value: Long, location: TileAnalytic.Locations.Value): Option[JSONObject] = {
          val result = new JSONObject()
          result.put(name, ipArrayToString(longToIPArray(value)))
          Some(result)
        }
      })
}
| unchartedsoftware/aperture-tiles | tile-generation/src/main/scala/com/oculusinfo/tilegen/tiling/analytics/IPv4Analytics.scala | Scala | mit | 4,399 |
package org.remus32
import org.remus32.BetterServer.core.api.HttpStatus
import org.remus32.NanoHttpdLayer.Status
/**
* Created by remus32 on 25/04/16.
*/
package object BetterServer {
  // Implicit conversion from the NanoHttpd-layer status to the core API status,
  // resolved by numeric code via the HttpStatus lookup map.
  implicit def statusToStatus(that: Status): HttpStatus.Status = {
    HttpStatus.map(that.code)
  }

  // Reverse conversion: adapts a core API status to the NanoHttpd-layer trait.
  implicit def statusToStatus$(that: HttpStatus.Status): Status = {
    new Status {
      def description: String = that.description

      def code: Int = that.code
    }
  }
}
| remus32/BetterServer | server/src/main/scala/org/remus32/BetterServer/package.scala | Scala | gpl-3.0 | 471 |
package com.zhranklin.homepage.client.components
import japgolly.scalajs.react.component.Scala.BackendScope
import japgolly.scalajs.react.extra.{Broadcaster, OnUnmount}
import scala.concurrent.Future
object AsyncVdom {
  // Props = (initial element to render, broadcaster that pushes replacement elements).
  type Props = (VdomElement, Broadcaster[VdomElement])
  // State is simply the element currently being rendered.
  type State = VdomElement

  class Backend($: BackendScope[Props, State]) extends OnUnmount

  // Renders its current state and swaps it whenever the broadcaster publishes a
  // new element; Listenable.listen detaches the listener on unmount.
  val comp = ScalaComponent.builder[Props]("AsyncVdom")
    .initialStateFromProps(_._1)
    .backend(new Backend(_))
    .render_S(identity)
    .configure(Listenable.listen(_._2, $ ⇒ $.setState(_: State)))
    .build

  // Shows `default` until `future` completes, then broadcasts the resolved
  // element so the component re-renders with it.
  def future(future: Future[VdomElement], default: VdomElement = <.div(<.p("loading..."))) =
    comp(default, new Broadcaster[VdomElement] {
      future.foreach{ dom ⇒
        broadcast(dom).runNow()
      }
    })
}
| zhranklin/Private_Blog | client/src/main/scala/com/zhranklin/homepage/client/components/AsyncVdom.scala | Scala | gpl-3.0 | 814 |
package com.tribbloids.spookystuff.pipeline
import java.util.UUID
import com.tribbloids.spookystuff.SpookyContext
import com.tribbloids.spookystuff.rdd.FetchedDataset
import org.apache.spark.ml.param.ParamMap
/**
* Created by peng on 31/10/15.
*/
class RemoteTransformerChain(
    self: Seq[RemoteTransformer],
    override val uid: String =
      classOf[RemoteTransformerChain].getCanonicalName + "_" + UUID.randomUUID().toString
) extends RemoteTransformerLike {

  // Auxiliary uid-only constructor, required by Params.defaultCopy().
  def this(uid: String) = this(Nil, uid)

  /** Runs every stage in order, feeding each one the previous stage's output. */
  override def transform(dataset: FetchedDataset): FetchedDataset =
    self.foldLeft(dataset)((current, stage) => stage.transform(current))

  /** Copies the chain by copying each stage with `extra`; the uid is preserved. */
  override def copy(extra: ParamMap): RemoteTransformerChain = {
    val copiedStages = this.self.map(stage => stage.copy(extra))
    new RemoteTransformerChain(copiedStages, this.uid)
  }

  /** Appends a transformer, yielding a new chain with the same uid. */
  def +>(another: RemoteTransformer): RemoteTransformerChain =
    new RemoteTransformerChain(this.self :+ another, this.uid)

  /** Tests every stage against the given context, in order. */
  override def test(spooky: SpookyContext): Unit =
    self.foreach(stage => stage.test(spooky))
}
| tribbloid/spookystuff | core/src/main/scala/com/tribbloids/spookystuff/pipeline/RemoteTransformerChain.scala | Scala | apache-2.0 | 1,100 |
package Yosemite
import java.io._
import java.net._
import java.util.concurrent.{Executors, ThreadFactory, ThreadPoolExecutor}
import java.util.{Locale, Random}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import scala.collection.JavaConversions._
import scala.io.Source
import scala.reflect.ClassTag
/**
* Various utility methods used by Varys.
*/
private object Utils extends Logging {
/**
* Serialize an object using Java serialization
*/
def serialize[T](o: T): Array[Byte] = {
val bos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(bos)
oos.writeObject(o)
oos.close()
return bos.toByteArray
}
/**
* Deserialize an object using Java serialization
*/
def deserialize[T](bytes: Array[Byte]): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis)
return ois.readObject.asInstanceOf[T]
}
  /**
   * Deserialize an object using Java serialization and the given ClassLoader
   */
  def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
    val bis = new ByteArrayInputStream(bytes)
    val ois = new ObjectInputStream(bis) {
      // Resolve classes through the caller-supplied loader instead of this
      // class's own loader, so classes from e.g. a job or REPL loader resolve.
      // NOTE(review): Class.forName fails for primitive type names ("int", ...);
      // confirm streams never carry top-level primitive class descriptors.
      override def resolveClass(desc: ObjectStreamClass) =
        Class.forName(desc.getName, false, loader)
    }
    return ois.readObject.asInstanceOf[T]
  }
def isAlpha(c: Char): Boolean = {
(c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
}
/**
* Shuffle the elements of a collection into a random order, returning the
* result in a new collection. Unlike scala.util.Random.shuffle, this method
* uses a local random number generator, avoiding inter-thread contention.
*/
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
randomizeInPlace(seq.toArray)
}
/**
* Shuffle the elements of an array into a random order, modifying the
* original array. Returns the original array.
*/
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
for (i <- (arr.length - 1) to 1 by -1) {
val j = rand.nextInt(i)
val tmp = arr(j)
arr(j) = arr(i)
arr(i) = tmp
}
arr
}
  /**
   * Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
   */
  lazy val localIpAddress: String = findLocalIpAddress()

  // Resolution order: VARYS_LOCAL_IP env override, else InetAddress.getLocalHost;
  // if that resolves to a loopback address, fall back to the first non-loopback,
  // non-link-local IPv4 address found on any network interface.
  private def findLocalIpAddress(): String = {
    val defaultIpOverride = System.getenv("VARYS_LOCAL_IP")
    if (defaultIpOverride != null) {
      defaultIpOverride
    } else {
      val address = InetAddress.getLocalHost
      if (address.isLoopbackAddress) {
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        for (ni <- NetworkInterface.getNetworkInterfaces) {
          for (addr <- ni.getInetAddresses if !addr.isLinkLocalAddress &&
            !addr.isLoopbackAddress && addr.isInstanceOf[Inet4Address]) {
            // We've found an address that looks reasonable!
            logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
              " a loopback address: " + address.getHostAddress + "; using " + addr.getHostAddress +
              " instead (on interface " + ni.getName + ")")
            logWarning("Set VARYS_LOCAL_IP if you need to bind to another address")
            return addr.getHostAddress
          }
        }
        logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
          " a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
          " external IP address!")
        logWarning("Set VARYS_LOCAL_IP if you need to bind to another address")
      }
      address.getHostAddress
    }
  }
private var customHostname: Option[String] = None
/**
* Allow setting a custom host name because when we run on Mesos we need to use the same
* hostname it reports to the master.
*/
def setCustomHostname(hostname: String) {
customHostname = Some(hostname)
}
/**
* Get the local machine's hostname.
*/
def localHostName(): String = {
customHostname.getOrElse(InetAddress.getLocalHost.getHostName)
}
  // ThreadFactory producing daemon threads (built with Guava's
  // ThreadFactoryBuilder), so pools created from it never keep the JVM alive.
  private[Yosemite] val daemonThreadFactory: ThreadFactory =
    new ThreadFactoryBuilder().setDaemon(true).build()
  /**
   * Wrapper over newCachedThreadPool: an unbounded cached pool of daemon threads.
   * The downcast assumes the JDK factory returns a ThreadPoolExecutor, which
   * holds for the standard JDK implementation.
   */
  def newDaemonCachedThreadPool(): ThreadPoolExecutor =
    Executors.newCachedThreadPool(daemonThreadFactory).asInstanceOf[ThreadPoolExecutor]
  /**
   * Wrapper over newFixedThreadPool: a fixed-size pool of daemon threads.
   * Same downcast assumption as newDaemonCachedThreadPool.
   */
  def newDaemonFixedThreadPool(nThreads: Int): ThreadPoolExecutor =
    Executors.newFixedThreadPool(nThreads, daemonThreadFactory).asInstanceOf[ThreadPoolExecutor]
/**
* Convert a quantity in bytes to a human-readable string such as "4.0 MB".
*/
def bytesToString(size: Long): String = {
val TB = 1L << 40
val GB = 1L << 30
val MB = 1L << 20
val KB = 1L << 10
val (value, unit) = {
if (size >= 2*TB) {
(size.asInstanceOf[Double] / TB, "TB")
} else if (size >= 2*GB) {
(size.asInstanceOf[Double] / GB, "GB")
} else if (size >= 2*MB) {
(size.asInstanceOf[Double] / MB, "MB")
} else if (size >= 2*KB) {
(size.asInstanceOf[Double] / KB, "KB")
} else {
(size.asInstanceOf[Double], "B")
}
}
"%.1f %s".formatLocal(Locale.US, value, unit)
}
/**
* Return a string containing part of a file from byte 'start' to 'end'.
*/
def offsetBytes(path: String, start: Long, end: Long): String = {
val file = new File(path)
val length = file.length()
val effectiveEnd = math.min(length, end)
val effectiveStart = math.max(0, start)
val buff = new Array[Byte]((effectiveEnd-effectiveStart).toInt)
val stream = new FileInputStream(file)
stream.skip(effectiveStart)
stream.read(buff)
stream.close()
Source.fromBytes(buff).mkString
}
} | zhanghan1990/Yosemite | core/src/main/scala/Yosemite/Utils.scala | Scala | apache-2.0 | 5,938 |
package gitbucket.core.model
trait CommitStatusComponent extends TemplateComponent { self: Profile =>
  import profile.api._
  import self._

  /** Persist a CommitState as its lowercase name; read it back via CommitState(name). */
  implicit val commitStateColumnType = MappedColumnType.base[CommitState, String](b => b.name, i => CommitState(i))

  lazy val CommitStatuses = TableQuery[CommitStatuses]

  /** Slick mapping for the COMMIT_STATUS table. */
  class CommitStatuses(tag: Tag) extends Table[CommitStatus](tag, "COMMIT_STATUS") with CommitTemplate {
    // Fixed: was written `O AutoInc` (deprecated postfix notation); the dotted
    // form `O.AutoInc` is the standard, equivalent spelling.
    val commitStatusId = column[Int]("COMMIT_STATUS_ID", O.AutoInc)
    val context = column[String]("CONTEXT")
    val state = column[CommitState]("STATE")
    val targetUrl = column[Option[String]]("TARGET_URL")
    val description = column[Option[String]]("DESCRIPTION")
    val creator = column[String]("CREATOR")
    val registeredDate = column[java.util.Date]("REGISTERED_DATE")
    val updatedDate = column[java.util.Date]("UPDATED_DATE")

    def * =
      (
        commitStatusId,
        userName,
        repositoryName,
        commitId,
        context,
        state,
        targetUrl,
        description,
        creator,
        registeredDate,
        updatedDate
      ) <> ((CommitStatus.apply _).tupled, CommitStatus.unapply)

    def byPrimaryKey(id: Int) = commitStatusId === id.bind
  }
}
/**
 * One commit-status row of the COMMIT_STATUS table (see CommitStatusComponent).
 *
 * @param commitStatusId auto-incremented row id; defaults to 0 for not-yet-inserted rows
 * @param userName       repository owner
 * @param repositoryName repository name
 * @param commitId       commit the status applies to
 * @param context        label distinguishing status providers (e.g. different CI jobs)
 * @param state          reported state, stored in the DB as its lowercase name
 * @param targetUrl      optional link to details about the status
 * @param description    optional human-readable summary
 * @param creator        user name of whoever reported the status
 * @param registeredDate when the status row was first created
 * @param updatedDate    when the status row was last updated
 */
case class CommitStatus(
  commitStatusId: Int = 0,
  userName: String,
  repositoryName: String,
  commitId: String,
  context: String,
  state: CommitState,
  targetUrl: Option[String],
  description: Option[String],
  creator: String,
  registeredDate: java.util.Date,
  updatedDate: java.util.Date
)
object CommitStatus {

  /**
   * Build a fresh "pending" placeholder status for the given repository and
   * context, with empty commit id / creator and both timestamps set to now.
   */
  def pending(owner: String, repository: String, context: String): CommitStatus = {
    val status = CommitStatus(
      userName = owner,
      repositoryName = repository,
      commitId = "",
      context = context,
      state = CommitState.PENDING,
      targetUrl = None,
      description = Some("Waiting for status to be reported"),
      creator = "",
      registeredDate = new java.util.Date(),
      updatedDate = new java.util.Date()
    )
    status
  }
}
/** A commit-status state, identified by its lowercase wire name. */
sealed abstract class CommitState(val name: String)

object CommitState {

  object ERROR extends CommitState("error")
  object FAILURE extends CommitState("failure")
  object PENDING extends CommitState("pending")
  object SUCCESS extends CommitState("success")

  val values: Vector[CommitState] = Vector(PENDING, SUCCESS, ERROR, FAILURE)

  private val map: Map[String, CommitState] = values.map(state => state.name -> state).toMap

  /** Look up a state by name; throws NoSuchElementException for unknown names. */
  def apply(name: String): CommitState = map(name)

  /** Look up a state by name, returning None for unknown names. */
  def valueOf(name: String): Option[CommitState] = map.get(name)

  /**
   * Combine the states of several contexts into one overall state:
   * failure if any context reports error or failure,
   * pending if there are no statuses at all or any context is pending,
   * success otherwise.
   */
  def combine(statuses: Set[CommitState]): CommitState =
    if (statuses.isEmpty) PENDING
    else if (statuses.exists(s => s == ERROR || s == FAILURE)) FAILURE
    else if (statuses.contains(PENDING)) PENDING
    else SUCCESS
}
| McFoggy/gitbucket | src/main/scala/gitbucket/core/model/CommitStatus.scala | Scala | apache-2.0 | 3,089 |
package at.forsyte.apalache.tla
import at.forsyte.apalache.tla.lir.{NameEx, TlaEx}
import scala.collection.immutable.HashMap
package object bmcmt {
/**
* A theory used to evaluate a TLA+ expression: cells, Booleans, and integers.
*
* This concept is obsolete and will be removed in the future.
* See the <a href="https://github.com/informalsystems/apalache/issues/22">issue</a>.
*/
sealed abstract class Theory {
/**
* Check whether a constant is named after the theory naming convention.
*
* @param name a constant name
* @return if the name follows the naming conventions of this theory.
*/
def hasConst(name: String): Boolean
/**
* Check whether a TLA expression is NameEx(name) and hasConst(name) holds true.
* @param tlaEx a TLA expression
* @return true, if tlaEx is NameEx(name) and name is a name of a theory constant.
*/
def hasNameEx(tlaEx: TlaEx): Boolean = {
tlaEx match {
case NameEx(name) if hasConst(name) => true
case _ => false
}
}
/**
* Check, whether a TLA expression is NameEx and a theory constant.
* If so, return its name.
*
* @param tlaEx a TLA expression
* @return constant name
* @throws InvalidTlaExException if the expression is not a theory constant
*/
def nameExToString(tlaEx: TlaEx): String = {
tlaEx match {
case NameEx(name) if hasConst(name) =>
name
case _ =>
throw new CheckerException("Expected a cell, found: %s".format(tlaEx), tlaEx)
}
}
}
case class CellTheory() extends Theory {
/**
* The prefix of all cells.
*/
val namePrefix = "$C$"
/**
* Check whether a constant is named after the theory naming convention.
*
* @param name a constant name
* @return if the name follows the naming conventions of this theory.
*/
override def hasConst(name: String): Boolean = {
name.startsWith(namePrefix)
}
override def toString: String = "Cell"
}
} | konnov/dach | tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/package.scala | Scala | apache-2.0 | 2,093 |
package com.socrata.internal.http.util
import scala.collection.JavaConverters._
import org.scalatest.FunSuite
import scala.util.Random
import java.io._
import org.scalatest.matchers.MustMatchers
import org.scalatest.exceptions.ModifiableMessage
import java.nio.charset.{StandardCharsets, Charset}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar
/**
 * Randomized round-trip tests for ReaderInputStream: encoding a string through
 * the stream must produce exactly the same bytes as String.getBytes with the
 * same charset, for valid strings, strings with broken surrogate pairs, and a
 * worst-case all-surrogate-pair UTF-8 string. Failing seeds are reported via
 * withClue so runs can be reproduced.
 */
class ReaderInputStreamTest extends FunSuite with MustMatchers with Timeouts with SpanSugar {
  // Uniformly random Int in the *inclusive* range [min, max].
  def range(min: Int, max: Int)(implicit rng: Random): Int =
    min + rng.nextInt(1 + max - min)
  // All charsets on this JVM that support encoding; decode-only ones are skipped.
  val allCharsets = Charset.availableCharsets().values.asScala.filter(_.canEncode).toArray
  // Random well-formed string: each position is either a valid surrogate pair
  // (with probability fracSurrogatePair) or a single non-surrogate char.
  def generateValidString(minLen: Int, maxLen: Int, fracSurrogatePair: Double)(implicit rng: Random): String = {
    val len = range(minLen, maxLen)
    val sb = new StringBuilder(len)
    for(i <- 0 until len) {
      if(rng.nextDouble < fracSurrogatePair) {
        val high = range(Character.MIN_HIGH_SURROGATE, Character.MAX_HIGH_SURROGATE)
        val low = range(Character.MIN_LOW_SURROGATE, Character.MAX_LOW_SURROGATE)
        sb.append(high.toChar).append(low.toChar)
      } else {
        var c: Char = '\0'
        do {
          c = range(0, Char.MaxValue).toChar
        } while(Character.isSurrogate(c))
        sb.append(c)
      }
    }
    sb.toString
  }
  // Random malformed string: mixes reversed surrogate pairs, stray high/low
  // surrogates, and ordinary chars, per the given probabilities.
  def generateInvalidString(minLen: Int, maxLen: Int, fracReversed: Double, fracStrayHigh: Double, fracStrayLow: Double)(implicit rng: Random): String = {
    val len = range(minLen, maxLen)
    val sb = new StringBuilder(len)
    for(i <- 0 until len) {
      val p = rng.nextDouble()
      if(p < fracReversed) {
        val high = range(Character.MIN_HIGH_SURROGATE, Character.MAX_HIGH_SURROGATE)
        val low = range(Character.MIN_LOW_SURROGATE, Character.MAX_LOW_SURROGATE)
        sb.append(low.toChar).append(high.toChar)
      } else if(p < fracReversed + fracStrayHigh) {
        val high = range(Character.MIN_HIGH_SURROGATE, Character.MAX_HIGH_SURROGATE)
        sb.append(high.toChar)
      } else if(p < fracReversed + fracStrayHigh + fracStrayLow) {
        val low = range(Character.MIN_LOW_SURROGATE, Character.MAX_LOW_SURROGATE)
        sb.append(low.toChar)
      } else {
        var c: Char = '\0'
        do {
          c = range(0, Char.MaxValue).toChar
        } while(Character.isSurrogate(c))
        sb.append(c)
      }
    }
    sb.toString
  }
  // Wraps a Reader so each read returns a random 1..len chars, exercising the
  // short-read code paths of ReaderInputStream.
  private class ShortReader(r: Reader)(implicit rng: Random) extends Reader {
    def read(cbuf: Array[Char], off: Int, len: Int): Int = {
      if(len == 0) return 0
      r.read(cbuf, off, range(1, len))
    }
    def close() { r.close() }
  }
  // Drain `from` into `to` using reads of at most blockSize bytes.
  private def copyTo(from: InputStream, to: OutputStream, blockSize: Int) {
    val buf = new Array[Byte](blockSize)
    def loop() {
      from.read(buf) match {
        case -1 => // done
        case n =>
          to.write(buf, 0, n)
          loop()
      }
    }
    loop()
  }
  // NOTE(review): appears unused within this file - candidate for removal; verify.
  class SpecificSeedProvider(l: Long) {
    def nextLong() = l
  }
  test("Reading from a ReaderInputStream gives the same results as just calling getBytes on a valid string") {
    val seedProvider = new Random
    for(i <- 1 to 1000) {
      val seed = seedProvider.nextLong()
      doValidTest(seed)
    }
  }
  test("Reading from a ReaderInputStream gives the same results as just calling getBytes on an invalid string") {
    val seedProvider = new Random
    for(i <- 1 to 1000) {
      val seed = seedProvider.nextLong()
      doInvalidTest(seed)
    }
  }
  // One round trip with a well-formed random string for the given seed.
  private def doValidTest(seed: Long) {
    doTest(seed) { implicit rng =>
      generateValidString(0, 10000, rng.nextDouble())
    }
  }
  // One round trip with a malformed random string for the given seed.
  private def doInvalidTest(seed: Long) {
    doTest(seed) { implicit rng =>
      generateInvalidString(0, 10000, rng.nextDouble() / 3, rng.nextDouble() / 3, rng.nextDouble() / 3)
    }
  }
  // Core check: encode `genS(rng)` through ReaderInputStream (random charset,
  // random buffer sizes, possibly short-reading) and compare with getBytes.
  // The seed is attached as a clue; unexpected exceptions are turned into
  // failures while preserving clue-modifiable assertion errors.
  private def doTest(seed: Long)(genS: Random => String) {
    withClue(seed) {
      try {
        implicit val rng = new Random(seed)
        val s: String = genS(rng)
        val cs = allCharsets(range(0, allCharsets.length - 1))
        val bs = new ByteArrayOutputStream
        def maybeShortReader(r: Reader) =
          if(rng.nextBoolean()) r
          else new ShortReader(r)
        val ris = new ReaderInputStream(maybeShortReader(new StringReader(s)), cs, range(1, 10000))
        failAfter(10.seconds) {
          copyTo(ris, bs, range(512, 4096))
        }
        val fromReader = bs.toByteArray
        val fromString = s.getBytes(cs)
        fromReader must equal (fromString)
      } catch {
        case e: Throwable if !e.isInstanceOf[ModifiableMessage[_]] =>
          fail(e)
      }
    }
  }
  test("Worst-case UTF-8 string") {
    val seedProvider = new Random
    for(i <- 1 to 100) {
      val seed = seedProvider.nextLong()
      withClue(seed) {
        try {
          implicit val rng = new Random(seed)
          // Every position is a max-codepoint surrogate pair: the densest
          // 4-bytes-per-pair input UTF-8 can produce.
          val s = s"${Character.MAX_HIGH_SURROGATE}${Character.MAX_LOW_SURROGATE}" * range(1, 10000)
          val bs = new ByteArrayOutputStream
          val ris = new ReaderInputStream(new StringReader(s), StandardCharsets.UTF_8, range(1, 10000))
          failAfter(1.second) {
            copyTo(ris, bs, range(512, 4096))
          }
          val fromReader = bs.toByteArray
          val fromString = s.getBytes(StandardCharsets.UTF_8)
          fromReader must equal (fromString)
        } catch {
          case e: Throwable if !e.isInstanceOf[ModifiableMessage[_]] =>
            fail(e)
        }
      }
    }
  }
}
| socrata-platform/socrata-internal-http | src/test/scala/com/socrata/internal/http/util/ReaderInputStreamTest.scala | Scala | apache-2.0 | 5,522 |
package akkaviz.serialization
import scala.collection.mutable
object CachingClassInspector {

  // Memoized inspectors, one per class.
  // NOTE(review): plain mutable.Map without synchronization - confirm all
  // callers run on a single thread before relying on this cache concurrently.
  private[this] val cache = mutable.Map[Class[_], ClassInspector]()

  /** Return the inspector for `clazz`, building and caching it on first use. */
  def of(clazz: Class[_]): ClassInspector =
    cache.get(clazz) match {
      case Some(inspector) =>
        inspector
      case None =>
        val inspector = ClassInspector.of(clazz)
        cache.update(clazz, inspector)
        inspector
    }
}
| blstream/akka-viz | monitoring/src/main/scala/akkaviz/serialization/CachingClassInspector.scala | Scala | mit | 278 |
package codeoptimus.sgir.comm
import akka.actor.{ActorLogging, Actor}
import comm.Reconnect
/**
*
* Created with IntelliJ IDEA.
* User: Aaron Allred
*/
class ConnectionActor extends Actor with ActorLogging {

  /** Currently a stub: every message, including Reconnect, is ignored. */
  def receive = {
    case _: Reconnect => // reconnection handling not implemented yet
    case _            => // ignore everything else
  }
}
| digicyc/SGir | src/main/scala/comm/ConnectionActor.scala | Scala | mit | 303 |
package org.jetbrains.plugins.scala.lang.refactoring.extractMethod
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.{PsiAnnotation, PsiType, PsiTypeVisitor}
import com.intellij.refactoring.util.VariableData
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.fake.FakePsiParameter
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.refactoring.ScTypePresentationExt
import org.jetbrains.plugins.scala.lang.refactoring.extractMethod.ScalaVariableData._
/**
* Nikolay.Tropin
* 2014-04-10
*/
/**
 * VariableData for the extract-method refactoring, carrying a Scala typed
 * definition and its ScType. The superclass is fed a fake PSI parameter built
 * from the element (see the companion's createFakeParameter), and the platform
 * fields `type`, passAsParameter and name are initialized from the Scala side.
 */
class ScalaVariableData(val element: ScTypedDefinition,
                        val isInsideOfElements: Boolean,
                        val scType: ScType)
  extends VariableData(createFakeParameter(element, scType)) {
  // Expose the Scala type to the platform through a PsiType adapter.
  `type` = new FakePsiType(scType)
  passAsParameter = true
  name = element.name
}
object ScalaVariableData {
  // Wrap a typed definition in a minimal fake PSI parameter (index -1,
  // non-repeated) so the Java-oriented refactoring API can handle it.
  private def createFakeParameter(element: ScTypedDefinition, scType: ScType): FakePsiParameter = {
    val parameter = Parameter(scType, isRepeated = false, index = -1)
    new FakePsiParameter(element.getManager, ScalaLanguage.INSTANCE, parameter, element.name)
  }
  // Adapter presenting an ScType as a PsiType for platform UI/refactoring code.
  // NOTE(review): equalsToText is hard-coded to false and getResolveScope
  // returns null - presumably never consulted on this fake type; verify
  // against platform callers before reusing elsewhere.
  private class FakePsiType(val tp: ScType) extends PsiType(PsiAnnotation.EMPTY_ARRAY) {
    override def getPresentableText(boolean: Boolean): String = getPresentableText
    override def getPresentableText: String = tp.codeText
    override def getCanonicalText: String = tp.canonicalCodeText
    override def isValid: Boolean = true
    override def equalsToText(text: String): Boolean = false
    override def accept[A](visitor: PsiTypeVisitor[A]): A = visitor.visitType(this)
    override def getResolveScope: GlobalSearchScope = null
    override def getSuperTypes: Array[PsiType] = Array.empty
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/extractMethod/ScalaVariableData.scala | Scala | apache-2.0 | 1,973 |
Subsets and Splits
Filtered Scala Code Snippets
	The query filters the dataset down to Scala code snippets that match specific criteria and returns a sample of them, giving a quick overview of the dataset's contents.