| code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package mpff.controllers
import java.nio.charset.Charset
import mpff.resources.ServerErrorCode
import mpff.resources.UserErrorCode
import play.api.Logger
import play.api.i18n.I18nSupport
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.mvc.Result
abstract class MPFFAbstractAPIController[ActionContext <: MPFFActionContext]
extends MPFFAbstractController[ActionContext]
with I18nSupport {
private val UTF8 = Charset.forName("UTF8")
private val RESULT_KEY = "result"
private val RESULT_ERROR_ID_KEY = "errorId"
private val DESCRIPTION_KEY = "description"
private val OPTIONAL_INFO = "optInfo"
// ----------------------------------------------------------------------
def renderOK()(implicit context: ActionContext): Result = {
renderJson(Json.obj(RESULT_KEY -> "ok"))
}
def renderJson(obj: JsValue, status: Int = OK)(implicit context: ActionContext): Result = {
finalizeResult(Status(status)(obj))
}
override protected def renderInvalid(ec: UserErrorCode, e: Option[Throwable], optInfo: Option[Map[String, String]])(implicit context: ActionContext): Result = {
e match {
case None => ()
case Some(x) => Logger.info("renderInvalid", x)
}
val json = Json.obj(
RESULT_KEY -> "invalid",
DESCRIPTION_KEY -> ec.description,
OPTIONAL_INFO -> Json.toJson(optInfo.getOrElse(Map()))
)
renderJson(json, BAD_REQUEST)
}
override protected def renderError(ec: ServerErrorCode, e: Option[Throwable], optInfo: Option[Map[String, String]])(implicit context: ActionContext): Result = {
e match {
case None => ()
case Some(x) => Logger.info("renderError", x)
}
val json = Json.obj(
RESULT_KEY -> "error",
DESCRIPTION_KEY -> ec.description,
OPTIONAL_INFO -> Json.toJson(optInfo.getOrElse(Map()))
)
renderJson(json, INTERNAL_SERVER_ERROR)
}
override protected def renderLoginRequired()(implicit context: ActionContext): Result = {
val json = Json.obj(
RESULT_KEY -> "auth",
DESCRIPTION_KEY -> "Login is required"
)
renderJson(json, UNAUTHORIZED).withHeaders(
"WWW-Authenticate" -> "OAuth"
)
}
override protected def renderForbidden()(implicit context: ActionContext): Result = {
val json = Json.obj(
RESULT_KEY -> "forbidden",
DESCRIPTION_KEY -> "Forbidden action"
)
renderJson(json, FORBIDDEN)
}
override protected def renderNotFound()(implicit context: ActionContext): Result = {
val json = Json.obj(
RESULT_KEY -> "notfound",
DESCRIPTION_KEY -> "Not found"
)
renderJson(json, NOT_FOUND)
}
}
| mayah/mpff | app/mpff/controllers/MPFFAbstractAPIController.scala | Scala | mit | 2,661 |
package chana.serializer
import akka.actor.ExtendedActorSystem
import akka.serialization.Serializer
import akka.util.ByteString
import java.nio.ByteOrder
import org.apache.avro.Schema
final class SchemaSerializer(system: ExtendedActorSystem) extends Serializer {
implicit val byteOrder = ByteOrder.BIG_ENDIAN
override def identifier: Int = 844372015
override def includeManifest: Boolean = false
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
case schema: Schema =>
val builder = ByteString.newBuilder
StringSerializer.appendToByteString(builder, schema.toString)
builder.result.toArray
case _ =>
throw new IllegalArgumentException("Can't serialize a non-Schema message using SchemaSerializer [" + obj + "]")
}
override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = {
val data = ByteString(bytes).iterator
val schemaJson = StringSerializer.fromByteIterator(data)
new Schema.Parser().parse(schemaJson)
}
}
| wandoulabs/chana | src/main/scala/chana/serializer/SchemaSerializer.scala | Scala | apache-2.0 | 1,012 |
package oriana
import akka.actor.{ActorRef, Actor}
import scala.collection.mutable
class SingleMessageCaptor extends Actor {
import oriana.SingleMessageCaptor.Read
var contents: Any = _
var waiting = mutable.Buffer[ActorRef]()
val resultReceived: Receive = {
case Read => sender() ! contents
}
val waitForResult: Receive = {
case Read => waiting += sender()
case x =>
contents = x
waiting.foreach(_ ! x)
waiting.clear()
context.become(resultReceived)
}
def receive = waitForResult
}
object SingleMessageCaptor {
case object Read
}
| Norwae/oriana | src/test/scala/oriana/SingleMessageCaptor.scala | Scala | bsd-2-clause | 594 |
package org.beaucatcher.bson
import org.beaucatcher.wire._
/** A detailed type tag for binary data in Bson. */
object BsonSubtype extends Enumeration {
type BsonSubtype = Value
val GENERAL, FUNC, BINARY, UUID, MD5, USER_DEFINED = Value
private val fromBytes =
Map(Bson.B_GENERAL -> GENERAL,
Bson.B_FUNC -> FUNC,
Bson.B_BINARY -> BINARY,
Bson.B_UUID -> UUID,
Bson.B_MD5 -> MD5,
Bson.B_USER_DEFINED -> USER_DEFINED)
private val toBytes = fromBytes map { _.swap }
def fromByte(b: Byte): Option[BsonSubtype] = {
fromBytes.get(b)
}
def toByte(v: Value): Byte = {
toBytes.getOrElse(v, throw new IllegalArgumentException("bad BsonSubtype value"))
}
}
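// Hedged usage sketch (not part of the original file): because `toBytes` is built by
// swapping `fromBytes`, toByte and fromByte round-trip any defined subtype, while
// unmapped bytes yield None.
//   val b: Byte = BsonSubtype.toByte(BsonSubtype.UUID)
//   BsonSubtype.fromByte(b)            // Some(UUID)
//   BsonSubtype.fromByte(0x7f.toByte)  // presumably None, assuming 0x7f is not a wire constant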
| havocp/beaucatcher | base/src/main/scala/org/beaucatcher/bson/BsonSubtype.scala | Scala | apache-2.0 | 766 |
package io.hydrosphere.mist.master.store
import cats.implicits._
import com.zaxxer.hikari.HikariConfig
import io.hydrosphere.mist.master.{DbConfig, JobDetails, JobDetailsRequest, JobDetailsResponse}
import javax.sql.DataSource
import org.flywaydb.core.Flyway
import scala.concurrent.{ExecutionContext, Future}
trait JobRepository {
def remove(jobId: String): Future[Unit]
def get(jobId: String): Future[Option[JobDetails]]
def update(jobDetails: JobDetails): Future[Unit]
def filteredByStatuses(statuses: Seq[JobDetails.Status]): Future[Seq[JobDetails]]
def getAll(limit: Int, offset: Int, statuses: Seq[JobDetails.Status]): Future[Seq[JobDetails]]
def getAll(limit: Int, offset: Int): Future[Seq[JobDetails]] = getAll(limit, offset, Seq.empty)
def clear(): Future[Unit]
def running(): Future[Seq[JobDetails]] = filteredByStatuses(JobDetails.Status.inProgress)
def path(jobId: String)(f: JobDetails => JobDetails)(implicit ec: ExecutionContext): Future[Unit] = {
get(jobId).flatMap {
case Some(d) => update(f(d))
case None => Future.failed(new IllegalStateException(s"Not found job: $jobId"))
}
}
def getJobs(req: JobDetailsRequest): Future[JobDetailsResponse]
}
object JobRepository {
def create(config: DbConfig): Either[Throwable, HikariJobRepository] = {
for {
setup <- JobRepoSetup(config)
trns <- transactor(setup)
} yield new HikariJobRepository(trns, setup.jobRequestSql)
}
private def transactor(setup: JobRepoSetup): Either[Throwable, HikariDataSourceTransactor] = {
Either.catchNonFatal {
val transactor = new HikariDataSourceTransactor(hikariConfig(setup), setup.poolSize)
setup.migrationPath match {
case None => transactor
case Some(path) =>
migrate(path, transactor.ds)
transactor
}
}
}
private def migrate(migrationPath: String, ds: DataSource): Unit = {
val flyway = new Flyway()
flyway.setBaselineOnMigrate(true)
flyway.setLocations(migrationPath)
flyway.setDataSource(ds)
flyway.migrate()
}
private def hikariConfig(setup: JobRepoSetup): HikariConfig = {
import setup._
val configure =
SetterFunc[HikariConfig](driverClass)(_.setDriverClassName(_)) >>>
SetterFunc[HikariConfig](jdbcUrl)(_.setJdbcUrl(_)) >>>
SetterFunc[HikariConfig].opt(username)(_.setUsername(_)) >>>
SetterFunc[HikariConfig].opt(password)(_.setPassword(_))
configure(new HikariConfig())
}
object SetterFunc {
def void[A, B](b: B)(f: (A, B) => Unit): A => A =
(a: A) => { f(a, b); a}
def apply[A]: Partial[A] = new Partial[A]
class Partial[A] { self =>
def apply[B](b: B)(f: (A, B) => Unit): A => A =
(a: A) => { f(a, b); a }
def opt[B](b: Option[B])(f: (A, B) => Unit): A => A =
b match {
case Some(v) => self(v)(f)
case None => identity[A]
}
}
def opt[A, B](b: Option[B])(f: (A, B) => Unit): A => A =
b match {
case Some(v) => void(v)(f)
case None => identity[A]
}
}
}
| Hydrospheredata/mist | mist/master/src/main/scala/io/hydrosphere/mist/master/store/JobRepository.scala | Scala | apache-2.0 | 3,146 |
/*
* Copyright 2014 DataGenerator Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.datagenerator.common.SocialNetwork_Example
import org.finra.datagenerator.common.Graph.Node
import org.finra.datagenerator.common.Helpers.RandomHelper
import org.finra.datagenerator.common.NodeData._
import scala.beans.BeanProperty
import scala.collection.mutable.ListBuffer
/**
* Description: Defines all user state and transition probability information.
* Each type is defined and mapped to a set of predicates determining the allowable parent and child types and whether or not to create them,
* as well as the actual methods and business logic to create the parent/child states for each allowable state transition (edge/link).
*/
class UserTypes extends NodeDataTypes[User, UserStub, UserType.UserType, UserTypes] {
def allInitialDataTypes: collection.immutable.HashSet[UserType.UserType] = {
collection.immutable.HashSet[UserType.UserType](UserType.ADMIN)
}
def allDataTypes: collection.immutable.HashSet[UserType.UserType] = {
collection.immutable.HashSet[UserType.UserType](UserType.ADMIN, UserType.SOCIAL_NETWORK_EMPLOYEE, UserType.PUBLIC_USER)
}
def dataTransitions: UserTransitions.type = UserTransitions
}
import NodeDataType.NodeDataType
object UserType {
abstract class UserType extends NodeDataType[User, UserStub, UserTypes, UserType] {
@BeanProperty def nodeDataTypes: UserTypes = new UserTypes()
def asStub: UserStub = new UserStub(this)
// We don't have any engines that use these two methods yet, but it might be useful at some point.
override def probabilisticallyLinkToExistingParentDataNode(dataNode: Node[User]): Unit = {}
override def probabilisticallyLinkToExistingParentStubNode(stubNode: Node[UserStub]): Unit = {}
}
// ADMIN can friend request ADMIN, SOCIAL_NETWORK_EMPLOYEE, and PUBLIC_USER
// SOCIAL_NETWORK_EMPLOYEE can friend request SOCIAL_NETWORK_EMPLOYEE and PUBLIC_USER
// PUBLIC_USER can friend request PUBLIC_USER
case object ADMIN extends UserType {
override def getDataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.ADMIN
override val name = "Admin"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
nodeDataTypes.allDataTypes.toSeq
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.ADMIN)
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.ADMIN, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.07)),
(UserType.SOCIAL_NETWORK_EMPLOYEE, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.1)),
(UserType.PUBLIC_USER, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.15))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.ADMIN, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier * 0.07))
)
}
}
case object SOCIAL_NETWORK_EMPLOYEE extends UserType {
override def getDataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.SOCIAL_NETWORK_EMPLOYEE
override val name = "SocialNetworkEmployee"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.SOCIAL_NETWORK_EMPLOYEE, UserType.PUBLIC_USER)
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.ADMIN, UserType.SOCIAL_NETWORK_EMPLOYEE)
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.SOCIAL_NETWORK_EMPLOYEE, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.25)),
(UserType.PUBLIC_USER, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.30))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.ADMIN, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.03)),
(UserType.SOCIAL_NETWORK_EMPLOYEE, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.25))
)
}
}
case object PUBLIC_USER extends UserType {
override def getDataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.PUBLIC_USER
override val name = "PublicUser"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.PUBLIC_USER)
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
nodeDataTypes.allDataTypes.toSeq
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.PUBLIC_USER, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.35))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))]= {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.ADMIN, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.01)),
(UserType.SOCIAL_NETWORK_EMPLOYEE, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.02)),
(UserType.PUBLIC_USER, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.35))
)
}
}
}
| FINRAOS/DataGenerator | dg-common/src/main/scala/org/finra/datagenerator/common/SocialNetwork_Example/UserType.scala | Scala | apache-2.0 | 8,128 |
package com.sksamuel.elastic4s.http.search.queries.specialized
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory}
object WeightScoreBodyFn {
def apply(weight: Double): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.startObject()
builder.field("weight", weight.toFloat)
builder.endObject()
builder
}
}
| aroundus-inc/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/queries/specialized/WeightScoreBodyFn.scala | Scala | apache-2.0 | 378 |
package cwe.scala.library.runtime.exploration
import cwe.scala.library.runtime.test.ScalaTest
import cwe.scala.library.math.bignumbers.Natural
import cwe.scala.library.audit.AuditServiceProvider
class cweExplore extends ScalaTest {
def run(): Unit = {
val c = Natural.create("6543121452238632892310987678")
this.inspect(c)("c")
//doubleFraction
//realWithRationals
realBase2
}
def doubleFraction = {
import cwe.scala.library.math._
import cwe.scala.library.math.bignumbers._
import cwe.scala.library.boxes._
import cwe.scala.library.boxes.Numerics._
val v1 = new Rational(Rational.create(Integer.create("123456432"), Integer.create("1256122212266")),
Rational.create(Integer.create("12788864"), Integer.create("12787554333222")))
this.log(v1)
}
def realWithRationals = {
import cwe.scala.library.math._
import cwe.scala.library.math.bignumbers._
import cwe.scala.library.boxes._
import cwe.scala.library.boxes.Numerics._
val v1 = new RealImplBaseN(Rational.create(100, 4), "4575444544322", "2342212")
this.log((v1.getIntegerPartBaseN(), v1.getDecimalPartBaseN()))
}
def realBase2 = {
import cwe.scala.library.math._
import cwe.scala.library.math.bignumbers._
import cwe.scala.library.boxes._
import cwe.scala.library.boxes.Numerics._
val rops = BigNumbersServiceProvider.getRealOperationsService()
rops.MAX_DECIMAL_PRECISION = Natural.create(36)
rops.MAX_DIVISION_PRECISION = 34
val v1 = new RealImplBaseN(byteNumeric.two, "4575444544322", "23422123737366262")
this.log((v1.getIntegerPartBaseN(), v1.getDecimalPartBaseN()))
this.log(v1)
val v2 = Real.MAX_DECIMAL_PRECISION.asInstanceOf[RealWrapper].r.asInstanceOf[RealBaseN[_]]
this.log(v2.getIntegerPartBaseX(byteNumeric.two))
AuditServiceProvider.registerTraceServiceWithDebug
this.log(v2.getIntegerPartBaseX(1.asInstanceOf[Byte]))
}
}
| wwwigii-system/research | cwe-scala-library/src/cwe/scala/library/runtime/exploration/cweExplore.scala | Scala | gpl-3.0 | 1,893 |
// Classes: apply()
// A method named "apply" in Scala has
// special semantics
// Apply methods provide syntactic sugar
// for when a class has one main use.
class Bar {
def apply() = "boop!"
}
val bar = new Bar
print(bar())
// Prints "boop!"
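// A hedged aside (not part of the original slide): companion objects often
// define apply() too, which is what lets you construct instances without `new`.
// Hypothetical example:
class Baz(val greeting: String)
object Baz {
  def apply(greeting: String) = new Baz(greeting)
}
print(Baz("boop!").greeting)
// Also prints "boop!"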
| agconti/scala-school | 01-intro-to-scala/slides/slide023.scala | Scala | mit | 254 |
package com.twitter.finagle.kestrel.protocol
import org.jboss.netty.buffer.ChannelBuffer
import com.twitter.finagle.memcached.protocol.text.TokensWithData
import com.twitter.finagle.memcached.protocol.text.client.AbstractDecodingToResponse
private[kestrel] class DecodingToResponse extends AbstractDecodingToResponse[Response] {
import AbstractDecodingToResponse._
def parseResponse(tokens: Seq[ChannelBuffer]) = {
tokens.head match {
case NOT_FOUND => NotFound()
case STORED => Stored()
case DELETED => Deleted()
case ERROR => Error()
}
}
def parseValues(valueLines: Seq[TokensWithData]) = {
val values = valueLines.map { valueLine =>
val tokens = valueLine.tokens
Value(tokens(1), valueLine.data)
}
Values(values)
}
}
| enachb/finagle_2.9_durgh | finagle-kestrel/src/main/scala/com/twitter/finagle/kestrel/protocol/DecodingToResponse.scala | Scala | apache-2.0 | 802 |
package cinema.test.digraph
import cinema.graph.mutable.DirectedGraph
import cinema.graph.mutable.UndirectedGraph
object DiGraph {
def main(args: Array[String]) {
val G = new UndirectedGraph
G.addEdge(5, 4)
G.addEdge(4, 5)
println(G)
}
}
| adelbertc/cinema | src/main/scala/TestDiGraph.scala | Scala | mit | 258 |
package scuff.concurrent
import scuff._
import scala.collection.immutable.Map
/**
* Lock-free concurrent Map.
* Wrapper-class that turns any immutable Map into a
* lock-free concurrent map.
*/
final class LockFreeConcurrentMap[A, B](initialMap: Map[A, B] = Map.empty[A, B])
extends collection.concurrent.Map[A, B] {
require(initialMap != null, "Initial map cannot be null")
private[this] val EmptyMap = initialMap.empty
private[this] val mapRef = new java.util.concurrent.atomic.AtomicReference(initialMap)
@annotation.tailrec
final def putIfAbsent(k: A, v: B): Option[B] = {
val map = mapRef.get
map.get(k) match {
case existing: Some[_] => existing
case _ =>
val updated = map + (k -> v)
if (mapRef.compareAndSet(map, updated)) {
None
} else {
putIfAbsent(k, v)
}
}
}
@annotation.tailrec
final def remove(k: A, expected: B): Boolean = {
val map = mapRef.get
map.get(k) match {
case Some(value) if value == expected =>
val updated = map - k
if (mapRef.compareAndSet(map, updated)) {
true
} else {
remove(k, expected)
}
case _ => false
}
}
@annotation.tailrec
final def replace(k: A, expected: B, newvalue: B): Boolean = {
val map = mapRef.get
map.get(k) match {
case Some(value) if value == expected =>
val updated = map + (k -> newvalue)
if (mapRef.compareAndSet(map, updated)) {
true
} else {
replace(k, expected, newvalue)
}
case _ => false
}
}
@annotation.tailrec
final def replace(k: A, v: B): Option[B] = {
val map = mapRef.get
map.get(k) match {
case replaced: Some[_] =>
val updated = map + (k -> v)
if (mapRef.compareAndSet(map, updated)) {
replaced
} else {
replace(k, v)
}
case _ => None
}
}
def subtractOne(k: A): this.type = {
@annotation.tailrec
def tryRemove(): Unit = {
val map = mapRef.get
val updated = map - k
if (!mapRef.compareAndSet(map, updated)) {
tryRemove()
}
}
tryRemove()
this
}
def addOne(kv: (A, B)): this.type = {
@annotation.tailrec
def tryAdd(): Unit = {
val map = mapRef.get
val updated = map + kv
if (!mapRef.compareAndSet(map, updated)) {
tryAdd()
}
}
tryAdd()
this
}
def iterator = mapRef.get.iterator
def get(k: A): Option[B] = mapRef.get.get(k)
override def size = mapRef.get.size
def removeAll(keys: A*): this.type = {
@annotation.tailrec
def tryRemoveAll(): Unit = {
val map = mapRef.get
val updated = map -- keys
if (!mapRef.compareAndSet(map, updated)) {
tryRemoveAll()
}
}
if (keys.nonEmpty) tryRemoveAll()
this
}
def putAll(copyMap: collection.Map[A, B]): this.type = {
@annotation.tailrec
def tryPutAll(): Unit = {
val map = mapRef.get
val updated = map ++ copyMap
if (!mapRef.compareAndSet(map, updated)) {
tryPutAll()
}
}
if (copyMap.nonEmpty) tryPutAll()
this
}
override def clear(): Unit = {
mapRef.set(EmptyMap)
}
override def contains(key: A) = mapRef.get.contains(key)
override def isEmpty = mapRef.get.isEmpty
def snapshot[M <: Map[A, B]]: M = mapRef.get.asInstanceOf[M]
def drain[M <: Map[A, B]](): M = mapRef.getAndSet(EmptyMap).asInstanceOf[M]
@annotation.tailrec
final override def remove(key: A): Option[B] = {
val map = mapRef.get
map.get(key) match {
case value: Some[_] =>
val updated = map - key
if (mapRef.compareAndSet(map, updated)) {
value
} else {
remove(key)
}
case _ => None
}
}
override def getOrElseUpdate(key: A, makeValue: => B): B = {
get(key) match {
case Some(value) => value
case _ =>
val newValue = makeValue
putIfAbsent(key, newValue) || newValue
}
}
}
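// Hedged usage sketch (not part of the original file): the wrapper behaves like any
// scala.collection.concurrent.Map, with every mutation retried via compare-and-set
// on the underlying immutable Map.
//   val cache = new LockFreeConcurrentMap[String, Int]()
//   cache.putIfAbsent("answer", 42)              // None: first insert wins
//   cache.putIfAbsent("answer", 43)              // Some(42): existing value kept
//   cache.replace("answer", 42, 44)              // true: CAS-style conditional replace
//   val view = cache.snapshot[Map[String, Int]]  // immutable snapshot at this instant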
| nilskp/scuff | src/2.13/main/scala/scuff/concurrent/LockFreeConcurrentMap.scala | Scala | mit | 4,133 |
package clusterconsole.client.d3
import scala.scalajs.js
package D3 {
trait Selectors extends js.Object {
def select(selector: String): Selection = js.native
def selectAll(selector: String): Selection = js.native
}
trait Base extends Selectors {
var layout: Layout.Layout = js.native
}
trait Selection extends js.Array[js.Any] with Selectors {
def data[A](values: js.Array[A]): UpdateSelection = js.native
def call(callback: js.Function, args: js.Any*): Selection = js.native
}
trait UpdateSelection extends Selection
}
package Layout {
trait Layout extends js.Object {
def force(): ForceLayout = js.native
}
trait GraphNode extends js.Object {
var id: Double = js.native
var index: Double = js.native
var name: String = js.native
var px: Double = js.native
var py: Double = js.native
var size: Double = js.native
var weight: Double = js.native
var x: Double = js.native
var y: Double = js.native
var subindex: Double = js.native
var startAngle: Double = js.native
var endAngle: Double = js.native
var value: Double = js.native
var fixed: Boolean = js.native
var children: js.Array[GraphNode] = js.native
var _children: js.Array[GraphNode] = js.native
var parent: GraphNode = js.native
var depth: Double = js.native
}
trait GraphLink extends js.Object {
var source: GraphNode = js.native
var target: GraphNode = js.native
}
trait ForceLayout extends js.Function {
def apply(): ForceLayout = js.native
def size(): Double = js.native
def size(mysize: js.Array[Double]): ForceLayout = js.native
def size(accessor: js.Function2[js.Any, Double, js.Any]): ForceLayout = js.native
def linkDistance(): Double = js.native
def linkDistance(number: Double): ForceLayout = js.native
def linkDistance(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def linkStrength(): Double = js.native
def linkStrength(number: Double): ForceLayout = js.native
def linkStrength(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def friction(): Double = js.native
def friction(number: Double): ForceLayout = js.native
def friction(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def alpha(): Double = js.native
def alpha(number: Double): ForceLayout = js.native
def alpha(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def charge(): Double = js.native
def charge(number: Double): ForceLayout = js.native
def chargeDistance(number: Double): ForceLayout = js.native
def charge(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def theta(): Double = js.native
def theta(number: Double): ForceLayout = js.native
def theta(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def gravity(): Double = js.native
def gravity(number: Double): ForceLayout = js.native
def gravity(accessor: js.Function2[js.Any, Double, Double]): ForceLayout = js.native
def links(): js.Array[GraphLink] = js.native
def links[A <: GraphLink](arLinks: js.Array[A]): ForceLayout = js.native
def nodes[A <: GraphNode](): js.Array[A] = js.native
def nodes[A <: GraphNode](arNodes: js.Array[A]): ForceLayout = js.native
def start(): ForceLayout = js.native
def resume(): ForceLayout = js.native
def stop(): ForceLayout = js.native
def tick(): ForceLayout = js.native
def on(`type`: String, listener: js.Function0[Unit]): ForceLayout = js.native
def drag(): Behavior.Drag = js.native
}
}
package Behavior {
trait Behavior extends js.Object {
def drag(): Drag = js.native
}
trait Drag extends js.Function {
def apply(): js.Dynamic = js.native
var on: js.Function2[String, js.Function2[js.Any, Double, Any], Drag] = js.native
def origin(): js.Dynamic = js.native
def origin(origin: js.Any = js.native): Drag = js.native
}
}
| CapeSepias/cluster-console | js/src/main/scala/clusterconsole/client/d3/d3.scala | Scala | bsd-3-clause | 4,023 |
package com.softwaremill.streams.util
import scala.util.Random
object Timed {
def timed[T](b: => T): (T, Long) = {
val start = System.currentTimeMillis()
val r = b
(r, System.currentTimeMillis() - start)
}
def runTests(tests: List[(String, () => String)], repetitions: Int): Unit = {
val allTests = Random.shuffle(List.fill(repetitions)(tests).flatten)
println("Warmup")
for ((name, body) <- tests) {
val (result, time) = timed { body() }
println(f"$name%-25s $result%-25s ${time/1000.0d}%4.2fs")
}
println("---")
println(s"Running ${allTests.size} tests")
val rawResults = for ((name, body) <- allTests) yield {
val (result, time) = timed { body() }
println(f"$name%-25s $result%-25s ${time/1000.0d}%4.2fs")
name -> time
}
val results: Map[String, (Double, Double)] = rawResults.groupBy(_._1)
.mapValues(_.map(_._2))
.mapValues { times =>
val count = times.size
val mean = times.sum.toDouble / count
val dev = times.map(t => (t - mean) * (t - mean))
val stddev = Math.sqrt(dev.sum / count)
(mean, stddev)
}
println("---")
println("Averages (name, mean, stddev)")
results.toList.sortBy(_._2._1).foreach { case (name, (mean, stddev)) =>
println(f"$name%-25s ${mean/1000.0d}%4.2fs $stddev%4.2fms")
}
}
}
| ahjohannessen/streams-tests | src/main/scala/com/softwaremill/streams/util/Timed.scala | Scala | apache-2.0 | 1,365 |
package com.azavea.maml.eval.tile
import geotrellis.raster._
import java.lang.IllegalStateException
case class NeighboringTiles(
tl: Tile,
tm: Tile,
tr: Tile,
ml: Tile,
mr: Tile,
bl: Tile,
bm: Tile,
br: Tile
)
case class TileWithNeighbors(centerTile: Tile, buffers: Option[NeighboringTiles]) {
def withBuffer(buffer: Int): Tile = buffers match {
case Some(buf) =>
if (buffer > 0) {
CompositeTile(
Seq(
buf.tl, buf.tm, buf.tr,
buf.ml, centerTile, buf.mr,
buf.bl, buf.bm, buf.br
),
TileLayout(3, 3, centerTile.cols, centerTile.rows)
).crop(
centerTile.cols - buffer,
centerTile.rows - buffer,
centerTile.cols * 2 + buffer - 1,
centerTile.rows * 2 + buffer - 1
)
}
else
centerTile
case None if (buffer == 0) =>
centerTile
case _ =>
throw new IllegalStateException(s"tile buffer > 0 ($buffer) but no neighboring tiles found")
}
}
| geotrellis/maml | jvm/src/main/scala/eval/tile/TileWithNeighbors.scala | Scala | apache-2.0 | 1,032 |
package net.sansa_stack.rdf.spark
import net.sansa_stack.rdf.spark.utils.Logging
import org.apache.jena.graph.{Node, Triple}
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}
/**
* Wrap up implicit classes/methods for RDF data into [[RDD]].
*
* @author Gezim Sejdiu
*/
package object model {
/**
* Adds all methods to [[RDD]] that allows to use TripleOps functions.
*/
implicit class TripleOperations(triples: RDD[Triple]) extends Logging {
import net.sansa_stack.rdf.spark.model.rdd.TripleOps
/**
* Convert a [[RDD[Triple]]] into a DataFrame.
*
* @return a DataFrame of triples.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.toDF]]
*/
def toDF(): DataFrame =
TripleOps.toDF(triples)
/**
* Convert an RDD of Triple into a Dataset of Triple.
*
* @return a Dataset of triples.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.toDS]]
*/
def toDS(): Dataset[Triple] =
TripleOps.toDS(triples)
/**
* Get triples.
*
* @return [[RDD[Triple]]] which contains list of the triples.
*/
def getTriples(): RDD[Triple] =
TripleOps.getTriples(triples)
/**
* Get subjects.
*
* @return [[RDD[Node]]] which contains list of the subjects.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.getSubjects]]
*/
def getSubjects(): RDD[Node] =
TripleOps.getSubjects(triples)
/**
* Get predicates.
*
* @return [[RDD[Node]]] which contains list of the predicates.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.getPredicates]]
*/
def getPredicates(): RDD[Node] =
TripleOps.getPredicates(triples)
/**
* Get objects.
*
* @return [[RDD[Node]]] which contains list of the objects.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.getObjects]]
*/
def getObjects(): RDD[Node] =
TripleOps.getObjects(triples)
/**
* Filter out the subject from a given RDD[Triple],
* based on a specific function @func .
*
* @param func a partial function.
* @return [[RDD[Triple]]] a subset of the given RDD.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.filterSubjects]]
*/
def filterSubjects(func: Node => Boolean): RDD[Triple] =
TripleOps.filterSubjects(triples, func)
/**
* Filter out the predicates from a given RDD[Triple],
* based on a specific function @func .
*
* @param func a partial function.
* @return [[RDD[Triple]]] a subset of the given RDD.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.filterPredicates]]
*/
def filterPredicates(func: Node => Boolean): RDD[Triple] =
TripleOps.filterPredicates(triples, func)
/**
* Filter out the objects from a given RDD[Triple],
* based on a specific function @func .
*
* @param func a partial function.
* @return [[RDD[Triple]]] a subset of the given RDD.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.filterObjects]]
*/
def filterObjects(func: Node => Boolean): RDD[Triple] =
TripleOps.filterObjects(triples, func)
/**
* Returns an RDD of triples that match with the given input.
*
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return RDD of triples
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.find]]
*/
def find(subject: Option[Node] = None, predicate: Option[Node] = None, `object`: Option[Node] = None): RDD[Triple] =
TripleOps.find(triples, subject, predicate, `object`)
/**
* Returns an RDD of triples that match with the given input.
*
* @param triple the triple to be checked
* @return RDD of triples that match the given input
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.find]]
*/
def find(triple: Triple): RDD[Triple] =
TripleOps.find(triples, triple)
/**
* Determine whether this RDF graph contains any triples
* with a given subject and predicate.
*
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @return true if there exists within this RDF graph
* a triple with subject and predicate, false otherwise
*/
def contains(subject: Some[Node], predicate: Some[Node]): Boolean =
TripleOps.contains(triples, subject, predicate, None)
/**
* Determine whether this RDF graph contains any triples
* with a given (subject, predicate, object) pattern.
*
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @param object the object (None for any)
* @return true if there exists within this RDF graph
* a triple with (S, P, O) pattern, false otherwise
*/
def contains(subject: Some[Node], predicate: Some[Node], `object`: Some[Node]): Boolean =
TripleOps.contains(triples, subject, predicate, `object`)
/**
* Determine if a triple is present in this RDF graph.
*
* @param triple the triple to be checked
* @return true if the statement s is in this RDF graph, false otherwise
*/
def contains(triple: Triple): Boolean =
TripleOps.contains(triples, triple)
/**
* Determine if any of the triples in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if any of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAny(other: RDD[Triple]): Boolean =
TripleOps.containsAny(triples, other)
/**
* Determine if all of the statements in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if all of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAll(other: RDD[Triple]): Boolean =
TripleOps.containsAll(triples, other)
/**
* Return the number of triples.
*
* @return the number of triples
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.size]]
*/
def size(): Long =
TripleOps.size(triples)
/**
* Return the union of this RDF graph and another one.
*
* @param other of the other RDF graph
* @return graph (union of both)
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.union]]
*/
def union(other: RDD[Triple]): RDD[Triple] =
TripleOps.union(triples, other)
/**
* Return the union all of RDF graphs.
*
* @return graph (union of all)
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.unionAll]]
*/
def unionAll(others: Seq[RDD[Triple]]): RDD[Triple] =
TripleOps.unionAll(triples, others)
/**
* Returns a new RDF graph that contains the intersection of the current RDF graph with the given RDF graph.
*
* @param other of the other RDF graph
* @return the intersection of both RDF graphs
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.intersection]]
*/
def intersection(other: RDD[Triple]): RDD[Triple] =
TripleOps.intersection(triples, other)
/**
* Returns a new RDF graph that contains the difference between the current RDF graph and the given RDF graph.
*
* @param other of the other RDF graph
* @return the difference of both RDF graphs
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.difference]]
*/
def difference(other: RDD[Triple]): RDD[Triple] =
TripleOps.difference(triples, other)
/**
* Add a statement to the current RDF graph.
*
* @param triple the triple to be added.
* @return new RDD of triples containing this statement.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.add]]
*/
def add(triple: Triple): RDD[Triple] =
TripleOps.add(triples, triple)
/**
* Add a list of statements to the current RDF graph.
*
* @param triple the list of triples to be added.
* @return new RDD of triples containing this list of statements.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.addAll]]
*/
def addAll(triple: Seq[Triple]): RDD[Triple] =
TripleOps.addAll(triples, triple)
/**
* Removes a statement from the current RDF graph.
* The statement with the same subject, predicate and object as that supplied will be removed from the model.
*
* @param triple the statement to be removed.
* @return new RDD of triples without this statement.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.remove]]
*/
def remove(triple: Triple): RDD[Triple] =
TripleOps.remove(triples, triple)
/**
* Removes all the statements from the current RDF graph.
* The statements with the same subject, predicate and object as those supplied will be removed from the model.
*
* @param triple the list of statements to be removed.
* @return new RDD of triples without these statements.
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.removeAll]]
*/
def removeAll(triple: Seq[Triple]): RDD[Triple] =
TripleOps.removeAll(triples, triple)
/**
* Write N-Triples from a given RDD of triples
*
* @param path path to the file containing N-Triples
* @see [[net.sansa_stack.rdf.spark.model.rdd.TripleOps.saveAsNTriplesFile]]
*/
def saveAsNTriplesFile(path: String): Unit =
TripleOps.saveAsNTriplesFile(triples, path)
}
/**
* Adds all methods to [[RDD]] that allows to use HDT TripleOps functions.
*/
implicit class HDTTripleOperations(triples: RDD[Triple]) extends Logging {
import net.sansa_stack.rdf.spark.model.hdt.TripleOps
type HDTDataFrame = DataFrame
/**
* Convert an RDD of triples into a DataFrame of hdt.
*
* @return a DataFrame of hdt triples.
*/
def asHDT(): HDTDataFrame =
TripleOps.asHDT(triples)
}
/**
* Adds a method, `readHDTFromDisk`, to [[SparkSession]] that allows reading
* HDT files from disk.
*/
implicit class HDTReader(spark: SparkSession) {
import net.sansa_stack.rdf.spark.model.hdt.TripleOps
/**
* Read hdt data from disk.
*
* @param input -- path to hdt data.
* @return DataFrames of the hdt, subject, predicate, and object views.
*/
def readHDTFromDisk(input: String): (DataFrame, DataFrame, DataFrame, DataFrame) =
TripleOps.readHDTFromDisk(input)
}
/**
* Adds a method, `saveAsCSV`, to a tuple of HDT DataFrames that allows writing
* them to disk.
*/
implicit class HDTWriter(hdt_tables: (DataFrame, DataFrame, DataFrame, DataFrame)) {
import net.sansa_stack.rdf.spark.model.hdt.TripleOps
/**
* Function saves the Index and Dictionaries Dataframe into given path
*
* @param output path to be written
*/
def saveAsCSV(output: String): Unit =
TripleOps.saveAsCSV(hdt_tables._1, hdt_tables._2, hdt_tables._3, hdt_tables._4,
output, org.apache.spark.sql.SaveMode.Overwrite)
}
/**
* Adds all methods to [[RDD]] that allows to use Tensor TripleOps functions.
*/
implicit class TensorTripleOperations(triples: RDD[Triple]) extends Logging {
import net.sansa_stack.rdf.spark.model.tensor.TripleOps
/**
* Return all the mapped triples (tensor) based on their relations
*
* @return all the mapped triples (tensor) based on their relations
*/
def asTensor(): RDD[(Long, Long, Long)] =
TripleOps.getMappedTriples(triples)
/**
* Return size of the entities in the graph
*
* @return size of the entities in the graph
*/
def getNumEntities(): Long =
TripleOps.getNumEntities(triples)
/**
* Return size of the relations in the graph
*
* @return size of the relations in the graph
*/
def getNumRelations(): Long =
TripleOps.getNumRelations(triples)
}
/**
* Adds all methods to [[DataFrame]] that allows to use TripleOps functions.
*/
implicit class DFTripleOperations(triples: DataFrame) extends Logging {
import net.sansa_stack.rdf.spark.model.df.TripleOps
/**
* Convert a DataFrame of triples into [[RDD[Triple]]].
*
* @return an RDD of triples.
*/
def toRDD(): RDD[Triple] =
TripleOps.toRDD(triples)
/**
* Convert an DataFrame of triples into a Dataset of Triple.
*
* @return a Dataset of triples.
*/
def toDS(): Dataset[Triple] =
TripleOps.toDS(triples)
/**
* Get triples.
*
* @return [[RDD[Triple]]] which contains list of the triples.
*/
def getTriples(): DataFrame =
TripleOps.getTriples(triples)
/**
* Get subjects.
*
* @return DataFrame which contains list of the subjects.
*/
def getSubjects(): DataFrame =
TripleOps.getSubjects(triples)
/**
* Get predicates.
*
* @return DataFrame which contains list of the predicates.
*/
def getPredicates(): DataFrame =
TripleOps.getPredicates(triples)
/**
* Get objects.
*
* @return DataFrame which contains list of the objects.
*/
def getObjects(): DataFrame =
TripleOps.getObjects(triples)
/**
* Returns an DataFrame of triples that match with the given input.
*
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return DataFrame of triples
*/
def find(subject: Option[String] = None, predicate: Option[String] = None, `object`: Option[String] = None): DataFrame =
TripleOps.find(triples, subject, predicate, `object`)
/**
* Returns an DataFrame of triples that match with the given input.
*
* @param triple the triple to be checked
* @return DataFrame of triples that match the given input
*/
def find(triple: Triple): DataFrame =
TripleOps.find(triples, triple)
/**
* Return the number of triples.
*
* @return the number of triples
*/
def size(): Long =
TripleOps.size(triples)
/**
* Return the union of this RDF graph and another one.
*
* @param triples DataFrame of RDF graph
* @param other the other RDF graph
* @return graph (union of both)
*/
def union(other: DataFrame): DataFrame =
TripleOps.union(triples, other)
/**
* Return the union all of RDF graphs.
*
* @param others sequence of DataFrames of other RDF graph
* @return graph (union of all)
*/
def unionAll(others: Seq[DataFrame]): DataFrame =
TripleOps.unionAll(triples, others)
/**
* Returns a new RDF graph that contains the intersection
* of the current RDF graph with the given RDF graph.
*
* @param other the other RDF graph
* @return the intersection of both RDF graphs
*/
def intersection(other: DataFrame): DataFrame =
TripleOps.intersection(triples, other)
/**
* Returns a new RDF graph that contains the difference
* between the current RDF graph and the given RDF graph.
*
* @param other the other RDF graph
* @return the difference of both RDF graphs
*/
def difference(other: DataFrame): DataFrame =
TripleOps.difference(triples, other)
/**
* Determine whether this RDF graph contains any triples
* with a given (subject, predicate, object) pattern.
*
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @param object the object (None for any)
* @return true if there exists within this RDF graph
* a triple with (S, P, O) pattern, false otherwise
*/
def contains(subject: Option[String] = None, predicate: Option[String] = None, `object`: Option[String] = None): Boolean =
TripleOps.contains(triples, subject, predicate, `object`)
/**
* Determine if a triple is present in this RDF graph.
*
* @param triple the triple to be checked
* @return true if the statement s is in this RDF graph, false otherwise
*/
def contains(triple: Triple): Boolean =
TripleOps.contains(triples, triple)
/**
* Determine if any of the triples in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if any of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAny(other: DataFrame): Boolean =
TripleOps.containsAny(triples, other)
/**
* Determine if all of the statements in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if all of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAll(other: DataFrame): Boolean =
TripleOps.containsAll(triples, other)
/**
* Add a statement to the current RDF graph.
*
* @param triple the triple to be added.
* @return new DataFrame of triples containing this statement.
*/
def add(triple: Triple): DataFrame =
TripleOps.add(triples, triple)
/**
* Add a list of statements to the current RDF graph.
*
* @param triple the list of triples to be added.
* @return new DataFrame of triples containing this list of statements.
*/
def addAll(triple: Seq[Triple]): DataFrame =
TripleOps.addAll(triples, triple)
/**
* Removes a statement from the current RDF graph.
* The statement with the same subject, predicate and
* object as that supplied will be removed from the model.
*
* @param triple the statement to be removed.
* @return new DataFrame of triples without this statement.
*/
def remove(triple: Triple): DataFrame =
TripleOps.remove(triples, triple)
/**
* Removes all the statements from the current RDF graph.
* The statements with the same subject, predicate and
* object as those supplied will be removed from the model.
*
* @param triple the list of statements to be removed.
* @return new DataFrame of triples without these statements.
*/
def removeAll(triple: Seq[Triple]): DataFrame =
TripleOps.removeAll(triples, triple)
/**
* Write N-Triples from a given DataFrame of triples
*
* @param triples DataFrame of RDF graph
* @param path path to the file containing N-Triples
*/
def saveAsNTriplesFile(path: String): Unit =
TripleOps.saveAsNTriplesFile(triples, path)
}
/**
* Adds all methods to [[Dataset[Triple]]] that allows to use TripleOps functions.
*/
implicit class DSTripleOperations(triples: Dataset[Triple]) extends Logging {
import net.sansa_stack.rdf.spark.model.ds.TripleOps
/**
* Convert a Dataset of triples into [[RDD[Triple]]].
*
* @return a RDD of triples.
*/
def toRDD(): RDD[Triple] =
TripleOps.toRDD(triples)
/**
* Convert an Dataset of triples into a DataFrame of triples.
*
* @return a DataFrame of triples.
*/
def toDF(): DataFrame =
TripleOps.toDF(triples)
/**
* Get triples.
*
* @return [[Dataset[Triple]]] which contains list of the triples.
*/
def getTriples(): Dataset[Triple] =
TripleOps.getTriples(triples)
/**
* Returns an Dataset of triples that match with the given input.
*
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return Dataset of triples
*/
def find(subject: Option[Node] = None, predicate: Option[Node] = None, `object`: Option[Node] = None): Dataset[Triple] =
TripleOps.find(triples, subject, predicate, `object`)
/**
* Returns an Dataset of triples that match with the given input.
*
* @param triple the triple to be checked
* @return Dataset of triples that match the given input
*/
def find(triple: Triple): Dataset[Triple] =
TripleOps.find(triples, triple)
/**
* Return the number of triples.
*
* @return the number of triples
*/
def size(): Long =
TripleOps.size(triples)
/**
* Return the union of this RDF graph and another one.
*
* @param triples Dataset of RDF graph
* @param other the other RDF graph
* @return graph (union of both)
*/
def union(other: Dataset[Triple]): Dataset[Triple] =
TripleOps.union(triples, other)
/**
* Return the union all of RDF graphs.
*
* @param others sequence of Dataset of other RDF graph
* @return graph (union of all)
*/
def unionAll(others: Seq[Dataset[Triple]]): Dataset[Triple] =
TripleOps.unionAll(triples, others)
/**
* Returns a new RDF graph that contains the intersection
* of the current RDF graph with the given RDF graph.
*
* @param other the other RDF graph
* @return the intersection of both RDF graphs
*/
def intersection(other: Dataset[Triple]): Dataset[Triple] =
TripleOps.intersection(triples, other)
/**
* Returns a new RDF graph that contains the difference
* between the current RDF graph and the given RDF graph.
*
* @param other the other RDF graph
* @return the difference of both RDF graphs
*/
def difference(other: Dataset[Triple]): Dataset[Triple] =
TripleOps.difference(triples, other)
/**
* Determine whether this RDF graph contains any triples
* with a given (subject, predicate, object) pattern.
*
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @param object the object (None for any)
* @return true if there exists within this RDF graph
* a triple with (S, P, O) pattern, false otherwise
*/
def contains(subject: Option[Node] = None, predicate: Option[Node] = None, `object`: Option[Node] = None): Boolean =
TripleOps.contains(triples, subject, predicate, `object`)
/**
* Determine if a triple is present in this RDF graph.
*
* @param triple the triple to be checked
* @return true if the statement s is in this RDF graph, false otherwise
*/
def contains(triple: Triple): Boolean =
TripleOps.contains(triples, triple)
/**
* Determine if any of the triples in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if any of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAny(other: Dataset[Triple]): Boolean =
TripleOps.containsAny(triples, other)
/**
* Determine if all of the statements in an RDF graph are also contained in this RDF graph.
*
* @param other the other RDF graph containing the statements to be tested
* @return true if all of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAll(other: Dataset[Triple]): Boolean =
TripleOps.containsAll(triples, other)
/**
* Add a statement to the current RDF graph.
*
* @param triple the triple to be added.
* @return new Dataset of triples containing this statement.
*/
def add(triple: Triple): Dataset[Triple] =
TripleOps.add(triples, triple)
/**
* Add a list of statements to the current RDF graph.
*
* @param triple the list of triples to be added.
* @return new Dataset of triples containing this list of statements.
*/
def addAll(triple: Seq[Triple]): Dataset[Triple] =
TripleOps.addAll(triples, triple)
/**
* Removes a statement from the current RDF graph.
* The statement with the same subject, predicate and
* object as that supplied will be removed from the model.
*
* @param triple the statement to be removed.
* @return new Dataset of triples without this statement.
*/
def remove(triple: Triple): Dataset[Triple] =
TripleOps.remove(triples, triple)
/**
* Removes all the statements from the current RDF graph.
* The statements with the same subject, predicate and
* object as those supplied will be removed from the model.
*
* @param triple the list of statements to be removed.
* @return new Dataset of triples without these statements.
*/
def removeAll(triple: Seq[Triple]): Dataset[Triple] =
TripleOps.removeAll(triples, triple)
/**
* Write N-Triples from a given Dataset of triples
*
* @param triples Dataset of RDF graph
* @param path path to the file containing N-Triples
*/
def saveAsNTriplesFile(path: String): Unit =
TripleOps.saveAsNTriplesFile(triples, path)
}
/**
* Adds methods, `asGraph`, `asHashedGraph` and `asStringGraph`, to [[RDD]] that allow transforming it into a GraphX representation.
*/
implicit class GraphLoader(triples: RDD[Triple]) extends Logging {
import net.sansa_stack.rdf.spark.model.graph.GraphOps
/**
* Constructs GraphX graph from RDD of triples
*
* @return object of GraphX which contains the constructed ''graph''.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.constructGraph]]
*/
def asGraph(): Graph[Node, Node] =
GraphOps.constructGraph(triples)
/**
* Constructs Hashed GraphX graph from RDD of triples
*
* @return object of GraphX which contains the constructed hashed ''graph''.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.constructHashedGraph]]
*/
def asHashedGraph(): Graph[Node, Node] =
GraphOps.constructHashedGraph(triples)
/**
* Constructs String GraphX graph from RDD of triples
*
* @return object of GraphX which contains the constructed string ''graph''.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.constructStringGraph]]
*/
def asStringGraph(): Graph[String, String] =
GraphOps.constructStringGraph(triples)
}
/**
* Adds methods, such as `toRDD`, `find` and `size`, to [[Graph[Node, Node]]] that allow different operations on it.
*/
implicit class GraphOperations(graph: Graph[Node, Node]) extends Logging {
import net.sansa_stack.rdf.spark.model.graph.GraphOps
/**
* Convert a graph into a RDD of Triple.
*
* @return a RDD of triples.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.toRDD]]
*/
def toRDD(): RDD[Triple] =
GraphOps.toRDD(graph)
/**
* Convert a graph into a DataFrame.
*
* @return a DataFrame of triples.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.toDF]]
*/
def toDF(): DataFrame =
GraphOps.toDF(graph)
/**
* Convert a graph into a Dataset of Triple.
*
* @return a Dataset of triples.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.toDS]]
*/
def toDS(): Dataset[Triple] =
GraphOps.toDS(graph)
/**
* Finds triplets of a given graph.
*
* @param subject
* @param predicate
* @param object
* @return graph which contains subset of the reduced graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.find]]
*/
def find(subject: Node, predicate: Node, `object`: Node): Graph[Node, Node] =
GraphOps.find(graph, subject, predicate, `object`)
/**
* Gets triples of a given graph.
*
* @return [[RDD[Triple]]] from the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.getTriples]]
*/
def getTriples(): RDD[Triple] =
GraphOps.getTriples(graph)
/**
* Gets subjects of a given graph.
*
* @return [[RDD[Node]]] from the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.getSubjects]]
*/
def getSubjects(): RDD[Node] =
GraphOps.getSubjects(graph)
/**
* Gets predicates of a given graph.
*
* @return [[RDD[Node]]] from the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.getPredicates]]
*/
def getPredicates(): RDD[Node] =
GraphOps.getPredicates(graph)
/**
* Gets objects of a given graph.
*
* @return [[RDD[Node]]] from the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.getObjects]]
*/
def getObjects(): RDD[Node] =
GraphOps.getObjects(graph)
/**
* Filter out the subject from a given graph,
* based on a specific function @func .
*
* @param func a partial function.
* @return [[Graph[Node, Node]]] a subset of the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.filterSubjects]]
*/
def filterSubjects(func: Node => Boolean): Graph[Node, Node] =
GraphOps.filterSubjects(graph, func)
/**
* Filter out the predicates from a given graph,
* based on a specific function @func .
*
* @param func a partial function.
* @return [[Graph[Node, Node]]] a subset of the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.filterPredicates]]
*/
def filterPredicates(func: Node => Boolean): Graph[Node, Node] =
GraphOps.filterPredicates(graph, func)
/**
* Filter out the objects from a given graph,
* based on a specific function @func .
*
* @param func a partial function.
* @return [[Graph[Node, Node]]] a subset of the given graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.filterObjects]]
*/
def filterObjects(func: Node => Boolean): Graph[Node, Node] =
GraphOps.filterObjects(graph, func)
/**
* Compute the size of the graph
*
* @return the number of edges in the graph.
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.size]]
*/
def size(): Long =
GraphOps.size(graph)
/**
* Return the union of this graph and another one.
*
* @param other of the other graph
* @return graph (union of all)
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.union]]
*/
def union(other: Graph[Node, Node]): Graph[Node, Node] =
GraphOps.union(graph, other)
/**
* Returns a new RDF graph that contains the difference between the current RDF graph and the given RDF graph.
*
* @param other the other RDF graph
* @return the difference of both RDF graphs
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.difference]]
*/
def difference(other: Graph[Node, Node]): Graph[Node, Node] =
GraphOps.difference(graph, other)
/**
* Returns a new RDF graph that contains the intersection of the current RDF graph with the given RDF graph.
*
* @param other the other RDF graph
* @return the intersection of both RDF graphs
* @see [[net.sansa_stack.rdf.spark.graph.GraphOps.intersection]]
*/
def intersection(other: Graph[Node, Node]): Graph[Node, Node] =
GraphOps.intersection(graph, other)
/**
* Returns the level at which a vertex stands in the hierarchy.
*/
def hierarcyDepth(): Graph[(VertexId, Int, Node), Node] =
GraphOps.hierarcyDepth(graph)
/**
* Save the Graph to JSON format
*
* @param output the output
*/
def saveGraphToJson(output: String): Unit =
GraphOps.saveGraphToJson(graph, output)
}
}
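// Hedged usage sketch (not part of the original file): importing this package object
// brings the implicit classes above into scope, so an existing RDD[Triple] (here a
// hypothetical `triples`) picks up the TripleOps methods:
//   import net.sansa_stack.rdf.spark.model._
//   val df       = triples.toDF()          // via TripleOperations.toDF
//   val subjects = triples.getSubjects()   // RDD[Node]
//   val graph    = triples.asGraph()       // GraphX Graph[Node, Node] via GraphLoader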
| SANSA-Stack/Spark-RDF | sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/model/package.scala | Scala | gpl-3.0 | 32,501 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.orc.jobs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.orc.mapred.OrcStruct
import org.locationtech.geomesa.fs.storage.api.StorageMetadata.StorageFileAction.StorageFileAction
import org.locationtech.geomesa.fs.storage.common.jobs.StorageConfiguration
import org.locationtech.geomesa.fs.storage.common.jobs.StorageConfiguration.SimpleFeatureAction
import org.locationtech.geomesa.fs.storage.orc.jobs.OrcSimpleFeatureInputFormat.{OrcSimpleFeatureInputFormatBase, OrcSimpleFeatureRecordReaderBase}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
* Input format for orc files that tracks the action and timestamp associated with each feature
*/
class OrcSimpleFeatureActionInputFormat extends OrcSimpleFeatureInputFormatBase[SimpleFeatureAction] {
override protected def createRecordReader(
delegate: RecordReader[NullWritable, OrcStruct],
split: FileSplit,
conf: Configuration,
sft: SimpleFeatureType,
filter: Option[Filter],
transform: Option[(String, SimpleFeatureType)],
columns: Option[Set[Int]]): RecordReader[SimpleFeatureAction, SimpleFeature] = {
val (timestamp, action) = StorageConfiguration.getPathAction(conf, split.getPath)
new OrcSimpleFeatureActionRecordReader(delegate, sft, filter, transform, columns, timestamp, action)
}
class OrcSimpleFeatureActionRecordReader(
delegate: RecordReader[NullWritable, OrcStruct],
sft: SimpleFeatureType,
filter: Option[Filter],
transform: Option[(String, SimpleFeatureType)],
columns: Option[Set[Int]],
timestamp: Long,
action: StorageFileAction
) extends OrcSimpleFeatureRecordReaderBase[SimpleFeatureAction](delegate, sft, filter, transform, columns) {
override def getCurrentKey: SimpleFeatureAction =
new SimpleFeatureAction(getCurrentValue.getID, timestamp, action)
}
}
|
locationtech/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-orc/src/main/scala/org/locationtech/geomesa/fs/storage/orc/jobs/OrcSimpleFeatureActionInputFormat.scala
|
Scala
|
apache-2.0
| 2,572
|
package org.xsc.pure
import akka.actor.Actor
import concurrent.Future
object PureActor {
trait Base[Action, Effect, Response, State] {
type Propagate = PartialFunction[Effect, Future[Unit]]
val initialState: State
val handleAction: (State, Action) => (Option[Response], List[Effect])
val updateState: (State, Effect) => State
val propagateEffect: Propagate
protected def wrapReceive(receiveAction: Action => Unit,
receiveEffect: Effect => Unit): Actor.Receive
}
final object ProbeState
}
trait PureActor[Action, Effect, Response, State]
extends Actor
with PureActor.Base[Action, Effect, Response, State] {
// ---- Action/Effect Handling
private def receiveAction(state: State)(action: Action): Unit = {
val (maybeResponse, effects) = handleAction(state, action)
val newState = effects.foldLeft(state)(updateState)
context.become(generateReceive(newState))
maybeResponse.foreach(sender() ! _)
forwardEffects(effects)
}
private def forwardEffects(effects: List[Effect]): Unit = {
effects.foreach(self.forward(_))
}
private def receiveEffect(effect: Effect): Unit = {
propagateEffect.lift(effect)
()
}
private def receiveInternal(state: State): Receive = {
case PureActor.ProbeState => sender() ! state
}
// ---- Receive/Recover Logic
override def receive: Receive = generateReceive(initialState)
private def generateReceive(state: State): Receive = {
wrapReceive(receiveAction(state), receiveEffect)
.orElse(receiveInternal(state))
}
}
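/* A hedged sketch of a concrete implementation (all names below are
 * illustrative and not part of this library): a counter whose action handler
 * produces a response plus effects, while the state update is driven purely
 * by the effects.
 *
 *   object Counter {
 *     sealed trait Action; case object Inc extends Action
 *     sealed trait Effect; case object Incremented extends Effect
 *   }
 *
 *   class CounterActor extends PureActor[Counter.Action, Counter.Effect, Int, Int] {
 *     import Counter._
 *     val initialState = 0
 *     val handleAction = (s: Int, _: Action) => (Some(s + 1), List(Incremented))
 *     val updateState = (s: Int, _: Effect) => s + 1
 *     val propagateEffect: Propagate = { case Incremented => Future.successful(()) }
 *     protected def wrapReceive(receiveAction: Action => Unit,
 *                               receiveEffect: Effect => Unit): Actor.Receive = {
 *       case a: Action => receiveAction(a)
 *       case e: Effect => receiveEffect(e)
 *     }
 *   }
 */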
|
xsc/akka-pure-actor
|
src/main/scala/org/xsc/pure/PureActor.scala
|
Scala
|
mit
| 1,596
|
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.algebird.monad.Reader
import com.twitter.algebird.Monoid
import com.twitter.scalding.cascading_interop.FlowListenerPromise
import com.twitter.scalding.Dsl.flowDefToRichFlowDef
import scala.concurrent.{ Await, Future, Promise, ExecutionContext => ConcurrentExecutionContext }
import scala.util.{ Failure, Success, Try }
import cascading.flow.{ FlowDef, Flow }
/**
* This is a Monad that represents a computation and a result
*/
sealed trait Execution[+T] {
import Execution.{ Mapped, MapCounters, FactoryExecution, FlatMapped, Zipped }
/*
* First run this Execution, then move to the result
* of the function
*/
def flatMap[U](fn: T => Execution[U]): Execution[U] =
FlatMapped(this, fn)
def flatten[U](implicit ev: T <:< Execution[U]): Execution[U] =
flatMap(ev)
def map[U](fn: T => U): Execution[U] =
Mapped(this, fn)
/**
* Reads the counters into the value, but does not reset them.
* You may want .getAndResetCounters
*
*/
def getCounters: Execution[(T, ExecutionCounters)] =
MapCounters[T, (T, ExecutionCounters)](this, { case tc @ (t, c) => (tc, c) })
def getAndResetCounters: Execution[(T, ExecutionCounters)] =
getCounters.resetCounters
/**
* Resets the counters back to zero. This can happen if
* you want to reset before a zip or a call to flatMap
*/
def resetCounters: Execution[T] =
MapCounters[T, T](this, { case (t, c) => (t, ExecutionCounters.empty) })
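/* A hedged usage sketch (assuming an illustrative `someExec: Execution[Long]`
 * and a made-up stat key): getAndResetCounters pairs the result with the
 * counters accumulated so far and zeroes them for the next stage.
 *
 *   val counted: Execution[(Long, Long)] =
 *     someExec.getAndResetCounters.map { case (result, counters) =>
 *       (result, counters(StatKey("records", "my-group")))
 *     }
 */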
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext): Future[T] =
runStats(conf, mode)(cec).map(_._1)
protected def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext): Future[(T, ExecutionCounters)]
/**
* This is a convenience for when we don't care about the result.
* Same as .map(_ => ())
*/
def unit: Execution[Unit] = map(_ => ())
// This waits synchronously on run, using the global execution context
def waitFor(conf: Config, mode: Mode): Try[T] =
Try(Await.result(run(conf, mode)(ConcurrentExecutionContext.global),
scala.concurrent.duration.Duration.Inf))
/*
* run this and that in parallel, without any dependency
*/
def zip[U](that: Execution[U]): Execution[(T, U)] = that match {
// push zips as low as possible
case fact @ FactoryExecution(_) => fact.zip(this).map(_.swap)
case _ => Zipped(this, that)
}
}
object Execution {
private case class Const[T](get: () => T) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
Future(get(), ExecutionCounters.empty)
override def unit = Const(() => ())
}
private case class FlatMapped[S, T](prev: Execution[S], fn: S => Execution[T]) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) = for {
(s, st1) <- prev.runStats(conf, mode)
next = fn(s)
(t, st2) <- next.runStats(conf, mode)
} yield (t, Monoid.plus(st1, st2))
}
private case class Mapped[S, T](prev: Execution[S], fn: S => T) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
prev.runStats(conf, mode).map { case (s, stats) => (fn(s), stats) }
// Don't bother applying the function if we are mapped
override def unit = prev.unit
}
private case class MapCounters[T, U](prev: Execution[T],
fn: ((T, ExecutionCounters)) => (U, ExecutionCounters)) extends Execution[U] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
prev.runStats(conf, mode).map(fn)
}
private case class Zipped[S, T](one: Execution[S], two: Execution[T]) extends Execution[(S, T)] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
one.runStats(conf, mode).zip(two.runStats(conf, mode))
.map { case ((s, ss), (t, st)) => ((s, t), Monoid.plus(ss, st)) }
// Make sure we remove any mapping functions on both sides
override def unit = one.unit.zip(two.unit).map(_ => ())
}
private case class UniqueIdExecution[T](fn: UniqueID => Execution[T]) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) = {
val (uid, nextConf) = conf.ensureUniqueId
fn(uid).runStats(nextConf, mode)
}
}
/*
* This is the main class that represents a flow without any combinators
*/
private case class FlowDefExecution[T](result: (Config, Mode) => (FlowDef, (JobStats => Future[T]))) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) = {
for {
(flowDef, fn) <- Future(result(conf, mode))
jobStats <- ExecutionContext.newContext(conf)(flowDef, mode).run
t <- fn(jobStats)
} yield (t, ExecutionCounters.fromJobStats(jobStats))
}
/*
* Cascading can run parallel Executions in the same flow if they are both FlowDefExecutions
*/
override def zip[U](that: Execution[U]): Execution[(T, U)] =
that match {
case FlowDefExecution(result2) =>
FlowDefExecution({ (conf, m) =>
val (fd1, fn1) = result(conf, m)
val (fd2, fn2) = result2(conf, m)
val merged = fd1.copy
merged.mergeFrom(fd2)
(merged, { (js: JobStats) => fn1(js).zip(fn2(js)) })
})
case _ => super.zip(that)
}
}
private case class FactoryExecution[T](result: (Config, Mode) => Execution[T]) extends Execution[T] {
def runStats(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
unwrap(conf, mode, this).runStats(conf, mode)
@annotation.tailrec
private def unwrap[U](conf: Config, mode: Mode, that: Execution[U]): Execution[U] =
that match {
case FactoryExecution(fn) => unwrap(conf, mode, fn(conf, mode))
case nonFactory => nonFactory
}
/*
* Cascading can run parallel Executions in the same flow if they are both FlowDefExecutions
*/
override def zip[U](that: Execution[U]): Execution[(T, U)] =
that match {
case FactoryExecution(result2) =>
FactoryExecution({ (conf, m) =>
val exec1 = unwrap(conf, m, result(conf, m))
val exec2 = unwrap(conf, m, result2(conf, m))
exec1.zip(exec2)
})
case _ =>
FactoryExecution({ (conf, m) =>
val exec1 = unwrap(conf, m, result(conf, m))
exec1.zip(that)
})
}
}
/**
* This makes a constant execution that runs no job.
*/
def from[T](t: => T): Execution[T] = Const(() => t)
private[scalding] def factory[T](fn: (Config, Mode) => Execution[T]): Execution[T] =
FactoryExecution(fn)
/**
* This converts a function into an Execution monad. The flowDef returned
* is never mutated. The returned callback function is called after the flow
* is run and succeeds.
*/
def fromFn[T](
fn: (Config, Mode) => ((FlowDef, JobStats => Future[T]))): Execution[T] =
FlowDefExecution(fn)
/**
* Use this to use counters/stats with Execution. You do this:
* Execution.withId { implicit uid =>
* val myStat = Stat("myStat") // uid is implicitly pulled in
* pipe.map { t =>
* if(someCase(t)) myStat.inc
* fn(t)
* }
* .writeExecution(mySink)
* }
*
*/
def withId[T](fn: UniqueID => Execution[T]): Execution[T] = UniqueIdExecution(fn)
/**
* This creates a new ExecutionContext, passes it to the reader, builds the flow
* and cleans up the state of the FlowDef
*/
def buildFlow[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Try[Flow[_]]) = {
val ec = ExecutionContext.newContextEmpty(conf, mode)
try {
// This mutates the newFlowDef in ec
val resultT = op(ec)
(resultT, ec.buildFlow)
} finally {
// Make sure to clean up all state with flowDef
FlowStateMap.clear(ec.flowDef)
}
}
def run[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Future[JobStats]) = {
val (t, tryFlow) = buildFlow(conf, mode)(op)
tryFlow match {
case Success(flow) => (t, run(flow))
case Failure(err) => (t, Future.failed(err))
}
}
/*
* This runs a Flow using Cascading's built in threads. The resulting JobStats
* are put into a promise when they are ready
*/
def run[C](flow: Flow[C]): Future[JobStats] =
// This is in Java because of the cascading API's raw types on FlowListener
FlowListenerPromise.start(flow, { f: Flow[C] => JobStats(f.getFlowStats) })
/*
* If you want scalding to fail if the sources cannot be validated, then
* use this.
* Alternatively, in your Reader, call Source.validateTaps(Mode) to
* control which sources individually need validation
* Suggested use:
* for {
* result <- job
* mightErr <- validateSources
* } yield mightErr.map(_ => result)
*/
def validateSources: Reader[ExecutionContext, Try[Unit]] =
Reader { ec => Try(FlowStateMap.validateSources(ec.flowDef, ec.mode)) }
def waitFor[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Try[JobStats]) = {
val (t, tryFlow) = buildFlow(conf, mode)(op)
(t, tryFlow.flatMap(waitFor(_)))
}
/*
* This blocks the current thread until the job completes with either success or
* failure.
*/
def waitFor[C](flow: Flow[C]): Try[JobStats] =
Try {
flow.complete;
JobStats(flow.getStats)
}
def zip[A, B](ax: Execution[A], bx: Execution[B]): Execution[(A, B)] =
ax.zip(bx)
def zip[A, B, C](ax: Execution[A], bx: Execution[B], cx: Execution[C]): Execution[(A, B, C)] =
ax.zip(bx).zip(cx).map { case ((a, b), c) => (a, b, c) }
def zip[A, B, C, D](ax: Execution[A],
bx: Execution[B],
cx: Execution[C],
dx: Execution[D]): Execution[(A, B, C, D)] =
ax.zip(bx).zip(cx).zip(dx).map { case (((a, b), c), d) => (a, b, c, d) }
def zip[A, B, C, D, E](ax: Execution[A],
bx: Execution[B],
cx: Execution[C],
dx: Execution[D],
ex: Execution[E]): Execution[(A, B, C, D, E)] =
ax.zip(bx).zip(cx).zip(dx).zip(ex).map { case ((((a, b), c), d), e) => (a, b, c, d, e) }
/*
* If you have many Executions, it is better to combine them with
* zip than flatMap (which is sequential)
*/
def zipAll[T](exs: Seq[Execution[T]]): Execution[Seq[T]] = {
@annotation.tailrec
def go(xs: List[Execution[T]], acc: Execution[List[T]]): Execution[List[T]] = xs match {
case Nil => acc
case h :: tail => go(tail, h.zip(acc).map { case (y, ys) => y :: ys })
}
// This pushes all of them onto a list, and then reverse to keep order
go(exs.toList, from(Nil)).map(_.reverse)
}
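/* A hedged sketch of the difference (exec1 and exec2 are illustrative): zip
 * lets independent work be scheduled together, while flatMap forces the
 * second execution to wait for the first one's result.
 *
 *   val together: Execution[(A, B)] = exec1.zip(exec2)
 *   val ordered: Execution[B]       = exec1.flatMap(_ => exec2)
 */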
}
trait ExecutionCounters {
def keys: Set[StatKey]
def apply(key: StatKey): Long = get(key).getOrElse(0L)
def get(key: StatKey): Option[Long]
def toMap: Map[StatKey, Long] = keys.map { k => (k, get(k).getOrElse(0L)) }.toMap
}
object ExecutionCounters {
def empty: ExecutionCounters = new ExecutionCounters {
def keys = Set.empty
def get(key: StatKey) = None
override def toMap = Map.empty
}
def fromCascading(cs: cascading.stats.CascadingStats): ExecutionCounters = new ExecutionCounters {
import scala.collection.JavaConverters._
val keys = (for {
group <- cs.getCounterGroups.asScala
counter <- cs.getCountersFor(group).asScala
} yield StatKey(counter, group)).toSet
def get(k: StatKey) =
if (keys(k)) {
// Yes, cascading is reversed from what we did in Stats. :/
Some(cs.getCounterValue(k.group, k.counter))
} else None
}
def fromJobStats(js: JobStats): ExecutionCounters = {
val counters = js.counters
new ExecutionCounters {
def keys = for {
group <- counters.keySet
counter <- counters(group).keys
} yield StatKey(counter, group)
def get(k: StatKey) = counters.get(k.group).flatMap(_.get(k.counter))
}
}
/**
* This allows us to merge the results of two computations
*/
implicit def monoid: Monoid[ExecutionCounters] = new Monoid[ExecutionCounters] {
override def isNonZero(that: ExecutionCounters) = that.keys.nonEmpty
def zero = ExecutionCounters.empty
def plus(left: ExecutionCounters, right: ExecutionCounters) = {
val allKeys = left.keys ++ right.keys
val allValues = allKeys
.map { k => (k, left(k) + right(k)) }
.toMap
// Don't capture right and left
new ExecutionCounters {
def keys = allKeys
def get(k: StatKey) = allValues.get(k)
override def toMap = allValues
}
}
}
}
|
wanyifu/scaldingtest
|
scalding-core/src/main/scala/com/twitter/scalding/Execution.scala
|
Scala
|
apache-2.0
| 13,307
|
/*
* ________ ____
* / ____/ /_ ____ __________ ____ / __ )___ ___
* / / / __ \\/ __ `/ ___/ __ `/ _ \\/ __ / _ \\/ _ \\
* / /___/ / / / /_/ / / / /_/ / __/ /_/ / __/ __/
* \\____/_/ /_/\\__,_/_/ \\__, /\\___/_____/\\___/\\___/
* /____/
*
* __ __
* ___/ /__ _/ /____ _ ___ __ _____ ____
* / _ / _ `/ __/ _ `/ (_-</ // / _ \\/ __/
* \\_,_/\\_,_/\\__/\\_,_/ /___/\\_, /_//_/\\__/
* /___/
*
* Copyright (c) Alexandros Mavrommatis.
*
* This file is part of ChargeBeeDataSync.
*
* ChargeBeeDataSync is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* ChargeBeeDataSync is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with ChargeBeeDataSync. If not, see <http://www.gnu.org/licenses/>.
*/
package core
import java.sql.Timestamp
import java.util.logging.{Level, Logger}
import com.chargebee.Environment
import com.chargebee.models._
import org.mongodb.scala._
import org.mongodb.scala.model.Filters._
import scala.collection.JavaConverters._
import MongoDBImplicits._
import me.tongfei.progressbar.{ProgressBar, ProgressBarStyle}
import scala.annotation.tailrec
object DataSync extends Parser("chargeBee data sync") with App{
println(logo)
Logger.getLogger("org.mongodb.driver").setLevel(Level.SEVERE)
parse(args, OptionConf()) match {
case Some(conf) =>
implicit val mg: MongoClient = initialization(conf) //initialize api and mongo db parameters
implicit val db: MongoDatabase = mg.getDatabase(conf.db) //get mongo database
val (from, to) = retrieveState //retrieve last state
val pb = new ProgressBar("Please Wait", 100, 200, System.out, ProgressBarStyle.UNICODE_BLOCK)
println(s"""Sync data from: "${conf.site}""")
//sync resources
pb.start()
syncSubscription(from, to, null)
pb.stepTo(18)
syncCustomer(from, to, null)
pb.stepTo(58)
syncInvoice(from, to, null)
pb.stepTo(78)
syncCreditNote(from, to, null)
pb.stepTo(80)
syncTransaction(from, to, null)
pb.stepTo(82)
syncHostedPage(from, to, null)
pb.stepTo(84)
syncPlan(from, to, null)
pb.stepTo(86)
syncAddon(from, to, null)
pb.stepTo(88)
syncCoupon(from, to, null)
pb.stepTo(90)
syncEvent(from, to, null)
pb.stepTo(94)
syncComment(from, to, null)
pb.stepTo(96)
syncSiteMigrationDetail(from, to, null)
pb.stepTo(98)
//get additional resources
retrieveCouponCode(null)
pb.stepTo(100)
pb.stop()
println("Data has been synchronized.\\nExiting...")
mg.close()
case None => //exits
}
/**
* Syncs site migration detail resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db Mongo database
*/
@tailrec
private[core] def syncSiteMigrationDetail(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("SiteMigrationDetail")
val results = SiteMigrationDetail.list().limit(100).offset(offset).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala.filter{entry =>
val migratedAt = entry.siteMigrationDetail().migratedAt()
migratedAt.after(new Timestamp(from)) && migratedAt.before(new Timestamp(to))
}
iterator.foreach{entry =>
println(entry.siteMigrationDetail().migratedAt().getTime)
val entity = entry.siteMigrationDetail()
val json = entity.toJson
collection.insertOne(Document(json)).results()
}
if(nextOffset != null) syncSiteMigrationDetail(from, to, nextOffset)
}
/**
* Syncs comment resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncComment(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Comment")
val results = Comment.list().limit(100).offset(offset).createdAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.comment()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
if(nextOffset != null) syncComment(from, to, nextOffset)
}
/**
* Syncs event resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncEvent(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Event")
val results = Event.list().limit(100).offset(offset).occurredAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.event()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
if(nextOffset != null) syncEvent(from, to, nextOffset)
}
/**
* Retrieves coupon code resources
* @param db Mongo database
* @param offset request offset
*/
@tailrec
private[core] def retrieveCouponCode(offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("CouponCode")
val results = CouponCode.list().limit(100).offset(offset).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.couponCode()
val json = entity.toJson.replace("\\"code\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.code()), Document(json)).results()
}
}
if(nextOffset != null) retrieveCouponCode(nextOffset)
}
/**
* Syncs coupon resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncCoupon(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Coupon")
val results = Coupon.list().limit(100).offset(offset).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.coupon()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncCoupon(from, to, nextOffset)
}
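/* The insert-then-conditionally-replace block above repeats for every
 * versioned resource (addons, plans, invoices, ...). A hedged sketch of how
 * it could be factored into a helper (name and signature are illustrative,
 * not part of this project):
 *
 *   private def upsertVersioned(collection: MongoCollection[Document],
 *                               id: String, resourceVersion: Long, json: String): Unit =
 *     try collection.insertOne(Document(json)).results()
 *     catch {
 *       case _: MongoWriteException =>
 *         val prev = collection.find(equal("_id", id)).headResult().getLong("resource_version")
 *         if (prev < resourceVersion)
 *           collection.replaceOne(equal("_id", id), Document(json)).results()
 *     }
 */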
/**
* Syncs addon resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncAddon(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Addon")
val results = Addon.list().limit(100).offset(offset).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.addon()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncAddon(from, to, nextOffset)
}
/**
* Syncs plan resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncPlan(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Plan")
val results = Plan.list().limit(100).offset(offset).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.plan()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncPlan(from, to, nextOffset)
}
/**
* Syncs hosted page resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncHostedPage(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("HostedPage")
val results = HostedPage.list().limit(100).offset(offset).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.hostedPage()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncHostedPage(from, to, nextOffset)
}
/**
* Syncs transaction resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncTransaction(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Transaction")
val results = Transaction.list().limit(100).offset(offset).includeDeleted(true).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.transaction()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncTransaction(from, to, nextOffset)
}
/**
* Syncs credit note resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncCreditNote(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("CreditNote")
val results = CreditNote.list().limit(100).offset(offset).includeDeleted(true).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.creditNote()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
}
if(nextOffset != null) syncCreditNote(from, to, nextOffset)
}
/**
* Syncs order for an invoice
* @param db Mongo database
* @param invoiceId invoice id
* @param offset request offset
*/
@inline
@tailrec
private[core] def syncOrder(invoiceId: String, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Order")
val results = Order.list().limit(100).offset(offset).invoiceId().is(invoiceId).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.order()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
if(nextOffset != null) syncOrder(invoiceId, nextOffset)
}
/**
* Syncs invoice resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncInvoice(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Invoice")
val results = Invoice.list().limit(100).offset(offset).includeDeleted(true).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.invoice()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
//in case there is an order for an invoice
syncOrder(entity.id(), null)
}
if(nextOffset != null) syncInvoice(from, to, nextOffset)
}
/**
* Sync card for a customer
* @param db Mongo database
* @param entity card resource
*/
@inline
private[core] def syncCard(entity: Card)(implicit db: MongoDatabase) {
val collection = db.getCollection("Card")
if(entity != null) {
val json = entity.toJson.replace("\\"customer_id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException =>
collection.replaceOne(equal("_id", entity.customerId()), Document(json)).results()
}
}
}
/**
* Syncs payment sources for a customer
* @param db Mongo database
* @param customerId customer id
* @param offset request offset
*/
@inline
@tailrec
private[core] def syncPaymentSource(customerId: String, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("PaymentSource")
val results = PaymentSource.list().limit(100).offset(offset).customerId.is(customerId).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.paymentSource()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
if(nextOffset != null) syncPaymentSource(customerId, nextOffset)
}
/**
* Syncs customer, card, payment sources resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncCustomer(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Customer")
val results = Customer.list().limit(100).offset(offset).includeDeleted(true).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.customer()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
//in case there is a card for a customer
syncCard(entry.card())
//in case there are payment sources for a customer
syncPaymentSource(entity.id(), null)
//in case there are unbilled charges for a customer
syncUnbilledCharge(entity.id(), null, subscription = false)
}
if(nextOffset != null) syncCustomer(from, to, nextOffset)
}
/**
* Syncs unbilled charge for a subscription/customer
* @param db Mongo database
* @param id customer/subscription id
* @param offset request offset
* @param subscription if enabled the id corresponds to a subscription
*/
@inline
@tailrec
private[core] def syncUnbilledCharge(id: String, offset: String, subscription: Boolean)(implicit db: MongoDatabase) {
val collection = db.getCollection("UnbilledCharge")
val results = if (subscription) {
UnbilledCharge.list().limit(100).offset(offset).includeDeleted(true).subscriptionId().is(id)
.request()
} else {
UnbilledCharge.list().limit(100).offset(offset).includeDeleted(true).customerId().is(id)
.request()
}
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.unbilledCharge()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
if(nextOffset != null) syncUnbilledCharge(id, nextOffset, subscription)
}
/**
* Syncs subscription resources from a timestamp to another one
* @param from starting timestamp
* @param to ending timestamp
* @param offset request offset
* @param db mongo database
*/
@tailrec
private[core] def syncSubscription(from: Long, to: Long, offset: String)(implicit db: MongoDatabase) {
val collection = db.getCollection("Subscription")
val results = Subscription.list().limit(100).offset(offset).includeDeleted(true).updatedAt()
.between(new Timestamp(from), new Timestamp(to)).request()
val nextOffset = results.nextOffset()
val iterator = results.iterator().asScala
iterator.foreach{entry =>
val entity = entry.subscription()
val json = entity.toJson.replace("\\"id\\"", "\\"_id\\"")
try {
collection.insertOne(Document(json)).results()
}
catch {
case _: MongoWriteException=>
val prevResNum = collection.find(equal("_id", entity.id())).headResult().getLong("resource_version")
if(prevResNum < entity.resourceVersion()) {
collection.replaceOne(equal("_id", entity.id()), Document(json)).results()
}
}
//in case there are unbilled charges for a subscription
syncUnbilledCharge(entity.id(), null, subscription = true)
}
if(nextOffset != null) syncSubscription(from, to, nextOffset)
}
/**
* Retrieves the last timestamp that the system has synced
* @param mg MongoClient
* @param db mongo database
* @return the last and the new sync timestamps
*/
private[core] def retrieveState()(implicit mg: MongoClient, db: MongoDatabase) = {
val state = db.getCollection("State")
val to = System.currentTimeMillis()
if(db.listCollectionNames().results().contains("State")) {
val fromEntry = state.findOneAndReplace(equal("_id", 1), Document("_id" -> 1, "updatedAt" -> to)).headResult()
fromEntry.get("updatedAt") match {
case Some(from) => (from.asInt64().getValue, to)
case None => (0L, to)
}
}
else {
state.insertOne(Document("_id" -> 1, "updatedAt" -> to))
.results()
(0L, to)
}
}
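/* Illustrative example of the returned window (values are made up): if the
 * previous run stored updatedAt = 1600000000000 and the current call happens
 * at 1600000360000, retrieveState returns (1600000000000L, 1600000360000L),
 * so only resources updated inside that window are synced, and the new
 * timestamp is stored for the next run.
 */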
private[core] def initialization(conf: OptionConf) = {
Environment.configure(conf.site, conf.key)
MongoClient(conf.uri)
}
}
|
blackeye42/ChargeBeeDataSync
|
src/main/scala/core/DataSync.scala
|
Scala
|
lgpl-3.0
| 23,084
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import scala.collection.JavaConverters._
import scala.language.existentials
import scala.util.Random
import scala.util.control.Breaks._
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.attribute.NominalAttribute
import org.apache.spark.ml.classification.LogisticRegressionSuite._
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{DenseMatrix, Matrices, Matrix, SparseMatrix, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{Dataset, Row}
import org.apache.spark.sql.functions.{col, lit, rand}
import org.apache.spark.sql.types.LongType
class LogisticRegressionSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
private val seed = 42
@transient var smallBinaryDataset: Dataset[_] = _
@transient var smallMultinomialDataset: Dataset[_] = _
@transient var binaryDataset: Dataset[_] = _
@transient var multinomialDataset: Dataset[_] = _
private val eps: Double = 1e-5
override def beforeAll(): Unit = {
super.beforeAll()
smallBinaryDataset = generateLogisticInput(1.0, 1.0, nPoints = 100, seed = seed).toDF()
smallMultinomialDataset = {
val nPoints = 100
val coefficients = Array(
-0.57997, 0.912083, -0.371077,
-0.16624, -0.84355, -0.048509)
val xMean = Array(5.843, 3.057)
val xVariance = Array(0.6856, 0.1899)
val testData = generateMultinomialLogisticInput(
coefficients, xMean, xVariance, addIntercept = true, nPoints, seed)
val df = sc.parallelize(testData, 4).toDF()
df.cache()
df
}
binaryDataset = {
val nPoints = 10000
val coefficients = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
val testData =
generateMultinomialLogisticInput(coefficients, xMean, xVariance,
addIntercept = true, nPoints, seed)
sc.parallelize(testData, 4).toDF().withColumn("weight", rand(seed))
}
multinomialDataset = {
val nPoints = 10000
val coefficients = Array(
-0.57997, 0.912083, -0.371077, -0.819866, 2.688191,
-0.16624, -0.84355, -0.048509, -0.301789, 4.170682)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
val testData = generateMultinomialLogisticInput(
coefficients, xMean, xVariance, addIntercept = true, nPoints, seed)
val df = sc.parallelize(testData, 4).toDF().withColumn("weight", rand(seed))
df.cache()
df
}
}
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy compared with R's glmnet package.
*/
ignore("export test data into CSV format") {
binaryDataset.rdd.map { case Row(label: Double, features: Vector, weight: Double) =>
label + "," + weight + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LogisticRegressionSuite/binaryDataset")
multinomialDataset.rdd.map { case Row(label: Double, features: Vector, weight: Double) =>
label + "," + weight + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LogisticRegressionSuite/multinomialDataset")
}
test("params") {
ParamsSuite.checkParams(new LogisticRegression)
val model = new LogisticRegressionModel("logReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("logistic regression: default params") {
val lr = new LogisticRegression
assert(lr.getLabelCol === "label")
assert(lr.getFeaturesCol === "features")
assert(lr.getPredictionCol === "prediction")
assert(lr.getRawPredictionCol === "rawPrediction")
assert(lr.getProbabilityCol === "probability")
assert(lr.getFamily === "auto")
assert(!lr.isDefined(lr.weightCol))
assert(lr.getFitIntercept)
assert(lr.getStandardization)
val model = lr.fit(smallBinaryDataset)
model.transform(smallBinaryDataset)
.select("label", "probability", "prediction", "rawPrediction")
.collect()
assert(model.getThreshold === 0.5)
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.getRawPredictionCol === "rawPrediction")
assert(model.getProbabilityCol === "probability")
assert(model.intercept !== 0.0)
assert(model.hasParent)
MLTestingUtils.checkCopyAndUids(lr, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
}
test("logistic regression: illegal params") {
val lowerBoundsOnCoefficients = Matrices.dense(1, 4, Array(1.0, 0.0, 1.0, 0.0))
val upperBoundsOnCoefficients1 = Matrices.dense(1, 4, Array(0.0, 1.0, 1.0, 0.0))
val upperBoundsOnCoefficients2 = Matrices.dense(1, 3, Array(1.0, 0.0, 1.0))
val lowerBoundsOnIntercepts = Vectors.dense(1.0)
// Works well when a bound is only set on one side.
new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.fit(binaryDataset)
withClue("bound constrained optimization only supports L2 regularization") {
intercept[IllegalArgumentException] {
new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setElasticNetParam(1.0)
.fit(binaryDataset)
}
}
withClue("lowerBoundsOnCoefficients should less than or equal to upperBoundsOnCoefficients") {
intercept[IllegalArgumentException] {
new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients1)
.fit(binaryDataset)
}
}
withClue("the coefficients bound matrix mismatched with shape (1, number of features)") {
intercept[IllegalArgumentException] {
new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients2)
.fit(binaryDataset)
}
}
withClue("bounds on intercepts should not be set if fitting without intercept") {
intercept[IllegalArgumentException] {
new LogisticRegression()
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setFitIntercept(false)
.fit(binaryDataset)
}
}
}
test("empty probabilityCol") {
val lr = new LogisticRegression().setProbabilityCol("")
val model = lr.fit(smallBinaryDataset)
assert(model.hasSummary)
// Validate that we re-insert a probability column for evaluation
val fieldNames = model.summary.predictions.schema.fieldNames
assert(smallBinaryDataset.schema.fieldNames.toSet.subsetOf(
fieldNames.toSet))
assert(fieldNames.exists(s => s.startsWith("probability_")))
}
test("setThreshold, getThreshold") {
val lr = new LogisticRegression().setFamily("binomial")
// default
assert(lr.getThreshold === 0.5, "LogisticRegression.threshold should default to 0.5")
withClue("LogisticRegression should not have thresholds set by default.") {
intercept[java.util.NoSuchElementException] { // Note: The exception type may change in future
lr.getThresholds
}
}
// Set via threshold.
// Intuition: Large threshold or large thresholds(1) makes class 0 more likely.
lr.setThreshold(1.0)
assert(lr.getThresholds === Array(0.0, 1.0))
lr.setThreshold(0.0)
assert(lr.getThresholds === Array(1.0, 0.0))
lr.setThreshold(0.5)
assert(lr.getThresholds === Array(0.5, 0.5))
// Set via thresholds
val lr2 = new LogisticRegression().setFamily("binomial")
lr2.setThresholds(Array(0.3, 0.7))
val expectedThreshold = 1.0 / (1.0 + 0.3 / 0.7)
assert(lr2.getThreshold ~== expectedThreshold relTol 1E-7)
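// Algebraically, 1.0 / (1.0 + t0 / t1) == t1 / (t0 + t1), so for
// thresholds (0.3, 0.7) the expected binary threshold is 0.7.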
// thresholds and threshold must be consistent
lr2.setThresholds(Array(0.1, 0.2, 0.3))
withClue("getThreshold should throw error if thresholds has length != 2.") {
intercept[IllegalArgumentException] {
lr2.getThreshold
}
}
// thresholds and threshold must be consistent: values
withClue("fit with ParamMap should throw error if threshold, thresholds do not match.") {
intercept[IllegalArgumentException] {
lr2.fit(smallBinaryDataset,
lr2.thresholds -> Array(0.3, 0.7), lr2.threshold -> (expectedThreshold / 2.0))
}
}
withClue("fit with ParamMap should throw error if threshold, thresholds do not match.") {
intercept[IllegalArgumentException] {
val lr2model = lr2.fit(smallBinaryDataset,
lr2.thresholds -> Array(0.3, 0.7), lr2.threshold -> (expectedThreshold / 2.0))
lr2model.getThreshold
}
}
}
test("thresholds prediction") {
val blr = new LogisticRegression().setFamily("binomial")
val binaryModel = blr.fit(smallBinaryDataset)
binaryModel.setThreshold(1.0)
val binaryZeroPredictions =
binaryModel.transform(smallBinaryDataset).select("prediction").collect()
assert(binaryZeroPredictions.forall(_.getDouble(0) === 0.0))
binaryModel.setThreshold(0.0)
val binaryOnePredictions =
binaryModel.transform(smallBinaryDataset).select("prediction").collect()
assert(binaryOnePredictions.forall(_.getDouble(0) === 1.0))
val mlr = new LogisticRegression().setFamily("multinomial")
val model = mlr.fit(smallMultinomialDataset)
val basePredictions = model.transform(smallMultinomialDataset).select("prediction").collect()
// should predict all zeros
model.setThresholds(Array(1, 1000, 1000))
val zeroPredictions = model.transform(smallMultinomialDataset).select("prediction").collect()
assert(zeroPredictions.forall(_.getDouble(0) === 0.0))
// should predict all ones
model.setThresholds(Array(1000, 1, 1000))
val onePredictions = model.transform(smallMultinomialDataset).select("prediction").collect()
assert(onePredictions.forall(_.getDouble(0) === 1.0))
// should predict all twos
model.setThresholds(Array(1000, 1000, 1))
val twoPredictions = model.transform(smallMultinomialDataset).select("prediction").collect()
assert(twoPredictions.forall(_.getDouble(0) === 2.0))
// constant threshold scaling is the same as no thresholds
model.setThresholds(Array(1000, 1000, 1000))
val scaledPredictions = model.transform(smallMultinomialDataset).select("prediction").collect()
assert(scaledPredictions.zip(basePredictions).forall { case (scaled, base) =>
scaled.getDouble(0) === base.getDouble(0)
})
// force it to use the predict method
model.setRawPredictionCol("").setProbabilityCol("").setThresholds(Array(0, 1, 1))
val predictionsWithPredict =
model.transform(smallMultinomialDataset).select("prediction").collect()
assert(predictionsWithPredict.forall(_.getDouble(0) === 0.0))
}
test("logistic regression doesn't fit intercept when fitIntercept is off") {
val lr = new LogisticRegression().setFamily("binomial")
lr.setFitIntercept(false)
val model = lr.fit(smallBinaryDataset)
assert(model.intercept === 0.0)
val mlr = new LogisticRegression().setFamily("multinomial")
mlr.setFitIntercept(false)
val mlrModel = mlr.fit(smallMultinomialDataset)
assert(mlrModel.interceptVector === Vectors.sparse(3, Seq()))
}
test("logistic regression with setters") {
// Set params, train, and check as many params as we can.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
.setProbabilityCol("myProbability")
val model = lr.fit(smallBinaryDataset)
val parent = model.parent.asInstanceOf[LogisticRegression]
assert(parent.getMaxIter === 10)
assert(parent.getRegParam === 1.0)
assert(parent.getThreshold === 0.6)
assert(model.getThreshold === 0.6)
// Modify model params, and check that the params worked.
model.setThreshold(1.0)
val predAllZero = model.transform(smallBinaryDataset)
.select("prediction", "myProbability")
.collect()
.map { case Row(pred: Double, prob: Vector) => pred }
assert(predAllZero.forall(_ === 0),
s"With threshold=1.0, expected predictions to be all 0, but only" +
s" ${predAllZero.count(_ === 0)} of ${smallBinaryDataset.count()} were 0.")
// Call transform with params, and check that the params worked.
val predNotAllZero =
model.transform(smallBinaryDataset, model.threshold -> 0.0,
model.probabilityCol -> "myProb")
.select("prediction", "myProb")
.collect()
.map { case Row(pred: Double, prob: Vector) => pred }
assert(predNotAllZero.exists(_ !== 0.0))
// Call fit() with new params, and check as many params as we can.
lr.setThresholds(Array(0.6, 0.4))
val model2 = lr.fit(smallBinaryDataset, lr.maxIter -> 5, lr.regParam -> 0.1,
lr.probabilityCol -> "theProb")
val parent2 = model2.parent.asInstanceOf[LogisticRegression]
assert(parent2.getMaxIter === 5)
assert(parent2.getRegParam === 0.1)
assert(parent2.getThreshold === 0.4)
assert(model2.getThreshold === 0.4)
assert(model2.getProbabilityCol === "theProb")
}
test("multinomial logistic regression: Predictor, Classifier methods") {
val sqlContext = smallMultinomialDataset.sqlContext
import sqlContext.implicits._
val mlr = new LogisticRegression().setFamily("multinomial")
val model = mlr.fit(smallMultinomialDataset)
assert(model.numClasses === 3)
val numFeatures = smallMultinomialDataset.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
val results = model.transform(smallMultinomialDataset)
// check that raw prediction is coefficients dot features + intercept
results.select("rawPrediction", "features").collect().foreach {
case Row(raw: Vector, features: Vector) =>
assert(raw.size === 3)
val margins = Array.tabulate(3) { k =>
var margin = 0.0
features.foreachActive { (index, value) =>
margin += value * model.coefficientMatrix(k, index)
}
margin += model.interceptVector(k)
margin
}
assert(raw ~== Vectors.dense(margins) relTol eps)
}
// Compare rawPrediction with probability
results.select("rawPrediction", "probability").collect().foreach {
case Row(raw: Vector, prob: Vector) =>
assert(raw.size === 3)
assert(prob.size === 3)
val max = raw.toArray.max
val subtract = if (max > 0) max else 0.0
val sum = raw.toArray.map(x => math.exp(x - subtract)).sum
val probFromRaw0 = math.exp(raw(0) - subtract) / sum
val probFromRaw1 = math.exp(raw(1) - subtract) / sum
assert(prob(0) ~== probFromRaw0 relTol eps)
assert(prob(1) ~== probFromRaw1 relTol eps)
assert(prob(2) ~== 1.0 - probFromRaw1 - probFromRaw0 relTol eps)
}
// Compare prediction with probability
results.select("prediction", "probability").collect().foreach {
case Row(pred: Double, prob: Vector) =>
val predFromProb = prob.toArray.zipWithIndex.maxBy(_._1)._2
assert(pred == predFromProb)
}
// force it to use raw2prediction
model.setRawPredictionCol("rawPrediction").setProbabilityCol("")
val resultsUsingRaw2Predict =
model.transform(smallMultinomialDataset).select("prediction").as[Double].collect()
resultsUsingRaw2Predict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
// force it to use probability2prediction
model.setRawPredictionCol("").setProbabilityCol("probability")
val resultsUsingProb2Predict =
model.transform(smallMultinomialDataset).select("prediction").as[Double].collect()
resultsUsingProb2Predict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
// force it to use predict
model.setRawPredictionCol("").setProbabilityCol("")
val resultsUsingPredict =
model.transform(smallMultinomialDataset).select("prediction").as[Double].collect()
resultsUsingPredict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
}
test("binary logistic regression: Predictor, Classifier methods") {
val sqlContext = smallBinaryDataset.sqlContext
import sqlContext.implicits._
val lr = new LogisticRegression().setFamily("binomial")
val model = lr.fit(smallBinaryDataset)
assert(model.numClasses === 2)
val numFeatures = smallBinaryDataset.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
val results = model.transform(smallBinaryDataset)
// Compare rawPrediction with probability
results.select("rawPrediction", "probability").collect().foreach {
case Row(raw: Vector, prob: Vector) =>
assert(raw.size === 2)
assert(prob.size === 2)
val probFromRaw1 = 1.0 / (1.0 + math.exp(-raw(1)))
assert(prob(1) ~== probFromRaw1 relTol eps)
assert(prob(0) ~== 1.0 - probFromRaw1 relTol eps)
}
// Compare prediction with probability
results.select("prediction", "probability").collect().foreach {
case Row(pred: Double, prob: Vector) =>
val predFromProb = prob.toArray.zipWithIndex.maxBy(_._1)._2
assert(pred == predFromProb)
}
// force it to use raw2prediction
model.setRawPredictionCol("rawPrediction").setProbabilityCol("")
val resultsUsingRaw2Predict =
model.transform(smallBinaryDataset).select("prediction").as[Double].collect()
resultsUsingRaw2Predict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
// force it to use probability2prediction
model.setRawPredictionCol("").setProbabilityCol("probability")
val resultsUsingProb2Predict =
model.transform(smallBinaryDataset).select("prediction").as[Double].collect()
resultsUsingProb2Predict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
// force it to use predict
model.setRawPredictionCol("").setProbabilityCol("")
val resultsUsingPredict =
model.transform(smallBinaryDataset).select("prediction").as[Double].collect()
resultsUsingPredict.zip(results.select("prediction").as[Double].collect()).foreach {
case (pred1, pred2) => assert(pred1 === pred2)
}
}
test("coefficients and intercept methods") {
val mlr = new LogisticRegression().setMaxIter(1).setFamily("multinomial")
val mlrModel = mlr.fit(smallMultinomialDataset)
val thrownCoef = intercept[SparkException] {
mlrModel.coefficients
}
val thrownIntercept = intercept[SparkException] {
mlrModel.intercept
}
assert(thrownCoef.getMessage().contains("use coefficientMatrix instead"))
assert(thrownIntercept.getMessage().contains("use interceptVector instead"))
val blr = new LogisticRegression().setMaxIter(1).setFamily("binomial")
val blrModel = blr.fit(smallBinaryDataset)
assert(blrModel.coefficients.size === 1)
assert(blrModel.intercept !== 0.0)
}
test("sparse coefficients in LogisticAggregator") {
val bcCoefficientsBinary = spark.sparkContext.broadcast(Vectors.sparse(2, Array(0), Array(1.0)))
val bcFeaturesStd = spark.sparkContext.broadcast(Array(1.0))
val binaryAgg = new LogisticAggregator(bcCoefficientsBinary, bcFeaturesStd, 2,
fitIntercept = true, multinomial = false)
val thrownBinary = withClue("binary logistic aggregator cannot handle sparse coefficients") {
intercept[IllegalArgumentException] {
binaryAgg.add(Instance(1.0, 1.0, Vectors.dense(1.0)))
}
}
assert(thrownBinary.getMessage.contains("coefficients only supports dense"))
val bcCoefficientsMulti = spark.sparkContext.broadcast(Vectors.sparse(6, Array(0), Array(1.0)))
val multinomialAgg = new LogisticAggregator(bcCoefficientsMulti, bcFeaturesStd, 3,
fitIntercept = true, multinomial = true)
val thrown = withClue("multinomial logistic aggregator cannot handle sparse coefficients") {
intercept[IllegalArgumentException] {
multinomialAgg.add(Instance(1.0, 1.0, Vectors.dense(1.0)))
}
}
assert(thrown.getMessage.contains("coefficients only supports dense"))
bcCoefficientsBinary.destroy(blocking = false)
bcFeaturesStd.destroy(blocking = false)
bcCoefficientsMulti.destroy(blocking = false)
}
test("overflow prediction for multiclass") {
val model = new LogisticRegressionModel("mLogReg",
Matrices.dense(3, 2, Array(0.0, 0.0, 0.0, 1.0, 2.0, 3.0)),
Vectors.dense(0.0, 0.0, 0.0), 3, true)
val overFlowData = Seq(
LabeledPoint(1.0, Vectors.dense(0.0, 1000.0)),
LabeledPoint(1.0, Vectors.dense(0.0, -1.0))
).toDF()
val results = model.transform(overFlowData).select("rawPrediction", "probability").collect()
// probabilities are correct when margins have to be adjusted
val raw1 = results(0).getAs[Vector](0)
val prob1 = results(0).getAs[Vector](1)
assert(raw1 === Vectors.dense(1000.0, 2000.0, 3000.0))
assert(prob1 ~== Vectors.dense(0.0, 0.0, 1.0) absTol eps)
// probabilities are correct when margins don't have to be adjusted
val raw2 = results(1).getAs[Vector](0)
val prob2 = results(1).getAs[Vector](1)
assert(raw2 === Vectors.dense(-1.0, -2.0, -3.0))
assert(prob2 ~== Vectors.dense(0.66524096, 0.24472847, 0.09003057) relTol eps)
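// A minimal sketch of the margin adjustment exercised above (illustrative only, not the
// model's internal implementation): subtracting the maximum margin before exponentiating
// keeps the softmax finite even for very large margins.
val hugeMargins = Array(1000.0, 2000.0, 3000.0)
val maxMargin = hugeMargins.max
val expShifted = hugeMargins.map(m => math.exp(m - maxMargin))
val stableProbs = expShifted.map(_ / expShifted.sum)
assert(Vectors.dense(stableProbs) ~== Vectors.dense(0.0, 0.0, 1.0) absTol eps)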
}
test("MultiClassSummarizer") {
val summarizer1 = (new MultiClassSummarizer)
.add(0.0).add(3.0).add(4.0).add(3.0).add(6.0)
assert(summarizer1.histogram === Array[Double](1, 0, 0, 2, 1, 0, 1))
assert(summarizer1.countInvalid === 0)
assert(summarizer1.numClasses === 7)
val summarizer2 = (new MultiClassSummarizer)
.add(1.0).add(5.0).add(3.0).add(0.0).add(4.0).add(1.0)
assert(summarizer2.histogram === Array[Double](1, 2, 0, 1, 1, 1))
assert(summarizer2.countInvalid === 0)
assert(summarizer2.numClasses === 6)
val summarizer3 = (new MultiClassSummarizer)
.add(0.0).add(1.3).add(5.2).add(2.5).add(2.0).add(4.0).add(4.0).add(4.0).add(1.0)
assert(summarizer3.histogram === Array[Double](1, 1, 1, 0, 3))
assert(summarizer3.countInvalid === 3)
assert(summarizer3.numClasses === 5)
val summarizer4 = (new MultiClassSummarizer)
.add(3.1).add(4.3).add(2.0).add(1.0).add(3.0)
assert(summarizer4.histogram === Array[Double](0, 1, 1, 1))
assert(summarizer4.countInvalid === 2)
assert(summarizer4.numClasses === 4)
val summarizer5 = new MultiClassSummarizer
assert(summarizer5.histogram.isEmpty)
assert(summarizer5.numClasses === 0)
// merging when the receiver has the smaller label map; the summarizer with the larger map is returned
val summarizerA = summarizer1.merge(summarizer2)
assert(summarizerA.hashCode() === summarizer2.hashCode())
assert(summarizerA.histogram === Array[Double](2, 2, 0, 3, 2, 1, 1))
assert(summarizerA.countInvalid === 0)
assert(summarizerA.numClasses === 7)
// merging when the receiver has the larger label map; the summarizer with the larger map is returned
val summarizerB = summarizer3.merge(summarizer4)
assert(summarizerB.hashCode() === summarizer3.hashCode())
assert(summarizerB.histogram === Array[Double](1, 2, 2, 1, 3))
assert(summarizerB.countInvalid === 5)
assert(summarizerB.numClasses === 5)
}
test("MultiClassSummarizer with weighted samples") {
val summarizer1 = (new MultiClassSummarizer)
.add(label = 0.0, weight = 0.2).add(3.0, 0.8).add(4.0, 3.2).add(3.0, 1.3).add(6.0, 3.1)
assert(Vectors.dense(summarizer1.histogram) ~==
Vectors.dense(Array(0.2, 0, 0, 2.1, 3.2, 0, 3.1)) absTol 1E-10)
assert(summarizer1.countInvalid === 0)
assert(summarizer1.numClasses === 7)
val summarizer2 = (new MultiClassSummarizer)
.add(1.0, 1.1).add(5.0, 2.3).add(3.0).add(0.0).add(4.0).add(1.0).add(2, 0.0)
assert(Vectors.dense(summarizer2.histogram) ~==
Vectors.dense(Array[Double](1.0, 2.1, 0.0, 1, 1, 2.3)) absTol 1E-10)
assert(summarizer2.countInvalid === 0)
assert(summarizer2.numClasses === 6)
val summarizer = summarizer1.merge(summarizer2)
assert(Vectors.dense(summarizer.histogram) ~==
Vectors.dense(Array(1.2, 2.1, 0.0, 3.1, 4.2, 2.3, 3.1)) absTol 1E-10)
assert(summarizer.countInvalid === 0)
assert(summarizer.numClasses === 7)
}
test("binary logistic regression with intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setStandardization(true)
.setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true).setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 0))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 2.7355261
data.V3 -0.5734389
data.V4 0.8911736
data.V5 -0.3878645
data.V6 -0.8060570
*/
val coefficientsR = Vectors.dense(-0.5734389, 0.8911736, -0.3878645, -0.8060570)
val interceptR = 2.7355261
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-3)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
}
test("binary logistic regression with intercept without regularization with bound") {
// Bound-constrained optimization with a bound on one side.
val upperBoundsOnCoefficients = Matrices.dense(1, 4, Array(1.0, 0.0, 1.0, 0.0))
val upperBoundsOnIntercepts = Vectors.dense(1.0)
val trainer1 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected1 = Vectors.dense(0.06079437, 0.0, -0.26351059, -0.59102199)
val interceptExpected1 = 1.0
assert(model1.intercept ~== interceptExpected1 relTol 1E-3)
assert(model1.coefficients ~= coefficientsExpected1 relTol 1E-3)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model2.intercept ~== interceptExpected1 relTol 1E-3)
assert(model2.coefficients ~= coefficientsExpected1 relTol 1E-3)
// Bound-constrained optimization with bounds on both sides.
val lowerBoundsOnCoefficients = Matrices.dense(1, 4, Array(0.0, -1.0, 0.0, -1.0))
val lowerBoundsOnIntercepts = Vectors.dense(0.0)
val trainer3 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer4 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model3 = trainer3.fit(binaryDataset)
val model4 = trainer4.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected3 = Vectors.dense(0.0, 0.0, 0.0, -0.71708632)
val interceptExpected3 = 0.58776113
assert(model3.intercept ~== interceptExpected3 relTol 1E-3)
assert(model3.coefficients ~= coefficientsExpected3 relTol 1E-3)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model4.intercept ~== interceptExpected3 relTol 1E-3)
assert(model4.coefficients ~= coefficientsExpected3 relTol 1E-3)
// Bound-constrained optimization with infinite bounds on both sides.
val trainer5 = new LogisticRegression()
.setUpperBoundsOnCoefficients(Matrices.dense(1, 4, Array.fill(4)(Double.PositiveInfinity)))
.setUpperBoundsOnIntercepts(Vectors.dense(Double.PositiveInfinity))
.setLowerBoundsOnCoefficients(Matrices.dense(1, 4, Array.fill(4)(Double.NegativeInfinity)))
.setLowerBoundsOnIntercepts(Vectors.dense(Double.NegativeInfinity))
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer6 = new LogisticRegression()
.setUpperBoundsOnCoefficients(Matrices.dense(1, 4, Array.fill(4)(Double.PositiveInfinity)))
.setUpperBoundsOnIntercepts(Vectors.dense(Double.PositiveInfinity))
.setLowerBoundsOnCoefficients(Matrices.dense(1, 4, Array.fill(4)(Double.NegativeInfinity)))
.setLowerBoundsOnIntercepts(Vectors.dense(Double.NegativeInfinity))
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model5 = trainer5.fit(binaryDataset)
val model6 = trainer6.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
// It should be the same as unconstrained optimization with L-BFGS.
val coefficientsExpected5 = Vectors.dense(-0.5734389, 0.8911736, -0.3878645, -0.8060570)
val interceptExpected5 = 2.7355261
assert(model5.intercept ~== interceptExpected5 relTol 1E-3)
assert(model5.coefficients ~= coefficientsExpected5 relTol 1E-3)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model6.intercept ~== interceptExpected5 relTol 1E-3)
assert(model6.coefficients ~= coefficientsExpected5 relTol 1E-3)
}
test("binary logistic regression without intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false).setStandardization(true)
.setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false).setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 0, intercept=FALSE))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 -0.3448461
data.V4 1.2776453
data.V5 -0.3539178
data.V6 -0.7469384
*/
val coefficientsR = Vectors.dense(-0.3448461, 1.2776453, -0.3539178, -0.7469384)
assert(model1.intercept ~== 0.0 relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-2)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model2.intercept ~== 0.0 relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
}
test("binary logistic regression without intercept without regularization with bound") {
val upperBoundsOnCoefficients = Matrices.dense(1, 4, Array(1.0, 0.0, 1.0, 0.0)).toSparse
val trainer1 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setFitIntercept(false)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setFitIntercept(false)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected = Vectors.dense(0.20847553, 0.0, -0.24240289, -0.55568071)
assert(model1.intercept ~== 0.0 relTol 1E-3)
assert(model1.coefficients ~= coefficientsExpected relTol 1E-3)
// Without regularization, training with or without standardization should converge to the same solution.
assert(model2.intercept ~== 0.0 relTol 1E-3)
assert(model2.coefficients ~= coefficientsExpected relTol 1E-3)
}
test("binary logistic regression with intercept with L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 1,
lambda = 0.12, standardize=T))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) -0.06775980
data.V3 .
data.V4 .
data.V5 -0.03933146
data.V6 -0.03047580
*/
val coefficientsRStd = Vectors.dense(0.0, 0.0, -0.03933146, -0.03047580)
val interceptRStd = -0.06775980
assert(model1.intercept ~== interceptRStd relTol 1E-2)
assert(model1.coefficients ~= coefficientsRStd absTol 2E-2)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 1,
lambda = 0.12, standardize=F))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.3544768
data.V3 .
data.V4 .
data.V5 -0.1626191
data.V6 .
*/
val coefficientsR = Vectors.dense(0.0, 0.0, -0.1626191, 0.0)
val interceptR = 0.3544768
assert(model2.intercept ~== interceptR relTol 1E-2)
assert(model2.coefficients ~== coefficientsR absTol 1E-3)
}
test("binary logistic regression without intercept with L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="binomial", alpha = 1,
lambda = 0.12, intercept=F, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 1,
lambda = 0.12, intercept=F, standardize=F))
coefficientsStd
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 .
data.V4 .
data.V5 -0.04967635
data.V6 -0.04757757
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 .
data.V4 .
data.V5 -0.08433195
data.V6 .
*/
val coefficientsRStd = Vectors.dense(0.0, 0.0, -0.04967635, -0.04757757)
val coefficientsR = Vectors.dense(0.0, 0.0, -0.08433195, 0.0)
assert(model1.intercept ~== 0.0 absTol 1E-3)
assert(model1.coefficients ~= coefficientsRStd absTol 1E-3)
assert(model2.intercept ~== 0.0 absTol 1E-3)
assert(model2.coefficients ~= coefficientsR absTol 1E-3)
}
test("binary logistic regression with intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 1.37, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 1.37, standardize=F))
coefficientsStd
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.12707703
data.V3 -0.06980967
data.V4 0.10803933
data.V5 -0.04800404
data.V6 -0.10165096
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.46613016
data.V3 -0.04944529
data.V4 0.02326772
data.V5 -0.11362772
data.V6 -0.06312848
*/
val coefficientsRStd = Vectors.dense(-0.06980967, 0.10803933, -0.04800404, -0.10165096)
val interceptRStd = 0.12707703
val coefficientsR = Vectors.dense(-0.04944529, 0.02326772, -0.11362772, -0.06312848)
val interceptR = 0.46613016
assert(model1.intercept ~== interceptRStd relTol 1E-3)
assert(model1.coefficients ~= coefficientsRStd relTol 1E-3)
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
}
test("binary logistic regression with intercept with L2 regularization with bound") {
val upperBoundsOnCoefficients = Matrices.dense(1, 4, Array(1.0, 0.0, 1.0, 0.0))
val upperBoundsOnIntercepts = Vectors.dense(1.0)
val trainer1 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setRegParam(1.37)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setRegParam(1.37)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpectedWithStd = Vectors.dense(-0.06985003, 0.0, -0.04794278, -0.10168595)
val interceptExpectedWithStd = 0.45750141
val coefficientsExpected = Vectors.dense(-0.0494524, 0.0, -0.11360797, -0.06313577)
val interceptExpected = 0.53722967
assert(model1.intercept ~== interceptExpectedWithStd relTol 1E-3)
assert(model1.coefficients ~= coefficientsExpectedWithStd relTol 1E-3)
assert(model2.intercept ~== interceptExpected relTol 1E-3)
assert(model2.coefficients ~= coefficientsExpected relTol 1E-3)
}
test("binary logistic regression without intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 1.37, intercept=F, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0,
lambda = 1.37, intercept=F, standardize=F))
coefficientsStd
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 -0.06000152
data.V4 0.12598737
data.V5 -0.04669009
data.V6 -0.09941025
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 -0.005482255
data.V4 0.048106338
data.V5 -0.093411640
data.V6 -0.054149798
*/
val coefficientsRStd = Vectors.dense(-0.06000152, 0.12598737, -0.04669009, -0.09941025)
val coefficientsR = Vectors.dense(-0.005482255, 0.048106338, -0.093411640, -0.054149798)
assert(model1.intercept ~== 0.0 absTol 1E-3)
assert(model1.coefficients ~= coefficientsRStd relTol 1E-2)
assert(model2.intercept ~== 0.0 absTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
}
test("binary logistic regression without intercept with L2 regularization with bound") {
val upperBoundsOnCoefficients = Matrices.dense(1, 4, Array(1.0, 0.0, 1.0, 0.0))
val trainer1 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setRegParam(1.37)
.setFitIntercept(false)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setRegParam(1.37)
.setFitIntercept(false)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpectedWithStd = Vectors.dense(-0.00796538, 0.0, -0.0394228, -0.0873314)
val coefficientsExpected = Vectors.dense(0.01105972, 0.0, -0.08574949, -0.05079558)
assert(model1.intercept ~== 0.0 relTol 1E-3)
assert(model1.coefficients ~= coefficientsExpectedWithStd relTol 1E-3)
assert(model2.intercept ~== 0.0 relTol 1E-3)
assert(model2.coefficients ~= coefficientsExpected relTol 1E-3)
}
test("binary logistic regression with intercept with ElasticNet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setMaxIter(200)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0.38,
lambda = 0.21, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0.38,
lambda = 0.21, standardize=F))
coefficientsStd
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.49991996
data.V3 -0.04131110
data.V4 .
data.V5 -0.08585233
data.V6 -0.15875400
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.5024256
data.V3 .
data.V4 .
data.V5 -0.1846038
data.V6 -0.0559614
*/
val coefficientsRStd = Vectors.dense(-0.04131110, 0.0, -0.08585233, -0.15875400)
val interceptRStd = 0.49991996
val coefficientsR = Vectors.dense(0.0, 0.0, -0.1846038, -0.0559614)
val interceptR = 0.5024256
assert(model1.intercept ~== interceptRStd relTol 6E-3)
assert(model1.coefficients ~== coefficientsRStd absTol 5E-3)
assert(model2.intercept ~== interceptR relTol 6E-3)
assert(model2.coefficients ~= coefficientsR absTol 1E-3)
}
test("binary logistic regression without intercept with ElasticNet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0.38,
lambda = 0.21, intercept=FALSE, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 0.38,
lambda = 0.21, intercept=FALSE, standardize=F))
coefficientsStd
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 .
data.V4 0.06859390
data.V5 -0.07900058
data.V6 -0.14684320
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V3 .
data.V4 0.03060637
data.V5 -0.11126742
data.V6 .
*/
val coefficientsRStd = Vectors.dense(0.0, 0.06859390, -0.07900058, -0.14684320)
val coefficientsR = Vectors.dense(0.0, 0.03060637, -0.11126742, 0.0)
assert(model1.intercept ~== 0.0 relTol 1E-3)
assert(model1.coefficients ~= coefficientsRStd absTol 1E-2)
assert(model2.intercept ~== 0.0 absTol 1E-3)
assert(model2.coefficients ~= coefficientsR absTol 1E-3)
}
test("binary logistic regression with intercept with strong L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
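// Aggregate the weighted label counts; histogram(k) holds the total instance weight of class k.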
val histogram = binaryDataset.as[Instance].rdd.map { i => (i.label, i.weight) }
.treeAggregate(new MultiClassSummarizer)(
seqOp = (c, v) => (c, v) match {
case (classSummarizer: MultiClassSummarizer, (label: Double, weight: Double)) =>
classSummarizer.add(label, weight)
},
combOp = (c1, c2) => (c1, c2) match {
case (classSummarizer1: MultiClassSummarizer, classSummarizer2: MultiClassSummarizer) =>
classSummarizer1.merge(classSummarizer2)
}).histogram
/*
For binary logistic regression with strong L1 regularization, all the coefficients
will be zero. As a result,
{{{
P(0) = 1 / (1 + \\exp(b)), and
P(1) = \\exp(b) / (1 + \\exp(b))
}}}, hence
{{{
b = \\log{P(1) / P(0)} = \\log{count_1 / count_0}
}}}
*/
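// Worked numeric sketch of the identity above (assumed counts, not taken from binaryDataset):
// with weighted class counts count_0 = 40.0 and count_1 = 60.0, every coefficient is zero and
// the intercept collapses to log(count_1 / count_0).
val illustrativeIntercept = math.log(60.0 / 40.0)
assert(illustrativeIntercept ~== 0.4054651 relTol 1E-5)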
val interceptTheory = math.log(histogram(1) / histogram(0))
val coefficientsTheory = Vectors.dense(0.0, 0.0, 0.0, 0.0)
assert(model1.intercept ~== interceptTheory relTol 1E-5)
assert(model1.coefficients ~= coefficientsTheory absTol 1E-6)
assert(model2.intercept ~== interceptTheory relTol 1E-5)
assert(model2.coefficients ~= coefficientsTheory absTol 1E-6)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="binomial", alpha = 1.0,
lambda = 6.0))
coefficients
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) -0.2516986
data.V3 0.0000000
data.V4 .
data.V5 .
data.V6 .
*/
val interceptR = -0.2516986
val coefficientsR = Vectors.dense(0.0, 0.0, 0.0, 0.0)
assert(model1.intercept ~== interceptR relTol 1E-5)
assert(model1.coefficients ~== coefficientsR absTol 1E-6)
}
test("multinomial logistic regression with intercept with strong L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(false)
val sqlContext = multinomialDataset.sqlContext
import sqlContext.implicits._
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
val histogram = multinomialDataset.as[Instance].rdd.map(i => (i.label, i.weight))
.treeAggregate(new MultiClassSummarizer)(
seqOp = (c, v) => (c, v) match {
case (classSummarizer: MultiClassSummarizer, (label: Double, weight: Double)) =>
classSummarizer.add(label, weight)
},
combOp = (c1, c2) => (c1, c2) match {
case (classSummarizer1: MultiClassSummarizer, classSummarizer2: MultiClassSummarizer) =>
classSummarizer1.merge(classSummarizer2)
}).histogram
val numFeatures = multinomialDataset.as[Instance].first().features.size
val numClasses = histogram.length
/*
For multinomial logistic regression with strong L1 regularization, all the coefficients
will be zero. As a result, the intercepts are determined by the log counts in the
histogram, up to an additive constant.
{{{
\\exp(b_k) = count_k * \\exp(\\lambda)
b_k = \\log(count_k) + \\lambda
}}}
\\lambda is a free parameter, so choose \\lambda such that the mean of the
intercepts is zero. This yields
{{{
b_k = \\log(count_k)
b_k' = b_k - \\mean(b_k)
}}}
*/
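// Worked sketch of the centering above (assumed raw intercepts, purely illustrative): for
// log counts (1.0, 2.0, 3.0) the mean-centered intercepts are (-1.0, 0.0, 1.0).
val illustrativeRaw = Array(1.0, 2.0, 3.0)
val illustrativeCentered = illustrativeRaw.map(_ - illustrativeRaw.sum / illustrativeRaw.length)
assert(Vectors.dense(illustrativeCentered) ~== Vectors.dense(-1.0, 0.0, 1.0) absTol 1E-12)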
val rawInterceptsTheory = histogram.map(c => math.log(c + 1)) // add 1 for smoothing
val rawMean = rawInterceptsTheory.sum / rawInterceptsTheory.length
val interceptsTheory = Vectors.dense(rawInterceptsTheory.map(_ - rawMean))
val coefficientsTheory = new DenseMatrix(numClasses, numFeatures,
Array.fill[Double](numClasses * numFeatures)(0.0), isTransposed = true)
assert(model1.interceptVector ~== interceptsTheory relTol 1E-3)
assert(model1.coefficientMatrix ~= coefficientsTheory absTol 1E-6)
assert(model2.interceptVector ~== interceptsTheory relTol 1E-3)
assert(model2.coefficientMatrix ~= coefficientsTheory absTol 1E-6)
}
test("multinomial logistic regression with intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(0.0).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(0.0).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial",
alpha = 0, lambda = 0))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-2.10320093
data.V3 0.24337896
data.V4 -0.05916156
data.V5 0.14446790
data.V6 0.35976165
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.3394473
data.V3 -0.3443375
data.V4 0.9181331
data.V5 -0.2283959
data.V6 -0.4388066
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
1.76375361
data.V3 0.10095851
data.V4 -0.85897154
data.V5 0.08392798
data.V6 0.07904499
*/
val coefficientsR = new DenseMatrix(3, 4, Array(
0.24337896, -0.05916156, 0.14446790, 0.35976165,
-0.3443375, 0.9181331, -0.2283959, -0.4388066,
0.10095851, -0.85897154, 0.08392798, 0.07904499), isTransposed = true)
val interceptsR = Vectors.dense(-2.10320093, 0.3394473, 1.76375361)
model1.coefficientMatrix.colIter.foreach(v => assert(v.toArray.sum ~== 0.0 absTol eps))
model2.coefficientMatrix.colIter.foreach(v => assert(v.toArray.sum ~== 0.0 absTol eps))
assert(model1.coefficientMatrix ~== coefficientsR relTol 0.05)
assert(model1.coefficientMatrix.toArray.sum ~== 0.0 absTol eps)
assert(model1.interceptVector ~== interceptsR relTol 0.05)
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR relTol 0.05)
assert(model2.coefficientMatrix.toArray.sum ~== 0.0 absTol eps)
assert(model2.interceptVector ~== interceptsR relTol 0.05)
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression with intercept without regularization with bound") {
// Bound-constrained optimization with a bound on one side.
val lowerBoundsOnCoefficients = Matrices.dense(3, 4, Array.fill(12)(1.0))
val lowerBoundsOnIntercepts = Vectors.dense(Array.fill(3)(1.0))
val trainer1 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected1 = new DenseMatrix(3, 4, Array(
2.52076464, 2.73596057, 1.87984904, 2.73264492,
1.93302281, 3.71363303, 1.50681746, 1.93398782,
2.37839917, 1.93601818, 1.81924758, 2.45191255), isTransposed = true)
val interceptsExpected1 = Vectors.dense(1.00010477, 3.44237083, 4.86740286)
checkCoefficientsEquivalent(model1.coefficientMatrix, coefficientsExpected1)
assert(model1.interceptVector ~== interceptsExpected1 relTol 0.01)
checkCoefficientsEquivalent(model2.coefficientMatrix, coefficientsExpected1)
assert(model2.interceptVector ~== interceptsExpected1 relTol 0.01)
// Bound-constrained optimization with bounds on both sides.
val upperBoundsOnCoefficients = Matrices.dense(3, 4, Array.fill(12)(2.0))
val upperBoundsOnIntercepts = Vectors.dense(Array.fill(3)(2.0))
val trainer3 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer4 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setUpperBoundsOnCoefficients(upperBoundsOnCoefficients)
.setUpperBoundsOnIntercepts(upperBoundsOnIntercepts)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model3 = trainer3.fit(multinomialDataset)
val model4 = trainer4.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected3 = new DenseMatrix(3, 4, Array(
1.61967097, 1.16027835, 1.45131448, 1.97390431,
1.30529317, 2.0, 1.12985473, 1.26652854,
1.61647195, 1.0, 1.40642959, 1.72985589), isTransposed = true)
val interceptsExpected3 = Vectors.dense(1.0, 2.0, 2.0)
checkCoefficientsEquivalent(model3.coefficientMatrix, coefficientsExpected3)
assert(model3.interceptVector ~== interceptsExpected3 relTol 0.01)
checkCoefficientsEquivalent(model4.coefficientMatrix, coefficientsExpected3)
assert(model4.interceptVector ~== interceptsExpected3 relTol 0.01)
// Bound-constrained optimization with infinite bounds on both sides.
val trainer5 = new LogisticRegression()
.setLowerBoundsOnCoefficients(Matrices.dense(3, 4, Array.fill(12)(Double.NegativeInfinity)))
.setLowerBoundsOnIntercepts(Vectors.dense(Array.fill(3)(Double.NegativeInfinity)))
.setUpperBoundsOnCoefficients(Matrices.dense(3, 4, Array.fill(12)(Double.PositiveInfinity)))
.setUpperBoundsOnIntercepts(Vectors.dense(Array.fill(3)(Double.PositiveInfinity)))
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer6 = new LogisticRegression()
.setLowerBoundsOnCoefficients(Matrices.dense(3, 4, Array.fill(12)(Double.NegativeInfinity)))
.setLowerBoundsOnIntercepts(Vectors.dense(Array.fill(3)(Double.NegativeInfinity)))
.setUpperBoundsOnCoefficients(Matrices.dense(3, 4, Array.fill(12)(Double.PositiveInfinity)))
.setUpperBoundsOnIntercepts(Vectors.dense(Array.fill(3)(Double.PositiveInfinity)))
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model5 = trainer5.fit(multinomialDataset)
val model6 = trainer6.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
// It should be the same as unconstrained optimization with L-BFGS.
val coefficientsExpected5 = new DenseMatrix(3, 4, Array(
0.24337896, -0.05916156, 0.14446790, 0.35976165,
-0.3443375, 0.9181331, -0.2283959, -0.4388066,
0.10095851, -0.85897154, 0.08392798, 0.07904499), isTransposed = true)
val interceptsExpected5 = Vectors.dense(-2.10320093, 0.3394473, 1.76375361)
checkCoefficientsEquivalent(model5.coefficientMatrix, coefficientsExpected5)
assert(model5.interceptVector ~== interceptsExpected5 relTol 0.01)
checkCoefficientsEquivalent(model6.coefficientMatrix, coefficientsExpected5)
assert(model6.interceptVector ~== interceptsExpected5 relTol 0.01)
}
test("multinomial logistic regression without intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(0.0).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(0.0).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0,
lambda = 0, intercept=F))
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.07276291
data.V4 -0.36325496
data.V5 0.12015088
data.V6 0.31397340
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.3180040
data.V4 0.9679074
data.V5 -0.2252219
data.V6 -0.4319914
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.2452411
data.V4 -0.6046524
data.V5 0.1050710
data.V6 0.1180180
*/
val coefficientsR = new DenseMatrix(3, 4, Array(
0.07276291, -0.36325496, 0.12015088, 0.31397340,
-0.3180040, 0.9679074, -0.2252219, -0.4319914,
0.2452411, -0.6046524, 0.1050710, 0.1180180), isTransposed = true)
model1.coefficientMatrix.colIter.foreach(v => assert(v.toArray.sum ~== 0.0 absTol eps))
model2.coefficientMatrix.colIter.foreach(v => assert(v.toArray.sum ~== 0.0 absTol eps))
assert(model1.coefficientMatrix ~== coefficientsR relTol 0.05)
assert(model1.coefficientMatrix.toArray.sum ~== 0.0 absTol eps)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR relTol 0.05)
assert(model2.coefficientMatrix.toArray.sum ~== 0.0 absTol eps)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression without intercept without regularization with bound") {
val lowerBoundsOnCoefficients = Matrices.dense(3, 4, Array.fill(12)(1.0))
val trainer1 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setFitIntercept(false)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setFitIntercept(false)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpected = new DenseMatrix(3, 4, Array(
1.62410051, 1.38219391, 1.34486618, 1.74641729,
1.23058989, 2.71787825, 1.0, 1.00007073,
1.79478632, 1.14360459, 1.33011603, 1.55093897), isTransposed = true)
checkCoefficientsEquivalent(model1.coefficientMatrix, coefficientsExpected)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
checkCoefficientsEquivalent(model2.coefficientMatrix, coefficientsExpected)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
}
test("multinomial logistic regression with intercept with L1 regularization") {
// use a tighter convergence tolerance and more iterations because the OWL-QN solver converges more slowly
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.05).setStandardization(true)
.setMaxIter(300).setTol(1e-10).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.05).setStandardization(false)
.setMaxIter(300).setTol(1e-10).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial",
alpha = 1, lambda = 0.05, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 1,
lambda = 0.05, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.62244703
data.V3 .
data.V4 .
data.V5 .
data.V6 0.08419825
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.2804845
data.V3 -0.1336960
data.V4 0.3717091
data.V5 -0.1530363
data.V6 -0.2035286
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.9029315
data.V3 .
data.V4 -0.4629737
data.V5 .
data.V6 .
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.44215290
data.V3 .
data.V4 .
data.V5 0.01767089
data.V6 0.02542866
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.76308326
data.V3 -0.06818576
data.V4 .
data.V5 -0.20446351
data.V6 -0.13017924
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.3209304
data.V3 .
data.V4 .
data.V5 .
data.V6 .
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.08419825,
-0.1336960, 0.3717091, -0.1530363, -0.2035286,
0.0, -0.4629737, 0.0, 0.0), isTransposed = true)
val interceptsRStd = Vectors.dense(-0.62244703, -0.2804845, 0.9029315)
val coefficientsR = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.01767089, 0.02542866,
-0.06818576, 0.0, -0.20446351, -0.13017924,
0.0, 0.0, 0.0, 0.0), isTransposed = true)
val interceptsR = Vectors.dense(-0.44215290, 0.76308326, -0.3209304)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.02)
assert(model1.interceptVector ~== interceptsRStd relTol 0.1)
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR absTol 0.02)
assert(model2.interceptVector ~== interceptsR relTol 0.1)
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression without intercept with L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.05).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.05).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 1,
lambda = 0.05, intercept=F, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 1,
lambda = 0.05, intercept=F, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 0.01144225
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.1678787
data.V4 0.5385351
data.V5 -0.1573039
data.V6 -0.2471624
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 .
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 .
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 0.1929409
data.V5 -0.1889121
data.V6 -0.1010413
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 .
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.01144225,
-0.1678787, 0.5385351, -0.1573039, -0.2471624,
0.0, 0.0, 0.0, 0.0), isTransposed = true)
val coefficientsR = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.0,
0.0, 0.1929409, -0.1889121, -0.1010413,
0.0, 0.0, 0.0, 0.0), isTransposed = true)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.01)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR absTol 0.01)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression with intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(0.1).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(0.1).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame( data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial",
alpha = 0, lambda = 0.1, intercept=T, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0,
lambda = 0.1, intercept=T, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-1.5898288335
data.V3 0.1691226336
data.V4 0.0002983651
data.V5 0.1001732896
data.V6 0.2554575585
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.2125746
data.V3 -0.2304586
data.V4 0.6153492
data.V5 -0.1537017
data.V6 -0.2975443
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
1.37725427
data.V3 0.06133600
data.V4 -0.61564761
data.V5 0.05352840
data.V6 0.04208671
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-1.5681088
data.V3 0.1508182
data.V4 0.0121955
data.V5 0.1217930
data.V6 0.2162850
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
1.1217130
data.V3 -0.2028984
data.V4 0.2862431
data.V5 -0.1843559
data.V6 -0.2481218
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.44639579
data.V3 0.05208012
data.V4 -0.29843864
data.V5 0.06256289
data.V6 0.03183676
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.1691226336, 0.0002983651, 0.1001732896, 0.2554575585,
-0.2304586, 0.6153492, -0.1537017, -0.2975443,
0.06133600, -0.61564761, 0.05352840, 0.04208671), isTransposed = true)
val interceptsRStd = Vectors.dense(-1.5898288335, 0.2125746, 1.37725427)
val coefficientsR = new DenseMatrix(3, 4, Array(
0.1508182, 0.0121955, 0.1217930, 0.2162850,
-0.2028984, 0.2862431, -0.1843559, -0.2481218,
0.05208012, -0.29843864, 0.06256289, 0.03183676), isTransposed = true)
val interceptsR = Vectors.dense(-1.5681088, 1.1217130, 0.44639579)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.001)
assert(model1.interceptVector ~== interceptsRStd relTol 0.05)
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR relTol 0.05)
assert(model2.interceptVector ~== interceptsR relTol 0.05)
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression with intercept with L2 regularization with bound") {
val lowerBoundsOnCoefficients = Matrices.dense(3, 4, Array.fill(12)(1.0))
val lowerBoundsOnIntercepts = Vectors.dense(Array.fill(3)(1.0))
val trainer1 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setRegParam(0.1)
.setFitIntercept(true)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setLowerBoundsOnIntercepts(lowerBoundsOnIntercepts)
.setRegParam(0.1)
.setFitIntercept(true)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpectedWithStd = new DenseMatrix(3, 4, Array(
1.0, 1.0, 1.0, 1.01647497,
1.0, 1.44105616, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0), isTransposed = true)
val interceptsExpectedWithStd = Vectors.dense(2.52055893, 1.0, 2.560682)
val coefficientsExpected = new DenseMatrix(3, 4, Array(
1.0, 1.0, 1.03189386, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0), isTransposed = true)
val interceptsExpected = Vectors.dense(1.06418835, 1.0, 1.20494701)
assert(model1.coefficientMatrix ~== coefficientsExpectedWithStd relTol 0.01)
assert(model1.interceptVector ~== interceptsExpectedWithStd relTol 0.01)
assert(model2.coefficientMatrix ~== coefficientsExpected relTol 0.01)
assert(model2.interceptVector ~== interceptsExpected relTol 0.01)
}
test("multinomial logistic regression without intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(0.1).setStandardization(true).setWeightCol("weight")
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(0.1).setStandardization(false).setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0,
lambda = 0.1, intercept=F, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0,
lambda = 0.1, intercept=F, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.04048126
data.V4 -0.23075758
data.V5 0.08228864
data.V6 0.22277648
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.2149745
data.V4 0.6478666
data.V5 -0.1515158
data.V6 -0.2930498
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.17449321
data.V4 -0.41710901
data.V5 0.06922716
data.V6 0.07027332
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.003949652
data.V4 -0.142982415
data.V5 0.091439598
data.V6 0.179286241
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.09071124
data.V4 0.39752531
data.V5 -0.16233832
data.V6 -0.22206059
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.09466090
data.V4 -0.25454290
data.V5 0.07089872
data.V6 0.04277435
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.04048126, -0.23075758, 0.08228864, 0.22277648,
-0.2149745, 0.6478666, -0.1515158, -0.2930498,
0.17449321, -0.41710901, 0.06922716, 0.07027332), isTransposed = true)
val coefficientsR = new DenseMatrix(3, 4, Array(
-0.003949652, -0.142982415, 0.091439598, 0.179286241,
-0.09071124, 0.39752531, -0.16233832, -0.22206059,
0.09466090, -0.25454290, 0.07089872, 0.04277435), isTransposed = true)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.01)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR absTol 0.01)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression without intercept with L2 regularization with bound") {
val lowerBoundsOnCoefficients = Matrices.dense(3, 4, Array.fill(12)(1.0))
val trainer1 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setRegParam(0.1)
.setFitIntercept(false)
.setStandardization(true)
.setWeightCol("weight")
val trainer2 = new LogisticRegression()
.setLowerBoundsOnCoefficients(lowerBoundsOnCoefficients)
.setRegParam(0.1)
.setFitIntercept(false)
.setStandardization(false)
.setWeightCol("weight")
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
// The solution is generated by https://github.com/yanboliang/bound-optimization.
val coefficientsExpectedWithStd = new DenseMatrix(3, 4, Array(
1.01324653, 1.0, 1.0, 1.0415767,
1.0, 1.0, 1.0, 1.0,
1.02244888, 1.0, 1.0, 1.0), isTransposed = true)
val coefficientsExpected = new DenseMatrix(3, 4, Array(
1.0, 1.0, 1.03932259, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.03274649, 1.0), isTransposed = true)
assert(model1.coefficientMatrix ~== coefficientsExpectedWithStd absTol 0.01)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
assert(model2.coefficientMatrix ~== coefficientsExpected absTol 0.01)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
}
test("multinomial logistic regression with intercept with elasticnet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(0.5).setRegParam(0.1).setStandardization(true)
.setMaxIter(300).setTol(1e-10)
val trainer2 = (new LogisticRegression).setFitIntercept(true).setWeightCol("weight")
.setElasticNetParam(0.5).setRegParam(0.1).setStandardization(false)
.setMaxIter(300).setTol(1e-10)
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0.5,
lambda = 0.1, intercept=T, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0.5,
lambda = 0.1, intercept=T, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.50133383
data.V3 .
data.V4 .
data.V5 .
data.V6 0.08351653
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.3151913
data.V3 -0.1058702
data.V4 0.3183251
data.V5 -0.1212969
data.V6 -0.1629778
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.8165252
data.V3 .
data.V4 -0.3943069
data.V5 .
data.V6 .
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.38857157
data.V3 .
data.V4 .
data.V5 0.02384198
data.V6 0.03127749
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
0.62492165
data.V3 -0.04949061
data.V4 .
data.V5 -0.18584462
data.V6 -0.08952455
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
-0.2363501
data.V3 .
data.V4 .
data.V5 .
data.V6 .
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.08351653,
-0.1058702, 0.3183251, -0.1212969, -0.1629778,
0.0, -0.3943069, 0.0, 0.0), isTransposed = true)
val interceptsRStd = Vectors.dense(-0.50133383, -0.3151913, 0.8165252)
val coefficientsR = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.02384198, 0.03127749,
-0.04949061, 0.0, -0.18584462, -0.08952455,
0.0, 0.0, 0.0, 0.0), isTransposed = true)
val interceptsR = Vectors.dense(-0.38857157, 0.62492165, -0.2363501)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.01)
assert(model1.interceptVector ~== interceptsRStd absTol 0.01)
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR absTol 0.01)
assert(model2.interceptVector ~== interceptsR absTol 0.01)
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("multinomial logistic regression without intercept with elasticnet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false).setWeightCol("weight")
.setElasticNetParam(0.5).setRegParam(0.1).setStandardization(true)
.setMaxIter(300).setTol(1e-10)
val trainer2 = (new LogisticRegression).setFitIntercept(false).setWeightCol("weight")
.setElasticNetParam(0.5).setRegParam(0.1).setStandardization(false)
.setMaxIter(300).setTol(1e-10)
val model1 = trainer1.fit(multinomialDataset)
val model2 = trainer2.fit(multinomialDataset)
/*
Use the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = as.factor(data$V1)
w = data$V2
features = as.matrix(data.frame(data$V3, data$V4, data$V5, data$V6))
coefficientsStd = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0.5,
lambda = 0.1, intercept=F, standardize=T))
coefficients = coef(glmnet(features, label, weights=w, family="multinomial", alpha = 0.5,
lambda = 0.1, intercept=F, standardize=F))
coefficientsStd
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 0.03238285
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 -0.1328284
data.V4 0.4219321
data.V5 -0.1247544
data.V6 -0.1893318
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 0.004572312
data.V4 .
data.V5 .
data.V6 .
coefficients
$`0`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 .
$`1`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 0.14571623
data.V5 -0.16456351
data.V6 -0.05866264
$`2`
5 x 1 sparse Matrix of class "dgCMatrix"
s0
.
data.V3 .
data.V4 .
data.V5 .
data.V6 .
*/
val coefficientsRStd = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.03238285,
-0.1328284, 0.4219321, -0.1247544, -0.1893318,
0.004572312, 0.0, 0.0, 0.0), isTransposed = true)
val coefficientsR = new DenseMatrix(3, 4, Array(
0.0, 0.0, 0.0, 0.0,
0.0, 0.14571623, -0.16456351, -0.05866264,
0.0, 0.0, 0.0, 0.0), isTransposed = true)
assert(model1.coefficientMatrix ~== coefficientsRStd absTol 0.01)
assert(model1.interceptVector.toArray === Array.fill(3)(0.0))
assert(model1.interceptVector.toArray.sum ~== 0.0 absTol eps)
assert(model2.coefficientMatrix ~== coefficientsR absTol 0.01)
assert(model2.interceptVector.toArray === Array.fill(3)(0.0))
assert(model2.interceptVector.toArray.sum ~== 0.0 absTol eps)
}
test("evaluate on test set") {
// TODO: add for multiclass when model summary becomes available
// Evaluate on test set should be same as that of the transformed training data.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
val model = lr.fit(smallBinaryDataset)
val summary = model.summary.asInstanceOf[BinaryLogisticRegressionSummary]
val sameSummary =
model.evaluate(smallBinaryDataset).asInstanceOf[BinaryLogisticRegressionSummary]
assert(summary.areaUnderROC === sameSummary.areaUnderROC)
assert(summary.roc.collect() === sameSummary.roc.collect())
assert(summary.pr.collect === sameSummary.pr.collect())
assert(
summary.fMeasureByThreshold.collect() === sameSummary.fMeasureByThreshold.collect())
assert(summary.recallByThreshold.collect() === sameSummary.recallByThreshold.collect())
assert(
summary.precisionByThreshold.collect() === sameSummary.precisionByThreshold.collect())
}
test("evaluate with labels that are not doubles") {
// Evaluate a test set with Label that is a numeric type other than Double
val lr = new LogisticRegression()
.setMaxIter(1)
.setRegParam(1.0)
val model = lr.fit(smallBinaryDataset)
val summary = model.evaluate(smallBinaryDataset).asInstanceOf[BinaryLogisticRegressionSummary]
val longLabelData = smallBinaryDataset.select(col(model.getLabelCol).cast(LongType),
col(model.getFeaturesCol))
val longSummary = model.evaluate(longLabelData).asInstanceOf[BinaryLogisticRegressionSummary]
assert(summary.areaUnderROC === longSummary.areaUnderROC)
}
test("statistics on training data") {
// Test that loss is monotonically decreasing.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
val model = lr.fit(smallBinaryDataset)
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("logistic regression with sample weights") {
def modelEquals(m1: LogisticRegressionModel, m2: LogisticRegressionModel): Unit = {
assert(m1.coefficientMatrix ~== m2.coefficientMatrix absTol 0.05)
assert(m1.interceptVector ~== m2.interceptVector absTol 0.05)
}
val testParams = Seq(
("binomial", smallBinaryDataset, 2),
("multinomial", smallMultinomialDataset, 3)
)
testParams.foreach { case (family, dataset, numClasses) =>
val estimator = new LogisticRegression().setFamily(family)
MLTestingUtils.testArbitrarilyScaledWeights[LogisticRegressionModel, LogisticRegression](
dataset.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LogisticRegressionModel, LogisticRegression](
dataset.as[LabeledPoint], estimator, numClasses, modelEquals, outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LogisticRegressionModel, LogisticRegression](
dataset.as[LabeledPoint], estimator, modelEquals, seed)
}
}
test("set family") {
val lr = new LogisticRegression().setMaxIter(1)
// don't set anything for binary classification
val model1 = lr.fit(binaryDataset)
assert(model1.coefficientMatrix.numRows === 1 && model1.coefficientMatrix.numCols === 4)
assert(model1.interceptVector.size === 1)
// set to multinomial for binary classification
val model2 = lr.setFamily("multinomial").fit(binaryDataset)
assert(model2.coefficientMatrix.numRows === 2 && model2.coefficientMatrix.numCols === 4)
assert(model2.interceptVector.size === 2)
// set to binary for binary classification
val model3 = lr.setFamily("binomial").fit(binaryDataset)
assert(model3.coefficientMatrix.numRows === 1 && model3.coefficientMatrix.numCols === 4)
assert(model3.interceptVector.size === 1)
// don't set anything for multiclass classification
val mlr = new LogisticRegression().setMaxIter(1)
val model4 = mlr.fit(multinomialDataset)
assert(model4.coefficientMatrix.numRows === 3 && model4.coefficientMatrix.numCols === 4)
assert(model4.interceptVector.size === 3)
// set to binary for multiclass classification
mlr.setFamily("binomial")
val thrown = intercept[IllegalArgumentException] {
mlr.fit(multinomialDataset)
}
assert(thrown.getMessage.contains("Binomial family only supports 1 or 2 outcome classes"))
// set to multinomial for multiclass
mlr.setFamily("multinomial")
val model5 = mlr.fit(multinomialDataset)
assert(model5.coefficientMatrix.numRows === 3 && model5.coefficientMatrix.numCols === 4)
assert(model5.interceptVector.size === 3)
}
test("set initial model") {
val lr = new LogisticRegression().setFamily("binomial")
val model1 = lr.fit(smallBinaryDataset)
val lr2 = new LogisticRegression().setInitialModel(model1).setMaxIter(5).setFamily("binomial")
val model2 = lr2.fit(smallBinaryDataset)
val predictions1 = model1.transform(smallBinaryDataset).select("prediction").collect()
val predictions2 = model2.transform(smallBinaryDataset).select("prediction").collect()
predictions1.zip(predictions2).foreach { case (Row(p1: Double), Row(p2: Double)) =>
assert(p1 === p2)
}
assert(model2.summary.totalIterations === 1)
val lr3 = new LogisticRegression().setFamily("multinomial")
val model3 = lr3.fit(smallMultinomialDataset)
val lr4 = new LogisticRegression()
.setInitialModel(model3).setMaxIter(5).setFamily("multinomial")
val model4 = lr4.fit(smallMultinomialDataset)
val predictions3 = model3.transform(smallMultinomialDataset).select("prediction").collect()
val predictions4 = model4.transform(smallMultinomialDataset).select("prediction").collect()
predictions3.zip(predictions4).foreach { case (Row(p1: Double), Row(p2: Double)) =>
assert(p1 === p2)
}
// TODO: check that it converges in a single iteration when model summary is available
}
test("binary logistic regression with all labels the same") {
val sameLabels = smallBinaryDataset
.withColumn("zeroLabel", lit(0.0))
.withColumn("oneLabel", lit(1.0))
// fitIntercept=true
val lrIntercept = new LogisticRegression()
.setFitIntercept(true)
.setMaxIter(3)
.setFamily("binomial")
val allZeroInterceptModel = lrIntercept
.setLabelCol("zeroLabel")
.fit(sameLabels)
assert(allZeroInterceptModel.coefficients ~== Vectors.dense(0.0) absTol 1E-3)
assert(allZeroInterceptModel.intercept === Double.NegativeInfinity)
assert(allZeroInterceptModel.summary.totalIterations === 0)
val allOneInterceptModel = lrIntercept
.setLabelCol("oneLabel")
.fit(sameLabels)
assert(allOneInterceptModel.coefficients ~== Vectors.dense(0.0) absTol 1E-3)
assert(allOneInterceptModel.intercept === Double.PositiveInfinity)
assert(allOneInterceptModel.summary.totalIterations === 0)
// fitIntercept=false
val lrNoIntercept = new LogisticRegression()
.setFitIntercept(false)
.setMaxIter(3)
.setFamily("binomial")
val allZeroNoInterceptModel = lrNoIntercept
.setLabelCol("zeroLabel")
.fit(sameLabels)
assert(allZeroNoInterceptModel.intercept === 0.0)
assert(allZeroNoInterceptModel.summary.totalIterations > 0)
val allOneNoInterceptModel = lrNoIntercept
.setLabelCol("oneLabel")
.fit(sameLabels)
assert(allOneNoInterceptModel.intercept === 0.0)
assert(allOneNoInterceptModel.summary.totalIterations > 0)
}
test("multiclass logistic regression with all labels the same") {
val constantData = Seq(
LabeledPoint(4.0, Vectors.dense(0.0)),
LabeledPoint(4.0, Vectors.dense(1.0)),
LabeledPoint(4.0, Vectors.dense(2.0))).toDF()
val mlr = new LogisticRegression().setFamily("multinomial")
val model = mlr.fit(constantData)
val results = model.transform(constantData)
results.select("rawPrediction", "probability", "prediction").collect().foreach {
case Row(raw: Vector, prob: Vector, pred: Double) =>
assert(raw === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, Double.PositiveInfinity)))
assert(prob === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, 1.0)))
assert(pred === 4.0)
}
// force the model to be trained with only one class
val constantZeroData = Seq(
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0))).toDF()
val modelZeroLabel = mlr.setFitIntercept(false).fit(constantZeroData)
val resultsZero = modelZeroLabel.transform(constantZeroData)
resultsZero.select("rawPrediction", "probability", "prediction").collect().foreach {
case Row(raw: Vector, prob: Vector, pred: Double) =>
assert(prob === Vectors.dense(Array(1.0)))
assert(pred === 0.0)
}
// ensure that the correct value is predicted when numClasses is passed through metadata
val labelMeta = NominalAttribute.defaultAttr.withName("label").withNumValues(6).toMetadata()
val constantDataWithMetadata = constantData
.select(constantData("label").as("label", labelMeta), constantData("features"))
val modelWithMetadata = mlr.setFitIntercept(true).fit(constantDataWithMetadata)
val resultsWithMetadata = modelWithMetadata.transform(constantDataWithMetadata)
resultsWithMetadata.select("rawPrediction", "probability", "prediction").collect().foreach {
case Row(raw: Vector, prob: Vector, pred: Double) =>
assert(raw === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, Double.PositiveInfinity, 0.0)))
assert(prob === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, 1.0, 0.0)))
assert(pred === 4.0)
}
// TODO: check num iters is zero when it become available in the model
}
test("compressed storage for constant label") {
/*
When the label is constant and fit intercept is true, all the coefficients will be
zeros, and so the model coefficients should be stored as sparse data structures, except
when the matrix dimensions are very small.
*/
val moreClassesThanFeatures = Seq(
LabeledPoint(4.0, Vectors.dense(Array.fill(5)(0.0))),
LabeledPoint(4.0, Vectors.dense(Array.fill(5)(1.0))),
LabeledPoint(4.0, Vectors.dense(Array.fill(5)(2.0)))).toDF()
val mlr = new LogisticRegression().setFamily("multinomial").setFitIntercept(true)
val model = mlr.fit(moreClassesThanFeatures)
assert(model.coefficientMatrix.isInstanceOf[SparseMatrix])
assert(model.coefficientMatrix.isColMajor)
// in this case, it should be stored as row major
val moreFeaturesThanClasses = Seq(
LabeledPoint(1.0, Vectors.dense(Array.fill(5)(0.0))),
LabeledPoint(1.0, Vectors.dense(Array.fill(5)(1.0))),
LabeledPoint(1.0, Vectors.dense(Array.fill(5)(2.0)))).toDF()
val model2 = mlr.fit(moreFeaturesThanClasses)
assert(model2.coefficientMatrix.isInstanceOf[SparseMatrix])
assert(model2.coefficientMatrix.isRowMajor)
val blr = new LogisticRegression().setFamily("binomial").setFitIntercept(true)
val blrModel = blr.fit(moreFeaturesThanClasses)
assert(blrModel.coefficientMatrix.isInstanceOf[SparseMatrix])
assert(blrModel.coefficientMatrix.asInstanceOf[SparseMatrix].colPtrs.length === 2)
}
test("compressed coefficients") {
val trainer1 = new LogisticRegression()
.setRegParam(0.1)
.setElasticNetParam(1.0)
// compressed row major is optimal
val model1 = trainer1.fit(multinomialDataset.limit(100))
assert(model1.coefficientMatrix.isInstanceOf[SparseMatrix])
assert(model1.coefficientMatrix.isRowMajor)
// compressed column major is optimal since there are more classes than features
val labelMeta = NominalAttribute.defaultAttr.withName("label").withNumValues(6).toMetadata()
val model2 = trainer1.fit(multinomialDataset
.withColumn("label", col("label").as("label", labelMeta)).limit(100))
assert(model2.coefficientMatrix.isInstanceOf[SparseMatrix])
assert(model2.coefficientMatrix.isColMajor)
// coefficients are dense without L1 regularization
val trainer2 = new LogisticRegression()
.setElasticNetParam(0.0)
val model3 = trainer2.fit(multinomialDataset.limit(100))
assert(model3.coefficientMatrix.isInstanceOf[DenseMatrix])
}
test("numClasses specified in metadata/inferred") {
val lr = new LogisticRegression().setMaxIter(1).setFamily("multinomial")
// specify more classes than unique label values
val labelMeta = NominalAttribute.defaultAttr.withName("label").withNumValues(4).toMetadata()
val df = smallMultinomialDataset.select(smallMultinomialDataset("label").as("label", labelMeta),
smallMultinomialDataset("features"))
val model1 = lr.fit(df)
assert(model1.numClasses === 4)
assert(model1.interceptVector.size === 4)
// specify two classes when there are really three
val labelMeta1 = NominalAttribute.defaultAttr.withName("label").withNumValues(2).toMetadata()
val df1 = smallMultinomialDataset
.select(smallMultinomialDataset("label").as("label", labelMeta1),
smallMultinomialDataset("features"))
val thrown = intercept[IllegalArgumentException] {
lr.fit(df1)
}
assert(thrown.getMessage.contains("less than the number of unique labels"))
// lr should infer the number of classes if not specified
val model3 = lr.fit(smallMultinomialDataset)
assert(model3.numClasses === 3)
}
test("read/write") {
def checkModelData(model: LogisticRegressionModel, model2: LogisticRegressionModel): Unit = {
assert(model.intercept === model2.intercept)
assert(model.coefficients.toArray === model2.coefficients.toArray)
assert(model.numClasses === model2.numClasses)
assert(model.numFeatures === model2.numFeatures)
}
val lr = new LogisticRegression()
testEstimatorAndModelReadWrite(lr, smallBinaryDataset, LogisticRegressionSuite.allParamSettings,
LogisticRegressionSuite.allParamSettings, checkModelData)
}
test("should support all NumericType labels and weights, and not support other types") {
val lr = new LogisticRegression().setMaxIter(1)
MLTestingUtils.checkNumericTypes[LogisticRegressionModel, LogisticRegression](
lr, spark) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients.toArray === actual.coefficients.toArray)
}
}
}
object LogisticRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = ProbabilisticClassifierSuite.allParamSettings ++ Map(
"probabilityCol" -> "myProbability",
"thresholds" -> Array(0.4, 0.6),
"regParam" -> 0.01,
"elasticNetParam" -> 0.1,
"maxIter" -> 2, // intentionally small
"fitIntercept" -> true,
"tol" -> 0.8,
"standardization" -> false,
"threshold" -> 0.6
)
def generateLogisticInputAsList(
offset: Double,
scale: Double,
nPoints: Int,
seed: Int): java.util.List[LabeledPoint] = {
generateLogisticInput(offset, scale, nPoints, seed).asJava
}
// Generate input of the form Y = logistic(offset + scale*X)
def generateLogisticInput(
offset: Double,
scale: Double,
nPoints: Int,
seed: Int): Seq[LabeledPoint] = {
val rnd = new Random(seed)
val x1 = Array.fill[Double](nPoints)(rnd.nextGaussian())
val y = (0 until nPoints).map { i =>
val p = 1.0 / (1.0 + math.exp(-(offset + scale * x1(i))))
if (rnd.nextDouble() < p) 1.0 else 0.0
}
val testData = (0 until nPoints).map(i => LabeledPoint(y(i), Vectors.dense(Array(x1(i)))))
testData
}
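// Illustrative sketch (not part of the original suite): the generator above can be turned into
// a DataFrame for a quick binomial fit, assuming a SparkSession named `spark` and
// `import spark.implicits._` are in scope, as they are in the tests that use this helper:
//
//   val df = generateLogisticInput(offset = 1.0, scale = 1.0, nPoints = 100, seed = 42).toDF()
//   val model = new LogisticRegression().setMaxIter(10).fit(df)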
/**
 * Generates `k`-class multinomial synthetic logistic input in `n`-dimensional space given the
* model weights and mean/variance of the features. The synthetic data will be drawn from
* the probability distribution constructed by weights using the following formula.
*
* P(y = 0 | x) = 1 / norm
* P(y = 1 | x) = exp(x * w_1) / norm
* P(y = 2 | x) = exp(x * w_2) / norm
* ...
* P(y = k-1 | x) = exp(x * w_{k-1}) / norm
* where norm = 1 + exp(x * w_1) + exp(x * w_2) + ... + exp(x * w_{k-1})
*
 * @param weights matrix flattened into a vector; as a result, the dimension of the weights vector
 * will be (k - 1) * (n + 1) if `addIntercept == true`, and
 * (k - 1) * n if `addIntercept != true`.
 * @param xMean the mean of the generated features. Quite often, if the features are not properly
 * standardized, a poorly implemented algorithm will have difficulty
 * converging.
* @param xVariance the variance of the generated features.
* @param addIntercept whether to add intercept.
* @param nPoints the number of instance of generated data.
* @param seed the seed for random generator. For consistent testing result, it will be fixed.
*/
def generateMultinomialLogisticInput(
weights: Array[Double],
xMean: Array[Double],
xVariance: Array[Double],
addIntercept: Boolean,
nPoints: Int,
seed: Int): Seq[LabeledPoint] = {
val rnd = new Random(seed)
val xDim = xMean.length
val xWithInterceptsDim = if (addIntercept) xDim + 1 else xDim
val nClasses = weights.length / xWithInterceptsDim + 1
val x = Array.fill[Vector](nPoints)(Vectors.dense(Array.fill[Double](xDim)(rnd.nextGaussian())))
x.foreach { vector =>
// This doesn't work if `vector` is a sparse vector.
val vectorArray = vector.toArray
var i = 0
val len = vectorArray.length
while (i < len) {
vectorArray(i) = vectorArray(i) * math.sqrt(xVariance(i)) + xMean(i)
i += 1
}
}
val y = (0 until nPoints).map { idx =>
val xArray = x(idx).toArray
val margins = Array.ofDim[Double](nClasses)
val probs = Array.ofDim[Double](nClasses)
for (i <- 0 until nClasses - 1) {
for (j <- 0 until xDim) margins(i + 1) += weights(i * xWithInterceptsDim + j) * xArray(j)
if (addIntercept) margins(i + 1) += weights((i + 1) * xWithInterceptsDim - 1)
}
// Preventing the overflow when we compute the probability
val maxMargin = margins.max
if (maxMargin > 0) for (i <- 0 until nClasses) margins(i) -= maxMargin
// Computing the probabilities for each class from the margins.
val norm = {
var temp = 0.0
for (i <- 0 until nClasses) {
probs(i) = math.exp(margins(i))
temp += probs(i)
}
temp
}
for (i <- 0 until nClasses) probs(i) /= norm
// Compute the cumulative probability so we can generate a random number and assign a label.
for (i <- 1 until nClasses) probs(i) += probs(i - 1)
val p = rnd.nextDouble()
var y = 0
breakable {
for (i <- 0 until nClasses) {
if (p < probs(i)) {
y = i
break
}
}
}
y
}
val testData = (0 until nPoints).map(i => LabeledPoint(y(i), x(i)))
testData
}
/**
* When no regularization is applied, the multinomial coefficients lack identifiability
* because we do not use a pivot class. We can add any constant value to the coefficients
 * and get the same likelihood. If fitting under bound-constrained optimization, we don't
 * choose the mean-centered coefficients as we do for unbounded problems, since they
 * may fall outside the bounds. We use this function to check whether two sets of coefficients are equivalent.
*/
def checkCoefficientsEquivalent(coefficients1: Matrix, coefficients2: Matrix): Unit = {
coefficients1.colIter.zip(coefficients2.colIter).foreach { case (col1: Vector, col2: Vector) =>
(col1.asBreeze - col2.asBreeze).toArray.toSeq.sliding(2).foreach {
case Seq(v1, v2) => assert(v1 ~= v2 absTol 1E-3)
}
}
}
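// Worked note on the identifiability remark documented above: the class probabilities are
// softmax(W * x + b), so adding the same constant to a feature's coefficient across all classes
// (i.e. to every entry of one column of the coefficient matrix) cancels in the softmax ratio and
// leaves the likelihood unchanged. checkCoefficientsEquivalent therefore only requires the
// difference col1 - col2 to be constant within each column, which is what the sliding(2)
// comparison above verifies.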
}
|
MLnick/spark
|
mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
|
Scala
|
apache-2.0
| 109,723
|
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.unsupervised.pca
import scala.util.{Try, Success, Failure}
import org.apache.log4j.Logger
import org.apache.commons.math3.linear._
import org.apache.commons.math3.stat.correlation.Covariance
import org.scalaml.core.XTSeries
import org.scalaml.core.Types.{CommonMath, ScalaMl}
import org.scalaml.core.Design.PipeOperator
import org.scalaml.util.DisplayUtils
import ScalaMl._
/**
 * <p>Generic class that implements the Principal Component Analysis technique, that is, the
 * extraction of the principal components (eigenvectors). The class is parameterized
 * with a view bound to Double. Its purpose is to compute the covariance
 * matrix and the eigenvalues (normalized values in decreasing order).<br> The features
 * (or variables) associated with a low eigenvalue are eliminated, reducing the dimension
 * of the model and the complexity of any future supervised learning algorithm.</p>
* @constructor Instantiate a principal component analysis algorithm as a data transformation
* of type PipeOperator
*
* @author Patrick Nicolas
* @since February 26, 2014
* @note Scala for Machine Learning Chapter 4 Unsupervised learning / Principal Components Analysis
*/
final class PCA[T <% Double] extends PipeOperator[XTSeries[Array[T]], (DblMatrix, DblVector)] {
import CommonMath._, XTSeries._
private val logger = Logger.getLogger("PCA")
/**
* <p>Data transformation that implements the extraction of the principal components
 * from a time series. The method uses the Apache Commons Math library to compute
* eigenvectors and eigenvalues. All the exceptions thrown by the Math library during
* the manipulation of matrices are caught in the method.</p>
* @param xt time series of dimension > 1
 * @throws MatchError if the input time series is undefined or has no elements
 * @return PartialFunction taking a time series of elements of type T as input to the Principal
 * Component Analysis and returning the tuple (observations projected on the principal components, eigenvalues) as output
*/
override def |> : PartialFunction[XTSeries[Array[T]], (DblMatrix, DblVector)] = {
case xt: XTSeries[Array[T]] if( !xt.isEmpty ) => {
Try {
// Compute the zScore of the time series (1)
zScoring(xt).map(observation => {
// Forces a conversion
val obs: DblMatrix = observation
// Compute the covariance matrix related to the observations in the time series (3)
val covariance = new Covariance(obs).getCovarianceMatrix
// Create an eigenvalue/eigenvector decomposition (4)
val transform = new EigenDecomposition(covariance)
// Retrieve the principal components (or directions)
val eigenVectors = transform.getV
// Retrieve the eigen values
val eigenValues = new ArrayRealVector(transform.getRealEigenvalues)
// Project the observations onto the principal components
val projected = obs.multiply(eigenVectors).getData
// Return the tuple (projected observations, eigenvalues)
(projected, eigenValues.toArray)
})
}
// Return the tuple (Empty Covariance matrix, Empty Eigenvalue vector)
// if an exception is thrown.
match {
case Success(eigenResults) => eigenResults.getOrElse((Array.empty, Array.empty))
case Failure(e) => {
DisplayUtils.error("PCA.|> zScoring ", logger, e)
(Array.empty, Array.empty)
}
}
}
}
}
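// Illustrative sketch (not part of the original source): with the book's type aliases, a run of
// this transform would look roughly as follows, assuming `series: XTSeries[Array[Double]]` has
// already been built from a multi-dimensional time series:
//
//   val pca = new PCA[Double]
//   val (projected, eigenvalues) = pca |> series
//
// `projected` contains the z-scored observations expressed in the eigenvector basis and
// `eigenvalues` the corresponding eigenvalues, ordered as described in the class comment.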
//-------------------------------- EOF -------------------------------------------------------------------------
|
batermj/algorithm-challenger
|
books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/unsupervised/pca/PCA.scala
|
Scala
|
apache-2.0
| 4,066
|
package net.magik6k.jliblxc
import java.io.{FileOutputStream, File}
import net.magik6k.jliblxc.natives.NativeLxcContainer
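/**
 * Loader for the bundled native library: it copies /libjlxc.so from the jar resources into a
 * temporary file, loads it with System.load and then deletes the temporary file. init() is a
 * no-op whose only purpose is to force this object's initializer (and thus the load) to run.
 */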
private[jliblxc] object NativeLoader {
private val input = getClass.getResourceAsStream("/libjlxc.so")
private val dest = File.createTempFile("libjlxc", ".so")
private val output = new FileOutputStream(dest)
CopyUtil.copy(input, output)
System.load(dest.getAbsolutePath)
dest.delete()
def init() {}
def getNativeContainer(name: String, configPath: String) = new NativeLxcContainer(name, configPath)
}
|
magik6k/jLibLXC
|
src/main/scala/net/magik6k/jliblxc/NativeLoader.scala
|
Scala
|
mit
| 544
|
package ch.ltouroumov.modularmachines.common.tileentity.ports
import ch.ltouroumov.modularmachines.common.tileentity.PortType
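/** Machine port variant whose [[portType]] is [[PortType.Fluid]]. */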
class MachinePortFluid extends MachinePortBase {
def portType = PortType.Fluid
}
|
ltouroumov/modular-machines
|
src/main/scala/ch/ltouroumov/modularmachines/common/tileentity/ports/MachinePortFluid.scala
|
Scala
|
gpl-2.0
| 212
|
package es.weso.wiFetcher.dao
import es.weso.wiFetcher.entities.Indicator
/**
 * This trait contains all the methods that a class loading information
 * about indicators has to implement
*/
trait IndicatorDAO extends DAO [Indicator] {
def getPrimaryIndicators() : List[Indicator]
def getSecondaryIndicators() : List[Indicator]
}
|
weso/wiFetcher
|
app/es/weso/wiFetcher/dao/IndicatorDAO.scala
|
Scala
|
apache-2.0
| 334
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.nio.charset.{Charset, StandardCharsets}
import java.util.{Locale, TimeZone}
import com.fasterxml.jackson.core.{JsonFactory, JsonParser}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util._
/**
* Options for parsing JSON data into Spark SQL rows.
*
* Most of these map directly to Jackson's internal options, specified in [[JsonParser.Feature]].
*/
private[sql] class JSONOptions(
@transient val parameters: CaseInsensitiveMap[String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String)
extends Logging with Serializable {
def this(
parameters: Map[String, String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String = "") = {
this(
CaseInsensitiveMap(parameters),
defaultTimeZoneId,
defaultColumnNameOfCorruptRecord)
}
val samplingRatio =
parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0)
val primitivesAsString =
parameters.get("primitivesAsString").map(_.toBoolean).getOrElse(false)
val prefersDecimal =
parameters.get("prefersDecimal").map(_.toBoolean).getOrElse(false)
val allowComments =
parameters.get("allowComments").map(_.toBoolean).getOrElse(false)
val allowUnquotedFieldNames =
parameters.get("allowUnquotedFieldNames").map(_.toBoolean).getOrElse(false)
val allowSingleQuotes =
parameters.get("allowSingleQuotes").map(_.toBoolean).getOrElse(true)
val allowNumericLeadingZeros =
parameters.get("allowNumericLeadingZeros").map(_.toBoolean).getOrElse(false)
val allowNonNumericNumbers =
parameters.get("allowNonNumericNumbers").map(_.toBoolean).getOrElse(true)
val allowBackslashEscapingAnyCharacter =
parameters.get("allowBackslashEscapingAnyCharacter").map(_.toBoolean).getOrElse(false)
private val allowUnquotedControlChars =
parameters.get("allowUnquotedControlChars").map(_.toBoolean).getOrElse(false)
val compressionCodec = parameters.get("compression").map(CompressionCodecs.getCodecClassName)
val parseMode: ParseMode =
parameters.get("mode").map(ParseMode.fromString).getOrElse(PermissiveMode)
val columnNameOfCorruptRecord =
parameters.getOrElse("columnNameOfCorruptRecord", defaultColumnNameOfCorruptRecord)
// Whether to ignore columns of all null values or empty arrays/structs during schema inference
val dropFieldIfAllNull = parameters.get("dropFieldIfAllNull").map(_.toBoolean).getOrElse(false)
// A language tag in IETF BCP 47 format
val locale: Locale = parameters.get("locale").map(Locale.forLanguageTag).getOrElse(Locale.US)
val timeZone: TimeZone = DateTimeUtils.getTimeZone(
parameters.getOrElse(DateTimeUtils.TIMEZONE_OPTION, defaultTimeZoneId))
val dateFormat: String = parameters.getOrElse("dateFormat", "yyyy-MM-dd")
val timestampFormat: String =
parameters.getOrElse("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSXXX")
val multiLine = parameters.get("multiLine").map(_.toBoolean).getOrElse(false)
/**
* A string between two consecutive JSON records.
*/
val lineSeparator: Option[String] = parameters.get("lineSep").map { sep =>
require(sep.nonEmpty, "'lineSep' cannot be an empty string.")
sep
}
protected def checkedEncoding(enc: String): String = enc
/**
* Standard encoding (charset) name. For example UTF-8, UTF-16LE and UTF-32BE.
* If the encoding is not specified (None) in read, it will be detected automatically
* when the multiLine option is set to `true`. If encoding is not specified in write,
* UTF-8 is used by default.
*/
val encoding: Option[String] = parameters.get("encoding")
.orElse(parameters.get("charset")).map(checkedEncoding)
val lineSeparatorInRead: Option[Array[Byte]] = lineSeparator.map { lineSep =>
lineSep.getBytes(encoding.getOrElse("UTF-8"))
}
val lineSeparatorInWrite: String = lineSeparator.getOrElse("\\n")
/**
* Generating JSON strings in pretty representation if the parameter is enabled.
*/
val pretty: Boolean = parameters.get("pretty").map(_.toBoolean).getOrElse(false)
/** Sets config options on a Jackson [[JsonFactory]]. */
def setJacksonOptions(factory: JsonFactory): Unit = {
factory.configure(JsonParser.Feature.ALLOW_COMMENTS, allowComments)
factory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, allowUnquotedFieldNames)
factory.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, allowSingleQuotes)
factory.configure(JsonParser.Feature.ALLOW_NUMERIC_LEADING_ZEROS, allowNumericLeadingZeros)
factory.configure(JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS, allowNonNumericNumbers)
factory.configure(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER,
allowBackslashEscapingAnyCharacter)
factory.configure(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS, allowUnquotedControlChars)
}
}
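// Illustrative sketch (not part of the original file): these options are normally supplied as
// DataFrameReader/DataFrameWriter options and reach this class through the `parameters` map, e.g.
//
//   spark.read
//     .option("multiLine", "true")
//     .option("mode", "PERMISSIVE")
//     .option("encoding", "UTF-8")
//     .json("/path/to/records.json")   // the path here is only a placeholder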
private[sql] class JSONOptionsInRead(
@transient override val parameters: CaseInsensitiveMap[String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String)
extends JSONOptions(parameters, defaultTimeZoneId, defaultColumnNameOfCorruptRecord) {
def this(
parameters: Map[String, String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String = "") = {
this(
CaseInsensitiveMap(parameters),
defaultTimeZoneId,
defaultColumnNameOfCorruptRecord)
}
protected override def checkedEncoding(enc: String): String = {
val isBlacklisted = JSONOptionsInRead.blacklist.contains(Charset.forName(enc))
require(multiLine || !isBlacklisted,
s"""The ${enc} encoding must not be included in the blacklist when multiLine is disabled:
|Blacklist: ${JSONOptionsInRead.blacklist.mkString(", ")}""".stripMargin)
val isLineSepRequired =
multiLine || Charset.forName(enc) == StandardCharsets.UTF_8 || lineSeparator.nonEmpty
require(isLineSepRequired, s"The lineSep option must be specified for the $enc encoding")
enc
}
}
private[sql] object JSONOptionsInRead {
// The following encodings are not supported in per-line mode (multiLine is false)
// because they cause problems when reading files with a BOM, which is expected to be
// present in files with such encodings. After splitting the input files by lines,
// only the first line of each file keeps the BOM, which makes it impossible to read
// the remaining lines. Besides that, the lineSep option would have to contain the BOM in such
// encodings, and a BOM can never appear between lines.
val blacklist = Seq(
Charset.forName("UTF-16"),
Charset.forName("UTF-32")
)
}
|
facaiy/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala
|
Scala
|
apache-2.0
| 7,436
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.linalg
import java.util.Random
import breeze.linalg.{CSCMatrix, Matrix => BM}
import org.mockito.Mockito.when
import org.scalatest.mock.MockitoSugar._
import scala.collection.mutable.{Map => MutableMap}
import org.apache.spark.ml.SparkMLFunSuite
import org.apache.spark.ml.util.TestingUtils._
class MatricesSuite extends SparkMLFunSuite {
test("dense matrix construction") {
val m = 3
val n = 2
val values = Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0)
val mat = Matrices.dense(m, n, values).asInstanceOf[DenseMatrix]
assert(mat.numRows === m)
assert(mat.numCols === n)
assert(mat.values.eq(values), "should not copy data")
}
test("dense matrix construction with wrong dimension") {
intercept[RuntimeException] {
Matrices.dense(3, 2, Array(0.0, 1.0, 2.0))
}
}
test("sparse matrix construction") {
val m = 3
val n = 4
val values = Array(1.0, 2.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 2, 4, 4)
val rowIndices = Array(1, 2, 1, 2)
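// In CSC form these three arrays encode the 3 x 4 matrix
//   [ 0.0  0.0  0.0  0.0 ]
//   [ 1.0  0.0  4.0  0.0 ]
//   [ 2.0  0.0  5.0  0.0 ]
// colPtrs delimits each column's slice of values/rowIndices, so columns 1 and 3 are empty.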
val mat = Matrices.sparse(m, n, colPtrs, rowIndices, values).asInstanceOf[SparseMatrix]
assert(mat.numRows === m)
assert(mat.numCols === n)
assert(mat.values.eq(values), "should not copy data")
assert(mat.colPtrs.eq(colPtrs), "should not copy data")
assert(mat.rowIndices.eq(rowIndices), "should not copy data")
val entries: Array[(Int, Int, Double)] = Array((2, 2, 3.0), (1, 0, 1.0), (2, 0, 2.0),
(1, 2, 2.0), (2, 2, 2.0), (1, 2, 2.0), (0, 0, 0.0))
val mat2 = SparseMatrix.fromCOO(m, n, entries)
assert(mat.asBreeze === mat2.asBreeze)
assert(mat2.values.length == 4)
}
test("sparse matrix construction with wrong number of elements") {
intercept[IllegalArgumentException] {
Matrices.sparse(3, 2, Array(0, 1), Array(1, 2, 1), Array(0.0, 1.0, 2.0))
}
intercept[IllegalArgumentException] {
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(0.0, 1.0, 2.0))
}
}
test("index in matrices incorrect input") {
val sm = Matrices.sparse(3, 2, Array(0, 2, 3), Array(1, 2, 1), Array(0.0, 1.0, 2.0))
val dm = Matrices.dense(3, 2, Array(0.0, 2.3, 1.4, 3.2, 1.0, 9.1))
Array(sm, dm).foreach { mat =>
intercept[IllegalArgumentException] { mat.index(4, 1) }
intercept[IllegalArgumentException] { mat.index(1, 4) }
intercept[IllegalArgumentException] { mat.index(-1, 2) }
intercept[IllegalArgumentException] { mat.index(1, -2) }
}
}
test("equals") {
val dm1 = Matrices.dense(2, 2, Array(0.0, 1.0, 2.0, 3.0))
assert(dm1 === dm1)
assert(dm1 !== dm1.transpose)
val dm2 = Matrices.dense(2, 2, Array(0.0, 2.0, 1.0, 3.0))
assert(dm1 === dm2.transpose)
val sm1 = dm1.asInstanceOf[DenseMatrix].toSparse
assert(sm1 === sm1)
assert(sm1 === dm1)
assert(sm1 !== sm1.transpose)
val sm2 = dm2.asInstanceOf[DenseMatrix].toSparse
assert(sm1 === sm2.transpose)
assert(sm1 === dm2.transpose)
}
test("matrix copies are deep copies") {
val m = 3
val n = 2
val denseMat = Matrices.dense(m, n, Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0))
val denseCopy = denseMat.copy
assert(!denseMat.toArray.eq(denseCopy.toArray))
val values = Array(1.0, 2.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(1, 2, 1, 2)
val sparseMat = Matrices.sparse(m, n, colPtrs, rowIndices, values)
val sparseCopy = sparseMat.copy
assert(!sparseMat.toArray.eq(sparseCopy.toArray))
}
test("matrix indexing and updating") {
val m = 3
val n = 2
val allValues = Array(0.0, 1.0, 2.0, 3.0, 4.0, 0.0)
val denseMat = new DenseMatrix(m, n, allValues)
assert(denseMat(0, 1) === 3.0)
assert(denseMat(0, 1) === denseMat.values(3))
assert(denseMat(0, 1) === denseMat(3))
assert(denseMat(0, 0) === 0.0)
denseMat.update(0, 0, 10.0)
assert(denseMat(0, 0) === 10.0)
assert(denseMat.values(0) === 10.0)
val sparseValues = Array(1.0, 2.0, 3.0, 4.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(1, 2, 0, 1)
val sparseMat = new SparseMatrix(m, n, colPtrs, rowIndices, sparseValues)
assert(sparseMat(0, 1) === 3.0)
assert(sparseMat(0, 1) === sparseMat.values(2))
assert(sparseMat(0, 0) === 0.0)
intercept[NoSuchElementException] {
sparseMat.update(0, 0, 10.0)
}
intercept[NoSuchElementException] {
sparseMat.update(2, 1, 10.0)
}
sparseMat.update(0, 1, 10.0)
assert(sparseMat(0, 1) === 10.0)
assert(sparseMat.values(2) === 10.0)
}
test("toSparse, toDense") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val spMat2 = deMat1.toSparse
val deMat2 = spMat1.toDense
assert(spMat1.asBreeze === spMat2.asBreeze)
assert(deMat1.asBreeze === deMat2.asBreeze)
}
test("map, update") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val deMat2 = deMat1.map(_ * 2)
val spMat2 = spMat1.map(_ * 2)
deMat1.update(_ * 2)
spMat1.update(_ * 2)
assert(spMat1.toArray === spMat2.toArray)
assert(deMat1.toArray === deMat2.toArray)
}
test("transpose") {
val dA =
new DenseMatrix(4, 3, Array(0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 3.0))
val sA = new SparseMatrix(4, 3, Array(0, 1, 3, 4), Array(1, 0, 2, 3), Array(1.0, 2.0, 1.0, 3.0))
val dAT = dA.transpose.asInstanceOf[DenseMatrix]
val sAT = sA.transpose.asInstanceOf[SparseMatrix]
val dATexpected =
new DenseMatrix(3, 4, Array(0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 3.0))
val sATexpected =
new SparseMatrix(3, 4, Array(0, 1, 2, 3, 4), Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0))
assert(dAT.asBreeze === dATexpected.asBreeze)
assert(sAT.asBreeze === sATexpected.asBreeze)
assert(dA(1, 0) === dAT(0, 1))
assert(dA(2, 1) === dAT(1, 2))
assert(sA(1, 0) === sAT(0, 1))
assert(sA(2, 1) === sAT(1, 2))
assert(!dA.toArray.eq(dAT.toArray), "has to have a new array")
assert(dA.values.eq(dAT.transpose.asInstanceOf[DenseMatrix].values), "should not copy array")
assert(dAT.toSparse.asBreeze === sATexpected.asBreeze)
assert(sAT.toDense.asBreeze === dATexpected.asBreeze)
}
test("foreachActive") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val sp = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val dn = new DenseMatrix(m, n, allValues)
val dnMap = MutableMap[(Int, Int), Double]()
dn.foreachActive { (i, j, value) =>
dnMap.put((i, j), value)
}
assert(dnMap.size === 6)
assert(dnMap(0, 0) === 1.0)
assert(dnMap(1, 0) === 2.0)
assert(dnMap(2, 0) === 0.0)
assert(dnMap(0, 1) === 0.0)
assert(dnMap(1, 1) === 4.0)
assert(dnMap(2, 1) === 5.0)
val spMap = MutableMap[(Int, Int), Double]()
sp.foreachActive { (i, j, value) =>
spMap.put((i, j), value)
}
assert(spMap.size === 4)
assert(spMap(0, 0) === 1.0)
assert(spMap(1, 0) === 2.0)
assert(spMap(1, 1) === 4.0)
assert(spMap(2, 1) === 5.0)
}
test("horzcat, vertcat, eye, speye") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
// transposed versions
val allValuesT = Array(1.0, 0.0, 2.0, 4.0, 0.0, 5.0)
val colPtrsT = Array(0, 1, 3, 4)
val rowIndicesT = Array(0, 0, 1, 1)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val spMat1T = new SparseMatrix(n, m, colPtrsT, rowIndicesT, values)
val deMat1T = new DenseMatrix(n, m, allValuesT)
// should equal spMat1 & deMat1 respectively
val spMat1TT = spMat1T.transpose
val deMat1TT = deMat1T.transpose
val deMat2 = Matrices.eye(3)
val spMat2 = Matrices.speye(3)
val deMat3 = Matrices.eye(2)
val spMat3 = Matrices.speye(2)
val spHorz = Matrices.horzcat(Array(spMat1, spMat2))
val spHorz2 = Matrices.horzcat(Array(spMat1, deMat2))
val spHorz3 = Matrices.horzcat(Array(deMat1, spMat2))
val deHorz1 = Matrices.horzcat(Array(deMat1, deMat2))
val deHorz2 = Matrices.horzcat(Array.empty[Matrix])
assert(deHorz1.numRows === 3)
assert(spHorz2.numRows === 3)
assert(spHorz3.numRows === 3)
assert(spHorz.numRows === 3)
assert(deHorz1.numCols === 5)
assert(spHorz2.numCols === 5)
assert(spHorz3.numCols === 5)
assert(spHorz.numCols === 5)
assert(deHorz2.numRows === 0)
assert(deHorz2.numCols === 0)
assert(deHorz2.toArray.length === 0)
assert(deHorz1 ~== spHorz2.asInstanceOf[SparseMatrix].toDense absTol 1e-15)
assert(spHorz2 ~== spHorz3 absTol 1e-15)
assert(spHorz(0, 0) === 1.0)
assert(spHorz(2, 1) === 5.0)
assert(spHorz(0, 2) === 1.0)
assert(spHorz(1, 2) === 0.0)
assert(spHorz(1, 3) === 1.0)
assert(spHorz(2, 4) === 1.0)
assert(spHorz(1, 4) === 0.0)
assert(deHorz1(0, 0) === 1.0)
assert(deHorz1(2, 1) === 5.0)
assert(deHorz1(0, 2) === 1.0)
assert(deHorz1(1, 2) == 0.0)
assert(deHorz1(1, 3) === 1.0)
assert(deHorz1(2, 4) === 1.0)
assert(deHorz1(1, 4) === 0.0)
// containing transposed matrices
val spHorzT = Matrices.horzcat(Array(spMat1TT, spMat2))
val spHorz2T = Matrices.horzcat(Array(spMat1TT, deMat2))
val spHorz3T = Matrices.horzcat(Array(deMat1TT, spMat2))
val deHorz1T = Matrices.horzcat(Array(deMat1TT, deMat2))
assert(deHorz1T ~== deHorz1 absTol 1e-15)
assert(spHorzT ~== spHorz absTol 1e-15)
assert(spHorz2T ~== spHorz2 absTol 1e-15)
assert(spHorz3T ~== spHorz3 absTol 1e-15)
intercept[IllegalArgumentException] {
Matrices.horzcat(Array(spMat1, spMat3))
}
intercept[IllegalArgumentException] {
Matrices.horzcat(Array(deMat1, spMat3))
}
val spVert = Matrices.vertcat(Array(spMat1, spMat3))
val deVert1 = Matrices.vertcat(Array(deMat1, deMat3))
val spVert2 = Matrices.vertcat(Array(spMat1, deMat3))
val spVert3 = Matrices.vertcat(Array(deMat1, spMat3))
val deVert2 = Matrices.vertcat(Array.empty[Matrix])
assert(deVert1.numRows === 5)
assert(spVert2.numRows === 5)
assert(spVert3.numRows === 5)
assert(spVert.numRows === 5)
assert(deVert1.numCols === 2)
assert(spVert2.numCols === 2)
assert(spVert3.numCols === 2)
assert(spVert.numCols === 2)
assert(deVert2.numRows === 0)
assert(deVert2.numCols === 0)
assert(deVert2.toArray.length === 0)
assert(deVert1 ~== spVert2.asInstanceOf[SparseMatrix].toDense absTol 1e-15)
assert(spVert2 ~== spVert3 absTol 1e-15)
assert(spVert(0, 0) === 1.0)
assert(spVert(2, 1) === 5.0)
assert(spVert(3, 0) === 1.0)
assert(spVert(3, 1) === 0.0)
assert(spVert(4, 1) === 1.0)
assert(deVert1(0, 0) === 1.0)
assert(deVert1(2, 1) === 5.0)
assert(deVert1(3, 0) === 1.0)
assert(deVert1(3, 1) === 0.0)
assert(deVert1(4, 1) === 1.0)
// containing transposed matrices
val spVertT = Matrices.vertcat(Array(spMat1TT, spMat3))
val deVert1T = Matrices.vertcat(Array(deMat1TT, deMat3))
val spVert2T = Matrices.vertcat(Array(spMat1TT, deMat3))
val spVert3T = Matrices.vertcat(Array(deMat1TT, spMat3))
assert(deVert1T ~== deVert1 absTol 1e-15)
assert(spVertT ~== spVert absTol 1e-15)
assert(spVert2T ~== spVert2 absTol 1e-15)
assert(spVert3T ~== spVert3 absTol 1e-15)
intercept[IllegalArgumentException] {
Matrices.vertcat(Array(spMat1, spMat2))
}
intercept[IllegalArgumentException] {
Matrices.vertcat(Array(deMat1, spMat2))
}
}
test("zeros") {
val mat = Matrices.zeros(2, 3).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 3)
assert(mat.values.forall(_ == 0.0))
}
test("ones") {
val mat = Matrices.ones(2, 3).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 3)
assert(mat.values.forall(_ == 1.0))
}
test("eye") {
val mat = Matrices.eye(2).asInstanceOf[DenseMatrix]
assert(mat.numCols === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 0.0, 0.0, 1.0))
}
test("rand") {
val rng = mock[Random]
when(rng.nextDouble()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = Matrices.rand(2, 2, rng).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("randn") {
val rng = mock[Random]
when(rng.nextGaussian()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = Matrices.randn(2, 2, rng).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("diag") {
val mat = Matrices.diag(Vectors.dense(1.0, 2.0)).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 0.0, 0.0, 2.0))
}
test("sprand") {
val rng = mock[Random]
when(rng.nextInt(4)).thenReturn(0, 1, 1, 3, 2, 2, 0, 1, 3, 0)
when(rng.nextDouble()).thenReturn(1.0, 2.0, 3.0, 4.0, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val mat = SparseMatrix.sprand(4, 4, 0.25, rng)
assert(mat.numRows === 4)
assert(mat.numCols === 4)
assert(mat.rowIndices.toSeq === Seq(3, 0, 2, 1))
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
val mat2 = SparseMatrix.sprand(2, 3, 1.0, rng)
assert(mat2.rowIndices.toSeq === Seq(0, 1, 0, 1, 0, 1))
assert(mat2.colPtrs.toSeq === Seq(0, 2, 4, 6))
}
test("sprandn") {
val rng = mock[Random]
when(rng.nextInt(4)).thenReturn(0, 1, 1, 3, 2, 2, 0, 1, 3, 0)
when(rng.nextGaussian()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = SparseMatrix.sprandn(4, 4, 0.25, rng)
assert(mat.numRows === 4)
assert(mat.numCols === 4)
assert(mat.rowIndices.toSeq === Seq(3, 0, 2, 1))
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("toString") {
val empty = Matrices.ones(0, 0)
empty.toString(0, 0)
val mat = Matrices.rand(5, 10, new Random())
mat.toString(-1, -5)
mat.toString(0, 0)
mat.toString(Int.MinValue, Int.MinValue)
mat.toString(Int.MaxValue, Int.MaxValue)
var lines = mat.toString(6, 50).lines.toArray
assert(lines.size == 5 && lines.forall(_.size <= 50))
lines = mat.toString(5, 100).lines.toArray
assert(lines.size == 5 && lines.forall(_.size <= 100))
}
test("numNonzeros and numActives") {
val dm1 = Matrices.dense(3, 2, Array(0, 0, -1, 1, 0, 1))
assert(dm1.numNonzeros === 3)
assert(dm1.numActives === 6)
val sm1 = Matrices.sparse(3, 2, Array(0, 2, 3), Array(0, 2, 1), Array(0.0, -1.2, 0.0))
assert(sm1.numNonzeros === 1)
assert(sm1.numActives === 3)
}
test("fromBreeze with sparse matrix") {
// colPtrs.last does NOT always equal values.length in a breeze CSCMatrix and an
// invocation of compact() may be necessary. Refer to SPARK-11507
val bm1: BM[Double] = new CSCMatrix[Double](
Array(1.0, 1, 1), 3, 3, Array(0, 1, 2, 3), Array(0, 1, 2))
val bm2: BM[Double] = new CSCMatrix[Double](
Array(1.0, 2, 2, 4), 3, 3, Array(0, 0, 2, 4), Array(1, 2, 1, 2))
val sum = bm1 + bm2
Matrices.fromBreeze(sum)
}
test("row/col iterator") {
val dm = new DenseMatrix(3, 2, Array(0, 1, 2, 3, 4, 0))
val sm = dm.toSparse
val rows = Seq(Vectors.dense(0, 3), Vectors.dense(1, 4), Vectors.dense(2, 0))
val cols = Seq(Vectors.dense(0, 1, 2), Vectors.dense(3, 4, 0))
for (m <- Seq(dm, sm)) {
assert(m.rowIter.toSeq === rows)
assert(m.colIter.toSeq === cols)
assert(m.transpose.rowIter.toSeq === cols)
assert(m.transpose.colIter.toSeq === rows)
}
}
}
|
ZxlAaron/mypros
|
mllib-local/src/test/scala/org/apache/spark/ml/linalg/MatricesSuite.scala
|
Scala
|
apache-2.0
| 17,257
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils.timer
import org.junit.Assert._
import java.util.concurrent.atomic._
import org.junit.Test
class TimerTaskListTest {
private class TestTask(val delayMs: Long) extends TimerTask {
def run(): Unit = { }
}
private def size(list: TimerTaskList): Int = {
var count = 0
list.foreach(_ => count += 1)
count
}
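// Scenario: list1/list2/list3 share one counter. Adding an entry increments it, while wrapping
// an already-listed task in a new TimerTaskEntry first removes the old entry, so reinsertion
// leaves the counter unchanged and it effectively tracks the number of live task entries.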
@Test
def testAll() {
val sharedCounter = new AtomicInteger(0)
val list1 = new TimerTaskList(sharedCounter)
val list2 = new TimerTaskList(sharedCounter)
val list3 = new TimerTaskList(sharedCounter)
val tasks = (1 to 10).map { i =>
val task = new TestTask(0L)
list1.add(new TimerTaskEntry(task, 10L))
assertEquals(i, sharedCounter.get)
task
}
assertEquals(tasks.size, sharedCounter.get)
// reinserting the existing tasks shouldn't change the task count
tasks.take(4).foreach { task =>
val prevCount = sharedCounter.get
// new TimerTaskEntry(task) will remove the existing entry from the list
list2.add(new TimerTaskEntry(task, 10L))
assertEquals(prevCount, sharedCounter.get)
}
assertEquals(10 - 4, size(list1))
assertEquals(4, size(list2))
assertEquals(tasks.size, sharedCounter.get)
// reinserting the existing tasks shouldn't change the task count
tasks.drop(4).foreach { task =>
val prevCount = sharedCounter.get
// new TimerTaskEntry(task) will remove the existing entry from the list
list3.add(new TimerTaskEntry(task, 10L))
assertEquals(prevCount, sharedCounter.get)
}
assertEquals(0, size(list1))
assertEquals(4, size(list2))
assertEquals(6, size(list3))
assertEquals(tasks.size, sharedCounter.get)
// cancel tasks in lists
list1.foreach { _.cancel() }
assertEquals(0, size(list1))
assertEquals(4, size(list2))
assertEquals(6, size(list3))
list2.foreach { _.cancel() }
assertEquals(0, size(list1))
assertEquals(0, size(list2))
assertEquals(6, size(list3))
list3.foreach { _.cancel() }
assertEquals(0, size(list1))
assertEquals(0, size(list2))
assertEquals(0, size(list3))
}
}
|
wangcy6/storm_app
|
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/utils/timer/TimerTaskListTest.scala
|
Scala
|
apache-2.0
| 2,956
|
package org.jetbrains.sbt.project
import java.io.File
import java.util
import com.intellij.openapi.externalSystem.ExternalSystemAutoImportAware
import com.intellij.openapi.project.Project
import org.jetbrains.sbt._
import org.jetbrains.sbt.project.AutoImportAwareness._
import scala.collection.JavaConverters._
/**
* @author Pavel Fatin
*/
trait AutoImportAwareness extends ExternalSystemAutoImportAware {
override final def getAffectedExternalProjectPath(changedFileOrDirPath: String, project: Project): String =
if (isProjectDefinitionFile(project, new File(changedFileOrDirPath))) project.getBasePath
else null
override def getAffectedExternalProjectFiles(projectPath: String, project: Project): util.List[File] = {
val baseDir = new File(projectPath)
val projectDir = baseDir / Sbt.ProjectDirectory
val files =
baseDir / "build.sbt" +:
projectDir / "build.properties" +:
projectDir.ls(name => name.endsWith(".sbt") || name.endsWith(".scala"))
files.asJava
}
}
private object AutoImportAwareness {
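/**
 * A changed file is considered part of the sbt project definition when it is the build file
 * (build.sbt) in the project base directory, or the build.properties file, any *.sbt file,
 * or any *.scala file under the project/ directory.
 */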
def isProjectDefinitionFile(project: Project, file: File): Boolean = {
val baseDir = new File(project.getBasePath)
val projectDir = baseDir / Sbt.ProjectDirectory
val fileName = file.getName
fileName == Sbt.BuildFile && file.isIn(baseDir) ||
fileName == Sbt.PropertiesFile && file.isIn(projectDir) ||
fileName.endsWith(".sbt") && file.isIn(projectDir) ||
fileName.endsWith(".scala") && file.isIn(projectDir)
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/sbt/project/AutoImportAwareness.scala
|
Scala
|
apache-2.0
| 1,517
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal
import java.util.concurrent.TimeUnit
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.util.Utils
package object config {
private[spark] val DRIVER_CLASS_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val DRIVER_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val DRIVER_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.driver.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val DRIVER_MEMORY = ConfigBuilder("spark.driver.memory")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val EXECUTOR_CLASS_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val EXECUTOR_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val EXECUTOR_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.executor.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_MEMORY = ConfigBuilder("spark.executor.memory")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
.booleanConf.createWithDefault(false)
private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.initialExecutors")
.fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
private[spark] val SHUFFLE_SERVICE_ENABLED =
ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
private[spark] val KEYTAB = ConfigBuilder("spark.yarn.keytab")
.doc("Location of user's keytab.")
.stringConf.createOptional
private[spark] val PRINCIPAL = ConfigBuilder("spark.yarn.principal")
.doc("Name of the Kerberos principal.")
.stringConf.createOptional
private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
.intConf
.createOptional
private[spark] val PY_FILES = ConfigBuilder("spark.submit.pyFiles")
.internal()
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val MAX_TASK_FAILURES =
ConfigBuilder("spark.task.maxFailures")
.intConf
.createWithDefault(4)
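// Illustrative note (not in the original file): entries declared in this object are read
// elsewhere in Spark through the typed accessor, e.g. `conf.get(MAX_TASK_FAILURES)` returns
// the Int above, falling back to the declared default when "spark.task.maxFailures" is unset.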
// Blacklist confs
private[spark] val BLACKLIST_ENABLED =
ConfigBuilder("spark.blacklist.enabled")
.booleanConf
.createOptional
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)
private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val BLACKLIST_TIMEOUT_CONF =
ConfigBuilder("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_KILL_ENABLED =
ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)
private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
.internal()
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
// End blacklist confs
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.withAlternative("spark.scheduler.listenerbus.eventqueue.size")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)
// This property sets the root namespace for metrics reporting
private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
.stringConf
.createOptional
private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
.stringConf
.createOptional
private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
.stringConf
.createOptional
// To limit memory usage, we only track information for a fixed number of tasks
private[spark] val UI_RETAINED_TASKS = ConfigBuilder("spark.ui.retainedTasks")
.intConf
.createWithDefault(100000)
// To limit how many applications are shown in the History Server summary ui
private[spark] val HISTORY_UI_MAX_APPS =
ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE)
private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
ConfigBuilder("spark.io.encryption.keygen.algorithm")
.stringConf
.createWithDefault("HmacSHA1")
private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
.intConf
.checkValues(Set(128, 192, 256))
.createWithDefault(128)
private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
ConfigBuilder("spark.io.crypto.cipher.transformation")
.internal()
.stringConf
.createWithDefaultString("AES/CTR/NoPadding")
private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
.doc("Address of driver endpoints.")
.stringConf
.createWithDefault(Utils.localHostName())
private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
.doc("Address where to bind network listen sockets on the driver.")
.fallbackConf(DRIVER_HOST_ADDRESS)
private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
.doc("Port to use for the block manager when a more specific setting is not provided.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
.doc("Port to use for the block manager on the driver.")
.fallbackConf(BLOCK_MANAGER_PORT)
private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
.doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
"encountering corrupted or non-existing files and contents that have been read will still " +
"be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
.stringConf
.createOptional
private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
.doc("The maximum number of bytes to pack into a single partition when reading files.")
.longConf
.createWithDefault(128 * 1024 * 1024)
private[spark] val FILES_OPEN_COST_IN_BYTES = ConfigBuilder("spark.files.openCostInBytes")
.doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
" the same time. This is used when putting multiple files into a partition. It's better to" +
" over estimate, then the partitions with small files will be faster than partitions with" +
" bigger files.")
.longConf
.createWithDefault(4 * 1024 * 1024)
private[spark] val SECRET_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.regex")
.doc("Regex to decide which Spark configuration properties and environment variables in " +
"driver and executor environments contain sensitive information. When this regex matches " +
"a property key or value, the value is redacted from the environment UI and various logs " +
"like YARN and event logs.")
.regexConf
.createWithDefault("(?i)secret|password".r)
private[spark] val STRING_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.string.regex")
.doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
"information. When this regex matches a string part, that string part is replaced by a " +
"dummy value. This is currently used to redact the output of SQL explain commands.")
.regexConf
.createOptional
private[spark] val NETWORK_AUTH_ENABLED =
ConfigBuilder("spark.authenticate")
.booleanConf
.createWithDefault(false)
private[spark] val SASL_ENCRYPTION_ENABLED =
ConfigBuilder("spark.authenticate.enableSaslEncryption")
.booleanConf
.createWithDefault(false)
private[spark] val NETWORK_ENCRYPTION_ENABLED =
ConfigBuilder("spark.network.crypto.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_ACCURATE_BLOCK_THRESHOLD =
ConfigBuilder("spark.shuffle.accurateBlockThreshold")
.doc("When we compress the size of shuffle blocks in HighlyCompressedMapStatus, we will " +
"record the size accurately if it's above this config. This helps to prevent OOM by " +
"avoiding underestimating shuffle block size when fetch shuffle blocks.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(100 * 1024 * 1024)
private[spark] val REDUCER_MAX_REQ_SIZE_SHUFFLE_TO_MEM =
ConfigBuilder("spark.reducer.maxReqSizeShuffleToMem")
.doc("The blocks of a shuffle request will be fetched to disk when size of the request is " +
"above this threshold. This is to avoid a giant request takes too much memory.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("200m")
}
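// A minimal usage sketch (illustrative only): the ConfigEntry values defined above are read
// through SparkConf's typed getter. This assumes Spark's internal SparkConf.get(ConfigEntry)
// accessor, which is only visible inside the org.apache.spark namespace.
private[spark] object ConfigUsageSketch {
  import org.apache.spark.SparkConf
  import org.apache.spark.internal.config._

  def maxTaskFailures(conf: SparkConf): Int = conf.get(MAX_TASK_FAILURES)
}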
|
mzl9039/spark
|
core/src/main/scala/org/apache/spark/internal/config/package.scala
|
Scala
|
apache-2.0
| 11,900
|
/*
* Copyright 2014 DataGenerator Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.datagenerator.common.Helpers
/**
* Boolean implicit methods
*/
object BooleanHelper {
implicit class BooleanSerialization(private val boolean: Boolean) {
/**
* Returns "Y" if boolean is true, else ""
* @return String representing boolean as either "Y" or ""
*/
def toY_orEmpty: String = {
if (!boolean) {
""
} else {
"Y"
}
}
}
}
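// A minimal usage sketch (illustrative only) of the implicit enrichment above.
object BooleanHelperUsageSketch {
  import BooleanHelper._

  // expected: ("Y", "")
  def flags: (String, String) = (true.toY_orEmpty, false.toY_orEmpty)
}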
|
Brijeshrpatel9/SingleThreaderProcessingDG
|
dg-common/src/main/code/org/finra/datagenerator/common/Helpers/BooleanHelper.scala
|
Scala
|
apache-2.0
| 1,027
|
package microtools.actions
import java.util.UUID
import microtools.models.ExtraHeaders
import play.api.mvc.RequestHeader
import scala.util.Try
object Helper {
def isBusinessDebug(rh: RequestHeader): Boolean =
rh.cookies
.get(ExtraHeaders.DEBUG_HEADER)
.flatMap(c => Try(c.value.toBoolean).toOption)
.getOrElse(
rh.headers
.get(ExtraHeaders.DEBUG_HEADER)
.flatMap(s => Try(s.toBoolean).toOption)
.getOrElse(false)
)
def getOrCreateFlowId(rh: RequestHeader): String =
rh.cookies
.get(ExtraHeaders.FLOW_ID_HEADER)
.map(_.value)
.getOrElse(
rh.headers
.get(ExtraHeaders.FLOW_ID_HEADER)
.getOrElse(generateFlowId())
)
def generateFlowId(): String = UUID.randomUUID().toString
}
|
21re/play-micro-tools
|
src/main/scala/microtools/actions/Helper.scala
|
Scala
|
mit
| 807
|
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package no.uio.musit.service
import javax.inject.Inject
import play.api.http.DefaultHttpFilters
class MusitFilter @Inject()(
noCache: NoCacheFilter,
logging: AccessLogFilter
) extends DefaultHttpFilters(noCache, logging)
|
kpmeen/musit
|
musit-service/src/main/scala/no/uio/musit/service/MusitFilter.scala
|
Scala
|
gpl-2.0
| 1,095
|
package ld.query
import cmwell.fts._
import cmwell.web.ld.cmw.CMWellRDFHelper
import cmwell.web.ld.query._
import com.typesafe.scalalogging.LazyLogging
import ld.cmw.PassiveFieldTypesCache
import ld.query.JenaArqExtensionsUtils._
import logic.CRUDServiceFS
import org.apache.jena.graph.{Node, NodeFactory, Triple}
import org.apache.jena.query.{Query, QueryExecution, QueryExecutionFactory}
import org.apache.jena.rdf.model.ModelFactory
import org.apache.jena.sparql.algebra.Algebra
import org.apache.jena.sparql.core.Quad
import wsutil.{HashedFieldKey, RawFieldFilter, RawMultiFieldFilter, RawSingleFieldFilter}
import scala.annotation.switch
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration.DurationInt
import scala.util.Try
object JenaArqExtensionsUtils {
case class BakedSparqlQuery(qe: QueryExecution, driver: DatasetGraphCmWell)
val emptyLtrl = NodeFactory.createLiteral("")
def buildCmWellQueryExecution(query: Query,
host: String,
config: Config = Config.defaultConfig,
crudServiceFS: CRUDServiceFS,
arqCache: ArqCache,
jenaArqExtensionsUtils: JenaArqExtensionsUtils,
dataFetcher: DataFetcher)(implicit ec: ExecutionContext) = {
val driver = new DatasetGraphCmWell(host, config.copy(deadline = Some(config.finiteDuarationForDeadLine.fromNow)), crudServiceFS, arqCache, jenaArqExtensionsUtils, dataFetcher)
val model = ModelFactory.createModelForGraph(driver.getDefaultGraph)
val qe = QueryExecutionFactory.create(query, model) // todo quads
BakedSparqlQuery(qe, driver)
}
def mangleSubjectVariableNameIntoPredicate(t: Triple): Triple =
new Triple(t.getSubject, mangleSubjectVariableNameIntoPredicate(t.getSubject, t.getPredicate), t.getObject)
def mangleSubjectVariableNameIntoPredicate(key: String, t: Triple): Triple =
new Triple(t.getSubject, mangleSubjectVariableNameIntoPredicate(NodeFactory.createLiteral(key), t.getPredicate), t.getObject)
def mangleSubjectVariableNameIntoPredicate(s: Node, p: Node): Node = {
val varName = if(s.isVariable) s.getName else ""
NodeFactory.createURI(varName + manglingSeparator + p.getURI)
}
def unmanglePredicate(p: Node): (String,Node) = {
if(!p.isURI || !p.getURI.contains(manglingSeparator)) "" -> p else {
// val idxOfSep = p.getURI.indexOf(JenaArqExtensionsUtils.manglingSeparator)
// val (subVarName, sepAndPred) = p.getURI.splitAt(idxOfSep)
val (subVarName, sepAndPred) = p.getURI.span(manglingSeparator.!=)
subVarName -> NodeFactory.createURI(sepAndPred.substring(1))
}
}
def squashBySubject(triples: Seq[Triple]): Triple = {
(triples.length: @switch) match {
case 0 =>
throw new IllegalArgumentException("squash of empty list")
case 1 =>
triples.head // not squashing
case _ =>
val containerPredicate: Node = NodeFactory.createURI(engineInternalUriPrefix + triples.map { t =>
t.getPredicate.getURI.replace(cmwellInternalUriPrefix, "") + ":" + objectToQpValue(t.getObject).getOrElse("")
}.mkString("|"))
val emptyLiteral = NodeFactory.createLiteral("")
val mangledContainerPredicate = JenaArqExtensionsUtils.mangleSubjectVariableNameIntoPredicate(triples.head.getSubject, containerPredicate)
new Triple(emptyLiteral, mangledContainerPredicate, emptyLiteral)
}
}
def objectToQpValue(obj: Node) = {
if(obj.isURI)
Some(obj.getURI)
else if(obj.isLiteral)
Some(obj.getLiteral.getValue.toString)
else
None // making qp just `field:`
}
def explodeContainerPredicate(pred: Node): Seq[(String,String)] = pred
.getURI
.replace(engineInternalUriPrefix,"")
.split('|')
.map(cmwell.util.string.splitAtNoSep(_, ':'))
val cmwellInternalUriPrefix = "cmwell://meta/internal/"
val engineInternalUriPrefix = "engine://"
val manglingSeparator = '$'
val fakeQuad: Quad = {
    // by having an empty subject and an empty object, this triple will never be visible to the user, as it won't be bound to anything. Even for `select *` it's hidden
new Quad(emptyLtrl, emptyLtrl, NodeFactory.createURI(engineInternalUriPrefix + "FAKE"), emptyLtrl)
}
def isConst(node: Node) = node != Node.ANY
def queryToSseString(query: Query): String = Algebra.compile(query).toString(query.getPrefixMapping)
}
class JenaArqExtensionsUtils(arqCache: ArqCache, typesCache: PassiveFieldTypesCache, cmwellRDFHelper: CMWellRDFHelper, dataFetcher: DataFetcher) extends LazyLogging {
def predicateToInnerRepr(predicate: Node): Node = {
if(!predicate.isURI) predicate else {
val nsIdentifier = arqCache.namespaceToHash(predicate.getNameSpace)
val localNameDotHash = predicate.getLocalName + "." + nsIdentifier // no dollar sign!
NodeFactory.createURI(cmwellInternalUriPrefix + localNameDotHash)
}
}
def innerReprToPredicate(innerReprPred: Node): Node = {
if(innerReprPred.getURI.contains("/meta/sys") || innerReprPred.getURI.contains("msg://") || innerReprPred.getURI.contains(engineInternalUriPrefix+"FAKE")) innerReprPred else {
val (hash, localName) = {
val splt = innerReprPred.getURI.replace(cmwellInternalUriPrefix, "").split('#')
splt(0) -> splt(1)
}
val nsUri = arqCache.hashToNamespace(hash)
NodeFactory.createURI(nsUri + localName)
}
}
def normalizeAsOutput(q: Quad): Quad =
new Quad(q.getGraph, q.getSubject, innerReprToPredicate(q.getPredicate), q.getObject)
private def defaultAmountCounter(t: Triple, graph: Option[CmWellGraph] = None) = {
val ff = predicateToFieldFilter(t.getPredicate)(scala.concurrent.ExecutionContext.global) //TODO: don't use global, pass ec properly
val amount = dataFetcher.count(ff)
graph.collect { case g if g.dsg.config.explainOnly => g.dsg }.foreach { dsg =>
val pred = Try(unmanglePredicate(t.getPredicate)._2).toOption.getOrElse(t.getPredicate).getURI.replace(cmwellInternalUriPrefix, "")
dsg.logMsgOnce("Expl", s"Objects count for $pred: $amount")
}
amount
}
// todo add DocTest :)
def sortTriplePatternsByAmount(triplePatterns: Iterable[Triple], amountCounter: (Triple, Option[CmWellGraph]) => Long = defaultAmountCounter)
(graph: CmWellGraph): Seq[Triple] = {
def isVar(t: Triple) =
t.getSubject.isVariable || (t.getSubject.isLiteral && t.getSubject.getLiteralValue == "")
def isEngine(t: Triple) = t.getPredicate.getURI.contains(engineInternalUriPrefix)
val allSorted = Vector.newBuilder[Triple]
val actualSubBuilder = Vector.newBuilder[Triple]
val engineSubBuilder = Vector.newBuilder[Triple]
allSorted.sizeHint(triplePatterns.size)
triplePatterns.foreach { triple =>
if(!isVar(triple)) allSorted += triple // const subjects come first.
else if(isEngine(triple)) engineSubBuilder += triple
else actualSubBuilder += triple
}
val engineTriples = engineSubBuilder.result()
val actualTriples = actualSubBuilder.result()
val squashedSubjects = engineTriples.map(t => unmanglePredicate(t.getPredicate)._1).toSet
def isSquashed(t: Triple): Boolean = squashedSubjects(t.getSubject.getName)
val (squashedActualTriples, nonSquashedActualTriples) = actualTriples.partition(isSquashed)
val groupedByEngine = squashedActualTriples.groupBy(_.getSubject.getName)
(engineTriples ++ nonSquashedActualTriples).sortBy(amountCounter(_,Option(graph))).foreach { triple =>
allSorted += triple
if(isEngine(triple)) {
val tripleSubName = unmanglePredicate(triple.getPredicate)._1
allSorted ++= groupedByEngine(tripleSubName)
}
}
allSorted.result()
}
def predicateToFieldFilter(p: Node, obj: Node = emptyLtrl)(implicit ec: scala.concurrent.ExecutionContext): FieldFilter = {
val unmangled@(subVarName,pred) = unmanglePredicate(p)
logger.trace(s"unmangled = $unmangled")
def toExplodedMangledFieldFilter(qp: String, value: Option[String]) = {
val (localName, hash) = { val splt = qp.split('.'); splt(0) -> splt(1) }
RawSingleFieldFilter(Must, Equals, Right(HashedFieldKey(localName, hash)), value)
}
def evalAndAwait(rff: RawFieldFilter) = {
// def filterNot(ff: FieldFilter)(pred: SingleFieldFilter => Boolean): FieldFilter = ???
//
// def isTypeIncompatible(sff: SingleFieldFilter) =
// sff.name.startsWith("d$") && sff.value.fold(false)(FDate.isDate)
/*val evalRes =*/ Await.result(RawFieldFilter.eval(rff,typesCache,cmwellRDFHelper), 9.seconds)
// filterNot(evalRes)(isTypeIncompatible)
}
def noneIfEmpty(s: String): Option[String] = if(s.isEmpty) None else Some(s)
if(pred.getURI.contains(JenaArqExtensionsUtils.engineInternalUriPrefix)) {
val fieldFilters = JenaArqExtensionsUtils.explodeContainerPredicate(pred).map { case t@(name,value) =>
logger.trace(s"explodeContainerPredicate result = $t")
toExplodedMangledFieldFilter(name, noneIfEmpty(value))
}
evalAndAwait(RawMultiFieldFilter(Must, fieldFilters))
} else {
val value = JenaArqExtensionsUtils.objectToQpValue(obj)
if (!isConst(pred)) {
SingleFieldFilter(Must, Contains, "_all", value)
} else {
val qp = pred.getURI.replace(JenaArqExtensionsUtils.cmwellInternalUriPrefix,"")
val rff = toExplodedMangledFieldFilter(qp, value.flatMap(noneIfEmpty))
logger.trace(s"toExplodedMangledFieldFilter result = $rff")
evalAndAwait(rff)
}
}
}
}
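// A minimal sketch (illustrative only) of the predicate-mangling round trip used above: the
// subject variable name is carried inside the predicate URI and recovered by unmanglePredicate.
// The hash suffix "abc123" below is a made-up example value.
object PredicateManglingSketch {
  def roundTrip(): (String, Node) = {
    val subject = NodeFactory.createVariable("s")
    val predicate = NodeFactory.createURI(JenaArqExtensionsUtils.cmwellInternalUriPrefix + "firstName.abc123")
    val mangled = JenaArqExtensionsUtils.mangleSubjectVariableNameIntoPredicate(subject, predicate)
    JenaArqExtensionsUtils.unmanglePredicate(mangled) // expected: ("s", predicate)
  }
}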
|
nruppin/CM-Well
|
server/cmwell-ws/app/ld/query/JenaArqExtensionsUtils.scala
|
Scala
|
apache-2.0
| 9,738
|
package com.twitter.scalding.parquet.tuple.scheme
import parquet.io.api.{ Binary, Converter, GroupConverter, PrimitiveConverter }
import scala.util.Try
trait TupleFieldConverter[+T] extends Converter with Serializable {
/**
* Current value read from parquet column
*/
def currentValue: T
/**
* reset the converter state, make it ready for reading next column value.
*/
def reset(): Unit
}
/**
* Parquet tuple converter used to create user defined tuple value from parquet column values
*/
abstract class ParquetTupleConverter[T] extends GroupConverter with TupleFieldConverter[T] {
override def start(): Unit = reset()
override def end(): Unit = ()
}
/**
* Primitive fields converter
* @tparam T primitive types (String, Double, Float, Long, Int, Short, Byte, Boolean)
*/
trait PrimitiveFieldConverter[T] extends PrimitiveConverter with TupleFieldConverter[T] {
val defaultValue: T
var value: T = defaultValue
override def currentValue: T = value
override def reset(): Unit = value = defaultValue
}
class StringConverter extends PrimitiveFieldConverter[String] {
override val defaultValue: String = null
override def addBinary(binary: Binary): Unit = value = binary.toStringUsingUTF8
}
class DoubleConverter extends PrimitiveFieldConverter[Double] {
override val defaultValue: Double = 0D
override def addDouble(v: Double): Unit = value = v
}
class FloatConverter extends PrimitiveFieldConverter[Float] {
override val defaultValue: Float = 0F
override def addFloat(v: Float): Unit = value = v
}
class LongConverter extends PrimitiveFieldConverter[Long] {
override val defaultValue: Long = 0L
override def addLong(v: Long): Unit = value = v
}
class IntConverter extends PrimitiveFieldConverter[Int] {
override val defaultValue: Int = 0
override def addInt(v: Int): Unit = value = v
}
class ShortConverter extends PrimitiveFieldConverter[Short] {
override val defaultValue: Short = 0
override def addInt(v: Int): Unit = value = Try(v.toShort).getOrElse(0)
}
class ByteConverter extends PrimitiveFieldConverter[Byte] {
override val defaultValue: Byte = 0
override def addInt(v: Int): Unit = value = Try(v.toByte).getOrElse(0)
}
class BooleanConverter extends PrimitiveFieldConverter[Boolean] {
override val defaultValue: Boolean = false
override def addBoolean(v: Boolean): Unit = value = v
}
/**
* Collection field converter, such as list(Scala Option is also seen as a collection).
* @tparam T collection element type(can be primitive types or nested types)
*/
trait CollectionConverter[T] {
val child: TupleFieldConverter[T]
def appendValue(v: T): Unit
}
/**
* A wrapper of primitive converters for modeling primitive fields in a collection
* @tparam T primitive types (String, Double, Float, Long, Int, Short, Byte, Boolean)
*/
abstract class CollectionElementPrimitiveConverter[T](val parent: CollectionConverter[T]) extends PrimitiveConverter
with TupleFieldConverter[T] {
val delegate: PrimitiveFieldConverter[T]
override def addBinary(v: Binary) = {
delegate.addBinary(v)
parent.appendValue(delegate.currentValue)
}
override def addBoolean(v: Boolean) = {
delegate.addBoolean(v)
parent.appendValue(delegate.currentValue)
}
override def addDouble(v: Double) = {
delegate.addDouble(v)
parent.appendValue(delegate.currentValue)
}
override def addFloat(v: Float) = {
delegate.addFloat(v)
parent.appendValue(delegate.currentValue)
}
override def addInt(v: Int) = {
delegate.addInt(v)
parent.appendValue(delegate.currentValue)
}
override def addLong(v: Long) = {
delegate.addLong(v)
parent.appendValue(delegate.currentValue)
}
override def currentValue: T = delegate.currentValue
override def reset(): Unit = delegate.reset()
}
/**
* A wrapper of group converters for modeling group type element in a collection
* @tparam T group tuple type(can be a collection type, such as list)
*/
abstract class CollectionElementGroupConverter[T](val parent: CollectionConverter[T]) extends GroupConverter
with TupleFieldConverter[T] {
val delegate: TupleFieldConverter[T]
override def getConverter(i: Int): Converter = delegate.asGroupConverter().getConverter(i)
override def end(): Unit = {
parent.appendValue(delegate.currentValue)
delegate.asGroupConverter().end()
}
override def start(): Unit = delegate.asGroupConverter().start()
override def currentValue: T = delegate.currentValue
override def reset(): Unit = delegate.reset()
}
/**
* Option converter for modeling option field
* @tparam T option element type(can be primitive types or nested types)
*/
abstract class OptionConverter[T] extends TupleFieldConverter[Option[T]] with CollectionConverter[T] {
var value: Option[T] = None
override def appendValue(v: T): Unit = value = Option(v)
override def currentValue: Option[T] = value
override def reset(): Unit = {
value = None
child.reset()
}
override def isPrimitive: Boolean = child.isPrimitive
override def asGroupConverter: GroupConverter = child.asGroupConverter()
override def asPrimitiveConverter: PrimitiveConverter = child.asPrimitiveConverter()
}
/**
 * A list in Parquet is represented by a 3-level structure.
* Check this https://github.com/apache/incubator-parquet-format/blob/master/LogicalTypes.md
* Helper class to wrap a converter for a list group converter
*/
object ListElement {
def wrapper(child: Converter): GroupConverter = new GroupConverter() {
override def getConverter(i: Int): Converter = {
if (i != 0)
throw new IllegalArgumentException("list have only one element field. can't reach " + i)
child
}
override def end(): Unit = ()
override def start(): Unit = ()
}
}
/**
* List converter for modeling list field
* @tparam T list element type(can be primitive types or nested types)
*/
abstract class ListConverter[T] extends GroupConverter with TupleFieldConverter[List[T]] with CollectionConverter[T] {
var value: List[T] = Nil
def appendValue(v: T): Unit = value = value :+ v
lazy val listElement: GroupConverter = new GroupConverter() {
override def getConverter(i: Int): Converter = {
if (i != 0)
throw new IllegalArgumentException("lists have only one element field. can't reach " + i)
child
}
override def end(): Unit = ()
override def start(): Unit = ()
}
override def getConverter(i: Int): Converter = {
if (i != 0)
throw new IllegalArgumentException("lists have only one element field. can't reach " + i)
listElement
}
override def end(): Unit = ()
override def start(): Unit = reset()
override def currentValue: List[T] = value
override def reset(): Unit = {
value = Nil
child.reset()
}
}
/**
* Set converter for modeling set field
 * @tparam T set element type(can be primitive types or nested types)
*/
abstract class SetConverter[T] extends GroupConverter with TupleFieldConverter[Set[T]] with CollectionConverter[T] {
var value: Set[T] = Set()
def appendValue(v: T): Unit = value = value + v
  // in the backend, a set is stored as a list
lazy val listElement: GroupConverter = ListElement.wrapper(child)
override def getConverter(i: Int): Converter = {
if (i != 0)
throw new IllegalArgumentException("sets have only one element field. can't reach " + i)
listElement
}
override def end(): Unit = ()
override def start(): Unit = reset()
override def currentValue: Set[T] = value
override def reset(): Unit = {
value = Set()
child.reset()
}
}
/**
* Map converter for modeling map field
* @tparam K map key type
* @tparam V map value type
*/
abstract class MapConverter[K, V] extends GroupConverter with TupleFieldConverter[Map[K, V]] with CollectionConverter[(K, V)] {
var value: Map[K, V] = Map()
def appendValue(v: (K, V)): Unit = value = value + v
override def getConverter(i: Int): Converter = {
if (i != 0)
throw new IllegalArgumentException("maps have only one element type key_value(0). can't reach " + i)
child
}
override def end(): Unit = ()
override def start(): Unit = reset()
override def currentValue: Map[K, V] = value
override def reset(): Unit = {
value = Map()
child.reset()
}
}
abstract class MapKeyValueConverter[K, V](parent: CollectionConverter[(K, V)])
extends CollectionElementGroupConverter[(K, V)](parent) {
val keyConverter: TupleFieldConverter[K]
val valueConverter: TupleFieldConverter[V]
override lazy val delegate: TupleFieldConverter[(K, V)] = new GroupConverter with TupleFieldConverter[(K, V)] {
override def currentValue: (K, V) = (keyConverter.currentValue, valueConverter.currentValue)
override def reset(): Unit = {
keyConverter.reset()
valueConverter.reset()
}
override def getConverter(i: Int): Converter = {
if (i == 0) keyConverter
else if (i == 1) valueConverter
else throw new IllegalArgumentException("key_value has only the key (0) and value (1) fields expected: " + i)
}
override def end(): Unit = ()
override def start(): Unit = reset()
}
}
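// A minimal sketch (illustrative only): a concrete ListConverter[Int] wired from the primitive
// converters above, assuming the 3-level Parquet list layout described for ListElement.
class IntListConverter extends ListConverter[Int] { outer =>
  override val child: TupleFieldConverter[Int] =
    new CollectionElementPrimitiveConverter[Int](outer) {
      override val delegate: PrimitiveFieldConverter[Int] = new IntConverter
    }
}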
|
bendridi/scalding
|
scalding-parquet/src/main/scala/com/twitter/scalding/parquet/tuple/scheme/ParquetTupleConverter.scala
|
Scala
|
apache-2.0
| 9,198
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.encoders
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.{typeTag, TypeTag}
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.catalyst.{InternalRow, JavaTypeInference, ScalaReflection}
import org.apache.spark.sql.catalyst.analysis.{Analyzer, GetColumnByOrdinal, SimpleAnalyzer, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateSafeProjection, GenerateUnsafeProjection}
import org.apache.spark.sql.catalyst.expressions.objects.{AssertNotNull, InitializeJavaBean, Invoke, NewInstance}
import org.apache.spark.sql.catalyst.optimizer.SimplifyCasts
import org.apache.spark.sql.catalyst.plans.logical.{CatalystSerde, DeserializeToObject, LocalRelation}
import org.apache.spark.sql.types.{ObjectType, StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
* A factory for constructing encoders that convert objects and primitives to and from the
* internal row format using catalyst expressions and code generation. By default, the
* expressions used to retrieve values from an input row when producing an object will be created as
* follows:
 * - Classes will have their subfields extracted by name using [[UnresolvedAttribute]] expressions
* and [[UnresolvedExtractValue]] expressions.
* - Tuples will have their subfields extracted by position using [[BoundReference]] expressions.
* - Primitives will have their values extracted from the first ordinal with a schema that defaults
* to the name `value`.
*/
object ExpressionEncoder {
def apply[T : TypeTag](): ExpressionEncoder[T] = {
val mirror = ScalaReflection.mirror
val tpe = typeTag[T].in(mirror).tpe
if (ScalaReflection.optionOfProductType(tpe)) {
throw new UnsupportedOperationException(
"Cannot create encoder for Option of Product type, because Product type is represented " +
"as a row, and the entire row can not be null in Spark SQL like normal databases. " +
"You can wrap your type with Tuple1 if you do want top level null Product objects, " +
"e.g. instead of creating `Dataset[Option[MyClass]]`, you can do something like " +
"`val ds: Dataset[Tuple1[MyClass]] = Seq(Tuple1(MyClass(...)), Tuple1(null)).toDS`")
}
val cls = mirror.runtimeClass(tpe)
val serializer = ScalaReflection.serializerForType(tpe)
val deserializer = ScalaReflection.deserializerForType(tpe)
new ExpressionEncoder[T](
serializer,
deserializer,
ClassTag[T](cls))
}
// TODO: improve error message for java bean encoder.
def javaBean[T](beanClass: Class[T]): ExpressionEncoder[T] = {
val schema = JavaTypeInference.inferDataType(beanClass)._1
assert(schema.isInstanceOf[StructType])
val objSerializer = JavaTypeInference.serializerFor(beanClass)
val objDeserializer = JavaTypeInference.deserializerFor(beanClass)
new ExpressionEncoder[T](
objSerializer,
objDeserializer,
ClassTag[T](beanClass))
}
/**
   * Given a set of N encoders, constructs a new encoder that produces objects as items in an
* N-tuple. Note that these encoders should be unresolved so that information about
* name/positional binding is preserved.
*/
def tuple(encoders: Seq[ExpressionEncoder[_]]): ExpressionEncoder[_] = {
// TODO: check if encoders length is more than 22 and throw exception for it.
encoders.foreach(_.assertUnresolved())
val cls = Utils.getContextOrSparkClassLoader.loadClass(s"scala.Tuple${encoders.size}")
val newSerializerInput = BoundReference(0, ObjectType(cls), nullable = true)
val serializers = encoders.zipWithIndex.map { case (enc, index) =>
val boundRefs = enc.objSerializer.collect { case b: BoundReference => b }.distinct
assert(boundRefs.size == 1, "object serializer should have only one bound reference but " +
s"there are ${boundRefs.size}")
val originalInputObject = boundRefs.head
val newInputObject = Invoke(
newSerializerInput,
s"_${index + 1}",
originalInputObject.dataType,
returnNullable = originalInputObject.nullable)
val newSerializer = enc.objSerializer.transformUp {
case BoundReference(0, _, _) => newInputObject
}
Alias(newSerializer, s"_${index + 1}")()
}
val newSerializer = CreateStruct(serializers)
val newDeserializerInput = GetColumnByOrdinal(0, newSerializer.dataType)
val deserializers = encoders.zipWithIndex.map { case (enc, index) =>
val getColExprs = enc.objDeserializer.collect { case c: GetColumnByOrdinal => c }.distinct
assert(getColExprs.size == 1, "object deserializer should have only one " +
s"`GetColumnByOrdinal`, but there are ${getColExprs.size}")
val input = GetStructField(newDeserializerInput, index)
enc.objDeserializer.transformUp {
case GetColumnByOrdinal(0, _) => input
}
}
val newDeserializer = NewInstance(cls, deserializers, ObjectType(cls), propagateNull = false)
def nullSafe(input: Expression, result: Expression): Expression = {
If(IsNull(input), Literal.create(null, result.dataType), result)
}
new ExpressionEncoder[Any](
nullSafe(newSerializerInput, newSerializer),
nullSafe(newDeserializerInput, newDeserializer),
ClassTag(cls))
}
// Tuple1
def tuple[T](e: ExpressionEncoder[T]): ExpressionEncoder[Tuple1[T]] =
tuple(Seq(e)).asInstanceOf[ExpressionEncoder[Tuple1[T]]]
def tuple[T1, T2](
e1: ExpressionEncoder[T1],
e2: ExpressionEncoder[T2]): ExpressionEncoder[(T1, T2)] =
tuple(Seq(e1, e2)).asInstanceOf[ExpressionEncoder[(T1, T2)]]
def tuple[T1, T2, T3](
e1: ExpressionEncoder[T1],
e2: ExpressionEncoder[T2],
e3: ExpressionEncoder[T3]): ExpressionEncoder[(T1, T2, T3)] =
tuple(Seq(e1, e2, e3)).asInstanceOf[ExpressionEncoder[(T1, T2, T3)]]
def tuple[T1, T2, T3, T4](
e1: ExpressionEncoder[T1],
e2: ExpressionEncoder[T2],
e3: ExpressionEncoder[T3],
e4: ExpressionEncoder[T4]): ExpressionEncoder[(T1, T2, T3, T4)] =
tuple(Seq(e1, e2, e3, e4)).asInstanceOf[ExpressionEncoder[(T1, T2, T3, T4)]]
def tuple[T1, T2, T3, T4, T5](
e1: ExpressionEncoder[T1],
e2: ExpressionEncoder[T2],
e3: ExpressionEncoder[T3],
e4: ExpressionEncoder[T4],
e5: ExpressionEncoder[T5]): ExpressionEncoder[(T1, T2, T3, T4, T5)] =
tuple(Seq(e1, e2, e3, e4, e5)).asInstanceOf[ExpressionEncoder[(T1, T2, T3, T4, T5)]]
}
/**
* A generic encoder for JVM objects that uses Catalyst Expressions for a `serializer`
* and a `deserializer`.
*
* @param objSerializer An expression that can be used to encode a raw object to corresponding
* Spark SQL representation that can be a primitive column, array, map or a
* struct. This represents how Spark SQL generally serializes an object of
* type `T`.
* @param objDeserializer An expression that will construct an object given a Spark SQL
* representation. This represents how Spark SQL generally deserializes
* a serialized value in Spark SQL representation back to an object of
* type `T`.
* @param clsTag A classtag for `T`.
*/
case class ExpressionEncoder[T](
objSerializer: Expression,
objDeserializer: Expression,
clsTag: ClassTag[T])
extends Encoder[T] {
/**
* A sequence of expressions, one for each top-level field that can be used to
* extract the values from a raw object into an [[InternalRow]]:
* 1. If `serializer` encodes a raw object to a struct, strip the outer If-IsNull and get
* the `CreateNamedStruct`.
* 2. For other cases, wrap the single serializer with `CreateNamedStruct`.
*/
val serializer: Seq[NamedExpression] = {
val clsName = Utils.getSimpleName(clsTag.runtimeClass)
if (isSerializedAsStruct) {
val nullSafeSerializer = objSerializer.transformUp {
case r: BoundReference =>
          // For an input object of Product type, we can't encode it to a row if it's null, as
          // Spark SQL doesn't allow the top-level row to be null; only its columns can be null.
AssertNotNull(r, Seq("top level Product or row object"))
}
nullSafeSerializer match {
case If(_: IsNull, _, s: CreateNamedStruct) => s
case _ =>
throw new RuntimeException(s"class $clsName has unexpected serializer: $objSerializer")
}
} else {
      // For other input objects like primitives, arrays, maps, etc., we construct a struct to
      // wrap the serializer, which is a column of a row.
CreateNamedStruct(Literal("value") :: objSerializer :: Nil)
}
}.flatten
/**
   * Returns an expression that can be used to deserialize an input row to an object of type `T`
   * with a compatible schema. Fields of the row will be extracted using `UnresolvedAttribute`
   * expressions of the same name as the constructor arguments.
*
   * For complex objects that are encoded to structs, fields of the struct will be extracted using
* `GetColumnByOrdinal` with corresponding ordinal.
*/
val deserializer: Expression = {
if (isSerializedAsStruct) {
      // We serialize this kind of object to a root-level row. The input of the general
      // deserializer is a `GetColumnByOrdinal(0)` expression that extracts the first column of a
      // row. We need to transform the attribute accessors.
objDeserializer.transform {
case UnresolvedExtractValue(GetColumnByOrdinal(0, _),
Literal(part: UTF8String, StringType)) =>
UnresolvedAttribute.quoted(part.toString)
case GetStructField(GetColumnByOrdinal(0, dt), ordinal, _) =>
GetColumnByOrdinal(ordinal, dt)
case If(IsNull(GetColumnByOrdinal(0, _)), _, n: NewInstance) => n
case If(IsNull(GetColumnByOrdinal(0, _)), _, i: InitializeJavaBean) => i
}
} else {
// For other input objects like primitive, array, map, etc., we deserialize the first column
// of a row to the object.
objDeserializer
}
}
  // The schema after converting `T` to a Spark SQL row. This schema is dependent on the given
  // serializer.
val schema: StructType = StructType(serializer.map { s =>
StructField(s.name, s.dataType, s.nullable)
})
/**
* Returns true if the type `T` is serialized as a struct.
*/
def isSerializedAsStruct: Boolean = objSerializer.dataType.isInstanceOf[StructType]
// serializer expressions are used to encode an object to a row, while the object is usually an
// intermediate value produced inside an operator, not from the output of the child operator. This
// is quite different from normal expressions, and `AttributeReference` doesn't work here
// (intermediate value is not an attribute). We assume that all serializer expressions use the
// same `BoundReference` to refer to the object, and throw exception if they don't.
assert(serializer.forall(_.references.isEmpty), "serializer cannot reference any attributes.")
assert(serializer.flatMap { ser =>
val boundRefs = ser.collect { case b: BoundReference => b }
assert(boundRefs.nonEmpty,
"each serializer expression should contain at least one `BoundReference`")
boundRefs
}.distinct.length <= 1, "all serializer expressions must use the same BoundReference.")
/**
* Returns a new copy of this encoder, where the `deserializer` is resolved and bound to the
* given schema.
*
   * Note that, ideally, the encoder is used as a container of serde expressions, and the
   * resolution and binding should happen inside the query framework. However, in some cases we
   * need to use the encoder as a function to do serialization directly (e.g. Dataset.collect),
   * and then we can use this method to do resolution and binding outside of the query framework.
*/
def resolveAndBind(
attrs: Seq[Attribute] = schema.toAttributes,
analyzer: Analyzer = SimpleAnalyzer): ExpressionEncoder[T] = {
val dummyPlan = CatalystSerde.deserialize(LocalRelation(attrs))(this)
val analyzedPlan = analyzer.execute(dummyPlan)
analyzer.checkAnalysis(analyzedPlan)
val resolved = SimplifyCasts(analyzedPlan).asInstanceOf[DeserializeToObject].deserializer
val bound = BindReferences.bindReference(resolved, attrs)
copy(objDeserializer = bound)
}
@transient
private lazy val extractProjection = GenerateUnsafeProjection.generate(serializer)
@transient
private lazy val inputRow = new GenericInternalRow(1)
@transient
private lazy val constructProjection = GenerateSafeProjection.generate(deserializer :: Nil)
/**
* Returns a new set (with unique ids) of [[NamedExpression]] that represent the serialized form
* of this object.
*/
def namedExpressions: Seq[NamedExpression] = schema.map(_.name).zip(serializer).map {
case (_, ne: NamedExpression) => ne.newInstance()
case (name, e) => Alias(e, name)()
}
/**
* Returns an encoded version of `t` as a Spark SQL row. Note that multiple calls to
* toRow are allowed to return the same actual [[InternalRow]] object. Thus, the caller should
* copy the result before making another call if required.
*/
def toRow(t: T): InternalRow = try {
inputRow(0) = t
extractProjection(inputRow)
} catch {
case e: Exception =>
throw new RuntimeException(
s"Error while encoding: $e\\n${serializer.map(_.simpleString).mkString("\\n")}", e)
}
/**
* Returns an object of type `T`, extracting the required values from the provided row. Note that
* you must `resolveAndBind` an encoder to a specific schema before you can call this
* function.
*/
def fromRow(row: InternalRow): T = try {
constructProjection(row).get(0, ObjectType(clsTag.runtimeClass)).asInstanceOf[T]
} catch {
case e: Exception =>
throw new RuntimeException(s"Error while decoding: $e\\n${deserializer.simpleString}", e)
}
/**
* The process of resolution to a given schema throws away information about where a given field
* is being bound by ordinal instead of by name. This method checks to make sure this process
* has not been done already in places where we plan to do later composition of encoders.
*/
def assertUnresolved(): Unit = {
(deserializer +: serializer).foreach(_.foreach {
case a: AttributeReference if a.name != "loopVar" =>
sys.error(s"Unresolved encoder expected, but $a was found.")
case _ =>
})
}
protected val attrs = serializer.flatMap(_.collect {
case _: UnresolvedAttribute => ""
case a: Attribute => s"#${a.exprId}"
case b: BoundReference => s"[${b.ordinal}]"
})
protected val schemaString =
schema
.zip(attrs)
.map { case(f, a) => s"${f.name}$a: ${f.dataType.simpleString}"}.mkString(", ")
override def toString: String = s"class[$schemaString]"
}
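// A minimal usage sketch (illustrative only): round-trip a tuple through the encoder using
// toRow, resolveAndBind and fromRow, as documented above.
object ExpressionEncoderUsageSketch {
  def roundTrip(): (Int, String) = {
    val enc = ExpressionEncoder[(Int, String)]()
    val row = enc.toRow((1, "a"))     // object -> InternalRow (the returned row may be reused)
    enc.resolveAndBind().fromRow(row) // InternalRow -> object, after binding to the encoder's schema
  }
}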
|
ahnqirage/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
|
Scala
|
apache-2.0
| 15,898
|
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.bson
import java.util.Date
import scala.annotation.implicitNotFound
import scala.collection.JavaConverters._
import scala.util.matching.Regex
import org.mongodb.scala.bson.collection.immutable.{ Document => IDocument }
import org.mongodb.scala.bson.collection.mutable.{ Document => MDocument }
/**
* BsonTransformers allow the transformation of type `T` to their corresponding [[BsonValue]].
*
 * Custom implementations can be written to implicitly convert a `T` into a [[BsonValue]] so it can be stored in a `Document`.
*
* @tparam T the type of value to be transformed into a [[BsonValue]].
* @since 1.0
*/
@implicitNotFound("No bson implicit transformer found for type ${T}. Implement or import an implicit BsonTransformer for this type.")
trait BsonTransformer[-T] {
/**
* Convert the object into a [[BsonValue]]
*/
def apply(value: T): BsonValue
}
/**
* Maps the following native scala types to BsonValues:
*
* - `BsonValue => BsonValue`
 * - `BigDecimal => BsonDecimal128`
* - `Boolean => BsonBoolean`
* - `String => BsonString`
* - `Array[Byte] => BsonBinary`
* - `Regex => BsonRegex`
* - `Date => BsonDateTime`
 * - `Decimal128 => BsonDecimal128`
* - `ObjectId => BsonObjectId`
* - `Int => BsonInt32`
* - `Long => BsonInt64`
* - `Double => BsonDouble`
* - `None => BsonNull`
* - `immutable.Document => BsonDocument`
* - `mutable.Document => BsonDocument`
* - `Option[T] => BsonValue` where `T` is one of the above types
* - `Seq[(String, T)] => BsonDocument` where `T` is one of the above types
* - `Seq[T] => BsonArray` where `T` is one of the above types
*/
object BsonTransformer extends DefaultBsonTransformers {}
/**
* Default BsonTransformers for native types.
*/
trait DefaultBsonTransformers extends LowPrio {
/**
* Noop transformer for `BsonValue`s
*/
implicit object TransformBsonValue extends BsonTransformer[BsonValue] {
def apply(value: BsonValue): BsonValue = value
}
/**
* Transforms `BigDecimal` to `BsonDecimal128`
*/
implicit object TransformBigDecimal extends BsonTransformer[BigDecimal] {
def apply(value: BigDecimal): BsonDecimal128 = BsonDecimal128(value)
}
/**
* Transforms `Boolean` to `BsonBoolean`
*/
implicit object TransformBoolean extends BsonTransformer[Boolean] {
def apply(value: Boolean): BsonBoolean = BsonBoolean(value)
}
/**
* Transforms `String` to `BsonString`
*/
implicit object TransformString extends BsonTransformer[String] {
def apply(value: String): BsonString = BsonString(value)
}
/**
* Transforms `Array[Byte]` to `BsonBinary`
*/
implicit object TransformBinary extends BsonTransformer[Array[Byte]] {
def apply(value: Array[Byte]): BsonBinary = BsonBinary(value)
}
/**
* Transforms `Regex` to `BsonRegex`
*/
implicit object TransformRegex extends BsonTransformer[Regex] {
def apply(value: Regex): BsonRegularExpression = BsonRegularExpression(value)
}
/**
* Transforms `Date` to `BsonDateTime`
*/
implicit object TransformDateTime extends BsonTransformer[Date] {
def apply(value: Date): BsonDateTime = BsonDateTime(value)
}
/**
* Transforms `Decimal128` to `BsonDecimal128`
*/
implicit object TransformDecimal128 extends BsonTransformer[Decimal128] {
def apply(value: Decimal128): BsonDecimal128 = BsonDecimal128(value)
}
/**
* Transforms `ObjectId` to `BsonObjectId`
*/
implicit object TransformObjectId extends BsonTransformer[ObjectId] {
def apply(value: ObjectId): BsonObjectId = BsonObjectId(value)
}
/**
* Transforms `Int` to `BsonInt32`
*/
implicit object TransformInt extends BsonTransformer[Int] {
def apply(value: Int): BsonInt32 = BsonInt32(value)
}
/**
* Transforms `Long` to `BsonInt64`
*/
implicit object TransformLong extends BsonTransformer[Long] {
def apply(value: Long): BsonInt64 = BsonInt64(value)
}
/**
* Transforms `Double` to `BsonDouble`
*/
implicit object TransformDouble extends BsonTransformer[Double] {
def apply(value: Double): BsonDouble = BsonDouble(value)
}
/**
* Transforms `None` to `BsonNull`
*/
implicit object TransformNone extends BsonTransformer[Option[Nothing]] {
def apply(value: Option[Nothing]): BsonNull = BsonNull()
}
/**
* Transforms `Option[T]` to `BsonValue`
*/
implicit def transformOption[T](implicit transformer: BsonTransformer[T]): BsonTransformer[Option[T]] = {
new BsonTransformer[Option[T]] {
def apply(value: Option[T]): BsonValue = value match {
case Some(transformable) => transformer(transformable)
case None => BsonNull()
}
}
}
}
trait LowPrio {
/**
* Transforms `immutable.Document` to `BsonDocument`
*/
implicit object TransformImmutableDocument extends BsonTransformer[IDocument] {
def apply(value: IDocument): BsonDocument = value.toBsonDocument
}
/**
* Transforms `mutable.Document` to `BsonDocument`
*/
implicit object TransformMutableDocument extends BsonTransformer[MDocument] {
def apply(value: MDocument): BsonDocument = value.underlying
}
/**
* Transforms `Seq[(String, T)]` to `BsonDocument`
*
* @param transformer implicit transformer for type `T`
* @tparam T the type of the values
* @return a BsonDocument containing the values
*/
implicit def transformKeyValuePairs[T](implicit transformer: BsonTransformer[T]): BsonTransformer[Seq[(String, T)]] = {
new BsonTransformer[Seq[(String, T)]] {
def apply(values: Seq[(String, T)]): BsonDocument = {
BsonDocument(values.map(kv => (kv._1, transformer(kv._2))).toList)
}
}
}
/**
* Transforms `Seq[T]` to `BsonArray`
*
* @param transformer implicit transformer for type `T`
* @tparam T the type of the values
* @return a BsonArray containing all the values
*/
implicit def transformSeq[T](implicit transformer: BsonTransformer[T]): BsonTransformer[Seq[T]] = {
new BsonTransformer[Seq[T]] {
def apply(values: Seq[T]): BsonValue = {
new BsonArray(values.map(transformer.apply).toList.asJava)
}
}
}
}
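// A minimal usage sketch (illustrative only): the implicit transformers above turn plain Scala
// values into BsonValues; here a Seq of key/value pairs becomes a BsonDocument via
// transformKeyValuePairs and TransformInt.
object BsonTransformerUsageSketch {
  private def toBson[T](value: T)(implicit transformer: BsonTransformer[T]): BsonValue = transformer(value)

  def example(): BsonValue = toBson(Seq("a" -> 1, "b" -> 2))
}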
|
rozza/mongo-scala-driver
|
bson/src/main/scala/org/mongodb/scala/bson/BsonTransformer.scala
|
Scala
|
apache-2.0
| 6,776
|
package com.twitter.finagle.mysql.codec
import com.twitter.finagle.mysql.protocol.{Buffer, Packet}
import org.jboss.netty.channel.{Channel, ChannelHandlerContext}
import org.jboss.netty.buffer.ChannelBuffers
import org.specs.mock.Mockito
import org.specs.SpecificationWithJUnit
class PacketFrameDecoderSpec extends SpecificationWithJUnit with Mockito {
val ctx = mock[ChannelHandlerContext]
val c = mock[Channel]
def makeBuffer(packet: Packet) = {
val header = packet.header.toChannelBuffer
val body = Buffer.toChannelBuffer(packet.body)
ChannelBuffers.wrappedBuffer(header, body)
}
"PacketFrameDecoder" should {
val frameDecoder = new PacketFrameDecoder
val partial = Packet(5, 0, Array[Byte](0x00, 0x01))
"ignore incomplete packets" in {
frameDecoder.decode(ctx, c, makeBuffer(partial)) must beNull
}
val complete = Packet(5, 0, Array[Byte](0x01, 0x01, 0x02, 0x03, 0x04))
"decode complete packets" in {
val result = frameDecoder.decode(ctx, c, makeBuffer(complete))
result.header.size mustEqual complete.header.size
result.header.seq mustEqual complete.header.seq
result.body.toList must containAll(complete.body.toList)
}
}
}
|
foursquare/finagle
|
finagle-mysql/src/test/scala/com/twitter/finagle/mysql/unit/codec/PacketFrameDecoderSpec.scala
|
Scala
|
apache-2.0
| 1,218
|
package forcomp
import common._
object Anagrams {
/** A word is simply a `String`. */
type Word = String
/** A sentence is a `List` of words. */
type Sentence = List[Word]
/** `Occurrences` is a `List` of pairs of characters and positive integers saying
* how often the character appears.
   * This list is sorted alphabetically w.r.t. the character in each pair.
* All characters in the occurrence list are lowercase.
*
* Any list of pairs of lowercase characters and their frequency which is not sorted
* is **not** an occurrence list.
*
* Note: If the frequency of some character is zero, then that character should not be
* in the list.
*/
type Occurrences = List[(Char, Int)]
/** The dictionary is simply a sequence of words.
* It is predefined and obtained as a sequence using the utility method `loadDictionary`.
*/
val dictionary: List[Word] = loadDictionary
  /** Converts the word into its character occurrence list.
*
* Note: the uppercase and lowercase version of the character are treated as the
* same character, and are represented as a lowercase character in the occurrence list.
*/
def wordOccurrences(w: Word): Occurrences =
{
val wc: Array[Char] = w.toLowerCase().toCharArray()
val wcMap = wc.map(c => (c, 1)).groupBy(c => c._1).map(entry => (entry._1, entry._2.length))
wcMap.toList.sortBy(_._1)
}
/** Converts a sentence into its character occurrence list. */
def sentenceOccurrences(s: Sentence): Occurrences =
{
    val bigStr = s.mkString  // mkString (rather than reduce) also handles the empty sentence
wordOccurrences(bigStr)
}
/** The `dictionaryByOccurrences` is a `Map` from different occurrences to a sequence of all
* the words that have that occurrence count.
* This map serves as an easy way to obtain all the anagrams of a word given its occurrence list.
*
* For example, the word "eat" has the following character occurrence list:
*
* `List(('a', 1), ('e', 1), ('t', 1))`
*
* Incidentally, so do the words "ate" and "tea".
*
* This means that the `dictionaryByOccurrences` map will contain an entry:
*
* List(('a', 1), ('e', 1), ('t', 1)) -> Seq("ate", "eat", "tea")
*
*/
lazy val dictionaryByOccurrences: Map[Occurrences, List[Word]] =
{
val ans = Map[Occurrences, List[Word]]()
dictionaryByOccurrencesAcc(dictionary, ans)
}
def dictionaryByOccurrencesAcc(dict: List[Word], acc: Map[Occurrences, List[Word]]): Map[Occurrences, List[Word]] =
{
if(dict.isEmpty) acc
else
{
val occ = wordOccurrences(dict.head)
if(acc.contains(occ))
dictionaryByOccurrencesAcc(dict.tail, acc.updated(occ, dict.head :: acc(occ)))
else
dictionaryByOccurrencesAcc(dict.tail, acc + (occ -> List(dict.head)))
}
}
/** Returns all the anagrams of a given word. */
def wordAnagrams(word: Word): List[Word] =
{
val keyOcc = wordOccurrences(word)
dictionaryByOccurrences(keyOcc)
}
/** Returns the list of all subsets of the occurrence list.
   * This includes the occurrence list itself, i.e. `List(('k', 1), ('o', 1))`
   * is a subset of `List(('k', 1), ('o', 1))`.
   * It also includes the empty subset `List()`.
*
* Example: the subsets of the occurrence list `List(('a', 2), ('b', 2))` are:
*
* List(
* List(),
* List(('a', 1)),
* List(('a', 2)),
* List(('b', 1)),
* List(('a', 1), ('b', 1)),
* List(('a', 2), ('b', 1)),
* List(('b', 2)),
* List(('a', 1), ('b', 2)),
* List(('a', 2), ('b', 2))
* )
*
* Note that the order of the occurrence list subsets does not matter -- the subsets
* in the example above could have been displayed in some other order.
*/
  def combinations(occurrences: Occurrences): List[Occurrences] =
  {
    // For each character we may keep 0..n occurrences; keeping 0 means the character is simply
    // absent from the subset (occurrence lists never contain zero entries).
    def getList(pair: (Char, Int)): List[List[(Char, Int)]] =
    {
      val retList = for(i <- 0 to pair._2) yield (if(i == 0) Nil else List((pair._1, i)))
      retList.toList
    }
    if(occurrences.isEmpty) List(List())
    else
    {
      for{
        first <- getList(occurrences.head)
        tail <- combinations(occurrences.tail)
      } yield first ::: tail
    }
  }
/** Subtracts occurrence list `y` from occurrence list `x`.
*
* The precondition is that the occurrence list `y` is a subset of
* the occurrence list `x` -- any character appearing in `y` must
* appear in `x`, and its frequency in `y` must be smaller or equal
* than its frequency in `x`.
*
* Note: the resulting value is an occurrence - meaning it is sorted
* and has no zero-entries.
*/
def subtract(x: Occurrences, y: Occurrences): Occurrences = ???
/** Returns a list of all anagram sentences of the given sentence.
*
* An anagram of a sentence is formed by taking the occurrences of all the characters of
* all the words in the sentence, and producing all possible combinations of words with those characters,
* such that the words have to be from the dictionary.
*
* The number of words in the sentence and its anagrams does not have to correspond.
* For example, the sentence `List("I", "love", "you")` is an anagram of the sentence `List("You", "olive")`.
*
* Also, two sentences with the same words but in a different order are considered two different anagrams.
* For example, sentences `List("You", "olive")` and `List("olive", "you")` are different anagrams of
* `List("I", "love", "you")`.
*
* Here is a full example of a sentence `List("Yes", "man")` and its anagrams for our dictionary:
*
* List(
* List(en, as, my),
* List(en, my, as),
* List(man, yes),
* List(men, say),
* List(as, en, my),
* List(as, my, en),
* List(sane, my),
* List(Sean, my),
* List(my, en, as),
* List(my, as, en),
* List(my, sane),
* List(my, Sean),
* List(say, men),
* List(yes, man)
* )
*
* The different sentences do not have to be output in the order shown above - any order is fine as long as
* all the anagrams are there. Every returned word has to exist in the dictionary.
*
* Note: in case that the words of the sentence are in the dictionary, then the sentence is the anagram of itself,
* so it has to be returned in this list.
*
* Note: There is only one anagram of an empty sentence.
*/
def sentenceAnagrams(sentence: Sentence): List[Sentence] = ???
}
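// A minimal sketch (illustrative only) of one way `subtract` could be implemented: fold `y` into
// a map built from `x`, dropping zero entries and re-sorting, as the scaladoc above requires.
object AnagramsSubtractSketch {
  import Anagrams._

  def subtractSketch(x: Occurrences, y: Occurrences): Occurrences = {
    val reduced = y.foldLeft(x.toMap) { case (acc, (ch, n)) =>
      val remaining = acc.getOrElse(ch, 0) - n
      if (remaining > 0) acc.updated(ch, remaining) else acc - ch
    }
    reduced.toList.sortBy(_._1)
  }
}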
|
WangTaoTheTonic/Functional-Programming-Principles-in-Scala
|
code/forcomp/src/main/scala/forcomp/Anagrams.scala
|
Scala
|
apache-2.0
| 6,559
|
/*
* Copyright 2014-2022 Jarek Sacha (jpsacha -at- gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.ij_plugins.sf.sbt.install4j
import sbt.Keys.TaskStreams
import java.io.File
object Defaults {
val INSTALL4J_HOME_ENV = "INSTALL4J_HOME"
val INSTALL4JC_FILE_ENV = "INSTALL4JC_FILE"
def install4jHomeDir(taskStreams: Option[TaskStreams] = None): String = {
val logger = taskStreams.map(_.log)
val install4JHomeEnv = System.getProperty(INSTALL4J_HOME_ENV, null)
logger.foreach(_.debug(s"INSTALL4JC_FILE_ENV: $install4JHomeEnv"))
// First check for INSTALL4J_HOME, and if available use that
Option(install4JHomeEnv) match {
case Some(s) => s
case _ =>
val osName = System.getProperty("os.name")
if (osName.startsWith("Windows"))
"C:/Program Files/install4j9"
else if (osName.equals("Linux"))
"/opt/install4j9"
else if (osName.equals("Mac OS X"))
"/Applications/install4j.app/Contents/Resources/app"
else
throw new UnsupportedOperationException(
"Cannot determine default 'Install4jHomeDir'. Unsupported OS: " + osName
)
}
}
def install4jCompilerFile(
install4jHomeDir: String = Defaults.install4jHomeDir(),
taskStreams: Option[TaskStreams] = None
): File = {
val logger = taskStreams.map(_.log)
val installJCFileEnv = System.getProperty(INSTALL4JC_FILE_ENV, null)
logger.foreach(_.debug(s"INSTALL4JC_FILE_ENV: $installJCFileEnv"))
    // First check for INSTALL4JC_FILE, and if available use that
Option(installJCFileEnv) match {
case Some(s) => new File(s)
case _ => new File(install4jHomeDir, "bin/" + compilerName())
}
}
def compilerName(): String = {
if (System.getProperty("os.name").startsWith("Windows"))
"install4jc.exe"
else
"install4jc"
}
}
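// A minimal usage sketch (illustrative only): resolve the install4j compiler path using the
// defaults above; the INSTALL4J_HOME / INSTALL4JC_FILE system-property overrides still apply.
object DefaultsUsageSketch {
  def compilerPath(): String = Defaults.install4jCompilerFile().getAbsolutePath
}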
|
jpsacha/sbt-install4j
|
src/main/scala/net/ij_plugins/sf/sbt/install4j/Defaults.scala
|
Scala
|
apache-2.0
| 2,420
|
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
object RoundRobin {
def apply[T](values: IndexedSeq[T]): Iterator[T] = values.length match {
case 0 => Iterator.empty
case 1 =>
new Iterator[T] {
val hasNext = true
val next = values(0)
}
case _ =>
new Iterator[T] {
val counter = new CyclicCounter(values.length)
val hasNext = true
def next() = values(counter.nextVal)
}
}
}
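// Hedged usage sketch (not part of the original Gatling sources): the iterators above never
// terminate, so a finite sample has to be taken explicitly.
object RoundRobinSketch {
  def main(args: Array[String]): Unit =
    // prints List(a, b, c, a, b, c, a)
    println(RoundRobin(Vector("a", "b", "c")).take(7).toList)
}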
|
wiacekm/gatling
|
gatling-commons/src/main/scala/io/gatling/commons/util/RoundRobin.scala
|
Scala
|
apache-2.0
| 1,057
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.dataset.segmentation
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import scala.collection.mutable.ArrayBuffer
abstract class SegmentationMasks extends Serializable {
/**
* Convert to a RLE encoded masks
*/
def toRLE: RLEMasks
/**
* Get the height and width
*/
def size: (Int, Int)
}
/**
* A mask of regions defined by one or more polygons. The masked object(s) should have the same
* label.
* @param poly An array of polygons. The inner array defines one polygon, with [x1,y1,x2,y2,...]
* @param height the height of the image
* @param width the width of the image
*/
class PolyMasks(val poly: Array[Array[Float]], val height: Int, val width: Int) extends
SegmentationMasks {
override def toRLE: RLEMasks = {
require(height > 0 && width > 0, "the height and width must be > 0 for toRLE")
MaskUtils.mergeRLEs(MaskUtils.poly2RLE(this, height, width), false)
}
/**
* Get the height and width
*/
override def size: (Int, Int) = (height, width)
}
object PolyMasks {
def apply(poly: Array[Array[Float]], height: Int, width: Int): PolyMasks =
new PolyMasks(poly, height, width)
}
/**
* A mask of regions defined by RLE. The masked object(s) should have the same label.
* This class corresponds to "uncompressed RLE" of COCO dataset.
 * RLE is a compact format for binary masks. A binary mask defines the region by assigning a boolean
 * to every pixel of the image. RLE compresses the binary mask by instead recording the runs of
 * trues and falses in the binary mask. RLE is an array of integers.
 * The first element is the length of the run of falses starting from the first pixel.
 * The second element of RLE is the length of the first run of trues.
* e.g. binary masks: 00001110000011
* RLE: ---4--3----5-2 ====> 4,3,5,2
*
* Also note that we don't use COCO's "compact" RLE string here because this RLE class has better
* time & space performance.
*
* @param counts the RLE counts
* @param height height of the image
* @param width width of the image
*/
class RLEMasks(val counts: Array[Int], val height: Int, val width: Int)
extends SegmentationMasks {
override def toRLE: RLEMasks = this
override def size: (Int, Int) = (height, width)
// cached bbox value
@transient
lazy val bbox: (Float, Float, Float, Float) = MaskUtils.rleToOneBbox(this)
// cached area value
@transient
lazy val area: Long = MaskUtils.rleArea(this)
/**
* Get an element in the counts. Process the overflowed int
*
* @param idx
* @return
*/
def get(idx: Int): Long = {
MaskUtils.uint2long(counts(idx))
}
override def equals(obj: Any): Boolean = {
if (obj == null) {
return false
}
if (!obj.isInstanceOf[RLEMasks]) {
return false
}
val other = obj.asInstanceOf[RLEMasks]
if (this.eq(other)) {
return true
}
this.counts.deep == other.counts.deep &&
this.height == other.height &&
this.width == other.width
}
override def hashCode() : Int = {
val seed = 37
var hash = 1
hash = hash * seed + height
hash = hash * seed + width
this.counts.foreach(key => {
hash = hash * seed + key.hashCode()
})
hash
}
}
object RLEMasks {
def apply(counts: Array[Int], height: Int, width: Int): RLEMasks =
new RLEMasks(counts, height, width)
}
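// Hedged sketch (not part of the original BigDL sources): a minimal, self-contained illustration
// of the uncompressed RLE layout described above. It recovers the run lengths 4,3,5,2 from the
// binary-mask string "00001110000011" used in the class comment, without touching RLEMasks itself.
object RleLayoutSketch {
  def runLengths(mask: String): List[Int] = {
    // group consecutive equal pixels and record each run's length
    val runs = mask.foldLeft(List.empty[(Char, Int)]) {
      case ((c, n) :: tail, ch) if ch == c => (c, n + 1) :: tail
      case (acc, ch)                       => (ch, 1) :: acc
    }.reverse.map(_._2)
    // by convention the first count is the leading run of 0s, so it may be 0
    if (mask.headOption.contains('1')) 0 :: runs else runs
  }

  def main(args: Array[String]): Unit =
    println(runLengths("00001110000011")) // List(4, 3, 5, 2)
}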
object MaskUtils {
/**
* Convert an unsigned int to long (note: int may overflow)
*
* @param i
* @return
*/
def uint2long(i: Int): Long = {
if (i >= 0) {
i
} else {
i.toLong - Int.MinValue.toLong + Int.MaxValue.toLong + 1
}
}
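  // Hedged example (not part of the original sources): reinterpreting the two's-complement bit
  // pattern, uint2long(-1) == 4294967295L while non-negative ints pass through unchanged.
  private[segmentation] def uint2longExampleSketch(): Boolean =
    uint2long(-1) == 4294967295L && uint2long(Int.MaxValue) == Int.MaxValue.toLong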
/**
* Convert "uncompressed" RLE to "compact" RLE string of COCO
* Implementation based on COCO's MaskApi.c
* @param rle
* @return RLE string
*/
// scalastyle:off methodName
def RLE2String(rle: RLEMasks): String = {
// Similar to LEB128 but using 6 bits/char and ascii chars 48-111.
val m = rle.counts.length
val s = new ArrayBuffer[Char]()
for (i <- 0 until m) {
var x = rle.get(i)
if (i > 2) x -= rle.get(i - 2)
var more = true
while (more) {
var c = (x & 0x1f)
x >>= 5
more = if ((c & 0x10) != 0) x != -1 else x != 0
if (more) c |= 0x20
c += 48
s += c.toChar
}
}
new String(s.toArray)
}
// scalastyle:on methodName
/**
* Convert "compact" RLE string of COCO to "uncompressed" RLE
* Implementation based on COCO's MaskApi.c
* @param s the RLE string
* @param h height of the image
* @param w width of the image
 * @return the decoded (uncompressed) RLE masks
*/
def string2RLE(s: String, h: Int, w: Int): RLEMasks = {
val cnts = new ArrayBuffer[Int]()
var m = 0
var p = 0
while (p < s.length) {
var x = 0L
var k = 0
var more = true
while (more) {
val c = s(p).toLong - 48
x |= (c & 0x1f) << (5 * k)
more = (c & 0x20) != 0
k += 1
p += 1
if (!more && (c & 0x10) != 0) x |= -1 << (5 * k)
}
if (m > 2) x += uint2long(cnts(m - 2))
cnts += x.toInt
m += 1
}
RLEMasks(cnts.toArray, h, w)
}
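  // Hedged round-trip sketch (not part of the original sources): the compact string form and the
  // uncompressed counts describe the same mask, so encoding and then decoding should be lossless.
  private[segmentation] def rleStringRoundTripSketch(): Boolean = {
    val rle = RLEMasks(Array(4, 3, 5, 2), height = 14, width = 1)
    val decoded = string2RLE(RLE2String(rle), 14, 1)
    decoded.counts.sameElements(rle.counts)
  }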
/**
* Convert a PolyMasks to an array of RLEMasks. Note that a PolyMasks may have multiple
* polygons. This function does not merge them. Instead, it returns the RLE for each polygon.
* Implementation based on COCO's MaskApi.c
* @param poly
* @param height height of the image
* @param width width of the image
* @return The converted RLEs
*/
def poly2RLE(poly: PolyMasks, height: Int, width: Int): Array[RLEMasks] = {
poly.poly.map(xy => {
// upsample and get discrete points densely along entire boundary
val scale = 5d
val (u, v, upsamplePoints) = {
val nPoints = xy.length / 2
val x = new Array[Long](nPoints + 1)
val y = new Array[Long](nPoints + 1)
for (j <- 0 until nPoints) {
x(j) = Math.floor(scale * xy(j * 2 + 0) + .5).toLong
y(j) = Math.floor(scale * xy(j * 2 + 1) + .5).toLong
}
x(nPoints) = x(0)
y(nPoints) = y(0)
val m1 = (0 until nPoints).map { case j =>
Math.max(Math.abs(x(j) - x(j + 1)), Math.abs(y(j) - y(j + 1))) + 1
}.sum.toInt
val u = new Array[Long](m1)
val v = new Array[Long](m1)
var m = 0
for (j <- 0 until nPoints) {
val (xs, xe, ys, ye, dx, dy, flip) = {
val _xs = x(j)
val _xe = x(j + 1)
val _ys = y(j)
val _ye = y(j + 1)
val _dx = Math.abs(_xe - _xs)
val _dy = Math.abs(_ys - _ye)
val _flip = (_dx >= _dy && _xs > _xe) || (_dx < _dy && _ys > _ye)
if (_flip) (_xe, _xs, _ye, _ys, _dx, _dy, _flip)
else (_xs, _xe, _ys, _ye, _dx, _dy, _flip)
}
if (dx >= dy) {
for (d <- 0 to dx.toInt) {
val s = (ye - ys).toDouble / dx
val t = if (flip) dx - d else d
u(m) = t + xs
v(m) = Math.floor(ys + s * t + .5).toLong
m += 1
}
}
else {
for (d <- 0 to dy.toInt) {
val s = (xe - xs).toDouble / dy
val t = if (flip) dy - d else d
v(m) = t + ys
u(m) = Math.floor(xs + s * t + .5).toLong
m += 1
}
}
}
(u, v, m)
}
// get points along y-boundary and downsample
val (downsampleX, downsampleY, downsamplePoints) = {
// use an independent scope
val nPoints = upsamplePoints
var m = 0
val x = new Array[Long](nPoints)
val y = new Array[Long](nPoints)
for (j <- 1 until nPoints) {
if (u(j) != u(j - 1)) {
// Should u(j) - 1 be u(j - 1) ????
val _xd = if (u(j) < u(j - 1)) u(j) else u(j) - 1
val xd = (_xd.toDouble + .5) / scale - .5
if (Math.floor(xd) != xd || xd < 0 || xd > width - 1) {
// continue
} else {
var yd = (if (v(j) < v(j - 1)) v(j) else v(j - 1)).toDouble
yd = (yd + .5) / scale - .5
if (yd < 0) {
yd = 0
} else if (yd > height) {
yd = height
}
yd = Math.ceil(yd)
x(m) = xd.toInt
y(m) = yd.toInt
m += 1
}
}
}
(x, y, m)
}
{
// compute rle encoding given y-boundary points
val x = downsampleX
val y = downsampleY
val nPoints = downsamplePoints + 1
val a = new Array[Long](nPoints)
for (j <- 0 until nPoints - 1)
a(j) = x(j) * height + y(j)
a(nPoints - 1) = height * width
scala.util.Sorting.quickSort(a)
var p = 0L
for (j <- 0 until nPoints) {
val t = a(j)
a(j) -= p
p = t
}
val b = new ArrayBuffer[Int]()
var j = 1
var m = 1
b += a(0).toInt
while (j < nPoints) {
if (a(j) > 0) {
b += a(j).toInt
m += 1
j += 1
}
else {
j += 1
if (j < nPoints) {
b(m - 1) += a(j).toInt
j += 1
}
}
}
RLEMasks(b.toArray, height, width)
}
})
}
/**
* Merge multiple RLEs into one (union or intersect)
* Implementation based on COCO's MaskApi.c
* @param R the RLEs
* @param intersect if true, do intersection; else find union
* @return the merged RLE
*/
def mergeRLEs(R: Array[RLEMasks], intersect: Boolean): RLEMasks = {
val n = R.length
if (n == 1) return R(0)
val h = R(0).height
val w = R(0).width
val cnts = new ArrayBuffer[Int]()
cnts.appendAll(R(0).counts)
for(i <- 1 until n) {
val B = R(i)
require(B.height == h && B.width == w, "The height and width of the merged RLEs must" +
" be the same")
val acnt = cnts.toArray
val am = cnts.length
cnts.clear()
var ca = uint2long(acnt(0))
var cb = B.get(0)
var (v, va, vb) = (false, false, false)
var a = 1
var b = 1
var cc = 0L
var ct = 1L
while (ct > 0) {
val c = Math.min(ca, cb)
cc += c
ct = 0
ca -= c
if (ca == 0 && a < am) {
ca = uint2long(acnt(a))
a += 1
va = !va
}
ct += ca
cb -= c
if (cb == 0 && b < B.counts.length) {
cb = B.get(b)
b += 1
vb = !vb
}
ct += cb
val vp = v
if (intersect) {
v = va && vb
} else {
v = va || vb
}
if (v != vp || ct == 0) {
cnts += cc.toInt
cc = 0
}
}
}
RLEMasks(cnts.toArray, h, w)
}
private[segmentation] def rleArea(R: RLEMasks): Long = {
var a = 0L
for (j <- 1.until(R.counts.length, 2))
a += R.get(j)
a
}
/**
* Calculate the intersection over union (IOU) of two RLEs
* @param detection the detection RLE
* @param groundTruth the ground truth RLE
 * @param isCrowd whether the ground truth is a crowd region
* @return IOU
*/
def rleIOU(detection: RLEMasks, groundTruth: RLEMasks, isCrowd: Boolean): Float = {
val gtBbox = groundTruth.bbox
val dtBbox = detection.bbox
require((detection.width, detection.height) == (groundTruth.width, groundTruth.height),
"The sizes of RLEs must be the same to compute IOU")
val iou = bboxIOU(gtBbox, dtBbox, isCrowd)
if (iou > 0) {
val crowd = isCrowd
val dCnts = detection
val gCnts = groundTruth
var a = 1
var b = 1
var ca = dCnts.get(0)
val ka = dCnts.counts.length
var va: Boolean = false
var vb: Boolean = false
var cb = gCnts.get(0)
val kb = gCnts.counts.length
var i = 0L
var u = 0L
var ct = 1L
while (ct > 0) {
val c = math.min(ca, cb)
if (va || vb) {
u = u + c
if (va && vb) i += c
}
ct = 0
ca = ca - c
if (ca == 0 && a < ka) {
ca = dCnts.get(a)
a += 1
va = !va
}
ct += ca
cb = cb - c
if (cb == 0 && b < kb) {
cb = gCnts.get(b)
b += 1
vb = !vb
}
ct += cb
}
if (i == 0) {
u = 1
} else if (crowd) {
u = dCnts.area
}
i.toFloat / u
} else {
iou
}
}
/**
* Get the iou of two bounding boxes
* @param gtx1 Ground truth x1
* @param gty1 Ground truth y1
* @param gtx2 Ground truth x2
* @param gty2 Ground truth y2
* @param dtx1 Detection x1
* @param dty1 Detection y1
* @param dtx2 Detection x2
* @param dty2 Detection y2
 * @param isCrowd whether the ground truth is a crowd region
* @return
*/
def bboxIOU(gtx1: Float, gty1: Float, gtx2: Float, gty2: Float, dtx1: Float, dty1: Float,
dtx2: Float, dty2: Float, isCrowd: Boolean): Float = {
val (xmin, ymin, xmax, ymax) = (gtx1, gty1, gtx2, gty2)
val (x1, y1, x2, y2) = (dtx1, dty1, dtx2, dty2)
val area = (xmax - xmin + 1) * (ymax - ymin + 1)
val ixmin = Math.max(xmin, x1)
val iymin = Math.max(ymin, y1)
val ixmax = Math.min(xmax, x2)
val iymax = Math.min(ymax, y2)
val inter = Math.max(ixmax - ixmin + 1, 0) * Math.max(iymax - iymin + 1, 0)
val detectionArea = (x2 - x1 + 1) * (y2 - y1 + 1)
val union = if (isCrowd) detectionArea else (detectionArea + area - inter)
inter / union
}
/**
* Get the iou of two bounding boxes
* @param groundTruth
* @param detection
 * @param isCrowd whether the ground truth is a crowd region
* @return
*/
def bboxIOU(groundTruth: (Float, Float, Float, Float),
detection: (Float, Float, Float, Float), isCrowd: Boolean): Float = {
bboxIOU(groundTruth._1, groundTruth._2, groundTruth._3, groundTruth._4,
detection._1, detection._2, detection._3, detection._4, isCrowd)
}
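  // Hedged worked example (not in the original sources): boxes (0,0,9,9) and (5,5,14,14) overlap
  // in a 5x5 region, so with the +1 pixel convention above inter = 25, each area = 100,
  // and IOU = 25 / (100 + 100 - 25) ≈ 0.1429.
  private[segmentation] def bboxIOUExampleSketch(): Float =
    bboxIOU((0f, 0f, 9f, 9f), (5f, 5f, 14f, 14f), isCrowd = false)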
// convert one rle to one bbox
private[segmentation] def rleToOneBbox(rle: RLEMasks): (Float, Float, Float, Float) = {
val m = rle.counts.length / 2 * 2
val h = rle.height.toLong
var xp = 0.0f
var cc = 0L
var xs = rle.width.toLong
var ys = rle.height.toLong
var ye = 0.0f
var xe = 0.0f
if(m == 0) {
(0, 0, 0, 0)
} else {
for (j <- 0 until m) {
cc += rle.get(j)
val t = cc - j % 2
val y = t % h
val x = (t - y) / h
if (j % 2 == 0) {
xp = x
} else if (xp < x) {
ys = 0
ye = h - 1
}
xs = math.min(xs, x)
xe = math.max(xe, x)
ys = math.min(ys, y)
ye = math.max(ye, y)
}
(xs, ys, xe, ye)
}
}
def polyToSingleRLE(poly: PolyMasks, height: Int, width: Int): RLEMasks = {
val out = poly2RLE(poly, height, width)
mergeRLEs(out, false)
}
// convert binary mask to rle with counts
def binaryToRLE(binaryMask: Tensor[Float]): RLEMasks = {
val countsBuffer = new ArrayBuffer[Int]
val h = binaryMask.size(1)
val w = binaryMask.size(2)
val maskArr = binaryMask.storage().array()
val offset = binaryMask.storageOffset() - 1
val n = binaryMask.nElement()
var i = 0
var p = -1
var c = 0
while (i < n) {
// by convention the first count is the run of 0s, so append 0 when the mask starts with 1
val iw = i / h
val ih = i % h
val ss = ih * w + iw
if (p == -1 && maskArr(ss + offset) == 1) {
countsBuffer.append(0)
p = 1
c = 1
} else if (p == -1 && maskArr(ss + offset) == 0) {
p = 0
c = 1
} else if (maskArr(ss + offset) == p) {
c += 1
} else {
countsBuffer.append(c)
c = 1
p = maskArr(ss + offset).toInt
}
i += 1
}
countsBuffer.append(c)
RLEMasks(countsBuffer.toArray, height = h, width = w)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/feature/dataset/segmentation/MaskUtils.scala
|
Scala
|
apache-2.0
| 16,731
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.filereader
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.Partition
import org.apache.samza.system.SystemStreamMetadata.SystemStreamPartitionMetadata
import org.junit.Assert._
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import scala.collection.JavaConverters._
import scala.collection.mutable
class TestFileReaderSystemAdmin extends AssertionsForJUnit {
val files = List(
getClass.getResource("/empty.txt").getPath,
getClass.getResource("/noEnter.txt").getPath,
getClass.getResource("/oneEnter.txt").getPath,
getClass.getResource("/twoEnter.txt").getPath,
getClass.getResource("/moreEnter.txt").getPath)
@Test
def testGetOffsetsAfter {
val fileReaderSystemAdmin = new FileReaderSystemAdmin
val ssp3 = new SystemStreamPartition("file-reader", files(2), new Partition(0))
val ssp4 = new SystemStreamPartition("file-reader", files(3), new Partition(0))
val ssp5 = new SystemStreamPartition("file-reader", files(4), new Partition(0))
val offsets: java.util.Map[SystemStreamPartition, String] =
mutable.HashMap(ssp3 -> "0", ssp4 -> "12", ssp5 -> "25").asJava
val afterOffsets = fileReaderSystemAdmin.getOffsetsAfter(offsets)
assertEquals("11", afterOffsets.get(ssp3))
assertEquals("23", afterOffsets.get(ssp4))
assertEquals("34", afterOffsets.get(ssp5))
}
@Test
def testGetSystemStreamMetadata {
val fileReaderSystemAdmin = new FileReaderSystemAdmin
val allMetadata = fileReaderSystemAdmin.getSystemStreamMetadata(setAsJavaSetConverter(files.toSet).asJava)
val expectedEmpty = new SystemStreamPartitionMetadata(null, null, "0")
val expectedNoEntry = new SystemStreamPartitionMetadata("0", "0", "0")
val expectedOneEntry = new SystemStreamPartitionMetadata("0", "0", "11")
val expectedTwoEntry = new SystemStreamPartitionMetadata("0", "11", "23")
val expectedMoreEntry = new SystemStreamPartitionMetadata("0", "34", "46")
allMetadata.asScala.foreach { entry =>
{
val result = entry._2.getSystemStreamPartitionMetadata.get(new Partition(0))
if (entry._1.endsWith("empty.txt")) {
assertEquals(expectedEmpty, result)
} else if (entry._1.endsWith("noEnter.txt")) {
assertEquals(expectedNoEntry, result)
} else if (entry._1.endsWith("oneEnter.txt")) {
assertEquals(expectedOneEntry, result)
} else if (entry._1.endsWith("twoEnter.txt")) {
assertEquals(expectedTwoEntry, result)
} else if (entry._1.endsWith("moreEnter.txt")) {
assertEquals(expectedMoreEntry, result)
} else
fail()
}
}
}
}
|
prateekm/samza
|
samza-core/src/test/scala/org/apache/samza/system/filereader/TestFileReaderSystemAdmin.scala
|
Scala
|
apache-2.0
| 3,521
|
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.yggdrasil
package table
package cf
object math {
val Negate = CF1P("builtin::ct::negate") {
case c: BoolColumn =>
new Map1Column(c) with BoolColumn {
def apply(row: Int) = !c(row)
}
case c: LongColumn =>
new Map1Column(c) with LongColumn {
def apply(row: Int) = -c(row)
}
case c: DoubleColumn =>
new Map1Column(c) with DoubleColumn {
def apply(row: Int) = -c(row)
}
case c: NumColumn =>
new Map1Column(c) with NumColumn {
def apply(row: Int) = -c(row)
}
}
val Add = CF2P("builtin::ct::add") {
case (c1: BoolColumn, c2: BoolColumn) =>
new Map2Column(c1, c2) with BoolColumn {
def apply(row: Int) = c1(row) || c2(row)
}
case (c1: LongColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with LongColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: LongColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: LongColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: DoubleColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: DoubleColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with DoubleColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: DoubleColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: NumColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: NumColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
case (c1: NumColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) + c2(row)
}
}
val Mod = CF2P("builtin::ct::mod") {
case (c1: LongColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with LongColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: LongColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: LongColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: DoubleColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: DoubleColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with DoubleColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: DoubleColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: NumColumn, c2: LongColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: NumColumn, c2: DoubleColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
case (c1: NumColumn, c2: NumColumn) =>
new Map2Column(c1, c2) with NumColumn {
def apply(row: Int) = c1(row) % c2(row)
}
}
}
// type Math
|
drostron/quasar
|
yggdrasil/src/main/scala/quasar/yggdrasil/table/cf/Math.scala
|
Scala
|
apache-2.0
| 4,096
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.nscplugin.test
import org.scalajs.nscplugin.test.util._
import org.scalajs.ir.Names._
import org.scalajs.ir.Trees._
import org.scalajs.ir.Types._
import org.junit.Assert._
import org.junit.Test
// scalastyle:off line.size.limit
class StaticForwardersASTTest extends JSASTTest {
@Test
def emitStaticForwardersInExistingClass(): Unit = {
val classDef = """
import scala.scalajs.js, js.annotation._
class Foo(val y: Int = 10)
object Foo {
def bar(x: Int = 5): Int = x + 1
@js.native
@JSGlobal("foobar")
def foobar(x: Int = 5): Int = js.native
}
""".extractOne("class Foo") {
case cd: ClassDef if cd.name.name == ClassName("Foo") => cd
}
val staticMethodNames = classDef.memberDefs.collect {
case MethodDef(flags, MethodIdent(name), _, _, _, _) if flags.namespace.isStatic =>
name
}.sortBy(_.simpleName)
assertEquals(
List(
MethodName("$lessinit$greater$default$1", Nil, IntRef),
MethodName("bar", List(IntRef), IntRef),
MethodName("bar$default$1", Nil, IntRef)
),
staticMethodNames
)
}
@Test
def emitStaticForwardersInSyntheticClass(): Unit = {
val classDef = """
import scala.scalajs.js, js.annotation._
object Foo {
def bar(x: Int = 5): Int = x + 1
@js.native
@JSGlobal("foobar")
def foobar(x: Int = 5): Int = js.native
}
""".extractOne("class Foo") {
case cd: ClassDef if cd.name.name == ClassName("Foo") => cd
}
val staticMethodNames = classDef.memberDefs.collect {
case MethodDef(flags, MethodIdent(name), _, _, _, _) if flags.namespace.isStatic =>
name
}.sortBy(_.simpleName)
assertEquals(
List(
MethodName("bar", List(IntRef), IntRef),
MethodName("bar$default$1", Nil, IntRef)
),
staticMethodNames
)
}
}
|
scala-js/scala-js
|
compiler/src/test/scala/org/scalajs/nscplugin/test/StaticForwardersASTTest.scala
|
Scala
|
apache-2.0
| 2,180
|
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.controller.inject.closed
import scala.concurrent.duration._
import io.gatling.BaseSpec
class ClosedInjectionStepSpec extends BaseSpec {
"ConstantConcurrentUsersInjection" should "return the correct number of users target" in {
val step = ConstantConcurrentUsersInjection(5, 2.second)
step.valueAt(0.seconds) shouldBe 5
step.valueAt(1.seconds) shouldBe 5
step.valueAt(2.seconds) shouldBe 5
}
"RampConcurrentUsersInjection" should "return the correct number of users target" in {
val step = RampConcurrentUsersInjection(5, 10, 5.second)
step.valueAt(0.seconds) shouldBe 5
step.valueAt(1.seconds) shouldBe 6
step.valueAt(2.seconds) shouldBe 7
step.valueAt(3.seconds) shouldBe 8
step.valueAt(4.seconds) shouldBe 9
step.valueAt(5.seconds) shouldBe 10
}
it should "inject once a full user is reached" in {
val step = RampConcurrentUsersInjection(1, 100, (60 * 99).second)
step.valueAt(0.seconds) shouldBe 1
step.valueAt(30.seconds) shouldBe 1
step.valueAt(60.seconds) shouldBe 2
}
"composite.injectionSteps" should "produce the expected injection profile with starting users and with ramps" in {
val steps = StairsConcurrentUsersCompositeStep(
usersIncrement = 10,
levels = 5,
levelDuration = 10.seconds,
startingUsers = 5,
rampDuration = 20.seconds
).composite.steps
val expected = List(
ConstantConcurrentUsersInjection(5, 10.seconds),
RampConcurrentUsersInjection(5, 15, 20.seconds),
ConstantConcurrentUsersInjection(15, 10.seconds),
RampConcurrentUsersInjection(15, 25, 20.seconds),
ConstantConcurrentUsersInjection(25, 10.seconds),
RampConcurrentUsersInjection(25, 35, 20.seconds),
ConstantConcurrentUsersInjection(35, 10.seconds),
RampConcurrentUsersInjection(35, 45, 20.seconds),
ConstantConcurrentUsersInjection(45, 10.seconds)
)
steps.shouldBe(expected)
}
it should "produce the expected injection profile without starting users and without ramps" in {
val steps = StairsConcurrentUsersCompositeStep(
usersIncrement = 10,
levels = 5,
levelDuration = 10.seconds,
startingUsers = 0,
rampDuration = Duration.Zero
).composite.steps
val expected = List(
ConstantConcurrentUsersInjection(0, 10.seconds),
ConstantConcurrentUsersInjection(10, 10.seconds),
ConstantConcurrentUsersInjection(20, 10.seconds),
ConstantConcurrentUsersInjection(30, 10.seconds),
ConstantConcurrentUsersInjection(40, 10.seconds)
)
steps.shouldBe(expected)
}
it should "produce the expected injection profile with starting users and without ramps" in {
val steps = StairsConcurrentUsersCompositeStep(
usersIncrement = 10,
levels = 5,
levelDuration = 10.seconds,
startingUsers = 5,
rampDuration = Duration.Zero
).composite.steps
val expected = List(
ConstantConcurrentUsersInjection(5, 10.seconds),
ConstantConcurrentUsersInjection(15, 10.seconds),
ConstantConcurrentUsersInjection(25, 10.seconds),
ConstantConcurrentUsersInjection(35, 10.seconds),
ConstantConcurrentUsersInjection(45, 10.seconds)
)
steps.shouldBe(expected)
}
it should "produce the expected injection profile without starting users and with ramps" in {
val steps = StairsConcurrentUsersCompositeStep(
usersIncrement = 10,
levels = 5,
levelDuration = 10.seconds,
startingUsers = 0,
rampDuration = 80.seconds
).composite.steps
val expected = Seq(
RampConcurrentUsersInjection(0, 10, 80.seconds),
ConstantConcurrentUsersInjection(10, 10.seconds),
RampConcurrentUsersInjection(10, 20, 80.seconds),
ConstantConcurrentUsersInjection(20, 10.seconds),
RampConcurrentUsersInjection(20, 30, 80.seconds),
ConstantConcurrentUsersInjection(30, 10.seconds),
RampConcurrentUsersInjection(30, 40, 80.seconds),
ConstantConcurrentUsersInjection(40, 10.seconds),
RampConcurrentUsersInjection(40, 50, 80.seconds),
ConstantConcurrentUsersInjection(50, 10.seconds)
)
steps.shouldBe(expected)
}
}
|
gatling/gatling
|
gatling-core/src/test/scala/io/gatling/core/controller/inject/closed/ClosedInjectionStepSpec.scala
|
Scala
|
apache-2.0
| 4,834
|
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.tasks
import k.grid.Grid
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Promise, Future}
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by michael on 3/9/16.
*/
trait TaskResult
case object TaskSuccessful extends TaskResult
case object TaskFailed extends TaskResult
trait Task {
protected def cancel(prom : Promise[Unit], d : FiniteDuration) = {
Grid.system.scheduler.scheduleOnce(d) {
prom.failure(new Throwable("Task reached its timeout and was canceled."))
}
}
def exec : Future[TaskResult]
}
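// Hedged usage sketch (not part of the original CM-Well sources): a task whose exec future is
// governed solely by the timeout scheduled through cancel(); it resolves to TaskFailed once the
// given duration elapses. The class name is illustrative.
class TimeoutOnlyTaskSketch(timeout: FiniteDuration) extends Task {
  override def exec: Future[TaskResult] = {
    val prom = Promise[Unit]()
    cancel(prom, timeout) // fails the promise after `timeout`
    prom.future.map(_ => TaskSuccessful).recover { case _ => TaskFailed }
  }
}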
|
nruppin/CM-Well
|
server/cmwell-controller/src/main/scala/cmwell/ctrl/tasks/Task.scala
|
Scala
|
apache-2.0
| 1,211
|
package org.pgscala.converters
import org.joda.time.LocalDate
/** Do not edit - generated in Builder / PGLocalDateConverterBuilder.scala */
object PGLocalDateConverter extends PGConverter[LocalDate] {
val PGType = PGNullableLocalDateConverter.pgType
def toPGString(ld: LocalDate) =
PGNullableLocalDateConverter.localDateToString(ld)
val defaultValue: LocalDate = LocalDate.parse("0001-01-01")
def fromPGString(ld: String) =
if (ld eq null)
defaultValue
else
PGNullableLocalDateConverter.stringToLocalDate(ld)
}
|
melezov/pgscala
|
converters-scala/src/generated/scala/org/pgscala/converters/core/PGLocalDateConverter.scala
|
Scala
|
bsd-3-clause
| 549
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import monix.execution.Cancelable
import scala.util.control.NonFatal
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
/** An observable that evaluates the given by-name argument,
* and emits it.
*/
private[reactive] final class EvalAlwaysObservable[+A](f: () => A) extends Observable[A] {
def unsafeSubscribeFn(subscriber: Subscriber[A]): Cancelable = {
try {
subscriber.onNext(f())
// No need to do back-pressure
subscriber.onComplete()
} catch {
case ex if NonFatal(ex) =>
try subscriber.onError(ex)
catch {
case err if NonFatal(err) =>
val s = subscriber.scheduler
s.reportFailure(ex)
s.reportFailure(err)
}
}
Cancelable.empty
}
}
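// Hedged usage sketch (not part of the original Monix sources): Observable.eval is the public
// builder that produces this kind of observable; the by-name argument is re-evaluated for every
// subscription, so each foreach below may print a different timestamp.
private[reactive] object EvalAlwaysObservableSketch {
  import monix.execution.Scheduler.Implicits.global
  def demo(): Unit = {
    val o = Observable.eval(System.nanoTime())
    o.foreach(println)
    o.foreach(println)
  }
}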
|
monixio/monix
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/EvalAlwaysObservable.scala
|
Scala
|
apache-2.0
| 1,499
|
package salgo.collection.array
trait ArrayExtension {
implicit class ArrayExt2[T](underlay: Array[Array[T]]) {
def apply(a: Int, b: Int): T = underlay(a)(b)
def update(a: Int, b: Int, value: T) = underlay(a).update(b, value)
}
implicit class ArrayExt3[T](underlay: Array[Array[Array[T]]]) {
def apply(a: Int, b: Int): Array[T] = underlay(a)(b)
def apply(a: Int, b: Int, c: Int): T = underlay(a)(b)(c)
def update(a: Int, b: Int, c: Int, value: T) = underlay(a)(b).update(c, value)
}
implicit class ArrayExt4[T](underlay: Array[Array[Array[Array[T]]]]) {
def apply(a: Int, b: Int): Array[Array[T]] = underlay(a)(b)
def apply(a: Int, b: Int, c: Int): Array[T] = underlay(a)(b)(c)
def apply(a: Int, b: Int, c: Int, d: Int): T = underlay(a)(b)(c)(d)
def update(a: Int, b: Int, c: Int, d: Int, value: T) = underlay(a)(b)(c).update(d, value)
}
implicit class ArrayExt5[T](underlay: Array[Array[Array[Array[Array[T]]]]]) {
def apply(a: Int, b: Int): Array[Array[Array[T]]] = underlay(a)(b)
def apply(a: Int, b: Int, c: Int): Array[Array[T]] = underlay(a)(b)(c)
def apply(a: Int, b: Int, c: Int, d: Int): Array[T] = underlay(a)(b)(c)(d)
def apply(a: Int, b: Int, c: Int, d: Int, e: Int): T = underlay(a)(b)(c)(d)(e)
def update(a: Int, b: Int, c: Int, d: Int, e: Int, value: T) = underlay(a)(b)(c)(d).update(e, value)
}
}
|
shivawu/salgo
|
collection/src/main/scala/salgo/collection/array/ArrayExtension.scala
|
Scala
|
apache-2.0
| 1,388
|
package pep_118
import common.IntOps._
object Wip {
object Attempt1 {
val digits = (1 to 9).toSet
// Set is Monadic but not a Monad :-(
val p1: Set[Int] = for {
n <- digits
if isPrime(n)
} yield n
val p2: Set[Int] = for {
a <- digits diff Set(2, 4, 5, 6, 8)
b <- digits
n = a + b * 10
if isPrime(n)
} yield n
val p3: Set[Int] = for {
a <- digits diff Set(2, 4, 5, 6, 8)
b <- digits
c <- digits
n = a + b * 10 + c * 100
if isPrime(n)
} yield n
val p4: Set[Int] = for {
a <- digits diff Set(2, 4, 5, 6, 8)
b <- digits
c <- digits
d <- digits
n = a + b * 10 + c * 100 + d * 1000
if isPrime(n)
} yield n
@deprecated("this is slower than isPrime!", "1.0")
def divisibleBy3(d: Int*): Boolean = {
var r = d.sum
while (r > 10) {
val t = r.toString.toList.map(_ - '0').sum
r = t
}
Set(3, 6, 9).contains(r)
}
def solve() =
s"p1=${p1.size}\\n" +
s"p2=${p2.size}\\n" +
s"p3=${p3.size}\\n" +
s"p4=${p4.size}\\n" +
s"$p2"
// s"p5=${p5.size}\\n" +
// s"p6=${p6.size}\\n" +
// s"p7=${p7.size}\\n" +
// s"p8=${p8.size}\\n"
//.toList.sorted
//s"p7=${p7.size}\\n"
//s"p8=${p8.size}\\n"
/*
WRONG
p1=4 p1=4 p1=4
p2=21 p2=21 p2=21
p3=128 p3=128 p3=128
p4=857 p4=857 p4=857
p5=6112 p5=6112 p5=6112
p6=45191 p6=45191 p6=45191
p7=345553
– 2040ms – 1873ms – 10091ms
*/
}
object Attempt2 {
// Set is Monadic but not a Monad :-(
val digits = (1 to 9).toSet
val p: Int => Set[Int] = scalaz.Memo.mutableHashMapMemo { i =>
val ns = for {
s <- digits.subsets(i)
p <- s.toList.permutations
n = (0 /: p)(_ * 10 + _)
if isPrime(n)
} yield n
ns.toSet
}
def stats() =
s"p1=${p(1).size} \\n" + // p1=4
s"p2=${p(2).size}\\n" + // p2=20
s"p3=${p(3).size}\\n" + // p3=83
s"p4=${p(4).size}\\n" + // p4=395
s"p5=${p(5).size}\\n" + // p5=1610
s"p6=${p(6).size}\\n" + // p6=5045
s"p7=${p(7).size}\\n" + // p7=12850
s"p8=${p(8).size}\\n" + // p8=23082
s"p9=${p(9).size}\\n" + // p9=0
" " // – 2810ms
val partitions = Iterator(
List(1, 1, 1, 1, 2, 3),
List(1, 1, 1, 1, 5),
List(1, 1, 1, 2, 2, 2),
List(1, 1, 1, 2, 4),
List(1, 1, 1, 3, 3),
List(1, 1, 2, 2, 3),
List(1, 1, 2, 5),
List(1, 1, 3, 4),
List(1, 2, 2, 2, 2),
List(1, 2, 2, 4),
List(1, 2, 3, 3),
List(1, 3, 5),
List(1, 4, 4)
)
def split(n: Int): List[Int] = Iterator.iterate(n)(_ / 10).takeWhile(_ != 0).map(_ % 10).toList
def areDisjoint(ns: Int*): Boolean = {
val ls = ns.flatMap(split)
ls.size == ls.toSet.size
}
def solve() = {
val aaa111123 = for {
is <- partitions.take(1) // TODO .take(1) List(1, 1, 1, 1, 2, 3)
a <- p(1)
b <- p(1)
if areDisjoint(a, b)
if a < b
c <- p(1)
if areDisjoint(a, b, c)
if b < c
d <- p(1)
if areDisjoint(a, b, c, d)
if c < d
e <- p(2)
if d < e
if areDisjoint(a, b, c, d, e)
f <- p(3)
if e < f
if areDisjoint(a, b, c, d, e, f)
} yield (a, b, c, d, e, f)
aaa111123 foreach println
"TODO"
// (5,2,7,3,89,461)
// (5,2,7,3,89,641)
// TODO – 298ms
}
}
object Attempt3 {
// Set is Monadic but not a Monad :-(
val digits = (1 to 9).toSet
val p: Int => Set[Int] = scalaz.Memo.mutableHashMapMemo { i =>
val ns = for {
s <- digits.subsets(i)
p <- s.toList.permutations
n = (0 /: p)(_ * 10 + _)
if isPrime(n)
} yield n
ns.toSet
}
val partitions = List(
List(4, 5),
List(3, 3, 3),
List(2, 3, 4),
List(2, 2, 5),
List(2, 2, 2, 3),
List(1, 4, 4),
List(1, 3, 5),
List(1, 2, 3, 3),
List(1, 2, 2, 4),
List(1, 2, 2, 2, 2),
List(1, 1, 3, 4),
List(1, 1, 2, 5),
List(1, 1, 2, 2, 3),
List(1, 1, 1, 3, 3),
List(1, 1, 1, 2, 4),
List(1, 1, 1, 2, 2, 2),
List(1, 1, 1, 1, 5),
List(1, 1, 1, 1, 2, 3)
)
def split(n: Int): List[Int] = Iterator.iterate(n)(_ / 10).takeWhile(_ != 0).map(_ % 10).toList
def areDisjoint(ns: Int*): Boolean = {
val ls = ns.flatMap(split)
ls.size == ls.toSet.size
}
def solve() = {
val solutions6 = for {
is <- partitions.filter(_.size == 6)
a <- p(is(0))
b <- p(is(1)) if a < b && areDisjoint(a, b)
c <- p(is(2)) if b < c && areDisjoint(a, b, c)
d <- p(is(3)) if c < d && areDisjoint(a, b, c, d)
e <- p(is(4)) if d < e && areDisjoint(a, b, c, d, e)
f <- p(is(5)) if e < f && areDisjoint(a, b, c, d, e, f)
} yield (a, b, c, d, e, f)
val solutions5 = for {
is <- partitions.filter(_.size == 5)
a <- p(is(0))
b <- p(is(1)) if a < b && areDisjoint(a, b)
c <- p(is(2)) if b < c && areDisjoint(a, b, c)
d <- p(is(3)) if c < d && areDisjoint(a, b, c, d)
e <- p(is(4)) if d < e && areDisjoint(a, b, c, d, e)
} yield (a, b, c, d, e)
val solutions4 = for {
is <- partitions.filter(_.size == 4)
a <- p(is(0))
b <- p(is(1)) if a < b && areDisjoint(a, b)
c <- p(is(2)) if b < c && areDisjoint(a, b, c)
d <- p(is(3)) if c < d && areDisjoint(a, b, c, d)
} yield (a, b, c, d)
val solutions3 = for {
is <- partitions.filter(_.size == 3)
a <- p(is(0))
b <- p(is(1)) if a < b && areDisjoint(a, b)
c <- p(is(2)) if b < c && areDisjoint(a, b, c)
} yield (a, b, c)
val solutions2 = for {
is <- partitions.filter(_.size == 2)
a <- p(is(0))
b <- p(is(1)) if a < b && areDisjoint(a, b)
} yield (a, b)
(solutions2 ++ solutions3 ++ solutions4 ++ solutions5 ++ solutions6).size
}
// 13042 – 2601ms
}
object Attempt4 {
val digits = (1 to 9).permutations
val is = for {
a <- 0 to 4
b <- 0 to(9, 2) if a + b < 10
c <- 0 to(9, 3) if a + b + c < 10
d <- 0 to(9, 4) if a + b + c + d < 10
e <- 0 to(9, 5) if a + b + c + d + e == 9
} yield (a, b, c, d, e)
val p = for {
v <- digits
(a, b, c, d, e) <- is.toIterator
} yield ()
def solve() = p take 10 foreach println
}
}
|
filippovitale/pe
|
pe-solution/src/main/scala/pep_118/Wip.scala
|
Scala
|
mit
| 6,735
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.scalnet.models
import org.deeplearning4j.scalnet.layers.convolutional.Convolution2D
import org.deeplearning4j.scalnet.layers.core.{ Dense, OutputLayer }
import org.deeplearning4j.scalnet.layers.pooling.MaxPooling2D
import org.deeplearning4j.scalnet.layers.reshaping.{ Flatten3D, Unflatten3D }
import org.deeplearning4j.scalnet.regularizers.L2
import org.nd4j.linalg.activations.Activation
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction
import org.scalatest._
/**
* Created by maxpumperla on 29/06/17.
*/
class SequentialTest extends FunSpec with BeforeAndAfter {
var model: Sequential = Sequential()
val shape = 100
val wrongInputShape = 10
val height: Int = 28
val width: Int = 28
val channels: Int = 1
val nClasses: Int = 10
val weightDecay: Double = 0.005
before {
model = Sequential()
}
describe("A Sequential network") {
it("without layers should produce an IllegalArgumentException when compiled") {
assertThrows[java.lang.IllegalArgumentException] {
model.compile(null)
}
}
it("without buildOutput called should not have an output layer") {
model.add(Dense(shape, shape))
assert(!model.getLayers.last.asInstanceOf[OutputLayer].output.isOutput)
}
it("with buildOutput called should have an output layer") {
model.add(Dense(shape, shape))
model.buildOutput(LossFunction.NEGATIVELOGLIKELIHOOD)
assert(model.getLayers.last.asInstanceOf[OutputLayer].output.isOutput)
}
it("should infer the correct shape of an incorrectly initialized layer") {
model.add(Dense(shape, shape))
model.add(Dense(shape, wrongInputShape))
assert(model.getLayers.last.inputShape == List(shape))
}
it("should propagate the correct shape of all layers and preprocessors") {
model.add(Unflatten3D(List(height, width, channels), nIn = height * width))
model.add(Convolution2D(20, List(5, 5), channels, regularizer = L2(weightDecay), activation = Activation.RELU))
model.add(MaxPooling2D(List(2, 2), List(2, 2)))
model.add(Convolution2D(50, List(5, 5), regularizer = L2(weightDecay), activation = Activation.RELU))
model.add(MaxPooling2D(List(2, 2), List(2, 2)))
model.add(Flatten3D())
val preprocessorOutShapes = model.getPreprocessors.values.map(_.outputShape)
assert(preprocessorOutShapes == List(List(height, width, channels), List(4 * 4 * 50)))
val layerOutShapes = model.getLayers.map(_.outputShape)
assert(layerOutShapes == List(List(24, 24, 20), List(12, 12, 20), List(8, 8, 50), List(4, 4, 50)))
}
}
}
|
deeplearning4j/deeplearning4j
|
scalnet/src/test/scala/org/deeplearning4j/scalnet/models/SequentialTest.scala
|
Scala
|
apache-2.0
| 3,419
|
package a65.测试5
object Runner6 {
def number1s(n: Int): Number1 = {
lazy val taZero: Number1 = Number1T(() => taZero)
n match {
case n1 if n1 > 0 => Number1S(() => number1s(n1 - 1))
case 0 => taZero
}
}
def number1t(n: Int): Number1 = {
def gen(n1: Int, zero: => Number1): Number1 = n1 match {
case n2 if n2 > 0 => Number1T(() => gen(n2 - 1, zero))
case 0 => zero
}
lazy val number1t: Number1 = gen(n, taZero)
lazy val taZero: Number1 = Number1S(() => number1t)
number1t
}
def number2t(n: Int): Number2T = {
def gen(n1: Int, zero: => Number2S): Number2T = n1 match {
case n2 if n2 > 1 => Number2T(() => gen(n2 - 1, zero))
case 1 => Number2T(() => zero)
}
lazy val number2t: Number2T = gen(n, number2s)
lazy val number2s: Number2S = Number2S(() => number2t)
number2t
}
def number2s(n: Int): Number2 = {
def gen(n1: Int, zero: => Number2): Number2 = n1 match {
case n2 if n2 > 0 => Number2S(() => gen(n2 - 1, zero))
case 0 => zero
}
lazy val number2s: Number2 = gen(n, number2t)
lazy val number2t: Number2 = Number2T(() => number2s)
number2t
}
def counts(number1: Number1): Int = number1 match {
case Number1S(tail) =>
val num =
try {
counts(tail())
} catch {
case e: StackOverflowError => 0
}
num + 1
case Number1T(tail) => 0
}
def countt(number1: Number1): Int = number1 match {
case Number1T(tail) =>
val num =
try {
counts(tail())
} catch {
case e: StackOverflowError => 0
}
num + 1
case Number1S(tail) =>
try {
countt(tail())
} catch {
case e: StackOverflowError => 0
}
}
def main(arr: Array[String]): Unit = {
for {
i <- 4 to 8
n <- 2 to 3
} {
val result1 = MathCount.log(n, i)
val number1 = number1s(i).asInstanceOf[Number1S]
val number2Positive = number2t(n)
val result2 = number2Positive.method4(number1)
val resultNum2 = counts(result2)
if (result1 != resultNum2) {
println(n, i, resultNum2, result1)
}
}
for {
i <- 8 to 8
n <- 3 to 3
} {
val result1 = MathCount.pow(n, i)
val number1 = number1t(i)
val number2Positive = number2s(n)
val result2 = number1.method3(number2Positive)
println(i, n, result1, counts(result2), result1 == counts(result2))
}
}
}
|
djx314/ubw
|
a66-指数对数-原型/src/main/scala/a65/测试5/Runner6.scala
|
Scala
|
bsd-3-clause
| 2,607
|
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
import org.scalatest._
import org.scalatest.fixture
import org.scalatest.OutcomeOf.outcomeOf
/**
* Trait that can pass a new <code>Conductor</code> fixture into tests.
*
* <p>
* Here's an example of the use of this trait to test the <code>ArrayBlockingQueue</code>
* class from <code>java.util.concurrent</code>:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.fixture
* import org.scalatest.concurrent.ConductorFixture
* import org.scalatest.matchers.ShouldMatchers
* import java.util.concurrent.ArrayBlockingQueue
*
* class ArrayBlockingQueueSuite extends fixture.FunSuite with ConductorFixture with ShouldMatchers {
*
* test("calling put on a full queue blocks the producer thread") { conductor => import conductor._
*
* val buf = new ArrayBlockingQueue[Int](1)
*
* thread("producer") {
* buf put 42
* buf put 17
* beat should be (1)
* }
*
* thread("consumer") {
* waitForBeat(1)
* buf.take should be (42)
* buf.take should be (17)
* }
*
* whenFinished {
* buf should be ('empty)
* }
* }
*
* test("calling take on an empty queue blocks the consumer thread") { conductor => import conductor._
*
* val buf = new ArrayBlockingQueue[Int](1)
*
* thread("producer") {
* waitForBeat(1)
* buf put 42
* buf put 17
* }
*
* thread("consumer") {
* buf.take should be (42)
* buf.take should be (17)
* beat should be (1)
* }
*
* whenFinished {
* buf should be ('empty)
* }
* }
* }
* </pre>
*
* <p>
* For an explanation of how these tests work, see the documentation for <a href="Conductor.html"><code>Conductor</code></a>.
* </p>
*
* @author Bill Venners
*/
trait ConductorFixture extends SuiteMixin with Conductors { this: fixture.Suite =>
/**
* Defines type <code>Fixture</code> to be <code>Conductor</code>.
*/
type FixtureParam = Conductor
/**
* Creates a new <code>Conductor</code>, passes the <code>Conductor</code> to the
* specified test function, and ensures that <code>conduct</code> gets invoked
* on the <code>Conductor</code>.
*
* <p>
* After the test function returns (so long as it returns normally and doesn't
* complete abruptly with an exception), this method will determine whether the
* <code>conduct</code> method has already been called (by invoking
* <code>conductingHasBegun</code> on the <code>Conductor</code>). If not,
* this method will invoke <code>conduct</code> to ensure that the
* multi-threaded scenario is actually conducted.
* </p>
*
* <p>
* This trait is stackable with other traits that override <code>withFixture(NoArgTest)</code>, because
* instead of invoking the test function directly, it delegates responsibility for invoking the test
* function to <code>withFixture(NoArgTest)</code>.
* </p>
*/
def withFixture(test: OneArgTest): Outcome = {
val conductor = new Conductor
withFixture(test.toNoArgTest(conductor)) match {
case Succeeded if !conductor.conductingHasBegun =>
outcomeOf { conductor.conduct() }
case other => other
}
}
}
|
svn2github/scalatest
|
src/main/scala/org/scalatest/concurrent/ConductorFixture.scala
|
Scala
|
apache-2.0
| 3,841
|
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Content Management System. *
* *
* Copyright © 2015 Reactific Software LLC *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file *
* except in compliance with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the *
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* either express or implied. See the License for the specific language governing permissions *
* and limitations under the License. *
**********************************************************************************************************************/
package scrupal.store.reactivemongo
import reactivemongo.bson._
import scrupal.utils.AbstractRegistry
trait VariantReaderWriter[B <: VariantStorable[_], S <: B] {
def fromDoc(doc: BSONDocument) : S
def toDoc(obj: B) : BSONDocument
}
/** Registry Of Variants Of Base Class
*
* This registry keeps track of how to read and write variant subclasses of a base class so they can all be stored in
* one collection. Subclasses must register their BSONHandler with the registry
*/
case class VariantRegistry[B <: VariantStorable[_]](name: String)
extends AbstractRegistry[Symbol, VariantReaderWriter[B,_ <: B]]
with VariantBSONDocumentReader[B]
with VariantBSONDocumentWriter[B] {
def kinds: Seq[String] = { _keys.map { k ⇒ k.name} }.toSeq
def register[S <: B](kind: Symbol, handler: VariantReaderWriter[B, S]) = {
super._register(kind, handler)
}
def read(doc: BSONDocument) : B = {
doc.getAs[BSONString]("kind") match {
case Some(str) ⇒
super.lookup(Symbol(str.value)) match {
case Some(handler) ⇒ handler.fromDoc(doc)
case None ⇒ toss(s"Unknown kind of $name: '${str.value}")
}
case None ⇒ toss(s"Field 'kind' is missing from Node: ${doc.toString()}")
}
}
def write(obj: B) : BSONDocument = {
super.lookup(obj.kind) match {
case Some(handler) ⇒ handler.toDoc(obj)
case None ⇒ toss(s"Unknown kind of $name: ${obj.kind.name}")
}
}
}
|
scrupal/scrupal-store-reactivemongo
|
src/main/scala/scrupal/store/reactivemongo/VariantRegistry.scala
|
Scala
|
apache-2.0
| 3,262
|
package org.moscatocms
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import org.moscatocms.api.Api
object Boot extends App {
implicit val system = ActorSystem("moscato")
val service = system.actorOf(Props[Api], "moscato-rest-service")
// IO requires an implicit ActorSystem, and ? requires an implicit timeout
// Bind HTTP to the specified service.
implicit val timeout = Timeout(5.seconds)
IO(Http) ? Http.Bind(service, interface = "localhost", port = 8080)
}
|
moscatocms/moscato
|
src/main/scala/org/moscatocms/Boot.scala
|
Scala
|
apache-2.0
| 598
|
package algorithms
/**
* @author loustler
* @since 02/09/2017 00:33
*/
object RecursiveBinarySearchTest extends App{
val numberList = Array(1, 2, 3, 4, 5, 6, 7, 10, 15, 20)
val start = 0
val last = numberList.length - 1
val find = 15
val findIndex = 8
val result1 = RecursiveBinarySearch binarySearch(numberList, start, last, find)
val result2 = RecursiveBinarySearch binarySearch2(numberList, start, last, find)
assert(result1 == findIndex)
assert(result2 == findIndex)
}
|
loustler/scala
|
src/test/scala/algorithms/RecursiveBinarySearchTest.scala
|
Scala
|
mit
| 501
|
package org.cddb.lsmt
trait Table {
}
object Table {
}
|
tierex/cddb
|
core/src/main/scala/org/cddb/lsmt/Table.scala
|
Scala
|
apache-2.0
| 59
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.FullAccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class ACQ5032(value: Option[Boolean]) extends CtBoxIdentifier(name = "Plant and machinery") with CtOptionalBoolean with Input
with ValidatableBox[FullAccountsBoxRetriever]
{
def validate(boxRetriever: FullAccountsBoxRetriever) = {
import boxRetriever._
cannotExistErrorIf(hasValue && ac44.noValue && ac45.noValue)
}
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/ACQ5032.scala
|
Scala
|
apache-2.0
| 1,092
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import org.scalacheck.{ Gen, Arbitrary }
class VectorSpaceProperties extends CheckProperties {
import com.twitter.algebird.BaseVectorSpaceProperties._
// TODO: we won't need this when we have an Equatable trait
def mapEqFn(a: Map[Int, Double], b: Map[Int, Double]) = {
(a.keySet ++ b.keySet).forall { key =>
(a.get(key), b.get(key)) match {
case (Some(aVal), Some(bVal)) => beCloseTo(aVal, bVal)
case (Some(aVal), None) => beCloseTo(aVal, 0.0)
case (None, Some(bVal)) => beCloseTo(bVal, 0.0)
case _ => true
}
}
}
implicit val genDouble = Arbitrary{ Gen.choose(-1.0E50, 1.0E50) }
property("map int double scaling") {
vectorSpaceLaws[Double, ({ type x[a] = Map[Int, a] })#x](mapEqFn(_, _))
}
}
|
erikerlandson/algebird
|
algebird-test/src/test/scala/com/twitter/algebird/VectorSpaceProperties.scala
|
Scala
|
apache-2.0
| 1,353
|
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.dsl.views
import java.util.Calendar
import org.schedoscope.Schedoscope
import org.schedoscope.dsl.Parameter
import org.schedoscope.dsl.Parameter.p
import scala.collection.mutable.ListBuffer
object DateParameterizationUtils {
/**
* The earliest day in history, as configured. Defaults to 2013/12/01.
*/
def earliestDay = Schedoscope.settings.earliestDay
/**
* Creates a Calendar out of year, month, day encoded as string parameters.
*/
def parametersToDay(year: Parameter[String], month: Parameter[String], day: Parameter[String]) = {
val date = Calendar.getInstance()
date.clear()
date.set(year.v.get.toInt, month.v.get.toInt - 1, day.v.get.toInt)
date
}
/**
* Returns a string triple (year, month, day) from a Calendar.
*/
def dayToStrings(thisDay: Calendar) = {
val year = s"${"%04d".format(thisDay.get(Calendar.YEAR))}"
val month = s"${"%02d".format(thisDay.get(Calendar.MONTH) + 1)}"
val day = s"${"%02d".format(thisDay.get(Calendar.DAY_OF_MONTH))}"
(year, month, day)
}
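  // Hedged example (not part of the original Schedoscope sources): dayToStrings zero-pads the
  // parts, so January 5th 2015 becomes ("2015", "01", "05").
  def dayToStringsExampleSketch(): (String, String, String) = {
    val cal = Calendar.getInstance()
    cal.clear()
    cal.set(2015, Calendar.JANUARY, 5)
    dayToStrings(cal)
  }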
/**
* Returns a string parameter triple (year, month, day) from a Calendar.
*/
def dayToParameters(thisDay: Calendar) = {
val year: Parameter[String] = p(s"${"%04d".format(thisDay.get(Calendar.YEAR))}")
val month: Parameter[String] = p(s"${"%02d".format(thisDay.get(Calendar.MONTH) + 1)}")
val day: Parameter[String] = p(s"${"%02d".format(thisDay.get(Calendar.DAY_OF_MONTH))}")
(year, month, day)
}
/**
* The latest day in history, either now or the configured latest day, as a string parameter triple (year, month, day).
*/
def today = dayToParameters(Schedoscope.settings.latestDay)
/**
* Given a Calendar, return the previous day as a Calendar or None, if the earliest day is crossed.
*/
def prevDay(thisDay: Calendar): Option[Calendar] = {
if (thisDay.after(earliestDay)) {
val prevDay = thisDay.clone().asInstanceOf[Calendar]
prevDay.add(Calendar.DAY_OF_MONTH, -1)
Some(prevDay)
} else {
None
}
}
/**
* Given the year, month, and day as parameter strings, return the previous day as strings or None, if the earliest day is crossed.
*/
def prevDay(year: Parameter[String], month: Parameter[String], day: Parameter[String]): Option[(String, String, String)] = {
prevDay(parametersToDay(year, month, day)) match {
case Some(previousDay) => Some(dayToStrings(previousDay))
case None => None
}
}
/**
 * Return all previous days, including the passed Calendar, as a sequence bounded by the earliest day
*/
def prevDaysFrom(thisDay: Calendar): Seq[Calendar] = {
new Iterator[Calendar] {
var current: Option[Calendar] = Some(thisDay)
override def hasNext = current != None
override def next = current match {
case Some(day) => {
current = prevDay(day)
day
}
case None => null
}
}.toSeq
}
/**
 * Return all days in a Calendar range as a sequence, bounded by the earliest day
*/
def dayRange(fromThisDay: Calendar, toThisDay: Calendar): Seq[Calendar] = {
new Iterator[Calendar] {
var current: Option[Calendar] = Some(toThisDay)
override def hasNext = current != None
override def next = current match {
case Some(day) => {
current = if (current.get.after(fromThisDay)) prevDay(day)
else
None
day
}
case None => null
}
}.toSeq
}
/**
* Convert a sequence of Calendars into string date triples (year, month, day).
*/
def dayParameterRange(range: Seq[Calendar]): Seq[(String, String, String)] =
range.map {
dayToStrings(_)
}
/**
* Convert a sequence of Calendars into string month tuples (year, month).
*/
def monthParameterRange(range: Seq[Calendar]): Seq[(String, String)] =
range.map {
dayToStrings(_)
}.map { case (year, month, day) => (year, month) }.distinct
def dayRangeAsParams(year: Parameter[String], month: Parameter[String], day: Parameter[String]): Seq[(String, String, String)] =
prevDaysFrom(parametersToDay(year, month, day)).map {
dayToStrings(_)
}
/**
* Return this date (given as string parameters) and all earlier dates bounded by the earliest day as a sequence of string date triples (year, month, day)
*/
def thisAndPrevDays(year: Parameter[String], month: Parameter[String], day: Parameter[String]) = dayRangeAsParams(year, month, day)
/**
   * Return the last day of the month passed (given as string parameters) and all earlier dates bounded by the earliest day as a sequence of string date triples (year, month, day)
*/
def thisAndPrevDays(year: Parameter[String], month: Parameter[String]): Seq[(String, String, String)] = {
val lastOfMonth = parametersToDay(year, month, p("01"))
lastOfMonth.add(Calendar.MONTH, 1)
lastOfMonth.add(Calendar.DAY_OF_MONTH, -1)
val lastOfMonthParameters = dayToParameters(lastOfMonth)
thisAndPrevDays(lastOfMonthParameters._1, lastOfMonthParameters._2, lastOfMonthParameters._3)
}
/**
* Return this month (given as string parameters) and all earlier months bounded by the earliest day as a sequence of string month tuple (year, month)
*/
def thisAndPrevMonths(year: Parameter[String], month: Parameter[String]): Seq[(String, String)] = {
val lastOfMonth = parametersToDay(year, month, p("01"))
lastOfMonth.add(Calendar.MONTH, 1)
lastOfMonth.add(Calendar.DAY_OF_MONTH, -1)
val lastOfMonthParameters = dayToParameters(lastOfMonth)
thisAndPrevDays(lastOfMonthParameters._1, lastOfMonthParameters._2, lastOfMonthParameters._3).map { case (year, month, day) => (year, month) }.distinct
}
/**
* Given a Calendar, return the previous month as a Calendar or None, if the earliest day is crossed.
*/
def prevMonth(thisDay: Calendar): Option[Calendar] = {
if (thisDay.after(earliestDay)) {
val prevDay = thisDay.clone().asInstanceOf[Calendar]
prevDay.add(Calendar.MONTH, -1)
Some(prevDay)
} else {
None
}
}
/**
* Given a month (passed as string parameters) return the previous month as a string month tuple (year, month) or None if earliest day is crossed.
*/
def prevMonth(year: Parameter[String], month: Parameter[String]): Option[(String, String)] = {
prevMonth(parametersToDay(year, month, p("01"))) match {
case Some(previousDay) => Some((dayToStrings(previousDay)._1, dayToStrings(previousDay)._2))
case None => None
}
}
/**
* Return the sequence of all days from today until the earliest day as string date triples (year, month, day)
*/
def allDays() = {
val (todaysYear, todaysMonth, todaysDay) = today
thisAndPrevDays(todaysYear, todaysMonth, todaysDay)
}
/**
* Return the sequence of all months from today until the earliest day as string month tuples (year, month)
*/
def allMonths() = {
val (todaysYear, todaysMonth, _) = today
thisAndPrevMonths(todaysYear, todaysMonth)
}
/**
   * Return all days of the month passed as string parameters as string date triples (year, month, day)
*/
def allDaysOfMonth(year: Parameter[String], month: Parameter[String]) = {
val lastOfMonth = parametersToDay(year, month, p("01"))
lastOfMonth.add(Calendar.MONTH, 1)
lastOfMonth.add(Calendar.DAY_OF_MONTH, -1)
val days = ListBuffer[(String, String, String)]()
var currentDate = lastOfMonth
var firstOfMonthReached = false
while (!firstOfMonthReached) {
firstOfMonthReached = currentDate.get(Calendar.DAY_OF_MONTH) == 1
days += dayToStrings(currentDate)
currentDate.add(Calendar.DAY_OF_MONTH, -1)
}
days.toList
}
}
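/**
 * Hedged usage sketch (not part of the original file): shows how the helpers
 * above compose. The parameter values are illustrative and a configured
 * Schedoscope environment (providing earliestDay/latestDay) is assumed.
 */
object DateParameterizationUtilsExample {
  import DateParameterizationUtils._
  def example(): Unit = {
    val (y, m, d) = (p("2015"), p("03"), p("15"))
    // Previous calendar day as a (year, month, day) string triple, if any.
    println(prevDay(y, m, d)) // e.g. Some(("2015", "03", "14"))
    // This day plus all earlier days down to the configured earliest day.
    println(thisAndPrevDays(y, m, d).take(3))
  }
}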
/**
* Defines parameters for monthly partitioned views along with date arithmetic methods.
*/
trait MonthlyParameterization {
val year: Parameter[String]
val month: Parameter[String]
val monthId: Parameter[String] = p(s"${year.v.get}${month.v.get}")
/**
* Return the previous month as a string month tuple (year, month) or None if earliest day is crossed.
*/
def prevMonth() = DateParameterizationUtils.prevMonth(year, month)
/**
* Return this and the previous months as string month tuples (year, month) until earliest day is crossed.
*/
def thisAndPrevMonths() = DateParameterizationUtils.thisAndPrevMonths(year, month)
/**
   * Return the days of this month and all previous days as string date triples (year, month, day) until the earliest day is crossed.
*/
def thisAndPrevDays() = DateParameterizationUtils.thisAndPrevDays(year, month)
/**
   * Return the given Calendar and all previous days as Calendars, bounded by the earliest day.
*/
def thisAndPrevDaysUntil(thisDay: Calendar) =
DateParameterizationUtils.prevDaysFrom(thisDay)
/**
* Return the sequence of all days from today until the earliest day as string date triples (year, month, day)
*/
def allDays() = DateParameterizationUtils.allDays()
/**
* Return the sequence of all months from today until the earliest day as string month tuples (year, month)
*/
def allMonths() = DateParameterizationUtils.allMonths()
/**
   * Return the last c months relative to this view's month, bounded by the earliest day, as string month tuples (year, month)
*/
  def lastMonths(c: Int) = {
    val to = DateParameterizationUtils.parametersToDay(year, month, p("01"))
    // Calendar is mutable: clone before shifting, otherwise the upper bound `to` is moved as well.
    val from = to.clone().asInstanceOf[Calendar]
    // `c` is the number of months to look back from this view's month.
    from.add(Calendar.MONTH, -c)
    DateParameterizationUtils.monthParameterRange(DateParameterizationUtils.dayRange(from, to))
  }
/**
   * Return all days of the month as string date triples (year, month, day)
*/
def allDaysOfMonth() = DateParameterizationUtils.allDaysOfMonth(year, month)
}
/**
* Defines parameters for daily partitioned views along with date arithmetic methods.
*/
trait DailyParameterization {
val year: Parameter[String]
val month: Parameter[String]
val day: Parameter[String]
val dateId: Parameter[String] = p(s"${year.v.get}${month.v.get}${day.v.get}")
/**
* Return the previous day as Strings or None, if the earliest day is crossed.
*/
def prevDay() = DateParameterizationUtils.prevDay(year, month, day)
/**
* Return the previous month as a string month tuple (year, month) or None if earliest day is crossed.
*/
def prevMonth() = DateParameterizationUtils.prevMonth(year, month)
/**
* Return this date and all earlier dates bounded by the earliest day as a sequence of string date triples (year, month, day)
*/
def thisAndPrevDays() = DateParameterizationUtils.thisAndPrevDays(year, month, day)
/**
* Return this and the previous months as string month tuples (year, month) until earliest day is crossed.
*/
def thisAndPrevMonths() = DateParameterizationUtils.thisAndPrevMonths(year, month)
/**
* Return the sequence of all days from today until the earliest day as string date triples (year, month, day)
*/
def allDays() = DateParameterizationUtils.allDays()
/**
* Return the sequence of all months from today until the earliest day as string month tuples (year, month)
*/
def allMonths() = DateParameterizationUtils.allMonths()
/**
   * Return the days of the last c months relative to this view's date, bounded by the earliest day, as string date triples (year, month, day)
*/
  def lastMonths(c: Int) = {
    val to = DateParameterizationUtils.parametersToDay(year, month, day)
    // Calendar is mutable: clone before shifting, otherwise the upper bound `to` is moved as well.
    val from = to.clone().asInstanceOf[Calendar]
    // `c` is the number of months to look back from this view's date.
    from.add(Calendar.MONTH, -c)
    DateParameterizationUtils.dayParameterRange(DateParameterizationUtils.dayRange(from, to))
  }
/**
   * Return the last c days relative to this view's date, bounded by the earliest day, as string date triples (year, month, day)
*/
  def lastDays(c: Int) = {
    val to = DateParameterizationUtils.parametersToDay(year, month, day)
    // Calendar is mutable: clone before shifting, otherwise the upper bound `to` is moved as well.
    val from = to.clone().asInstanceOf[Calendar]
    // `c` is the number of days to look back from this view's date.
    from.add(Calendar.DATE, -c)
    DateParameterizationUtils.dayParameterRange(DateParameterizationUtils.dayRange(from, to))
  }
}
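/**
 * Hedged sketch (not from the original source): a minimal carrier for
 * DailyParameterization, showing the date arithmetic the trait provides.
 * Real Schedoscope views would extend View as well; that wiring is omitted here.
 */
case class DailyParamsExample(year: Parameter[String],
                              month: Parameter[String],
                              day: Parameter[String]) extends DailyParameterization
// e.g. DailyParamsExample(p("2015"), p("03"), p("15")).prevDay()
// yields Some(("2015", "03", "14")) with the default settings.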
|
christianrichter/schedoscope
|
schedoscope-core/src/main/scala/org/schedoscope/dsl/views/DateParameterization.scala
|
Scala
|
apache-2.0
| 12,698
|
package com.twitter.finagle.spdy
import com.twitter.conversions.storage._
import com.twitter.finagle._
import com.twitter.finagle.netty3.transport.ChannelTransport
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.transport.Transport
import com.twitter.util.{Closable, StorageUnit}
import java.util.concurrent.atomic.AtomicInteger
import org.jboss.netty.channel.{Channel, ChannelPipelineFactory, Channels}
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse}
import org.jboss.netty.handler.codec.spdy._
class AnnotateSpdyStreamId extends SimpleFilter[HttpRequest, HttpResponse] {
def apply(request: HttpRequest, service: Service[HttpRequest, HttpResponse]) = {
val streamId = request.headers.get(SpdyHttpHeaders.Names.STREAM_ID)
service(request) map { response =>
response.headers.set(SpdyHttpHeaders.Names.STREAM_ID, streamId)
response
}
}
}
class GenerateSpdyStreamId extends SimpleFilter[HttpRequest, HttpResponse] {
private[this] val currentStreamId = new AtomicInteger(1)
def apply(request: HttpRequest, service: Service[HttpRequest, HttpResponse]) = {
SpdyHttpHeaders.setStreamId(request, currentStreamId.getAndAdd(2))
service(request) map { response =>
SpdyHttpHeaders.removeStreamId(response)
response
}
}
}
case class Spdy(
_version: SpdyVersion = SpdyVersion.SPDY_3_1,
_enableHeaderCompression: Boolean = true,
_maxHeaderSize: StorageUnit = 16384.bytes,
_maxRequestSize: StorageUnit = 5.megabytes,
_maxResponseSize: StorageUnit = 5.megabytes)
extends CodecFactory[HttpRequest, HttpResponse]
{
def version(version: SpdyVersion) = copy(_version = version)
def enableHeaderCompression(enable: Boolean) = copy(_enableHeaderCompression = enable)
def maxHeaderSize(size: StorageUnit) = copy(_maxHeaderSize = size)
def maxRequestSize(size: StorageUnit) = copy(_maxRequestSize = size)
def maxResponseSize(size: StorageUnit) = copy(_maxResponseSize = size)
private[this] def spdyFrameCodec = {
val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
if (_enableHeaderCompression) {
// Header blocks tend to be small so reduce the window-size of the
// compressor from 32 KB (15) to 2KB (11) to save memory.
// These settings still provide sufficient compression to fit the
// compressed header block within the TCP initial congestion window.
new SpdyFrameCodec(_version, 8192, maxHeaderSizeInBytes, 9, 11, 8)
} else {
new SpdyRawFrameCodec(_version, 8192, maxHeaderSizeInBytes)
}
}
def client = { config =>
new Codec[HttpRequest, HttpResponse] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
val maxResponseSizeInBytes = _maxResponseSize.inBytes.toInt
val pipeline = Channels.pipeline()
pipeline.addLast("spdyFrameCodec", spdyFrameCodec)
pipeline.addLast("spdySessionHandler", new SpdySessionHandler(_version, false))
pipeline.addLast("spdyHttpCodec", new SpdyHttpCodec(_version, maxResponseSizeInBytes))
pipeline
}
}
override def prepareConnFactory(
underlying: ServiceFactory[HttpRequest, HttpResponse], params: Stack.Params
): ServiceFactory[HttpRequest, HttpResponse] = {
new GenerateSpdyStreamId andThen super.prepareConnFactory(underlying, params)
}
override def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any, Any] =
new ChannelTransport(ch)
override def newClientDispatcher(transport: Transport[Any, Any], params: Stack.Params) =
new SpdyClientDispatcher(Transport.cast[HttpRequest, HttpResponse](transport))
}
}
def server = { config =>
new Codec[HttpRequest, HttpResponse] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val maxRequestSizeInBytes = _maxRequestSize.inBytes.toInt
val pipeline = Channels.pipeline()
pipeline.addLast("spdyFrameCodec", spdyFrameCodec)
pipeline.addLast("spdySessionHandler", new SpdySessionHandler(_version, true))
pipeline.addLast("spdyHttpCodec", new SpdyHttpCodec(_version, maxRequestSizeInBytes))
pipeline
}
}
override def prepareConnFactory(
underlying: ServiceFactory[HttpRequest, HttpResponse], params: Stack.Params
): ServiceFactory[HttpRequest, HttpResponse] = {
new AnnotateSpdyStreamId andThen super.prepareConnFactory(underlying, params)
}
override def newServerDispatcher(
transport: Transport[Any, Any],
service: Service[HttpRequest, HttpResponse]
): Closable = new SpdyServerDispatcher(
Transport.cast[HttpResponse, HttpRequest](transport), service)
}
}
override val protocolLibraryName: String = "spdy"
}
object Spdy {
def get() = Spdy()
}
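/**
 * Hedged usage sketch (not in the original file): the copy-style builders on
 * the Spdy case class can be chained to tune limits before handing the codec
 * factory to a Finagle client or server builder (that wiring is omitted here).
 */
object SpdyCodecExample {
  val codecFactory: CodecFactory[HttpRequest, HttpResponse] =
    Spdy()
      .version(SpdyVersion.SPDY_3_1)
      .maxHeaderSize(32.kilobytes)
      .maxResponseSize(10.megabytes)
}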
|
sveinnfannar/finagle
|
finagle-spdy/src/main/scala/com/twitter/finagle/spdy/Codec.scala
|
Scala
|
apache-2.0
| 5,008
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.netty
import java.io.InputStreamReader
import java.nio._
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeUnit
import scala.concurrent.{Await, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
import com.google.common.io.CharStreams
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.ShouldMatchers
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.network.{BlockDataManager, BlockTransferService}
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.BlockFetchingListener
import org.apache.spark.storage.{BlockId, ShuffleBlockId}
class NettyBlockTransferSecuritySuite extends SparkFunSuite with MockitoSugar with ShouldMatchers {
test("security default off") {
val conf = new SparkConf()
.set("spark.app.id", "app-id")
testConnection(conf, conf) match {
case Success(_) => // expected
case Failure(t) => fail(t)
}
}
test("security on same password") {
val conf = new SparkConf()
.set("spark.authenticate", "true")
.set("spark.authenticate.secret", "good")
.set("spark.app.id", "app-id")
testConnection(conf, conf) match {
case Success(_) => // expected
case Failure(t) => fail(t)
}
}
test("security on mismatch password") {
val conf0 = new SparkConf()
.set("spark.authenticate", "true")
.set("spark.authenticate.secret", "good")
.set("spark.app.id", "app-id")
val conf1 = conf0.clone.set("spark.authenticate.secret", "bad")
testConnection(conf0, conf1) match {
case Success(_) => fail("Should have failed")
case Failure(t) => t.getMessage should include ("Mismatched response")
}
}
test("security mismatch auth off on server") {
val conf0 = new SparkConf()
.set("spark.authenticate", "true")
.set("spark.authenticate.secret", "good")
.set("spark.app.id", "app-id")
val conf1 = conf0.clone.set("spark.authenticate", "false")
testConnection(conf0, conf1) match {
case Success(_) => fail("Should have failed")
      case Failure(t) => // any error may occur here: the server will interpret the SASL token as an RPC
}
}
test("security mismatch auth off on client") {
val conf0 = new SparkConf()
.set("spark.authenticate", "false")
.set("spark.authenticate.secret", "good")
.set("spark.app.id", "app-id")
val conf1 = conf0.clone.set("spark.authenticate", "true")
testConnection(conf0, conf1) match {
case Success(_) => fail("Should have failed")
case Failure(t) => t.getMessage should include ("Expected SaslMessage")
}
}
/**
* Creates two servers with different configurations and sees if they can talk.
   * Returns Success() if they can transfer a block, and Failure() if the block transfer fails in the
   * expected way. Anything else going wrong throws an out-of-band exception.
*/
private def testConnection(conf0: SparkConf, conf1: SparkConf): Try[Unit] = {
val blockManager = mock[BlockDataManager]
val blockId = ShuffleBlockId(0, 1, 2)
val blockString = "Hello, world!"
val blockBuffer = new NioManagedBuffer(ByteBuffer.wrap(
blockString.getBytes(StandardCharsets.UTF_8)))
when(blockManager.getBlockData(blockId)).thenReturn(blockBuffer)
val securityManager0 = new SecurityManager(conf0)
val exec0 = new NettyBlockTransferService(conf0, securityManager0, "localhost", numCores = 1)
exec0.init(blockManager)
val securityManager1 = new SecurityManager(conf1)
val exec1 = new NettyBlockTransferService(conf1, securityManager1, "localhost", numCores = 1)
exec1.init(blockManager)
val result = fetchBlock(exec0, exec1, "1", blockId) match {
case Success(buf) =>
val actualString = CharStreams.toString(
new InputStreamReader(buf.createInputStream(), StandardCharsets.UTF_8))
actualString should equal(blockString)
buf.release()
Success(())
case Failure(t) =>
Failure(t)
}
exec0.close()
exec1.close()
result
}
/** Synchronously fetches a single block, acting as the given executor fetching from another. */
private def fetchBlock(
self: BlockTransferService,
from: BlockTransferService,
execId: String,
blockId: BlockId): Try[ManagedBuffer] = {
val promise = Promise[ManagedBuffer]()
self.fetchBlocks(from.hostName, from.port, execId, Array(blockId.toString),
new BlockFetchingListener {
override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
promise.failure(exception)
}
override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
promise.success(data.retain())
}
})
Await.ready(promise.future, FiniteDuration(10, TimeUnit.SECONDS))
promise.future.value.get
}
}
|
gioenn/xSpark
|
core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
|
Scala
|
apache-2.0
| 5,847
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.apigateway.util
import uk.gov.hmrc.apigateway.exception.GatewayError.NotFound
import uk.gov.hmrc.apigateway.model.ProxyRequest
import uk.gov.hmrc.apigateway.util.HttpHeaders.ACCEPT
import uk.gov.hmrc.apigateway.util.ProxyRequestUtils.{validateContext, parseVersion}
import uk.gov.hmrc.play.test.UnitSpec
class ProxyRequestUtilsSpec extends UnitSpec {
private val proxyRequest = ProxyRequest("", "")
"Request context validation" should {
"fail for request without context" in {
intercept[NotFound] {
await(validateContext(proxyRequest.copy(path = "")))
}
}
"succeed for request with context" in {
await(validateContext(proxyRequest.copy(path = "/foo/bar"))) shouldBe "foo"
}
}
"parseVersion" should {
def runTestWithHeaderFixture(headerFixture: Map[String, String]): String = {
await(parseVersion(proxyRequest.copy(headers = headerFixture)))
}
"return the default version 1.0 when the Accept header can not be parsed" in {
def runTestWithHeaderFixtureAndInterceptException(headersFixtures: Map[String, String]*) = {
headersFixtures.foreach { headersFixture =>
runTestWithHeaderFixture(headersFixture) shouldBe "1.0"
}
}
runTestWithHeaderFixtureAndInterceptException(
Map.empty,
Map(ACCEPT -> "foo/bar"),
Map(ACCEPT -> "application/vnd.hmrc.aaa"))
}
"parse the version from the Accept header" in {
runTestWithHeaderFixture(Map(ACCEPT -> "application/vnd.hmrc.2.0+json")) shouldBe "2.0"
}
}
}
|
hmrc/api-gateway
|
test/uk/gov/hmrc/apigateway/util/ProxyRequestUtilsSpec.scala
|
Scala
|
apache-2.0
| 2,185
|
package io.github.finaglecircuit
import com.twitter.util.Future
trait CircuitBreaker {
val name: CircuitName
/**
* Wrap the passed call in a circuit breaking context. Possibly fails with a CircuitBroken failure in the future result.
*/
def withCircuit[T](body: => Future[T]): Future[T]
/**
* Event callback for when the circuit changes state
* @param state the new state of the circuit
*/
def onCircuitStatusChange(state: CircuitStateChange => Unit): CircuitBreaker
/**
* Shutdown the circuit breaker.
*/
def stop(): Unit
}
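/**
 * Hedged usage sketch (not part of the original trait): `breaker` stands for
 * any concrete CircuitBreaker implementation and `remoteCall` for any
 * Future-returning operation worth protecting; both names are illustrative.
 */
object CircuitBreakerUsage {
  def guarded(breaker: CircuitBreaker, remoteCall: () => Future[String]): Future[String] =
    breaker
      .onCircuitStatusChange(change => println(s"circuit ${breaker.name} is now $change"))
      .withCircuit(remoteCall())
}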
|
daviddenton/finagle-circuit
|
src/main/scala/io/github/finaglecircuit/CircuitBreaker.scala
|
Scala
|
apache-2.0
| 567
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.util.concurrent.TimeoutException
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, OffsetOutOfRangeException}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.connector.read.InputPartition
import org.apache.spark.sql.connector.read.streaming.{ContinuousPartitionReader, ContinuousPartitionReaderFactory, ContinuousStream, Offset, PartitionOffset}
import org.apache.spark.sql.kafka010.KafkaSourceProvider._
import org.apache.spark.sql.kafka010.consumer.KafkaDataConsumer
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* A [[ContinuousStream]] for data from kafka.
*
* @param offsetReader a reader used to get kafka offsets. Note that the actual data will be
* read by per-task consumers generated later.
* @param kafkaParams String params for per-task Kafka consumers.
* @param options Params which are not Kafka consumer params.
* @param metadataPath Path to a directory this reader can use for writing metadata.
* @param initialOffsets The Kafka offsets to start reading data at.
* @param failOnDataLoss Flag indicating whether reading should fail in data loss
* scenarios, where some offsets after the specified initial ones can't be
* properly read.
*/
class KafkaContinuousStream(
private[kafka010] val offsetReader: KafkaOffsetReader,
kafkaParams: ju.Map[String, Object],
options: CaseInsensitiveStringMap,
metadataPath: String,
initialOffsets: KafkaOffsetRangeLimit,
failOnDataLoss: Boolean)
extends ContinuousStream with Logging {
private[kafka010] val pollTimeoutMs =
options.getLong(KafkaSourceProvider.CONSUMER_POLL_TIMEOUT, 512)
private val includeHeaders = options.getBoolean(INCLUDE_HEADERS, false)
// Initialized when creating reader factories. If this diverges from the partitions at the latest
// offsets, we need to reconfigure.
// Exposed outside this object only for unit tests.
@volatile private[sql] var knownPartitions: Set[TopicPartition] = _
override def initialOffset(): Offset = {
val offsets = initialOffsets match {
case EarliestOffsetRangeLimit => KafkaSourceOffset(offsetReader.fetchEarliestOffsets())
case LatestOffsetRangeLimit => KafkaSourceOffset(offsetReader.fetchLatestOffsets(None))
case SpecificOffsetRangeLimit(p) => offsetReader.fetchSpecificOffsets(p, reportDataLoss)
case SpecificTimestampRangeLimit(p) => offsetReader.fetchSpecificTimestampBasedOffsets(p,
failsOnNoMatchingOffset = true)
}
logInfo(s"Initial offsets: $offsets")
offsets
}
override def deserializeOffset(json: String): Offset = {
KafkaSourceOffset(JsonUtils.partitionOffsets(json))
}
override def planInputPartitions(start: Offset): Array[InputPartition] = {
val oldStartPartitionOffsets = start.asInstanceOf[KafkaSourceOffset].partitionToOffsets
val currentPartitionSet = offsetReader.fetchEarliestOffsets().keySet
val newPartitions = currentPartitionSet.diff(oldStartPartitionOffsets.keySet)
val newPartitionOffsets = offsetReader.fetchEarliestOffsets(newPartitions.toSeq)
val deletedPartitions = oldStartPartitionOffsets.keySet.diff(currentPartitionSet)
if (deletedPartitions.nonEmpty) {
val message = if (
offsetReader.driverKafkaParams.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
s"$deletedPartitions are gone. ${CUSTOM_GROUP_ID_ERROR_MESSAGE}"
} else {
s"$deletedPartitions are gone. Some data may have been missed."
}
reportDataLoss(message)
}
val startOffsets = newPartitionOffsets ++
oldStartPartitionOffsets.filterKeys(!deletedPartitions.contains(_))
knownPartitions = startOffsets.keySet
startOffsets.toSeq.map {
case (topicPartition, start) =>
KafkaContinuousInputPartition(
topicPartition, start, kafkaParams, pollTimeoutMs, failOnDataLoss, includeHeaders)
}.toArray
}
override def createContinuousReaderFactory(): ContinuousPartitionReaderFactory = {
KafkaContinuousReaderFactory
}
/** Stop this source and free any resources it has allocated. */
def stop(): Unit = synchronized {
offsetReader.close()
}
override def commit(end: Offset): Unit = {}
override def mergeOffsets(offsets: Array[PartitionOffset]): Offset = {
val mergedMap = offsets.map {
case KafkaSourcePartitionOffset(p, o) => Map(p -> o)
}.reduce(_ ++ _)
KafkaSourceOffset(mergedMap)
}
override def needsReconfiguration(): Boolean = {
offsetReader.fetchLatestOffsets(None).keySet != knownPartitions
}
override def toString(): String = s"KafkaSource[$offsetReader]"
/**
* If `failOnDataLoss` is true, this method will throw an `IllegalStateException`.
* Otherwise, just log a warning.
*/
private def reportDataLoss(message: String): Unit = {
if (failOnDataLoss) {
throw new IllegalStateException(message + s". $INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_TRUE")
} else {
logWarning(message + s". $INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_FALSE")
}
}
}
/**
* An input partition for continuous Kafka processing. This will be serialized and transformed
* into a full reader on executors.
*
* @param topicPartition The (topic, partition) pair this task is responsible for.
* @param startOffset The offset to start reading from within the partition.
* @param kafkaParams Kafka consumer params to use.
* @param pollTimeoutMs The timeout for Kafka consumer polling.
* @param failOnDataLoss Flag indicating whether data reader should fail if some offsets
* are skipped.
* @param includeHeaders Flag indicating whether to include Kafka records' headers.
*/
case class KafkaContinuousInputPartition(
topicPartition: TopicPartition,
startOffset: Long,
kafkaParams: ju.Map[String, Object],
pollTimeoutMs: Long,
failOnDataLoss: Boolean,
includeHeaders: Boolean) extends InputPartition
object KafkaContinuousReaderFactory extends ContinuousPartitionReaderFactory {
override def createReader(partition: InputPartition): ContinuousPartitionReader[InternalRow] = {
val p = partition.asInstanceOf[KafkaContinuousInputPartition]
new KafkaContinuousPartitionReader(
p.topicPartition, p.startOffset, p.kafkaParams, p.pollTimeoutMs,
p.failOnDataLoss, p.includeHeaders)
}
}
/**
* A per-task data reader for continuous Kafka processing.
*
* @param topicPartition The (topic, partition) pair this data reader is responsible for.
* @param startOffset The offset to start reading from within the partition.
* @param kafkaParams Kafka consumer params to use.
* @param pollTimeoutMs The timeout for Kafka consumer polling.
* @param failOnDataLoss Flag indicating whether data reader should fail if some offsets
* are skipped.
*/
class KafkaContinuousPartitionReader(
topicPartition: TopicPartition,
startOffset: Long,
kafkaParams: ju.Map[String, Object],
pollTimeoutMs: Long,
failOnDataLoss: Boolean,
includeHeaders: Boolean) extends ContinuousPartitionReader[InternalRow] {
private val consumer = KafkaDataConsumer.acquire(topicPartition, kafkaParams)
private val unsafeRowProjector = new KafkaRecordToRowConverter()
.toUnsafeRowProjector(includeHeaders)
private var nextKafkaOffset = startOffset
private var currentRecord: ConsumerRecord[Array[Byte], Array[Byte]] = _
override def next(): Boolean = {
var r: ConsumerRecord[Array[Byte], Array[Byte]] = null
while (r == null) {
if (TaskContext.get().isInterrupted() || TaskContext.get().isCompleted()) return false
// Our consumer.get is not interruptible, so we have to set a low poll timeout, leaving
// interrupt points to end the query rather than waiting for new data that might never come.
try {
r = consumer.get(
nextKafkaOffset,
untilOffset = Long.MaxValue,
pollTimeoutMs,
failOnDataLoss)
} catch {
// We didn't read within the timeout. We're supposed to block indefinitely for new data, so
// swallow and ignore this.
case _: TimeoutException | _: org.apache.kafka.common.errors.TimeoutException =>
// This is a failOnDataLoss exception. Retry if nextKafkaOffset is within the data range,
// or if it's the endpoint of the data range (i.e. the "true" next offset).
case e: IllegalStateException if e.getCause.isInstanceOf[OffsetOutOfRangeException] =>
val range = consumer.getAvailableOffsetRange()
if (range.latest >= nextKafkaOffset && range.earliest <= nextKafkaOffset) {
// retry
} else {
throw e
}
}
}
nextKafkaOffset = r.offset + 1
currentRecord = r
true
}
override def get(): UnsafeRow = {
unsafeRowProjector(currentRecord)
}
override def getOffset(): KafkaSourcePartitionOffset = {
KafkaSourcePartitionOffset(topicPartition, nextKafkaOffset)
}
override def close(): Unit = {
consumer.release()
}
}
|
goldmedal/spark
|
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaContinuousStream.scala
|
Scala
|
apache-2.0
| 10,146
|
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.geohash
import java.awt.geom.Point2D
import com.vividsolutions.jts.geom.{Coordinate, GeometryFactory, Point, PrecisionModel}
import org.geotools.referencing.GeodeticCalculator
import org.locationtech.geomesa.utils.text.ObjectPoolFactory
/**
* Encapsulates the notion of a geographic distance, and is primarily intended
* to simplify the expression of constants.
*/
trait GeomDistance {
/**
* Simple class, and its companion, to allow us to express constraint distances
* a bit more naturally.
*
* @param distanceInMeters the distance in meters for this description
*/
class Distance(distanceInMeters:Double) {
// conversion functions to be called post-fix, metric
def meter : Distance = new Distance(distanceInMeters)
def meters : Distance = meter
def kilometer : Distance = new Distance(distanceInMeters * 1000.0)
def kilometers : Distance = kilometer
// conversion functions to be called post-fix, imperial
def foot : Distance = new Distance(distanceInMeters * Distance.METERS_PER_FOOT)
def feet : Distance = foot
def mile : Distance = new Distance(distanceInMeters * Distance.METERS_PER_FOOT * 5280.0)
def miles : Distance = mile
// supports an implicit call in the object to convert back to a Double
def getDistanceInMeters : Double = distanceInMeters
override def toString : String = {
distanceInMeters match {
case m if m<1000.0 => "%1.1f meters".format(m)
case m => "%1.1f kilometers".format(m / 1000.0)
}
}
}
object Distance {
val METERS_PER_FOOT : Double = 12.0 * 2.54 / 100.0
// these take care of ensuring that "1 kilometer" is used as
// "(new Distance(1)).kilometer"
implicit def double2distance(x:Double) : Distance = new Distance(x)
implicit def int2distance(x:Int) : Distance = new Distance(x.asInstanceOf[Double])
implicit def long2distance(x:Long) : Distance = new Distance(x.asInstanceOf[Double])
// this takes care of ensuring that "1 kilometer", when used as a Double,
// can be converted back reasonably
implicit def distance2double(x:Distance) : Double = x.getDistanceInMeters
}
}
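/**
 * Hedged sketch (not in the original file): the Distance DSL is available to
 * anything mixing in GeomDistance, as VincentyModel does below; the values
 * here are illustrative.
 */
object GeomDistanceExample extends GeomDistance {
  import Distance._
  val buffer: Distance = 1.5.kilometers  // implicit Double => Distance, then the kilometer conversion
  val bufferInMeters: Double = buffer    // implicit Distance => Double
}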
/**
* Utility object for computing distances between two points. The points
* are assumed to be specified using WGS-84.
*
* This implementation depends on docs.geotools.org/latest/javadocs/org/geotools/referencing/GeodeticCalculator.html
* Which is backed by Thaddeus Vincenty's formulas for calculating distances on an ellipsoid
* http://en.wikipedia.org/wiki/Vincenty%27s_formulae
*/
object VincentyModel extends GeomDistance {
private val geometryFactory = new GeometryFactory(new PrecisionModel, 4326)
private val geodeticCalculatorPool = ObjectPoolFactory { new GeodeticCalculator }
/**
* Computation of the distance between two points.
*
* @param a The starting point
* @param b The end point
* @return The distance between the two points
*/
def getDistanceBetweenTwoPoints(a:Point, b:Point) : Distance =
getDistanceBetweenTwoPoints(a.getX, a.getY, b.getX, b.getY)
/**
* Computation of the distance between two points.
*
* @param x1 The starting point's x value
* @param y1 The starting point's y value
* @param x2 The ending point's x value
* @param y2 The ending point's y value
* @return The distance between the two points
*/
def getDistanceBetweenTwoPoints(x1: Double, y1: Double, x2: Double, y2: Double) : Distance =
geodeticCalculatorPool.withResource{ calc => {
calc.setStartingGeographicPoint(x1, y1)
calc.setDestinationGeographicPoint(x2, y2)
new Distance(calc.getOrthodromicDistance)
}}
/**
*
* @param a The starting point
* @param bearing The bearing expressed in decimal degrees from -180° to 180°.
* NB: 0° is North, 90° is East, (-)180° is South, and West is -90°.
* @param distance The orthodromic distance from the starting point expressed in meters.
* @return The destination point.
*/
def moveWithBearingAndDistance(a: Point, bearing: Double, distance: Double): Point =
moveWithBearingAndDistance(a.getX, a.getY, bearing, distance)
/**
*
* @param x The starting point's x value
* @param y The starting point's y value
* @param bearing The bearing expressed in decimal degrees from -180° to 180°.
* NB: 0° is North, 90° is East, (-)180° is South, and West is -90°.
* @param distance The orthodromic distance from the starting point expressed in meters.
* @return The destination point.
*/
def moveWithBearingAndDistance(x: Double, y: Double, bearing: Double, distance: Double): Point =
geodeticCalculatorPool.withResource{ calc => {
calc.setStartingGeographicPoint(x, y)
calc.setDirection(bearing, distance)
val point: Point2D = calc.getDestinationGeographicPoint
geometryFactory.createPoint(new Coordinate(point.getX, point.getY))
}}
}
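/**
 * Hedged usage sketch (not part of the original file): distance between two
 * lon/lat points and a 500 m move due east; the coordinates are illustrative.
 */
object VincentyModelExample {
  import VincentyModel._
  val span = getDistanceBetweenTwoPoints(-77.0366, 38.8977, -77.0091, 38.8899)
  val shifted = moveWithBearingAndDistance(-77.0366, 38.8977, 90.0, 500.0)
}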
|
drackaer/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geohash/GeomDistance.scala
|
Scala
|
apache-2.0
| 5,531
|
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.util
import org.knora.webapi.messages.store.triplestoremessages.{SparqlSelectResponse, SparqlSelectResponseBody, SparqlSelectResponseHeader, VariableResultsRow}
import spray.json._
/**
* A spray-json protocol that parses JSON returned by a SPARQL endpoint. Empty values and empty rows are
* ignored.
*/
object SparqlResultProtocol extends DefaultJsonProtocol {
/**
* Converts a [[JsValue]] to a [[VariableResultsRow]].
*/
implicit object VariableResultsJsonFormat extends JsonFormat[VariableResultsRow] {
def read(jsonVal: JsValue): VariableResultsRow = {
// Collapse the JSON structure into a simpler Map of SPARQL variable names to values.
val mapToWrap: Map[String, String] = jsonVal.asJsObject.fields.foldLeft(Map.empty[String, String]) {
case (acc, (key, value)) => value.asJsObject.getFields("value") match {
case Seq(JsString(valueStr)) if valueStr.nonEmpty => // Ignore empty strings.
acc + (key -> valueStr)
case _ => acc
}
}
// Wrap that Map in an ErrorHandlingMap that will gracefully report errors about missing values when they
// are accessed later.
VariableResultsRow(new ErrorHandlingMap(mapToWrap, { key: String => s"No value found for SPARQL query variable '$key' in query result row" }))
}
def write(variableResultsRow: VariableResultsRow): JsValue = ???
}
/**
* Converts a [[JsValue]] to a [[SparqlSelectResponseBody]].
*/
implicit object SparqlSelectResponseBodyFormat extends JsonFormat[SparqlSelectResponseBody] {
def read(jsonVal: JsValue): SparqlSelectResponseBody = {
jsonVal.asJsObject.fields.get("bindings") match {
case Some(bindingsJson: JsArray) =>
// Filter out empty rows.
SparqlSelectResponseBody(bindingsJson.convertTo[Seq[VariableResultsRow]].filter(_.rowMap.keySet.nonEmpty))
case _ => SparqlSelectResponseBody(Nil)
}
}
def write(sparqlSelectResponseBody: SparqlSelectResponseBody): JsValue = ???
}
implicit val headerFormat: JsonFormat[SparqlSelectResponseHeader] = jsonFormat1(SparqlSelectResponseHeader)
implicit val responseFormat: JsonFormat[SparqlSelectResponse] = jsonFormat2(SparqlSelectResponse)
}
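/**
 * Hedged usage sketch (not in the original file): parsing a minimal SPARQL
 * SELECT JSON payload with the protocol above. The JSON literal is
 * illustrative and assumes the response model mirrors the standard SPARQL 1.1
 * JSON results layout (head/vars, results/bindings).
 */
object SparqlResultProtocolExample {
  import SparqlResultProtocol._
  val response: SparqlSelectResponse =
    """{
      |  "head": { "vars": ["s"] },
      |  "results": { "bindings": [ { "s": { "type": "uri", "value": "http://example.org/a" } } ] }
      |}""".stripMargin.parseJson.convertTo[SparqlSelectResponse]
}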
|
musicEnfanthen/Knora
|
webapi/src/main/scala/org/knora/webapi/util/SparqlResultProtocol.scala
|
Scala
|
agpl-3.0
| 3,210
|
/*
* Copyright 2017 Shinya Mochida
*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example
sealed trait Operation[+A]
case class GetString[+A](f: String => A) extends Operation[A]
case class PutString[+A](s: String, x: A) extends Operation[A]
object Operation {
implicit val operationFunctor = new Functor[Operation] {
override def fmap[A, B](m: Operation[A])(f: (A) => B): Operation[B] = m match {
case GetString(s) => GetString(f compose s)
case PutString(s, x) => PutString(s, f(x))
}
}
def getString: FreeM[Operation, String] = Free(GetString({s => Pure(s)}))
  def putString(s: String): FreeM[Operation, Unit] = Free(PutString(s, Pure(())))
def mapM_[S[+_]: Functor, A](f: A => FreeM[S, Unit], seq: Seq[A]): FreeM[S, Unit] = seq.toList match {
case x::xs => xs.foldLeft(f(x)){ (m: FreeM[S, Unit], s: A) => m.bind { _ => f(s) } }
case Nil => Pure(())
}
}
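/**
 * Hedged sketch (not part of the original file): a tiny program built from the
 * operations above, assuming the FreeM/Pure/Free machinery defined elsewhere
 * in this project (bind is used exactly as in mapM_ above).
 */
object OperationExample {
  import Operation._
  // Read one line, then write it back twice.
  val echoTwice: FreeM[Operation, Unit] =
    getString.bind { line => mapM_[Operation, String](putString, Seq(line, line)) }
}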
|
mike-neck/jvm-langs-til
|
scala-projects/scala-review/src/main/scala/com/example/Operation.scala
|
Scala
|
apache-2.0
| 1,423
|
package me.bowdon.ddldiff.parsers
import me.bowdon.ddldiff.ast._
class ParseError(reason: String) {
override def toString = reason
}
object DDLParsers extends ColumnConstraintParsers {
def create: Parser[String] = p"create" <~ (p"temp" | p"temporary").?
// TODO affinities
// https://sqlite.org/datatype3.html#determination_of_column_affinity
def sqlType: Parser[SQLType] = {
p"text" ^^ { _ => Text } |
p"numeric" ^^ { _ => Numeric } |
p"integer" ^^ { _ => Integer } |
p"real" ^^ { _ => Real } |
p"blob" ^^ { _ => Blob }
}
def column: Parser[ColumnDef] = {
// TODO the type is actually optional (defaults to blob with SQLite)
identifier ~ sqlType ~ columnConstraint.* ^^ {
case name ~ sqlType ~ colConstraints => ColumnDef(name, sqlType, colConstraints.toSet)
}
}
def columns: Parser[Map[Identifier, ColumnDef]] = {
val parser = parens(repsep(column, ","))
parser.map(colDefs =>
colDefs.map(col => (col.name, col)).toMap)
}
def table: Parser[TableDef] = {
p"table" ~ p"if not exists".? ~> identifier ~ columns ^^ {
case name ~ cols => TableDef(name, cols, Set.empty)
}
}
def terminator: Parser[String] = ";"
def expr: Parser[TableDef] = {
create ~> table <~ terminator.?
}
def apply(input: String): Either[ParseError, TableDef] = {
// Either is standing in for a type I have yet to design
parseAll(expr, input) match {
case Success(result, _) => Right(result)
case failure: NoSuccess => Left(new ParseError(failure.msg))
}
}
}
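/**
 * Hedged usage sketch (not part of the original file): parsing a single
 * CREATE TABLE statement with the parser above; the DDL string is illustrative.
 */
object DDLParsersExample {
  def main(args: Array[String]): Unit =
    DDLParsers("create table users (id integer, name text);") match {
      case Right(table) => println(s"parsed table: ${table.name}")
      case Left(error)  => println(s"parse error: $error")
    }
}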
|
cbowdon/ddl-diff
|
src/main/scala/me/bowdon/ddldiff/parsers/DDLParsers.scala
|
Scala
|
gpl-3.0
| 1,576
|
package water.fvec
import java.io.File
import java.net.URI
import water._
import water.parser.ParseSetup._
import water.parser.ParseSetup
import water.util.FrameUtils
import water.parser.DefaultParserProviders.GUESS_INFO
/**
* Wrapper around Java H2O Frame to provide more Scala-like API.
*
* @param frameKey reference of new frame
* @param names column names for new frame
* @param vecs vectors composing new frame
*/
class H2OFrame private (frameKey: Key[Frame], names: Array[String], vecs: Array[Vec])
extends Frame(frameKey, names, vecs) with FrameOps {
/** Create a new H2OFrame based on existing Java Frame.
*
* Simple field copy, so the Frames share
* underlying arrays. Recommended that the input Java Frame be dead after
* this call.
*
* @param fr Java frame
* @return new H2O frame with parsed data
*/
def this(fr : Frame) = this(if (fr._key!=null) fr._key else Key.make("dframe"+Key.rand()).asInstanceOf[Key[Frame]], fr._names, fr.vecs())
/**
* Create a new H2OFrame based on existing Java Frame referenced by its key.
* @param key reference to Java Frame
* @return new H2O frame
*/
def this(key : Key[Frame]) = this (DKV.get(key).get.asInstanceOf[Frame])
/**
* Create a new H2OFrame based on existing Java Frame referenced by its key.
*
* @param key string representation of a reference to Java Frame
* @return new H2O frame
*/
def this(key : String) = this (Key.make(key).asInstanceOf[Key[Frame]])
/**
* Create a new frame by parsing given files.
*
* @param parseSetup setup for parser
* @param uris URIs of files to parse
* @return new H2O frame containing parsed data
*/
def this(parseSetup: ParseSetup, uris: URI*) = this(water.util.FrameUtils.parseFrame(
Key.make(ParseSetup.createHexName(H2OFrame.baseName(uris(0)))),
parseSetup,
uris: _*))
/**
* Create a new frame by parsing given files.
*
* @param uris URIs of files to parse
* @return new H2O frame containing parsed data
*/
def this(uris: URI*) = this(water.util.FrameUtils.parseFrame(
Key.make(ParseSetup.createHexName(H2OFrame.baseName(uris(0)))),
uris : _*))
/**
* Create a new frame by parsing given file.
*
* @param file cluster-local file to parse (has to be available on each node)
* @return a new frame containing parsed file data
*/
def this(file : File) = this(file.toURI)
/** Create a new frame by parsing given file.
*
* @param parseSetup setup for parser
* @param file cluster-local file to parse (has to be available on each node)
* @return a new frame containing parsed file data
*/
def this(parseSetup: ParseSetup, file : File) = this(parseSetup, file.toURI)
// No-args public constructor for (de)serialization
def this() = this(null,null,new Array[Vec](0))
/* Constructor */
// Force into K/V store
assert(frameKey != null)
DKV.put(frameKey, new Value(frameKey, this))
/* ---- */
/** Expose internal key via a method.
*
* The motivation is to simplify manipulation with frame from Py4J (pySparkling)
*/
def key: Key[Frame] = _key
override def toString(): String = super[Frame].toString()
override def hashCode(): Int = super[Frame].hashCode()
}
/** Companion object providing factory methods to create frame
* from different sources.
*/
object H2OFrame {
def apply(key : Key[Frame]) = new H2OFrame(key)
def apply(f : Frame) = new H2OFrame(f)
def apply(s : String) = new H2OFrame(s)
def apply(file : File) = new H2OFrame(file)
def apply(uri : URI) = new H2OFrame(uri)
def baseName(uri: URI) = {
val s = uri.toString
s.substring(s.lastIndexOf('/')+1)
}
/** Return default parser setup */
def defaultParserSetup(singleQuotes: Boolean = true) =
new ParseSetup(GUESS_INFO, GUESS_SEP, singleQuotes, GUESS_HEADER, GUESS_COL_CNT,
null, null, null, null, null)
/** Return guessed parser setup for given file.
*
* @param file file to parse
* @return guessed parser setup
*/
def parserSetup(file: File): ParseSetup = parserSetup(file.toURI)
/**
* Return guessed parser setup for given file.
*
* @param userSetup user-specified hint for parser setup
* @param file file to parse
* @return guessed parser setup
*/
  def parserSetup(userSetup: ParseSetup, file: File): ParseSetup = parserSetup(userSetup, file.toURI)
/**
* Return guessed parser setup for given files.
*
* @param uris URIs of files to parse
* @return guessed parser setup
*/
def parserSetup(uris: URI*): ParseSetup = parserSetup(defaultParserSetup(), uris:_*)
/**
* Return guessed parser setup for given files.
*
* @param userSetup user-specified hint for parser setup
* @param uris URIs of files to parse
* @return guessed parser setup
*/
  def parserSetup(userSetup: ParseSetup, uris: URI*) = FrameUtils.guessParserSetup(userSetup, uris: _*)
}
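/**
 * Hedged usage sketch (not part of the original file): parsing a cluster-local
 * CSV into an H2OFrame. The path is illustrative and a running H2O cloud is
 * assumed.
 */
object H2OFrameExample {
  def loadAirlines(): H2OFrame = H2OFrame(new File("/data/airlines.csv"))
}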
|
mathemage/h2o-3
|
h2o-scala/src/main/scala/water/fvec/H2OFrame.scala
|
Scala
|
apache-2.0
| 5,044
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.index
import com.vividsolutions.jts.geom.Envelope
/**
* Trait for indexing and querying spatial data
*/
trait SpatialIndex[T] {
def insert(envelope: Envelope, item: T): Unit
def remove(envelope: Envelope, item: T): Boolean
def query(envelope: Envelope): Iterator[T]
def query(envelope: Envelope, filter: (T) => Boolean): Iterator[T] = query(envelope).filter(filter)
}
object SpatialIndex {
def getCenter(envelope: Envelope): (Double, Double) = {
val x = (envelope.getMinX + envelope.getMaxX) / 2.0
val y = (envelope.getMinY + envelope.getMaxY) / 2.0
(x, y)
}
}
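/**
 * Hedged sketch (not part of the original file): how a concrete SpatialIndex
 * would typically be queried; `index` stands for any implementation (e.g. a
 * bucket or R-tree backed one) and the envelope is illustrative.
 */
object SpatialIndexUsage {
  def itemsIn(index: SpatialIndex[String], envelope: Envelope): Seq[String] =
    index.query(envelope, _.nonEmpty).toSeq
}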
|
jahhulbert-ccri/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/index/SpatialIndex.scala
|
Scala
|
apache-2.0
| 1,097
|
package com.softwaremill.macwire.dependencyLookup
import com.softwaremill.macwire.Logger
import com.softwaremill.macwire.dependencyLookup.EligibleValuesFinder.Scope.LocalForward
import scala.reflect.macros.blackbox
private[macwire] class DependencyResolver[C <: blackbox.Context](val c: C, debug: Logger) {
import c.universe._
private val eligibleValuesFinder = new EligibleValuesFinder[c.type](c, debug)
private lazy val eligibleValues = eligibleValuesFinder.find()
/** Look for a single instance of type `t`.
* If either no instance or multiple instances are found,
* a compilation error is reported and `None` is returned.
*/
def resolve(param: Symbol, t: Type): Option[Tree] = {
eligibleValues.findInFirstScope(t).toList match {
case Nil =>
c.error(c.enclosingPosition, s"Cannot find a value of type: [$t]")
None
case value :: Nil =>
val forwardValues = eligibleValues.findInScope(t, LocalForward)
if (forwardValues.nonEmpty) {
c.warning(c.enclosingPosition, s"Found [$value] for parameter [${param.name}], " +
s"but a forward reference [${forwardValues.mkString(", ")}] was also eligible")
}
Some(value)
case values =>
c.error(c.enclosingPosition, s"Found multiple values of type [$t]: [$values]")
None
}
}
/** @return all the instances of type `t` that are accessible.
*/
def resolveAll(t: Type): Iterable[Tree] = {
eligibleValues.findInAllScope(t)
}
}
|
numesmat/macwire
|
macros/src/main/scala/com/softwaremill/macwire/dependencyLookup/DependencyResolver.scala
|
Scala
|
apache-2.0
| 1,520
|
package satisfaction
package hadoop
package hive.ms
import org.joda.time._
/**
 * DataInstance for a HiveTable with no partitions,
 * i.e. a non-partitioned table.
*/
case class NonPartitionedTable(
val hiveTable : HiveTable)
(implicit val ms : MetaStore) extends DataInstance with Markable {
def size: Long = {
ms.getSpaceUsed(hiveTable.dbName, hiveTable.tblName).toLong /// XXX Return size ??
}
def created: DateTime = {
/// XXX TBD FIXME
//// Add Method to metastore
null
}
def lastAccessed: DateTime = {
/// XXX TBD FIXME
//// Add method to metastore
null
}
def setMetaData( key: String, md : String ) : Unit = {
hiveTable.setMetaData( key, md)
}
def getMetaData( key: String ) : Option[String] = {
hiveTable.getMetaData( key)
}
/**
   * Mark that the producer of this
   * DataInstance has fully completed.
*/
def markCompleted : Unit = {
hiveTable.setMetaData("isComplete" , "true")
}
def markIncomplete : Unit = {
hiveTable.setMetaData("isComplete" , "false")
}
/**
   * Check whether the DataInstance has been marked completed,
   * according to the test of the Markable.
*/
def isMarkedCompleted : Boolean = {
getMetaData("isComplete") match {
case Some( check) => {
check.toBoolean
}
case None => false
}
}
}
|
jeromebanks/satisfaction
|
modules/hive-ms/src/main/scala/satisfaction/hive/ms/NonPartitionedTable.scala
|
Scala
|
apache-2.0
| 1,474
|
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle.mat
import scala.Int
import scala.{specialized => spec}
import org.saddle._
/**
* Houses specialized method implementations for code reuse in Mat subclasses
*/
private[saddle] object MatImpl {
def map[@spec(Boolean, Int, Long, Double) A: ST,
@spec(Boolean, Int, Long, Double) B: ST](mat: Mat[A])(f: A => B): Mat[B] = {
val sca = implicitly[ST[A]]
val scb = implicitly[ST[B]]
val buf = Array.ofDim[B](mat.length)
var i = 0
while(i < mat.length) {
val v = mat(i)
if (sca.isMissing(v))
buf(i) = scb.missing
else
buf(i) = f(v)
i += 1
}
Mat[B](mat.numRows, mat.numCols, buf)
}
def withoutRows[@spec(Boolean, Int, Long, Double) A: ST](m: Mat[A], locs: Array[Int]): Mat[A] = {
if (m.length == 0) Mat.empty[A]
else {
val locset = locs.toSet
val buf = Buffer[A](m.length)
var r = 0
var nRows = 0
while (r < m.numRows) {
if (!locset.contains(r)) {
nRows += 1
var c = 0
while (c < m.numCols) {
buf.add(m(r, c))
c += 1
}
}
r += 1
}
if (nRows == 0)
Mat.empty[A]
else
Mat(nRows, m.numCols, buf)
}
}
def takeRows[@spec(Boolean, Int, Long, Double) A: ST](m: Mat[A], locs: Array[Int]): Mat[A] = {
if (m.length == 0) Mat.empty[A]
else {
val buf = Buffer[A](m.length)
var r = 0
while (r < locs.length) {
val currRow = locs(r)
var c = 0
while (c < m.numCols) {
buf.add(m(currRow, c))
c += 1
}
r += 1
}
Mat(r, m.numCols, buf.toArray)
}
}
}
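/**
 * Hedged sketch (not part of the original file): row selection on a small
 * 3x2 matrix using the helpers above; the values are illustrative.
 */
private[saddle] object MatImplExample {
  val m = Mat(3, 2, Array(1, 2, 3, 4, 5, 6))
  val firstAndLast = MatImpl.takeRows(m, Array(0, 2))   // keeps rows 0 and 2
  val withoutMiddle = MatImpl.withoutRows(m, Array(1))  // drops row 1, same result here
}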
|
saddle/saddle
|
saddle-core/src/main/scala/org/saddle/mat/MatImpl.scala
|
Scala
|
apache-2.0
| 2,314
|
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.batch
import com.websudos.phantom.builder.QueryBuilder
import com.websudos.phantom.builder.query.{Batchable, CQLQuery, ExecutableStatement}
import com.websudos.phantom.builder.syntax.CQLSyntax
sealed class BatchQuery(val init: CQLQuery, added: Boolean = false) extends ExecutableStatement {
def add(queries: Batchable with ExecutableStatement*): BatchQuery = {
val chain = queries.foldLeft(init) {
(builder, query) => builder.forcePad.append(query.qb.terminate())
}
new BatchQuery(chain)
}
def timestamp(stamp: Long) = {
new BatchQuery(QueryBuilder.timestamp(init, stamp.toString))
}
def terminate: BatchQuery = {
new BatchQuery(QueryBuilder.Batch.applyBatch(init), true)
}
override def qb: CQLQuery = {
if (added) {
init
} else {
terminate.qb
}
}
}
private[phantom] trait Batcher {
def apply(batchType: String = CQLSyntax.Batch.Logged): BatchQuery = {
new BatchQuery(QueryBuilder.Batch.batch(batchType))
}
def logged: BatchQuery = {
new BatchQuery(QueryBuilder.Batch.batch(""))
}
def timestamp(stamp: Long) = {
apply().timestamp(stamp)
}
def unlogged: BatchQuery = {
new BatchQuery(QueryBuilder.Batch.batch(CQLSyntax.Batch.Unlogged))
}
def counter: BatchQuery = {
new BatchQuery(QueryBuilder.Batch.batch(CQLSyntax.Batch.Counter))
}
}
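/**
 * Hedged usage sketch (not part of the original file): chaining statements
 * onto an unlogged batch. `insertA` and `insertB` stand for any Batchable
 * ExecutableStatements (e.g. inserts built from phantom tables).
 */
private[phantom] object BatchQueryUsage {
  def unloggedBatch(
    batcher: Batcher,
    insertA: Batchable with ExecutableStatement,
    insertB: Batchable with ExecutableStatement
  ): BatchQuery =
    batcher.unlogged.add(insertA, insertB).timestamp(System.currentTimeMillis())
}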
|
dan-mi-sun/phantom
|
phantom-dsl/src/main/scala/com/websudos/phantom/batch/BatchQuery.scala
|
Scala
|
bsd-2-clause
| 2,878
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.{Arrays, ArrayList => JArrayList, List => JList}
import org.apache.log4j.LogManager
import org.apache.spark.sql.AnalysisException
import scala.collection.JavaConverters._
import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.hive.metastore.api.{FieldSchema, Schema}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
import org.apache.spark.Logging
import org.apache.spark.sql.hive.{HiveContext, HiveMetastoreTypes}
private[hive] class SparkSQLDriver(
val context: HiveContext = SparkSQLEnv.hiveContext)
extends Driver
with Logging {
private[hive] var tableSchema: Schema = _
private[hive] var hiveResponse: Seq[String] = _
override def init(): Unit = {
}
private def getResultSetSchema(query: context.QueryExecution): Schema = {
val analyzed = query.analyzed
logDebug(s"Result Schema: ${analyzed.output}")
if (analyzed.output.isEmpty) {
new Schema(Arrays.asList(new FieldSchema("Response code", "string", "")), null)
} else {
val fieldSchemas = analyzed.output.map { attr =>
new FieldSchema(attr.name, HiveMetastoreTypes.toMetastoreType(attr.dataType), "")
}
new Schema(fieldSchemas.asJava, null)
}
}
override def run(command: String): CommandProcessorResponse = {
// TODO unify the error code
try {
context.sparkContext.setJobDescription(command)
val execution = context.executePlan(context.sql(command).logicalPlan)
hiveResponse = execution.stringResult()
tableSchema = getResultSetSchema(execution)
new CommandProcessorResponse(0)
} catch {
case ae: AnalysisException =>
logDebug(s"Failed in [$command]", ae)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(ae), null, ae)
case cause: Throwable =>
logError(s"Failed in [$command]", cause)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(cause), null, cause)
}
}
override def close(): Int = {
hiveResponse = null
tableSchema = null
0
}
override def getResults(res: JList[_]): Boolean = {
if (hiveResponse == null) {
false
} else {
res.asInstanceOf[JArrayList[String]].addAll(hiveResponse.asJava)
hiveResponse = null
true
}
}
override def getSchema: Schema = tableSchema
override def destroy() {
super.destroy()
hiveResponse = null
tableSchema = null
}
}
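// A minimal usage sketch (not part of the original file): it assumes SparkSQLEnv has been
// initialized so that SparkSQLEnv.hiveContext is available; the query text is illustrative.
//
//   val driver = new SparkSQLDriver()
//   driver.init()
//   val response = driver.run("SELECT 1")
//   if (response.getResponseCode == 0) {
//     val rows = new java.util.ArrayList[String]()
//     driver.getResults(rows)
//     rows.asScala.foreach(println)
//   }
//   driver.close()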
|
chenc10/Spark-PAF
|
sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
|
Scala
|
apache-2.0
| 3,350
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zk
import java.util.Properties
import kafka.admin.{AdminOperationException, AdminUtils, BrokerMetadata, RackAwareMode}
import kafka.common.TopicAlreadyMarkedForDeletionException
import kafka.controller.ReplicaAssignment
import kafka.log.LogConfig
import kafka.server.{ConfigEntityName, ConfigType, DynamicConfig}
import kafka.utils._
import kafka.utils.Implicits._
import org.apache.kafka.common.{TopicPartition, Uuid}
import org.apache.kafka.common.errors._
import org.apache.kafka.common.internals.Topic
import org.apache.zookeeper.KeeperException.NodeExistsException
import scala.collection.{Map, Seq}
/**
* Provides admin related methods for interacting with ZooKeeper.
*
* This is an internal class and no compatibility guarantees are provided,
* see org.apache.kafka.clients.admin.AdminClient for publicly supported APIs.
*/
class AdminZkClient(zkClient: KafkaZkClient) extends Logging {
/**
* Creates the topic with given configuration
* @param topic topic name to create
* @param partitions Number of partitions to be set
* @param replicationFactor Replication factor
* @param topicConfig topic configs
* @param rackAwareMode rack aware mode for replica assignment
* @param usesTopicId Boolean indicating whether the topic ID will be created
*/
def createTopic(topic: String,
partitions: Int,
replicationFactor: Int,
topicConfig: Properties = new Properties,
rackAwareMode: RackAwareMode = RackAwareMode.Enforced,
usesTopicId: Boolean = false): Unit = {
val brokerMetadatas = getBrokerMetadatas(rackAwareMode)
val replicaAssignment = AdminUtils.assignReplicasToBrokers(brokerMetadatas, partitions, replicationFactor)
createTopicWithAssignment(topic, topicConfig, replicaAssignment, usesTopicId = usesTopicId)
}
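  // A minimal usage sketch (not part of the original file); `zkClient` stands for an already
  // constructed KafkaZkClient, and the topic name, counts and config values are illustrative:
  //
  //   val adminZkClient = new AdminZkClient(zkClient)
  //   adminZkClient.createTopic("demo-topic", partitions = 3, replicationFactor = 2)
  //   val overrides = new Properties()
  //   overrides.put("retention.ms", "86400000")
  //   adminZkClient.changeTopicConfig("demo-topic", overrides)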
/**
* Gets broker metadata list
*
* @param rackAwareMode rack aware mode for replica assignment
* @param brokerList The brokers to gather metadata about.
* @return The metadata for each broker that was found.
*/
def getBrokerMetadatas(rackAwareMode: RackAwareMode = RackAwareMode.Enforced,
brokerList: Option[Seq[Int]] = None): Seq[BrokerMetadata] = {
val allBrokers = zkClient.getAllBrokersInCluster
val brokers = brokerList.map(brokerIds => allBrokers.filter(b => brokerIds.contains(b.id))).getOrElse(allBrokers)
val brokersWithRack = brokers.filter(_.rack.nonEmpty)
if (rackAwareMode == RackAwareMode.Enforced && brokersWithRack.nonEmpty && brokersWithRack.size < brokers.size) {
throw new AdminOperationException("Not all brokers have rack information. Add --disable-rack-aware in command line" +
" to make replica assignment without rack information.")
}
val brokerMetadatas = rackAwareMode match {
case RackAwareMode.Disabled => brokers.map(broker => BrokerMetadata(broker.id, None))
case RackAwareMode.Safe if brokersWithRack.size < brokers.size =>
brokers.map(broker => BrokerMetadata(broker.id, None))
case _ => brokers.map(broker => BrokerMetadata(broker.id, broker.rack))
}
brokerMetadatas.sortBy(_.id)
}
/**
* Create topic and optionally validate its parameters. Note that this method is used by the
* TopicCommand as well.
*
* @param topic The name of the topic
* @param config The config of the topic
* @param partitionReplicaAssignment The assignments of the topic
* @param validate Boolean indicating if parameters must be validated or not (true by default)
* @param usesTopicId Boolean indicating whether the topic ID will be created
*/
def createTopicWithAssignment(topic: String,
config: Properties,
partitionReplicaAssignment: Map[Int, Seq[Int]],
validate: Boolean = true,
usesTopicId: Boolean = false): Unit = {
if (validate)
validateTopicCreate(topic, partitionReplicaAssignment, config)
info(s"Creating topic $topic with configuration $config and initial partition " +
s"assignment $partitionReplicaAssignment")
// write out the config if there is any, this isn't transactional with the partition assignments
zkClient.setOrCreateEntityConfigs(ConfigType.Topic, topic, config)
// create the partition assignment
writeTopicPartitionAssignment(topic, partitionReplicaAssignment.map { case (k, v) => k -> ReplicaAssignment(v) },
isUpdate = false, usesTopicId)
}
/**
* Validate topic creation parameters. Note that this method is indirectly used by the
* TopicCommand via the `createTopicWithAssignment` method.
*
* @param topic The name of the topic
* @param partitionReplicaAssignment The assignments of the topic
* @param config The config of the topic
*/
def validateTopicCreate(topic: String,
partitionReplicaAssignment: Map[Int, Seq[Int]],
config: Properties): Unit = {
Topic.validate(topic)
if (zkClient.topicExists(topic))
throw new TopicExistsException(s"Topic '$topic' already exists.")
else if (Topic.hasCollisionChars(topic)) {
val allTopics = zkClient.getAllTopicsInCluster()
// check again in case the topic was created in the meantime, otherwise the
// topic could potentially collide with itself
if (allTopics.contains(topic))
throw new TopicExistsException(s"Topic '$topic' already exists.")
val collidingTopics = allTopics.filter(Topic.hasCollision(topic, _))
if (collidingTopics.nonEmpty) {
throw new InvalidTopicException(s"Topic '$topic' collides with existing topics: ${collidingTopics.mkString(", ")}")
}
}
if (partitionReplicaAssignment.values.map(_.size).toSet.size != 1)
throw new InvalidReplicaAssignmentException("All partitions should have the same number of replicas")
partitionReplicaAssignment.values.foreach(reps =>
if (reps.size != reps.toSet.size)
throw new InvalidReplicaAssignmentException("Duplicate replica assignment found: " + partitionReplicaAssignment)
)
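    // Partition ids must form the consecutive sequence 0..n-1. Comparing the sum of the
    // non-negative keys against n*(n-1)/2 is a cheap way to check this; e.g. for n = 3 the
    // only valid key set is {0, 1, 2}, whose sum is 3 = 3*2/2.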
val partitionSize = partitionReplicaAssignment.size
val sequenceSum = partitionSize * (partitionSize - 1) / 2
if (partitionReplicaAssignment.size != partitionReplicaAssignment.toSet.size ||
partitionReplicaAssignment.keys.filter(_ >= 0).sum != sequenceSum)
throw new InvalidReplicaAssignmentException("partitions should be a consecutive 0-based integer sequence")
LogConfig.validate(config)
}
private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment],
isUpdate: Boolean, usesTopicId: Boolean = false): Unit = {
try {
val assignment = replicaAssignment.map { case (partitionId, replicas) => (new TopicPartition(topic,partitionId), replicas) }.toMap
if (!isUpdate) {
val topicIdOpt = if (usesTopicId) Some(Uuid.randomUuid()) else None
zkClient.createTopicAssignment(topic, topicIdOpt, assignment.map { case (k, v) => k -> v.replicas })
} else {
val topicIds = zkClient.getTopicIdsForTopics(Set(topic))
zkClient.setTopicAssignment(topic, topicIds.get(topic), assignment)
}
debug("Updated path %s with %s for replica assignment".format(TopicZNode.path(topic), assignment))
} catch {
case _: NodeExistsException => throw new TopicExistsException(s"Topic '$topic' already exists.")
case e2: Throwable => throw new AdminOperationException(e2.toString)
}
}
/**
* Creates a delete path for a given topic
* @param topic Topic name to delete
*/
def deleteTopic(topic: String): Unit = {
if (zkClient.topicExists(topic)) {
try {
zkClient.createDeleteTopicPath(topic)
} catch {
case _: NodeExistsException => throw new TopicAlreadyMarkedForDeletionException(
"topic %s is already marked for deletion".format(topic))
case e: Throwable => throw new AdminOperationException(e.getMessage)
}
} else {
throw new UnknownTopicOrPartitionException(s"Topic `$topic` to delete does not exist")
}
}
/**
* Add partitions to existing topic with optional replica assignment. Note that this
* method is used by the TopicCommand.
*
* @param topic Topic for adding partitions to
* @param existingAssignment A map from partition id to its assignment
* @param allBrokers All brokers in the cluster
* @param numPartitions Number of partitions to be set
* @param replicaAssignment Manual replica assignment, or none
* @param validateOnly If true, validate the parameters without actually adding the partitions
* @return the updated replica assignment
*/
def addPartitions(topic: String,
existingAssignment: Map[Int, ReplicaAssignment],
allBrokers: Seq[BrokerMetadata],
numPartitions: Int = 1,
replicaAssignment: Option[Map[Int, Seq[Int]]] = None,
validateOnly: Boolean = false): Map[Int, Seq[Int]] = {
val proposedAssignmentForNewPartitions = createNewPartitionsAssignment(
topic,
existingAssignment,
allBrokers,
numPartitions,
replicaAssignment
)
if (validateOnly) {
(existingAssignment ++ proposedAssignmentForNewPartitions)
.map { case (k, v) => k -> v.replicas }
} else {
createPartitionsWithAssignment(topic, existingAssignment, proposedAssignmentForNewPartitions)
.map { case (k, v) => k -> v.replicas }
}
}
/**
* Create assignment to add the given number of partitions while validating the
* provided arguments.
*
* @param topic Topic for adding partitions to
* @param existingAssignment A map from partition id to its assignment
* @param allBrokers All brokers in the cluster
* @param numPartitions Number of partitions to be set
* @param replicaAssignment Manual replica assignment, or none
* @return the assignment for the new partitions
*/
def createNewPartitionsAssignment(topic: String,
existingAssignment: Map[Int, ReplicaAssignment],
allBrokers: Seq[BrokerMetadata],
numPartitions: Int = 1,
replicaAssignment: Option[Map[Int, Seq[Int]]] = None): Map[Int, ReplicaAssignment] = {
val existingAssignmentPartition0 = existingAssignment.getOrElse(0,
throw new AdminOperationException(
s"Unexpected existing replica assignment for topic '$topic', partition id 0 is missing. " +
s"Assignment: $existingAssignment")).replicas
val partitionsToAdd = numPartitions - existingAssignment.size
if (partitionsToAdd <= 0)
throw new InvalidPartitionsException(
s"The number of partitions for a topic can only be increased. " +
s"Topic $topic currently has ${existingAssignment.size} partitions, " +
s"$numPartitions would not be an increase.")
replicaAssignment.foreach { proposedReplicaAssignment =>
validateReplicaAssignment(proposedReplicaAssignment, existingAssignmentPartition0.size,
allBrokers.map(_.id).toSet)
}
val proposedAssignmentForNewPartitions = replicaAssignment.getOrElse {
val startIndex = math.max(0, allBrokers.indexWhere(_.id >= existingAssignmentPartition0.head))
AdminUtils.assignReplicasToBrokers(allBrokers, partitionsToAdd, existingAssignmentPartition0.size,
startIndex, existingAssignment.size)
}
proposedAssignmentForNewPartitions.map { case (tp, replicas) =>
tp -> ReplicaAssignment(replicas, List(), List())
}
}
/**
* Add partitions to the existing topic with the provided assignment. This method does
* not validate the provided assignments. Validation must be done beforehand.
*
* @param topic Topic for adding partitions to
* @param existingAssignment A map from partition id to its assignment
* @param newPartitionAssignment The assignments to add
* @return the updated replica assignment
*/
def createPartitionsWithAssignment(topic: String,
existingAssignment: Map[Int, ReplicaAssignment],
newPartitionAssignment: Map[Int, ReplicaAssignment]): Map[Int, ReplicaAssignment] = {
info(s"Creating ${newPartitionAssignment.size} partitions for '$topic' with the following replica assignment: " +
s"$newPartitionAssignment.")
val combinedAssignment = existingAssignment ++ newPartitionAssignment
writeTopicPartitionAssignment(topic, combinedAssignment, isUpdate = true)
combinedAssignment
}
private def validateReplicaAssignment(replicaAssignment: Map[Int, Seq[Int]],
expectedReplicationFactor: Int,
availableBrokerIds: Set[Int]): Unit = {
replicaAssignment.forKeyValue { (partitionId, replicas) =>
if (replicas.isEmpty)
throw new InvalidReplicaAssignmentException(
s"Cannot have replication factor of 0 for partition id $partitionId.")
if (replicas.size != replicas.toSet.size)
throw new InvalidReplicaAssignmentException(
s"Duplicate brokers not allowed in replica assignment: " +
s"${replicas.mkString(", ")} for partition id $partitionId.")
if (!replicas.toSet.subsetOf(availableBrokerIds))
throw new BrokerNotAvailableException(
s"Some brokers specified for partition id $partitionId are not available. " +
s"Specified brokers: ${replicas.mkString(", ")}, " +
s"available brokers: ${availableBrokerIds.mkString(", ")}.")
partitionId -> replicas.size
}
val badRepFactors = replicaAssignment.collect {
case (partition, replicas) if replicas.size != expectedReplicationFactor => partition -> replicas.size
}
if (badRepFactors.nonEmpty) {
val sortedBadRepFactors = badRepFactors.toSeq.sortBy { case (partitionId, _) => partitionId }
val partitions = sortedBadRepFactors.map { case (partitionId, _) => partitionId }
val repFactors = sortedBadRepFactors.map { case (_, rf) => rf }
throw new InvalidReplicaAssignmentException(s"Inconsistent replication factor between partitions, " +
s"partition 0 has $expectedReplicationFactor while partitions [${partitions.mkString(", ")}] have " +
s"replication factors [${repFactors.mkString(", ")}], respectively.")
}
}
/**
   * Parses a broker config entity name into an integer broker id.
   * @param broker The broker entity name to parse
   * @return Some(brokerId) if the entity name parses as a single integer, or None for the default broker entity
*/
def parseBroker(broker: String): Option[Int] = {
broker match {
case ConfigEntityName.Default => None
case _ =>
try Some(broker.toInt)
catch {
case _: NumberFormatException =>
throw new IllegalArgumentException(s"Error parsing broker $broker. The broker's Entity Name must be a single integer value")
}
}
}
/**
* Change the configs for a given entityType and entityName
* @param entityType The entityType of the configs that will be changed
* @param entityName The entityName of the entityType
* @param configs The config of the entityName
*/
def changeConfigs(entityType: String, entityName: String, configs: Properties): Unit = {
entityType match {
case ConfigType.Topic => changeTopicConfig(entityName, configs)
case ConfigType.Client => changeClientIdConfig(entityName, configs)
case ConfigType.User => changeUserOrUserClientIdConfig(entityName, configs)
case ConfigType.Broker => changeBrokerConfig(parseBroker(entityName), configs)
case ConfigType.Ip => changeIpConfig(entityName, configs)
case _ => throw new IllegalArgumentException(s"$entityType is not a known entityType. Should be one of ${ConfigType.all}")
}
}
/**
* Update the config for a client and create a change notification so the change will propagate to other brokers.
* If clientId is <default>, default clientId config is updated. ClientId configs are used only if <user, clientId>
* and <user> configs are not specified.
*
* @param sanitizedClientId: The sanitized clientId for which configs are being changed
* @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or
* existing configs need to be deleted, it should be done prior to invoking this API
*
*/
def changeClientIdConfig(sanitizedClientId: String, configs: Properties): Unit = {
DynamicConfig.Client.validate(configs)
changeEntityConfig(ConfigType.Client, sanitizedClientId, configs)
}
/**
* Update the config for a <user> or <user, clientId> and create a change notification so the change will propagate to other brokers.
* User and/or clientId components of the path may be <default>, indicating that the configuration is the default
* value to be applied if a more specific override is not configured.
*
* @param sanitizedEntityName: <sanitizedUserPrincipal> or <sanitizedUserPrincipal>/clients/<clientId>
* @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or
* existing configs need to be deleted, it should be done prior to invoking this API
*
*/
def changeUserOrUserClientIdConfig(sanitizedEntityName: String, configs: Properties): Unit = {
if (sanitizedEntityName == ConfigEntityName.Default || sanitizedEntityName.contains("/clients"))
DynamicConfig.Client.validate(configs)
else
DynamicConfig.User.validate(configs)
changeEntityConfig(ConfigType.User, sanitizedEntityName, configs)
}
/**
* Validates the IP configs.
* @param ip ip for which configs are being validated
* @param configs properties to validate for the IP
*/
def validateIpConfig(ip: String, configs: Properties): Unit = {
if (!DynamicConfig.Ip.isValidIpEntity(ip))
throw new AdminOperationException(s"$ip is not a valid IP or resolvable host.")
DynamicConfig.Ip.validate(configs)
}
/**
* Update the config for an IP. These overrides will be persisted between sessions, and will override any default
* IP properties.
* @param ip ip for which configs are being updated
* @param configs properties to update for the IP
*/
def changeIpConfig(ip: String, configs: Properties): Unit = {
validateIpConfig(ip, configs)
changeEntityConfig(ConfigType.Ip, ip, configs)
}
/**
* validates the topic configs
* @param topic topic for which configs are being validated
* @param configs properties to validate for the topic
*/
def validateTopicConfig(topic: String, configs: Properties): Unit = {
Topic.validate(topic)
if (!zkClient.topicExists(topic))
throw new UnknownTopicOrPartitionException(s"Topic '$topic' does not exist.")
    // validate the topic-level config overrides
LogConfig.validate(configs)
}
/**
* Update the config for an existing topic and create a change notification so the change will propagate to other brokers
*
* @param topic: The topic for which configs are being changed
* @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or
* existing configs need to be deleted, it should be done prior to invoking this API
*
*/
def changeTopicConfig(topic: String, configs: Properties): Unit = {
validateTopicConfig(topic, configs)
changeEntityConfig(ConfigType.Topic, topic, configs)
}
/**
* Override the broker config on some set of brokers. These overrides will be persisted between sessions, and will
* override any defaults entered in the broker's config files
*
* @param brokers: The list of brokers to apply config changes to
* @param configs: The config to change, as properties
*/
def changeBrokerConfig(brokers: Seq[Int], configs: Properties): Unit = {
validateBrokerConfig(configs)
brokers.foreach {
broker => changeEntityConfig(ConfigType.Broker, broker.toString, configs)
}
}
/**
* Override a broker override or broker default config. These overrides will be persisted between sessions, and will
* override any defaults entered in the broker's config files
*
* @param broker: The broker to apply config changes to or None to update dynamic default configs
* @param configs: The config to change, as properties
*/
def changeBrokerConfig(broker: Option[Int], configs: Properties): Unit = {
validateBrokerConfig(configs)
changeEntityConfig(ConfigType.Broker, broker.map(_.toString).getOrElse(ConfigEntityName.Default), configs)
}
/**
* Validate dynamic broker configs. Since broker configs may contain custom configs, the validation
* only verifies that the provided config does not contain any static configs.
* @param configs configs to validate
*/
def validateBrokerConfig(configs: Properties): Unit = {
DynamicConfig.Broker.validate(configs)
}
private def changeEntityConfig(rootEntityType: String, fullSanitizedEntityName: String, configs: Properties): Unit = {
val sanitizedEntityPath = rootEntityType + '/' + fullSanitizedEntityName
zkClient.setOrCreateEntityConfigs(rootEntityType, fullSanitizedEntityName, configs)
// create the change notification
zkClient.createConfigChangeNotification(sanitizedEntityPath)
}
/**
* Read the entity (topic, broker, client, user, <user, client> or <ip>) config (if any) from zk
* sanitizedEntityName is <topic>, <broker>, <client-id>, <user>, <user>/clients/<client-id> or <ip>.
* @param rootEntityType entityType for which configs are being fetched
* @param sanitizedEntityName entityName of the entityType
* @return The successfully gathered configs
*/
def fetchEntityConfig(rootEntityType: String, sanitizedEntityName: String): Properties = {
zkClient.getEntityConfigs(rootEntityType, sanitizedEntityName)
}
/**
* Gets all topic configs
* @return The successfully gathered configs of all topics
*/
def getAllTopicConfigs(): Map[String, Properties] =
zkClient.getAllTopicsInCluster().map(topic => (topic, fetchEntityConfig(ConfigType.Topic, topic))).toMap
/**
* Gets all the entity configs for a given entityType
* @param entityType entityType for which configs are being fetched
* @return The successfully gathered configs of the entityType
*/
def fetchAllEntityConfigs(entityType: String): Map[String, Properties] =
zkClient.getAllEntitiesWithConfig(entityType).map(entity => (entity, fetchEntityConfig(entityType, entity))).toMap
/**
* Gets all the entity configs for a given childEntityType
* @param rootEntityType rootEntityType for which configs are being fetched
* @param childEntityType childEntityType of the rootEntityType
* @return The successfully gathered configs of the childEntityType
*/
def fetchAllChildEntityConfigs(rootEntityType: String, childEntityType: String): Map[String, Properties] = {
def entityPaths(rootPath: Option[String]): Seq[String] = {
val root = rootPath match {
case Some(path) => rootEntityType + '/' + path
case None => rootEntityType
}
val entityNames = zkClient.getAllEntitiesWithConfig(root)
rootPath match {
case Some(path) => entityNames.map(entityName => path + '/' + entityName)
case None => entityNames
}
}
entityPaths(None)
.flatMap(entity => entityPaths(Some(entity + '/' + childEntityType)))
.map(entityPath => (entityPath, fetchEntityConfig(rootEntityType, entityPath))).toMap
}
}
|
lindong28/kafka
|
core/src/main/scala/kafka/zk/AdminZkClient.scala
|
Scala
|
apache-2.0
| 24,793
|
package org.ferrit.core.filter
import org.ferrit.core.uri.CrawlUri
/**
* Strategy for deciding if the given URI can be followed by a crawler.
*
*/
trait UriFilter {
/**
* Tests if the given URI is accepted.
*/
def accept(uri: CrawlUri): Boolean
/**
* Optional method that can be overridden to explain why the given URI is
* accepted or rejected. Useful for debugging and management tools.
*/
def explain(uri: CrawlUri): String = "No explanation available"
}
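/**
 * A minimal example implementation (not part of the original file): accepts a URI only if
 * its string form contains the given host fragment. The class name is illustrative and the
 * check relies only on CrawlUri#toString.
 */
class ContainsHostUriFilter(host: String) extends UriFilter {
  override def accept(uri: CrawlUri): Boolean = uri.toString.contains(host)
  override def explain(uri: CrawlUri): String =
    if (accept(uri)) s"$uri accepted: contains '$host'"
    else s"$uri rejected: does not contain '$host'"
}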
object UriFilter {
import scala.language.implicitConversions
/**
* Sugar to reduce boilerplate conversions of String to CrawlUri,
* in particular during tests.
*/
implicit def implicitConvertStringToCrawlUri(uri: String):CrawlUri = CrawlUri(uri)
}
|
reggoodwin/ferrit
|
src/main/scala/org/ferrit/core/filter/UriFilter.scala
|
Scala
|
mit
| 760
|
/*
* #%L
* gatling-any
* %%
* Copyright (C) 2013 Thrillsoft
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.thrillsoft.gatling.any
import com.thrillsoft.gatling.any.Predef._
import io.gatling.core.Predef._
import scala.util.Random
class AnySimulation extends Simulation {
setUp(
scenario("count").exec(any("count",
() => {
count(Int.MaxValue - 1)
})).inject(atOnceUsers(5)),
scenario("sort").exec(anyParam[Seq[Int]]("sort",
(s: Seq[Int]) => {
sort(s)
}, () => {
random(1000)
})).inject(atOnceUsers(10)),
scenario("permute").exec(anyCtx[String]("permute",
(s: String) => {
permute(s)
}, () => newUser)).inject(atOnceUsers(5)),
scenario("sum").exec(anyCtxParam[String, Seq[Int]]("sum",
(c: String, p: Seq[Int]) => {
sum(c, p)
}, () => newUser
, (c: String) => {
random(10000)
})).inject(atOnceUsers(20)))
def count = (x: Int) => {
for (a <- 0 to x by 10000)
if (a % 1000000 == 0)
println((a / 1000000) + " millions")
}
def random = (n: Int) => {
val s: Seq[Int] = Seq.fill(n)(Random.nextInt)
println("Random : " + s)
s
}
def sort = (s: Seq[Int]) => {
val sortedS: Seq[Int] = s.sorted
println("Sorted : " + sortedS)
}
def sum = (user: String, s: Seq[Int]) => {
val sum: Int = s.sum
println("Sum of " + user + " : " + sum)
}
def permute = (s: String) => {
val perm = s.permutations
while (perm.hasNext)
println(perm.next())
}
var i: Int = 0
def newUser = {
i = i + 1
val user = "USER" + i
println("New user : " + user)
user
}
}
|
spirylics/gatling-any
|
src/test/scala/com/thrillsoft/gatling/any/AnySimulation.scala
|
Scala
|
apache-2.0
| 2,210
|
val index = "/tmp/index-lm"
val CORPUS = Array(
"Alice ate an apple.",
"Mike likes an orange.",
"An apple is red."
)
def schema(): Schema = {
val builder = AnalyzerBuilder()
builder.withTokenizer("standard")
builder.addTokenFilter("lowercase")
val analyzer = builder.build
builder.addTokenFilter("shingle", "minShingleSize", "2", "maxShingleSize", "2", "outputUnigrams", "false")
val analyzer2g = builder.build
val fieldTypes = Map(
"word" -> FieldType(analyzer, true, true, true, true),
"word2g" -> FieldType(analyzer2g, true, true, true, true)
)
val analyzerDefault = analyzer
Schema(analyzerDefault, fieldTypes)
}
// create a language model index
val writer = IWriter(index, schema())
def addDocument(doc: String): Unit = {
writer.write(Document(Set(
Field("word", doc),
Field("word2g", doc)
)))
}
CORPUS.foreach(addDocument(_))
writer.close()
// try to browse terms in each fields
//
// nlp4l> open("/tmp/index-lm")
// nlp4l> status
// nlp4l> browseTerms("word")
// nlp4l> nt
// nlp4l> browseTerms("word2g")
// nlp4l> nt
// nlp4l> close
// calculate probabilities
//
val reader = RawReader(index)
// P(apple|an) = C(an apple) / C(an)
val count_an_apple = reader.totalTermFreq("word2g", "an apple")
val count_an = reader.totalTermFreq("word", "an")
val prob_apple_an = count_an_apple.toFloat / count_an.toFloat
// P(orange|an) = C(an orange) / C(an)
val count_an_orange = reader.totalTermFreq("word2g", "an orange")
val prob_orange_an = count_an_orange.toFloat / count_an.toFloat
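// Sanity check (added sketch): with this corpus and lowercasing, the expected counts are
// C(an) = 3, C(an apple) = 2 and C(an orange) = 1, i.e. roughly P(apple|an) = 0.67 and
// P(orange|an) = 0.33, assuming the standard tokenizer splits the sentences as above.
println("P(apple|an) = " + prob_apple_an + ", P(orange|an) = " + prob_orange_an)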
reader.close
|
NLP4L/meetups
|
20150818/language_model.scala
|
Scala
|
apache-2.0
| 1,554
|
class C1(var p: Int)
class C2(var p: Int) extends C1(1) {
println(/* line: 3 */p)
}
|
ilinum/intellij-scala
|
testdata/resolve2/inheritance/override/ClassParameterVariable.scala
|
Scala
|
apache-2.0
| 86
|
package net.thereturningvoid.bladebot
import java.io.{IOException, File, UnsupportedEncodingException}
import java.net.URLDecoder
import javax.security.auth.login.LoginException
import jline.console.ConsoleReader
import jline.console.history.FileHistory
import net.dv8tion.jda.entities.{Message, TextChannel, Guild}
import net.dv8tion.jda.utils.InviteUtil
import net.dv8tion.jda.{MessageBuilder, JDABuilder, JDA}
import net.thereturningvoid.bladebot.command._
import net.thereturningvoid.bladebot.Predefs._
import org.apache.commons.lang3.StringUtils
import scala.collection.JavaConversions._
object BladeBot {
// No error exit codes
final val NORMAL_SHUTDOWN = 10
final val RESTART = 11
final val NEW_CONFIG = 12
// Error exit codes
final val DISCORD_CANNOT_CONNECT = 20
final val NO_LOGIN = 21
final val BAD_LOGIN = 22
var jda: Option[JDA] = None
var permissions: Option[Permissions] = None
private val history: FileHistory = new FileHistory(new File(".history").getAbsoluteFile)
private lazy val reader = new ConsoleReader() tap { r =>
r.setHistory(history)
r.setBellEnabled(false)
r.setExpandEvents(false)
r.setPrompt("bladebot> ")
}
def main(args: Array[String]): Unit = {
if (System.getProperty("file.encoding").equals("UTF-8")) {
setupBot()
println("REPL active.")
while (evalCommand(read()).map(println).isDefined) { }
System.exit(0)
} else {
relaunchInUTF8()
}
}
def getThisJarFile: File = {
val path: String = BladeBot.getClass.getProtectionDomain.getCodeSource.getLocation.getPath
val decodedPath: String = URLDecoder.decode(path, "UTF-8")
if (!decodedPath.endsWith(".jar")) {
new File("BladeBot.jar")
}
new File(decodedPath)
}
def getJDA: JDA = jda.get
def getPermissions: Permissions = permissions.get
private def read(): String = {
println()
reader.readLine() tap { in => println() }
}
private def evalCommand(in: String): Option[String] = {
val input: Array[String] = in.split(" ")
input.head.toLowerCase match {
case "" => Some("Invalid command. Try \\"help\\" for more info.")
case "help" => Some(helpInfo())
case "addop" =>
try {
if (getPermissions.addOp(jda.get.getUsersByName(input(1)).head))
Some(jda.get.getUsersByName(input(1)).head.getUsername + " added to ops!")
else Some(jda.get.getUsersByName(input(1)).head.getUsername + " is already an op!")
} catch {
case e: NoSuchElementException => Some("The given user does not exist!")
}
case "acceptinvite" =>
InviteUtil.join(input(1), jda.get)
Some("Joined server!")
case "setplaying" =>
val game: String = input.drop(1).mkString(" ")
getJDA.getAccountManager.setGame(game)
Some("Set game to \\"" + game + "\\"!")
case "say" =>
val sayReader: ConsoleReader = new ConsoleReader()
val guild: Guild = getJDA.getGuildsByName(sayReader.readLine("Enter the name of the guild to send the message in: ")).head
val channelName: String = sayReader.readLine("Enter the channel to send the message in: ").toLowerCase
val channel: TextChannel = guild.getTextChannels.find(_.getName.toLowerCase == channelName).get
channel.sendMessage(new MessageBuilder()
.appendString(sayReader.readLine("Enter the message to send: "))
.build())
Some("Message sent!")
case r"exit|quit|stop" =>
reader.getHistory.asInstanceOf[FileHistory].flush()
None
case _ => Some("Invalid command. Try \\"help\\" for more info.")
}
}
private def helpInfo(): String = "BladeBot Console Help\\n\\n" +
"addop <user>: Adds the specified user as a bot operator.\\n" +
"acceptInvite <invite>: Joins the server with the given invite.\\n" +
"setplaying <game>: Sets the playing game to the given name.\\n" +
"say <guild> <channel> <text>: Says a message in the specified guild and channel."
private def setupBot(): Unit = {
try {
// Define the settings instance
val settingsManager: SettingsManager = SettingsManager.getInstance
settingsManager.loadSettings()
val settings: Settings = SettingsManager.getInstance.settings
// Setup JDA instance
val jdaBuilder: JDABuilder = new JDABuilder(settings.email, settings.password)
// Setup operator list
Permissions.setupPermissions()
permissions = Some(Permissions.getPermissions)
// Register commands
val help: HelpCommand = new HelpCommand
jdaBuilder.addListener(help.registerCommand(help))
jdaBuilder.addListener(help.registerCommand(new VoiceCommand))
jdaBuilder.addListener(help.registerCommand(new AvatarCommand))
jdaBuilder.addListener(help.registerCommand(new OperatorCommand))
jdaBuilder.addListener(help.registerCommand(new SongQueueCommand))
// Connect with proxy if one is specified
if (settings.proxyHost != null && !settings.proxyHost.isEmpty) {
jdaBuilder.setProxy(settings.proxyHost, settings.proxyPort.toInt)
System.setProperty("http.proxyHost", settings.proxyHost)
System.setProperty("http.proxyPort", settings.proxyPort)
System.setProperty("https.proxyHost", settings.proxyHost)
System.setProperty("https.proxyPort", settings.proxyPort)
}
      // Set the bot as an operator of itself
jda = Some(jdaBuilder.buildBlocking())
Permissions.getPermissions.setBotAsOp(jda.get.getSelfInfo)
} catch {
case e: IllegalArgumentException =>
println("No login provided! Please specify a login in the config file.")
System.exit(NO_LOGIN)
case e: LoginException =>
println("The login in the config file is incorrect.")
println("Did you modify the config file after it was created?")
System.exit(BAD_LOGIN)
case e: InterruptedException =>
println("The login thread was interrupted somehow!")
System.exit(DISCORD_CANNOT_CONNECT)
}
}
@throws(classOf[InterruptedException])
@throws(classOf[UnsupportedEncodingException])
private def relaunchInUTF8(): Unit = {
println("[Launcher] We are not in UTF-8 mode! This is a problem!")
println("[Launcher] Relaunching in UTF-8 mode with -Dfile.encoding=UTF-8")
val command: Array[String] = Array("java", "-Dfile.encoding=UTF-8", "-jar", BladeBot.getThisJarFile.getAbsolutePath)
val processBuilder: ProcessBuilder = new ProcessBuilder(command.toList)
processBuilder.inheritIO() // Use the same command line
try {
val process: Process = processBuilder.start()
process.waitFor()
System.exit(process.exitValue())
} catch {
case e: IOException =>
if (e.getMessage.contains("\\"java\\"")) {
println("[Launcher] There was an error relaunching the bot. We couldn't find Java.")
println("[Launcher] Tried relaunching with the command:\\n " + StringUtils.join(command, " ", 0, command.length))
println("[Launcher] Make sure Java is properly defined in your PATH.")
println("[Launcher] Terminating.")
} else {
println("[Launcher] An unknown IOException occured during relaunch!")
e.printStackTrace()
}
}
}
}
|
TheReturningVoid/BladeBot
|
src/main/scala/net/thereturningvoid/bladebot/BladeBot.scala
|
Scala
|
mit
| 7,320
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.api.python.{PythonEvalType, PythonFunction}
import org.apache.spark.sql.catalyst.trees.TreePattern.{PYTHON_UDF, TreePattern}
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.types.DataType
/**
* Helper functions for [[PythonUDF]]
*/
object PythonUDF {
private[this] val SCALAR_TYPES = Set(
PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
)
def isScalarPythonUDF(e: Expression): Boolean = {
e.isInstanceOf[PythonUDF] && SCALAR_TYPES.contains(e.asInstanceOf[PythonUDF].evalType)
}
def isGroupedAggPandasUDF(e: Expression): Boolean = {
e.isInstanceOf[PythonUDF] &&
e.asInstanceOf[PythonUDF].evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
}
  // This is currently the same as GroupedAggPandasUDF, but we might support new types in the
  // future, e.g., N -> N transform.
def isWindowPandasUDF(e: Expression): Boolean = isGroupedAggPandasUDF(e)
}
/**
* A serialized version of a Python lambda function. This is a special expression, which needs a
* dedicated physical operator to execute it, and thus can't be pushed down to data sources.
*/
case class PythonUDF(
name: String,
func: PythonFunction,
dataType: DataType,
children: Seq[Expression],
evalType: Int,
udfDeterministic: Boolean,
resultId: ExprId = NamedExpression.newExprId)
extends Expression with Unevaluable with NonSQLExpression with UserDefinedExpression {
override lazy val deterministic: Boolean = udfDeterministic && children.forall(_.deterministic)
override def toString: String = s"$name(${children.mkString(", ")})#${resultId.id}$typeSuffix"
final override val nodePatterns: Seq[TreePattern] = Seq(PYTHON_UDF)
lazy val resultAttribute: Attribute = AttributeReference(toPrettySQL(this), dataType, nullable)(
exprId = resultId)
override def nullable: Boolean = true
override lazy val preCanonicalized: Expression = {
val canonicalizedChildren = children.map(_.preCanonicalized)
// `resultId` can be seen as cosmetic variation in PythonUDF, as it doesn't affect the result.
this.copy(resultId = ExprId(-1)).withNewChildren(canonicalizedChildren)
}
override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): PythonUDF =
copy(children = newChildren)
}
/**
* A place holder used when printing expressions without debugging information such as the
* result id.
*/
case class PrettyPythonUDF(
name: String,
dataType: DataType,
children: Seq[Expression])
extends Expression with Unevaluable with NonSQLExpression {
override def toString: String = s"$name(${children.mkString(", ")})"
override def nullable: Boolean = true
override protected def withNewChildrenInternal(
newChildren: IndexedSeq[Expression]): PrettyPythonUDF = copy(children = newChildren)
}
|
ueshin/apache-spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/PythonUDF.scala
|
Scala
|
apache-2.0
| 3,771
|
package mesosphere.mesos.simulation
import java.util.UUID
import akka.actor.{Actor, ActorRef, Cancellable, Props}
import akka.event.LoggingReceive
import com.typesafe.scalalogging.StrictLogging
import mesosphere.marathon.stream.Implicits._
import mesosphere.mesos.simulation.DriverActor._
import mesosphere.mesos.simulation.SchedulerActor.ResourceOffers
import org.apache.mesos.Protos._
import org.apache.mesos.SchedulerDriver
import scala.concurrent.duration._
import scala.util.Random
object DriverActor {
case class DeclineOffer(offerId: OfferID)
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo]): Status`
*/
case class LaunchTasks(offerIds: Seq[OfferID], tasks: Seq[TaskInfo])
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `acceptOffers(o: util.Collection[OfferID], ops: util.Collection[Offer.Operation], filters: Filters): Status`
*/
case class AcceptOffers(offerIds: Seq[OfferID], ops: Seq[Offer.Operation], filters: Filters)
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def killTask(taskId: TaskID): Status`
*/
case class KillTask(taskId: TaskID)
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def reconcileTasks(statuses: util.Collection[TaskStatus]): Status`
*/
case class ReconcileTask(taskStatus: Seq[TaskStatus])
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def suppressOffers(): Status`
*/
case object SuppressOffers
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def reviveOffers(): Status`
*/
case object ReviveOffers
private case object TaskStateTick
private case class SendTaskStatusAt(taskStatus: TaskStatus, create: Boolean, at: Deadline)
}
class DriverActor(schedulerProps: Props) extends Actor with StrictLogging {
// probability of a failing start or a lost message [ 0=no error, 1=always error ]
private[this] val taskFailProbability = 0.1
private[this] val lostMessageProbability = 0.0
private[this] val numberOfOffersPerCycle: Int = 1000
private[this] var taskUpdates = Vector.empty[SendTaskStatusAt]
// use a fixed seed to get reproducible results
private[this] val random = {
val seed = 1L
logger.info(s"Random seed for this test run: $seed")
new Random(new java.util.Random(seed))
}
private[this] var periodicOffers: Option[Cancellable] = None
private[this] var periodicUpdates: Option[Cancellable] = None
private[this] var scheduler: ActorRef = _
private[this] var tasks: Map[String, TaskStatus] = Map.empty.withDefault { taskId =>
TaskStatus.newBuilder()
.setSource(TaskStatus.Source.SOURCE_SLAVE)
.setTaskId(TaskID.newBuilder().setValue(taskId).build())
.setState(TaskState.TASK_LOST)
.build()
}
private[this] def offer(index: Int): Offer = {
def resource(name: String, value: Double): Resource = {
Resource.newBuilder()
.setName(name)
.setType(Value.Type.SCALAR)
.setScalar(Value.Scalar.newBuilder().setValue(value))
.build()
}
Offer.newBuilder()
.setId(OfferID.newBuilder().setValue(UUID.randomUUID().toString))
.setFrameworkId(FrameworkID.newBuilder().setValue("notanidframework"))
.setSlaveId(SlaveID.newBuilder().setValue(s"notanidslave-$index"))
.setHostname("hostname")
.addAllResources(Seq(
resource("cpus", 100),
resource("mem", 500000),
resource("disk", 1000000000),
Resource.newBuilder()
.setName("ports")
.setType(Value.Type.RANGES)
.setRanges(
Value.Ranges
.newBuilder()
.addRange(Value.Range.newBuilder().setBegin(10000).setEnd(20000)))
.build()
).asJava)
.build()
}
private[this] def offers: ResourceOffers =
SchedulerActor.ResourceOffers((1 to numberOfOffersPerCycle).map(offer))
override def preStart(): Unit = {
super.preStart()
scheduler = context.actorOf(schedulerProps, "scheduler")
import context.dispatcher
periodicOffers = Some(context.system.scheduler.schedule(1.second, 5.seconds)(scheduler ! offers))
periodicUpdates = Some(context.system.scheduler.schedule(1.second, 1.seconds)(self ! TaskStateTick))
}
override def postStop(): Unit = {
periodicOffers.foreach(_.cancel())
periodicOffers = None
periodicUpdates.foreach(_.cancel())
periodicUpdates = None
super.postStop()
}
override def receive: Receive = LoggingReceive {
case driver: SchedulerDriver =>
logger.debug(s"pass on driver to scheduler $scheduler")
scheduler ! driver
case LaunchTasks(offers, tasks) =>
simulateTaskLaunch(offers, tasks)
case AcceptOffers(offers, ops, filters) =>
val taskInfos = extractTaskInfos(ops)
simulateTaskLaunch(offers, taskInfos)
case KillTask(taskId) =>
logger.debug(s"kill task $taskId")
tasks.get(taskId.getValue) match {
case Some(task) =>
scheduleStatusChange(toState = TaskState.TASK_KILLED, afterDuration = 2.seconds)(taskID = taskId)
case None =>
scheduleStatusChange(toState = TaskState.TASK_LOST, afterDuration = 1.second)(taskID = taskId)
}
case SuppressOffers => ()
case ReviveOffers =>
scheduler ! offers
case TaskStateTick =>
val (sendNow, later) = taskUpdates.partition(_.at.isOverdue())
sendNow.foreach(update => changeTaskStatus(update.taskStatus, update.create))
taskUpdates = later
case ReconcileTask(taskStatuses) =>
if (taskStatuses.isEmpty) {
tasks.values.foreach(scheduler ! _)
} else {
taskStatuses.view.map(_.getTaskId.getValue).map(tasks).foreach(scheduler ! _)
}
}
private[this] def extractTaskInfos(ops: Seq[Offer.Operation]): Seq[TaskInfo] = {
ops.withFilter(_.getType == Offer.Operation.Type.LAUNCH).flatMap { op =>
Option(op.getLaunch).map(_.getTaskInfosList.toSeq).getOrElse(Seq.empty)
}
}
private[this] def simulateTaskLaunch(offers: Seq[OfferID], tasksToLaunch: Seq[TaskInfo]): Unit = {
if (random.nextDouble() > lostMessageProbability) {
logger.debug(s"launch tasksToLaunch $offers, $tasksToLaunch")
if (random.nextDouble() > taskFailProbability) {
tasksToLaunch.map(_.getTaskId).foreach {
scheduleStatusChange(toState = TaskState.TASK_RUNNING, afterDuration = 5.seconds, create = true)
}
} else {
tasksToLaunch.map(_.getTaskId).foreach {
scheduleStatusChange(toState = TaskState.TASK_FAILED, afterDuration = 5.seconds, create = true)
}
}
} else {
logger.info(s"simulating lost launch for $tasksToLaunch")
}
}
private[this] def changeTaskStatus(status: TaskStatus, create: Boolean): Unit = {
if (create || tasks.contains(status.getTaskId.getValue)) {
status.getState match {
case TaskState.TASK_ERROR | TaskState.TASK_FAILED | TaskState.TASK_FINISHED | TaskState.TASK_LOST =>
tasks -= status.getTaskId.getValue
case _ =>
tasks += (status.getTaskId.getValue -> status)
}
logger.debug(s"${tasks.size} tasks")
scheduler ! status
} else {
if (status.getState == TaskState.TASK_LOST) {
scheduler ! status
} else {
logger.debug(s"${status.getTaskId.getValue} does not exist anymore")
}
}
}
private[this] def scheduleStatusChange(
toState: TaskState,
afterDuration: FiniteDuration,
create: Boolean = false)(taskID: TaskID): Unit = {
val newStatus = TaskStatus.newBuilder()
.setSource(TaskStatus.Source.SOURCE_EXECUTOR)
.setTaskId(taskID)
.setState(toState)
.build()
this.taskUpdates :+= SendTaskStatusAt(newStatus, create, afterDuration.fromNow)
}
}
|
gsantovena/marathon
|
mesos-simulation/src/main/scala/mesosphere/mesos/simulation/DriverActor.scala
|
Scala
|
apache-2.0
| 8,175
|
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark
import org.apache.hadoop.hbase.classification.InterfaceAudience
import org.apache.hadoop.hbase.util.Bytes
@InterfaceAudience.Public
class ByteArrayComparable(val bytes:Array[Byte], val offset:Int = 0, var length:Int = -1)
extends Comparable[ByteArrayComparable] {
if (length == -1) {
length = bytes.length
}
override def compareTo(o: ByteArrayComparable): Int = {
Bytes.compareTo(bytes, offset, length, o.bytes, o.offset, o.length)
}
override def hashCode(): Int = {
Bytes.hashCode(bytes, offset, length)
}
override def equals (obj: Any): Boolean = {
obj match {
case b: ByteArrayComparable =>
Bytes.equals(bytes, offset, length, b.bytes, b.offset, b.length)
case _ =>
false
}
}
}
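/**
 * A minimal usage sketch (not part of the original file): comparisons follow lexicographic
 * byte order over the selected (offset, length) window; the row keys are illustrative.
 */
object ByteArrayComparableExample {
  def main(args: Array[String]): Unit = {
    val a = new ByteArrayComparable(Bytes.toBytes("row-001"))
    val b = new ByteArrayComparable(Bytes.toBytes("row-002"))
    println(a.compareTo(b) < 0)                                          // true
    println(a.equals(new ByteArrayComparable(Bytes.toBytes("row-001")))) // true
  }
}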
|
gustavoanatoly/hbase
|
hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala
|
Scala
|
apache-2.0
| 1,602
|
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
import leon.annotation._
import leon.lang._
import scala.language.implicitConversions
package object invariant {
@library
def tmpl(templateFunc: BigInt => Boolean): Boolean = true
@library
def tmpl(templateFunc: (BigInt, BigInt) => Boolean): Boolean = true
@library
def tmpl(templateFunc: (BigInt, BigInt, BigInt) => Boolean): Boolean = true
@library
def tmpl(templateFunc: (BigInt, BigInt, BigInt, BigInt) => Boolean): Boolean = true
@library
def tmpl(templateFunc: (BigInt, BigInt, BigInt, BigInt, BigInt) => Boolean): Boolean = true
@library
def ? : BigInt = 0
@library
def ?(id: BigInt) = id
}
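// A hypothetical usage sketch (not part of the original file): templates are typically
// placed in postconditions so that the inference engine can solve for the unknown
// coefficients, e.g. `ensuring(res => res >= 0 && tmpl((a, b) => res <= a * n + b))`
// for some function of `n: BigInt`; the names `res`, `a`, `b` and `n` are illustrative.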
|
epfl-lara/leon
|
library/leon/invariant/package.scala
|
Scala
|
gpl-3.0
| 685
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.impl
import scala.collection.mutable
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.spark.{SparkContext, Logging}
import org.apache.spark.storage.StorageLevel
/**
* This abstraction helps with persisting and checkpointing RDDs and types derived from RDDs
* (such as Graphs and DataFrames). In documentation, we use the phrase "Dataset" to refer to
* the distributed data type (RDD, Graph, etc.).
*
* Specifically, this abstraction automatically handles persisting and (optionally) checkpointing,
* as well as unpersisting and removing checkpoint files.
*
* Users should call update() when a new Dataset has been created,
* before the Dataset has been materialized. After updating [[PeriodicCheckpointer]], users are
* responsible for materializing the Dataset to ensure that persisting and checkpointing actually
* occur.
*
* When update() is called, this does the following:
* - Persist new Dataset (if not yet persisted), and put in queue of persisted Datasets.
* - Unpersist Datasets from queue until there are at most 3 persisted Datasets.
* - If using checkpointing and the checkpoint interval has been reached,
* - Checkpoint the new Dataset, and put in a queue of checkpointed Datasets.
* - Remove older checkpoints.
*
* WARNINGS:
* - This class should NOT be copied (since copies may conflict on which Datasets should be
* checkpointed).
* - This class removes checkpoint files once later Datasets have been checkpointed.
* However, references to the older Datasets will still return isCheckpointed = true.
*
* @param checkpointInterval Datasets will be checkpointed at this interval
* @param sc SparkContext for the Datasets given to this checkpointer
* @tparam T Dataset type, such as RDD[Double]
*/
private[mllib] abstract class PeriodicCheckpointer[T](
val checkpointInterval: Int,
val sc: SparkContext) extends Logging {
/** FIFO queue of past checkpointed Datasets */
private val checkpointQueue = mutable.Queue[T]()
/** FIFO queue of past persisted Datasets */
private val persistedQueue = mutable.Queue[T]()
/** Number of times [[update()]] has been called */
private var updateCount = 0
/**
* Update with a new Dataset. Handle persistence and checkpointing as needed.
* Since this handles persistence and checkpointing, this should be called before the Dataset
* has been materialized.
*
* @param newData New Dataset created from previous Datasets in the lineage.
*/
def update(newData: T): Unit = {
persist(newData)
persistedQueue.enqueue(newData)
    // Keep at most 3 Datasets in persistedQueue: the newly added Dataset (which, per the
    // contract of this class, has not yet been materialized when [[update()]] is called)
    // plus its most recent predecessors, which must stay persisted until it is materialized.
while (persistedQueue.size > 3) {
val dataToUnpersist = persistedQueue.dequeue()
unpersist(dataToUnpersist)
}
updateCount += 1
// Handle checkpointing (after persisting)
if ((updateCount % checkpointInterval) == 0 && sc.getCheckpointDir.nonEmpty) {
// Add new checkpoint before removing old checkpoints.
checkpoint(newData)
checkpointQueue.enqueue(newData)
// Remove checkpoints before the latest one.
var canDelete = true
while (checkpointQueue.size > 1 && canDelete) {
// Delete the oldest checkpoint only if the next checkpoint exists.
if (isCheckpointed(checkpointQueue.head)) {
removeCheckpointFile()
} else {
canDelete = false
}
}
}
}
/** Checkpoint the Dataset */
protected def checkpoint(data: T): Unit
/** Return true iff the Dataset is checkpointed */
protected def isCheckpointed(data: T): Boolean
/**
* Persist the Dataset.
* Note: This should handle checking the current [[StorageLevel]] of the Dataset.
*/
protected def persist(data: T): Unit
/** Unpersist the Dataset */
protected def unpersist(data: T): Unit
/** Get list of checkpoint files for this given Dataset */
protected def getCheckpointFiles(data: T): Iterable[String]
/**
* Call this at the end to delete any remaining checkpoint files.
*/
def deleteAllCheckpoints(): Unit = {
while (checkpointQueue.nonEmpty) {
removeCheckpointFile()
}
}
/**
* Dequeue the oldest checkpointed Dataset, and remove its checkpoint files.
* This prints a warning but does not fail if the files cannot be removed.
*/
private def removeCheckpointFile(): Unit = {
val old = checkpointQueue.dequeue()
// Since the old checkpoint is not deleted by Spark, we manually delete it.
val fs = FileSystem.get(sc.hadoopConfiguration)
getCheckpointFiles(old).foreach { checkpointFile =>
try {
fs.delete(new Path(checkpointFile), true)
} catch {
case e: Exception =>
logWarning("PeriodicCheckpointer could not remove old checkpoint file: " +
checkpointFile)
}
}
}
}
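/**
 * A minimal sketch (not part of the original file) of a concrete checkpointer for
 * RDD[Double], wiring each abstract hook to the corresponding RDD method. The class name
 * is illustrative; Spark ships similar implementations for RDDs and Graphs.
 */
private[mllib] class ExampleRDDCheckpointer(checkpointInterval: Int, sc: SparkContext)
  extends PeriodicCheckpointer[org.apache.spark.rdd.RDD[Double]](checkpointInterval, sc) {

  import org.apache.spark.rdd.RDD

  override protected def checkpoint(data: RDD[Double]): Unit = data.checkpoint()

  override protected def isCheckpointed(data: RDD[Double]): Boolean = data.isCheckpointed

  override protected def persist(data: RDD[Double]): Unit = {
    // Only persist if the caller has not already chosen a StorageLevel.
    if (data.getStorageLevel == StorageLevel.NONE) {
      data.persist()
    }
  }

  override protected def unpersist(data: RDD[Double]): Unit = data.unpersist(blocking = false)

  override protected def getCheckpointFiles(data: RDD[Double]): Iterable[String] =
    data.getCheckpointFile.map(x => x)
}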
|
chenc10/Spark-PAF
|
mllib/src/main/scala/org/apache/spark/mllib/impl/PeriodicCheckpointer.scala
|
Scala
|
apache-2.0
| 5,843
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.cassandra
import com.twitter.cassie
import com.twitter.cassie._
import com.twitter.cassie.codecs.{Codec, LongCodec, Utf8Codec}
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.util.{Future, FuturePool, Duration, Time}
import com.twitter.zipkin.Constants
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.thriftscala.{Span => ThriftSpan}
import com.twitter.zipkin.storage.{TraceIdDuration, IndexedTraceId, SpanStore}
import com.twitter.zipkin.util.Util
import java.nio.ByteBuffer
import scala.collection.JavaConverters._
case class ZipkinColumnFamilyNames(
traces: String = "Traces",
serviceNames: String = "ServiceNames",
spanNames: String = "SpanNames",
serviceNameIndex: String = "ServiceNameIndex",
serviceSpanNameIndex: String = "ServiceSpanNameIndex",
annotationsIndex: String = "AnnotationsIndex",
durationIndex: String = "DurationIndex")
object CassieSpanStoreDefaults {
val KeyspaceName = "Zipkin"
val ColumnFamilyNames = ZipkinColumnFamilyNames()
val WriteConsistency = cassie.WriteConsistency.One
val ReadConsistency = cassie.ReadConsistency.One
val SpanTtl = 7.days
val IndexTtl = 3.days
val IndexBuckets = 10
val MaxTraceCols = 100000
val ReadBatchSize = 500
val SpanCodec = new SnappyCodec(new ScroogeThriftCodec[ThriftSpan](ThriftSpan))
}
class CassieSpanStore(
keyspace: Keyspace,
stats: StatsReceiver = DefaultStatsReceiver.scope("CassieSpanStore"),
cfs: ZipkinColumnFamilyNames = CassieSpanStoreDefaults.ColumnFamilyNames,
writeConsistency: WriteConsistency = CassieSpanStoreDefaults.WriteConsistency,
readConsistency: ReadConsistency = CassieSpanStoreDefaults.ReadConsistency,
spanTtl: Duration = CassieSpanStoreDefaults.SpanTtl,
indexTtl: Duration = CassieSpanStoreDefaults.IndexTtl,
bucketsCount: Int = CassieSpanStoreDefaults.IndexBuckets,
maxTraceCols: Int = CassieSpanStoreDefaults.MaxTraceCols,
readBatchSize: Int = CassieSpanStoreDefaults.ReadBatchSize,
spanCodec: Codec[ThriftSpan] = CassieSpanStoreDefaults.SpanCodec
) extends SpanStore {
private[this] val ServiceNamesKey = "servicenames"
private[this] val IndexDelimiter = ":"
private[this] val IndexDelimiterBytes = IndexDelimiter.getBytes
private[this] val SomeIndexTtl = Some(indexTtl)
/**
* Internal helper methods
*/
private[this] def createSpanColumnName(span: Span): String =
"%d_%d_%d".format(span.id, span.annotations.hashCode, span.binaryAnnotations.hashCode)
private[this] def newBCF[N, V](cf: String, nCodec: Codec[N], vCodec: Codec[V]) =
BucketedColumnFamily(keyspace, cf, nCodec, vCodec, writeConsistency, readConsistency)
private[this] def idxCol[Name, Value](n: Name, v: Value): Column[Name, Value] =
Column[Name, Value](n, v, None, SomeIndexTtl)
private[this] def nameKey(serviceName: String, spanName: Option[String]): String =
(serviceName + spanName.map("." + _).getOrElse("")).toLowerCase
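  // The AnnotationsIndex row key is "service:annotation[:value]" encoded as bytes and
  // joined with IndexDelimiter; the optional binary value is appended only when present.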
private[this] def annotationKey(serviceName: String, annotation: String, value: Option[ByteBuffer]): ByteBuffer = {
ByteBuffer.wrap(
serviceName.getBytes ++ IndexDelimiterBytes ++ annotation.getBytes ++
value.map { v => IndexDelimiterBytes ++ Util.getArrayFromBuffer(v) }.getOrElse(Array()))
}
private[this] def colToIndexedTraceId(cols: Seq[Column[Long, Long]]): Seq[IndexedTraceId] =
cols map { c => IndexedTraceId(traceId = c.value, timestamp = c.name) }
/**
* Column Families
* and type aliases to their batch types
*/
private type BatchTraces = BatchMutationBuilder[Long, String, Span]
private[this] val Traces = keyspace
.columnFamily(cfs.traces, LongCodec, Utf8Codec, spanCodec)
.consistency(writeConsistency)
.consistency(readConsistency)
private type BatchServiceNames = BatchMutationBuilder[String, String, String]
private[this] val ServiceNames = new StringBucketedColumnFamily(
newBCF(cfs.serviceNames, Utf8Codec, Utf8Codec), bucketsCount)
private type BatchSpanNames = BatchMutationBuilder[String, String, String]
private[this] val SpanNames = new StringBucketedColumnFamily(
newBCF(cfs.spanNames, Utf8Codec, Utf8Codec), bucketsCount)
private type BatchServiceNameIndex = BatchMutationBuilder[String, Long, Long]
private[this] val ServiceNameIndex = new StringBucketedColumnFamily(
newBCF(cfs.serviceNameIndex, LongCodec, LongCodec), bucketsCount)
private type BatchServiceSpanNameIndex = BatchMutationBuilder[String, Long, Long]
private[this] val ServiceSpanNameIndex = keyspace
.columnFamily(cfs.serviceSpanNameIndex, Utf8Codec, LongCodec, LongCodec)
.consistency(writeConsistency)
.consistency(readConsistency)
private type BatchAnnotationsIndex = BatchMutationBuilder[ByteBuffer, Long, Long]
private[this] val AnnotationsIndex = new ByteBufferBucketedColumnFamily(
newBCF(cfs.annotationsIndex, LongCodec, LongCodec), bucketsCount)
private type BatchDurationIndex = BatchMutationBuilder[Long, Long, String]
private[this] val DurationIndex = keyspace
.columnFamily(cfs.durationIndex, LongCodec, LongCodec, Utf8Codec)
.consistency(writeConsistency)
.consistency(readConsistency)
/**
* Stats
*/
private[this] val SpansStats = stats.scope("spans")
private[this] val SpansStoredCounter = SpansStats.counter("stored")
private[this] val SpansIndexedCounter = SpansStats.counter("indexed")
private[this] val IndexStats = stats.scope("index")
private[this] val IndexServiceNameCounter = IndexStats.counter("serviceName")
private[this] val IndexServiceNameNoNameCounter = IndexStats.scope("serviceName").counter("noName")
private[this] val IndexSpanNameCounter = IndexStats.scope("serviceName").counter("spanName")
private[this] val IndexSpanNameNoNameCounter = IndexStats.scope("serviceName").scope("spanName").counter("noName")
private[this] val IndexTraceStats = IndexStats.scope("trace")
private[this] val IndexTraceNoLastAnnotationCounter = IndexTraceStats.counter("noLastAnnotation")
private[this] val IndexTraceByServiceNameCounter = IndexTraceStats.counter("serviceName")
private[this] val IndexTraceBySpanNameCounter = IndexTraceStats.counter("spanName")
private[this] val IndexAnnotationCounter = IndexStats.scope("annotation").counter("standard")
private[this] val IndexAnnotationNoLastAnnotationCounter = IndexStats.scope("annotation").counter("noLastAnnotation")
private[this] val IndexBinaryAnnotationCounter = IndexStats.scope("annotation").counter("binary")
private[this] val IndexDurationCounter = IndexStats.counter("duration")
private[this] val QueryStats = stats.scope("query")
private[this] val QueryGetTtlCounter = QueryStats.counter("getTimeToLive")
private[this] val QueryTracesExistStat = QueryStats.stat("tracesExist")
private[this] val QueryGetSpansByTraceIdsStat = QueryStats.stat("getSpansByTraceIds")
private[this] val QueryGetSpansByTraceIdsTooBigCounter = QueryStats.scope("getSpansByTraceIds").counter("tooBig")
private[this] val QueryGetServiceNamesCounter = QueryStats.counter("getServiceNames")
private[this] val QueryGetSpanNamesCounter = QueryStats.counter("getSpanNames")
private[this] val QueryGetTraceIdsByNameCounter = QueryStats.counter("getTraceIdsByName")
private[this] val QueryGetTraceIdsByAnnotationCounter = QueryStats.counter("getTraceIdsByAnnotation")
private[this] val QueryGetTracesDurationStat = QueryStats.stat("getTracesDuration")
/**
* Internal indexing helpers
*/
private[this] def indexServiceName(idx: BatchServiceNames, span: Span) {
IndexServiceNameCounter.incr()
span.serviceNames foreach {
case "" =>
IndexServiceNameNoNameCounter.incr()
case s =>
idx.insert(ServiceNamesKey, idxCol(s.toLowerCase, ""))
}
}
private[this] def indexSpanNameByService(idx: BatchSpanNames, span: Span) {
if (span.name == "") {
IndexSpanNameNoNameCounter.incr()
} else {
IndexSpanNameCounter.incr()
val spanNameCol = idxCol(span.name.toLowerCase, "")
span.serviceNames foreach { idx.insert(_, spanNameCol) }
}
}
private[this] def indexTraceIdByName(
serviceNameIdx: BatchServiceNameIndex,
serviceSpanNameIdx: BatchServiceSpanNameIndex,
span: Span
) {
if (span.lastAnnotation.isEmpty)
IndexTraceNoLastAnnotationCounter.incr()
span.lastAnnotation foreach { lastAnnotation =>
val timestamp = lastAnnotation.timestamp
val serviceNames = span.serviceNames
serviceNames foreach { serviceName =>
val col = idxCol(timestamp, span.traceId)
IndexTraceByServiceNameCounter.incr()
serviceNameIdx.insert(nameKey(serviceName, None), col)
if (span.name != "") {
IndexTraceBySpanNameCounter.incr()
serviceSpanNameIdx.insert(nameKey(serviceName, Some(span.name)), col)
}
}
}
}
private[this] def indexByAnnotations(idx: BatchAnnotationsIndex, span: Span) {
if (span.lastAnnotation.isEmpty)
IndexAnnotationNoLastAnnotationCounter.incr()
span.lastAnnotation foreach { lastAnnotation =>
val timestamp = lastAnnotation.timestamp
// skip core annotations since that query can be done by service name/span name anyway
span.annotations
.filter { a => !Constants.CoreAnnotations.contains(a.value) }
.groupBy(_.value)
.foreach { case (_, as) =>
val a = as.min
a.host foreach { endpoint =>
IndexAnnotationCounter.incr()
idx.insert(
annotationKey(endpoint.serviceName, a.value, None),
idxCol(a.timestamp, span.traceId))
}
}
span.binaryAnnotations foreach { ba =>
ba.host foreach { endpoint =>
IndexBinaryAnnotationCounter.incr()
val col = idxCol(timestamp, span.traceId)
idx.insert(annotationKey(endpoint.serviceName, ba.key, Some(ba.value)), col)
idx.insert(annotationKey(endpoint.serviceName, ba.key, None), col)
}
}
}
}
private[this] def indexSpanDuration(idx: BatchDurationIndex, span: Span) {
Seq(span.firstAnnotation, span.lastAnnotation).flatten foreach { a =>
IndexDurationCounter.incr()
idx.insert(span.traceId, idxCol(a.timestamp, ""))
}
}
private[this] def getSpansByTraceIds(traceIds: Seq[Long], count: Int): Future[Seq[Seq[Span]]] = {
val results = traceIds.grouped(readBatchSize) map { idBatch =>
Traces.multigetRows(idBatch.toSet.asJava, None, None, Order.Normal, count) map { rowSet =>
val rows = rowSet.asScala
idBatch flatMap { id =>
rows(id).asScala match {
case cols if cols.isEmpty =>
None
case cols if cols.size > maxTraceCols =>
QueryGetSpansByTraceIdsTooBigCounter.incr()
None
case cols =>
Some(cols.toSeq map { case (_, col) => col.value.toSpan })
}
}
}
}
Future.collect(results.toSeq).map(_.flatten)
}
/**
* API Implementation
*/
override def close(deadline: Time): Future[Unit] =
FuturePool.unboundedPool { keyspace.close() }
// TODO: break these into smaller batches?
def apply(spans: Seq[Span]): Future[Unit] = {
SpansStoredCounter.incr(spans.size)
val traces = Traces.batch()
val serviceNames = ServiceNames.batch()
val spanNames = SpanNames.batch()
val serviceNameIdx = ServiceNameIndex.batch()
val serviceSpanNameIdx = ServiceSpanNameIndex.batch()
val annotationsIdx = AnnotationsIndex.batch()
val durationIdx = DurationIndex.batch()
spans foreach { span =>
traces.insert(span.traceId, Column[String, ThriftSpan](createSpanColumnName(span), span.toThrift, None, Some(spanTtl)))
if (shouldIndex(span)) {
SpansIndexedCounter.incr()
indexServiceName(serviceNames, span)
indexSpanNameByService(spanNames, span)
indexTraceIdByName(serviceNameIdx, serviceSpanNameIdx, span)
indexByAnnotations(annotationsIdx, span)
indexSpanDuration(durationIdx, span)
}
}
Future.collect(Seq(
traces,
serviceNames,
spanNames,
serviceNameIdx,
serviceSpanNameIdx,
annotationsIdx,
durationIdx
).map(_.execute())).unit
}
def setTimeToLive(traceId: Long, ttl: Duration): Future[Unit] = {
Traces.getRow(traceId) flatMap { row =>
val traces = Traces.batch()
row.values.asScala foreach { col =>
traces.insert(traceId, col.copy(timestamp = None, ttl = Some(ttl)))
}
traces.execute().unit
}
}
def getTimeToLive(traceId: Long): Future[Duration] = {
QueryGetTtlCounter.incr()
Traces.getRow(traceId) flatMap { row =>
val ttl = row.values.asScala.foldLeft(Int.MaxValue) { (ttl, col) =>
math.min(ttl, col.ttl.map(_.inSeconds).getOrElse(Int.MaxValue))
}
if (ttl == Int.MaxValue)
Future.exception(new IllegalArgumentException("The trace " + traceId + " does not have any ttl set!"))
else
Future.value(ttl.seconds)
}
}
def tracesExist(traceIds: Seq[Long]): Future[Set[Long]] = {
QueryTracesExistStat.add(traceIds.size)
getSpansByTraceIds(traceIds, 1) map {
_.flatMap(_.headOption.map(_.traceId)).toSet
}
}
def getSpansByTraceId(traceId: Long): Future[Seq[Span]] =
getSpansByTraceIds(Seq(traceId)).map(_.head)
def getSpansByTraceIds(traceIds: Seq[Long]): Future[Seq[Seq[Span]]] = {
QueryGetSpansByTraceIdsStat.add(traceIds.size)
getSpansByTraceIds(traceIds, maxTraceCols)
}
def getAllServiceNames: Future[Set[String]] = {
QueryGetServiceNamesCounter.incr()
ServiceNames.getRow(ServiceNamesKey).map(_.values.asScala.map(_.name).toSet)
}
def getSpanNames(service: String): Future[Set[String]] = {
QueryGetSpanNamesCounter.incr()
SpanNames.getRow(service).map(_.values.asScala.map(_.name).toSet)
}
def getTraceIdsByName(
serviceName: String,
spanName: Option[String],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByNameCounter.incr()
val key = nameKey(serviceName, spanName)
// if we have a span name, look up in the service + span name index
// if not, look up by service name only
val idx: ColumnFamily[String, Long, Long] =
spanName.map(_ => ServiceSpanNameIndex).getOrElse(ServiceNameIndex)
// TODO: endTs seems wrong here
idx.getRowSlice(key, Some(endTs), None, limit, Order.Reversed) map colToIndexedTraceId
}
def getTraceIdsByAnnotation(
serviceName: String,
annotation: String,
value: Option[ByteBuffer],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByAnnotationCounter.incr()
val key = annotationKey(serviceName, annotation, value)
AnnotationsIndex.getRowSlice(key, None, Some(endTs), limit, Order.Reversed) map colToIndexedTraceId
}
def getTracesDuration(traceIds: Seq[Long]): Future[Seq[TraceIdDuration]] = {
QueryGetTracesDurationStat.add(traceIds.size)
val traceIdSet = traceIds.toSet.asJava
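    // Fetch the earliest (Order.Normal) and latest (Order.Reversed) DurationIndex column
    // for every trace, then pair them up; a duration is emitted only when both endpoints
    // belong to the same trace id.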
Future.collect(Seq(
DurationIndex.multigetRows(traceIdSet, None, None, Order.Normal, 1),
DurationIndex.multigetRows(traceIdSet, None, None, Order.Reversed, 1)
)) map { results =>
val Seq(startRows, endRows) = results map { rows =>
rows.asScala.toSeq map { case (traceId, cols) =>
cols.asScala.headOption map { case (_, col) => (traceId, col.name) }
}
}
startRows zip(endRows) collect {
case (Some((startId, startTs)), Some((endId, endTs))) if (startId == endId) =>
TraceIdDuration(endId, endTs - startTs, startTs)
}
}
}
}
| travisbrown/zipkin | zipkin-cassandra/src/main/scala/com/twitter/zipkin/storage/cassandra/CassieSpanStore.scala | Scala | apache-2.0 | 16,448 |
package lila.lobby
import akka.actor._
import com.typesafe.config.Config
import lila.common.PimpedConfig._
import lila.socket.History
final class Env(
config: Config,
db: lila.db.Env,
hub: lila.hub.Env,
onStart: String => Unit,
blocking: String => Fu[Set[String]],
playban: String => Fu[Option[lila.playban.TempBan]],
system: ActorSystem,
scheduler: lila.common.Scheduler) {
private val settings = new {
val MessageTtl = config duration "message.ttl"
val NetDomain = config getString "net.domain"
val SocketName = config getString "socket.name"
val SocketUidTtl = config duration "socket.uid.ttl"
val OrphanHookTtl = config duration "orphan_hook.ttl"
val ActorName = config getString "actor.name"
val BroomPeriod = config duration "broom_period"
val ResyncIdsPeriod = config duration "resync_ids_period"
val CollectionSeek = config getString "collection.seek"
val CollectionSeekArchive = config getString "collection.seek_archive"
val SeekMaxPerPage = config getInt "seek.max_per_page"
val SeekMaxPerUser = config getInt "seek.max_per_user"
}
import settings._
private val socket = system.actorOf(Props(new Socket(
history = history,
router = hub.actor.router,
uidTtl = SocketUidTtl
)), name = SocketName)
lazy val seekApi = new SeekApi(
coll = db(CollectionSeek),
archiveColl = db(CollectionSeekArchive),
blocking = blocking,
maxPerPage = SeekMaxPerPage,
maxPerUser = SeekMaxPerUser)
val lobby = system.actorOf(Props(new Lobby(
socket = socket,
seekApi = seekApi,
blocking = blocking,
playban = playban,
onStart = onStart
)), name = ActorName)
lazy val socketHandler = new SocketHandler(
hub = hub,
lobby = lobby,
socket = socket,
blocking = blocking)
lazy val history = new History[actorApi.Messadata](ttl = MessageTtl)
private val abortListener = new AbortListener(seekApi = seekApi)
system.actorOf(Props(new Actor {
system.lilaBus.subscribe(self, 'abortGame)
def receive = {
case lila.game.actorApi.AbortedBy(pov) if pov.game.isCorrespondence =>
abortListener recreateSeek pov
}
}))
{
import scala.concurrent.duration._
scheduler.once(10 seconds) {
scheduler.message(BroomPeriod) {
lobby -> lila.socket.actorApi.Broom
}
scheduler.message(ResyncIdsPeriod) {
lobby -> actorApi.Resync
}
}
}
}
object Env {
lazy val current = "lobby" boot new Env(
config = lila.common.PlayApp loadConfig "lobby",
db = lila.db.Env.current,
hub = lila.hub.Env.current,
onStart = lila.game.Env.current.onStart,
blocking = lila.relation.Env.current.api.blocking,
playban = lila.playban.Env.current.api.currentBan _,
system = lila.common.PlayApp.system,
scheduler = lila.common.PlayApp.scheduler)
}
| r0k3/lila | modules/lobby/src/main/Env.scala | Scala | mit | 2,883 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import edu.emory.mathcs.jtransforms.dct._
import org.apache.spark.annotation.Since
import org.apache.spark.ml.UnaryTransformer
import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param.BooleanParam
import org.apache.spark.ml.util._
import org.apache.spark.sql.types.DataType
/**
* A feature transformer that takes the 1D discrete cosine transform of a real vector. No zero
* padding is performed on the input vector.
* It returns a real vector of the same length representing the DCT. The return vector is scaled
* such that the transform matrix is unitary (aka scaled DCT-II).
*
* More information on <a href="https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II">
* DCT-II in Discrete cosine transform (Wikipedia)</a>.
*/
@Since("1.5.0")
class DCT @Since("1.5.0") (@Since("1.5.0") override val uid: String)
extends UnaryTransformer[Vector, Vector, DCT] with DefaultParamsWritable {
@Since("1.5.0")
def this() = this(Identifiable.randomUID("dct"))
/**
* Indicates whether to perform the inverse DCT (true) or forward DCT (false).
* Default: false
* @group param
*/
@Since("1.5.0")
def inverse: BooleanParam = new BooleanParam(
this, "inverse", "Set transformer to perform inverse DCT")
/** @group setParam */
@Since("1.5.0")
def setInverse(value: Boolean): this.type = set(inverse, value)
/** @group getParam */
@Since("1.5.0")
def getInverse: Boolean = $(inverse)
setDefault(inverse -> false)
override protected def createTransformFunc: Vector => Vector = { vec =>
val result = vec.toArray
val jTransformer = new DoubleDCT_1D(result.length)
if ($(inverse)) jTransformer.inverse(result, true) else jTransformer.forward(result, true)
Vectors.dense(result)
}
override protected def validateInputType(inputType: DataType): Unit = {
require(inputType.isInstanceOf[VectorUDT], s"Input type must be VectorUDT but got $inputType.")
}
override protected def outputDataType: DataType = new VectorUDT
}
@Since("1.6.0")
object DCT extends DefaultParamsReadable[DCT] {
@Since("1.6.0")
override def load(path: String): DCT = super.load(path)
}
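// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file). It shows the typical
// setInputCol/setOutputCol/setInverse/transform flow; the SparkSession handle
// and the column names are assumptions made for illustration only.
// ---------------------------------------------------------------------------
object DCTUsageSketch {
  import org.apache.spark.ml.linalg.Vectors
  import org.apache.spark.sql.SparkSession

  def run(spark: SparkSession): Unit = {
    val data = Seq(Vectors.dense(0.0, 1.0, -2.0, 3.0)).map(Tuple1.apply)
    val df = spark.createDataFrame(data).toDF("features")

    val dct = new DCT()
      .setInputCol("features")
      .setOutputCol("featuresDCT")
      .setInverse(false) // forward, unitary (scaled) DCT-II

    dct.transform(df).select("featuresDCT").show(truncate = false)
  }
}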
| esi-mineset/spark | mllib/src/main/scala/org/apache/spark/ml/feature/DCT.scala | Scala | apache-2.0 | 3,015 |
package io.hydrosphere.mist
import scala.concurrent.duration._
object Constants {
object Actors {
final val syncJobRunnerName = "SyncJobRunner"
final val asyncJobRunnerName = "AsyncJobRunner"
final val clusterManagerName = "ClusterManager"
final val mqttServiceName = "MQTTService"
final val kafkaServiceName = "KafkaService"
final val contextNode = "ContextNode"
final val cliName = "CLI"
final val cliResponderName = "CliResponder"
}
object CLI {
object Commands {
final val stopWorker = "kill worker"
final val stopJob = "kill job"
final val listWorkers = "list workers"
final val listRouters = "list routers"
final val listJobs = "list jobs"
final val stopAllWorkers = "kill all"
final val exit = "exit"
final val startJob = "start job"
}
final val noWorkersMsg = "no workers"
final val internalUserInterfaceActorName = "InternalUIActor"
final val timeoutDuration = 60.second
final val stopAllWorkers = "All contexts are scheduled for shutdown."
}
}
| KineticCookie/mist | src/main/scala/io/hydrosphere/mist/Constants.scala | Scala | apache-2.0 | 1,070 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import sbt.Def.{ Initialize, ScopedKey }
import sbt.Previous._
import sbt.Scope.Global
import sbt.SlashSyntax0._
import sbt.internal.util._
import sbt.std.TaskExtra._
import sbt.util.StampedFormat
import sjsonnew.JsonFormat
import scala.util.control.NonFatal
/**
* Reads the previous value of tasks on-demand. The read values are cached so that they are only read once per task execution.
* `referenced` provides the `Format` to use for each key.
*/
private[sbt] final class Previous(streams: Streams, referenced: IMap[Previous.Key, Referenced]) {
private[this] var map = IMap.empty[Previous.Key, ReferencedValue]
// We can't use mapValues to transform the map because mapValues is lazy and evaluates the
// transformation function every time a value is fetched from the map, defeating the entire
// purpose of ReferencedValue.
for (referenced.TPair(k, v) <- referenced.toTypedSeq) map = map.put(k, new ReferencedValue(v))
private[this] final class ReferencedValue[T](referenced: Referenced[T]) {
lazy val previousValue: Option[T] = referenced.read(streams)
}
/** Used by the .previous runtime implementation to get the previous value for task `key`. */
private def get[T](key: Key[T]): Option[T] =
map.get(key).flatMap(_.previousValue)
}
object Previous {
import sjsonnew.BasicJsonProtocol.StringJsonFormat
private[sbt] type ScopedTaskKey[T] = ScopedKey[Task[T]]
private type AnyTaskKey = ScopedTaskKey[Any]
private type Streams = sbt.std.Streams[ScopedKey[_]]
/** The stream where the task value is persisted. */
private final val StreamName = "previous"
private[sbt] final val DependencyDirectory = "previous-dependencies"
  /** Represents a reference to a task's `.previous` value. */
private[sbt] final class Referenced[T](val key: Key[T], val format: JsonFormat[T]) {
def this(task: ScopedTaskKey[T], format: JsonFormat[T]) = this(Key(task, task), format)
@deprecated("unused", "1.3.0")
private[sbt] def task: ScopedKey[Task[T]] = key.task
lazy val stamped: JsonFormat[T] =
StampedFormat.withStamp(key.task.key.manifest.toString)(format)
def setTask(newTask: ScopedKey[Task[T]]) = new Referenced(newTask, format)
private[sbt] def read(streams: Streams): Option[T] =
try Option(streams(key.cacheKey).cacheStoreFactory.make(StreamName).read[T]()(stamped))
catch { case NonFatal(_) => None }
}
private[sbt] val references = SettingKey[References](
"previous-references",
"Collects all static references to previous values of tasks.",
KeyRanks.Invisible
)
private[sbt] val cache = TaskKey[Previous](
"previous-cache",
"Caches previous values of tasks read from disk for the duration of a task execution.",
KeyRanks.Invisible
)
private[sbt] class Key[T](val task: ScopedKey[Task[T]], val enclosing: AnyTaskKey) {
override def equals(o: Any): Boolean = o match {
case that: Key[_] => this.task == that.task && this.enclosing == that.enclosing
case _ => false
}
override def hashCode(): Int = (task.## * 31) ^ enclosing.##
def cacheKey: AnyTaskKey = {
if (task == enclosing) task.asInstanceOf[ScopedKey[Task[Any]]]
else {
val am = enclosing.scope.extra match {
case Select(a) => a.put(scopedKeyAttribute, task.asInstanceOf[AnyTaskKey])
case _ => AttributeMap.empty.put(scopedKeyAttribute, task.asInstanceOf[AnyTaskKey])
}
Def.ScopedKey(enclosing.scope.copy(extra = Select(am)), enclosing.key)
}
}.asInstanceOf[AnyTaskKey]
}
private[sbt] object Key {
def apply[T, U](key: ScopedKey[Task[T]], enclosing: ScopedKey[Task[U]]): Key[T] =
new Key(key, enclosing.asInstanceOf[AnyTaskKey])
}
/** Records references to previous task value. This should be completely populated after settings finish loading. */
private[sbt] final class References {
private[this] var map = IMap.empty[Key, Referenced]
@deprecated("unused", "1.3.0")
def recordReference[T](key: ScopedKey[Task[T]], format: JsonFormat[T]): Unit =
recordReference(Key(key, key), format)
// TODO: this arbitrarily chooses a JsonFormat.
// The need to choose is a fundamental problem with this approach, but this should at least make a stable choice.
def recordReference[T](key: Key[T], format: JsonFormat[T]): Unit = synchronized {
map = map.put(key, new Referenced(key, format))
}
def getReferences: IMap[Key, Referenced] = synchronized { map }
}
/** Persists values of tasks t where there is some task referencing it via t.previous. */
private[sbt] def complete(
referenced: References,
results: RMap[Task, Result],
streams: Streams
): Unit = {
val map = referenced.getReferences
val reverse = map.keys.groupBy(_.task)
// We first collect all of the successful tasks and write their scoped key into a map
// along with their values.
val successfulTaskResults = (for {
results.TPair(task, Value(v)) <- results.toTypedSeq
key <- task.info.attributes.get(Def.taskDefinitionKey).asInstanceOf[Option[AnyTaskKey]]
} yield key -> v).toMap
// We then traverse the successful results and look up all of the referenced values for
// each of these tasks. This can be a many to one relationship if multiple tasks refer
// the previous value of another task. For each reference we find, we check if the task has
// been successfully evaluated. If so, we write it to the appropriate previous cache for
// the completed task.
for {
(k, v) <- successfulTaskResults
keys <- reverse.get(k)
key <- keys if successfulTaskResults.contains(key.enclosing)
ref <- map.get(key.asInstanceOf[Key[Any]])
} {
val out = streams(key.cacheKey).cacheStoreFactory.make(StreamName)
try out.write(v)(ref.stamped)
catch { case NonFatal(_) => }
}
}
private[sbt] val scopedKeyAttribute = AttributeKey[AnyTaskKey](
"previous-scoped-key-attribute",
"Specifies a scoped key for a task on which .previous is called. Used to " +
"set the cache directory for the task-specific previous value: see Previous.runtimeInEnclosingTask."
)
/** Public as a macro implementation detail. Do not call directly. */
def runtime[T](skey: TaskKey[T])(implicit format: JsonFormat[T]): Initialize[Task[Option[T]]] = {
val inputs = (Global / cache) zip Def.validated(skey, selfRefOk = true) zip (Global / references)
inputs {
case ((prevTask, resolved), refs) =>
val key = Key(resolved, resolved)
refs.recordReference(key, format) // always evaluated on project load
prevTask.map(_.get(key)) // evaluated if this task is evaluated
}
}
/** Public as a macro implementation detail. Do not call directly. */
def runtimeInEnclosingTask[T](skey: TaskKey[T])(
implicit format: JsonFormat[T]
): Initialize[Task[Option[T]]] = {
val inputs = (Global / cache)
.zip(Def.validated(skey, selfRefOk = true))
.zip(Global / references)
.zip(Def.resolvedScoped)
inputs {
case (((prevTask, resolved), refs), inTask: ScopedKey[Task[_]] @unchecked) =>
val key = Key(resolved, inTask)
refs.recordReference(key, format) // always evaluated on project load
prevTask.map(_.get(key)) // evaluated if this task is evaluated
}
}
}
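// ---------------------------------------------------------------------------
// Hedged sketch of how a build definition typically uses `.previous` (the key,
// task body and log message below are hypothetical; only the `.previous` call
// and the JsonFormat requirement reflect this file's API):
//
//   import sjsonnew.BasicJsonProtocol._   // provides the JsonFormat[Int]
//
//   val sourceCount = taskKey[Int]("Number of Scala sources")
//   sourceCount := {
//     val previous = sourceCount.previous.getOrElse(0)  // value persisted by Previous.complete
//     val current  = (Compile / sources).value.size
//     if (current != previous)
//       streams.value.log.info(s"source count changed: $previous -> $current")
//     current
//   }
// ---------------------------------------------------------------------------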
| sbt/sbt | main-settings/src/main/scala/sbt/Previous.scala | Scala | apache-2.0 | 7,529 |
package scala.scalanative
package runtime
import native._
/**
* The Boehm GC conservative garbage collector
*
* @see [[http://hboehm.info/gc/gcinterface.html C Interface]]
*/
@link("gc")
@extern
object GC {
@name("GC_malloc")
def malloc(size: CSize): Ptr[_] = extern
@name("GC_malloc_atomic")
def malloc_atomic(size: CSize): Ptr[_] = extern
@name("GC_init")
def init(): Unit = extern
}
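// Hedged usage sketch (not part of the original binding; kept as a comment because the
// concrete CSize alias varies across Scala Native versions):
//   GC.init()                     // start the collector once, early in program startup
//   val p = GC.malloc(64)         // 64 bytes of GC-managed memory, may contain pointers
//   val q = GC.malloc_atomic(64)  // same, but promised to contain no pointers (not scanned)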
| phdoerfler/scala-native | nativelib/src/main/scala/scala/scalanative/runtime/GC.scala | Scala | bsd-3-clause | 404 |
package org.monkeynuthead.riak
object AlbumRepositoryProtocol {
  case class StoreAlbum(album: Album) // response on success is the stored Album
  case class FetchAlbumByTitle(title: String) // response is an Option[Album]
}
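// Hedged usage sketch (the repository actor, ask timeout and album value are assumptions,
// not defined in this file):
//   repository ! StoreAlbum(album)                                // replies with the stored Album
//   (repository ? FetchAlbumByTitle("Kind of Blue")).mapTo[Option[Album]]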
| georgenicoll/monkey-barrel | old20150804/riak-scala-client-example/src/main/scala/org/monkeynuthead/riak/AlbumRepositoryProtocol.scala | Scala | gpl-2.0 | 222 |
package authentication
import models.User
import org.scalatest.{MustMatchers, WordSpec}
/**
* Tests for user profile
*/
class UserProfileTest extends WordSpec with MustMatchers {
"An unauthenticated profile" should {
"be not authenticated" in {
UnauthenticatedProfile.authenticated mustBe false
}
"not contain a user" in {
UnauthenticatedProfile.userOpt mustBe None
UnauthenticatedProfile.userIdOpt mustBe None
}
"have no groups" in {
UnauthenticatedProfile.groups mustBe 'empty
}
}
"An authenticated profile" when {
"having no groups" should {
"be authenticated" in new AuthenticatedProfileFixture {
profile.authenticated mustBe true
}
"contain a user" in new AuthenticatedProfileFixture {
profile.userOpt mustBe 'defined
profile.userOpt.get.username mustBe "dummy"
}
"contain the correct user id" in new AuthenticatedProfileFixture {
profile.userIdOpt mustBe 'defined
profile.userIdOpt mustBe Some(userId)
}
"have no groups" in new AuthenticatedProfileFixture {
profile.groups mustBe 'empty
}
}
"having some groups" should {
"be authenticated" in new AuthenticatedProfileGroupFixture {
profile.authenticated mustBe true
}
"contain a user" in new AuthenticatedProfileGroupFixture {
profile.userOpt mustBe 'defined
profile.userOpt.get.username mustBe "dummy"
}
"contain the correct user id" in new AuthenticatedProfileGroupFixture {
profile.userIdOpt mustBe 'defined
profile.userIdOpt mustBe Some(userId)
}
"contain the groups" in new AuthenticatedProfileGroupFixture {
profile.groups.size mustBe groups.size
profile.groups mustBe groups
}
}
}
"UserProfile factory" must {
"construct unauthenticated profile from empty parameters" in {
UserProfile().authenticated mustBe false
UserProfile(None).authenticated mustBe false
}
"construct authenticated profile from user and no groups" in new AuthenticatedProfileFixture {
UserProfile(user).authenticated mustBe true
UserProfile(Some(user)).authenticated mustBe true
}
"construct authenticated profile from user and groups" in new AuthenticatedProfileGroupFixture {
UserProfile(user).authenticated mustBe true
UserProfile(Some(user)).authenticated mustBe true
}
}
trait AuthenticatedProfileFixture {
val userId = "123456789"
val user = User(userId, "dummy", "", "dummy", "dummy@dummy.de", None, None, None)
val profile = AuthenticatedProfile(user)
}
trait AuthenticatedProfileGroupFixture {
val group1Id = "999888777"
val group2Id = "666555444"
val userId = "123456789"
val user = User(userId, "dummy", "", "dummy", "dummy@dummy.de", None, None, Some(Seq(group1Id, group2Id)))
val groups = Set(group1Id, group2Id)
val profile = AuthenticatedProfile(user)
}
}
| metaxmx/FridayNightBeer | modules/datamodel/src/test/scala/authentication/UserProfileTest.scala | Scala | apache-2.0 | 3,083 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.compression.internal.operators
import java.util.zip.{DataFormatException, Inflater}
import java.{util => ju}
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.reactive.Observable.Operator
import monix.reactive.compression.CompressionException
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.concurrent.Future
import scala.util.Success
import scala.util.control.NonFatal
private[compression] final class InflateOperator(bufferSize: Int, noWrap: Boolean)
extends Operator[Array[Byte], Array[Byte]] {
def apply(out: Subscriber[Array[Byte]]): Subscriber[Array[Byte]] =
new Subscriber[Array[Byte]] {
implicit val scheduler = out.scheduler
private[this] var isDone = false
private[this] var ack: Future[Ack] = _
private[this] val inflater = new InflateAdapter(bufferSize, noWrap)
def onNext(elem: Array[Byte]): Future[Ack] = {
if (isDone) {
Stop
} else {
try {
val result = inflater.onChunk(elem)
// signaling downstream
ack = out.onNext(result)
ack
} catch {
case e if NonFatal(e) =>
onError(e)
Stop
}
}
}
def onError(ex: Throwable): Unit =
if (!isDone) {
isDone = true
inflater.close()
out.onError(ex)
}
def onComplete(): Unit =
if (!isDone) {
isDone = true
if (ack == null) ack = Continue
ack.syncOnComplete {
case Success(Continue) =>
var streamErrors = true
try {
val lastArray = inflater.finish()
streamErrors = false
out.onNext(lastArray)
out.onComplete()
} catch {
case NonFatal(e) if streamErrors =>
out.onError(e)
} finally {
inflater.close()
}
case _ => inflater.close()
}
()
}
}
}
// https://github.com/zio/zio/blob/master/streams/jvm/src/main/scala/zio/stream/platform.scala
private final class InflateAdapter(bufferSize: Int, noWrap: Boolean) {
private val inflater = new Inflater(noWrap)
private val buffer = new Array[Byte](bufferSize)
def finish(): Array[Byte] = {
try {
if (inflater.finished()) {
inflater.reset()
Array.emptyByteArray
} else {
throw CompressionException(
"Inflater is not finished when input stream completed"
)
}
} catch {
case e: DataFormatException => throw CompressionException(e)
}
}
def onChunk(input: Array[Byte]): Array[Byte] = {
try {
inflater.setInput(input)
pullAllOutput(input)
} catch {
case e: DataFormatException => throw CompressionException(e)
}
}
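    // Drains the inflater into `buffer` chunk by chunk, appending each produced chunk to
    // the accumulator; when the inflater reports finished while input bytes remain, the
    // leftover is fed back in as the start of a further concatenated stream.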
private def pullAllOutput(input: Array[Byte]): Array[Byte] = {
@tailrec
def next(acc: Array[Byte]): Array[Byte] = {
val read = inflater.inflate(buffer)
val remaining = inflater.getRemaining()
val current = ju.Arrays.copyOf(buffer, read)
if (remaining > 0) {
if (read > 0) next(acc ++ current)
else if (inflater.finished()) {
val leftover = input.takeRight(remaining)
inflater.reset()
inflater.setInput(leftover.toArray)
next(acc ++ current)
} else {
// Impossible happened (aka programmer error). Die.
throw new Exception("read = 0, remaining > 0, not finished")
}
} else if (read > 0) next(acc ++ current)
else acc ++ current
}
if (inflater.needsInput()) Array.emptyByteArray
else next(Array.emptyByteArray)
}
def close(): Unit = inflater.`end`()
}
| alexandru/monifu | monix-reactive/jvm/src/main/scala/monix/reactive/compression/internal/operators/InflateOperator.scala | Scala | apache-2.0 | 4,536 |
package com.shrbank.bigdata.storm
import org.slf4j.LoggerFactory
/**
* Created by wushaojie on 2016/9/20.
 * Inspects the current SyncSpout configuration stored in ZooKeeper.
*/
object ZkConfigHelper {
private val log = LoggerFactory.getLogger(this.getClass)
def main(args: Array[String]): Unit = {
if ( args.length < 3 ){
log.error("用法:ZkConfigHelper zkServerList list|rm [topologyName|all]")
println("用法:ZkConfigHelper zkServerList list|rm [topologyName|all]")
return
}
val Array( zkServer,cmd,topologyName ) = args
log.info(s"查找[$zkServer]下[$topologyName]的配置情况")
println(s"查找[$zkServer]下[$topologyName]的配置情况")
var zkConfig:SyncSpoutZkConfig = null
if(topologyName.toLowerCase=="all"){
zkConfig = new SyncSpoutZkConfig(zkServer)
val children = zkConfig.getZkClient.getChildren.forPath(SyncSpoutZkConfig.ZK_ROOT_PATH)
println(s"$cmd 运行中的Spout列表")
for(i<-0 until children.size()){
println(s"$i,${children.get(i)}")
val childrenPath = s"${SyncSpoutZkConfig.ZK_ROOT_PATH}/${children.get(i)}"
val server = zkConfig.getZkClient.getChildren.forPath(childrenPath)
if(cmd.toLowerCase=="list"){
println(s"${children.get(i)} 分布详情")
for(j<-0 until server.size()){
println(s" $j,${server.get(j)},type = ${new String(zkConfig.getZkClient.getData.forPath(s"$childrenPath/${server.get(j)}"))}")
}
}else{
val children = zkConfig.getZkClient.getChildren.forPath(childrenPath)
if(children.isEmpty){
zkConfig.getZkClient.delete.forPath(childrenPath)
println(s"$childrenPath 已删除")
}else{
for(i<-0 until children.size()){
zkConfig.getZkClient.delete.forPath(s"$childrenPath/${children.get(i)}")
println(s"$childrenPath/${children.get(i)} 已删除")
}
}
}
}
}else{
zkConfig = new SyncSpoutZkConfig(zkServer,topologyName)
println(s"$topologyName 分布详情")
if(cmd.toLowerCase=="list"){
zkConfig.getServerPort.foreach(println)
}else{
if(zkConfig.getServerPort.isEmpty){
zkConfig.deleteServerPath()
println(s"$topologyName 已删除")
}
}
}
zkConfig.close()
}
}
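// Hedged invocation sketch (argument shape taken from the usage string above; the
// ZooKeeper hosts and topology name are examples only):
//   ZkConfigHelper zk1:2181,zk2:2181 list all          // list every registered Spout and its servers
//   ZkConfigHelper zk1:2181,zk2:2181 rm myTopology     // remove stale registrations for one topology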
| shrbank/SyncSpout | core/src/main/scala/com/shrbank/bigdata/storm/ZkConfigHelper.scala | Scala | gpl-2.0 | 2,375 |
package algorithms.implementation
import org.scalatest.FunSuite
import scala.language.implicitConversions
/**
* Created by yujieshui on 2017/5/11.
*/
class BiggerIsGreaterTest extends FunSuite {
import BiggerIsGreater._
implicit def s2l(s: String): List[Char] = Predef.augmentString(s).toList
test("a") {
assert(solution("ab") === Some(s2l("ba")))
assert(solution("bb") === None)
assert(solution("hefg") === Some(s2l("hegf")))
assert(solution("dhck") === Some(s2l("dhkc")))
assert(solution("dkhc") === Some(s2l("hcdk")))
}
test("n") {
println(
solution("a")
)
}
test("opt") {
val x = 1 to 100000 map { i =>
solution(1 to 100 map (_ => (math.random * 255).toInt.toChar) toList)
}
// val o = x.mkString("\n")
// Thread.sleep(10000)
// println(o)
// Thread.sleep(10000)
}
}
| 1178615156/hackerrank | src/test/scala/algorithms/implementation/BiggerIsGreaterTest.scala | Scala | apache-2.0 | 859 |
package sangria.validation.rules
import org.scalatest.WordSpec
import sangria.util.{Pos, ValidationSupport}
class FieldsOnCorrectTypeSpec extends WordSpec with ValidationSupport {
override val defaultRule = Some(new FieldsOnCorrectType)
"Validate: Fields on correct type" should {
"Object field selection" in expectPasses(
"""
fragment objectFieldSelection on Dog {
__typename
name
}
""")
"Aliased object field selection" in expectPasses(
"""
fragment aliasedObjectFieldSelection on Dog {
tn : __typename
otherName : name
}
""")
"Interface field selection" in expectPasses(
"""
fragment interfaceFieldSelection on Pet {
__typename
name
}
""")
"Aliased interface field selection" in expectPasses(
"""
fragment interfaceFieldSelection on Pet {
otherName : name
}
""")
"Lying alias selection" in expectPasses(
"""
fragment lyingAliasSelection on Dog {
name : nickname
}
""")
"Ignores fields on unknown type" in expectPasses(
"""
fragment unknownSelection on UnknownType {
unknownField
}
""")
"Field not defined on fragment" in expectFails(
"""
fragment fieldNotDefined on Dog {
meowVolume
}
""",
List(
"Cannot query field 'meowVolume' on 'Dog'." -> Some(Pos(3, 11))
))
"Field not defined deeply, only reports first" in expectFails(
"""
fragment deepFieldNotDefined on Dog {
unknown_field {
deeper_unknown_field
}
}
""",
List(
"Cannot query field 'unknown_field' on 'Dog'." -> Some(Pos(3, 11))
))
"Sub-field not defined" in expectFails(
"""
fragment subFieldNotDefined on Human {
pets {
unknown_field
}
}
""",
List(
"Cannot query field 'unknown_field' on 'Pet'." -> Some(Pos(4, 13))
))
"Field not defined on inline fragment" in expectFails(
"""
fragment fieldNotDefined on Pet {
... on Dog {
meowVolume
}
}
""",
List(
"Cannot query field 'meowVolume' on 'Dog'." -> Some(Pos(4, 13))
))
"Aliased field target not defined" in expectFails(
"""
fragment aliasedFieldTargetNotDefined on Dog {
volume : mooVolume
}
""",
List(
"Cannot query field 'mooVolume' on 'Dog'." -> Some(Pos(3, 11))
))
"Aliased lying field target not defined" in expectFails(
"""
fragment aliasedLyingFieldTargetNotDefined on Dog {
barkVolume : kawVolume
}
""",
List(
"Cannot query field 'kawVolume' on 'Dog'." -> Some(Pos(3, 11))
))
"Not defined on interface" in expectFails(
"""
fragment notDefinedOnInterface on Pet {
tailLength
}
""",
List(
"Cannot query field 'tailLength' on 'Pet'." -> Some(Pos(3, 11))
))
"Defined on implmentors but not on interface" in expectFails(
"""
fragment definedOnImplementorsButNotInterface on Pet {
nickname
}
""",
List(
"Cannot query field 'nickname' on 'Pet'." -> Some(Pos(3, 11))
))
"Meta field selection on union" in expectPasses(
"""
fragment directFieldSelectionOnUnion on CatOrDog {
__typename
}
""")
"Direct field selection on union" in expectFails(
"""
fragment directFieldSelectionOnUnion on CatOrDog {
directField
}
""",
List(
"Cannot query field 'directField' on 'CatOrDog'." -> Some(Pos(3, 11))
))
"Defined on implementors queried on union" in expectFails(
"""
fragment definedOnImplementorsQueriedOnUnion on CatOrDog {
name
}
""",
List(
"Cannot query field 'name' on 'CatOrDog'." -> Some(Pos(3, 11))
))
"valid field in inline fragment" in expectPasses(
"""
fragment objectFieldSelection on Pet {
... on Dog {
name
}
}
""")
}
}
| narahari92/sangria | src/test/scala/sangria/validation/rules/FieldsOnCorrectTypeSpec.scala | Scala | apache-2.0 | 4,338 |