| code (string, 5-1M) | repo_name (string, 5-109) | path (string, 6-208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package com.datastax.spark.connector.rdd.partitioner
import java.net.{InetAddress, InetSocketAddress}
import com.datastax.oss.driver.api.core.CqlIdentifier
import com.datastax.spark.connector.ColumnSelector
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.util._
import com.datastax.spark.connector.writer.RowWriterFactory
import org.apache.spark.{Partition, Partitioner}
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
case class ReplicaPartition(index: Int, endpoints: Array[String]) extends EndpointPartition
/**
* The replica partitioner works on an RDD keyed on sets of InetAddresses representing Cassandra
* hosts. It groups keys that share a common IP address into partitionsPerReplicaSet partitions.
* @param partitionsPerReplicaSet The number of Spark partitions to create per unique endpoint
*/
class ReplicaPartitioner[T](
table: String,
keyspace: String,
partitionsPerReplicaSet: Int,
partitionKeyMapper: ColumnSelector,
val connector: CassandraConnector)(
implicit
currentType: ClassTag[T],
@transient private val rwf: RowWriterFactory[T]) extends Partitioner {
val _keyspace = CqlIdentifier.fromInternal(keyspace) // TODO Fix this
val tableDef = tableFromCassandra(connector, keyspace, table)
val rowWriter = implicitly[RowWriterFactory[T]].rowWriter(
tableDef,
partitionKeyMapper.selectFrom(tableDef)
)
@transient lazy private val tokenGenerator = new TokenGenerator[T](connector, tableDef, rowWriter)
@transient lazy private val tokenMap = connector.withSessionDo(_.getMetadata.getTokenMap.get)//TODO Handle missing
@transient lazy private val protocolVersion = connector.withSessionDo(_.getContext.getProtocolVersion)
@transient lazy private val clazz = implicitly[ClassTag[T]].runtimeClass
private val hosts = connector.hosts.map(_.getAddress).toVector
private val hostSet = hosts.toSet
private val numHosts = hosts.size
private val partitionIndexes = (0 until partitionsPerReplicaSet * numHosts)
.grouped(partitionsPerReplicaSet)
.toList
private val hostMap = (hosts zip partitionIndexes).toMap
// Ip1 -> (0,1,2,..), Ip2 -> (11,12,13...)
private val indexMap = for ((ip, partitions) <- hostMap; partition <- partitions) yield (partition, ip)
// 0->IP1, 1-> IP1, ...
private def absModulo(dividend: Int, divisor: Int) : Int = Math.abs(dividend % divisor)
private def randomHost(index: Int): InetAddress = hosts(absModulo(index, hosts.length))
/**
* Given a set of endpoints, pick a random endpoint, and then a random partition owned by that
* endpoint. If the requested host doesn't exist, choose another random host. Only uses valid hosts
* from the connected datacenter.
* @param key A Set[InetAddress] of replicas for this Cassandra partition
* @return An integer between 0 (inclusive) and numPartitions (exclusive)
*/
override def getPartition(key: Any): Int = {
key match {
case key: T if clazz.isInstance(key) =>
//Only use ReplicaEndpoints in the connected DC
val token = tokenGenerator.getTokenFor(key)
val tokenHash = Math.abs(token.hashCode())
val replicas = tokenMap
.getReplicas(_keyspace, token)
.map(n => DriverUtil.toAddress(n).get.getAddress)
val replicaSetInDC = (hostSet & replicas).toVector
if (replicaSetInDC.nonEmpty) {
val endpoint = replicaSetInDC(absModulo(tokenHash, replicaSetInDC.size))
hostMap(endpoint)(absModulo(tokenHash, partitionsPerReplicaSet))
} else {
hostMap(randomHost(tokenHash))(absModulo(tokenHash, partitionsPerReplicaSet))
}
case _ => throw new IllegalArgumentException(
"ReplicaPartitioner can only determine the partition of a tuple whose key is a non-empty Set[InetAddress]. " +
s"Invalid key: $key")
}
}
override def numPartitions: Int = partitionsPerReplicaSet * numHosts
val nodeAddresses = new NodeAddresses(connector)
def getEndpointPartition(partition: Partition): ReplicaPartition = {
val endpoints = indexMap.getOrElse(partition.index,
throw new RuntimeException(s"$indexMap : Can't get an endpoint for Partition ${partition.index}"))
ReplicaPartition(index = partition.index, endpoints = nodeAddresses.hostNames(endpoints).toArray)
}
}
|
datastax/spark-cassandra-connector
|
connector/src/main/scala/com/datastax/spark/connector/rdd/partitioner/ReplicaPartitioner.scala
|
Scala
|
apache-2.0
| 4,356
|
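A self-contained sketch of the bookkeeping ReplicaPartitioner builds in hostMap and indexMap above: each host owns a contiguous block of partitionsPerReplicaSet partition indexes, and the inverse map resolves a partition index back to its host. The host addresses below are illustrative placeholders, not values taken from a real connector.
import java.net.InetAddress
object ReplicaPartitionMappingSketch extends App {
  val partitionsPerReplicaSet = 3
  // Illustrative hosts; ReplicaPartitioner derives these from CassandraConnector.hosts.
  val hosts: Vector[InetAddress] = Vector("127.0.0.1", "127.0.0.2").map(InetAddress.getByName)
  // Ip1 -> Vector(0, 1, 2), Ip2 -> Vector(3, 4, 5)
  val hostMap: Map[InetAddress, Vector[Int]] =
    hosts.zip(
      (0 until partitionsPerReplicaSet * hosts.size)
        .grouped(partitionsPerReplicaSet)
        .map(_.toVector)
        .toVector
    ).toMap
  // 0 -> Ip1, 1 -> Ip1, 2 -> Ip1, 3 -> Ip2, ...
  val indexMap: Map[Int, InetAddress] =
    for ((ip, partitions) <- hostMap; partition <- partitions) yield partition -> ip
  assert(indexMap.size == partitionsPerReplicaSet * hosts.size)
  hostMap.foreach { case (ip, ps) => println(s"$ip owns partitions ${ps.mkString(", ")}") }
}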
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters.mapAsJavaMapConverter
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.NamespaceAlreadyExistsException
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.connector.catalog.SupportsNamespaces
import org.apache.spark.util.Utils
/**
* Physical plan node for creating a namespace.
*/
case class CreateNamespaceExec(
catalog: SupportsNamespaces,
namespace: Seq[String],
ifNotExists: Boolean,
private var properties: Map[String, String])
extends V2CommandExec {
override protected def run(): Seq[InternalRow] = {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.SupportsNamespaces._
val ns = namespace.toArray
if (!catalog.namespaceExists(ns)) {
try {
val ownership =
Map(PROP_OWNER_NAME -> Utils.getCurrentUserName(), PROP_OWNER_TYPE -> "USER")
catalog.createNamespace(ns, (properties ++ ownership).asJava)
} catch {
case _: NamespaceAlreadyExistsException if ifNotExists =>
logWarning(s"Namespace ${namespace.quoted} was created concurrently. Ignoring.")
}
} else if (!ifNotExists) {
throw new NamespaceAlreadyExistsException(ns)
}
Seq.empty
}
override def output: Seq[Attribute] = Seq.empty
}
|
ptkool/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/CreateNamespaceExec.scala
|
Scala
|
apache-2.0
| 2,261
|
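A minimal sketch of the create-if-absent pattern CreateNamespaceExec implements above: check existence first, but still tolerate a concurrent creator by swallowing the already-exists error when IF NOT EXISTS was requested. The catalog trait and exception here are simplified stand-ins, not Spark's SupportsNamespaces API.
object CreateNamespaceSketch {
  final class NamespaceExistsException(ns: Seq[String])
    extends RuntimeException(s"Namespace ${ns.mkString(".")} already exists")
  trait SimpleCatalog {
    def namespaceExists(ns: Seq[String]): Boolean
    def createNamespace(ns: Seq[String], properties: Map[String, String]): Unit
  }
  def createNamespace(
      catalog: SimpleCatalog,
      ns: Seq[String],
      ifNotExists: Boolean,
      properties: Map[String, String]): Unit = {
    if (!catalog.namespaceExists(ns)) {
      try {
        catalog.createNamespace(ns, properties)
      } catch {
        // Another session created it between the existence check and the create call.
        case _: NamespaceExistsException if ifNotExists => // ignore, matching IF NOT EXISTS semantics
      }
    } else if (!ifNotExists) {
      throw new NamespaceExistsException(ns)
    }
  }
}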
package dk.tennis.compare.rating.multiskill.infer.outcome
import dk.bayes.math.gaussian.Gaussian
import dk.tennis.compare.rating.multiskill.model.perfdiff.Score
import scala.math._
import Gaussian._
object InferOutcomeGivenPerfDiff {
def totalLoglik(perfDiffs: Array[Gaussian], scores: Array[Score], filter: (Score) => Boolean = { score => true }): Double = {
val logliks = scores.zip(perfDiffs).filter { case (score, perfDiff) => filter(score) }.map {
case (score, perfDiff) =>
val loglik = score.pointsWon.get._1 * InferOutcomeGivenPerfDiff.loglik(perfDiff, true) + score.pointsWon.get._2 * InferOutcomeGivenPerfDiff.loglik(perfDiff, false)
loglik
}
logliks.sum
}
def loglik(perfDiff: Gaussian, win: Boolean): Double = {
val winProb = 1 - stdCdf(-perfDiff.m / sqrt(perfDiff.v))
win match {
case true => log(winProb)
case false => log1p(-winProb)
}
}
/**
* @param perfDiffsMuD Partial derivative of the mean of the game performance difference with respect to some hyperparameter
* @param perfDiffsVarD Partial derivative of the variance of the game performance difference with respect to some hyperparameter
*/
def totalLoglikD(perfDiffs: Array[Gaussian], perfDiffsMuD: Array[Double], perfDiffsVarD: Array[Double], scores: Array[Score]): Double = {
val totalLogLikD = (0 until perfDiffs.size).map { i =>
val perfDiff = perfDiffs(i)
val muD = perfDiffsMuD(i)
val varD = perfDiffsVarD(i)
val score = scores(i)
val loglikD = score.pointsWon.get._1 * InferOutcomeGivenPerfDiff.loglikD(perfDiff, true, muD, varD) + score.pointsWon.get._2 * InferOutcomeGivenPerfDiff.loglikD(perfDiff, false, muD, varD)
loglikD
}.sum
totalLogLikD
}
/**
* Returns partial derivative of log likelihood with respect to some parameter theta
*
* @param skillDiff
* @param win
* @param muD Partial derivative of the mean of skills difference
* @param varD Partial derivative of the variance of skills difference
*/
def loglikD(skillDiff: Gaussian, win: Boolean, muD: Double, varD: Double): Double = {
val m = skillDiff.m
val v = skillDiff.v
val x = -m / sqrt(v)
val xD = -(muD / sqrt(v) - 0.5 * m * (1d / pow(v, 3d / 2) * varD))
//derivative of cdfVal
val cdfValD = stdPdf(x) * xD
win match {
case true => -1 * (1d / (1 - stdCdf(x))) * cdfValD
case false => (1d / stdCdf(x)) * cdfValD
}
}
}
|
danielkorzekwa/tennis-player-compare
|
multiskill/src/main/scala/dk/tennis/compare/rating/multiskill/infer/outcome/InferOutcomeGivenPerfDiff.scala
|
Scala
|
bsd-2-clause
| 2,492
|
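A standalone sketch of the quantity loglik computes above: for a Gaussian performance difference N(m, v), the win probability is 1 - Φ(-m/√v) and the outcome log-likelihood is its logarithm. The stdCdf below is an Abramowitz-Stegun approximation standing in for the stdCdf imported from dk.bayes' Gaussian.
import scala.math._
object WinLoglikSketch extends App {
  // Approximate standard normal CDF (Abramowitz & Stegun 26.2.17), absolute error below ~1e-7.
  def stdCdf(x: Double): Double = {
    val t = 1.0 / (1.0 + 0.2316419 * abs(x))
    val poly = t * (0.319381530 + t * (-0.356563782 + t * (1.781477937 + t * (-1.821255978 + t * 1.330274429))))
    val tail = exp(-x * x / 2.0) / sqrt(2.0 * Pi) * poly
    if (x >= 0) 1.0 - tail else tail
  }
  def loglik(m: Double, v: Double, win: Boolean): Double = {
    val winProb = 1 - stdCdf(-m / sqrt(v))
    if (win) log(winProb) else log1p(-winProb)
  }
  // A performance difference of N(0.5, 1) wins a point with roughly 69% probability.
  println(f"P(win) = ${1 - stdCdf(-0.5 / sqrt(1.0))}%.3f, loglik(win) = ${loglik(0.5, 1.0, win = true)}%.3f")
}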
package scodec.protocols.pcap
import scodec.Err
import scodec.bits.{ BitVector, ByteOrdering }
import scodec.{ Attempt, Codec, DecodeResult, SizeBound }
import scodec.codecs._
case class GlobalHeader(
ordering: ByteOrdering,
versionMajor: Int,
versionMinor: Int,
thiszone: Int,
sigfigs: Long,
snaplen: Long,
network: LinkType)
object GlobalHeader {
private val MagicNumber = 0xa1b2c3d4L
private val MagicNumberRev = 0xd4c3b2a1L
private val byteOrdering: Codec[ByteOrdering] = new Codec[ByteOrdering] {
def sizeBound = SizeBound.exact(32)
def encode(bo: ByteOrdering) =
endiannessDependent(uint32, uint32L)(bo).encode(MagicNumber)
def decode(buf: BitVector) =
uint32.decode(buf).flatMap {
case DecodeResult(MagicNumber, rest) => Attempt.successful(DecodeResult(ByteOrdering.BigEndian, rest))
case DecodeResult(MagicNumberRev, rest) => Attempt.successful(DecodeResult(ByteOrdering.LittleEndian, rest))
case DecodeResult(other, rest) => Attempt.failure(Err(s"unable to detect byte ordering due to unrecognized magic number $other"))
}
override def toString = "byteOrdering"
}
implicit val codec: Codec[GlobalHeader] = "global-header" | {
("magic_number" | byteOrdering ) >>:~ { implicit ordering =>
("version_major" | guint16 ) ::
("version_minor" | guint16 ) ::
("thiszone" | gint32 ) ::
("sigfigs" | guint32 ) ::
("snaplen" | guint32 ) ::
("network" | LinkType.codec )
}}.as[GlobalHeader]
}
|
scodec/scodec-protocols
|
src/main/scala/scodec/protocols/pcap/GlobalHeader.scala
|
Scala
|
bsd-3-clause
| 1,579
|
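The byteOrdering codec above hinges on the pcap global header's magic number: read as a big-endian uint32 it is either 0xa1b2c3d4 (big-endian capture) or 0xd4c3b2a1 (the writer was little-endian, so the bytes appear reversed). A dependency-free sketch of the same detection, without the scodec combinators:
import java.nio.{ByteBuffer, ByteOrder}
object PcapByteOrderSketch extends App {
  val MagicNumber    = 0xa1b2c3d4L
  val MagicNumberRev = 0xd4c3b2a1L
  /** Reads the first 4 bytes as a big-endian uint32 and infers the file's byte order. */
  def detect(header: Array[Byte]): Either[String, ByteOrder] = {
    val magic = ByteBuffer.wrap(header, 0, 4).order(ByteOrder.BIG_ENDIAN).getInt.toLong & 0xffffffffL
    magic match {
      case MagicNumber    => Right(ByteOrder.BIG_ENDIAN)
      case MagicNumberRev => Right(ByteOrder.LITTLE_ENDIAN)
      case other          => Left(f"unrecognized magic number 0x$other%08x")
    }
  }
  // A little-endian pcap file starts with the bytes d4 c3 b2 a1.
  println(detect(Array(0xd4, 0xc3, 0xb2, 0xa1).map(_.toByte)))  // Right(LITTLE_ENDIAN)
}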
package reopp.workers.connectors
import actors.OutputChannel
import reopp.workers.Node
import reopp.common.guardedcommands.{Formula, GCSolution}
import reopp.common.guardedcommands.dataconnectors.GCLossy
/**
* Created with IntelliJ IDEA.
* User: jose
* Date: 09/05/12
* Time: 15:37
* To change this template use File | Settings | File Templates.
*/
class Lossy extends Node[GCSolution, Formula] {
// val uid = hashCode
val connector = new GCLossy("a","b")
// // what ends depend on "end" - just a guess to decide when to search for a solution
// def dependsOn(end: String) = if (end == "a") Set("b") else Set()
// suggests which ends must have dataflow if "end" has also dataflow
// "b" requires "a", but not vice-versa!
def guessRequirements(nd: Node[GCSolution, Formula]): Set[Node[GCSolution,Formula]] =
if (connections contains nd) { // if the node nd is actually connected to this node
// for ((myend,_,_) <- connections(nd)) {// set of ends
for ((myend,_) <- getConnectedEndsTo(nd)) {
if (myend == "a") return invConnections("b")
else if (myend == "b") return invConnections("a")
}
Set()
}
else Set()
// if (neighbours.tail.head == nd) Set(neighbours.head)
// else Set(neighbours.head)
}
|
joseproenca/ip-constraints
|
code/src/main/scala/reopp/workers/connectors/Lossy.scala
|
Scala
|
mit
| 1,274
|
package com.seancheatham
package object chess {
//
// Piece-type Constants
//
/**
* An "invalid" square, or "out-of-bounds"
*/
final val _I: Byte =
0
/**
* An "empty" square
*/
final val _E: Byte =
1
/**
* Black Pawn
*/
final val BP: Byte =
2
/**
* Black Bishop
*/
final val BB: Byte =
3
/**
* Black Knight ("N" is a convention to avoid conflicting with the King)
*/
final val BN: Byte =
4
/**
* Black Rook
*/
final val BR: Byte =
5
/**
* Black Queen
*/
final val BQ: Byte =
6
/**
* Black King
*/
final val BK: Byte =
7
/**
* White Pawn
*/
final val WP: Byte =
8
/**
* White Bishop
*/
final val WB: Byte =
9
/**
* White Knight ("N" is a convention to avoid conflicting with the King)
*/
final val WN: Byte =
10
/**
* White Rook
*/
final val WR: Byte =
11
/**
* White Queen
*/
final val WQ: Byte =
12
/**
* White King
*/
final val WK: Byte =
13
//
// Team/Side Constants
//
final val BLACK: Byte =
0
final val WHITE: Byte =
1
//
// Board Constants
//
/**
* A collection of squares which are legal on the board. Since a [[com.seancheatham.chess.Board]] is
* represented as 10x12, the outer squares provide padding, but are illegal for play
*/
final val LEGAL_SQUARES: Vector[Byte] =
(
(20 until 29).toVector ++
(30 until 39) ++
(40 until 49) ++
(50 until 59) ++
(60 until 69) ++
(70 until 79) ++
(80 until 89) ++
(90 until 99)
)
.map(_.toByte)
/**
* The basic chess board representation
*/
final val DEFAULT_BOARD_SQUARES =
Vector[Byte](
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 9
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 19
_I, BR, BN, BB, BQ, BK, BB, BN, BR, _I, // 29
_I, BP, BP, BP, BP, BP, BP, BP, BP, _I, // 39
_I, _E, _E, _E, _E, _E, _E, _E, _E, _I, // 49
_I, _E, _E, _E, _E, _E, _E, _E, _E, _I, // 59
_I, _E, _E, _E, _E, _E, _E, _E, _E, _I, // 69
_I, _E, _E, _E, _E, _E, _E, _E, _E, _I, // 79
_I, WP, WP, WP, WP, WP, WP, WP, WP, _I, // 89
_I, WR, WN, WB, WQ, WK, WB, WN, WR, _I, // 99
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 109
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I // 119
)
/**
* A "challenge" test board, for testing purposes
*/
final val CHALLENGE_BOARD_SQUARES =
Vector[Byte](
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 9
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 19
_I, _E, BK, _E, BR, _E, BN, BR, _E, _I, // 29
_I, BP, _E, BQ, _E, BB, BP, _E, _E, _I, // 39
_I, BP, BB, BP, _E, BP, _E, _E, _E, _I, // 49
_I, _E, _E, WN, BP, WP, BN, _E, BP, _I, // 59
_I, WP, WP, _E, WP, _E, _E, BP, _E, _I, // 69
_I, _E, _E, WP, _E, _E, _E, _E, _E, _I, // 79
_I, _E, _E, _E, _E, WQ, WP, WP, WP, _I, // 89
_I, WR, _E, WB, _E, WN, WR, WK, _E, _I, // 99
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I, // 109
_I, _I, _I, _I, _I, _I, _I, _I, _I, _I // 119
)
//
// Implicit Helpers
//
implicit class PieceTypeImplicits(pieceType: Byte) {
def isWhite: Boolean =
Range.inclusive(WP.toInt, WK.toInt) contains pieceType.toInt // inclusive, so the white king counts as white
def isBlack: Boolean =
Range.inclusive(BP.toInt, BK.toInt) contains pieceType.toInt // inclusive, so the black king counts as black
def isEmpty: Boolean =
pieceType == _E
def isIllegal: Boolean =
pieceType == _I
def weight: Double =
pieceType match {
case `_E` | `_I` => 0
case `BP` | `WP` => 1
case `BB` | `WB` => 3
case `BN` | `WN` => 3
case `BR` | `WR` => 5
case `BQ` | `WQ` => 9
case `BK` | `WK` => Double.MaxValue
}
}
}
|
SeanCheatham/chess-scala
|
src/main/scala/com/seancheatham/chess/package.scala
|
Scala
|
apache-2.0
| 3,853
|
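A small sketch built on the constants above: material balance can be computed by folding the piece weights over LEGAL_SQUARES, counting white pieces positively and black pieces negatively, with kings skipped so the total stays finite. The object name is hypothetical; it only assumes the package object shown above is on the classpath.
import com.seancheatham.chess._
object MaterialBalanceSketch extends App {
  /** Positive values favour white, negative favour black; kings are skipped. */
  def materialBalance(squares: Vector[Byte]): Double =
    LEGAL_SQUARES.foldLeft(0.0) { (acc, square) =>
      val piece = squares(square)
      if (piece == WK || piece == BK) acc          // exclude kings (their weight is Double.MaxValue)
      else if (piece.isWhite) acc + piece.weight
      else if (piece.isBlack) acc - piece.weight
      else acc
    }
  println(materialBalance(DEFAULT_BOARD_SQUARES))   // 0.0 on the symmetric starting position
  println(materialBalance(CHALLENGE_BOARD_SQUARES)) // non-zero on the uneven challenge position
}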
package controllers
import scala.util.{ Try, Success, Failure }
import play.api.mvc._
import play.twirl.api.Html
import lila.api.Context
import lila.app._
import lila.puzzle.PuzzleId
import lila.puzzle.{ Generated, Puzzle => PuzzleModel }
import lila.user.{ User => UserModel, UserRepo }
import views._
import views.html.puzzle.JsData
object Puzzle extends LilaController {
private def env = Env.puzzle
private def renderShow(puzzle: PuzzleModel, mode: String)(implicit ctx: Context) =
env userInfos ctx.me map { infos =>
views.html.puzzle.show(puzzle, infos, mode, animationDuration = env.AnimationDuration)
}
def daily = Open { implicit ctx =>
OptionFuResult(env.daily() flatMap {
_.map(_.id) ?? env.api.puzzle.find
}) { puzzle =>
(ctx.me ?? { env.api.attempt.hasPlayed(_, puzzle) map (!_) }) flatMap { asPlay =>
renderShow(puzzle, asPlay.fold("play", "try")) map { html =>
Ok(html).withHeaders(
CACHE_CONTROL -> "no-cache", PRAGMA -> "no-cache")
}
}
}
}
def home = Open { implicit ctx =>
selectPuzzle(ctx.me) flatMap { puzzle =>
renderShow(puzzle, ctx.isAuth.fold("play", "try")) map { Ok(_) }
}
}
def show(id: PuzzleId) = Open { implicit ctx =>
OptionFuOk(env.api.puzzle find id) { puzzle =>
(ctx.me ?? { env.api.attempt.hasPlayed(_, puzzle) map (!_) }) flatMap { asPlay =>
renderShow(puzzle, asPlay.fold("play", "try"))
}
}
}
def load(id: PuzzleId) = Open { implicit ctx =>
XhrOnly {
OptionFuOk(env.api.puzzle find id) { puzzle =>
(env userInfos ctx.me) zip
(ctx.me ?? { env.api.attempt.hasPlayed(_, puzzle) map (!_) }) map {
case (infos, asPlay) => JsData(puzzle, infos, asPlay.fold("play", "try"), animationDuration = env.AnimationDuration)
}
} map (_ as JSON)
}
}
def history = Auth { implicit ctx =>
me =>
XhrOnly {
env userInfos me map { ui => Ok(views.html.puzzle.history(ui)) }
}
}
// XHR load next play puzzle
def newPuzzle = Open { implicit ctx =>
XhrOnly {
selectPuzzle(ctx.me) zip (env userInfos ctx.me) map {
case (puzzle, infos) => Ok(JsData(puzzle, infos, ctx.isAuth.fold("play", "try"), animationDuration = env.AnimationDuration)) as JSON
}
}
}
def difficulty = AuthBody { implicit ctx =>
me =>
implicit val req = ctx.body
env.forms.difficulty.bindFromRequest.fold(
err => fuccess(BadRequest(err.errorsAsJson)),
value => Env.pref.api.setPref(
me,
(p: lila.pref.Pref) => p.copy(puzzleDifficulty = value),
notifyChange = false) >> {
reqToCtx(ctx.req) flatMap { newCtx =>
selectPuzzle(newCtx.me) zip env.userInfos(newCtx.me) map {
case (puzzle, infos) => Ok(JsData(puzzle, infos, ctx.isAuth.fold("play", "try"), animationDuration = env.AnimationDuration)(newCtx))
}
}
}
) map (_ as JSON)
}
private def selectPuzzle(user: Option[UserModel]) =
Env.pref.api.getPref(user) flatMap { pref =>
env.selector(user, pref.puzzleDifficulty)
}
def attempt(id: PuzzleId) = OpenBody { implicit ctx =>
implicit val req = ctx.body
OptionFuResult(env.api.puzzle find id) { puzzle =>
env.forms.attempt.bindFromRequest.fold(
err => fuccess(BadRequest(err.errorsAsJson)),
data => ctx.me match {
case Some(me) => env.finisher(puzzle, me, data) flatMap {
case (newAttempt, None) => UserRepo byId me.id map (_ | me) flatMap { me2 =>
env.api.puzzle find id zip
(env userInfos me2.some) zip
(env.api.attempt hasVoted me2) map {
case ((p2, infos), voted) => Ok {
JsData(p2 | puzzle, infos, "view",
attempt = newAttempt.some,
voted = voted.some,
animationDuration = env.AnimationDuration)
}
}
}
case (oldAttempt, Some(win)) => env userInfos me.some map { infos =>
Ok(JsData(puzzle, infos, "view",
attempt = oldAttempt.some,
win = win.some,
animationDuration = env.AnimationDuration))
}
}
case None => fuccess {
Ok(JsData(puzzle, none, "view",
win = data.isWin.some,
animationDuration = env.AnimationDuration))
}
}
) map (_ as JSON)
}
}
def vote(id: PuzzleId) = AuthBody { implicit ctx =>
me =>
implicit val req = ctx.body
OptionFuResult(env.api.attempt.find(id, me.id)) { attempt =>
env.forms.vote.bindFromRequest.fold(
err => fuccess(BadRequest(err.errorsAsJson)),
vote => env.api.attempt.vote(attempt, vote == 1) map {
case (p, a) => Ok(play.api.libs.json.Json.arr(a.vote, p.vote.sum))
}
) map (_ as JSON)
}
}
def embed = Action { req =>
Ok {
val bg = get("bg", req) | "light"
val theme = get("theme", req) | "brown"
val url = s"""${req.domain + routes.Puzzle.frame}?bg=$bg&theme=$theme"""
s"""document.write("<iframe src='http://$url&embed=" + document.domain + "' class='lichess-training-iframe' allowtransparency='true' frameBorder='0' style='width: 224px; height: 264px;' title='Lichess free online chess'></iframe>");"""
} as JAVASCRIPT withHeaders (CACHE_CONTROL -> "max-age=86400")
}
def frame = Open { implicit ctx =>
OptionOk(env.daily()) { daily =>
html.puzzle.embed(
daily,
get("bg") | "light",
lila.pref.Theme(~get("theme")).cssClass)
}
}
}
|
danilovsergey/i-bur
|
app/controllers/Puzzle.scala
|
Scala
|
mit
| 5,782
|
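The controller above repeatedly writes expressions such as asPlay.fold("play", "try"). That fold on Boolean is not in the standard library; it presumably comes from scalaz's BooleanOps or an equivalent enrichment in the lila codebase. A minimal standalone sketch of the same idiom:
object BooleanFoldSketch {
  // Enrichment giving Boolean a fold, mirroring the `cond.fold(ifTrue, ifFalse)` calls above.
  implicit class BooleanFold(private val b: Boolean) extends AnyVal {
    def fold[A](ifTrue: => A, ifFalse: => A): A = if (b) ifTrue else ifFalse
  }
  def mode(isAuth: Boolean): String = isAuth.fold("play", "try")
}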
package spark
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
object ScalaWordCount {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Scala Word Count").setMaster("yarn-client")
val sc = new SparkContext(conf)
val file = sc.textFile("hdfs://namenode:8020/user/root/constitution.txt")
val counts = file.flatMap(line => line.split(" "))
.map(word => (word, 1))
.reduceByKey(_ + _)
counts.cache()
counts.saveAsTextFile("hdfs://namenode:8020/user/root/scala-wc-out")
}
}
|
gaoxuesong/HDPLAB
|
Java_Rev3/workspace/Spark/src/main/scala/spark/ScalaWordCount.scala
|
Scala
|
apache-2.0
| 593
|
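A variant of the word count above that can be run without a YARN cluster or HDFS: the master and the input path are swapped for local placeholders, while the transformation chain stays the same.
import org.apache.spark.{SparkConf, SparkContext}
object ScalaWordCountLocal {
  def main(args: Array[String]): Unit = {
    // Local mode with all cores; no cluster required.
    val conf = new SparkConf().setAppName("Scala Word Count (local)").setMaster("local[*]")
    val sc = new SparkContext(conf)
    try {
      val counts = sc.textFile("/tmp/constitution.txt")   // placeholder input path
        .flatMap(_.split(" "))
        .map(word => (word, 1))
        .reduceByKey(_ + _)
      counts.take(10).foreach(println)                    // print a sample instead of writing to HDFS
    } finally {
      sc.stop()
    }
  }
}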
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalRel
import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalRel
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalRel
import org.apache.calcite.plan.{Convention, RelTraitSet}
import org.apache.calcite.rel.RelNode
/**
* Override the default convention implementation to support using AbstractConverter for conversion
*/
class FlinkConvention(name: String, relClass: Class[_ <: RelNode])
extends Convention.Impl(name, relClass) {
override def useAbstractConvertersForConversion(
fromTraits: RelTraitSet,
toTraits: RelTraitSet): Boolean = {
if (relClass == classOf[StreamPhysicalRel]) {
// stream
!fromTraits.satisfies(toTraits) &&
fromTraits.containsIfApplicable(FlinkConventions.STREAM_PHYSICAL) &&
toTraits.containsIfApplicable(FlinkConventions.STREAM_PHYSICAL)
} else {
// batch
!fromTraits.satisfies(toTraits) &&
fromTraits.containsIfApplicable(FlinkConventions.BATCH_PHYSICAL) &&
toTraits.containsIfApplicable(FlinkConventions.BATCH_PHYSICAL)
}
}
}
object FlinkConventions {
val LOGICAL = new Convention.Impl("LOGICAL", classOf[FlinkLogicalRel])
val STREAM_PHYSICAL = new FlinkConvention("STREAM_PHYSICAL", classOf[StreamPhysicalRel])
val BATCH_PHYSICAL = new FlinkConvention("BATCH_PHYSICAL", classOf[BatchPhysicalRel])
}
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/FlinkConventions.scala
|
Scala
|
apache-2.0
| 2,303
|
package unfiltered.jetty
import org.eclipse.jetty.server.{NCSARequestLog, Handler}
import unfiltered.util.{ PlanServer, RunnableServer }
import javax.servlet.Filter
import org.eclipse.jetty.server.handler.{ContextHandlerCollection, RequestLogHandler, HandlerCollection}
/** Holds port bindings for selected ports and interfaces. The
* PortBindings trait provides convenience methods for bindings. */
case class Server(
portBindings: List[PortBinding],
contextAdders: List[ContextAdder],
requestLogging: Option[RequestLogging] = None
) extends RunnableServer
with PlanServer[Filter]
with PortBindings {
type ServerBuilder = Server
/** Add a port binding to this server. */
def portBinding(binding: PortBinding) = copy(
portBindings = binding :: portBindings
)
/** Update the server's first-added context. */
def originalContext(replace: ContextAdder => ContextAdder) = copy(
contextAdders = contextAdders.reverse match {
case head :: tail => (replace(head) :: tail).reverse
case _ => contextAdders
})
/** The mutable underlying jetty server object. This is built
* on-demand according to the described configuration. */
lazy val underlying = {
val server = new org.eclipse.jetty.server.Server()
for (binding <- portBindings.reverseIterator)
server.addConnector(binding.connector(server))
val contextHandlers = new ContextHandlerCollection
for (adder <- contextAdders.reverseIterator)
adder.addToParent(contextHandlers)
server.setHandler(withLogging(contextHandlers, requestLogging))
server
}
private def withLogging(contextHandlers: ContextHandlerCollection,
requestLogging: Option[RequestLogging]) = {
requestLogging.fold[Handler](
contextHandlers)(rl => {
val handlers = new HandlerCollection()
val requestLogHandler = new RequestLogHandler()
val requestLog = new NCSARequestLog(rl.filename)
requestLog.setRetainDays(rl.retainDays)
requestLog.setExtended(rl.extended)
requestLog.setLogTimeZone(rl.timezone)
requestLog.setLogDateFormat(rl.dateFormat)
requestLogHandler.setRequestLog(requestLog)
handlers.setHandlers(Array(contextHandlers, requestLogHandler))
handlers
})
}
/** Add a servlet context with the given path */
def context(path: String)(block: ContextAdder => ContextAdder) = copy(
contextAdders =
block(DefaultServletContextAdder(path, Nil, None)) :: contextAdders
)
@deprecated("Use `plan(filter)`", "0.8.1")
def filter(filter: Filter) = plan(filter)
/** Add a filter as a by-name parameter. Generally you should use
* `plan(plan)` instead. */
def makePlan(plan: => Filter) = originalContext(
_.filterAdder(FilterAdder(BasicFilterHolder(plan)))
)
/** Add a resource path to the original, root context */
def resources(path: java.net.URL) = originalContext(_.resources(path))
/** Configure global logging of requests to a logfile in Common or Extended log format.
* [[http://en.wikipedia.org/wiki/Category:Log_file_formats]] */
def requestLogging(filename: String,
extended: Boolean = true,
dateFormat: String = "dd/MMM/yyyy:HH:mm:ss Z",
timezone: String = "GMT",
retainDays: Int = 31) = copy(requestLogging = {
Some(RequestLogging(filename, extended, dateFormat, timezone, retainDays))
})
/** Ports used by this server, reported by super-trait */
def ports: Iterable[Int] = portBindings.reverse.map(_.port)
/** Starts server in the background */
def start() = {
underlying.setStopAtShutdown(true)
underlying.start()
this
}
/** Stops server running in the background */
def stop() = {
underlying.stop()
this
}
/** Destroys the Jetty server instance and frees its resources.
* Call after stopping a server, if finished with the instance,
* to help avoid PermGen errors in an ongoing JVM session. */
def destroy() = {
underlying.destroy()
this
}
}
/** Base object that used to construct Server instances. The
* PortBindings trait provides convenience methods for adding
* bindings. */
object Server extends PortBindings {
def portBinding(portBinding: PortBinding) =
Server(portBinding :: Nil, DefaultServletContextAdder("/", Nil, None) :: Nil)
}
|
omarkilani/unfiltered
|
jetty/src/main/scala/Server.scala
|
Scala
|
mit
| 4,376
|
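The Server case class above configures Jetty through an immutable builder: each call such as portBinding or context returns a modified copy, so a partially configured server can be shared and forked without mutation. A tiny standalone sketch of that pattern (the names below are illustrative, not unfiltered's API):
case class HttpConfig(port: Int = 8080, contexts: List[String] = Nil) {
  def context(path: String): HttpConfig = copy(contexts = path :: contexts)
  def onPort(p: Int): HttpConfig = copy(port = p)
}
object HttpConfigSketch extends App {
  val base = HttpConfig()
  val api  = base.onPort(9090).context("/api")  // forks a new config; `base` is unchanged
  println(api)                                  // HttpConfig(9090,List(/api))
}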
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{BufferedInputStream, FileNotFoundException, InputStream, IOException, OutputStream}
import java.util.UUID
import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.mutable
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.security.AccessControlException
import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.io.CompressionCodec
import org.apache.spark.scheduler._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
def this(conf: SparkConf) = {
this(conf, new SystemClock())
}
import FsHistoryProvider._
private val NOT_STARTED = "<Not Started>"
// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
"spark.history.fs.safemodeCheck.interval", "5s")
// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")
// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.cleaner.interval", "1d")
private val logDir = conf.getOption("spark.history.fs.logDirectory")
.map { d => Utils.resolveURI(d).toString }
.getOrElse(DEFAULT_LOG_DIR)
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private val fs = Utils.getHadoopFileSystem(logDir, hadoopConf)
// Used by check event thread and clean log thread.
// Scheduled thread pool size must be one; otherwise the check task and the clean task would
// access fs and applications concurrently and cause issues.
private val pool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
.setNameFormat("spark-history-task-%d").setDaemon(true).build())
// The modification time of the newest log detected during the last scan. This is used
// to ignore logs that are older during subsequent scans, to avoid processing data that
// is already known.
private var lastScanTime = -1L
// Mapping of application IDs to their metadata, in descending end time order. Apps are inserted
// into the map in order, so the LinkedHashMap maintains the correct ordering.
@volatile private var applications: mutable.LinkedHashMap[String, FsApplicationHistoryInfo]
= new mutable.LinkedHashMap()
// List of application logs to be deleted by event log cleaner.
private var attemptsToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
/**
* Return a runnable that performs the given operation on the event logs.
* This operation is expected to be executed periodically.
*/
private def getRunner(operateFun: () => Unit): Runnable = {
new Runnable() {
override def run(): Unit = Utils.tryOrExit {
operateFun()
}
}
}
/**
* An Executor to fetch and parse log files.
*/
private val replayExecutor: ExecutorService = {
if (!conf.contains("spark.testing")) {
ThreadUtils.newDaemonSingleThreadExecutor("log-replay-executor")
} else {
MoreExecutors.sameThreadExecutor()
}
}
// Conf option used for testing the initialization code.
val initThread = initialize()
private[history] def initialize(): Thread = {
if (!isFsInSafeMode()) {
startPolling()
null
} else {
startSafeModeCheckThread(None)
}
}
private[history] def startSafeModeCheckThread(
errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
// Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
// for the FS to leave safe mode before enabling polling. This allows the main history server
// UI to be shown (so that the user can see the HDFS status).
val initThread = new Thread(new Runnable() {
override def run(): Unit = {
try {
while (isFsInSafeMode()) {
logInfo("HDFS is still in safe mode. Waiting...")
val deadline = clock.getTimeMillis() +
TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
clock.waitTillTime(deadline)
}
startPolling()
} catch {
case _: InterruptedException =>
}
}
})
initThread.setDaemon(true)
initThread.setName(s"${getClass().getSimpleName()}-init")
initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
new Thread.UncaughtExceptionHandler() {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
logError("Error initializing FsHistoryProvider.", e)
System.exit(1)
}
}))
initThread.start()
initThread
}
private def startPolling(): Unit = {
// Validate the log directory.
val path = new Path(logDir)
if (!fs.exists(path)) {
var msg = s"Log directory specified does not exist: $logDir."
if (logDir == DEFAULT_LOG_DIR) {
msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
}
throw new IllegalArgumentException(msg)
}
if (!fs.getFileStatus(path).isDir) {
throw new IllegalArgumentException(
"Logging directory specified is not a directory: %s".format(logDir))
}
// Disable the background thread during tests.
if (!conf.contains("spark.testing")) {
// A task that periodically checks for event log updates on disk.
pool.scheduleWithFixedDelay(getRunner(checkForLogs), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)
if (conf.getBoolean("spark.history.fs.cleaner.enabled", false)) {
// A task that periodically cleans event logs on disk.
pool.scheduleWithFixedDelay(getRunner(cleanLogs), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
}
}
}
override def getListing(): Iterable[FsApplicationHistoryInfo] = applications.values
override def getAppUI(appId: String, attemptId: Option[String]): Option[SparkUI] = {
try {
applications.get(appId).flatMap { appInfo =>
appInfo.attempts.find(_.attemptId == attemptId).flatMap { attempt =>
val replayBus = new ReplayListenerBus()
val ui = {
val conf = this.conf.clone()
val appSecManager = new SecurityManager(conf)
SparkUI.createHistoryUI(conf, replayBus, appSecManager, appInfo.name,
HistoryServer.getAttemptURI(appId, attempt.attemptId), attempt.startTime)
// Do not call ui.bind() to avoid creating a new server for each application
}
val appListener = new ApplicationEventListener()
replayBus.addListener(appListener)
val appAttemptInfo = replay(fs.getFileStatus(new Path(logDir, attempt.logPath)),
replayBus)
appAttemptInfo.map { info =>
val uiAclsEnabled = conf.getBoolean("spark.history.ui.acls.enable", false)
ui.getSecurityManager.setAcls(uiAclsEnabled)
// make sure to set admin acls before view acls so they are properly picked up
ui.getSecurityManager.setAdminAcls(appListener.adminAcls.getOrElse(""))
ui.getSecurityManager.setViewAcls(attempt.sparkUser,
appListener.viewAcls.getOrElse(""))
ui
}
}
}
} catch {
case e: FileNotFoundException => None
}
}
override def getConfig(): Map[String, String] = {
val safeMode = if (isFsInSafeMode()) {
Map("HDFS State" -> "In safe mode, application logs not available.")
} else {
Map()
}
Map("Event log directory" -> logDir.toString) ++ safeMode
}
override def stop(): Unit = {
if (initThread != null && initThread.isAlive()) {
initThread.interrupt()
initThread.join()
}
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
val newLastScanTime = getNewLastScanTime()
val statusList = Option(fs.listStatus(new Path(logDir))).map(_.toSeq)
.getOrElse(Seq[FileStatus]())
val logInfos: Seq[FileStatus] = statusList
.filter { entry =>
try {
getModificationTime(entry).map { time =>
time >= lastScanTime
}.getOrElse(false)
} catch {
case e: AccessControlException =>
// Do not use "logInfo" since these messages can get pretty noisy if printed on
// every poll.
logDebug(s"No permission to read $entry, ignoring.")
false
}
}
.flatMap { entry => Some(entry) }
.sortWith { case (entry1, entry2) =>
val mod1 = getModificationTime(entry1).getOrElse(-1L)
val mod2 = getModificationTime(entry2).getOrElse(-1L)
mod1 >= mod2
}
logInfos.grouped(20)
.map { batch =>
replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(batch)
})
}
.foreach { task =>
try {
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: Exception =>
logError("Exception while merging application listings", e)
}
}
lastScanTime = newLastScanTime
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
private def getNewLastScanTime(): Long = {
val fileName = "." + UUID.randomUUID().toString
val path = new Path(logDir, fileName)
val fos = fs.create(path)
try {
fos.close()
fs.getFileStatus(path).getModificationTime
} catch {
case e: Exception =>
logError("Exception encountered when attempting to update last scan time", e)
lastScanTime
} finally {
if (!fs.delete(path)) {
logWarning(s"Error deleting ${path}")
}
}
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = FileSystem.get(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
inputStream.close()
}
}
applications.get(appId) match {
case Some(appInfo) =>
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
appInfo.attempts.filter { attempt =>
attempt.attemptId.isEmpty || attemptId.isEmpty || attempt.attemptId.get == attemptId.get
}.foreach { attempt =>
val logPath = new Path(logDir, attempt.logPath)
// If this is a legacy directory, then add the directory to the zipStream and add
// each file to that directory.
if (isLegacyLogDirectory(fs.getFileStatus(logPath))) {
val files = fs.listStatus(logPath)
zipStream.putNextEntry(new ZipEntry(attempt.logPath + "/"))
zipStream.closeEntry()
files.foreach { file =>
val path = file.getPath
zipFileToStream(path, attempt.logPath + Path.SEPARATOR + path.getName, zipStream)
}
} else {
zipFileToStream(new Path(logDir, attempt.logPath), attempt.logPath, zipStream)
}
}
} finally {
zipStream.close()
}
case None => throw new SparkException(s"Logs for $appId not found.")
}
}
/**
* Replay the log files in the list and merge the list of old applications with new ones
*/
private def mergeApplicationListing(logs: Seq[FileStatus]): Unit = {
val newAttempts = logs.flatMap { fileStatus =>
try {
val bus = new ReplayListenerBus()
val res = replay(fileStatus, bus)
res match {
case Some(r) => logDebug(s"Application log ${r.logPath} loaded successfully.")
case None => logWarning(s"Failed to load application log ${fileStatus.getPath}. " +
"The application may have not started.")
}
res
} catch {
case e: Exception =>
logError(
s"Exception encountered when attempting to load application log ${fileStatus.getPath}",
e)
None
}
}
if (newAttempts.isEmpty) {
return
}
// Build a map containing all apps that contain new attempts. The app information in this map
// contains both the new app attempt, and those that were already loaded in the existing apps
// map. If an attempt has been updated, it replaces the old attempt in the list.
val newAppMap = new mutable.HashMap[String, FsApplicationHistoryInfo]()
newAttempts.foreach { attempt =>
val appInfo = newAppMap.get(attempt.appId)
.orElse(applications.get(attempt.appId))
.map { app =>
val attempts =
app.attempts.filter(_.attemptId != attempt.attemptId).toList ++ List(attempt)
new FsApplicationHistoryInfo(attempt.appId, attempt.name,
attempts.sortWith(compareAttemptInfo))
}
.getOrElse(new FsApplicationHistoryInfo(attempt.appId, attempt.name, List(attempt)))
newAppMap(attempt.appId) = appInfo
}
// Merge the new app list with the existing one, maintaining the expected ordering (descending
// end time). Maintaining the order is important to avoid having to sort the list every time
// there is a request for the log list.
val newApps = newAppMap.values.toSeq.sortWith(compareAppInfo)
val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
if (!mergedApps.contains(info.id)) {
mergedApps += (info.id -> info)
}
}
val newIterator = newApps.iterator.buffered
val oldIterator = applications.values.iterator.buffered
while (newIterator.hasNext && oldIterator.hasNext) {
if (newAppMap.contains(oldIterator.head.id)) {
oldIterator.next()
} else if (compareAppInfo(newIterator.head, oldIterator.head)) {
addIfAbsent(newIterator.next())
} else {
addIfAbsent(oldIterator.next())
}
}
newIterator.foreach(addIfAbsent)
oldIterator.foreach(addIfAbsent)
applications = mergedApps
}
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = {
try {
val maxAge = conf.getTimeAsSeconds("spark.history.fs.cleaner.maxAge", "7d") * 1000
val now = clock.getTimeMillis()
val appsToRetain = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def shouldClean(attempt: FsApplicationAttemptInfo): Boolean = {
now - attempt.lastUpdated > maxAge && attempt.completed
}
// Scan all logs from the log directory.
// Only completed applications older than the specified max age will be deleted.
applications.values.foreach { app =>
val (toClean, toRetain) = app.attempts.partition(shouldClean)
attemptsToClean ++= toClean
if (toClean.isEmpty) {
appsToRetain += (app.id -> app)
} else if (toRetain.nonEmpty) {
appsToRetain += (app.id ->
new FsApplicationHistoryInfo(app.id, app.name, toRetain.toList))
}
}
applications = appsToRetain
val leftToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
attemptsToClean.foreach { attempt =>
try {
val path = new Path(logDir, attempt.logPath)
if (fs.exists(path)) {
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
} catch {
case e: AccessControlException =>
logInfo(s"No permission to delete ${attempt.logPath}, ignoring.")
case t: IOException =>
logError(s"IOException in cleaning ${attempt.logPath}", t)
leftToClean += attempt
}
}
attemptsToClean = leftToClean
} catch {
case t: Exception => logError("Exception in cleaning logs", t)
}
}
/**
* Comparison function that defines the sort order for the application listing.
*
* @return Whether `i1` should precede `i2`.
*/
private def compareAppInfo(
i1: FsApplicationHistoryInfo,
i2: FsApplicationHistoryInfo): Boolean = {
val a1 = i1.attempts.head
val a2 = i2.attempts.head
if (a1.endTime != a2.endTime) a1.endTime >= a2.endTime else a1.startTime >= a2.startTime
}
/**
* Comparison function that defines the sort order for application attempts within the same
* application. Attempts are sorted by descending start time, so the most recent attempt's
* state matches the current state of the app.
*
* Normally applications should have a single running attempt; but failure to call sc.stop()
* may cause multiple running attempts to show up.
*
* @return Whether `a1` should precede `a2`.
*/
private def compareAttemptInfo(
a1: FsApplicationAttemptInfo,
a2: FsApplicationAttemptInfo): Boolean = {
a1.startTime >= a2.startTime
}
/**
* Replays the events in the specified log file and returns information about the associated
* application. Return `None` if the application ID cannot be located.
*/
private def replay(
eventLog: FileStatus,
bus: ReplayListenerBus): Option[FsApplicationAttemptInfo] = {
val logPath = eventLog.getPath()
logInfo(s"Replaying log path: $logPath")
val logInput =
if (isLegacyLogDirectory(eventLog)) {
openLegacyEventLog(logPath)
} else {
EventLoggingListener.openEventLog(logPath, fs)
}
try {
val appListener = new ApplicationEventListener
val appCompleted = isApplicationCompleted(eventLog)
bus.addListener(appListener)
bus.replay(logInput, logPath.toString, !appCompleted)
// Without an app ID, new logs will render incorrectly in the listing page, so do not list or
// try to show their UI. Some old versions of Spark generate logs without an app ID, so let
// logs generated by those versions go through.
if (appListener.appId.isDefined || !sparkVersionHasAppId(eventLog)) {
Some(new FsApplicationAttemptInfo(
logPath.getName(),
appListener.appName.getOrElse(NOT_STARTED),
appListener.appId.getOrElse(logPath.getName()),
appListener.appAttemptId,
appListener.startTime.getOrElse(-1L),
appListener.endTime.getOrElse(-1L),
getModificationTime(eventLog).get,
appListener.sparkUser.getOrElse(NOT_STARTED),
appCompleted))
} else {
None
}
} finally {
logInput.close()
}
}
/**
* Loads a legacy log directory. This assumes that the log directory contains a single event
* log file (along with other metadata files), which is the case for directories generated by
* the code in previous releases.
*
* @return input stream that holds one JSON record per line.
*/
private[history] def openLegacyEventLog(dir: Path): InputStream = {
val children = fs.listStatus(dir)
var eventLogPath: Path = null
var codecName: Option[String] = None
children.foreach { child =>
child.getPath().getName() match {
case name if name.startsWith(LOG_PREFIX) =>
eventLogPath = child.getPath()
case codec if codec.startsWith(COMPRESSION_CODEC_PREFIX) =>
codecName = Some(codec.substring(COMPRESSION_CODEC_PREFIX.length()))
case _ =>
}
}
if (eventLogPath == null) {
throw new IllegalArgumentException(s"$dir is not a Spark application log directory.")
}
val codec = try {
codecName.map { c => CompressionCodec.createCodec(conf, c) }
} catch {
case e: Exception =>
throw new IllegalArgumentException(s"Unknown compression codec $codecName.")
}
val in = new BufferedInputStream(fs.open(eventLogPath))
codec.map(_.compressedInputStream(in)).getOrElse(in)
}
/**
* Return whether the specified event log path contains an old directory-based event log.
* Previously, the event log of an application comprised multiple files in a directory.
* As of Spark 1.3, these files are consolidated into a single one that replaces the directory.
* See SPARK-2261 for more detail.
*/
private def isLegacyLogDirectory(entry: FileStatus): Boolean = entry.isDir()
/**
* Returns the modification time of the given event log. If the status points at an empty
* directory, `None` is returned, indicating that there isn't an event log at that location.
*/
private def getModificationTime(fsEntry: FileStatus): Option[Long] = {
if (isLegacyLogDirectory(fsEntry)) {
val statusList = fs.listStatus(fsEntry.getPath)
if (!statusList.isEmpty) Some(statusList.map(_.getModificationTime()).max) else None
} else {
Some(fsEntry.getModificationTime())
}
}
/**
* Return true when the application has completed.
*/
private def isApplicationCompleted(entry: FileStatus): Boolean = {
if (isLegacyLogDirectory(entry)) {
fs.exists(new Path(entry.getPath(), APPLICATION_COMPLETE))
} else {
!entry.getPath().getName().endsWith(EventLoggingListener.IN_PROGRESS)
}
}
/**
* Returns whether the version of Spark that generated logs records app IDs. App IDs were added
* in Spark 1.1.
*/
private def sparkVersionHasAppId(entry: FileStatus): Boolean = {
if (isLegacyLogDirectory(entry)) {
fs.listStatus(entry.getPath())
.find { status => status.getPath().getName().startsWith(SPARK_VERSION_PREFIX) }
.map { status =>
val version = status.getPath().getName().substring(SPARK_VERSION_PREFIX.length())
version != "1.0" && version != "1.1"
}
.getOrElse(true)
} else {
true
}
}
/**
* Checks whether HDFS is in safe mode. The API is slightly different between hadoop 1 and 2,
* so we have to resort to ugly reflection (as usual...).
*
* Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical reasons
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
false
}
// For testing.
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
val hadoop1Class = "org.apache.hadoop.hdfs.protocol.FSConstants$SafeModeAction"
val hadoop2Class = "org.apache.hadoop.hdfs.protocol.HdfsConstants$SafeModeAction"
val actionClass: Class[_] =
try {
getClass().getClassLoader().loadClass(hadoop2Class)
} catch {
case _: ClassNotFoundException =>
getClass().getClassLoader().loadClass(hadoop1Class)
}
val action = actionClass.getField("SAFEMODE_GET").get(null)
val method = dfs.getClass().getMethod("setSafeMode", action.getClass())
method.invoke(dfs, action).asInstanceOf[Boolean]
}
}
private[history] object FsHistoryProvider {
val DEFAULT_LOG_DIR = "file:/tmp/spark-events"
// Constants used to parse Spark 1.0.0 log directories.
val LOG_PREFIX = "EVENT_LOG_"
val SPARK_VERSION_PREFIX = EventLoggingListener.SPARK_VERSION_KEY + "_"
val COMPRESSION_CODEC_PREFIX = EventLoggingListener.COMPRESSION_CODEC_KEY + "_"
val APPLICATION_COMPLETE = "APPLICATION_COMPLETE"
}
private class FsApplicationAttemptInfo(
val logPath: String,
val name: String,
val appId: String,
attemptId: Option[String],
startTime: Long,
endTime: Long,
lastUpdated: Long,
sparkUser: String,
completed: Boolean = true)
extends ApplicationAttemptInfo(
attemptId, startTime, endTime, lastUpdated, sparkUser, completed)
private class FsApplicationHistoryInfo(
id: String,
override val name: String,
override val attempts: List[FsApplicationAttemptInfo])
extends ApplicationHistoryInfo(id, name, attempts)
|
chenc10/Spark-PAF
|
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala
|
Scala
|
apache-2.0
| 26,584
|
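mergeApplicationListing above merges the freshly scanned applications into the existing listing with two buffered iterators, so the result stays sorted by descending end time without a full re-sort. A self-contained sketch of that merge step, using descending-sorted Ints in place of FsApplicationHistoryInfo:
import scala.collection.mutable
object MergeSortedListingsSketch extends App {
  /** Merges two descending-sorted sequences, dropping superseded duplicates, keeping the order. */
  def merge(newApps: Seq[Int], oldApps: Seq[Int]): Seq[Int] = {
    val merged = new mutable.LinkedHashSet[Int]()
    def addIfAbsent(x: Int): Unit = merged += x   // LinkedHashSet keeps first-insertion order
    val newIt = newApps.iterator.buffered
    val oldIt = oldApps.iterator.buffered
    while (newIt.hasNext && oldIt.hasNext) {
      if (newApps.contains(oldIt.head)) oldIt.next()        // superseded entry: skip the old copy
      else if (newIt.head >= oldIt.head) addIfAbsent(newIt.next())
      else addIfAbsent(oldIt.next())
    }
    newIt.foreach(addIfAbsent)
    oldIt.foreach(addIfAbsent)
    merged.toSeq
  }
  println(merge(Seq(9, 7, 4), Seq(8, 7, 3)).mkString(", "))  // 9, 8, 7, 4, 3
}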
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.netio.service.handle;
import java.util.HashMap
import scala.collection.JavaConversions._
import scouter.io.DataInputX
import scouter.io.DataOutputX
import scouter.lang.AlertLevel
import scouter.lang.pack.AlertPack
import scouter.lang.pack.MapPack
import scouter.lang.pack.ObjectPack
import scouter.lang.value.DecimalValue
import scouter.net.TcpFlag
import scouter.server.core.cache.AlertCache
import scouter.server.core.cache.CacheOut
import scouter.server.db.AlertRD
import scouter.server.db.ObjectRD
import scouter.server.netio.service.anotation.ServiceHandler
import scouter.server.util.EnumerScala
import scouter.net.RequestCmd
import scouter.lang.value.MapValue
import scouter.server.db.SummaryRD
import scouter.util.{DateUtil, StringUtil}
import scouter.lang.pack.SummaryPack
import scouter.lang.SummaryEnum
class AlertService {
@ServiceHandler(RequestCmd.ALERT_REAL_TIME)
def getRealtime(din: DataInputX, dout: DataOutputX, login: Boolean) {
val param = din.readPack().asInstanceOf[MapPack];
val index = param.getInt("index");
val loop = param.getLong("loop");
val objType = if(StringUtil.isEmpty(param.getText("objType"))) null else param.getText("objType");
val first = param.getBoolean("first");
val d = AlertCache.get(objType, loop, index);
if (d == null)
return ;
// Send the information in the first packet.
val outparam = new MapPack();
outparam.put("loop", new DecimalValue(d.loop));
outparam.put("index", new DecimalValue(d.index));
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(outparam);
if (first) {
return ;
}
EnumerScala.forward(d.data, (pack: AlertPack) => {
dout.writeByte(TcpFlag.HasNEXT);
dout.write(new DataOutputX().writePack(pack).toByteArray());
})
}
@ServiceHandler(RequestCmd.ALERT_LOAD_TIME)
def getAlertHistory(din: DataInputX, dout: DataOutputX, login: Boolean) {
val param = din.readPack().asInstanceOf[MapPack];
//////////
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val max = param.getInt("count");
val level = param.getText("level");
val obj = param.getText("object");
val key = param.getText("key");
val levelCode = AlertLevel.getValue(level);
val ccount = if (max < 1 || max > 1000) 500 else max
var srchCnt = 0;
val tempObjNameMap = new HashMap[Integer, String]();
val handler = (time: Long, data: Array[Byte]) => {
if (srchCnt > ccount) {
return
}
var ok = check(date, level, obj, key, levelCode, tempObjNameMap, data)
if (ok) {
srchCnt += 1
dout.writeByte(TcpFlag.HasNEXT);
dout.write(data);
}
}
AlertRD.readByTime(date, stime, etime, handler)
}
private def check(date: String, level: String, obj: String, key: String, levelCode: Byte, tempObjNameMap: java.util.HashMap[Integer, String], data: Array[Byte]): Boolean = {
val pack = new DataInputX(data).readPack().asInstanceOf[AlertPack];
if (level != null && levelCode != pack.level) {
return false
}
if (obj != null) {
var objName = tempObjNameMap.get(pack.objHash);
if (objName == null) {
val objPack = ObjectRD.getObjectPack(date, pack.objHash);
if (objPack == null) {
return false
}
objName = objPack.objName;
tempObjNameMap.put(pack.objHash, objName);
}
if (objName.contains(obj) == false) {
return false
}
}
if (key != null) {
if (pack.title.contains(key) == false && pack.message.contains(key) == false) {
return false
}
}
return true
}
@ServiceHandler(RequestCmd.ALERT_TITLE_COUNT)
def titleAlertCount(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
val param = din.readPack().asInstanceOf[MapPack];
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val valueMap = new HashMap[String, MapPack]();
val handler = (time: Long, b: Array[Byte]) => {
val data = new DataInputX(b).readPack().asInstanceOf[SummaryPack];
if (data.stype == SummaryEnum.ALERT ) {
val hhmm = DateUtil.hhmm(time);
val titleLv = data.table.getList("title");
val levelLv = data.table.getList("level");
val countLv = data.table.getList("count");
for (i <- 0 to titleLv.size() - 1) {
val title = titleLv.getString(i);
val level = levelLv.getLong(i).asInstanceOf[Byte];
val count = countLv.getInt(i)
var pack = valueMap.get(title);
if (pack == null) {
pack = new MapPack();
pack.put("title", title);
pack.put("level", level);
pack.put("count", new MapValue());
valueMap.put(title, pack);
}
val mv = pack.get("count").asInstanceOf[MapValue];
mv.put(hhmm, count);
}
}
}
SummaryRD.readByTime(SummaryEnum.ALERT, date, stime, etime, handler)
val keySet = valueMap.keySet();
for (title <- keySet) {
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(valueMap.get(title));
}
}
}
|
scouter-project/scouter
|
scouter.server/src/main/scala/scouter/server/netio/service/handle/AlertService.scala
|
Scala
|
apache-2.0
| 6,594
|
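A self-contained sketch of the aggregation titleAlertCount performs above: alert summaries are grouped by title and their counts are bucketed by the HH:mm of their timestamp. The case class, sample data and hhmm formatting below are illustrative stand-ins for SummaryPack, SummaryRD and DateUtil.
import java.text.SimpleDateFormat
import java.util.{Date, TimeZone}
object AlertTitleCountSketch extends App {
  final case class AlertSummary(time: Long, title: String, level: Byte, count: Int)
  private val fmt = new SimpleDateFormat("HH:mm")
  fmt.setTimeZone(TimeZone.getTimeZone("UTC"))
  def hhmm(time: Long): String = fmt.format(new Date(time))
  val summaries = Seq(
    AlertSummary(0L,     "GC overhead", 2, 3),
    AlertSummary(60000L, "GC overhead", 2, 1),
    AlertSummary(0L,     "Slow query",  1, 5))
  // title -> (level, hhmm -> total count)
  val byTitle: Map[String, (Byte, Map[String, Int])] =
    summaries.groupBy(_.title).map { case (title, rows) =>
      val counts = rows.groupBy(r => hhmm(r.time)).map { case (t, rs) => t -> rs.map(_.count).sum }
      title -> (rows.head.level, counts)
    }
  byTitle.foreach { case (title, (level, counts)) => println(s"$title (level $level): $counts") }
}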
package jdub.async
import com.github.mauricio.async.db.ResultSet
trait RawQuery[A] {
def sql: String
def values: Seq[Any] = Seq.empty
def handle(results: ResultSet): A
}
trait Query[A] extends RawQuery[A] {
def handle(results: ResultSet) = reduce(results.toIterator.map(rd => new Row(rd)))
def reduce(rows: Iterator[Row]): A
}
trait SingleRowQuery[A] extends Query[A] {
def map(row: Row): A
override final def reduce(rows: Iterator[Row]) = if (rows.hasNext) {
rows.map(map).next()
} else {
throw new IllegalStateException(s"No row returned for [$sql].")
}
}
trait FlatSingleRowQuery[A] extends Query[Option[A]] {
def flatMap(row: Row): Option[A]
override final def reduce(rows: Iterator[Row]) = if (rows.hasNext) { flatMap(rows.next()) } else { None }
}
|
KyleU/jdub-async
|
src/main/scala/jdub/async/Query.scala
|
Scala
|
mit
| 789
|
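A small usage sketch for the traits above: a Query[Int] that only counts the returned rows needs no column accessors, so it relies solely on the members shown here. The table and column names are hypothetical.
import jdub.async.{Query, Row}
// Hypothetical query: counts users at or above a minimum age.
case class CountUsers(minAge: Int) extends Query[Int] {
  override val sql = "select id from users where age >= ?"
  override val values: Seq[Any] = Seq(minAge)
  override def reduce(rows: Iterator[Row]): Int = rows.size
}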
package org.automanlang.core.question
import org.automanlang.core.AutomanAdapter
import org.automanlang.core.answer.AbstractAnswer
import org.automanlang.core.policy.aggregation.MetaAggregationPolicy
import org.automanlang.core.scheduler.MetaScheduler
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
trait MetaQuestion {
type MA <: Any
type MAA <: AbstractAnswer[MA]
type MAP <: MetaAggregationPolicy
def metaSchedulerFuture(backend: AutomanAdapter) : Future[MAA] = {
Future{
blocking {
new MetaScheduler(this, backend).run().asInstanceOf[MAA]
}
}
}
def metaAnswer(round: Int, backend: AutomanAdapter) : MAA
def done(round: Int, backend: AutomanAdapter) : Boolean
}
|
dbarowy/AutoMan
|
libautoman/src/main/scala/org/automanlang/core/question/MetaQuestion.scala
|
Scala
|
gpl-2.0
| 773
|
import scala.io.Source
import fileIO._
/**
 * Build an index dictionary from the YAML dictionary
*/
object createYamlIndex {
  // Parameters
val infile: String = "yaml/new_argframes.yaml"
val outfile: String = "yaml/new_argframes.dic"
def main(args: Array[String]): Unit = {
println("-start-")
val yaml = Source.fromFile(infile).getLines
val index = this.createIndex(yaml)
this.OutputIndex(index, outfile)
println("-done-")
}
/**
   * Store the start and end file pointers for each element directly under the Dict class.
   * After building an array (ary) that holds only each element's start position, build the index using the next element's start position as each element's end position.
*/
def createIndex(yaml: Iterator[String]): Array[(String, Int, Int)] = {
var ary: Array[(String, Int)] = Array.empty
var fp: Int = 0
yaml.foreach { str =>
str.startsWith("- ") match {
case true => ary = ary :+ (str.split(": ")(1), fp)
case false =>
}
fp = fp + str.getBytes().size + 1
}
ary = ary :+ ("end", fp)
val index: Array[(String, Int, Int)] = ary.sliding(2).toArray.map { n =>
(n(0)._1, n(0)._2, n(1)._2)
}
return index
}
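  // Worked example (illustrative byte offsets only, not taken from the real YAML file):
  // if the input contains "- id: frame1" starting at byte 0 and "- id: frame2" starting
  // at byte 120, with end of file at byte 250, then
  //   ary   = Array(("frame1", 0), ("frame2", 120), ("end", 250))
  // and the sliding(2) pass yields
  //   index = Array(("frame1", 0, 120), ("frame2", 120, 250)).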
def OutputIndex(index: Array[(String, Int, Int)], outfile: String) {
val file = new OutputFiles(outfile)
index.foreach(in => file.write(in._1 + " " + in._2 + " " + in._3))
file.close
}
}
|
Takeuchi-Lab-LM/scala_asa3
|
ASA/src/main/scala/createYamlIndex.scala
|
Scala
|
mit
| 1,365
|
package com.arcusys.valamis.persistence.impl.scorm.storage
import com.arcusys.valamis.lesson.scorm.model.tracking.GlobalObjectiveState
import com.arcusys.valamis.lesson.scorm.storage.tracking.GlobalObjectiveStorage
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.model.GlblObjectiveStateModel
import com.arcusys.valamis.persistence.impl.scorm.schema.{ActivityStateTreeTableComponent, AttemptTableComponent, GlblObjectiveStateTableComponent, ScormUserComponent}
import scala.collection.mutable
import scala.slick.driver.JdbcProfile
import scala.slick.jdbc.JdbcBackend
class GlobalObjectiveStorageImpl(val db: JdbcBackend#DatabaseDef,
val driver: JdbcProfile)
extends GlobalObjectiveStorage
with GlblObjectiveStateTableComponent
with ActivityStateTreeTableComponent
with AttemptTableComponent
with ScormUserComponent
with SlickProfile {
import driver.simple._
override def create(treeId: Long, key: String, state: GlobalObjectiveState): Unit =
db.withSession { implicit session =>
val glblObjectiveState = new GlblObjectiveStateModel(
None,
state.satisfied,
state.normalizedMeasure,
state.attemptCompleted,
key,
treeId)
(glblObjectiveStateTQ returning glblObjectiveStateTQ.map(_.id)) += glblObjectiveState
}
override def modify(attemptId: Long, key: String, state: GlobalObjectiveState): Unit =
db.withSession { implicit session =>
val stateTree = activityStateTreeTQ.filter(_.attemptId === attemptId).firstOption
.getOrElse(throw new UnsupportedOperationException("State tree should be defined for attempt " + attemptId))
val treeId = stateTree.id.get
val old = glblObjectiveStateTQ.filter(o => o.mapKey === key && o.treeId === treeId).run
val entity = new GlblObjectiveStateModel(None,
state.satisfied,
state.normalizedMeasure,
state.attemptCompleted,
key,
treeId)
if (old.isEmpty)
glblObjectiveStateTQ += entity
else
glblObjectiveStateTQ.filter(_.id === stateTree.id.get).map(_.update).update(entity)
}
override def getAllObjectives(treeId: Long): mutable.Map[String, GlobalObjectiveState] =
db.withSession { implicit session =>
val result = scala.collection.mutable.Map[String, GlobalObjectiveState]()
val glblObjectiveStates = glblObjectiveStateTQ.filter(_.treeId === treeId).run
glblObjectiveStates.foreach{e => result(e.mapKey) =
new GlobalObjectiveState(
e.satisfied,
e.normalizedMeasure,
e.attemptCompleted)}
result
}
}
|
igor-borisov/valamis
|
valamis-slick-persistence/src/main/scala/com/arcusys/valamis/persistence/impl/scorm/storage/GlobalObjectiveStorageImpl.scala
|
Scala
|
gpl-3.0
| 2,696
|
package com.twitter.algebird
import org.specs._
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Gen.choose
object BloomFilterLaws extends Properties("BloomFilter") {
import BaseProperties._
val NUM_HASHES = 6
val WIDTH = 32
val SEED = 1
implicit val bfMonoid = new BloomFilterMonoid(NUM_HASHES, WIDTH, SEED)
implicit val bfGen =
Arbitrary {
for (v <- choose(0, 10000)) yield (bfMonoid.create(v.toString))
}
property("BloomFilter is a Monoid") = monoidLaws[BF]
}
class BloomFilterTest extends Specification {
noDetailedDiffs()
val SEED = 1
val RAND = new scala.util.Random
"BloomFilter" should {
"identify all true positives" in {
(0 to 100).foreach{
_ => {
val bfMonoid = new BloomFilterMonoid(RAND.nextInt(5)+1, RAND.nextInt(64)+32, SEED)
val numEntries = 5
val entries = (0 until numEntries).map(_ => RAND.nextInt.toString)
val bf = bfMonoid.create(entries: _*)
entries.foreach{
i => bf.contains(i.toString).isTrue must be_==(true)
}
}
}
}
"have small false positive rate" in {
val iter = 10000
Seq(0.1, 0.01, 0.001).foreach{
fpProb => {
val fps = (0 until iter).par.map{
_ => {
val numEntries = RAND.nextInt(10) + 1
val bfMonoid = BloomFilter(numEntries, fpProb, SEED)
val entries = RAND.shuffle((0 until 1000).toList).take(numEntries + 1).map(_.toString)
val bf = bfMonoid.create(entries.drop(1): _*)
if(bf.contains(entries(0)).isTrue) 1.0 else 0.0
}
}
val observedFpProb = fps.sum / fps.size
observedFpProb must be_<=(2 * fpProb)
}
}
}
"approximate cardinality" in {
val bfMonoid = BloomFilterMonoid(10, 100000, SEED)
Seq(10, 100, 1000, 10000).foreach { exactCardinality =>
val items = (1 until exactCardinality).map { _.toString }
val bf = bfMonoid.create(items: _*)
val size = bf.size
(size ~ exactCardinality) must be_==(true)
size.min must be_<=(size.estimate)
size.max must be_>=(size.estimate)
}
}
}
}
|
snoble/algebird
|
algebird-test/src/test/scala/com/twitter/algebird/BloomFilterTest.scala
|
Scala
|
apache-2.0
| 2,311
|
package preact.dsl.symbol
import preact.Preact
import scala.scalajs.js
sealed trait Entry
object Entry {
case class Attribute(value: (String, js.Any)) extends Entry
case class Child(value: Preact.Child) extends Entry
case class Children(value: Iterable[Preact.Child]) extends Entry
case object EmptyAttribute extends Entry
case object EmptyChild extends Entry
}
trait EntryImplicits {
implicit def tupleToEntry(tuple: (String, js.Any)): Entry = {
Entry.Attribute(tuple)
}
implicit def stringTupleToEntry(tuple: (String, String)): Entry = {
Entry.Attribute((tuple._1, tuple._2))
}
implicit def conversableTupleToEntry[T](tuple: (String, T))(implicit conversion: T => js.Any): Entry = {
Entry.Attribute((tuple._1, tuple._2))
}
implicit def function0ToEntry[T](tuple: (String, Function0[T])): Entry = {
Entry.Attribute((tuple._1, tuple._2))
}
implicit def function1ToEntry[T, U](tuple: (String, Function1[T, U])): Entry = {
Entry.Attribute((tuple._1, tuple._2))
}
implicit def function2ToEntry[T1, T2, U](tuple: (String, Function2[T1, T2, U])): Entry = {
Entry.Attribute((tuple._1, tuple._2))
}
implicit def childToEntry(child: Preact.Child): Entry = {
Entry.Child(child)
}
implicit def childrenToEntry(children: Iterable[Preact.Child]): Entry = {
Entry.Children(children)
}
implicit def vnodeToEntry(vnode: Preact.VNode): Entry = {
Entry.Child(vnode)
}
implicit def vnodesToEntry(vnodes: Iterable[Preact.VNode]): Entry = {
Entry.Children(vnodes.asInstanceOf[Iterable[Preact.Child]])
}
implicit def stringToEntry(x: String): Entry = {
Entry.Child(x)
}
}
|
LMnet/scala-js-preact
|
dsl/symbol/src/main/scala/preact/dsl/symbol/Entry.scala
|
Scala
|
mit
| 1,664
|
package sri.test
import sri.web.router.{History, HistoryOptions}
import scala.scalajs.js
package object router {
def memoryHistory(options: js.UndefOr[HistoryOptions] = js.undefined) = if (options.isDefined && options.get.basename.isDefined) History.useQueries(History.useBasename(History.createHashHistory))(options)
else History.useQueries(History.createMemoryHistory)(options)
}
|
chandu0101/sri
|
test/src/main/scala/sri/test/router/package.scala
|
Scala
|
apache-2.0
| 390
|
package com.wavesplatform.lang.v1.testing
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.Base58
import com.wavesplatform.lang.v1.parser.BinaryOperation
import com.wavesplatform.lang.v1.parser.BinaryOperation._
import com.wavesplatform.lang.v1.parser.Expressions.Pos.AnyPos
import com.wavesplatform.lang.v1.parser.Expressions._
import com.wavesplatform.lang.v1.parser.Parser.keywords
import org.scalacheck._
import scala.reflect.ClassTag
trait ScriptGen {
def CONST_LONGgen: Gen[(EXPR, Long)] = Gen.choose(Long.MinValue, Long.MaxValue).map(v => (CONST_LONG(AnyPos, v), v))
def BOOLgen(gas: Int): Gen[(EXPR, Boolean)] =
if (gas > 0) Gen.oneOf(GEgen(gas - 1), GTgen(gas - 1), EQ_INTgen(gas - 1), ANDgen(gas - 1), ORgen(gas - 1), IF_BOOLgen(gas - 1))
else Gen.const((TRUE(AnyPos), true))
def SUMgen(gas: Int): Gen[(EXPR, Long)] =
for {
(i1, v1) <- INTGen((gas - 2) / 2)
(i2, v2) <- INTGen((gas - 2) / 2)
} yield
if ((BigInt(v1) + BigInt(v2)).isValidLong) {
(BINARY_OP(AnyPos, i1, SUM_OP, i2), v1 + v2)
} else {
(BINARY_OP(AnyPos, i1, SUB_OP, i2), v1 - v2)
}
def SUBgen(gas: Int): Gen[(EXPR, Long)] =
for {
(i1, v1) <- INTGen((gas - 2) / 2)
(i2, v2) <- INTGen((gas - 2) / 2)
} yield
if ((BigInt(v1) - BigInt(v2)).isValidLong) {
(BINARY_OP(AnyPos, i1, SUB_OP, i2), v1 - v2)
} else {
(BINARY_OP(AnyPos, i1, SUM_OP, i2), v1 + v2)
}
def INTGen(gas: Int): Gen[(EXPR, Long)] =
if (gas > 0)
Gen.oneOf(
CONST_LONGgen,
SUMgen(gas - 1),
SUBgen(gas - 1),
IF_INTgen(gas - 1),
INTGen(gas - 1).filter(v => (-BigInt(v._2)).isValidLong).map(e => (FUNCTION_CALL(AnyPos, PART.VALID(AnyPos, "-"), List(e._1)), -e._2))
)
else CONST_LONGgen
def GEgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(i1, v1) <- INTGen((gas - 2) / 2)
(i2, v2) <- INTGen((gas - 2) / 2)
} yield (BINARY_OP(AnyPos, i1, GE_OP, i2), v1 >= v2)
def GTgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(i1, v1) <- INTGen((gas - 2) / 2)
(i2, v2) <- INTGen((gas - 2) / 2)
} yield (BINARY_OP(AnyPos, i1, GT_OP, i2), v1 > v2)
def EQ_INTgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(i1, v1) <- INTGen((gas - 2) / 2)
(i2, v2) <- INTGen((gas - 2) / 2)
} yield (BINARY_OP(AnyPos, i1, EQ_OP, i2), v1 == v2)
def ANDgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(i1, v1) <- BOOLgen((gas - 2) / 2)
(i2, v2) <- BOOLgen((gas - 2) / 2)
} yield (BINARY_OP(AnyPos, i1, AND_OP, i2), v1 && v2)
def ORgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(i1, v1) <- BOOLgen((gas - 2) / 2)
(i2, v2) <- BOOLgen((gas - 2) / 2)
} yield (BINARY_OP(AnyPos, i1, OR_OP, i2), v1 || v2)
def IF_BOOLgen(gas: Int): Gen[(EXPR, Boolean)] =
for {
(cnd, vcnd) <- BOOLgen((gas - 3) / 3)
(t, vt) <- BOOLgen((gas - 3) / 3)
(f, vf) <- BOOLgen((gas - 3) / 3)
} yield (IF(AnyPos, cnd, t, f), if (vcnd) { vt } else { vf })
def IF_INTgen(gas: Int): Gen[(EXPR, Long)] =
for {
(cnd, vcnd) <- BOOLgen((gas - 3) / 3)
(t, vt) <- INTGen((gas - 3) / 3)
(f, vf) <- INTGen((gas - 3) / 3)
} yield (IF(AnyPos, cnd, t, f), if (vcnd) { vt } else { vf })
def STRgen: Gen[EXPR] =
Gen.identifier.map(PART.VALID[String](AnyPos, _)).map(CONST_STRING(AnyPos, _))
def LETgen(gas: Int): Gen[LET] =
for {
name <- Gen.identifier.filter(!keywords(_))
(value, _) <- BOOLgen((gas - 3) / 3)
} yield LET(AnyPos, PART.VALID(AnyPos, name), value)
def REFgen: Gen[EXPR] =
Gen.identifier.filter(!keywords(_)).map(PART.VALID[String](AnyPos, _)).map(REF(AnyPos, _))
def BLOCKgen(gas: Int): Gen[EXPR] =
for {
let <- LETgen((gas - 3) / 3)
body <- Gen.oneOf(BOOLgen((gas - 3) / 3).map(_._1), BLOCKgen((gas - 3) / 3)) // BLOCKGen wasn't add to BOOLGen since issue: NODE-700
} yield BLOCK(AnyPos, let, body)
  private val spaceChars: Seq[Char] = " \t\n\r"
val whitespaceChar: Gen[Char] = Gen.oneOf(spaceChars)
val whitespaces: Gen[String] = for {
n <- Gen.choose(1, 5)
xs <- Gen.listOfN(n, whitespaceChar)
} yield xs.mkString
def withWhitespaces(expr: String): Gen[String] =
for {
pred <- whitespaces
post <- whitespaces
} yield pred + expr + post
private def toString[T](part: PART[T])(implicit ct: ClassTag[T]): String = part match {
case PART.VALID(_, x: String) => x
case PART.VALID(_, xs: ByteStr) => Base58.encode(xs.arr)
case _ => throw new RuntimeException(s"Can't stringify $part")
}
def toString(expr: EXPR): Gen[String] = expr match {
case CONST_LONG(_, x, _) => withWhitespaces(s"$x")
case REF(_, x, _, _) => withWhitespaces(toString(x))
    case CONST_STRING(_, x, _) => withWhitespaces(s"""\"${toString(x)}\"""")
case CONST_BYTESTR(_, x, _) => withWhitespaces(s"""base58'${toString(x)}'""")
case _: TRUE => withWhitespaces("true")
case _: FALSE => withWhitespaces("false")
case BINARY_OP(_, x, op: BinaryOperation, y, _, _) =>
for {
arg1 <- toString(x)
arg2 <- toString(y)
} yield s"($arg1${opsToFunctions(op)}$arg2)"
case IF(_, cond, x, y, _, _) =>
for {
c <- toString(cond)
t <- toString(x)
f <- toString(y)
} yield s"(if ($c) then $t else $f)"
case BLOCK(_, let: LET, body, _, _) =>
for {
v <- toString(let.value)
b <- toString(body)
isNewLine <- Arbitrary.arbBool.arbitrary
        sep <- if (isNewLine) Gen.const("\n") else withWhitespaces(";")
} yield s"let ${toString(let.name)} = $v$sep$b"
case FUNCTION_CALL(_, PART.VALID(_, "-"), List(CONST_LONG(_, v, _)), _, _) if v >= 0 =>
s"-($v)"
case FUNCTION_CALL(_, op, List(e), _, _) => toString(e).map(e => s"${toString(op)}$e")
case x => throw new NotImplementedError(s"toString for ${x.getClass.getSimpleName}")
}
}
trait ScriptGenParser extends ScriptGen {
override def BOOLgen(gas: Int): Gen[(EXPR, Boolean)] = {
if (gas > 0)
Gen.oneOf(GEgen(gas - 1), GTgen(gas - 1), EQ_INTgen(gas - 1), ANDgen(gas - 1), ORgen(gas - 1), IF_BOOLgen(gas - 1), REFgen.map(r => (r, false)))
else Gen.const((TRUE(AnyPos), true))
}
override def INTGen(gas: Int): Gen[(EXPR, Long)] =
if (gas > 0) Gen.oneOf(CONST_LONGgen, SUMgen(gas - 1), IF_INTgen(gas - 1), REFgen.map(r => (r, 0L))) else CONST_LONGgen
}
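// Hedged usage sketch (not part of the original file): sampling one of these generators in a
// test. The anonymous instantiation and the use of Gen.sample are illustrative only:
// val gen: Gen[(EXPR, Boolean)] = (new ScriptGen {}).BOOLgen(10)
// gen.sample.foreach { case (expr, expected) =>
//   // compile/evaluate `expr` with the production evaluator and compare to `expected`
// }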
|
wavesplatform/Waves
|
lang/testkit/src/main/scala/com/wavesplatform/lang/v1/testing/ScriptGen.scala
|
Scala
|
mit
| 6,606
|
package com.evecentral.util
object ActorNames {
val statCache = "StatCache"
val statCapture = "statcapture"
val http_quicklookquery = "http_quicklookquery"
val http_marketstat = "http_marketstat"
val http_oldupload = "http_oldupload"
val routefinder = "routefinder"
val getorders = "getorders"
val uploadstorage = "uploadstorage"
val unifiedparser = "unifiedparser"
val gethiststats = "gethiststats"
val http_apiv3 = "http_apiv3"
val http_apiv2 = "http_apiv2"
}
|
theatrus/eve-central.com
|
core/src/main/scala/com/evecentral/util/ActorNames.scala
|
Scala
|
agpl-3.0
| 488
|
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord
import scala.language.implicitConversions
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.generic.DefaultSerializationProxy
import scala.collection.immutable.{AbstractMap, LongMap, StrictOptimizedMapOps}
import scala.collection.mutable.ListBuffer
import scala.collection.{BuildFrom, Factory, View, mutable}
import ackcord.data.SnowflakeType
//A wrapper around a LongMap which provides a nicer API. We override everything LongMap overrides.
class SnowflakeMap[K, +V](private val inner: LongMap[V])
extends AbstractMap[SnowflakeType[K], V]
with StrictOptimizedMapOps[SnowflakeType[K], V, Map, SnowflakeMap[K, V]]
with Serializable {
type Key = SnowflakeType[K]
override protected def fromSpecific(
coll: IterableOnce[(SnowflakeType[K], V)] @uncheckedVariance
): SnowflakeMap[K, V] = {
val b = newSpecificBuilder
b.sizeHint(coll)
b.addAll(coll)
b.result()
}
override protected def newSpecificBuilder
: mutable.Builder[(SnowflakeType[K], V), SnowflakeMap[K, V]] @uncheckedVariance =
new mutable.ImmutableBuilder[(SnowflakeType[K], V), SnowflakeMap[K, V]](empty) {
override def addOne(elem: (SnowflakeType[K], V)): this.type = {
elems = elems + elem
this
}
}
private def keyToSnowflake(k: Long): Key = SnowflakeType[K](k)
override def empty: SnowflakeMap[K, V] = new SnowflakeMap(inner.empty)
override def toList: List[(Key, V)] = {
val buffer = new ListBuffer[(Key, V)]
foreach(buffer += _)
buffer.toList
}
override def iterator: Iterator[(Key, V)] = inner.iterator.map {
case (k, v) => (keyToSnowflake(k), v)
}
final override def foreach[U](f: ((Key, V)) => U): Unit =
inner.foreach {
case (k, v) => f((keyToSnowflake(k), v))
}
final override def foreachEntry[U](f: (Key, V) => U): Unit =
inner.foreachEntry((k, v) => f(keyToSnowflake(k), v))
override def keysIterator: Iterator[Key] = inner.keysIterator.map(keyToSnowflake)
/**
* Loop over the keys of the map. The same as keys.foreach(f), but may
* be more efficient.
*
* @param f The loop body
*/
final def foreachKey(f: Key => Unit): Unit = inner.foreachKey(k => f(keyToSnowflake(k)))
override def valuesIterator: Iterator[V] = inner.valuesIterator
/**
* Loop over the values of the map. The same as values.foreach(f), but may
* be more efficient.
*
* @param f The loop body
*/
final def foreachValue(f: V => Unit): Unit = inner.foreachValue(f)
override protected[this] def className = "SnowflakeMap"
override def isEmpty: Boolean = inner.isEmpty
override def knownSize: Int = inner.knownSize
override def filter(p: ((Key, V)) => Boolean): SnowflakeMap[K, V] =
new SnowflakeMap(inner.filter {
case (k, v) => p((keyToSnowflake(k), v))
})
override def transform[S](f: (Key, V) => S): SnowflakeMap[K, S] =
new SnowflakeMap(inner.transform[S] {
case (k, v) => f(keyToSnowflake(k), v)
})
final override def size: Int = inner.size
final override def get(key: Key): Option[V] = inner.get(key)
final override def getOrElse[V1 >: V](key: Key, default: => V1): V1 = inner.getOrElse(key, default)
final override def apply(key: Key): V = inner.apply(key)
override def +[V1 >: V](kv: (Key, V1)): SnowflakeMap[K, V1] = new SnowflakeMap(inner.updated(kv._1, kv._2))
override def updated[V1 >: V](key: Key, value: V1): SnowflakeMap[K, V1] =
new SnowflakeMap(inner.updated(key, value))
/**
* Updates the map, using the provided function to resolve conflicts if the key is already present.
*
* Equivalent to
* {{{
* this.get(key) match {
* case None => this.update(key, value)
   *   case Some(oldvalue) => this.update(key, f(oldvalue, value))
* }
* }}}
*
* @tparam S The supertype of values in this `SnowflakeMap`.
* @param key The key to update.
* @param value The value to use if there is no conflict.
* @param f The function used to resolve conflicts.
* @return The updated map.
*/
def updateWith[S >: V](key: Key, value: S, f: (V, S) => S): SnowflakeMap[K, S] =
new SnowflakeMap(inner.updateWith(key, value, f))
override def removed(key: Key): SnowflakeMap[K, V] = new SnowflakeMap(inner.removed(key))
/**
   * A combined transform and filter function. Returns a `SnowflakeMap` such that
   * for each `(key, value)` mapping in this map, if `f(key, value) == None`
   * the map contains no mapping for `key`, and if `f(key, value) == Some(x)` the map contains `(key, x)`.
*
* @tparam S The type of the values in the resulting `SnowflakeMap`.
* @param f The transforming function.
* @return The modified map.
*/
def modifyOrRemove[S](f: (Key, V) => Option[S]): SnowflakeMap[K, S] =
new SnowflakeMap(inner.modifyOrRemove {
case (k, v) => f(keyToSnowflake(k), v)
})
/**
* Forms a union map with that map, using the combining function to resolve conflicts.
*
* @tparam S The type of values in `that`, a supertype of values in `this`.
* @param that The map to form a union with.
* @param f The function used to resolve conflicts between two mappings.
* @return Union of `this` and `that`, with identical key conflicts resolved using the function `f`.
*/
def unionWith[S >: V](that: SnowflakeMap[Key, S], f: (Key, S, S) => S): SnowflakeMap[K, S] =
new SnowflakeMap(inner.unionWith[S](that.inner, (l, s1, s2) => f(keyToSnowflake(l), s1, s2)))
/**
* Forms the intersection of these two maps with a combining function. The
* resulting map is a map that has only keys present in both maps and has
* values produced from the original mappings by combining them with `f`.
*
* @tparam S The type of values in `that`.
* @tparam R The type of values in the resulting `SnowflakeMap`.
* @param that The map to intersect with.
* @param f The combining function.
* @return Intersection of `this` and `that`, with values for identical keys produced by function `f`.
*/
def intersectionWith[S, R](that: SnowflakeMap[Key, S], f: (Key, V, S) => R): SnowflakeMap[K, R] =
new SnowflakeMap(inner.intersectionWith[S, R](that.inner, (l, v, s) => f(keyToSnowflake(l), v, s)))
/**
* Left biased intersection. Returns the map that has all the same mappings as this but only for keys
* which are present in the other map.
*
* @tparam R The type of values in `that`.
* @param that The map to intersect with.
* @return A map with all the keys both in `this` and `that`, mapped to corresponding values from `this`.
*/
def intersection[R](that: SnowflakeMap[K, R]): SnowflakeMap[K, V] = new SnowflakeMap(inner.intersection(that.inner))
def ++[S >: V](that: SnowflakeMap[K, S]): SnowflakeMap[K, S] = new SnowflakeMap(inner ++ that.inner)
final def firstKey: Key = keyToSnowflake(inner.firstKey)
final def lastKey: Key = keyToSnowflake(inner.lastKey)
def map[K2, V2](f: ((Key, V)) => (SnowflakeType[K2], V2)): SnowflakeMap[K2, V2] =
SnowflakeMap.from(new View.Map(coll, f))
def flatMap[K2, V2](f: ((Key, V)) => IterableOnce[(SnowflakeType[K2], V2)]): SnowflakeMap[K2, V2] =
SnowflakeMap.from(new View.FlatMap(coll, f))
override def concat[V1 >: V](that: collection.IterableOnce[(Key, V1)]): SnowflakeMap[K, V1] =
    super.concat(that).asInstanceOf[SnowflakeMap[K, V1]] // Already has correct type but not declared as such
override def ++[V1 >: V](that: collection.IterableOnce[(Key, V1)]): SnowflakeMap[K, V1] = concat(that)
def collect[K2, V2](pf: PartialFunction[(Key, V), (SnowflakeType[K2], V2)]): SnowflakeMap[K2, V2] =
strictOptimizedCollect(SnowflakeMap.newBuilder[K2, V2], pf)
protected[this] def writeReplace(): AnyRef =
new DefaultSerializationProxy(SnowflakeMap.toFactory[K, V](SnowflakeMap), this)
}
object SnowflakeMap {
/**
* Create an empty snowflake map.
*/
def empty[K, V]: SnowflakeMap[K, V] = new SnowflakeMap(LongMap.empty)
/**
* Create a snowflake map with a single value.
*/
def singleton[K, V](key: SnowflakeType[K], value: V): SnowflakeMap[K, V] =
new SnowflakeMap(LongMap.singleton(key, value))
/**
* Create a snowflake map from multiple values.
*/
def apply[K, V](elems: (SnowflakeType[K], V)*): SnowflakeMap[K, V] =
new SnowflakeMap(LongMap.apply(elems: _*))
/**
* Create a snowflake map from an IterableOnce of snowflakes and values.
*/
def from[K, V](coll: IterableOnce[(SnowflakeType[K], V)]): SnowflakeMap[K, V] =
newBuilder[K, V].addAll(coll).result()
def newBuilder[K, V]: mutable.Builder[(SnowflakeType[K], V), SnowflakeMap[K, V]] =
new mutable.ImmutableBuilder[(SnowflakeType[K], V), SnowflakeMap[K, V]](empty) {
override def addOne(elem: (SnowflakeType[K], V)): this.type = {
elems = elems + elem
this
}
}
/**
* Create a snowflake map from an iterable of values while using a provided
* function to get the key.
*/
def withKey[K, V](iterable: Iterable[V])(f: V => SnowflakeType[K]): SnowflakeMap[K, V] =
from(iterable.map(v => f(v) -> v))
implicit def toFactory[K, V](dummy: SnowflakeMap.type): Factory[(SnowflakeType[K], V), SnowflakeMap[K, V]] =
ToFactory.asInstanceOf[Factory[(SnowflakeType[K], V), SnowflakeMap[K, V]]]
@SerialVersionUID(3L)
private[this] object ToFactory
extends Factory[(SnowflakeType[AnyRef], AnyRef), SnowflakeMap[AnyRef, AnyRef]]
with Serializable {
def fromSpecific(it: IterableOnce[(SnowflakeType[AnyRef], AnyRef)]): SnowflakeMap[AnyRef, AnyRef] =
SnowflakeMap.from[AnyRef, AnyRef](it)
def newBuilder: mutable.Builder[(SnowflakeType[AnyRef], AnyRef), SnowflakeMap[AnyRef, AnyRef]] =
SnowflakeMap.newBuilder[AnyRef, AnyRef]
}
implicit def toBuildFrom[K, V](
factory: SnowflakeMap.type
): BuildFrom[Any, (SnowflakeType[K], V), SnowflakeMap[K, V]] =
ToBuildFrom.asInstanceOf[BuildFrom[Any, (SnowflakeType[K], V), SnowflakeMap[K, V]]]
private[this] object ToBuildFrom
extends BuildFrom[Any, (SnowflakeType[AnyRef], AnyRef), SnowflakeMap[AnyRef, AnyRef]] {
def fromSpecific(from: Any)(it: IterableOnce[(SnowflakeType[AnyRef], AnyRef)]): SnowflakeMap[AnyRef, AnyRef] =
SnowflakeMap.from(it)
def newBuilder(from: Any): mutable.Builder[(SnowflakeType[AnyRef], AnyRef), SnowflakeMap[AnyRef, AnyRef]] =
SnowflakeMap.newBuilder[AnyRef, AnyRef]
}
implicit def iterableFactory[K, V]: Factory[(SnowflakeType[K], V), SnowflakeMap[K, V]] = toFactory(this)
implicit def buildFromSnowflakeMap[K, V]: BuildFrom[SnowflakeMap[_, _], (SnowflakeType[K], V), SnowflakeMap[K, V]] =
toBuildFrom(this)
}
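// Hedged usage sketch (not part of the original file). The `User` type is hypothetical, and
// building keys with `SnowflakeType[K](raw: Long)` is assumed from keyToSnowflake above:
// case class User(id: SnowflakeType[User], name: String)
// val users = Seq(User(SnowflakeType[User](1L), "a"), User(SnowflakeType[User](2L), "b"))
// val byId  = SnowflakeMap.withKey(users)(_.id)                       // SnowflakeMap[User, User]
// val all   = byId.updated(SnowflakeType[User](3L), User(SnowflakeType[User](3L), "c"))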
|
Katrix-/AckCord
|
data/src/main/scala-2.13/ackcord/SnowflakeMap.scala
|
Scala
|
mit
| 11,999
|
/*
* ModelResource.scala
*
* To change this template, choose Tools | Template Manager
* and open the template in the editor.
*/
package pt.cnbc.wikimodels.rest
import java.io.InputStream
import java.lang.annotation._
import java.net.URI
import javax.ws.rs.GET
import javax.ws.rs.POST
import javax.ws.rs.PUT
import javax.ws.rs.DELETE
import javax.ws.rs.Consumes
import javax.ws.rs.HeaderParam
import javax.ws.rs.Path
import javax.ws.rs.PathParam
import javax.ws.rs.Produces
import javax.ws.rs.WebApplicationException
import javax.ws.rs.core.Context
import javax.ws.rs.core.SecurityContext
import javax.ws.rs.core.Response
import javax.ws.rs.core.UriInfo
import scala.collection.mutable.Map
import scala.collection.mutable.HashMap
import pt.cnbc.wikimodels.dataModel.Compartment
import pt.cnbc.wikimodels.dataModel.Reaction
import pt.cnbc.wikimodels.dataAccess.CompartmentsDAO
import pt.cnbc.wikimodels.exceptions.BadFormatException
import pt.cnbc.wikimodels.security.SecurityContextFactory
import pt.cnbc.wikimodels.sbmlVisitors.SBML2BeanConverter
class CompartmentResource(sbmlModelResource:String) extends RESTResource {
@Context
var security:SecurityContext = null
@Context
var uriInfo:UriInfo =null;
@GET
@Produces(Array("application/xml"))
@Path("{compartmentid}")//: [a-zA-Z][a-zA-Z_0-9]}")
def get(@PathParam("compartmentid") compartmentResource:String
):String = {
val username:String = security.getUserPrincipal().getName()
Console.print("GET verb was used in compartment " + compartmentResource)
if(secContext.isAuthorizedTo(username,
"GET", "model/" + sbmlModelResource +
"/compartment/" + compartmentResource ) ){
try{
val dao = new CompartmentsDAO
val compartment = dao.loadCompartment(compartmentResource)
Console.println("Obtained compartment:")
if(compartment != null &&
compartment.metaid == compartmentResource){
Console.println("The right compartment is being returned")
compartment.toXML.toString
} else {
throw new WebApplicationException(Response.Status.NOT_FOUND)
}
} catch {
case e:WebApplicationException => throw e
case e:Exception => {
e.printStackTrace
          throw new WebApplicationException(
            Response.Status.BAD_REQUEST)
}
}
} else {
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
}
/**
* Creates a new resource (compartment in this case) with its metaid generated
* automatically by wikiModels server.
   * The metaid can be suggested; for that to happen, the XML that
   * represents the compartment should come with the metaid attribute filled.
*/
@POST
@Consumes(Array("application/xml"))
def post(requestContent:InputStream) = {
val username = security.getUserPrincipal().getName()
Console.print("POST verb was used in user " + username)
var ret = ""
if(secContext.isAuthorizedTo(username,
"POST", "model/" + sbmlModelResource +
"/compartment/" ) ){
val compartmentMetaId =
try{
val dao = new CompartmentsDAO
dao.tryToCreateCompartmentInModel(sbmlModelResource,
SBML2BeanConverter.visitCompartment(
scala.xml.XML.load(requestContent)))
} catch {
case e:Exception => {
e.printStackTrace
            throw new WebApplicationException(
              Response.Status.BAD_REQUEST)
}
}
if(compartmentMetaId == null){
      throw new BadFormatException("Creating the compartment did not go according to plan.");
}else {
val uri:URI = uriInfo.getAbsolutePathBuilder()
.path(compartmentMetaId)
.build();
Response.created( uri ).build()
}
} else {
//user is trying to access a resource for which
//it does not have permissions
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
}
/**
   * Updates an already created resource.
   * In a REST-style architecture the PUT request can also be used
   * to create new resources, but only as long as the request
   * remains idempotent.
   * Creating a new compartment is not an idempotent request, since it is
   * subject to verifications and may not result in exactly the sent entity
   * being created. Ids and other information may be modified.
*/
@PUT
@Path("{compartmentid}")//: [a-zA-Z][a-zA-Z_0-9]}")
@Consumes(Array("application/xml"))
def put(@PathParam("compartmentid") compartmentResource:String,
requestContent:String):Response = {
val username = security.getUserPrincipal().getName()
Console.print("PUT verb was used in user " + username)
Console.print("compartmentid = " + compartmentResource)
Console.print("Content of request = " + requestContent)
Console.print("--------------------------------------")
var ret = ""
if(secContext.isAuthorizedTo(username,
"PUT", "model/" + sbmlModelResource +
"/compartment/" + compartmentResource ) ){
try{
val dao = new CompartmentsDAO
//XXX if there are performance problems in this part replace:
        // - requestContent: String -> requestContent: InputStream
// - scala.xml.XML.loadString -> scala.xml.XML.load
if( dao.updateCompartment(
SBML2BeanConverter.visitCompartment(
scala.xml.XML.loadString(requestContent))) ){
Response.ok.build
} else {
throw new WebApplicationException(
Response.Status.NOT_FOUND)
}
} catch {
case e:WebApplicationException => throw e
case e:Exception => {
e.printStackTrace
throw new WebApplicationException(
Response.Status.BAD_REQUEST)
}
}
} else {
//user is trying to access a resource for which
//it does not have permissions
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
}
@DELETE
@Path("{compartmentid}")//: [a-zA-Z][a-zA-Z_0-9]}")
def delete(@PathParam("compartmentid") compartmentResource:String
):Unit = {
val username = security.getUserPrincipal().getName()
Console.print("DELETE verb was used with user " + username)
Console.print("DELETE verb was used with compartmentid " + compartmentResource)
var ret = ""
if(secContext.isAuthorizedTo(username,
"DELETE", "model/" + sbmlModelResource +
"/compartment/" + compartmentResource ) ){
try{
val dao = new CompartmentsDAO()
if(dao.deleteCompartment(
new Compartment(
compartmentResource, Nil, compartmentResource, null, null, 0, null, null, null, false)) ){
} else {
throw new WebApplicationException(
Response.Status.NOT_FOUND)
}
} catch {
case e:WebApplicationException => throw e
case e:Exception => {
e.printStackTrace
throw new WebApplicationException(
Response.Status.BAD_REQUEST)
}
}
} else {
//user is trying to access a resource for which
//it does not have permissions
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
}
}
|
alexmsmartins/WikiModels
|
wm_server/src/main/scala/pt/cnbc/wikimodels/rest/CompartmentResource.scala
|
Scala
|
mit
| 8,538
|
package monocle.syntax
import monocle.function._
import monocle.macros.GenLens
import monocle.std._
import monocle.{Lens, MonocleSuite, Prism}
class SymbolicSyntaxExample extends MonocleSuite {
case class Store(articles: List[Article])
sealed trait Article
case class Table(wood: String) extends Article
case class Sofa(color: String, price: Int) extends Article
val _articles = Lens((_: Store).articles)(as => s => s.copy(articles = as))
val _sofa = Prism[Article, Sofa ]{ case s: Sofa => Some(s); case _ => None}(identity)
val sofaGenLens = GenLens[Sofa]
val (_color, _price) = (sofaGenLens(_.color), sofaGenLens(_.price))
test("Symbols can replace composeX and applyX methods") {
val myStore = Store(List(Sofa("Red", 10), Table("oak"), Sofa("Blue", 26)))
(_articles ^|-? headOption ^<-? _sofa ^|-> _color).getOption(myStore) shouldEqual
(myStore &|-> _articles ^|-? headOption ^<-? _sofa ^|-> _color getOption)
(_articles ^<-> iListToList.reverse ^|->> each ^<-? _sofa ^|-> _price).modify(_ / 2)(myStore) ===
(myStore &|-> _articles ^<-> iListToList.reverse ^|->> each ^<-? _sofa ^|-> _price modify(_ / 2))
(myStore.articles &|-? index(1) ^<-? _sofa getOption) shouldEqual None
}
}
|
malcolmgreaves/Monocle
|
example/src/test/scala/monocle/syntax/SymbolicSyntaxExample.scala
|
Scala
|
mit
| 1,251
|
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.multideposit
import java.util.Locale
import better.files.File
import cats.data.NonEmptyChain
import cats.syntax.either._
import javax.naming.Context
import javax.naming.ldap.InitialLdapContext
import nl.knaw.dans.easy.multideposit.PathExplorer._
import nl.knaw.dans.easy.multideposit.actions.CreateMultiDeposit
import nl.knaw.dans.easy.multideposit.model.Datamanager
import nl.knaw.dans.easy.multideposit.parser.MultiDepositParser
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
class SplitMultiDepositApp(formats: Set[String], userLicenses: Set[String], ldap: Ldap, ffprobe: FfprobeRunner, permissions: DepositPermissions, dansDoiPrefix: String) extends AutoCloseable with DebugEnhancedLogging {
private val createMultiDeposit = new CreateMultiDeposit(formats, ldap, ffprobe, permissions, dansDoiPrefix)
override def close(): Unit = ldap.close()
def validate(paths: PathExplorers, datamanagerId: Datamanager): Either[NonEmptyChain[SmdError], Unit] = {
implicit val input: InputPathExplorer = paths
implicit val staging: StagingPathExplorer = paths
Locale.setDefault(Locale.US)
for {
deposits <- MultiDepositParser.parse(input.multiDepositDir, userLicenses).toEitherNec
_ <- createMultiDeposit.validateDeposits(deposits).toEither
_ <- createMultiDeposit.getDatamanagerEmailaddress(datamanagerId).toEitherNec
} yield ()
}
def convert(paths: PathExplorers, datamanagerId: Datamanager): Either[NonEmptyChain[SmdError], Unit] = {
implicit val input: InputPathExplorer = paths
implicit val staging: StagingPathExplorer = paths
implicit val output: OutputPathExplorer = paths
Locale.setDefault(Locale.US)
for {
deposits <- MultiDepositParser.parse(input.multiDepositDir, userLicenses).toEitherNec
dataManagerEmailAddress <- createMultiDeposit.getDatamanagerEmailaddress(datamanagerId).toEitherNec
_ <- createMultiDeposit.convertDeposits(deposits, paths, datamanagerId, dataManagerEmailAddress)
_ = logger.info("deposits were created successfully")
_ <- createMultiDeposit.report(deposits).toEitherNec
_ = logger.info(s"report generated at ${ paths.reportFile }")
_ <- createMultiDeposit.moveDepositsToOutputDir(deposits).toEitherNec
_ = logger.info(s"deposits were successfully moved to ${ output.outputDepositDir }")
} yield ()
}
}
object SplitMultiDepositApp {
def apply(configuration: Configuration): SplitMultiDepositApp = {
val ldap = {
val env = new java.util.Hashtable[String, String]
env.put(Context.PROVIDER_URL, configuration.properties.getString("auth.ldap.url"))
env.put(Context.SECURITY_AUTHENTICATION, "simple")
env.put(Context.SECURITY_PRINCIPAL, configuration.properties.getString("auth.ldap.user"))
env.put(Context.SECURITY_CREDENTIALS, configuration.properties.getString("auth.ldap.password"))
env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory")
Ldap(new InitialLdapContext(env, null))
}
val permissions = DepositPermissions(
permissions = configuration.properties.getString("deposit.permissions.access"),
group = configuration.properties.getString("deposit.permissions.group")
)
val ffprobe = {
val ffProbePath = configuration.properties.getString("audio-video.ffprobe")
require(ffProbePath != null, "Missing configuration for ffprobe")
val exeFile = File(ffProbePath)
require(exeFile.isRegularFile, s"Ffprobe at $exeFile does not exist or is not a regular file")
require(exeFile.isExecutable, s"Ffprobe at $exeFile is not executable")
FfprobeRunner(exeFile)
}
val dansDoiPrefix = configuration.properties.getString("dans.doi.prefix")
new SplitMultiDepositApp(configuration.formats, configuration.licenses, ldap, ffprobe, permissions, dansDoiPrefix)
}
}
|
DANS-KNAW/easy-process-sip
|
src/main/scala/nl.knaw.dans.easy.multideposit/SplitMultiDepositApp.scala
|
Scala
|
apache-2.0
| 4,532
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.calculations
import org.joda.time.{LocalDate, Period, PeriodType}
object PeriodCalculator {
def periodHeadingComponents(startDate: LocalDate, endDate: LocalDate): PeriodHeadingComponents = {
val friendlyEndDate = endDate.toString("d MMM yyyy")
val yearEndDate = endDate.toString("yyyy")
val months = monthsInPeriod(startDate, endDate)
months match {
case 12 => PeriodHeadingComponents(monthCount = 12, messageKey = "", dateText = yearEndDate)
case months if (months > 1) => PeriodHeadingComponents(monthCount = months, messageKey = "periodHeader.plural", dateText = friendlyEndDate)
case _ => PeriodHeadingComponents(monthCount = months, messageKey = "periodHeader.singular", dateText = friendlyEndDate)
}
}
private def monthsInPeriod(startDate: LocalDate, endDate: LocalDate): Int = {
val period = new Period(startDate.minusDays(1), endDate, PeriodType.yearMonthDay().withYearsRemoved())
period match {
case p if p.getDays > 15 => p.getMonths + 1
case p if p.getMonths < 1 => 1
case p => p.getMonths
}
}
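  // Worked example (illustrative): for startDate 2020-01-01 and endDate 2020-12-31 the period
  // measured from 2019-12-31 is 12 months and 0 days, so monthsInPeriod returns 12 and
  // periodHeadingComponents takes the 12-month branch with dateText "2020" and an empty messageKey.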
}
case class PeriodHeadingComponents(monthCount: Int, messageKey: String, dateText: String)
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/calculations/PeriodCalculator.scala
|
Scala
|
apache-2.0
| 1,829
|
#!/bin/bash
exec scala \
-Djava.library.path="../lib/linux_x86" \
-classpath "../bin:../lib:../lib/linux_x86/com.google.ortools.jar" \
"$0" "$@"
!#
/** USAGE: ./edges.scala DIM_A DIM_B
*/
import scala.collection.mutable.HashSet
import polytope._
/** Print extremal edges for given dimensions
*/
object ExtEdges extends App {
val dims = List(args(0).toInt, args(1).toInt)
val cubicles = InequalityFactory.cubiclesDM(dims)
val es = HashSet[ABEdge]()
if (!cubicles.isEmpty) {
for (T <- cubicles) {
for (e <- T.toCone.edges(dims(0))._2) {
if (!es.contains(e)) {
println(e.toCSV)
es += e
}
}
}
}
}
// Run the code
ExtEdges.main(args)
|
expz/polytope
|
scripts/edges.scala
|
Scala
|
gpl-3.0
| 710
|
package exemples
import helpers.sorus._
import helpers.sorus.SorusDSL._
import scala.concurrent.Future
import scalaz._
class BasicExemple extends Sorus {
// Sample User class
case class User(id: Option[Long], email: String, validate: Boolean)
  def doSomething(): Future[Fail \/ User] = {
for {
      user <- loadUser(12L) ?| "Error while loading user" // <- you don't create the Fail yourself; the ?| operator does it for you
      _ <- user.validate ?| "Account needs to be validated"
_ <- logUserAction(user) ?| () // <- You can just forward underlying Fail without adding a message
} yield {
user
}
}
private def loadUser(id: Long): Future[Option[User]] = {
// Load it from DB / API / Services ...
Future.successful(Some(User(Some(id), "foo@bar.com", false)))
}
  private def logUserAction(user: User): Future[Fail \/ Unit] = {
for {
      id <- user.id ?| "Can't log action of user without id"
} yield {
println(s"user $id access the resource")
}
}
}
|
Driox/sorus
|
test/examples/BasicExemple.scala
|
Scala
|
apache-2.0
| 1,071
|
package org.squeryl.test.arrays
import _root_.org.squeryl.framework._
import org.squeryl.test.PrimitiveTypeModeForTests._
abstract class PrimitiveArrayTest extends SchemaTester with RunTestsInsideTransaction {
self: DBConnector =>
val schema = PrimitiveArraySchema
import PrimitiveArraySchema._
test("can insert and query integer, double, and long array values in database") {
transaction {
schema.drop
schema.create
swimmers.insert(new Swimmer(1, Array(10.55, 12.99, 15.32), Array(100, 110, 20), Array(9876543210L, 123456789L), Array("testing", "stuff")))
}
val query = from(swimmers)((s) => select(s))
val res = transaction { query.toList }
res.size should equal(1)
res(0).lap_times.size should equal(3)
res(0).lap_times(0) should equal(10.55)
res(0).lap_times(1) should equal(12.99)
res(0).lap_times(2) should equal(15.32)
res(0).scores.size should equal(3)
res(0).scores(0) should equal(100)
res(0).scores(1) should equal(110)
res(0).scores(2) should equal(20)
res(0).orgids.size should equal(2)
res(0).orgids(0) should equal(9876543210L)
res(0).orgids(1) should equal(123456789L)
res(0).tags.size should equal(2)
res(0).tags(0) should equal("testing")
res(0).tags(1) should equal("stuff")
}
test("can update integer, double, and long array values in database") {
transaction {
schema.drop
schema.create
swimmers.insert(new Swimmer(1, Array(10.55, 12.99, 15.32), Array(100, 110, 20), Array(9876543210L, 123456789L), Array("testing", "stuff")))
}
val query = from(swimmers)((s) => select(s))
val res = transaction { query.toList }
res.size should equal(1)
res(0).lap_times.size should equal(3)
res(0).scores.size should equal(3)
res(0).orgids.size should equal(2)
res(0).tags.size should equal(2)
transaction {
update(swimmers)(s =>
where(s.id === 1)
set (s.lap_times := Array(11.69), s.scores := Array(1, 2, 3, 4, 5), s.orgids := Array(13L), s.tags := Array("and things")))
}
val query2 = from(swimmers)((s) => select(s))
    val res2 = transaction { query2.toList }
res2.size should equal(1)
res2(0).lap_times.size should equal(1)
res2(0).scores.size should equal(5)
res2(0).orgids.size should equal(1)
res2(0).tags.size should equal(1)
res2(0).lap_times(0) should equal(11.69)
res2(0).scores(2) should equal(3)
res2(0).orgids(0) should equal(13L)
res2(0).tags(0) should equal("and things")
}
}
import _root_.org.squeryl.Schema
object PrimitiveArraySchema extends Schema {
val swimmers = table[Swimmer]("swimmer")
override def drop = super.drop
}
class Swimmer(val id: Int, val lap_times: Array[Double], val scores: Array[Int], val orgids: Array[Long], val tags: Array[String])
|
wukaikailive/squeryl
|
src/test/scala/org/squeryl/test/arrays/PrimitiveArrayTest.scala
|
Scala
|
apache-2.0
| 2,930
|
/*
* Copyright 2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't lose the braces here, we use nested packages in this source
package net.liftweb {
package mockweb {
import javax.servlet.http.HttpServletRequest
import common.{Box,Empty,Full}
import http.{LiftRules,LiftSession,Req,S}
import util.ThreadGlobal
import util.Helpers._
import http.provider.servlet.HTTPRequestServlet
import mocks.MockHttpServletRequest
import scala.xml.{MetaData,Null}
import org.specs._
/**
* The MockWeb object contains various methods to simplify
* unit testing in Lift outside of the full-blown testkit
* stack.
*
* There is partial support for configuration defined in the
* LiftRules object. This includes:
*
* <ul>
* <li>early</li>
* <li>statelessRewrite</li>
* <li>statelessTest</li>
* <li>statefulRewrite</li>
* </ul>
*
*/
object MockWeb {
// TODO : Uncomment this code when LiftRules can be scoped
// /**
// * Setting this var to <code>true</code>
// * will force all tests to use LiftRules. See
// * useLiftRules for more granular control.
// */
// var useLiftRulesGlobally = false
//
// object useLiftRules extends ThreadGlobal[Boolean]
//
// private def liftRulesEnabled = useLiftRulesGlobally || useLiftRules.box == Full(true)
//
// private def withLiftRules [T] (f : => T) = {
// if (liftRulesEnabled) {
// f
// }
// }
/**
* Executes a given function against a new Req constructed
* from the given url/path String and contextPath. See MockHttpServletRequest.processUrl
* for details on the url String format, and see
* testReq(HttpServletRequest) for more details on
* how the Req is processed.
*/
def testReq [T](url : String, contextPath : String = "")(f : Req => T) : T = {
testReq(new MockHttpServletRequest(url, contextPath))(f)
}
/**
* Executes a given function against a new Req constructed
* from the given HttpServletRequest. If useLiftRules
* is set to true, then LiftRules.early, LiftRules.statelessRewrite,
* and LiftRules.statelessTest rules are applied.
*/
def testReq [T](request : HttpServletRequest)(f : Req => T) : T = {
// TODO : Confirm that we can pass in a null provider without issue
val req = new HTTPRequestServlet(request, null)
// TODO : Uncomment this code when LiftRules can be scoped
// withLiftRules {
// tryo {
// LiftRules.early.toList.foreach(_(req))
// }
// }
//
// val r =
// if(liftRulesEnabled) {
// // Apply stateless rewrites
// Req(req, LiftRules.statelessRewrite.toList,
// LiftRules.statelessTest.toList, System.nanoTime)
// } else {
// Req(req, Nil, System.nanoTime)
// }
f(Req(req, Nil, System.nanoTime))
}
/**
* Sets up S based on the provided url, contextPath
* and session. You can provide your own session if you
* want to simulate sharing a session across multiple
* requests. For example:
*
* <pre name="code" class="scala">
* object testVar extends SessionVar[String]("Empty")
*
* val session = testS("http://foo.com/test") {
 *   testVar("Foo!")
 *   S.session // returns the current session
 * }
 * // A second test
 * testS("http://foo.com/test2", newSession = session) {
 *   testVar.is must_== "Foo!"
 * }
* </pre>
*
* @param url The url to use for this request. Can either be a
* full URL, or just the path and queryString. See MockHttpServletRequest.processUrl
* for more details
*
* @param session The LiftSession to use for this request. If you don't provide
* one a new one will be created for you
*
* @param contextPath The servlet context path for this request
*
* @param testFunc The function to be executed in the scope of a new S
*/
def testS [T](url : String,
session : Box[LiftSession] = Empty,
contextPath : String = "")(testFunc : => T) : T =
testReq(url, contextPath)(realTestS(session)(() => testFunc))
/**
* Sets up S based on the provided request
* and session. You can use this method if you
* want to do special setup on the request outside
* of what is handled by the MockHttpServletRequest
* constructor, or if you want to use a different
* mock impl.
*
* You can provide your own session if you
* want to simulate sharing a session across multiple
* requests. See testS(String,String,Box[LiftSession])
* for an example of this use.
*
* Note that if useLiftRules is set to true, then rules like LiftRules.early,
* LiftRules.statelessTest, etc, will be applied.
*
* @param request The request to be used for this test
* @param session The LiftSession to use for this request. Passing Empty
* will force creation of a new session
* @param testFunc The function to be executed in the scope of a new S
*/
def testS [T](request : HttpServletRequest,
session : Box[LiftSession])(testFunc : => T) : T =
testReq(request)(realTestS(session)(() => testFunc))
/**
* Sets up S based on the provided request
* and a new session.
*/
def testS [T](request : HttpServletRequest)(testFunc : => T) : T =
testReq(request)(realTestS(Empty)(() => testFunc))
/**
* This is the common delegate for the testS methods to avoid
* code duplication.
*/
private def realTestS [T](newSession : Box[LiftSession])(f : () => T)(req : Req) : T = {
val session = newSession openOr LiftSession(req)
S.init(req, session) {
f()
}
}
/**
* This is a utility method to allow you to set the
* S.currentSnippet method for testing.
*
* @param name The snippet name to be tested. For example, <lift:MyWidget.foo/> has a
* name of MyWidget.foo. You can retrieve this via <code>S.currentSnippet</code> or
* <code>S.invokedAs</code>
*
* @param attrs The list of snippet tag attributes. Defaults to Null. See <code>S.attrs</code>
* for more details
*
* @param f The function to execute in the context of the emulated snippet
*
*/
def withSnippet[T](name : String, attrs : MetaData = Null)(f : => T) : T =
S.withAttrs(attrs) {
S.setVars(attrs) {
http.httpPackageProxy.doSnippet(name)(f)
}
}
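  // Hedged usage sketch (not part of the original file), combining testS and withSnippet;
  // the snippet name "MyWidget.foo" and the assertion are purely illustrative:
  // MockWeb.testS("http://foo.com/test") {
  //   MockWeb.withSnippet("MyWidget.foo") {
  //     S.currentSnippet must_== Full("MyWidget.foo")
  //   }
  // }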
}
}
// This is a bridge to allow use to use the http-private
// S.doSnippet method
package http {
private[liftweb] object httpPackageProxy {
def doSnippet[T](name : String)(f : => T) : T = {
S.doSnippet(name)(f)
}
}
}
} // end package net.liftweb
|
lift/lift
|
framework/lift-base/lift-webkit/src/main/scala/net/liftweb/mockweb/MockWeb.scala
|
Scala
|
apache-2.0
| 7,091
|
/*
* Copyright 2014 JHC Systems Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sqlest.ast.syntax
import sqlest.ast._
trait InsertSyntax {
/** Insert into a relation: `insert.into(...)`. */
def into(into: Table) =
new InsertBuilder(into)
}
/** Helper class to prevent users writing `insert.into(...)` without `.columns(...).values(...)` or `.set(...)`. */
class InsertBuilder(into: Table) {
def columns(columns: TableColumn[_]*) =
new InsertColumnsBuilder(into, columns)
def values(setters: Setter[_, _]*) =
InsertValues(into, Seq(setters))
def values(setters: Seq[Setter[_, _]])(implicit d: DummyImplicit) =
InsertValues(into, Seq(setters))
def values(setterLists: Seq[Seq[Setter[_, _]]])(implicit d1: DummyImplicit, d2: DummyImplicit) =
InsertValues(into, setterLists)
def set(setters: Setter[_, _]*) =
InsertValues(into, Seq(setters))
def set(setters: Seq[Setter[_, _]])(implicit d: DummyImplicit) =
InsertValues(into, Seq(setters))
}
/** Helper class to prevent users writing `insert.into(...).columns(...)` without `.values(...)` */
class InsertColumnsBuilder(into: Table, columns: Seq[TableColumn[_]]) {
def values(setters: Setter[_, _]*) = {
if (columns != setters.map(_.column)) throw new AssertionError(s"Cannot insert value to the columns declared")
InsertValues(into, Seq(setters))
}
def values(setters: Seq[Setter[_, _]])(implicit d: DummyImplicit) = {
if (columns != setters.map(_.column)) throw new AssertionError(s"Cannot insert value to the columns declared")
InsertValues(into, Seq(setters))
}
def values(setterLists: Seq[Seq[Setter[_, _]]])(implicit d1: DummyImplicit, d2: DummyImplicit) = {
if (!setterLists.forall(columns == _.map(_.column))) throw new AssertionError(s"Cannot insert value to the columns declared")
InsertValues(into, setterLists)
}
def from[A: AliasedColumns](select: Select[A, _ <: Relation]) =
InsertFromSelect(into = into, columns = columns, select = select)
}
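// Hedged usage sketch (not part of the original file). `MyTable` and its columns are
// hypothetical, and the `column -> value` Setter syntax is assumed from sqlest's column DSL
// rather than shown in this file:
// insert
//   .into(MyTable)
//   .columns(MyTable.id, MyTable.name)
//   .values(MyTable.id -> 1, MyTable.name -> "abc")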
|
DavidGregory084/sqlest
|
sqlest/src/main/scala/sqlest/ast/syntax/InsertSyntax.scala
|
Scala
|
apache-2.0
| 2,532
|
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.slf4j.LoggerFactory
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.clustering.dbscan.DBSCAN
object DBSCAN_acute_infl {
val log = LoggerFactory.getLogger(DBSCAN_acute_infl.getClass)
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("DBSCAN on Acute Inflammations").
set("spark.kryoserializer.buffer.max", "1024") //in MB ; by default 64MB
val sc = new SparkContext(conf)
val eps = args(0).toFloat
val minPoints = args(1).toInt
val maxPointsPerPartition = args(2).toInt
val src = args(3)
val dest = args(4)
log.info(s"EPS: $eps minPoints: $minPoints")
val nb_patients = 120
val raw_data = sc.parallelize(sc.textFile(src).take(nb_patients))
val elements = raw_data.map{
line =>
        val split = line.split('\t')
val id = split(0)
val temperature = split(1).toDouble
val others = split.slice(2, 7).map{ v => if (v == "no") 0.0 else 1.0 }
val res = Array(temperature) ++ others
(id, Vectors.dense(res))
}
val phenotypes = raw_data.map{
line =>
        val split = line.split('\t')
val id = split(0)
val decisions = split.takeRight(2)
var label = 0
if (decisions(0) == "no" && decisions(1) == "no") {
label = 0 //"neither"
} else if (decisions(0) == "yes" && decisions(1) == "no") {
label = 1 //"inflammation"
} else if (decisions(0) == "no" && decisions(1) == "yes") {
label = 2 // "nephretis"
} else {
label = 3 //"both"
}
(id, label)
}
val nb_elements = elements.count.toInt
import org.apache.spark.mllib.feature.StandardScaler
val stdscaler = new StandardScaler(withMean = true, withStd = true).fit(elements.values)
val samples = elements.mapValues{ stdscaler.transform(_) }.persist
val model = DBSCAN.train(
samples,
eps = eps,
minPoints = minPoints,
maxPointsPerPartition = maxPointsPerPartition)
val both = model.labeledPoints.map(p => (p.id, p.cluster)).join(phenotypes).map{case (k,v) => (k.toInt, v)}
val to_eval = both.cartesian(both)
def RID(to_eval : RDD[((Int, (Int, Int)), (Int, (Int, Int)))]) : Double = {
def choose2(n : Int) : Double = {
return n * (n - 1) / 2;
}
val denom = choose2(nb_elements)
// a : number of pairs in the same cluster in C and in K
// b : number of pairs in different clusters in C and in K
val a = sc.accumulator(0, "Acc a : same cluster in both")
val b = sc.accumulator(0, "Acc b : different cluster in both")
to_eval.foreach{
case ((id1, classes1), (id2, classes2)) =>
if (id1 != id2) {
if (classes1._1 == classes2._1 && classes1._2 == classes2._2) {
a += 1
}
else if (classes1._1 != classes2._1 && classes1._2 != classes2._2) {
b += 1
}
}
}
(a.value/2 + b.value/2) / denom
}
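    // Worked example (illustrative numbers only): with nb_elements = 4 there are
    // choose2(4) = 6 unordered pairs; if 2 pairs agree as "same cluster" in both labelings
    // (a.value/2 = 2) and 3 agree as "different" (b.value/2 = 3), then RID = (2 + 3) / 6 ≈ 0.83.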
val eval_res = RID(to_eval)
val numClusters = model.labeledPoints.map(p => p.cluster).distinct.count.toInt
println(s"RID = $eval_res | for nb_elements = $nb_elements & numClusters = $numClusters")
val predictions = model.labeledPoints.map(p => (p.id, p.cluster))
import org.apache.hadoop.fs._
import java.io.BufferedOutputStream
val fs = FileSystem.get(sc.hadoopConfiguration)
class TextFile(output_path : String) {
val physical_file = fs.create(new Path(output_path))
val stream = new BufferedOutputStream(physical_file)
def write(text : String) : Unit = {
stream.write(text.getBytes("UTF-8"))
}
def close() : Unit = {
stream.close()
}
}
val t = new TextFile(dest)
t.write("Patient ID,Cluster ID")
predictions.collect.foreach{
case (id, cluster) =>
t.write(id + "," + cluster + "\\n")
}
t.close()
sc.stop()
}
}
|
neurodev-thesis/notebooks
|
DBSCAN/DBSCAN_acute_infl.scala
|
Scala
|
apache-2.0
| 3,885
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicLong
import java.util.{TimerTask, Timer}
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.concurrent.duration._
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
/**
* Schedules tasks for multiple types of clusters by acting through a SchedulerBackend.
* It can also work with a local setup by using a LocalBackend and setting isLocal to true.
* It handles common logic, like determining a scheduling order across jobs, waking up to launch
* speculative tasks, etc.
*
* Clients should first call initialize() and start(), then submit task sets through the
* submitTasks method.
*
* THREADING: SchedulerBackends and task-submitting clients can call this class from multiple
* threads, so it needs locks in public API methods to maintain its state. In addition, some
* SchedulerBackends synchronize on themselves when they want to send events here, and then
* acquire a lock on us, so we need to make sure that we don't try to lock the backend while
* we are holding a lock on ourselves.
*/
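// A minimal usage sketch based on the description above (assumes a SchedulerBackend
// instance `backend` and a TaskSet `taskSet` are available; they are not part of this file):
//
//   val scheduler = new TaskSchedulerImpl(sc)
//   scheduler.initialize(backend)   // wires the backend and builds the scheduling pools
//   scheduler.start()               // starts the backend and, optionally, speculation checks
//   scheduler.submitTasks(taskSet)  // hands the TaskSet to a TaskSetManager and revives offers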
private[spark] class TaskSchedulerImpl(
val sc: SparkContext,
val maxTaskFailures: Int,
isLocal: Boolean = false)
extends TaskScheduler with Logging
{
def this(sc: SparkContext) = this(sc, sc.conf.getInt("spark.task.maxFailures", 4))
val conf = sc.conf
// How often to check for speculative tasks
val SPECULATION_INTERVAL = conf.getLong("spark.speculation.interval", 100)
// Threshold above which we warn the user that the initial TaskSet may be starved
val STARVATION_TIMEOUT = conf.getLong("spark.starvation.timeout", 15000)
// TaskSetManagers are not thread safe, so any access to one should be synchronized
// on this class.
val activeTaskSets = new HashMap[String, TaskSetManager]
val taskIdToTaskSetId = new HashMap[Long, String]
val taskIdToExecutorId = new HashMap[Long, String]
@volatile private var hasReceivedTask = false
@volatile private var hasLaunchedTask = false
private val starvationTimer = new Timer(true)
// Incrementing task IDs
val nextTaskId = new AtomicLong(0)
// Which executor IDs we have executors on
val activeExecutorIds = new HashSet[String]
// The set of executors we have on each host; this is used to compute hostsAlive, which
// in turn is used to decide when we can attain data locality on a given host
private val executorsByHost = new HashMap[String, HashSet[String]]
private val executorIdToHost = new HashMap[String, String]
// Listener object to pass upcalls into
var dagScheduler: DAGScheduler = null
var backend: SchedulerBackend = null
val mapOutputTracker = SparkEnv.get.mapOutputTracker
var schedulableBuilder: SchedulableBuilder = null
var rootPool: Pool = null
// default scheduler is FIFO
val schedulingMode: SchedulingMode = SchedulingMode.withName(
conf.get("spark.scheduler.mode", "FIFO"))
// This is a var so that we can reset it for testing purposes.
private[spark] var taskResultGetter = new TaskResultGetter(sc.env, this)
override def setDAGScheduler(dagScheduler: DAGScheduler) {
this.dagScheduler = dagScheduler
}
def initialize(backend: SchedulerBackend) {
this.backend = backend
// temporarily set rootPool name to empty
rootPool = new Pool("", schedulingMode, 0, 0)
schedulableBuilder = {
schedulingMode match {
case SchedulingMode.FIFO =>
new FIFOSchedulableBuilder(rootPool)
case SchedulingMode.FAIR =>
new FairSchedulableBuilder(rootPool, conf)
}
}
schedulableBuilder.buildPools()
}
def newTaskId(): Long = nextTaskId.getAndIncrement()
override def start() {
backend.start()
if (!isLocal && conf.getBoolean("spark.speculation", false)) {
logInfo("Starting speculative execution thread")
import sc.env.actorSystem.dispatcher
sc.env.actorSystem.scheduler.schedule(SPECULATION_INTERVAL milliseconds,
SPECULATION_INTERVAL milliseconds) {
checkSpeculatableTasks()
}
}
}
override def submitTasks(taskSet: TaskSet) {
val tasks = taskSet.tasks
logInfo("Adding task set " + taskSet.id + " with " + tasks.length + " tasks")
this.synchronized {
val manager = new TaskSetManager(this, taskSet, maxTaskFailures)
activeTaskSets(taskSet.id) = manager
schedulableBuilder.addTaskSetManager(manager, manager.taskSet.properties)
if (!isLocal && !hasReceivedTask) {
starvationTimer.scheduleAtFixedRate(new TimerTask() {
override def run() {
if (!hasLaunchedTask) {
logWarning("Initial job has not accepted any resources; " +
"check your cluster UI to ensure that workers are registered " +
"and have sufficient memory")
} else {
this.cancel()
}
}
}, STARVATION_TIMEOUT, STARVATION_TIMEOUT)
}
hasReceivedTask = true
}
backend.reviveOffers()
}
override def cancelTasks(stageId: Int): Unit = synchronized {
logInfo("Cancelling stage " + stageId)
activeTaskSets.find(_._2.stageId == stageId).foreach { case (_, tsm) =>
// There are two possible cases here:
// 1. The task set manager has been created and some tasks have been scheduled.
// In this case, send a kill signal to the executors to kill the task and then abort
// the stage.
// 2. The task set manager has been created but no tasks have been scheduled. In this case,
// simply abort the stage.
tsm.runningTasksSet.foreach { tid =>
val execId = taskIdToExecutorId(tid)
backend.killTask(tid, execId)
}
tsm.abort("Stage %s cancelled".format(stageId))
logInfo("Stage %d was cancelled".format(stageId))
}
}
/**
* Called to indicate that all task attempts (including speculated tasks) associated with the
* given TaskSetManager have completed, so state associated with the TaskSetManager should be
* cleaned up.
*/
def taskSetFinished(manager: TaskSetManager): Unit = synchronized {
activeTaskSets -= manager.taskSet.id
manager.parent.removeSchedulable(manager)
logInfo("Removed TaskSet %s, whose tasks have all completed, from pool %s"
.format(manager.taskSet.id, manager.parent.name))
}
/**
* Called by cluster manager to offer resources on slaves. We respond by asking our active task
* sets for tasks in order of priority. We fill each node with tasks in a round-robin manner so
* that tasks are balanced across the cluster.
*/
def resourceOffers(offers: Seq[WorkerOffer]): Seq[Seq[TaskDescription]] = synchronized {
SparkEnv.set(sc.env)
// Mark each slave as alive and remember its hostname
for (o <- offers) {
executorIdToHost(o.executorId) = o.host
if (!executorsByHost.contains(o.host)) {
executorsByHost(o.host) = new HashSet[String]()
executorGained(o.executorId, o.host)
}
}
// Build a list of tasks to assign to each worker
val tasks = offers.map(o => new ArrayBuffer[TaskDescription](o.cores))
val availableCpus = offers.map(o => o.cores).toArray
val sortedTaskSets = rootPool.getSortedTaskSetQueue()
for (taskSet <- sortedTaskSets) {
logDebug("parentName: %s, name: %s, runningTasks: %s".format(
taskSet.parent.name, taskSet.name, taskSet.runningTasks))
}
// Take each TaskSet in our scheduling order, and then offer it each node in increasing order
// of locality levels so that it gets a chance to launch local tasks on all of them.
var launchedTask = false
for (taskSet <- sortedTaskSets; maxLocality <- TaskLocality.values) {
do {
launchedTask = false
for (i <- 0 until offers.size) {
val execId = offers(i).executorId
val host = offers(i).host
for (task <- taskSet.resourceOffer(execId, host, availableCpus(i), maxLocality)) {
tasks(i) += task
val tid = task.taskId
taskIdToTaskSetId(tid) = taskSet.taskSet.id
taskIdToExecutorId(tid) = execId
activeExecutorIds += execId
executorsByHost(host) += execId
availableCpus(i) -= 1
launchedTask = true
}
}
} while (launchedTask)
}
if (tasks.size > 0) {
hasLaunchedTask = true
}
return tasks
}
def statusUpdate(tid: Long, state: TaskState, serializedData: ByteBuffer) {
var failedExecutor: Option[String] = None
synchronized {
try {
if (state == TaskState.LOST && taskIdToExecutorId.contains(tid)) {
// We lost this entire executor, so remember that it's gone
val execId = taskIdToExecutorId(tid)
if (activeExecutorIds.contains(execId)) {
removeExecutor(execId)
failedExecutor = Some(execId)
}
}
taskIdToTaskSetId.get(tid) match {
case Some(taskSetId) =>
if (TaskState.isFinished(state)) {
taskIdToTaskSetId.remove(tid)
taskIdToExecutorId.remove(tid)
}
activeTaskSets.get(taskSetId).foreach { taskSet =>
if (state == TaskState.FINISHED) {
taskSet.removeRunningTask(tid)
taskResultGetter.enqueueSuccessfulTask(taskSet, tid, serializedData)
} else if (Set(TaskState.FAILED, TaskState.KILLED, TaskState.LOST).contains(state)) {
taskSet.removeRunningTask(tid)
taskResultGetter.enqueueFailedTask(taskSet, tid, state, serializedData)
}
}
case None =>
logError(
("Ignoring update with state %s for TID %s because its task set is gone (this is " +
"likely the result of receiving duplicate task finished status updates)")
.format(state, tid))
}
} catch {
case e: Exception => logError("Exception in statusUpdate", e)
}
}
// Update the DAGScheduler without holding a lock on this, since that can deadlock
if (failedExecutor.isDefined) {
dagScheduler.executorLost(failedExecutor.get)
backend.reviveOffers()
}
}
def handleTaskGettingResult(taskSetManager: TaskSetManager, tid: Long) {
taskSetManager.handleTaskGettingResult(tid)
}
def handleSuccessfulTask(
taskSetManager: TaskSetManager,
tid: Long,
taskResult: DirectTaskResult[_]) = synchronized {
taskSetManager.handleSuccessfulTask(tid, taskResult)
}
def handleFailedTask(
taskSetManager: TaskSetManager,
tid: Long,
taskState: TaskState,
reason: TaskEndReason) = synchronized {
taskSetManager.handleFailedTask(tid, taskState, reason)
if (!taskSetManager.isZombie && taskState != TaskState.KILLED) {
// Need to revive offers again now that the task set manager state has been updated to
// reflect failed tasks that need to be re-run.
backend.reviveOffers()
}
}
def error(message: String) {
synchronized {
if (activeTaskSets.size > 0) {
// Have each task set throw a SparkException with the error
for ((taskSetId, manager) <- activeTaskSets) {
try {
manager.abort(message)
} catch {
case e: Exception => logError("Exception in error callback", e)
}
}
} else {
// No task sets are active but we still got an error. Just exit since this
// must mean the error is during registration.
// It might be good to do something smarter here in the future.
logError("Exiting due to error from cluster scheduler: " + message)
System.exit(1)
}
}
}
override def stop() {
if (backend != null) {
backend.stop()
}
if (taskResultGetter != null) {
taskResultGetter.stop()
}
// Sleep for an arbitrary 1 second to ensure that messages are sent out.
Thread.sleep(1000L)
}
override def defaultParallelism() = backend.defaultParallelism()
// Check for speculatable tasks in all our active jobs.
def checkSpeculatableTasks() {
var shouldRevive = false
synchronized {
shouldRevive = rootPool.checkSpeculatableTasks()
}
if (shouldRevive) {
backend.reviveOffers()
}
}
def executorLost(executorId: String, reason: ExecutorLossReason) {
var failedExecutor: Option[String] = None
synchronized {
if (activeExecutorIds.contains(executorId)) {
val hostPort = executorIdToHost(executorId)
logError("Lost executor %s on %s: %s".format(executorId, hostPort, reason))
removeExecutor(executorId)
failedExecutor = Some(executorId)
} else {
// We may get multiple executorLost() calls with different loss reasons. For example, one
// may be triggered by a dropped connection from the slave while another may be a report
// of executor termination from Mesos. We produce log messages for both so we eventually
// report the termination reason.
logError("Lost an executor " + executorId + " (already removed): " + reason)
}
}
// Call dagScheduler.executorLost without holding the lock on this to prevent deadlock
if (failedExecutor.isDefined) {
dagScheduler.executorLost(failedExecutor.get)
backend.reviveOffers()
}
}
/** Remove an executor from all our data structures and mark it as lost */
private def removeExecutor(executorId: String) {
activeExecutorIds -= executorId
val host = executorIdToHost(executorId)
val execs = executorsByHost.getOrElse(host, new HashSet)
execs -= executorId
if (execs.isEmpty) {
executorsByHost -= host
}
executorIdToHost -= executorId
rootPool.executorLost(executorId, host)
}
def executorGained(execId: String, host: String) {
dagScheduler.executorGained(execId, host)
}
def getExecutorsAliveOnHost(host: String): Option[Set[String]] = synchronized {
executorsByHost.get(host).map(_.toSet)
}
def hasExecutorsAliveOnHost(host: String): Boolean = synchronized {
executorsByHost.contains(host)
}
def isExecutorAlive(execId: String): Boolean = synchronized {
activeExecutorIds.contains(execId)
}
// By default, rack is unknown
def getRackForHost(value: String): Option[String] = None
}
private[spark] object TaskSchedulerImpl {
/**
* Used to balance containers across hosts.
*
* Accepts a map of hosts to resource offers for that host, and returns a prioritized list of
* resource offers representing the order in which the offers should be used. The resource
* offers are ordered such that we'll allocate one container on each host before allocating a
* second container on any host, and so on, in order to reduce the damage if a host fails.
*
* For example, given <h1, [o1, o2, o3]>, <h2, [o4]>, <h3, [o5, o6]>, returns
* [o1, o5, o4, o2, o6, o3]
*/
def prioritizeContainers[K, T] (map: HashMap[K, ArrayBuffer[T]]): List[T] = {
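// Worked trace of the scaladoc example: for h1 -> [o1, o2, o3], h2 -> [o4], h3 -> [o5, o6],
// keyList sorted by descending list size is [h1, h3, h2]; round 0 emits o1, o5, o4,
// round 1 emits o2, o6, and round 2 emits o3, giving [o1, o5, o4, o2, o6, o3].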
val _keyList = new ArrayBuffer[K](map.size)
_keyList ++= map.keys
// order keyList based on population of value in map
val keyList = _keyList.sortWith(
(left, right) => map(left).size > map(right).size
)
val retval = new ArrayBuffer[T](keyList.size * 2)
var index = 0
var found = true
while (found) {
found = false
for (key <- keyList) {
val containerList: ArrayBuffer[T] = map.get(key).getOrElse(null)
assert(containerList != null)
// Get the index'th entry for this host - if present
if (index < containerList.size){
retval += containerList.apply(index)
found = true
}
}
index += 1
}
retval.toList
}
}
|
iiisthu/sparkSdn
|
core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
|
Scala
|
apache-2.0
| 16,827
|
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.internal
import java.util.concurrent.ConcurrentLinkedQueue
import org.mongodb.scala.{Observable, Observer, Subscription}
private[scala] case class ZipObservable[T, U](
observable1: Observable[T],
observable2: Observable[U]
) extends Observable[(T, U)] {
def subscribe(observer: Observer[_ >: (T, U)]): Unit = {
val helper = SubscriptionHelper(observer)
observable1.subscribe(SubscriptionCheckingObserver(helper.createFirstObserver))
observable2.subscribe(SubscriptionCheckingObserver(helper.createSecondObserver))
}
case class SubscriptionHelper(observer: Observer[_ >: (T, U)]) {
private val thisQueue: ConcurrentLinkedQueue[(Long, T)] = new ConcurrentLinkedQueue[(Long, T)]()
private val thatQueue: ConcurrentLinkedQueue[(Long, U)] = new ConcurrentLinkedQueue[(Long, U)]()
@volatile private var terminated: Boolean = false
@volatile private var observable1Subscription: Option[Subscription] = None
@volatile private var observable2Subscription: Option[Subscription] = None
def createFirstObserver: Observer[T] = createSubObserver[T](thisQueue, observer, firstSub = true)
def createSecondObserver: Observer[U] = createSubObserver[U](thatQueue, observer, firstSub = false)
private def createSubObserver[A](queue: ConcurrentLinkedQueue[(Long, A)], observer: Observer[_ >: (T, U)], firstSub: Boolean): Observer[A] = {
new Observer[A] {
@volatile private var counter: Long = 0
override def onError(throwable: Throwable): Unit = {
terminated = true
observer.onError(throwable)
}
override def onSubscribe(subscription: Subscription): Unit = {
if (firstSub) {
observable1Subscription = Some(subscription)
} else {
observable2Subscription = Some(subscription)
}
if (observable1Subscription.nonEmpty && observable2Subscription.nonEmpty) {
observer.onSubscribe(jointSubscription)
}
}
override def onComplete(): Unit = {
if (!firstSub) {
terminated = true
observer.onComplete()
}
}
override def onNext(tResult: A): Unit = {
counter += 1
queue.add((counter, tResult))
if (!firstSub) processNext(observer)
}
}
}
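// Each sub-observer tags its elements with a per-source sequence number. processNext emits
// a zipped pair only when the heads of both queues carry the same sequence number; otherwise
// one side is lagging and one more element is requested to keep the zip moving.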
private def processNext(observer: Observer[_ >: (T, U)]): Unit = {
(thisQueue.peek, thatQueue.peek) match {
case ((k1: Long, _), (k2: Long, _)) if k1 == k2 => observer.onNext((thisQueue.poll()._2, thatQueue.poll()._2))
case _ => if (!terminated && !jointSubscription.isUnsubscribed) jointSubscription.request(1) // Uneven queues request more data
// from the upstream observables to honor the original request for data.
}
}
private val jointSubscription: Subscription = new Subscription() {
var subscribed: Boolean = true
override def isUnsubscribed: Boolean = !subscribed
override def request(n: Long): Unit = {
observable1Subscription.foreach(_.request(n))
observable2Subscription.foreach(_.request(n))
}
override def unsubscribe(): Unit = {
subscribed = false
observable1Subscription.foreach(_.unsubscribe())
observable2Subscription.foreach(_.unsubscribe())
}
}
}
}
|
rozza/mongo-scala-driver
|
driver/src/main/scala/org/mongodb/scala/internal/ZipObservable.scala
|
Scala
|
apache-2.0
| 3,962
|
/*
Copyright (C) 2013-2019 Expedia Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hotels.styx.proxy.https
import com.github.tomakehurst.wiremock.client.WireMock._
import com.hotels.styx.api.HttpHeaderNames.{X_FORWARDED_PROTO, _}
import com.hotels.styx.api.HttpRequest
import com.hotels.styx.api.HttpResponseStatus.OK
import com.hotels.styx.infrastructure.HttpResponseImplicits
import com.hotels.styx.support.ResourcePaths.fixturesHome
import com.hotels.styx.support.backends.FakeHttpServer
import com.hotels.styx.support.configuration._
import com.hotels.styx.{SSLSetup, StyxClientSupplier, StyxProxySpec}
import org.scalatest.{FunSpec, Matchers}
class HttpsSpec extends FunSpec
with StyxProxySpec
with HttpResponseImplicits
with StyxClientSupplier
with Matchers
with SSLSetup {
val crtFile = fixturesHome(this.getClass, "/ssl/testCredentials.crt").toString
val keyFile = fixturesHome(this.getClass, "/ssl/testCredentials.key").toString
override val styxConfig = StyxConfig(
ProxyConfig(
Connectors(
HttpConnectorConfig(),
HttpsConnectorConfig(
cipherSuites = Seq("TLS_RSA_WITH_AES_128_GCM_SHA256"),
certificateFile = crtFile,
certificateKeyFile = keyFile))
)
)
val recordingBackend = FakeHttpServer.HttpsStartupConfig().start()
override protected def afterAll() = {
recordingBackend.stop()
super.afterAll()
}
describe("Terminating https") {
recordingBackend.stub(urlPathEqualTo("/secure"), aResponse.withStatus(200))
it("should set the X-Forward-Proto header for https request") {
styxServer.setBackends("/secure" -> HttpsBackend(
"https-app", Origins(recordingBackend), TlsSettings()
))
val req = HttpRequest.get("/secure")
.header(HOST, styxServer.httpsProxyHost)
.build()
val resp = decodedRequest(req, secure = true)
resp.status() should be(OK)
recordingBackend.verify(getRequestedFor(urlPathEqualTo("/secure"))
.withHeader(X_FORWARDED_PROTO.toString, equalTo("https"))
.withHeader("Host", equalTo(styxServer.httpsProxyHost))
)
}
}
}
|
mikkokar/styx
|
system-tests/e2e-suite/src/test/scala/com/hotels/styx/proxy/https/HttpsSpec.scala
|
Scala
|
apache-2.0
| 2,656
|
package org.jetbrains.plugins.scala
package decompiler
import java.io.IOException
import com.intellij.lang.LanguageParserDefinitions
import com.intellij.openapi.project.{DefaultProjectFactory, Project, ProjectManager}
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.compiled.ClsStubBuilder
import com.intellij.psi.stubs.{PsiFileStub, PsiFileStubImpl}
import com.intellij.psi.tree.IStubFileElementType
import com.intellij.psi.{PsiFile, PsiManager}
import com.intellij.util.indexing.FileContent
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.StubVersion
import scala.annotation.tailrec
import scala.reflect.NameTransformer
/**
* @author ilyas
*/
object ScClsStubBuilder {
def canBeProcessed(file: VirtualFile): Boolean = {
try {
canBeProcessed(file, file.contentsToByteArray())
} catch {
case ex: IOException => false
case u: UnsupportedOperationException => false // why do we need to handle this?
}
}
private def canBeProcessed(file: VirtualFile, bytes: => Array[Byte]): Boolean = {
if (DecompilerUtil.isScalaFile(file, bytes)) return true
val fileName: String = file.getNameWithoutExtension
val parent = file.getParent
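// For a class file that is not itself recognized as Scala-compiled (e.g. Foo$Bar$Baz.class),
// walk the name prefixes split at '$' and check whether a sibling "<prefix>.class" is a
// Scala-compiled file; if so, this class file is treated as Scala output and can be processed.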
def split(str: String): Option[(String, String)] = {
val index = str.indexOf('$')
if (index == -1) None
else Some(str.substring(0, index), str.substring(index + 1, str.length))
}
@tailrec
def go(prefix: String, suffix: String): Boolean = {
if (!prefix.endsWith("$")) {
val child = parent.findChild(prefix + ".class")
if (child != null && DecompilerUtil.isScalaFile(child)) return true
}
split(suffix) match {
case Some((suffixPrefix, suffixSuffix)) => go(prefix + "$" + suffixPrefix, suffixSuffix)
case _ => false
}
}
split(fileName) match {
case Some((prefix, suffix)) => go(prefix, suffix)
case _ => false
}
}
}
class ScClsStubBuilder extends ClsStubBuilder {
override def getStubVersion: Int = StubVersion.STUB_VERSION
override def buildFileStub(content: FileContent): PsiFileStub[ScalaFile] = {
if (isInnerClass(content.getFile)) null
else buildFileStub(content.getFile, content.getContent, ProjectManager.getInstance().getDefaultProject)
}
private def buildFileStub(vFile: VirtualFile, bytes: Array[Byte], project: Project): PsiFileStub[ScalaFile] = {
val result = DecompilerUtil.decompile(vFile, bytes)
val source = result.sourceName
val text = result.sourceText
val file = ScalaPsiElementFactory.createScalaFile(text.replace("\r", ""),
PsiManager.getInstance(DefaultProjectFactory.getInstance().getDefaultProject))
val adj = file.asInstanceOf[CompiledFileAdjuster]
adj.setCompiled(c = true)
adj.setSourceFileName(source)
adj.setVirtualFile(vFile)
val fType = LanguageParserDefinitions.INSTANCE.forLanguage(ScalaFileType.SCALA_LANGUAGE).getFileNodeType
val stub = fType.asInstanceOf[IStubFileElementType[PsiFileStub[PsiFile]]].getBuilder.buildStubTree(file)
stub.asInstanceOf[PsiFileStubImpl[PsiFile]].clearPsi("Stub was built from decompiled file")
stub.asInstanceOf[PsiFileStub[ScalaFile]]
}
private def isInnerClass(file: VirtualFile): Boolean = {
if (file.getExtension != "class") return false
val name: String = file.getNameWithoutExtension
val parent: VirtualFile = file.getParent
isInner(name, new ParentDirectory(parent))
}
private def isInner(name: String, directory: Directory): Boolean = {
if (name.endsWith("$") && directory.contains(name.dropRight(1))) {
return false //let's handle it separately to avoid giving it for Java.
}
isInner(NameTransformer.decode(name), 0, directory)
}
@tailrec
private def isInner(name: String, from: Int, directory: Directory): Boolean = {
val index: Int = name.indexOf('$', from)
index != -1 && (containsPart(directory, name, index) || isInner(name, index + 1, directory))
}
private def containsPart(directory: Directory, name: String, endIndex: Int): Boolean = {
endIndex > 0 && directory.contains(name.substring(0, endIndex))
}
private trait Directory {
def contains(name: String): Boolean
}
private class ParentDirectory(dir: VirtualFile) extends Directory {
def contains(name: String): Boolean = {
if (dir == null) return false
!dir.getChildren.forall(child =>
child.getExtension != "class" || NameTransformer.decode(child.getNameWithoutExtension) == name
)
}
}
}
|
whorbowicz/intellij-scala
|
src/org/jetbrains/plugins/scala/decompiler/ScClsStubBuilder.scala
|
Scala
|
apache-2.0
| 4,664
|
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.ahc
import scala.util.control.NonFatal
import io.gatling.core.CoreComponents
import io.gatling.core.session._
import io.gatling.core.util.NameGen
import io.gatling.http.HeaderNames._
import io.gatling.http.HeaderValues._
import io.gatling.http.fetch.ResourceFetcher
import io.gatling.http.protocol.{ HttpComponents, HttpProtocol }
import io.gatling.http.request.builder.Http
import io.gatling.http.resolver.DelegatingNameResolver
import io.gatling.http.util.HttpTypeHelper
import akka.actor.ActorSystem
import com.typesafe.scalalogging.StrictLogging
import io.netty.resolver.dns.DefaultDnsCache
import org.asynchttpclient.{ AsyncHttpClient, RequestBuilder }
object HttpEngine {
val AhcAttributeName = SessionPrivateAttributes.PrivateAttributePrefix + "http.ahc"
def apply(system: ActorSystem, coreComponents: CoreComponents): HttpEngine =
new HttpEngine(system, coreComponents, AhcFactory(system, coreComponents))
}
class HttpEngine(
system: ActorSystem,
protected val coreComponents: CoreComponents,
ahcFactory: AhcFactory
)
extends ResourceFetcher with NameGen with StrictLogging {
private[this] lazy val dnsResolver = ahcFactory.newNameResolver()
def newDnsResolver: DelegatingNameResolver = DelegatingNameResolver(dnsResolver, new DefaultDnsCache)
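// When the protocol shares a single client, every virtual user reuses the default AHC
// instance; otherwise each session lazily creates its own client and caches it under the
// private "http.ahc" session attribute so subsequent requests of that user reuse it.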
def httpClient(session: Session, httpProtocol: HttpProtocol): (Session, AsyncHttpClient) =
if (httpProtocol.enginePart.shareClient) {
(session, ahcFactory.defaultAhc)
} else {
// import optimized TypeCaster
import HttpTypeHelper._
session(HttpEngine.AhcAttributeName).asOption[AsyncHttpClient] match {
case Some(client) => (session, client)
case _ =>
val httpClient = ahcFactory.newAhc(session)
(session.set(HttpEngine.AhcAttributeName, httpClient), httpClient)
}
}
private[this] var warmedUp = false
def warmpUp(httpComponents: HttpComponents): Unit =
if (!warmedUp) {
logger.info("Start warm up")
warmedUp = true
import httpComponents._
if (httpProtocol.enginePart.perUserNameResolution) {
// eager load
val unused = dnsResolver
}
httpProtocol.warmUpUrl match {
case Some(url) =>
val requestBuilder = new RequestBuilder().setUrl(url)
.setHeader(Accept, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
.setHeader(AcceptLanguage, "en-US,en;q=0.5")
.setHeader(AcceptEncoding, "gzip")
.setHeader(Connection, KeepAlive)
.setHeader(UserAgent, "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20100101 Firefox/16.0")
.setRequestTimeout(1000)
httpProtocol.proxyPart.proxy.foreach(requestBuilder.setProxyServer)
try {
ahcFactory.defaultAhc.executeRequest(requestBuilder.build).get
} catch {
case NonFatal(e) => logger.info(s"Couldn't execute warm up request $url", e)
}
case _ =>
val expression = "foo".expressionSuccess
implicit val protocol = this
new Http(expression)
.get(expression)
.header("bar", expression)
.queryParam(expression, expression)
.build(coreComponents, httpComponents, throttled = false)
new Http(expression)
.post(expression)
.header("bar", expression)
.formParam(expression, expression)
.build(coreComponents, httpComponents, throttled = false)
}
logger.info("Warm up done")
}
}
|
timve/gatling
|
gatling-http/src/main/scala/io/gatling/http/ahc/HttpEngine.scala
|
Scala
|
apache-2.0
| 4,258
|
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.JsonSubTypes.Type
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonSubTypes}
import com.twitter.conversions.time._
import com.twitter.finagle.Stack
import com.twitter.finagle.loadbalancer.LoadBalancerFactory.EnableProbation
import com.twitter.finagle.loadbalancer.{Balancers, LoadBalancerFactory}
import io.buoyant.config.PolymorphicConfig
@JsonSubTypes(Array(
new Type(value = classOf[P2C], name = "p2c"),
new Type(value = classOf[P2CEwma], name = "ewma"),
new Type(value = classOf[Aperture], name = "aperture"),
new Type(value = classOf[Heap], name = "heap"),
new Type(value = classOf[RoundRobin], name = "roundRobin")
))
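// A hypothetical config fragment that would deserialize into P2CEwma below, assuming the
// enclosing client configuration exposes this polymorphic section with a `kind`
// discriminator (the exact YAML location depends on the router/client configuration):
//
//   loadBalancer:
//     kind: ewma
//     decayTimeMs: 10000
//     maxEffort: 5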
abstract class LoadBalancerConfig extends PolymorphicConfig {
val factory: LoadBalancerFactory
val enableProbation: Option[Boolean] = None
@JsonIgnore
def clientParams = Stack.Params.empty + LoadBalancerFactory.Param(factory) +
LoadBalancerFactory.EnableProbation(enableProbation.getOrElse(false))
}
case class P2C(maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.p2c(maxEffort.getOrElse(Balancers.MaxEffort))
}
case class P2CEwma(decayTimeMs: Option[Int], maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.p2cPeakEwma(
decayTime = decayTimeMs.map(_.millis).getOrElse(10.seconds),
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort)
)
}
case class Aperture(
smoothWindowMs: Option[Int],
maxEffort: Option[Int],
lowLoad: Option[Double],
highLoad: Option[Double],
minAperture: Option[Int]
) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.aperture(
smoothWin = smoothWindowMs.map(_.millis).getOrElse(5.seconds),
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort),
lowLoad = lowLoad.getOrElse(0.5),
highLoad = highLoad.getOrElse(2.0),
minAperture = minAperture.getOrElse(1)
)
}
class Heap extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.heap()
}
case class RoundRobin(maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.roundRobin(
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort)
)
}
|
hhtpcd/linkerd
|
linkerd/core/src/main/scala/io/buoyant/linkerd/LoadBalancerConfig.scala
|
Scala
|
apache-2.0
| 2,237
|
package pureconfig
import com.typesafe.config._
import pureconfig.error.{CannotConvert, KeyNotFound, WrongType}
class ConfigCursorSuite extends BaseSuite {
val defaultPath = List("key2", "key1")
val defaultPathStr = "key1.key2"
def conf(confStr: String): ConfigValue = {
ConfigFactory.parseString(s"aux = $confStr").root.get("aux")
}
def cursor(confStr: String, pathElems: List[String] = defaultPath): ConfigCursor =
ConfigCursor(conf(confStr), pathElems)
behavior of "ConfigCursor"
it should "provide a correct path string" in {
cursor("abc").path shouldBe defaultPathStr
cursor("abc", Nil).path shouldBe ""
}
it should "allow being casted to string in a safe way" in {
cursor("abc").asString shouldBe Right("abc")
cursor("4").asString shouldBe Right("4")
cursor("true").asString shouldBe Right("true")
cursor("null").asString should failWith(
WrongType(ConfigValueType.NULL, Set(ConfigValueType.STRING)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("[1, 2]").asString should failWith(
WrongType(ConfigValueType.LIST, Set(ConfigValueType.STRING)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("{ a: 1, b: 2 }").asString should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.STRING)),
defaultPathStr,
stringConfigOrigin(1)
)
}
it should "allow being cast to boolean in a safe way" in {
cursor("true").asBoolean shouldBe Right(true)
cursor("false").asBoolean shouldBe Right(false)
cursor("abc").asBoolean should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.BOOLEAN)))
cursor("1").asBoolean should failWith(WrongType(ConfigValueType.NUMBER, Set(ConfigValueType.BOOLEAN)))
cursor("TRUE").asBoolean should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.BOOLEAN)))
}
it should "allow being cast to long in a safe way" in {
cursor("3").asLong shouldBe Right(3L)
cursor("abc").asLong should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)))
cursor("true").asLong should failWith(WrongType(ConfigValueType.BOOLEAN, Set(ConfigValueType.NUMBER)))
cursor("1.1").asLong should failWith(CannotConvert("1.1", "Long", "Unable to convert Number to Long"))
}
it should "allow being cast to int in a safe way" in {
cursor("3").asInt shouldBe Right(3)
cursor("abc").asInt should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)))
cursor("true").asInt should failWith(WrongType(ConfigValueType.BOOLEAN, Set(ConfigValueType.NUMBER)))
cursor("1.1").asInt should failWith(CannotConvert("1.1", "Int", "Unable to convert Number to Int"))
}
it should "allow being cast to short in a safe way" in {
cursor("3").asShort shouldBe Right(3)
cursor("abc").asShort should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)))
cursor("true").asShort should failWith(WrongType(ConfigValueType.BOOLEAN, Set(ConfigValueType.NUMBER)))
cursor("1.1").asShort should failWith(CannotConvert("1.1", "Short", "Unable to convert Number to Short"))
}
it should "allow being cast to double in a safe way" in {
cursor("3").asDouble shouldBe Right(3.0)
cursor("3.1").asDouble shouldBe Right(3.1)
cursor("21412415121234567L").asDouble should failWith(
WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER))
)
cursor("abc").asDouble should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)))
cursor("true").asDouble should failWith(WrongType(ConfigValueType.BOOLEAN, Set(ConfigValueType.NUMBER)))
}
it should "allow being cast to float in a safe way" in {
cursor("3").asFloat shouldBe Right(3.0)
cursor("1.1").asFloat shouldBe Right(1.1f)
cursor("abc").asFloat should failWith(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)))
cursor("true").asFloat should failWith(WrongType(ConfigValueType.BOOLEAN, Set(ConfigValueType.NUMBER)))
}
it should "allow being casted to a list cursor in a safe way" in {
cursor("abc").asListCursor should failWith(
WrongType(ConfigValueType.STRING, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("[1, 2]").asListCursor shouldBe
Right(ConfigListCursor(conf("[1, 2]").asInstanceOf[ConfigList], defaultPath))
cursor("{ a: 1, b: 2 }").asListCursor should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("{ 0: a, 1: b }").asListCursor shouldBe
Right(ConfigListCursor(conf("""["a", "b"]""").asInstanceOf[ConfigList], defaultPath))
cursor("{ 10: a, 3: b }").asListCursor shouldBe
Right(ConfigListCursor(conf("""["b", "a"]""").asInstanceOf[ConfigList], defaultPath))
cursor("{ 1: a, c: b }").asListCursor shouldBe
Right(ConfigListCursor(conf("""["a"]""").asInstanceOf[ConfigList], defaultPath))
cursor("{}").asListCursor should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
}
it should "allow being casted to a list of cursors in a safe way" in {
cursor("abc").asList should failWith(
WrongType(ConfigValueType.STRING, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("[1, 2]").asList shouldBe
Right(List(cursor("1", "0" :: defaultPath), cursor("2", "1" :: defaultPath)))
cursor("{ a: 1, b: 2 }").asList should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("{ 3: a, 10: b }").asList shouldBe
Right(List(cursor("a", "0" :: defaultPath), cursor("b", "1" :: defaultPath)))
cursor("{ 1: a, c: b }").asList shouldBe
Right(List(cursor("a", "0" :: defaultPath)))
cursor("{}").asList should failWith(
WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)),
defaultPathStr,
stringConfigOrigin(1)
)
}
it should "allow being casted to an object cursor in a safe way" in {
cursor("abc").asObjectCursor should failWith(
WrongType(ConfigValueType.STRING, Set(ConfigValueType.OBJECT)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("[1, 2]").asObjectCursor should failWith(
WrongType(ConfigValueType.LIST, Set(ConfigValueType.OBJECT)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("{ a: 1, b: 2 }").asObjectCursor shouldBe
Right(ConfigObjectCursor(conf("{ a: 1, b: 2 }").asInstanceOf[ConfigObject], defaultPath))
}
it should "allow being casted to a map of cursors in a safe way" in {
cursor("abc").asMap should failWith(
WrongType(ConfigValueType.STRING, Set(ConfigValueType.OBJECT)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("[1, 2]").asMap should failWith(
WrongType(ConfigValueType.LIST, Set(ConfigValueType.OBJECT)),
defaultPathStr,
stringConfigOrigin(1)
)
cursor("{ a: 1, b: 2 }").asMap shouldBe
Right(Map("a" -> cursor("1", "a" :: defaultPath), "b" -> cursor("2", "b" :: defaultPath)))
}
it should "handle in a safe way cursors to undefined values" in {
val cur = ConfigCursor(None, defaultPath)
cur.path shouldBe defaultPathStr
cur.isUndefined shouldBe true
cur.isNull shouldBe false
cur.asString should failWithReason[KeyNotFound]
cur.asListCursor should failWithReason[KeyNotFound]
cur.asList should failWithReason[KeyNotFound]
cur.asObjectCursor should failWithReason[KeyNotFound]
cur.asMap should failWithReason[KeyNotFound]
}
behavior of "ConfigListCursor"
def listCursor(confStr: String, pathElems: List[String] = defaultPath): ConfigListCursor =
cursor(confStr, pathElems).asListCursor.right.get
it should "have correct isEmpty and size methods" in {
listCursor("[1, 2]").isEmpty shouldBe false
listCursor("[]").isEmpty shouldBe true
listCursor("[1, 2]").size shouldBe 2
listCursor("[]").size shouldBe 0
}
it should "allow access to a given index in a safe way" in {
listCursor("[1, 2]").atIndex(0) shouldBe Right(cursor("1", "0" :: defaultPath))
listCursor("[1, 2]").atIndex(1) shouldBe Right(cursor("2", "1" :: defaultPath))
listCursor("[1, 2]").atIndex(2) should failWith(KeyNotFound("2", Set()), defaultPathStr, stringConfigOrigin(1))
}
it should "allow access to a given index returning an undefined value cursor on out-of-range indices" in {
listCursor("[1, 2]").atIndexOrUndefined(0) shouldBe cursor("1", "0" :: defaultPath)
listCursor("[1, 2]").atIndexOrUndefined(1) shouldBe cursor("2", "1" :: defaultPath)
listCursor("[1, 2]").atIndexOrUndefined(2).isUndefined shouldBe true
}
it should "provide a tailOption method that keeps the absolute paths correct" in {
listCursor("[1, 2]").tailOption shouldBe Some(listCursor("[2]").copy(offset = 1))
listCursor("[1, 2]").tailOption.get.atIndex(0) shouldBe Right(cursor("2", "1" :: defaultPath))
listCursor("[1, 2]").tailOption.get
.atIndex(1) should failWith(KeyNotFound("2", Set()), defaultPathStr, stringConfigOrigin(1))
listCursor("[]").tailOption shouldBe None
}
it should "provide a direct conversion to a list of cursors" in {
listCursor("[1, 2]").list shouldBe List(cursor("1", "0" :: defaultPath), cursor("2", "1" :: defaultPath))
listCursor("[]").list shouldBe Nil
}
it should "retain the correct offset after calling the asListCursor method" in {
listCursor("[1, 2]").tailOption.map(_.asListCursor) shouldBe (Some(Right(listCursor("[2]").copy(offset = 1))))
}
behavior of "ConfigObjectCursor"
def objCursor(confStr: String, pathElems: List[String] = defaultPath): ConfigObjectCursor =
cursor(confStr, pathElems).asObjectCursor.right.get
it should "have correct isEmpty and size methods" in {
objCursor("{ a: 1, b: 2 }").isEmpty shouldBe false
objCursor("{}").isEmpty shouldBe true
objCursor("{ a: 1, b: 2 }").size shouldBe 2
objCursor("{}").size shouldBe 0
}
it should "provide the list of keys in the object" in {
objCursor("{ a: 1, b: 2 }").keys.toSet shouldBe Set("a", "b")
objCursor("{}").keys.toSet shouldBe Set.empty
}
it should "allow access to a given key in a safe way" in {
objCursor("{ a: 1, b: 2 }").atKey("a") shouldBe Right(cursor("1", "a" :: defaultPath))
objCursor("{ a: 1, b: 2 }")
.atKey("c") should failWith(KeyNotFound("c", Set()), defaultPathStr, stringConfigOrigin(1))
}
it should "allow access to a given key returning an undefined value cursor on non-existing keys" in {
objCursor("{ a: 1, b: 2 }").atKeyOrUndefined("a") shouldBe cursor("1", "a" :: defaultPath)
objCursor("{ a: 1, b: 2 }").atKeyOrUndefined("c").isUndefined shouldBe true
}
it should "provide a correct withoutKey method" in {
objCursor("{ a: 1, b: 2 }").withoutKey("a") shouldBe objCursor("{ b: 2 }")
objCursor("{ a: 1, b: 2 }").withoutKey("c") shouldBe objCursor("{ a: 1, b: 2 }")
}
it should "provide a direct conversion to a map of cursors" in {
objCursor("{ a: 1, b: 2 }").map shouldBe Map(
"a" -> cursor("1", "a" :: defaultPath),
"b" -> cursor("2", "b" :: defaultPath)
)
objCursor("{}").map shouldBe Map.empty
}
}
|
pureconfig/pureconfig
|
tests/src/test/scala/pureconfig/ConfigCursorSuite.scala
|
Scala
|
mpl-2.0
| 11,430
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.mongo
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import reactivemongo.api.commands.Command
import reactivemongo.api.indexes.{Index, IndexType}
import reactivemongo.api.{BSONSerializationPack, FailoverStrategy, ReadPreference}
import reactivemongo.bson.{BSONBoolean, BSONDocument, BSONValue}
class FailOnUnindexedQueriesSpec
extends AnyWordSpec
with Matchers
with FailOnUnindexedQueries
with MongoSpecSupport
with ScalaFutures
with IntegrationPatience {
import scala.concurrent.ExecutionContext.Implicits.global
"FailOnUnindexedQueries" should {
"cause an exception be thrown when a query on unindexed property is performed" in {
testCollection.insert(ordered = false).one(BSONDocument("unidexed" -> "value")).futureValue
// we shouldn't get a match error on extracting the result
val NotableScanError(_) = testCollection.find(BSONDocument("unidexed" -> "value"), projection = None).one.failed.futureValue
}
"cause no exception be thrown when a query on indexed property is performed" in {
testCollection.indexesManager.create(Index(Seq("indexed" -> IndexType.Ascending))).futureValue
testCollection.insert(ordered = false).one(BSONDocument("indexed" -> "value")).futureValue
testCollection.find(BSONDocument("indexed" -> "value"), projection = None).one.futureValue should not be empty
}
}
"beforeAll" should {
"set parameter 'notablescan' on 'admin' database" in new ReadNotableScanValue {
beforeAll()
maybeNotableScanValue shouldBe Some(true)
}
}
"afterAll" should {
"unset parameter 'notablescan' on 'admin' database" in new ReadNotableScanValue {
beforeAll()
afterAll()
maybeNotableScanValue shouldBe Some(false)
}
}
private trait ReadNotableScanValue {
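// Reads the current value of the server-wide 'notablescan' parameter by running the
// getParameter command against the 'admin' database, so the tests can assert that
// beforeAll/afterAll toggled it.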
private val runner = Command.run(BSONSerializationPack, FailoverStrategy())
def maybeNotableScanValue: Option[Boolean] =
(for {
adminDb <- mongo().connection.database("admin")
maybeNotableScanValue <- runner(
adminDb,
runner.rawCommand(BSONDocument("getParameter" -> 1, "notablescan" -> 1)))
.one[BSONDocument](ReadPreference.primaryPreferred)
.map(_.get("notablescan"))
.map(_.map(toBoolean))
} yield maybeNotableScanValue
).futureValue
private val toBoolean: BSONValue => Boolean = {
case BSONBoolean(boolean) => boolean
case other => throw new IllegalStateException(s"Boolean expected but got $other")
}
}
private lazy val collectionName = "test-collection"
private lazy val testCollection = bsonCollection(collectionName)()
override protected def afterAll(): Unit = {
super.afterAll()
dropTestCollection(collectionName)
}
}
|
hmrc/reactivemongo-test
|
src/test/scala/uk/gov/hmrc/mongo/FailOnUnindexedQueriesSpec.scala
|
Scala
|
apache-2.0
| 3,654
|
package ch.epfl.bigdata.btc.crawler.btc
import org.apache.http.client.fluent._;
object RequestFactory {
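// Test seam around the Apache HttpClient fluent API: when a mock Request has been installed
// via setInstance, Get/Post return it instead of building a real request, which lets unit
// tests stub out network calls.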
var mock: Request = _
def Get(uri: String) : Request = mock match {
case _ : Request => mock
case _ => Request.Get(uri)
}
def Post(uri: String) : Request = mock match {
case _ : Request => mock
case _ => Request.Post(uri)
}
def setInstance(inst: Request) = mock = inst
}
|
cheseaux/BitcoinTradingSystem
|
crawl-framework/src/main/scala/ch/epfl/bigdata/btc/crawler/btc/RequestFactory.scala
|
Scala
|
gpl-2.0
| 402
|
package example.gql_server
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter
import java.util.Date
import sangria.ast.StringValue
import sangria.marshalling.DateSupport
import sangria.schema._
import sangria.validation.ValueCoercionViolation
import scala.util.{Failure, Success, Try}
package object schema {
val Id = Argument("id", IntType)
case object ZonedDateTimeCoercionViolation
extends ValueCoercionViolation("ZonedDateTime value expected")
def parseZonedDateTime(s: String) =
Try(ZonedDateTime.parse(s)) match {
case Success(date) => Right(date)
case Failure(_) => Left(ZonedDateTimeCoercionViolation)
}
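// Custom GraphQL scalar for ZonedDateTime: coerceOutput serializes values (as java.util.Date
// when the marshaller supports dates, otherwise as a formatted string), while coerceUserInput
// and coerceInput parse strings coming from variables and from literal AST nodes respectively.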
@SuppressWarnings(Array("org.wartremover.warts.Any"))
implicit val ZonedDateTimeType = ScalarType[ZonedDateTime](
"ZonedDateTime",
coerceOutput = (d, caps) =>
if (caps.contains(DateSupport)) Date.from(d.toInstant)
else DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXX'['VV']'").format(d),
coerceUserInput = {
case s: String => parseZonedDateTime(s)
case _ => Left(ZonedDateTimeCoercionViolation)
},
coerceInput = {
case StringValue(s, _, _, _, _) => parseZonedDateTime(s)
case _ => Left(ZonedDateTimeCoercionViolation)
}
)
}
|
t-mochizuki/scala-study
|
circleci-example/gql-server/src/main/scala/example/gql_server/schema/package.scala
|
Scala
|
mit
| 1,276
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import java.io.{IOException, ObjectInputStream}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path, PathFilter}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark.rdd.{RDD, UnionRDD}
import org.apache.spark.streaming._
import org.apache.spark.streaming.event.Event
import org.apache.spark.streaming.scheduler.StreamInputInfo
import org.apache.spark.util.{SerializableConfiguration, TimeStampedHashMap, Utils}
import scala.collection.mutable
import scala.reflect.ClassTag
/**
* This class represents an input stream that monitors a Hadoop-compatible filesystem for new
* files and creates a stream out of them. It works as follows.
*
* At each batch interval, the file system is queried for files in the given directory and
* detected new files are selected for that batch. In this case "new" means files that
* became visible to readers during that time period. Some extra care is needed to deal
* with the fact that files may become visible after they are created. For this purpose, this
* class remembers the information about the files selected in past batches for
* a certain duration (say, "remember window") as shown in the figure below.
*
* {{{
* |<----- remember window ----->|
* ignore threshold --->| |<--- current batch time
* |____.____.____.____.____.____|
* | | | | | | |
* ---------------------|----|----|----|----|----|----|-----------------------> Time
* |____|____|____|____|____|____|
* remembered batches
* }}}
*
* The trailing end of the window is the "ignore threshold" and all files whose mod times
* are less than this threshold are assumed to have already been selected and are therefore
* ignored. Files whose mod times are within the "remember window" are checked against files
* that have already been selected. At a high level, this is how new files are identified in
* each batch - files whose mod times are greater than the ignore threshold and
* have not been considered within the remember window. See the documentation on the method
* `isNewFile` for more details.
*
* This makes some assumptions about the underlying file system that the system is monitoring.
*
* - The clock of the file system is assumed to be synchronized with the clock of the machine running
* the streaming app.
* - If a file is to be visible in the directory listings, it must be visible within a certain
* duration of the mod time of the file. This duration is the "remember window", which is set to
* 1 minute (see `FileInputDStream.minRememberDuration`). Otherwise, the file will never be
* selected as the mod time will be less than the ignore threshold when it becomes visible.
* - Once a file is visible, the mod time cannot change. If it does due to appends, then the
* processing semantics are undefined.
*/
private[streaming]
class FileInputDStream[K, V, F <: NewInputFormat[K, V]](
_ssc: StreamingContext,
directory: String,
filter: Path => Boolean = FileInputDStream.defaultFilter,
newFilesOnly: Boolean = true,
conf: Option[Configuration] = None)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F])
extends InputDStream[(K, V)](_ssc) {
private val serializableConfOpt = conf.map(new SerializableConfiguration(_))
/**
* Minimum duration of remembering the information of selected files. Defaults to 60 seconds.
*
* Files with mod times older than this "window" of remembering will be ignored. So if new
* files are visible within this window, then the file will get selected in the next batch.
*/
private val minRememberDurationS = {
Seconds(ssc.conf.getTimeAsSeconds("spark.streaming.fileStream.minRememberDuration",
ssc.conf.get("spark.streaming.minRememberDuration", "60s")))
}
// This is a def so that it works during checkpoint recovery:
private def clock = ssc.scheduler.clock
// Data to be saved as part of the streaming checkpoints
protected[streaming] override val checkpointData = new FileInputDStreamCheckpointData
// Initial ignore threshold based on which old, existing files in the directory (at the time of
// starting the streaming application) will be ignored or considered
private val initialModTimeIgnoreThreshold = if (newFilesOnly) clock.getTimeMillis() else 0L
/*
* Make sure that the information of files selected in the last few batches are remembered.
* This would allow us to filter away not-too-old files which have already been recently
* selected and processed.
*/
private val numBatchesToRemember = FileInputDStream
.calculateNumBatchesToRemember(slideDuration, minRememberDurationS)
private val durationToRemember = slideDuration * numBatchesToRemember
remember(durationToRemember)
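// For example, with a 10 second batch interval and the default 60 second minimum remember
// duration, roughly ceil(60 / 10) = 6 batches are remembered, i.e. about 60 seconds of
// selected-file history (the exact rounding is up to calculateNumBatchesToRemember).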
// Map of batch-time to selected file info for the remembered batches
// This is a concurrent map because it's also accessed in unit tests
@transient private[streaming] var batchEventToSelectedFiles =
new mutable.HashMap[Event, Array[String]]
// Set of files that were selected in the remembered batches
@transient private var recentlySelectedFiles = new mutable.HashSet[String]()
// Read-through cache of file mod times, used to speed up mod time lookups
@transient private var fileToModTime = new TimeStampedHashMap[String, Long](true)
// Timestamp of the last round of finding files
@transient private var lastNewFileFindingTime = 0L
@transient private var _path: Path = null
@transient private var _fs: FileSystem = null
override def start() { }
override def stop() { }
/**
* Finds the files that were modified since the last time this method was called and makes
* a union RDD out of them. Note that this maintains the list of files that were processed
* in the latest modification time in the previous call to this method. This is because the
* modification time returned by the FileStatus API seems to return times only at the
* granularity of seconds. New files may therefore have the same modification time as the
* latest modification time seen in the previous call to this method and yet not have been
* reported in that call.
*/
override def compute(event: Event): Option[RDD[(K, V)]] = {
// Find new files
val newFiles = findNewFiles(event.time.milliseconds)
logInfo("New files at time " + event + ":\n" + newFiles.mkString("\n"))
batchEventToSelectedFiles.synchronized {
batchEventToSelectedFiles += ((event, newFiles))
}
recentlySelectedFiles ++= newFiles
val rdds = Some(filesToRDD(newFiles))
// Copy newFiles to immutable.List to prevent from being modified by the user
val metadata = Map(
"files" -> newFiles.toList,
StreamInputInfo.METADATA_KEY_DESCRIPTION -> newFiles.mkString("\n"))
val inputInfo = StreamInputInfo(id, 0, metadata)
ssc.scheduler.inputInfoTracker.reportInfo(event, inputInfo)
rdds
}
/** Clear the old time-to-files mappings along with old RDDs */
protected[streaming] override def clearMetadata(event: Event) {
batchEventToSelectedFiles.synchronized {
val generatedEventsCut = generatedEvents.to(event)
val rddsToRememberCount = numberOfRDDsToRemember(generatedEventsCut)
val oldEvents = generatedEventsCut.take(generatedEventsCut.size - rddsToRememberCount)
val oldFiles = batchEventToSelectedFiles.filterKeys(oldEvents.contains)
batchEventToSelectedFiles --= oldEvents
recentlySelectedFiles --= oldFiles.values.flatten
logInfo("Cleared " + oldFiles.size + " old files that were " +
rememberExtent + " old: " + oldFiles.keys.mkString(", "))
logDebug("Cleared files are:\n" +
oldFiles.map(p => (p._1, p._2.mkString(", "))).mkString("\n"))
}
// Delete file mod times that weren't accessed in the last round of getting new files
fileToModTime.clearOldValues(lastNewFileFindingTime - 1)
}
/**
* Find new files for the batch of `currentTime`. This is done by first calculating the
* ignore threshold for file mod times, and then getting a list of files filtered based on
* the current batch time and the ignore threshold. The ignore threshold is the max of
* initial ignore threshold and the trailing end of the remember window (that is, which ever
* is later in time).
*/
private def findNewFiles(currentTime: Long): Array[String] = {
try {
lastNewFileFindingTime = clock.getTimeMillis()
// Calculate ignore threshold
val modTimeIgnoreThreshold = math.max(
initialModTimeIgnoreThreshold, // initial threshold based on newFilesOnly setting
currentTime - durationToRemember.milliseconds // trailing end of the remember window
)
logDebug(s"Getting new files for time $currentTime, " +
s"ignoring files older than $modTimeIgnoreThreshold")
val newFileFilter = new PathFilter {
def accept(path: Path): Boolean = isNewFile(path, currentTime, modTimeIgnoreThreshold)
}
val directoryFilter = new PathFilter {
override def accept(path: Path): Boolean = fs.getFileStatus(path).isDirectory
}
val directories = fs.globStatus(directoryPath, directoryFilter).map(_.getPath)
val newFiles = directories.flatMap(dir =>
fs.listStatus(dir, newFileFilter).map(_.getPath.toString))
val timeTaken = clock.getTimeMillis() - lastNewFileFindingTime
logInfo("Finding new files took " + timeTaken + " ms")
logDebug("# cached file times = " + fileToModTime.size)
if (timeTaken > slideDuration.milliseconds) {
logWarning(
"Time taken to find new files exceeds the batch size. " +
"Consider increasing the batch size or reducing the number of " +
"files in the monitored directory."
)
}
newFiles
} catch {
case e: Exception =>
logWarning("Error finding new files", e)
reset()
Array.empty
}
}
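// Illustrative sketch (not part of the original source, values hypothetical): with a 30 s
// remember window and initialModTimeIgnoreThreshold = 0, a batch at currentTime = 100000 ms
// uses modTimeIgnoreThreshold = math.max(0L, 100000L - 30000L) = 70000 ms, so only files
// modified after 70000 ms are candidates for this batch.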
/**
* Identify whether the given `path` is a new file for the batch of `currentTime`. For it to be
* accepted, it has to pass the following criteria.
* - It must pass the user-provided file filter.
* - It must be newer than the ignore threshold. It is assumed that files older than the ignore
*   threshold have already been considered or were existing files before start
*   (when newFilesOnly = true).
* - It must not be present in the recently selected files that this class remembers.
* - It must not be newer than the time of the batch (i.e. `currentTime`) for which this
*   file is being tested. This can occur if the driver was recovered and the missing batches
*   (during downtime) are being generated. In that case, a batch of time T may be generated
*   at time T+x. Say x = 5. If that batch T contains a file of mod time T+5, then bad things can
*   happen. Let's say the selected files are remembered for 60 seconds. At time T+61 the batch
*   of time T is forgotten, while the ignore threshold is still T+1. The files with mod time
*   T+5 are no longer remembered and cannot be ignored (since T+5 > T+1), so they could be
*   selected as new files again. To prevent this, files whose mod time is more than the
*   current batch time are not considered.
*/
private def isNewFile(path: Path, currentTime: Long, modTimeIgnoreThreshold: Long): Boolean = {
val pathStr = path.toString
// Reject file if it does not satisfy filter
if (!filter(path)) {
logDebug(s"$pathStr rejected by filter")
return false
}
// Reject file if it was created before the ignore time
val modTime = getFileModTime(path)
if (modTime <= modTimeIgnoreThreshold) {
// Use <= instead of < to avoid SPARK-4518
logDebug(s"$pathStr ignored as mod time $modTime <= ignore time $modTimeIgnoreThreshold")
return false
}
// Reject file if mod time > current batch time
if (modTime > currentTime) {
logDebug(s"$pathStr not selected as mod time $modTime > current time $currentTime")
return false
}
// Reject file if it was considered earlier
if (recentlySelectedFiles.contains(pathStr)) {
logDebug(s"$pathStr already considered")
return false
}
logDebug(s"$pathStr accepted with mod time $modTime")
return true
}
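// Illustrative sketch (not part of the original source, values hypothetical): continuing the
// example above (threshold = 70000 ms, currentTime = 100000 ms), a file with mod time 70000 is
// rejected (<= threshold), 85000 is accepted if it passes the filter and was not recently
// selected, and 105000 is rejected because its mod time is later than the batch time.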
/** Generate one RDD from an array of files */
private def filesToRDD(files: Seq[String]): RDD[(K, V)] = {
val fileRDDs = files.map { file =>
val rdd = serializableConfOpt.map(_.value) match {
case Some(config) => context.sparkContext.newAPIHadoopFile(
file,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
config)
case None => context.sparkContext.newAPIHadoopFile[K, V, F](file)
}
if (rdd.partitions.isEmpty) {
logError("File " + file + " has no data in it. Spark Streaming can only ingest " +
"files that have been \"moved\" to the directory assigned to the file stream. " +
"Refer to the streaming programming guide for more details.")
}
rdd
}
new UnionRDD(context.sparkContext, fileRDDs)
}
/** Get file mod time from cache or fetch it from the file system */
private def getFileModTime(path: Path) = {
fileToModTime.getOrElseUpdate(path.toString, fs.getFileStatus(path).getModificationTime())
}
private def directoryPath: Path = {
if (_path == null) _path = new Path(directory)
_path
}
private def fs: FileSystem = {
if (_fs == null) _fs = directoryPath.getFileSystem(ssc.sparkContext.hadoopConfiguration)
_fs
}
private def reset() {
_fs = null
}
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
logDebug(this.getClass().getSimpleName + ".readObject used")
ois.defaultReadObject()
generatedRDDs = new mutable.LinkedHashMap[Event, RDD[(K, V)]]()
batchEventToSelectedFiles = new mutable.HashMap[Event, Array[String]]
recentlySelectedFiles = new mutable.HashSet[String]()
fileToModTime = new TimeStampedHashMap[String, Long](true)
}
/**
* A custom version of the DStreamCheckpointData that stores names of
* Hadoop files as checkpoint data.
*/
private[streaming]
class FileInputDStreamCheckpointData extends DStreamCheckpointData(this) {
private def hadoopFiles = data.asInstanceOf[mutable.HashMap[Event, Array[String]]]
override def update(event: Event) {
hadoopFiles.clear()
batchEventToSelectedFiles.synchronized { hadoopFiles ++= batchEventToSelectedFiles }
}
override def cleanup(event: Event) { }
override def restore() {
hadoopFiles.toSeq.sortBy(_._1)(Event.ordering).foreach {
case (e, f) =>
// Restore the metadata in both files and generatedRDDs
logInfo("Restoring files for time " + e + " - " +
f.mkString("[", ", ", "]") )
batchEventToSelectedFiles.synchronized { batchEventToSelectedFiles += ((e, f)) }
recentlySelectedFiles ++= f
generatedRDDs += ((e, filesToRDD(f)))
}
}
override def toString: String = {
"[\n" + hadoopFiles.size + " file sets\n" +
hadoopFiles.map(p => (p._1, p._2.mkString(", "))).mkString("\n") + "\n]"
}
}
}
private[streaming]
object FileInputDStream {
def defaultFilter(path: Path): Boolean = !path.getName().startsWith(".")
/**
* Calculate the number of recent batches to remember, such that all the files selected
* within at least the last minRememberDurationS duration can be remembered.
*/
def calculateNumBatchesToRemember(batchDuration: Duration,
minRememberDurationS: Duration): Int = {
math.ceil(minRememberDurationS.milliseconds.toDouble / batchDuration.milliseconds).toInt
}
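// Illustrative sketch (not part of the original source, values hypothetical): with a 10 s batch
// duration and a 60 s minimum remember duration, math.ceil(60000.0 / 10000).toInt == 6 batches
// are remembered; a 25 s remember duration over the same batches rounds up to 3.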
}
|
mashin-io/rich-spark
|
streaming/src/main/scala/org/apache/spark/streaming/dstream/FileInputDStream.scala
|
Scala
|
apache-2.0
| 16,914
|
package skinny.task.generator
import skinny.controller.Params
/**
* Scaffold generator with ssp template.
*/
object ScaffoldSspGenerator extends ScaffoldSspGenerator
/**
* Scaffold generator with ssp template.
*/
trait ScaffoldSspGenerator extends ScaffoldGenerator {
val packageImportsWarning =
s"""<%-- Be aware of package imports.
| 1. ${sourceDir}/templates/ScalatePackage.scala
| 2. scalateTemplateConfig in project/Build.scala
|--%>""".stripMargin
override def formHtmlCode(namespaces: Seq[String], resources: String, resource: String, nameAndTypeNamePairs: Seq[(String, String)]): String = {
val controllerName = "Controllers." + toControllerName(namespaces, resources)
"<%@val s: skinny.Skinny %>\n<%@val keyAndErrorMessages: skinny.KeyAndErrorMessages %>\n\n" +
packageImportsWarning + "\n\n" +
nameAndTypeNamePairs.toList.map { case (k, t) => (k, extractTypeIfOptionOrSeq(t)) }.map {
case (name, "Boolean") =>
s"""<div class="form-group">
| <label class="control-label" for="${toSnakeCase(name)}">
| $${s.i18n.getOrKey("${resource}.${name}")}
| </label>
| <div class="controls row">
| <div class="col-xs-12">
| <input type="checkbox" name="${toSnakeCase(name)}" value="true" #if(s.params.${toSnakeCase(name)}==Some(true)) checked #end />
| </div>
| </div>
|</div>
|""".stripMargin
case (name, "DateTime") =>
s"""<div class="form-group">
| <label class="control-label">
| $${s.i18n.getOrKey("${resource}.${name}")}
| </label>
| <div class="controls row">
| <div class="$${if(keyAndErrorMessages.hasErrors("${toSnakeCase(name)}")) "has-error" else ""}">
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Year)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Year)}}" placeholder="$${s.i18n.getOrKey("year")}" maxlength=4 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Month)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Month)}}" placeholder="$${s.i18n.getOrKey("month")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Day)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Day)}}" placeholder="$${s.i18n.getOrKey("day")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Hour)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Hour)}}" placeholder="$${s.i18n.getOrKey("hour")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Minute)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Minute)}}" placeholder="$${s.i18n.getOrKey("minute")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Second)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Second)}}" placeholder="$${s.i18n.getOrKey("second")}" maxlength=2 />
| </div>
| </div>
| #if (keyAndErrorMessages.hasErrors("${toSnakeCase(name)}"))
| <div class="col-xs-12 has-error">
| #for (error <- keyAndErrorMessages.getErrors("${toSnakeCase(name)}"))
| <label class="control-label">$${error}</label>
| #end
| </div>
| #end
| </div>
|</div>
|""".stripMargin
case (name, "LocalDate") =>
s"""<div class="form-group">
| <label class="control-label">
| $${s.i18n.getOrKey("${resource}.${name}")}
| </label>
| <div class="controls row">
| <div class="$${if(keyAndErrorMessages.hasErrors("${toSnakeCase(name)}")) "has-error" else ""}">
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Year)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Year)}}" placeholder="$${s.i18n.getOrKey("year")}" maxlength=4 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Month)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Month)}}" placeholder="$${s.i18n.getOrKey("month")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Day)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Day)}}" placeholder="$${s.i18n.getOrKey("day")}" maxlength=2 />
| </div>
| </div>
| #if (keyAndErrorMessages.hasErrors("${toSnakeCase(name)}"))
| <div class="col-xs-12 has-error">
| #for (error <- keyAndErrorMessages.getErrors("${toSnakeCase(name)}"))
| <label class="control-label">$${error}</label>
| #end
| </div>
| #end
| </div>
|</div>
|""".stripMargin
case (name, "LocalTime") =>
s"""<div class="form-group">
| <label class="control-label">
| $${s.i18n.getOrKey("${resource}.${name}")}
| </label>
| <div class="controls row">
| <div class="$${if(keyAndErrorMessages.hasErrors("${toSnakeCase(name)}")) "has-error" else ""}">
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Hour)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Hour)}}" placeholder="$${s.i18n.getOrKey("hour")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Minute)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Minute)}}" placeholder="$${s.i18n.getOrKey("minute")}" maxlength=2 />
| </div>
| <div class="col-xs-2">
| <input type="text" name="${toSnakeCase(name + Params.Second)}" class="form-control" value="$${s.params.${toSnakeCase(name + Params.Second)}}" placeholder="$${s.i18n.getOrKey("second")}" maxlength=2 />
| </div>
| </div>
| #if (keyAndErrorMessages.hasErrors("${toSnakeCase(name)}"))
| <div class="col-xs-12 has-error">
| #for (error <- keyAndErrorMessages.getErrors("${toSnakeCase(name)}"))
| <label class="control-label">$${error}</label>
| #end
| </div>
| #end
| </div>
|</div>
|""".stripMargin
case (name, _) =>
s"""<div class="form-group">
| <label class="control-label" for="${toSnakeCase(name)}">
| $${s.i18n.getOrKey("${resource}.${name}")}
| </label>
| <div class="controls row">
| <div class="$${if(keyAndErrorMessages.hasErrors("${toSnakeCase(name)}")) "has-error" else ""}">
| <div class="col-xs-12">
| <input type="text" name="${toSnakeCase(name)}" class="form-control" value="$${s.params.${toSnakeCase(name)}}" />
| </div>
| </div>
| #if (keyAndErrorMessages.hasErrors("${toSnakeCase(name)}"))
| <div class="col-xs-12 has-error">
| #for (error <- keyAndErrorMessages.getErrors("${toSnakeCase(name)}"))
| <label class="control-label">$${error}</label>
| #end
| </div>
| #end
| </div>
|</div>
|""".stripMargin
}.mkString +
s"""<div class="form-actions">
| $${unescape(s.csrfHiddenInputTag)}
| <input type="submit" class="btn btn-primary" value="$${s.i18n.getOrKey("submit")}">
| <a class="btn btn-default" href="$${s.url(${controllerName}.indexUrl)}">$${s.i18n.getOrKey("cancel")}</a>
|</div>
|""".stripMargin
}
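// Illustrative sketch (not part of the original source, field names hypothetical): for the pair
// ("name", "String") the default branch above emits a single text <input name="name">, while
// ("birthday", "LocalDate") expands into the three year/month/day inputs wrapped in the shared
// error-message block.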
override def newHtmlCode(namespaces: Seq[String], resources: String, resource: String, nameAndTypeNamePairs: Seq[(String, String)]): String = {
val controllerName = "Controllers." + toControllerName(namespaces, resources)
s"""<%@val s: skinny.Skinny %>
|
|${packageImportsWarning}
|
|<h3>$${s.i18n.getOrKey("${resource}.new")}</h3>
|<hr/>
|
|<%--
|#for (e <- s.errorMessages)
|<p class="alert alert-danger">$${e}</p>
|#end
|--%>
|
|<form method="post" action="$${s.url(${controllerName}.createUrl)}" class="form">
| $${include("_form.html.ssp")}
|</form>
|""".stripMargin
}
override def editHtmlCode(namespaces: Seq[String], resources: String, resource: String, nameAndTypeNamePairs: Seq[(String, String)]): String = {
val controllerName = "Controllers." + toControllerName(namespaces, resources)
s"""<%@val s: skinny.Skinny %>
|
|${packageImportsWarning}
|
|<h3>$${s.i18n.getOrKey("${resource}.edit")} : #$${s.params.id}</h3>
|<hr/>
|
|<%--
|#for (e <- s.errorMessages)
|<p class="alert alert-danger">$${e}</p>
|#end
|--%>
|
|<form method="post" action="$${s.url(${controllerName}.updateUrl, "${snakeCasedPrimaryKeyName}" -> s.params.${snakeCasedPrimaryKeyName})}" class="form">
| $${include("_form.html.ssp")}
|</form>
|""".stripMargin
}
override def indexHtmlCode(namespaces: Seq[String], resources: String, resource: String, nameAndTypeNamePairs: Seq[(String, String)]): String = {
val controllerName = "Controllers." + toControllerName(namespaces, resources)
val modelClassName = toClassName(resource)
s"""<%@val s: skinny.Skinny %>
|<%@val items: Seq[${toNamespace(modelPackage, namespaces)}.${modelClassName}] %>
|<%@val totalPages: Int %>
|<%@val page: Int = s.params.page.map(_.toString.toInt).getOrElse(1) %>
|
|${packageImportsWarning}
|
|<h3>$${s.i18n.getOrKey("${resource}.list")}</h3>
|<hr/>
|#for (notice <- s.flash.notice)
| <p class="alert alert-info">$${notice}</p>
|#end
|
|#if (totalPages > 1)
| <ul class="pagination">
| <li>
| <a href="$${s.url(${controllerName}.indexUrl, "page" -> 1)}">«</a>
| </li>
| <% val maxPage = Math.min(totalPages, if (page <= 5) 11 else page + 5) %>
| #for (i <- Math.max(1, maxPage - 10) to maxPage)
| <li class="$${if (i == page) "active" else ""}">
| <a href="$${s.url(${controllerName}.indexUrl, "page" -> i)}">$${i}</a>
| </li>
| #end
| <li>
| <a href="$${s.url(${controllerName}.indexUrl, "page" -> totalPages)}">»</a>
| </li>
| <li>
| <span>$${Math.min(page, totalPages)} / $${totalPages}</span>
| </li>
| </ul>
|#end
|
|<table class="table table-bordered">
|<thead>
| <tr>
|${((primaryKeyName -> "Long") :: nameAndTypeNamePairs.toList).map { case (k, _) => " <th>${s.i18n.getOrKey(\"" + resource + "." + k + "\")}</th>" }.mkString("\n")}
| <th></th>
| </tr>
|</thead>
|<tbody>
| #for (item <- items)
| <tr>
|${((primaryKeyName -> "Long") :: nameAndTypeNamePairs.toList).map { case (k, _) => s" <td>$${item.${k}}</td>" }.mkString("\n")}
| <td>
| <a href="$${s.url(${controllerName}.showUrl, "${snakeCasedPrimaryKeyName}" -> item.${primaryKeyName})}" class="btn btn-default">$${s.i18n.getOrKey("detail")}</a>
| <a href="$${s.url(${controllerName}.editUrl, "${snakeCasedPrimaryKeyName}" -> item.${primaryKeyName})}" class="btn btn-info">$${s.i18n.getOrKey("edit")}</a>
| <a data-method="delete" data-confirm="$${s.i18n.getOrKey("${resource}.delete.confirm")}"
| href="$${s.url(${controllerName}.destroyUrl, "${snakeCasedPrimaryKeyName}" -> item.${primaryKeyName})}" rel="nofollow" class="btn btn-danger">$${s.i18n.getOrKey("delete")}</a>
| </td>
| </tr>
| #end
| #if (items.isEmpty)
| <tr>
| <td colspan="${2 + nameAndTypeNamePairs.size}">$${s.i18n.getOrKey("empty")}</td>
| </tr>
| #end
|</tbody>
|</table>
|
|<a href="$${s.url(${controllerName}.newUrl)}" class="btn btn-primary">$${s.i18n.getOrKey("new")}</a>
|""".stripMargin
}
override def showHtmlCode(namespaces: Seq[String], resources: String, resource: String, nameAndTypeNamePairs: Seq[(String, String)]): String = {
val controllerName = "Controllers." + toControllerName(namespaces, resources)
val modelClassName = toClassName(resource)
val modelNamespace = toNamespace(modelPackage, namespaces)
val attributesPart = ((primaryKeyName -> "Long") :: nameAndTypeNamePairs.toList).map {
case (name, _) =>
s""" <tr>
| <th>$${s.i18n.getOrKey("${resource}.${name}")}</th>
| <td>$${item.${name}}</td>
| </tr>
|""".stripMargin
}.mkString
s"""<%@val item: ${modelNamespace}.${modelClassName} %>
|<%@val s: skinny.Skinny %>
|
|${packageImportsWarning}
|
|<h3>$${s.i18n.getOrKey("${resource}.detail")}</h3>
|<hr/>
|#for (notice <- s.flash.notice)
| <p class="alert alert-info">$${notice}</p>
|#end
|<table class="table table-bordered">
|<tbody>
|${attributesPart}
|</tbody>
|</table>
|
|<hr/>
|<div class="form-actions">
| <a class="btn btn-default" href="$${s.url(${controllerName}.indexUrl)}">$${s.i18n.getOrKey("backToList")}</a>
| <a href="$${s.url(${controllerName}.editUrl, "${snakeCasedPrimaryKeyName}" -> item.${primaryKeyName})}" class="btn btn-info">$${s.i18n.getOrKey("edit")}</a>
| <a data-method="delete" data-confirm="$${s.i18n.getOrKey("${resource}.delete.confirm")}"
| href="$${s.url(${controllerName}.destroyUrl, "${snakeCasedPrimaryKeyName}" -> item.${primaryKeyName})}" rel="nofollow" class="btn btn-danger">$${s.i18n.getOrKey("delete")}</a>
|</div>
|""".stripMargin
}
}
|
holycattle/skinny-framework
|
task/src/main/scala/skinny/task/generator/ScaffoldSspGenerator.scala
|
Scala
|
mit
| 14,857
|
package vu.ml
import shapeless._
import shapeless.labelled._
import shapeless.nat._
import shapeless.ops.nat.ToInt
import vu.ml.Weka._
import weka.core.{Attribute, Instance}
/**
* Polymorphic functions on Weka primitives for generalisation of the Weka algorithms
*
* @author v.uspenskiy
* @since 18/03/15
*/
object Functions {
object getDistinctValues extends Poly1 {
implicit def caseFieldDouble [K](implicit wk: Witness.Aux[K]) = at[FieldType[K, Option[Double]]] { field => wk.value -> List[Double]() }
implicit def caseFieldLong [K](implicit wk: Witness.Aux[K]) = at[FieldType[K, Option[Long]]] { field => wk.value -> List[Long]() }
implicit def caseFieldBoolean[K](implicit wk: Witness.Aux[K]) = at[FieldType[K, Option[Boolean]]] { field => wk.value -> field.toList /* List[Boolean] */ }
implicit def caseFieldString [K](implicit wk: Witness.Aux[K]) = at[FieldType[K, Option[String]]] { field => wk.value -> field.toList /* List[String] */ }
}
object combineValues extends Poly1 {
implicit def caseAny[K,V] = at[((K, List[V]), (K, List[V]))](fields => fields._1._1 -> (fields._1._2 ++ fields._2._2).distinct)
}
object size extends Poly1 {
implicit def caseList[K,V] = at[(K, List[V])](_._2.size)
}
object createAttribute extends Poly1 {
implicit def caseDouble [K] = at[(K, List[Double])] { values => new Attribute(values._1.toString) }
implicit def caseLong [K] = at[(K, List[Long])] { values => new Attribute(values._1.toString) }
implicit def caseBoolean[K] = at[(K, List[Boolean])] { values => new Attribute(values._1.toString, values._2.map(_.toString)) }
implicit def caseString [K] = at[(K, List[String])] { values => new Attribute(values._1.toString, values._2) }
}
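// Illustrative sketch (not part of the original source, field names hypothetical): applying the
// Poly1 above directly picks the matching case, e.g.
//   createAttribute("age" -> List.empty[Double]) // plain numeric Attribute("age")
//   createAttribute("colour" -> List("red", "blue")) // nominal Attribute with values red/blue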
object fillExample extends Poly1 {
implicit def caseDouble [K] = at[((Attribute, FieldType[K, Option[Double]] ), Instance)] { fve => if(fve._1._2.isDefined) fve._2.setValue(fve._1._1, fve._1._2.get) else fve._2.setMissing(fve._1._1); fve._2 }
implicit def caseLong [K] = at[((Attribute, FieldType[K, Option[Long]] ), Instance)] { fve => if(fve._1._2.isDefined) fve._2.setValue(fve._1._1, fve._1._2.get.toDouble) else fve._2.setMissing(fve._1._1); fve._2 }
implicit def caseBoolean[K] = at[((Attribute, FieldType[K, Option[Boolean]]), Instance)] { fve => if(fve._1._2.isDefined) fve._2.setValue(fve._1._1, fve._1._2.get.toString) else fve._2.setMissing(fve._1._1); fve._2 }
implicit def caseString [K] = at[((Attribute, FieldType[K, Option[String]] ), Instance)] { fve => if(fve._1._2.isDefined) fve._2.setValue(fve._1._1, fve._1._2.get) else fve._2.setMissing(fve._1._1); fve._2 }
}
object fitWithInstanceData extends Poly1 {
implicit def caseDouble[K, N <: Nat: ToInt] =
at[((FieldType[K, Option[Double]], N), Instance)] { fv => field[K](Option(fv._2.value(toInt[N]))) }
implicit def caseBoolean[K, N <: Nat: ToInt] =
at[((FieldType[K, Option[Boolean]], N), Instance)] { fv => field[K](Option("true".equals(fv._2.stringValue(toInt[N])))) }
implicit def caseString[K, N <: Nat: ToInt] =
at[((FieldType[K, Option[String]], N), Instance)] { fv => field[K](Option(fv._2.stringValue(toInt[N]))) }
implicit def caseLong[K, N <: Nat: ToInt] =
at[((FieldType[K, Option[Long]], N), Instance)] { fv => field[K](Option(fv._2.value(toInt[N]).toLong)) }
}
}
|
vuspenskiy/shapeless-weka
|
src/main/scala/vu/ml/Functions.scala
|
Scala
|
mit
| 3,399
|
package models.team
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import scalaz.syntax.SemigroupOps
import cache._
import db._
import models.Constants._
import io.megam.auth.funnel.FunnelErrors._
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import utils.DateHelper
import io.megam.util.Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat}
import io.megam.common.uid.UID
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import controllers.stack.ImplicitJsonFormats
case class OrganizationsInput(name: String) {
val json = "{\"name\":\"" + name + "\"}"
}
case class OrganizationsInviteInput(id: String) {
val json = "{\"id\":\"" + id + "\"}"
}
case class OrganizationsResult(
id: String,
accounts_id: String,
name: String,
json_claz: String,
created_at: DateTime) {}
object OrganizationsResult {
val empty = new OrganizationsResult("", "", "", "Megam::Organizations", DateHelper.now())
}
sealed class OrganizationsT extends CassandraTable[OrganizationsT, OrganizationsResult] {
object id extends StringColumn(this) with PrimaryKey[String]
object accounts_id extends StringColumn(this) with PartitionKey[String]
object name extends StringColumn(this)
object json_claz extends StringColumn(this)
object created_at extends DateTimeColumn(this)
override def fromRow(r: Row): OrganizationsResult = {
OrganizationsResult(
id(r),
accounts_id(r),
name(r),
json_claz(r),
created_at(r))
}
}
/*
* This class talks to Scylla and performs the actions.
*/
abstract class ConcreteOrg extends OrganizationsT with ScyllaConnector {
override lazy val tableName = "organizations"
def insertNewRecord(org: OrganizationsResult): ResultSet = {
val res = insert.value(_.id, org.id)
.value(_.accounts_id, org.accounts_id)
.value(_.name, org.name)
.value(_.json_claz, org.json_claz)
.value(_.created_at, org.created_at)
.future()
Await.result(res, 5.seconds)
}
def deleteRecords(email: String): ValidationNel[Throwable, ResultSet] = {
val res = delete.where(_.accounts_id eqs email).future()
Await.result(res, 5.seconds).successNel
}
}
object Organizations extends ConcreteOrg with ImplicitJsonFormats {
private def orgNel(input: String): ValidationNel[Throwable, OrganizationsInput] = {
(Validation.fromTryCatchThrowable[OrganizationsInput, Throwable] {
parse(input).extract[OrganizationsInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
}
private def inviteNel(input: String): ValidationNel[Throwable, OrganizationsInviteInput] = {
(Validation.fromTryCatchThrowable[OrganizationsInviteInput, Throwable] {
parse(input).extract[OrganizationsInviteInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
}
private def organizationsSet(id: String, email: String, c: OrganizationsInput): ValidationNel[Throwable, OrganizationsResult] = {
(Validation.fromTryCatchThrowable[OrganizationsResult, Throwable] {
OrganizationsResult(id, email, c.name, "Megam::Organizations", DateHelper.now())
} leftMap { t: Throwable => new MalformedBodyError(c.json, t.getMessage) }).toValidationNel
}
def create(email: String, input: String): ValidationNel[Throwable, Option[OrganizationsResult]] = {
for {
c <- orgNel(input)
uir <- (UID("org").get leftMap { u: NonEmptyList[Throwable] => u })
org <- organizationsSet(uir.get._1 + uir.get._2, email, c)
} yield {
insertNewRecord(org)
org.some
}
}
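// Illustrative sketch (not part of the original source, values hypothetical): a caller such as
//   Organizations.create("a@b.com", """{"name":"acme"}""")
// gets back either Success(Some(org)) or a Failure carrying the accumulated NonEmptyList of
// errors (malformed body, UID generation failure, etc.).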
def findByEmail(accounts_id: String): ValidationNel[Throwable, Seq[OrganizationsResult]] = {
val resp = select.allowFiltering().where(_.accounts_id eqs accounts_id).fetch()
(Await.result(resp, 5.seconds)).successNel
}
def delete(email: String): ValidationNel[Throwable, Option[OrganizationsResult]] = {
deleteRecords(email) match {
case Success(value) => Validation.success[Throwable, Option[OrganizationsResult]](none).toValidationNel
case Failure(err) => Validation.success[Throwable, Option[OrganizationsResult]](none).toValidationNel
}
}
private def findById(id: String): ValidationNel[Throwable, Option[OrganizationsResult]] = {
val resp = select.allowFiltering().where(_.id eqs id).one()
(Await.result(resp, 5.second)).successNel
}
def inviteOrganization(email: String, input: String): ValidationNel[Throwable, ResultSet] = {
for {
c <- inviteNel(input)
upd <- findById(c.id)
} yield {
val org = new OrganizationsResult(upd.head.id, email, upd.head.name, upd.head.json_claz, DateHelper.now())
insertNewRecord(org)
}
}
}
|
indykish/vertice_gateway
|
app/models/team/Organizations.scala
|
Scala
|
mit
| 5,141
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3FrequencyTest extends Specification with StatTestHelper {
def createStat(precision: Int, observe: Boolean): Z3Frequency = {
val s = Stat(sft, Stat.Z3Frequency("geom", "dtg", TimePeriod.Week, precision))
if (observe) {
features.foreach { s.observe }
}
s.asInstanceOf[Z3Frequency]
}
def createStat(observe: Boolean = true): Z3Frequency = createStat(25, observe)
def toDate(string: String) = java.util.Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
def toGeom(string: String) = WKTUtils.read(string)
"FrequencyZ3 stat" should {
"work with geometries and dates" >> {
"be empty initially" >> {
val stat = createStat(observe = false)
stat.isEmpty must beTrue
stat.size mustEqual 0
}
"correctly bin values" >> {
val stat = createStat()
stat.isEmpty must beFalse
stat.size mustEqual 100
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(1L, 6L)
}
}
"serialize and deserialize" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geom mustEqual stat.geom
unpacked.asInstanceOf[Z3Frequency].dtg mustEqual stat.dtg
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = createStat(observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geom mustEqual stat.geom
unpacked.asInstanceOf[Z3Frequency].dtg mustEqual stat.dtg
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"deserialize as immutable value" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geom mustEqual stat.geom
unpacked.asInstanceOf[Z3Frequency].dtg mustEqual stat.dtg
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
unpacked.clear must throwAn[Exception]
unpacked.+=(stat) must throwAn[Exception]
unpacked.observe(features.head) must throwAn[Exception]
unpacked.unobserve(features.head) must throwAn[Exception]
}
"clear" >> {
val stat = createStat()
stat.clear()
stat.isEmpty must beTrue
stat.size mustEqual 0
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) mustEqual 0
}
stat.count(toGeom("POINT(-180 -90)"), toDate("2012-01-01T00:00:00.000Z")) mustEqual 0
}
}
}
}
|
ddseapy/geomesa
|
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/Z3FrequencyTest.scala
|
Scala
|
apache-2.0
| 4,247
|
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.chain
import java.io.{BufferedWriter, FileInputStream, FileOutputStream, FileWriter}
import cc.factorie._
import cc.factorie.infer.MaximizeByBPChain
import cc.factorie.la.{GrowableSparseBinaryTensor1, GrowableSparseTensor1}
import cc.factorie.optimize.OnlineTrainer
import cc.factorie.util.DefaultCmdOptions
import cc.factorie.variable._
import scala.io.Source
import scala.util.Random
object Chain {
private object ChainOpts extends DefaultCmdOptions {
val writeSequences = new CmdOption("write-sequences", "sequences", "FILE", "Filename in which to save the sequences' labels and features.")
// provide either these 3
val readSequences = new CmdOption("read-sequences", "sequences", "FILE", "Filename from which to read the sequences' labels and features in one-line-per-token format.")
val trainingPortion = new CmdOption("training-portion", 0.5, "FRACTION", "The fraction of the sequences that should be used for training. testing-portion is 1.0 - training-portion - validation-portion.")
val crossValidation = new CmdOption("cross-validation", 1, "N", "The number of folds for cross-validation (DEFAULT=1)") //todo implement cross-validation
// or these 2
val readTrainingSequences = new CmdOption("read-training-sequences", "sequences", "FILE", "Filename from which to read the training sequences' labels and features.")
val readTestingSequences = new CmdOption("read-testing-sequences", "sequences", "FILE", "Filename from which to read the testing sequences' labels and features.")
val readBinaryFeatures = new CmdOption("read-binary-features", true, "true|false", "If true, features will be binary as opposed to counts. Default is true.")
val readTextEncoding = new CmdOption("read-text-encoding", "UTF-8", "ENCODING", "The name of the encoding to use, e.g. UTF-8.")
val writeClassifications = new CmdOption("write-classifications", "classifications", "FILE", "Filename in which to save the classifications.")
val writeChainModel = new CmdOption("write-chain-model", "chain-model", "FILE", "Filename in which to save the chain model.")
val readChainModel = new CmdOption("read-chain-model", "chain-model", "FILE", "Filename from which to read the chain model.")
val localRandomSeed = new CmdOption("random-seed", -1, "N", "The random seed for randomly selecting a proportion of the instance list for training")
val trainer = new CmdOption("trainer", "OnlineTrainer", "ChainTrainer", "Scala expression providing ChainTrainer class.") //todo implement this CLA
// TODO Consider enabling the system to use multiple ChainTrainers at the same time, and compare results
val evaluator = new CmdOption("evaluator", "Trial", "Class()", "The constructor for a ClassifierEvaluator class.") //todo implement this CLA
}
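// Illustrative sketch (not part of the original source; file names are hypothetical and the
// exact flag syntax depends on DefaultCmdOptions): a typical run reads one file of sequences,
// trains on 80% of them, and writes the learned model, e.g.
//   Chain --read-sequences=sequences.txt --training-portion=0.8 --write-chain-model=chain.model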
private object FeaturesDomain extends CategoricalVectorDomain[String]
private object LabelDomain extends CategoricalDomain[String]
private class FeatureChain extends Chain[FeatureChain, Features]
private class Features(val featureStrings: Iterable[String], val label: Label, val useBinaryFeatures: Boolean = ChainOpts.readBinaryFeatures.value)
extends FeatureVectorVariable[String] with Observation[Features] with ChainLink[Features, FeatureChain] {
if (useBinaryFeatures) set(new GrowableSparseBinaryTensor1(domain.dimensionDomain))(null)
else set(new GrowableSparseTensor1(domain.dimensionDomain))(null)
this ++= featureStrings
val string = "N/A"
override def domain = FeaturesDomain
override val skipNonCategories = true
}
private class Label(value: String, featureStrings: Iterable[String]) extends LabeledCategoricalVariable[String](value) {
val features = new Features(featureStrings, this)
def token = features
override def domain = LabelDomain
}
def main(args: Array[String]): Unit = {
val startTime = System.currentTimeMillis()
ChainOpts.parse(args)
if (ChainOpts.trainer.wasInvoked) {
throw new NotImplementedError("Specifying a trainer isn't yet implemented")
}
if (ChainOpts.evaluator.wasInvoked) {
throw new NotImplementedError("Specifying an evaluator isn't yet implemented")
}
implicit val random = ChainOpts.localRandomSeed.wasInvoked match {
case true => new Random(ChainOpts.localRandomSeed.value)
case false => new Random()
}
def processInstances(filename: String): Iterable[FeatureChain] = {
val src = Source.fromFile(filename)
val featureChains = src.getLines().toSeq.map(_.split("\\s+").toList).split(_.nonEmpty).map {
chains =>
new FeatureChain() ++= chains.collect {
case labelString :: featureStrings =>
new Label(labelString, featureStrings).features
}.toIterable
}.toList
src.close()
featureChains
}
// todo share across this and classify
val (trainingLabels, testingLabels) = if (Seq(ChainOpts.readSequences, ChainOpts.trainingPortion).map(_.wasInvoked).reduce(_ && _)) {
processInstances(ChainOpts.readSequences.value) match {
case labels if ChainOpts.trainingPortion.value == 1.0 => labels.shuffle -> Seq()
case labels => labels.shuffle.split(ChainOpts.trainingPortion.value)
}
} else if (Seq(ChainOpts.readTrainingSequences, ChainOpts.readTestingSequences).map(_.wasInvoked).reduce(_ && _)) {
processInstances(ChainOpts.readTrainingSequences.value) -> processInstances(ChainOpts.readTestingSequences.value)
} else {
throw new IllegalArgumentException("Invalid argument combination, supply either a read sequence and training portion or a pair of training and testing sequences.")
}
val model = new ChainModel[Label, Features, Features](LabelDomain, FeaturesDomain, _.features, _.token, _.label)
if (ChainOpts.readChainModel.wasInvoked) {
val f = new FileInputStream(ChainOpts.readChainModel.value)
model.deserialize(f)
f.close()
} else {
val examples = trainingLabels.map(fc => new model.ChainLikelihoodExample(fc.value.map(_.label)))
val trainer = new OnlineTrainer(model.parameters, maxIterations = 1)
trainer.trainFromExamples(examples)
}
var totalTokens = 0.0
var correctTokens = 0.0
testingLabels.foreach {
fc =>
val res = MaximizeByBPChain.infer(fc.value.map(_.label), model, null)
res.setToMaximize(null) // sets each label to its maximum value
totalTokens += fc.size
correctTokens += HammingObjective.accuracy(fc.value.map(_.label)) * fc.size
}
if (ChainOpts.writeClassifications.wasInvoked) {
val classificationResults = new BufferedWriter(new FileWriter(ChainOpts.writeClassifications.value))
testingLabels.foreach {
featureChain =>
featureChain.foreach {
link =>
classificationResults.write("%s %s".format(link.label.categoryValue, link.featureStrings.mkString(" ")))
classificationResults.newLine()
}
classificationResults.newLine()
}
classificationResults.flush()
classificationResults.close()
}
println("Overall accuracy: " + (correctTokens / totalTokens))
println("Total elapsed time: " + (System.currentTimeMillis() - startTime) / 1000.0 + "sec")
if (ChainOpts.writeChainModel.wasInvoked) {
val f = new FileOutputStream(ChainOpts.writeChainModel.value)
model.serialize(f)
f.flush()
f.close()
}
}
}
|
strubell/factorie
|
src/main/scala/cc/factorie/app/chain/Chain.scala
|
Scala
|
apache-2.0
| 8,273
|
package controllers.addons
import scalaz._
import Scalaz._
import scalaz.NonEmptyList._
import scalaz.Validation._
import io.megam.auth.funnel._
import io.megam.auth.funnel.FunnelErrors._
import models.billing._
import play.api.mvc._
import controllers.stack.Results
import net.liftweb.json._
import net.liftweb.json.JsonParser._
/**
* @author ranjitha
*
*/
object Addons extends Controller with controllers.stack.APIAuthElement {
/**
* Create a new Addons for the user.
**/
def post = StackAction(parse.tolerantText) { implicit request =>
(Validation.fromTryCatchThrowable[Result,Throwable] {
reqFunneled match {
case Success(succ) => {
val freq = succ.getOrElse(throw new Error("Invalid header."))
val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
val clientAPIBody = freq.clientAPIBody.getOrElse(throw new Error("Body not found (or) invalid."))
models.addons.Addons.create(email, clientAPIBody) match {
case Success(succ) =>
Status(CREATED)(
FunnelResponse(CREATED, """Addons created successfully.
|
|You can use the 'Addons':{%s}.""".format(succ.getOrElse("none")), "Megam::Addons").toJson(true))
case Failure(err) =>
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
case Failure(err) => {
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
}).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
}
def show(name: String) = StackAction(parse.tolerantText) { implicit request =>
(Validation.fromTryCatchThrowable[Result, Throwable] {
reqFunneled match {
case Success(succ) => {
val freq = succ.getOrElse(throw new Error("Invalid header."))
val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
models.addons.Addons.findById(email,name) match {
case Success(succ) => Ok(Results.resultset(models.Constants.ADDONSCOLLECTIONCLAZ, compactRender(Extraction.decompose(succ))))
case Failure(err) =>
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
case Failure(err) => {
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
}).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
}
}
|
indykish/vertice_gateway
|
app/controllers/addons/Addons.scala
|
Scala
|
mit
| 2,734
|
object Test {
for {
x1 <- List(1, 2)
x2 <- Iterator(3, 4)
x3 <- Seq(5, 6).iterator
x4 <- LazyList(7, 8)
} yield x1 + x2 + x3 + x4
}
|
som-snytt/dotty
|
tests/pos/iterator-traversable-mix.scala
|
Scala
|
apache-2.0
| 152
|
/*
* OpenSRS -- a Scala library for using the OpenSRS API
* Copyright (C) 2016 James Edwin Cain (user opensrs, domain jcain.net)
*
* This file is part of the net.jcain.opensrs library. This Library is free
* software; you may redistribute it or modify it under the terms of the
* license contained in the file LICENCE.txt. If you did not receive a copy of
* the license, please contact the copyright holder.
*/
package net.jcain.opensrs
import akka.actor.{ActorSystem, Props}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.{Matchers, WordSpecLike}
import concurrent.duration._
object RegistrySpec {
val User = Option(System.getProperty("user"))
val PrivateKey = Option(System.getProperty("key"))
val Hostname = "horizon.opensrs.net"
}
class RegistrySpec extends TestKit(ActorSystem("RegistrySpec")) with WordSpecLike with Matchers {
import RegistrySpec._
class RegistryFixture(label: String, key: Option[String] = None) {
// cancel the test if user and private key are not given on command line
assume(User.isDefined && PrivateKey.isDefined, "Options -Duser and -Dkey are not both specified")
val testProbe = TestProbe(s"$label-probe")
val registry = TestActorRef(Props(classOf[Registry],
User.get, key.getOrElse(PrivateKey.get), Some(Hostname), None, Some(testProbe.ref)
), s"$label-registry")
def stop() = {
system.stop(registry)
}
}
"Registry" when {
"md5Digest()" should {
"return a valid MD5 hex digest" in {
Registry.md5Digest("<test/>") shouldBe "f1430934c390c118ed2f148e1d44d36c"
// from the OpenSRS guide
Registry.md5Digest("ConnecttoOpenSRSviaSSL") shouldBe "e787cc1d1951dfec4827cede7b1a0933"
}
}
"sign()" should {
"return a valid signature" in {
Registry.sign("<test/>", "key") shouldBe "48ed6e2bb03a40a5c9a7d4b5957d7663"
}
}
"uses an invalid user/key" should {
"respond with AuthenticationFailed" in new RegistryFixture("authfail", Some("badkey")) {
registry ! request.domain.LookUp("example.com")
testProbe.expectMsgPF(30.seconds) { case response.AuthenticationFailed(_) => }
stop()
}
}
"receives a Request" should {
"respond with the Response" in new RegistryFixture("lookup") {
registry ! request.domain.LookUp("example.com")
testProbe.expectMsgPF(30.seconds) {
case response.BadRequest(e) => fail(e)
case response.AuthenticationFailed(e) => fail(e)
case x: Any => println(s"Got response: $x")
}
stop()
}
}
}
}
|
jec/OpenSRS
|
src/test/scala/net/jcain/opensrs/RegistrySpec.scala
|
Scala
|
bsd-3-clause
| 2,637
|
//: ----------------------------------------------------------------------------
//: Copyright (C) 2014 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package remotely
import scala.reflect.runtime.universe.TypeTag
import scodec.Codec
case class Protocol(codecs: Codecs, signatures: Signatures) {
def codec[A:TypeTag:Codec]: Protocol =
this.copy(codecs = codecs.codec[A])
def specify0[O](name: String, out: Type[O]): Protocol =
this.copy(signatures = signatures.specify0(name, out.name))
def specify1[A,O](name: String, in: Field[A], out: Type[O]): Protocol =
this.copy(signatures = signatures.specify1(name, in, out.name))
def specify2[A,B,O](name: String, in1: Field[A], in2: Field[B], out: Type[O]): Protocol =
this.copy(signatures = signatures.specify2(name, in1, in2, out.name))
def specify3[A,B,C,O](name: String, in1: Field[A], in2: Field[B], in3: Field[C], out: Type[O]): Protocol =
this.copy(signatures = signatures.specify3(name, in1, in2, in3, out.name))
def specify4[A,B,C,D,O](name: String, in1: Field[A], in2: Field[B], in3: Field[C], in4: Field[D], out: Type[O]): Protocol =
this.copy(signatures = signatures.specify4(name, in1, in2, in3, in4, out.name))
def specify5[A,B,C,D,E,O](name: String, in1: Field[A], in2: Field[B], in3: Field[C], in4: Field[D], in5: Field[E], out: Type[O]): Protocol =
this.copy(signatures = signatures.specify5(name, in1, in2, in3, in4, in5, out.name))
def pretty: String =
"Protocol(\n" +
Signatures.indent(" ")(codecs.pretty) + ",\n" +
Signatures.indent(" ")(signatures.pretty) + "\n)"
override def toString = pretty
}
object Protocol {
val empty = Protocol(Codecs.empty, Signatures.empty)
/// Convenience Syntax
implicit class ProtocolOps(inner: Protocol) {
import Field._
def ++(other: Protocol): Protocol = Protocol(
codecs = inner.codecs ++ other.codecs,
signatures = Signatures(inner.signatures.signatures ++ other.signatures.signatures)
)
def define0[O: TypeTag](name: String): Protocol =
inner.specify0(name, Type[O])
def define1[A: TypeTag, O: TypeTag](name: String): Protocol = {
val f1 = strict[A]("in1")
inner.specify1(name, f1, Type[O])
}
def define2[A: TypeTag, B: TypeTag, O: TypeTag](name: String): Protocol = {
val f1 = strict[A]("in1")
val f2 = strict[B]("in2")
inner.specify2(name, f1, f2, Type[O])
}
def define3[A: TypeTag, B: TypeTag, C: TypeTag, O: TypeTag](name: String): Protocol = {
val f1 = strict[A]("in1")
val f2 = strict[B]("in2")
val f3 = strict[C]("in3")
inner.specify3(name, f1, f2, f3, Type[O])
}
def define4[A: TypeTag, B: TypeTag, C: TypeTag, D: TypeTag, O: TypeTag](name: String): Protocol = {
val f1 = strict[A]("in1")
val f2 = strict[B]("in2")
val f3 = strict[C]("in3")
val f4 = strict[D]("in4")
inner.specify4(name, f1, f2, f3, f4, Type[O])
}
def define5[A: TypeTag, B: TypeTag, C: TypeTag, D: TypeTag, E: TypeTag, O: TypeTag](name: String): Protocol = {
val f1 = strict[A]("in1")
val f2 = strict[B]("in2")
val f3 = strict[C]("in3")
val f4 = strict[D]("in4")
val f5 = strict[E]("in5")
inner.specify5(name, f1, f2, f3, f4, f5, Type[O])
}
}
}
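// Illustrative sketch (not part of the original source; the endpoint name and types are
// hypothetical, and Codec instances for them are assumed to be in scope):
//   val p = Protocol.empty
//     .codec[Int]
//     .codec[List[Int]]
//     .define1[List[Int], Int]("sum")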
|
oncue/remotely
|
core/src/main/scala/Protocol.scala
|
Scala
|
apache-2.0
| 3,982
|
package database
import akka.actor.{Actor, ActorRef, Props}
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.commons.{MongoDBObject, Imports}
import database.BusRouteDefinitionsDB.ROUTE_DEFINITION_DOCUMENT
import database.BusRouteDefinitionsDB.ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION
import datadefinitions.BusDefinitions._
import play.api.Logger
import play.api.libs.concurrent.Akka
import play.api.Play.current
object BusRouteDefinitionsDB extends DatabaseCollections {
case object ROUTE_DEFINITION_DOCUMENT {
val ROUTE_ID = "ROUTE_ID"
val DIRECTION = "DIRECTION"
val BUS_STOP_SEQUENCE = "BUS_STOP_SEQUENCE"
case object STOP_SEQUENCE_DEFINITION {
val SEQUENCE_NO = "SEQUENCE_NO"
val BUS_STOP_ID = "BUS_STOP_ID"
val BUS_STOP_NAME = "BUS_STOP_NAME"
val TOWARDS = "TOWARDS"
val BUS_STOP_INDICATOR = "BUS_STOP_INDICATOR"
val BUS_STOP_STATUS = "BUS_STOP_STATUS"
val LONGITUDE = "LONGITUDE"
val LATITUDE = "LATITUDE"
val POLYLINE = "POLYLINE"
}
}
override val supervisor: ActorRef = Akka.system.actorOf(Props[BusDefinitionsDBSupervisor], name = "BusDefinitionsDBSupervisor")
def insertRouteDefinitionsIntoDB(busRouteDefinitions: BusRouteDefinitions) = {
incrementLogRequest(IncrementNumberInsertsRequested(busRouteDefinitions.size))
supervisor ! busRouteDefinitions
}
def insertRouteDefinitionsIntoDB(busRoute: BusRoute, sequenceList: List[BusStopInSequence]) = {
incrementLogRequest(IncrementNumberInsertsRequested(1))
supervisor !(busRoute, sequenceList)
}
def getBusRouteDefinitionsFromDB: BusRouteDefinitions = {
incrementLogRequest(IncrementNumberGetRequests(1))
val cursor = dBConnection.find()
cursor.map(x => {
BusRoute(
x.getAs[String](ROUTE_DEFINITION_DOCUMENT.ROUTE_ID).get,
x.getAs[String](ROUTE_DEFINITION_DOCUMENT.DIRECTION).get) ->
x.getAs[List[DBObject]](ROUTE_DEFINITION_DOCUMENT.BUS_STOP_SEQUENCE).get.map(y => {
BusStopInSequence(
y.getAs[Int](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.SEQUENCE_NO).get,
BusStop(
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.BUS_STOP_ID).get,
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.BUS_STOP_NAME).get,
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.TOWARDS).get,
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.BUS_STOP_INDICATOR).get,
y.getAs[Boolean](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.BUS_STOP_STATUS).get,
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.LATITUDE).get,
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.LONGITUDE).get
),
y.getAs[String](ROUTE_DEFINITION_DOCUMENT.STOP_SEQUENCE_DEFINITION.POLYLINE) match {
case Some(str) => Some(str)
case None => None
}
)
})
}) toMap
}
override val collectionName: String = "RouteDefinitions"
override val fieldsVector = Vector(ROUTE_DEFINITION_DOCUMENT.ROUTE_ID, ROUTE_DEFINITION_DOCUMENT.DIRECTION, ROUTE_DEFINITION_DOCUMENT.BUS_STOP_SEQUENCE)
override val indexKeyList = List((ROUTE_DEFINITION_DOCUMENT.ROUTE_ID, 1), (ROUTE_DEFINITION_DOCUMENT.DIRECTION, 1))
override val uniqueIndex = true
override val dBConnection: MongoCollection = MongoDatabase.getCollection(this)
}
class BusDefinitionsDBSupervisor extends Actor {
val busDefinitionsDBWorker: ActorRef = context.actorOf(Props[BusDefinitionsDBWorker], name = "BusDefinitionsDBWorker")
override def receive: Actor.Receive = {
case doc: BusRouteDefinitions => doc.foreach(singleRoute => busDefinitionsDBWorker ! singleRoute)
case doc: (BusRoute, List[BusStopInSequence]) => busDefinitionsDBWorker ! doc
case doc: IncrementNumberInsertsRequested => BusRouteDefinitionsDB.numberInsertsRequested += doc.incrementBy
case doc: IncrementNumberInsertsCompleted => BusRouteDefinitionsDB.numberInsertsCompleted += doc.incrementBy
case doc: IncrementNumberGetRequests => BusRouteDefinitionsDB.numberGetRequests += doc.incrementBy
case _ =>
Logger.error("BusDefinitionsDBSupervisor Actor received unknown message: ")
throw new IllegalStateException("BusDefinitionsDBSupervisor received unknown message")
}
}
class BusDefinitionsDBWorker extends Actor {
override def receive: Receive = {
case doc: (BusRoute, List[BusStopInSequence]) => insertToDB(doc._1, doc._2)
case _ =>
Logger.error("BusDefinitionsDBWorker Actor received unknown message")
throw new IllegalStateException("BusDefinitionsDBWorker received unknown message")
}
private def insertToDB(busRoute: BusRoute, sequenceList: List[BusStopInSequence]) = {
val stopSequenceList: List[Imports.DBObject] = sequenceList.map(seq => {
MongoDBObject(
STOP_SEQUENCE_DEFINITION.SEQUENCE_NO -> seq.sequenceNumber,
STOP_SEQUENCE_DEFINITION.BUS_STOP_ID -> seq.busStop.busStopID,
STOP_SEQUENCE_DEFINITION.BUS_STOP_NAME -> seq.busStop.busStopName,
STOP_SEQUENCE_DEFINITION.TOWARDS -> seq.busStop.towards,
STOP_SEQUENCE_DEFINITION.BUS_STOP_INDICATOR -> seq.busStop.busStopIndicator,
STOP_SEQUENCE_DEFINITION.BUS_STOP_STATUS -> seq.busStop.busStopStatus,
STOP_SEQUENCE_DEFINITION.LONGITUDE -> seq.busStop.longitude,
STOP_SEQUENCE_DEFINITION.LATITUDE -> seq.busStop.latitude,
STOP_SEQUENCE_DEFINITION.POLYLINE -> seq.polyLineToNextStop)
})
val newRouteDefDoc = MongoDBObject(
ROUTE_DEFINITION_DOCUMENT.ROUTE_ID -> busRoute.routeID,
ROUTE_DEFINITION_DOCUMENT.DIRECTION -> busRoute.direction,
ROUTE_DEFINITION_DOCUMENT.BUS_STOP_SEQUENCE -> stopSequenceList)
val query = MongoDBObject(
ROUTE_DEFINITION_DOCUMENT.ROUTE_ID -> busRoute.routeID,
ROUTE_DEFINITION_DOCUMENT.DIRECTION -> busRoute.direction
)
BusRouteDefinitionsDB.dBConnection.update(query, newRouteDefDoc, upsert = true)
BusRouteDefinitionsDB.incrementLogRequest(IncrementNumberInsertsCompleted(1))
}
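// Illustrative sketch (not part of the original source): the update above keys on
// (ROUTE_ID, DIRECTION); with upsert = true a missing route definition is inserted, and an
// existing one has its stop-sequence document replaced in place, so re-running the import is
// idempotent per route and direction.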
}
|
chrischivers/London-Bus-Tracker-Play-Framework
|
app/database/BusRouteDefinitionsDB.scala
|
Scala
|
mit
| 6,238
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{UnaryNode, _}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.optimizer.CarbonDecoderRelation
import org.apache.spark.sql.types._
import org.apache.carbondata.spark.CarbonAliasDecoderRelation
/**
* Top command
*/
case class Top(count: Int, topOrBottom: Int, dim: NamedExpression, msr: NamedExpression,
child: LogicalPlan) extends UnaryNode {
def output: Seq[Attribute] = child.output
override def references: AttributeSet = {
val list = List(dim, msr)
AttributeSet(list.flatMap(_.references))
}
}
object getDB {
def getDatabaseName(dbName: Option[String], sqlContext: SQLContext): String = {
dbName.getOrElse(sqlContext.asInstanceOf[HiveContext].catalog.client.currentDatabase)
}
}
/**
* Shows Loads in a table
*/
case class ShowLoadsCommand(databaseNameOp: Option[String], table: String, limit: Option[String])
extends LogicalPlan with Command {
override def children: Seq[LogicalPlan] = Seq.empty
override def output: Seq[Attribute] = {
Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
AttributeReference("Status", StringType, nullable = false)(),
AttributeReference("Load Start Time", TimestampType, nullable = false)(),
AttributeReference("Load End Time", TimestampType, nullable = false)())
}
}
/**
* Describe formatted for hive table
*/
case class DescribeFormattedCommand(sql: String, tblIdentifier: TableIdentifier)
extends LogicalPlan with Command {
override def children: Seq[LogicalPlan] = Seq.empty
override def output: Seq[AttributeReference] =
Seq(AttributeReference("result", StringType, nullable = false)())
}
case class CarbonDictionaryCatalystDecoder(
relations: Seq[CarbonDecoderRelation],
profile: CarbonProfile,
aliasMap: CarbonAliasDecoderRelation,
isOuter: Boolean,
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
}
abstract class CarbonProfile(attributes: Seq[Attribute]) extends Serializable {
def isEmpty: Boolean = attributes.isEmpty
}
case class IncludeProfile(attributes: Seq[Attribute]) extends CarbonProfile(attributes)
case class ExcludeProfile(attributes: Seq[Attribute]) extends CarbonProfile(attributes)
case class CreateDatabase(dbName: String, sql: String) extends LogicalPlan with Command {
override def children: Seq[LogicalPlan] = Seq.empty
override def output: Seq[AttributeReference] = {
Seq()
}
}
case class DropDatabase(dbName: String, isCascade: Boolean, sql: String)
extends LogicalPlan with Command {
override def children: Seq[LogicalPlan] = Seq.empty
override def output: Seq[AttributeReference] = {
Seq()
}
}
/**
* A logical plan representing insertion into Hive table.
* This plan ignores nullability of ArrayType, MapType, StructType unlike InsertIntoTable
* because Hive table doesn't have nullability for ARRAY, MAP, STRUCT types.
*/
case class InsertIntoCarbonTable(
table: CarbonDatasourceRelation,
partition: Map[String, Option[String]],
child: LogicalPlan,
overwrite: Boolean,
ifNotExists: Boolean)
extends LogicalPlan with Command {
override def children: Seq[LogicalPlan] = child :: Nil
override def output: Seq[Attribute] = Seq.empty
// This is the expected schema of the table prepared to be inserted into,
// including dynamic partition columns.
val tableOutput = table.carbonRelation.output
}
|
ashokblend/incubator-carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
|
Scala
|
apache-2.0
| 4,417
|
package chandu0101.scalajs.react.components.optionselectors
import japgolly.scalajs.react._
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.{Array => JArray}
/**
* key: PropTypes.string,
ref: PropTypes.String,
allowCreate: React.PropTypes.bool,
asyncOptions: React.PropTypes.func,
autoload: React.PropTypes.bool,
className: React.PropTypes.string,
clearable: React.PropTypes.bool,
clearAllText: React.PropTypes.string,
clearValueText: React.PropTypes.string,
delimiter: React.PropTypes.string,
disabled: React.PropTypes.bool,
filterOption: React.PropTypes.(SelectOption,String) => Boolean,
filterOptions: React.PropTypes.(JArray[SelectOption],String) => JArray[String],
ignoreCase: React.PropTypes.bool,
inputProps: React.PropTypes.object,
matchPos: React.PropTypes.string,
matchProp: React.PropTypes.string,
multi: React.PropTypes.bool,
name: React.PropTypes.string,
noResultsText: React.PropTypes.string,
onBlur: React.PropTypes.ReactEvent => Unit,
onChange: React.PropTypes.String => Unit,
onFocus: React.PropTypes.ReactEvent => Unit,
onOptionLabelClick: React.PropTypes.(String,ReactEvent) => Unit,
optionRenderer: React.PropTypes.SelectOption => ReactElement,
options: React.PropTypes.JArray[SelectOption],
placeholder: React.PropTypes.string,
searchable: React.PropTypes.bool,
searchPromptText: React.PropTypes.string,
value: React.PropTypes.string,
valueRenderer: React.PropTypes.SelectOption => ReactElement
*/
object ReactSelect {
def apply[T <: SelectOption](inputProps: js.UndefOr[js.Object] = js.undefined,
onBlur: js.UndefOr[ReactEvent => Unit] = js.undefined,
name: js.UndefOr[String] = js.undefined,
clearable: js.UndefOr[Boolean] = js.undefined,
clearAllText: js.UndefOr[String] = js.undefined,
filterOptions: js.UndefOr[(JArray[js.Object], String) => JArray[String]] = js.undefined,
asyncOptions: js.UndefOr[js.Function] = js.undefined,
onChange: js.UndefOr[String => Unit] = js.undefined,
valueRenderer: js.UndefOr[js.Object => ReactElement] = js.undefined,
clearValueText: js.UndefOr[String] = js.undefined,
matchPos: js.UndefOr[String] = js.undefined,
matchProp: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
allowCreate: js.UndefOr[Boolean] = js.undefined,
placeholder: js.UndefOr[String] = js.undefined,
filterOption: js.UndefOr[(js.Object, String) => Boolean] = js.undefined,
key: js.UndefOr[String] = js.undefined,
searchable: js.UndefOr[Boolean] = js.undefined,
noResultsText: js.UndefOr[String] = js.undefined,
options: js.UndefOr[JArray[T]] = js.undefined,
onOptionLabelClick: js.UndefOr[(String, ReactEvent) => Unit] = js.undefined,
optionRenderer: js.UndefOr[js.Object => ReactElement] = js.undefined,
className: js.UndefOr[String] = js.undefined,
onFocus: js.UndefOr[ReactEvent => Unit] = js.undefined,
ignoreCase: js.UndefOr[Boolean] = js.undefined,
disabled: js.UndefOr[Boolean] = js.undefined,
autoload: js.UndefOr[Boolean] = js.undefined,
value: js.UndefOr[String] = js.undefined,
multi: js.UndefOr[Boolean] = js.undefined,
searchPromptText: js.UndefOr[String] = js.undefined,
delimiter: js.UndefOr[String] = js.undefined) = {
val p = js.Dynamic.literal()
inputProps.foreach(v => p.updateDynamic("inputProps")(v))
onBlur.foreach(v => p.updateDynamic("onBlur")(v))
name.foreach(v => p.updateDynamic("name")(v))
clearable.foreach(v => p.updateDynamic("clearable")(v))
clearAllText.foreach(v => p.updateDynamic("clearAllText")(v))
filterOptions.foreach(v => p.updateDynamic("filterOptions")(v))
asyncOptions.foreach(v => p.updateDynamic("asyncOptions")(v))
onChange.foreach(v => p.updateDynamic("onChange")(v))
valueRenderer.foreach(v => p.updateDynamic("valueRenderer")(v))
clearValueText.foreach(v => p.updateDynamic("clearValueText")(v))
matchPos.foreach(v => p.updateDynamic("matchPos")(v))
matchProp.foreach(v => p.updateDynamic("matchProp")(v))
ref.foreach(v => p.updateDynamic("ref")(v))
allowCreate.foreach(v => p.updateDynamic("allowCreate")(v))
placeholder.foreach(v => p.updateDynamic("placeholder")(v))
filterOption.foreach(v => p.updateDynamic("filterOption")(v))
key.foreach(v => p.updateDynamic("key")(v))
searchable.foreach(v => p.updateDynamic("searchable")(v))
noResultsText.foreach(v => p.updateDynamic("noResultsText")(v))
options.foreach(v => p.updateDynamic("options")(v.map(_.toJson)))
onOptionLabelClick.foreach(v => p.updateDynamic("onOptionLabelClick")(v))
optionRenderer.foreach(v => p.updateDynamic("optionRenderer")(v))
className.foreach(v => p.updateDynamic("className")(v))
onFocus.foreach(v => p.updateDynamic("onFocus")(v))
ignoreCase.foreach(v => p.updateDynamic("ignoreCase")(v))
disabled.foreach(v => p.updateDynamic("disabled")(v))
autoload.foreach(v => p.updateDynamic("autoload")(v))
value.foreach(v => p.updateDynamic("value")(v))
multi.foreach(v => p.updateDynamic("multi")(v))
searchPromptText.foreach(v => p.updateDynamic("searchPromptText")(v))
delimiter.foreach(v => p.updateDynamic("delimiter")(v))
val f = React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.ReactSelect)
f(p).asInstanceOf[ReactComponentU_]
}
}
trait SelectOption {
def toJson:js.Dynamic
}
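// Illustrative usage sketch, not part of the original file: a minimal SelectOption
// implementation and a component instance built through the facade above. The
// "value"/"label" field names inside toJson follow the usual react-select option shape
// and are an assumption here, as is the SimpleOption case class itself. Rendering the
// result still requires the ReactSelect JS global to be loaded, as in the facade.
object ReactSelectExample {
  case class SimpleOption(value: String, label: String) extends SelectOption {
    def toJson: js.Dynamic = json("value" -> value, "label" -> label)
  }

  def demo: ReactComponentU_ = ReactSelect(
    options = JArray[SelectOption](SimpleOption("scala", "Scala"), SimpleOption("js", "JavaScript")),
    onChange = (selected: String) => println(s"selected: $selected"),
    placeholder = "Pick a language",
    multi = false
  )
}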
|
coreyauger/scalajs-react-components
|
core/src/main/scala/chandu0101/scalajs/react/components/optionselectors/ReactSelect.scala
|
Scala
|
apache-2.0
| 5,696
|
package de.khamrakulov.play.metrics.ganglia
import org.scalatest.{FlatSpec, Matchers}
import play.api.{Configuration, Environment}
import scala.collection.JavaConversions._
/**
* @author Timur Khamrakulov <timur.khamrakulov@gmail.com>.
*/
class ConfigCheckSpec extends FlatSpec with Matchers {
val env = Environment.simple()
val config = Configuration.load(env)
"Configuration" should "have new factory type" in {
val configurations = asScalaBuffer(config.getConfigList("metrics.factories").get)
configurations.size shouldBe 2
}
}
|
htimur/metrics-reporter-play
|
ganglia/src/test/scala/de/khamrakulov/play/metrics/ganglia/ConfigCheckSpec.scala
|
Scala
|
mit
| 555
|
package com.karasiq.mapdb.transaction
import java.util.concurrent.Executors
import com.karasiq.mapdb.MapDbProvider
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.Try
trait TransactionScheduler { self: MapDbProvider ⇒
protected final val txSchedulerExecutionContext = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
private object NoTransaction extends TxCtx {
override def doInTransaction[T](tx: TxCtx ⇒ T): Future[T] = {
implicit def context: ExecutionContext = TransactionScheduler.this.txSchedulerExecutionContext
val newContext = new TxCtx {
override def doInTransaction[T1](tx: (TxCtx) ⇒ T1): Future[T1] = {
Future.fromTry(Try(tx(this)))
}
}
Future {
val result = try {
tx(newContext)
} catch {
case th: Throwable ⇒
db.rollback()
throw th
}
// No errors
db.commit()
result
}
}
}
def newTransaction: TxCtx = NoTransaction
/**
   * Performs an asynchronous transaction
* @param tx Transaction body
* @param ctx Transaction context
* @tparam T Result type
* @return Future
*/
final def scheduleTransaction[T](tx: TxCtx ⇒ T)(implicit ctx: TxCtx = newTransaction): Future[T] = {
ctx.doInTransaction[T](tx)
}
/**
   * Performs a synchronous transaction
* @param tx Transaction body
* @param ctx Transaction context
* @tparam T Result type
* @return Transaction result
*/
final def withTransaction[T](tx: TxCtx ⇒ T)(implicit ctx: TxCtx = newTransaction): T = {
val future = scheduleTransaction(tx)(ctx)
Await.result(future, Duration.Inf)
}
}
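// Illustrative usage sketch, not part of the original file. `storage` below is a
// hypothetical object mixing MapDbProvider with TransactionScheduler. A nested
// withTransaction call that receives the outer TxCtx implicitly runs synchronously
// inside the outer transaction's future, so commit (or rollback) happens only once:
//
//   val result: String = storage.withTransaction { implicit ctx =>
//     storage.withTransaction { _ => "nested work shares the outer transaction" }
//   }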
|
Karasiq/mapdbutils
|
src/main/scala/com/karasiq/mapdb/transaction/TransactionScheduler.scala
|
Scala
|
mit
| 1,778
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl.dataset.{DistributedDataSet, _}
import com.intel.analytics.bigdl.{DataSet, Module}
import com.intel.analytics.bigdl.optim.DistriOptimizer.{Cache, logger}
import com.intel.analytics.bigdl.optim.Optimizer.{saveModel, saveOptimMethod}
import com.intel.analytics.bigdl.parameters.AllReduceParameter
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.intermediate.IRGraph
import com.intel.analytics.bigdl.utils.{Engine, MklBlas, MklDnn, Table}
import com.intel.analytics.bigdl.visualization.{TrainSummary, ValidationSummary}
import org.apache.spark.rdd.{RDD, ZippedPartitionsWithLocalityRDD}
import scala.reflect.ClassTag
abstract class AbstractOptimizer {
protected def getModel[T: ClassTag](
models: RDD[Cache[T]],
parameters: AllReduceParameter[T],
trainingModel: Module[T])(implicit ev: TensorNumeric[T]): Module[T]
/**
* Save train summaries.
* @param trainSummary train logger
* @param models cached models
* @param driverState driver state
* @param parameters [[AllReduceParameter]]
*/
protected def saveSummary[T: ClassTag](
trainSummary: TrainSummary,
models: RDD[Cache[T]],
driverState: Table,
parameters: AllReduceParameter[T],
trainingModel: Module[T])(implicit ev: TensorNumeric[T]): Unit = {
val currentIteration = driverState[Int]("neval") - 1
val parametersTrigger = trainSummary.getSummaryTrigger("Parameters")
if (parametersTrigger.isDefined && parametersTrigger.get(driverState)) {
val model = getModel(models, parameters, trainingModel)
val parametersTable = model.getParametersTable()
// Parallelize to create Histogram.
Engine.default.invokeAndWait(
parametersTable.keySet.toSeq.map(moduleName => () => {
val paramTable = parametersTable[Table](moduleName)
paramTable.keySet.foreach { paramName =>
trainSummary.addHistogram(
s"$moduleName/$paramName", paramTable[Tensor[T]](paramName), currentIteration)}
}))
}
val scalarTrigger = trainSummary.getScalarTriggers()
// Not parallelizable, because driverState is changing each iteration.
scalarTrigger.foreach { v =>
if (v._2(driverState)) {
// TODO: Support show learningrate for multiOptimMethod
require(driverState.contains(v._1), s"DistriOptimizer.saveSummary: Summary ${v._1} " +
s"is not supported now.")
trainSummary.addScalar(
v._1, driverState[Float](v._1), currentIteration
)
}
}
}
/**
* Validate current model and save the result.
* @param validationTrigger validation trigger
* @param validationDataSet validation dataset
* @param validationMethods validation methods
* @param coresPerNode cores per node
* @param models cached models
* @param state state table
* @param validationSummary validation logger.
* @param header log header string
*/
protected def validate[T](validationTrigger: Option[Trigger],
validationDataSet: Option[DataSet[MiniBatch[T]]],
validationMethods: Option[Array[ValidationMethod[T]]],
coresPerNode: Int,
models: RDD[Cache[T]],
state: Table,
validationSummary: Option[ValidationSummary],
header: String,
parameters: AllReduceParameter[T] = null): Unit = {
if (validationTrigger.isEmpty || validationDataSet.isEmpty) {
return
}
val trigger = validationTrigger.get
if (!trigger(state)) {
return
}
val vMethods = validationMethods.get
val validateRDD = validationDataSet.get.toDistributed().data(train = false)
logger.info(s"$header Validate model...")
val _subModelNumber = Engine.getEngineType match {
case MklBlas => coresPerNode
case MklDnn => 1
case _ => throw new IllegalArgumentException
}
val start = System.nanoTime()
val results = ZippedPartitionsWithLocalityRDD(models, validateRDD)((modelIter, dataIter) => {
val cached = modelIter.next()
val vMethodsArr = cached.localMethods
val workingModels = cached.localModels
// update with latest weight for validation
if (parameters != null) {
parameters.getWeights(cached.modelWeights.head.narrow(1,
parameters.paramOffset, parameters.size))
.waitResult()
}
if (Engine.getEngineType() == MklDnn) {
if (dataIter.hasNext) workingModels.foreach(_.evaluate())
} else {
workingModels.foreach(_.evaluate())
}
dataIter.map(batch => {
val stackSize = batch.size() / _subModelNumber
val extraSize = batch.size() % _subModelNumber
val parallelism = if (stackSize == 0) extraSize else _subModelNumber
Engine.default.invokeAndWait(
(0 until parallelism).map(b =>
() => {
val offset = b * stackSize + math.min(b, extraSize) + 1
val length = stackSize + (if (b < extraSize) 1 else 0)
val miniBatch = batch.slice(offset, length)
val input = miniBatch.getInput()
val target = miniBatch.getTarget()
if (Engine.getEngineType() == MklDnn && !workingModels(b).isInstanceOf[IRGraph[T]]) {
Engine.dnnComputing.invokeAndWait2(Array(0).map(_ => () => {
workingModels(b).forward(input)
}))
} else {
workingModels(b).forward(input)
}
val output = workingModels(b).output
val validatMethods = vMethodsArr(b).get
validatMethods.map(validation => {
validation(output, target)
})
}
)
).reduce((left, right) => {
left.zip(right).map { case (l, r) =>
l + r
}
})
})
}).reduce((left, right) => {
left.zip(right).map { case (l, r) =>
l + r
}
}).zip(vMethods)
val validateTime = (System.nanoTime() - start) / 1e9f
val count = results(0)._1.result()._2.toFloat
// print validation throughput
logger.info(s"$header validate model throughput is ${count / validateTime} records/second")
results.foreach(r => {
logger.info(s"$header ${r._2} is ${r._1}")
})
state("score") = results(0)._1.result._1
if(validationSummary.isDefined) {
results.foreach { r =>
val result = r._1.result
validationSummary.get.addScalar(r._2.toString(), result._1,
state[Int]("neval") - 1
)
}
}
}
/**
   * Create checkpoint.
* @param cacheTrigger cache trigger
* @param cachePath cache path
* @param isOverWrite whether over write
* @param wallClockTime wall clock time
* @param models cached models
* @param state state table
* @param parameters all reduce parameters
* @param optimMethods all optim methods
* @param trainingModel training model
*/
protected def checkpoint[T: ClassTag](
cacheTrigger: Option[Trigger],
cachePath: Option[String],
isOverWrite: Boolean,
wallClockTime: Long,
models: RDD[Cache[T]],
state: Table,
parameters: AllReduceParameter[T],
optimMethods: Map[String, OptimMethod[T]],
trainingModel: Module[T])(implicit ev: TensorNumeric[T]): Unit = {
cacheTrigger.foreach { trigger =>
cachePath.foreach { path =>
if (trigger(state)) {
saveModel(getModel(models, parameters, trainingModel), cachePath, isOverWrite,
s".${state[Int]("neval")}")
logger.info(s"[Wall Clock ${wallClockTime / 1e9}s] Save model to $path")
optimMethods.foreach{case (name, optimMethod) =>
optimMethod.state.update("epoch", state[Int]("epoch"))
optimMethod.state.update("neval", state[Int]("neval"))
saveOptimMethod(optimMethod, cachePath, isOverWrite, s"-$name.${state[Int]("neval")}")
logger.info(s"[Wall Clock ${wallClockTime / 1e9}s] Save optimMethod " +
s"${optimMethod} to $path")
}
}
}
}
}
/**
   * Clean up some internal state so that this or other optimizers can run optimize again.
   * This method is called at the end of optimize, so you need not call it if optimize succeeds.
   * If optimize fails, you may call it before the next optimize.
*/
private[bigdl] def clearState[T: ClassTag](models: RDD[DistriOptimizer.Cache[T]]) : Unit = {
// Reset the singleton flag, so other optimizers can run
models.mapPartitions(iter => {
Engine.resetSingletonFlag()
iter
}).count()
}
private[bigdl] def endEpoch[T: ClassTag](optimMethods: Map[String, OptimMethod[T]]): Unit = {
optimMethods.foreach { case (moduleName, optimMethod) =>
val records = optimMethod.state.get[Int]("recordsProcessedThisEpoch")
if (records.isDefined && records.get != 0) {
optimMethod.state("epoch") = optimMethod.state[Int]("epoch") + 1
optimMethod.state("recordsProcessedThisEpoch") = 0
}
}
}
private[bigdl] def setTrainData[T: ClassTag](
sampleRDD: RDD[Sample[T]],
batchSize: Int,
miniBatch: MiniBatch[T])(implicit ev: TensorNumeric[T])
: DistributedDataSet[MiniBatch[T]] = {
(DataSet.rdd(sampleRDD) ->
SampleToMiniBatch(miniBatch, batchSize, None))
.asInstanceOf[DistributedDataSet[MiniBatch[T]]]
}
private[bigdl] def setTrainData[T: ClassTag](sampleRDD: RDD[Sample[T]],
batchSize: Int,
featurePaddingParam: PaddingParam[T] = null,
labelPaddingParam: PaddingParam[T] = null)(implicit ev: TensorNumeric[T])
: DistributedDataSet[MiniBatch[T]] = {
val _featurePaddingParam = if (featurePaddingParam != null) Some(featurePaddingParam) else None
val _labelPaddingParam = if (labelPaddingParam != null) Some(labelPaddingParam) else None
(DataSet.rdd(sampleRDD) ->
SampleToMiniBatch(batchSize, _featurePaddingParam, _labelPaddingParam))
.asInstanceOf[DistributedDataSet[MiniBatch[T]]]
}
private[bigdl] def prepareInput[T: ClassTag](dataset: DataSet[MiniBatch[T]],
validationDataSet: Option[DataSet[MiniBatch[T]]]): Unit = {
dataset.asInstanceOf[DistributedDataSet[MiniBatch[T]]].cache()
if (validationDataSet.isDefined) {
validationDataSet.get.toDistributed().cache()
}
}
}
|
wzhongyuan/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/optim/AbstractOptimizer.scala
|
Scala
|
apache-2.0
| 11,022
|
package com.hypertino.binders.json.internal
import java.io.Writer
import com.hypertino.binders.util.MacroAdapter
import MacroAdapter.Context
import scala.language.experimental.macros
private [json] object JsonMacro {
def parseJson[O: c.WeakTypeTag]
(c: Context): c.Expr[O] = {
val c0: c.type = c
val bundle = new {
val ctx: c0.type = c0
} with JsonMacroImpl
c.Expr[O](bundle.parseJson[O])
}
def readJson[O: c.WeakTypeTag]
(c: Context): c.Expr[O] = {
val c0: c.type = c
val bundle = new {
val ctx: c0.type = c0
} with JsonMacroImpl
c.Expr[O](bundle.readJson[O])
}
def toJson[O: c.WeakTypeTag]
(c: Context): c.Expr[String] = {
val c0: c.type = c
val bundle = new {
val ctx: c0.type = c0
} with JsonMacroImpl
c.Expr[String](bundle.toJson[O])
}
def writeJson[O: c.WeakTypeTag]
(c: Context)(writer: c.Expr[Writer]): c.Expr[Unit] = {
val c0: c.type = c
val bundle = new {
val ctx: c0.type = c0
} with JsonMacroImpl
c.Expr[Unit](bundle.writeJson[O](writer))
}
}
|
hypertino/json-binders
|
jsonBinders/shared/src/main/scala/com/hypertino/binders/json/internal/JsonMacro.scala
|
Scala
|
bsd-3-clause
| 1,078
|
object Test extends App {
import collection._
val xs: SeqView[(String, Int), Seq[_]] = List("x").view.zip(Stream.from(0))
println(xs)
val ys = List(1, 2, 3).view map { x => println("mapping "+x); x + 1 }
println("ys defined")
println(ys.head)
println(ys.tail)
println(ys(2))
println(ys)
println(ys.force)
val zs = Array(1, 2, 3).view
val as: SeqView[Int, Array[Int]] = zs map (_ + 1)
val bs: Array[Int] = as.force
val cs = zs.reverse
cs(0) += 1
assert(cs.force.deep == Array(4, 2, 1).deep)
assert(zs(2) == 4)
assert(bs.deep == Array(2, 3, 4).deep)
}
/* crash confirmed.
2.8 regression: CCE when zipping list projection with stream
Reported by: szeiger Owned by: odersky
Priority: normal Component: Standard Library
Keywords: collections, zip Cc:
Fixed in version:
Description
Welcome to Scala version 2.8.0.r18784-b20090925021043 (Java HotSpot(TM) Client VM, Java 1.6.0_11).
Type in expressions to have them evaluated.
Type :help for more information.
scala> List("x").view.zip(Stream.from(0))List("x").view.zip(Stream.from(0))
java.lang.ClassCastException: scala.collection.generic.IterableViewTemplate$$anon$8 cannot be cast to scala.collection.generic.SequenceView
at .<init>(<console>:5)
at .<clinit>(<console>)
at RequestResult$.<init>(<console>:4)
at RequestResult$.<clinit>(<console>)
at RequestResult$result(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.Nat...
*/
|
felixmulder/scala
|
test/files/run/viewtest.scala
|
Scala
|
bsd-3-clause
| 1,520
|
package zeroadv.position
import zeroadv.{IBeaconBeacon, GimbalBeacon, DimM, BeaconSpotting}
class BeaconDistance {
def distanceToBeacon(spotting: BeaconSpotting): DimM = {
val txPower = spotting.beacon match {
case g: GimbalBeacon => -69
case IBeaconBeacon(_, _, _, tp) => tp
}
DimM(distanceTo(spotting.rssi.rssi, txPower))
}
// from http://stackoverflow.com/questions/20416218/understanding-ibeacon-distancing
private def distanceTo(rssi: Int, txPower: Int) = {
val ratio = rssi.toDouble*1.0d/txPower.toDouble
if (ratio < 1.0) {
Math.pow(ratio, 10)
}
else {
val accuracy = 0.89976d*Math.pow(ratio, 7.7095d) + 0.111d
accuracy
}
}
}
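// Illustrative sketch, not part of the original file: the private distanceTo formula
// restated standalone so the two regimes are easy to inspect. With rssi = -60 and
// txPower = -59 the ratio is slightly above 1, so the fitted-curve branch applies and
// the estimate comes out around 1.1 metres.
object BeaconDistanceFormulaExample extends App {
  def approxDistanceMetres(rssi: Int, txPower: Int): Double = {
    val ratio = rssi.toDouble / txPower.toDouble
    if (ratio < 1.0) Math.pow(ratio, 10) // practically on top of the beacon
    else 0.89976d * Math.pow(ratio, 7.7095d) + 0.111d // fitted curve from the linked answer
  }

  println(approxDistanceMetres(-60, -59)) // ~1.1
}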
|
adamw/zeroadv
|
collector/src/main/scala/zeroadv/position/BeaconDistance.scala
|
Scala
|
gpl-2.0
| 709
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import scala.concurrent.Future
import io.fabric8.kubernetes.api.model.Pod
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.spark.SparkContext
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.KubernetesUtils
import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.internal.config.SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc.{RpcAddress, RpcCallContext}
import org.apache.spark.scheduler.{ExecutorKilled, ExecutorLossReason, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SchedulerBackendUtils}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RegisterExecutor
import org.apache.spark.util.{ThreadUtils, Utils}
private[spark] class KubernetesClusterSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
kubernetesClient: KubernetesClient,
executorService: ScheduledExecutorService,
snapshotsStore: ExecutorPodsSnapshotsStore,
podAllocator: ExecutorPodsAllocator,
lifecycleEventHandler: ExecutorPodsLifecycleManager,
watchEvents: ExecutorPodsWatchSnapshotSource,
pollEvents: ExecutorPodsPollingSnapshotSource)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {
protected override val minRegisteredRatio =
if (conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).isEmpty) {
0.8
} else {
super.minRegisteredRatio
}
private val initialExecutors = SchedulerBackendUtils.getInitialTargetExecutorNumber(conf)
private val shouldDeleteDriverService = conf.get(KUBERNETES_DRIVER_SERVICE_DELETE_ON_TERMINATION)
private val shouldDeleteExecutors = conf.get(KUBERNETES_DELETE_EXECUTORS)
private val defaultProfile = scheduler.sc.resourceProfileManager.defaultResourceProfile
// Allow removeExecutor to be accessible by ExecutorPodsLifecycleEventHandler
private[k8s] def doRemoveExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
if (isExecutorActive(executorId)) {
removeExecutor(executorId, reason)
}
}
private def setUpExecutorConfigMap(driverPod: Option[Pod]): Unit = {
val configMapName = KubernetesClientUtils.configMapNameExecutor
val confFilesMap = KubernetesClientUtils
.buildSparkConfDirFilesMap(configMapName, conf, Map.empty)
val labels =
Map(SPARK_APP_ID_LABEL -> applicationId(), SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE)
val configMap = KubernetesClientUtils.buildConfigMap(configMapName, confFilesMap, labels)
KubernetesUtils.addOwnerReference(driverPod.orNull, Seq(configMap))
kubernetesClient.configMaps().create(configMap)
}
/**
* Get an application ID associated with the job.
* This returns the string value of spark.app.id if set, otherwise
* the locally-generated ID from the superclass.
*
* @return The application ID
*/
override def applicationId(): String = {
conf.getOption("spark.app.id").map(_.toString).getOrElse(super.applicationId)
}
override def start(): Unit = {
super.start()
val initExecs = Map(defaultProfile -> initialExecutors)
podAllocator.setTotalExpectedExecutors(initExecs)
lifecycleEventHandler.start(this)
podAllocator.start(applicationId(), this)
watchEvents.start(applicationId())
pollEvents.start(applicationId())
if (!conf.get(KUBERNETES_EXECUTOR_DISABLE_CONFIGMAP)) {
setUpExecutorConfigMap(podAllocator.driverPod)
}
}
override def stop(): Unit = {
// When `CoarseGrainedSchedulerBackend.stop` throws `SparkException`,
// K8s cluster scheduler should log and proceed in order to delete the K8s cluster resources.
Utils.tryLogNonFatalError {
super.stop()
}
Utils.tryLogNonFatalError {
snapshotsStore.stop()
}
Utils.tryLogNonFatalError {
watchEvents.stop()
}
Utils.tryLogNonFatalError {
pollEvents.stop()
}
if (shouldDeleteDriverService) {
Utils.tryLogNonFatalError {
kubernetesClient
.services()
.withLabel(SPARK_APP_ID_LABEL, applicationId())
.delete()
}
}
if (shouldDeleteExecutors) {
Utils.tryLogNonFatalError {
kubernetesClient
.pods()
.withLabel(SPARK_APP_ID_LABEL, applicationId())
.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
.delete()
}
if (!conf.get(KUBERNETES_EXECUTOR_DISABLE_CONFIGMAP)) {
Utils.tryLogNonFatalError {
kubernetesClient
.configMaps()
.withLabel(SPARK_APP_ID_LABEL, applicationId())
.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
.delete()
}
}
}
Utils.tryLogNonFatalError {
ThreadUtils.shutdown(executorService)
}
Utils.tryLogNonFatalError {
kubernetesClient.close()
}
}
override def doRequestTotalExecutors(
resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] = {
podAllocator.setTotalExpectedExecutors(resourceProfileToTotalExecs)
Future.successful(true)
}
override def sufficientResourcesRegistered(): Boolean = {
totalRegisteredExecutors.get() >= initialExecutors * minRegisteredRatio
}
override def getExecutorIds(): Seq[String] = synchronized {
super.getExecutorIds()
}
override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = {
executorIds.foreach { id =>
removeExecutor(id, ExecutorKilled)
}
// Give some time for the executors to shut themselves down, then forcefully kill any
// remaining ones. This intentionally ignores the configuration about whether pods
// should be deleted; only executors that shut down gracefully (and are then collected
// by the ExecutorPodsLifecycleManager) will respect that configuration.
val killTask = new Runnable() {
override def run(): Unit = Utils.tryLogNonFatalError {
val running = kubernetesClient
.pods()
.withField("status.phase", "Running")
.withLabel(SPARK_APP_ID_LABEL, applicationId())
.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
.withLabelIn(SPARK_EXECUTOR_ID_LABEL, executorIds: _*)
if (!running.list().getItems().isEmpty()) {
logInfo(s"Forcefully deleting ${running.list().getItems().size()} pods " +
s"(out of ${executorIds.size}) that are still running after graceful shutdown period.")
running.delete()
}
}
}
executorService.schedule(killTask, conf.get(KUBERNETES_DYN_ALLOC_KILL_GRACE_PERIOD),
TimeUnit.MILLISECONDS)
// Return an immediate success, since we can't confirm or deny that executors have been
// actually shut down without waiting too long and blocking the allocation thread, which
// waits on this future to complete, blocking further allocations / deallocations.
//
// This relies a lot on the guarantees of Spark's RPC system, that a message will be
// delivered to the destination unless there's an issue with the connection, in which
// case the executor will shut itself down (and the driver, separately, will just declare
// it as "lost"). Coupled with the allocation manager keeping track of which executors are
// pending release, returning "true" here means that eventually all the requested executors
// will be removed.
//
// The cleanup timer above is just an optimization to make sure that stuck executors don't
// stick around in the k8s server. Normally it should never delete any pods at all.
Future.successful(true)
}
override def createDriverEndpoint(): DriverEndpoint = {
new KubernetesDriverEndpoint()
}
override protected def createTokenManager(): Option[HadoopDelegationTokenManager] = {
Some(new HadoopDelegationTokenManager(conf, sc.hadoopConfiguration, driverEndpoint))
}
override protected def isExecutorExcluded(executorId: String, hostname: String): Boolean = {
podAllocator.isDeleted(executorId)
}
private class KubernetesDriverEndpoint extends DriverEndpoint {
private def ignoreRegisterExecutorAtStoppedContext: PartialFunction[Any, Unit] = {
case _: RegisterExecutor if sc.isStopped => // No-op
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] =
ignoreRegisterExecutorAtStoppedContext.orElse(super.receiveAndReply(context))
override def onDisconnected(rpcAddress: RpcAddress): Unit = {
// Don't do anything besides disabling the executor - allow the Kubernetes API events to
// drive the rest of the lifecycle decisions
// TODO what if we disconnect from a networking issue? Probably want to mark the executor
// to be deleted eventually.
addressToExecutorId.get(rpcAddress).foreach(disableExecutor)
}
}
}
|
BryanCutler/spark
|
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
|
Scala
|
apache-2.0
| 9,970
|
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package hadoop
// Jackson
import com.fasterxml.jackson.databind.JsonNode
// Scalaz
import scalaz._
import Scalaz._
// Scalding
import com.twitter.scalding.Args
// Iglu Scala Client
import iglu.client.Resolver
import iglu.client.validation.ProcessingMessageMethods._
// Snowplow Common Enrich
import common.utils.ConversionUtils
// This project
import utils.{
ScalazArgs,
JsonUtils
}
/**
* The configuration for the SnowPlowEtlJob.
*/
case class ShredJobConfig(
inFolder: String,
outFolder: String,
badFolder: String,
exceptionsFolder: Option[String],
igluResolver: Resolver)
/**
* Module to handle configuration for
* the SnowPlowEtlJob
*/
object ShredJobConfig {
private val IgluConfigArg = "iglu_config"
/**
* Loads the Config from the Scalding
* job's supplied Args.
*
* @param args The arguments to parse
   * @return the ShredJobConfig, or one or
* more error messages, boxed
* in a Scalaz Validation Nel
*/
def loadConfigFrom(args: Args): ValidatedNel[ShredJobConfig] = {
import ScalazArgs._
val inFolder = args.requiredz("input_folder")
val outFolder = args.requiredz("output_folder")
val badFolder = args.requiredz("bad_rows_folder")
val exceptionsFolder = args.optionalz("exceptions_folder")
val igluResolver = args.requiredz(IgluConfigArg) match {
case Failure(e) => e.failNel
case Success(s) => for {
node <- (base64ToJsonNode(s).toValidationNel: ValidatedNel[JsonNode])
reso <- Resolver.parse(node)
} yield reso
}
(inFolder.toValidationNel |@| outFolder.toValidationNel |@| badFolder.toValidationNel |@| exceptionsFolder.toValidationNel |@| igluResolver) { ShredJobConfig(_,_,_,_,_) }
}
/**
* Converts a base64-encoded JSON
* String into a JsonNode.
*
* @param str base64-encoded JSON
* @return a JsonNode on Success,
* a NonEmptyList of
* ProcessingMessages on
* Failure
*/
private[hadoop] def base64ToJsonNode(str: String): Validated[JsonNode] =
(for {
raw <- ConversionUtils.decodeBase64Url(IgluConfigArg, str)
node <- JsonUtils.extractJson(IgluConfigArg, raw)
} yield node).toProcessingMessage
}
|
mdavid/lessig-bigdata
|
lib/snowplow/3-enrich/scala-hadoop-shred/src/main/scala/com.snowplowanalytics.snowplow.enrich/hadoop/ShredJobConfig.scala
|
Scala
|
mit
| 3,005
|
package avrohugger
package format
object FieldRenamer {
// Reserved words from https://www.scala-lang.org/files/archive/spec/2.13/01-lexical-syntax.html#identifiers
private val RESERVED_WORDS: Set[String] = Set("abstract", "case", "catch", "class", "def", "do", "else", "extends", "final", "finally",
"for", "forSome", "if", "implicit", "lazy", "macro", "match", "new", "object", "override", "package", "private", "protected", "return",
"sealed", "super", "this", "throw", "trait", "try", "type", "val", "var", "while", "with", "yield")
private def backtick(variable: String): String = s"`$variable`"
private def isMangled(fieldName: String): Boolean = RESERVED_WORDS.contains(fieldName) || fieldName.endsWith("_")
def rename(fieldName: String): String = if (isMangled(fieldName)) backtick(fieldName) else fieldName
}
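// Illustrative usage sketch, not part of the original file: reserved words and
// identifiers ending in an underscore get backticked, everything else passes through.
object FieldRenamerExample extends App {
  assert(FieldRenamer.rename("type") == "`type`")     // reserved word
  assert(FieldRenamer.rename("value_") == "`value_`") // trailing underscore
  assert(FieldRenamer.rename("name") == "name")       // ordinary identifier, unchanged
}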
|
julianpeeters/avrohugger
|
avrohugger-core/src/main/scala/format/FieldRenamer.scala
|
Scala
|
apache-2.0
| 840
|
package uk.co.mattthomson.coursera.ggp.gresley.gdl
import com.twitter.util.Memoize
case class GameState(game: GameDescription, trueFacts: Set[Fact]) {
lazy val legalActions = Memoize(legalActionsUnmemoized)
private def legalActionsUnmemoized(role: String) = {
game.actions.getOrElse(role, List()).filter(isLegal(role))
}
def isLegal(role: String)(action: Action) = prove(Legal(Role(role), action), None)
def update(actions: Map[String, Action]) = {
val actionsWithRoles = actions.map { case (role, action) => (Role(role), action) }.toMap
val facts = game.baseFacts.filter(f => prove(Next(f), Some(actionsWithRoles)))
new GameState(game, facts)
}
lazy val isTerminal = prove(Terminal, None)
lazy val value = Memoize(valueUnmemoized)
private def valueUnmemoized(role: String) = game.possibleValues(role)
.map(v => Goal(Role(role), LiteralTerm(v)))
.find(prove(_, None))
.fold(0)(_.value)
val prove = Memoize(proveUnmemoized)
private def proveUnmemoized(input: (Fact, Option[Map[Role, Action]])) = {
val (fact, actions) = input
if (game.constantFacts.getOrElse(fact.tag, Set()).contains(fact)) true
else game.rules(fact).exists(_.prove(fact, this, actions))
}
}
|
matt-thomson/gresley
|
src/main/scala/uk/co/mattthomson/coursera/ggp/gresley/gdl/GameState.scala
|
Scala
|
mit
| 1,234
|
/*
* Copyright 2021 Spotify AB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.spotify.scio.extra.sparkey.instances
trait SparkeySetBase[T] extends Set[T] {
override def incl(elem: T): Set[T] =
throw new NotImplementedError("Sparkey-backed set; operation not supported.")
override def excl(elem: T): Set[T] =
throw new NotImplementedError("Sparkey-backed set; operation not supported.")
}
|
spotify/scio
|
scio-extra/src/main/scala-2.13/com/spotify/scio/extra/sparkey/instances/SparkeySetBase.scala
|
Scala
|
apache-2.0
| 932
|
package module2
class HelloScala2 {
case class TryOut(some: String, fields: List[String])
def test = "Hello"
def someOther = 42
}
|
RadoBuransky/sonar-scoverage-plugin
|
samples/maven/combined-scala-java-multi-module-sonar/module2/src/main/scala/module2/HelloScala2.scala
|
Scala
|
lgpl-3.0
| 139
|
package com.twitter.finagle
private[finagle] object IOExceptionStrings {
/** Strings that commonly signal a broken socket connection */
val ChannelClosedStrings: Set[String] = Set(
"Connection reset by peer", // Found on linux
"Broken pipe", // Found on linux
"An existing connection was forcibly closed by the remote host", // Found on windows
"syscall:read(..) failed: Connection reset by peer" // Found on linux w/ native epoll
)
/** Strings that commonly signal failure to establish a socket connection */
val ConnectionFailedStrings: Set[String] = Set(
"Connection timed out", // from ConnectionFailedException found on linux NIO1
"No route to host"
)
/** Exception strings that are common for `IOException`s that don't need vocal logging */
val FinestIOExceptionMessages: Set[String] = ChannelClosedStrings ++ ConnectionFailedStrings
}
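// Illustrative sketch, not part of the original file: a typical use of these sets is
// deciding whether an IOException's message only deserves fine-grained logging. The
// helper object and method name are hypothetical.
private[finagle] object IOExceptionStringsExample {
  def shouldLogQuietly(t: java.io.IOException): Boolean =
    Option(t.getMessage).exists(IOExceptionStrings.FinestIOExceptionMessages.contains)
}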
|
koshelev/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/IOExceptionStrings.scala
|
Scala
|
apache-2.0
| 886
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization.tfocs
import scala.io.Source
import org.scalatest.FunSuite
import org.apache.spark.mllib.linalg.{ DenseVector, Vectors }
import org.apache.spark.mllib.optimization.tfocs.DVectorFunctions._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
class SolverSLPSuite extends FunSuite with MLlibTestSparkContext {
test("The values and losses returned by Spark SolverSLP should match those returned by Matlab " +
"tfocs's solver_sLP") {
// This test also serves to validate the implementation of TFOCS_SCD.optimize against the Matlab
// tfocs implementation.
// The test below checks that the results match those of the following TFOCS matlab program
// (using TFOCS version 1945a771f315acd4cc6eba638b5c01fb52ee7aaa):
//
// A = sparse([2, 5, 3, 2, 4], [2, 2, 3, 8, 10], [0.632374636716572, 0.198436985375040, ...
// 0.179885783103202, 0.014792694748719, 0.244326895623829], 5, 10);
// b = [0 7.127414296861894 1.781441255102280 2.497425876822379 2.186136752456199]';
// c = [-1.078275146772097 -0.368208440839284 0.680376092886272 0.256371934668609 ...
// 1.691983132986665 0.059837119884475 -0.221648385883038 -0.298134575377277 ...
// -1.913199010346937 0.745084172661387]';
// mu = 1e-2;
// opts = struct('restart', -Inf, 'continuation', true, 'noscale', true, ...
// 'output_always_use_x', true, 'stopCrit', 4, 'tol', 1e-3);
// contOpts = struct('maxIts', 10, 'initialTol', 1e-2);
// [x,out,optsOut] = solver_sLP(c, A, b, mu, [], [], opts, contOpts);
// format long
// x
// out.f
val A = sc.parallelize(Array(Vectors.zeros(5),
Vectors.sparse(5, Seq((1, 0.632374636716572), (4, 0.198436985375040))),
Vectors.sparse(5, Seq((2, 0.179885783103202))), Vectors.zeros(5), Vectors.zeros(5),
Vectors.zeros(5), Vectors.zeros(5), Vectors.sparse(5, Seq((1, 0.014792694748719))),
Vectors.zeros(5), Vectors.sparse(5, Seq((3, 0.244326895623829)))), 2)
var b = new DenseVector(Array(0, 7.127414296861894, 1.781441255102280, 2.497425876822379,
2.186136752456199))
val c = sc.parallelize(Array(-1.078275146772097, -0.368208440839284, 0.680376092886272,
0.256371934668609, 1.691983132986665, 0.059837119884475, -0.221648385883038,
-0.298134575377277, -1.913199010346937, 0.745084172661387), 2).glom.map(
new DenseVector(_))
val mu = 1e-2
val dualTolCheckInterval = 1 // Matlab tfocs checks for convergence on every iteration.
val (x, lossHistory) =
SolverSLP.run(c, A, b, mu, None, None, 10, 1e-3, 1e-2, dualTolCheckInterval)
val expectedX = Vectors.dense(2048.722778866985, 0, 0, 0, 0, 0, 421.131933177772,
546.803269626285, 3635.078119659181, 10.514625914138)
val expectedLossHistory = Array(-252.005414340769, -252.005414340769, -251.156484099887,
-250.900750472038, -250.441137874951, -746.515181668927, -746.515181668927, -745.988362852497,
-1365.253694768042, -1365.253694768042, -1364.529817385060, -2107.579888200214,
-2107.579888200214, -2106.568363677963, -2973.333616393671, -2973.333616393671,
-2971.953815423126, -3962.641290922493, -3962.641290922493, -3961.901828375015,
-5076.658876844795, -5076.658876844795, -5076.122659281430, -5075.480196164118,
-5074.650921295725, -5073.921424808851, -5072.987948040954, -6311.277495149125,
-6311.277495149125, -6310.451241168823, -7672.322045345107, -7672.322045345107,
-7671.209444458280, -9157.089180439810, -9157.089180439810, -9155.947984506271)
assert(Vectors.dense(x.collectElements) ~= expectedX relTol 1e-12,
"Each x vector element should match the expected value, within tolerance.")
// The tfocs implementation may return loss values for either the x or y vectors of the
// accelerated descent implementation. The spark implementation may also return losses for
// either x or y but using different criteria to select between these vectors. As a result
// only the first and last losses reported by the optimization task are validated here, both of
// which are calculated from the x vector.
assert(lossHistory.length == expectedLossHistory.length,
"The number of iterations should be the same.")
assert(lossHistory.head ~= expectedLossHistory.head relTol 1e-12,
"The loss values on the first iteration should match, within tolerance.")
assert(lossHistory.last ~= expectedLossHistory.last relTol 1e-12,
"The loss values on the last iteration should match, within tolerance.")
}
}
|
databricks/spark-tfocs
|
src/test/scala/org/apache/spark/mllib/optimization/tfocs/SolverSLPSuite.scala
|
Scala
|
apache-2.0
| 5,432
|
package com.twitter.finagle.protobuf.rpc
// Imports below are inferred from usage in this file; the extract did not preserve an
// import list, so the exact source packages are an assumption.
import com.google.protobuf.{RpcCallback, RpcController}
import com.twitter.finagle.ChannelClosedException
import com.twitter.util.TimeoutException
class RpcControllerWithOnFailureCallback extends RpcController {
private var cancelRequested = false
  private var callback: RpcCallback[Throwable] = null
def reset(): Unit = {
cancelRequested = false
}
def failed(): Boolean = { throw new RuntimeException("Not implemented") }
def errorText(): String = { throw new RuntimeException("Not implemented") }
def startCancel(): Unit = { cancelRequested = true; }
def setFailed(reason: String): Unit = { throw new RuntimeException("Not implemented") }
def setFailed(e: Throwable): Unit = {
    callback.run(adapt(e))
}
def isCanceled() = cancelRequested;
def notifyOnCancel(callback: RpcCallback[Object]): Unit = { throw new RuntimeException("Not implemented") }
def onFailure(callback: RpcCallback[Throwable]): RpcControllerWithOnFailureCallback = {
    this.callback = callback
this
}
def adapt(e: Throwable): Throwable = {
e match {
case _: TimeoutException => {
        val wrapped = new java.util.concurrent.TimeoutException(e.getMessage())
        wrapped.initCause(e)
        wrapped
}
      case _: ChannelClosedException => new RuntimeException(e)
case _ => e
}
}
}
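// Illustrative usage sketch, not part of the original file: register a failure callback
// and exercise the cancellation flag. The callback simply prints the adapted throwable.
object RpcControllerWithOnFailureCallbackExample extends App {
  val controller = new RpcControllerWithOnFailureCallback().onFailure(
    new RpcCallback[Throwable] {
      def run(t: Throwable): Unit = println(s"rpc failed: $t")
    })
  controller.startCancel()
  assert(controller.isCanceled())
  controller.setFailed(new RuntimeException("boom")) // routed to the registered callback
}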
|
firebase/finagle
|
finagle-protobuf/src/main/scala/com/twitter/finagle/protobuf/rpc/RpcControllerWithOnFailureCallback.scala
|
Scala
|
apache-2.0
| 1,246
|
package com.github.mdr.mash.runtime
import scala.PartialFunction.condOpt
sealed trait MashBoolean extends MashValue with Comparable[MashBoolean] {
import MashBoolean._
def value: Boolean = this == True
def negate: MashBoolean = this match {
case True ⇒ False
case False ⇒ True
}
override def toString = this match {
case True ⇒ "true"
case False ⇒ "false"
}
def compareTo(that: MashBoolean) = (this, that) match {
case (True, True) | (False, False) ⇒ 0
case (False, True) ⇒ -1
case (True, False) ⇒ 1
}
}
object MashBoolean {
case object True extends MashBoolean
case object False extends MashBoolean
def apply(x: Boolean): MashBoolean = if (x) True else False
def unapply(x: MashValue): Option[MashBoolean] = condOpt(x) {
case x: MashBoolean ⇒ x
}
}
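// Illustrative usage sketch, not part of the original file: construction, negation,
// ordering and the extractor.
object MashBooleanExample extends App {
  assert(MashBoolean(true) == MashBoolean.True)
  assert(MashBoolean.True.negate == MashBoolean.False)
  assert(MashBoolean.False.compareTo(MashBoolean.True) < 0)
  (MashBoolean.True: MashValue) match {
    case MashBoolean(b) => assert(b.value)
    case other          => sys.error(s"expected a MashBoolean, got $other")
  }
}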
|
mdr/mash
|
src/main/scala/com/github/mdr/mash/runtime/MashBoolean.scala
|
Scala
|
mit
| 871
|
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package play.modules.reactivemongo.json.commands
import play.api.libs.json.{ Json, JsObject, OWrites }, Json.JsValueWrapper
import reactivemongo.api.commands.{
FindAndModifyCommand,
ResolvedCollectionCommand
}
import play.modules.reactivemongo.json.JSONSerializationPack
object JSONFindAndModifyCommand extends FindAndModifyCommand[JSONSerializationPack.type] {
val pack: JSONSerializationPack.type = JSONSerializationPack
}
object JSONFindAndModifyImplicits {
import JSONFindAndModifyCommand._
import reactivemongo.utils.option
implicit object FindAndModifyResultReader extends DealingWithGenericCommandErrorsReader[FindAndModifyResult] {
def readResult(result: JsObject): FindAndModifyResult =
FindAndModifyResult(
(result \ "lastErrorObject").asOpt[JsObject].map { doc =>
UpdateLastError(
updatedExisting = (doc \ "updatedExisting").
asOpt[Boolean].getOrElse(false),
n = (doc \ "n").asOpt[Int].getOrElse(0),
err = (doc \ "err").asOpt[String],
upsertedId = (doc \ "upserted").toOption)
},
(result \ "value").asOpt[JsObject])
}
implicit object FindAndModifyWriter
extends OWrites[ResolvedCollectionCommand[FindAndModify]] {
def writes(command: ResolvedCollectionCommand[FindAndModify]): JsObject = {
val optionalFields = List[Option[(String, JsValueWrapper)]](
command.command.sort.map("sort" -> _),
command.command.fields.map("fields" -> _)).flatten
Json.obj(
"findAndModify" -> command.collection,
"query" -> command.command.query) ++
Json.obj(optionalFields: _*) ++
(command.command.modify match {
case Update(document, fetchNewObject, upsert) => Json.obj(
"update" -> document,
"new" -> fetchNewObject,
"upsert" -> upsert)
case Remove => Json.obj("remove" -> true)
})
}
}
}
|
duncancrawford/Play-Json-ReactiveMongo
|
src/main/scala/play/modules/reactivemongo/findandmodify.scala
|
Scala
|
apache-2.0
| 2,569
|
package yuuto.yuutogates.miltipart
import java.util
import java.lang.Iterable
import codechicken.lib.vec.BlockCoord
import codechicken.multipart.MultiPartRegistry.IPartConverter
import codechicken.multipart.TMultiPart
import net.minecraft.block.Block
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
import yuuto.yuutogates.api.base.{PartGate, TileGate}
import yuuto.yuutogates.api.gates.GateHelper
import yuuto.yuutogates.api.material.{GateMaterialRegistry, GateMaterial}
import yuuto.yuutogates.blocks.BlocksYG
import yuuto.yuutogates.tile.{TileGateXOR, TileGateOR, TileGateAND}
/**
* Created by Yuuto on 9/26/2015.
*/
object MultiPartConverter extends IPartConverter{
val blocks:util.List[Block]=util.Arrays.asList(BlocksYG.blockGateBasic);
override def blockTypes:Iterable[Block]=blocks;
override def convert(world: World, pos: BlockCoord):TMultiPart={
val tile:TileEntity=world.getTileEntity(pos.x, pos.y, pos.z);
if(!tile.isInstanceOf[TileGate])
return null;
val part:PartGate={
tile match {
case a:TileGateAND=>new PartGateAND();
case o:TileGateOR=>new PartGateOR();
case x:TileGateXOR=>new PartGateXOR();
case default=>null;
}
}
if(part == null)
return null;
val nbt:NBTTagCompound=new NBTTagCompound();
tile.writeToNBT(nbt);
part.load(nbt);
part;
}
}
|
AnimeniacYuuto/YuutoGates
|
src/main/scala/yuuto/yuutogates/miltipart/MultiPartConverter.scala
|
Scala
|
lgpl-3.0
| 1,434
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.io.ByteArrayOutputStream
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.streaming.{StreamTest, Trigger}
class ConsoleWriterSuite extends StreamTest {
import testImplicits._
test("microbatch - default") {
val input = MemoryStream[Int]
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
val query = input.toDF().writeStream.format("console").start()
try {
input.addData(1, 2, 3)
query.processAllAvailable()
input.addData(4, 5, 6)
query.processAllAvailable()
input.addData()
query.processAllAvailable()
} finally {
query.stop()
}
}
assert(captured.toString() ==
"""-------------------------------------------
|Batch: 0
|-------------------------------------------
|+-----+
||value|
|+-----+
|| 1|
|| 2|
|| 3|
|+-----+
|
|-------------------------------------------
|Batch: 1
|-------------------------------------------
|+-----+
||value|
|+-----+
|| 4|
|| 5|
|| 6|
|+-----+
|
|-------------------------------------------
|Batch: 2
|-------------------------------------------
|+-----+
||value|
|+-----+
|+-----+
|
|""".stripMargin)
}
test("microbatch - with numRows") {
val input = MemoryStream[Int]
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
val query = input.toDF().writeStream.format("console").option("NUMROWS", 2).start()
try {
input.addData(1, 2, 3)
query.processAllAvailable()
} finally {
query.stop()
}
}
assert(captured.toString() ==
"""-------------------------------------------
|Batch: 0
|-------------------------------------------
|+-----+
||value|
|+-----+
|| 1|
|| 2|
|+-----+
|only showing top 2 rows
|
|""".stripMargin)
}
test("microbatch - truncation") {
val input = MemoryStream[String]
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
val query = input.toDF().writeStream.format("console").option("TRUNCATE", true).start()
try {
input.addData("123456789012345678901234567890")
query.processAllAvailable()
} finally {
query.stop()
}
}
assert(captured.toString() ==
"""-------------------------------------------
|Batch: 0
|-------------------------------------------
|+--------------------+
|| value|
|+--------------------+
||12345678901234567...|
|+--------------------+
|
|""".stripMargin)
}
test("continuous - default") {
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
val input = spark.readStream
.format("rate")
.option("numPartitions", "1")
.option("rowsPerSecond", "5")
.load()
.select('value)
val query = input.writeStream.format("console").trigger(Trigger.Continuous(200)).start()
assert(query.isActive)
query.stop()
}
}
}
|
bravo-zhang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/sources/ConsoleWriterSuite.scala
|
Scala
|
apache-2.0
| 4,296
|
package parsers
import com.github.agourlay.cornichon.json.CornichonJson
import org.openjdk.jmh.annotations.{ Benchmark, BenchmarkMode, Fork, Measurement, Mode, Scope, State, Warmup }
@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
"-XX:+FlightRecorder",
"-XX:StartFlightRecording=filename=./CornichonJSonBench-profiling-data.jfr,name=profile,settings=profile",
"-Xmx1G"))
class CornichonJsonBench {
/*
[info] Benchmark Mode Cnt Score Error Units
[info] CornichonJsonBench.parseDslStringJsonArray thrpt 10 3789623,689 ± 18608,502 ops/s
[info] CornichonJsonBench.parseDslStringJsonString thrpt 10 110534818,941 ± 788300,991 ops/s
[info] CornichonJsonBench.parseDslStringJsonTable thrpt 10 125283,927 ± 1468,178 ops/s
*/
@Benchmark
def parseDslStringJsonString() = {
val res = CornichonJson.parseDslJson(" a rather long string about cornichon ")
assert(res.isRight)
}
@Benchmark
def parseDslStringJsonArray() = {
val res = CornichonJson.parseDslJson("""[ "a", "very", "cool", "feature" ] """)
assert(res.isRight)
}
@Benchmark
def parseDslStringJsonTable() = {
val res = CornichonJson.parseDslJson("""
| Name | Age | City |
| "John" | 30 | "Paris" |
| "Bob" | 41 | "Berlin" |
| "Carl" | 29 | "Milan" |
""")
assert(res.isRight)
}
}
|
agourlay/cornichon
|
benchmarks/src/main/scala/parsers/CornichonJsonBench.scala
|
Scala
|
apache-2.0
| 1,531
|
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.ifReadAllowed
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.stubs.ScSelfTypeElementStub
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
*/
class ScSelfTypeElementImpl private(stub: ScSelfTypeElementStub, node: ASTNode)
extends ScalaStubBasedElementImpl(stub, ScalaElementTypes.SELF_TYPE, node) with ScSelfTypeElement {
def this(node: ASTNode) = this(null, node)
def this(stub: ScSelfTypeElementStub) = this(stub, null)
override def toString: String = "SelfType: " + ifReadAllowed(name)("")
def nameId: PsiElement = findChildByType[PsiElement](TokenSets.SELF_TYPE_ID)
def `type`(): TypeResult = {
val parent = PsiTreeUtil.getParentOfType(this, classOf[ScTemplateDefinition])
assert(parent != null)
typeElement match {
case Some(ste) =>
for {
templateType <- parent.`type`()
selfType <- ste.`type`()
} yield ScCompoundType(Seq(templateType, selfType))
case None => parent.`type`()
}
}
def typeElement: Option[ScTypeElement] = byPsiOrStub(findChild(classOf[ScTypeElement]))(_.typeElement)
def classNames: Array[String] = byStubOrPsi(_.classNames) {
val names = new ArrayBuffer[String]()
def fillNames(typeElement: ScTypeElement) {
typeElement match {
case s: ScSimpleTypeElement => s.reference match {
case Some(ref) => names += ref.refName
case _ =>
}
case p: ScParameterizedTypeElement => fillNames(p.typeElement)
case c: ScCompoundTypeElement =>
c.components.foreach(fillNames)
case _ => //do nothing
}
}
typeElement.foreach(fillNames)
names.toArray
}
}
|
triplequote/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScSelfTypeElementImpl.scala
|
Scala
|
apache-2.0
| 2,274
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import kafka.common.ErrorMapping
import kafka.network.{RequestOrResponseSend, RequestChannel}
import kafka.network.RequestChannel.Response
object GroupMetadataRequest {
val CurrentVersion = 0.shortValue
val DefaultClientId = ""
def readFrom(buffer: ByteBuffer) = {
// envelope
val versionId = buffer.getShort
val correlationId = buffer.getInt
val clientId = ApiUtils.readShortString(buffer)
// request
val group = ApiUtils.readShortString(buffer)
GroupMetadataRequest(group, versionId, correlationId, clientId)
}
}
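// Rough sketch of the wire layout read by readFrom above and written by writeTo below
// (shortString here means the length-prefixed string handled by ApiUtils):
//   int16 versionId | int32 correlationId | shortString clientId | shortString group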
case class GroupMetadataRequest(group: String,
versionId: Short = GroupMetadataRequest.CurrentVersion,
correlationId: Int = 0,
clientId: String = GroupMetadataRequest.DefaultClientId)
extends RequestOrResponse(Some(RequestKeys.GroupMetadataKey)) {
def sizeInBytes =
2 + /* versionId */
4 + /* correlationId */
ApiUtils.shortStringLength(clientId) +
ApiUtils.shortStringLength(group)
def writeTo(buffer: ByteBuffer) {
// envelope
buffer.putShort(versionId)
buffer.putInt(correlationId)
ApiUtils.writeShortString(buffer, clientId)
// consumer metadata request
ApiUtils.writeShortString(buffer, group)
}
override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
// return ConsumerCoordinatorNotAvailable for all uncaught errors
val errorResponse = GroupMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, errorResponse)))
}
def describe(details: Boolean) = {
val consumerMetadataRequest = new StringBuilder
consumerMetadataRequest.append("Name: " + this.getClass.getSimpleName)
consumerMetadataRequest.append("; Version: " + versionId)
consumerMetadataRequest.append("; CorrelationId: " + correlationId)
consumerMetadataRequest.append("; ClientId: " + clientId)
consumerMetadataRequest.append("; Group: " + group)
consumerMetadataRequest.toString()
}
}
|
bluebreezecf/kafka
|
core/src/main/scala/kafka/api/GroupMetadataRequest.scala
|
Scala
|
apache-2.0
| 3,042
|
package edu.uci.eecs.spectralLDA.algorithm
import org.scalatest._
import org.apache.spark.SparkContext
import edu.uci.eecs.spectralLDA.testharness.Context
import breeze.linalg._
import breeze.stats.distributions._
import org.apache.commons.math3.random.MersenneTwister
class TensorLDATest extends FlatSpec with Matchers {
private val sc: SparkContext = Context.getSparkContext
def simulateLDAData(alpha: DenseVector[Double],
allTokenDistributions: DenseMatrix[Double],
numDocuments: Int,
numTokensPerDocument: Int)
(implicit randBasis: RandBasis = Rand)
: Seq[(Long, SparseVector[Double])] = {
assert(alpha.size == allTokenDistributions.cols)
val k = alpha.size
val V = allTokenDistributions.rows
// Simulate the word histogram of each document
val dirichlet = Dirichlet(alpha)
val wordCounts: Seq[(Long, SparseVector[Double])] = for {
d <- 0 until numDocuments
topicDistribution: DenseVector[Double] = dirichlet.sample()
tokenDistribution: DenseVector[Double] = allTokenDistributions * topicDistribution
tokens = Multinomial(tokenDistribution) sample numTokensPerDocument
c = SparseVector.zeros[Double](V)
tokensCount = tokens foreach { t => c(t) += 1.0 }
} yield (d.toLong, c)
wordCounts
}
"Simulated LDA" should "be recovered" in {
val alpha: DenseVector[Double] = DenseVector[Double](20.0, 10.0, 5.0)
val allTokenDistributions: DenseMatrix[Double] = new DenseMatrix[Double](6, 3,
Array[Double](0.4, 0.4, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05, 0.4, 0.4, 0.05, 0.05,
0.05, 0.05, 0.05, 0.05, 0.4, 0.4))
implicit val randBasis: RandBasis =
new RandBasis(new ThreadLocalRandomGenerator(new MersenneTwister(57175437L)))
val documents = simulateLDAData(
alpha,
allTokenDistributions,
numDocuments = 5000,
numTokensPerDocument = 100
)
val documentsRDD = sc.parallelize(documents)
val tensorLDA = new TensorLDA(
dimK = 3,
alpha0 = sum(alpha),
maxIterations = 200,
randomisedSVD = false
)
val (fitted_beta: DenseMatrix[Double], fitted_alpha: DenseVector[Double], _, _, _) = tensorLDA.fit(documentsRDD)
// Rearrange the elements/columns of fitted_alpha and fitted_beta
// to the order of initial alpha and beta
val idx = argtopk(fitted_alpha, 3)
val sorted_beta = fitted_beta(::, idx).toDenseMatrix
// if one vector is all negative, multiply it by -1 to turn it positive
for (j <- 0 until sorted_beta.cols) {
if (max(sorted_beta(::, j)) <= 0.0) {
sorted_beta(::, j) :*= -1.0
}
}
val sorted_alpha = fitted_alpha(idx).toDenseVector
val diff_beta: DenseMatrix[Double] = sorted_beta - allTokenDistributions
val diff_alpha: DenseVector[Double] = sorted_alpha - alpha
val norm_diff_beta = norm(norm(diff_beta(::, *)).toDenseVector)
val norm_diff_alpha = norm(diff_alpha)
info(s"Expecting alpha: $alpha")
info(s"Obtained alpha: $sorted_alpha")
info(s"Norm of difference alpha: $norm_diff_alpha")
info(s"Expecting beta:\n$allTokenDistributions")
info(s"Obtained beta:\n$sorted_beta")
info(s"Norm of difference beta: $norm_diff_beta")
norm_diff_beta should be <= 0.2
norm_diff_alpha should be <= 4.0
}
"Simulated LDA" should "be recovered with randomised SVD" in {
implicit val randBasis: RandBasis =
new RandBasis(new ThreadLocalRandomGenerator(new MersenneTwister(23476541L)))
val alpha: DenseVector[Double] = DenseVector[Double](20.0, 10.0, 5.0)
val allTokenDistributions: DenseMatrix[Double] = DenseMatrix.rand(100, 3, Uniform(0.0, 1.0))
allTokenDistributions(0 until 10, 0) += 3.0
allTokenDistributions(10 until 20, 1) += 3.0
allTokenDistributions(20 until 30, 2) += 3.0
val s = sum(allTokenDistributions(::, *))
val normalisedAllTokenDistributions: DenseMatrix[Double] =
allTokenDistributions * diag(1.0 / s.toDenseVector)
val documents = simulateLDAData(
alpha,
allTokenDistributions,
numDocuments = 5000,
numTokensPerDocument = 500
)
val documentsRDD = sc.parallelize(documents)
val dimK = 3
val tensorLDA = new TensorLDA(
dimK = dimK,
alpha0 = sum(alpha(0 until dimK)),
maxIterations = 200,
randomisedSVD = true
)
val (fitted_beta: DenseMatrix[Double], fitted_alpha: DenseVector[Double], _, _, _) = tensorLDA.fit(documentsRDD)
// Rearrange the elements/columns of fitted_alpha and fitted_beta
// to the order of initial alpha and beta
val idx = argtopk(fitted_alpha, dimK)
val sorted_beta = fitted_beta(::, idx).toDenseMatrix
val sorted_alpha = fitted_alpha(idx).toDenseVector
val expected_alpha = alpha(0 until dimK)
val expected_beta = normalisedAllTokenDistributions(::, 0 until dimK)
val diff_beta: DenseMatrix[Double] = sorted_beta - expected_beta
val diff_alpha: DenseVector[Double] = sorted_alpha - expected_alpha
val norm_diff_beta = norm(norm(diff_beta(::, *)).toDenseVector)
val norm_diff_alpha = norm(diff_alpha)
info(s"Expecting alpha: $expected_alpha")
info(s"Obtained alpha: $sorted_alpha")
info(s"Norm of difference alpha: $norm_diff_alpha")
info(s"Expecting beta:\n$expected_beta")
info(s"Obtained beta:\n$sorted_beta")
info(s"Norm of difference beta: $norm_diff_beta")
norm_diff_beta should be <= 0.025
norm_diff_alpha should be <= 3.5
}
}
|
FurongHuang/SpectralLDA-TensorSpark
|
src/test/scala/edu/uci/eecs/spectralLDA/algorithm/TensorLDATest.scala
|
Scala
|
apache-2.0
| 5,559
|
package views.html
import play.twirl.api._
import play.twirl.api.TemplateMagic._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object main extends BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with play.twirl.api.Template2[String,Html,play.twirl.api.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(title: String)(content: Html):play.twirl.api.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.32*/("""
"""),format.raw/*3.1*/("""<!DOCTYPE html>
<html>
<head>
<title>"""),_display_(/*8.17*/title),format.raw/*8.22*/("""</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<script src='"""),_display_(/*10.23*/routes/*10.29*/.Assets.at("javascripts/jquery-1.10.2.min.js")),format.raw/*10.75*/("""'></script>
<script type='text/javascript' src='"""),_display_(/*11.46*/routes/*11.52*/.Assets.at("javascripts/html5.js")),format.raw/*11.86*/("""'> </script>
<link href='"""),_display_(/*12.22*/routes/*12.28*/.Assets.at("stylesheets/bootstrap.css")),format.raw/*12.67*/("""' rel='stylesheet' type='text/css' />
<link href='"""),_display_(/*13.22*/routes/*13.28*/.Assets.at("stylesheets/style-responsive.css")),format.raw/*13.74*/("""' rel='stylesheet' type='text/css' />
<link href='"""),_display_(/*14.22*/routes/*14.28*/.Assets.at("images/favicon.gif")),format.raw/*14.60*/("""' rel='icon' type='image' />
</head>
<body>
</body>
</html>
"""))}
}
def render(title:String,content:Html): play.twirl.api.HtmlFormat.Appendable = apply(title)(content)
def f:((String) => (Html) => play.twirl.api.HtmlFormat.Appendable) = (title) => (content) => apply(title)(content)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Tue Jul 28 21:21:26 IST 2015
SOURCE: /home/tarun/9tanki/github/play_2.3.9_project/dockerized-play-app/app/views/main.scala.html
HASH: e3ab38845deddd32bcb2445a6bfca1a747a006a7
MATRIX: 727->1|845->31|875->35|958->92|983->97|1122->209|1137->215|1204->261|1289->319|1304->325|1359->359|1421->394|1436->400|1496->439|1584->500|1599->506|1666->552|1754->613|1769->619|1822->651
LINES: 26->1|29->1|31->3|36->8|36->8|38->10|38->10|38->10|39->11|39->11|39->11|40->12|40->12|40->12|41->13|41->13|41->13|42->14|42->14|42->14
-- GENERATED --
*/
|
tarunmittal/dockerized-play-app
|
target/scala-2.11/twirl/main/views/html/main.template.scala
|
Scala
|
gpl-2.0
| 2,875
|
/*
* Copyright (c) 2006-2007, AIOTrade Computing Co. and Contributors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* o Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* o Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* o Neither the name of AIOTrade Computing Co. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.aiotrade.lib.indicator.basic
import org.aiotrade.lib.indicator.Indicator
/**
*
* @author Caoyuan Deng
*/
class BIASIndicator extends Indicator {
sname = "BIAS"
lname = "Bias to Moving Average"
val period1 = Factor("Period Short", 6)
val period2 = Factor("Period Medium", 12)
val period3 = Factor("Period Long", 24)
val bias1 = TVar[Double]("BIAS1", Plot.Line)
val bias2 = TVar[Double]("BIAS2", Plot.Line)
val bias3 = TVar[Double]("BIAS3", Plot.Line)
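// BIAS measures the percentage deviation of the close from its moving average,
// i.e. BIAS(n) = (C - MA(C, n)) / MA(C, n) * 100, computed below for the three configured periods.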
protected def compute(fromIdx: Int, size: Int) {
var i = fromIdx
while (i < size) {
val ma1 = ma(i, C, period1)
val ma2 = ma(i, C, period2)
val ma3 = ma(i, C, period3)
bias1(i) = (C(i) - ma1) / ma1 * 100
bias2(i) = (C(i) - ma2) / ma2 * 100
bias3(i) = (C(i) - ma3) / ma3 * 100
i += 1
}
}
}
|
wandoulabs/wandou-math
|
wandou-indicator-basic/src/main/scala/org/aiotrade/lib/indicator/basic/BIASIndicator.scala
|
Scala
|
apache-2.0
| 2,522
|
package org.leialearns.crystallize.item
import grizzled.slf4j.Logging
import org.leialearns.crystallize.util.LoggingConfiguration
import org.scalatest.FunSuite
class TestItems extends FunSuite with LoggingConfiguration with Logging {
test("Items") {
info("\n\nTest items")
val actions = Category.getCategory("actions")
assert(actions eq Category.getCategory("actions"))
val responses = Category.getCategory("responses")
assert(actions != responses)
val dark = Item.getItem(responses, "dark")
assert(dark eq Item.getItem(responses, "dark"))
val light = Item.getItem(responses, "light")
assert(dark != light)
val left = Item.getItem(actions, "left")
assert(left eq Item.getItem(actions, "left"))
val right = Item.getItem(responses, "right")
assert(left != right)
val justLeft = Node.getNode(left)
assert(justLeft eq Node.getNode(left))
val leftDark = Node.getNode(justLeft, dark)
assert(leftDark eq Node.getNode(justLeft, dark))
}
}
|
jeroenvanmaanen/crystallize
|
src/test/scala/org/leialearns/crystallize/item/TestItems.scala
|
Scala
|
lgpl-2.1
| 1,006
|
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.internals.operators
import monifu.concurrent.cancelables.{BooleanCancelable, SerialCancelable}
import monifu.reactive.Ack.{Cancel, Continue}
import monifu.reactive._
import monifu.reactive.internals._
import scala.concurrent.Future
private[reactive] object switch {
/**
* Implementation for [[Observable.switch]].
*/
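// Semantics, roughly: for a source of child observables, only the most recently emitted child is
// mirrored downstream; when the source emits a new child, the previous child is cancelled.
// Illustrative sketch: source emits o1 then later o2 => output is o1's items until o2 arrives, then only o2's items.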
def apply[T,U](source: Observable[T])(implicit ev: T <:< Observable[U]): Observable[U] = {
Observable.create { observerU: Subscriber[U] =>
import observerU.{scheduler => s}
source.onSubscribe(new Observer[T] { self =>
// Global subscription, is canceled by the downstream
// observer and if canceled all streaming is supposed to stop
private[this] val upstream = SerialCancelable()
// MUST BE synchronized by `self`
private[this] var ack: Future[Ack] = Continue
// MUST BE synchronized by `self`
def onNext(childObservable: T) = self.synchronized {
if (upstream.isCanceled) Cancel else {
// canceling current observable in order to
// start the new stream
val activeRef = BooleanCancelable()
upstream := activeRef
ack.fastFlatMap {
case Continue =>
childObservable.onSubscribe(new Observer[U] {
def onNext(elem: U) = self.synchronized {
if (activeRef.isCanceled) Cancel else {
ack = ack.onContinueStreamOnNext(observerU, elem)
ack.ifCanceledDoCancel(upstream)
}
}
def onComplete(): Unit = ()
def onError(ex: Throwable): Unit = {
self.onError(ex)
}
})
Continue
case Cancel =>
Cancel
}
}
}
def onError(ex: Throwable): Unit = self.synchronized {
if (!upstream.isCanceled) {
upstream.cancel()
ack.onContinueSignalError(observerU, ex)
}
}
def onComplete(): Unit = self.synchronized {
if (!upstream.isCanceled) {
upstream.cancel()
ack.onContinueSignalComplete(observerU)
}
}
})
}
}
}
|
sergius/monifu
|
monifu/shared/src/main/scala/monifu/reactive/internals/operators/switch.scala
|
Scala
|
apache-2.0
| 3,015
|
package im.tox.antox.utils
import android.util.Log
import im.tox.antox.utils.FileTransferManager._
object FileTransferManager {
private val TAG = "im.tox.antox.utils.FileTransferManager"
}
class FileTransferManager () {
private var _transfers: Map[Long, FileTransfer] = Map[Long, FileTransfer]()
private var _keyAndFileNumberToId: Map[(String, Integer), Long] = Map[(String, Integer), Long]()
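// Two indices are kept in sync: `_transfers` maps a transfer id to the transfer itself, while
// `_keyAndFileNumberToId` resolves a (key, fileNumber) pair to that id for the lookups below.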
def add(t: FileTransfer) = {
Log.d(TAG, "Adding file transfer")
_transfers = _transfers + (t.id -> t)
_keyAndFileNumberToId = _keyAndFileNumberToId + ((t.key, t.fileNumber) -> t.id)
}
def remove(id: Long): Unit = {
Log.d(TAG, "Removing file transfer")
val mTransfer = this.get(id)
mTransfer match {
case Some(t) =>
_transfers = _transfers - id
_keyAndFileNumberToId = _keyAndFileNumberToId - ((t.key, t.fileNumber))
case None =>
}
}
def remove(key: String, fileNumber: Integer): Unit = {
val mId = _keyAndFileNumberToId.get(key, fileNumber)
mId match {
case Some(id) => this.remove(id)
case None =>
}
}
def get(id: Long): Option[FileTransfer] = {
_transfers.get(id)
}
def get(address: String, fileNumber: Integer): Option[FileTransfer] = {
val mId = _keyAndFileNumberToId.get(address, fileNumber)
mId match {
case Some(address) => this.get(address)
case None => None
}
}
}
|
0xPoly/Antox
|
app/src/main/scala/im/tox/antox/utils/FileTransferManager.scala
|
Scala
|
gpl-3.0
| 1,402
|
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.cassandra.sink
import com.datamountaineer.streamreactor.common.utils.{JarManifest, ProgressCounter}
import java.util
import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigSink, CassandraSettings}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
/**
* <h1>CassandraSinkTask</h1>
*
* Kafka Connect Cassandra sink task. Called by the Connect
* framework to put records to the target sink.
**/
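// Lifecycle sketch as driven by the Connect framework (illustrative only):
//   start(props)   -> parse the sink config and build the Cassandra writer
//   put(records)   -> hand each batch of SinkRecords to the writer
//   flush(offsets) -> no-op here; offset tracking is handled by Connect
//   stop()         -> close the writer and its Cassandra connections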
class CassandraSinkTask extends SinkTask with StrictLogging {
private var writer: Option[CassandraJsonWriter] = None
private val progressCounter = new ProgressCounter
private var enableProgress: Boolean = false
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
logger.info("Task initialising")
/**
* Parse the configurations and setup the writer
**/
override def start(props: util.Map[String, String]): Unit = {
logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/cass-sink-ascii.txt")).mkString + s" $version")
logger.info(manifest.printManifest())
val config = if (context.configs().isEmpty) props else context.configs()
val taskConfig = Try(new CassandraConfigSink(config)) match {
case Failure(f) => throw new ConnectException("Couldn't start CassandraSink due to configuration error.", f)
case Success(s) => s
}
val sinkSettings = CassandraSettings.configureSink(taskConfig)
enableProgress = sinkSettings.enableProgress
writer = Some(CassandraWriter(connectorConfig = taskConfig, context = context))
}
/**
* Pass the SinkRecords to the writer for Writing
**/
override def put(records: util.Collection[SinkRecord]): Unit = {
require(writer.nonEmpty, "Writer is not set!")
val seq = records.asScala.toVector
writer.foreach(w => w.write(seq))
if (enableProgress) {
progressCounter.update(seq)
}
}
/**
* Clean up Cassandra connections
**/
override def stop(): Unit = {
logger.info("Stopping Cassandra sink.")
writer.foreach(w => w.close())
if (enableProgress) {
progressCounter.empty
}
}
override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}
override def version: String = manifest.version()
}
|
datamountaineer/stream-reactor
|
kafka-connect-cassandra/src/main/scala/com/datamountaineer/streamreactor/connect/cassandra/sink/CassandraSinkTask.scala
|
Scala
|
apache-2.0
| 3,212
|
package deaktator
import com.google.caliper.Benchmark
import com.google.caliper.Param
import com.google.caliper.BeforeExperiment
import org.apache.commons.vfs2.VFS
class BagOfWordsBench {
@Param(Array("32","64","128","256", "512", "1000","10000","100000","1000000")) var size: Int = _
private[this] val pi1000000 = scala.io.Source.fromInputStream(VFS.getManager.resolveFile("res:pi_1000000.txt").getContent.getInputStream).getLines().mkString
private[this] var pi: String = _
private[this] var split: String = _
@BeforeExperiment def before(): Unit = {
pi = pi1000000 take size
split = (math.round(math.log10(size)).toInt + 1) + "+"
}
@Benchmark def skipGrams1(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.skipGrams1(pi, 1, splitString = split)
i += 1
}
dummy
}
@Benchmark def skipGrams2(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.skipGrams2(pi, 1, splitString = split)
i += 1
}
dummy
}
@Benchmark def skipGrams3(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.skipGrams3(pi, 1, splitString = split)
i += 1
}
dummy
}
@Benchmark def skipGrams4(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.skipGrams4(pi, 1, splitString = split)
i += 1
}
dummy
}
@Benchmark def skipGrams5(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.skipGrams5(pi, 1, splitString = split)
i += 1
}
dummy
}
@Benchmark def bagOfWords1(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.bagOfWords1(pi, splitString = split)
i += 1
}
dummy
}
@Benchmark def bagOfWords2(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.bagOfWords2(pi, splitString = split)
i += 1
}
dummy
}
@Benchmark def bagOfWords3(reps: Int): Int = {
var i = 0
val dummy = 0
while (i < reps) {
SkipGrams.bagOfWords3(pi, splitString = split)
i += 1
}
dummy
}
}
|
deaktator/skip-grams-bench
|
src/test/scala/deaktator/BagOfWordsBench.scala
|
Scala
|
mit
| 2,196
|
package com.exratione.sdgexample
import com.exratione.sdgexample.config._
import com.exratione.sdgexample.guice._
import com.exratione.sdgexample.service._
import com.hubspot.dropwizard.guice.GuiceBundle
import com.yammer.dropwizard.bundles.ScalaBundle
import com.yammer.dropwizard.config.{Bootstrap, Configuration, Environment}
import com.yammer.dropwizard.ScalaService
import net.codingwell.scalaguice.InjectorExtensions._
object SdgExampleScalaService extends ScalaServiceWithGuiceBundle {
def initialize (bootstrap: Bootstrap[SdgExampleConfiguration]) {
// Create the bundle for dropwizard-guice integration.
guiceBundle = GuiceBundle.newBuilder[SdgExampleConfiguration]
.addModule(new SdgExampleModule)
// This ensures that Resource implementations in this package are set up
// automatically.
.enableAutoConfig(getClass.getPackage.getName)
// The configuration class will be available via the injector obtained via
// guiceBundle.getInjector.
.setConfigClass(classOf[SdgExampleConfiguration])
.build
bootstrap.setName("scala-dropwizard-guice-example")
bootstrap.addBundle(new ScalaBundle)
bootstrap.addBundle(guiceBundle)
bootstrap.addCommand(new SdgExampleEnvironmentCommand(this))
}
def run (configuration: SdgExampleConfiguration, environment: Environment) {
// Nothing needs to be done here since the GuiceBundle wires up the
// Resources automatically.
// If obtaining instances here to start processes or take other actions
// then use the injector. E.g.:
//
// val injector = guiceBundle.getInjector
// val contentService = injector.instance[ContentService]
//
// Though we are passed the configuration and environment as arguments,
// dropwizard-guice lets us get these instances from the injector as well.
// This can be useful elsewhere in an application. E.g.:
//
// val config = injector.instance[Configuration]
// val env = injector.instance[Environment]
}
}
|
exratione/scala-dropwizard-guice-example
|
dropwizard-6.2/src/main/scala/com/exratione/sdgexample/SdgExampleScalaService.scala
|
Scala
|
mit
| 2,020
|
package com.identityblitz.saml.action
import com.identityblitz.saml.ws.transport.{PlayResponseAdapter, PlayRequestAdapter}
import com.identityblitz.shibboleth.idp.util.HttpHelper
import com.identityblitz.saml.IdpPlayBridge._
import org.opensaml.util.storage.StorageService
import edu.internet2.middleware.shibboleth.idp.authn.LoginContextEntry
/**
*/
trait LoginContextBridge {
private lazy val storageService = samlCtx.getBean("shibboleth.StorageService").asInstanceOf[StorageService[String, LoginContextEntry]]
protected def getIdpLoginContext(implicit inTr: PlayRequestAdapter, outTr: PlayResponseAdapter) = {
HttpHelper.getLoginContext(storageService, inTr, outTr)
}
}
|
brainysmith/idp-play-bridge
|
src/main/scala/com/identityblitz/saml/action/LoginContextBridge.scala
|
Scala
|
mit
| 689
|
package com.lunatic.mlx.cuisines.model
/**
*
*/
case class Recipe(id: Int, cuisine: String, ingredients: List[String])
|
tupol/sparx-mllib
|
src/main/scala/com/lunatic/mlx/cuisines/model/Recipe.scala
|
Scala
|
apache-2.0
| 123
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.epoch.util
import java.net.SocketTimeoutException
import java.util
import kafka.cluster.BrokerEndPoint
import kafka.server.BlockingSend
import org.apache.kafka.clients.{ClientRequest, ClientResponse, MockClient, NetworkClientUtils}
import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochResponseData}
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.AbstractRequest.Builder
import org.apache.kafka.common.requests.{AbstractRequest, FetchResponse, OffsetsForLeaderEpochResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.utils.{SystemTime, Time}
import org.apache.kafka.common.{Node, TopicPartition, Uuid}
import scala.collection.Map
/**
* Stub network client used for testing the ReplicaFetcher; wraps the MockClient used for consumer testing.
*
* The common case is that there is only one OFFSET_FOR_LEADER_EPOCH request/response, so the
* response to OFFSET_FOR_LEADER_EPOCH is the 'offsets' map. If a test needs another round of
* OFFSET_FOR_LEADER_EPOCH with different offsets in the response, it should update the offsets using
* setOffsetsForNextResponse.
*/
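// Illustrative two-round usage, with hypothetical offset maps firstOffsets/secondOffsets:
//   val sender = new ReplicaFetcherMockBlockingSend(firstOffsets, sourceBroker, time)
//   ... the first OFFSET_FOR_LEADER_EPOCH exchange is answered from firstOffsets ...
//   sender.setOffsetsForNextResponse(secondOffsets)
//   ... subsequent exchanges are answered from secondOffsets ...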
class ReplicaFetcherMockBlockingSend(offsets: java.util.Map[TopicPartition, EpochEndOffset],
sourceBroker: BrokerEndPoint,
time: Time)
extends BlockingSend {
private val client = new MockClient(new SystemTime)
var fetchCount = 0
var epochFetchCount = 0
var lastUsedOffsetForLeaderEpochVersion = -1
var callback: Option[() => Unit] = None
var currentOffsets: util.Map[TopicPartition, EpochEndOffset] = offsets
var fetchPartitionData: Map[TopicPartition, FetchResponseData.PartitionData] = Map.empty
var topicIds: Map[String, Uuid] = Map.empty
private val sourceNode = new Node(sourceBroker.id, sourceBroker.host, sourceBroker.port)
def setEpochRequestCallback(postEpochFunction: () => Unit): Unit = {
callback = Some(postEpochFunction)
}
def setOffsetsForNextResponse(newOffsets: util.Map[TopicPartition, EpochEndOffset]): Unit = {
currentOffsets = newOffsets
}
def setFetchPartitionDataForNextResponse(partitionData: Map[TopicPartition, FetchResponseData.PartitionData]): Unit = {
fetchPartitionData = partitionData
}
def setIdsForNextResponse(topicIds: Map[String, Uuid]): Unit = {
this.topicIds = topicIds
}
override def sendRequest(requestBuilder: Builder[_ <: AbstractRequest]): ClientResponse = {
if (!NetworkClientUtils.awaitReady(client, sourceNode, time, 500))
throw new SocketTimeoutException(s"Failed to connect within 500 ms")
//Send the request to the mock client
val clientRequest = request(requestBuilder)
client.send(clientRequest, time.milliseconds())
//Create a suitable response based on the API key
val response = requestBuilder.apiKey() match {
case ApiKeys.OFFSET_FOR_LEADER_EPOCH =>
callback.foreach(_.apply())
epochFetchCount += 1
lastUsedOffsetForLeaderEpochVersion = requestBuilder.latestAllowedVersion()
val data = new OffsetForLeaderEpochResponseData()
currentOffsets.forEach((tp, offsetForLeaderPartition) => {
var topic = data.topics.find(tp.topic)
if (topic == null) {
topic = new OffsetForLeaderTopicResult()
.setTopic(tp.topic)
data.topics.add(topic)
}
topic.partitions.add(offsetForLeaderPartition)
})
new OffsetsForLeaderEpochResponse(data)
case ApiKeys.FETCH =>
fetchCount += 1
val partitionData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val topicIdsForRequest = new util.HashMap[String, Uuid]()
fetchPartitionData.foreach { case (tp, data) => partitionData.put(tp, data) }
topicIds.foreach { case (name, id) => topicIdsForRequest.put(name, id)}
fetchPartitionData = Map.empty
topicIds = Map.empty
FetchResponse.of(Errors.NONE, 0,
if (partitionData.isEmpty) JFetchMetadata.INVALID_SESSION_ID else 1,
partitionData, topicIdsForRequest)
case _ =>
throw new UnsupportedOperationException
}
//Use mock client to create the appropriate response object
client.respondFrom(response, sourceNode)
client.poll(30, time.milliseconds()).iterator().next()
}
private def request(requestBuilder: Builder[_ <: AbstractRequest]): ClientRequest = {
client.newClientRequest(
sourceBroker.id.toString,
requestBuilder,
time.milliseconds(),
true)
}
override def initiateClose(): Unit = {}
override def close(): Unit = {}
}
|
guozhangwang/kafka
|
core/src/test/scala/unit/kafka/server/epoch/util/ReplicaFetcherMockBlockingSend.scala
|
Scala
|
apache-2.0
| 5,665
|
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.service.conversation
import com.waz.content._
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.model.ConversationData.ConversationType
import com.waz.model.UserData.ConnectionStatus
import com.waz.model.{ConvId, Name, UserData, UserId}
import com.waz.threading.SerialDispatchQueue
import com.waz.utils.events.EventContext
import com.waz.utils.{BiRelation, ThrottledProcessingQueue}
import scala.collection.{GenTraversable, breakOut}
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Updates conversation names when any dependency changes (members list, user names).
*/
class NameUpdater(selfUserId: UserId,
usersStorage: UsersStorage,
convs: ConversationStorage,
membersStorage: MembersStorage) {
private implicit val ev = EventContext.Global
private implicit val dispatcher = new SerialDispatchQueue(name = "NameUpdaterQueue")
// unnamed group conversations with active members
// we keep these in memory, which should be fine;
// people usually don't have many unnamed group conversations (especially ones with many users)
private var groupConvs = Set.empty[ConvId]
private var groupMembers = BiRelation.empty[ConvId, UserId]
private val queue = new ThrottledProcessingQueue[Any](500.millis, { ids => updateGroupNames(ids.toSet) }, "GroupConvNameUpdater")
// load groups and members
lazy val init = for {
all <- convs.list()
groups = all.filter(c => c.convType == ConversationType.Group && c.name.isEmpty)
members <- Future.traverse(groups) { c => membersStorage.getActiveUsers(c.id) map (c.id -> _) }
} yield {
groupConvs ++= groups.map(_.id)
addMembers(members)
}
def registerForUpdates(): Unit = {
usersStorage.onAdded { onUsersChanged(_) }
usersStorage.onUpdated { updates =>
onUsersChanged(updates.collect {
case (prev, current) if prev.name != current.name || prev.displayName != current.displayName => current
})
}
convs.onAdded { cs =>
val unnamedGroups = cs.collect { case c if c.convType == ConversationType.Group && c.name.isEmpty => c.id }
if (unnamedGroups.nonEmpty) {
init map { _ =>
groupConvs ++= unnamedGroups
addMembersForConvs(unnamedGroups)
}
}
}
convs.onUpdated { updates =>
val changedGroups = updates.collect {
case (prev, conv) if conv.convType == ConversationType.Group && prev.name.isDefined != conv.name.isDefined => conv
}
if (changedGroups.nonEmpty) {
val (named, unnamed) = changedGroups.partition(_.name.isDefined)
val namedIds = named.map(_.id)
val unnamedIds = unnamed.map(_.id)
init map { _ =>
groupConvs = groupConvs -- namedIds ++ unnamedIds
if (named.nonEmpty)
groupMembers = groupMembers.removeAllLeft(namedIds)
if (unnamed.nonEmpty)
addMembersForConvs(unnamedIds) map { _ => queue.enqueue(unnamedIds)}
}
}
}
membersStorage.onAdded { members =>
init map { _ =>
val ms = members.filter(m => groupConvs(m.convId))
groupMembers = groupMembers ++ ms.map(m => m.convId -> m.userId)
queue.enqueue(ms.map(_.convId).distinct)
}
}
membersStorage.onDeleted { members =>
init map { _ =>
val ms = members.filter(m => groupConvs(m._2))
groupMembers = groupMembers -- ms.map(m => m._2 -> m._1)
queue.enqueue(ms.map(_._2).distinct)
}
}
}
def forceNameUpdate(id: ConvId, defaultName: String) = convs.get(id) flatMap {
case Some(conv) if conv.convType == ConversationType.Group =>
for {
members <- membersStorage.getByConv(conv.id)
users <- usersStorage.getAll(members.map(_.userId).filter(_ != selfUserId))
name = generatedName(users.map {
case Some(u) if !u.deleted => Some(u.getDisplayName)
case _ => None
})
newName = if(name.isEmpty) Name(defaultName) else name
res <- convs.update(conv.id, _.copy(generatedName = newName))
} yield res
case Some(conv) => // one to one conv should use full user name
usersStorage.get(UserId(conv.id.str)) flatMap {
case Some(user) if !user.deleted => convs.update(conv.id, _.copy(generatedName = user.name))
case None => Future successful None
}
case None =>
Future successful None
}
private def addMembersForConvs(convs: Traversable[ConvId]) =
Future.traverse(convs) { c => membersStorage.getActiveUsers(c) map (c -> _) } map addMembers
private def addMembers(members: Traversable[(ConvId, Seq[UserId])]) =
groupMembers ++= members.flatMap { case (c, us) => us.map(c -> _) }
private def onUsersChanged(users: Seq[UserData]) = {
def updateGroups() = queue.enqueue(users.map(_.id))
def updateOneToOnes() = {
val names: Map[ConvId, Name] = users.collect {
case u if u.connection != ConnectionStatus.Unconnected && !u.deleted => ConvId(u.id.str) -> u.name // one to one use full name
} (breakOut)
if (names.isEmpty) Future successful Nil
else convs.updateAll2(names.keys, { c => c.copy(generatedName = names(c.id)) })
}
updateGroups()
updateOneToOnes()
}
private def updateGroupNames(ids: Set[Any]) = init flatMap { _ =>
val convIds = ids flatMap {
case id: ConvId => Seq(id)
case id: UserId => groupMembers.foreset(id)
case _ => Nil
}
val members: Map[ConvId, Seq[UserId]] = convIds.map { id => id -> groupMembers.afterset(id).toSeq } (breakOut)
val users = members.flatMap(_._2).toSeq.distinct.filter(_ != selfUserId)
usersStorage.getAll(users) flatMap { uds =>
val names: Map[UserId, Option[Name]] = users.zip(uds.map(_.flatMap {
case u if !u.deleted => Some(u.getDisplayName)
case _ => None
}))(breakOut)
val convNames = members.mapValues { us => generatedName(us.filter(_ != selfUserId) map { names.get(_).flatten }) }
convs.updateAll2(convIds, { c => convNames.get(c.id).fold(c) { name => c.copy(generatedName = name) } })
}
}
private def generatedName(userNames: GenTraversable[Option[Name]]): Name = {
Name(userNames.flatten.filter(_.nonEmpty).mkString(", "))
}
}
object NameUpdater {
def generatedName(convType: ConversationType)(users: GenTraversable[UserData]): Name = {
val us = users.filter(u => u.connection != ConnectionStatus.Self && !u.deleted)
if (convType == ConversationType.Group) Name(us.map(user => user.getDisplayName).filter(_.nonEmpty).mkString(", "))
else us.headOption.fold(Name.Empty)(_.name)
}
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/service/conversation/NameUpdater.scala
|
Scala
|
gpl-3.0
| 7,445
|
package com.coinport
import com.twitter.util.Future
import com.coinport.thrift.ExchangeBackend
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import conversions.FutureConversion._
class ExchangeBackendService extends thrift.ExchangeBackend.FutureIface {
val m = new TestModule()
implicit val timeout = Timeout(10 seconds)
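// `ask` below yields a scala.concurrent.Future[Any]; toTwitterFuture (from FutureConversion)
// presumably adapts it to the com.twitter.util.Future[String] required by the Thrift FutureIface.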
def hi(): Future[String] = toTwitterFuture[String] {
m.testActor ? "hhhh"
}
}
|
CryptArc/gyro
|
servers/src/main/scala/com.coinport/ExchangeBackendService.scala
|
Scala
|
apache-2.0
| 535
|
package com.wavesplatform.api.http
import scala.concurrent.Future
import scala.util.Success
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.server.Route
import cats.instances.either._
import cats.instances.list._
import cats.instances.try_._
import cats.syntax.alternative._
import cats.syntax.either._
import cats.syntax.traverse._
import com.wavesplatform.account.{Address, AddressOrAlias}
import com.wavesplatform.api.common.CommonTransactionsApi
import com.wavesplatform.api.common.CommonTransactionsApi.TransactionMeta
import com.wavesplatform.api.http.ApiError._
import com.wavesplatform.block.Block
import com.wavesplatform.block.Block.TransactionProof
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.{Base58, _}
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.network.TransactionPublisher
import com.wavesplatform.settings.RestAPISettings
import com.wavesplatform.state.{Blockchain, InvokeScriptResult, TxMeta}
import com.wavesplatform.state.reader.LeaseDetails
import com.wavesplatform.transaction._
import com.wavesplatform.transaction.lease._
import com.wavesplatform.utils.Time
import com.wavesplatform.wallet.Wallet
import monix.eval.Task
import monix.execution.Scheduler
import play.api.libs.json._
case class TransactionsApiRoute(
settings: RestAPISettings,
commonApi: CommonTransactionsApi,
wallet: Wallet,
blockchain: Blockchain,
utxPoolSize: () => Int,
transactionPublisher: TransactionPublisher,
time: Time
) extends ApiRoute
with BroadcastRoute
with AuthRoute {
import TransactionsApiRoute._
private[this] val serializer = TransactionJsonSerializer(blockchain, commonApi)
private[this] implicit val transactionMetaWrites = OWrites[TransactionMeta](serializer.transactionWithMetaJson)
override lazy val route: Route =
pathPrefix("transactions") {
unconfirmed ~ addressWithLimit ~ info ~ status ~ sign ~ calculateFee ~ signedBroadcast ~ merkleProof
}
def addressWithLimit: Route = {
(get & path("address" / AddrSegment / "limit" / IntNumber) & parameter("after".?)) { (address, limit, maybeAfter) =>
val after =
maybeAfter.map(s => ByteStr.decodeBase58(s).getOrElse(throw ApiException(CustomValidationError(s"Unable to decode transaction id $s"))))
if (limit > settings.transactionsByAddressLimit) throw ApiException(TooBigArrayAllocation)
extractScheduler { implicit sc =>
complete(transactionsByAddress(address, limit, after).map(txs => List(txs))) // Double list - [ [tx1, tx2, ...] ]
}
}
}
private[this] def readTransactionMeta(id: String): Either[ApiError, TransactionMeta] =
for {
id <- ByteStr.decodeBase58(id).toEither.leftMap(err => CustomValidationError(err.toString))
meta <- commonApi.transactionById(id).toRight(ApiError.TransactionDoesNotExist)
} yield meta
def info: Route = pathPrefix("info") {
(get & path(TransactionId)) { id =>
complete(commonApi.transactionById(id).toRight(ApiError.TransactionDoesNotExist))
} ~ (pathEndOrSingleSlash & anyParam("id")) { ids =>
val result = for {
_ <- Either.cond(ids.nonEmpty, (), InvalidTransactionId("Transaction ID was not specified"))
statuses <- ids.map(readTransactionMeta).toList.sequence
} yield statuses
complete(result)
}
}
private[this] def loadTransactionStatus(id: ByteStr): JsObject = {
import Status._
val statusJson = blockchain.transactionInfo(id) match {
case Some((tm, tx)) =>
Json.obj(
"status" -> Confirmed,
"height" -> JsNumber(tm.height),
"confirmations" -> (blockchain.height - tm.height).max(0)
) ++ serializer.metaJson(tm)
case None =>
commonApi.unconfirmedTransactionById(id) match {
case Some(_) => Json.obj("status" -> Unconfirmed)
case None => Json.obj("status" -> NotFound)
}
}
statusJson ++ Json.obj("id" -> id.toString)
}
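// The status JSON produced above has roughly this shape (illustrative values):
//   {"status": "confirmed", "height": 100, "confirmations": 3, "spentComplexity": 0, "id": "<base58 id>"}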
def status: Route = pathPrefix("status") {
path(TransactionId) { id =>
complete(loadTransactionStatus(id))
} ~ pathEndOrSingleSlash {
anyParam("id").filter(_.nonEmpty) { ids =>
if (ids.toSeq.length > settings.transactionsByAddressLimit)
complete(TooBigArrayAllocation)
else {
ids.map(id => ByteStr.decodeBase58(id).toEither.leftMap(_ => id)).toList.separate match {
case (Nil, ids) =>
val results = ids.toSet.map((id: ByteStr) => id -> loadTransactionStatus(id)).toMap
complete(ids.map(id => results(id)))
case (errors, _) => complete(InvalidIds(errors))
}
}
} ~ pathEndOrSingleSlash {
complete(CustomValidationError("Empty request"))
}
}
}
def unconfirmed: Route = (pathPrefix("unconfirmed") & get) {
pathEndOrSingleSlash {
complete(JsArray(commonApi.unconfirmedTransactions.map(serializer.unconfirmedTxExtendedJson)))
} ~ utxSize ~ utxTransactionInfo
}
def utxSize: Route = (pathPrefix("size") & get) {
complete(Json.obj("size" -> JsNumber(utxPoolSize())))
}
def utxTransactionInfo: Route = (pathPrefix("info") & get) {
pathEndOrSingleSlash {
complete(InvalidSignature)
} ~
path(TransactionId) { id =>
commonApi.unconfirmedTransactionById(id) match {
case Some(tx) =>
complete(serializer.unconfirmedTxExtendedJson(tx))
case None =>
complete(ApiError.TransactionDoesNotExist)
}
}
}
def calculateFee: Route =
path("calculateFee")(jsonPost[JsObject] { jsv =>
val senderPk = (jsv \ "senderPublicKey").as[String]
// Just for converting the request to the transaction
val enrichedJsv = jsv ++ Json.obj(
"fee" -> 1234567,
"sender" -> senderPk
)
createTransaction(senderPk, enrichedJsv) { tx =>
commonApi
.calculateFee(tx)
.map { case (assetId, assetAmount, _) => Json.obj("feeAssetId" -> assetId, "feeAmount" -> assetAmount) }
}
})
def sign: Route = (pathPrefix("sign") & withAuth) {
pathEndOrSingleSlash(jsonPost[JsObject] { jsv =>
TransactionFactory.parseRequestAndSign(wallet, (jsv \ "sender").as[String], time, jsv)
}) ~ signWithSigner
}
def signWithSigner: Route = path(AddrSegment) { address =>
jsonPost[JsObject](TransactionFactory.parseRequestAndSign(wallet, address.stringRepr, time, _))
}
def signedBroadcast: Route = path("broadcast")(broadcast[JsValue](TransactionFactory.fromSignedRequest))
def merkleProof: Route = path("merkleProof") {
(get & parameters("id".as[String].*))(ids => complete(merkleProof(ids.toList.reverse))) ~
jsonPost[JsObject](
jsv =>
(jsv \ "ids").validate[List[String]] match {
case JsSuccess(ids, _) => merkleProof(ids)
case JsError(err) => WrongJson(errors = err.toSeq)
}
)
}
private def merkleProof(encodedIds: List[String]): ToResponseMarshallable =
encodedIds.traverse(ByteStr.decodeBase58) match {
case Success(txIds) =>
commonApi.transactionProofs(txIds) match {
case Nil => CustomValidationError(s"transactions do not exist or block version < ${Block.ProtoBlockVersion}")
case proofs => proofs
}
case _ => InvalidSignature
}
def transactionsByAddress(address: Address, limitParam: Int, maybeAfter: Option[ByteStr])(implicit sc: Scheduler): Future[List[JsObject]] = {
val aliasesOfAddress: Task[Set[AddressOrAlias]] =
commonApi
.aliasesOfAddress(address)
.collect { case (_, cat) => cat.alias }
.toListL
.map(aliases => (address :: aliases).toSet)
.memoize
/**
* Produces compact representation for large transactions by stripping unnecessary data.
* Currently implemented for MassTransfer transaction only.
*/
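// e.g. for a MassTransfer sent by someone else, presumably only the transfer entries involving
// the queried address (or one of its aliases) are kept, plus the usual metadata fields.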
def compactJson(address: Address, meta: TransactionMeta): Task[JsObject] = {
import com.wavesplatform.transaction.transfer._
meta.transaction match {
case mtt: MassTransferTransaction if mtt.sender.toAddress != address =>
aliasesOfAddress.map(mtt.compactJson(_) ++ serializer.transactionMetaJson(meta))
case _ => Task.now(serializer.transactionWithMetaJson(meta))
}
}
commonApi
.transactionsByAddress(address, None, Set.empty, maybeAfter)
.take(limitParam)
.mapEval(compactJson(address, _))
.toListL
.runToFuture
}
}
object TransactionsApiRoute {
type LeaseStatus = LeaseStatus.Value
//noinspection TypeAnnotation
object LeaseStatus extends Enumeration {
val active = Value(1)
val canceled = Value(0)
def apply(bool: Boolean): LeaseStatus = if (bool) active else canceled
}
object Status {
val Confirmed = "confirmed"
val Unconfirmed = "unconfirmed"
val NotFound = "not_found"
}
object ApplicationStatus {
val Succeeded = "succeeded"
val ScriptExecutionFailed = "script_execution_failed"
}
implicit val transactionProofWrites: Writes[TransactionProof] = Writes { mi =>
Json.obj(
"id" -> mi.id.toString,
"transactionIndex" -> mi.transactionIndex,
"merkleProof" -> mi.digests.map(d => s"${Base58.encode(d)}")
)
}
implicit val transactionProofReads: Reads[TransactionProof] = Reads { jsv =>
for {
encoded <- (jsv \ "id").validate[String]
id <- ByteStr.decodeBase58(encoded).fold(_ => JsError(InvalidSignature.message), JsSuccess(_))
transactionIndex <- (jsv \ "transactionIndex").validate[Int]
merkleProof <- (jsv \ "merkleProof").validate[List[String]].map(_.map(Base58.decode))
} yield TransactionProof(id, transactionIndex, merkleProof)
}
private[http] object TransactionJsonSerializer {
def applicationStatus(isBlockV5: Boolean, succeeded: Boolean): JsObject =
if (isBlockV5)
Json.obj("applicationStatus" -> (if (succeeded) ApplicationStatus.Succeeded else ApplicationStatus.ScriptExecutionFailed))
else
JsObject.empty
def height(height: Int): JsObject =
Json.obj("height" -> height)
}
private[http] final case class TransactionJsonSerializer(blockchain: Blockchain, commonApi: CommonTransactionsApi) {
def transactionMetaJson(meta: TransactionMeta): JsObject = {
val specificInfo = meta.transaction match {
case lease: LeaseTransaction =>
import com.wavesplatform.api.http.TransactionsApiRoute.LeaseStatus._
Json.obj("status" -> (if (blockchain.leaseDetails(lease.id()).exists(_.isActive)) active else canceled))
case leaseCancel: LeaseCancelTransaction =>
Json.obj("lease" -> leaseIdToLeaseRef(leaseCancel.leaseId))
case _ => JsObject.empty
}
val stateChanges = meta match {
case i: TransactionMeta.Invoke => Json.obj("stateChanges" -> i.invokeScriptResult)
case _ => JsObject.empty
}
Seq(
TransactionJsonSerializer.height(meta.height),
metaJson(TxMeta(meta.height, meta.succeeded, meta.spentComplexity)),
stateChanges,
specificInfo
).reduce(_ ++ _)
}
def transactionWithMetaJson(meta: TransactionMeta): JsObject = {
meta.transaction.json() ++ transactionMetaJson(meta)
}
def unconfirmedTxExtendedJson(tx: Transaction): JsObject = tx match {
case leaseCancel: LeaseCancelTransaction =>
leaseCancel.json() ++ Json.obj("lease" -> leaseIdToLeaseRef(leaseCancel.leaseId))
case t => t.json()
}
def metaJson(m: TxMeta): JsObject =
TransactionJsonSerializer.applicationStatus(isBlockV5(m.height), m.succeeded) ++ Json.obj("spentComplexity" -> m.spentComplexity)
private[this] def isBlockV5(height: Int): Boolean = blockchain.isFeatureActivated(BlockchainFeatures.BlockV5, height)
// Extended lease format. Overrides default
private[this] def leaseIdToLeaseRef(leaseId: ByteStr): LeaseRef = {
val ld = blockchain.leaseDetails(leaseId).get
val tm = blockchain.transactionMeta(ld.sourceId).get
val recipient = blockchain.resolveAlias(ld.recipient).explicitGet()
val (status, cancelHeight, cancelTxId) = ld.status match {
case LeaseDetails.Status.Active => (true, None, None)
case LeaseDetails.Status.Cancelled(height, txId) => (false, Some(height), txId)
case LeaseDetails.Status.Expired(height) => (false, Some(height), None)
}
LeaseRef(leaseId, ld.sourceId, ld.sender.toAddress, recipient, ld.amount, tm.height, LeaseStatus(status), cancelHeight, cancelTxId)
}
private[http] implicit val leaseWrites: OWrites[InvokeScriptResult.Lease] =
LeaseRef.jsonWrites.contramap((l: InvokeScriptResult.Lease) => leaseIdToLeaseRef(l.id))
private[http] implicit val leaseCancelWrites: OWrites[InvokeScriptResult.LeaseCancel] =
LeaseRef.jsonWrites.contramap((l: InvokeScriptResult.LeaseCancel) => leaseIdToLeaseRef(l.id))
// To override nested InvokeScriptResult writes
private[http] implicit lazy val invocationWrites: OWrites[InvokeScriptResult.Invocation] = (i: InvokeScriptResult.Invocation) =>
Json.obj(
"dApp" -> i.dApp,
"call" -> i.call,
"payment" -> i.payments,
"stateChanges" -> invokeScriptResultWrites.writes(i.stateChanges)
)
private[http] implicit lazy val invokeScriptResultWrites: OWrites[InvokeScriptResult] = {
import InvokeScriptResult.{issueFormat, reissueFormat, burnFormat, sponsorFeeFormat}
Json.writes[InvokeScriptResult]
}
}
private[this] final case class LeaseRef(
id: ByteStr,
originTransactionId: ByteStr,
sender: Address,
recipient: Address,
amount: TxAmount,
height: Int,
status: LeaseStatus = LeaseStatus.active,
cancelHeight: Option[Int] = None,
cancelTransactionId: Option[ByteStr] = None
)
private[this] object LeaseRef {
import com.wavesplatform.utils.byteStrFormat
implicit val config = JsonConfiguration(optionHandlers = OptionHandlers.WritesNull)
implicit val jsonWrites: OWrites[LeaseRef] = Json.writes[LeaseRef]
}
}
|
wavesplatform/Waves
|
node/src/main/scala/com/wavesplatform/api/http/TransactionsApiRoute.scala
|
Scala
|
mit
| 14,483
|
package com.rasterfoundry.database
import com.rasterfoundry.database.Implicits._
import com.rasterfoundry.database.filter._
import com.rasterfoundry.datamodel._
import doobie._
import doobie.implicits._
object MetricDao extends Dao[Metric] {
val tableName = "metrics"
val selectF = sql"""SELECT
period, metric_event, requester, metric_value
FROM """ ++ tableF
// See raster-foundry/raster-foundry #4914 for the beginnings of a plan for
// querying and aggregating metrics
def getMetric(metric: Metric): ConnectionIO[Option[Metric]] =
query.filter(uniquenessFilters(metric)).selectOption
def unsafeGetMetric(metric: Metric): ConnectionIO[Metric] =
query.filter(uniquenessFilters(metric)).select
def uniquenessFilters(metric: Metric) = {
val periodStart = metric.period._1.toString ++ "T00:00:00Z"
List(
Some(fr"metrics.requester = ${metric.requester}"),
Some(fr"""metrics.period @> $periodStart :: timestamp""")
) ++ Filters.metricQP(metric.metricEvent.toQueryParams)
}
def insert(metric: Metric): ConnectionIO[Int] = {
val baseCount = 1
val frag = (fr"""
INSERT INTO metrics (period, metric_event, metric_value, requester)
VALUES (
${metric.period}, ${metric.metricEvent}, $baseCount, ${metric.requester}
)
ON CONFLICT ON CONSTRAINT metric_event_period_unique
DO UPDATE
SET metric_value = metrics.metric_value + 1
""" ++ Fragments.whereAndOpt(uniquenessFilters(metric): _*))
frag.update.run
}
}
|
raster-foundry/raster-foundry
|
app-backend/db/src/main/scala/MetricDao.scala
|
Scala
|
apache-2.0
| 1,519
|
package latis.ops
import latis.dm.Function
import latis.dm.Tuple
import latis.dm.Variable
/**
* Splits a Function with a Tuple codomain into a Tuple
* of scalar Functions.
*
* Note: Will cause the original Function to be read.
*/
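// Shape change, illustratively: f: t -> (a, b, c) becomes Tuple(t -> a, t -> b, t -> c).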
class Split extends Operation {
override def applyToFunction(function: Function): Option[Variable] = function.getRange match {
case Tuple(vars) => {
val samples = function.iterator.toSeq //have to read the whole function so that the domain can be repeated
val dom = samples.map(_.domain)
val ran = samples.map(_.range.asInstanceOf[Tuple].getVariables).transpose
Some(Tuple(ran.map(Function(dom, _))))
}
case _ => Some(function)
}
}
object Split extends OperationFactory {
override def apply(): Split = new Split()
}
|
dlindhol/LaTiS
|
src/main/scala/latis/ops/Split.scala
|
Scala
|
epl-1.0
| 803
|
package net.walend.disentangle.graph.semiring.benchmark
import scalax.collection.GraphPredef.EdgeLikeIn
import net.walend.disentangle.graph.semiring.{Dijkstra, FloydWarshall, Brandes, FewestNodes, FirstStepsTrait, AllPathsFirstSteps}
import net.walend.disentangle.scalagraph.semiring.{ConvertToLabelDigraph, GraphFactory}
/**
* @author dwalend
* @since v0.0.1
*/
object TimingStudiesTest {
def main (args:Array[String]) {
//Time Brandes' algorithm with AllShortestPaths
val brandesResults = study(11,timeBrandes,expectedTimeDijkstra)
brandesResults.foreach(x => println(x))
}
def timeFloyd(nodeCount:Int):Long = {
import net.walend.disentangle.graph.DigraphFactory
val support = new AllPathsFirstSteps[Int,Int,Int](FewestNodes)
val graph = DigraphFactory.createRandomNormalDigraph(nodeCount,16)
val result = timeFunction{FloydWarshall.allPairsLeastPaths(graph.edges,graph.nodes.to[Seq],support,support.convertEdgeToLabelFunc[Boolean](FewestNodes.convertEdgeToLabel))}
result._2
}
def timeDijkstra(nodeCount:Int):Long = {
import net.walend.disentangle.graph.DigraphFactory
val support = new AllPathsFirstSteps[Int,Int,Int](FewestNodes)
// val support = FFewestNodes
val graph = DigraphFactory.createRandomNormalDigraph(nodeCount,16)
val result = timeFunction{Dijkstra.allPairsLeastPaths(graph.edges, support, support.convertEdgeToLabelFunc[Boolean](FewestNodes.convertEdgeToLabel), graph.nodes.to[Seq])}
/*
val result = timeFunction{
val initNode = initialGraph.innerNodes.head
DDijkstra.dijkstraSingleSource(initialGraph, support)(initNode)
}
*/
// println(s"$nodeCount ${result._2}")
result._2
}
def timeBrandes(nodeCount:Int):Long = {
import net.walend.disentangle.graph.DigraphFactory
val support = FewestNodes
val graph = DigraphFactory.createRandomNormalDigraph(nodeCount,16)
val result = timeFunction{Brandes.allLeastPathsAndBetweenness(graph.edges,graph.nodes.to[Seq],support,FewestNodes.convertEdgeToLabel)}
/*
val result = timeFunction{
val initNode = initialGraph.innerNodes.head
DDijkstra.dijkstraSingleSource(initialGraph, support)(initNode)
}
*/
// println(s"$nodeCount ${result._2}")
result._2
}
def timeJungDijkstra(nodeCount:Int):Long = {
val graph = GraphFactory.createRandomNormalGraph(nodeCount,16)
import scala.collection.JavaConversions._
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraShortestPath
import edu.uci.ics.jung.graph.DirectedSparseGraph
val jungGraph = new DirectedSparseGraph[Int,Any]()
for(node <- graph.nodes) {
jungGraph.addVertex(node)
}
var i=0
for(edge <- graph.edges) {
jungGraph.addEdge(i,edge._1,edge._2)
i = i + 1
}
val dijkstraShortestPath = new DijkstraShortestPath(jungGraph)
val result = timeFunction{for(node <- jungGraph.getVertices){
dijkstraShortestPath.getIncomingEdgeMap(node)
}}
result._2
}
def expectedTimeDijkstra(calibration:(Int,Long),nodeCount:Int):Long = {
//O(|V|^2 ln|V|)
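//e.g. with calibration (32 nodes, t ns): predicted(64) = t * (64^2 ln 64) / (32^2 ln 32) ~= 4.8 * t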
def bigO(nodeCount:Int):Double = {
Math.pow(nodeCount,2) * Math.log(nodeCount)
}
((bigO(nodeCount)/bigO(calibration._1))*calibration._2).toLong
}
def expectedTimeSingleDijkstra(calibration:(Int,Long),nodeCount:Int):Long = {
//O(|V| ln|V|)
def bigO(nodeCount:Int):Double = {
nodeCount * Math.log(nodeCount)
}
((bigO(nodeCount)/bigO(calibration._1))*calibration._2).toLong
}
def timeScalaGraphConvertDijkstra(nodeCount:Int):Long = {
import scalax.collection.Graph
import scalax.collection.GraphEdge.DiEdge
val support:AllPathsFirstSteps[Int,Int,Int] = new AllPathsFirstSteps(FewestNodes)
val graph:Graph[Int,DiEdge] = GraphFactory.createRandomNormalGraph(nodeCount,16)
import scala.language.higherKinds
def convertToLabel[E[X] <: EdgeLikeIn[X]](edge:E[Int]):(Int,Int,Option[FirstStepsTrait[Int,Int]]) = {
(edge._1,edge._2,Some(support.FirstSteps(1,Set.empty[Int])))
}
val result = timeFunction{
val labelGraphParts = ConvertToLabelDigraph.convert(graph,support)(convertToLabel)
def labelForLabel[N,E,L](from:N,to:N,edge:E):L = edge.asInstanceOf[L]
Dijkstra.allPairsLeastPaths(labelGraphParts._1, support, labelForLabel, labelGraphParts._2)
}
result._2
}
def expectedTimeFloyd(calibration:(Int,Long),nodeCount:Int):Long = {
(Math.pow(nodeCount.toDouble/calibration._1,3) * calibration._2).toLong
}
def study(maxExponent:Int,timeF:Int => Long,expectedF:((Int,Long),Int) => Long):Seq[(Int,Long,Long,Double)] = {
warmUp(16,{timeF(32)})
warmUp(16,{timeF(64)})
warmUp(16,{timeF(128)})
val nodeCountAndTime:Seq[(Int,Long)] = nodeCountsFrom32(maxExponent).map(x=>(x,timeF(x)))
val calibration = nodeCountAndTime.head
val expected = nodeCountAndTime.map(x => x._1 -> expectedF(calibration,x._1)).toMap
    val ratio = nodeCountAndTime.map(x => x._1 -> x._2.toDouble/expected(x._1)).toMap
nodeCountAndTime.map(x => (x._1,x._2,expected(x._1),ratio(x._1)))
}
def nodeCountsFrom32(exponent:Int):Seq[Int] = {
(5.0.to(exponent.toDouble,0.25)).map(x => Math.pow(2,x).toInt)
}
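  // For example (illustrative values), nodeCountsFrom32(6) walks exponents 5.0, 5.25, ..., 6.0
  // and yields node counts 32, 38, 45, 53, 64 (2 raised to each exponent, truncated to Int).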
def warmUp[T](number:Int,body: ⇒ T) = {
for(i <- 0 until number) body
}
def timeFunction[T](body: ⇒ T):(T,Long) = {
val startTime:Long = System.nanoTime()
val result = body
val endTime:Long = System.nanoTime()
(result,endTime-startTime)
}
}
|
dwalend/Disentangle
|
benchmark/src/test/scala/net/walend/disentangle/graph/semiring/benchmark/TimingStudiesTest.scala
|
Scala
|
mit
| 5,568
|
import sbt._
import Keys._
object ExampleBuild extends Build {
val paradiseV = "2.1.0-M5"
lazy val baseSettings = Seq(
scalaVersion := "2.11.6",
scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature", "-language:experimental.macros"),
crossScalaVersions := Seq("2.10.2", "2.10.3", "2.10.4", "2.11.0", "2.11.1", "2.11.2", "2.11.3", "2.11.4", "2.11.5"),
resolvers += Resolver.typesafeRepo("releases"),
addCompilerPlugin("org.scalamacros" % "paradise" % paradiseV cross CrossVersion.full)
)
lazy val spray_dev = ProjectRef(file("../../../metamorphic-spray"), "spray_dev")
lazy val slick_dev = ProjectRef(file("../../../metamorphic-slick-3/sqlite"), "sqlite_dev")
lazy val example = (project in file(".")).
dependsOn(conf).
dependsOn(spray_dev).
dependsOn(slick_dev).
settings(baseSettings: _*)
lazy val conf = (project in file("./conf")).
settings(
scalaVersion := "2.11.6",
unmanagedResourceDirectories in Compile += baseDirectory.value,
excludeFilter in unmanagedResources := "target"
)
}
|
frroliveira/metamorphic
|
examples/slick3-spray/shop/project/Build.scala
|
Scala
|
mit
| 1,080
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.
package scala.runtime
abstract class AbstractFunction4[-T1, -T2, -T3, -T4, +R] extends Function4[T1, T2, T3, T4, R] {
}
|
scala/scala
|
src/library/scala/runtime/AbstractFunction4.scala
|
Scala
|
apache-2.0
| 480
|
package models.query
case class CityCode(cityCode: String)
|
LeonardoZ/SAEB
|
app/models/query/CityCode.scala
|
Scala
|
mit
| 63
|
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.bam
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.bam.api.{SamOrder, SamRecord, SamSource, SamWriter}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.util.LazyLogging
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.util.{Io, Metric, ProgressLogger}
import enumeratum.EnumEntry
import htsjdk.samtools.SAMFileHeader.SortOrder
import htsjdk.samtools.SamPairUtil
import htsjdk.samtools.reference.ReferenceSequenceFileWalker
import scala.collection.immutable.IndexedSeq
@clp(group = ClpGroups.SamOrBam, description=
"""
|Clips reads from the same template. Ensures that at least N bases are clipped from any end of the read (i.e.
|R1 5' end, R1 3' end, R2 5' end, and R2 3' end). Optionally clips reads from the same template to eliminate overlap
|between the reads. This ensures that downstream processes, particularly variant calling, cannot double-count
|evidence from the same template when both reads span a variant site in the same template.
|
    |Clipping overlapping reads is only performed on `FR` read pairs, and is implemented by clipping approximately half
    |the overlapping bases from each read. By default hard clipping is performed; a different mode may be selected
    |with the `--clipping-mode` parameter.
|
    |Secondary and supplementary alignments are not clipped, but are passed through into the
    |output.
|
|If the input BAM is neither `queryname` sorted nor `query` grouped, it will be sorted into queryname
|order so that clipping can be performed on both ends of a pair simultaneously and so that mate
|pair information can be reset across all reads for the template. Post-clipping the reads are
|resorted into coordinate order, any existing `NM`, `UQ` and `MD` tags are repaired, and the output is
|written in coordinate order.
|
|Three clipping modes are supported:
|1. `Soft` - soft-clip the bases and qualities.
|2. `SoftWithMask` - soft-clip and mask the bases and qualities (make bases Ns and qualities the minimum).
|3. `Hard` - hard-clip the bases and qualities.
|
    |The `--upgrade-clipping` parameter converts all existing clipping in the input to the given mode when that mode is
    |more stringent: `Soft` may be upgraded to `SoftWithMask` or `Hard`, and `SoftWithMask` to `Hard`. In all other
    |cases existing clipping is left unchanged before any other clipping criteria are applied.
""")
class ClipBam
( @arg(flag='i', doc="Input SAM or BAM file of aligned reads in coordinate order.") val input: PathToBam,
@arg(flag='o', doc="Output SAM or BAM file.") val output: PathToBam,
@arg(flag='m', doc="Optional output of clipping metrics.") val metrics: Option[FilePath] = None,
@arg(flag='r', doc="Reference sequence fasta file.") val ref: PathToFasta,
@arg(flag='c', doc="The type of clipping to perform.") val clippingMode: ClippingMode = ClippingMode.Hard,
@arg(flag='a', doc="Automatically clip extended attributes that are the same length as bases.") val autoClipAttributes: Boolean = false,
@arg(flag='H', doc="Upgrade all existing clipping in the input to the given clipping mode prior to applying any other clipping criteria.") val upgradeClipping: Boolean = false,
@arg( doc="Require at least this number of bases to be clipped on the 5' end of R1") val readOneFivePrime: Int = 0,
@arg( doc="Require at least this number of bases to be clipped on the 3' end of R1") val readOneThreePrime: Int = 0,
@arg( doc="Require at least this number of bases to be clipped on the 5' end of R2") val readTwoFivePrime: Int = 0,
@arg( doc="Require at least this number of bases to be clipped on the 3' end of R2") val readTwoThreePrime: Int = 0,
@arg( doc="Clip overlapping reads.") val clipOverlappingReads: Boolean = false,
@arg( doc="Clip reads in FR pairs that sequence past the far end of their mate.") val clipBasesPastMate: Boolean = false
) extends FgBioTool with LazyLogging {
Io.assertReadable(input)
Io.assertReadable(ref)
Io.assertCanWriteFile(output)
validate(upgradeClipping || clipOverlappingReads || clipBasesPastMate || Seq(readOneFivePrime, readOneThreePrime, readTwoFivePrime, readTwoThreePrime).exists(_ != 0),
"At least one clipping option is required")
if (clipBasesPastMate && clipOverlappingReads) {
logger.info("Clipping overlapping reads supersedes clipping past the far end of their mate.")
}
private val clipper = new SamRecordClipper(mode=clippingMode, autoClipAttributes=autoClipAttributes)
override def execute(): Unit = {
val in = SamSource(input)
val progress = ProgressLogger(logger)
val sorter = Bams.sorter(SamOrder.Coordinate, in.header)
val metricsMap: Map[ReadType, ClippingMetrics] = this.metrics.map { _ =>
ReadType.values.map { readType => readType -> ClippingMetrics(read_type=readType) }.toMap
}.getOrElse(Map.empty)
// Go through and clip reads and fix their mate information
Bams.templateIterator(in).foreach { template =>
if (this.upgradeClipping) template.allReads.foreach { r => this.clipper.upgradeAllClipping(r) }
(template.r1, template.r2) match {
case (Some(r1), Some(r2)) =>
clipPair(r1=r1, r2=r2, r1Metric=metricsMap.get(ReadType.ReadOne), r2Metric=metricsMap.get(ReadType.ReadTwo))
SamPairUtil.setMateInfo(r1.asSam, r2.asSam, true)
template.r1Supplementals.foreach(s => SamPairUtil.setMateInformationOnSupplementalAlignment(s.asSam, r2.asSam, true))
template.r2Supplementals.foreach(s => SamPairUtil.setMateInformationOnSupplementalAlignment(s.asSam, r1.asSam, true))
case (Some(frag), None) =>
clipFragment(frag=frag, metric=metricsMap.get(ReadType.Fragment))
case _ => ()
}
template.allReads.foreach { r =>
sorter += r
progress.record(r)
}
}
// Then go through the coordinate sorted reads and fix up tags
logger.info("Re-sorting into coordinate order and writing output.")
val header = in.header.clone()
SamOrder.Coordinate.applyTo(header)
header.setSortOrder(SortOrder.coordinate)
val walker = new ReferenceSequenceFileWalker(ref.toFile)
val out = SamWriter(output, header, ref=Some(ref))
sorter.foreach { rec =>
Bams.regenerateNmUqMdTags(rec, walker.get(rec.refIndex))
out += rec
}
this.metrics.foreach { path =>
// Update the metrics for "All" and "Pair" read types
import ReadType._
metricsMap.foreach {
case (Fragment, metric) => metricsMap(All).add(metric)
case (ReadOne | ReadTwo, metric) => Seq(Pair, All).foreach { r => metricsMap(r).add(metric) }
case _ => ()
}
// Write it!
Metric.write(path, ReadType.values.map { readType => metricsMap(readType)})
}
out.close()
}
  /** Clips a fixed number of bases from the 5' and 3' ends of a fragment read.
    */
private[bam] def clipFragment(frag: SamRecord, metric: Option[ClippingMetrics] = None): Unit = {
val priorBasesClipped = frag.cigar.clippedBases
// Clip the read!
val numFivePrime = this.clipper.clip5PrimeEndOfRead(frag, readOneFivePrime)
val numThreePrime = this.clipper.clip3PrimeEndOfRead(frag, readOneThreePrime)
// Update metrics
metric.foreach { m =>
m.update(
rec = frag,
priorBasesClipped = priorBasesClipped,
numFivePrime = numFivePrime,
numThreePrime = numThreePrime,
numOverlappingBases = 0,
numExtendingBases = 0
)
}
}
  /** Clips a fixed number of bases from the ends of R1 and R2, then optionally clips overlapping bases
    * and bases extending past the mate's end.
    */
private[bam] def clipPair(r1: SamRecord, r2: SamRecord, r1Metric: Option[ClippingMetrics] = None, r2Metric: Option[ClippingMetrics] = None): Unit = {
val priorBasesClippedReadOne = r1.cigar.clippedBases
val priorBasesClippedReadTwo = r2.cigar.clippedBases
// Clip the read!
val numReadOneFivePrime = this.clipper.clip5PrimeEndOfRead(r1, readOneFivePrime)
val numReadOneThreePrime = this.clipper.clip3PrimeEndOfRead(r1, readOneThreePrime)
val numReadTwoFivePrime = this.clipper.clip5PrimeEndOfRead(r2, readTwoFivePrime)
val numReadTwoThreePrime = this.clipper.clip3PrimeEndOfRead(r2, readTwoThreePrime)
val (numOverlappingBasesReadOne, numOverlappingBasesReadTwo) = {
if (clipOverlappingReads && r1.isFrPair) this.clipper.clipOverlappingReads(r1, r2)
else (0, 0)
}
val (numExtendingPastMateStartReadOne, numExtendingPastMateStartReadTwo) = {
if (clipBasesPastMate && r1.isFrPair) {
val clip1 = this.clipper.clipExtendingPastMateEnd(rec=r1, mateEnd=r2.end)
val clip2 = this.clipper.clipExtendingPastMateEnd(rec=r2, mateEnd=r1.end)
(clip1, clip2)
}
else (0, 0)
}
r1Metric.foreach { m =>
m.update(
rec = r1,
priorBasesClipped = priorBasesClippedReadOne,
numFivePrime = numReadOneFivePrime,
numThreePrime = numReadOneThreePrime,
numOverlappingBases = numOverlappingBasesReadOne,
numExtendingBases = numExtendingPastMateStartReadOne
)
}
r2Metric.foreach { m =>
m.update(
rec = r2,
priorBasesClipped = priorBasesClippedReadTwo,
numFivePrime = numReadTwoFivePrime,
numThreePrime = numReadTwoThreePrime,
numOverlappingBases = numOverlappingBasesReadTwo,
numExtendingBases = numExtendingPastMateStartReadTwo
)
}
}
}
sealed trait ReadType extends EnumEntry
object ReadType extends FgBioEnum[ReadType] {
def values: IndexedSeq[ReadType] = findValues
case object Fragment extends ReadType
case object ReadOne extends ReadType
case object ReadTwo extends ReadType
case object Pair extends ReadType
case object All extends ReadType
}
/** Metrics produced by [[ClipBam]] that detail how many reads and bases are clipped respectively.
*
 * @param read_type The type of read (i.e. Fragment, ReadOne, ReadTwo, Pair, or All).
* @param reads The number of reads examined.
* @param reads_clipped_pre The number of reads with any type of clipping prior to clipping with [[ClipBam]].
* @param reads_clipped_post The number of reads with any type of clipping after clipping with [[ClipBam]], including reads that became unmapped.
* @param reads_clipped_five_prime The number of reads with the 5' end clipped.
* @param reads_clipped_three_prime The number of reads with the 3' end clipped.
* @param reads_clipped_overlapping The number of reads clipped due to overlapping reads.
* @param reads_clipped_extending The number of reads clipped due to a read extending past its mate.
* @param reads_unmapped The number of reads that became unmapped due to clipping.
* @param bases The number of aligned bases after clipping.
* @param bases_clipped_pre The number of bases clipped prior to clipping with [[ClipBam]].
* @param bases_clipped_post The number of bases clipped after clipping with [[ClipBam]], including bases from reads that became unmapped.
* @param bases_clipped_five_prime The number of bases clipped on the 5' end of the read.
 * @param bases_clipped_three_prime The number of bases clipped on the 3' end of the read.
* @param bases_clipped_overlapping The number of bases clipped due to overlapping reads.
* @param bases_clipped_extending The number of bases clipped due to a read extending past its mate.
*/
case class ClippingMetrics
(read_type: ReadType,
var reads: Long = 0,
var reads_unmapped: Long = 0,
var reads_clipped_pre: Long = 0,
var reads_clipped_post: Long = 0,
var reads_clipped_five_prime: Long = 0,
var reads_clipped_three_prime: Long = 0,
var reads_clipped_overlapping: Long = 0,
var reads_clipped_extending: Long = 0,
var bases: Long = 0,
var bases_clipped_pre: Long = 0,
var bases_clipped_post: Long = 0,
var bases_clipped_five_prime: Long = 0,
var bases_clipped_three_prime: Long = 0,
var bases_clipped_overlapping: Long = 0,
var bases_clipped_extending: Long = 0,
) extends Metric {
def update(rec: SamRecord, priorBasesClipped: Int, numFivePrime: Int, numThreePrime: Int, numOverlappingBases: Int, numExtendingBases: Int): Unit = {
this.reads += 1
this.bases += rec.cigar.alignedBases
if (priorBasesClipped > 0) {
this.reads_clipped_pre += 1
this.bases_clipped_pre += priorBasesClipped
}
if (numFivePrime > 0) {
this.reads_clipped_five_prime += 1
this.bases_clipped_five_prime += numFivePrime
}
if (numThreePrime > 0) {
this.reads_clipped_three_prime += 1
this.bases_clipped_three_prime += numThreePrime
}
if (numOverlappingBases > 0) {
this.reads_clipped_overlapping += 1
this.bases_clipped_overlapping += numOverlappingBases
}
if (numExtendingBases > 0) {
this.reads_clipped_extending += 1
this.bases_clipped_extending += numExtendingBases
}
val additionalClippedBases = numFivePrime + numThreePrime + numOverlappingBases + numExtendingBases
val totalClippedBases = additionalClippedBases + priorBasesClipped
if (totalClippedBases > 0) {
this.reads_clipped_post += 1
this.bases_clipped_post += totalClippedBases
if (rec.unmapped && additionalClippedBases > 0) this.reads_unmapped += 1
}
}
def add(metric: ClippingMetrics*): Unit = {
this.reads += metric.sumBy(_.reads)
this.reads_unmapped += metric.sumBy(_.reads_unmapped)
this.reads_clipped_pre += metric.sumBy(_.reads_clipped_pre)
this.reads_clipped_post += metric.sumBy(_.reads_clipped_post)
this.reads_clipped_five_prime += metric.sumBy(_.reads_clipped_five_prime)
this.reads_clipped_three_prime += metric.sumBy(_.reads_clipped_three_prime)
this.reads_clipped_overlapping += metric.sumBy(_.reads_clipped_overlapping)
this.reads_clipped_extending += metric.sumBy(_.reads_clipped_extending)
this.bases += metric.sumBy(_.bases)
this.bases_clipped_pre += metric.sumBy(_.bases_clipped_pre)
this.bases_clipped_post += metric.sumBy(_.bases_clipped_post)
this.bases_clipped_five_prime += metric.sumBy(_.bases_clipped_five_prime)
this.bases_clipped_three_prime += metric.sumBy(_.bases_clipped_three_prime)
this.bases_clipped_overlapping += metric.sumBy(_.bases_clipped_overlapping)
this.bases_clipped_extending += metric.sumBy(_.bases_clipped_extending)
}
}
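// A minimal sketch of reading the metrics file written by ClipBam back into memory. This assumes
// fgbio's Metric companion exposes a read method with roughly this signature, which should be
// verified against the library version in use:
//   val clippingMetrics: Seq[ClippingMetrics] = Metric.read[ClippingMetrics](metricsPath)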
|
fulcrumgenomics/fgbio
|
src/main/scala/com/fulcrumgenomics/bam/ClipBam.scala
|
Scala
|
mit
| 15,919
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.log.LogConfig
import kafka.security.CredentialProvider
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.Importance._
import org.apache.kafka.common.config.ConfigDef.Range._
import org.apache.kafka.common.config.ConfigDef.Type._
import scala.jdk.CollectionConverters._
/**
* Class used to hold dynamic configs. These are configs which have no physical manifestation in the server.properties
* and can only be set dynamically.
*/
object DynamicConfig {
object Broker {
//Properties
val LeaderReplicationThrottledRateProp = "leader.replication.throttled.rate"
val FollowerReplicationThrottledRateProp = "follower.replication.throttled.rate"
val ReplicaAlterLogDirsIoMaxBytesPerSecondProp = "replica.alter.log.dirs.io.max.bytes.per.second"
//Defaults
val DefaultReplicationThrottledRate = ReplicationQuotaManagerConfig.QuotaBytesPerSecondDefault
//Documentation
val LeaderReplicationThrottledRateDoc = "A long representing the upper bound (bytes/sec) on replication traffic for leaders enumerated in the " +
s"property ${LogConfig.LeaderReplicationThrottledReplicasProp} (for each topic). This property can be only set dynamically. It is suggested that the " +
s"limit be kept above 1MB/s for accurate behaviour."
val FollowerReplicationThrottledRateDoc = "A long representing the upper bound (bytes/sec) on replication traffic for followers enumerated in the " +
s"property ${LogConfig.FollowerReplicationThrottledReplicasProp} (for each topic). This property can be only set dynamically. It is suggested that the " +
s"limit be kept above 1MB/s for accurate behaviour."
val ReplicaAlterLogDirsIoMaxBytesPerSecondDoc = "A long representing the upper bound (bytes/sec) on disk IO used for moving replica between log directories on the same broker. " +
s"This property can be only set dynamically. It is suggested that the limit be kept above 1MB/s for accurate behaviour."
//Definitions
val brokerConfigDef = new ConfigDef()
//round minimum value down, to make it easier for users.
.define(LeaderReplicationThrottledRateProp, LONG, DefaultReplicationThrottledRate, atLeast(0), MEDIUM, LeaderReplicationThrottledRateDoc)
.define(FollowerReplicationThrottledRateProp, LONG, DefaultReplicationThrottledRate, atLeast(0), MEDIUM, FollowerReplicationThrottledRateDoc)
.define(ReplicaAlterLogDirsIoMaxBytesPerSecondProp, LONG, DefaultReplicationThrottledRate, atLeast(0), MEDIUM, ReplicaAlterLogDirsIoMaxBytesPerSecondDoc)
DynamicBrokerConfig.addDynamicConfigs(brokerConfigDef)
val nonDynamicProps = KafkaConfig.configNames.toSet -- brokerConfigDef.names.asScala
def names = brokerConfigDef.names
def validate(props: Properties) = DynamicConfig.validate(brokerConfigDef, props, customPropsAllowed = true)
}
object Client {
//Properties
val ProducerByteRateOverrideProp = "producer_byte_rate"
val ConsumerByteRateOverrideProp = "consumer_byte_rate"
val RequestPercentageOverrideProp = "request_percentage"
//Defaults
val DefaultProducerOverride = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
val DefaultConsumerOverride = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
val DefaultRequestOverride = ClientQuotaManagerConfig.QuotaRequestPercentDefault
//Documentation
val ProducerOverrideDoc = "A rate representing the upper bound (bytes/sec) for producer traffic."
val ConsumerOverrideDoc = "A rate representing the upper bound (bytes/sec) for consumer traffic."
val RequestOverrideDoc = "A percentage representing the upper bound of time spent for processing requests."
//Definitions
private val clientConfigs = new ConfigDef()
.define(ProducerByteRateOverrideProp, LONG, DefaultProducerOverride, MEDIUM, ProducerOverrideDoc)
.define(ConsumerByteRateOverrideProp, LONG, DefaultConsumerOverride, MEDIUM, ConsumerOverrideDoc)
.define(RequestPercentageOverrideProp, DOUBLE, DefaultRequestOverride, MEDIUM, RequestOverrideDoc)
def configKeys = clientConfigs.configKeys
def names = clientConfigs.names
def validate(props: Properties) = DynamicConfig.validate(clientConfigs, props, customPropsAllowed = false)
}
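  // For example, the client quota overrides above can be set with the kafka-configs tool
  // (client id and rates are illustrative):
  //   bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter \
  //     --entity-type clients --entity-name clientA \
  //     --add-config 'producer_byte_rate=1048576,consumer_byte_rate=2097152'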
object User {
//Definitions
private val userConfigs = CredentialProvider.userCredentialConfigs
.define(Client.ProducerByteRateOverrideProp, LONG, Client.DefaultProducerOverride, MEDIUM, Client.ProducerOverrideDoc)
.define(Client.ConsumerByteRateOverrideProp, LONG, Client.DefaultConsumerOverride, MEDIUM, Client.ConsumerOverrideDoc)
.define(Client.RequestPercentageOverrideProp, DOUBLE, Client.DefaultRequestOverride, MEDIUM, Client.RequestOverrideDoc)
def configKeys = userConfigs.configKeys
def names = userConfigs.names
def validate(props: Properties) = DynamicConfig.validate(userConfigs, props, customPropsAllowed = false)
}
private def validate(configDef: ConfigDef, props: Properties, customPropsAllowed: Boolean) = {
//Validate Names
val names = configDef.names()
val propKeys = props.keySet.asScala.map(_.asInstanceOf[String])
if (!customPropsAllowed) {
val unknownKeys = propKeys.filter(!names.contains(_))
require(unknownKeys.isEmpty, s"Unknown Dynamic Configuration: $unknownKeys.")
}
val propResolved = DynamicBrokerConfig.resolveVariableConfigs(props)
//ValidateValues
configDef.parse(propResolved)
}
}
|
sslavic/kafka
|
core/src/main/scala/kafka/server/DynamicConfig.scala
|
Scala
|
apache-2.0
| 6,378
|