code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Connection, Date, Timestamp}
import java.util.Properties
import org.scalatest.Ignore
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{BooleanType, ByteType, ShortType, StructType}
import org.apache.spark.tags.DockerTest
// Docker-backed integration test: exercises Spark's JDBC data source (read and
// write paths) against a real DB2 Express-C server running in a container.
@DockerTest
@Ignore // AMPLab Jenkins needs to be updated before shared memory works on docker
class DB2IntegrationSuite extends DockerJDBCIntegrationSuite {
  // Container definition: image, credentials, port, and JDBC URL construction.
  override val db = new DatabaseOnDocker {
    override val imageName = "lresende/db2express-c:10.5.0.5-3.10.0"
    override val env = Map(
      "DB2INST1_PASSWORD" -> "rootpass",
      "LICENSE" -> "accept"
    )
    override val usesIpc = false
    override val jdbcPort: Int = 50000
    override def getJdbcUrl(ip: String, port: Int): String =
      s"jdbc:db2://$ip:$port/foo:user=db2inst1;password=rootpass;retrieveMessagesFromServerOnGetMessage=true;" //scalastyle:ignore
    override def getStartupProcessName: Option[String] = Some("db2start")
  }

  // Seeds the fixture tables used by the tests below:
  // tbl (basic types), numbers (numeric types), dates (temporal), strings (char/lob/xml).
  override def dataPreparation(conn: Connection): Unit = {
    conn.prepareStatement("CREATE TABLE tbl (x INTEGER, y VARCHAR(8))").executeUpdate()
    conn.prepareStatement("INSERT INTO tbl VALUES (42,'fred')").executeUpdate()
    conn.prepareStatement("INSERT INTO tbl VALUES (17,'dave')").executeUpdate()
    conn.prepareStatement("CREATE TABLE numbers ( small SMALLINT, med INTEGER, big BIGINT, "
      + "deci DECIMAL(31,20), flt FLOAT, dbl DOUBLE, real REAL, "
      + "decflt DECFLOAT, decflt16 DECFLOAT(16), decflt34 DECFLOAT(34))").executeUpdate()
    conn.prepareStatement("INSERT INTO numbers VALUES (17, 77777, 922337203685477580, "
      + "123456745.56789012345000000000, 42.75, 5.4E-70, "
      + "3.4028234663852886e+38, 4.2999, DECFLOAT('9.999999999999999E19', 16), "
      + "DECFLOAT('1234567891234567.123456789123456789', 34))").executeUpdate()
    conn.prepareStatement("CREATE TABLE dates (d DATE, t TIME, ts TIMESTAMP )").executeUpdate()
    conn.prepareStatement("INSERT INTO dates VALUES ('1991-11-09', '13:31:24', "
      + "'2009-02-13 23:31:30')").executeUpdate()
    // TODO: Test locale conversion for strings.
    conn.prepareStatement("CREATE TABLE strings (a CHAR(10), b VARCHAR(10), c CLOB, d BLOB, e XML)")
      .executeUpdate()
    conn.prepareStatement("INSERT INTO strings VALUES ('the', 'quick', 'brown', BLOB('fox'),"
      + "'<cinfo cid=\"10\"><name>Kathy</name></cinfo>')").executeUpdate()
  }

  // Reading a simple two-column table yields the expected JVM types for each cell.
  test("Basic test") {
    val df = sqlContext.read.jdbc(jdbcUrl, "tbl", new Properties)
    val rows = df.collect()
    assert(rows.length == 2)
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types.length == 2)
    assert(types(0).equals("class java.lang.Integer"))
    assert(types(1).equals("class java.lang.String"))
  }

  // Verifies both the JVM type mapping and the round-tripped values for every
  // DB2 numeric column type (SMALLINT through DECFLOAT).
  test("Numeric types") {
    val df = sqlContext.read.jdbc(jdbcUrl, "numbers", new Properties)
    val rows = df.collect()
    assert(rows.length == 1)
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types.length == 10)
    assert(types(0).equals("class java.lang.Integer"))
    assert(types(1).equals("class java.lang.Integer"))
    assert(types(2).equals("class java.lang.Long"))
    assert(types(3).equals("class java.math.BigDecimal"))
    assert(types(4).equals("class java.lang.Double"))
    assert(types(5).equals("class java.lang.Double"))
    assert(types(6).equals("class java.lang.Float"))
    assert(types(7).equals("class java.math.BigDecimal"))
    assert(types(8).equals("class java.math.BigDecimal"))
    assert(types(9).equals("class java.math.BigDecimal"))
    assert(rows(0).getInt(0) == 17)
    assert(rows(0).getInt(1) == 77777)
    assert(rows(0).getLong(2) == 922337203685477580L)
    val bd = new BigDecimal("123456745.56789012345000000000")
    assert(rows(0).getAs[BigDecimal](3).equals(bd))
    assert(rows(0).getDouble(4) == 42.75)
    assert(rows(0).getDouble(5) == 5.4E-70)
    assert(rows(0).getFloat(6) == 3.4028234663852886e+38)
    assert(rows(0).getDecimal(7) == new BigDecimal("4.299900000000000000"))
    assert(rows(0).getDecimal(8) == new BigDecimal("99999999999999990000.000000000000000000"))
    assert(rows(0).getDecimal(9) == new BigDecimal("1234567891234567.123456789123456789"))
  }

  // DATE maps to java.sql.Date; both TIME and TIMESTAMP surface as Timestamp
  // (TIME is anchored to the epoch date 1970-01-01).
  test("Date types") {
    val df = sqlContext.read.jdbc(jdbcUrl, "dates", new Properties)
    val rows = df.collect()
    assert(rows.length == 1)
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types.length == 3)
    assert(types(0).equals("class java.sql.Date"))
    assert(types(1).equals("class java.sql.Timestamp"))
    assert(types(2).equals("class java.sql.Timestamp"))
    assert(rows(0).getAs[Date](0).equals(Date.valueOf("1991-11-09")))
    assert(rows(0).getAs[Timestamp](1).equals(Timestamp.valueOf("1970-01-01 13:31:24")))
    assert(rows(0).getAs[Timestamp](2).equals(Timestamp.valueOf("2009-02-13 23:31:30")))
  }

  // CHAR/VARCHAR/CLOB/XML arrive as String (CHAR is space-padded to its width),
  // BLOB as a byte array.
  test("String types") {
    val df = sqlContext.read.jdbc(jdbcUrl, "strings", new Properties)
    val rows = df.collect()
    assert(rows.length == 1)
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types.length == 5)
    assert(types(0).equals("class java.lang.String"))
    assert(types(1).equals("class java.lang.String"))
    assert(types(2).equals("class java.lang.String"))
    assert(types(3).equals("class [B"))
    assert(rows(0).getString(0).equals("the       "))
    assert(rows(0).getString(1).equals("quick"))
    assert(rows(0).getString(2).equals("brown"))
    assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](3), Array[Byte](102, 111, 120)))
    assert(rows(0).getString(4).equals("""<cinfo cid="10"><name>Kathy</name></cinfo>"""))
  }

  // Round-trips each fixture table through df.write.jdbc, plus Spark types
  // (short/byte/boolean) that have no exact DB2 counterpart.
  test("Basic write test") {
    // cast decflt column with precision value of 38 to DB2 max decimal precision value of 31.
    val df1 = sqlContext.read.jdbc(jdbcUrl, "numbers", new Properties)
      .selectExpr("small", "med", "big", "deci", "flt", "dbl", "real",
        "cast(decflt as decimal(31, 5)) as decflt")
    val df2 = sqlContext.read.jdbc(jdbcUrl, "dates", new Properties)
    val df3 = sqlContext.read.jdbc(jdbcUrl, "strings", new Properties)
    df1.write.jdbc(jdbcUrl, "numberscopy", new Properties)
    df2.write.jdbc(jdbcUrl, "datescopy", new Properties)
    df3.write.jdbc(jdbcUrl, "stringscopy", new Properties)
    // spark types that does not have exact matching db2 table types.
    val df4 = sqlContext.createDataFrame(
      sparkContext.parallelize(Seq(Row("1".toShort, "20".toByte, true))),
      new StructType().add("c1", ShortType).add("b", ByteType).add("c3", BooleanType))
    df4.write.jdbc(jdbcUrl, "otherscopy", new Properties)
    val rows = sqlContext.read.jdbc(jdbcUrl, "otherscopy", new Properties).collect()
    // short/byte come back widened to Int; boolean comes back as the string "1".
    assert(rows(0).getInt(0) == 1)
    assert(rows(0).getInt(1) == 20)
    assert(rows(0).getString(2) == "1")
  }

  // The "query" JDBC option should work both via DataFrameReader and in the
  // CREATE TEMPORARY VIEW ... OPTIONS path.
  test("query JDBC option") {
    val expectedResult = Set(
      (42, "fred"),
      (17, "dave")
    ).map { case (x, y) =>
      Row(Integer.valueOf(x), String.valueOf(y))
    }

    val query = "SELECT x, y FROM tbl WHERE x > 10"
    // query option to pass on the query string.
    val df = spark.read.format("jdbc")
      .option("url", jdbcUrl)
      .option("query", query)
      .load()
    assert(df.collect.toSet === expectedResult)

    // query option in the create table path.
    sql(
      s"""
         |CREATE OR REPLACE TEMPORARY VIEW queryOption
         |USING org.apache.spark.sql.jdbc
         |OPTIONS (url '$jdbcUrl', query '$query')
       """.stripMargin.replaceAll("\n", " "))
    assert(sql("select x, y from queryOption").collect.toSet == expectedResult)
  }
}
| pgandhi999/spark | external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DB2IntegrationSuite.scala | Scala | apache-2.0 | 8,478 |
package org.http4s
import scala.util.control.{NoStackTrace, NonFatal}
import scalaz.concurrent.Task
import scalaz.{\\/-, -\\/, Equal}
/** Indicates a failure to handle an HTTP [[Message]].
  *
  * Root of the failure hierarchy; extends RuntimeException so failures can be
  * raised through effect types that carry Throwables.
  */
sealed abstract class MessageFailure extends RuntimeException {
  /** Provides a message appropriate for logging. */
  def message: String

  /* Overridden for sensible logging of the failure */
  final override def getMessage: String =
    message

  /** Provides a default rendering of this failure as a [[Response]]. */
  def toHttpResponse(httpVersion: HttpVersion): Task[Response]
}
/**
 * Indicates an error parsing an HTTP [[Message]].
 *
 * Mixes in NoStackTrace: parse failures are expected, routine events, so
 * stack-trace capture is skipped.
 */
sealed abstract class ParsingFailure extends MessageFailure with NoStackTrace
/**
 * Indicates an error parsing an HTTP [[Message]].
 *
 * @param sanitized May safely be displayed to a client to describe an error
 *                  condition. Should not echo any part of a Request.
 * @param details Contains any relevant details omitted from the sanitized
 *                version of the error. This may freely echo a Request.
 */
final case class ParseFailure(sanitized: String, details: String) extends ParsingFailure {
  /** Log message: whichever half is non-empty, or both joined by ": ". */
  def message: String =
    (sanitized.isEmpty, details.isEmpty) match {
      case (true, _)      => details
      case (_, true)      => sanitized
      case (false, false) => s"$sanitized: $details"
    }

  /** 400 Bad Request carrying only the sanitized text, never the details. */
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    Response(Status.BadRequest, httpVersion).withBody(sanitized)
}
/** Generic description of a failure to parse an HTTP [[Message]],
  * with a caller-supplied response rendering.
  */
final case class GenericParsingFailure(sanitized: String, details: String, response: HttpVersion => Task[Response]) extends ParsingFailure {
  // Reuse ParseFailure's message formatting rather than duplicating it.
  def message: String =
    ParseFailure(sanitized, details).message

  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
object ParseFailure {
  // Universal (==-based) equality instance for scalaz; case-class equals suffices.
  implicit val eq = Equal.equalA[ParseFailure]
}
object ParseResult {
  /** Left-biased failure carrying a [[ParseFailure]]. */
  def fail(sanitized: String, details: String): ParseResult[Nothing] =
    -\\/(ParseFailure(sanitized, details))

  /** Wraps a successfully parsed value in the right side of the disjunction. */
  def success[A](a: A): ParseResult[A] =
    \\/-(a)

  /** Runs `f`, converting any non-fatal exception into a [[ParseFailure]]
    * whose details are the exception message. Fatal errors propagate.
    */
  def fromTryCatchNonFatal[A](sanitized: String)(f: => A): ParseResult[A] =
    try ParseResult.success(f)
    catch {
      case NonFatal(e) => -\\/(ParseFailure(sanitized, e.getMessage))
    }
}
/** Indicates a problem decoding a [[Message]]. This may either be a problem with
  * the entity headers or with the entity itself. */
sealed abstract class DecodeFailure extends MessageFailure
/** Generic description of a failure to decode a [[Message]],
  * with a caller-supplied response rendering.
  */
final case class GenericDecodeFailure(message: String, response: HttpVersion => Task[Response]) extends DecodeFailure {
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
/** Indicates a problem decoding a [[Message]] body. */
sealed abstract class MessageBodyFailure extends DecodeFailure {
  /** Underlying cause, if any; surfaced through getCause for logging. */
  def cause: Option[Throwable] = None

  // Bridge Option-based cause to the java.lang.Throwable convention (null = none).
  override def getCause: Throwable =
    cause.orNull
}
/** Generic description of a failure to handle a [[Message]] body,
  * with a caller-supplied response rendering.
  */
final case class GenericMessageBodyFailure(message: String,
                                           override val cause: Option[Throwable],
                                           response: HttpVersion => Task[Response]) extends MessageBodyFailure {
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    response(httpVersion)
}
/** Indicates a syntactic error decoding the body of an HTTP [[Message]]. */
sealed case class MalformedMessageBodyFailure(details: String, override val cause: Option[Throwable] = None) extends MessageBodyFailure {
  /** Log message carries the details; the client response below does not. */
  def message: String = s"Malformed request body: $details"

  /** 400 Bad Request with a generic body that leaks no request content. */
  def toHttpResponse(httpVersion: HttpVersion): Task[Response] = {
    val resp = Response(Status.BadRequest, httpVersion)
    resp.withBody(s"The request body was malformed.")
  }
}
/** Indicates a semantic error decoding the body of an HTTP [[Message]]. */
sealed case class InvalidMessageBodyFailure(details: String, override val cause: Option[Throwable] = None) extends MessageBodyFailure {
  /** Log message carries the details; the client response below does not. */
  def message: String = s"Invalid request body: $details"

  /** 422 Unprocessable Entity with a generic body that leaks no request content. */
  override def toHttpResponse(httpVersion: HttpVersion): Task[Response] = {
    val resp = Response(Status.UnprocessableEntity, httpVersion)
    resp.withBody(s"The request body was invalid.")
  }
}
/** Indicates that a [[Message]] came with no supported [[MediaType]]. */
sealed abstract class UnsupportedMediaTypeFailure(expected: Set[MediaRange]) extends DecodeFailure with NoStackTrace {
  /** Prefix safe to show a client; subclasses supply the specifics. */
  def sanitizedResponsePrefix: String

  // Note: eager vals — expectedMsg must be initialized before responseMsg uses it.
  val expectedMsg: String = s"Expected one of the following media ranges: ${expected.map(_.renderString).mkString(", ")}"
  val responseMsg: String = s"$sanitizedResponsePrefix. $expectedMsg"

  def toHttpResponse(httpVersion: HttpVersion): Task[Response] =
    Response(Status.UnsupportedMediaType, httpVersion)
      .withBody(responseMsg)
}
/** Indicates that a [[Message]] attempting to be decoded has no [[MediaType]] and no
  * [[EntityDecoder]] was lenient enough to accept it. */
final case class MediaTypeMissing(expected: Set[MediaRange])
  extends UnsupportedMediaTypeFailure(expected)
{
  def sanitizedResponsePrefix: String = "No media type specified in Content-Type header"
  // No request content to echo, so the log message equals the client message.
  val message: String = responseMsg
}
/** Indicates that no [[EntityDecoder]] matches the [[MediaType]] of the [[Message]] being decoded */
final case class MediaTypeMismatch(messageType: MediaType, expected: Set[MediaRange])
  extends UnsupportedMediaTypeFailure(expected)
{
  def sanitizedResponsePrefix: String = "Media type supplied in Content-Type header is not supported"
  // Log message may include the offending media type; the sanitized prefix does not.
  def message: String = s"${messageType.renderString} is not a supported media type. $expectedMsg"
}
| hvesalai/http4s | core/src/main/scala/org/http4s/MessageFailure.scala | Scala | apache-2.0 | 5,678 |
package frameless
// Type-level evidence that Spark Catalyst can safely cast a column of type A to type B.
trait CatalystCast[A, B]
// Companion providing the implicit instances that enumerate the casts frameless permits.
// Instances exist only for casts with well-defined Catalyst semantics; the commented-out
// candidates below are documented-but-rejected or pending verification.
object CatalystCast {
  // Any type can be rendered as a string.
  implicit def castToString[T]: CatalystCast[T, String] = new CatalystCast[T, String] {}

  // Numeric widenings/narrowings (A must have a CatalystNumeric instance).
  implicit def numericToLong[A: CatalystNumeric]: CatalystCast[A, Long] = new CatalystCast[A, Long] {}
  implicit def numericToInt[A: CatalystNumeric]: CatalystCast[A, Int] = new CatalystCast[A, Int] {}
  implicit def numericToShort[A: CatalystNumeric]: CatalystCast[A, Short] = new CatalystCast[A, Short] {}
  implicit def numericToByte[A: CatalystNumeric]: CatalystCast[A, Byte] = new CatalystCast[A, Byte] {}
  implicit def numericToDecimal[A: CatalystNumeric]: CatalystCast[A, BigDecimal] = new CatalystCast[A, BigDecimal] {}
  implicit def numericToDouble[A: CatalystNumeric]: CatalystCast[A, Double] = new CatalystCast[A, Double] {}

  implicit def booleanToNumeric[A: CatalystNumeric]: CatalystCast[Boolean, A] = new CatalystCast[Boolean, A] {}

  // doesn't make any sense to include:
  // - sqlDateToBoolean: always None
  // - sqlTimestampToBoolean: compares us to 0

  // String-to-boolean may fail at runtime, hence the Option target.
  implicit object stringToBoolean extends CatalystCast[String, Option[Boolean]]
  implicit object longToBoolean extends CatalystCast[Long, Boolean]
  implicit object intToBoolean extends CatalystCast[Int, Boolean]
  implicit object shortToBoolean extends CatalystCast[Short, Boolean]
  implicit object byteToBoolean extends CatalystCast[Byte, Boolean]
  implicit object bigDecimalToBoolean extends CatalystCast[BigDecimal, Boolean]
  implicit object doubleToBoolean extends CatalystCast[Double, Boolean]

  // TODO

  // needs verification, does it make sense to include? probably better as a separate function
  // implicit object stringToInt extends CatalystCast[String, Option[Int]]
  // implicit object stringToShort extends CatalystCast[String, Option[Short]]
  // implicit object stringToByte extends CatalystCast[String, Option[Byte]]
  // implicit object stringToDecimal extends CatalystCast[String, Option[BigDecimal]]
  // implicit object stringToLong extends CatalystCast[String, Option[Long]]
  // implicit object stringToSqlDate extends CatalystCast[String, Option[SQLDate]]

  // needs verification:
  // implicit object sqlTimestampToSqlDate extends CatalystCast[SQLTimestamp, SQLDate]

  // needs verification:
  // implicit object sqlTimestampToDecimal extends CatalystCast[SQLTimestamp, BigDecimal]
  // implicit object sqlTimestampToLong extends CatalystCast[SQLTimestamp, Long]

  // needs verification:
  // implicit object stringToSqlTimestamp extends CatalystCast[String, SQLTimestamp]
  // implicit object longToSqlTimestamp extends CatalystCast[Long, SQLTimestamp]
  // implicit object intToSqlTimestamp extends CatalystCast[Int, SQLTimestamp]
  // implicit object doubleToSqlTimestamp extends CatalystCast[Double, SQLTimestamp]
  // implicit object floatToSqlTimestamp extends CatalystCast[Float, SQLTimestamp]
  // implicit object bigDecimalToSqlTimestamp extends CatalystCast[BigDecimal, SQLTimestamp]
  // implicit object sqlDateToSqlTimestamp extends CatalystCast[SQLDate, SQLTimestamp]

  // doesn't make sense to include:
  // - booleanToSqlTimestamp: 1L or 0L
  // - shortToSqlTimestamp: ???
  // - byteToSqlTimestamp: ???

  // doesn't make sense to include:
  // - sqlDateToLong: always None
  // - sqlDateToInt: always None
  // - sqlDateToInt: always None
  // - sqlDateToInt: always None
  // - sqlDateToInt: always None

  // doesn't make sense to include:
  // - sqlTimestampToInt: useful? can be done through `-> Long -> Int`
  // - sqlTimestampToShort: useful? can be done through `-> Long -> Int`
  // - sqlTimestampToShort: useful? can be done through `-> Long -> Int`
}
| OlivierBlanvillain/frameless | core/src/main/scala/frameless/CatalystCast.scala | Scala | apache-2.0 | 3,650 |
package org.podval.families.numbers.simple.monolithic
// A family of positional number types: each NumberSystem instance owns a
// path-dependent Number type represented as a sign plus a list of digits.
trait NumberSystem {
  import NumberSystem.Raw

  // The concrete number type of this system; path-dependent on the instance.
  protected type Number <: NumberBase

  // Factory turning a raw (sign, digits) pair into this system's Number.
  protected def createNumber(raw: Raw): Number

  trait NumberBase extends Ordered[Number] {
    protected final def numberSystem: NumberSystem = NumberSystem.this

    // Defining it as
    //   = numberSystem.createNumber(raw)
    // causes an error:
    //   type mismatch;
    //     found   : org.podval.families.numbers.simple.one.NumberSystem#Number
    //     required: NumberSystem.this.Number
    protected final def create(raw: Raw): Number =
      NumberSystem.this.createNumber(raw)

    def negative: Boolean
    def digits: List[Int]

    // Digit at position n, treating absent positions as 0.
    final def digit(n: Int): Int = if (digits.length >= n) digits(n) else 0

    // Returns a copy with digit n set to value, padding with zeros as needed.
    // NOTE(review): relies on Scala 2 auto-tupling of (negative, list) into Raw.
    final def digit(n: Int, value: Int): Number =
      create(negative, digits.padTo(n + 1, 0).updated(n, value))

    final def +(that: Number): Number = create(plusMinus(operationNegation = false, that))
    final def -(that: Number): Number = create(plusMinus(operationNegation = true, that))
    final def *(n: Int): Number = create(negative, digits map (n * _))

    // Shared addition/subtraction core — not yet implemented (throws NotImplementedError).
    private[this] final def plusMinus(operationNegation: Boolean, that: Number): Raw = ???

    // Ordering — not yet implemented (throws NotImplementedError).
    final def compare(that: Number): Int = ???

    // TODO unchecked because of the erasure; compare NumberSystems...
    final override def equals(other: Any): Boolean =
      if (!other.isInstanceOf[Number]) false else compare(other.asInstanceOf[Number]) == 0
  }

  // Convenience base class backing negative/digits directly by the raw pair.
  abstract class AbstractNumber(raw: Raw) extends NumberBase {
    final override def negative: Boolean = raw._1
    final override def digits: List[Int] = raw._2
  }
}
object NumberSystem {
  // Raw representation of a number: (is-negative flag, digit list).
  type Raw = (Boolean, List[Int])
}
| dubinsky/podval-families | src/main/scala/org/podval/families/numbers/simple/monolithic/NumberSystem.scala | Scala | apache-2.0 | 1,729 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2016, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package mutable
/** `ReusableBuilder` is a marker trait that indicates that a `Builder`
 *  can be reused to build more than one instance of a collection.  In
 *  particular, calling `result` followed by `clear` will produce a
 *  collection and reset the builder to begin building a new collection
 *  of the same type.
 *
 *  It is up to subclasses to implement this behavior, and to document any
 *  other behavior that varies from standard `ReusableBuilder` usage
 *  (e.g. operations being well-defined after a call to `result`, or allowing
 *  multiple calls to result to obtain different snapshots of a collection under
 *  construction).
 *
 *  @tparam Elem  the type of elements that get added to the builder.
 *  @tparam To    the type of collection that is produced.
 *
 *  @since 2.12
 */
trait ReusableBuilder[-Elem, +To] extends Builder[Elem, To] {
  /** Clears the contents of this builder.
   *  After execution of this method, the builder will contain no elements.
   *
   *  If executed immediately after a call to `result`, this allows a new
   *  instance of the same type of collection to be built.
   */
  override def clear(): Unit    // Note: overriding for scaladoc only!

  /** Produces a collection from the added elements.
   *
   *  After a call to `result`, the behavior of all other methods is undefined
   *  save for `clear`.  If `clear` is called, then the builder is reset and
   *  may be used to build another instance.
   *
   *  @return a collection containing the elements added to this builder.
   */
  override def result(): To    // Note: overriding for scaladoc only!
}
| slothspot/scala | src/library/scala/collection/mutable/ReusableBuilder.scala | Scala | bsd-3-clause | 2,177 |
package com.twitter.zipkin.storage.redis
import com.google.common.io.Closer
import com.twitter.finagle.redis.Client
import com.twitter.util.{Duration, Future}
import com.twitter.zipkin.adjuster.ApplyTimestampAndDuration
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.storage._
import java.nio.ByteBuffer
/**
 * Redis-backed [[SpanStore]]: spans are written both to raw storage and to the
 * query index, and reads fan out to whichever of the two serves the request.
 *
 * @param client the redis client to use
 * @param ttl expires keys older than this many seconds.
 */
class RedisSpanStore(client: Client, ttl: Option[Duration])
  extends SpanStore with CollectAnnotationQueries {
  private[this] val closer = Closer.create()
  private[this] val spanIndex = closer.register(new RedisIndex(client, ttl))
  private[this] val spanStorage = closer.register(new RedisStorage(client, ttl))

  /** For testing, clear this store. */
  private[redis] def clear(): Future[Unit] = client.flushDB()

  override def close() = closer.close()

  /** Normalizes each span (sorted annotations, derived timestamp/duration),
    * then writes it to storage and index concurrently. */
  override def apply(newSpans: Seq[Span]): Future[Unit] = {
    val normalized = newSpans
      .map(span => span.copy(annotations = span.annotations.sorted))
      .map(ApplyTimestampAndDuration.apply)
    Future.join(normalized.flatMap(span => Seq(spanStorage.storeSpan(span), spanIndex.index(span))))
  }

  override def getTracesByIds(traceIds: Seq[Long]): Future[Seq[List[Span]]] =
    spanStorage.getSpansByTraceIds(traceIds)

  override def getTraceIdsByName(
    serviceName: String,
    spanName: Option[String],
    endTs: Long,
    lookback: Long,
    limit: Int
  ): Future[Seq[IndexedTraceId]] =
    // The index takes timestamps scaled by 1000 (ms -> us, presumably — matches
    // the annotation query below).
    spanIndex.getTraceIdsByName(serviceName, spanName, endTs * 1000, lookback * 1000, limit)

  override def getTraceIdsByAnnotation(
    serviceName: String,
    annotation: String,
    value: Option[ByteBuffer],
    endTs: Long,
    lookback: Long,
    limit: Int
  ): Future[Seq[IndexedTraceId]] =
    spanIndex.getTraceIdsByAnnotation(serviceName, annotation, value, endTs * 1000, lookback * 1000, limit)

  override def getAllServiceNames() = spanIndex.getServiceNames.map(_.toList.sorted)

  override def getSpanNames(_serviceName: String) = {
    val serviceName = _serviceName.toLowerCase // service names are always lowercase!
    spanIndex.getSpanNames(serviceName).map(_.toList.sorted)
  }
}
| jkdcdlly/zipkin | zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/RedisSpanStore.scala | Scala | apache-2.0 | 2,140 |
package org.rovak.steamclient.steam3.handlers
import org.rovak.steamclient.steam3._
import steam.SteamId
import com.google.protobuf.ByteString
import rovak.steamkit.steam.{ClientMsgProtobuf, IPacketMsg}
import rovak.steamkit.steam.language.{EChatEntryType, EMsg}
import steam.generated.SteammessagesClientserver._
/**
 * Friend commands: chat messages and friend requests.
 */
trait FriendHandler extends MessageHandlerStack {
  this: SteamClient =>

  import Commands._

  /**
   * Sends a chat message to a friend.
   *
   * @param steamId SteamId to which to send the message
   * @param message Text to send
   */
  def sendChatMessage(steamId: SteamId, message: String) = {
    val msg = new ClientMsgProtobuf[CMsgClientFriendMsg.Builder](classOf[CMsgClientFriendMsg], EMsg.ClientFriendMsg)
    msg.body.setSteamid(steamId)
    msg.body.setChatEntryType(EChatEntryType.ChatMsg.v())
    msg.body.setMessage(ByteString.copyFromUtf8(message))
    send(msg)
  }

  // No friend-specific packets handled yet; everything falls through to the stack.
  override def internalHandleMessage(message: IPacketMsg) = {
    message.msgType match {
      case _ => super.internalHandleMessage(message)
    }
  }

  // Actor-style command handling, chained onto the rest of the stack.
  override def receive = super.receive orElse {
    case SendChatMessage(target, text) => sendChatMessage(target, text)
  }
}
| Rovak/scala-steamkit | steamkit/src/main/scala/org/rovak/steamclient/steam3/handlers/FriendHandler.scala | Scala | mit | 1,254 |
package gapt.expr.formula.hol
import gapt.expr.Apps
import gapt.expr.Expr
import gapt.expr.Var
import gapt.expr.formula.NonLogicalConstant
import gapt.expr.ty.To
// Constructor/extractor for HOL function applications: a constant or variable
// head applied to arguments, whose result type is not Boolean (To).
object HOLFunction {
  /** Applies `head` to `args`; the result must not be of type `To` (that would be an atom). */
  def apply( head: Expr, args: List[Expr] ): Expr = {
    val applied = Apps( head, args )
    require( applied.ty != To )
    applied
  }

  /** Matches a non-Boolean application of a non-logical constant or a variable. */
  def unapply( e: Expr ): Option[( Expr, List[Expr] )] =
    e match {
      case Apps( hd @ NonLogicalConstant( _, _, _ ), as ) if e.ty != To => Some( ( hd, as ) )
      case Apps( hd @ Var( _, _ ), as ) if e.ty != To                  => Some( ( hd, as ) )
      case _ => None
    }
}
| gapt/gapt | core/src/main/scala/gapt/expr/formula/hol/HOLFunction.scala | Scala | gpl-3.0 | 518 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.JavaConversions._
import java.util.concurrent.atomic.AtomicBoolean
import kafka.api.LeaderAndIsr
import kafka.common.{TopicAndPartition, StateChangeFailedException, PartitionOfflineException}
import kafka.utils.{Logging, ZkUtils}
import org.I0Itec.zkclient.IZkChildListener
import org.I0Itec.zkclient.exception.ZkNodeExistsException
/**
 * This class represents the state machine for partitions. It defines the states that a partition can be in, and
 * transitions to move the partition to another legal state. The different states that a partition can be in are -
 * 1. NonExistentPartition: This state indicates that the partition was either never created or was created and then
 *                          deleted. Valid previous state, if one exists, is OfflinePartition
 * 2. NewPartition        : After creation, the partition is in the NewPartition state. In this state, the partition should have
 *                          replicas assigned to it, but no leader/isr yet. Valid previous states are NonExistentPartition
 * 3. OnlinePartition     : Once a leader is elected for a partition, it is in the OnlinePartition state.
 *                          Valid previous states are NewPartition/OfflinePartition
 * 4. OfflinePartition    : If, after successful leader election, the leader for partition dies, then the partition
 *                          moves to the OfflinePartition state. Valid previous states are NewPartition/OnlinePartition
 */
class PartitionStateMachine(controller: KafkaController) extends Logging {
  this.logIdent = "[Partition state machine on Controller " + controller.config.brokerId + "]: "
  private val controllerContext = controller.controllerContext
  private val zkClient = controllerContext.zkClient
  // Current state of every known partition; mutated only from state transitions.
  var partitionState: mutable.Map[TopicAndPartition, PartitionState] = mutable.Map.empty
  // Batches leader-and-isr requests so each transition sends at most one round to brokers.
  val brokerRequestBatch = new ControllerBrokerRequestBatch(controller.sendRequest)
  // Default leader selector used when bringing offline/new partitions online.
  val offlinePartitionSelector = new OfflinePartitionLeaderSelector(controllerContext)
  private val isShuttingDown = new AtomicBoolean(false)
/**
 * Invoked on successful controller election. First registers a topic change listener since that triggers all
 * state transitions for partitions. Initializes the state of partitions by reading from zookeeper. Then triggers
 * the OnlinePartition state change for all new or offline partitions.
 */
def startup() {
  isShuttingDown.set(false)
  // initialize partition state
  initializePartitionState()
  // try to move partitions to online state
  triggerOnlinePartitionStateChange()
  info("Started partition state machine with initial state -> " + partitionState.toString())
}
// register topic and partition change listeners
def registerListeners() {
  registerTopicChangeListener()
}
/**
 * Invoked on controller shutdown. Flags the machine as shutting down and
 * forgets all partition state.
 */
def shutdown() {
  isShuttingDown.compareAndSet(false, true)
  partitionState.clear()
}
/**
 * This API invokes the OnlinePartition state change on all partitions in either the NewPartition or OfflinePartition
 * state. This is called on a successful controller election and on broker changes
 */
def triggerOnlinePartitionStateChange() {
  try {
    brokerRequestBatch.newBatch()
    // try to move all partitions in NewPartition or OfflinePartition state to OnlinePartition state
    for((topicAndPartition, partitionState) <- partitionState) {
      if(partitionState.equals(OfflinePartition) || partitionState.equals(NewPartition))
        handleStateChange(topicAndPartition.topic, topicAndPartition.partition, OnlinePartition, offlinePartitionSelector)
    }
    // Send the accumulated leader-and-isr requests to the live brokers in one round.
    brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.liveBrokers)
  } catch {
    // NOTE(review): bare `case e =>` catches all Throwables, including fatal
    // errors — consider scala.util.control.NonFatal here.
    case e => error("Error while moving some partitions to the online state", e)
  }
}
/**
 * This API is invoked by the partition change zookeeper listener
 * @param partitions   The list of partitions that need to be transitioned to the target state
 * @param targetState  The state that the partitions should be moved to
 * @param leaderSelector Strategy for electing a leader when moving partitions online
 *                       (defaults to the offline-partition selector)
 */
def handleStateChanges(partitions: Set[TopicAndPartition], targetState: PartitionState,
                       leaderSelector: PartitionLeaderSelector = offlinePartitionSelector) {
  info("Invoking state change to %s for partitions %s".format(targetState, partitions.mkString(",")))
  try {
    brokerRequestBatch.newBatch()
    partitions.foreach { topicAndPartition =>
      handleStateChange(topicAndPartition.topic, topicAndPartition.partition, targetState, leaderSelector)
    }
    // Flush all accumulated requests to brokers as a single batch.
    brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.liveBrokers)
  }catch {
    // NOTE(review): bare `case e =>` catches all Throwables, including fatal
    // errors — consider scala.util.control.NonFatal here.
    case e => error("Error while moving some partitions to %s state".format(targetState), e)
  }
}
/**
 * This API exercises the partition's state machine. It ensures that every state transition happens from a legal
 * previous state to the target state.
 * @param topic       The topic of the partition for which the state transition is invoked
 * @param partition   The partition for which the state transition is invoked
 * @param targetState The end state that the partition should be moved to
 * @param leaderSelector Strategy used to elect a leader when transitioning to OnlinePartition
 */
private def handleStateChange(topic: String, partition: Int, targetState: PartitionState,
                              leaderSelector: PartitionLeaderSelector) {
  val topicAndPartition = TopicAndPartition(topic, partition)
  // Unknown partitions implicitly start in NonExistentPartition.
  val currState = partitionState.getOrElseUpdate(topicAndPartition, NonExistentPartition)
  try {
    targetState match {
      case NewPartition =>
        // pre: partition did not exist before this
        // post: partition has been assigned replicas
        assertValidPreviousStates(topicAndPartition, List(NonExistentPartition), NewPartition)
        assignReplicasToPartitions(topic, partition)
        partitionState.put(topicAndPartition, NewPartition)
        info("Partition [%s, %d] state changed from NotExists to New with assigned replicas ".format(topic, partition) +
          "%s".format(controllerContext.partitionReplicaAssignment(topicAndPartition).mkString(",")))
      case OnlinePartition =>
        assertValidPreviousStates(topicAndPartition, List(NewPartition, OnlinePartition, OfflinePartition), OnlinePartition)
        // How we go online depends on where we came from:
        partitionState(topicAndPartition) match {
          case NewPartition =>
            // initialize leader and isr path for new partition
            initializeLeaderAndIsrForPartition(topicAndPartition)
          case OfflinePartition =>
            electLeaderForPartition(topic, partition, leaderSelector)
          case OnlinePartition => // invoked when the leader needs to be re-elected
            electLeaderForPartition(topic, partition, leaderSelector)
          case _ => // should never come here since illegal previous states are checked above
        }
        // Note: logged before partitionState is updated, so the "from" state is the old one.
        info("Partition [%s, %d] state changed from %s to OnlinePartition with leader %d".format(topic, partition,
          partitionState(topicAndPartition), controllerContext.allLeaders(topicAndPartition).leaderAndIsr.leader))
        partitionState.put(topicAndPartition, OnlinePartition)
        // post: partition has a leader
      case OfflinePartition =>
        // pre: partition should be in Online state
        assertValidPreviousStates(topicAndPartition, List(NewPartition, OnlinePartition), OfflinePartition)
        // should be called when the leader for a partition is no longer alive
        info("Partition [%s, %d] state changed from Online to Offline".format(topic, partition))
        partitionState.put(topicAndPartition, OfflinePartition)
        // post: partition has no alive leader
      case NonExistentPartition =>
        // pre: partition could be in either of the above states
        assertValidPreviousStates(topicAndPartition, List(OfflinePartition), NonExistentPartition)
        info("Partition [%s, %d] state changed from Offline to NotExists".format(topic, partition))
        partitionState.put(topicAndPartition, NonExistentPartition)
        // post: partition state is deleted from all brokers and zookeeper
    }
  } catch {
    // State is left at currState on failure; errors are logged, not rethrown.
    case t: Throwable => error("State change for partition [%s, %d] ".format(topic, partition) +
      "from %s to %s failed".format(currState, targetState), t)
  }
}
/**
* Invoked on startup of the partition's state machine to set the initial state for all existing partitions in
* zookeeper
*/
private def initializePartitionState() {
for((topicPartition, replicaAssignment) <- controllerContext.partitionReplicaAssignment) {
val topic = topicPartition.topic
val partition = topicPartition.partition
// check if leader and isr path exists for partition. If not, then it is in NEW state
ZkUtils.getLeaderAndIsrForPartition(zkClient, topic, partition) match {
case Some(currentLeaderAndIsr) =>
// else, check if the leader for partition is alive. If yes, it is in Online state, else it is in Offline state
controllerContext.liveBrokerIds.contains(currentLeaderAndIsr.leader) match {
case true => // leader is alive
partitionState.put(topicPartition, OnlinePartition)
case false =>
partitionState.put(topicPartition, OfflinePartition)
}
case None =>
partitionState.put(topicPartition, NewPartition)
}
}
}
private def assertValidPreviousStates(topicAndPartition: TopicAndPartition, fromStates: Seq[PartitionState],
targetState: PartitionState) {
if(!fromStates.contains(partitionState(topicAndPartition)))
throw new IllegalStateException("Partition %s should be in the %s states before moving to %s state"
.format(topicAndPartition, fromStates.mkString(","), targetState) + ". Instead it is in %s state"
.format(partitionState(topicAndPartition)))
}
/**
* Invoked on the NonExistentPartition->NewPartition state transition to update the controller's cache with the
* partition's replica assignment.
* @param topic The topic of the partition whose replica assignment is to be cached
* @param partition The partition whose replica assignment is to be cached
*/
private def assignReplicasToPartitions(topic: String, partition: Int) {
val assignedReplicas = ZkUtils.getReplicasForPartition(controllerContext.zkClient, topic, partition)
controllerContext.partitionReplicaAssignment += TopicAndPartition(topic, partition) -> assignedReplicas
}
/**
* Invoked on the NewPartition->OnlinePartition state change. When a partition is in the New state, it does not have
* a leader and isr path in zookeeper. Once the partition moves to the OnlinePartition state, it's leader and isr
* path gets initialized and it never goes back to the NewPartition state. From here, it can only go to the
* OfflinePartition state.
* @param topicAndPartition The topic/partition whose leader and isr path is to be initialized
*/
  private def initializeLeaderAndIsrForPartition(topicAndPartition: TopicAndPartition) {
    debug("Initializing leader and isr for partition %s".format(topicAndPartition))
    val replicaAssignment = controllerContext.partitionReplicaAssignment(topicAndPartition)
    val liveAssignedReplicas = replicaAssignment.filter(r => controllerContext.liveBrokerIds.contains(r))
    liveAssignedReplicas.size match {
      case 0 =>
        // No assigned replica is alive: the partition cannot come online.
        ControllerStats.offlinePartitionRate.mark()
        throw new StateChangeFailedException(("During state change of partition %s from NEW to ONLINE, assigned replicas are " +
          "[%s], live brokers are [%s]. No assigned replica is alive").format(topicAndPartition,
          replicaAssignment.mkString(","), controllerContext.liveBrokerIds))
      case _ =>
        debug("Live assigned replicas for partition %s are: [%s]".format(topicAndPartition, liveAssignedReplicas))
        // make the first replica in the list of assigned replicas, the leader
        val leader = liveAssignedReplicas.head
        val leaderIsrAndControllerEpoch = new LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader, liveAssignedReplicas.toList),
          controller.epoch)
        try {
          // Persist the initial leader/isr; createPersistentPath fails if the znode already exists.
          ZkUtils.createPersistentPath(controllerContext.zkClient,
            ZkUtils.getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition),
            ZkUtils.leaderAndIsrZkData(leaderIsrAndControllerEpoch.leaderAndIsr, controller.epoch))
          // NOTE: the above write can fail only if the current controller lost its zk session and the new controller
          // took over and initialized this partition. This can happen if the current controller went into a long
          // GC pause
          brokerRequestBatch.addLeaderAndIsrRequestForBrokers(liveAssignedReplicas, topicAndPartition.topic,
            topicAndPartition.partition, leaderIsrAndControllerEpoch, replicaAssignment.size)
          controllerContext.allLeaders.put(topicAndPartition, leaderIsrAndControllerEpoch)
          partitionState.put(topicAndPartition, OnlinePartition)
        } catch {
          case e: ZkNodeExistsException =>
            // Another controller already initialized this partition; surface its leader/isr in the error message.
            // read the controller epoch
            val leaderIsrAndEpoch = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topicAndPartition.topic,
              topicAndPartition.partition).get
            ControllerStats.offlinePartitionRate.mark()
            throw new StateChangeFailedException("Error while changing partition %s's state from New to Online"
              .format(topicAndPartition) + " since Leader and isr path already exists with value " +
              "%s and controller epoch %d".format(leaderIsrAndEpoch.leaderAndIsr.toString(), leaderIsrAndEpoch.controllerEpoch))
        }
    }
  }
/**
* Invoked on the OfflinePartition->OnlinePartition state change. It invokes the leader election API to elect a leader
* for the input offline partition
* @param topic The topic of the offline partition
* @param partition The offline partition
* @param leaderSelector Specific leader selector (e.g., offline/reassigned/etc.)
*/
  def electLeaderForPartition(topic: String, partition: Int, leaderSelector: PartitionLeaderSelector) {
    // handle leader election for the partitions whose leader is no longer alive
    info("Electing leader for partition [%s, %d]".format(topic, partition))
    try {
      var zookeeperPathUpdateSucceeded: Boolean = false
      var newLeaderAndIsr: LeaderAndIsr = null
      var replicasForThisPartition: Seq[Int] = Seq.empty[Int]
      // Retry until the conditional zookeeper update succeeds (optimistic concurrency on zkVersion).
      while(!zookeeperPathUpdateSucceeded) {
        val currentLeaderIsrAndEpoch = getLeaderIsrAndEpochOrThrowException(topic, partition)
        val currentLeaderAndIsr = currentLeaderIsrAndEpoch.leaderAndIsr
        val controllerEpoch = currentLeaderIsrAndEpoch.controllerEpoch
        // A higher epoch in zookeeper means another controller took over; abort instead of clobbering its data.
        if(controllerEpoch > controller.epoch)
          throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
            "means the current controller with epoch %d went through a soft failure and another ".format(controller.epoch) +
            "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
        // elect new leader or throw exception
        val (leaderAndIsr, replicas) = leaderSelector.selectLeader(topic, partition, currentLeaderAndIsr)
        val (updateSucceeded, newVersion) = ZkUtils.conditionalUpdatePersistentPath(zkClient,
          ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition),
          ZkUtils.leaderAndIsrZkData(leaderAndIsr, controller.epoch), currentLeaderAndIsr.zkVersion)
        newLeaderAndIsr = leaderAndIsr
        newLeaderAndIsr.zkVersion = newVersion
        zookeeperPathUpdateSucceeded = updateSucceeded
        replicasForThisPartition = replicas
      }
      val newLeaderIsrAndControllerEpoch = new LeaderIsrAndControllerEpoch(newLeaderAndIsr, controller.epoch)
      // update the leader cache
      controllerContext.allLeaders.put(TopicAndPartition(topic, partition), newLeaderIsrAndControllerEpoch)
      info("Elected leader %d for Offline partition [%s, %d]".format(newLeaderAndIsr.leader, topic, partition))
      // store new leader and isr info in cache
      brokerRequestBatch.addLeaderAndIsrRequestForBrokers(replicasForThisPartition, topic, partition,
        newLeaderIsrAndControllerEpoch, controllerContext.partitionReplicaAssignment(TopicAndPartition(topic, partition)).size)
    } catch {
      case poe: PartitionOfflineException => throw new PartitionOfflineException("All replicas for partition [%s, %d] are dead."
        .format(topic, partition) + " Marking this partition offline", poe)
      // NOTE(review): this default case catches every remaining Throwable (including fatal errors) — confirm intended.
      case sce => throw new StateChangeFailedException(("Error while electing leader for partition " +
        " [%s, %d] due to: %s.").format(topic, partition, sce.getMessage), sce)
    }
    debug("After leader election, leader cache is updated to %s".format(controllerContext.allLeaders.map(l => (l._1, l._2))))
  }
private def registerTopicChangeListener() = {
zkClient.subscribeChildChanges(ZkUtils.BrokerTopicsPath, new TopicChangeListener())
}
def registerPartitionChangeListener(topic: String) = {
zkClient.subscribeChildChanges(ZkUtils.getTopicPath(topic), new PartitionChangeListener(topic))
}
private def getLeaderIsrAndEpochOrThrowException(topic: String, partition: Int): LeaderIsrAndControllerEpoch = {
ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition) match {
case Some(currentLeaderIsrAndEpoch) => currentLeaderIsrAndEpoch
case None =>
throw new StateChangeFailedException("Leader and ISR information doesn't exist for partition " +
"[%s, %d] in %s state".format(topic, partition, partitionState(TopicAndPartition(topic, partition))))
}
}
/**
* This is the zookeeper listener that triggers all the state transitions for a partition
*/
  class TopicChangeListener extends IZkChildListener with Logging {
    this.logIdent = "[TopicChangeListener on Controller " + controller.config.brokerId + "]: "
    /**
     * Invoked by zookeeper when the children of the broker topics path change. Diffs the children
     * against the cached topic set, updates the controller's topic and replica-assignment caches,
     * and triggers onNewTopicCreation for any newly created topics.
     */
    @throws(classOf[Exception])
    def handleChildChange(parentPath : String, children : java.util.List[String]) {
      if(!isShuttingDown.get()) {
        controllerContext.controllerLock synchronized {
          try {
            debug("Topic change listener fired for path %s with children %s".format(parentPath, children.mkString(",")))
            // NOTE(review): JavaConversions is deprecated in later Scala versions; JavaConverters would be explicit.
            val currentChildren = JavaConversions.asBuffer(children).toSet
            // Diff against the cached topic set to find what was created and what was removed.
            val newTopics = currentChildren -- controllerContext.allTopics
            val deletedTopics = controllerContext.allTopics -- currentChildren
            // val deletedPartitionReplicaAssignment = replicaAssignment.filter(p => deletedTopics.contains(p._1._1))
            controllerContext.allTopics = currentChildren
            // Drop assignments of deleted topics, then merge in assignments of the new ones.
            val addedPartitionReplicaAssignment = ZkUtils.getReplicaAssignmentForTopics(zkClient, newTopics.toSeq)
            controllerContext.partitionReplicaAssignment = controllerContext.partitionReplicaAssignment.filter(p =>
              !deletedTopics.contains(p._1.topic))
            controllerContext.partitionReplicaAssignment.++=(addedPartitionReplicaAssignment)
            info("New topics: [%s], deleted topics: [%s], new partition replica assignment [%s]".format(newTopics,
              deletedTopics, addedPartitionReplicaAssignment))
            if(newTopics.size > 0)
              controller.onNewTopicCreation(newTopics, addedPartitionReplicaAssignment.keySet.toSet)
          } catch {
            // Errors are swallowed after logging so one bad event does not kill the listener.
            case e => error("Error while handling new topic", e )
          }
          // TODO: kafka-330 Handle deleted topics
        }
      }
    }
  }
  class PartitionChangeListener(topic: String) extends IZkChildListener with Logging {
    this.logIdent = "[Controller " + controller.config.brokerId + "], "
    /** Placeholder listener for partition changes of `topic`; handling is not implemented yet (KAFKA-41). */
    @throws(classOf[Exception])
    def handleChildChange(parentPath : String, children : java.util.List[String]) {
      controllerContext.controllerLock synchronized {
        // TODO: To be completed as part of KAFKA-41
      }
    }
  }
}
/** A state in the partition state machine, identified by a unique byte code. */
sealed trait PartitionState {
  /** Byte identifier for this state. */
  def state: Byte
}

/** The partition was just created and has assigned replicas, but no leader/isr yet. */
case object NewPartition extends PartitionState {
  val state: Byte = 0
}

/** The partition has an elected leader. */
case object OnlinePartition extends PartitionState {
  val state: Byte = 1
}

/** The partition's leader is no longer alive. */
case object OfflinePartition extends PartitionState {
  val state: Byte = 2
}

/** The partition does not exist (was never created, or was deleted). */
case object NonExistentPartition extends PartitionState {
  val state: Byte = 3
}
| dchenbecker/kafka-sbt | core/src/main/scala/kafka/controller/PartitionStateMachine.scala | Scala | apache-2.0 | 21,530 |
package vu.elements.of.style
/**
* @author v.uspenskiy
* @since 25/06/15
*/
// Placeholder object for notes on program structure; each TODO below names a
// style guideline still to be illustrated with an example.
object c$structure {
  // TODO: AvoidGotoCompletely
  // TODO: ChooseADataRepresentationThatMakesTheProgramSimple
  // TODO: DontPatchBadCodeRewriteIt
  // TODO: ModularizePeriodUseSubroutines
  // TODO: UseDataArraysToAvoidRepetitiveControlSequences
  // TODO: UseGotoOnlyToImplementAFundamentalStructure
  // TODO: UseIfElseToImplementMultiwayBranches
  // TODO: UseRecursiveProceduresForRecursiveDataStructures
  // TODO: WriteAndTestABigProgramInSmallPieces
  // TODO: WriteEasyPseudoLanguageFirst
}
| vuspenskiy/programming-style | src/vu/elements/of/style/c$structure.scala | Scala | mit | 595 |
// ==> 040fb47fbaf718cecb11a7d51ac5a48bf4f6a1fe.scala <==
object x0 {
val x0 : _ with // error // error
// error | som-snytt/dotty | tests/neg/i4373a.scala | Scala | apache-2.0 | 114 |
package org.http4s
package server
package staticcontent
import org.http4s.Method.{GET, POST}
import org.http4s.server.staticcontent.WebjarService.Config
import org.http4s.server.staticcontent.WebjarServiceFilterSpec.{runReq, throwA}
object WebjarServiceSpec extends Http4sSpec with StaticContentShared {

  /** The service under test, built with the default webjar configuration. */
  def s: HttpService = webjarService(Config())

  "The WebjarService" should {

    "Return a 200 Ok file" in {
      val request = Request(GET, Uri(path = "/test-lib/1.0.0/testresource.txt"))
      val (body, response) = runReq(request)
      body must_== testWebjarResource
      response.status must_== Status.Ok
    }

    "Return a 200 Ok file in a subdirectory" in {
      val request = Request(GET, Uri(path = "/test-lib/1.0.0/sub/testresource.txt"))
      val (body, response) = runReq(request)
      body must_== testWebjarSubResource
      response.status must_== Status.Ok
    }

    "Not find missing file" in {
      val request = Request(uri = uri("/test-lib/1.0.0/doesnotexist.txt"))
      s.apply(request) must returnValue(Pass)
    }

    "Not find missing library" in {
      val request = Request(uri = uri("/1.0.0/doesnotexist.txt"))
      s.apply(request) must returnValue(Pass)
    }

    "Not find missing version" in {
      val request = Request(uri = uri("/test-lib//doesnotexist.txt"))
      s.apply(request) must returnValue(Pass)
    }

    "Not find missing asset" in {
      val request = Request(uri = uri("/test-lib/1.0.0/"))
      s.apply(request) must returnValue(Pass)
    }

    "Not match a request with POST" in {
      val request = Request(POST, Uri(path = "/test-lib/1.0.0/testresource.txt"))
      s.apply(request) must returnValue(Pass)
    }
  }
}
| ZizhengTai/http4s | server/src/test/scala/org/http4s/server/staticcontent/WebjarServiceSpec.scala | Scala | apache-2.0 | 1,614 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data
/** Provides high level interfaces to the Event Store from within a prediction
 * engine.
 *
 * The package object body is intentionally empty; it exists only to host this
 * package-level documentation.
 */
package object store {}
| alex9311/PredictionIO | data/src/main/scala/org/apache/predictionio/data/store/package.scala | Scala | apache-2.0 | 772 |
package org.jetbrains.plugins.scala.lang.psi.api.statements
import com.intellij.psi.PsiNamedElement
import org.jetbrains.plugins.scala.extensions.PsiNamedElementExt
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
/** A PSI element that introduces one or more named elements. */
trait ScDeclaredElementsHolder extends ScalaPsiElement {

  /** The named elements declared by this holder. */
  def declaredElements: Seq[PsiNamedElement]

  /** Names of all declared elements, in declaration order. */
  def declaredNames: Seq[String] = declaredElements.map(_.name)

  /** The declared elements as an array, for Java client compatibility. */
  def declaredElementsArray: Array[PsiNamedElement] = declaredElements.toArray
}
/*
* Copyright 2017-2020 Aleksey Fomkin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package korolev.server
import korolev.effect.{Effect, Reporter}
import korolev.state.IdGenerator
import korolev.{Context, Extension, Router}
import levsha.Document
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
/**
 * Configuration of a Korolev service.
 *
 * Notes grounded in the declaration below:
 *  - `stateStorage` defaults to `null`, which stands for `StateStorage.DefaultStateStorage`
 *    (see the inline comment on the parameter).
 *  - `render` and `head` are deprecated since 0.16.0 in favour of `document`.
 *  - `maxFormDataEntrySize` defaults to 8 KiB; `heartbeatInterval` defaults to 5 seconds.
 */
case class KorolevServiceConfig[F[_]: Effect, S, M](
  stateLoader: StateLoader[F, S],
  stateStorage: korolev.state.StateStorage[F, S] = null, // By default it is StateStorage.DefaultStateStorage
  http: PartialFunction[HttpRequest[F], F[HttpResponse[F]]] = PartialFunction.empty[HttpRequest[F], F[HttpResponse[F]]],
  router: Router[F, S] = Router.empty[F, S],
  rootPath: String = "/",
  @deprecated("Use `document` instead of `render`. Do not use `render` and `document` together.", "0.16.0") render: S => Document.Node[Context.Binding[F, S, M]] = (_: S) => levsha.dsl.html.body(),
  @deprecated("Add head() tag to `document`. Do not use `head` and `document` together.", "0.16.0") head: S => Seq[Document.Node[Context.Binding[F, S, M]]] = (_: S) => Seq.empty,
  document: S => Document.Node[Context.Binding[F, S, M]] = null, // TODO (_: S) => levsha.dsl.html.Html(),
  connectionLostWidget: Document.Node[Context.Binding[F, S, M]] =
    KorolevServiceConfig.defaultConnectionLostWidget[Context.Binding[F, S, M]],
  maxFormDataEntrySize: Int = 1024 * 8,
  extensions: List[Extension[F, S, M]] = Nil,
  idGenerator: IdGenerator[F] = IdGenerator.default[F](),
  heartbeatInterval: FiniteDuration = 5.seconds,
  reporter: Reporter = Reporter.PrintReporter
)(implicit val executionContext: ExecutionContext)
object KorolevServiceConfig {
  /**
   * Default value for `connectionLostWidget`: a banner fixed to the top of the
   * page announcing that the connection was lost.
   */
  def defaultConnectionLostWidget[MiscType]: Document.Node[MiscType] = {
    import levsha.dsl._
    import html._
    optimize {
      div(
        position @= "fixed",
        top @= "0",
        left @= "0",
        right @= "0",
        backgroundColor @= "lightyellow",
        borderBottom @= "1px solid black",
        padding @= "10px",
        "Connection lost. Waiting to resume."
      )
    }
  }
}
| fomkin/korolev | modules/korolev/src/main/scala/korolev/server/KorolevServiceConfig.scala | Scala | apache-2.0 | 2,595 |
package com.signalcollect.triplerush.optimizers
import com.signalcollect.triplerush.PredicateSelectivity
import com.signalcollect.triplerush.TriplePattern
import com.signalcollect.triplerush.TriplePattern
import com.signalcollect.triplerush.TriplePattern
import com.signalcollect.triplerush.TriplePattern
import scala.annotation.tailrec
import com.signalcollect.triplerush.PredicateStats
final class ExplorationOptimizer(
  val predicateSelectivity: PredicateSelectivity,
  val reliableStats: Boolean = true,
  val useHeuristic: Boolean = false) extends Optimizer {

  /**
   * Cost bookkeeping for a plan prefix: `frontier` is the estimated number of bindings after the
   * last explored pattern, `lastExploration` the cost of exploring that pattern, and
   * `explorationSum` the accumulated exploration cost of the whole prefix.
   */
  case class CostEstimate(frontier: Double, lastExploration: Double, explorationSum: Double)

  def optimize(cardinalities: Map[TriplePattern, Long], predicateStats: Map[Int, PredicateStats]): Array[TriplePattern] = {

    // In-place reversal; the plan is built back-to-front and reversed once at the end.
    @inline def reverseMutableArray(arr: Array[TriplePattern]) {
      var fromStart = 0
      var fromEnd = arr.length - 1
      while (fromStart < fromEnd) {
        val t = arr(fromStart)
        arr(fromStart) = arr(fromEnd)
        arr(fromEnd) = t
        fromStart += 1
        fromEnd -= 1
      }
    }

    /**
     * parameters:
     * 		list of candidate pattern (length 1 or 2)
     * 		Optional list of previous optimal pattern order and the corresponding costs: size of frontier, exploration cost, totalcost
     * returns:
     * 		optimal ordering of candidate with previously picked patterns, together with the corresponding costs: size of frontier, exploration cost, totalcost
     *
     * NOTE(review): this helper appears to have no call sites in this class — confirm before removing.
     */
    def costOfPatternGivenPrevious(candidate: TriplePattern, previous: (List[TriplePattern], CostEstimate)): (List[TriplePattern], CostEstimate) = {
      val cost: (List[TriplePattern], CostEstimate) = {
        val res = costForPattern(candidate, previous)
        if (res.lastExploration == 0) {
          // A zero last-exploration cost means the pattern cannot match: the whole plan is free/empty.
          (candidate :: previous._1, CostEstimate(0, 0, 0))
        } else {
          (candidate :: previous._1, CostEstimate(res.frontier, res.lastExploration, res.explorationSum + previous._2.explorationSum))
        }
      }
      cost
    }

    /**
     * parameters:
     * 		candidate pattern
     * 		previously picked pattern order with the corresponding costs: size of frontier, exploration cost, totalcost
     * returns:
     * 		cost of the order: size of frontier, exploration cost, totalcost
     */
    def costForPattern(candidate: TriplePattern, previous: (List[TriplePattern], CostEstimate)): CostEstimate = {
      val exploreCost = previous._2.frontier * exploreCostForCandidatePattern(candidate, previous._1)
      val frontierSize = frontierSizeForCandidatePattern(candidate, exploreCost, previous._1)
      if (frontierSize == 0) {
        CostEstimate(0, 0, 0)
      } else {
        CostEstimate(frontierSize, exploreCost, exploreCost)
      }
    }

    /**
     * returns lookupcost for the candidate pattern, given the cost of previous pattern order.
     * Estimates depend on which of the candidate's positions are bound, either by constants or
     * by variables already bound in the picked patterns.
     */
    def exploreCostForCandidatePattern(candidate: TriplePattern, pickedPatterns: List[TriplePattern]): Double = {
      val boundVariables = pickedPatterns.foldLeft(Set.empty[Int]) { case (result, current) => result.union(current.variableSet) }
      val intersectionVariables = boundVariables.intersect(candidate.variableSet)
      val numberOfPredicates = predicateSelectivity.predicates.size
      val predicateIndexForCandidate = candidate.p
      // Positive ids are constants; negative ids are variables (bound if already seen).
      val isSubjectBound = (candidate.s > 0 || intersectionVariables.contains(candidate.s))
      val isObjectBound = (candidate.o > 0 || intersectionVariables.contains(candidate.o))
      if (candidate.p > 0) {
        val stats = predicateStats(predicateIndexForCandidate)
        //if all bound
        if ((intersectionVariables.size == candidate.variableSet.size)) {
          1
        } //s,p,*
        else if (isSubjectBound && candidate.o < 0) {
          math.min(cardinalities(candidate), stats.objectCount)
        } //*,p,o
        else if (isObjectBound && candidate.s < 0) {
          math.min(cardinalities(candidate), stats.subjectCount)
        } //s,*,o
        else if (isSubjectBound && isObjectBound) {
          math.min(cardinalities(candidate), numberOfPredicates)
        } //*,p,*
        else if (!isSubjectBound && !isObjectBound) {
          math.min(cardinalities(candidate), stats.edgeCount * stats.objectCount)
        } else {
          cardinalities(candidate)
        }
      } else {
        cardinalities(candidate)
      }
    }

    /**
     * returns frontierSize for the candidate pattern, given the cost of previous pattern order and previous pattern order.
     * When predicate selectivities apply, the frontier is capped by the tightest selectivity bound
     * over all already-picked patterns that share a subject/object with the candidate.
     */
    def frontierSizeForCandidatePattern(candidate: TriplePattern, exploreCostOfCandidate: Double, pickedPatterns: List[TriplePattern]): Double = {
      val boundVariables = pickedPatterns.foldLeft(Set.empty[Int]) { case (result, current) => result.union(current.variableSet) }
      if (pickedPatterns.isEmpty) {
        exploreCostOfCandidate
      } //if either s or o is bound)
      else if ((candidate.o > 0 || candidate.s > 0 || boundVariables.contains(candidate.s) || boundVariables.contains(candidate.o)) && (candidate.p > 0)) {
        //TODO: make minimum computation more efficient
        var minFrontierSizeEstimate = exploreCostOfCandidate // assume the worst.
        pickedPatterns.foreach {
          pattern =>
            if (pattern.p > 0) {
              minFrontierSizeEstimate = math.min(minFrontierSizeEstimate, calculatePredicateSelectivityCost(pattern, candidate))
            }
        }
        minFrontierSizeEstimate
      } //otherwise
      else {
        exploreCostOfCandidate
      }
    }

    // Selects the selectivity statistic matching how `prev` and `candidate` share a subject/object position.
    def calculatePredicateSelectivityCost(prev: TriplePattern, candidate: TriplePattern): Double = {
      val upperBoundBasedOnPredicateSelectivity = (prev.s, prev.o) match {
        case (candidate.s, _) =>
          predicateSelectivity.outOut(prev.p, candidate.p)
        case (candidate.o, _) =>
          predicateSelectivity.outIn(prev.p, candidate.p)
        case (_, candidate.o) =>
          predicateSelectivity.inIn(prev.p, candidate.p)
        case (_, candidate.s) =>
          predicateSelectivity.inOut(prev.p, candidate.p)
        case other =>
          // No shared position: selectivity gives no bound.
          Double.MaxValue
      }
      upperBoundBasedOnPredicateSelectivity
    }

    val triplePatterns = cardinalities.keys.toArray
    val sizeOfFullPlan = cardinalities.size

    // Extends a partial plan by one pattern, recomputing cost, heuristic estimate and fringe.
    def extend(p: QueryPlan, tp: TriplePattern): QueryPlan = {
      val exploreCost = exploreCostForCandidatePattern(tp, p.patternOrdering)
      val newCostSoFar = p.costSoFar + exploreCost
      val matchedPatternsSoFar = p.id.size + 1
      val remainingUnmatchedPatterns = sizeOfFullPlan - matchedPatternsSoFar
      val newEstimatedTotalCost = if (useHeuristic) {
        // Estimate the rest of the plan at the average per-pattern cost observed so far.
        val avgPatternCostSoFar = newCostSoFar / matchedPatternsSoFar
        newCostSoFar + remainingUnmatchedPatterns * avgPatternCostSoFar
      } else {
        newCostSoFar
      }
      val fringeAfterExploration = frontierSizeForCandidatePattern(tp, exploreCost, p.patternOrdering)
      QueryPlan(
        id = p.id + tp,
        costSoFar = newCostSoFar,
        estimatedTotalCost = newEstimatedTotalCost,
        patternOrdering = tp :: p.patternOrdering,
        fringe = fringeAfterExploration)
    }

    if (sizeOfFullPlan > 8) {
      // The exploration optimizer is awful for queries with many patterns, use the clever cardinality optimizer instead.
      CleverCardinalityOptimizer.optimize(cardinalities, predicateStats)
    } else {
      // Best-first search over partial plans, ordered by estimated total cost.
      val allPatterns = cardinalities.keySet
      val planHeap = new QueryPlanMinHeap(100 * sizeOfFullPlan * sizeOfFullPlan)
      // Seed the heap with all single-pattern plans.
      triplePatterns.foreach { tp =>
        val cardinality = cardinalities(tp).toDouble
        val atomicPlan = QueryPlan(
          id = Set(tp),
          costSoFar = cardinality,
          estimatedTotalCost = cardinality * sizeOfFullPlan,
          patternOrdering = List(tp),
          fringe = cardinality)
        planHeap.insert(atomicPlan)
      }
      var goodCompletePlan: QueryPlan = null
      while (goodCompletePlan == null) {
        val topPlan = planHeap.remove
        // With reliable statistics, a zero fringe proves the query has no results at all.
        if (reliableStats && topPlan.fringe == 0) {
          return Array()
        }
        if (topPlan.id.size == sizeOfFullPlan) {
          goodCompletePlan = topPlan
        } else {
          val candidatePatterns = allPatterns -- topPlan.id
          candidatePatterns.foreach { tp =>
            val extendedPlan = extend(topPlan, tp)
            planHeap.insert(extendedPlan)
          }
        }
      }
      val resultOrder = goodCompletePlan.patternOrdering.toArray
      reverseMutableArray(resultOrder)
      resultOrder
    }
  }
}
| jacqueslk/triplerush-filter | src/main/scala/com/signalcollect/triplerush/optimizers/ExplorationOptimizer.scala | Scala | apache-2.0 | 8,596 |
package controllers.admin
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import org.mockito.Matchers.any
import org.mockito.Mockito.when
import org.scalatest.BeforeAndAfterAll
import org.scalatest.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import jp.t2v.lab.play2.auth.test.Helpers.AuthFakeRequest
import model.Computer
import model.Role
import model.User
import model.form.{BlockPageForm, ComputerForm, SSHOrderForm, SelectComputersForm}
import model.form.data._
import model.json.LoginJson
import org.mockito.Mock
import play.api.Environment
import play.api.i18n.MessagesApi
import play.api.libs.json.{JsValue, Json}
import play.api.mvc.Result
import play.api.test.FakeRequest
import play.inject.Injector
import play.test.WithApplication
import services.state.ActionState
import services.{ComputerService, RoomService, UserService, state}
import test.ControllerTest
/**
* @author Camilo Sampedro <camilo.sampedro@udea.edu.co>
*/
trait ComputerControllerSpec extends ControllerTest {

  // Mocked ComputerController dependencies
  lazy val roomService = mock[RoomService]
  lazy val messagesApi = mock[MessagesApi]
  implicit lazy val userService = mock[UserService]
  implicit lazy val environment = mock[Environment]

  /**
   * Execution context is a particular exception to the mocked dependencies
   */
  implicit lazy val executionContext: ExecutionContext = ExecutionContext.global

  /**
   * Mock user authentication: any username/password pair authenticates as an Administrator.
   */
  when(userService.checkAndGet(any[String], any[String])) thenReturn Future.successful(Some(User("", "", None, Role.Administrator)))

  /**
   * Logged in user to pass
   */
  val loggedInUser = LoginJson("", "")

  /**
   * Computer with data to be tested
   */
  val computer = Computer(ip = "127.0.0.1", name = Some("Localhost"), SSHUser = "user", SSHPassword = "password",
    description = Some(""), roomID = Some(1))

  // JSON payload addressing the test computer by its ip.
  val ipJson: JsValue = Json.parse(
    s"""
      |{
      | "ip": "${computer.ip}"
      |}
    """.stripMargin)

  // JSON payload for blocking a page on the test computer.
  val blockPageJson: JsValue = Json.parse(
    s"""
      |{
      | "ips": ["${computer.ip}"],
      | "page": "example.com"
      |}
    """.stripMargin
  )

  /**
   * Sample command
   */
  val command = "echo \"Hola\""

  // Exit statuses and outputs used by the mocked SSH orders below.
  val errorStatus = 1
  val errorOutput = "Some errors for tests"
  val goodStatus = 0
  val goodOutput = "Some good outputs for tests"

  /**
   * Mocked computer service methods for testing only the controller
   * @param actionState Action state to be returned when methods being executed
   * @return Mocked computer service
   */
  def mockComputerService(actionState: ActionState): ComputerService = {
    // Mock the computer service
    lazy val computerService = mock[ComputerService]
    // This state will be used for methods that don't have other states that ActionCompleted and Failed
    val alternativeState = if(actionState!=state.ActionCompleted){
      state.Failed
    } else {
      actionState
    }
    // For example, add will not have more than ActionCompleted and Failed states.
    when(computerService.add(any[String], any[Option[String]], any[String], any[String], any[Option[String]],
      any[Option[Long]])) thenReturn Future.successful(alternativeState)
    // delete will do have more than those two states
    when(computerService.delete(any[String])) thenReturn Future.successful(actionState)
    when(computerService.edit(any[Computer])) thenReturn Future.successful(actionState)
    // NOTE(review): the shutdown stub is duplicated — possibly one of these was meant for another overload.
    when(computerService.shutdown(any[List[String]])(any[String])) thenReturn Future.successful(actionState)
    when(computerService.shutdown(any[List[String]])(any[String])) thenReturn Future.successful(actionState)
    actionState match {
      case state.ActionCompleted =>
        // SSH-order methods report a completed order carrying its output and exit status.
        val orderCompletedStatus = state.OrderCompleted(errorOutput,errorStatus)
        when(computerService.blockPage(any[List[String]], any[String])(any[String])) thenReturn Future.successful(orderCompletedStatus)
        when(computerService.upgrade(any[List[String]])(any[String])) thenReturn Future.successful(orderCompletedStatus)
        when(computerService.unfreeze(any[List[String]])(any[String])) thenReturn Future.successful(orderCompletedStatus)
        when(computerService.sendCommand(any[List[String]], any[Boolean], any[Boolean], any[String])(any[String])) thenReturn Future.successful(orderCompletedStatus)
      case state.Failed =>
        // SSH-order methods report a failed order carrying its output and exit status.
        val orderFailedStatus = state.OrderFailed(errorOutput,errorStatus)
        when(computerService.blockPage(any[List[String]], any[String])(any[String])) thenReturn Future.successful(orderFailedStatus)
        when(computerService.upgrade(any[List[String]])(any[String])) thenReturn Future.successful(orderFailedStatus)
        when(computerService.unfreeze(any[List[String]])(any[String])) thenReturn Future.successful(orderFailedStatus)
        when(computerService.sendCommand(any[List[String]], any[Boolean], any[Boolean], any[String])(any[String])) thenReturn Future.successful(orderFailedStatus)
      case _ =>
        // Any other state is passed through unchanged.
        when(computerService.blockPage(any[List[String]], any[String])(any[String])) thenReturn Future.successful(actionState)
        when(computerService.upgrade(any[List[String]])(any[String])) thenReturn Future.successful(actionState)
        when(computerService.unfreeze(any[List[String]])(any[String])) thenReturn Future.successful(actionState)
        when(computerService.sendCommand(any[List[String]], any[Boolean], any[Boolean], any[String])(any[String])) thenReturn Future.successful(actionState)
    }
    computerService
  }
}
| ProjectAton/AtonLab | test/controllers/admin/ComputerControllerSpec.scala | Scala | gpl-3.0 | 5,663 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.util.Locale
import play.api.http.HeaderNames
import play.core.utils.CaseInsensitiveOrdered
import scala.collection.JavaConverters._
import scala.collection.immutable.{ TreeMap, TreeSet }
/**
* The HTTP headers set.
*
* @param _headers The sequence of values. This value is protected and mutable
* since subclasses might initially set it to a `null` value and then initialize
* it lazily.
*/
class Headers(protected var _headers: Seq[(String, String)]) {
  /**
   * The headers as a sequence of name-value pairs, in insertion order.
   */
  def headers: Seq[(String, String)] = _headers
/**
* Checks if the given header is present.
*
* @param headerName The name of the header (case-insensitive)
* @return <code>true</code> if the request did contain the header.
*/
def hasHeader(headerName: String): Boolean = get(headerName).isDefined
/**
* True if this request has a body, so we know if we should trigger body parsing. The base implementation simply
* checks for the Content-Length or Transfer-Encoding headers, but subclasses (such as fake requests) may return
* true in other cases so the headers need not be updated to reflect the body.
*/
def hasBody: Boolean = {
import HeaderNames._
get(CONTENT_LENGTH).exists(_.toLong > 0) || hasHeader(TRANSFER_ENCODING)
}
/**
* Append the given headers
*/
def add(headers: (String, String)*): Headers = new Headers(this.headers ++ headers)
/**
* Retrieves the first header value which is associated with the given key.
*/
def apply(key: String): String = get(key).getOrElse(scala.sys.error("Header doesn't exist"))
/**
* Optionally returns the first header value associated with a key.
*/
def get(key: String): Option[String] = getAll(key).headOption
/**
* Retrieve all header values associated with the given key.
*/
def getAll(key: String): Seq[String] = toMap.getOrElse(key, Nil)
/**
* Retrieve all header keys
*/
def keys: Set[String] = toMap.keySet
/**
* Remove any headers with the given keys
*/
def remove(keys: String*): Headers = {
val keySet = TreeSet(keys: _*)(CaseInsensitiveOrdered)
new Headers(headers.filterNot { case (name, _) => keySet(name) })
}
/**
* Append the given headers, replacing any existing headers having the same keys
*/
def replace(headers: (String, String)*): Headers = remove(headers.map(_._1): _*).add(headers: _*)
/**
* Transform the Headers to a Map
*/
lazy val toMap: Map[String, Seq[String]] = {
val builder = TreeMap.newBuilder[String, Seq[String]](CaseInsensitiveOrdered)
headers.groupBy(_._1.toLowerCase(Locale.ENGLISH)).foreach {
case (_, headers) =>
// choose the case of first header as canonical
builder += headers.head._1 -> headers.map(_._2)
}
builder.result()
}
/**
* Transform the Headers to a Map by ignoring multiple values.
*/
lazy val toSimpleMap: Map[String, String] = toMap.mapValues(_.headOption.getOrElse(""))
lazy val asJava: play.mvc.Http.Headers = new play.mvc.Http.Headers(this.toMap.mapValues(_.asJava).asJava)
/**
* A headers map with all keys normalized to lowercase
*/
private lazy val lowercaseMap: Map[String, Set[String]] = toMap.map {
case (name, value) => name.toLowerCase(Locale.ENGLISH) -> value
}.mapValues(_.toSet)
override def equals(that: Any): Boolean = that match {
case other: Headers => lowercaseMap == other.lowercaseMap
case _ => false
}
override def hashCode: Int = lowercaseMap.hashCode()
override def toString: String = headers.toString()
}
object Headers {

  /**
   * Creates an empty header set. Provided for ease of calling from Java.
   */
  def create(): Headers = new Headers(Seq.empty)

  /**
   * Creates a header set from the given name-value pairs.
   */
  def apply(headers: (String, String)*): Headers = new Headers(headers)
}
| zaneli/playframework | framework/src/play/src/main/scala/play/api/mvc/Headers.scala | Scala | apache-2.0 | 3,876 |
package io.ssc.angles.pipeline.explorers
import java.io.PrintWriter
import java.nio.file.Files
import java.nio.file.Paths
import org.slf4j.LoggerFactory
// Command-line tool: reads a cluster CSV file and writes every cross-cluster pair
// ("anti-pair") as a "left;right" line into the output file.
object GenerateAntiPairs extends App {
  val logger = LoggerFactory.getLogger(GenerateAntiPairs.getClass)

  // args(0): input cluster CSV, args(1): output anti-pairs file
  val clustersFile = args(0)
  val antiPairsFile = args(1)

  logger.info("Loading cluster file")
  val clusters = ClusterReadWriter.readClusterFile(clustersFile)
  logger.info("Got {} clusters from CSV", clusters.getNumClusters)

  val path = Paths.get(antiPairsFile)
  // NOTE(review): the writer is not closed if an exception is thrown below; a
  // try/finally would be safer, though for a one-shot CLI the JVM exit limits the harm.
  val writer = new PrintWriter(Files.newBufferedWriter(path))

  // n counts the number of anti-pairs written, for the final log line.
  var n = 0
  // Iterate over all unordered pairs of distinct clusters.
  // NOTE(review): indices start at 1 — if getCluster is 0-based this skips the first
  // cluster entirely; confirm ClusterReadWriter's indexing convention.
  for (i <- 1 until clusters.getNumClusters; j <- (i + 1) until clusters.getNumClusters) {
    val cluster1 = clusters.getCluster(i)
    val cluster2 = clusters.getCluster(j)
    // Cartesian product of the two clusters' members, one "a;b" line per pair.
    (cluster1 cross cluster2).foreach { p =>
      writer.print(p._1)
      writer.print(";")
      writer.println(p._2)
      n += 1
    }
  }
  writer.close()
  logger.info("Generated {} anti-pairs", n)

  // Adds a `cross` (Cartesian product) operation to any Traversable.
  implicit class Crossable[X](xs: Traversable[X]) {
    def cross[Y](ys: Traversable[Y]) = (xs).flatMap { case x => (ys).map { case y => (x, y) } }
  }
}
| jhendess/angles | src/main/scala/io/ssc/angles/pipeline/explorers/GenerateAntiPairs.scala | Scala | gpl-3.0 | 1,156 |
package scalax.chart
import org.jfree.ui.RectangleInsets
import module.CategoryDatasetConversions._
/** Represents categorized numeric data with a "Spider Web" radar. */
abstract class SpiderWebChart protected () extends Chart {
  // The concrete JFreeChart plot type backing this chart.
  type Plot = SpiderWebPlot

  // Both construction paths (companion `apply` and `fromPeer`) guarantee the peer's
  // plot is a SpiderWebPlot, so the cast is safe.
  override def plot: SpiderWebPlot = peer.getPlot.asInstanceOf[SpiderWebPlot]
}
/** Factory for ${chart}s.
*
* @define chart SpiderWeb chart
* @define Chart SpiderWebChart
*/
object SpiderWebChart extends ChartCompanion[SpiderWebChart] {

  /** Wraps an existing JFreeChart peer, which must already carry a SpiderWebPlot. */
  override final def fromPeer(jfree: JFreeChart): SpiderWebChart = {
    require(jfree.getPlot.isInstanceOf[Plot], "Illegal peer plot type.")
    new SpiderWebChart { override final lazy val peer = jfree }
  }

  /** Creates a new $chart.
    *
    * @param data $data
    * @param theme $theme
    *
    * @usecase def apply(data: CategoryDataset): SpiderWebChart = ???
    */
  def apply[A: ToCategoryDataset](data: A)
           (implicit theme: ChartTheme = ChartTheme.Default): SpiderWebChart = {
    val webPlot = new SpiderWebPlot(ToCategoryDataset[A].convert(data))
    webPlot.setInsets(new RectangleInsets(0.0, 5.0, 5.0, 5.0))
    SpiderWebChart(webPlot, title = "", legend = true)
  }
}
| wookietreiber/scala-chart | src/main/scala/scalax/chart/SpiderWebChart.scala | Scala | lgpl-3.0 | 1,230 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features
import org.locationtech.geomesa.features.SerializationOption.Value
/**
* Options to be applied when encoding. The same options must be specified when decoding.
*/
/**
 * Options to be applied when encoding. The same options must be specified when decoding.
 */
object SerializationOption extends Enumeration {
  type SerializationOption = Value

  // NOTE: declaration order fixes the underlying enumeration ids — do not reorder.
  val WithUserData :Value = Value
  val WithoutId :Value = Value
  val Immutable :Value = Value
  val Lazy :Value = Value

  /** Value-class wrapper adding convenience queries to a set of options. */
  implicit class SerializationOptions(val options: Set[SerializationOption]) extends AnyVal {

    /**
      * @param value the value to search for
      * @return true iff ``this`` contains the given ``value``
      */
    def contains(value: SerializationOption): Boolean = options.contains(value)

    def withUserData: Boolean = contains(WithUserData)
    def withoutId: Boolean    = contains(WithoutId)
    def immutable: Boolean    = contains(Immutable)
    def isLazy: Boolean       = contains(Lazy)
  }

  object SerializationOptions {

    // Common pre-built option sets.
    val none: Set[SerializationOption]         = Set.empty[SerializationOption]
    val withUserData: Set[SerializationOption] = Set(WithUserData)
    val withoutId: Set[SerializationOption]    = Set(WithoutId)
    val immutable: Set[SerializationOption]    = Set(Immutable)

    def builder: Builder = new Builder()

    /** Mutable fluent accumulator for building an option set. */
    class Builder {
      private val selected = scala.collection.mutable.Set.empty[SerializationOption]

      // Shared helper: record one option and return this builder for chaining.
      private def select(option: SerializationOption): Builder = {
        selected += option
        this
      }

      def immutable: Builder    = select(Immutable)
      def withUserData: Builder = select(WithUserData)
      def withoutId: Builder    = select(WithoutId)
      def `lazy`: Builder       = select(Lazy)

      /** Snapshot of the accumulated options as an immutable set. */
      def build: Set[SerializationOption] = selected.toSet
    }
  }
}
| aheyne/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/SerializationOption.scala | Scala | apache-2.0 | 2,200 |
package com.atomist.rug.runtime.js.interop
import com.atomist.param.SimpleParameterValues
import com.atomist.parse.java.ParsingTargets
import com.atomist.project.edit.{NoModificationNeeded, SuccessfulModification}
import com.atomist.rug.TestUtils
import org.scalatest.{FlatSpec, Matchers}
class JavaScriptBackedTypeProviderTest extends FlatSpec with Matchers {

  it should "invoke tree finder with one level only" in {
    val jsed = TestUtils.editorInSideFile(this, "SimpleBanana.ts")
    val target = ParsingTargets.SpringIoGuidesRestServiceSource
    jsed.modify(target, SimpleParameterValues.Empty) match {
      case _: NoModificationNeeded =>
      case x => fail(s"Unexpected: $x")
    }
  }

  it should "invoke tree finder with two levels" in {
    val jsed = TestUtils.editorInSideFile(this, "TwoLevel.ts")
    val target = ParsingTargets.SpringIoGuidesRestServiceSource
    jsed.modify(target, SimpleParameterValues.Empty) match {
      case _: NoModificationNeeded =>
      case x => fail(s"Unexpected: $x")
    }
  }

  it should "invoke side effecting tree finder with two levels" in {
    val jsed = TestUtils.editorInSideFile(this, "MutatingBanana.ts")
    val target = ParsingTargets.SpringIoGuidesRestServiceSource
    jsed.modify(target, SimpleParameterValues.Empty) match {
      case sm: SuccessfulModification =>
        // Bug fix: the original computed this Boolean and discarded it, so the
        // test could never fail here. Assert the mutation actually happened.
        sm.result.allFiles.exists(f => f.name.endsWith(".java") && f.content.startsWith("I am evil!")) should be(true)
      case x =>
        fail(s"Unexpected: $x")
    }
  }
}
| atomist/rug | src/test/scala/com/atomist/rug/runtime/js/interop/JavaScriptBackedTypeProviderTest.scala | Scala | gpl-3.0 | 1,464 |
package com.karasiq.nanoboard.server
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}
import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.MediaType.Compressible
import akka.http.scaladsl.model.headers.{`Cache-Control`, CacheDirectives}
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.util.ByteString
import boopickle.Default._
import com.karasiq.nanoboard.{NanoboardCategory, NanoboardMessage}
import com.karasiq.nanoboard.api.{NanoboardCaptchaAnswer, NanoboardReply}
import com.karasiq.nanoboard.dispatcher.NanoboardDispatcher
import com.karasiq.nanoboard.server.streaming.NanoboardMessageStream
import com.karasiq.nanoboard.server.utils.{AttachmentGenerator, FractalMusic}
object NanoboardServer {
  /** Factory mirroring the private constructor. */
  def apply(dispatcher: NanoboardDispatcher)(implicit actorSystem: ActorSystem, actorMaterializer: ActorMaterializer): NanoboardServer =
    new NanoboardServer(dispatcher)
}
/**
 * HTTP API of the nanoboard server: read endpoints (GET), posting/creation (POST),
 * deletion (DELETE), updates (PUT), and a websocket event channel at /live.
 * All operations are delegated to the supplied [[NanoboardDispatcher]].
 */
private[server] final class NanoboardServer(dispatcher: NanoboardDispatcher)(implicit actorSystem: ActorSystem, actorMaterializer: ActorMaterializer) extends BinaryMarshaller {
  private implicit def ec: ExecutionContext = actorSystem.dispatcher

  // Upper bound (bytes) for a posted message, read from `nanoboard.max-post-size`.
  private val maxPostSize = actorSystem.settings.config.getMemorySize("nanoboard.max-post-size").toBytes

  val route = {
    get {
      // Single post
      path("post" / NanoboardMessage.HashFormat) { hash ⇒
        complete(StatusCodes.OK, dispatcher.post(hash))
      } ~
      (pathPrefix("posts") & parameters('offset.as[Long].?(0), 'count.as[Long].?(100))) { (offset, count) ⇒
        // Thread
        path(NanoboardMessage.HashFormat) { hash ⇒
          complete(StatusCodes.OK, dispatcher.thread(hash, offset, count))
        } ~
        // Recent posts
        pathEndOrSingleSlash {
          complete(StatusCodes.OK, dispatcher.recent(offset, count))
        }
      } ~
      // Pending posts
      (path("pending") & parameters('offset.as[Long].?(0), 'count.as[Long].?(100))) { (offset, count) ⇒
        complete(StatusCodes.OK, dispatcher.pending(offset, count))
      } ~
      // Categories
      path("categories") {
        complete(StatusCodes.OK, dispatcher.categories())
      } ~
      // Places
      path("places") {
        complete(StatusCodes.OK, dispatcher.places())
      } ~
      // Containers
      (path("containers") & parameters('offset.as[Long].?(0), 'count.as[Long].?(100))) { (offset, count) ⇒
        complete(StatusCodes.OK, dispatcher.containers(offset, count))
      } ~
      // Fractal music renderer (results are immutable, hence the aggressive cache header)
      (path("fractal_music" / Segment) & respondWithHeaders(`Cache-Control`(CacheDirectives.public, CacheDirectives.`max-age`(100000000L)))) { formula ⇒
        complete(StatusCodes.OK, FractalMusic(formula).map(HttpEntity(ContentType(MediaType.audio("wav", Compressible)), _)))
      } ~
      // Verification data
      (path("verify" / NanoboardMessage.HashFormat)) { hash ⇒
        complete(StatusCodes.OK, dispatcher.requestVerification(hash))
      } ~
      // Static files (must come last: matches the root path and any resource path)
      encodeResponse(pathEndOrSingleSlash(getFromResource("webapp/index.html")) ~ getFromResourceDirectory("webapp"))
    } ~
    post {
      // New reply; rejected with HTTP 400 if the message exceeds maxPostSize
      (path("post") & entity(as[NanoboardReply](defaultUnmarshaller))) { case NanoboardReply(parent, message) ⇒
        if (message.length <= maxPostSize) {
          complete(StatusCodes.OK, dispatcher.reply(parent, message))
        } else {
          complete(StatusCodes.custom(400, s"Message is too long. Max size is $maxPostSize bytes"), HttpEntity(""))
        }
      } ~
      // Create container
      (path("container") & parameters('pending.as[Int].?(10), 'random.as[Int].?(50), 'format.?("png")) & entity(as[ByteString]) & extractLog) { (pending, random, format, entity, log) ⇒
        onComplete(dispatcher.createContainer(pending, random, format, entity)) {
          case Success(data) ⇒
            complete(StatusCodes.OK, HttpEntity(data))

          case Failure(exc) ⇒
            log.error(exc, "Container creation error")
            complete(StatusCodes.custom(500, "Container creation error"), HttpEntity(ByteString.empty))
        }
      } ~
      // Generate attachment
      (path("attachment") & parameters('format.?("jpeg"), 'size.as[Int].?(500), 'quality.as[Int].?(70)) & entity(as[ByteString])) { (format, size, quality, data) ⇒
        complete(StatusCodes.OK, HttpEntity(ContentTypes.`text/plain(UTF-8)`, AttachmentGenerator.createImage(format, size, quality, data)))
      } ~
      // Verify post
      (path("verify") & entity[NanoboardCaptchaAnswer](defaultUnmarshaller)) { answer ⇒
        complete(StatusCodes.OK, dispatcher.verifyPost(answer.request, answer.answer))
      }
    } ~
    delete {
      // Delete single post
      path("post" / NanoboardMessage.HashFormat) { hash ⇒
        extractLog { log ⇒
          log.info("Post permanently deleted: {}", hash)
          complete(StatusCodes.OK, dispatcher.delete(hash))
        }
      } ~
      // Delete container posts
      (path("posts") & parameter('container.as[Long])) { container ⇒
        complete(StatusCodes.OK, dispatcher.clearContainer(container))
      } ~
      // Delete post from pending list
      path("pending" / NanoboardMessage.HashFormat) { hash ⇒
        complete(StatusCodes.OK, dispatcher.markAsNotPending(hash))
      } ~
      // Batch delete recent posts
      (path("posts") & parameters('offset.as[Long].?(0), 'count.as[Long])) { (offset, count) ⇒ // Batch delete
        complete(StatusCodes.OK, dispatcher.delete(offset, count))
      } ~
      // Clear deleted posts cache
      path("deleted") {
        complete(StatusCodes.OK, dispatcher.clearDeleted())
      }
    } ~
    put {
      // Update places list
      (path("places") & entity(as[Seq[String]](defaultUnmarshaller)) & extractLog) { (places, log) ⇒
        log.info("Places updated: {}", places)
        complete(StatusCodes.OK, dispatcher.updatePlaces(places))
      } ~
      // Update categories list
      (path("categories") & entity(as[Seq[NanoboardCategory]](defaultUnmarshaller)) & extractLog) { (categories, log) ⇒
        log.info("Categories updated: {}", categories)
        complete(StatusCodes.OK, dispatcher.updateCategories(categories))
      } ~
      // Add post to pending list
      path("pending" / NanoboardMessage.HashFormat) { hash ⇒
        complete(StatusCodes.OK, dispatcher.markAsPending(hash))
      }
    } ~
    // Event channel
    path("live") {
      handleWebSocketMessages(NanoboardMessageStream.flow)
    }
  }
}
| Karasiq/nanoboard | src/main/scala/com/karasiq/nanoboard/server/NanoboardServer.scala | Scala | apache-2.0 | 6,626 |
package com.datastax.spark.connector.writer
import java.nio.ByteBuffer
import com.datastax.driver.core.BoundStatement
import com.datastax.spark.connector.cql.TableDef
/** This class computes the routing key of a bound statement. */
/** This class computes the routing key of a bound statement. */
class RoutingKeyGenerator(table: TableDef, columnNames: Seq[String])
  extends ((BoundStatement) => ByteBuffer) {

  // Positions of the partition-key columns within `columnNames`.
  private val partitionKeyIdxs = {
    val indexes = table.partitionKey
      .map(pkColumn => columnNames.indexOf(pkColumn.columnName))
      .filter(_ >= 0)
    require(indexes.size == table.partitionKey.size, "Not all partition key columns were selected.")
    indexes
  }

  // Per-thread scratch array, reused across invocations to avoid allocation.
  @transient
  protected lazy val routingKey = new ThreadLocal[Array[ByteBuffer]] {
    override def initialValue() = Array.ofDim[ByteBuffer](partitionKeyIdxs.size)
  }

  // this method is copied from Java Driver:
  // each component is encoded as [2-byte big-endian length][bytes][0x00].
  private def composeRoutingKeys(buffers: Array[ByteBuffer]): ByteBuffer = {
    var totalLength = 0
    buffers.foreach(b => totalLength += b.remaining() + 3)
    val out = ByteBuffer.allocate(totalLength)
    buffers.foreach { buffer =>
      val bb = buffer.duplicate
      out.put(((bb.remaining >> 8) & 0xFF).toByte)
      out.put((bb.remaining & 0xFF).toByte)
      out.put(bb)
      out.put(0.toByte)
    }
    out.flip
    out
  }

  // Copies the partition-key column values of `stmt` into the per-thread scratch array.
  private def fillRoutingKey(stmt: BoundStatement): Array[ByteBuffer] = {
    val rk = routingKey.get
    var i = 0
    while (i < partitionKeyIdxs.size) {
      rk(i) = stmt.getBytesUnsafe(partitionKeyIdxs(i))
      i += 1
    }
    rk
  }

  /** Returns the routing key: the raw value for a single-column partition key,
    * the composed encoding otherwise. */
  def apply(stmt: BoundStatement): ByteBuffer =
    fillRoutingKey(stmt) match {
      case Array(single) => single
      case multiple      => composeRoutingKeys(multiple)
    }
}
| brkyvz/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/writer/RoutingKeyGenerator.scala | Scala | apache-2.0 | 1,609 |
/*
* BlockSize.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape.graph
import de.sciss.fscape.Graph.{ProductReader, RefMapIn}
import de.sciss.fscape.{Graph, Lazy, UGenGraph, UGenInLike, GE => _GE}
import de.sciss.numbers.Implicits.intNumberWrapper
import scala.{Unit => _Unit}
object BlockSize {
  /**
   * Type class deciding how the result of a `BlockSize` branch is materialized:
   * either as a graph element (`GE`) or as a plain side effect (`Unit`).
   */
  sealed trait Result[-A, Res] {
    def make(n: Int, g: Graph, result: A): Res
  }
  implicit object GE extends Result[_GE, _GE] with ProductReader[GE] {
    override def read(in: RefMapIn, key: String, arity: Int): GE = {
      require (arity == 3)
      // NOTE: read order (n, graph, result) must match the serialized field order.
      val _n      = in.readInt()
      val _g      = in.readGraph()
      val _result = in.readGE()
      new GE(_n, _g, _result)
    }

    def make(n: Int, g: Graph, result: _GE): _GE = GE(n, g, result)
  }
  /** Runs a nested graph with block size `n`, yielding `result` as a graph element. */
  final case class GE(n: Int, g: Graph, result: _GE) extends _GE.Lazy {
    override def productPrefix: String = s"BlockSize$$GE" // serialization

    override protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike =
      b.withBlockSize(n) {
        b.expandNested(g)
        result.expand
      }
  }

  implicit object Unit extends Result[_Unit, _Unit] with ProductReader[Unit] {
    override def read(in: RefMapIn, key: String, arity: Int): Unit = {
      require (arity == 2)
      // NOTE: read order (n, graph) must match the serialized field order.
      val _n      = in.readInt()
      val _g      = in.readGraph()
      new Unit(_n, _g)
    }

    def make(n: Int, g: Graph, result: _Unit): _Unit = {
      Unit(n, g)
      ()
    }
  }
  /** Runs a nested graph with block size `n` purely for its side effects. */
  final case class Unit(n: Int, g: Graph) extends Lazy.Expander[_Unit] {
    override def productPrefix: String = s"BlockSize$$Unit" // serialization

    override protected def makeUGens(implicit b: UGenGraph.Builder): _Unit = {
      b.withBlockSize(n) {
        b.expandNested(g)
      }
      ()
    }
  }

  /**
   * Evaluates `branch` inside a nested graph whose UGens run at block size `n`.
   *
   * @param n      the block size; must be a positive power of two
   * @param branch the sub-graph body; its value is materialized per the implicit `Result`
   */
  def apply[A, Res](n: Int)(branch: => A)(implicit result: Result[A, Res]): Res = {
    require (n > 0 && n.isPowerOfTwo)
    var res: A = null.asInstanceOf[A]
    val g = Graph {
      res = branch
    }
    result.make(n, g, res)
  }
}
//object TestImplicit {
// implicitly[BlockSize.Result[ControlBlockSize, _GE]]
//}
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/graph/BlockSize.scala | Scala | agpl-3.0 | 2,325 |
//
// author: Cosmin Basca
//
// Copyright 2010 University of Zurich
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package com.simplehttp
import java.io.{PrintWriter, StringWriter, PrintStream}

import scala.collection.mutable
import scala.util.control.NonFatal

import org.simpleframework.http.{Response, Request}
import org.slf4j.LoggerFactory
import com.typesafe.scalalogging.slf4j.Logger
/**
* Created by basca on 04/06/14.
*/
/**
* the trait of all Http Handlers for a specific route
* @tparam T the passed application type
*/
trait HttpRouteHandler[T] {
  /**
   * internal logger
   */
  val logger = Logger(LoggerFactory getLogger getClass.getName)

  /**
   * request headers, inheriting classes can add specific headers to this map which will later be included in the
   * request
   */
  val headers: mutable.Map[String, String] = mutable.Map[String, String]()

  /**
   * the content type matching the content of the response of this handler
   *
   * @return the content type
   */
  def contentType: MimeTypes.Value

  /**
   * request processor
   *
   * @param request the request
   * @param application the application
   * @return either a string (Left) or a byte array (Right) response body
   */
  def process(request: Request, application: Option[T]): Either[String, Array[Byte]]

  /**
   * the actual request handler.
   * Non-fatal exceptions are trapped, logged, and returned in a response with HTTP code 500 (with the stack trace
   * in the body); otherwise control is passed to the [[process]] method and a 200 response is produced.
   * Fatal throwables (e.g. OutOfMemoryError) and InterruptedException propagate instead of being swallowed.
   *
   * @param request the request
   * @param response the response
   * @param application the application
   */
  def handleRequest(request: Request, response: Response, application: Option[T]) = {
    val body: PrintStream = response.getPrintStream
    val time: Long = System.currentTimeMillis()

    response.setValue("Content-Type", contentType.toString)
    response.setValue("Server", s"${BuildInfo.name}-${BuildInfo.version}")
    response.setDate("Date", time)
    response.setDate("Last-Modified", time)

    // set any extra headers defined by the user
    for ((header, value) <- headers) {
      response.setValue(header, value)
    }

    try {
      process(request, application) match {
        case Left(stringContent) => body.println(stringContent)
        case Right(binaryContent) => body.write(binaryContent)
      }
      response.setCode(200) // on success
    } catch {
      // NonFatal instead of `Exception`: lets InterruptedException and VM errors
      // propagate rather than being reported as an HTTP 500.
      case NonFatal(e) =>
        // Previously failures were only echoed to the client; log them server-side too.
        logger.error(s"error handling request: ${request.getPath}", e)
        response.setCode(500) // on error
        val sw: StringWriter = new StringWriter()
        e.printStackTrace(new PrintWriter(sw))
        body.println(sw.toString)
    }
    finally {
      body.close()
    }
  }
}
| cosminbasca/simplehttp | src/main/scala/com/simplehttp/HttpRouteHandler.scala | Scala | apache-2.0 | 3,134 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.math.RoundingMode
import java.util.Locale
import com.google.common.math.{DoubleMath, IntMath, LongMath}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode}
import org.apache.spark.sql.catalyst.util.DateTimeConstants.MONTHS_PER_YEAR
import org.apache.spark.sql.catalyst.util.IntervalUtils
import org.apache.spark.sql.catalyst.util.IntervalUtils._
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.DayTimeIntervalType.{DAY, HOUR, MINUTE, SECOND}
import org.apache.spark.sql.types.YearMonthIntervalType.{MONTH, YEAR}
import org.apache.spark.unsafe.types.CalendarInterval
/**
 * Base class for expressions extracting a single field (years, months, days, ...) from an
 * interval value of type `T` (`CalendarInterval`, or the underlying Int/Long representation
 * of the ANSI year-month / day-time interval types).
 *
 * @param dataType the Catalyst result type of the extracted field
 * @param func     the extraction function used for interpreted evaluation
 * @param funcName name of the matching method on `IntervalUtils`, used by code generation
 */
abstract class ExtractIntervalPart[T](
    val dataType: DataType,
    func: T => Any,
    funcName: String) extends UnaryExpression with NullIntolerant with Serializable {
  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // Generated code calls the same IntervalUtils method as the interpreted path below.
    val iu = IntervalUtils.getClass.getName.stripSuffix("$")
    defineCodeGen(ctx, ev, c => s"$iu.$funcName($c)")
  }

  override protected def nullSafeEval(interval: Any): Any = {
    func(interval.asInstanceOf[T])
  }
}
// --- Field extractors over the legacy CalendarInterval type. ---
// Each wraps the corresponding IntervalUtils getter; result types follow the
// field's value range (e.g. months/hours/minutes fit a Byte, seconds is Decimal(8, 6)).

case class ExtractIntervalYears(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](IntegerType, getYears, "getYears") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalYears =
    copy(child = newChild)
}

case class ExtractIntervalMonths(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](ByteType, getMonths, "getMonths") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalMonths =
    copy(child = newChild)
}

case class ExtractIntervalDays(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](IntegerType, getDays, "getDays") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalDays =
    copy(child = newChild)
}

case class ExtractIntervalHours(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](ByteType, getHours, "getHours") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalHours =
    copy(child = newChild)
}

case class ExtractIntervalMinutes(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](ByteType, getMinutes, "getMinutes") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalMinutes =
    copy(child = newChild)
}

case class ExtractIntervalSeconds(child: Expression)
  extends ExtractIntervalPart[CalendarInterval](DecimalType(8, 6), getSeconds, "getSeconds") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalSeconds =
    copy(child = newChild)
}
// --- Field extractors over the ANSI interval types. ---
// The type parameter is the physical representation: Int for year-month intervals,
// Long for day-time intervals.

case class ExtractANSIIntervalYears(child: Expression)
  extends ExtractIntervalPart[Int](IntegerType, getYears, "getYears") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalYears =
    copy(child = newChild)
}

case class ExtractANSIIntervalMonths(child: Expression)
  extends ExtractIntervalPart[Int](ByteType, getMonths, "getMonths") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalMonths =
    copy(child = newChild)
}

case class ExtractANSIIntervalDays(child: Expression)
  extends ExtractIntervalPart[Long](IntegerType, getDays, "getDays") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalDays = {
    copy(child = newChild)
  }
}

case class ExtractANSIIntervalHours(child: Expression)
  extends ExtractIntervalPart[Long](ByteType, getHours, "getHours") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalHours =
    copy(child = newChild)
}

case class ExtractANSIIntervalMinutes(child: Expression)
  extends ExtractIntervalPart[Long](ByteType, getMinutes, "getMinutes") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalMinutes =
    copy(child = newChild)
}

case class ExtractANSIIntervalSeconds(child: Expression)
  extends ExtractIntervalPart[Long](DecimalType(8, 6), getSeconds, "getSeconds") {
  override protected def withNewChildInternal(newChild: Expression): ExtractANSIIntervalSeconds =
    copy(child = newChild)
}
object ExtractIntervalPart {
  /**
   * Resolves an EXTRACT(field FROM interval) call: maps the textual field name (and its
   * accepted aliases) plus the source's interval type to the matching extractor expression.
   * For ANSI interval types, the requested unit must fall within the interval's declared
   * start/end field range; otherwise, and for any unknown field name, `errorHandleFunc`
   * is invoked to raise the resolution error.
   */
  def parseExtractField(
      extractField: String,
      source: Expression,
      errorHandleFunc: => Nothing): Expression = {
    (extractField.toUpperCase(Locale.ROOT), source.dataType) match {
      case ("YEAR" | "Y" | "YEARS" | "YR" | "YRS", YearMonthIntervalType(start, end))
        if isUnitInIntervalRange(YEAR, start, end) =>
        ExtractANSIIntervalYears(source)
      case ("YEAR" | "Y" | "YEARS" | "YR" | "YRS", CalendarIntervalType) =>
        ExtractIntervalYears(source)
      case ("MONTH" | "MON" | "MONS" | "MONTHS", YearMonthIntervalType(start, end))
        if isUnitInIntervalRange(MONTH, start, end) =>
        ExtractANSIIntervalMonths(source)
      case ("MONTH" | "MON" | "MONS" | "MONTHS", CalendarIntervalType) =>
        ExtractIntervalMonths(source)
      case ("DAY" | "D" | "DAYS", DayTimeIntervalType(start, end))
        if isUnitInIntervalRange(DAY, start, end) =>
        ExtractANSIIntervalDays(source)
      case ("DAY" | "D" | "DAYS", CalendarIntervalType) =>
        ExtractIntervalDays(source)
      case ("HOUR" | "H" | "HOURS" | "HR" | "HRS", DayTimeIntervalType(start, end))
        if isUnitInIntervalRange(HOUR, start, end) =>
        ExtractANSIIntervalHours(source)
      case ("HOUR" | "H" | "HOURS" | "HR" | "HRS", CalendarIntervalType) =>
        ExtractIntervalHours(source)
      case ("MINUTE" | "M" | "MIN" | "MINS" | "MINUTES", DayTimeIntervalType(start, end))
        if isUnitInIntervalRange(MINUTE, start, end) =>
        ExtractANSIIntervalMinutes(source)
      case ("MINUTE" | "M" | "MIN" | "MINS" | "MINUTES", CalendarIntervalType) =>
        ExtractIntervalMinutes(source)
      case ("SECOND" | "S" | "SEC" | "SECONDS" | "SECS", DayTimeIntervalType(start, end))
        if isUnitInIntervalRange(SECOND, start, end) =>
        ExtractANSIIntervalSeconds(source)
      case ("SECOND" | "S" | "SEC" | "SECONDS" | "SECS", CalendarIntervalType) =>
        ExtractIntervalSeconds(source)
      case _ => errorHandleFunc
    }
  }

  // True iff `unit` lies within the interval type's declared [start, end] field range.
  private def isUnitInIntervalRange(unit: Byte, start: Byte, end: Byte): Boolean = {
    start <= unit && unit <= end
  }
}
/**
 * Base class for binary operations combining a `CalendarInterval` with a numeric factor
 * (multiply / divide). Subclasses select the concrete `IntervalUtils` operation and its
 * name for code generation.
 */
abstract class IntervalNumOperation(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  // The interpreted implementation; codegen calls the IntervalUtils method of the same name.
  protected val operation: (CalendarInterval, Double) => CalendarInterval
  protected def operationName: String

  override def inputTypes: Seq[AbstractDataType] = Seq(CalendarIntervalType, DoubleType)
  override def dataType: DataType = CalendarIntervalType

  override def nullable: Boolean = true

  override def nullSafeEval(interval: Any, num: Any): Any = {
    operation(interval.asInstanceOf[CalendarInterval], num.asInstanceOf[Double])
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val iu = IntervalUtils.getClass.getName.stripSuffix("$")
    defineCodeGen(ctx, ev, (interval, num) => s"$iu.$operationName($interval, $num)")
  }

  // e.g. "multiplyExact" and "multiply" both surface as "multiply_interval".
  override def prettyName: String = operationName.stripSuffix("Exact") + "_interval"
}
// Under ANSI mode (`failOnError`) the *Exact variants are used, which raise an error on
// overflow instead of silently wrapping.

case class MultiplyInterval(
    interval: Expression,
    num: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends IntervalNumOperation(interval, num) {
  override protected val operation: (CalendarInterval, Double) => CalendarInterval =
    if (failOnError) multiplyExact else multiply

  override protected def operationName: String = if (failOnError) "multiplyExact" else "multiply"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyInterval =
    copy(interval = newLeft, num = newRight)
}

case class DivideInterval(
    interval: Expression,
    num: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends IntervalNumOperation(interval, num) {
  override protected val operation: (CalendarInterval, Double) => CalendarInterval =
    if (failOnError) divideExact else divide

  override protected def operationName: String = if (failOnError) "divideExact" else "divide"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideInterval =
    copy(interval = newLeft, num = newRight)
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_([years[, months[, weeks[, days[, hours[, mins[, secs]]]]]]]) - Make interval from years, months, weeks, days, hours, mins and secs.",
  arguments = """
    Arguments:
      * years - the number of years, positive or negative
      * months - the number of months, positive or negative
      * weeks - the number of weeks, positive or negative
      * days - the number of days, positive or negative
      * hours - the number of hours, positive or negative
      * mins - the number of minutes, positive or negative
      * secs - the number of seconds with the fractional part in microsecond precision.
  """,
  examples = """
    Examples:
      > SELECT _FUNC_(100, 11, 1, 1, 12, 30, 01.001001);
       100 years 11 months 8 days 12 hours 30 minutes 1.001001 seconds
      > SELECT _FUNC_(100, null, 3);
       NULL
      > SELECT _FUNC_(0, 1, 0, 1, 0, 0, 100.000001);
       1 months 1 days 1 minutes 40.000001 seconds
  """,
  since = "3.0.0",
  group = "datetime_funcs")
// scalastyle:on line.size.limit
/**
 * Implements `make_interval(...)`: assembles a CalendarIntervalType value from up
 * to seven numeric components. When `failOnError` is set (ANSI mode), arithmetic
 * overflow while building the interval propagates as an ArithmeticException;
 * otherwise the expression evaluates to null (see the catch in nullSafeEval).
 */
case class MakeInterval(
    years: Expression,
    months: Expression,
    weeks: Expression,
    days: Expression,
    hours: Expression,
    mins: Expression,
    secs: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends SeptenaryExpression with ImplicitCastInputTypes with NullIntolerant {
  // The auxiliary constructors below let SQL callers omit trailing arguments;
  // each fills the next missing component with a zero literal (a decimal zero
  // for the seconds component, matching its DecimalType input type).
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression,
      mins: Expression,
      sec: Expression) = {
    this(years, months, weeks, days, hours, mins, sec, SQLConf.get.ansiEnabled)
  }
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression,
      mins: Expression) = {
    this(years, months, weeks, days, hours, mins, Literal(Decimal(0, Decimal.MAX_LONG_DIGITS, 6)),
      SQLConf.get.ansiEnabled)
  }
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression) = {
    this(years, months, weeks, days, hours, Literal(0))
  }
  def this(years: Expression, months: Expression, weeks: Expression, days: Expression) =
    this(years, months, weeks, days, Literal(0))
  def this(years: Expression, months: Expression, weeks: Expression) =
    this(years, months, weeks, Literal(0))
  def this(years: Expression, months: Expression) = this(years, months, Literal(0))
  def this(years: Expression) = this(years, Literal(0))
  def this() = this(Literal(0))
  override def children: Seq[Expression] = Seq(years, months, weeks, days, hours, mins, secs)
  // Accept `secs` as DecimalType to avoid losing precision of microseconds while converting
  // them to the fractional part of `secs`.
  override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType, IntegerType, IntegerType,
    IntegerType, IntegerType, IntegerType, DecimalType(Decimal.MAX_LONG_DIGITS, 6))
  override def dataType: DataType = CalendarIntervalType
  // With ANSI off, overflow is converted to null (see nullSafeEval), so the
  // expression is always nullable in that mode regardless of the children.
  override def nullSafeEval(
      year: Any,
      month: Any,
      week: Any,
      day: Any,
      hour: Any,
      min: Any,
      sec: Option[Any]): Any = {
    try {
      IntervalUtils.makeInterval(
        year.asInstanceOf[Int],
        month.asInstanceOf[Int],
        week.asInstanceOf[Int],
        day.asInstanceOf[Int],
        hour.asInstanceOf[Int],
        min.asInstanceOf[Int],
        // A missing seventh child (seconds) defaults to decimal zero.
        sec.map(_.asInstanceOf[Decimal]).getOrElse(Decimal(0, Decimal.MAX_LONG_DIGITS, 6)))
    } catch {
      // Only swallow overflow when ANSI mode is off; otherwise it propagates.
      case _: ArithmeticException if !failOnError => null
    }
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, (year, month, week, day, hour, min, sec) => {
      // Object class name minus the trailing '$' — the name used in generated Java.
      val iu = IntervalUtils.getClass.getName.stripSuffix("$")
      // The generated seconds argument defaults to 0 when the child is absent.
      val secFrac = sec.getOrElse("0")
      // Mirror nullSafeEval: rethrow under ANSI, otherwise mark the result null.
      val failOnErrorBranch = if (failOnError) "throw e;" else s"${ev.isNull} = true;"
      s"""
        try {
          ${ev.value} = $iu.makeInterval($year, $month, $week, $day, $hour, $min, $secFrac);
        } catch (java.lang.ArithmeticException e) {
          $failOnErrorBranch
        }
      """
    })
  }
  override def prettyName: String = "make_interval"
  // Seq(years, months, weeks, days, hours, mins, secs)
  override protected def withNewChildrenInternal(
      newChildren: IndexedSeq[Expression]): MakeInterval =
    copy(
      years = newChildren(0),
      months = newChildren(1),
      weeks = newChildren(2),
      days = newChildren(3),
      hours = newChildren(4),
      mins = newChildren(5),
      secs = newChildren(6)
    )
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_([days[, hours[, mins[, secs]]]]) - Make DayTimeIntervalType duration from days, hours, mins and secs.",
  arguments = """
    Arguments:
      * days - the number of days, positive or negative
      * hours - the number of hours, positive or negative
      * mins - the number of minutes, positive or negative
      * secs - the number of seconds with the fractional part in microsecond precision.
  """,
  examples = """
    Examples:
      > SELECT _FUNC_(1, 12, 30, 01.001001);
       1 12:30:01.001001000
      > SELECT _FUNC_(2);
       2 00:00:00.000000000
      > SELECT _FUNC_(100, null, 3);
       NULL
  """,
  since = "3.2.0",
  group = "datetime_funcs")
// scalastyle:on line.size.limit
/**
 * Implements `make_dt_interval(...)`: builds a DayTimeIntervalType value from
 * days, hours, minutes and decimal seconds by delegating to
 * IntervalUtils.makeDayTimeInterval.
 */
case class MakeDTInterval(
    days: Expression,
    hours: Expression,
    mins: Expression,
    secs: Expression)
  extends QuaternaryExpression with ImplicitCastInputTypes with NullIntolerant {
  // Auxiliary constructors fill omitted trailing arguments with zero literals
  // (a decimal zero for the seconds component, matching its DecimalType input).
  def this(
      days: Expression,
      hours: Expression,
      mins: Expression) = {
    this(days, hours, mins, Literal(Decimal(0, Decimal.MAX_LONG_DIGITS, 6)))
  }
  def this(days: Expression, hours: Expression) = this(days, hours, Literal(0))
  def this(days: Expression) = this(days, Literal(0))
  def this() = this(Literal(0))
  override def first: Expression = days
  override def second: Expression = hours
  override def third: Expression = mins
  override def fourth: Expression = secs
  // Accept `secs` as DecimalType to avoid losing precision of microseconds when converting
  // them to the fractional part of `secs`.
  override def inputTypes: Seq[AbstractDataType] = Seq(
    IntegerType, IntegerType, IntegerType, DecimalType(Decimal.MAX_LONG_DIGITS, 6))
  override def dataType: DataType = DayTimeIntervalType()
  override def nullSafeEval(
      day: Any,
      hour: Any,
      min: Any,
      sec: Any): Any = {
    IntervalUtils.makeDayTimeInterval(
      day.asInstanceOf[Int],
      hour.asInstanceOf[Int],
      min.asInstanceOf[Int],
      sec.asInstanceOf[Decimal])
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (day, hour, min, sec) => {
      // Object class name minus the trailing '$' — the name used in generated Java.
      val iu = IntervalUtils.getClass.getName.stripSuffix("$")
      s"$iu.makeDayTimeInterval($day, $hour, $min, $sec)"
    })
  }
  override def prettyName: String = "make_dt_interval"
  override protected def withNewChildrenInternal(
      days: Expression,
      hours: Expression,
      mins: Expression,
      secs: Expression): MakeDTInterval =
    copy(days, hours, mins, secs)
}
@ExpressionDescription(
  usage = "_FUNC_([years[, months]]) - Make year-month interval from years, months.",
  arguments = """
    Arguments:
      * years - the number of years, positive or negative
      * months - the number of months, positive or negative
  """,
  examples = """
    Examples:
      > SELECT _FUNC_(1, 2);
       1-2
      > SELECT _FUNC_(1, 0);
       1-0
      > SELECT _FUNC_(-1, 1);
       -0-11
      > SELECT _FUNC_(2);
       2-0
  """,
  since = "3.2.0",
  group = "datetime_funcs")
// scalastyle:on line.size.limit
/**
 * Implements `make_ym_interval(...)`: builds a YearMonthIntervalType value as
 * `years * MONTHS_PER_YEAR + months`, using exact Long arithmetic narrowed with
 * toIntExact, so overflow of the internal Int month count raises
 * ArithmeticException.
 */
case class MakeYMInterval(years: Expression, months: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  // Omitted trailing arguments default to zero.
  def this(years: Expression) = this(years, Literal(0))
  def this() = this(Literal(0))
  override def left: Expression = years
  override def right: Expression = months
  override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType, IntegerType)
  override def dataType: DataType = YearMonthIntervalType()
  // Compute in Long, then narrow with toIntExact so intermediate overflow is caught.
  override def nullSafeEval(year: Any, month: Any): Any = {
    Math.toIntExact(Math.addExact(month.asInstanceOf[Number].longValue(),
      Math.multiplyExact(year.asInstanceOf[Number].longValue(), MONTHS_PER_YEAR)))
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (years, months) => {
      val math = classOf[Math].getName.stripSuffix("$")
      // Same exact arithmetic as nullSafeEval, emitted as Java source.
      s"""
         |$math.toIntExact(java.lang.Math.addExact($months,
         |  $math.multiplyExact($years, $MONTHS_PER_YEAR)))
         |""".stripMargin
    })
  }
  override def prettyName: String = "make_ym_interval"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): Expression =
    copy(years = newLeft, months = newRight)
}
// Multiply an year-month interval by a numeric
/**
 * The interval operand is the interval's internal month count (Int). The
 * multiplication strategy depends on the numeric operand's type:
 *  - byte/short/int: exact Int multiplication (throws on overflow);
 *  - long: exact Long multiplication narrowed with toIntExact;
 *  - float/double: double multiplication rounded HALF_UP to Int;
 *  - decimal: exact decimal multiplication, rounded HALF_UP at scale 0, with
 *    intValueExact guarding against overflow.
 */
case class MultiplyYMInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num
  override def inputTypes: Seq[AbstractDataType] = Seq(YearMonthIntervalType, NumericType)
  override def dataType: DataType = YearMonthIntervalType()
  // Dispatch on the numeric operand's type once, outside the per-row loop.
  @transient
  private lazy val evalFunc: (Int, Any) => Any = right.dataType match {
    case ByteType | ShortType | IntegerType => (months: Int, num) =>
      Math.multiplyExact(months, num.asInstanceOf[Number].intValue())
    case LongType => (months: Int, num) =>
      Math.toIntExact(Math.multiplyExact(months, num.asInstanceOf[Long]))
    case FloatType | DoubleType => (months: Int, num) =>
      DoubleMath.roundToInt(months * num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (months: Int, num) =>
      val decimalRes = ((new Decimal).set(months) * num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()
  }
  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Int], num)
  }
  // Generated Java mirrors each evalFunc branch above.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case ByteType | ShortType | IntegerType =>
      defineCodeGen(ctx, ev, (m, n) => s"java.lang.Math.multiplyExact($m, $n)")
    case LongType =>
      val jlm = classOf[Math].getName
      defineCodeGen(ctx, ev, (m, n) => s"$jlm.toIntExact($jlm.multiplyExact($m, $n))")
    case FloatType | DoubleType =>
      val dm = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$dm.roundToInt($m * (double)$n, java.math.RoundingMode.HALF_UP)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$times($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()")
  }
  override def toString: String = s"($left * $right)"
  override def sql: String = s"(${left.sql} * ${right.sql})"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyYMInterval =
    copy(interval = newLeft, num = newRight)
}
// Multiply a day-time interval by a numeric
/**
 * The interval operand is the interval's internal microsecond count (Long).
 * Integral operands use exact Long multiplication (throws on overflow); decimal
 * operands multiply exactly then round HALF_UP at scale 0 with longValueExact;
 * other fractional operands multiply as double and round HALF_UP to Long.
 */
case class MultiplyDTInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num
  override def inputTypes: Seq[AbstractDataType] = Seq(DayTimeIntervalType, NumericType)
  override def dataType: DataType = DayTimeIntervalType()
  // Dispatch on the numeric operand's type once, outside the per-row loop.
  // Note DecimalType must be matched before FractionalType, which subsumes it.
  @transient
  private lazy val evalFunc: (Long, Any) => Any = right.dataType match {
    case _: IntegralType => (micros: Long, num) =>
      Math.multiplyExact(micros, num.asInstanceOf[Number].longValue())
    case _: DecimalType => (micros: Long, num) =>
      val decimalRes = ((new Decimal).set(micros) * num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, RoundingMode.HALF_UP).longValueExact()
    case _: FractionalType => (micros: Long, num) =>
      DoubleMath.roundToLong(micros * num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }
  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Long], num)
  }
  // Generated Java mirrors each evalFunc branch above.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case _: IntegralType =>
      defineCodeGen(ctx, ev, (m, n) => s"java.lang.Math.multiplyExact($m, $n)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$times($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()")
    case _: FractionalType =>
      val dm = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$dm.roundToLong($m * (double)$n, java.math.RoundingMode.HALF_UP)")
  }
  override def toString: String = s"($left * $right)"
  override def sql: String = s"(${left.sql} * ${right.sql})"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyDTInterval =
    copy(interval = newLeft, num = newRight)
}
// Shared overflow guard for the interval-division expressions below.
trait IntervalDivide {
  /**
   * Raises an overflow error for the `minValue / -1` edge case, whose true
   * result does not fit back into the interval's storage type. Only integral
   * divisors can trigger this; other numeric types never take the branch.
   */
  def checkDivideOverflow(value: Any, minValue: Any, num: Expression, numValue: Any): Unit = {
    // Short-circuit evaluation keeps the numeric cast from running unless the
    // first two checks hold, matching the original nested-if ordering.
    if (value == minValue && num.dataType.isInstanceOf[IntegralType] &&
        numValue.asInstanceOf[Number].longValue() == -1) {
      throw QueryExecutionErrors.overflowInIntegralDivideError()
    }
  }
}
// Divide an year-month interval by a numeric
/**
 * The interval operand is the interval's internal month count (Int). Division
 * rounds HALF_UP per operand type; `checkDivideOverflow` guards the
 * `Int.MinValue / -1` case for integral divisors.
 */
case class DivideYMInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with IntervalDivide
    with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num
  override def inputTypes: Seq[AbstractDataType] = Seq(YearMonthIntervalType, NumericType)
  override def dataType: DataType = YearMonthIntervalType()
  // Dispatch on the divisor's type once, outside the per-row loop.
  // Note DecimalType must be matched before FractionalType, which subsumes it.
  @transient
  private lazy val evalFunc: (Int, Any) => Any = right.dataType match {
    case LongType => (months: Int, num) =>
      // Year-month interval has `Int` as the internal type. The result of the divide operation
      // of `Int` by `Long` must fit to `Int`. So, the casting to `Int` cannot cause overflow.
      LongMath.divide(months, num.asInstanceOf[Long], RoundingMode.HALF_UP).toInt
    case _: IntegralType => (months: Int, num) =>
      IntMath.divide(months, num.asInstanceOf[Number].intValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (months: Int, num) =>
      val decimalRes = ((new Decimal).set(months) / num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()
    case _: FractionalType => (months: Int, num) =>
      DoubleMath.roundToInt(months / num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }
  override def nullSafeEval(interval: Any, num: Any): Any = {
    // Guard Int.MinValue / -1 before dividing (integral divisors only).
    checkDivideOverflow(interval.asInstanceOf[Int], Int.MinValue, right, num)
    evalFunc(interval.asInstanceOf[Int], num)
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case t: IntegralType =>
      val math = t match {
        case LongType => classOf[LongMath].getName
        case _ => classOf[IntMath].getName
      }
      val javaType = CodeGenerator.javaType(dataType)
      // NOTE(review): `months`/`num` come from separate genCode calls while
      // nullSafeCodeGen supplies `m`/`n`; verify both refer to the same generated
      // variables so the overflow guard sees the evaluated values.
      val months = left.genCode(ctx)
      val num = right.genCode(ctx)
      val checkIntegralDivideOverflow =
        s"""
           |if (${months.value} == ${Int.MinValue} && ${num.value} == -1)
           |  throw QueryExecutionErrors.overflowInIntegralDivideError();
           |""".stripMargin
      nullSafeCodeGen(ctx, ev, (m, n) =>
        // Similarly to non-codegen code. The result of `divide(Int, Long, ...)` must fit to `Int`.
        // Casting to `Int` is safe here.
        s"""
           |$checkIntegralDivideOverflow
           |${ev.value} = ($javaType)$math.divide($m, $n, java.math.RoundingMode.HALF_UP);
        """.stripMargin)
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$div($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()")
    case _: FractionalType =>
      val math = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$math.roundToInt($m / (double)$n, java.math.RoundingMode.HALF_UP)")
  }
  override def toString: String = s"($left / $right)"
  override def sql: String = s"(${left.sql} / ${right.sql})"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideYMInterval =
    copy(interval = newLeft, num = newRight)
}
// Divide a day-time interval by a numeric
/**
 * The interval operand is the interval's internal microsecond count (Long).
 * Division rounds HALF_UP per operand type; `checkDivideOverflow` guards the
 * `Long.MinValue / -1` case for integral divisors.
 */
case class DivideDTInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with IntervalDivide
    with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num
  override def inputTypes: Seq[AbstractDataType] = Seq(DayTimeIntervalType, NumericType)
  override def dataType: DataType = DayTimeIntervalType()
  // Dispatch on the divisor's type once, outside the per-row loop.
  // Note DecimalType must be matched before FractionalType, which subsumes it.
  @transient
  private lazy val evalFunc: (Long, Any) => Any = right.dataType match {
    case _: IntegralType => (micros: Long, num) =>
      LongMath.divide(micros, num.asInstanceOf[Number].longValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (micros: Long, num) =>
      val decimalRes = ((new Decimal).set(micros) / num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()
    case _: FractionalType => (micros: Long, num) =>
      DoubleMath.roundToLong(micros / num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }
  override def nullSafeEval(interval: Any, num: Any): Any = {
    // Guard Long.MinValue / -1 before dividing (integral divisors only).
    checkDivideOverflow(interval.asInstanceOf[Long], Long.MinValue, right, num)
    evalFunc(interval.asInstanceOf[Long], num)
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case _: IntegralType =>
      val math = classOf[LongMath].getName
      // NOTE(review): `micros`/`num` come from separate genCode calls while
      // nullSafeCodeGen supplies `m`/`n`; verify both refer to the same generated
      // variables so the overflow guard sees the evaluated values.
      val micros = left.genCode(ctx)
      val num = right.genCode(ctx)
      val checkIntegralDivideOverflow =
        s"""
           |if (${micros.value} == ${Long.MinValue}L && ${num.value} == -1L)
           |  throw QueryExecutionErrors.overflowInIntegralDivideError();
           |""".stripMargin
      nullSafeCodeGen(ctx, ev, (m, n) =>
        s"""
           |$checkIntegralDivideOverflow
           |${ev.value} = $math.divide($m, $n, java.math.RoundingMode.HALF_UP);
        """.stripMargin)
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$div($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()")
    case _: FractionalType =>
      val math = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$math.roundToLong($m / (double)$n, java.math.RoundingMode.HALF_UP)")
  }
  override def toString: String = s"($left / $right)"
  override def sql: String = s"(${left.sql} / ${right.sql})"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideDTInterval =
    copy(interval = newLeft, num = newRight)
}
| chuckchen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/intervalExpressions.scala | Scala | apache-2.0 | 29,250 |
/*******************************************************************************
Copyright (c) 2013, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.debug
import kr.ac.kaist.jsaf.analysis.cfg._
import scala.collection.mutable.{HashMap => MHashMap}
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.debug.commands._
/**
 * Debug console variant for demand-driven sparse (DSparse) analysis runs.
 * Behaves like the base DebugConsole but carries the DSparseEnv and registers
 * a command set that includes CmdDU.
 */
class DebugConsoleDSparse(cfg: CFG, worklist: Worklist, sem: Semantics, table: Table, env: DSparseEnv)
  extends DebugConsole(cfg, worklist, sem, table) {
  /** Exposes the sparse-analysis environment to commands that need it. */
  def getEnv = env

  /**
   * Registers the command set, refreshes tab completion, and prints the help
   * banner once at startup.
   */
  override def initialize() = {
    val commands = List(
      new CmdHelp, new CmdNext, new CmdJump, new CmdPrint,
      new CmdHome, new CmdMove, new CmdPrintResult, new CmdDU)
    register(commands)
    updateCompletor()
    runCmd("help", Array.empty[String])
  }
}
object DebugConsoleDSparse {
  /**
   * Singleton object
   */
  // Mutable singleton handle; null until initialize() is called.
  // NOTE(review): runFixpoint/runFinished NPE if invoked before initialize —
  // presumably the analysis driver guarantees the ordering; verify.
  var console: DebugConsoleDSparse = null
  def initialize(cfg: CFG, worklist: Worklist, sem: Semantics, table: Table, env: DSparseEnv) = {
    console = new DebugConsoleDSparse(cfg, worklist, sem, table, env)
    console.initialize()
  }
  // Delegates a bounded fixpoint run to the console instance.
  def runFixpoint(count: Int) = console.runFixpoint(count)
  // Resets the target to -1 (presumably "no jump target" — confirm against
  // DebugConsole) and replays the fixpoint up to the current iteration count.
  def runFinished() = {
    console.target = -1
    console.runFixpoint(console.iter)
  }
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/debug/DebugConsoleDSparse.scala | Scala | bsd-3-clause | 1,456 |
package scala.meta.contrib.implicits
import scala.meta._
import scala.meta.contrib._
/** Syntax for the Extract type class: `a.extract[B]` and `a.hasMod(mod)`. */
trait ExtractExtensions {
  implicit class XtensionExtractors[A](a: A) {
    /** All values of type `B` extracted from `a` by the in-scope instance. */
    def extract[B](implicit ev: Extract[A, B]): List[B] = ev.extract(a)

    /** Whether any modifier extracted from `a` is equal (per `isEqual`) to `mod`. */
    def hasMod(mod: Mod)(implicit ev: Extract[A, Mod]): Boolean =
      ev.extract(a).exists(existing => existing.isEqual(mod))
  }
}
object ExtractExtensions extends ExtractExtensions
| DavidDudson/scalameta | scalameta/contrib/shared/src/main/scala/scala/meta/contrib/implicits/ExtractExtensions.scala | Scala | bsd-3-clause | 400 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import com.yammer.metrics.Metrics
import com.yammer.metrics.core.Timer
import kafka.api.LeaderAndIsr
import kafka.common.TopicAndPartition
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.{TestUtils, ZkUtils}
import kafka.zk.ZooKeeperTestHarness
import org.junit.{After, Before, Test}
import org.junit.Assert.assertTrue
import scala.collection.JavaConverters._
class ControllerIntegrationTest extends ZooKeeperTestHarness {
var servers = Seq.empty[KafkaServer]
  @Before
  override def setUp() {
    super.setUp
    // Start with no brokers; each test provisions exactly the cluster it needs.
    servers = Seq.empty[KafkaServer]
  }
  @After
  override def tearDown() {
    // Stop any brokers the test started before tearing down the ZK harness.
    TestUtils.shutdownServers(servers)
    super.tearDown
  }
  @Test
  def testEmptyCluster(): Unit = {
    // A single broker should elect itself controller and write the initial
    // controller epoch to ZooKeeper.
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkUtils.pathExists(ZkUtils.ControllerPath), "failed to elect a controller")
    waitUntilControllerEpoch(KafkaController.InitialControllerEpoch, "broker failed to set controller epoch")
  }
  @Test
  def testControllerEpochPersistsWhenAllBrokersDown(): Unit = {
    // The controller epoch znode must survive the loss of every broker, even
    // though the controller znode itself disappears.
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkUtils.pathExists(ZkUtils.ControllerPath), "failed to elect a controller")
    waitUntilControllerEpoch(KafkaController.InitialControllerEpoch, "broker failed to set controller epoch")
    servers.head.shutdown()
    servers.head.awaitShutdown()
    TestUtils.waitUntilTrue(() => !zkUtils.pathExists(ZkUtils.ControllerPath), "failed to kill controller")
    waitUntilControllerEpoch(KafkaController.InitialControllerEpoch, "controller epoch was not persisted after broker failure")
  }
  @Test
  def testControllerMoveIncrementsControllerEpoch(): Unit = {
    // Bouncing the sole broker forces a controller re-election, which must bump
    // the persisted epoch by exactly one.
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkUtils.pathExists(ZkUtils.ControllerPath), "failed to elect a controller")
    waitUntilControllerEpoch(KafkaController.InitialControllerEpoch, "broker failed to set controller epoch")
    servers.head.shutdown()
    servers.head.awaitShutdown()
    servers.head.startup()
    TestUtils.waitUntilTrue(() => zkUtils.pathExists(ZkUtils.ControllerPath), "failed to elect a controller")
    waitUntilControllerEpoch(KafkaController.InitialControllerEpoch + 1, "controller epoch was not incremented after controller move")
  }
  @Test
  def testTopicCreation(): Unit = {
    // Creating a single-replica topic should put the partition in the initial
    // leader epoch with broker 0 as leader.
    servers = makeServers(1)
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(0))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, 0, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic creation")
  }
  @Test
  def testTopicCreationWithOfflineReplica(): Unit = {
    // Topic creation must still succeed when one assigned replica is down; the
    // live controller replica becomes leader.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(otherBrokerId, controllerId))
    // Only wait for metadata on the surviving broker (servers.take(1) assumes
    // the controller is the first server; NOTE(review): verify this holds).
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers.take(1))
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic creation")
  }
  @Test
  def testTopicPartitionExpansion(): Unit = {
    // Writing an expanded assignment directly to the topic znode simulates a
    // partition-count increase; the controller must bring the new partition online.
    servers = makeServers(1)
    val tp0 = TopicAndPartition("t", 0)
    val tp1 = TopicAndPartition("t", 1)
    val assignment = Map(tp0.partition -> Seq(0))
    val expandedAssignment = Map(tp0.partition -> Seq(0), tp1.partition -> Seq(0))
    TestUtils.createTopic(zkUtils, tp0.topic, partitionReplicaAssignment = assignment, servers = servers)
    zkUtils.updatePersistentPath(ZkUtils.getTopicPath(tp0.topic), zkUtils.replicaAssignmentZkData(expandedAssignment.map(kv => kv._1.toString -> kv._2)))
    waitForPartitionState(tp1, KafkaController.InitialControllerEpoch, 0, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic partition expansion")
    TestUtils.waitUntilMetadataIsPropagated(servers, tp1.topic, tp1.partition)
  }
  @Test
  def testTopicPartitionExpansionWithOfflineReplica(): Unit = {
    // Partition expansion should succeed even when one assigned replica is down;
    // the controller replica leads the new partition and metadata propagates to it.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp0 = TopicAndPartition("t", 0)
    val tp1 = TopicAndPartition("t", 1)
    val assignment = Map(tp0.partition -> Seq(otherBrokerId, controllerId))
    val expandedAssignment = Map(tp0.partition -> Seq(otherBrokerId, controllerId), tp1.partition -> Seq(otherBrokerId, controllerId))
    TestUtils.createTopic(zkUtils, tp0.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkUtils.updatePersistentPath(ZkUtils.getTopicPath(tp0.topic), zkUtils.replicaAssignmentZkData(expandedAssignment.map(kv => kv._1.toString -> kv._2)))
    waitForPartitionState(tp1, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic partition expansion")
    TestUtils.waitUntilMetadataIsPropagated(Seq(servers(controllerId)), tp1.topic, tp1.partition)
  }
  @Test
  def testPartitionReassignment(): Unit = {
    // A reassignment from the controller to the other broker must complete, update
    // the topic znode, clean up the reassignment path, and tick the
    // PartitionReassignment rate-and-time metric.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val metricName = s"kafka.controller:type=ControllerStats,name=${ControllerState.PartitionReassignment.rateAndTimeMetricName.get}"
    // Snapshot the timer count so the assertion below checks the delta.
    val timerCount = timer(metricName).count
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    val reassignment = Map(tp -> Seq(otherBrokerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    zkUtils.createPersistentPath(ZkUtils.ReassignPartitionsPath, ZkUtils.formatAsReassignmentJson(reassignment))
    // Leader epoch advances by 3 during reassignment — presumably new-replica
    // add, leader move, and old-replica drop; confirm against the controller logic.
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 3,
      "failed to get expected partition state after partition reassignment")
    TestUtils.waitUntilTrue(() => zkUtils.getReplicaAssignmentForTopics(Seq(tp.topic)) == reassignment,
      "failed to get updated partition assignment on topic znode after partition reassignment")
    TestUtils.waitUntilTrue(() => !zkUtils.pathExists(ZkUtils.ReassignPartitionsPath),
      "failed to remove reassign partitions path after completion")
    val updatedTimerCount = timer(metricName).count
    assertTrue(s"Timer count $updatedTimerCount should be greater than $timerCount", updatedTimerCount > timerCount)
  }
  @Test
  def testPartitionReassignmentWithOfflineReplicaHaltingProgress(): Unit = {
    // When the reassignment target is offline, the reassignment must stall: the
    // reassignment znode stays in place and leadership remains on the controller.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    val reassignment = Map(tp -> Seq(otherBrokerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkUtils.createPersistentPath(ZkUtils.ReassignPartitionsPath, ZkUtils.formatAsReassignmentJson(reassignment))
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state during partition reassignment with offline replica")
    TestUtils.waitUntilTrue(() => zkUtils.pathExists(ZkUtils.ReassignPartitionsPath),
      "partition reassignment path should remain while reassignment in progress")
  }
  @Test
  def testPartitionReassignmentResumesAfterReplicaComesOnline(): Unit = {
    // A reassignment stalled by an offline target must resume and complete once
    // the target broker comes back online.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    val reassignment = Map(tp -> Seq(otherBrokerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkUtils.createPersistentPath(ZkUtils.ReassignPartitionsPath, ZkUtils.formatAsReassignmentJson(reassignment))
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state during partition reassignment with offline replica")
    servers(otherBrokerId).startup()
    // +4 total: the stalled-state bump above plus the reassignment's own leader
    // epoch changes once it resumes.
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 4,
      "failed to get expected partition state after partition reassignment")
    TestUtils.waitUntilTrue(() => zkUtils.getReplicaAssignmentForTopics(Seq(tp.topic)) == reassignment,
      "failed to get updated partition assignment on topic znode after partition reassignment")
    TestUtils.waitUntilTrue(() => !zkUtils.pathExists(ZkUtils.ReassignPartitionsPath),
      "failed to remove reassign partitions path after completion")
  }
  @Test
  def testPreferredReplicaLeaderElection(): Unit = {
    // After the preferred replica (first in the assignment) rejoins the ISR,
    // triggering a preferred-replica election must move leadership back to it.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(otherBrokerId, controllerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state upon broker shutdown")
    servers(otherBrokerId).startup()
    TestUtils.waitUntilTrue(() => zkUtils.getInSyncReplicasForPartition(tp.topic, tp.partition).toSet == assignment(tp.partition).toSet, "restarted broker failed to join in-sync replicas")
    zkUtils.createPersistentPath(ZkUtils.PreferredReplicaLeaderElectionPath, ZkUtils.preferredReplicaLeaderElectionZkData(Set(tp)))
    TestUtils.waitUntilTrue(() => !zkUtils.pathExists(ZkUtils.PreferredReplicaLeaderElectionPath),
      "failed to remove preferred replica leader election path after completion")
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 2,
      "failed to get expected partition state upon broker startup")
  }
  @Test
  def testPreferredReplicaLeaderElectionWithOfflinePreferredReplica(): Unit = {
    // If the preferred replica is offline, the election must give up (removing
    // its znode) and leave leadership on the surviving controller replica.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(otherBrokerId, controllerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkUtils.createPersistentPath(ZkUtils.PreferredReplicaLeaderElectionPath, ZkUtils.preferredReplicaLeaderElectionZkData(Set(tp)))
    TestUtils.waitUntilTrue(() => !zkUtils.pathExists(ZkUtils.PreferredReplicaLeaderElectionPath),
      "failed to remove preferred replica leader election path after giving up")
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state upon broker shutdown")
  }
@Test
def testAutoPreferredReplicaLeaderElection(): Unit = {
servers = makeServers(2, autoLeaderRebalanceEnable = true)
val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
val tp = TopicAndPartition("t", 0)
val assignment = Map(tp.partition -> Seq(1, 0))
TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
servers(otherBrokerId).shutdown()
servers(otherBrokerId).awaitShutdown()
waitForPartitionState(tp, KafkaController.InitialControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
"failed to get expected partition state upon broker shutdown")
servers(otherBrokerId).startup()
waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 2,
"failed to get expected partition state upon broker startup")
}
  @Test
  def testLeaderAndIsrWhenEntireIsrOfflineAndUncleanLeaderElectionDisabled(): Unit = {
    // With unclean leader election off, losing the entire ISR must leave the
    // partition leaderless (NoLeader) while preserving the last ISR in ZK.
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = TopicAndPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(otherBrokerId))
    TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic creation")
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    TestUtils.waitUntilTrue(() => {
      val leaderIsrAndControllerEpochMap = zkUtils.getPartitionLeaderAndIsrForTopics(Set(tp))
      leaderIsrAndControllerEpochMap.contains(tp) &&
        isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), KafkaController.InitialControllerEpoch, LeaderAndIsr.NoLeader, LeaderAndIsr.initialLeaderEpoch + 1) &&
        leaderIsrAndControllerEpochMap(tp).leaderAndIsr.isr == List(otherBrokerId)
    }, "failed to get expected partition state after entire isr went offline")
  }
@Test
def testLeaderAndIsrWhenEntireIsrOfflineAndUncleanLeaderElectionEnabled(): Unit = {
servers = makeServers(2, uncleanLeaderElectionEnable = true)
val controllerId = TestUtils.waitUntilControllerElected(zkUtils)
val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
val tp = TopicAndPartition("t", 0)
val assignment = Map(tp.partition -> Seq(otherBrokerId))
TestUtils.createTopic(zkUtils, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
waitForPartitionState(tp, KafkaController.InitialControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch,
"failed to get expected partition state upon topic creation")
servers(1).shutdown()
servers(1).awaitShutdown()
TestUtils.waitUntilTrue(() => {
val leaderIsrAndControllerEpochMap = zkUtils.getPartitionLeaderAndIsrForTopics(Set(tp))
leaderIsrAndControllerEpochMap.contains(tp) &&
isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), KafkaController.InitialControllerEpoch, LeaderAndIsr.NoLeader, LeaderAndIsr.initialLeaderEpoch + 1) &&
leaderIsrAndControllerEpochMap(tp).leaderAndIsr.isr == List.empty
}, "failed to get expected partition state after entire isr went offline")
}
private def waitUntilControllerEpoch(epoch: Int, message: String): Unit = {
TestUtils.waitUntilTrue(() => zkUtils.readDataMaybeNull(ZkUtils.ControllerEpochPath)._1.map(_.toInt) == Some(epoch), message)
}
private def waitForPartitionState(tp: TopicAndPartition,
controllerEpoch: Int,
leader: Int,
leaderEpoch: Int,
message: String): Unit = {
TestUtils.waitUntilTrue(() => {
val leaderIsrAndControllerEpochMap = zkUtils.getPartitionLeaderAndIsrForTopics(Set(tp))
leaderIsrAndControllerEpochMap.contains(tp) &&
isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), controllerEpoch, leader, leaderEpoch)
}, message)
}
  // True iff the ZK-held partition state matches the expected controller epoch,
  // leader broker id, and leader epoch (ISR contents are deliberately not checked).
  private def isExpectedPartitionState(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch,
                                       controllerEpoch: Int,
                                       leader: Int,
                                       leaderEpoch: Int) =
    leaderIsrAndControllerEpoch.controllerEpoch == controllerEpoch &&
      leaderIsrAndControllerEpoch.leaderAndIsr.leader == leader &&
      leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch == leaderEpoch
private def makeServers(numConfigs: Int, autoLeaderRebalanceEnable: Boolean = false, uncleanLeaderElectionEnable: Boolean = false) = {
val configs = TestUtils.createBrokerConfigs(numConfigs, zkConnect)
configs.foreach { config =>
config.setProperty(KafkaConfig.AutoLeaderRebalanceEnableProp, autoLeaderRebalanceEnable.toString)
config.setProperty(KafkaConfig.UncleanLeaderElectionEnableProp, uncleanLeaderElectionEnable.toString)
config.setProperty(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp, "1")
}
configs.map(config => TestUtils.createServer(KafkaConfig.fromProps(config)))
}
  // Looks up a Yammer Timer by its full MBean name in the default metrics registry;
  // fails the test if no metric with that name is registered.
  private def timer(metricName: String): Timer = {
    Metrics.defaultRegistry.allMetrics.asScala.filterKeys(_.getMBeanName == metricName).values.headOption
      .getOrElse(fail(s"Unable to find metric $metricName")).asInstanceOf[Timer]
  }
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala | Scala | apache-2.0 | 18,685 |
package app
import util.{FileUtil}
import org.scalatra._
import org.scalatra.servlet.{MultipartConfig, FileUploadSupport}
import org.apache.commons.io.FileUtils
/**
* Provides Ajax based file upload functionality.
*
* This servlet saves uploaded file as temporary file and returns the unique id.
* You can get uploaded file using [[app.FileUploadControllerBase#getTemporaryFile()]] with this id.
*/
class FileUploadController extends ScalatraServlet
  with FileUploadSupport with FlashMapSupport with FileUploadControllerBase {

  // Reject uploads larger than 3 MB at the multipart layer.
  configureMultipartHandling(MultipartConfig(maxFileSize = Some(3 * 1024 * 1024)))

  /**
   * Accepts an image upload, stores it as a temporary file, remembers the original
   * file name in the session, and returns the generated file id to the client.
   */
  post("/image"){
    fileParams.get("file") match {
      case Some(file) if(FileUtil.isImage(file.name)) => {
        val fileId = generateFileId
        FileUtils.writeByteArrayToFile(getTemporaryFile(fileId), file.get)
        session += "upload_" + fileId -> file.name
        Ok(fileId)
      }
      // FIX: previously only `None` was handled, so a present-but-non-image file
      // (Some(file) failing the isImage guard) escaped the match and surfaced as a
      // MatchError / HTTP 500 instead of a clean 400 response.
      case _ => BadRequest
    }
  }
}
| kxbmap/gitbucket | src/main/scala/app/FileUploadController.scala | Scala | apache-2.0 | 966 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.postgresql.parsers
import com.github.mauricio.async.db.postgresql.messages.backend.{ParameterStatusMessage, ServerMessage}
import com.github.mauricio.async.db.util.ByteBufferUtils
import java.nio.charset.Charset
import io.netty.buffer.ByteBuf
/**
 * Parses a PostgreSQL backend ParameterStatus message: two consecutive
 * NUL-terminated strings (parameter name, then parameter value) decoded
 * with the connection charset.
 */
class ParameterStatusParser(charset: Charset) extends MessageParser {

  override def parseMessage(b: ByteBuf): ServerMessage = {
    // Read order matters: the name precedes the value in the wire format.
    val parameterName = ByteBufferUtils.readCString(b, charset)
    val parameterValue = ByteBufferUtils.readCString(b, charset)
    new ParameterStatusMessage(parameterName, parameterValue)
  }
}
| carlosFattor/postgresql-async | postgresql-async/src/main/scala/com/github/mauricio/async/db/postgresql/parsers/ParameterStatusParser.scala | Scala | apache-2.0 | 1,166 |
package solution
import service.reflection.PositiveOutput
import tasktest.{SubArrayWithMaxSumSolution, SubArrayWithMaxSumTest}
// Test-runner glue: runs the task's ScalaTest suite against the SubArrayWithMaxSum
// solution below and expects a positive (passing) outcome.
class TTSubArrayWithMaxSum extends PositiveOutput {
  behavior of "ScalaTestRunner for SubArrayWithMaxSum"
  override val suiteInstance = new SubArrayWithMaxSumTest(new SubArrayWithMaxSum)
}
//solution
// Kadane's algorithm, returning the maximum-sum contiguous subarray itself.
//
// FIX: the previous implementation reset both `left` and `right` to i+1 whenever
// the running sum dropped to <= 0, clobbering the recorded bounds of an earlier,
// better subarray. E.g. for Array(4, -5, 2, 1) it returned Array(2) instead of
// Array(4). This version keeps the best window separate from the window currently
// being extended. For an all-negative input it still returns the single maximum
// element, matching the original's intent.
class SubArrayWithMaxSum extends SubArrayWithMaxSumSolution {
  def apply(a: Array[Int]): Array[Int] = {
    if (a.isEmpty) a
    else {
      var best = a(0)              // best sum found so far
      var bestLeft, bestRight = 0  // inclusive bounds of the best window
      var current = a(0)           // sum of the window ending at the previous index
      var currentLeft = 0          // left bound of that window
      for (i <- 1 until a.length) {
        if (current < 0) {
          // A negative running prefix can never help: restart the window at i.
          current = a(i)
          currentLeft = i
        } else {
          current += a(i)
        }
        if (current > best) {
          best = current
          bestLeft = currentLeft
          bestRight = i
        }
      }
      a.slice(bestLeft, bestRight + 1)
    }
  }
}
| DmytroOrlov/devgym | server/test/solution/SubArrayWithMaxSum.scala | Scala | apache-2.0 | 987 |
package controllers
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc.Action
import play.modules.reactivemongo.MongoController
import org.qbproject.api.schema.QBSchema._
import org.qbproject.api.controllers.{JsonHeaders, QBCrudController}
import org.qbproject.api.mongo.{QBCollectionValidation, QBMongoCollection}
import org.qbproject.api.routing.QBRouter
import play.api.libs.json.{JsUndefined, JsValue, Json}
// CRUD endpoints for the "user" Mongo collection, with writes validated against
// the QB model schema. Also exposes the view and model schemas as JSON.
object UserController extends MongoController with QBCrudController {
  // Collection handle with schema validation mixed in; every persisted document
  // must conform to UserSchema.modelSchema.
  lazy val collection = new QBMongoCollection("user")(db) with QBCollectionValidation {
    override def schema = UserSchema.modelSchema
  }
  // On create, "id" is server-assigned, so it is stripped from the accepted schema.
  override def createSchema = UserSchema.modelSchema -- "id"
  // Returns the UI view schema as JSON (with JSON content-type headers).
  def getView = JsonHeaders {
    Action {
      Ok(Json.toJson(UserSchema.viewSchema))
    }
  }
  // Returns the persistence model schema as JSON.
  def getModel = JsonHeaders {
    Action {
      Ok(Json.toJson(UserSchema.modelSchema))
    }
  }
}
// Exposes the generated CRUD routes of UserController through the QB router.
object UserRouter extends QBRouter {
  override def qbRoutes = UserController.crudRoutes
}
| sdirix/emf2web | org.eclipse.emf.ecp.emf2web.examples/projects/org.eclipse.emf.ecp.emf2web.playapplication/app/controllers/UserController.scala | Scala | epl-1.0 | 1,019 |
// Compiler regression test fixture (pos test): exact syntax is intentional.
// Generic class whose constructor immediately applies f to x and prints the result.
class S[A](f: A => A, x: A) {
  Console.println(f(x));
}
// Exercises passing an eta-expanded constructor parameter to the superclass —
// the pattern this positional compiler test is checking.
class T[B](f: B => B, y: B) extends S((x: B) => f(x), y) {
}
// Entry point: instantiates both classes so the constructors (and their println
// side effects) run; existence of this code compiling is the test.
object Test extends App {
  new T[Int](x => x * 2, 1);
  val f = new S((x: Int) => x, 1);
}
| yusuke2255/dotty | tests/pos/t115.scala | Scala | bsd-3-clause | 210 |
package io.vertx.asyncsql.test
import java.nio.charset.StandardCharsets
import org.vertx.java.core.Handler
import org.vertx.java.core.json.impl.Base64
import org.vertx.scala.core.buffer.Buffer
import scala.concurrent.{Future, Promise}
import org.vertx.scala.core.json.{JsonObject, Json, JsonArray}
import org.vertx.testtools.VertxAssert._
import org.junit.Test
import scala.util.{Success, Failure, Try}
import org.vertx.scala.core.eventbus.Message
import org.vertx.scala.core.FunctionConverters._
trait BaseSqlTests {
this: SqlTestVerticle =>
  // Timeout (ms) applied to every event-bus round trip in these tests.
  private val timeout: Int = 15000
  // Recovery handler appended to each test's Future chain: logs the failure and
  // fails the async test instead of letting the exception vanish silently.
  protected def failedTest: PartialFunction[Throwable, Unit] = {
    case ex: Throwable =>
      logger.warn("failed in test", ex)
      fail("test failed. see warning above")
  }
  // Sends `json` to the module's event-bus address and completes the returned
  // Future with (message, body) on reply, or fails it on timeout/error.
  protected def sendWithTimeout(json: JsonObject): Future[(Message[JsonObject], JsonObject)] = {
    val p = Promise[(Message[JsonObject], JsonObject)]()
    vertx.eventBus.sendWithTimeout(address, json, timeout, {
      case Success(reply) => p.success(reply, reply.body())
      case Failure(ex) => p.failure(ex)
    }: Try[Message[JsonObject]] => Unit)
    p.future
  }
  // Same as sendWithTimeout, but replies on an existing message (used to continue
  // a transaction conversation with the module).
  protected def replyWithTimeout(msg: Message[JsonObject], json: JsonObject): Future[(Message[JsonObject], JsonObject)] = {
    val p = Promise[(Message[JsonObject], JsonObject)]()
    msg.replyWithTimeout(json, timeout, {
      case Success(reply) => p.success(reply, reply.body())
      case Failure(ex) => p.failure(ex)
    }: Try[Message[JsonObject]] => Unit)
    p.future
  }
protected def checkOkay(json: JsonObject)(msg: (Message[JsonObject], JsonObject)): (Message[JsonObject], JsonObject) = {
assertEquals(s"should get 'ok' back when sending ${json.encode()}, but got ${msg._2.encode()}",
"ok", msg._2.getString("status"))
(msg._1, msg._2)
}
protected def checkError(json: JsonObject)(msg: (Message[JsonObject], JsonObject)): (Message[JsonObject], JsonObject) = {
assertEquals(s"should get an 'error' back when sending ${json.encode()}, but got ${msg._2.encode()}",
"error", msg._2.getString("status"))
(msg._1, msg._2)
}
  // Convenience wrappers: send/reply and assert the expected status in one step.
  protected def sendOk(json: JsonObject): Future[(Message[JsonObject], JsonObject)] =
    sendWithTimeout(json) map checkOkay(json)
  protected def sendFail(json: JsonObject): Future[(Message[JsonObject], JsonObject)] =
    sendWithTimeout(json) map checkError(json)
  protected def replyOk(msg: Message[JsonObject], json: JsonObject): Future[(Message[JsonObject], JsonObject)] =
    replyWithTimeout(msg, json) map checkOkay(json)
  protected def replyFail(msg: Message[JsonObject], json: JsonObject): Future[(Message[JsonObject], JsonObject)] =
    replyWithTimeout(msg, json) map checkError(json)
  // Creates the empty some_test fixture table (via the verticle's DDL statement).
  protected def setupTableTest(): Future[_] = for {
    (msg, reply) <- sendOk(raw(createTableStatement("some_test")))
  } yield {
    assertEquals(0, reply.getInteger("rows"))
  }
  // Creates some_test and seeds it with the two canonical fixture rows
  // ("Mr. Test" and "Ms Test2") covering string/bool/int/decimal/date columns.
  protected def setupTypeTest(): Future[_] = for {
    _ <- setupTableTest()
    (msg, reply) <- sendOk(insert("some_test",
      Json.fromArrayString( """["name","email","is_male","age","money","wedding_date"]"""),
      Json.fromArrayString(
        """[["Mr. Test","test@example.com",true,15,167.31,"2024-04-01"],
          | ["Ms Test2","test2@example.com",false,43,167.31,"1997-12-24"]]""".stripMargin)))
  } yield ()
  // Asserts that every element of arr1 also appears in arr2 (subset check,
  // order-insensitive).
  private def checkSameFields(arr1: JsonArray, arr2: JsonArray) = {
    import scala.collection.JavaConversions._
    arr1.foreach(elem => assertTrue(arr2.contains(elem)))
  }
  // Asserts the column values of the "Mr. Test" fixture row, tolerating drivers
  // that report booleans as numbers (1/0).
  private def checkMrTest(mrTest: JsonArray) = {
    assertEquals("Mr. Test", mrTest.get[String](0))
    assertEquals("test@example.com", mrTest.get[String](1))
    assertTrue(mrTest.get[Any](2) match {
      case b: Boolean => b
      case i: Number => i.intValue() == 1
      case x => false
    })
    assertEquals(15, mrTest.get[Number](3).intValue())
    assertEquals(167.31, mrTest.get[Number](4).doubleValue(), 0.0001)
    // FIXME check date conversion
    // assertEquals("2024-04-01", mrTest.get[JsonObject](5))
  }
  // Smoke test: a trivial SELECT round-trips through the module.
  @Test
  def simpleConnection(): Unit = (for {
    (msg, reply) <- sendOk(raw("SELECT 0"))
  } yield {
    val res = reply.getArray("results")
    assertEquals(1, res.size())
    assertEquals(0, res.get[JsonArray](0).get[Number](0).intValue())
    testComplete()
  }) recover failedTest
  // Fires 10 concurrent queries to exercise the connection pool; each result is
  // checked individually and their sum is verified against the closed form.
  @Test
  def poolSize(): Unit = asyncTest {
    val n = 10
    val futures = for {
      i <- 1 to n
    } yield {
      expectOk(raw("SELECT " + i)) map {
        reply =>
          val res = reply.getArray("results")
          assertEquals(1, res.size())
          val result = res.get[JsonArray](0).get[Number](0).intValue()
          assertEquals(i, result)
          result
      }
    }
    val fs = Future.sequence(futures) map (_.sum)
    fs map (assertEquals((n * (n + 1)) / 2, _))
  }
  // Verifies multiple projected columns come back in order within one row.
  @Test
  def multipleFields(): Unit = (for {
    (msg, reply) <- sendOk(raw("SELECT 1 a, 0 b"))
  } yield {
    val res = reply.getArray("results")
    assertEquals(1, res.size())
    val firstElem = res.get[JsonArray](0)
    assertEquals(1, firstElem.get[Number](0).intValue())
    assertEquals(0, firstElem.get[Number](1).intValue())
    testComplete()
  }) recover failedTest
  // Verifies that result values line up with the reported field order by looking
  // each column up via its index in the "fields" array rather than by position.
  @Test
  def multipleFieldsOrder(): Unit =
    (for {
      _ <- setupTypeTest()
      (msg, reply) <- sendOk(raw("SELECT is_male, age, email, money, name FROM some_test WHERE is_male = true"))
    } yield {
      import collection.JavaConverters._
      val receivedFields = reply.getArray("fields")
      val results = reply.getArray("results").get[JsonArray](0)
      assertEquals(1, reply.getInteger("rows"))
      val columnNamesList = receivedFields.asScala.toList
      assertEquals("Mr. Test", results.get(columnNamesList.indexOf("name")))
      assertEquals("test@example.com", results.get(columnNamesList.indexOf("email")))
      assertEquals(15, results.get[Int](columnNamesList.indexOf("age")))
      assertTrue(results.get[Any](columnNamesList.indexOf("is_male")) match {
        case b: Boolean => b
        case i: Number => i.intValue() == 1
        case x => false
      })
      assertEquals(167.31, results.get[Number](columnNamesList.indexOf("money")).doubleValue(), 0.01)
      testComplete()
    }) recover failedTest
  // Full DDL lifecycle: create, insert, select, drop, then confirm a select on
  // the dropped table fails with a "does not exist" style error message.
  @Test
  def createAndDropTable(): Unit = (for {
    (msg, dropIfExistsReply) <- sendOk(raw("DROP TABLE IF EXISTS some_test;"))
    (msg, createReply) <- sendOk(raw("CREATE TABLE some_test (id SERIAL, name VARCHAR(255));"))
    (msg, insertReply) <- sendOk(raw("INSERT INTO some_test (name) VALUES ('tester');"))
    (msg, selectReply) <- sendOk(raw("SELECT name FROM some_test"))
    (msg, dropReply) <- {
      // NOTE(review): catching Throwable here is only to convert a malformed
      // result payload into a readable test failure; fail() itself throws.
      assertEquals("tester", try {
        selectReply.getArray("results").get[JsonArray](0).get[String](0)
      } catch {
        case ex: Throwable => fail(s"Should be able to get a result before drop, but got ${selectReply.encode()}")
      })
      sendOk(raw("DROP TABLE some_test;"))
    }
    (msg, selectReply) <- sendFail(raw("SELECT name FROM some_test"))
  } yield {
    val error = selectReply.getString("message")
    assertTrue(s"Not the right error message $error",
      error.contains("some_test") && (error.contains("doesn't exist") || error.contains("does not exist")))
    testComplete()
  }) recover failedTest
  // Inserting rows that omit nullable columns must succeed.
  @Test
  def insertCorrectWithMissingValues(): Unit = (for {
    _ <- setupTableTest()
    _ <- sendOk(insert("some_test",
      Json.fromArrayString( """["name","email"]"""),
      Json.fromArrayString( """[["Test","test@example.com"],
        | ["Test2","test2@example.com"]]""".stripMargin)))
  } yield testComplete()) recover failedTest
  // Explicit JSON nulls in the value arrays must be inserted as SQL NULL.
  @Test
  def insertNullValues(): Unit = (for {
    _ <- setupTableTest()
    _ <- sendOk(insert("some_test",
      Json.fromArrayString( """["name","email"]"""),
      Json.fromArrayString( """[[null,"test@example.com"],
        | [null,"test2@example.com"]]""".stripMargin)))
  } yield testComplete()) recover failedTest
  // The shared type fixture (strings, booleans, ints, decimals, dates) inserts cleanly.
  @Test
  def insertTypeTest(): Unit = (for {
    _ <- setupTypeTest()
  } yield testComplete()) recover failedTest
  // SQL-injection probe: a value containing quote/); DROP TABLE payloads must be
  // stored verbatim, leaving the table intact with both rows present.
  @Test
  def insertMaliciousDataTest(): Unit = (for {
    _ <- setupTableTest()
    (msg, insertReply) <- sendOk(insert("some_test",
      Json.fromArrayString( """["name","email","is_male","age","money","wedding_date"]"""),
      Json.fromArrayString(
        """[["Mr. Test","test@example.com",true,15,167.31,"2024-04-01"],
          | ["Ms Test2','some@example.com',false,15,167.31,'2024-04-01');DROP TABLE some_test;--","test2@example.com",false,43,167.31,"1997-12-24"]]""".stripMargin)))
    (msg, selectReply) <- sendOk(raw("SELECT * FROM some_test"))
  } yield {
    assertEquals(2, selectReply.getArray("results").size())
    testComplete()
  }) recover failedTest
  // Violating the unique constraint must surface as an error reply, not a hang.
  @Test
  def insertUniqueProblem(): Unit = (for {
    _ <- setupTableTest()
    (msg, reply) <- sendFail(insert("some_test",
      Json.fromArrayString( """["name","email"]"""),
      Json.fromArrayString(
        """[["Test","test@example.com"],
          | ["Test","test@example.com"]]""".stripMargin)))
  } yield testComplete()) recover failedTest
  // SELECT without an explicit field list returns every column; values are
  // checked by membership since the column order is driver-defined.
  @Test
  def selectWithoutFields(): Unit = (for {
    _ <- setupTypeTest()
    (msg, reply) <- sendOk(select("some_test"))
  } yield {
    val receivedFields = reply.getArray("fields")
    logger.info("received: " + receivedFields.encode())
    def assertFieldName(field: String) = {
      assertTrue("fields should contain '" + field + "'", receivedFields.contains(field))
    }
    assertFieldName("id")
    assertFieldName("name")
    assertFieldName("email")
    assertFieldName("is_male")
    assertFieldName("age")
    assertFieldName("money")
    assertFieldName("wedding_date")
    val moneyField = receivedFields.toArray.indexOf("money")
    val mrTest = reply.getArray("results").get[JsonArray](0)
    assertTrue(mrTest.contains("Mr. Test"))
    assertTrue(mrTest.contains("test@example.com"))
    assertTrue(mrTest.contains(true) || mrTest.contains(1))
    assertTrue(mrTest.contains(15))
    assertEquals(167.31, mrTest.get[Number](moneyField).doubleValue(), 0.0001)
    testComplete()
  }) recover failedTest
  // SELECT with an explicit field list returns exactly those columns and the
  // full "Mr. Test" fixture row round-trips intact.
  @Test
  def selectEverything(): Unit = {
    val fieldsArray = Json.arr("name", "email", "is_male", "age", "money", "wedding_date")
    (for {
      _ <- setupTypeTest()
      (msg, reply) <- sendOk(select("some_test", fieldsArray))
    } yield {
      val receivedFields = reply.getArray("fields")
      checkSameFields(fieldsArray, receivedFields)
      val results = reply.getArray("results")
      val mrTest = results.get[JsonArray](0)
      checkMrTest(mrTest)
      testComplete()
    }) recover failedTest
  }
@Test
def selectFiltered(): Unit = {
val fieldsArray = Json.arr("name", "email")
(for {
_ <- setupTypeTest()
(msg, reply) <- sendOk(select("some_test", fieldsArray))
} yield {
val receivedFields = reply.getArray("fields")
assertEquals(s"arrays ${fieldsArray.encode()} and ${receivedFields.encode()} should match",
fieldsArray, receivedFields)
assertEquals(2, reply.getInteger("rows"))
val results = reply.getArray("results")
val mrOrMrs = results.get[JsonArray](0)
mrOrMrs.get[String](0) match {
case "Mr. Test" =>
assertEquals("Mr. Test", mrOrMrs.get[String](0))
assertEquals("test@example.com", mrOrMrs.get[String](1))
case "Mrs. Test" =>
assertEquals("Mrs. Test", mrOrMrs.get[String](0))
assertEquals("test2@example.com", mrOrMrs.get[String](1))
}
testComplete()
}) recover failedTest
}
  // Prepared statement with positional parameters returns only the matching row.
  @Test
  def preparedSelect(): Unit = (for {
    _ <- setupTypeTest()
    (msg, reply) <- sendOk(prepared("SELECT email FROM some_test WHERE name=? AND age=?", Json.arr("Mr. Test", 15)))
  } yield {
    val receivedFields = reply.getArray("fields")
    assertEquals(Json.arr("email"), receivedFields)
    assertEquals(1, reply.getInteger("rows"))
    assertEquals("test@example.com", reply.getArray("results").get[JsonArray](0).get[String](0))
    testComplete()
  }) recover failedTest
  // A multi-statement "transaction" action (insert + raw update) commits as a
  // unit; the male-age sum proves both statements took effect (15 + 6 = 21).
  @Test
  def simpleTransaction(): Unit = (for {
    _ <- setupTypeTest()
    (msg, transactionReply) <- sendOk(
      transaction(
        insert("some_test", Json.arr("name", "email", "is_male", "age", "money"),
          Json.arr(Json.arr("Mr. Test jr.", "test3@example.com", true, 5, 2))),
        raw("UPDATE some_test SET age=6 WHERE name = 'Mr. Test jr.'")))
    (msg, reply) <- sendOk(raw("SELECT SUM(age) FROM some_test WHERE is_male = true"))
  } yield {
    val results = reply.getArray("results")
    assertEquals(1, results.size())
    assertEquals(21, results.get[JsonArray](0).get[Number](0).intValue())
    testComplete()
  }) recover failedTest
  // Same scenario as simpleTransaction, but the update uses a prepared statement.
  @Test
  def transactionWithPreparedStatement(): Unit = (for {
    _ <- setupTypeTest()
    (msg, transactionReply) <- sendOk(
      transaction(
        insert("some_test", Json.arr("name", "email", "is_male", "age", "money"),
          Json.arr(Json.arr("Mr. Test jr.", "test3@example.com", true, 5, 2))),
        prepared("UPDATE some_test SET age=? WHERE name=?", Json.arr(6, "Mr. Test jr."))))
    (msg, reply) <- sendOk(raw("SELECT SUM(age) FROM some_test WHERE is_male = true"))
  } yield {
    val results = reply.getArray("results")
    assertEquals(1, results.size())
    assertEquals(21, results.get[JsonArray](0).get[Number](0).intValue())
    testComplete()
  }) recover failedTest
  // Conversation-style transaction: begin, query on the reply channel, commit.
  @Test
  def startAndEndTransaction(): Unit = (for {
    (msg, beginReply) <- sendOk(Json.obj("action" -> "begin"))
    (msg, selectReply) <- replyOk(msg, raw("SELECT 15"))
    (msg, commitReply) <- {
      val arr = selectReply.getArray("results")
      assertEquals("ok", selectReply.getString("status"))
      assertEquals(1, arr.size())
      assertEquals(15, arr.get[JsonArray](0).get[Number](0).longValue())
      replyOk(msg, Json.obj("action" -> "commit"))
    }
  } yield testComplete()) recover failedTest
  // An update committed inside a begin/commit conversation is visible afterwards.
  @Test
  def updateInTransaction(): Unit = (for {
    _ <- setupTypeTest()
    (msg, beginReply) <- sendOk(Json.obj("action" -> "begin"))
    (msg, updateReply) <- replyOk(msg, raw("UPDATE some_test set email = 'updated@test.com' WHERE name = 'Mr. Test'"))
    (msg, commitReply) <- replyOk(msg, Json.obj("action" -> "commit"))
    (msg, checkReply) <- sendOk(raw("SELECT email FROM some_test WHERE name = 'Mr. Test'"))
  } yield {
    val results = checkReply.getArray("results")
    val mrTest = results.get[JsonArray](0)
    assertEquals("updated@test.com", mrTest.get[String](0))
    logger.info("all tests completed")
    testComplete()
  }) recover failedTest
  // Builds two FK-linked tables inside a transaction, verifies a violating insert
  // is rejected mid-transaction, then rolls the whole conversation back.
  @Test
  def violateForeignKey(): Unit = (for {
    (msg, beginResult) <- sendOk(Json.obj("action" -> "begin"))
    (msg, _) <- replyOk(msg, raw("DROP TABLE IF EXISTS test_two;"))
    (msg, _) <- replyOk(msg, raw("DROP TABLE IF EXISTS test_one;"))
    (msg, _) <- replyOk(msg, raw( """CREATE TABLE test_one (
      |  id SERIAL,
      |  name VARCHAR(255),
      |  PRIMARY KEY (id)
      |);""".stripMargin))
    (msg, _) <- replyOk(msg, raw(createTableTestTwo))
    (msg, _) <- replyOk(msg, raw(
      """ALTER TABLE test_two ADD CONSTRAINT test_two_one_id_fk
        |FOREIGN KEY (one_id)
        |REFERENCES test_one (id);""".stripMargin))
    (msg, _) <- replyOk(msg, raw("INSERT INTO test_one (name) VALUES ('first'),('second');"))
    (msg, setupResult) <- replyOk(msg, raw("INSERT INTO test_two (name, one_id) VALUES ('twoone', 1);"))
    (msg, insertViolatedResult) <- replyFail(msg, raw("INSERT INTO test_two (name, one_id) VALUES ('twothree', 3);"))
    (msg, rollbackResult) <- replyOk(msg, raw("ROLLBACK;"))
  } yield testComplete()) recover failedTest
  // Unparsable SQL inside a transaction must come back as an error reply.
  @Test
  def wrongQueryInTransaction(): Unit = (for {
    _ <- setupTypeTest()
    (msg, beginReply) <- sendOk(Json.obj("action" -> "begin"))
    (msg, updateReply) <- replyWithTimeout(msg, raw("this is a bad raw query for sql"))
  } yield {
    assertEquals("error", updateReply.getString("status"))
    testComplete()
  }) recover failedTest
  // An update visible inside the transaction must disappear after an explicit
  // rollback; the untouched fixture row is re-verified afterwards.
  @Test
  def rollBackTransaction(): Unit = {
    val fieldsArray = Json.arr("name", "email", "is_male", "age", "money", "wedding_date")
    (for {
      _ <- setupTypeTest()
      (msg, beginReply) <- sendOk(Json.obj("action" -> "begin"))
      (msg, reply) <- replyOk(msg, raw("UPDATE some_test set email = 'shouldRollback@test.com' WHERE name = 'Mr. Test'"))
      (msg, checkUpdateReply) <- replyOk(msg, raw("SELECT email FROM some_test WHERE name = 'Mr. Test'"))
      (msg, endReply) <- {
        val results = checkUpdateReply.getArray("results")
        val mrTest = results.get[JsonArray](0)
        assertEquals("shouldRollback@test.com", mrTest.get[String](0))
        logger.info("Update done, now do rollback")
        replyOk(msg, Json.obj("action" -> "rollback"))
      }
      (msg, checkReply) <- sendOk(select("some_test", fieldsArray))
    } yield {
      val results = checkReply.getArray("results")
      val mrTest = results.get[JsonArray](0)
      checkMrTest(mrTest)
      logger.info("rolled back nicely")
      testComplete()
    }) recover failedTest
  }
  // A DATE column round-trips as its ISO yyyy-MM-dd string form.
  @Test
  def dateTest(): Unit = (for {
    _ <- setupTableTest()
    (msg, insertReply) <- sendOk(raw("INSERT INTO some_test (name, wedding_date) VALUES ('tester', '2015-04-04');"))
    (msg, reply) <- sendOk(prepared("SELECT wedding_date FROM some_test WHERE name=?", Json.arr("tester")))
  } yield {
    val receivedFields = reply.getArray("fields")
    assertEquals(Json.arr("wedding_date"), receivedFields)
    assertEquals("2015-04-04", reply.getArray("results").get[JsonArray](0).get[String](0))
    testComplete()
  }) recover failedTest
  // A TIMESTAMP column round-trips with millisecond precision in ISO form.
  @Test
  def timestampTest(): Unit = (for {
    (m, r) <- sendOk(raw("DROP TABLE IF EXISTS date_test"))
    (msg, r2) <- sendOk(raw(createDateTable("timestamp")))
    (msg, insertReply) <- sendOk(raw("INSERT INTO date_test (test_date) VALUES ('2015-04-04T10:04:00.000');"))
    (msg, reply) <- sendOk(raw("SELECT test_date FROM date_test"))
  } yield {
    val receivedFields = reply.getArray("fields")
    assertEquals(Json.arr("test_date"), receivedFields)
    val date = reply.getArray("results").get[JsonArray](0).get[String](0)
    logger.info(s"date is: $date")
    assertEquals("2015-04-04T10:04:00.000", date)
    testComplete()
  }) recover failedTest
  // Inserts a file's bytes into a blob column via a prepared statement and checks
  // the (base64-encoded) stored value decodes back to the original content.
  @Test
  def blobUpload(): Unit = (for {
    image <- readFile("example.jpg")
    (msg, r0) <- sendOk(raw("DROP TABLE IF EXISTS blob_test"))
    (msg, r1) <- sendOk(raw(createBlobTable))
    (msg, r2) <- sendOk(prepared("INSERT INTO blob_test (test_blob) VALUES (?)", Json.emptyArr().addBinary(image)))
    (msg, r3) <- sendOk(raw("SELECT test_blob FROM blob_test"))
  } yield {
    val receivedFields = r3.getArray("fields")
    assertEquals(Json.arr("test_blob"), receivedFields)
    logger.info(s"blob is: ${r3.getArray("results").get[JsonArray](0).get[Array[Byte]](0)}")
    // The blob arrives as a JSON array of bytes holding a base64 string.
    val blob = r3.getArray("results").get[JsonArray](0).get[JsonArray](0).toArray.map(_.asInstanceOf[Byte])
    val str = new String(Base64.decode(new String(blob)))
    logger.info(s"blob is2: ${blob.getClass}")
    assertEquals(new String(image), str)
    testComplete()
  }) recover failedTest
  // Reads a file via the vert.x file system API and yields its bytes
  // (decoded/re-encoded through UTF-8, as the original did).
  private def readFile(file: String): Future[Array[Byte]] = {
    val p = Promise[Array[Byte]]()
    vertx.fileSystem.readFile(file, {
      case Success(buffer) =>
        logger.info(s"read file buffer in ${StandardCharsets.UTF_8.name()} encoding")
        p.success(buffer.toString(StandardCharsets.UTF_8.name()).getBytes)
      case Failure(ex) => p.failure(ex)
    }: Try[Buffer] => Unit)
    p.future
  }
}
| vert-x/mod-mysql-postgresql | src/test/scala/io/vertx/asyncsql/test/BaseSqlTests.scala | Scala | apache-2.0 | 19,774 |
package org.scalamu.core.runners
import java.io.DataOutputStream
import java.net.ServerSocket
import cats.syntax.either._
import io.circe.{Decoder, Encoder}
import org.scalamu.core.api.CommunicationException
/**
 * Socket connection handler that, after accepting the connection, runs a
 * caller-supplied initialization step against the pipe's output stream before
 * handing the pipe to the caller. Any non-fatal failure during initialization
 * is surfaced as a CommunicationException.
 */
class ProcessCommunicationHandler[I: Encoder, O: Decoder](
  override val socket: ServerSocket,
  initialize: DataOutputStream => Unit
) extends SocketConnectionHandler[I, O] {

  override def handle(): Either[CommunicationException, CommunicationPipe[I, O]] =
    super.handle().flatMap { pipe =>
      Either
        .catchNonFatal(initialize(pipe.os))
        .leftMap(CommunicationException)
        .map(_ => pipe)
    }
}
| sugakandrey/scalamu | core/src/main/scala/org/scalamu/core/runners/ProcessCommunicationHandler.scala | Scala | gpl-3.0 | 658 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
/**
* Application mode, either `Dev`, `Test`, or `Prod`.
*
* @see [[play.Mode]]
*/
// Sealed hierarchy: the only modes are the three case objects below. `asJava`
// carries the corresponding Java-side enum constant.
sealed abstract class Mode(val asJava: play.Mode)
object Mode {
  case object Dev extends Mode(play.Mode.DEV)
  case object Test extends Mode(play.Mode.TEST)
  case object Prod extends Mode(play.Mode.PROD)
  // All modes, for iteration/lookup.
  lazy val values: Set[play.api.Mode] = Set(Dev, Test, Prod)
}
| benmccann/playframework | core/play/src/main/scala/play/api/Mode.scala | Scala | apache-2.0 | 445 |
package hakoiri
import scala.Option.option2Iterable
import scala.annotation.tailrec
// Entry point for the klotski ("hakoiri musume") puzzle: configures the 4x5 board,
// the starting piece layout, and the goal test, then prints every solution path
// found by the BFS in the Hakoiri trait.
object Solver extends App with Hakoiri {
  val width = 4
  val height = 5
  val maxStep = 1000
  // Initial layout: "A" is the 2x2 piece that must reach the exit; "B" pieces are
  // 1x2 verticals, "F" the 2x1 horizontal, "G" the 1x1 squares.
  val start: Board = Board(
    Koma("A", "A", 1, 0, 2, 2),
    Koma("B", "B", 0, 0, 1, 2),
    Koma("C", "B", 3, 0, 1, 2),
    Koma("D", "B", 0, 2, 1, 2),
    Koma("E", "B", 3, 2, 1, 2),
    Koma("F", "F", 1, 2, 2, 1),
    Koma("G", "G", 1, 3, 1, 1),
    Koma("H", "G", 2, 3, 1, 1),
    Koma("I", "G", 0, 4, 1, 1),
    Koma("J", "G", 3, 4, 1, 1))
  // Solved when the big "A" piece reaches column 1, row 3 (the exit position).
  def isGoal(board: Board): Boolean =
    board.koma exists {
      case Koma("A", _, 1, 3, _, _) => true
      case _ => false
    }
  // Print each solution path step by step, rendering the board by piece name.
  for {
    (path, number) <- solve(start).zipWithIndex
  } {
    println(s"解 ${number}")
    for {
      (board, step) <- path.zipWithIndex
      layoutStr <- board.layoutOpt() { _.name }
    } {
      println(s"[STEP ${step}]")
      println(layoutStr)
      println()
    }
  }
}
// Breadth-first search over sliding-block puzzle states. Concrete puzzles supply
// the board dimensions, a step bound, and the goal predicate.
trait Hakoiri {
  // A path is a list of boards, most recent first (reversed before returning).
  type Path = List[Board]
  val width: Int
  val height: Int
  val maxStep: Int
  def isGoal(board: Board): Boolean
  // BFS from `start`, deduplicating states by their kind-level layout string.
  // Returns every shortest solution path (oldest board first).
  def solve(start: Board): List[Path] = {
    @tailrec
    def loop(count: Int, paths: List[Path], history: Set[String]): (List[Path], Set[String]) = {
      val (nextPaths, nextHistory) = nextStep(paths, history)
      println(s"ステップ ${count}, 経路パターン数 ${nextPaths.size}, 履歴数 ${nextHistory.size}")
      if (count > maxStep)
        (nextPaths, nextHistory)
      else {
        val goal = nextPaths.filter { path => isGoal(path.head) }
        if (goal.isEmpty)
          loop(count + 1, nextPaths, nextHistory)
        else
          (goal, nextHistory)
      }
    }
    // NOTE(review): `.get` assumes the start layout is valid (no overlapping
    // pieces); an overlapping start would throw here.
    val result = for {
      layoutStr <- start.layoutOpt() { _.kind }
      (answer, _) = loop(1, List(List(start)), Set(layoutStr))
    } yield {
      answer map { _.reverse }
    }
    result.get
  }
  // Expands every frontier path by one legal single-piece move, skipping layouts
  // already seen. `tempHist` is mutated inside the comprehension on purpose so
  // duplicates produced within the same step are also filtered.
  def nextStep(paths: List[Path], history: Set[String]): (List[Path], Set[String]) = {
    var tempHist = history
    val nextPaths = for {
      path <- paths
      current = path.head
      koma <- current.koma
      moved <- koma.move
      next = Board(current.koma map { km => if (km == koma) moved else km }: _*)
      layoutStr <- next.layoutOpt() { _.kind }
      if (!(tempHist contains layoutStr))
    } yield {
      tempHist = tempHist + layoutStr
      next :: path
    }
    (nextPaths, tempHist)
  }
  // A board is just the collection of its pieces.
  case class Board(koma: Koma*) {
    // Renders the board as a grid of f(piece) strings, or None when two pieces
    // overlap (detected via duplicate cell coordinates collapsing in the map).
    def layoutOpt(spacer: String = " ")(f: Koma => String): Option[String] = {
      val entries = for {
        km <- koma
        coord <- km.coords
      } yield {
        (coord, f(km))
      }
      val layout = Map[(Int, Int), String](entries: _*)
      if (layout.size != entries.size)
        None
      else
        Some(layoutString(layout, spacer))
    }
    // NOTE(review): rows are joined with the literal two-character sequence \\n
    // as written here; presumably a newline was intended — verify against the
    // project's original source.
    private def layoutString(layout: Map[(Int, Int), String], spacer: String): String = {
      (for (y <- 0 until height) yield {
        (for (x <- 0 until width) yield {
          layout.get((x, y)) getOrElse spacer
        }).mkString
      }).mkString("\\n")
    }
  }
  // A piece: display name, dedup kind, top-left cell (x, y), and size (w, h).
  case class Koma(name: String, kind: String, x: Int, y: Int, w: Int, h: Int) {
    // All board cells this piece occupies.
    def coords: Seq[(Int, Int)] =
      for {
        dx <- 0 until w
        dy <- 0 until h
      } yield {
        (x + dx, y + dy)
      }
    // The piece shifted one cell up/down/left/right, kept within the board.
    // (Overlap with other pieces is rejected later by layoutOpt.)
    def move: List[Koma] =
      for {
        dx <- List(-1, 0, 1) if (x + dx >= 0 && x + dx + (w - 1) <= width - 1)
        dy <- List(-1, 0, 1) if (y + dy >= 0 && y + dy + (h - 1) <= height - 1)
        if ((dx == 0 && dy != 0) || (dx != 0 && dy == 0))
      } yield {
        Koma(name, kind, x + dx, y + dy, w, h)
      }
  }
}
| agwlvssainokuni/puzzle | hakoiri-scala/src/main/scala/hakoiri/Solver.scala | Scala | apache-2.0 | 3,639 |
package mimir.lenses
import java.io.File
import java.sql._
import java.util
import mimir.algebra._
import mimir.ctables._
import mimir.util.RandUtils
import mimir.Database
import mimir.models._
import mimir.parser.ExpressionParser
import scala.collection.JavaConversions._
import scala.util._
import com.typesafe.scalalogging.LazyLogging
object MissingValueLens extends LazyLogging {
def getConstraint(arg: Expression): Seq[(ID, Expression)] =
{
arg match {
case Var(v) => Seq( (v, Var(v).isNull.not) )
case StringPrimitive(exprString) => {
getConstraint(ExpressionParser.expr(exprString.replaceAll("''", "'")))
}
case e if Typechecker.trivialTypechecker.typeOf(e, (_:ID) => TAny() ).equals(TBool()) => {
ExpressionUtils.getColumns(arg).toSeq match {
case Seq(v) =>
Seq(
(
v,
Var(v).isNull.not.and(
Eval.inline(arg) { x => Var(x) }
)
)
)
case Seq() => throw new RAException(s"Invalid Constraint $e (need a variable in require)")
case _ => throw new RAException(s"Invalid Constraint $e (one variable per require)")
}
}
case e =>
throw new RAException("Invalid constraint $e (Not a Boolean Expression)")
}
}
/**
 * Builds a missing-value repair lens over `query`.
 *
 * Trains one or more imputation models on the "clean" subset of the data and
 * wraps the query in a projection that replaces null/invalid values with
 * model predictions, arbitrated per-column by a meta-model.
 *
 * @param db                the active Mimir database
 * @param name              unique identifier for this lens (used to namespace models)
 * @param humanReadableName display name used in model descriptions
 * @param query             the query whose output should be repaired
 * @param args              lens arguments; see the semantics comment below
 * @return the rewritten operator plus every model it depends on
 * @throws java.sql.SQLException if a referenced column is missing or no model applies
 */
def create(
  db: Database,
  name: ID,
  humanReadableName: String,
  query: Operator,
  args:Seq[Expression]
): (Operator, Seq[Model]) =
{
  logger.trace(s"Human readable name: ${humanReadableName}")
  // Preprocess the lens arguments...
  // Semantics are as follows:
  //
  // - 'columnName' means that we should replace missing values of columnName
  // - columnName means that we should replace missing values of columnName
  // - REQUIRE(column > ...) or a similar constraint means that we should replace
  //   missing values and values that don't satisfy the constraint
  // - REQUIRE(column, column > ...) is similar, but allows you to define constraints
  //   over multiple columns
  val targetColumnsAndTests:Map[ID, Expression] = args.flatMap { getConstraint(_) }.toMap
  // Sanity check. Require that all columns that we fix and all columns referenced in the
  // constraints are defined in the original query.
  val schema = query.columnNames
  val requiredColumns = targetColumnsAndTests.values.flatMap { ExpressionUtils.getColumns(_) }
  val missingColumns = (targetColumnsAndTests.keySet ++ requiredColumns) -- schema.toSet
  if(!missingColumns.isEmpty){
    throw new SQLException(
      "Invalid missing value lens: ["+missingColumns.mkString(", ")+
      "] not part of ["+schema.mkString(", ")+"]"
    )
  }
  // Create a query where all values that dont satisfy their constraints are removed
  // (used for training the models)
  val noErroneousValuesQuery =
    query.mapByID(
      schema.
        map { col =>
          targetColumnsAndTests.get(col) match {
            // A pure null-check constraint needs no masking: the trainers skip nulls anyway.
            case Some(IsNullExpression(v)) if col.equals(v) => (col, Var(col))
            // Otherwise mask failing values as NULL so they are excluded from training.
            case Some(test) => (col, test.thenElse { Var(col) } { NullPrimitive() })
            case None => (col, Var(col))
          }
        }:_*
    )
  logger.trace(s"Safe query: ${noErroneousValuesQuery}")
  // Ask every registered imputation model family to propose models for the target columns.
  val modelsByType: Seq[(ID, Seq[(ID, (Model, Int, Seq[Expression]))])] =
    ModelRegistry.imputations.toSeq.map {
      case (
        modelCategory: ID,
        constructor: ModelRegistry.ImputationConstructor
      ) => {
        val modelsByTypeAndColumn: Seq[(ID, (Model, Int, Seq[Expression]))] =
          constructor(
            db,
            ID(name, ":", modelCategory),
            targetColumnsAndTests.keySet.toSeq,
            noErroneousValuesQuery,
            humanReadableName
          ).toSeq
        (modelCategory, modelsByTypeAndColumn)
      }
    }
  // Regroup from (category -> column -> model) into (column -> candidate models).
  val (
    candidateModels: Map[ID,Seq[(ID,Int,Seq[Expression],ID)]],
    modelEntities: Seq[Model]
  ) =
    LensUtils.extractModelsByColumn(modelsByType)
  logger.trace(s"Candidate Models: ${candidateModels.keys.mkString(", ")}")
  // Sanity check...
  targetColumnsAndTests.keySet.foreach( target => {
    if(!candidateModels.contains(target)){
      throw new SQLException("No valid imputation model for column '"+target+"' in lens '"+name+"'");
    }
  })
  // For each column, build a meta-model that picks among its candidate models per row.
  val (
    replacementExprsList: Seq[(ID,Expression)],
    metaModels: Seq[Model]
  ) =
    candidateModels.
      map({
        case (column, models) => {
          //TODO: Replace Default Model
          val metaModel = new DefaultMetaModel(
            ID(name, ":META:", column),
            s"picking values for ${humanReadableName}.$column",
            models.map(_._4)
          )
          val metaExpr = LensUtils.buildMetaModel(
            metaModel.name, 0, Seq(), Seq(),
            models, Seq[Expression](RowIdVar())
          )
          ( (column, metaExpr), metaModel )
        }
      }).
      unzip
  val replacementExprs = replacementExprsList.toMap
  // Wrap the original query: keep values that pass their test, otherwise substitute
  // the meta-model's prediction. Untargeted columns pass through unchanged.
  val projectArgs =
    query.columnNames.
      map( col => replacementExprs.get(col) match {
        case None => ProjectArg(col, Var(col))
        case Some(replacementExpr) =>
          ProjectArg(col,
            Conditional(
              targetColumnsAndTests(col),
              Var(col),
              replacementExpr
            ))
      })
  return (
    Project(projectArgs, query),
    modelEntities ++ metaModels
  )
}
}
| UBOdin/mimir | src/main/scala/mimir/lenses/MissingValueLens.scala | Scala | apache-2.0 | 5,724 |
package org.talkingpuffin.util
import scala.collection.JavaConversions._
import java.util.concurrent.Executors
import java.util.{ArrayList, Collections}
import java.text.NumberFormat
object Parallelizer extends Loggable {

  /**
   * Runs, in the number of threads requested, the function f, giving it each A of args,
   * returning an Iterable[T] of the results (in the order of args).
   *
   * @param numThreads size of the fixed worker pool
   * @param args       inputs, one task submitted per element
   * @param f          the work to perform for each input
   * @param threadName prefix used to name the pool's worker threads
   */
  def run[T,A](numThreads: Int, args: Iterable[A], f: (A) => T, threadName: String = "Parallel"): Iterable[T] = {
    val timings = Collections.synchronizedList(new ArrayList[Long])
    val pool = Executors.newFixedThreadPool(numThreads, NamedThreadFactory(threadName))
    try {
      val result: Iterable[T] = args.map(arg => pool.submit(Threads.callable {
        val startTime = System.currentTimeMillis
        val result = f(arg)
        timings.add(System.currentTimeMillis - startTime)
        result
      })).map(_.get).toList
      logStats(timings)
      result
    } finally {
      // Always release the worker threads, even when f throws (the exception
      // surfaces from Future.get); previously a failure leaked the pool.
      pool.shutdown()
    }
  }

  /** Population standard deviation of the recorded timings around `mean`. */
  private def calcStdDev(timings: java.util.List[Long], mean: Double): Double = {
    val difSq = timings.map(timing => {
      val dif = timing - mean
      dif * dif
    })
    math.sqrt(difSq.sum / timings.size)
  }

  /** Logs sorted timings plus mean and standard deviation; no-op stats for an empty run. */
  private def logStats(timings: java.util.List[Long]): Unit = {
    if (timings.isEmpty) {
      // Guard: with no tasks, mean/std-dev would divide by zero (NaN).
      debug("No timings recorded")
    } else {
      val fmt = NumberFormat.getInstance
      val mean = timings.sum.toDouble / timings.size
      debug(timings.sorted.map(timing => fmt.format(timing)).toList.mkString(", "))
      debug("Mean: " + fmt.format(mean) + ", Std dev: " + fmt.format(calcStdDev(timings, mean)))
    }
  }
}
| dcbriccetti/talking-puffin | common/src/main/scala/org/talkingpuffin/util/Parallelizer.scala | Scala | mit | 1,474 |
package com.github.andr83.parsek.spark
import com.github.andr83.parsek.PValue
import com.github.andr83.parsek.spark.SparkPipeContext.LongCountersAccumulable
import org.apache.spark.rdd.RDD
/**
* Repository to get access to streams and their contexts in flow
*
* @author andr83
*/
class FlowRepository(accumulators: Map[String, LongCountersAccumulable]) {
protected var rddByFlow: Map[String, RDD[PValue]] = Map.empty[String, RDD[PValue]]
protected var contextByFlow: Map[String, SparkPipeContext] = Map.empty[String, SparkPipeContext]
def rdds = rddByFlow
/**
* Return PipeContext for current flow. If context is not available it will created
*
* @param flow flow name
* @return
*/
def getContext(flow: String): SparkPipeContext = getContext(flow, flow)
/**
* Return PipeContext for current flow. If context is not available it will copied from currentFlow or created
*
* @param flow flow name for which return PipeContext
* @param currentFlow flow which use to create PipeContext if it is not exist
* @return
*/
def getContext(flow: String, currentFlow: String): SparkPipeContext =
  contextByFlow.get(flow) match {
    case Some(existing) => existing
    case None =>
      // Lazily build the context from the flow's pre-registered accumulator.
      val acc = accumulators.getOrElse(flow,
        throw new IllegalStateException(s"Missing accumulator for flow $flow"))
      val freshContext = SparkPipeContext(acc)
      contextByFlow += (flow -> freshContext)
      // Seed the new context from the current flow's context, if it differs and exists.
      if (currentFlow != flow) {
        contextByFlow.get(currentFlow).foreach { currentContext =>
          SparkPipeContext.copy(currentContext, freshContext)
        }
      }
      freshContext
  }
/**
* Return RDD for flow. If stream is not available exeption will thrown
*
* @param flow flow name
* @return
*/
def getRdd(flow: String): RDD[PValue] = rddByFlow.get(flow) match {
  case Some(rdd) => rdd
  case None => throw new IllegalStateException(s"Flow $flow is unavailable. Please check configuration.")
}
/**
* Assign RDD to flow
*
* @param flowRdd
*/
def +=(flowRdd: (String, RDD[PValue])) = rddByFlow = rddByFlow + flowRdd
} | andr83/parsek | spark/src/main/scala/com/github/andr83/parsek/spark/FlowRepository.scala | Scala | mit | 2,006 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import java.io.{ OutputStream, InputStream, Serializable }
import java.util.{ Random, UUID }
import cascading.flow.FlowDef
import cascading.pipe.{ Each, Pipe }
import cascading.tap.Tap
import cascading.tuple.{ Fields, TupleEntry }
import com.twitter.algebird.{ Aggregator, Batched, Monoid, Semigroup }
import com.twitter.scalding.TupleConverter.{ TupleEntryConverter, singleConverter, tuple2Converter }
import com.twitter.scalding.TupleSetter.{ singleSetter, tup2Setter }
import com.twitter.scalding._
import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.serialization.OrderedSerialization.Result
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering._
import scala.util.Try
/**
* factory methods for TypedPipe, which is the typed representation of distributed lists in scalding.
* This object is here rather than in the typed package because a lot of code was written using
* the functions in the object, which we do not see how to hide with package object tricks.
*/
object TypedPipe extends Serializable {
import Dsl.flowDefToRichFlowDef
/**
* Create a TypedPipe from a cascading Pipe, some Fields and the type T
* Avoid this if you can. Prefer from(TypedSource).
*/
def from[T](pipe: Pipe, fields: Fields)(implicit flowDef: FlowDef, mode: Mode, conv: TupleConverter[T]): TypedPipe[T] = {
  // Restrict the FlowDef to just the graph upstream of this pipe before wrapping it.
  val upstreamOnlyFlow = flowDef.onlyUpstreamFrom(pipe)
  new TypedPipeInst[T](pipe, fields, upstreamOnlyFlow, mode, Converter(conv))
}
/**
* Create a TypedPipe from a TypedSource. This is the preferred way to make a TypedPipe
*/
def from[T](source: TypedSource[T]): TypedPipe[T] =
TypedPipeFactory({ (fd, mode) =>
val pipe = source.read(fd, mode)
from(pipe, source.sourceFields)(fd, mode, source.converter)
})
/**
* Create a TypedPipe from an Iterable in memory.
*/
def from[T](iter: Iterable[T]): TypedPipe[T] =
IterablePipe[T](iter)
/**
* Input must be a Pipe with exactly one Field
* Avoid this method and prefer from(TypedSource) if possible
*/
def fromSingleField[T](pipe: Pipe)(implicit fd: FlowDef, mode: Mode): TypedPipe[T] =
from(pipe, new Fields(0))(fd, mode, singleConverter[T])
/**
* Create an empty TypedPipe. This is sometimes useful when a method must return
* a TypedPipe, but sometimes at runtime we can check a condition and see that
* it should be empty.
* This is the zero of the Monoid[TypedPipe]
*/
def empty: TypedPipe[Nothing] = EmptyTypedPipe
/**
* This enables pipe.hashJoin(that) or pipe.join(that) syntax
* This is a safe enrichment because hashJoinable and CoGroupable are
* only used in the argument position or to give cogroup, join, leftJoin, rightJoin, outerJoin
* methods. Since those methods are unlikely to be used on TypedPipe in the future, this
* enrichment seems safe.
*
* This method is the Vitaly-was-right method.
*/
implicit def toHashJoinable[K, V](pipe: TypedPipe[(K, V)])(implicit ord: Ordering[K]): HashJoinable[K, V] =
new HashJoinable[K, V] {
def mapped = pipe
def keyOrdering = ord
def reducers = None
val descriptions: Seq[String] = LineNumber.tryNonScaldingCaller.map(_.toString).toList
def joinFunction = CoGroupable.castingJoinFunction[V]
}
/**
* TypedPipe instances are monoids. They are isomorphic to multisets.
*/
implicit def typedPipeMonoid[T]: Monoid[TypedPipe[T]] = new Monoid[TypedPipe[T]] {
def zero = empty
def plus(left: TypedPipe[T], right: TypedPipe[T]): TypedPipe[T] =
left ++ right
}
// An OrderedSerialization[Int] that forwards serialization and comparison to the
// macro-generated ordering but overrides `hash` to return the key itself.
// Used where the Int key is already well-distributed (e.g. a random partition id
// in groupRandomly), so an extra hashing pass adds nothing.
private val identityOrdering: OrderedSerialization[Int] = {
  val delegate = BinaryOrdering.ordSer[Int]
  new OrderedSerialization[Int] {
    override def compareBinary(a: InputStream, b: InputStream): Result = delegate.compareBinary(a, b)
    override def compare(x: Int, y: Int): Int = delegate.compare(x, y)
    override def dynamicSize(t: Int): Option[Int] = delegate.dynamicSize(t)
    override def write(out: OutputStream, t: Int): Try[Unit] = delegate.write(out, t)
    override def read(in: InputStream): Try[Int] = delegate.read(in)
    override def staticSize: Option[Int] = delegate.staticSize
    // Identity hash: the only deviation from the delegate.
    override def hash(x: Int): Int = x
  }
}
}
/**
* Think of a TypedPipe as a distributed unordered list that may or may not yet
* have been materialized in memory or disk.
*
* Represents a phase in a distributed computation on an input data source
* Wraps a cascading Pipe object, and holds the transformation done up until that point
*/
trait TypedPipe[+T] extends Serializable {
/**
* Implements a cross product. The right side should be tiny
* This gives the same results as
* {code for { l <- list1; l2 <- list2 } yield (l, l2) }
*/
def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)]
/**
* This is the fundamental mapper operation.
* It behaves in a way similar to List.flatMap, which means that each
* item is fed to the input function, which can return 0, 1, or many outputs
* (as a TraversableOnce) per input. The returned results will be iterated through once
* and then flattened into a single TypedPipe which is passed to the next step in the
* pipeline.
*
* This behavior makes it a powerful operator -- it can be used to filter records
* (by returning 0 items for a given input), it can be used the way map is used
* (by returning 1 item per input), it can be used to explode 1 input into many outputs,
* or even a combination of all of the above at once.
*/
def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U]
/**
* Export back to a raw cascading Pipe. useful for interop with the scalding
* Fields API or with Cascading code.
* Avoid this if possible. Prefer to write to TypedSink.
*/
final def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
import Dsl._
// Ensure we hook into all pipes coming out of the typed API to apply the FlowState's properties on their pipes
val pipe = asPipe[U](fieldNames).applyFlowConfigProperties(flowDef)
RichPipe.setPipeDescriptionFrom(pipe, LineNumber.tryNonScaldingCaller)
}
/**
* Provide the internal implementation to get from a typed pipe to a cascading Pipe
*/
private[typed] def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe
/////////////////////////////////////////////
//
// The following have default implementations in terms of the above
//
/////////////////////////////////////////////
import Dsl._
/**
* Merge two TypedPipes (no order is guaranteed)
* This is only realized when a group (or join) is
* performed.
*/
def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] =
  other match {
    // Merging with a provably-empty pipe is a no-op; skip building a merge node.
    case EmptyTypedPipe => this
    case IterablePipe(iter) if iter.isEmpty => this
    case nonEmpty => MergedTypedPipe(this, nonEmpty)
  }
/**
* Aggregate all items in this pipe into a single ValuePipe
*
* Aggregators are composable reductions that allow you to glue together
* several reductions and process them in one pass.
*
* Same as groupAll.aggregate.values
*/
def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
ComputedValue(groupAll.aggregate(agg).values)
/**
* Put the items in this into the keys, and unit as the value in a Group
* in some sense, this is the dual of groupAll
*/
@annotation.implicitNotFound(msg = "For asKeys method to work, the type in TypedPipe must have an Ordering.")
def asKeys[U >: T](implicit ord: Ordering[U]): Grouped[U, Unit] =
map((_, ())).group
/**
* If T <:< U, then this is safe to treat as TypedPipe[U] due to covariance
*/
protected def raiseTo[U](implicit ev: T <:< U): TypedPipe[U] =
this.asInstanceOf[TypedPipe[U]]
/**
* Filter and map. See scala.collection.List.collect.
* {@code
* collect { case Some(x) => fn(x) }
* }
*/
def collect[U](fn: PartialFunction[T, U]): TypedPipe[U] =
filter(fn.isDefinedAt(_)).map(fn)
/**
* Attach a ValuePipe to each element this TypedPipe
*/
def cross[V](p: ValuePipe[V]): TypedPipe[(T, V)] =
p match {
case EmptyValue => EmptyTypedPipe
case LiteralValue(v) => map { (_, v) }
case ComputedValue(pipe) => cross(pipe)
}
/** prints the current pipe to stdout */
def debug: TypedPipe[T] = onRawSingle(_.debug)
/** adds a description to the pipe */
def withDescription(description: String): TypedPipe[T] = new WithDescriptionTypedPipe[T](this, description)
/**
* Returns the set of distinct elements in the TypedPipe
* This is the same as: .map((_, ())).group.sum.keys
* If you want a distinct while joining, consider:
* instead of:
* {@code
* a.join(b.distinct.asKeys)
* }
* manually do the distinct:
* {@code
* a.join(b.asKeys.sum)
* }
* The latter creates 1 map/reduce phase rather than 2
*/
@annotation.implicitNotFound(msg = "For distinct method to work, the type in TypedPipe must have an Ordering.")
def distinct(implicit ord: Ordering[_ >: T]): TypedPipe[T] =
asKeys(ord.asInstanceOf[Ordering[T]]).sum.keys
/**
 * Returns the set of distinct elements identified by a given lambda extractor in the TypedPipe.
 * Of several elements mapping to the same key, an arbitrary one is kept.
 *
 * @param fn          extracts the key used to decide distinctness
 * @param numReducers optional reducer count for the underlying group-by
 */
@annotation.implicitNotFound(msg = "For distinctBy method to work, the type to distinct on in the TypedPipe must have an Ordering.")
def distinctBy[U](fn: T => U, numReducers: Option[Int] = None)(implicit ord: Ordering[_ >: U]): TypedPipe[T] = {
  // cast because Ordering is not contravariant, but should be (and this cast is safe)
  implicit val ordT: Ordering[U] = ord.asInstanceOf[Ordering[U]]
  // Semigroup to handle duplicates for a given key might have different values.
  // "Last wins": plus discards the left value, so one representative survives per key.
  implicit val sg = new Semigroup[T] {
    def plus(a: T, b: T) = b
  }
  // Key each element by fn, collapse per key via the semigroup above...
  val op = map { tup => (fn(tup), tup) }.sumByKey
  val reduced = numReducers match {
    case Some(red) => op.withReducers(red)
    case None => op
  }
  // ...then drop the key, keeping only the surviving elements.
  reduced.map(_._2)
}
/** Merge two TypedPipes of different types by using Either */
def either[R](that: TypedPipe[R]): TypedPipe[Either[T, R]] =
map(Left(_)) ++ (that.map(Right(_)))
/**
* Sometimes useful for implementing custom joins with groupBy + mapValueStream when you know
* that the value/key can fit in memory. Beware.
*/
def eitherValues[K, V, R](that: TypedPipe[(K, R)])(implicit ev: T <:< (K, V)): TypedPipe[(K, Either[V, R])] =
mapValues { (v: V) => Left(v) } ++ (that.mapValues { (r: R) => Right(r) })
/**
* If you are going to create two branches or forks,
* it may be more efficient to call this method first
* which will create a node in the cascading graph.
* Without this, both full branches of the fork will be
* put into separate cascading pipes, which can, in some cases,
* be slower.
*
* Ideally the planner would see this
*/
def fork: TypedPipe[T] = onRawSingle(identity)
/**
* limit the output to at most count items, if at least count items exist.
*/
def limit(count: Int): TypedPipe[T] =
groupAll.bufferedTake(count).values
/** Transform each element via the function f */
def map[U](f: T => U): TypedPipe[U] = flatMap { t => Iterator(f(t)) }
/** Transform only the values (sometimes requires giving the types due to scala type inference) */
def mapValues[K, V, U](f: V => U)(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
raiseTo[(K, V)].map { case (k, v) => (k, f(v)) }
/** Similar to mapValues, but allows to return a collection of outputs for each input value */
def flatMapValues[K, V, U](f: V => TraversableOnce[U])(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
raiseTo[(K, V)].flatMap { case (k, v) => f(v).map { v2 => k -> v2 } }
/**
* Keep only items that satisfy this predicate
*/
def filter(f: T => Boolean): TypedPipe[T] =
flatMap { t => if (f(t)) Iterator(t) else Iterator.empty }
// This is just to appease for comprehension
def withFilter(f: T => Boolean): TypedPipe[T] = filter(f)
/**
* If T is a (K, V) for some V, then we can use this function to filter.
* Prefer to use this if your filter only touches the key.
*
* This is here to match the function in KeyedListLike, where it is optimized
*/
def filterKeys[K](fn: K => Boolean)(implicit ev: T <:< (K, Any)): TypedPipe[T] =
filter { ka => fn(ka.asInstanceOf[(K, Any)]._1) }
/**
* Keep only items that don't satisfy the predicate.
* `filterNot` is the same as `filter` with a negated predicate.
*/
def filterNot(f: T => Boolean): TypedPipe[T] =
filter(!f(_))
/** flatten an Iterable */
def flatten[U](implicit ev: T <:< TraversableOnce[U]): TypedPipe[U] =
flatMap { _.asInstanceOf[TraversableOnce[U]] } // don't use ev which may not be serializable
/**
* flatten just the values
* This is more useful on KeyedListLike, but added here to reduce assymmetry in the APIs
*/
def flattenValues[K, U](implicit ev: T <:< (K, TraversableOnce[U])): TypedPipe[(K, U)] =
raiseTo[(K, TraversableOnce[U])].flatMap { case (k, us) => us.map((k, _)) }
protected def onRawSingle(onPipe: Pipe => Pipe): TypedPipe[T] = {
val self = this
TypedPipeFactory({ (fd, m) =>
val pipe = self.toPipe[T](new Fields(java.lang.Integer.valueOf(0)))(fd, m, singleSetter)
TypedPipe.fromSingleField[T](onPipe(pipe))(fd, m)
})
}
/**
* Force a materialization of this pipe prior to the next operation.
* This is useful if you filter almost everything before a hashJoin, for instance.
* This is useful for experts who see some heuristic of the planner causing
* slower performance.
*/
def forceToDisk: TypedPipe[T] = onRawSingle(_.forceToDisk)
/**
* This is the default means of grouping all pairs with the same key. Generally this triggers 1 Map/Reduce transition
*/
def group[K, V](implicit ev: <:<[T, (K, V)], ord: Ordering[K]): Grouped[K, V] =
//If the type of T is not (K,V), then at compile time, this will fail. It uses implicits to do
//a compile time check that one type is equivalent to another. If T is not (K,V), we can't
//automatically group. We cast because it is safe to do so, and we need to convert to K,V, but
//the ev is not needed for the cast. In fact, you can do the cast with ev(t) and it will return
//it as (K,V), but the problem is, ev is not serializable. So we do the cast, which due to ev
//being present, will always pass.
Grouped(raiseTo[(K, V)]).withDescription(LineNumber.tryNonScaldingCaller.map(_.toString))
/** Send all items to a single reducer */
def groupAll: Grouped[Unit, T] = groupBy(x => ())(ordSer[Unit]).withReducers(1)
/** Given a key function, add the key, then call .group */
def groupBy[K](g: T => K)(implicit ord: Ordering[K]): Grouped[K, T] =
map { t => (g(t), t) }.group
/** Group using an explicit Ordering on the key. */
def groupWith[K, V](ord: Ordering[K])(implicit ev: <:<[T, (K, V)]): Grouped[K, V] = group(ev, ord)
/**
* Forces a shuffle by randomly assigning each item into one
* of the partitions.
*
* This is for the case where you mappers take a long time, and
* it is faster to shuffle them to more reducers and then operate.
*
* You probably want shard if you are just forcing a shuffle.
*/
def groupRandomly(partitions: Int): Grouped[Int, T] = {
// Make it lazy so all mappers get their own:
lazy val rng = new java.util.Random(123) // seed this so it is repeatable
groupBy { _ => rng.nextInt(partitions) }(TypedPipe.identityOrdering)
.withReducers(partitions)
}
/**
* Partitions this into two pipes according to a predicate.
*
* Sometimes what you really want is a groupBy in these cases.
*/
def partition(p: T => Boolean): (TypedPipe[T], TypedPipe[T]) = {
  // Fork once so the upstream pipeline is not planned twice for the two branches.
  val shared = fork
  val matching = shared.filter(p)
  val nonMatching = shared.filter(!p(_))
  (matching, nonMatching)
}
private[this] def defaultSeed: Long = System.identityHashCode(this) * 2654435761L ^ System.currentTimeMillis
/**
* Sample a fraction (between 0 and 1) uniformly independently at random each element of the pipe
* does not require a reduce step.
*/
def sample(fraction: Double): TypedPipe[T] = sample(fraction, defaultSeed)
/**
* Sample a fraction (between 0 and 1) uniformly independently at random each element of the pipe with
* a given seed.
* Does not require a reduce step.
*/
def sample(fraction: Double, seed: Long): TypedPipe[T] = {
  require(0.0 <= fraction && fraction <= 1.0, s"got $fraction which is an invalid fraction")
  // lazy: each mapper task builds its own generator from the fixed seed,
  // keeping the sample repeatable across task restarts
  lazy val rng = new Random(seed)
  filter { _ => rng.nextDouble < fraction }
}
/**
* This does a sum of values WITHOUT triggering a shuffle.
* the contract is, if followed by a group.sum the result is the same
* with or without this present, and it never increases the number of
* items. BUT due to the cost of caching, it might not be faster if
* there is poor key locality.
*
* It is only useful for expert tuning,
* and best avoided unless you are struggling with performance problems.
* If you are not sure you need this, you probably don't.
*
* The main use case is to reduce the values down before a key expansion
* such as is often done in a data cube.
*/
def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = {
val fields: Fields = ('key, 'value)
val selfKV = raiseTo[(K, V)]
TypedPipeFactory({ (fd, mode) =>
val pipe = selfKV.toPipe(fields)(fd, mode, tup2Setter)
val msr = new MapsideReduce(sg, 'key, 'value, None)(singleConverter[V], singleSetter[V])
TypedPipe.from[(K, V)](pipe.eachTo(fields -> fields) { _ => msr }, fields)(fd, mode, tuple2Converter)
})
}
/**
* Used to force a shuffle into a given size of nodes.
* Only use this if your mappers are taking far longer than
* the time to shuffle.
*/
def shard(partitions: Int): TypedPipe[T] = groupRandomly(partitions).forceToReducers.values
/**
* Reasonably common shortcut for cases of total associative/commutative reduction
* returns a ValuePipe with only one element if there is any input, otherwise EmptyValue.
*/
def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] = {
// every 1000 items, compact.
lazy implicit val batchedSG = Batched.compactingSemigroup[U](1000)
ComputedValue(map { t => ((), Batched[U](t)) }
.sumByLocalKeys
// remove the Batched before going to the reducers
.map { case (_, batched) => batched.sum }
.groupAll
.forceToReducers
.sum
.values)
}
/**
* Reasonably common shortcut for cases of associative/commutative reduction by Key
*/
def sumByKey[K, V](implicit ev: T <:< (K, V), ord: Ordering[K], plus: Semigroup[V]): UnsortedGrouped[K, V] =
group[K, V].sum[V]
/**
* This is used when you are working with Execution[T] to create loops.
* You might do this to checkpoint and then flatMap Execution to continue
* from there. Probably only useful if you need to flatMap it twice to fan
* out the data into two children jobs.
*
* This writes the current TypedPipe into a temporary file
* and then opens it after complete so that you can continue from that point
*/
def forceToDiskExecution: Execution[TypedPipe[T]] = {
  // Fixed once per call so the write phase and the later read phase of the
  // Execution name the same temporary file.
  val cachedRandomUUID = java.util.UUID.randomUUID
  // In local/test mode we materialize into memory rather than a file.
  lazy val inMemoryDest = new MemorySink[T]
  // Builds <tmpdir>/scalding/snapshot-<uuid>.seq, preferring hadoop.tmp.dir,
  // then cascading.tmp.dir, then /tmp.
  def temporaryPath(conf: Config, uuid: UUID): String = {
    val tmpDir = conf.get("hadoop.tmp.dir")
      .orElse(conf.get("cascading.tmp.dir"))
      .getOrElse("/tmp")
    tmpDir + "/scalding/snapshot-" + uuid + ".seq"
  }
  def hadoopTypedSource(conf: Config): TypedSource[T] with TypedSink[T] = {
    // come up with unique temporary filename, use the config here
    // TODO: refactor into TemporarySequenceFile class
    val tmpSeq = temporaryPath(conf, cachedRandomUUID)
    source.TypedSequenceFile[T](tmpSeq)
  }
  // Where to write this pipe when the Execution runs (mode decides memory vs. file).
  val writeFn = { (conf: Config, mode: Mode) =>
    mode match {
      case _: CascadingLocal => // Local or Test mode
        (this, inMemoryDest)
      case _: HadoopMode =>
        (this, hadoopTypedSource(conf))
    }
  }
  // How to reopen what was written, as a fresh TypedPipe.
  val readFn = { (conf: Config, mode: Mode) =>
    mode match {
      case _: CascadingLocal => // Local or Test mode
        TypedPipe.from(inMemoryDest.readResults)
      case _: HadoopMode =>
        TypedPipe.from(hadoopTypedSource(conf))
    }
  }
  // Temp files to delete when the Execution finishes (nothing in local mode).
  val filesToDeleteFn = { (conf: Config, mode: Mode) =>
    mode match {
      case _: CascadingLocal => // Local or Test mode
        Set[String]()
      case _: HadoopMode =>
        Set(temporaryPath(conf, cachedRandomUUID))
    }
  }
  Execution.write(writeFn, readFn, filesToDeleteFn)
}
/**
* This gives an Execution that when run evaluates the TypedPipe,
* writes it to disk, and then gives you an Iterable that reads from
* disk on the submit node each time .iterator is called.
* Because of how scala Iterables work, mapping/flatMapping/filtering
* the Iterable forces a read of the entire thing. If you need it to
* be lazy, call .iterator and use the Iterator inside instead.
*/
def toIterableExecution: Execution[Iterable[T]] =
forceToDiskExecution.flatMap(_.toIterableExecution)
/** use a TupleUnpacker to flatten U out into a cascading Tuple */
def unpackToPipe[U >: T](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, up: TupleUnpacker[U]): Pipe = {
val setter = up.newSetter(fieldNames)
toPipe[U](fieldNames)(fd, mode, setter)
}
/**
* This attaches a function that is called at the end of the map phase on
* EACH of the tasks that are executing.
* This is for expert use only. You probably won't ever need it. Try hard
* to avoid it. Execution also has onComplete that can run when an Execution
* has completed.
*/
def onComplete(fn: () => Unit): TypedPipe[T] = new WithOnComplete[T](this, fn)
/**
* Safely write to a TypedSink[T]. If you want to write to a Source (not a Sink)
* you need to do something like: toPipe(fieldNames).write(dest)
* @return a pipe equivalent to the current pipe.
*/
def write(dest: TypedSink[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = {
// Make sure that we don't render the whole pipeline twice:
val res = fork
dest.writeFrom(res.toPipe[T](dest.sinkFields)(flowDef, mode, dest.setter))
res
}
/**
* This is the functionally pure approach to building jobs. Note,
* that you have to call run on the result or flatMap/zip it
* into an Execution that is run for anything to happen here.
*/
def writeExecution(dest: TypedSink[T]): Execution[Unit] =
Execution.write(this, dest)
/**
* If you want to write to a specific location, and then read from
* that location going forward, use this.
*/
def writeThrough[U >: T](dest: TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
Execution.write(this, dest, TypedPipe.from(dest))
/**
* If you want to writeThrough to a specific file if it doesn't already exist,
* and otherwise just read from it going forward, use this.
*/
def make[U >: T](dest: Source with TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
Execution.getMode.flatMap { mode =>
try {
dest.validateTaps(mode)
Execution.from(TypedPipe.from(dest))
} catch {
case ivs: InvalidSourceException => writeThrough(dest)
}
}
/** Just keep the keys, or ._1 (if this type is a Tuple2) */
def keys[K](implicit ev: <:<[T, (K, Any)]): TypedPipe[K] =
// avoid capturing ev in the closure:
raiseTo[(K, Any)].map(_._1)
/** swap the keys with the values */
def swap[K, V](implicit ev: <:<[T, (K, V)]): TypedPipe[(V, K)] =
raiseTo[(K, V)].map(_.swap)
/** Just keep the values, or ._2 (if this type is a Tuple2) */
def values[V](implicit ev: <:<[T, (Any, V)]): TypedPipe[V] =
raiseTo[(Any, V)].map(_._2)
/**
* ValuePipe may be empty, so, this attaches it as an Option
* cross is the same as leftCross(p).collect { case (t, Some(v)) => (t, v) }
*/
def leftCross[V](p: ValuePipe[V]): TypedPipe[(T, Option[V])] =
  p match {
    // No value at all: everything pairs with None.
    case EmptyValue => map { t => (t, None) }
    // Known literal: pair without any join.
    case LiteralValue(v) => map { t => (t, Some(v)) }
    // Deferred value: fall through to the pipe-based leftCross.
    case ComputedValue(pipe) => leftCross(pipe)
  }
/** uses hashJoin but attaches None if thatPipe is empty */
def leftCross[V](thatPipe: TypedPipe[V]): TypedPipe[(T, Option[V])] =
map(((), _)).hashLeftJoin(thatPipe.groupAll).values
/**
* common pattern of attaching a value and then map
* recommended style:
* {@code
* mapWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def mapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => V): TypedPipe[V] =
leftCross(value).map(t => f(t._1, t._2))
/**
* common pattern of attaching a value and then flatMap
* recommended style:
* {@code
* flatMapWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def flatMapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => TraversableOnce[V]): TypedPipe[V] =
leftCross(value).flatMap(t => f(t._1, t._2))
/**
* common pattern of attaching a value and then filter
* recommended style:
* {@code
* filterWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def filterWithValue[U](value: ValuePipe[U])(f: (T, Option[U]) => Boolean): TypedPipe[T] =
leftCross(value).filter(t => f(t._1, t._2)).map(_._1)
/**
 * These operations look like joins, but they do not force any communication
 * of the current TypedPipe. They are mapping operations where this pipe is streamed
 * through one item at a time.
 *
 * WARNING These behave semantically very differently than cogroup.
 * This is because we handle (K,V) tuples on the left as we see them.
 * The iterable on the right is over all elements with a matching key K, and it may be empty
 * if there are no values for this key K.
 */
// Delegates to the replicated (smaller) side. `ev` witnesses that this pipe
// is key-value shaped without putting (K, V) in TypedPipe's type parameter.
def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
  smaller.hashCogroupOn(ev(this))(joiner)
/** Do an inner-join without shuffling this TypedPipe, but replicating argument to all tasks */
// `smaller` must fit in memory on each task; keys with no match on the right are dropped.
def hashJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, W))] =
  hashCogroup[K, V, W, (V, W)](smaller)(Joiner.hashInner2)
/** Do a left join without shuffling this TypedPipe, but replicating argument to all tasks */
// Left elements are always kept; unmatched keys pair with None.
def hashLeftJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, Option[W]))] =
  hashCogroup[K, V, W, (V, Option[W])](smaller)(Joiner.hashLeft2)
/**
 * For each element, do a map-side (hash) left join to look up a value.
 * The element itself serves as the key; the looked-up value is None when
 * `grouped` has no entry for it.
 */
def hashLookup[K >: T, V](grouped: HashJoinable[K, V]): TypedPipe[(K, Option[V])] = {
  val keyed = map { t => (t, ()) }
  keyed
    .hashLeftJoin(grouped)
    .map { case (k, (_, optV)) => (k, optV) }
}
/**
 * Enables joining when this TypedPipe has some keys with many many values and
 * but many with very few values. For instance, a graph where some nodes have
 * millions of neighbors, but most have only a few.
 *
 * We build a (count-min) sketch of each key's frequency, and we use that
 * to shard the heavy keys across many reducers.
 * This increases communication cost in order to reduce the maximum time needed
 * to complete the join.
 *
 * {@code pipe.sketch(100).join(thatPipe) }
 * will add an extra map/reduce job over a standard join to create the count-min-sketch.
 * This will generally only be beneficial if you have really heavy skew, where without
 * this you have 1 or 2 reducers taking hours longer than the rest.
 *
 * @param reducers number of reducers to shard heavy keys across
 * @param eps error tolerance of the count-min sketch
 * @param delta failure probability of the count-min sketch
 * @param seed seed for the sketch's hash functions
 */
def sketch[K, V](reducers: Int,
  eps: Double = 1.0E-5, //272k width = 1MB per row
  delta: Double = 0.01, //5 rows (= 5 hashes)
  seed: Int = 12345)(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)],
    serialization: K => Array[Byte],
    ordering: Ordering[K]): Sketched[K, V] =
  // NOTE(review): eps and delta are passed in (delta, eps) order, the reverse
  // of this signature — verify this matches Sketched's parameter list.
  Sketched(ev(this), reducers, delta, eps, seed)
/**
 * If any errors happen below this line, but before a groupBy, write to a TypedSink
 */
def addTrap[U >: T](trapSink: Source with TypedSink[T])(implicit conv: TupleConverter[U]): TypedPipe[U] =
  TypedPipeFactory({ (flowDef, mode) =>
    val fields = trapSink.sinkFields
    // TODO: with diamonds in the graph, this might not be correct
    // fork gives the trap a dedicated branch; assignName is required because
    // cascading registers traps against a named pipe.
    val pipe = RichPipe.assignName(fork.toPipe[T](fields)(flowDef, mode, trapSink.setter))
    flowDef.addTrap(pipe, trapSink.createTap(Write)(mode))
    TypedPipe.from[U](pipe, fields)(flowDef, mode, conv)
  })
}
/**
 * This object is the EmptyTypedPipe. Prefer to create it with TypedPipe.empty
 */
final case object EmptyTypedPipe extends TypedPipe[Nothing] {
  // Aggregating nothing yields no value.
  override def aggregate[B, C](agg: Aggregator[Nothing, B, C]): ValuePipe[C] = EmptyValue
  // Cross product with empty is always empty.
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(Nothing, U)] = this
  // All element-wise operations on empty are empty; no work is scheduled.
  override def distinct(implicit ord: Ordering[_ >: Nothing]) = this
  override def flatMap[U](f: Nothing => TraversableOnce[U]) = this
  override def fork: TypedPipe[Nothing] = this
  override def forceToDisk = this
  override def leftCross[V](p: ValuePipe[V]) = this
  override def limit(count: Int) = this
  override def debug: TypedPipe[Nothing] = this
  // Merging with empty is the identity on the other pipe.
  override def ++[U >: Nothing](other: TypedPipe[U]): TypedPipe[U] = other
  // Materializes as an empty IterableSource so cascading still sees a valid pipe.
  override def asPipe[U >: Nothing](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
    IterableSource(Iterable.empty, fieldNames)(setter, singleConverter[U]).read(fd, mode)
  override def toIterableExecution: Execution[Iterable[Nothing]] = Execution.from(Iterable.empty)
  override def forceToDiskExecution: Execution[TypedPipe[Nothing]] = Execution.from(this)
  override def sum[U >: Nothing](implicit plus: Semigroup[U]): ValuePipe[U] = EmptyValue
  override def sumByLocalKeys[K, V](implicit ev: Nothing <:< (K, V), sg: Semigroup[V]) = this
  // hash-cogroup of empty with anything is empty.
  override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[Nothing] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
    this
}
/**
 * Creates a TypedPipe from an Iterable[T]. Prefer TypedPipe.from.
 *
 * If you avoid toPipe, this class is more efficient than IterableSource.
 */
final case class IterablePipe[T](iterable: Iterable[T]) extends TypedPipe[T] {
  // Aggregation runs locally; an empty iterable produces EmptyValue.
  override def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
    Some(iterable)
      .filterNot(_.isEmpty)
      .map(it => LiteralValue(agg(it)))
      .getOrElse(EmptyValue)
  // Merge locally when both sides are in memory; otherwise build a MergedTypedPipe.
  override def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
    case IterablePipe(thatIter) => IterablePipe(iterable ++ thatIter)
    case EmptyTypedPipe => this
    case _ if iterable.isEmpty => other
    case _ => MergedTypedPipe(this, other)
  }
  override def cross[U](tiny: TypedPipe[U]) =
    tiny.flatMap { u => iterable.map { (_, u) } }
  // Filtering happens eagerly in memory; an empty result collapses to EmptyTypedPipe.
  override def filter(f: T => Boolean): TypedPipe[T] =
    iterable.filter(f) match {
      case eit if eit.isEmpty => EmptyTypedPipe
      case filtered => IterablePipe(filtered)
    }
  /**
   * When flatMap is called on an IterablePipe, we defer to make sure that f is
   * applied lazily, which avoids OOM issues when the returned value from the
   * map is larger than the input
   */
  override def flatMap[U](f: T => TraversableOnce[U]) =
    toSourcePipe.flatMap(f)
  override def fork: TypedPipe[T] = this
  override def forceToDisk = this
  override def limit(count: Int): TypedPipe[T] = IterablePipe(iterable.take(count))
  /**
   * When map is called on an IterablePipe, we defer to make sure that f is
   * applied lazily, which avoids OOM issues when the returned value from the
   * map is larger than the input
   */
  override def map[U](f: T => U): TypedPipe[U] =
    toSourcePipe.map(f)
  // Already materialized in memory, so forcing to disk is a no-op.
  override def forceToDiskExecution: Execution[TypedPipe[T]] = Execution.from(this)
  override def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] =
    Semigroup.sumOption[U](iterable).map(LiteralValue(_))
      .getOrElse(EmptyValue)
  override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) = {
    // raiseTo re-types this pipe as (K, V); for IterablePipe it must stay an IterablePipe.
    val kvit = raiseTo[(K, V)] match {
      case IterablePipe(kviter) => kviter
      case p => sys.error("This must be IterablePipe: " + p.toString)
    }
    IterablePipe(kvit.groupBy(_._1)
      // use map to force this so it is not lazy.
      .map {
        case (k, kvs) =>
          // These lists are never empty, get is safe.
          (k, Semigroup.sumOption(kvs.iterator.map(_._2)).get)
      })
  }
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
    // It is slightly more efficient to use this rather than toSourcePipe.toPipe(fieldNames)
    IterableSource[U](iterable, fieldNames)(setter, singleConverter[U]).read(flowDef, mode)
  // Wraps the iterable in a real source so map/flatMap run lazily at read time.
  private[this] def toSourcePipe =
    TypedPipe.from(
      IterableSource[T](iterable, new Fields("0"))(singleSetter, singleConverter))
  override def toIterableExecution: Execution[Iterable[T]] = Execution.from(iterable)
}
/**
 * This is an implementation detail (and should be marked private)
 */
object TypedPipeFactory {
  def apply[T](next: (FlowDef, Mode) => TypedPipe[T]): TypedPipeFactory[T] = {
    // Memoize per FlowDef so replanning the same flow reuses the same
    // TypedPipe; WeakHashMap lets abandoned FlowDefs be garbage collected.
    val memo = new java.util.WeakHashMap[FlowDef, (Mode, TypedPipe[T])]()
    val fn = { (fd: FlowDef, m: Mode) =>
      memo.synchronized {
        memo.get(fd) match {
          case null =>
            val res = next(fd, m)
            memo.put(fd, (m, res))
            res
          case (memoMode, pipe) if memoMode == m => pipe
          // Same FlowDef with a different Mode is a programming error.
          case (memoMode, pipe) =>
            sys.error("FlowDef reused on different Mode. Original: %s, now: %s".format(memoMode, m))
        }
      }
    }
    new TypedPipeFactory(NoStackAndThen(fn.tupled))
  }
  // Extracts the deferred (FlowDef, Mode) => TypedPipe function, if `tp` is a factory.
  def unapply[T](tp: TypedPipe[T]): Option[NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]] =
    tp match {
      case tp: TypedPipeFactory[_] =>
        Some(tp.asInstanceOf[TypedPipeFactory[T]].next)
      case _ => None
    }
}
/**
 * This is a TypedPipe that delays having access
 * to the FlowDef and Mode until toPipe is called
 */
class TypedPipeFactory[T] private (@transient val next: NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]) extends TypedPipe[T] {
  // Compose the deferred computation without evaluating it.
  private[this] def andThen[U](fn: TypedPipe[T] => TypedPipe[U]): TypedPipe[U] =
    new TypedPipeFactory(next.andThen(fn))
  override def cross[U](tiny: TypedPipe[U]) = andThen(_.cross(tiny))
  override def filter(f: T => Boolean): TypedPipe[T] = andThen(_.filter(f))
  override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] = andThen(_.flatMap(f))
  override def map[U](f: T => U): TypedPipe[U] = andThen(_.map(f))
  override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) =
    andThen(_.sumByLocalKeys[K, V])
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
    // unwrap in a loop, without recursing
    val (unwrapped, st) = unwrap(this, Array())
    val pipe = unwrapped.asPipe[U](fieldNames)(flowDef, mode, setter)
    RichPipe.setPipeDescriptionFrom(pipe, LineNumber.tryNonScaldingCaller(st))
    pipe
  }
  override def toIterableExecution: Execution[Iterable[T]] = Execution.getConfigMode.flatMap {
    case (conf, mode) =>
      // This can only terminate in TypedPipeInst, which will
      // keep the reference to this flowDef
      val flowDef = new FlowDef
      // The accumulated stack traces are not needed here, only the pipe
      // (was bound to an unused local `stackTraces`).
      val (nextPipe, _) = unwrap(this, Array())(flowDef, mode)
      nextPipe.toIterableExecution
  }
  /**
   * Repeatedly applies the deferred function until a concrete TypedPipe is
   * reached, accumulating any recorded stack traces along the way.
   */
  @annotation.tailrec
  private def unwrap(pipe: TypedPipe[T], st: Array[StackTraceElement])(implicit flowDef: FlowDef, mode: Mode): (TypedPipe[T], Array[StackTraceElement]) = pipe match {
    case TypedPipeFactory(n) =>
      val fullTrace = n match {
        // Pattern variable renamed from `st` to avoid shadowing the accumulator parameter.
        case NoStackAndThen.WithStackTrace(_, trace) => trace
        case _ => Array[StackTraceElement]()
      }
      unwrap(n(flowDef, mode), st ++ fullTrace)
    case tp => (tp, st)
  }
}
/**
 * This is an instance of a TypedPipe that wraps a cascading Pipe
 */
class TypedPipeInst[T] private[scalding] (@transient inpipe: Pipe,
  fields: Fields,
  @transient localFlowDef: FlowDef,
  @transient val mode: Mode,
  flatMapFn: FlatMapFn[T]) extends TypedPipe[T] {
  /**
   * If this TypedPipeInst represents a Source that was opened with no
   * filtering or mapping
   */
  private[scalding] def openIfHead: Option[(Tap[_, _, _], Fields, FlatMapFn[T])] =
    // Keep this local
    // A pipe with no predecessors must be registered as a source in the flowDef.
    if (inpipe.getPrevious.isEmpty) {
      val srcs = localFlowDef.getSources
      if (srcs.containsKey(inpipe.getName)) {
        Some((srcs.get(inpipe.getName), fields, flatMapFn))
      } else {
        sys.error("Invalid head: pipe has no previous, but there is no registered source.")
      }
    } else None
  // Guards against mixing a pipe created under one Mode into a flow run in another.
  def checkMode(m: Mode): Unit =
    // This check is not likely to fail unless someone does something really strange.
    // for historical reasons, it is not checked by the typed system
    assert(m == mode,
      "Cannot switch Mode between TypedSource.read and toPipe calls. Pipe: %s, call: %s".format(mode, m))
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
    case EmptyTypedPipe => EmptyTypedPipe
    case MergedTypedPipe(l, r) => MergedTypedPipe(cross(l), cross(r))
    // In-memory right side: expand locally without any join machinery.
    case IterablePipe(iter) => flatMap { t => iter.map { (t, _) } }
    // This should work for any, TODO, should we just call this?
    case _ => map(((), _)).hashJoin(tiny.groupAll).values
  }
  // filter/flatMap/map fuse into the single FlatMapFn, so the whole chain
  // later runs in one cascading operation rather than one per call.
  override def filter(f: T => Boolean): TypedPipe[T] =
    new TypedPipeInst[T](inpipe, fields, localFlowDef, mode, flatMapFn.filter(f))
  override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
    new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.flatMap(f))
  override def map[U](f: T => U): TypedPipe[U] =
    new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.map(f))
  /**
   * Avoid this method if possible. Prefer to stay in the TypedAPI until
   * you write out.
   *
   * This actually runs all the pure map functions in one Cascading Each
   * This approach is more efficient than untyped scalding because we
   * don't use TupleConverters/Setters after each map.
   */
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, m: Mode, setter: TupleSetter[U]): Pipe = {
    import Dsl.flowDefToRichFlowDef
    checkMode(m)
    // Merge the locally accumulated sources/pipes into the caller's flowDef.
    flowDef.mergeFrom(localFlowDef)
    RichPipe(inpipe).flatMapTo[TupleEntry, U](fields -> fieldNames)(flatMapFn)
  }
  // Map-side pre-aggregation: sums values sharing a key within a task before
  // any shuffle, emitting ('key, 'value) pairs.
  override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = {
    import Dsl.{ fields => ofields, _ }
    val destFields: Fields = ('key, 'value)
    val selfKV = raiseTo[(K, V)]
    val msr = new TypedMapsideReduce[K, V](
      flatMapFn.asInstanceOf[FlatMapFn[(K, V)]],
      sg,
      fields,
      'key,
      'value,
      None)(tup2Setter)
    TypedPipe.from[(K, V)](
      inpipe.eachTo(fields -> destFields) { _ => msr },
      destFields)(localFlowDef, mode, tuple2Converter)
  }
  override def toIterableExecution: Execution[Iterable[T]] =
    openIfHead match {
      // TODO: it might be good to apply flatMaps locally,
      // since we obviously need to iterate all,
      // but filters we might want the cluster to apply
      // for us. So unwind until you hit the first filter, snapshot,
      // then apply the unwound functions
      case Some((tap, fields, Converter(conv))) =>
        // To convert from java iterator to scala below
        import scala.collection.JavaConverters._
        Execution.getConfigMode.map {
          case (conf, m) =>
            // Verify the mode has not changed due to invalid TypedPipe DAG construction
            checkMode(m)
            new Iterable[T] {
              def iterator = m.openForRead(conf, tap).asScala.map(tup => conv(tup.selectEntry(fields)))
            }
        }
      // Not an unmodified source head: materialize to disk first, then iterate.
      case _ => forceToDiskExecution.flatMap(_.toIterableExecution)
    }
}
// Represents the concatenation (multiset union) of two pipes.
final case class MergedTypedPipe[T](left: TypedPipe[T], right: TypedPipe[T]) extends TypedPipe[T] {
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
    case EmptyTypedPipe => EmptyTypedPipe
    case _ => MergedTypedPipe(left.cross(tiny), right.cross(tiny))
  }
  // All element-wise operations distribute over both branches.
  override def debug: TypedPipe[T] =
    MergedTypedPipe(left.debug, right.debug)
  override def filter(f: T => Boolean): TypedPipe[T] =
    MergedTypedPipe(left.filter(f), right.filter(f))
  override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
    MergedTypedPipe(left.flatMap(f), right.flatMap(f))
  override def sample(fraction: Double, seed: Long): TypedPipe[T] =
    MergedTypedPipe(left.sample(fraction, seed), right.sample(fraction, seed))
  override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] =
    MergedTypedPipe(left.sumByLocalKeys, right.sumByLocalKeys)
  override def map[U](f: T => U): TypedPipe[U] =
    MergedTypedPipe(left.map(f), right.map(f))
  override def fork: TypedPipe[T] =
    MergedTypedPipe(left.fork, right.fork)
  // Flattens a tree of merges into a flat list, forcing any deferred
  // TypedPipeFactory nodes along the way.
  @annotation.tailrec
  private def flattenMerge(toFlatten: List[TypedPipe[T]], acc: List[TypedPipe[T]])(implicit fd: FlowDef, m: Mode): List[TypedPipe[T]] =
    toFlatten match {
      case MergedTypedPipe(l, r) :: rest => flattenMerge(l :: r :: rest, acc)
      case TypedPipeFactory(next) :: rest => flattenMerge(next(fd, m) :: rest, acc)
      case nonmerge :: rest => flattenMerge(rest, nonmerge :: acc)
      case Nil => acc
    }
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
    /*
     * Cascading can't handle duplicate pipes in merges. What we do here is see if any pipe appears
     * multiple times and if it does we can do self merges using flatMap.
     * Finally, if there is actually more than one distinct TypedPipe, we use the cascading
     * merge primitive. When using the merge primitive we rename all pipes going into it as
     * Cascading cannot handle multiple pipes with the same name.
     */
    val merged = flattenMerge(List(this), Nil)
      // check for repeated pipes
      .groupBy(identity)
      .mapValues(_.size)
      .map {
        case (pipe, 1) => pipe
        // A pipe merged with itself cnt times: emit each element cnt times instead.
        case (pipe, cnt) => pipe.flatMap(List.fill(cnt)(_).iterator)
      }
      .map(_.toPipe[U](fieldNames)(flowDef, mode, setter)) // linter:ignore
      .toList
    if (merged.size == 1) {
      // there is no actual merging here, no need to rename:
      merged.head
    } else {
      new cascading.pipe.Merge(merged.map(RichPipe.assignName): _*)
    }
  }
  override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
    MergedTypedPipe(left.hashCogroup(smaller)(joiner), right.hashCogroup(smaller)(joiner))
}
// Passes tuples through unchanged while registering `fn` to run once on
// task completion (via CleanupIdentityFunction).
case class WithOnComplete[T](typedPipe: TypedPipe[T], fn: () => Unit) extends TypedPipe[T] {
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
    val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
    new Each(pipe, Fields.ALL, new CleanupIdentityFunction(fn), Fields.REPLACE)
  }
  // Keep the completion hook attached across transformations.
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
    WithOnComplete(typedPipe.cross(tiny), fn)
  override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
    WithOnComplete(typedPipe.flatMap(f), fn)
}
// Attaches a human-readable description to the underlying cascading pipe
// (visible in planning/monitoring output) without changing its data.
case class WithDescriptionTypedPipe[T](typedPipe: TypedPipe[T], description: String) extends TypedPipe[T] {
  override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
    val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
    RichPipe.setPipeDescriptions(pipe, List(description))
  }
  // Keep the description attached across transformations.
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
    WithDescriptionTypedPipe(typedPipe.cross(tiny), description)
  override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
    WithDescriptionTypedPipe(typedPipe.flatMap(f), description)
}
/**
 * Syntax enrichment that adds .joinBy-style helpers to TypedPipes.
 * Each helper groups both sides by the supplied key functions and delegates
 * to the corresponding CoGrouped join. To access this, do
 * import Syntax.joinOnMappablePipe
 */
class MappablePipeJoinEnrichment[T](pipe: TypedPipe[T]) {
  def joinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, U)] =
    pipe.groupBy(g).withReducers(reducers).join(smaller.groupBy(h))
  def leftJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, Option[U])] =
    pipe.groupBy(g).withReducers(reducers).leftJoin(smaller.groupBy(h))
  def rightJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], U)] =
    pipe.groupBy(g).withReducers(reducers).rightJoin(smaller.groupBy(h))
  def outerJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], Option[U])] =
    pipe.groupBy(g).withReducers(reducers).outerJoin(smaller.groupBy(h))
}
/**
 * These are named syntax extensions that users can optionally import.
 * Avoid import Syntax._
 */
object Syntax {
  // Opt-in implicit enabling .joinBy / .leftJoinBy / .rightJoinBy / .outerJoinBy on any TypedPipe.
  implicit def joinOnMappablePipe[T](p: TypedPipe[T]): MappablePipeJoinEnrichment[T] = new MappablePipeJoinEnrichment(p)
}
| tresata/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/TypedPipe.scala | Scala | apache-2.0 | 46,957 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.source.libsvm
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.io.{NullWritable, Text}
import org.apache.hadoop.mapreduce.{Job, RecordWriter, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
import org.apache.spark.annotation.Since
import org.apache.spark.mllib.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{DataFrame, DataFrameReader, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, JoinedRow}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration
// Writes rows of (label, features: Vector) in LIBSVM text format:
// "<label> <index1>:<value1> <index2>:<value2> ..." with 1-based indices.
private[libsvm] class LibSVMOutputWriter(
    path: String,
    dataSchema: StructType,
    context: TaskAttemptContext)
  extends OutputWriter {
  // Reused for every record to avoid allocating a Text object per row.
  private[this] val buffer = new Text()
  private val recordWriter: RecordWriter[NullWritable, Text] = {
    new TextOutputFormat[NullWritable, Text]() {
      override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = {
        val configuration = context.getConfiguration
        // The per-job UUID keeps concurrent write jobs from clobbering each other's files.
        val uniqueWriteJobId = configuration.get("spark.sql.sources.writeJobUUID")
        val taskAttemptId = context.getTaskAttemptID
        val split = taskAttemptId.getTaskID.getId
        new Path(path, f"part-r-$split%05d-$uniqueWriteJobId$extension")
      }
    }.getRecordWriter(context)
  }
  override def write(row: Row): Unit = {
    // Row layout is fixed by the source's schema: column 0 label, column 1 features.
    val label = row.get(0)
    val vector = row.get(1).asInstanceOf[Vector]
    val sb = new StringBuilder(label.toString)
    // foreachActive visits only stored entries of sparse vectors; LIBSVM indices are 1-based.
    vector.foreachActive { case (i, v) =>
      sb += ' '
      sb ++= s"${i + 1}:$v"
    }
    buffer.set(sb.mkString)
    recordWriter.write(NullWritable.get(), buffer)
  }
  override def close(): Unit = {
    recordWriter.close(context)
  }
}
/**
 * `libsvm` package implements Spark SQL data source API for loading LIBSVM data as [[DataFrame]].
 * The loaded [[DataFrame]] has two columns: `label` containing labels stored as doubles and
 * `features` containing feature vectors stored as [[Vector]]s.
 *
 * To use LIBSVM data source, you need to set "libsvm" as the format in [[DataFrameReader]] and
 * optionally specify options, for example:
 * {{{
 *   // Scala
 *   val df = spark.read.format("libsvm")
 *     .option("numFeatures", "780")
 *     .load("data/mllib/sample_libsvm_data.txt")
 *
 *   // Java
 *   DataFrame df = spark.read().format("libsvm")
 *     .option("numFeatures, "780")
 *     .load("data/mllib/sample_libsvm_data.txt");
 * }}}
 *
 * LIBSVM data source supports the following options:
 *  - "numFeatures": number of features.
 *    If unspecified or nonpositive, the number of features will be determined automatically at the
 *    cost of one additional pass.
 *    This is also useful when the dataset is already split into multiple files and you want to load
 *    them separately, because some features may not present in certain files, which leads to
 *    inconsistent feature dimensions.
 *  - "vectorType": feature vector type, "sparse" (default) or "dense".
 *
 * @see [[https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/ LIBSVM datasets]]
 */
@Since("1.6.0")
class DefaultSource extends FileFormat with DataSourceRegister {
  @Since("1.6.0")
  override def shortName(): String = "libsvm"
  override def toString: String = "LibSVM"
  /** Rejects any schema that is not exactly (label: Double, features: Vector). */
  private def verifySchema(dataSchema: StructType): Unit = {
    if (dataSchema.size != 2 ||
      (!dataSchema(0).dataType.sameType(DataTypes.DoubleType)
        || !dataSchema(1).dataType.sameType(new VectorUDT()))) {
      throw new IOException(s"Illegal schema for libsvm data, schema=$dataSchema")
    }
  }
  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    // The schema is fixed for this source; nothing is read from the files here.
    Some(
      StructType(
        StructField("label", DoubleType, nullable = false) ::
          StructField("features", new VectorUDT(), nullable = false) :: Nil))
  }
  override def prepareRead(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Map[String, String] = {
    // Determines the feature dimension with one extra pass over the single input file.
    def computeNumFeatures(): Int = {
      // Ignore metadata files such as _SUCCESS.
      val dataFiles = files.filterNot(_.getPath.getName startsWith "_")
      val path = if (dataFiles.length == 1) {
        dataFiles.head.getPath.toUri.toString
      } else if (dataFiles.isEmpty) {
        throw new IOException("No input path specified for libsvm data")
      } else {
        throw new IOException("Multiple input paths are not supported for libsvm data.")
      }
      val sc = sparkSession.sparkContext
      val parsed = MLUtils.parseLibSVMFile(sc, path, sc.defaultParallelism)
      MLUtils.computeNumFeatures(parsed)
    }
    // BUGFIX: parse the option to Int before filtering. The previous
    // `options.get("numFeatures").filter(_.toInt > 0).getOrElse(computeNumFeatures())`
    // mixed Option[String] with Int and silently widened numFeatures to Any.
    val numFeatures: Int = options
      .get("numFeatures")
      .map(_.toInt)
      .filter(_ > 0)
      .getOrElse {
        computeNumFeatures()
      }
    // Persist the resolved value so buildReader does not need a second pass.
    new CaseInsensitiveMap(options + ("numFeatures" -> numFeatures.toString))
  }
  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    new OutputWriterFactory {
      override def newInstance(
          path: String,
          bucketId: Option[Int],
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        if (bucketId.isDefined) { sys.error("LibSVM doesn't support bucketing") }
        new LibSVMOutputWriter(path, dataSchema, context)
      }
    }
  }
  override def buildReader(
      sparkSession: SparkSession,
      dataSchema: StructType,
      partitionSchema: StructType,
      requiredSchema: StructType,
      filters: Seq[Filter],
      options: Map[String, String],
      hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    verifySchema(dataSchema)
    // prepareRead always stores a positive value under this key.
    val numFeatures = options("numFeatures").toInt
    assert(numFeatures > 0)
    val sparse = options.getOrElse("vectorType", "sparse") == "sparse"
    // Broadcast once so each task deserializes the configuration cheaply.
    val broadcastedHadoopConf =
      sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
    (file: PartitionedFile) => {
      val points =
        new HadoopFileLinesReader(file, broadcastedHadoopConf.value.value)
          .map(_.toString.trim)
          // Skip blank lines and comment lines.
          .filterNot(line => line.isEmpty || line.startsWith("#"))
          .map { line =>
            val (label, indices, values) = MLUtils.parseLibSVMRecord(line)
            LabeledPoint(label, Vectors.sparse(numFeatures, indices, values))
          }
      val converter = RowEncoder(dataSchema)
      points.map { pt =>
        val features = if (sparse) pt.features.toSparse else pt.features.toDense
        converter.toRow(Row(pt.label, features))
      }
    }
  }
}
| xieguobin/Spark_2.0.0_cn1 | ml/source/libsvm/LibSVMRelation.scala | Scala | apache-2.0 | 7,946 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
import java.io._
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import javax.inject.Inject
import javax.inject.Singleton
import play.api.ApplicationLoader.DevContext
import play.api.http._
import play.api.i18n.I18nComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject._
import play.api.internal.libs.concurrent.CoordinatedShutdownSupport
import play.api.libs.Files._
import play.api.libs.concurrent.AkkaComponents
import play.api.libs.concurrent.AkkaTypedComponents
import play.api.libs.concurrent.CoordinatedShutdownProvider
import play.api.libs.crypto._
import play.api.mvc._
import play.api.mvc.request.DefaultRequestFactory
import play.api.mvc.request.RequestFactory
import play.api.routing.Router
import play.core.j.JavaContextComponents
import play.core.j.JavaHelpers
import play.core.DefaultWebCommands
import play.core.SourceMapper
import play.core.WebCommands
import play.utils._
import scala.annotation.implicitNotFound
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
 * A Play application.
 *
 * Application creation is handled by the framework engine.
 *
 * If you need to create an ad-hoc application,
 * for example in case of unit testing, you can easily achieve this using:
 * {{{
 * val application = new DefaultApplication(new File("."), this.getClass.getClassloader, None, Play.Mode.Dev)
 * }}}
 *
 * This will create an application using the current classloader.
 *
 */
@implicitNotFound(
  msg = "You do not have an implicit Application in scope. If you want to bring the current running Application into context, please use dependency injection."
)
trait Application {
  /**
   * The absolute path hosting this application, mainly used by the `getFile(path)` helper method
   */
  def path: File
  /**
   * The application's classloader
   */
  def classloader: ClassLoader
  /**
   * `Dev`, `Prod` or `Test`
   */
  def mode: Mode = environment.mode
  /**
   * The application's environment
   */
  def environment: Environment
  // Internal convenience predicates on the current mode.
  private[play] def isDev  = (mode == Mode.Dev)
  private[play] def isTest = (mode == Mode.Test)
  private[play] def isProd = (mode == Mode.Prod)
  /** The application's configuration. */
  def configuration: Configuration
  // Derived lazily from configuration + environment; computed at most once.
  private[play] lazy val httpConfiguration =
    HttpConfiguration.fromConfiguration(configuration, environment)
  /**
   * The default ActorSystem used by the application.
   */
  def actorSystem: ActorSystem
  /**
   * The default Materializer used by the application.
   */
  implicit def materializer: Materializer
  /**
   * The default CoordinatedShutdown to stop the Application
   */
  def coordinatedShutdown: CoordinatedShutdown
  /**
   * The factory used to create requests for this application.
   */
  def requestFactory: RequestFactory
  /**
   * The HTTP request handler
   */
  def requestHandler: HttpRequestHandler
  /**
   * The HTTP error handler
   */
  def errorHandler: HttpErrorHandler
  /**
   * Return the application as a Java application.
   */
  def asJava: play.Application = {
    new play.DefaultApplication(this, configuration.underlying, injector.asJava, environment.asJava)
  }
  /**
   * Stop the application. The returned future will be redeemed when all stop hooks have been run.
   */
  def stop(): Future[_]
  /**
   * Get the runtime injector for this application. In a runtime dependency injection based application, this can be
   * used to obtain components as bound by the DI framework.
   *
   * @return The injector.
   */
  def injector: Injector = NewInstanceInjector
  /**
   * Returns true if the global application is enabled for this app. If set to false, this changes the behavior of
   * Play.start to disallow access to the global application instance,
   * also affecting the deprecated Play APIs that use these.
   */
  lazy val globalApplicationEnabled: Boolean = {
    // Defaults to true when the key is absent from configuration.
    configuration.getOptional[Boolean](Play.GlobalAppConfigKey).getOrElse(true)
  }
}
object Application {
  /**
   * Creates a function that caches results of calls to
   * `app.injector.instanceOf[T]`. The cache speeds up calls
   * when called with the same Application each time, which is
   * a big benefit in production. It still works properly if
   * called with a different Application each time, such as
   * when running unit tests, but it will run more slowly.
   *
   * Since values are cached, it's important that this is only
   * used for singleton values.
   *
   * This method avoids synchronization so it's possible that
   * the injector might be called more than once for a single
   * instance if this method is called from different threads
   * at the same time.
   *
   * The cache uses a SoftReference to both the Application and
   * the returned instance so it will not cause memory leaks.
   * Unlike WeakHashMap it doesn't use a ReferenceQueue, so values
   * will still be cleaned even if the ReferenceQueue is never
   * activated.
   */
  def instanceCache[T: ClassTag]: Application => T =
    // InlineCache memoizes per-Application lookups; the ClassTag selects the binding.
    new InlineCache((app: Application) => app.injector.instanceOf[T])
}
/**
 * Default implementation of [[Application]], with all components supplied
 * through dependency injection.
 */
@Singleton
class DefaultApplication @Inject() (
    override val environment: Environment,
    applicationLifecycle: ApplicationLifecycle,
    override val injector: Injector,
    override val configuration: Configuration,
    override val requestFactory: RequestFactory,
    override val requestHandler: HttpRequestHandler,
    override val errorHandler: HttpErrorHandler,
    override val actorSystem: ActorSystem,
    override val materializer: Materializer,
    override val coordinatedShutdown: CoordinatedShutdown
) extends Application {
  // Secondary constructor kept for source compatibility with callers that do
  // not supply a CoordinatedShutdown; one is derived from the actor system.
  def this(
      environment: Environment,
      applicationLifecycle: ApplicationLifecycle,
      injector: Injector,
      configuration: Configuration,
      requestFactory: RequestFactory,
      requestHandler: HttpRequestHandler,
      errorHandler: HttpErrorHandler,
      actorSystem: ActorSystem,
      materializer: Materializer
  ) = this(
    environment,
    applicationLifecycle,
    injector,
    configuration,
    requestFactory,
    requestHandler,
    errorHandler,
    actorSystem,
    materializer,
    new CoordinatedShutdownProvider(actorSystem, applicationLifecycle).get
  )
  override def path: File = environment.rootPath
  override def classloader: ClassLoader = environment.classLoader
  // Triggers CoordinatedShutdown asynchronously with the dedicated stop reason.
  override def stop(): Future[_] =
    CoordinatedShutdownSupport.asyncShutdown(actorSystem, ApplicationStoppedReason)
}
/** CoordinatedShutdown reason used when the application is stopped via `Application.stop()`. */
private[play] final case object ApplicationStoppedReason extends CoordinatedShutdown.Reason
/**
 * Helper to provide the Play built in components.
 */
trait BuiltInComponents extends I18nComponents with AkkaComponents with AkkaTypedComponents {

  /** The application's environment, e.g. its [[ClassLoader]] and root path. */
  def environment: Environment

  /** Helper to locate the source code for the application. Only available in dev mode. */
  @deprecated("Use devContext.map(_.sourceMapper) instead", "2.7.0")
  def sourceMapper: Option[SourceMapper] = devContext.map(_.sourceMapper)

  /** Helper to interact with the Play build environment. Only available in dev mode. */
  def devContext: Option[DevContext] = None

  // Define a private val so that webCommands can remain a `def` instead of a `val`
  private val defaultWebCommands: WebCommands = new DefaultWebCommands

  /** Commands that intercept requests before the rest of the application handles them. Used by Evolutions. */
  def webCommands: WebCommands = defaultWebCommands

  /** The application's configuration. */
  def configuration: Configuration

  /** A registry to receive application lifecycle events, e.g. to close resources when the application stops. */
  def applicationLifecycle: ApplicationLifecycle

  /** The router that's used to pass requests to the correct handler. */
  def router: Router

  /**
   * The runtime [[Injector]] instance provided to the [[DefaultApplication]]. This injector is set up to allow
   * existing (deprecated) legacy APIs to function. It is not set up to support injecting arbitrary Play components.
   */
  lazy val injector: Injector = {
    val simple = new SimpleInjector(NewInstanceInjector) +
      cookieSigner + // play.api.libs.Crypto (for cookies)
      httpConfiguration + // play.api.mvc.BodyParsers trait
      tempFileCreator + // play.api.libs.TemporaryFileCreator object
      messagesApi + // play.api.i18n.Messages object
      langs // play.api.i18n.Langs object
    // Wrapped so lookups run with the application classloader as the context classloader.
    new ContextClassLoaderInjector(simple, environment.classLoader)
  }

  /** Body parsers backed by this application's temp-file creator, error handler and parser config. */
  lazy val playBodyParsers: PlayBodyParsers =
    PlayBodyParsers(tempFileCreator, httpErrorHandler, httpConfiguration.parser)(materializer)

  /** The body parser used when an action does not specify one. */
  lazy val defaultBodyParser: BodyParser[AnyContent] = playBodyParsers.default

  /** Action builder wired to [[defaultBodyParser]]. */
  lazy val defaultActionBuilder: DefaultActionBuilder = DefaultActionBuilder(defaultBodyParser)

  /** HTTP configuration parsed from the application configuration. */
  lazy val httpConfiguration: HttpConfiguration =
    HttpConfiguration.fromConfiguration(configuration, environment)

  /** Factory used to build incoming requests (cookies, session, flash handling). */
  lazy val requestFactory: RequestFactory = new DefaultRequestFactory(httpConfiguration)

  /** Handler for client and server errors; shows source locations in dev mode. */
  lazy val httpErrorHandler: HttpErrorHandler =
    new DefaultHttpErrorHandler(environment, configuration, devContext.map(_.sourceMapper), Some(router))

  /**
   * List of filters, typically provided by mixing in play.filters.HttpFiltersComponents
   * or play.api.NoHttpFiltersComponents.
   *
   * In most cases you will want to mixin HttpFiltersComponents and append your own filters:
   *
   * {{{
   * class MyComponents(context: ApplicationLoader.Context)
   *   extends BuiltInComponentsFromContext(context)
   *   with play.filters.HttpFiltersComponents {
   *
   *   lazy val loggingFilter = new LoggingFilter()
   *   override def httpFilters = {
   *     super.httpFilters :+ loggingFilter
   *   }
   * }
   * }}}
   *
   * If you want to filter elements out of the list, you can do the following:
   *
   * {{{
   * class MyComponents(context: ApplicationLoader.Context)
   *   extends BuiltInComponentsFromContext(context)
   *   with play.filters.HttpFiltersComponents {
   *   override def httpFilters = {
   *     super.httpFilters.filterNot(_.getClass == classOf[CSRFFilter])
   *   }
   * }
   * }}}
   */
  def httpFilters: Seq[EssentialFilter]

  /** Request handler that applies [[httpFilters]] and dispatches via [[router]]. */
  lazy val httpRequestHandler: HttpRequestHandler =
    new DefaultHttpRequestHandler(
      webCommands,
      devContext,
      () => router,
      httpErrorHandler,
      httpConfiguration,
      httpFilters
    )

  /** The fully wired application instance assembled from the components above. */
  lazy val application: Application = new DefaultApplication(
    environment,
    applicationLifecycle,
    injector,
    configuration,
    requestFactory,
    httpRequestHandler,
    httpErrorHandler,
    actorSystem,
    materializer,
    coordinatedShutdown
  )

  /** Signer keyed by the application secret; used for signed cookies. */
  lazy val cookieSigner: CookieSigner = new CookieSignerProvider(httpConfiguration.secret).get

  /** Signer used to create and verify CSRF tokens. */
  lazy val csrfTokenSigner: CSRFTokenSigner = new CSRFTokenSignerProvider(cookieSigner).get

  /** Background reaper that deletes expired temporary files. */
  lazy val tempFileReaper: TemporaryFileReaper =
    new DefaultTemporaryFileReaper(actorSystem, TemporaryFileReaperConfiguration.fromConfiguration(configuration))

  /** Creator of temporary files, cleaned up on application stop via the lifecycle. */
  lazy val tempFileCreator: TemporaryFileCreator =
    new DefaultTemporaryFileCreator(applicationLifecycle, tempFileReaper, configuration)

  /** Mapping of file extensions to MIME types. */
  lazy val fileMimeTypes: FileMimeTypes = new DefaultFileMimeTypesProvider(httpConfiguration.fileMimeTypes).get

  @deprecated(
    "Use the corresponding methods that provide MessagesApi, Langs, FileMimeTypes or HttpConfiguration",
    "2.8.0"
  )
  lazy val javaContextComponents: JavaContextComponents =
    JavaHelpers.createContextComponents(messagesApi, langs, fileMimeTypes, httpConfiguration)

  // NOTE: the following helpers are declared as protected since they are only meant to be used inside BuiltInComponents
  // This also makes them not conflict with other methods of the same type when used with Macwire.

  /**
   * Alias method to [[defaultActionBuilder]]. This just helps to keep the idiom of using `Action`
   * when creating `Router`s using the built in components.
   *
   * @return the default action builder.
   */
  protected def Action: DefaultActionBuilder = defaultActionBuilder

  /**
   * Alias method to [[playBodyParsers]].
   */
  protected def parse: PlayBodyParsers = playBodyParsers
}
/**
 * Mix this in when an application should run with no default HTTP filters at all.
 *
 * @see [[BuiltInComponents.httpFilters]]
 */
trait NoHttpFiltersComponents {
  // An empty immutable sequence: no filters are applied to any request.
  val httpFilters: Seq[EssentialFilter] = Seq.empty
}
| benmccann/playframework | core/play/src/main/scala/play/api/Application.scala | Scala | apache-2.0 | 12,561 |
package daos
import scala.collection.mutable.Buffer
import org.hibernate.Session
import java.lang.reflect.ParameterizedType
import scala.collection.JavaConversions.asScalaBuffer
import java.util.{List => JList}
import models.PersistentEntity
import java.io.Serializable
import scala.collection.immutable
import persistence.query.Order
import org.hibernate.Criteria
import persistence.query.OrderBy
import persistence.query.Order
import org.hibernate.criterion.{Order => HibernateOrder}
/**
 * Generic Hibernate DAO for entities of type `T` keyed by primary keys of type `PK`.
 * All operations take an implicit Hibernate [[Session]]; transaction management is
 * left to the caller.
 */
abstract class DAO[T <: PersistentEntity, PK <: Serializable] {
  import DAO._

  /** Runtime class of `T`, recovered reflectively from this DAO's generic superclass. */
  protected val persistentClass: Class[T] =
    getClass.getGenericSuperclass().asInstanceOf[ParameterizedType].
      getActualTypeArguments().apply(0).asInstanceOf[Class[T]]

  /** Looks up an entity by primary key; `None` when no such row exists. */
  def findById(id: PK)(implicit s: Session): Option[T] = {
    Option(s.get(persistentClass, id).asInstanceOf[T])
  }

  /** Returns all entities, sorted by the given orderings applied in sequence. */
  def findAll(ordering: OrderBy*)(implicit s: Session): Seq[T] = {
    val criteria = s.createCriteria(persistentClass)
    addOrders(criteria, ordering)
    criteria.list.asInstanceOf[JList[T]]
  }

  /**
   * Deletes the entity with the given primary key; a no-op when the id is absent.
   *
   * Fix: previously this passed the `Option[T]` wrapper itself to `Session.delete`,
   * which is not a mapped entity — now the entity inside the Option is deleted.
   */
  def removeById(id: PK)(implicit s: Session): Unit = {
    findById(id).foreach(s.delete)
  }

  /** Deletes the given entity. */
  def remove(entity: T)(implicit s: Session): Unit = {
    s.delete(entity)
  }

  /** Deletes every entity of this type. */
  def removeAll(implicit s: Session): Unit = {
    // TODO do this via a query (bulk HQL delete) instead of loading every row first
    findAll().foreach(remove)
  }

  /** Persists a new entity and returns it. */
  def save(entity: T)(implicit s: Session): T = {
    s.save(entity)
    entity
  }

  /** Persists every entity in the collection. */
  def save(entities: Iterable[T])(implicit s: Session): Unit = {
    // this could be very inefficient for large inserts as
    // hibernate will disable batch inserts for entities with
    // IDENTITY identifier generation strategy. Although, most
    // of the time, we won't be inserting large datasets.
    // foreach (not map): only the side effect is wanted, not a collection of ids.
    entities.foreach(s.save)
  }

  /** Updates an existing entity and returns it. */
  def update(entity: T)(implicit s: Session): T = {
    s.update(entity)
    entity
  }

  // TODO setup hibernate's batch-size property and use the batch-size value
  // here to flush the session every batch-size rows
  /** Updates every entity in the collection. */
  def update(entities: Iterable[T])(implicit s: Session): Unit = {
    entities.foreach(s.update)
  }
}
object DAO {

  /** Translates our ordering DSL into the equivalent Hibernate criterion. */
  private def toHibernateOrder(order: OrderBy): HibernateOrder =
    order match {
      case Order.asc(propertyName)  => HibernateOrder.asc(propertyName)
      case Order.desc(propertyName) => HibernateOrder.desc(propertyName)
      case _                        => throw new RuntimeException("invalid order type")
    }

  /** Registers each requested ordering on the criteria, in the order given. */
  private def addOrders(criteria: Criteria, ordering: Seq[OrderBy]): Unit =
    ordering.foreach(o => criteria.addOrder(toHibernateOrder(o)))
}
| Bhashit/better-hibernate-with-scala | app/daos/DAO.scala | Scala | unlicense | 2,512 |
package algorithms.graph
import algorithms.graph.data.Graph
/** Recursive depth-first traversal over a [[Graph]], recording visits and predecessors. */
class DepthFirstSearch(g: Graph) extends Search {

  /** Runs a DFS from source vertex `s` and returns the populated [[SearchResult]]. */
  def search(s: Int): SearchResult = {
    val outcome = new SearchResult(g.numberOfVertices)
    visit(s, outcome)
    outcome
  }

  // Marks `vertex` as visited, then recursively explores each still-unvisited
  // neighbour, recording `vertex` as its predecessor. The marked check happens
  // per neighbour at iteration time, so vertices reached through an earlier
  // neighbour's subtree are skipped later in the same loop.
  private def visit(vertex: Int, outcome: SearchResult): Unit = {
    outcome.marked(vertex) = true
    g.adjacencies(vertex).foreach { neighbour =>
      if (!outcome.marked(neighbour)) {
        outcome.predecessor(neighbour) = Some(vertex)
        visit(neighbour, outcome)
      }
    }
  }
}
| tadayosi/algorithms | src/main/scala/algorithms/graph/DepthFirstSearch.scala | Scala | apache-2.0 | 439 |
package monadasync
import org.scalacheck.{ Gen, Arbitrary }
import org.specs2.scalaz.Spec
import scalaz.{ Equal, Free }
import Free.Trampoline
/** Checks that the MonadSuspend laws hold for scalaz's `Trampoline`. */
object TrampolineSpec extends org.specs2.mutable.SpecWithJUnit with Spec {
  type F[A] = Trampoline[A]
  // Instance under test.
  implicit val MonadSuspendF: MonadSuspend[F] = MonadSuspend.TrampolineMonadSuspend

  /** Forces a trampolined computation and returns its result. */
  def run[A](f: F[A]): A = f.run

  // Two trampolines are considered equal when running both yields equal ints.
  implicit val equalTc: Equal[F[Int]] =
    new Equal[F[Int]] {
      import scalaz.std.anyVal.intInstance
      override def equal(tc1: F[Int], tc2: F[Int]): Boolean =
        Equal[Int].equal(run(tc1), run(tc2))
    }

  // Generates trampolines that are either already evaluated (`now`) or deferred (`delay`).
  implicit def arbitraryTC(implicit a: Arbitrary[Int]): Arbitrary[F[Int]] = Arbitrary {
    a.arbitrary flatMap { i =>
      Gen.oneOf(
        Gen.const(MonadSuspendF.now(i)),
        Gen.const(MonadSuspendF.delay(i))
      )
    }
  }

  // Lifts arbitrary ints into thunks, as some laws need an Arbitrary[() => Int].
  implicit def arbitraryF0(implicit a: Arbitrary[Int]): Arbitrary[() => Int] = Arbitrary {
    a.arbitrary map { a => () => a }
  }

  checkAll("MonadSuspend laws", MonadAsyncProperties.monadSuspend.laws[F])
}
| lukiano/monadasync | scalaz/src/test/scala/monadasync/TrampolineSpec.scala | Scala | apache-2.0 | 1,035 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount
import slamdata.Predef._
import quasar.{QuasarError, Variables}
import quasar.contrib.pathy._
import quasar.contrib.scalaz._
import quasar.effect.LiftedOps
import quasar.fp.ski._
import quasar.fs._
import quasar.sql.{ScopedExpr, Sql, Statement}
import matryoshka.data.Fix
import monocle.Prism
import monocle.std.{disjunction => D}
import pathy._, Path._
import scalaz._, Scalaz._
/** Algebra of mount-management operations; interpreted into a concrete effect elsewhere. */
sealed abstract class Mounting[A]
object Mounting {
  /** Provides for accessing many mounts at once, which allows certain
   * operations (like determining which paths in a directory refer to
   * mounts) to be implemented more efficiently. A mount at the supplied
   * prefix is not returned as part of the result.
   */
  final case class HavingPrefix(dir: ADir)
    extends Mounting[Map[APath, MountingError \\/ MountType]]

  /** Looks up what kind of mount, if any, exists at `path`. */
  final case class LookupType(path: APath)
    extends Mounting[Option[MountingError \\/ MountType]]

  /** Looks up the full configuration of the mount, if any, at `path`. */
  final case class LookupConfig(path: APath)
    extends Mounting[Option[MountingError \\/ MountConfig]]

  /** Creates a view mount at `loc` for the given query and variables. */
  final case class MountView(loc: AFile, scopedExpr: ScopedExpr[Fix[Sql]], vars: Variables)
    extends Mounting[MountingError \\/ Unit]

  /** Creates a filesystem mount of type `typ` at `loc`, connecting via `uri`. */
  final case class MountFileSystem(loc: ADir, typ: FileSystemType, uri: ConnectionUri)
    extends Mounting[MountingError \\/ Unit]

  /** Creates a module mount at `loc` containing the given statements. */
  final case class MountModule(loc: ADir, statements: List[Statement[Fix[Sql]]])
    extends Mounting[MountingError \\/ Unit]

  /** Removes the mount at `path`. */
  final case class Unmount(path: APath)
    extends Mounting[MountingError \\/ Unit]

  /** Moves a mount from `from` to `to` (both files or both directories). */
  final case class Remount[T](from: Path[Abs,T,Sandboxed], to: Path[Abs,T,Sandboxed])
    extends Mounting[MountingError \\/ Unit]

  /** Indicates the wrong type of path (file vs. dir) was supplied to the `mount`
   * convenience function.
   */
  final case class PathTypeMismatch(path: APath) extends QuasarError

  object PathTypeMismatch {
    implicit val pathTypeMismatchShow: Show[PathTypeMismatch] =
      Show.shows { v =>
        val expectedType = refineType(v.path).fold(κ("file"), κ("directory"))
        s"Expected ${expectedType} path instead of '${posixCodec.printPath(v.path)}'"
      }
  }

  /** Smart constructors lifting each [[Mounting]] operation into a `Free` program over `S`. */
  final class Ops[S[_]](implicit S: Mounting :<: S)
    extends LiftedOps[Mounting, S] {
    import MountConfig._

    /** Returns mounts located at a path having the given prefix. */
    def havingPrefix(dir: ADir): FreeS[Map[APath, MountingError \\/ MountType]] =
      lift(HavingPrefix(dir))

    /** The views mounted at paths having the given prefix. */
    def viewsHavingPrefix(dir: ADir): FreeS[Set[AFile]] =
      rPathsHavingPrefix(dir).map(_.foldMap(_.toSet))

    /** Like [[viewsHavingPrefix]], but with results relative to `dir`. */
    def viewsHavingPrefix_(dir: ADir): FreeS[Set[RFile]] =
      viewsHavingPrefix(dir).map(_.foldMap(_.relativeTo(dir).toSet))

    /** The modules mounted at paths having the given prefix. */
    def modulesHavingPrefix(dir: ADir): FreeS[Set[ADir]] =
      havingPrefix(dir).map(_.collect {
        case (k, \\/-(v)) if v ≟ MountType.ModuleMount => k }.toSet
        .foldMap(p => refineType(p).swap.toSet))

    /** Like [[modulesHavingPrefix]], but with results relative to `dir`. */
    def modulesHavingPrefix_(dir: ADir): FreeS[Set[RDir]] =
      modulesHavingPrefix(dir).map(_.foldMap(_.relativeTo(dir).toSet))

    /** Whether the given path refers to a mount. */
    def exists(path: APath): FreeS[Boolean] =
      lookupType(path).run.isDefined

    /** Returns the mount configuration if the given path refers to a mount. */
    def lookupConfig(path: APath): EitherT[OptionT[FreeS, ?], MountingError, MountConfig] =
      EitherT(OptionT(lift(LookupConfig(path))))

    /** The view configuration at `path`; empty when the mount there is not a view. */
    def lookupViewConfig(path: AFile): EitherT[OptionT[FreeS, ?], MountingError, ViewConfig] =
      lookupConfig(path).flatMap(config =>
        EitherT.rightT(OptionT(viewConfig.getOption(config).map(ViewConfig.tupled).point[FreeS])))

    /** Like [[lookupViewConfig]], but discards the error channel. */
    def lookupViewConfigIgnoreError(path: AFile): OptionT[FreeS, ViewConfig] =
      lookupViewConfig(path).toOption.squash

    /** The module configuration at `path`; empty when the mount there is not a module. */
    def lookupModuleConfig(path: ADir): EitherT[OptionT[FreeS, ?], MountingError, ModuleConfig] =
      lookupConfig(path).flatMap(config =>
        EitherT.rightT(OptionT(moduleConfig.getOption(config).map(ModuleConfig(_)).point[FreeS])))

    /** Like [[lookupModuleConfig]], but discards the error channel. */
    def lookupModuleConfigIgnoreError(path: ADir): OptionT[FreeS, ModuleConfig] =
      lookupModuleConfig(path).run
        .flatMap(either => OptionT(either.toOption.η[Free[S, ?]]))

    /** Returns the type of mount the path refers to, if any. */
    def lookupType(path: APath): EitherT[OptionT[FreeS, ?], MountingError, MountType] =
      EitherT(OptionT(lift(LookupType(path))))

    /** Create a view mount at the given location. */
    def mountView(
      loc: AFile,
      scopedExpr: ScopedExpr[Fix[Sql]],
      vars: Variables
    )(implicit
      S0: MountingFailure :<: S
    ): FreeS[Unit] =
      MountingFailure.Ops[S].unattempt(lift(MountView(loc, scopedExpr, vars)))

    /** Create a filesystem mount at the given location. */
    def mountFileSystem(
      loc: ADir,
      typ: FileSystemType,
      uri: ConnectionUri
    )(implicit
      S0: MountingFailure :<: S
    ): FreeS[Unit] =
      MountingFailure.Ops[S].unattempt(lift(MountFileSystem(loc, typ, uri)))

    /** Create a module mount at the given location. */
    def mountModule(
      loc: ADir,
      statements: List[Statement[Fix[Sql]]]
    )(implicit
      SO: MountingFailure :<: S
    ): FreeS[Unit] =
      MountingFailure.Ops[S].unattempt(lift(MountModule(loc, statements)))

    /** Attempt to create a mount described by the given configuration at the
      * given location.
      */
    def mount(
      loc: APath,
      config: MountConfig
    )(implicit
      S0: MountingFailure :<: S,
      S1: PathMismatchFailure :<: S
    ): FreeS[Unit] = {
      val mmErr = PathMismatchFailure.Ops[S]
      // Each config variant requires a particular path type; a mismatch raises
      // PathTypeMismatch via the failure effect rather than silently mounting.
      config match {
        case ViewConfig(query, vars) =>
          D.right.getOption(refineType(loc)) cata (
            file => mountView(file, query, vars),
            mmErr.fail(PathTypeMismatch(loc)))
        case FileSystemConfig(typ, uri) =>
          D.left.getOption(refineType(loc)) cata (
            dir => mountFileSystem(dir, typ, uri),
            mmErr.fail(PathTypeMismatch(loc)))
        case ModuleConfig(statements) =>
          D.left.getOption(refineType(loc)) cata (
            dir => mountModule(dir, statements),
            mmErr.fail(PathTypeMismatch(loc)))
      }
    }

    /** Replace the mount at the given path with one described by the
      * provided config.
      */
    def replace(
      loc: APath,
      config: MountConfig
    )(implicit
      S0: MountingFailure :<: S,
      S1: PathMismatchFailure :<: S
    ): FreeS[Unit] =
      modify(loc, loc, κ(config))

    /** Remove the mount at the given path. */
    def unmount(path: APath)(implicit S0: MountingFailure :<: S): FreeS[Unit] =
      MountingFailure.Ops[S].unattempt(lift(Unmount(path)))

    /** Remount `src` at `dst`, results in an error if there is no mount at
      * `src`.
      */
    def remount[T](
      src: Path[Abs,T,Sandboxed],
      dst: Path[Abs,T,Sandboxed]
    )(implicit S0: MountingFailure :<: S): FreeS[Unit] =
      MountingFailure.Ops[S].unattempt(lift(Remount(src, dst)))

    /** Mounts the config at `path`, replacing any existing mount there only when
      * `replaceIfExists` is set.
      */
    def mountOrReplace(
      path: APath,
      mountConfig: MountConfig,
      replaceIfExists: Boolean
    )(implicit
      S0: MountingFailure :<: S,
      S1: PathMismatchFailure :<: S
    ): Free[S, Unit] =
      for {
        exists <- lookupType(path).run.isDefined
        _ <- if (replaceIfExists && exists) replace(path, mountConfig)
             else mount(path, mountConfig)
      } yield ()

    ////

    // Prism focusing on the "path not found" variant of MountingError.
    private val notFound: Prism[MountingError, APath] =
      MountingError.pathError composePrism PathError.pathNotFound

    // Moves the mount at `src` to `dst`, transforming its config with `f`.
    // On failure of the new mount, the original mount at `src` is restored
    // before the error is re-raised.
    private def modify[T](
      src: Path[Abs,T,Sandboxed],
      dst: Path[Abs,T,Sandboxed],
      f: MountConfig => MountConfig
    )(implicit
      S0: MountingFailure :<: S,
      S1: PathMismatchFailure :<: S
    ): FreeS[Unit] = {
      val mntErr = MountingFailure.Ops[S]
      val mmErr = PathMismatchFailure.Ops[S]

      for {
        cfg <- lookupConfig(src).run.run >>=[MountConfig] (
                 _.cata(_.fold(mntErr.fail(_), _.η[FreeS]), mntErr.fail(notFound(src))))
        _ <- unmount(src)
        mod = mount(dst, f(cfg))
        restore = mount(src, cfg)
        res1 = mntErr.onFail(mod, err => restore *> mntErr.fail(err))
        _ <- mmErr.onFail(res1, err => restore *> mmErr.fail(err))
      } yield ()
    }

    // All mount paths under `dir`, refined into dir-or-file form.
    private def rPathsHavingPrefix(dir: ADir): FreeS[Set[ADir \\/ AFile]] =
      havingPrefix(dir).map(_.keySet.map(refineType))
  }

  object Ops {
    implicit def apply[S[_]](implicit S: Mounting :<: S): Ops[S] =
      new Ops[S]
  }
}
| jedesah/Quasar | core/src/main/scala/quasar/fs/mount/Mounting.scala | Scala | apache-2.0 | 9,169 |
package com.twitter.finagle
import com.twitter.io.Buf
package object framer {

  /**
   * A `Framer` performs protocol framing: it accumulates inbound `Buf`s until one
   * or more complete frames are available, returning completed frames in order.
   * When the accumulated bytes do not yet form a complete frame, the returned
   * sequence is empty. Stateful implementations should be expected.
   *
   * @see [[com.twitter.finagle.framer.LengthFieldFramer]] as an example
   *      implementation.
   */
  type Framer = Buf => IndexedSeq[Buf]
}
| adriancole/finagle | finagle-core/src/main/scala/com/twitter/finagle/framer/package.scala | Scala | apache-2.0 | 609 |
package com.workshop
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
/** Unit tests for `DuplicatesRemover`: order-preserving de-duplication. */
class DuplicatesRemoverTest extends Specification {

  // Fresh fixture per example: a remover plus baseline inputs.
  class Context extends Scope {
    val remover = new DuplicatesRemover
    val noDuplicates: Seq[Int] = 1 to 20
    // Every element appears twice; the duplicates arrive in reverse order.
    val withDuplicates = noDuplicates ++ noDuplicates.reverse
  }

  "remove" should {
    "return same seq when no duplicates" in new Context {
      remover.remove(noDuplicates) must be_===(noDuplicates)
    }

    "remove the duplicates and keep the order" in new Context {
      remover.remove(withDuplicates) must be_===(noDuplicates)
    }

    "return empty seq for empty input" in new Context {
      remover.remove(Seq.empty) must be_===(Seq.empty)
    }
  }
}
| maximn/scala-workshop | src/test/scala/com/workshop/DuplicatesRemoverTest.scala | Scala | mit | 737 |
package play.api.mvc
import play.api.libs.json._
import play.api.libs.iteratee._
import play.api.libs.concurrent._
import scala.concurrent.Future
import play.core.Execution.Implicits.internalContext
/**
 * A WebSocket handler.
 *
 * @tparam A the socket messages type
 * @param f the socket messages generator: given the request header, it receives
 *          the inbound enumerator and outbound iteratee to wire together
 */
case class WebSocket[A](f: RequestHeader => (Enumerator[A], Iteratee[A, Unit]) => Unit)(implicit val frameFormatter: WebSocket.FrameFormatter[A]) extends Handler {

  // Type member recording the frame type `A` (presumably recovered by the
  // server backend from an untyped Handler — not shown in this file).
  type FRAMES_TYPE = A

  /**
   * Returns itself, for better support in the routes file.
   *
   * @return itself
   */
  def apply() = this
}
/**
 * Helper utilities to generate WebSocket results.
 */
object WebSocket {

  /**
   * Typeclass to handle WebSocket frames format.
   */
  trait FrameFormatter[A] {

    /**
     * Transform a FrameFormatter[A] to a FrameFormatter[B]
     */
    def transform[B](fba: B => A, fab: A => B): FrameFormatter[B]
  }

  /**
   * Defaults frame formatters.
   */
  object FrameFormatter {

    /**
     * String WebSocket frames.
     */
    implicit val stringFrame: FrameFormatter[String] = play.core.server.websocket.Frames.textFrame

    /**
     * Array[Byte] WebSocket frames.
     */
    implicit val byteArrayFrame: FrameFormatter[Array[Byte]] = play.core.server.websocket.Frames.binaryFrame

    /**
     * Either String or Array[Byte] WebSocket frames.
     */
    implicit val mixedFrame: FrameFormatter[Either[String, Array[Byte]]] = play.core.server.websocket.Frames.mixedFrame

    /**
     * Json WebSocket frames, built atop the string formatter via stringify/parse.
     */
    implicit val jsonFrame: FrameFormatter[JsValue] = stringFrame.transform(Json.stringify, Json.parse)
  }

  /**
   * Creates a WebSocket result from inbound and outbound channels.
   */
  def using[A](f: RequestHeader => (Iteratee[A, _], Enumerator[A]))(implicit frameFormatter: FrameFormatter[A]): WebSocket[A] = {
    // `e |>> readIn` feeds inbound frames into the caller's consumer;
    // `writeOut |>> i` runs the caller's producer into the outbound iteratee.
    WebSocket[A](h => (e, i) => { val (readIn, writeOut) = f(h); e |>> readIn; writeOut |>> i })
  }

  /**
   * Creates a WebSocket that pipes inbound frames through the given enumeratee
   * and echoes the result back out.
   */
  def adapter[A](f: RequestHeader => Enumeratee[A, A])(implicit frameFormatter: FrameFormatter[A]): WebSocket[A] = {
    WebSocket[A](h => (in, out) => { in &> f(h) |>> out })
  }

  /**
   * Creates a WebSocket result from inbound and outbound channels retrieved asynchronously.
   */
  def async[A](f: RequestHeader => Future[(Iteratee[A, _], Enumerator[A])])(implicit frameFormatter: FrameFormatter[A]): WebSocket[A] = {
    using { rh =>
      val p = f(rh)
      // Flatten the future pair into an iteratee/enumerator usable immediately.
      val it = Iteratee.flatten(p.map(_._1))
      val enum = Enumerator.flatten(p.map(_._2))
      (it, enum)
    }
  }
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play/src/main/scala/play/api/mvc/WebSocket.scala | Scala | mit | 2,574 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.responsiblepeople
import cats.data.Validated.{Invalid, Valid}
import models.{Country, countries}
import jto.validation._
import jto.validation.forms.UrlFormEncoded
import jto.validation.ValidationError
import play.api.libs.json._
/** A responsible person's nationality: either British or some other country. */
sealed trait Nationality
case object British extends Nationality
case class OtherCountry(name: Country) extends Nationality
/**
 * Form and JSON (de)serialisation for [[Nationality]]. On the wire, "01" encodes
 * British and "02" encodes another country, carried in an `otherCountry` field.
 */
object Nationality {

  import utils.MappingUtils.Implicits._

  /** Validates a submitted country code against the known `countries` list. */
  val validateCountry: Rule[String, Country] = {
    Rule {
      case "" => Invalid(Seq(Path -> Seq(ValidationError("error.required.rp.nationality.country"))))
      case code =>
        countries.collectFirst {
          case e @ Country(_, c) if c == code =>
            Valid(e)
        } getOrElse {
          Invalid(Seq(Path -> Seq(ValidationError("error.invalid.rp.nationality.country"))))
        }
    }
  }

  // Form mapping: radio value "01" means British; "02" additionally requires a
  // valid `otherCountry` code; anything else is rejected.
  implicit val formRule: Rule[UrlFormEncoded, Nationality] =
    From[UrlFormEncoded] { readerURLFormEncoded =>
      import jto.validation.forms.Rules._
      (readerURLFormEncoded \ "nationality").read[String].withMessage("error.required.nationality") flatMap {
        case "01" => British
        case "02" =>
          (readerURLFormEncoded \ "otherCountry").read(validateCountry) map OtherCountry.apply
        case _ =>
          (Path \ "nationality") -> Seq(ValidationError("error.invalid"))
      }
    }

  // Inverse of formRule: renders the radio value and, when needed, the country code.
  implicit val formWrite: Write[Nationality, UrlFormEncoded] = Write {
    case British => "nationality" -> "01"
    case OtherCountry(value) => Map("nationality" -> "02",
      "otherCountry" -> value.code)
  }

  implicit val jsonReads: Reads[Nationality] = {
    import play.api.libs.json._
    (__ \ "nationality").read[String].flatMap[Nationality] {
      case "01" => British
      case "02" => (JsPath \ "otherCountry").read[Country] map OtherCountry.apply
      case _ => play.api.libs.json.JsonValidationError("error.invalid")
    }
  }

  // Explicit type annotation added: public implicits should state their type
  // so implicit resolution does not depend on inference of the right-hand side.
  implicit val jsonWrites: Writes[Nationality] = Writes[Nationality] {
    case British => Json.obj("nationality" -> "01")
    case OtherCountry(value) => Json.obj(
      "nationality" -> "02",
      "otherCountry" -> value
    )
  }

  /** Lifts an optional country into an optional nationality (via the conversion below). */
  implicit def getNationality(country: Option[Country]): Option[Nationality] = {
    country match {
      case Some(countryType) => Some(countryType)
      case _ => None
    }
  }

  /** Maps the United Kingdom to [[British]]; every other country to [[OtherCountry]]. */
  implicit def getNationality(country: Country): Nationality = {
    country match {
      case Country("United Kingdom", "GB") => British
      case someCountry => OtherCountry(someCountry)
    }
  }

  /** Inverse of `getNationality`: [[British]] maps back to the United Kingdom. */
  implicit def getCountry(nationality: Nationality): Country = {
    nationality match {
      case British => Country("United Kingdom", "GB")
      case OtherCountry(someCountry) => someCountry
    }
  }
}
| hmrc/amls-frontend | app/models/responsiblepeople/Nationality.scala | Scala | apache-2.0 | 3,329 |
package com.shocktrade.webapp
import io.scalajs.npm.express.{Request, Response}
import scala.scalajs.js
/**
* Routes package object
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
/**
 * Routes package object
 * @author Lawrence Daniels <lawrence.daniels@gmail.com>
 */
package object routes {

  // Express-style continuation callback, invoked to pass control onward.
  type NextFunction = js.Function0[Unit]

  /**
   * Request Extensions
   * @author Lawrence Daniels <lawrence.daniels@gmail.com>
   */
  implicit class RequestExtensions(val request: Request) extends AnyVal {

    // Reads the optional "maxResults" query parameter, falling back to `default`.
    // NOTE(review): `_.toInt` throws on a non-numeric value — confirm upstream validation.
    def getMaxResults(default: Int = 20): Int = request.query.get("maxResults") map (_.toInt) getOrElse default

    // Reads the "symbol" route parameter, normalized to upper case.
    def getSymbol: String = request.params.apply("symbol").toUpperCase()
  }

  /**
   * Parameter Extensions
   * @author Lawrence Daniels <lawrence.daniels@gmail.com>
   */
  implicit class ParameterExtensions(val params: js.Dictionary[String]) extends AnyVal {

    // Returns the values for all requested parameter names, or None if any is missing.
    @inline
    def extractParams(names: String*): Option[Seq[String]] = {
      val values = names.map(params.get)
      if (values.forall(_.isDefined)) Some(values.flatten) else None
    }
  }

  /**
   * Response Extensions
   * @author Lawrence Daniels <lawrence.daniels@gmail.com>
   */
  implicit class ResponseExtensions(val response: Response) extends AnyVal {

    // Sends a 400 response naming the missing required parameter(s).
    @inline
    def missingParams(params: String*): Unit = {
      val message = s"Bad Request: ${params.mkString(" and ")} ${if (params.length == 1) "is" else "are"} required"
      response.status(400).send(message)
    }

    // NOTE(review): not referenced anywhere in this class — appears to be dead
    // code; confirm no external use before removing.
    private def asString(value: js.Any): String = value match {
      case v if v == null => ""
      case v if js.typeOf(v) == "string" => s""""${v.toString}""""
      case v => v.toString
    }
  }
}
| ldaniels528/shocktrade.js | app/server/webapp/src/main/scala/com/shocktrade/webapp/routes/package.scala | Scala | apache-2.0 | 1,624 |
package breeze.collection.mutable
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.MapLike
import scala.collection.generic._
import scala.reflect.ClassTag
/**
* Wraps an ArrayBuffer with a Map. Note the odd behavior for -=
*
* The key must be nonnegative
*
* Chances are you want to change defValue, which is used to
* fill in blanks if you don't add things consecutively. Otherwise you
* get an Exception.
*
* @author dlwh
*/
@SerialVersionUID(1)
class ArrayMap[@specialized V](defValue: =>V, private val arr: ArrayBuffer[V]) extends scala.collection.mutable.Map[Int,V]
  with MapLike[Int,V,ArrayMap[V]] with Serializable {

  def this(defValue: =>V = ArrayMap.error) = this(defValue, new ArrayBuffer[V]())

  /** Value returned for missing keys and used to pad gaps. */
  override def default(i: Int): V = defValue

  /** Reads index `i`, first materializing (and storing) defaults up to `i` if needed. */
  override def apply(i: Int) = {
    if(i < arr.length) {
      arr(i)
    } else {
      update(i,default(i))
      arr(i)
    }
  }

  override def get(i : Int) = if(i < arr.length) Some(arr(i)) else None

  override def += (k: (Int,V)): this.type = { update(k._1,k._2); this}

  override def clear = arr.clear()

  override def getOrElseUpdate(i: Int, v: =>V) = {
    if(i >= arr.length) {
      update(i,v)
    }
    arr(i)
  }

  /** Sets index `i`, padding any gap before it with `default` so the buffer stays dense. */
  override def update(i : Int, v : V) {
    while(i > arr.length) {
      arr += default(arr.length)
    }
    if(i == arr.length)
      arr += v
    else arr(i) = v
  }

  override def empty = new ArrayMap(defValue)

  /**
   * Note that removing an element in the array simply replaces it with the default(i).
   *
   * Fix: removing a key outside the current range is now a no-op (per the
   * mutable.Map contract for removing absent keys), where previously it threw
   * an IndexOutOfBoundsException.
   */
  def -=(i : Int): this.type = {
    if (i >= 0 && i < arr.length) arr(i) = default(i)
    this
  }

  override def size = arr.size

  def iterator = keysIterator zip valuesIterator

  override def keysIterator = (0 until arr.length).iterator

  override def keys:Set[Int] = new Set[Int] {
    def iterator = keysIterator
    def -(elem: Int) = if(!contains(elem)) this else Set.empty ++ keys - elem
    def +(elem: Int) = if(contains(elem)) this else Set.empty ++ keys + elem
    // Fix: keys run from 0 until arr.length, so index 0 must be contained
    // (was `i > 0`, wrongly excluding the first index).
    def contains(i: Int) = i < arr.length && i >= 0
  }

  override def valuesIterator = arr.iterator

  /**
   * Returns the array we're holding on to.
   */
  def innerArray(implicit w: ClassTag[V]):Array[V] = arr.toArray
}
object ArrayMap {
  // Default "default value" thunk: fails loudly when a missing key is read.
  def error = throw new NoSuchElementException("Key not found, and no default value")
}
| eponvert/breeze | src/main/scala/breeze/collection/mutable/ArrayMap.scala | Scala | apache-2.0 | 2,884 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the"License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an"AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datacompaction
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.indexstore.blockletindex.SegmentIndexFileStore
import org.apache.carbondata.core.metadata.{CarbonMetadata, SegmentFileStore}
import org.apache.carbondata.core.util.path.CarbonTablePath
class CompactionSupportGlobalSortParameterTest extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
// Directory holding the CSV fixtures used by this suite.
val filePath: String = s"$resourcesPath/globalsort"
// Three small sample files, loaded repeatedly to create multiple segments.
val file1: String = resourcesPath + "/globalsort/sample1.csv"
val file2: String = resourcesPath + "/globalsort/sample2.csv"
val file3: String = resourcesPath + "/globalsort/sample3.csv"
/**
 * Recreates both tables before every test: `compaction_globalsort` (the
 * global-sort table under test) and `carbon_localsort` (the reference table
 * queries are compared against), and resets the Carbon properties this
 * suite mutates. Uses explicit `(): Unit =` instead of the deprecated
 * procedure syntax, and calls `resetConf()` consistently with `afterEach`.
 */
override def beforeEach(): Unit = {
  resetConf()
  sql("DROP TABLE IF EXISTS compaction_globalsort")
  sql(
    """
      | CREATE TABLE compaction_globalsort(id INT, name STRING, city STRING, age INT)
      | STORED AS carbondata
      | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='global_sort', 'GLOBAL_SORT_PARTITIONS'='1')
    """.stripMargin)
  sql("DROP TABLE IF EXISTS carbon_localsort")
  sql(
    """
      | CREATE TABLE carbon_localsort(id INT, name STRING, city STRING, age INT)
      | STORED AS carbondata
    """.stripMargin)
}
/** Drops both test tables and restores default Carbon properties. */
override def afterEach(): Unit = {
  sql("DROP TABLE IF EXISTS compaction_globalsort")
  sql("DROP TABLE IF EXISTS carbon_localsort")
  resetConf()
}
test("MINOR, ENABLE_AUTO_LOAD_MERGE: false") {
  // With auto load merge disabled only the explicit compaction below may run.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
  // Load each sample file twice into both tables: segments 0..5 in each.
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort OPTIONS('GLOBAL_SORT_PARTITIONS'='2')")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  // Invalidate three of the six segments, then request a minor compaction.
  sql("delete from table compaction_globalsort where SEGMENT.ID in (1,2,3)")
  sql("delete from table carbon_localsort where SEGMENT.ID in (1,2,3)")
  sql("ALTER TABLE compaction_globalsort COMPACT 'minor'")
  // Too few valid segments remain, so no merged segment 0.1 should appear.
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), false, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(!segmentIds.contains("0.1"))
  assert(segmentIds.length == 6)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(12)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Success")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Marked for Delete")
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
}
test("MINOR, ENABLE_AUTO_LOAD_MERGE: true") {
  // With auto load merge enabled, compaction happens during loading itself.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort OPTIONS('GLOBAL_SORT_PARTITIONS'='2')")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  // Six loads produce six segments; auto merge compacts them into one more
  // segment because 6 exceeds 4 (the default minor threshold), so SHOW
  // SEGMENTS lists 7 rows in total.
  assert(segmentIds.length == 7)
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
}
test("MINOR, PRESERVE_LATEST_SEGMENTS_NUMBER: 0") {
  // Preserving zero segments makes every segment eligible for compaction.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    "0")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MINOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  // Minor compaction merged four segments into 0.1; no second-level 4.1.
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 4)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER)
}
test("MINOR, PRESERVE_LATEST_SEGMENTS_NUMBER: 4") {
  // Preserving the latest 4 of 6 segments leaves too few to compact.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    "4")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MINOR'")
  // No compaction should have happened at all.
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), false, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(!segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 6)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 0)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER)
}
test("MINOR, DAYS_ALLOWED_TO_COMPACT: 0") {
  // A zero-day window places no age restriction on compaction candidates.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    "0")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MINOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 4)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT)
}
test("MINOR, DAYS_ALLOWED_TO_COMPACT: 4") {
  // Fresh segments are within a 4-day window, so compaction proceeds normally.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    "4")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MINOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 4)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT)
}
test("MAJOR, ENABLE_AUTO_LOAD_MERGE: false") {
  // Disable auto merge so only the explicit major compaction below runs.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort OPTIONS('GLOBAL_SORT_PARTITIONS'='2')")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  // Invalidate three segments, then run a major compaction over the rest.
  sql("delete from table compaction_globalsort where SEGMENT.ID in (1,2,3)")
  sql("delete from table carbon_localsort where SEGMENT.ID in (1,2,3)")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(segmentIds.length == 7)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(12)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Success")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Marked for Delete")
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
}
test("MAJOR, ENABLE_AUTO_LOAD_MERGE: true") {
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort OPTIONS('GLOBAL_SORT_PARTITIONS'='2')")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  // Auto merge already produced a compacted segment during loading.
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  // Six loads produce six segments; auto merge compacts once (6 exceeds the
  // default minor threshold of 4) and the major compaction produces one more
  // segment, so SHOW SEGMENTS lists 8 rows in total.
  assert(segmentIds.length == 8)
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
}
test("MAJOR, PRESERVE_LATEST_SEGMENTS_NUMBER: 0") {
  // Preserving zero segments lets the major compaction merge all six.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    "0")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  // All six original segments were folded into 0.1 by the major compaction.
  assert(statuses.count(_ == "Compacted") == 6)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER)
}
test("MAJOR, PRESERVE_LATEST_SEGMENTS_NUMBER: 4") {
  // Preserving the latest 4 segments leaves only the oldest 2 to compact.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    "4")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 2)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
    CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER)
}
test("MAJOR, DAYS_ALLOWED_TO_COMPACT: 0") {
  // A zero-day window places no age restriction on compaction candidates.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    "0")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 6)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT)
}
test("MAJOR, DAYS_ALLOWED_TO_COMPACT: 4") {
  // Fresh segments fall inside a 4-day window, so all six are compacted.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    "4")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(!segmentIds.contains("4.1"))
  assert(segmentIds.length == 7)
  val statuses = segments.collect().map(_.toSeq(1))
  assert(statuses.count(_ == "Compacted") == 6)
  assert(getIndexFileCount("compaction_globalsort", "0.1") === 1)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(24)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT,
    CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT)
}
test("MAJOR, ENABLE_PREFETCH_DURING_COMPACTION: true") {
  // Enable prefetching during compaction; disable auto merge so only the
  // explicit major compaction below runs.
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_COMPACTION_PREFETCH_ENABLE, "true")
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
  (1 to 2).foreach { _ =>
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE carbon_localsort")
    }
    Seq(file1, file2, file3).foreach { f =>
      sql(s"LOAD DATA LOCAL INPATH '$f' INTO TABLE compaction_globalsort OPTIONS('GLOBAL_SORT_PARTITIONS'='2')")
    }
  }
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "global_sort")
  checkExistence(sql("DESCRIBE FORMATTED compaction_globalsort"), true, "city,name")
  sql("delete from table compaction_globalsort where SEGMENT.ID in (1,2,3)")
  sql("delete from table carbon_localsort where SEGMENT.ID in (1,2,3)")
  sql("ALTER TABLE compaction_globalsort COMPACT 'MAJOR'")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Compacted")
  val segments = sql("SHOW SEGMENTS FOR TABLE compaction_globalsort")
  val segmentIds = segments.collect().map(_.toSeq.head)
  assert(segmentIds.contains("0.1"))
  assert(segmentIds.length == 7)
  checkAnswer(sql("SELECT COUNT(*) FROM compaction_globalsort"), Seq(Row(12)))
  checkAnswer(sql("SELECT * FROM compaction_globalsort"),
    sql("SELECT * FROM carbon_localsort"))
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Success")
  checkExistence(sql("SHOW SEGMENTS FOR TABLE compaction_globalsort"), true, "Marked for Delete")
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_COMPACTION_PREFETCH_ENABLE,
    CarbonCommonConstants.CARBON_COMPACTION_PREFETCH_ENABLE_DEFAULT)
}
/**
 * Restores the load/compaction properties that tests in this suite override
 * back to their Carbon defaults. Uses an explicit `: Unit =` result instead
 * of the deprecated procedure syntax.
 */
private def resetConf(): Unit = {
  val prop = CarbonProperties.getInstance()
  prop.addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
  prop.addProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS, CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS_DEFAULT)
  prop.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
}
/**
 * Counts the carbon index files belonging to `segmentNo` of `tableName`
 * (in the `default` database). Looks in the segment directory when it
 * exists on disk, otherwise resolves the segment through its segment file.
 */
private def getIndexFileCount(tableName: String, segmentNo: String = "0"): Int = {
  val table = CarbonMetadata.getInstance().getCarbonTable("default", tableName)
  val tablePath = table.getTablePath
  val segmentDir = CarbonTablePath.getSegmentPath(tablePath, segmentNo)
  if (FileFactory.isFileExist(segmentDir)) {
    new SegmentIndexFileStore().getIndexFilesFromSegment(segmentDir).size()
  } else {
    val segment = Segment.getSegment(segmentNo, tablePath)
    new SegmentFileStore(tablePath, segment.getSegmentFileName).getIndexCarbonFiles.size()
  }
}
}
| jackylk/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/CompactionSupportGlobalSortParameterTest.scala | Scala | apache-2.0 | 28,056 |
package org.slf4s
import org.slf4j.{LoggerFactory => Underlying}
import scala.reflect.ClassTag
/**
 * Factory for [[Logger]] instances, mirroring `org.slf4j.LoggerFactory`
 * but returning the Scala wrapper type.
 */
object LoggerFactory {

  /** Returns a logger named after the runtime class of `A`. */
  def getLogger[A: ClassTag]: Logger = {
    val runtimeClass = implicitly[ClassTag[A]].runtimeClass
    Logger(Underlying.getLogger(runtimeClass))
  }

  /** Returns a logger with the given name. */
  def getLogger(name: String): Logger = Logger(Underlying.getLogger(name))

  /** Returns a logger named after `clazz`. */
  def getLogger(clazz: Class[_]): Logger = Logger(Underlying.getLogger(clazz))

  /** Shorthand for [[getLogger]]. */
  def apply[A: ClassTag]: Logger = getLogger[A]

  /** Shorthand for [[getLogger]]. */
  def apply(name: String): Logger = getLogger(name)

  /** Shorthand for [[getLogger]]. */
  def apply(clazz: Class[_]): Logger = getLogger(clazz)
}
| mattroberts297/slf4s | src/main/scala/org/slf4s/LoggerFactory.scala | Scala | mit | 538 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.http.controllers
import org.joda.time.{DateTime, DateTimeZone, LocalDate, LocalDateTime}
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import play.api.libs.json.{JsSuccess, _}
/** Unit tests for the Joda-time JSON readers/writers in `RestFormats`. */
class RestFormatsSpec extends AnyWordSpecLike with Matchers {
  "localDateTimeRead" should {
    "return a LocalDateTime for correctly formatted JsString" in {
      // Round-trip: serialise the epoch, then expect to read the same value back.
      val testDate = new LocalDateTime(0)
      val jsValue = RestFormats.localDateTimeWrite.writes(testDate)

      val JsSuccess(result, _) = RestFormats.localDateTimeRead.reads(jsValue)

      result shouldBe testDate
    }

    "return a JsError for a json value that is not a JsString" in {
      RestFormats.localDateTimeRead.reads(Json.obj()) shouldBe a[JsError]
    }

    "return a JsError for a JsString that is not a well-formatted date" in {
      RestFormats.localDateTimeRead.reads(JsString("not a valid date")) shouldBe a[JsError]
    }
  }

  "dateTimeRead" should {
    "return a DateTime in zone UTC for correctly formatted JsString" in {
      val testDate = new DateTime(0)
      val jsValue = RestFormats.dateTimeWrite.writes(testDate)

      val JsSuccess(result, _) = RestFormats.dateTimeRead.reads(jsValue)

      // The reader normalises whatever zone was written to UTC.
      result shouldBe testDate.withZone(DateTimeZone.UTC)
    }

    "return a JsError for a json value that is not a JsString" in {
      RestFormats.dateTimeRead.reads(Json.obj()) shouldBe a[JsError]
    }

    "return a JsError for a JsString that is not a well-formatted date" in {
      RestFormats.dateTimeRead.reads(JsString("not a valid date")) shouldBe a[JsError]
    }
  }

  "localDateRead" should {
    "return a LocalDate in zone UTC for correctly formatted JsString" in {
      val json = JsString("1994-05-01")
      val expectedDate = new LocalDate(1994, 5, 1)

      val JsSuccess(result, _) = RestFormats.localDateRead.reads(json)

      result shouldBe expectedDate
    }

    "return a JsError for a json value that is not a JsString" in {
      RestFormats.localDateRead.reads(Json.obj()) shouldBe a[JsError]
    }

    "return a JsError for a JsString that is not a well-formatted date" in {
      RestFormats.localDateRead.reads(JsString("not a valid date")) shouldBe a[JsError]
    }

    // Syntactically valid pattern but impossible month/day values.
    "return a JsError for a JsString that is well formatted but has bad values" in {
      RestFormats.localDateRead.reads(JsString("1994-13-32")) shouldBe a[JsError]
    }
  }
}
| hmrc/http-verbs | http-verbs-common/src/test/scala/uk/gov/hmrc/http/controllers/RestFormatsSpec.scala | Scala | apache-2.0 | 3,036 |
package subscrobbler
import de.umass.lastfm._
import com.github.tototoshi.csv._
import java.io.File
import scala.collection.JavaConversions._
import scala.collection.mutable.Map
/**
 * Holds a slice of a Last.fm user's listening history and derives track
 * recommendations from it.
 */
class UserData {
  // Account name; defaults to the configured user for the no-arg constructor.
  private var userName: String = Conf.user
  // Recent listening history. NOTE(review): only the one-argument
  // constructor populates this; instances built via the no-arg constructor
  // keep it empty, so getRecommendation would yield an empty
  // recommendation — confirm that is intended.
  private var historySubset: Array[HistoryRow] = Array()

  /** Builds user data for `userName`, eagerly downloading a history subset. */
  def this(userName: String) {
    this()
    this.userName = userName
    this.historySubset = UserData.getSubset(userName)
  }

  /** Downloads the user's history subset and writes it to `csvFileName`. */
  def downloadHistory(csvFileName: String): Unit = {
    UserData.toCsv(
      UserData.getSubset(userName),
      csvFileName
    )
  }

  /**
   * Builds a recommendation: for each of the user's top tracks, fetches
   * similar tracks and returns their deduplicated union.
   */
  def getRecommendation: Recommendation = {
    val topTracks: Array[((String, String), Int)] = UserData.getTopTracks(this.historySubset)
    val resultSet: Set[(String, String)] =
      (for (track <- topTracks;
        strack <- UserData.getSimilarTracks(track._1)
      ) yield strack).toSet
    new Recommendation(resultSet.toArray)
  }
}
object UserData {
// Last.fm API key shared by every request issued from this object.
private val key: String = Conf.key
/**
 * Downloads the most recent listening history for `userName`: up to the
 * first three pages of 200 tracks each.
 *
 * Fixes two issues in the previous version: rows were accumulated with
 * `List :+` (quadratic in the number of tracks), and one page past the last
 * one consumed was always fetched from the API and then discarded.
 */
private def getSubset(userName: String): Array[HistoryRow] = {
  val rows = new scala.collection.mutable.ArrayBuffer[HistoryRow]()
  var page: PaginatedResult[Track] = User.getRecentTracks(userName, 1, 200, key)
  // Cap at three pages to keep the download small.
  val pageCount: Int = Math.min(3, page.getTotalPages())
  var i: Int = 1
  while (i <= pageCount) {
    for (track <- page) {
      rows += new HistoryRow(track)
    }
    i += 1
    // Only fetch the next page when another iteration will consume it.
    if (i <= pageCount) {
      page = User.getRecentTracks(userName, i, 200, key)
    }
  }
  rows.toArray
}
/**
 * Writes `history` to `csvFileName` as a semicolon-delimited CSV with a
 * header row. Does nothing when the history is empty.
 */
private def toCsv(history: Seq[HistoryRow], csvFileName: String): Unit = {
  if (history.nonEmpty) {
    val file = new File(csvFileName)
    // Semicolon-separated output instead of the library's comma default.
    implicit object SemicolonFormat extends DefaultCSVFormat {
      override val delimiter = ';'
    }
    val writer = CSVWriter.open(file)
    writer.writeRow(history.head.csvHeader())
    history.foreach(r => writer.writeRow(r.csvRow()))
    writer.close()
  }
}
/**
 * Counts plays per (artist, title) pair in `history` and returns the
 * `limit` most-played tracks, most frequent first.
 */
private def getTopTracks(history: Seq[HistoryRow], limit: Int = 10): Array[((String, String), Int)] = {
  val playCounts: Map[(String, String), Int] = Map()
  history.foreach { row =>
    val track = (row.artist, row.name)
    playCounts(track) = playCounts.getOrElse(track, 0) + 1
  }
  playCounts.toArray
    .sortWith(_._2 > _._2)
    .take(limit)
}
/**
 * Fetches up to `limit` (artist, title) pairs that Last.fm reports as
 * similar to the given `track`.
 */
private def getSimilarTracks(track: (String, String), limit: Int = 10): Array[(String, String)] = {
  val (artist, name) = track
  val similar = Track.getSimilar(artist, name, key)
  (for (t <- similar) yield (t.getArtist, t.getName)).take(limit).toArray
}
} | sentenzo/sub-scrobbler | src/main/scala/subscrobbler/UserData.scala | Scala | mit | 2,753 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.http
import akka.event.Logging
import whisk.common.Logging
import whisk.common.TransactionId
/**
 * Extends [[BasicHttpService]] with a standard "ping" endpoint that answers
 * health queries; intended for external monitoring systems.
 */
trait BasicRasService extends BasicHttpService {

  /** Health-check route: GET /ping always answers "pong". */
  val ping = path("ping") {
    get { complete("pong") }
  }

  override def routes(implicit transid: TransactionId) = ping

  /** Ping traffic is high-frequency noise, so it is logged at debug level only. */
  override def loglevelForRoute(route: String): Logging.LogLevel =
    if (route == "/ping") Logging.DebugLevel
    else super.loglevelForRoute(route)
}
| paulcastro/openwhisk | common/scala/src/main/scala/whisk/http/BasicRasService.scala | Scala | apache-2.0 | 1,416 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.tree.impurity.ImpurityCalculator
import org.apache.spark.mllib.tree.model.{InformationGainStats => OldInformationGainStats,
Node => OldNode, Predict => OldPredict, ImpurityStats}
/**
 * :: DeveloperApi ::
 * Decision tree node interface.
 */
@DeveloperApi
sealed abstract class Node extends Serializable {

  // TODO: Add aggregate stats (once available).  This will happen after we move the DecisionTree
  // code into the new API and deprecate the old API.  SPARK-3727

  /** Prediction a leaf node makes, or which an internal node would make if it were a leaf node */
  def prediction: Double

  /** Impurity measure at this node (for training data) */
  def impurity: Double

  /**
   * Statistics aggregated from training data at this node, used to compute prediction, impurity,
   * and probabilities.
   * For classification, the array of class counts must be normalized to a probability distribution.
   */
  private[ml] def impurityStats: ImpurityCalculator

  /** Recursive prediction helper method: walks the tree until a leaf is reached. */
  private[ml] def predictImpl(features: Vector): LeafNode

  /**
   * Get the number of nodes in tree below this node, including leaf nodes.
   * E.g., if this is a leaf, returns 0.  If both children are leaves, returns 2.
   */
  private[tree] def numDescendants: Int

  /**
   * Recursive print function.
   * @param indentFactor  The number of spaces to add to each level of indentation.
   */
  private[tree] def subtreeToString(indentFactor: Int = 0): String

  /**
   * Get depth of tree from this node.
   * E.g.: Depth 0 means this is a leaf node.  Depth 1 means 1 internal and 2 leaf nodes.
   */
  private[tree] def subtreeDepth: Int

  /**
   * Create a copy of this node in the old Node format, recursively creating child nodes as needed.
   * @param id  Node ID using old format IDs
   */
  private[ml] def toOld(id: Int): OldNode

  /**
   * Trace down the tree, and return the largest feature index used in any split.
   * @return  Max feature index used in a split, or -1 if there are no splits (single leaf node).
   */
  private[ml] def maxSplitFeatureIndex(): Int
}
private[ml] object Node {

  /**
   * Create a new Node from the old Node format, recursively creating child nodes as needed.
   */
  def fromOld(oldNode: OldNode, categoricalFeatures: Map[Int, Int]): Node = {
    if (oldNode.isLeaf) {
      // TODO: Once the implementation has been moved to this API, then include sufficient
      // statistics here.
      new LeafNode(
        prediction = oldNode.predict.predict,
        impurity = oldNode.impurity,
        impurityStats = null)
    } else {
      // A node without recorded stats is treated as having zero information gain.
      val gain = oldNode.stats.map(_.gain).getOrElse(0.0)
      new InternalNode(
        prediction = oldNode.predict.predict,
        impurity = oldNode.impurity,
        gain = gain,
        leftChild = fromOld(oldNode.leftNode.get, categoricalFeatures),
        rightChild = fromOld(oldNode.rightNode.get, categoricalFeatures),
        split = Split.fromOld(oldNode.split.get, categoricalFeatures),
        impurityStats = null)
    }
  }
}
/**
 * :: DeveloperApi ::
 * Decision tree leaf node.
 * @param prediction  Prediction this node makes
 * @param impurity  Impurity measure at this node (for training data)
 */
@DeveloperApi
final class LeafNode private[ml] (
    override val prediction: Double,
    override val impurity: Double,
    override private[ml] val impurityStats: ImpurityCalculator) extends Node {

  override def toString: String =
    s"LeafNode(prediction = $prediction, impurity = $impurity)"

  // A leaf predicts itself; no further traversal is needed.
  override private[ml] def predictImpl(features: Vector): LeafNode = this

  override private[tree] def numDescendants: Int = 0

  override private[tree] def subtreeToString(indentFactor: Int = 0): String = {
    val prefix: String = " " * indentFactor
    // Fix: emit a real newline, not the two-character sequence backslash-n.
    prefix + s"Predict: $prediction\n"
  }

  override private[tree] def subtreeDepth: Int = 0

  override private[ml] def toOld(id: Int): OldNode = {
    new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)),
      impurity, isLeaf = true, None, None, None, None)
  }

  // Leaves contain no splits.
  override private[ml] def maxSplitFeatureIndex(): Int = -1
}
/**
 * :: DeveloperApi ::
 * Internal Decision Tree node.
 * @param prediction  Prediction this node would make if it were a leaf node
 * @param impurity  Impurity measure at this node (for training data)
 * @param gain  Information gain value.
 *              Values < 0 indicate missing values; this quirk will be removed with future updates.
 * @param leftChild  Left-hand child node
 * @param rightChild  Right-hand child node
 * @param split  Information about the test used to split to the left or right child.
 */
@DeveloperApi
final class InternalNode private[ml] (
    override val prediction: Double,
    override val impurity: Double,
    val gain: Double,
    val leftChild: Node,
    val rightChild: Node,
    val split: Split,
    override private[ml] val impurityStats: ImpurityCalculator) extends Node {

  override def toString: String = {
    s"InternalNode(prediction = $prediction, impurity = $impurity, split = $split)"
  }

  override private[ml] def predictImpl(features: Vector): LeafNode = {
    if (split.shouldGoLeft(features)) {
      leftChild.predictImpl(features)
    } else {
      rightChild.predictImpl(features)
    }
  }

  override private[tree] def numDescendants: Int = {
    2 + leftChild.numDescendants + rightChild.numDescendants
  }

  override private[tree] def subtreeToString(indentFactor: Int = 0): String = {
    val prefix: String = " " * indentFactor
    // Fix: emit real newlines, not the two-character sequence backslash-n.
    prefix + s"If (${InternalNode.splitToString(split, left = true)})\n" +
      leftChild.subtreeToString(indentFactor + 1) +
      prefix + s"Else (${InternalNode.splitToString(split, left = false)})\n" +
      rightChild.subtreeToString(indentFactor + 1)
  }

  override private[tree] def subtreeDepth: Int = {
    1 + math.max(leftChild.subtreeDepth, rightChild.subtreeDepth)
  }

  override private[ml] def toOld(id: Int): OldNode = {
    // Old-format IDs double at each level, so very deep trees overflow Int.
    assert(id.toLong * 2 < Int.MaxValue, "Decision Tree could not be converted from new to old API"
      + " since the old API does not support deep trees.")
    new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)), impurity,
      isLeaf = false, Some(split.toOld), Some(leftChild.toOld(OldNode.leftChildIndex(id))),
      Some(rightChild.toOld(OldNode.rightChildIndex(id))),
      Some(new OldInformationGainStats(gain, impurity, leftChild.impurity, rightChild.impurity,
        new OldPredict(leftChild.prediction, prob = 0.0),
        new OldPredict(rightChild.prediction, prob = 0.0))))
  }

  override private[ml] def maxSplitFeatureIndex(): Int = {
    math.max(split.featureIndex,
      math.max(leftChild.maxSplitFeatureIndex(), rightChild.maxSplitFeatureIndex()))
  }
}
private object InternalNode {

  /**
   * Helper method for [[Node.subtreeToString()]].
   * @param split  Split to print
   * @param left  Indicates whether this is the part of the split going to the left,
   *              or that going to the right.
   */
  private def splitToString(split: Split, left: Boolean): String = {
    val featureStr = s"feature ${split.featureIndex}"
    split match {
      case contSplit: ContinuousSplit =>
        val comparison = if (left) "<=" else ">"
        s"$featureStr $comparison ${contSplit.threshold}"
      case catSplit: CategoricalSplit =>
        val categoriesStr = catSplit.leftCategories.mkString("{", ",", "}")
        val membership = if (left) "in" else "not in"
        s"$featureStr $membership $categoriesStr"
    }
  }
}
/**
 * Version of a node used in learning.  This uses vars so that we can modify nodes as we split the
 * tree by adding children, etc.
 *
 * For now, we use node IDs.  These will be kept internal since we hope to remove node IDs
 * in the future, or at least change the indexing (so that we can support much deeper trees).
 *
 * This node can either be:
 *  - a leaf node, with leftChild, rightChild, split set to null, or
 *  - an internal node, with all values set
 *
 * @param id  We currently use the same indexing as the old implementation in
 *            [[org.apache.spark.mllib.tree.model.Node]], but this will change later.
 * @param isLeaf  Indicates whether this node will definitely be a leaf in the learned tree,
 *                so that we do not need to consider splitting it further.
 * @param stats  Impurity statistics for this node.
 */
private[tree] class LearningNode(
    var id: Int,
    var leftChild: Option[LearningNode],
    var rightChild: Option[LearningNode],
    var split: Option[Split],
    var isLeaf: Boolean,
    var stats: ImpurityStats) extends Serializable {

  /**
   * Convert this [[LearningNode]] to a regular [[Node]], and recurse on any children.
   */
  def toNode: Node = leftChild match {
    case Some(left) =>
      // An internal node must have both children, a split and valid statistics.
      assert(rightChild.nonEmpty && split.nonEmpty && stats != null,
        "Unknown error during Decision Tree learning. Could not convert LearningNode to Node.")
      new InternalNode(stats.impurityCalculator.predict, stats.impurity, stats.gain,
        left.toNode, rightChild.get.toNode, split.get, stats.impurityCalculator)
    case None =>
      // Keep the same behavior as the old mllib.DecisionTreeModel:
      // invalid statistics are reported as impurity -1.0.
      val leafImpurity = if (stats.valid) stats.impurity else -1.0
      new LeafNode(stats.impurityCalculator.predict, leafImpurity, stats.impurityCalculator)
  }
}
private[tree] object LearningNode {

  /** Create a node with some of its fields set. */
  def apply(
      id: Int,
      isLeaf: Boolean,
      stats: ImpurityStats): LearningNode = {
    new LearningNode(id, None, None, None, false, stats)
  }

  /** Create an empty node with the given node index.  Values must be set later on. */
  def emptyNode(nodeIndex: Int): LearningNode = {
    new LearningNode(nodeIndex, None, None, None, false, null)
  }

  // The below indexing methods were copied from spark.mllib.tree.model.Node.
  // Nodes are numbered with a 1-based breadth-first scheme: root = 1,
  // children of node i are 2*i (left) and 2*i + 1 (right).

  /**
   * Return the index of the left child of this node.
   */
  def leftChildIndex(nodeIndex: Int): Int = nodeIndex << 1

  /**
   * Return the index of the right child of this node.
   */
  def rightChildIndex(nodeIndex: Int): Int = (nodeIndex << 1) + 1

  /**
   * Get the parent index of the given node, or 0 if it is the root.
   */
  def parentIndex(nodeIndex: Int): Int = nodeIndex >> 1

  /**
   * Return the level of a tree which the given node is in.
   */
  def indexToLevel(nodeIndex: Int): Int = if (nodeIndex == 0) {
    throw new IllegalArgumentException("0 is not a valid node index.")
  } else {
    java.lang.Integer.numberOfTrailingZeros(java.lang.Integer.highestOneBit(nodeIndex))
  }

  /**
   * Returns true if this is a left child.
   * Note: Returns false for the root.
   */
  def isLeftChild(nodeIndex: Int): Boolean = nodeIndex > 1 && nodeIndex % 2 == 0

  /**
   * Return the maximum number of nodes which can be in the given level of the tree.
   * @param level  Level of tree (0 = root).
   */
  def maxNodesInLevel(level: Int): Int = 1 << level

  /**
   * Return the index of the first node in the given level.
   * @param level  Level of tree (0 = root).
   */
  def startIndexInLevel(level: Int): Int = 1 << level

  /**
   * Traces down from a root node to get the node with the given node index.
   * This assumes the node exists.
   */
  def getNode(nodeIndex: Int, rootNode: LearningNode): LearningNode = {
    var tmpNode: LearningNode = rootNode
    var levelsToGo = indexToLevel(nodeIndex)
    while (levelsToGo > 0) {
      // Bit (levelsToGo - 1) of nodeIndex selects the branch at this depth: 0 = left, 1 = right.
      // Fix: leftChild/rightChild are Option[LearningNode]; the previous
      // `.asInstanceOf[LearningNode]` cast the Option itself and would always
      // throw a ClassCastException. Unwrap with .get (existence is assumed by contract).
      if ((nodeIndex & (1 << levelsToGo - 1)) == 0) {
        tmpNode = tmpNode.leftChild.get
      } else {
        tmpNode = tmpNode.rightChild.get
      }
      levelsToGo -= 1
    }
    tmpNode
  }
}
| practice-vishnoi/dev-spark-1 | mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala | Scala | apache-2.0 | 12,912 |
package com.kelveden.restdriverscala
import org.scalatest.{FunSpec, Matchers}
import com.github.restdriver.serverdriver.RestServerDriver._
import PortFinder._
/**
 * End-to-end tests for the RestDriven trait: each test registers an
 * expectation on the embedded rest-driver server and then issues a real HTTP
 * request against it, asserting on the stubbed status, body or headers.
 * The status code 666 is an arbitrary marker value used to prove the stub
 * (rather than anything else) answered the request.
 */
class RestDrivenTest extends FunSpec with Matchers with RestDriven with RestDrivenMatchers {

  // Bind the driver to a free ephemeral port so parallel runs do not clash.
  override val restDriverPort = getFreePort
  val baseUrl = s"http://localhost:$restDriverPort"

  // Verifies that requests can be stubbed per HTTP method, with optional
  // body, headers and query parameters.
  describe("expected request builder") {
    it("can build a GET request") {
      expect(onGetTo("/my/url"), respondWith(666))
      val response = get(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can build a GET request with body") {
      expect(
        onGetTo("/my/url", entity = ("mycontent", "text/plain")),
        respondWith(666)
      )
      val response = get(s"$baseUrl/my/url", body("mycontent", "text/plain"))
      response should haveStatus(666)
    }
    it("can build a GET request with headers") {
      expect(
        onGetTo("/my/url", headers = Map("myheader" -> "myvalue")),
        respondWith(666)
      )
      val response = get(s"$baseUrl/my/url", header("myheader", "myvalue"))
      response should haveStatus(666)
    }
    it("can build a GET request with params") {
      expect(
        onGetTo("/my/url", params = Map("myparam" -> "myvalue")),
        respondWith(666)
      )
      val response = get(s"$baseUrl/my/url?myparam=myvalue")
      response should haveStatus(666)
    }
    it("can build a PUT request") {
      expect(onPutTo("/my/url"), respondWith(666))
      val response = put(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can build a PUT request with body") {
      expect(
        onPutTo("/my/url", entity = ("mycontent", "text/plain")),
        respondWith(666)
      )
      val response = put(s"$baseUrl/my/url", body("mycontent", "text/plain"))
      response should haveStatus(666)
    }
    it("can build a PUT request with headers") {
      expect(
        onPutTo("/my/url", headers = Map("myheader" -> "myvalue")),
        respondWith(666)
      )
      val response = put(s"$baseUrl/my/url", header("myheader", "myvalue"))
      response should haveStatus(666)
    }
    it("can build a PUT request with params") {
      expect(
        onPutTo("/my/url", params = Map("myparam" -> "myvalue")),
        respondWith(666)
      )
      val response = put(s"$baseUrl/my/url?myparam=myvalue")
      response should haveStatus(666)
    }
    it("can build a POST request") {
      expect(onPostTo("/my/url"), respondWith(666))
      val response = post(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can build a POST request with body") {
      expect(
        onPostTo("/my/url", entity = ("mycontent", "text/plain")),
        respondWith(666)
      )
      val response = post(s"$baseUrl/my/url", body("mycontent", "text/plain"))
      response should haveStatus(666)
    }
    it("can build a POST request with headers") {
      expect(
        onPostTo("/my/url", headers = Map("myheader" -> "myvalue")),
        respondWith(666)
      )
      val response = post(s"$baseUrl/my/url", header("myheader", "myvalue"))
      response should haveStatus(666)
    }
    it("can build a POST request with params") {
      expect(
        onPostTo("/my/url", params = Map("myparam" -> "myvalue")),
        respondWith(666)
      )
      val response = post(s"$baseUrl/my/url?myparam=myvalue")
      response should haveStatus(666)
    }
    it("can build a DELETE request") {
      expect(onDeleteTo("/my/url"), respondWith(666))
      val response = delete(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can build a DELETE request with body") {
      expect(
        onDeleteTo("/my/url", entity = ("mycontent", "text/plain")),
        respondWith(666)
      )
      val response = delete(s"$baseUrl/my/url", body("mycontent", "text/plain"))
      response should haveStatus(666)
    }
    it("can build a DELETE request with headers") {
      expect(
        onDeleteTo("/my/url", headers = Map("myheader" -> "myvalue")),
        respondWith(666)
      )
      val response = delete(s"$baseUrl/my/url", header("myheader", "myvalue"))
      response should haveStatus(666)
    }
    it("can build a DELETE request with params") {
      expect(
        onDeleteTo("/my/url", params = Map("myparam" -> "myvalue")),
        respondWith(666)
      )
      val response = delete(s"$baseUrl/my/url?myparam=myvalue")
      response should haveStatus(666)
    }
    // onRequestTo accepts the HTTP verb as a string, for methods without a
    // dedicated builder (e.g. OPTIONS).
    it("can build a request with ad-hoc method") {
      expect(onRequestTo("OPTIONS", "/my/url"), respondWith(666))
      val response = options(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can build an ad-hoc request with body") {
      expect(
        onRequestTo("PUT", "/my/url", entity = ("mycontent", "text/plain")),
        respondWith(666)
      )
      val response = put(s"$baseUrl/my/url", body("mycontent", "text/plain"))
      response should haveStatus(666)
    }
    it("can build an ad-hoc request with headers") {
      expect(
        onRequestTo("GET", "/my/url", headers = Map("myheader" -> "myvalue")),
        respondWith(666)
      )
      val response = get(s"$baseUrl/my/url", header("myheader", "myvalue"))
      response should haveStatus(666)
    }
    it("can build an ad-hoc request with params") {
      expect(
        onRequestTo("GET", "/my/url", params = Map("myparam" -> "myvalue")),
        respondWith(666)
      )
      val response = get(s"$baseUrl/my/url?myparam=myvalue")
      response should haveStatus(666)
    }
  }

  // Verifies the response side of the stub: status, entity and headers.
  describe("expected response builder") {
    it("can be used to specify the status code to respond with") {
      expect(onGetTo("/my/url"), respondWith(666))
      val response = get(s"$baseUrl/my/url")
      response should haveStatus(666)
    }
    it("can be used to specify the entity to respond with") {
      expect(
        onGetTo("/my/url"),
        respondWith(666, entity = ("mycontent", "text/plain"))
      )
      val response = get(s"$baseUrl/my/url")
      response should haveBodyContent("mycontent")
    }
    it("can be used to specify the headers to include in the response") {
      expect(
        onGetTo("/my/url"),
        respondWith(666, headers = Map("myheader" -> "myvalue"))
      )
      val response = get(s"$baseUrl/my/url")
      response should haveHeader("myheader", "myvalue")
    }
  }
}
| kelveden/rest-driver-scala | src/test/scala/com/kelveden/restdriverscala/RestDrivenTest.scala | Scala | mit | 6,454 |
package controllers.api
import play.api.libs.json._
import playground.json.Implicits._
import models._
import models.requests._
/**
 * Play JSON (de)serializers for the API layer, derived with the `Json.writes`
 * / `Json.reads` macros. Import this object's members to get the implicit
 * formats in scope for controllers.
 */
object JsonImplicits {
  // User resource: response serializer and create/update request parsers.
  implicit val userWrites = Json.writes[User]
  implicit val userCreateReads = Json.reads[UserCreate]
  implicit val userUpdateReads = Json.reads[UserUpdate]
  // Admin resource: response serializer and create/update request parsers.
  implicit val adminWrites = Json.writes[Admin]
  implicit val adminCreateReads = Json.reads[AdminCreate]
  implicit val adminUpdateReads = Json.reads[AdminUpdate]
}
| ybr/PlayBootstrap | app/controllers/api/JsonImplicits.scala | Scala | mit | 480 |
// Author: Olivier Chafik (http://ochafik.com)
package scalaxy.fastcaseclasses
import scala.tools.nsc.Global
import scala.tools.nsc.Phase
import scala.tools.nsc.plugins.PluginComponent
import scala.tools.nsc.symtab.Flags
/**
 * Compiler-plugin phase that rewrites case-class trees before they are typed:
 * it is scheduled between the parser and the namer so the transformation sees
 * raw, untyped syntax trees.
 */
class UntypedFastCaseClassesComponent(val global: Global)
    extends PluginComponent
    with UntypedFastCaseClassesTransforms {

  import global._

  override val phaseName = "scalaxy-fastcaseclasses"
  // Run right after parsing, before the namer assigns symbols.
  override val runsAfter = List("parser")
  override val runsBefore = List("namer")

  override def info(pos: Position, msg: String) = reporter.info(pos, msg, force = verbose)

  def newPhase(prev: Phase): StdPhase = new StdPhase(prev) {
    def apply(unit: CompilationUnit) {
      // Fresh-name generation is delegated to the unit so names stay unique per file.
      unit.body = transformUntyped(unit.body, name => unit.fresh.newName(name))
    }
  }
}
| nativelibs4java/Scalaxy | Obsolete/FastCaseClasses/src/main/scala/scalaxy/UntypedFastCaseClassesComponent.scala | Scala | bsd-3-clause | 783 |
// Resolve-test fixture: the reference annotated with `/* file: this */` is
// expected to resolve within this file (the local `def f`), despite the
// imported rename of ArrayBuffer.newBuilder to the same name.
import collection.mutable.ArrayBuffer.{newBuilder => f}
println(/* file: this */ f)
def f = {}
| ilinum/intellij-scala | testdata/resolve2/import/clash/Function3.scala | Scala | apache-2.0 | 97 |
package org.scalamu.core
package detection
import java.nio.file.Path
import org.scalamu.core.api.ClassInfo
/** File finder that collects `.class` files, loading each one as a [[ClassInfo]]. */
class ClassFileFinder extends CollectingFileFinder[ClassInfo] {
  // Only paths recognised as compiled class files are considered.
  override def predicate: (Path) => Boolean = path => path.isClassFile

  // Paths that fail to load simply yield None and are skipped by the collector.
  override def fromPath: (Path) => Option[ClassInfo] = path => ClassInfo.loadFromPath(path)
}
| sugakandrey/scalamu | core/src/main/scala/org/scalamu/core/detection/ClassFileFinder.scala | Scala | gpl-3.0 | 323 |
package fs2
package interop
import _root_.scalaz.{ Equal, Monoid, Semigroup }
import _root_.scalaz.std.map._
import _root_.scalaz.std.vector._
import fs2.util.{ Catchable, Free }
/** Scalaz interop: enrichments that let fs2 streams use scalaz type classes. */
package object scalaz extends Instances with TaskAsyncInstances {
  // Alternative instance orientation; import `reverse._` explicitly when needed.
  object reverse extends ReverseInstances
  /** Adds scalaz `Equal`/`Monoid`/`Semigroup`-based combinators to any `Stream`. */
  implicit class StreamScalazOps[F[_], A](val self: Stream[F, A]) extends AnyVal {
    /** Emits only elements that differ (by `Equal`) from their immediate predecessor. */
    def changesEq(implicit eq: Equal[A]): Stream[F, A] =
      self.filterWithPrevious((x, y) => !eq.equal(x, y))
    /** Like `changesEq`, but compares the projections `f(x)` instead of the elements. */
    def changesByEq[B](f: A => B)(implicit eq: Equal[B]): Stream[F, A] =
      self.filterWithPrevious((x, y) => !eq.equal(f(x), f(y)))
    /** Folds the stream by mapping each element into the monoid `B` and appending. */
    def foldMap[B](f: A => B)(implicit M: Monoid[B]): Stream[F, B] =
      self.fold(M.zero)((b, a) => M.append(b, f(a)))
    /** Combines all elements with the monoid, starting from its zero. */
    def foldMonoid(implicit M: Monoid[A]): Stream[F, A] =
      self.fold(M.zero)(M.append(_, _))
    /** Reduces with the semigroup; emits nothing for an empty stream (no zero available). */
    def foldSemigroup(implicit S: Semigroup[A]): Stream[F, A] =
      self.reduce(S.append(_, _))
    /** Runs the stream, folding mapped elements into the monoid, as a `Free` program. */
    def runFoldMapFree[B](f: A => B)(implicit M: Monoid[B]): Free[F, B] =
      self.runFoldFree(M.zero)((b, a) => M.append(b, f(a)))
    /** Groups by key `f`, folding each group's `g`-projections with the monoid. */
    def runGroupByFoldMapFree[K, B: Monoid](f: A => K)(g: A => B): Free[F, Map[K, B]] =
      runFoldMapFree(a => Map(f(a) -> g(a)))
    /** Groups by key `f`, combining each group's elements with their monoid. */
    def runGroupByFoldMonoidFree[K](f: A => K)(implicit M: Monoid[A]): Free[F, Map[K, A]] =
      runFoldMapFree(a => Map(f(a) -> a))
    /** Groups by key `f`, collecting each group's elements into a `Vector`. */
    def runGroupByFree[K](f: A => K)(implicit M: Monoid[A]): Free[F, Map[K, Vector[A]]] =
      runGroupByFoldMapFree(f)(a => Vector(a))
    // The variants below interpret the Free programs using a Catchable effect F.
    def runFoldMap[B](f: A => B)(implicit F: Catchable[F], M: Monoid[B]): F[B] =
      runFoldMapFree(f).run
    def runGroupByFoldMap[K, B: Monoid](f: A => K)(g: A => B)(implicit F: Catchable[F]): F[Map[K, B]] =
      runGroupByFoldMapFree(f)(g).run
    def runGroupByFoldMonoid[K](f: A => K)(implicit F: Catchable[F], M: Monoid[A]): F[Map[K, A]] =
      runGroupByFoldMonoidFree(f).run
    def runGroupBy[K](f: A => K)(implicit F: Catchable[F], M: Monoid[A]): F[Map[K, Vector[A]]] =
      runGroupByFree(f).run
  }
}
| functional-streams-for-scala/fs2-scalaz | src/main/scala/fs2/interop/scalaz/scalaz.scala | Scala | mit | 2,017 |
package org.opensplice.mobile.dev.dadds
import org.omg.dds.sub.{ DataReader => DDSDataReader }
import org.omg.dds.sub.DataReader.{ Selector => DDSSelector }
import org.omg.dds.sub.InstanceState
import org.omg.dds.sub.SampleState
import org.omg.dds.sub.ViewState
object Selector {

  /**
   * Builds a DDS selector for `reader` whose data-state filter matches the
   * given instance, view and sample states.
   *
   * For each of the three state lists, `null` or an empty list is interpreted
   * as "match any state" (the DDS wildcard); otherwise each listed state is
   * added to the filter.
   *
   * Note: the object-scope imports duplicated the file-level imports exactly
   * and have been removed; the file-level imports remain in effect.
   *
   * @param instanceStates instance states to match, or null/empty for any
   * @param viewStates     view states to match, or null/empty for any
   * @param sampleStates   sample states to match, or null/empty for any
   * @param reader         reader the selector is created from
   */
  def apply[T]( instanceStates: List[InstanceState],
                viewStates: List[ViewState],
                sampleStates: List[SampleState],
                reader: DDSDataReader[T] ): DDSSelector[T] = {
    val selector = reader.select()
    val dataState = reader.getParent().createDataState()

    if ( instanceStates == null || instanceStates.isEmpty ) {
      dataState.withAnyInstanceState()
    } else {
      instanceStates.foreach( state => dataState.`with`( state ) )
    }

    if ( viewStates == null || viewStates.isEmpty ) {
      dataState.withAnyViewState()
    } else {
      viewStates.foreach( state => dataState.`with`( state ) )
    }

    if ( sampleStates == null || sampleStates.isEmpty ) {
      dataState.withAnySampleState()
    } else {
      sampleStates.foreach( state => dataState.`with`( state ) )
    }

    selector.dataState( dataState )
  }
}
| levitha/levitha | src/main/scala/org/opensplice/mobile/dev/dadds/Selector.scala | Scala | apache-2.0 | 1,402 |
package dsmoq.taskServer

/** Thrown when a task references a storage bucket that does not exist. */
class BucketNotFoundException(message: String) extends RuntimeException(message)
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.audit
import java.time.ZonedDateTime
import org.apache.accumulo.core.client.Connector
import org.apache.accumulo.core.security.Authorizations
import org.locationtech.geomesa.index.audit.QueryEvent
import org.locationtech.geomesa.security.AuthorizationsProvider
import org.locationtech.geomesa.utils.audit._
import scala.reflect.ClassTag
/**
 * Audit service backed by an Accumulo table.
 *
 * Table writes are optional (enabled by the `write` flag); reads always go
 * through an [[AccumuloEventReader]]. Events are additionally forwarded to
 * the mixed-in [[AuditLogger]] regardless of the write setting.
 *
 * @param connector    Accumulo connector used for both reads and writes
 * @param authProvider supplies the caller's current scan authorizations
 * @param table        name of the audit table
 * @param write        whether events should be persisted to the table
 */
class AccumuloAuditService(connector: Connector,
                           authProvider: AuthorizationsProvider,
                           val table: String,
                           write: Boolean) extends AuditWriter with AuditReader with AuditLogger {

  // Writer is only instantiated when table writes are enabled; null otherwise.
  private val writer = if (write) { new AccumuloEventWriter(connector, table) } else { null }
  private val reader = new AccumuloEventReader(connector, table)

  override def writeEvent[T <: AuditedEvent](event: T)(implicit ct: ClassTag[T]): Unit = {
    if (writer != null) {
      writer.queueStat(event)(transform(ct.runtimeClass.asInstanceOf[Class[T]]))
    }
    // Always log the event, even when table persistence is disabled.
    super.writeEvent(event)
  }

  override def getEvents[T <: AuditedEvent](typeName: String,
                                            dates: (ZonedDateTime, ZonedDateTime))
                                           (implicit ct: ClassTag[T]): Iterator[T] = {
    import scala.collection.JavaConverters._
    // Scan with whatever authorizations the provider grants at call time.
    val auths = new Authorizations(authProvider.getAuthorizations.asScala: _*)
    val iter = reader.query(typeName, dates, auths)(transform(ct.runtimeClass.asInstanceOf[Class[T]]))
    iter.asInstanceOf[Iterator[T]]
  }

  override def close(): Unit = if (writer != null) { writer.close() }

  // note: only query audit events are currently supported
  // Maps the event class to its serialization transform; unknown types fail fast.
  private def transform[T <: AuditedEvent](clas: Class[T]): AccumuloEventTransform[T] = {
    val transform = clas match {
      case c if classOf[QueryEvent].isAssignableFrom(c) => AccumuloQueryEventTransform
      case c if classOf[SerializedQueryEvent].isAssignableFrom(c) => SerializedQueryEventTransform
      case _ => throw new NotImplementedError(s"Event of type '${clas.getName}' is not supported")
    }
    transform.asInstanceOf[AccumuloEventTransform[T]]
  }
}
object AccumuloAuditService {
  // Identifier under which this audit store implementation is registered.
  val StoreType = "accumulo-vector"
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/audit/AccumuloAuditService.scala | Scala | apache-2.0 | 2,678 |
package no.skytteren.elasticala.mapping
import scala.util.matching.Regex
/**
* @author skytteren
*/
// Marker for anything that can be used as an analyzer in a mapping.
trait Analyzer
// An analyzer referenced by the name Elasticsearch knows it under.
abstract class DefinedAnalyzer(val name: String) extends Analyzer
// Built-in Elasticsearch analyzers, identified by their well-known names.
case object StandardAnalyzer extends DefinedAnalyzer("standard")
case object SimpleAnalyzer extends DefinedAnalyzer("simple")
case object WhitespaceAnalyzer extends DefinedAnalyzer("whitespace")
case object StopAnalyzer extends DefinedAnalyzer("stop")
case object KeywordAnalyzer extends DefinedAnalyzer("keyword")
case object PatternAnalyzer extends DefinedAnalyzer("pattern")
// Language-specific analyzer; the language string itself is the analyzer name.
case class LanguageAnalyzers(language: String) extends DefinedAnalyzer(language)
case object SnowballAnalyzer extends DefinedAnalyzer("snowball")
// User-defined analyzer declared in the index settings under `name`.
case class CustomAnalyzer(override val name: String) extends DefinedAnalyzer(name)
/*
trait AnalyzerDefinition{}
// Tokenizers?
//Standard Tokenizer with the Standard Token Filter, Lower Case Token Filter, and Stop Token Filter.
case class StandardAnalyzerDefinition(stopwords: Seq[String] = Nil, maxTokenLength: Int = 255) extends AnalyzerDefinition
//Lower Case Tokenizer.
case object SimpleAnalyzerDefinition extends AnalyzerDefinition
// Whitespace Tokenizer.
case object WhitespaceAnalyzerDefinition extends AnalyzerDefinition
// Lower Case Tokenizer, with Stop Token Filter.
case class StopAnalyzerDefinition(stopwords: Seq[String] = Nil, stopwordsPath: String = "") extends AnalyzerDefinition
// it might make more sense to simply mark the field as not_analyzed
case object KeywordAnalyzerDefinition extends AnalyzerDefinition
case class PatternAnalyzerDefinition(lowercase: Boolean = true, tokenSeparatorPattern: Regex = "\\\\W".r, flagsForRegex: String, stopwords: List[String] = Nil) extends AnalyzerDefinition
//case class LanguageAnalyzers extends Analyzer
case object SnowballAnalyzerDefinition extends AnalyzerDefinition
//case class CustomAnalyzerDefinition() extends AnalyzerDefinition
*/ | skytteren/elasticala | src/main/scala/no/skytteren/elasticala/mapping/Analyzers.scala | Scala | apache-2.0 | 1,915 |
package db
import org.scalatest.{FunSpec, Matchers}
import org.junit.Assert._
import java.util.UUID
/** Persistence tests for organization domains: creation, lookup and soft deletion. */
class OrganizationDomainsDaoSpec extends FunSpec with Matchers with util.TestApplication {

  it("create") {
    // Random domain name keeps repeated test runs independent of each other.
    val domainName = UUID.randomUUID.toString + ".org"
    val org = Util.createOrganization()
    val domain = organizationDomainsDao.create(Util.createdBy, org, domainName)
    domain.domain should be(domainName)
    organizationDomainsDao.findAll(guid = Some(domain.guid)).map(_.guid) should be(Seq(domain.guid))
    // Soft-deleted domains must no longer be returned by findAll.
    organizationDomainsDao.softDelete(Util.createdBy, domain)
    organizationDomainsDao.findAll(guid = Some(domain.guid)) should be(Seq.empty)
  }

  it("findAll") {
    val domainName = UUID.randomUUID.toString + ".org"
    val org = Util.createOrganization()
    val domain = organizationDomainsDao.create(Util.createdBy, org, domainName)
    organizationDomainsDao.findAll(organizationGuid = Some(org.guid)).map(_.guid) should be(Seq(domain.guid))
    // An unknown organization guid must match nothing.
    organizationDomainsDao.findAll(organizationGuid = Some(UUID.randomUUID)).map(_.guid) should be(Seq.empty)
  }
}
| Seanstoppable/apidoc | api/test/db/OrganizationDomainDaoSpec.scala | Scala | mit | 1,099 |
package com.cloudray.scalapress.search.widget
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{ModelAttribute, RequestMethod, RequestMapping}
import scala.Array
import com.cloudray.scalapress.widgets.{WidgetDao, Widget}
import org.springframework.ui.ModelMap
import com.cloudray.scalapress.widgets.controller.WidgetEditController
import com.cloudray.scalapress.theme.MarkupDao
import com.cloudray.scalapress.item.controller.admin.MarkupPopulator
import org.springframework.beans.factory.annotation.Autowired
/** @author Stephen Samuel */
@Controller
@RequestMapping(Array("backoffice/search/widget/results/{id}"))
// NOTE(review): @Autowired here annotates the class, not the primary constructor.
// Spring conventionally expects it on the constructor (`class X @Autowired() (...)`);
// single-constructor injection may still work — confirm against the Spring version in use.
@Autowired
class SearchResultsWidgetController(val markupDao: MarkupDao, widgetDao: WidgetDao)
  extends WidgetEditController(widgetDao: WidgetDao) with MarkupPopulator {

  // Renders the backoffice edit form for a search-results widget (GET, HTML only).
  @RequestMapping(method = Array(RequestMethod.GET), produces = Array("text/html"))
  override def edit(@ModelAttribute("widget") w: Widget, model: ModelMap) = "admin/search/widget/results.vm"
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/search/widget/SearchResultsWidgetController.scala | Scala | apache-2.0 | 1,033 |
/*
* =========================================================================================
* Copyright © 2015 the khronus project <https://github.com/hotels-tech/khronus>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package com.searchlight.khronus.service
import akka.actor._
import akka.testkit._
import com.searchlight.khronus.service.HandShakeProtocol.Register
import com.typesafe.config.ConfigFactory
import org.scalatest._
import spray.http.HttpResponse
import spray.http.StatusCodes
import spray.httpx.RequestBuilding
import spray.routing.HttpServiceActor
class KhronusHandlerSpec extends TestKitBase with ImplicitSender with FunSpecLike with Matchers with RequestBuilding with BeforeAndAfterAll {

  // Dedicated actor system for this spec; the config mirrors a minimal Khronus master setup.
  implicit lazy val system: ActorSystem = ActorSystem("khronus-handler-spec", ConfigFactory.parseString(
    """
      |akka {
      |  loglevel = INFO
      |  stdout-loglevel = DEBUG
      |  event-handlers = ["akka.event.Logging$DefaultLogger"]
      |  actor {
      |    provider = "akka.actor.LocalActorRefProvider"
      |  }
      |}
      |
      |khronus {
      |  master {
      |    tick-expression = "0/1 * * * * ?"
      |    discovery-start-delay = 1 second
      |    discovery-interval = 2 seconds
      |  }
      |  internal-metrics {
      |    enabled = false
      |  }
      |}
    """.stripMargin))

  override def afterAll(): Unit = {
    // Tear the actor system down once every example has run.
    TestKit.shutdownActorSystem(system)
  }

  it("returns 404 Not Found when nothing has been registered") {
    new Fixture {
      master ! Get("/endpoint")
      val reply = expectMsgType[HttpResponse]
      reply.status shouldBe StatusCodes.NotFound
    }
  }

  it("can access an endpoint after registering it") {
    new Fixture {
      val dummy = TestActorRef(new DummyEndpoint)
      master ! Register("endpoint", dummy)
      master ! Get("/endpoint")
      val reply = expectMsgType[HttpResponse]
      reply.status shouldBe StatusCodes.OK
      reply.entity.data.asString shouldBe "i'm alive"
    }
  }

  it("can register multiple endpoints") {
    new Fixture {
      val first = TestActorRef(new DummyEndpoint("1"))
      master ! Register("endpoint1", first)
      val second = TestActorRef(new DummyEndpoint("2"))
      master ! Register("endpoint2", second)
      // Each registered path must route to its own endpoint.
      for ((path, expectedBody) <- Seq("/endpoint1" -> "1", "/endpoint2" -> "2")) {
        master ! Get(path)
        val reply = expectMsgType[HttpResponse]
        reply.status shouldBe StatusCodes.OK
        reply.entity.data.asString shouldBe expectedBody
      }
    }
  }

  // Every example gets a fresh handler actor under test.
  trait Fixture {
    val master = TestActorRef(new KhronusHandler)
  }
}
/** Minimal spray endpoint that answers every request with a fixed body. */
class DummyEndpoint(response: String = "i'm alive") extends HttpServiceActor {
  // All routes complete with the configured response string.
  def receive = runRoute(complete(response))
}
| despegar/khronus | khronus-core/src/test/scala/com/searchlight/khronus/service/KhronusHandlerSpec.scala | Scala | apache-2.0 | 3,460 |
package notebook
import org.junit.runner.RunWith
import org.specs2.mutable._
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class JobTrackingTests extends Specification {

  "encodes jobGroup" in {
    val groupId = JobTracking.jobGroupId(cellId = "abc")
    groupId must beEqualTo("cell-abc")
  }

  "decodes cellId" in {
    val cellId = JobTracking.toCellId(Option("cell-abc"))
    cellId must beEqualTo(Some("abc"))
  }

  "encodes jobDescription" in {
    // must remove all special chars, especially " and \\n, so job description can be enclosed inside " ".
    val code = """val x = sqlContext.sql("select * from users")
                 |.collect()
                 |.map { x: Row => s"$x\\"" }""".stripMargin
    val expected = "run-1234567: val x = sqlContext.sql('select * from users').collect().map { x: Row = s'x'' }"
    val actual = JobTracking.jobDescription(cellCode = code, runId = 1234567)
    actual must beEqualTo(expected)
  }

  "decodes runId" in {
    val runId = JobTracking.getCellRunId(Option("run-1234567: val abc=rdd map x"))
    runId should beEqualTo(Some(1234567L))
  }
}
| radek1st/spark-notebook | test/notebook/JobTrackingTests.scala | Scala | apache-2.0 | 1,024 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert
import java.util.ServiceLoader
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypeLoader
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.JavaConversions._
/**
 * Simplified API to build SimpleFeatureType converters.
 *
 * Converter factories are discovered once via SPI; the `build` overloads resolve
 * the SimpleFeatureType and/or converter config by name before delegating.
 */
object SimpleFeatureConverters extends LazyLogging {

  // All converter factories discovered on the classpath via java.util.ServiceLoader.
  private[convert] val providers = {
    val discovered = ServiceLoader.load(classOf[SimpleFeatureConverterFactory[_]]).toList
    logger.debug(s"Found ${discovered.size} SPI providers for ${classOf[SimpleFeatureConverterFactory[_]].getName}" +
        s": ${discovered.map(_.getClass.getName).mkString(", ")}")
    discovered
  }

  def build[I](typeName: String, converterName: String): SimpleFeatureConverter[I] =
    SimpleFeatureTypeLoader.sftForName(typeName) match {
      case Some(sft) => build[I](sft, converterName)
      case None      => throw new IllegalArgumentException(s"Unable to load SFT for typeName $typeName")
    }

  def build[I](sft: SimpleFeatureType, converterName: String): SimpleFeatureConverter[I] =
    ConverterConfigLoader.configForName(converterName) match {
      case Some(conf) => build[I](sft, conf)
      case None       =>
        throw new IllegalArgumentException(s"Unable to find converter config for converterName $converterName")
    }

  def build[I](sft: SimpleFeatureType, converterConf: Config): SimpleFeatureConverter[I] =
    providers.find(_.canProcess(converterConf)) match {
      case Some(factory) => factory.buildConverter(sft, converterConf).asInstanceOf[SimpleFeatureConverter[I]]
      case None          => throw new IllegalArgumentException(s"Cannot find factory for ${sft.getTypeName}")
    }
}
| ronq/geomesa | geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert/SimpleFeatureConverters.scala | Scala | apache-2.0 | 2,132 |
package org.jetbrains.plugins.scala.worksheet
import com.intellij.ide.scratch.ScratchFileCreationHelper
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.project.Project
import org.apache.commons.lang3.StringUtils
import org.jetbrains.plugins.scala.{Scala3Language, ScalaLanguage}
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.worksheet.ScalaScratchFileCreationHelper.worksheetScratchFileType
final class ScalaScratchFileCreationHelper extends ScratchFileCreationHelper {

  /**
   * Rewrites the creation context so that new Scala scratch files become worksheets
   * when the corresponding project setting is enabled.
   *
   * @return true if the context was handled here, false to let the platform proceed
   */
  override def prepareText(
    project: Project,
    context: ScratchFileCreationHelper.Context,
    dataContext: DataContext
  ): Boolean = {
    val settings = ScalaProjectSettings.getInstance(project)
    if (settings.isTreatScratchFilesAsWorksheet) {
      // ATTENTION: DIRTY HACK USED: mutating the `context` parameter can be unexpected to the caller
      // TODO: create a proper, clean API for this in IDEA platform
      context.language match {
        // this helper is also called for any dialect of Scala (e.g. SbtLanguage),
        // but only actual Scala scratch files should be treated as worksheets (SCL-16417)
        case ScalaLanguage.INSTANCE | Scala3Language.INSTANCE =>
          val fileType = worksheetScratchFileType
          context.fileExtension = fileType.getDefaultExtension
          context.language = fileType.getLanguage
          true
        case _ =>
          false
      }
    } else if (StringUtils.isBlank(context.text)) {
      // TODO (minor): Running of scala scratch files in non-worksheet mode doesn't work now
      // (and didn't work before)
      //val caretMarker = "CARET_MARKER"
      //val textOneLine = s"object Scratch {def main(args: Array[String]): Unit = {$caretMarker}}"
      //val text = ScratchFileCreationHelper.reformat(project, context.language, textOneLine)
      //context.caretOffset = text.indexOf(caretMarker)
      //context.text = text.replace(caretMarker, "")
      //true
      false
    } else {
      super.prepareText(project, context, dataContext)
    }
  }

  // Intentionally delegates to the platform implementation; kept as an explicit extension point.
  override def beforeCreate(project: Project, context: ScratchFileCreationHelper.Context): Unit =
    super.beforeCreate(project, context)
}
}
/** Companion holding the file type applied when scratch files are treated as worksheets. */
object ScalaScratchFileCreationHelper {
  // Its default extension and language are copied onto the creation context above.
  val worksheetScratchFileType = WorksheetFileType
} | JetBrains/intellij-scala | scala/worksheet/src/org/jetbrains/plugins/scala/worksheet/ScalaScratchFileCreationHelper.scala | Scala | apache-2.0 | 2,307 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner
import org.apache.flink.annotation.VisibleForTesting
import org.apache.flink.api.dag.Transformation
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.api.internal.SelectTableSink
import org.apache.flink.table.calcite._
import org.apache.flink.table.catalog.{CatalogManager, CatalogManagerCalciteSchema, CatalogTable, ConnectorCatalogTable, _}
import org.apache.flink.table.delegation.{Executor, Parser, Planner}
import org.apache.flink.table.executor.StreamExecutor
import org.apache.flink.table.explain.PlanJsonParser
import org.apache.flink.table.expressions.{ExpressionBridge, PlannerExpression, PlannerExpressionConverter, PlannerTypeInferenceUtilImpl}
import org.apache.flink.table.factories.{TableFactoryUtil, TableSinkFactoryContextImpl}
import org.apache.flink.table.operations.OutputConversionModifyOperation.UpdateMode
import org.apache.flink.table.operations._
import org.apache.flink.table.plan.StreamOptimizer
import org.apache.flink.table.plan.nodes.LogicalSink
import org.apache.flink.table.plan.nodes.datastream.DataStreamRel
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.table.sinks._
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.table.util.{DummyStreamExecutionEnvironment, JavaScalaConversionUtil}
import org.apache.calcite.jdbc.CalciteSchema
import org.apache.calcite.jdbc.CalciteSchemaBuilder.asRootSchema
import org.apache.calcite.plan.RelOptUtil
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.logical.LogicalTableModify
import _root_.java.util
import _root_.java.util.Objects
import _root_.java.util.function.{Supplier => JSupplier}
import _root_.scala.collection.JavaConverters._
/**
 * Implementation of [[Planner]] for legacy Flink planner. It supports only streaming use cases.
 * (The new [[org.apache.flink.table.sources.InputFormatTableSource]] should work, but will be
 * handled as streaming sources, and no batch specific optimizations will be applied).
 *
 * @param executor instance of [[StreamExecutor]], needed to extract
 *                 [[StreamExecutionEnvironment]] for
 *                 [[org.apache.flink.table.sources.StreamTableSource.getDataStream]]
 * @param config mutable configuration passed from corresponding [[TableEnvironment]]
 * @param functionCatalog catalog of functions
 * @param catalogManager manager of catalog meta objects such as tables, views, databases etc.
 */
class StreamPlanner(
    executor: Executor,
    config: TableConfig,
    functionCatalog: FunctionCatalog,
    catalogManager: CatalogManager)
  extends Planner {

  // temporary utility until we don't use planner expressions anymore
  functionCatalog.setPlannerTypeInferenceUtil(PlannerTypeInferenceUtilImpl.INSTANCE)

  // Calcite root schema backed by the catalog manager.
  // NOTE(review): the trailing `true` flag presumably selects streaming mode —
  // confirm against CatalogManagerCalciteSchema's constructor.
  private val internalSchema: CalciteSchema =
    asRootSchema(new CatalogManagerCalciteSchema(catalogManager, config, true))

  // temporary bridge between API and planner
  private val expressionBridge: ExpressionBridge[PlannerExpression] =
    new ExpressionBridge[PlannerExpression](PlannerExpressionConverter.INSTANCE)

  private val planningConfigurationBuilder: PlanningConfigurationBuilder =
    new PlanningConfigurationBuilder(
      config,
      functionCatalog,
      internalSchema,
      expressionBridge)

  // Optimizer re-reads the planner config on every access so user changes to
  // TableConfig between statements take effect.
  @VisibleForTesting
  private[flink] val optimizer: StreamOptimizer = new StreamOptimizer(
    () => config.getPlannerConfig
      .unwrap(classOf[CalciteConfig])
      .orElse(CalciteConfig.DEFAULT),
    planningConfigurationBuilder)

  private val parser: Parser = new ParserImpl(
    catalogManager,
    // we do not cache the parser in order to use the most up to
    // date configuration. Users might change parser configuration in TableConfig in between
    // parsing statements
    new JSupplier[FlinkPlannerImpl] {
      override def get(): FlinkPlannerImpl = getFlinkPlanner
    },
    new JSupplier[CalciteParser] {
      override def get(): CalciteParser = planningConfigurationBuilder.createCalciteParser()
    }
  )

  override def getParser: Parser = parser

  /**
   * Translates each [[ModifyOperation]] into a runnable [[Transformation]]:
   * convert to a relational tree, optimize it, then translate to a CRow DataStream
   * inside a dummy planner (so the real execution environment is not touched here).
   */
  override def translate(
      tableOperations: util.List[ModifyOperation]): util.List[Transformation[_]] = {
    val planner = createDummyPlanner()
    tableOperations.asScala.map { operation =>
      val (ast, updatesAsRetraction) = translateToRel(operation)
      val optimizedPlan = optimizer.optimize(ast, updatesAsRetraction, getRelBuilder)
      val dataStream = translateToCRow(planner, optimizedPlan)
      dataStream.getTransformation.asInstanceOf[Transformation[_]]
    }.filter(Objects.nonNull).asJava
  }

  /** Creates the sink used by `TableEnvironment#executeSql`-style SELECT collection. */
  override def createSelectTableSink(tableSchema: TableSchema): SelectTableSink = {
    new StreamSelectTableSink(tableSchema)
  }

  /**
   * Builds a three-section explain string (AST, optimized logical plan, physical
   * execution plan) for the given operations.
   */
  override def explain(operations: util.List[Operation], extraDetails: ExplainDetail*): String = {
    require(operations.asScala.nonEmpty, "operations should not be empty")
    // Pair each operation's RelNode with its updates-as-retraction flag.
    val astWithUpdatesAsRetractionTuples = operations.asScala.map {
      case queryOperation: QueryOperation =>
        val relNode = getRelBuilder.tableOperation(queryOperation).build()
        relNode match {
          // SQL: explain plan for insert into xx
          case modify: LogicalTableModify =>
            // convert LogicalTableModify to CatalogSinkModifyOperation
            val qualifiedName = modify.getTable.getQualifiedName
            require(qualifiedName.size() == 3, "the length of qualified name should be 3.")
            val modifyOperation = new CatalogSinkModifyOperation(
              ObjectIdentifier.of(qualifiedName.get(0), qualifiedName.get(1), qualifiedName.get(2)),
              new PlannerQueryOperation(modify.getInput)
            )
            translateToRel(modifyOperation)
          case _ =>
            (relNode, false)
        }
      case modifyOperation: ModifyOperation =>
        translateToRel(modifyOperation)
      case o => throw new TableException(s"Unsupported operation: ${o.getClass.getCanonicalName}")
    }
    val optimizedNodes = astWithUpdatesAsRetractionTuples.map {
      case (ast, updatesAsRetraction) =>
        optimizer.optimize(ast, updatesAsRetraction, getRelBuilder)
    }
    // Translate inside a dummy planner purely to obtain the execution plan JSON.
    val planner = createDummyPlanner()
    val dataStreams = optimizedNodes.map(p => translateToCRow(planner, p))

    val astPlan = astWithUpdatesAsRetractionTuples.map {
      p => RelOptUtil.toString(p._1)
    }.mkString(System.lineSeparator)
    val optimizedPlan = optimizedNodes.map(RelOptUtil.toString).mkString(System.lineSeparator)

    val env = dataStreams.head.getExecutionEnvironment
    val jsonSqlPlan = env.getExecutionPlan
    val sqlPlan = PlanJsonParser.getSqlExecutionPlan(jsonSqlPlan, false)

    s"== Abstract Syntax Tree ==" +
      System.lineSeparator +
      s"$astPlan" +
      System.lineSeparator +
      s"== Optimized Logical Plan ==" +
      System.lineSeparator +
      s"$optimizedPlan" +
      System.lineSeparator +
      s"== Physical Execution Plan ==" +
      System.lineSeparator +
      s"$sqlPlan"
  }

  /** Delegates SQL completion to the Calcite-backed planner. */
  override def getCompletionHints(
      statement: String,
      position: Int)
    : Array[String] = {
    val planner = getFlinkPlanner
    planner.getCompletionHints(statement, position)
  }

  /**
   * Converts a [[ModifyOperation]] into a relational tree rooted at a sink.
   *
   * @return the sink RelNode plus a flag indicating whether updates must be
   *         emitted as retractions (true for retract sinks)
   */
  private def translateToRel(modifyOperation: ModifyOperation): (RelNode, Boolean) = {
    modifyOperation match {
      case s: UnregisteredSinkModifyOperation[_] =>
        writeToSink(s.getChild, s.getSink, "UnregisteredSink")

      case catalogSink: CatalogSinkModifyOperation =>
        getTableSink(catalogSink.getTableIdentifier)
          .map(sink => {
            TableSinkUtils.validateSink(
              catalogSink.getStaticPartitions,
              catalogSink.getChild,
              catalogSink.getTableIdentifier,
              sink)
            // set static partitions if it is a partitioned sink
            sink match {
              case partitionableSink: PartitionableTableSink =>
                partitionableSink.setStaticPartition(catalogSink.getStaticPartitions)
              case _ =>
            }
            // set whether to overwrite if it's an OverwritableTableSink
            sink match {
              case overwritableTableSink: OverwritableTableSink =>
                overwritableTableSink.setOverwrite(catalogSink.isOverwrite)
              case _ =>
                assert(!catalogSink.isOverwrite, "INSERT OVERWRITE requires " +
                  s"${classOf[OverwritableTableSink].getSimpleName} but actually got " +
                  sink.getClass.getName)
            }
            writeToSink(
              catalogSink.getChild,
              sink,
              catalogSink.getTableIdentifier.asSummaryString())
          }) match {
          case Some(t) => t
          case None =>
            throw new TableException(s"Sink ${catalogSink.getTableIdentifier} does not exists")
        }

      case outputConversion: OutputConversionModifyOperation =>
        // Map the requested update mode onto (updatesAsRetraction, withChangeFlag).
        val (isRetract, withChangeFlag) = outputConversion.getUpdateMode match {
          case UpdateMode.RETRACT => (true, true)
          case UpdateMode.APPEND => (false, false)
          case UpdateMode.UPSERT => (false, true)
        }
        val tableSink = new DataStreamTableSink(
          outputConversion.getChild.getTableSchema,
          TypeConversions.fromDataTypeToLegacyInfo(outputConversion.getType),
          withChangeFlag)
        val input = getRelBuilder.tableOperation(modifyOperation.getChild).build()
        val sink = LogicalSink.create(input, tableSink, "DataStreamTableSink")
        (sink, isRetract)

      case _ =>
        throw new TableException(s"Unsupported ModifyOperation: $modifyOperation")
    }
  }

  // Builds a planner scoped to the current catalog/database; not cached so it
  // always reflects the current defaults.
  private def getFlinkPlanner: FlinkPlannerImpl = {
    val currentCatalogName = catalogManager.getCurrentCatalog
    val currentDatabase = catalogManager.getCurrentDatabase

    planningConfigurationBuilder.createFlinkPlanner(currentCatalogName, currentDatabase)
  }

  // Builds a RelBuilder scoped to the current catalog/database.
  private[flink] def getRelBuilder: FlinkRelBuilder = {
    val currentCatalogName = catalogManager.getCurrentCatalog
    val currentDatabase = catalogManager.getCurrentDatabase

    planningConfigurationBuilder.createRelBuilder(currentCatalogName, currentDatabase)
  }

  private[flink] def getConfig: TableConfig = config

  private[flink] def getExecutionEnvironment: StreamExecutionEnvironment =
    executor.asInstanceOf[StreamExecutor].getExecutionEnvironment

  /** Translates an optimized plan (must be a [[DataStreamRel]]) into a CRow DataStream. */
  private def translateToCRow(planner: StreamPlanner, logicalPlan: RelNode): DataStream[CRow] = {
    logicalPlan match {
      case node: DataStreamRel =>
        getExecutionEnvironment.configure(
          config.getConfiguration,
          Thread.currentThread().getContextClassLoader)
        node.translateToPlan(planner)
      case _ =>
        throw new TableException("Cannot generate DataStream due to an invalid logical plan. " +
          "This is a bug and should not happen. Please file an issue.")
    }
  }

  /**
   * Wraps the query in a [[LogicalSink]] for the given sink.
   *
   * The returned flag is true only for retract sinks; partitionable sinks are
   * rejected in retract and upsert modes, and any other sink type is invalid.
   */
  private def writeToSink[T](
      tableOperation: QueryOperation,
      sink: TableSink[T],
      sinkName: String): (RelNode, Boolean) = {

    val updatesAsRetraction = sink match {
      case retractSink: RetractStreamTableSink[T] =>
        retractSink match {
          case _: PartitionableTableSink =>
            throw new TableException("Partitionable sink in retract stream mode " +
              "is not supported yet!")
          case _ => // do nothing
        }
        true

      case upsertSink: UpsertStreamTableSink[T] =>
        upsertSink match {
          case _: PartitionableTableSink =>
            throw new TableException("Partitionable sink in upsert stream mode " +
              "is not supported yet!")
          case _ => // do nothing
        }
        false

      case _: AppendStreamTableSink[T] =>
        false

      case _ =>
        throw new ValidationException("Stream Tables can only be emitted by AppendStreamTableSink, "
          + "RetractStreamTableSink, or UpsertStreamTableSink.")
    }

    val input = getRelBuilder.tableOperation(tableOperation).build()

    (LogicalSink.create(input, sink, sinkName), updatesAsRetraction)
  }

  /**
   * Looks up a sink for the identifier: a ConnectorCatalogTable yields its stored
   * sink; a CatalogTable goes through the catalog's own table factory first, then
   * falls back to SPI factory discovery.
   */
  private def getTableSink(objectIdentifier: ObjectIdentifier): Option[TableSink[_]] = {
    JavaScalaConversionUtil.toScala(catalogManager.getTable(objectIdentifier))
      .map(_.getTable) match {
      case Some(s) if s.isInstanceOf[ConnectorCatalogTable[_, _]] =>
        JavaScalaConversionUtil.toScala(s.asInstanceOf[ConnectorCatalogTable[_, _]].getTableSink)

      case Some(s) if s.isInstanceOf[CatalogTable] =>
        val catalog = catalogManager.getCatalog(objectIdentifier.getCatalogName)
        val catalogTable = s.asInstanceOf[CatalogTable]
        val context = new TableSinkFactoryContextImpl(
          objectIdentifier, catalogTable, config.getConfiguration, false)
        if (catalog.isPresent && catalog.get().getTableFactory.isPresent) {
          val sink = TableFactoryUtil.createTableSinkForCatalogTable(catalog.get(), context)
          if (sink.isPresent) {
            return Option(sink.get())
          }
        }
        Option(TableFactoryUtil.findAndCreateTableSink(context))

      case _ => None
    }
  }

  // Clones this planner around a dummy execution environment so plan translation
  // does not register transformations on the user's real environment.
  private def createDummyPlanner(): StreamPlanner = {
    val dummyExecEnv = new DummyStreamExecutionEnvironment(getExecutionEnvironment)
    val executor = new StreamExecutor(dummyExecEnv)
    new StreamPlanner(executor, config, functionCatalog, catalogManager)
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/StreamPlanner.scala | Scala | apache-2.0 | 14,412 |
/**
 * Created by pnagarjuna on 06/11/15.
 */
// Immutable value pair. NOTE(review): fields `Value`/`Text` break Scala's lowerCamelCase
// parameter convention, but renaming them would break existing callers, so left as-is.
case class Atomic(Value: Int, Text: String) | pamu/make-model-versoin | src/main/scala/Models.scala | Scala | apache-2.0 | 91 |
// Appears to be IntelliJ Scala type-inference testdata (SCL-9961): the /*start*/ ... /*end*/
// markers delimit the expression under test and the trailing comment line holds the expected
// type — do not move or reformat them.
object SCL9961 {
  case class X(x: X)
  case class Y(y: Y)
  object X {
    // Implicit conversion X => Y, deconstructing one level of X.
    implicit def XtoY(outer: X): Y = outer match {
      case X(inner) => Y(/*start*/inner/*end*/)
    }
  }
}
//SCL9961.Y | whorbowicz/intellij-scala | testdata/typeInference/bugs5/SCL9961.scala | Scala | apache-2.0 | 195 |
package arm
/**
* Created by denis on 9/2/16.
*/
/** Minimal automatic-resource-management helpers: run a block, always close the resource. */
object ArmUtils {

  import java.io.Closeable
  // The structural-type bound on `using` triggers a reflective call; without this
  // import scalac emits a feature warning (an error under -Xfatal-warnings).
  import scala.language.reflectiveCalls

  /**
   * Evaluates `block` and closes `closeable` afterwards, even if `block` throws.
   *
   * @param closeable resource to close when the block finishes
   * @param block     computation to run (evaluated lazily, by-name)
   * @return the block's result
   */
  def usingCloseable[A](closeable: Closeable)(block: => A): A = {
    try {
      block
    } finally {
      closeable.close()
    }
  }

  /**
   * Like [[usingCloseable]] but accepts any value exposing a `close()` method.
   * The structural type is resolved reflectively at the call site, which is
   * slower than the [[Closeable]] overload — prefer that one when possible.
   */
  def using[A, C <: { def close() }](closeable: C)(block: => A): A = {
    try {
      block
    } finally {
      closeable.close()
    }
  }
}
| denisftw/advanced-scala-code | base/src/main/scala/arm/ArmUtils.scala | Scala | apache-2.0 | 385 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.util.{HashMap => JHashMap}
import scala.collection.mutable
import scala.collection.JavaConversions._
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.{Actor, ActorRef, Cancellable}
import akka.pattern.ask
import org.apache.spark.{Logging, SparkConf, SparkException}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.scheduler._
import org.apache.spark.storage.BlockManagerMessages._
import org.apache.spark.util.{ActorLogReceive, AkkaUtils, Utils}
/**
* BlockManagerMasterActor is an actor on the master node to track statuses of
* all slaves' block managers.
*/
private[spark]
class BlockManagerMasterActor(val isLocal: Boolean, conf: SparkConf, listenerBus: LiveListenerBus)
extends Actor with ActorLogReceive with Logging {
// Mapping from block manager id to the block manager's information.
private val blockManagerInfo = new mutable.HashMap[BlockManagerId, BlockManagerInfo]
// Mapping from executor ID to block manager ID.
private val blockManagerIdByExecutor = new mutable.HashMap[String, BlockManagerId]
// Mapping from block id to the set of block managers that have the block.
private val blockLocations = new JHashMap[BlockId, mutable.HashSet[BlockManagerId]]
private val akkaTimeout = AkkaUtils.askTimeout(conf)
val slaveTimeout = conf.getLong("spark.storage.blockManagerSlaveTimeoutMs", 120 * 1000)
val checkTimeoutInterval = conf.getLong("spark.storage.blockManagerTimeoutIntervalMs", 60000)
var timeoutCheckingTask: Cancellable = null
override def preStart() {
import context.dispatcher
timeoutCheckingTask = context.system.scheduler.schedule(0.seconds,
checkTimeoutInterval.milliseconds, self, ExpireDeadHosts)
super.preStart()
}
override def receiveWithLogging = {
case RegisterBlockManager(blockManagerId, maxMemSize, slaveActor) =>
register(blockManagerId, maxMemSize, slaveActor)
sender ! true
case UpdateBlockInfo(
blockManagerId, blockId, storageLevel, deserializedSize, size, tachyonSize) =>
sender ! updateBlockInfo(
blockManagerId, blockId, storageLevel, deserializedSize, size, tachyonSize)
case GetLocations(blockId) =>
sender ! getLocations(blockId)
case GetLocationsMultipleBlockIds(blockIds) =>
sender ! getLocationsMultipleBlockIds(blockIds)
case GetPeers(blockManagerId) =>
sender ! getPeers(blockManagerId)
case GetActorSystemHostPortForExecutor(executorId) =>
sender ! getActorSystemHostPortForExecutor(executorId)
case GetMemoryStatus =>
sender ! memoryStatus
case GetStorageStatus =>
sender ! storageStatus
case GetBlockStatus(blockId, askSlaves) =>
sender ! blockStatus(blockId, askSlaves)
case GetMatchingBlockIds(filter, askSlaves) =>
sender ! getMatchingBlockIds(filter, askSlaves)
case RemoveRdd(rddId) =>
sender ! removeRdd(rddId)
case RemoveShuffle(shuffleId) =>
sender ! removeShuffle(shuffleId)
case RemoveBroadcast(broadcastId, removeFromDriver) =>
sender ! removeBroadcast(broadcastId, removeFromDriver)
case RemoveBlock(blockId) =>
removeBlockFromWorkers(blockId)
sender ! true
case RemoveExecutor(execId) =>
removeExecutor(execId)
sender ! true
case StopBlockManagerMaster =>
sender ! true
if (timeoutCheckingTask != null) {
timeoutCheckingTask.cancel()
}
context.stop(self)
case ExpireDeadHosts =>
expireDeadHosts()
case BlockManagerHeartbeat(blockManagerId) =>
sender ! heartbeatReceived(blockManagerId)
//from BlockManagerMaster
case RelocateBlock(blockId, oldBlockManager, newBlockManager) =>
sender ! relocateBlockId(blockId, oldBlockManager, newBlockManager)
case GetAllBlockManagerId =>
sender ! getAllBlockManagerId()
case GetBlockManagerIdForHost(host) =>
sender ! getBlockManagerIdForHost(host)
case other =>
logWarning("Got unknown message: " + other)
}
private def removeRdd(rddId: Int): Future[Seq[Int]] = {
// First remove the metadata for the given RDD, and then asynchronously remove the blocks
// from the slaves.
// Find all blocks for the given RDD, remove the block from both blockLocations and
// the blockManagerInfo that is tracking the blocks.
val blocks = blockLocations.keys.flatMap(_.asRDDId).filter(_.rddId == rddId)
blocks.foreach { blockId =>
val bms: mutable.HashSet[BlockManagerId] = blockLocations.get(blockId)
bms.foreach(bm => blockManagerInfo.get(bm).foreach(_.removeBlock(blockId)))
blockLocations.remove(blockId)
}
// Ask the slaves to remove the RDD, and put the result in a sequence of Futures.
// The dispatcher is used as an implicit argument into the Future sequence construction.
import context.dispatcher
val removeMsg = RemoveRdd(rddId)
Future.sequence(
blockManagerInfo.values.map { bm =>
bm.slaveActor.ask(removeMsg)(akkaTimeout).mapTo[Int]
}.toSeq
)
}
private def removeShuffle(shuffleId: Int): Future[Seq[Boolean]] = {
// Nothing to do in the BlockManagerMasterActor data structures
import context.dispatcher
val removeMsg = RemoveShuffle(shuffleId)
Future.sequence(
blockManagerInfo.values.map { bm =>
bm.slaveActor.ask(removeMsg)(akkaTimeout).mapTo[Boolean]
}.toSeq
)
}
/**
* Delegate RemoveBroadcast messages to each BlockManager because the master may not notified
* of all broadcast blocks. If removeFromDriver is false, broadcast blocks are only removed
* from the executors, but not from the driver.
*/
private def removeBroadcast(broadcastId: Long, removeFromDriver: Boolean): Future[Seq[Int]] = {
import context.dispatcher
val removeMsg = RemoveBroadcast(broadcastId, removeFromDriver)
val requiredBlockManagers = blockManagerInfo.values.filter { info =>
removeFromDriver || !info.blockManagerId.isDriver
}
Future.sequence(
requiredBlockManagers.map { bm =>
bm.slaveActor.ask(removeMsg)(akkaTimeout).mapTo[Int]
}.toSeq
)
}
private def removeBlockManager(blockManagerId: BlockManagerId) {
val info = blockManagerInfo(blockManagerId)
// Remove the block manager from blockManagerIdByExecutor.
blockManagerIdByExecutor -= blockManagerId.executorId
// Remove it from blockManagerInfo and remove all the blocks.
blockManagerInfo.remove(blockManagerId)
val iterator = info.blocks.keySet.iterator
while (iterator.hasNext) {
val blockId = iterator.next
val locations = blockLocations.get(blockId)
locations -= blockManagerId
if (locations.size == 0) {
blockLocations.remove(blockId)
}
}
listenerBus.post(SparkListenerBlockManagerRemoved(System.currentTimeMillis(), blockManagerId))
logInfo(s"Removing block manager $blockManagerId")
}
private def expireDeadHosts() {
logTrace("Checking for hosts with no recent heart beats in BlockManagerMaster.")
val now = System.currentTimeMillis()
val minSeenTime = now - slaveTimeout
val toRemove = new mutable.HashSet[BlockManagerId]
for (info <- blockManagerInfo.values) {
if (info.lastSeenMs < minSeenTime && !info.blockManagerId.isDriver) {
logWarning("Removing BlockManager " + info.blockManagerId + " with no recent heart beats: "
+ (now - info.lastSeenMs) + "ms exceeds " + slaveTimeout + "ms")
toRemove += info.blockManagerId
}
}
toRemove.foreach(removeBlockManager)
}
private def removeExecutor(execId: String) {
logInfo("Trying to remove executor " + execId + " from BlockManagerMaster.")
blockManagerIdByExecutor.get(execId).foreach(removeBlockManager)
}
/**
* Return true if the driver knows about the given block manager. Otherwise, return false,
* indicating that the block manager should re-register.
*/
private def heartbeatReceived(blockManagerId: BlockManagerId): Boolean = {
if (!blockManagerInfo.contains(blockManagerId)) {
blockManagerId.isDriver && !isLocal
} else {
blockManagerInfo(blockManagerId).updateLastSeenMs()
true
}
}
// Remove a block from the slaves that have it. This can only be used to remove
// blocks that the master knows about.
private def removeBlockFromWorkers(blockId: BlockId) {
val locations = blockLocations.get(blockId)
if (locations != null) {
locations.foreach { blockManagerId: BlockManagerId =>
val blockManager = blockManagerInfo.get(blockManagerId)
if (blockManager.isDefined) {
// Remove the block from the slave's BlockManager.
// Doesn't actually wait for a confirmation and the message might get lost.
// If message loss becomes frequent, we should add retry logic here.
blockManager.get.slaveActor.ask(RemoveBlock(blockId))(akkaTimeout)
}
}
}
}
// Return a map from the block manager id to max memory and remaining memory.
private def memoryStatus: Map[BlockManagerId, (Long, Long)] = {
blockManagerInfo.map { case(blockManagerId, info) =>
(blockManagerId, (info.maxMem, info.remainingMem))
}.toMap
}
private def storageStatus: Array[StorageStatus] = {
blockManagerInfo.map { case (blockManagerId, info) =>
new StorageStatus(blockManagerId, info.maxMem, info.blocks)
}.toArray
}
/**
* Return the block's status for all block managers, if any. NOTE: This is a
* potentially expensive operation and should only be used for testing.
*
* If askSlaves is true, the master queries each block manager for the most updated block
* statuses. This is useful when the master is not informed of the given block by all block
* managers.
*/
private def blockStatus(
blockId: BlockId,
askSlaves: Boolean): Map[BlockManagerId, Future[Option[BlockStatus]]] = {
import context.dispatcher
val getBlockStatus = GetBlockStatus(blockId)
/*
* Rather than blocking on the block status query, master actor should simply return
* Futures to avoid potential deadlocks. This can arise if there exists a block manager
* that is also waiting for this master actor's response to a previous message.
*/
blockManagerInfo.values.map { info =>
val blockStatusFuture =
if (askSlaves) {
info.slaveActor.ask(getBlockStatus)(akkaTimeout).mapTo[Option[BlockStatus]]
} else {
Future { info.getStatus(blockId) }
}
(info.blockManagerId, blockStatusFuture)
}.toMap
}
/**
 * Return the ids of blocks present in all the block managers that match the given filter.
 * NOTE: This is a potentially expensive operation and should only be used for testing.
 *
 * If askSlaves is true, each block manager is asked directly for its current
 * block list; otherwise the master's cached view is filtered locally.
 */
private def getMatchingBlockIds(
    filter: BlockId => Boolean,
    askSlaves: Boolean): Future[Seq[BlockId]] = {
  import context.dispatcher
  val query = GetMatchingBlockIds(filter)
  // One future of matching ids per block manager; flattened into a single seq.
  val perManager = blockManagerInfo.values.map { info =>
    if (askSlaves) {
      info.slaveActor.ask(query)(akkaTimeout).mapTo[Seq[BlockId]]
    } else {
      Future { info.blocks.keys.filter(filter).toSeq }
    }
  }
  Future.sequence(perManager).map(_.flatten.toSeq)
}
/**
 * Register a block manager with the master. If a block manager is already
 * registered for the same executor, the old one is assumed dead and replaced.
 * A SparkListenerBlockManagerAdded event is posted in all cases.
 *
 * @param id         identity of the registering block manager
 * @param maxMemSize maximum memory (bytes) the block manager can use
 * @param slaveActor actor ref used to send commands back to the block manager
 */
private def register(id: BlockManagerId, maxMemSize: Long, slaveActor: ActorRef) {
  val time = System.currentTimeMillis()
  if (!blockManagerInfo.contains(id)) {
    blockManagerIdByExecutor.get(id.executorId) match {
      case Some(oldId) =>
        // A block manager of the same executor already exists, so remove it (assumed dead)
        logError("Got two different block manager registrations on same executor - "
          + s" will replace old one $oldId with new one $id")
        removeExecutor(id.executorId)
      case None => // first registration for this executor
    }
    logInfo("Registering block manager %s with %s RAM, %s".format(
      id.hostPort, Utils.bytesToString(maxMemSize), id))
    blockManagerIdByExecutor(id.executorId) = id
    // Reuse `time` so the info's registration timestamp matches the listener
    // event below (previously a second currentTimeMillis() call could differ).
    blockManagerInfo(id) = new BlockManagerInfo(id, time, maxMemSize, slaveActor)
  }
  // NOTE(review): posted even when the id was already registered — confirm intended.
  listenerBus.post(SparkListenerBlockManagerAdded(time, id, maxMemSize))
}
/**
 * Record a block status report from a block manager. Returns true when the
 * update was accepted (or deliberately ignored for the unregistered driver),
 * false when the reporting block manager is unknown.
 */
private def updateBlockInfo(
    blockManagerId: BlockManagerId,
    blockId: BlockId,
    storageLevel: StorageLevel,
    memSize: Long,
    diskSize: Long,
    tachyonSize: Long): Boolean = {

  if (!blockManagerInfo.contains(blockManagerId)) {
    // We intentionally do not register the master (except in local mode),
    // so a report from the driver should not indicate failure.
    return blockManagerId.isDriver && !isLocal
  }

  if (blockId == null) {
    // A null block id is just a liveness ping.
    blockManagerInfo(blockManagerId).updateLastSeenMs()
    return true
  }

  blockManagerInfo(blockManagerId).updateBlockInfo(
    blockId, storageLevel, memSize, diskSize, tachyonSize)

  // Fetch (or lazily create) the location set tracked for this block.
  val locations =
    if (blockLocations.containsKey(blockId)) {
      blockLocations.get(blockId)
    } else {
      val fresh = new mutable.HashSet[BlockManagerId]
      blockLocations.put(blockId, fresh)
      fresh
    }

  if (storageLevel.isValid) {
    locations.add(blockManagerId)
  } else {
    locations.remove(blockManagerId)
  }

  // Remove the block from master tracking if it has been removed on all slaves.
  if (locations.isEmpty) {
    blockLocations.remove(blockId)
  }
  true
}
/** Known locations of the given block, or Seq.empty when untracked. */
private def getLocations(blockId: BlockId): Seq[BlockManagerId] = {
  Option(blockLocations.get(blockId)).map(_.toSeq).getOrElse(Seq.empty)
}
/** Locations for each requested block, in the same order as the input. */
private def getLocationsMultipleBlockIds(blockIds: Array[BlockId]): Seq[Seq[BlockManagerId]] = {
  blockIds.map(getLocations)
}
/**
 * Move the tracked location of `blockId` from `oldBlockManager` to
 * `newBlockManager`. Returns true iff the block is currently tracked.
 */
private def relocateBlockId(blockId: BlockId,
    oldBlockManager: BlockManagerId,
    newBlockManager: BlockManagerId): Boolean = {
  if (blockLocations.contains(blockId)) {
    val locations = blockLocations(blockId)
    // The set is mutated in place, so re-inserting the same reference into
    // the map (as the previous code did) is unnecessary.
    locations.remove(oldBlockManager)
    locations.add(newBlockManager)
    true
  } else {
    false
  }
}
/** Ids of every currently registered block manager. */
private def getAllBlockManagerId(): Seq[BlockManagerId] =
  blockManagerInfo.keySet.toSeq
/** Non-driver block managers running on the given host. */
private def getBlockManagerIdForHost(host: String): Seq[BlockManagerId] = {
  // Idiomatic negation instead of comparing a Boolean to `false`.
  val result = blockManagerInfo.keySet
    .filter(id => id.host == host && !id.isDriver)
    .toSeq
  logInfo(s"test - blockManagerId for host ${host} is ${result}")
  result
}
/** Get the list of the peers of the given block manager */
private def getPeers(blockManagerId: BlockManagerId): Seq[BlockManagerId] = {
  val ids = blockManagerInfo.keySet
  if (!ids.contains(blockManagerId)) {
    // Unknown block manager: it has no peers.
    Seq.empty
  } else {
    // Peers are every registered non-driver block manager except itself.
    ids.filterNot(id => id.isDriver || id == blockManagerId).toSeq
  }
}
/**
 * Returns the hostname and port of an executor's actor system, based on the Akka address of its
 * BlockManagerSlaveActor. None when the executor is unknown or its address is
 * missing host or port information.
 */
private def getActorSystemHostPortForExecutor(executorId: String): Option[(String, Int)] = {
  blockManagerIdByExecutor.get(executorId)
    .flatMap(blockManagerInfo.get)
    .flatMap { info =>
      val address = info.slaveActor.path.address
      for (host <- address.host; port <- address.port) yield (host, port)
    }
}
}
@DeveloperApi
// Per-block storage accounting: how many bytes the block occupies in each tier.
case class BlockStatus(
    storageLevel: StorageLevel,
    memSize: Long,       // bytes held in memory
    diskSize: Long,      // bytes held on disk
    tachyonSize: Long) { // bytes held off-heap in Tachyon
  // A block counts as cached if it occupies space in any storage tier.
  def isCached: Boolean = memSize + diskSize + tachyonSize > 0
}
@DeveloperApi
object BlockStatus {
  /** A status for a block stored nowhere (isCached == false). */
  def empty: BlockStatus = BlockStatus(StorageLevel.NONE, 0L, 0L, 0L)
}
/**
 * Master-side bookkeeping for one registered block manager: tracks its
 * remaining memory, the time it was last heard from, and the last reported
 * status of each of its blocks.
 */
private[spark] class BlockManagerInfo(
    val blockManagerId: BlockManagerId,
    timeMs: Long,
    val maxMem: Long,
    val slaveActor: ActorRef)
  extends Logging {

  // Time (ms) of the last message received from this block manager.
  private var _lastSeenMs: Long = timeMs
  // Memory still available: maxMem minus the memSize of in-memory blocks.
  private var _remainingMem: Long = maxMem

  // Mapping from block id to its status.
  private val _blocks = new JHashMap[BlockId, BlockStatus]

  // Last reported status for the block, if any (None when never reported).
  def getStatus(blockId: BlockId) = Option(_blocks.get(blockId))

  def updateLastSeenMs() {
    _lastSeenMs = System.currentTimeMillis()
  }

  // Apply a block status report: adjusts _remainingMem and the _blocks map.
  def updateBlockInfo(
      blockId: BlockId,
      storageLevel: StorageLevel,
      memSize: Long,
      diskSize: Long,
      tachyonSize: Long) {

    updateLastSeenMs()

    if (_blocks.containsKey(blockId)) {
      // The block exists on the slave already.
      val blockStatus: BlockStatus = _blocks.get(blockId)
      val originalLevel: StorageLevel = blockStatus.storageLevel
      val originalMemSize: Long = blockStatus.memSize

      if (originalLevel.useMemory) {
        // Return the memory the previous copy occupied before re-accounting.
        _remainingMem += originalMemSize
      }
    }

    if (storageLevel.isValid) {
      /* isValid means it is either stored in-memory, on-disk or on-Tachyon.
       * The memSize here indicates the data size in or dropped from memory,
       * tachyonSize here indicates the data size in or dropped from Tachyon,
       * and the diskSize here indicates the data size in or dropped to disk.
       * They can be both larger than 0, when a block is dropped from memory to disk.
       * Therefore, a safe way to set BlockStatus is to set its info in accurate modes. */
      // NOTE: each branch below overwrites the map entry, so when a level uses
      // several tiers only the last matching branch's sizes remain recorded.
      if (storageLevel.useMemory) {
        _blocks.put(blockId, BlockStatus(storageLevel, memSize, 0, 0))
        _remainingMem -= memSize
        logInfo("Added %s in memory on %s (size: %s, free: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(memSize),
          Utils.bytesToString(_remainingMem)))
      }
      if (storageLevel.useDisk) {
        _blocks.put(blockId, BlockStatus(storageLevel, 0, diskSize, 0))
        logInfo("Added %s on disk on %s (size: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(diskSize)))
      }
      if (storageLevel.useOffHeap) {
        _blocks.put(blockId, BlockStatus(storageLevel, 0, 0, tachyonSize))
        logInfo("Added %s on tachyon on %s (size: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(tachyonSize)))
      }
    } else if (_blocks.containsKey(blockId)) {
      // If isValid is not true, drop the block.
      val blockStatus: BlockStatus = _blocks.get(blockId)
      _blocks.remove(blockId)
      if (blockStatus.storageLevel.useMemory) {
        logInfo("Removed %s on %s in memory (size: %s, free: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.memSize),
          Utils.bytesToString(_remainingMem)))
      }
      if (blockStatus.storageLevel.useDisk) {
        logInfo("Removed %s on %s on disk (size: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.diskSize)))
      }
      if (blockStatus.storageLevel.useOffHeap) {
        logInfo("Removed %s on %s on tachyon (size: %s)".format(
          blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.tachyonSize)))
      }
    }
  }

  // Drop the block from tracking and give back any memory it occupied.
  def removeBlock(blockId: BlockId) {
    if (_blocks.containsKey(blockId)) {
      _remainingMem += _blocks.get(blockId).memSize
      _blocks.remove(blockId)
    }
  }

  def remainingMem: Long = _remainingMem

  def lastSeenMs: Long = _lastSeenMs

  def blocks: JHashMap[BlockId, BlockStatus] = _blocks

  override def toString: String = "BlockManagerInfo " + timeMs + " " + _remainingMem

  def clear() {
    _blocks.clear()
  }
}
| trueyao/spark-lever | core/src/main/scala/org/apache/spark/storage/BlockManagerMasterActor.scala | Scala | apache-2.0 | 20,841 |
package hammock
package apache
import java.net.URI
import cats._
import cats.data.Kleisli
import cats.effect._
import org.apache.http.client.HttpClient
import org.apache.http.client.methods.HttpUriRequest
import org.apache.http.entity.StringEntity
import org.apache.http.message.{BasicHttpResponse, BasicStatusLine}
import org.apache.http.{ProtocolVersion, HttpResponse => ApacheHttpResponse}
import org.mockito.Mockito._
import org.mockito.{Matchers => MM}
import org.scalatest.BeforeAndAfter
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.mockito._
import ApacheInterpreter._
/**
 * Tests for the Apache HttpClient interpreter: trans/transK equivalence for
 * every HTTP method, request mapping to Apache requests, and response mapping
 * back to hammock's HttpResponse (including a null-entity response).
 */
class ApacheInterpreterSpec extends AnyWordSpec with MockitoSugar with BeforeAndAfter {
  import MM._

  // Shared mocked client; stubbing/invocations are reset after every test.
  implicit val client: HttpClient = mock[HttpClient]

  // Canned 200 OK response carrying a "content" string entity.
  val httpResponse: ApacheHttpResponse = {
    val resp = new BasicHttpResponse(new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), 200, null))
    val entity = new StringEntity("content")
    resp.setEntity(entity)
    resp
  }

  after {
    reset(client)
  }

  "Interpreter.trans" should {
    // One parity test per HTTP verb: trans and transK.run(client) must agree.
    Seq(
      ("Options", (uri: Uri, headers: Map[String, String]) => Ops.options(uri, headers)),
      ("Get", (uri: Uri, headers: Map[String, String]) => Ops.get(uri, headers)),
      ("Head", (uri: Uri, headers: Map[String, String]) => Ops.head(uri, headers)),
      ("Post", (uri: Uri, headers: Map[String, String]) => Ops.post(uri, headers, None)),
      ("Put", (uri: Uri, headers: Map[String, String]) => Ops.put(uri, headers, None)),
      ("Delete", (uri: Uri, headers: Map[String, String]) => Ops.delete(uri, headers)),
      ("Trace", (uri: Uri, headers: Map[String, String]) => Ops.trace(uri, headers)),
      ("Patch", (uri: Uri, headers: Map[String, String]) => Ops.patch(uri, headers, None))
    ) foreach {
      case (method, operation) =>
        s"have the same result as transK.run(client) with $method requests" in {
          when(client.execute(any[HttpUriRequest])).thenReturn(httpResponse)
          val op = operation(Uri(), Map())
          // Same free program interpreted via Kleisli and via trans.
          val k = op.foldMap[Kleisli[IO, HttpClient, *]](transK)
          val transKResult = k.run(client).unsafeRunSync()
          val transResult = (op foldMap ApacheInterpreter[IO].trans).unsafeRunSync()
          assert(Eq[HttpResponse].eqv(transKResult, transResult))
        }
    }

    "create a correct Apache's HTTP request from HttpF" in {
      val req = Get(
        HttpRequest(
          uri"http://localhost:8080",
          Map(
            "header1" -> "value1",
            "header2" -> "value2"
          ),
          None))
      val apacheReq = mapRequest[IO](req).unsafeRunSync()
      // URI and both headers must survive the mapping unchanged.
      assert(apacheReq.getURI == new URI("http://localhost:8080"))
      assert(apacheReq.getAllHeaders.length == 2)
      assert(
        apacheReq
          .getHeaders("header1")(0)
          .getValue == "value1")
      assert(
        apacheReq
          .getHeaders("header2")(0)
          .getValue == "value2")
    }

    "create a correct HttpResponse from Apache's HTTP response" in {
      when(client.execute(any[HttpUriRequest])).thenReturn(httpResponse)
      val op = Ops.get(Uri(), Map())
      val result = (op foldMap ApacheInterpreter[IO].trans).unsafeRunSync()
      assert(result.status == Status.OK)
      assert(result.headers == Map())
      assert(result.entity.content == "content")
    }

    "create a correct response when Apache's HttpResponse.getEntity is null" in {
      // 204 No Content has no entity: the interpreter must yield EmptyEntity,
      // not NPE on getEntity.
      val resp = new BasicHttpResponse(new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), 204, null))
      when(client.execute(any[HttpUriRequest])).thenReturn(resp)
      val op = Ops.get(Uri(), Map())
      val result = (op foldMap ApacheInterpreter[IO].trans).unsafeRunSync()
      assert(result.status == Status.NoContent)
      assert(result.entity == Entity.EmptyEntity)
    }
  }
}
| pepegar/hammock | hammock-apache-http/src/test/scala/hammock/apache/ApacheInterpreterSpec.scala | Scala | mit | 3,849 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import java.io.File
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.{SparkEnv, TaskContext}
import org.apache.spark.api.python.{ChainedPythonFunctions, PythonEvalType}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.{ExternalAppendOnlyUnsafeRowArray, SparkPlan}
import org.apache.spark.sql.execution.window._
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.ArrowUtils
import org.apache.spark.util.Utils
/**
* This class calculates and outputs windowed aggregates over the rows in a single partition.
*
* This is similar to [[WindowExec]]. The main difference is that this node does not compute
* any window aggregation values. Instead, it computes the lower and upper bound for each window
* (i.e. window bounds) and pass the data and indices to Python worker to do the actual window
* aggregation.
*
* It currently materializes all data associated with the same partition key and passes them to
* Python worker. This is not strictly necessary for sliding windows and can be improved (by
* possibly slicing data into overlapping chunks and stitching them together).
*
* This class groups window expressions by their window boundaries so that window expressions
* with the same window boundaries can share the same window bounds. The window bounds are
* prepended to the data passed to the python worker.
*
* For example, if we have:
* avg(v) over specifiedwindowframe(RowFrame, -5, 5),
* avg(v) over specifiedwindowframe(RowFrame, UnboundedPreceding, UnboundedFollowing),
* avg(v) over specifiedwindowframe(RowFrame, -3, 3),
* max(v) over specifiedwindowframe(RowFrame, -3, 3)
*
* The python input will look like:
* (lower_bound_w1, upper_bound_w1, lower_bound_w3, upper_bound_w3, v)
*
* where w1 is specifiedwindowframe(RowFrame, -5, 5)
* w2 is specifiedwindowframe(RowFrame, UnboundedPreceding, UnboundedFollowing)
* w3 is specifiedwindowframe(RowFrame, -3, 3)
*
* Note that w2 doesn't have bound indices in the python input because it's unbounded window
* so it's bound indices will always be the same.
*
* Bounded window and Unbounded window are evaluated differently in Python worker:
* (1) Bounded window takes the window bound indices in addition to the input columns.
* Unbounded window takes only input columns.
* (2) Bounded window evaluates the udf once per input row.
* Unbounded window evaluates the udf once per window partition.
* This is controlled by Python runner conf "pandas_window_bound_types"
*
* The logic to compute window bounds is delegated to [[WindowFunctionFrame]] and shared with
* [[WindowExec]]
*
* Note this doesn't support partial aggregation and all aggregation is computed from the entire
* window.
*/
case class WindowInPandasExec(
    windowExpression: Seq[NamedExpression],
    partitionSpec: Seq[Expression],
    orderSpec: Seq[SortOrder],
    child: SparkPlan)
  extends WindowExecBase {

  /**
   * Helper functions and data structures for window bounds
   *
   * It contains:
   * (1) Total number of window bound indices in the python input row
   * (2) Function from frame index to its lower bound column index in the python input row
   * (3) Function from frame index to its upper bound column index in the python input row
   * (4) Seq from frame index to its window bound type
   */
  private type WindowBoundHelpers = (Int, Int => Int, Int => Int, Seq[WindowBoundType])

  /**
   * Enum for window bound types. Used only inside this class.
   */
  private sealed case class WindowBoundType(value: String)
  private object UnboundedWindow extends WindowBoundType("unbounded")
  private object BoundedWindow extends WindowBoundType("bounded")

  // Python runner conf key telling the worker, per UDF, whether its window is bounded.
  private val windowBoundTypeConf = "pandas_window_bound_types"

  // Flattens a (possibly nested) Python UDF into its chained function list and
  // the non-UDF input expressions the chain is ultimately applied to.
  private def collectFunctions(udf: PythonUDF): (ChainedPythonFunctions, Seq[Expression]) = {
    udf.children match {
      case Seq(u: PythonUDF) =>
        val (chained, children) = collectFunctions(u)
        (ChainedPythonFunctions(chained.funcs ++ Seq(udf.func)), children)
      case children =>
        // There should not be any other UDFs, or the children can't be evaluated directly.
        assert(children.forall(_.find(_.isInstanceOf[PythonUDF]).isEmpty))
        (ChainedPythonFunctions(Seq(udf.func)), udf.children)
    }
  }

  /**
   * See [[WindowBoundHelpers]] for details.
   */
  private def computeWindowBoundHelpers(
      factories: Seq[InternalRow => WindowFunctionFrame]
  ): WindowBoundHelpers = {
    val functionFrames = factories.map(_(EmptyRow))

    val windowBoundTypes = functionFrames.map {
      case _: UnboundedWindowFunctionFrame => UnboundedWindow
      case _: UnboundedFollowingWindowFunctionFrame |
        _: SlidingWindowFunctionFrame |
        _: UnboundedPrecedingWindowFunctionFrame => BoundedWindow
      // It should be impossible to get other types of window function frame here
      case frame => throw QueryExecutionErrors.unexpectedWindowFunctionFrameError(frame.toString)
    }

    // Unbounded frames need no bound columns; bounded frames need two
    // (lower bound index and upper bound index).
    val requiredIndices = functionFrames.map {
      case _: UnboundedWindowFunctionFrame => 0
      case _ => 2
    }

    // Prefix sums give, for each frame, the column index just past its bounds.
    val upperBoundIndices = requiredIndices.scan(0)(_ + _).tail

    val boundIndices = requiredIndices.zip(upperBoundIndices).map { case (num, upperBoundIndex) =>
      if (num == 0) {
        // Sentinel values for unbounded window
        (-1, -1)
      } else {
        (upperBoundIndex - 2, upperBoundIndex - 1)
      }
    }

    def lowerBoundIndex(frameIndex: Int) = boundIndices(frameIndex)._1
    def upperBoundIndex(frameIndex: Int) = boundIndices(frameIndex)._2

    (requiredIndices.sum, lowerBoundIndex, upperBoundIndex, windowBoundTypes)
  }

  protected override def doExecute(): RDD[InternalRow] = {
    // Unwrap the expressions and factories from the map.
    val expressionsWithFrameIndex =
      windowFrameExpressionFactoryPairs.map(_._1).zipWithIndex.flatMap {
        case (buffer, frameIndex) => buffer.map(expr => (expr, frameIndex))
      }

    val expressions = expressionsWithFrameIndex.map(_._1)
    val expressionIndexToFrameIndex =
      expressionsWithFrameIndex.map(_._2).zipWithIndex.map(_.swap).toMap

    val factories = windowFrameExpressionFactoryPairs.map(_._2).toArray

    // Helper functions
    val (numBoundIndices, lowerBoundIndex, upperBoundIndex, frameWindowBoundTypes) =
      computeWindowBoundHelpers(factories)
    // A frame is bounded iff it was assigned real (non-sentinel) bound columns.
    val isBounded = { frameIndex: Int => lowerBoundIndex(frameIndex) >= 0 }
    val numFrames = factories.length

    val inMemoryThreshold = conf.windowExecBufferInMemoryThreshold
    val spillThreshold = conf.windowExecBufferSpillThreshold
    val sessionLocalTimeZone = conf.sessionLocalTimeZone

    // Extract window expressions and window functions
    val windowExpressions = expressions.flatMap(_.collect { case e: WindowExpression => e })
    val udfExpressions = windowExpressions.map(_.windowFunction.asInstanceOf[PythonUDF])

    // We shouldn't be chaining anything here.
    // All chained python functions should only contain one function.
    val (pyFuncs, inputs) = udfExpressions.map(collectFunctions).unzip
    require(pyFuncs.length == expressions.length)

    val udfWindowBoundTypes = pyFuncs.indices.map(i =>
      frameWindowBoundTypes(expressionIndexToFrameIndex(i)))
    val pythonRunnerConf: Map[String, String] = (ArrowUtils.getPythonRunnerConfMap(conf)
      + (windowBoundTypeConf -> udfWindowBoundTypes.map(_.value).mkString(",")))

    // Filter child output attributes down to only those that are UDF inputs.
    // Also eliminate duplicate UDF inputs. This is similar to how other Python UDF node
    // handles UDF inputs.
    val dataInputs = new ArrayBuffer[Expression]
    val dataInputTypes = new ArrayBuffer[DataType]
    val argOffsets = inputs.map { input =>
      input.map { e =>
        if (dataInputs.exists(_.semanticEquals(e))) {
          dataInputs.indexWhere(_.semanticEquals(e))
        } else {
          dataInputs += e
          dataInputTypes += e.dataType
          dataInputs.length - 1
        }
      }.toArray
    }.toArray

    // In addition to UDF inputs, we will prepend window bounds for each UDFs.
    // For bounded windows, we prepend lower bound and upper bound. For unbounded windows,
    // we no not add window bounds. (strictly speaking, we only need to lower or upper bound
    // if the window is bounded only on one side, this can be improved in the future)

    // Setting window bounds for each window frames. Each window frame has different bounds so
    // each has its own window bound columns.
    val windowBoundsInput = factories.indices.flatMap { frameIndex =>
      if (isBounded(frameIndex)) {
        Seq(
          BoundReference(lowerBoundIndex(frameIndex), IntegerType, nullable = false),
          BoundReference(upperBoundIndex(frameIndex), IntegerType, nullable = false)
        )
      } else {
        Seq.empty
      }
    }

    // Setting the window bounds argOffset for each UDF. For UDFs with bounded window, argOffset
    // for the UDF is (lowerBoundOffset, upperBoundOffset, inputOffset1, inputOffset2, ...)
    // For UDFs with unbounded window, argOffset is (inputOffset1, inputOffset2, ...)
    pyFuncs.indices.foreach { exprIndex =>
      val frameIndex = expressionIndexToFrameIndex(exprIndex)
      if (isBounded(frameIndex)) {
        argOffsets(exprIndex) =
          Array(lowerBoundIndex(frameIndex), upperBoundIndex(frameIndex)) ++
            argOffsets(exprIndex).map(_ + windowBoundsInput.length)
      } else {
        argOffsets(exprIndex) = argOffsets(exprIndex).map(_ + windowBoundsInput.length)
      }
    }

    // Full python input row = bound columns first, then de-duplicated UDF inputs.
    val allInputs = windowBoundsInput ++ dataInputs
    val allInputTypes = allInputs.map(_.dataType)

    // Start processing.
    child.execute().mapPartitions { iter =>
      val context = TaskContext.get()

      // Get all relevant projections.
      val resultProj = createResultProjection(expressions)
      val pythonInputProj = UnsafeProjection.create(
        allInputs,
        windowBoundsInput.map(ref =>
          AttributeReference(s"i_${ref.ordinal}", ref.dataType)()) ++ child.output
      )
      val pythonInputSchema = StructType(
        allInputTypes.zipWithIndex.map { case (dt, i) =>
          StructField(s"_$i", dt)
        }
      )
      val grouping = UnsafeProjection.create(partitionSpec, child.output)

      // The queue used to buffer input rows so we can drain it to
      // combine input with output from Python.
      val queue = HybridRowQueue(context.taskMemoryManager(),
        new File(Utils.getLocalDir(SparkEnv.get.conf)), child.output.length)
      context.addTaskCompletionListener[Unit] { _ =>
        queue.close()
      }

      // Every input row is copied into the queue as it streams past, so it can
      // be joined back with the Python output later.
      val stream = iter.map { row =>
        queue.add(row.asInstanceOf[UnsafeRow])
        row
      }

      val pythonInput = new Iterator[Iterator[UnsafeRow]] {

        // Manage the stream and the grouping.
        var nextRow: UnsafeRow = null
        var nextGroup: UnsafeRow = null
        var nextRowAvailable: Boolean = false
        private[this] def fetchNextRow(): Unit = {
          nextRowAvailable = stream.hasNext
          if (nextRowAvailable) {
            nextRow = stream.next().asInstanceOf[UnsafeRow]
            nextGroup = grouping(nextRow)
          } else {
            nextRow = null
            nextGroup = null
          }
        }
        fetchNextRow()

        // Manage the current partition.
        val buffer: ExternalAppendOnlyUnsafeRowArray =
          new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)
        var bufferIterator: Iterator[UnsafeRow] = _

        // Shared row holding the current lower/upper bound values for all bounded frames.
        val indexRow = new SpecificInternalRow(Array.fill(numBoundIndices)(IntegerType))

        val frames = factories.map(_(indexRow))

        private[this] def fetchNextPartition(): Unit = {
          // Collect all the rows in the current partition.
          // Before we start to fetch new input rows, make a copy of nextGroup.
          val currentGroup = nextGroup.copy()

          // clear last partition
          buffer.clear()

          while (nextRowAvailable && nextGroup == currentGroup) {
            buffer.add(nextRow)
            fetchNextRow()
          }

          // Setup the frames.
          var i = 0
          while (i < numFrames) {
            frames(i).prepare(buffer)
            i += 1
          }

          // Setup iteration
          rowIndex = 0
          bufferIterator = buffer.generateIterator()
        }

        // Iteration
        var rowIndex = 0

        override final def hasNext: Boolean =
          (bufferIterator != null && bufferIterator.hasNext) || nextRowAvailable

        override final def next(): Iterator[UnsafeRow] = {
          // Load the next partition if we need to.
          if ((bufferIterator == null || !bufferIterator.hasNext) && nextRowAvailable) {
            fetchNextPartition()
          }

          val join = new JoinedRow

          bufferIterator.zipWithIndex.map {
            case (current, index) =>
              var frameIndex = 0
              while (frameIndex < numFrames) {
                frames(frameIndex).write(index, current)
                // If the window is unbounded we don't need to write out window bounds.
                if (isBounded(frameIndex)) {
                  indexRow.setInt(
                    lowerBoundIndex(frameIndex), frames(frameIndex).currentLowerBound())
                  indexRow.setInt(
                    upperBoundIndex(frameIndex), frames(frameIndex).currentUpperBound())
                }
                frameIndex += 1
              }

              pythonInputProj(join(indexRow, current))
          }
        }
      }

      val windowFunctionResult = new ArrowPythonRunner(
        pyFuncs,
        PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF,
        argOffsets,
        pythonInputSchema,
        sessionLocalTimeZone,
        pythonRunnerConf).compute(pythonInput, context.partitionId(), context)

      // Join each Python output row back with the buffered input row it was
      // computed from, then project to the final output schema.
      val joined = new JoinedRow

      windowFunctionResult.flatMap(_.rowIterator.asScala).map { windowOutput =>
        val leftRow = queue.remove()
        val joinedRow = joined(leftRow, windowOutput)
        resultProj(joinedRow)
      }
    }
  }

  override protected def withNewChildInternal(newChild: SparkPlan): WindowInPandasExec =
    copy(child = newChild)
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala | Scala | apache-2.0 | 15,421 |
package mr.merc.local
import java.util.ResourceBundle
import scala.jdk.CollectionConverters._
import java.util.Collections
/**
 * A [[java.util.ResourceBundle]] backed by the in-memory message map that
 * `Localization` provides for the given locale.
 */
class MercResourceBundle(locale: String) extends ResourceBundle {
  // All messages for this locale, resolved once at construction time.
  private val map = Localization.messages(locale)

  def getKeys(): java.util.Enumeration[String] =
    Collections.enumeration(map.keys.asJavaCollection)

  // ResourceBundle's contract requires handleGetObject to return null (not
  // throw) for a missing key, so that parent-bundle lookup and the standard
  // MissingResourceException path work; map(x) would instead throw
  // NoSuchElementException out of getObject/getString.
  def handleGetObject(x: String): AnyRef = map.getOrElse(x, null)
}
package com.twitter.finatra.tests.conversions
import com.twitter.concurrent.exp.AsyncStream
import com.twitter.finatra.conversions.asyncStream._
import com.twitter.inject.Test
import com.twitter.util.{Await, Future, Throw, Try}
/** Exercises the `toAsyncStream` conversions for Future, Option and Try. */
class AsyncStreamConversionsTest extends Test {

  "Future[Seq[T]]" in {
    checkStream(Future(Seq(1, 2, 3)).toAsyncStream, Seq(1, 2, 3))
  }

  "Future[Option[T]]" in {
    checkStream(Future(Option(1)).toAsyncStream, Seq(1))
    checkStream(Future(None).toAsyncStream, Seq())
  }

  "Future[T]" in {
    checkStream(Future(1).toAsyncStream, Seq(1))
  }

  "Failed Future[T]" in {
    intercept[TestException] {
      Await.result(Future.exception[Int](new TestException).toAsyncStream.toSeq())
    }
  }

  "Option[T]" in {
    checkStream(Some(1).toAsyncStream, Seq(1))
    checkStream(None.toAsyncStream, Seq())
  }

  "Try[T]" in {
    checkStream(Try(1).toAsyncStream, Seq(1))
  }

  "Failed Try[T]" in {
    intercept[TestException] {
      Await.result(Throw(new TestException).toAsyncStream.toSeq())
    }
  }

  // Materializes the stream and compares it against the expected elements.
  private def checkStream[T](stream: AsyncStream[T], expected: Seq[T]): Unit = {
    val materialized = Await.result(stream.toSeq())
    materialized should equal(expected)
  }

  class TestException extends Exception
}
package scalabpe.plugin
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.Date
import java.io.{ File, StringWriter }
import java.text.SimpleDateFormat
import javax.jms._
import scala.xml._
import scala.collection.mutable.{ ArrayBuffer, HashMap, HashSet }
import org.apache.activemq._
import com.fasterxml.jackson.core.JsonFactory
import com.sdo.billing.queue._
import com.sdo.billing.queue.impl._
import scalabpe.core._
/*
"ActiveMQ InactivityMonitor Worker" daemon prio=10 tid=0x08549c00 nid=0x3fdb waiting on condition [0x9c269000] for each connection
"ActiveMQ Transport: tcp:///10.132.17.201:61616@45078" prio=10 tid=0x088f4c00 nid=0x3fd6 runnable [0x9c3ad000] for each connection
"ActiveMQ InactivityMonitor WriteCheckTimer" daemon prio=10 tid=0x0864f000 nid=0x3fd9 in Object.wait() [0x9c2ba000] shared for all connections
"ActiveMQ InactivityMonitor ReadCheckTimer" daemon prio=10 tid=0x08748000 nid=0x3fd8 in Object.wait() [0x9c30b000] shared for all connections
"mqsedingthread992" prio=10 tid=0x9c7dd400 nid=0x40f5 waiting on condition [0x9c85c000] for each service, destination
"mqsedingthread991" prio=10 tid=0x9c7d8c00 nid=0x40f4 waiting on condition [0x9c8ad000] for each service, destination
"mq991-thread-1" prio=10 tid=0x9c7d2000 nid=0x40f3 waiting on condition [0x9bead000] for mq config node
"Timer-0" prio=10 tid=0x9c7d3000 nid=0x40f2 in Object.wait() [0x9befe000] for each persist queue manager
*/
// Configuration for one MQ destination: target queue name plus whether
// messages should be sent persistently.
class DestinationCfg(val queueName: String, val persistent: Boolean = true)
// Broker connection parameters parsed from the <Connection> config string.
class MqConn(val brokerUrl: String, val username: String, val password: String)
object MqActor {
  // Local persist directories already claimed by an MqActor instance; used to
  // reject two actors configured with the same directory.
  val localDirs = new HashSet[String]()
}
class MqActor(val router: Router, val cfgNode: Node) extends Actor with Logging with Closable with SelfCheckLike with Dumpable {
// Parses "service=<url> username=<u> password=<p>" connection strings.
val connReg = """^service=([^ ]+)[ ]+username=([^ ]+)[ ]+password=([^ ]+)$""".r
// One MqClient per configured serviceId (one per <Destination> node).
val mqClients = new HashMap[Int, MqClient]()
// Comma-separated service ids handled by this actor; set in init().
var serviceIds: String = _

val threadNum = 1
val queueSize = 20000
var threadFactory: ThreadFactory = _
// Bounded worker pool that runs onReceive() off the caller's thread.
var pool: ThreadPoolExecutor = _

// Disk-backed queues, one per service id, buffering messages for the broker.
var persistQueueManager: PersistQueueManagerImpl = _

// One sender thread per destination, draining its persist queue.
val sendThreads = new ArrayBuffer[Thread]()

val jsonFactory = new JsonFactory()
val localIp = IpUtils.localIp()
// Tracks whether the last local-queue write failed (flipped in onReceive).
val hasIOException = new AtomicBoolean()
// Optional custom serializer; when null, requests are serialized as JSON.
var pluginObj: MqSelialize = _

init
/** Logs pool/thread statistics, then the persist-queue statistics. */
def dump() {
  val info = new StringBuilder
  info.append(s"pool.size=${pool.getPoolSize},")
  info.append(s"pool.getQueue.size=${pool.getQueue.size},")
  info.append(s"sendThreads.size=${sendThreads.size},")
  log.info(info.toString)
  dumpPersistManager
}
/** Logs, per persist queue, its size and its in-memory cache size. */
def dumpPersistManager() {
  val sizeBuff = new StringBuilder("queue size ")
  val cacheBuff = new StringBuilder("queue cacheSize ")
  val queueNames = persistQueueManager.getQueueNames
  (0 until queueNames.size).foreach { i =>
    val name = queueNames.get(i)
    val queue = persistQueueManager.getQueue(name)
    sizeBuff.append(name).append("=").append(queue.size).append(",")
    cacheBuff.append(name).append("=").append(queue.cacheSize).append(",")
  }
  log.info(sizeBuff.toString)
  log.info(cacheBuff.toString)
}
// One-time setup, invoked from the constructor: loads config, claims the
// local persist directory, builds the worker pool, validates service ids,
// and starts one MQ client + sender thread per configured destination.
def init() {

  serviceIds = (cfgNode \\ "ServiceId").text

  var localDir = (cfgNode \\ "LocalDir").text
  if (localDir == "") {
    localDir = Router.dataDir + File.separator + "activemq"
  }

  // Optional custom serializer plugin, loaded reflectively by class name.
  val s = (cfgNode \\ "@plugin").text.toString
  if (s != "") {
    val plugin = s
    try {
      val obj = Class.forName(plugin).getConstructors()(0).newInstance()
      if (!obj.isInstanceOf[MqSelialize]) {
        throw new RuntimeException("plugin %s is not MqSelialize".format(plugin))
      }
      pluginObj = obj.asInstanceOf[MqSelialize]
    } catch {
      case e: Exception =>
        log.error("plugin {} cannot be loaded", plugin)
        throw e
    }
  }

  // Each actor must own a distinct local directory for its persist queues.
  if (MqActor.localDirs.contains(localDir)) {
    throw new RuntimeException("Mq localDir cannot be the same, the default is data/mq")
  }
  MqActor.localDirs.add(localDir)

  // Absolute paths are used verbatim; relative ones are anchored at rootDir.
  var dataDir = ""
  if (localDir.startsWith("/")) dataDir = localDir
  else dataDir = router.rootDir + File.separator + localDir
  new File(dataDir).mkdirs()

  persistQueueManager = new PersistQueueManagerImpl()
  persistQueueManager.setDataDir(dataDir)
  persistQueueManager.init()

  val firstServiceId = serviceIds.split(",")(0)
  threadFactory = new NamedThreadFactory("mq" + firstServiceId)
  // Bounded queue: overflow is rejected in receive() with SERVICE_FULL.
  pool = new ThreadPoolExecutor(threadNum, threadNum, 0, TimeUnit.SECONDS, new ArrayBlockingQueue[Runnable](queueSize), threadFactory)
  pool.prestartAllCoreThreads()

  // Fail fast if any configured service id has no TLV codec.
  val serviceIdArray = serviceIds.split(",").map(_.toInt)
  for (serviceId <- serviceIdArray) {
    val codec = router.codecs.findTlvCodec(serviceId)
    if (codec == null) {
      throw new RuntimeException("serviceId not found, serviceId=" + serviceId)
    }
  }

  val connStr = (cfgNode \\ "Connection").text
  val mqConn = parseConnStr(connStr)

  // One MqClient and one dedicated sender thread per <Destination> node.
  val destNodes = (cfgNode \\ "Destination")
  for (p <- destNodes) {
    val serviceId = (p \\ "@serviceId").toString.toInt
    val queueName = (p \\ "@queueName").toString
    val persistentStr = (p \\ "@persistent").toString.toLowerCase
    val persistent = persistentStr == "true" || persistentStr == "yes" || persistentStr == "t" || persistentStr == "y" || persistentStr == "1"

    val mqClient = new MqClient(mqConn, new DestinationCfg(queueName, persistent))
    mqClients.put(serviceId, mqClient)

    val t = new Thread("mqsedingthread" + serviceId) {
      override def run() {
        sendData(serviceId)
      }
    }
    t.start()
    sendThreads += t
  }

  log.info("MqActor started {}", serviceIds)
}
/** Parses a broker connection string of the shape matched by `connReg`
  * (brokerUrl/username/password) into an MqConn; throws on malformed input. */
def parseConnStr(connStr: String): MqConn = {
connStr match {
case connReg(brokerUrl, username, password) =>
new MqConn(brokerUrl, username, password)
case _ =>
throw new RuntimeException("connection string is not valid,conn=%s".format(connStr))
}
}
/** Shuts the actor down: drains the worker pool, stops the per-service sender
  * threads, closes every MQ client, and closes the persist queue manager last. */
def close() {
val t1 = System.currentTimeMillis
pool.shutdown()
// Give in-flight tasks up to 5s before the sender threads are interrupted.
pool.awaitTermination(5, TimeUnit.SECONDS)
val t2 = System.currentTimeMillis
if (t2 - t1 > 100)
log.warn("MqActor long time to shutdown pool, ts={}", t2 - t1)
// Sender threads block in queue.get()/Thread.sleep; interrupt then join each.
for (t <- sendThreads) {
t.interrupt()
t.join()
}
sendThreads.clear()
for ((serviceId, mqClient) <- mqClients) {
mqClient.close()
}
mqClients.clear()
// Closed last so no sender thread ever touches a closed queue manager.
if (persistQueueManager != null) {
persistQueueManager.close()
persistQueueManager = null
}
log.info("MqActor closed {} ", serviceIds)
}
/** Actor entry point: dispatches each Request onto the bounded worker pool,
  * replying SERVICE_FULL when the pool's queue rejects the task. */
override def receive(v: Any): Unit = {
v match {
case req: Request =>
try {
pool.execute(new Runnable() {
def run() {
try {
onReceive(req)
} catch {
case e: Exception =>
log.error("MqActor exception req={}", req, e)
}
}
})
} catch {
case e: RejectedExecutionException =>
// The bounded ArrayBlockingQueue is full -- push back to the caller.
replyError(ResultCodes.SERVICE_FULL, req)
log.error("MqActor queue is full, serviceIds={}", serviceIds)
}
case _ =>
log.error("unknown msg")
}
}
/** Worker-pool handler: serializes the request (via the optional plugin, else
  * as plain JSON) and appends it to the per-serviceId persist queue. Replies
  * OK on successful enqueue; on IO failure replies the internal error code
  * -10242500 and raises the hasIOException selfcheck flag. */
def onReceive(v: Any) {
v match {
case req: Request =>
// Serialize either through the configured plugin or the default JSON form.
val s =
if (pluginObj != null) pluginObj.selialize(req.serviceId, req.msgId, req.body)
else requestToJson(req)
try {
val queue = persistQueueManager.getQueue(String.valueOf(req.serviceId))
queue.put(s)
replyOk(req)
hasIOException.set(false)
} catch {
case e: Exception =>
// Fix: pass the exception so the stack trace is logged (it was
// previously dropped, unlike the handler in receive()).
log.error("cannot save data to local mq data={}", s, e)
replyError(-10242500, req)
hasIOException.set(true)
}
case _ =>
log.error("unknown msg")
}
}
/** Dedicated sender loop for one serviceId: pulls serialized records from the
  * persist queue and delivers them to ActiveMQ, retrying the same record every
  * 5s until the broker accepts it. Returns on interrupt or queue shutdown. */
def sendData(serviceId: Int) {
val queue = persistQueueManager.getQueue(String.valueOf(serviceId))
if (queue == null) return
while (true) {
try {
// Blocking read; -1 signals the queue has been closed.
val idx = queue.get()
if (idx == -1) {
return
}
val str = queue.getString(idx)
if (log.isDebugEnabled()) {
log.debug("json=" + str)
}
val mqClient = mqClients.getOrElse(serviceId, null)
if (mqClient != null) {
// At-least-once delivery: keep retrying this record until send succeeds.
var ok = false
do {
ok = mqClient.send(str)
if (!ok) {
Thread.sleep(5000)
}
} while (!ok)
}
// Only remove the record from disk once delivered (or when no client exists).
queue.commit(idx)
} catch {
case e: InterruptedException =>
return
case e: Exception =>
log.error("exception in sending mq data {}", e.getMessage)
Thread.sleep(5000)
}
}
}
/** Replies success (code 0) for the given request. */
def replyOk(req: Request) {
val res = new Response(0, new HashMapStringAny(), req)
router.reply(new RequestResponseInfo(req, res))
}
/** Replies the given error code for the given request. */
def replyError(code: Int, req: Request) {
val res = new Response(code, new HashMapStringAny(), req)
router.reply(new RequestResponseInfo(req, res))
}
/** Serializes a request as a flat JSON object: four standard header fields
  * plus every body entry. Note: values that are neither String nor Int fall
  * through to toString and are emitted as JSON *strings* (so Long/Double
  * become strings) -- downstream consumers appear to rely on this format. */
def requestToJson(req: Request): String = {
val writer = new StringWriter()
val jsonGenerator = jsonFactory.createGenerator(writer)
jsonGenerator.writeStartObject()
val f = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
val now = f.format(new Date())
// four standard mq fields
jsonGenerator.writeStringField("messageId", req.requestId)
jsonGenerator.writeStringField("messageSourceIp", localIp)
jsonGenerator.writeStringField("messageTimestamp", now)
jsonGenerator.writeStringField("messageType", req.msgId.toString)
for ((key, value) <- req.body) {
value match {
case s: String =>
jsonGenerator.writeStringField(key, s)
case i: Int =>
jsonGenerator.writeNumberField(key, i)
case _ =>
jsonGenerator.writeStringField(key, value.toString)
}
}
jsonGenerator.writeEndObject()
jsonGenerator.close()
writer.toString()
}
/** Health report: one error entry per MQ client in error state, one for local
  * persist-queue IO failure; a single OK entry when nothing is wrong. */
def selfcheck(): ArrayBuffer[SelfCheckResult] = {
val buff = new ArrayBuffer[SelfCheckResult]()
val errorId = 65301006
for ((serviceId, mqClient) <- mqClients if mqClient.hasError()) {
val msg = "mq [" + mqClient.destCfg.queueName + "] has error"
buff += new SelfCheckResult("SCALABPE.MQ", errorId, true, msg)
}
val ioErrorId = 65301007
if (hasIOException.get()) {
buff += new SelfCheckResult("SCALABPE.IO", ioErrorId, true, "local persistqueue has io error")
}
if (buff.isEmpty) {
buff += new SelfCheckResult("SCALABPE.MQ", errorId)
}
buff
}
}
/** Thin wrapper around one ActiveMQ producer for a single destination queue.
  * The JMS connection/session/producer are created lazily by prepare() on the
  * first send and torn down by reset() whenever a send fails, so send() is
  * self-healing. Passwords may be encrypted and are decrypted via a native
  * library ("sec") loaded at construction time. */
class MqClient(val mqConn: MqConn, val destCfg: DestinationCfg) extends Logging {
var connectionFactory: ActiveMQConnectionFactory = _
var connection: Connection = _
var session: Session = _
var destination: Destination = _
var producer: MessageProducer = _
init
/** Builds the connection factory only; the actual broker connection is lazy. */
def init() {
loadSo
connectionFactory = new ActiveMQConnectionFactory()
connectionFactory.setUserName(mqConn.username)
connectionFactory.setPassword(decrypt(mqConn.password))
connectionFactory.setBrokerURL(mqConn.brokerUrl)
log.info("mq client started, brokerUrl=%s".format(mqConn.brokerUrl))
}
/** Loads the native "sec" library backing the @native decrypt methods.
  * Failure is only logged; decrypt calls would then fail at call time. */
def loadSo() {
try {
System.loadLibrary("sec");
log.info("library sec loaded")
} catch {
case e: Throwable =>
log.error("cannot load library sec")
}
}
def close() {
reset()
log.info("mq client closed, brokerUrl=%s".format(mqConn.brokerUrl))
}
// True while no live session/producer exists. NOTE(review): since prepare()
// runs lazily, this is also true before the first send ever happens --
// confirm that selfcheck flagging a never-used client is intended.
def hasError(): Boolean = {
return (session == null || producer == null)
}
// The password may carry a "des:", "desx:" or "rsa:" prefix selecting a
// native decryption scheme; any other value is used as plain text.
def decrypt(pwd: String): String = {
if (pwd.startsWith("des:")) {
decryptDes(pwd.substring(4))
} else if (pwd.startsWith("desx:")) {
decryptDesX(pwd.substring(5))
} else if (pwd.startsWith("rsa:")) {
decryptRsa(pwd.substring(4))
} else {
pwd
}
}
@native def decryptDes(s: String): String;
@native def decryptDesX(s: String): String;
@native def decryptRsa(s: String): String;
/** Closes producer, session and connection in that order; each close failure
  * is logged and swallowed so all three are always attempted and nulled. */
def reset() {
if (producer != null) {
try {
producer.close()
} catch {
case e: Exception =>
log.error("producer.close error brokerUrl=%s, exception=%s".format(mqConn.brokerUrl, e.getMessage))
}
producer = null
}
destination = null
if (session != null) {
try {
session.close()
} catch {
case e: Exception =>
log.error("session.close error brokerUrl=%s, exception=%s".format(mqConn.brokerUrl, e.getMessage))
}
session = null
}
if (connection != null) {
try {
connection.close()
} catch {
case e: Exception =>
log.error("connection.close error brokerUrl=%s, exception=%s".format(mqConn.brokerUrl, e.getMessage))
}
connection = null
}
}
/** Opens connection/session/producer for the configured queue and applies the
  * persistent or non-persistent delivery mode from the destination config. */
def prepare() {
connection = connectionFactory.createConnection()
connection.start()
session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
destination = session.createQueue(destCfg.queueName)
producer = session.createProducer(destination)
val d = if (destCfg.persistent) DeliveryMode.PERSISTENT else DeliveryMode.NON_PERSISTENT
producer.setDeliveryMode(d)
}
/** Sends one text message, lazily (re)building the JMS objects if needed.
  * Returns false after tearing down the broken connection on any failure,
  * so the caller can retry later against a fresh connection. */
def send(str: String): Boolean = {
try {
if (session == null || producer == null) {
prepare()
}
val message = session.createTextMessage(str)
producer.send(message)
return true
} catch {
case e: Exception =>
log.error("mq send error brokerUrl=%s, exception=%s, str=%s".format(mqConn.brokerUrl, e.getMessage, str))
reset()
return false
}
}
}
| bruceran/scalabpe | third_party/activemq/src/actor_mq.scala | Scala | apache-2.0 | 15,991 |
package com.github.mdr.mash.evaluator
/** Evaluator tests for Mash `import`: importing a single member, wildcard
  * imports from objects and instances, how imported members interact with
  * "shy" built-in methods, and the failure case of importing a private
  * member. Each string is a Mash program; `==>` asserts its result. */
class ImportTest extends AbstractEvaluatorTest {
"import hash.sha256; sha256" ==> "sha256"
"import hash._; sha256" ==> "sha256"
"obj = { foo: 42 }; import obj.foo; foo" ==> 42
"class A n { def inc = n += 1 }; a = A 0; import a._; inc; a.n" ==> 1
"x = {}; import x._; where.getClass" ==> "Function" // Object.where is a 'shy' method
"x = {}; import x._; isNull.getClass" ==> "Function" // Any.isNull is a 'shy' method
"x = {}; import x.where; where.getClass" ==> "BoundMethod"
"class A { @private def method = 42 }; a = A.new; import a.method".shouldThrowAnException
}
| mdr/mash | src/test/scala/com/github/mdr/mash/evaluator/ImportTest.scala | Scala | mit | 625 |
package scalabpe.plugin
import java.io.File
import java.io.StringWriter
import java.util.Timer
import java.util.TimerTask
import java.util.concurrent.ArrayBlockingQueue
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.ThreadFactory
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.locks.ReentrantLock
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.xml.Node
import com.fasterxml.jackson.core.JsonFactory
import com.fasterxml.jackson.databind.ObjectMapper
import com.sdo.billing.queue.PersistQueue
import com.sdo.billing.queue.impl.PersistQueueManagerImpl
import scalabpe.core.Actor
import scalabpe.core.AfterInit
import scalabpe.core.BeforeClose
import scalabpe.core.Closable
import scalabpe.core.Dumpable
import scalabpe.core.HashMapStringAny
import scalabpe.core.InvokeResult
import scalabpe.core.Logging
import scalabpe.core.NamedThreadFactory
import scalabpe.core.Request
import scalabpe.core.RequestIdGenerator
import scalabpe.core.RequestResponseInfo
import scalabpe.core.Response
import scalabpe.core.ResultCodes
import scalabpe.core.Router
import scalabpe.core.SelfCheckLike
import scalabpe.core.SelfCheckResult
/** Registry of localDir paths already claimed by a LocalQueueActor; used to
  * reject two actors sharing the same on-disk queue directory. */
object LocalQueueActor {
val localDirs = new HashSet[String]()
}
// Per-msgId overrides: max delivery attempts, retry interval (ms) and a
// concurrency limit; 0 means "fall back to the global default".
class MsgPaCfg(val maxSendTimes: Int, val retryInterval: Int, val concurrentNum: Int = 0)
/** Actor that persists incoming requests into named local disk queues
  * (queue name taken from the request field bound to TLV code 10000) and
  * relies on LocalQueueLike to asynchronously forward them. */
class LocalQueueActor(override val router: Router, override val cfgNode: Node)
extends LocalQueueLike(router, cfgNode) {
val queueNameMap = new HashMap[String, String]() // serviceId:msgId -> queueNameKey
init
/** Reads per-msg retry config and validates that every configured serviceId
  * has a codec and that every message defines a queueName field (TLV 10000). */
override def init() {
queueTypeName = "localqueue"
serviceIds = (cfgNode \\ "ServiceId").text
super.init
var infos = cfgNode \\ "Msg"
for (inf <- infos) {
var msgId = (inf \\ "@msgId").text
var s = (inf \\ "@retryInterval").toString()
var retryIntervalCfg = if (s != "") s.toInt else 0
s = (inf \\ "@maxSendTimes").toString()
var maxSendTimesCfg = if (s != "") s.toInt else 0
s = (inf \\ "@concurrentNum").toString()
var concurrentNum = if (s != "") s.toInt else 0
msgIdCfgMap.put(msgId, new MsgPaCfg(maxSendTimesCfg, retryIntervalCfg, concurrentNum))
}
val serviceIdArray = serviceIds.split(",").map(_.toInt)
for (serviceId <- serviceIdArray) {
val codec = router.codecs.findTlvCodec(serviceId)
if (codec == null) {
throw new RuntimeException("serviceId not found, serviceId=" + serviceId)
}
if (codec != null) {
val tlvType = codec.findTlvType(10000)
if (tlvType == null) {
throw new RuntimeException("queueName not configured for serviceId=" + serviceId)
}
val msgIds = codec.msgKeyToTypeMapForReq.keys
for (msgId <- msgIds) {
val reqNameMap = codec.msgKeysForReq.getOrElse(msgId, null)
val keyToTypeMapReq = codec.msgKeyToTypeMapForReq.getOrElse(msgId, null)
var found = false
// Find the request field whose TLV code is 10000; that field carries
// the destination queue name at runtime.
for (key <- reqNameMap) {
val typeKey = keyToTypeMapReq.getOrElse(key, null)
// NOTE(review): getOrElse(..., null) followed by tlvType.code will NPE
// if typeKey is unmapped -- presumably the codec guarantees the
// mapping; confirm before relying on it.
val tlvType = codec.typeNameToCodeMap.getOrElse(typeKey, null)
if (tlvType.code == 10000) {
found = true
queueNameMap.put(serviceId + ":" + msgId, key)
}
}
if (!found) {
throw new RuntimeException("queueName not configured for serviceId=%d,msgId=%d".format(serviceId, msgId))
}
}
}
}
}
/** Resolves the on-disk directory and enforces that it is unique per actor. */
override def checkLocalDir() {
localDir = (cfgNode \\ "LocalDir").text
if (localDir == "") {
localDir = Router.dataDir + File.separator + queueTypeName
}
if (LocalQueueActor.localDirs.contains(localDir)) {
throw new RuntimeException("LocalQueueActor localDir cannot be the same, the default is data/" + queueTypeName)
}
LocalQueueActor.localDirs.add(localDir)
}
/** Requests are handled here; everything else (InvokeResult) goes to the base. */
override def onReceive(v: Any) {
v match {
case req: Request =>
onReceiveRequest(req)
case _ =>
super.onReceive(v)
}
}
/** Looks up the queue-name field for this serviceId:msgId, serializes the
  * request to JSON and enqueues it; replies OK or SERVICE_INTERNALERROR. */
def onReceiveRequest(req: Request) {
val queueNameKey = queueNameMap.getOrElse(req.serviceId + ":" + req.msgId, null)
if (queueNameKey == null) {
log.error("queueName not found serviceId=%d,msgId=%d".format(req.serviceId, req.msgId))
replyError(ResultCodes.SERVICE_INTERNALERROR, req)
return
}
val queueName = req.s(queueNameKey)
if (queueName == null) {
log.error("queueName not found serviceId=%d,msgId=%d".format(req.serviceId, req.msgId))
replyError(ResultCodes.SERVICE_INTERNALERROR, req)
return
}
val s = requestToJson(req)
val ok = saveToQueue(queueName, s)
if (ok)
replyOk(req)
else
replyError(ResultCodes.SERVICE_INTERNALERROR, req)
}
}
/** Mutable per-queue send state: the record currently in flight for one queue
  * (persist-queue index, serialized json, outstanding requestId and the
  * number of delivery attempts so far). */
class LocalQueueSendingData(val queueName: String, var requestId: String = null, var idx: Long = 0,
var json: String = null, var sendCount: Int = 1) {
// When delivery of the current record started; reported as x_sendTimeUsed.
var createTime = System.currentTimeMillis
/** Clears all per-record state so the holder can pick up the next record. */
def reset() {
requestId = null
json = null
idx = 0
sendCount = 1
createTime = System.currentTimeMillis
}
}
/** Base class for actors that buffer requests in local persistent queues and
  * forward them to receiverServiceId with bounded retries. One send thread
  * multiplexes all queues; a Timer schedules retries; a worker pool handles
  * incoming messages. */
abstract class LocalQueueLike(val router: Router, val cfgNode: Node) extends Actor with Logging
with Closable with BeforeClose with AfterInit with SelfCheckLike with Dumpable {
// Configuration, filled in by init()/checkLocalDir() of the concrete subclass.
var queueTypeName: String = _
var serviceIds: String = _
var localDir: String = _
var threadNum = 1
val queueSize = 20000
// Execution resources.
var threadFactory: ThreadFactory = _
var pool: ThreadPoolExecutor = _
var persistQueueManager: PersistQueueManagerImpl = _
var timer: Timer = _
var sendThread: Thread = _
// Forwarding target and global retry policy (overridable per msgId).
var receiverServiceId = 0
var maxSendTimes = 60
var retryInterval = 5000
val jsonFactory = new JsonFactory()
val mapper = new ObjectMapper()
val hasIOException = new AtomicBoolean()
// lock/hasNewData form the wake-up protocol for the single send thread.
val lock = new ReentrantLock(false)
val hasNewData = lock.newCondition()
val sequence = new AtomicInteger(1)
// Work handed to the send thread from other threads (timer, pool).
val waitingRunnableList = new ConcurrentLinkedQueue[Runnable]()
val waitingQueueNameList = new ConcurrentLinkedQueue[String]()
// Send-thread-private bookkeeping of queues with/without pending data.
val queuesNoData = new HashMap[String, LocalQueueSendingData]()
val queuesHasData = new HashMap[String, LocalQueueSendingData]()
val requestIdMap = new ConcurrentHashMap[String, LocalQueueSendingData]() // requestId -> LocalQueueSendingData
val shutdown = new AtomicBoolean()
val beforeCloseFlag = new AtomicBoolean()
val msgIdCfgMap = new HashMap[String, MsgPaCfg]() //msgid -> maxSendTimes,retryInterval
/** Logs a one-line snapshot of pool/queue/bookkeeping sizes for diagnostics. */
def dump() {
val buff = new StringBuilder
buff.append("pool.size=").append(pool.getPoolSize).append(",")
buff.append("pool.getQueue.size=").append(pool.getQueue.size).append(",")
buff.append("sendThread size=1").append(",")
buff.append("retrytimer thread size=1").append(",")
buff.append("waitingRunnableList.size=").append(waitingRunnableList.size).append(",")
buff.append("waitingQueueNameList.size=").append(waitingQueueNameList.size).append(",")
buff.append("queuesNoData.size=").append(queuesNoData.size).append(",")
buff.append("queuesHasData.size=").append(queuesHasData.size).append(",")
buff.append("requestIdMap.size=").append(requestIdMap.size).append(",")
buff.append("queueNameCfgMap.size=").append(msgIdCfgMap.size).append(",")
log.info(buff.toString)
dumpPersistManager
}
/** Logs per-queue size and cache size for every persistent queue on disk. */
def dumpPersistManager() {
val buff1 = new StringBuilder
val buff2 = new StringBuilder
buff1.append("queue size ")
buff2.append("queue cacheSize ")
val queueNames = persistQueueManager.getQueueNames
for (i <- 0 until queueNames.size) {
val queue = persistQueueManager.getQueue(queueNames.get(i))
buff1.append(queueNames.get(i)).append("=").append(queue.size).append(",")
buff2.append(queueNames.get(i)).append("=").append(queue.cacheSize).append(",")
}
log.info(buff1.toString)
log.info(buff2.toString)
}
/** Common initialization: resolves the data directory, reads retry/threading
  * attributes, opens the persist queue manager, re-registers queues that
  * already exist on disk, and builds (but does not start) the send thread. */
def init() {
checkLocalDir()
timer = new Timer(queueTypeName + "_retrytimer")
var dataDir = ""
if (localDir.startsWith("/")) dataDir = localDir
else dataDir = router.rootDir + File.separator + localDir
var s = (cfgNode \\ "@maxSendTimes").text
if (s != "") maxSendTimes = s.toInt
s = (cfgNode \\ "@retryInterval").text
if (s != "") retryInterval = s.toInt
s = (cfgNode \\ "@threadNum").text
if (s != "") threadNum = s.toInt
new File(dataDir).mkdirs()
persistQueueManager = new PersistQueueManagerImpl()
persistQueueManager.setDataDir(dataDir)
persistQueueManager.init()
// Queues left over from a previous run still hold data: schedule them.
val queueNames = persistQueueManager.getQueueNames
for (i <- 0 until queueNames.size) {
waitingQueueNameList.offer(queueNames.get(i))
}
val firstServiceId = serviceIds.split(",")(0)
threadFactory = new NamedThreadFactory(queueTypeName + "_" + firstServiceId)
pool = new ThreadPoolExecutor(threadNum, threadNum, 0, TimeUnit.SECONDS, new ArrayBlockingQueue[Runnable](queueSize), threadFactory)
pool.prestartAllCoreThreads()
receiverServiceId = (cfgNode \\ "@receiverServiceId").text.toInt
if (receiverServiceId <= 0)
throw new RuntimeException("receiverServiceId is not valid receiverServiceId=" + receiverServiceId)
// Started later in afterInit(), once the rest of the system is up.
sendThread = new Thread(queueTypeName + "_sedingthread" + firstServiceId) {
override def run() {
sendData()
}
}
log.info(getClass.getName + " started {}", serviceIds)
}
/** Lifecycle hook: the send thread starts only after full system init. */
def afterInit() {
sendThread.start()
log.info(getClass.getName + " sendThread started")
}
/** Lifecycle hook: stops initiating new sends while shutdown is in progress
  * (send() short-circuits to true once this flag is set). */
def beforeClose() {
beforeCloseFlag.set(true)
log.info(getClass.getName + " beforeClose called")
}
// Subclasses resolve and register their unique on-disk directory here.
def checkLocalDir()
/** Shuts down: cancels the retry timer, drains the worker pool, stops the
  * send thread, and closes the persist queue manager last. */
def close() {
shutdown.set(true)
timer.cancel()
val t1 = System.currentTimeMillis
pool.shutdown()
pool.awaitTermination(5, TimeUnit.SECONDS)
val t2 = System.currentTimeMillis
if (t2 - t1 > 100)
log.warn(getClass.getName + " long time to shutdown pool, ts={}", t2 - t1)
sendThread.interrupt()
sendThread.join()
if (persistQueueManager != null) {
persistQueueManager.close()
persistQueueManager = null
}
log.info(getClass.getName + " closed {} ", serviceIds)
}
/** Actor entry point: runs onReceive on the bounded worker pool; when the
  * pool rejects the task, Requests get a SERVICE_FULL reply. */
override def receive(v: Any): Unit = {
try {
pool.execute(new Runnable() {
def run() {
try {
onReceive(v)
} catch {
case e: Exception =>
log.error(getClass.getName + " exception req={}", v.toString, e)
}
}
})
} catch {
case e: RejectedExecutionException =>
// Only Requests can be answered; InvokeResults are silently dropped here.
if (v.isInstanceOf[Request])
replyError(ResultCodes.SERVICE_FULL, v.asInstanceOf[Request])
log.error(getClass.getName + " queue is full, serviceIds={}", serviceIds)
}
}
/** Replies success (code 0) for the given request. */
def replyOk(req: Request) {
val res = new Response(0, new HashMapStringAny(), req)
router.reply(new RequestResponseInfo(req, res))
}
/** Replies the given error code for the given request. */
def replyError(code: Int, req: Request) {
val res = new Response(code, new HashMapStringAny(), req)
router.reply(new RequestResponseInfo(req, res))
}
/** Health report: a single IO-error entry when the local persist queue has
  * recently failed; an empty buffer otherwise. */
def selfcheck(): ArrayBuffer[SelfCheckResult] = {
val buff = new ArrayBuffer[SelfCheckResult]()
var ioErrorId = 65301007
if (hasIOException.get()) {
val msg = "local persistqueue has io error"
buff += new SelfCheckResult("SCALABPE.IO", ioErrorId, true, msg)
}
buff
}
/** Base handler: only InvokeResult (reply for a forwarded record) is handled
  * here; subclasses override to handle Requests and delegate the rest. */
def onReceive(v: Any) {
v match {
case res: InvokeResult =>
onReceiveResponse(res)
case _ =>
log.error("unknown msg")
}
}
/** Effective max delivery attempts for msgId: the per-msg override when it is
  * positive, otherwise the global default. */
def getMaxSendTimes(msgId: Int): Int = {
msgIdCfgMap.get(msgId.toString) match {
case Some(cfg) if cfg.maxSendTimes > 0 => cfg.maxSendTimes
case _ => maxSendTimes
}
}
/** Effective retry interval (ms) for msgId: the per-msg override when it is
  * positive, otherwise the global default. */
def getRetryInterval(msgId: Int): Int = {
msgIdCfgMap.get(msgId.toString) match {
case Some(cfg) if cfg.retryInterval > 0 => cfg.retryInterval
case _ => retryInterval
}
}
/** Handles the reply for a forwarded record. On success (code 0) or after the
  * last allowed attempt, the record is committed and the slot recycled (on
  * the send thread, which owns all queue access); otherwise a retry is
  * scheduled on the timer after the configured interval.
  * NOTE(review): when the stored json is unparseable or lacks X-MSGID the
  * method returns without committing or retrying -- the record then stays in
  * the queue and blocks that queue; confirm this is acceptable. */
def onReceiveResponse(res: InvokeResult) {
val sendingdata = requestIdMap.remove(res.requestId)
// Fix: guard against an unknown/duplicate requestId *before* touching the
// entry. Previously sendingdata.json was read first, which threw an NPE
// whenever the requestId was not (or no longer) in the map.
if (sendingdata == null) return
val body = jsonToBody(sendingdata.json)
if (body == null) return
val msgId = body.i("X-MSGID")
if (msgId <= 0) {
log.error("X-MSGID not found or not valid in json " + sendingdata.json)
return
}
val maxSendTimes = getMaxSendTimes(msgId)
val retryInterval = getRetryInterval(msgId)
if (res.code == 0 || sendingdata.sendCount >= maxSendTimes) {
if (res.code != 0) {
log.error("send failed, requestId=" + sendingdata.requestId)
}
// Commit and recycle on the send thread to keep queue access single-threaded.
waitingRunnableList.offer(
new Runnable() {
def run() {
commit(sendingdata.queueName, sendingdata.idx)
sendingdata.reset()
}
})
wakeUpSendThread()
return
}
// Delivery failed and attempts remain: schedule the retry on the timer.
timer.schedule(new TimerTask() {
def run() {
waitingRunnableList.offer(
new Runnable() {
def run() {
retry(sendingdata)
}
})
wakeUpSendThread()
}
}, retryInterval)
}
/** Nudges the send thread. tryLock avoids blocking the caller; if the lock is
  * busy the signal is skipped, which is safe because the send thread's await
  * has a 1s timeout and re-checks needRun() on every wake-up. */
def wakeUpSendThread() {
if (lock.tryLock()) {
try {
hasNewData.signal()
} finally {
lock.unlock()
}
}
}
/** Main loop of the single send thread: process pending work, then sleep up
  * to 1s (or until signalled) when there is nothing to do. */
def sendData() {
lock.lock()
while (!shutdown.get()) {
try {
sendDataInternal()
if (!needRun()) {
hasNewData.await(1000, TimeUnit.MILLISECONDS) // ignore return code
}
} catch {
case e: InterruptedException =>
case e: Throwable =>
log.error("exception in sendData, e={}", e.getMessage, e)
}
}
lock.unlock()
}
// True when there is queued work: pending runnables, newly touched queues,
// or a queue with data and no request currently in flight.
def needRun() = {
!waitingRunnableList.isEmpty() ||
!waitingQueueNameList.isEmpty() ||
queuesHasData.values.filter(_.requestId == null).size > 0
}
/** Promotes a queue into the has-data set, reusing (and resetting) its
  * existing state object when it was previously idle. Send-thread only. */
def addQueueName(queueName: String) {
var existed = queuesHasData.contains(queueName)
if (existed) {
return
}
val sendingdata = queuesNoData.getOrElse(queueName, null)
if (sendingdata != null) {
queuesNoData.remove(queueName)
sendingdata.reset
queuesHasData.put(queueName, sendingdata)
return
}
queuesHasData.put(queueName, new LocalQueueSendingData(queueName))
}
/** Peeks the next record of one queue and starts sending it. Returns false
  * when the queue is empty. A record whose send() fails synchronously (bad
  * json) is committed, i.e. skipped, so it cannot wedge the queue. */
def checkAndSend(queueName: String, sendingdata: LocalQueueSendingData, queue: PersistQueue): Boolean = {
var hasData = true
try {
val idx = queue.get(0) // no wait
if (idx == -1) { // no data
hasData = false
} else {
val json = queue.getString(idx)
sendingdata.json = json
sendingdata.idx = idx
val ok = send(sendingdata, null)
if (!ok) {
queue.commit(idx)
sendingdata.reset
}
}
} catch {
case e: Exception =>
log.error("exception in sending localqueue data {}", e)
}
hasData
}
/** One pass of the send thread: run handed-over runnables, register newly
  * touched queues, then kick off a send for every queue that has data and no
  * request in flight; queues found empty are demoted to the no-data set. */
def sendDataInternal() {
while (!waitingRunnableList.isEmpty()) {
val runnable = waitingRunnableList.poll()
try {
runnable.run()
} catch {
case e: Throwable =>
}
}
while (!waitingQueueNameList.isEmpty()) {
val queueName = waitingQueueNameList.poll()
addQueueName(queueName)
}
val removeList = new ArrayBuffer[String]()
for ((queueName, sendingdata) <- queuesHasData if sendingdata.requestId == null) { // not sending data
val queue = persistQueueManager.getQueue(queueName)
if (queue == null) {
removeList += queueName
} else {
val hasData = checkAndSend(queueName, sendingdata, queue)
if (!hasData)
removeList += queueName
}
}
removeNoDataQueues(removeList)
}
/** Moves the listed queues from the has-data set to the no-data set,
  * resetting their send state. Send-thread only. */
def removeNoDataQueues(removeList: ArrayBuffer[String]) {
for (queueName <- removeList) {
val sendingdata = queuesHasData.getOrElse(queueName, null)
queuesHasData.remove(queueName)
sendingdata.reset
queuesNoData.put(queueName, sendingdata)
}
}
/** Re-sends the current record with an incremented attempt counter. */
def retry(sendingdata: LocalQueueSendingData) {
sendingdata.sendCount += 1
send(sendingdata)
}
/** Removes the record at idx from the named persistent queue; commit failures
  * are logged and swallowed (the record would then be re-delivered). */
def commit(queueName: String, idx: Long) {
val queue = persistQueueManager.getQueue(queueName)
if (queue == null) {
return
} else {
try {
queue.commit(idx)
} catch {
case e: Exception =>
log.error("exception in commit localqueue data {}", e.getMessage)
}
}
}
/** Forwards the current record to receiverServiceId as a new Request.
  * Decorates the body with attempt metadata (x_sendCount, x_isLastSend,
  * x_maxSendTimes, x_sendTimeUsed) and registers the requestId so the reply
  * can be matched in onReceiveResponse. Returns false only when the stored
  * json is unusable; returns true without sending once shutdown has begun. */
def send(sendingdata: LocalQueueSendingData, generatedRequestId: String = null): Boolean = {
//println("send called,idx="+sendingdata.idx)
if (beforeCloseFlag.get()) {
return true
}
val body = jsonToBody(sendingdata.json)
if (body == null) return false
val msgId = body.i("X-MSGID")
if (msgId <= 0) {
log.error("X-MSGID not found or not valid in json " + sendingdata.json)
return false
}
body.remove("X-MSGID")
val maxSendTimes = getMaxSendTimes(msgId)
body.put("x_sendCount", sendingdata.sendCount)
body.put("x_isLastSend", if (sendingdata.sendCount == maxSendTimes) 1 else 0)
body.put("x_maxSendTimes", maxSendTimes)
body.put("x_sendTimeUsed", System.currentTimeMillis - sendingdata.createTime)
var requestId = generatedRequestId
if (requestId == null) {
requestId = "LQ" + RequestIdGenerator.nextId()
}
sendingdata.requestId = requestId
// Register before sending so the reply can always be correlated.
requestIdMap.put(requestId, sendingdata)
val req = new Request(
requestId,
"localqueue:0",
sequence.getAndIncrement(),
1,
receiverServiceId,
msgId,
new HashMapStringAny(),
body,
this)
router.send(req)
true
}
/** Parses a stored json record back into a flat string map; every value is
  * read via asText. Returns null (with a log) for anything unparseable. */
def jsonToBody(json: String): HashMapStringAny = {
val body = new HashMapStringAny()
try {
if (!json.startsWith("{")) {
log.error("not a valid json, json=" + json)
return null
}
val valueTree = mapper.readTree(json)
val names = valueTree.fieldNames
while (names.hasNext) {
val name = names.next()
body.put(name, valueTree.get(name).asText)
}
} catch {
case e: Throwable =>
log.error("not a valid json " + json)
return null
}
body
}
/** Serializes a request body (plus its msgId) for storage on disk. */
def requestToJson(req: Request): String = {
bodyToJson(req.body, req.msgId)
}
/** Writes the body as one flat JSON object. The msgId is embedded under the
  * reserved key X-MSGID; the routing field "queueName" is excluded. Values
  * that are neither String nor Int are emitted via toString as strings. */
def bodyToJson(body: HashMapStringAny, msgId: Int): String = {
val writer = new StringWriter()
val jsonGenerator = jsonFactory.createGenerator(writer)
jsonGenerator.writeStartObject()
jsonGenerator.writeNumberField("X-MSGID", msgId)
for ((key, value) <- body if key != "queueName") {
value match {
case s: String =>
jsonGenerator.writeStringField(key, s)
case i: Int =>
jsonGenerator.writeNumberField(key, i)
case _ =>
jsonGenerator.writeStringField(key, value.toString)
}
}
jsonGenerator.writeEndObject()
jsonGenerator.close()
writer.toString()
}
/** Appends one serialized record to the named persistent queue and wakes the
  * send thread. Maintains the hasIOException selfcheck flag; returns whether
  * the record was durably stored. */
def saveToQueue(queueName: String, s: String): Boolean = {
try {
val queue = persistQueueManager.getQueue(queueName)
queue.put(s)
waitingQueueNameList.offer(queueName)
wakeUpSendThread()
hasIOException.set(false)
true
} catch {
case e: Exception =>
// Fix: log the exception itself -- previously only the payload was
// logged, hiding the root cause of the IO failure.
log.error("cannot save data to local queue data={}", s, e)
hasIOException.set(true)
false
}
}
}
| bruceran/scalabpe | src/scalabpe/plugin/actor_localqueue.scala | Scala | apache-2.0 | 22,543 |
package org.openjdk.jmh.samples
import java.net.{URL, URLClassLoader}
import java.util.{HashMap, Map, TreeMap}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
object JMHSample_35_Profilers {
/*
* This sample serves as the profiler overview.
*
* JMH has a few very handy profilers that help to understand your benchmarks. While
* these profilers are not the substitute for full-fledged external profilers, in many
* cases, these are handy to quickly dig into the benchmark behavior. When you are
* doing many cycles of tuning up the benchmark code itself, it is important to have
* a quick turnaround for the results.
*
* Use -lprof to list the profilers. There are quite a few profilers, and this sample
* would expand on a handful of most useful ones. Many profilers have their own options,
* usually accessible via -prof <profiler-name>:help.
*
* Since profilers are reporting on different things, it is hard to construct a single
* benchmark sample that will show all profilers in action. Therefore, we have a couple
* of benchmarks in this sample.
*/
@State(Scope.Thread)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
// Benchmark comparing HashMap vs TreeMap lookups over keys 1..255; the map
// implementation is selected by the JMH @Param, so both variants run.
class Maps {
private var map: Map[Integer, Integer] = _
@Param(Array("hashmap", "treemap"))
private var `type`: String = _
private var begin: Int = _
private var end: Int = _
@Setup
def setup() {
// Instantiate the map named by the @Param value; fail fast on anything else.
if (`type` == "hashmap") {
map = new HashMap[Integer, Integer]()
} else if (`type` == "treemap") {
map = new TreeMap[Integer, Integer]()
} else {
throw new IllegalStateException("Unknown type: " + `type`)
}
begin = 1
end = 256
for (i <- begin until end) {
map.put(i, i)
}
}
@Benchmark
def test(bh: Blackhole) {
// Blackhole.consume prevents the JIT from eliding the lookups.
for (i <- begin until end) {
bh.consume(map.get(i))
}
}
}
// Holds the raw class-file bytes of a minimal class "X" (an empty class with
// only a default constructor), pre-compiled so each XLoader can define it
// without touching the file system.
object XLoader {
val X_BYTECODE = Array(0xCA, 0xFE, 0xBA, 0xBE, 0x00, 0x00, 0x00, 0x34, 0x00, 0x0D,
0x0A, 0x00, 0x03, 0x00, 0x0A, 0x07, 0x00, 0x0B, 0x07, 0x00, 0x0C, 0x01, 0x00, 0x06, 0x3C, 0x69, 0x6E, 0x69,
0x74, 0x3E, 0x01, 0x00, 0x03, 0x28, 0x29, 0x56, 0x01, 0x00, 0x04, 0x43, 0x6F, 0x64, 0x65, 0x01, 0x00, 0x0F,
0x4C, 0x69, 0x6E, 0x65, 0x4E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6C, 0x65, 0x01, 0x00, 0x0A,
0x53, 0x6F, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6C, 0x65, 0x01, 0x00, 0x06, 0x58, 0x2E, 0x6A, 0x61, 0x76,
0x61, 0x0C, 0x00, 0x04, 0x00, 0x05, 0x01, 0x00, 0x01, 0x58, 0x01, 0x00, 0x10, 0x6A, 0x61, 0x76, 0x61, 0x2F,
0x6C, 0x61, 0x6E, 0x67, 0x2F, 0x4F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x00, 0x20, 0x00, 0x02, 0x00, 0x03, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x01, 0x00, 0x06, 0x00, 0x00, 0x00,
0x1D, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x2A, 0xB7, 0x00, 0x01, 0xB1, 0x00, 0x00,
0x00, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x08,
0x00, 0x00, 0x00, 0x02, 0x00, 0x09).map(_.toByte)
}
// Class loader that defines "X" from the embedded bytes on every findClass
// call, so each load in the benchmark produces a fresh class.
class XLoader extends URLClassLoader(Array.ofDim[URL](0), ClassLoader.getSystemClassLoader) {
protected override def findClass(name: String): Class[_] = {
defineClass(name, XLoader.X_BYTECODE, 0, XLoader.X_BYTECODE.length)
}
}
@State(Scope.Thread)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
// Benchmark that measures class loading: a fresh XLoader per call means "X"
// is defined anew every time (useful with class-loading profilers).
class Classy {
@Benchmark
def load(): Class[_] = Class.forName("X", true, new XLoader())
}
@State(Scope.Benchmark)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(1)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
// Benchmark of a contended AtomicLong increment; @State(Scope.Benchmark)
// shares the counter across all benchmark threads.
class Atomic {
private var n: AtomicLong = _
@Setup
def setup() {
n = new AtomicLong()
}
@Benchmark
def test(): Long = n.incrementAndGet()
}
}
| bantonsson/sbt-jmh | src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_35_Profilers.scala | Scala | apache-2.0 | 4,419 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.coders
import org.apache.avro.specific.SpecificRecordBase
import scala.reflect.macros.blackbox
private[coders] object AvroCoderMacros {
/** Generate a coder which does not serialize the schema and relies exclusively on types. */
def staticInvokeCoder[T <: SpecificRecordBase: c.WeakTypeTag](c: blackbox.Context): c.Tree = {
import c.universe._
val wtt = weakTypeOf[T]
val companioned = wtt.typeSymbol
q"""
_root_.com.spotify.scio.coders.Coder.beam(
_root_.org.apache.beam.sdk.coders.AvroCoder.of[$companioned](
classOf[$companioned],
new $companioned().getSchema,
true
)
)
"""
}
}
| spotify/scio | scio-macros/src/main/scala/com/spotify/scio/coders/AvroCoderMacros.scala | Scala | apache-2.0 | 1,281 |
package com.versioneye
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
/**
 * Java-bean style representation of the project JSON response returned by the
 * VersionEye API. Jackson populates it through the setters; unknown JSON
 * attributes are ignored.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
class ProjectJsonResponse {

  // Backing fields; names mirror the JSON attribute names of the API payload.
  private var name: String = null
  private var id: String = null
  private var dep_number: Integer = null
  private var out_number: Integer = null
  private var licenses_red: Integer = 0
  private var licenses_unknown: Integer = 0

  /** Number of dependencies with a red (violating) license. */
  def getLicenses_red: Integer = licenses_red

  def setLicenses_red(licenses_red: Integer) {
    this.licenses_red = licenses_red
  }

  /** Number of dependencies with an unknown license. */
  def getLicenses_unknown: Integer = licenses_unknown

  def setLicenses_unknown(licenses_unknown: Integer) {
    this.licenses_unknown = licenses_unknown
  }

  /** Project name. */
  def getName: String = name

  def setName(name: String) {
    this.name = name
  }

  /** Project id assigned by VersionEye. */
  def getId: String = id

  def setId(id: String) {
    this.id = id
  }

  /** Total number of dependencies. */
  def getDep_number: Integer = dep_number

  def setDep_number(dep_number: Integer) {
    this.dep_number = dep_number
  }

  /** Number of outdated dependencies. */
  def getOut_number: Integer = out_number

  def setOut_number(out_number: Integer) {
    this.out_number = out_number
  }
}
| mp911de/versioneye_sbt_plugin | src/main/scala/com/versioneye/ProjectJsonResponse.scala | Scala | mit | 1,279 |
/*
* Copyright (C) 2012 Lalit Pant <pant.lalit@gmail.com>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.livecoding
import java.util.regex.Pattern
import javax.swing.JLabel
import javax.swing.JSlider
import javax.swing.JTextField
import javax.swing.JToggleButton
import javax.swing.event.ChangeEvent
import javax.swing.event.ChangeListener
import javax.swing.text.Document
import net.kogics.kojo.util.Utils
import org.netbeans.api.lexer.TokenHierarchy;
// import org.netbeans.lib.editor.util.swing.DocumentUtilities
import org.netbeans.modules.scala.core.lexer.ScalaTokenId;
// Live-coding manipulator for integer literals: when the user hovers/clicks an
// int literal in the editor, shows a slider popup that rewrites the literal in
// the document and re-runs the code as the slider moves.
// NOTE(review): `target`, `targetStart`, `targetEnd`, `stepT`, `inSliderChange`,
// `close()`, `showPopup` and `simulateStepButtonClick` appear to be inherited
// from NumberManipulator — confirm against that class.
class IntManipulator(ctx: ManipulationContext) extends NumberManipulator(ctx) {
  // Matches a (possibly empty) run of digits; a leading '-' is handled
  // separately by peeking at the previous lexer token.
  val IntPattern = Pattern.compile("\\\\d*")

  // Returns true when the token at `offset` is an integer literal that can be
  // manipulated. As a side effect records target/targetStart/targetEnd.
  def isHyperlinkPoint(doc: Document, offset: Int): Boolean = {
    // val pre = DocumentUtilities.getParagraphRootElement(doc)
    // val lineIndex = pre.getElementIndex(offset)
    // val lineElem = pre.getElement(lineIndex);
    try {
      val hi = TokenHierarchy.get(doc);
      val ts = hi.tokenSequence(ScalaTokenId.language);
      if (ts != null) {
        ts.move(offset);
        ts.moveNext()
        val tok = ts.token()
        var numOffset = ts.offset()
        val possibleNumber = tok.text().toString()
        val m = IntPattern.matcher(possibleNumber)
        if (m.matches()) {
          ts.movePrevious()
          val tokp = ts.token()
          if (tokp.text.toString == "-") {
            // Fold a preceding unary minus into the manipulation target so the
            // whole signed literal is replaced.
            numOffset = ts.offset()
            target = "-" + possibleNumber
          }
          else {
            target = possibleNumber
          }
          targetStart = numOffset
          targetEnd = targetStart + target.length();
          return true;
        }
      }
      return false;
    } catch {
      case t: Throwable => false
    }
  }

  // Span of the literal last identified by isHyperlinkPoint.
  def getHyperlinkSpan(doc: Document, offset: Int): Array[Int] = {
    Array(targetStart, targetEnd)
  }

  def activate(doc: Document, offset: Int) {
    activate(doc, offset, target, targetStart)
  }

  // Builds the slider popup for the literal `target0` starting at `targetStart`
  // and wires up live document rewriting plus re-execution on value change.
  def activate(doc: Document, offset: Int, target0: String, targetStart: Int) = Utils.safeProcess {
    close()
    ctx.addManipulator(this)
    var target = target0
    var ncenter = target0.toInt
    var ntarget = ncenter
    // Step size: roughly 20% of the current magnitude, but never less than 1.
    var delta = math.max(math.floor(ntarget.abs * 2.0 / 10).toInt, 1)
    var oldDelta = delta
    val slider = new JSlider();
    val leftLabel = new JLabel
    val rightLabel = new JLabel
    // The slider has positions 0..18 centered at 9; each tick moves `delta`.
    def slider2num(n: Int): Int = {
      ncenter + (n-9) * delta
    }
    def num2slider(n: Double): Int = {
      9 + math.round((n - ncenter) / delta).toInt
    }
    // Re-centers the slider range around `around` with step `delta0` and
    // refreshes the min/max labels.
    def reConfigSlider(around: Int, delta0: Int, zoomB: JToggleButton) {
      ncenter = around
      delta = delta0
      slider.setMinimum(0)
      slider.setMaximum(18)
      slider.setValue(9)
      slider.setMajorTickSpacing(1)
      leftLabel.setText(slider2num(slider.getMinimum).toString)
      rightLabel.setText(slider2num(slider.getMaximum).toString)
    }
    slider.setValue(9)
    slider.setPaintTicks(true)
    var lastrunval = ntarget
    slider.addChangeListener(new ChangeListener {
      def stateChanged(e: ChangeEvent) = Utils.safeProcess {
        stepT.setText(delta.toString)
        val eslider = e.getSource.asInstanceOf[JSlider]
        val newnum = eslider.getValue()
        // Guard flag so the document-change listener ignores our own edit.
        inSliderChange = true
        doc.remove(targetStart, target.length())
        ntarget = slider2num(newnum)
        target = ntarget.toString
        doc.insertString(targetStart, target, null);
        inSliderChange = false
        if (!eslider.getValueIsAdjusting) {
          // drag over
          if (ctx.isRunningEnabled) {
            // Only re-run when the value actually changed since the last run.
            if (lastrunval != ntarget) {
              lastrunval = ntarget
              Utils.invokeLaterInSwingThread {
                ctx.runCode(doc.getText(0, doc.getLength))
              }
            }
          }
          else {
            // Running disabled: snap the slider back to the last run value.
            eslider.setValue(num2slider(lastrunval))
          }
          // Hitting either end of the range widens/steps the range.
          if (newnum == eslider.getMaximum || newnum == eslider.getMinimum) {
            simulateStepButtonClick()
          }
        }
      }
    })
    // Zoom toggle: step of 1 when selected, previous step when deselected.
    val zoomListener = { zoomB: JToggleButton =>
      val around = slider2num(slider.getValue)
      if (zoomB.isSelected) {
        oldDelta = delta
        reConfigSlider(around, 1, zoomB)
      }
      else {
        reConfigSlider(around, oldDelta, zoomB)
      }
      stepT.setText(delta.toString)
    }
    // Step text field: parse the requested step; show "Err" on bad input.
    val stepListener = { (stepT: JTextField, zoomB: JToggleButton) =>
      try {
        val step = stepT.getText.toInt
        val around = slider2num(slider.getValue)
        reConfigSlider(around, step, zoomB)
      }
      catch {
        case nfe: NumberFormatException =>
          stepT.setText("Err")
      }
    }
    showPopup(offset, leftLabel, slider, rightLabel, zoomListener, Some(stepListener))
  }
}
| vnkmr7620/kojo | KojoEnv/src/net/kogics/kojo/livecoding/IntManipulator.scala | Scala | gpl-3.0 | 5,358 |
package tddmicroexercises.tirepressuremonitoringsystem
class Alarm {
  // Inclusive-exclusive safe operating range in PSI: the alarm latches when a
  // reading is strictly below the low threshold or strictly above the high one.
  // (Spelling "Treshold" kept: these vals are part of the public interface.)
  val LowPressureTreshold: Double = 17
  val HighPressureTreshold: Double = 21

  val sensor = new Sensor()

  // Latched alarm state: once set it is never cleared by check().
  var alarmOn = false

  /** Reads the next pressure sample and latches the alarm if it is out of range. */
  def check(): Unit = {
    val psiPressureValue = sensor.popNextPressurePsiValue()
    if (psiPressureValue < LowPressureTreshold || psiPressureValue > HighPressureTreshold) {
      alarmOn = true
    }
  }

  /** Current latched alarm state. */
  def isAlarmOn(): Boolean = alarmOn
}
package sql
import org.apache.spark.{SparkContext, SparkConf}
object JSONSchemaInference {
  /**
   * Demonstrates Spark SQL's JSON schema inference on three sample files:
   * a single record, records with overlapping fields, and records with
   * scalar/structural type conflicts.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("JSONSchemaInference").setMaster("local[4]")
    val sc = new SparkContext(conf)
    try {
      val sqlContext = new org.apache.spark.sql.SQLContext(sc)

      // easy case -- one record
      val ex1 = sqlContext.read.json("src/main/resources/data/inference1.json")
      ex1.schema.printTreeString()
      ex1.registerTempTable("table1")
      println("simple query")
      sqlContext.sql("select b from table1").foreach(println)

      // two records, overlapping fields
      val ex2 = sqlContext.read.json("src/main/resources/data/inference2.json")
      ex2.schema.printTreeString()
      ex2.registerTempTable("table2")
      println("it's OK to reference a sometimes missing field")
      sqlContext.sql("select b from table2").foreach(println)
      println("it's OK to reach into a sometimes-missing record")
      sqlContext.sql("select g.h from table2").foreach(println)

      // two records, scalar and structural conflicts
      val ex3 = sqlContext.read.json("src/main/resources/data/inference3.json")
      ex3.schema.printTreeString()
      ex3.registerTempTable("table3")
      println("it's ok to query conflicting types but not reach inside them")
      // don't try to query g.h or g[1]
      sqlContext.sql("select g from table3").foreach(println)
    } finally {
      // Always release the local cluster's resources, even if a query throws.
      sc.stop()
    }
  }
}
| lucaagostini/LearningSpark | src/main/scala/sql/JSONSchemaInference.scala | Scala | mit | 1,394 |
package com.seanshubin.detangler.analysis
import com.seanshubin.detangler.model.Standalone
/**
 * Predicate over resource names: accepts `name` only when it is a ".class"
 * entry whose suffix-stripped name resolves to a known Standalone via the
 * supplied lookup function.
 */
class AcceptNameFunction(stringToStandaloneFunction: String => Option[Standalone]) extends (String => Boolean) {
  override def apply(name: String): Boolean = {
    val classSuffix = ".class"
    // Reuse classSuffix for both the check and the strip (the original
    // duplicated the literal via name.take(name.length - ".class".length)).
    name.endsWith(classSuffix) &&
      stringToStandaloneFunction(name.stripSuffix(classSuffix)).isDefined
  }
}
| SeanShubin/detangler | analysis/src/main/scala/com/seanshubin/detangler/analysis/AcceptNameFunction.scala | Scala | unlicense | 445 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.partest
import scala.language.experimental.macros
object Util {
  /**
   * `trace("".isEmpty)` will return `true` and as a side effect print the following to standard out.
   * {{{
   * trace> "".isEmpty
   * res: Boolean = true
   *
   * }}}
   *
   * An alternative to [[scala.tools.partest.ReplTest]] that avoids the inconvenience of embedding
   * test code in a string.
   */
  def trace[A](a: A): A = macro traceImpl[A]

  import scala.reflect.macros.blackbox.Context

  // Macro implementation: hand-builds a Block AST equivalent to
  //   { println(s"trace> <source>\nres: <type> = <value>\n"); <value> }
  // using the printed form of the tree and its statically known type.
  def traceImpl[A: c.WeakTypeTag](c: Context)(a: c.Expr[A]): c.Expr[A] = {
    import c.universe._
    import definitions._

    // xeno.by: reify shouldn't be used explicitly before the final release of 2.10.0,
    // because this impairs reflection refactorings
    //
    // val exprCode = c.literal(show(a.tree))
    // val exprType = c.literal(show(a.actualType))
    // reify {
    //   println(s"trace> ${exprCode.splice}\\nres: ${exprType.splice} = ${a.splice}\\n")
    //   a.splice
    // }
    c.Expr(Block(
      List(Apply(
        Select(Ident(PredefModule), TermName("println")),
        List(Apply(
          Select(Apply(
            Select(Ident(ScalaPackage), TermName("StringContext")),
            List(
              Literal(Constant("trace> ")),
              Literal(Constant("\\\\nres: ")),
              Literal(Constant(" = ")),
              Literal(Constant("\\\\n")))),
            TermName("s")),
          List(
            Literal(Constant(show(a.tree))),
            Literal(Constant(show(a.actualType))),
            a.tree))))),
      a.tree))
  }

  // Wraps an array in an IndexedSeq view whose toString reads "Array(...)",
  // recursing into nested arrays so they print prettily too.
  def prettyArray(a: Array[_]): collection.IndexedSeq[Any] = new collection.AbstractSeq[Any] with collection.IndexedSeq[Any] {
    def length = a.length
    def apply(idx: Int): Any = a(idx) match {
      case x: AnyRef if x.getClass.isArray => prettyArray(x.asInstanceOf[Array[_]])
      case x => x
    }
    override def className = "Array"
  }

  // Adds a `deep` extension to arrays, delegating to prettyArray.
  implicit class ArrayDeep(val a: Array[_]) extends AnyVal {
    def deep: collection.IndexedSeq[Any] = prettyArray(a)
  }
}
| martijnhoekstra/scala | src/partest/scala/tools/partest/Util.scala | Scala | apache-2.0 | 2,344 |
package chapter07
object ExceptionHandling {
  /**
   * Reads an integer from stdin and prints its half; throws a RuntimeException
   * for odd input to demonstrate exception propagation from an if-expression.
   */
  def main(args: Array[String]): Unit = {
    // Predef.readInt is deprecated (and removed in newer Scala versions);
    // scala.io.StdIn is the supported replacement.
    import scala.io.StdIn
    println("Enter a number :")
    val n = StdIn.readInt()
    val half =
      if (n % 2 == 0) n / 2
      else throw new RuntimeException("input must be Even")
    println("half is : " + half)
  }
}
| aakashmathai/ScalaTutorial | src/main/scala/chapter07/ExceptionHandling.scala | Scala | apache-2.0 | 284 |
/**
* Copyright 2013, 2016, 2018 Gianluca Amato <gianluca.amato@unich.it>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of a
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.ui.gui
import java.awt.event.InputEvent
import java.awt.event.KeyEvent
import java.io.File
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
import scala.collection.JavaConverters._
import scala.swing._
import scala.swing.event.ActionEvent
import scala.swing.event.EditDone
import scala.swing.event.SelectionChanged
import scala.util.Try
import it.unich.jandom._
import it.unich.jandom.targets.Parameters
import it.unich.jandom.targets.jvmsoot._
import it.unich.jandom.ui.OutputInterface
import javax.swing.KeyStroke
import soot.Scene
import soot.SootMethod
/**
* This is the pane used to select the class and method to analyze for
* the Soot Baf and Jimple targets.
*
* @author Gianluca Amato
*
*/
class SootEditorPane(val frame: MainFrame) extends BorderPanel with TargetPane {
  // Singleton Soot scene; basic classes must be loaded before any class lookup.
  private val sootScene = Scene.v()
  sootScene.loadBasicClasses()
  // Read-only pane showing the Baf/Jimple rendering of the selected method.
  private val editorPane = new EditorPane
  editorPane.editable = false
  private val classPathField = new TextField(new File("examples/Java").getCanonicalPath)
  private val classComboBox = new ComboBox(Seq[String]())
  private val methodComboBox = new ComboBox(Seq[SootMethod]())
  // Intermediate-representation choice: Baf (bytecode-like) vs Jimple (default).
  private val radioBaf = new RadioButton("Baf")
  radioBaf.tooltip = OutputInterface.getRadioBafTip
  private val radioJimple = new RadioButton("Jimple")
  radioJimple.tooltip = OutputInterface.getRadioJimpleTip
  private val typeGroup = new ButtonGroup(radioBaf, radioJimple)
  typeGroup.select(radioJimple)
  // Analysis-domain choice: numerical (default) vs object.
  private val radioNumerical = new RadioButton("Numerical")
  radioNumerical.tooltip = OutputInterface.getRadioNumericalTip
  private val radioObject = new RadioButton("Object")
  radioObject.tooltip = OutputInterface.getRadioObjectTip
  private val anGroup = new ButtonGroup(radioNumerical, radioObject)
  anGroup.select(radioNumerical)
  // CFG for the currently selected method, or None when nothing is selected.
  var optMethod: Option[SootCFG[_, _]] = None
  // Top control strip: three labeled rows (classpath / class / method) plus a
  // row of radio buttons, laid out with GridBag constraints reused in sequence.
  val controls: GridBagPanel = new GridBagPanel {
    val c = new Constraints
    c.weightx = 0
    c.gridx = 0
    c.gridy = 0
    val cplabel = new Label("ClassPath: ")
    cplabel.tooltip = OutputInterface.getClassPathTip
    layout(cplabel) = c
    c.gridy = 1
    val clabel = new Label("Class: ")
    clabel.tooltip = OutputInterface.getClassTip
    layout(clabel) = c
    c.gridy = 2
    val mlabel = new Label("Method: ")
    mlabel.tooltip = OutputInterface.getMethodTip
    layout(mlabel) = c
    c.fill = GridBagPanel.Fill.Horizontal
    c.weightx = 80
    c.gridx = 1
    c.gridy = 0
    layout(classPathField) = c
    c.gridy = 1
    layout(classComboBox) = c
    c.gridy = 2
    layout(methodComboBox) = c
    c.gridy = 3
    c.gridx = 0
    c.gridwidth = 2
    val horPanel: BoxPanel = new BoxPanel(Orientation.Horizontal) {
      val tlabel = new Label("IR type: ")
      tlabel.tooltip = OutputInterface.getIRTypeTip
      val anlabel = new Label("Analysis type: ")
      anlabel.tooltip = OutputInterface.getAnalysisTypeTip
      contents += Swing.HGlue += tlabel += radioBaf += radioJimple +=
        Swing.HStrut(100) +=
        anlabel += radioNumerical += radioObject +=
        Swing.HGlue
    }
    layout(horPanel) = c
  }
  layout(new ScrollPane(editorPane)) = BorderPanel.Position.Center
  layout(controls) = BorderPanel.Position.North
  // Event wiring: classpath edit -> rebuild class list -> rebuild method list
  // -> rebuild the displayed CFG. Each case cascades by publishing the next event.
  listenTo(classPathField, classComboBox.selection, methodComboBox.selection, radioBaf, radioJimple)
  reactions += {
    case EditDone(`classPathField`) =>
      sootScene.setSootClassPath(sootScene.defaultClassPath + java.io.File.pathSeparator + classPathField.text)
      val rootPath = Paths.get(classPathField.text)
      val fileProcessor = new ClassFileVisitor(rootPath)
      if (Try(Files.walkFileTree(rootPath, fileProcessor)).isSuccess) {
        val comboModel = ComboBox.newConstantModel(fileProcessor.classNameList)
        classComboBox.peer.setModel(comboModel)
        publish(SelectionChanged(classComboBox))
        classPathField.foreground = java.awt.Color.black
      } else
        // Red text signals an unreadable classpath; the old lists are kept.
        classPathField.foreground = java.awt.Color.red
    case SelectionChanged(`classComboBox`) =>
      val klass = sootScene.loadClassAndSupport(classComboBox.selection.item)
      val methodList = klass.getMethods.asScala
      // these two lines are a mess because Scala Swing does not play well with Java 1.7
      val comboModel = ComboBox.newConstantModel(methodList)
      methodComboBox.peer.asInstanceOf[javax.swing.JComboBox[SootMethod]].setModel(comboModel)
      publish(SelectionChanged(methodComboBox))
    case SelectionChanged(`methodComboBox`) | ActionEvent(`radioBaf`) | ActionEvent(`radioJimple`) =>
      val sootMethod = methodComboBox.selection.item
      optMethod = typeGroup.selected match {
        case Some(`radioBaf`) => Some(new BafMethod(sootMethod, false))
        case Some(`radioJimple`) => Some(new JimpleMethod(sootMethod, false))
        case _ => None
      }
      editorPane.text = optMethod match {
        case None => ""
        case Some(m) => m.toString
      }
  }
  // Kick off the initial population of the class list from the default classpath.
  publish(EditDone(classPathField))
  // Walks a classpath directory tree collecting fully qualified class names
  // from .class and .java files, sorted and de-duplicated.
  class ClassFileVisitor(rootPath: Path) extends SimpleFileVisitor[Path] {
    private val privateClassNamesList = scala.collection.mutable.SortedSet[String]()
    def classNameList: Seq[String] = privateClassNamesList.toSeq
    override def visitFile(aFile: Path, aAttrs: BasicFileAttributes): FileVisitResult = {
      val relativePath = rootPath.relativize(aFile).asScala
      // Join path segments with '.' to form the fully qualified name.
      val className = (relativePath.head.toString /: relativePath.tail) (_ + "." + _.toString)
      if (className endsWith ".class")
        privateClassNamesList += className stripSuffix ".class"
      else if (className endsWith ".java")
        privateClassNamesList += className stripSuffix ".java"
      FileVisitResult.CONTINUE
    }
  }
  // Ctrl+O: pick a new classpath directory and re-trigger the class scan.
  val openAction: Action = new Action("Change classpath...") {
    accelerator = Some(KeyStroke.getKeyStroke(KeyEvent.VK_O, InputEvent.CTRL_DOWN_MASK))
    def apply() {
      val fileChooser = new FileChooser(new File(classPathField.text))
      fileChooser.title = "Select classpath"
      fileChooser.fileSelectionMode = FileChooser.SelectionMode.DirectoriesOnly
      val returnVal = fileChooser.showOpenDialog(SootEditorPane.this)
      if (returnVal != FileChooser.Result.Approve) return
      val file = fileChooser.selectedFile
      classPathField.text = file.getPath
      publish(EditDone(classPathField))
    }
  }
  // Nothing to save in this pane, so saving always "succeeds".
  def ensureSaved() = true
  // Runs the selected analysis on the current method and returns the annotated
  // program text, or None on failure (errors are reported via dialogs).
  def analyze: Option[String] = {
    optMethod match {
      case Some(method) =>
        try {
          val numericalDomain = frame.parametersPane.selectedNumericalDomain
          val objectDomain = frame.parametersPane.selectedObjectDomain
          val om = new SootObjectModel(sootScene)
          val sootDomain = if (anGroup.selected.contains(radioNumerical))
            new SootFrameNumericalDomain(numericalDomain)
          else
            new SootFrameObjectDomain(objectDomain(om))
          // The two branches mirror each other; the cast is safe because
          // optMethod was built from the same radio selection in reactions.
          typeGroup.selected match {
            case Some(`radioBaf`) =>
              val bafMethod = method.asInstanceOf[BafMethod]
              val params = new Parameters[BafMethod] {
                val domain: BafMethod#DomainBase = sootDomain
              }
              frame.parametersPane.setParameters(params)
              val inte = new TopSootInterpretation[BafMethod, params.type](params)
              params.interpretation = Some(inte)
              val ann = bafMethod.analyze(params)
              Some(bafMethod.mkString(params)(ann))
            case Some(`radioJimple`) =>
              val jimpleMethod = method.asInstanceOf[JimpleMethod]
              val params = new Parameters[JimpleMethod] {
                val domain: JimpleMethod#DomainBase = sootDomain
              }
              frame.parametersPane.setParameters(params)
              val inte = new TopSootInterpretation[JimpleMethod, params.type](params)
              params.interpretation = Some(inte)
              val ann = jimpleMethod.analyze(params)
              Some(jimpleMethod.mkString(params)(ann))
            case _ => None
          }
        } catch {
          case e: UnsupportedSootUnitException =>
            Dialog.showMessage(SootEditorPane.this, e.getMessage + " : " + e.unit, "Error in analysing bytecode", Dialog.Message.Error)
            e.printStackTrace()
            None
          case e: Exception =>
            Dialog.showMessage(SootEditorPane.this, e.getMessage, "Error in parsing source code", Dialog.Message.Error)
            e.printStackTrace()
            None
        }
      case _ => None
    }
  }
  val fileMenuItems: Seq[MenuItem] = Seq(new MenuItem(openAction))
  val editMenuItems: Seq[Nothing] = Seq()
  // Called when this pane becomes the active target: update the window title.
  def select() {
    val newTitle = softwareName + " - Soot"
    frame.title = newTitle
  }
}
| jandom-devel/Jandom | core/src/main/scala/it/unich/jandom/ui/gui/SootEditorPane.scala | Scala | lgpl-3.0 | 9,543 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId
/**
* Low-level task scheduler interface, currently implemented exclusively by
* [[org.apache.spark.scheduler.TaskSchedulerImpl]].
* This interface allows plugging in different task schedulers. Each TaskScheduler schedules tasks
* for a single SparkContext. These schedulers get sets of tasks submitted to them from the
* DAGScheduler for each stage, and are responsible for sending the tasks to the cluster, running
* them, retrying if there are failures, and mitigating stragglers. They return events to the
* DAGScheduler.
*/
private[spark] trait TaskScheduler {

  // Fallback application ID, used when the cluster manager does not supply one;
  // timestamp-based so concurrent applications get distinct IDs.
  private val appId = "spark-application-" + System.currentTimeMillis

  // Root of the schedulable-entity tree (pools and task-set managers).
  def rootPool: Pool

  // Scheduling discipline (e.g. FIFO or FAIR) used by this scheduler.
  def schedulingMode: SchedulingMode

  // Start the scheduler (connect to the cluster, begin accepting task sets).
  def start(): Unit

  // Invoked after system has successfully initialized (typically in spark context).
  // Yarn uses this to bootstrap allocation of resources based on preferred locations,
  // wait for slave registrations, etc.
  def postStartHook() { }

  // Disconnect from the cluster.
  def stop(): Unit

  // Submit a sequence of tasks to run.
  def submitTasks(taskSet: TaskSet): Unit

  // Cancel a stage.
  def cancelTasks(stageId: Int, interruptThread: Boolean)

  // Set the DAG scheduler for upcalls. This is guaranteed to be set before submitTasks is called.
  def setDAGScheduler(dagScheduler: DAGScheduler): Unit

  // Get the default level of parallelism to use in the cluster, as a hint for sizing jobs.
  def defaultParallelism(): Int

  /**
   * Update metrics for in-progress tasks and let the master know that the BlockManager is still
   * alive. Return true if the driver knows about the given block manager. Otherwise, return false,
   * indicating that the block manager should re-register.
   */
  def executorHeartbeatReceived(execId: String, taskMetrics: Array[(Long, TaskMetrics)],
    blockManagerId: BlockManagerId): Boolean

  /**
   * Get an application ID associated with the job.
   *
   * @return An application ID
   */
  def applicationId(): String = appId

  /**
   * Process a lost executor
   */
  def executorLost(executorId: String, reason: ExecutorLossReason): Unit

  /**
   * Get an application's attempt ID associated with the job.
   *
   * @return An application's Attempt ID
   */
  def applicationAttemptId(): Option[String]
}
| practice-vishnoi/dev-spark-1 | core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala | Scala | apache-2.0 | 3,284 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.